Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count ⌀ | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 4 – 996k |
| avg_line_length | float64 | 1.33 – 58.2k |
| max_line_length | int64 | 2 – 323k |
| alphanum_fraction | float64 | 0 – 0.97 |
| content_no_comment | string | length 0 – 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
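The schema above is what the Hugging Face dataset viewer renders. As a point of reference, a minimal sketch of reading such a dataset with the `datasets` library, assuming the data is published on the Hub; the dataset ID below is a placeholder, not the real one:

```python
from datasets import load_dataset

# Placeholder dataset ID -- substitute the actual Hub ID for this corpus.
ds = load_dataset("someuser/python-code-comments", split="train", streaming=True)

for row in ds.take(1):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print(row["content"][:200])           # first 200 chars of the source file
    print(row["is_sharp_comment_removed"])
```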
---
hexsha: 790a6846f5a283e248a6ffbd74514545205cb849 · size: 2,354 · ext: py · lang: Python
path: src/CalculatorTest.py · repo: jimishapatel/Calculator @ 7554a2342da9a98ab9739ddead7455f826631261 · licenses: ["MIT"]
(identical repo, path, and licenses across the stars, issues, and forks column groups)
stars: null · issues: null · forks: null · all event datetimes: null
content:
import unittest
from CsvReader import CsvReader
from Calculator import MyCalculator
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.calculator = MyCalculator()
def test_instantiate_calculator(self):
self.assertIsInstance(self.calculator, MyCalculator)
def test_addition(self):
test_data = CsvReader('src/csv/TestAddition.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.add(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_subtraction(self):
test_data = CsvReader('src/csv/TestSubtraction.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.subtract(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_multiplication(self):
test_data = CsvReader('src/csv/TestMultiplication.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.multiply(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_division(self):
test_data = CsvReader('src/csv/TestDivision.csv').data
for row in test_data:
result = float(row['Result'])
self.assertAlmostEqual(self.calculator.divide(row['Value 1'], row['Value 2']), result)
self.assertAlmostEqual(self.calculator.result, float(row['Result']))
def test_square(self):
test_data = CsvReader('src/csv/TestSquare.csv').data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.square(row['Value 1']), result)
self.assertEqual(self.calculator.result, int(row['Result']))
def test_square_root(self):
test_data = CsvReader('src/csv/TestSquareRoot.csv').data
for row in test_data:
result = float(row['Result'])
self.assertAlmostEqual(self.calculator.square_root(row['Value 1']), result)
self.assertAlmostEqual(self.calculator.result, float(row['Result']))
if __name__ == '__main__':
unittest.main()
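The tests above depend on CsvReader and MyCalculator, which live elsewhere in the repository and are not included in this record. As a point of reference, a minimal sketch of a compatible CsvReader, assuming each CSV has 'Value 1', 'Value 2', and 'Result' columns; this is a hypothetical implementation, not the repository's own:

```python
import csv

class CsvReader:
    """Load a CSV file into a list of row dicts keyed by column header (sketch)."""
    def __init__(self, path):
        with open(path, newline='') as f:
            self.data = [dict(row) for row in csv.DictReader(f)]
```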
avg_line_length: 40.586207 · max_line_length: 98 · alphanum_fraction: 0.649108
content_no_comment: identical to content (the file contains no # comments)
is_comment_constant_removed: true · is_sharp_comment_removed: true
---
hexsha: 790a687bfd81286b649201166ae2ebe38cb79b8a · size: 3,095 · ext: py · lang: Python
path: examples/rgbd_desk.py · repo: IshitaTakeshi/DVO @ 2c5a3db1db7e651bfaa7808bbf79a6c1c6a42fc5 · licenses: ["Apache-2.0"]
(identical repo, path, and licenses across the stars, issues, and forks column groups)
stars: 7 (2019-12-20T07:19:11.000Z – 2021-07-22T03:25:12.000Z) · issues: null · forks: null
content:
import csv
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
from skimage.color import rgb2gray
import numpy as np
from tqdm import tqdm
from tadataka import VisualOdometry, CameraParameters
from tadataka.rigid import exp_se3, log_se3
from tadataka.projection import warp
from tadataka.mapping import MapBuilder
from tadataka.quaternion import quaternion_to_rotation
from tadataka.datasets.tum_rgbd import TUMDataset, PoseSequence
from visualization.plot import plot
# dataset format is explained at
# https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats#intrinsic_camera_calibration_of_the_kinect
dataset_root = Path("datasets", "rgbd_dataset_freiburg1_desk")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg2_pioneer_360")
# dataset_root = Path("datasets", "rgbd_dataset_freiburg3_structure_texture_near")
def error(image_true, image_pred, mask):
return np.power(image_true[mask]-image_pred[mask], 2).mean()
def visualize_error_function(camera_parameters, I0, D0, I1, xi_pred):
def generate_error_curve(i, start, stop, n):
xi = np.copy(xi_pred)
vs = xi[i] + np.linspace(start, stop, n)
errors = []
for v in vs:
xi[i] = v
DG = exp_se3(xi)
estimated, mask = warp(camera_parameters, I1, D0, DG)
errors.append(error(I0, estimated, mask))
errors = np.array(errors)
return vs, errors
from matplotlib import pyplot as plt
fig = plt.figure()
for xi_index, ax_index in enumerate([1, 3, 5, 2, 4, 6]):
ax = fig.add_subplot(3, 2, ax_index)
vs, errors = generate_error_curve(xi_index,
start=-0.10, stop=0.10, n=101)
ax.set_title("Axis {}".format(xi_index+1))
ax.axvline(vs[np.argmin(errors)], label="ground truth")
ax.axvline(xi_pred[xi_index], color="red", label="prediction")
ax.legend()
ax.plot(vs, errors)
plt.show()
def main():
np.set_printoptions(suppress=True, precision=8, linewidth=1e8)
camera_parameters = CameraParameters(
focal_length=[525.0, 525.0],
offset=[319.5, 239.5]
)
dataset = TUMDataset(dataset_root)
G = np.eye(4)
frame0 = dataset.load_color(0)
sequence_pred = PoseSequence()
sequence_pred.add(frame0.timestamp_depth, G)
for i in tqdm(range(1, dataset.size)):
frame1 = dataset.load_color(i)
# TODO not necessary to convert the color of the same image twice
# we need to create a better interface
vo = VisualOdometry(camera_parameters,
rgb2gray(frame0.image), frame0.depth_map,
rgb2gray(frame1.image))
DG = vo.estimate_motion(n_coarse_to_fine=6)
G = np.dot(G, np.linalg.inv(DG))
sequence_pred.add(frame1.timestamp_depth, G)
frame0 = frame1
sequence_pred.save("poses.txt")
# TODO implement the following
# pointcloud = map_builder.export()
# export_pointcloud(pointcloud)
main()
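The accumulation step G = np.dot(G, np.linalg.inv(DG)) folds each per-frame motion estimate into a running global pose. A small self-contained numpy sketch of the same composition, assuming DG maps points from the previous frame into the current one; the exact convention depends on how estimate_motion defines its output:

```python
import numpy as np

def compose(G, DG):
    """Fold a relative transform DG into the running global pose G (sketch)."""
    return G @ np.linalg.inv(DG)

G = np.eye(4)                   # identity pose for the first frame
DG = np.eye(4)
DG[:3, 3] = [0.1, 0.0, 0.0]     # pure 0.1-unit translation along x
G = compose(G, DG)
print(G[:3, 3])                 # -> [-0.1  0.   0. ]
```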
avg_line_length: 29.47619 · max_line_length: 82 · alphanum_fraction: 0.668498
content_no_comment: the content above with its # comment lines stripped
is_comment_constant_removed: true · is_sharp_comment_removed: true
---
hexsha: 790a688592a18104373a0a646b965175738ee8ef · size: 16,174 · ext: py · lang: Python
path: train.py · repo: YanchengWang/RegNet-Search-PyTorch @ e15f2d33d5e2191ff22e65f5257693894156b4fd · licenses: ["MIT"]
(identical repo, path, and licenses across the stars, issues, and forks column groups)
stars: 310 (2020-05-14T23:03:42.000Z – 2022-03-31T11:35:47.000Z)
issues: 7 (2020-05-24T08:44:17.000Z – 2022-02-03T03:25:24.000Z)
forks: 43 (2020-05-14T23:03:48.000Z – 2022-03-16T15:33:55.000Z)
content:
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import time
import argparse
import importlib
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from torch.nn.parallel import DistributedDataParallel
import autotorch as at
import encoding
from encoding.nn import LabelSmoothing, NLLMultiLabelSmooth
from encoding.utils import (accuracy, AverageMeter, MixUpWrapper, LR_Scheduler, torch_dist_sum)
try:
import apex
from apex import amp
except ModuleNotFoundError:
    print('please install apex if using float16 (amp) training')
class Options():
def __init__(self):
# data settings
parser = argparse.ArgumentParser(description='Deep Encoding')
parser.add_argument('--dataset', type=str, default='imagenet',
help='training dataset (default: imagenet)')
parser.add_argument('--base-size', type=int, default=None,
help='base image size')
parser.add_argument('--crop-size', type=int, default=224,
help='crop image size')
parser.add_argument('--label-smoothing', type=float, default=0.0,
help='label-smoothing (default eta: 0.0)')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup (default eta: 0.0)')
parser.add_argument('--auto-policy', type=str, default=None,
help='path to auto augment policy')
parser.add_argument('--data-dir', type=str, default=os.path.expanduser('~/.encoding/data'),
help='data location for training')
# model params
#parser.add_argument('--model', type=str, default='resnet50',
# help='network model type (default: densenet)')
parser.add_argument('--arch', type=str, default='regnet',
help='network type (default: regnet)')
parser.add_argument('--config-file', type=str, required=True,
help='network node config file')
parser.add_argument('--last-gamma', action='store_true', default=False,
help='whether to init gamma of the last BN layer in \
each bottleneck to 0 (default: False)')
# training params
parser.add_argument('--amp', action='store_true',
default=False, help='using amp')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=120, metavar='N',
                            help='number of epochs to train (default: 120)')
parser.add_argument('--start_epoch', type=int, default=0,
                            metavar='N', help='the epoch number to start (default: 0)')
parser.add_argument('--workers', type=int, default=8,
metavar='N', help='dataloader threads')
# optimizer
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-scheduler', type=str, default='cos',
help='learning rate scheduler (default: cos)')
parser.add_argument('--warmup-epochs', type=int, default=0,
help='number of warmup epochs (default: 0)')
parser.add_argument('--momentum', type=float, default=0.9,
metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--wd', type=float, default=1e-4,
                            metavar='M', help='SGD weight decay (default: 1e-4)')
parser.add_argument('--no-bn-wd', action='store_true',
default=False, help='no bias decay')
# seed
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
        # checkpoint resuming
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--checkname', type=str, default='default',
help='set the checkpoint name')
# distributed
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
# evaluation option
        parser.add_argument('--eval', action='store_true', default=False,
help='evaluating')
parser.add_argument('--export', type=str, default=None,
                            help='path prefix for exporting the final weights (saved as <path>.pth)')
self.parser = parser
def parse(self):
args = self.parser.parse_args()
return args
def main():
args = Options().parse()
ngpus_per_node = torch.cuda.device_count()
args.world_size = ngpus_per_node * args.world_size
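    # linear scaling rule: grow the base learning rate with the total process count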
args.lr = args.lr * args.world_size
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
# global variable
best_pred = 0.0
acclist_train = []
acclist_val = []
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
args.rank = args.rank * ngpus_per_node + gpu
# model name for checkpoint
args.model = "{}-{}".format(args.arch, os.path.splitext(os.path.basename(args.config_file))[0])
if args.gpu == 0:
print('model:', args.model)
print('rank: {} / {}'.format(args.rank, args.world_size))
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank)
torch.cuda.set_device(args.gpu)
# init the args
global best_pred, acclist_train, acclist_val
if args.gpu == 0:
print(args)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# init dataloader
transform_train, transform_val = encoding.transforms.get_transform(
args.dataset, args.base_size, args.crop_size)
if args.auto_policy is not None:
print(f'Using auto_policy: {args.auto_policy}')
from augment import Augmentation
auto_policy = Augmentation(at.load(args.auto_policy))
transform_train.transforms.insert(0, auto_policy)
trainset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
transform=transform_train, train=True, download=True)
valset = encoding.datasets.get_dataset(args.dataset, root=args.data_dir,
transform=transform_val, train=False, download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=train_sampler)
val_sampler = torch.utils.data.distributed.DistributedSampler(valset, shuffle=False)
val_loader = torch.utils.data.DataLoader(
valset, batch_size=args.test_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler)
# init the model
arch = importlib.import_module('arch.' + args.arch)
model = arch.config_network(args.config_file)
if args.gpu == 0:
print(model)
if args.mixup > 0:
train_loader = MixUpWrapper(args.mixup, 1000, train_loader, args.gpu)
criterion = NLLMultiLabelSmooth(args.label_smoothing)
elif args.label_smoothing > 0.0:
criterion = LabelSmoothing(args.label_smoothing)
else:
criterion = nn.CrossEntropyLoss()
model.cuda(args.gpu)
criterion.cuda(args.gpu)
# criterion and optimizer
if args.no_bn_wd:
parameters = model.named_parameters()
param_dict = {}
for k, v in parameters:
param_dict[k] = v
bn_params = [v for n, v in param_dict.items() if ('bn' in n or 'bias' in n)]
rest_params = [v for n, v in param_dict.items() if not ('bn' in n or 'bias' in n)]
if args.gpu == 0:
print(" Weight decay NOT applied to BN parameters ")
print(f'len(parameters): {len(list(model.parameters()))} = {len(bn_params)} + {len(rest_params)}')
optimizer = torch.optim.SGD([{'params': bn_params, 'weight_decay': 0 },
{'params': rest_params, 'weight_decay': args.wd}],
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd)
else:
optimizer = torch.optim.SGD(model.parameters(),
lr=args.lr,
momentum=args.momentum,
weight_decay=args.wd)
if args.amp:
#optimizer = amp_handle.wrap_optimizer(optimizer)
model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
#from apex import amp
DDP = apex.parallel.DistributedDataParallel
model = DDP(model, delay_allreduce=True)
else:
DDP = DistributedDataParallel
model = DDP(model, device_ids=[args.gpu])
# check point
if args.resume is not None:
if os.path.isfile(args.resume):
if args.gpu == 0:
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch'] + 1 if args.start_epoch == 0 else args.start_epoch
best_pred = checkpoint['best_pred']
acclist_train = checkpoint['acclist_train']
acclist_val = checkpoint['acclist_val']
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.amp:
amp.load_state_dict(checkpoint['amp'])
if args.gpu == 0:
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
raise RuntimeError ("=> no resume checkpoint found at '{}'".\
format(args.resume))
scheduler = LR_Scheduler(args.lr_scheduler,
base_lr=args.lr,
num_epochs=args.epochs,
iters_per_epoch=len(train_loader),
warmup_epochs=args.warmup_epochs)
def train(epoch):
train_sampler.set_epoch(epoch)
model.train()
losses = AverageMeter()
top1 = AverageMeter()
global best_pred, acclist_train
tic = time.time()
for batch_idx, (data, target) in enumerate(train_loader):
scheduler(optimizer, batch_idx, epoch, best_pred)
if not args.mixup:
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
if args.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if not args.mixup:
acc1 = accuracy(output, target, topk=(1,))
top1.update(acc1[0], data.size(0))
losses.update(loss.item(), data.size(0))
if batch_idx % 100 == 0 and args.gpu == 0:
iter_per_sec = 100.0 / (time.time() - tic) if batch_idx != 0 else 1.0 / (time.time() - tic)
tic = time.time()
if args.mixup:
#print('Batch: %d| Loss: %.3f'%(batch_idx, losses.avg))
print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Train loss: {:.3f}'. \
format(epoch, batch_idx, iter_per_sec, losses.avg.item()))
else:
#print('Batch: %d| Loss: %.3f | Top1: %.3f'%(batch_idx, losses.avg, top1.avg))
print('Epoch: {}, Iter: {}, Speed: {:.3f} iter/sec, Top1: {:.3f}'. \
format(epoch, batch_idx, iter_per_sec, top1.avg.item()))
acclist_train += [top1.avg]
def validate(epoch):
model.eval()
top1 = AverageMeter()
top5 = AverageMeter()
global best_pred, acclist_train, acclist_val
is_best = False
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.cuda(args.gpu), target.cuda(args.gpu)
with torch.no_grad():
output = model(data)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
# sum all
sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count, top5.sum, top5.count)
if args.eval:
if args.gpu == 0:
top1_acc = sum(sum1) / sum(cnt1)
top5_acc = sum(sum5) / sum(cnt5)
print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
return
if args.gpu == 0:
top1_acc = sum(sum1) / sum(cnt1)
top5_acc = sum(sum5) / sum(cnt5)
print('Validation: Top1: %.3f | Top5: %.3f'%(top1_acc, top5_acc))
# save checkpoint
acclist_val += [top1_acc]
if top1_acc > best_pred:
best_pred = top1_acc
is_best = True
state_dict = {
'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
}
if args.amp:
state_dict['amp'] = amp.state_dict()
encoding.utils.save_checkpoint(state_dict, args=args, is_best=is_best)
if args.export:
if args.gpu == 0:
torch.save(model.module.state_dict(), args.export + '.pth')
return
if args.eval:
validate(args.start_epoch)
return
for epoch in range(args.start_epoch, args.epochs):
tic = time.time()
train(epoch)
if epoch % 10 == 0:# or epoch == args.epochs-1:
validate(epoch)
elapsed = time.time() - tic
if args.gpu == 0:
print(f'Epoch: {epoch}, Time cost: {elapsed}')
if args.gpu == 0:
encoding.utils.save_checkpoint({
'epoch': args.epochs-1,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'best_pred': best_pred,
'acclist_train':acclist_train,
'acclist_val':acclist_val,
}, args=args, is_best=False)
if __name__ == "__main__":
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
main()
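The --no-bn-wd branch above exempts BatchNorm and bias parameters from weight decay by matching substrings of parameter names. A self-contained sketch of the same idea on a toy model; the model and hyperparameters here are illustrative, not from this repository:

```python
import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3)   # parameters: conv.weight, conv.bias
        self.bn = nn.BatchNorm2d(8)      # parameters: bn.weight, bn.bias
    def forward(self, x):
        return self.bn(self.conv(x))

model = Toy()
decay, no_decay = [], []
for name, p in model.named_parameters():
    # same name test train.py uses: BN params and all biases skip weight decay
    (no_decay if ('bn' in name or 'bias' in name) else decay).append(p)

optimizer = torch.optim.SGD(
    [{'params': no_decay, 'weight_decay': 0.0},
     {'params': decay, 'weight_decay': 1e-4}],
    lr=0.1, momentum=0.9)
```

Note that the name test only works when BatchNorm modules are attributes whose names contain 'bn'; under nn.Sequential, the auto-generated numeric parameter names would defeat it.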
avg_line_length: 44.312329 · max_line_length: 110 · alphanum_fraction: 0.563992
content_no_comment: the content above with its # comment lines stripped
is_comment_constant_removed: true · is_sharp_comment_removed: true
---
hexsha: 790a68c19c86be4fe2046db8e11db87131ee7751 · size: 2,393 · ext: py · lang: Python
path: django/utils/version.py · licenses: ["PSF-2.0", "BSD-3-Clause"]
stars repo: andreip/django @ c61d1361d027a729d07d277879950ff133c19f4c · stars: 2 (2020-09-25T04:02:25.000Z – 2020-10-15T00:01:00.000Z)
issues repo: seanfagan/django @ 66bbde6819586cc3a75630e12e569dc8ae72f211 · issues: null
forks repo: seanfagan/django @ 66bbde6819586cc3a75630e12e569dc8ae72f211 · forks: 1 (single event, 2017-02-28T17:05:19.000Z)
content:
import datetime
import functools
import os
import subprocess
def get_version(version=None):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return main + sub
def get_main_version(version=None):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""
Return a tuple of the django version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from django import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@functools.lru_cache()
def get_git_changeset():
"""Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
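Worked examples of get_version applied to hand-built VERSION tuples, following the rules in the code above; the .dev timestamp is illustrative, since it depends on the git checkout:

```python
>>> get_version((2, 0, 0, 'rc', 1))      # micro == 0, so main is just 'X.Y'
'2.0rc1'
>>> get_version((2, 1, 3, 'final', 0))   # final release: full X.Y.Z, no suffix
'2.1.3'
>>> get_version((3, 0, 0, 'alpha', 0))   # pre-alpha: '.dev' + latest git timestamp
'3.0.dev20201015000100'
```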
avg_line_length: 29.9125 · max_line_length: 79 · alphanum_fraction: 0.64814
content_no_comment: the content above with its comments and docstrings stripped
is_comment_constant_removed: true · is_sharp_comment_removed: true
---
hexsha: 790a68e480d0b55f4a5a33a0466d228ee70d3a93 · size: 4,305 · ext: py · lang: Python
path: hierarchical-sc/corpus.py · repo: ne7ermore/deeping-flow @ 9414fa48139bac99824ae89cc45c6f59855fe7d4 · licenses: ["MIT"]
(identical repo, path, and licenses across the stars, issues, and forks column groups)
stars: 54 (2017-11-24T07:10:25.000Z – 2021-03-04T07:09:24.000Z)
issues: 9 (2018-05-30T08:40:24.000Z – 2020-03-04T14:12:15.000Z)
forks: 13 (2017-11-24T07:10:32.000Z – 2020-04-08T10:36:26.000Z)
content:
import os
import pickle
import math
import pandas as pd
from const import *
def middle_save(obj, inf):
    with open(inf, "wb") as f:
        pickle.dump(obj, f, True)
def middle_load(inf):
    with open(inf, "rb") as f:
        return pickle.load(f)
def word2idx(sents, word2idx):
return [[word2idx[w] if w in word2idx else UNK for w in s] for s in sents]
class Dictionary(object):
def __init__(self):
self.word2idx = {
WORD[PAD]: PAD,
WORD[UNK]: UNK,
WORD[BOS]: BOS,
WORD[EOS]: EOS
}
self.idx = len(self.word2idx)
def add(self, word):
if self.word2idx.get(word) is None:
self.word2idx[word] = self.idx
self.idx += 1
def __call__(self, sents, min_count):
words = [word for sent in sents for word in sent]
word_count = {w: 0 for w in set(words)}
for w in words:
word_count[w] += 1
ignored_word_count = 0
for word, count in word_count.items():
if count <= min_count:
ignored_word_count += 1
continue
self.add(word)
return ignored_word_count
def __len__(self):
return self.idx
def __str__(self):
return "%s(size = %d)".format(self.__class__.__name__, len(self.idx))
class Corpus(object):
def __init__(self, max_ori_len=128, max_sum_len=15, min_word_count=1):
self.dict = Dictionary()
self.max_ori_len = max_ori_len
self.max_sum_len = max_sum_len
self._min_word_count = min_word_count
self.parse_data("data/test.csv", False)
self.parse_data("data/train.csv")
self.save()
def parse_data(self, _file, is_train=True):
        def cut(x, out, ignore, max_len, is_summ):
            if isinstance(x, float) and math.isnan(x):
                if is_summ:
                    out.append([WORD[EOS]])
                else:
                    out.append([])
            else:
                x = x.split()
                if len(x) > max_len:
                    x = x[:max_len]
                    ignore[0] += 1
                if is_summ:
                    x += [WORD[EOS]]
                out.append(x)
origins, summurys = [], []
ignore_ori_nums = [0]
ignore_sum_nums = [0]
df = pd.read_csv(_file)
df["original"].apply(cut, args=(
origins, ignore_ori_nums, self.max_ori_len, False))
df["summary"].apply(cut, args=(
summurys, ignore_sum_nums, self.max_sum_len, True))
if is_train:
ori_ignore = self.dict(origins + summurys, self._min_word_count)
self.train_origins = origins
self.train_summurys = summurys
self.train_labels = df["score"].values - 1
print("Ignored origin counts - [{}]".format(ori_ignore))
print(
'Train data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))
print(
'Train data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))
else:
self.test_origins = origins
self.test_summurys = summurys
self.test_labels = df["score"].values - 1
print(
'Test data - ignore original lines - [{}]'.format(ignore_ori_nums[0]))
print(
'Test data - ignore summary lines - [{}]'.format(ignore_sum_nums[0]))
def save(self):
data = {
'max_ori_len': self.max_ori_len,
'max_sum_len': self.max_sum_len + 1,
'dict': {
'dict': self.dict.word2idx,
'dict_size': len(self.dict),
},
'train': {
'original': word2idx(self.train_origins, self.dict.word2idx),
'summary': word2idx(self.train_summurys, self.dict.word2idx),
'label': self.train_labels
},
'test': {
'original': word2idx(self.test_origins, self.dict.word2idx),
'summary': word2idx(self.test_summurys, self.dict.word2idx),
'label': self.test_labels
}
}
middle_save(data, "data/corpus")
print('dict length - [{}]'.format(len(self.dict)))
if __name__ == "__main__":
Corpus()
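One detail worth noting: Dictionary.__call__ skips words with count <= min_count, so with the default min_word_count=1 a word must occur at least twice to enter the vocabulary. A small self-contained check, assuming the Dictionary class above is in scope and stubbing the constants that `from const import *` would normally supply (the stub values are assumptions):

```python
# Stand-ins for the names that const would provide (assumed values).
PAD, UNK, BOS, EOS = 0, 1, 2, 3
WORD = {PAD: '<pad>', UNK: '<unk>', BOS: '<s>', EOS: '</s>'}

d = Dictionary()
ignored = d([['a', 'a', 'b']], min_count=1)
print(ignored)   # -> 1   ('b' occurs once, which is <= min_count, so skipped)
print(len(d))    # -> 5   (4 special tokens + 'a', which occurs twice)
```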
avg_line_length: 29.689655 · max_line_length: 87 · alphanum_fraction: 0.532172
content_no_comment: identical to content (the file contains no # comments)
is_comment_constant_removed: true · is_sharp_comment_removed: true
---
hexsha: 790a697f21c28ae85d911f2e52d29c25f778fcaf · size: 29,682 · ext: py · lang: Python
path: utils/general.py · repo: vivekkhurana/handsign @ 315e40e2d7b00a7e34cad870e6f90679e7bf7100 · licenses: ["MIT"]
(identical repo, path, and licenses across the stars, issues, and forks column groups)
stars: 1 (single event, 2020-01-12T18:14:07.000Z) · issues: null · forks: null
content:
# -*- coding: utf-8 -*-
#
# ColorHandPose3DNetwork - Network for estimating 3D Hand Pose from a single RGB Image
# Copyright (C) 2017 Christian Zimmermann
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, unicode_literals
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
import math
import cv2
class NetworkOps(object):
""" Operations that are frequently used within networks. """
neg_slope_of_relu = 0.01
@classmethod
def leaky_relu(cls, tensor, name='relu'):
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)
return out_tensor
@classmethod
def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
# conv
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@classmethod
def max_pool(cls, bottom, name='pool'):
pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID', name=name)
return pooled
@classmethod
def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
strides = [1, stride, stride, 1]
# conv
kernel = cls.get_deconv_filter(kernel_shape, trainable)
tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
strides=strides, padding='SAME')
# bias
biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases)
return out_tensor
@classmethod
def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@staticmethod
def get_deconv_filter(f_shape, trainable):
width = f_shape[0]
height = f_shape[1]
f = math.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init,
shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
@staticmethod
def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
weights_shape = [in_size[1], out_chan]
# weight matrix
weights = tf.get_variable('weights', weights_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable)
weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
# bias
biases = tf.get_variable('biases', [out_chan], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable)
biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
out_tensor = tf.matmul(in_tensor, weights) + biases
return out_tensor
@classmethod
def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')
return out_tensor
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
""" Dropout: Each neuron is dropped independently. """
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=tensor_shape))
return out_tensor
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
""" Spatial dropout: Not each neuron is dropped independently, but feature map wise. """
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
return out_tensor
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
"""
    Crops an image. When no scale factor is given, performs a central crop.
Inputs:
image: 4D tensor, [batch, height, width, channels] which will be cropped in height and width dimension
crop_location: tensor, [batch, 2] which represent the height and width location of the crop
crop_size: int, describes the extension of the crop
Outputs:
image_crop: 4D tensor, [batch, crop_size, crop_size, channels]
"""
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"
scale = tf.reshape(scale, [-1])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = crop_size / scale
y1 = crop_location[:, 0] - crop_size_scaled//2
y2 = y1 + crop_size_scaled
x1 = crop_location[:, 1] - crop_size_scaled//2
x2 = x1 + crop_size_scaled
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], -1)
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c
def find_max_location(scoremap):
""" Returns the coordinates of the given scoremap with maximum value. """
with tf.variable_scope('find_max_location'):
s = scoremap.get_shape().as_list()
if len(s) == 4:
scoremap = tf.squeeze(scoremap, [3])
if len(s) == 2:
scoremap = tf.expand_dims(scoremap, 0)
s = scoremap.get_shape().as_list()
assert len(s) == 3, "Scoremap must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "Scoremap must be [Batch, Width, Height]"
# my meshgrid
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
x_vec = tf.reshape(X, [-1])
y_vec = tf.reshape(Y, [-1])
scoremap_vec = tf.reshape(scoremap, [s[0], -1])
max_ind_vec = tf.cast(tf.argmax(scoremap_vec, dimension=1), tf.int32)
xy_loc = list()
for i in range(s[0]):
x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
xy_loc.append(tf.concat([x_loc, y_loc], 0))
xy_loc = tf.stack(xy_loc, 0)
return xy_loc
def single_obj_scoremap(scoremap):
""" Applies my algorithm to figure out the most likely object from a given segmentation scoremap. """
with tf.variable_scope('single_obj_scoremap'):
filter_size = 21
s = scoremap.get_shape().as_list()
assert len(s) == 4, "Scoremap must be 4D."
scoremap_softmax = tf.nn.softmax(scoremap) #B, H, W, C --> normalizes across last dimension
scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3) # B, H, W
detmap_fg = tf.round(scoremap_fg) # B, H, W
# find maximum in the fg scoremap
max_loc = find_max_location(scoremap_fg)
# use maximum to start "growing" our objectmap
objectmap_list = list()
kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)
for i in range(s[0]):
# create initial objectmap (put a one at the maximum)
            sparse_ind = tf.reshape(max_loc[i, :], [1, 2])  # reshape so that it is a single 2-dim point
objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
            # grow the map by dilation and a pixelwise AND with the foreground detection map
            num_passes = max(s[1], s[2]) // (filter_size//2) # number of passes needed to make sure the map can spread over the whole image
for j in range(num_passes):
objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
objectmap_list.append(objectmap)
objectmap = tf.stack(objectmap_list)
return objectmap
def calc_center_bb(binary_class_mask):
""" Returns the center of mass coordinates for the given binary_class_mask. """
with tf.variable_scope('calc_center_bb'):
binary_class_mask = tf.cast(binary_class_mask, tf.int32)
binary_class_mask = tf.equal(binary_class_mask, 1)
s = binary_class_mask.get_shape().as_list()
if len(s) == 4:
binary_class_mask = tf.squeeze(binary_class_mask, [3])
s = binary_class_mask.get_shape().as_list()
assert len(s) == 3, "binary_class_mask must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"
# my meshgrid
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
bb_list = list()
center_list = list()
crop_size_list = list()
for i in range(s[0]):
X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
x_min = tf.reduce_min(X_masked)
x_max = tf.reduce_max(X_masked)
y_min = tf.reduce_min(Y_masked)
y_max = tf.reduce_max(Y_masked)
start = tf.stack([x_min, y_min])
end = tf.stack([x_max, y_max])
bb = tf.stack([start, end], 1)
bb_list.append(bb)
center_x = 0.5*(x_max + x_min)
center_y = 0.5*(y_max + y_min)
center = tf.stack([center_x, center_y], 0)
center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
lambda: tf.constant([160.0, 160.0]))
center.set_shape([2])
center_list.append(center)
crop_size_x = x_max - x_min
crop_size_y = y_max - y_min
crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
lambda: tf.constant([100.0]))
crop_size.set_shape([1])
crop_size_list.append(crop_size)
bb = tf.stack(bb_list)
center = tf.stack(center_list)
crop_size = tf.stack(crop_size_list)
return center, bb, crop_size
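# --- Editor's sketch, not part of the original file: calc_center_bb on a toy
# mask (shapes and values are illustrative only).
def _example_calc_center_bb():
    """ Center and bounding box of a 4x4 blob inside a 32x32 mask. """
    mask = np.zeros((1, 32, 32), np.int32)
    mask[0, 10:14, 20:24] = 1
    center, bb, crop_size = calc_center_bb(tf.constant(mask))
    return center, bb, crop_size  # center -> [[11.5, 21.5]], crop_size -> [[3.]]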
def detect_keypoints(scoremaps):
""" Performs detection per scoremap for the hands keypoints. """
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_coords[i, 0] = v
keypoint_coords[i, 1] = u
return keypoint_coords
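# --- Editor's sketch, not part of the original file: detect_keypoints on a toy
# scoremap stack (values are illustrative only).
def _example_detect_keypoints():
    """ Reads off the (v, u) maximum per keypoint channel. """
    scoremaps = np.zeros((32, 32, 21), np.float32)
    scoremaps[5, 7, 0] = 1.0              # keypoint 0 peaks at row 5, col 7
    return detect_keypoints(scoremaps)    # row 0 of the result is [5., 7.]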
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
""" Transforms coords into global image coordinates. """
keypoints_coords = np.copy(keypoints_crop_coords)
keypoints_coords -= crop_size // 2
keypoints_coords /= scale
keypoints_coords += centers
return keypoints_coords
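# --- Editor's worked example, not part of the original file: a crop pixel at
# (192, 128) in a 256-pixel crop taken at scale 2.0 around center (60, 80) maps
# to (60 + (192 - 128) / 2.0, 80 + 0) = (92, 80) in the full image.
def _example_trafo_coords():
    coords = np.array([[128.0, 128.0], [192.0, 128.0]])
    return trafo_coords(coords, centers=np.array([60.0, 80.0]),
                        scale=2.0, crop_size=256)  # [[60., 80.], [92., 80.]]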
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
# define connections and colors of the bones
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth)
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
""" Plots a hand stick figure into a matplotlib figure. """
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
# define connections and colors of the bones
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_xyz[connection[0], :]
coord2 = coords_xyz[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)
axis.view_init(azim=-90., elev=90.)
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
""" Plots a hand stick figure into a matplotlib figure. """
colors = [(0, 0, 127),
(0, 0, 187),
(0, 0, 246),
(0, 32, 255),
(0, 85, 255),
(0, 140, 255),
(0, 192, 255),
(15, 248, 231),
(57, 255, 190),
(102, 1, 144),
(144, 1, 102),
(190, 1, 57),
(231, 1, 15),
(1, 211, 0),
(1, 163, 0),
(1, 111, 0),
(1, 63, 0),
(246, 11, 0),
(187, 0, 0),
(127, 0, 0)]
# define connections and colors of the bones
bones = [((0, 4), colors[0]),
((4, 3), colors[1]),
((3, 2), colors[2]),
((2, 1), colors[3]),
((0, 8), colors[4]),
((8, 7), colors[5]),
((7, 6), colors[6]),
((6, 5), colors[7]),
((0, 12), colors[8]),
((12, 11), colors[9]),
((11, 10), colors[10]),
((10, 9), colors[11]),
((0, 16), colors[12]),
((16, 15), colors[13]),
((15, 14), colors[14]),
((14, 13), colors[15]),
((0, 20), colors[16]),
((20, 19), colors[17]),
((19, 18), colors[18]),
((18, 17), colors[19])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
coord1_t = (int(coord1[1]), int(coord1[0]))
coord2_t = (int(coord2[1]), int(coord2[0]))
if color_fixed is None:
cv2.line(image, coord2_t, coord1_t, color, linewidth)
else:
cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth)
class LearningRateScheduler:
"""
    Provides a scalar learning-rate tensor for the given iteration, as needed for a multistep learning rate schedule.
"""
def __init__(self, steps, values):
self.steps = steps
self.values = values
        assert len(steps)+1 == len(values), "There must be one more element in values than in steps."
def get_lr(self, global_step):
with tf.name_scope('lr_scheduler'):
if len(self.values) == 1: #1 value -> no step
learning_rate = tf.constant(self.values[0])
elif len(self.values) == 2: #2 values -> one step
cond = tf.greater(global_step, self.steps[0])
learning_rate = tf.where(cond, self.values[1], self.values[0])
else: # n values -> n-1 steps
cond_first = tf.less(global_step, self.steps[0])
cond_between = list()
                for ind in range(len(self.steps) - 1):
cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),
tf.greater_equal(global_step, self.steps[ind])))
cond_last = tf.greater_equal(global_step, self.steps[-1])
cond_full = [cond_first]
cond_full.extend(cond_between)
cond_full.append(cond_last)
cond_vec = tf.stack(cond_full)
lr_vec = tf.stack(self.values)
learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
learning_rate = tf.reduce_sum(learning_rate)
return learning_rate
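# --- Editor's sketch, not part of the original file: a three-stage schedule
# (step and value choices are illustrative only).
def _example_lr_scheduler():
    """ 1e-4 until step 10000, 1e-5 until step 20000, then 1e-6. """
    scheduler = LearningRateScheduler(steps=[10000, 20000],
                                      values=[1e-4, 1e-5, 1e-6])
    lr = scheduler.get_lr(tf.constant(15000))  # evaluates to 1e-5 in a session
    return lr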
class EvalUtil:
""" Util class for evaluation networks.
"""
def __init__(self, num_kp=21):
# init empty data storage
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
""" Used to feed data to the class. Stores the euclidean distance between gt and pred, when it is visible. """
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert len(keypoint_gt.shape) == 2
assert len(keypoint_pred.shape) == 2
assert len(keypoint_vis.shape) == 1
# calc euclidean distance
diff = keypoint_gt - keypoint_pred
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i])
def _get_pck(self, kp_id, threshold):
""" Returns pck for one keypoint for the given threshold. """
if len(self.data[kp_id]) == 0:
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck
def _get_epe(self, kp_id):
""" Returns end point error for one keypoint. """
if len(self.data[kp_id]) == 0:
return None, None
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return epe_mean, epe_median
def get_measures(self, val_min, val_max, steps):
""" Outputs the average mean and median error as well as the pck score. """
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
# init mean measures
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
        # compute the measures for each keypoint
for part_id in range(self.num_kp):
# mean/median error
mean, median = self._get_epe(part_id)
if mean is None:
# there was no valid measurement for this keypoint
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
# pck/auc
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0) # mean only over keypoints
return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
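# --- Editor's sketch, not part of the original file: one feed/evaluate round
# trip (toy values only; relies on the module-level np import).
def _example_eval_util():
    """ Every keypoint off by sqrt(3) -> mean EPE of sqrt(3). """
    ev = EvalUtil()
    ev.feed(np.zeros((21, 3)), np.ones(21), np.ones((21, 3)))
    mean, median, auc, pck_curve, thresholds = ev.get_measures(0.0, 5.0, 10)
    return mean  # == sqrt(3) ~ 1.732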
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
""" Loads weights from a snapshot except the ones indicated with discard_list. Others are possibly renamed. """
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
# Remove everything from the discard list
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
# rename everything according to rename_dict
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def calc_auc(x, y):
""" Given x and y values it calculates the approx. integral and normalizes it: area under curve"""
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return integral / norm
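# --- Editor's worked example, not part of the original file: a curve that sits
# at 1.0 everywhere has a normalized AUC of exactly 1.0.
def _example_calc_auc():
    x = np.array([0.0, 1.0, 2.0])
    return calc_auc(x, np.ones_like(x))  # == 1.0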
def get_stb_ref_curves():
"""
Returns results of various baseline methods on the Stereo Tracking Benchmark Dataset reported by:
Zhang et al., ‘3d Hand Pose Tracking and Estimation Using Stereo Matching’, 2016
"""
curve_list = list()
thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
curve_list.append((thresh_mm, pso_b1, 'PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1)))
icppso_b1 = np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
curve_list.append((thresh_mm, icppso_b1, 'ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1)))
chpr_b1 = np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
curve_list.append((thresh_mm, chpr_b1, 'CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1)))
return curve_list
| 40.05668
| 148
| 0.552018
|
from __future__ import print_function, unicode_literals
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
import numpy as np
import math
import cv2
class NetworkOps(object):
neg_slope_of_relu = 0.01
@classmethod
def leaky_relu(cls, tensor, name='relu'):
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name=name)
return out_tensor
@classmethod
def conv(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
strides = [1, stride, stride, 1]
kernel_shape = [kernel_size, kernel_size, in_size[3], out_chan]
kernel = tf.get_variable('weights', kernel_shape, tf.float32,
tf.contrib.layers.xavier_initializer_conv2d(), trainable=trainable, collections=['wd', 'variables', 'filters'])
tmp_result = tf.nn.conv2d(in_tensor, kernel, strides, padding='SAME')
biases = tf.get_variable('biases', [kernel_shape[3]], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases, name='out')
return out_tensor
@classmethod
def conv_relu(cls, in_tensor, layer_name, kernel_size, stride, out_chan, trainable=True):
tensor = cls.conv(in_tensor, layer_name, kernel_size, stride, out_chan, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@classmethod
def max_pool(cls, bottom, name='pool'):
pooled = tf.nn.max_pool(bottom, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID', name=name)
return pooled
@classmethod
def upconv(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
kernel_shape = [kernel_size, kernel_size, in_size[3], in_size[3]]
strides = [1, stride, stride, 1]
kernel = cls.get_deconv_filter(kernel_shape, trainable)
tmp_result = tf.nn.conv2d_transpose(value=in_tensor, filter=kernel, output_shape=output_shape,
strides=strides, padding='SAME')
biases = tf.get_variable('biases', [kernel_shape[2]], tf.float32,
tf.constant_initializer(0.0), trainable=trainable, collections=['wd', 'variables', 'biases'])
out_tensor = tf.nn.bias_add(tmp_result, biases)
return out_tensor
@classmethod
def upconv_relu(cls, in_tensor, layer_name, output_shape, kernel_size, stride, trainable=True):
tensor = cls.upconv(in_tensor, layer_name, output_shape, kernel_size, stride, trainable)
out_tensor = cls.leaky_relu(tensor, name='out')
return out_tensor
@staticmethod
def get_deconv_filter(f_shape, trainable):
width = f_shape[0]
height = f_shape[1]
f = math.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([f_shape[0], f_shape[1]])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(f_shape)
for i in range(f_shape[2]):
weights[:, :, i, i] = bilinear
init = tf.constant_initializer(value=weights,
dtype=tf.float32)
return tf.get_variable(name="weights", initializer=init,
shape=weights.shape, trainable=trainable, collections=['wd', 'variables', 'filters'])
@staticmethod
def fully_connected(in_tensor, layer_name, out_chan, trainable=True):
with tf.variable_scope(layer_name):
in_size = in_tensor.get_shape().as_list()
assert len(in_size) == 2, 'Input to a fully connected layer must be a vector.'
weights_shape = [in_size[1], out_chan]
weights = tf.get_variable('weights', weights_shape, tf.float32,
tf.contrib.layers.xavier_initializer(), trainable=trainable)
weights = tf.check_numerics(weights, 'weights: %s' % layer_name)
biases = tf.get_variable('biases', [out_chan], tf.float32,
tf.constant_initializer(0.0001), trainable=trainable)
biases = tf.check_numerics(biases, 'biases: %s' % layer_name)
out_tensor = tf.matmul(in_tensor, weights) + biases
return out_tensor
@classmethod
def fully_connected_relu(cls, in_tensor, layer_name, out_chan, trainable=True):
tensor = cls.fully_connected(in_tensor, layer_name, out_chan, trainable)
out_tensor = tf.maximum(tensor, cls.neg_slope_of_relu*tensor, name='out')
return out_tensor
@staticmethod
def dropout(in_tensor, keep_prob, evaluation):
with tf.variable_scope('dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=tensor_shape))
return out_tensor
@staticmethod
def spatial_dropout(in_tensor, keep_prob, evaluation):
with tf.variable_scope('spatial_dropout'):
tensor_shape = in_tensor.get_shape().as_list()
out_tensor = tf.cond(evaluation,
lambda: tf.nn.dropout(in_tensor, 1.0,
noise_shape=tensor_shape),
lambda: tf.nn.dropout(in_tensor, keep_prob,
noise_shape=[tensor_shape[0], 1, 1, tensor_shape[3]]))
return out_tensor
def crop_image_from_xy(image, crop_location, crop_size, scale=1.0):
with tf.name_scope('crop_image_from_xy'):
s = image.get_shape().as_list()
assert len(s) == 4, "Image needs to be of shape [batch, width, height, channel]"
scale = tf.reshape(scale, [-1])
crop_location = tf.cast(crop_location, tf.float32)
crop_location = tf.reshape(crop_location, [s[0], 2])
crop_size = tf.cast(crop_size, tf.float32)
crop_size_scaled = crop_size / scale
y1 = crop_location[:, 0] - crop_size_scaled//2
y2 = y1 + crop_size_scaled
x1 = crop_location[:, 1] - crop_size_scaled//2
x2 = x1 + crop_size_scaled
y1 /= s[1]
y2 /= s[1]
x1 /= s[2]
x2 /= s[2]
boxes = tf.stack([y1, x1, y2, x2], -1)
crop_size = tf.cast(tf.stack([crop_size, crop_size]), tf.int32)
box_ind = tf.range(s[0])
image_c = tf.image.crop_and_resize(tf.cast(image, tf.float32), boxes, box_ind, crop_size, name='crop')
return image_c
def find_max_location(scoremap):
with tf.variable_scope('find_max_location'):
s = scoremap.get_shape().as_list()
if len(s) == 4:
scoremap = tf.squeeze(scoremap, [3])
if len(s) == 2:
scoremap = tf.expand_dims(scoremap, 0)
s = scoremap.get_shape().as_list()
assert len(s) == 3, "Scoremap must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "Scoremap must be [Batch, Width, Height]"
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
x_vec = tf.reshape(X, [-1])
y_vec = tf.reshape(Y, [-1])
scoremap_vec = tf.reshape(scoremap, [s[0], -1])
        max_ind_vec = tf.cast(tf.argmax(scoremap_vec, axis=1), tf.int32)
xy_loc = list()
for i in range(s[0]):
x_loc = tf.reshape(x_vec[max_ind_vec[i]], [1])
y_loc = tf.reshape(y_vec[max_ind_vec[i]], [1])
xy_loc.append(tf.concat([x_loc, y_loc], 0))
xy_loc = tf.stack(xy_loc, 0)
return xy_loc
def single_obj_scoremap(scoremap):
with tf.variable_scope('single_obj_scoremap'):
filter_size = 21
s = scoremap.get_shape().as_list()
assert len(s) == 4, "Scoremap must be 4D."
scoremap_softmax = tf.nn.softmax(scoremap)
scoremap_fg = tf.reduce_max(scoremap_softmax[:, :, :, 1:], 3)
detmap_fg = tf.round(scoremap_fg)
max_loc = find_max_location(scoremap_fg)
objectmap_list = list()
kernel_dil = tf.ones((filter_size, filter_size, 1)) / float(filter_size*filter_size)
for i in range(s[0]):
sparse_ind = tf.reshape(max_loc[i, :], [1, 2])
objectmap = tf.sparse_to_dense(sparse_ind, [s[1], s[2]], 1.0)
num_passes = max(s[1], s[2]) // (filter_size//2)
for j in range(num_passes):
objectmap = tf.reshape(objectmap, [1, s[1], s[2], 1])
objectmap_dil = tf.nn.dilation2d(objectmap, kernel_dil, [1, 1, 1, 1], [1, 1, 1, 1], 'SAME')
objectmap_dil = tf.reshape(objectmap_dil, [s[1], s[2]])
objectmap = tf.round(tf.multiply(detmap_fg[i, :, :], objectmap_dil))
objectmap = tf.reshape(objectmap, [s[1], s[2], 1])
objectmap_list.append(objectmap)
objectmap = tf.stack(objectmap_list)
return objectmap
def calc_center_bb(binary_class_mask):
with tf.variable_scope('calc_center_bb'):
binary_class_mask = tf.cast(binary_class_mask, tf.int32)
binary_class_mask = tf.equal(binary_class_mask, 1)
s = binary_class_mask.get_shape().as_list()
if len(s) == 4:
binary_class_mask = tf.squeeze(binary_class_mask, [3])
s = binary_class_mask.get_shape().as_list()
assert len(s) == 3, "binary_class_mask must be 3D."
assert (s[0] < s[1]) and (s[0] < s[2]), "binary_class_mask must be [Batch, Width, Height]"
x_range = tf.expand_dims(tf.range(s[1]), 1)
y_range = tf.expand_dims(tf.range(s[2]), 0)
X = tf.tile(x_range, [1, s[2]])
Y = tf.tile(y_range, [s[1], 1])
bb_list = list()
center_list = list()
crop_size_list = list()
for i in range(s[0]):
X_masked = tf.cast(tf.boolean_mask(X, binary_class_mask[i, :, :]), tf.float32)
Y_masked = tf.cast(tf.boolean_mask(Y, binary_class_mask[i, :, :]), tf.float32)
x_min = tf.reduce_min(X_masked)
x_max = tf.reduce_max(X_masked)
y_min = tf.reduce_min(Y_masked)
y_max = tf.reduce_max(Y_masked)
start = tf.stack([x_min, y_min])
end = tf.stack([x_max, y_max])
bb = tf.stack([start, end], 1)
bb_list.append(bb)
center_x = 0.5*(x_max + x_min)
center_y = 0.5*(y_max + y_min)
center = tf.stack([center_x, center_y], 0)
center = tf.cond(tf.reduce_all(tf.is_finite(center)), lambda: center,
lambda: tf.constant([160.0, 160.0]))
center.set_shape([2])
center_list.append(center)
crop_size_x = x_max - x_min
crop_size_y = y_max - y_min
crop_size = tf.expand_dims(tf.maximum(crop_size_x, crop_size_y), 0)
crop_size = tf.cond(tf.reduce_all(tf.is_finite(crop_size)), lambda: crop_size,
lambda: tf.constant([100.0]))
crop_size.set_shape([1])
crop_size_list.append(crop_size)
bb = tf.stack(bb_list)
center = tf.stack(center_list)
crop_size = tf.stack(crop_size_list)
return center, bb, crop_size
def detect_keypoints(scoremaps):
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
keypoint_coords = np.zeros((s[2], 2))
for i in range(s[2]):
v, u = np.unravel_index(np.argmax(scoremaps[:, :, i]), (s[0], s[1]))
keypoint_coords[i, 0] = v
keypoint_coords[i, 1] = u
return keypoint_coords
def trafo_coords(keypoints_crop_coords, centers, scale, crop_size):
keypoints_coords = np.copy(keypoints_crop_coords)
keypoints_coords -= crop_size // 2
keypoints_coords /= scale
keypoints_coords += centers
return keypoints_coords
def plot_hand(coords_hw, axis, color_fixed=None, linewidth='1'):
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 1], coords[:, 0], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 1], coords[:, 0], color_fixed, linewidth=linewidth)
def plot_hand_3d(coords_xyz, axis, color_fixed=None, linewidth='1'):
colors = np.array([[0., 0., 0.5],
[0., 0., 0.73172906],
[0., 0., 0.96345811],
[0., 0.12745098, 1.],
[0., 0.33137255, 1.],
[0., 0.55098039, 1.],
[0., 0.75490196, 1.],
[0.06008855, 0.9745098, 0.90765338],
[0.22454143, 1., 0.74320051],
[0.40164453, 1., 0.56609741],
[0.56609741, 1., 0.40164453],
[0.74320051, 1., 0.22454143],
[0.90765338, 1., 0.06008855],
[1., 0.82861293, 0.],
[1., 0.63979666, 0.],
[1., 0.43645606, 0.],
[1., 0.2476398, 0.],
[0.96345811, 0.0442992, 0.],
[0.73172906, 0., 0.],
[0.5, 0., 0.]])
bones = [((0, 4), colors[0, :]),
((4, 3), colors[1, :]),
((3, 2), colors[2, :]),
((2, 1), colors[3, :]),
((0, 8), colors[4, :]),
((8, 7), colors[5, :]),
((7, 6), colors[6, :]),
((6, 5), colors[7, :]),
((0, 12), colors[8, :]),
((12, 11), colors[9, :]),
((11, 10), colors[10, :]),
((10, 9), colors[11, :]),
((0, 16), colors[12, :]),
((16, 15), colors[13, :]),
((15, 14), colors[14, :]),
((14, 13), colors[15, :]),
((0, 20), colors[16, :]),
((20, 19), colors[17, :]),
((19, 18), colors[18, :]),
((18, 17), colors[19, :])]
for connection, color in bones:
coord1 = coords_xyz[connection[0], :]
coord2 = coords_xyz[connection[1], :]
coords = np.stack([coord1, coord2])
if color_fixed is None:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color=color, linewidth=linewidth)
else:
axis.plot(coords[:, 0], coords[:, 1], coords[:, 2], color_fixed, linewidth=linewidth)
axis.view_init(azim=-90., elev=90.)
def plot_hand_2d(coords_hw, image, color_fixed=None, linewidth=2):
colors = [(0, 0, 127),
(0, 0, 187),
(0, 0, 246),
(0, 32, 255),
(0, 85, 255),
(0, 140, 255),
(0, 192, 255),
(15, 248, 231),
(57, 255, 190),
(102, 1, 144),
(144, 1, 102),
(190, 1, 57),
(231, 1, 15),
(1, 211, 0),
(1, 163, 0),
(1, 111, 0),
(1, 63, 0),
(246, 11, 0),
(187, 0, 0),
(127, 0, 0)]
bones = [((0, 4), colors[0]),
((4, 3), colors[1]),
((3, 2), colors[2]),
((2, 1), colors[3]),
((0, 8), colors[4]),
((8, 7), colors[5]),
((7, 6), colors[6]),
((6, 5), colors[7]),
((0, 12), colors[8]),
((12, 11), colors[9]),
((11, 10), colors[10]),
((10, 9), colors[11]),
((0, 16), colors[12]),
((16, 15), colors[13]),
((15, 14), colors[14]),
((14, 13), colors[15]),
((0, 20), colors[16]),
((20, 19), colors[17]),
((19, 18), colors[18]),
((18, 17), colors[19])]
for connection, color in bones:
coord1 = coords_hw[connection[0], :]
coord2 = coords_hw[connection[1], :]
coords = np.stack([coord1, coord2])
coord1_t = (int(coord1[1]), int(coord1[0]))
coord2_t = (int(coord2[1]), int(coord2[0]))
if color_fixed is None:
cv2.line(image, coord2_t, coord1_t, color, linewidth)
else:
cv2.line(image, coord1_t, coord2_t, color_fixed, linewidth)
class LearningRateScheduler:
def __init__(self, steps, values):
self.steps = steps
self.values = values
        assert len(steps)+1 == len(values), "There must be one more element in values than in steps."
def get_lr(self, global_step):
with tf.name_scope('lr_scheduler'):
if len(self.values) == 1:
learning_rate = tf.constant(self.values[0])
elif len(self.values) == 2:
cond = tf.greater(global_step, self.steps[0])
learning_rate = tf.where(cond, self.values[1], self.values[0])
else:
cond_first = tf.less(global_step, self.steps[0])
cond_between = list()
                for ind in range(len(self.steps) - 1):
cond_between.append(tf.logical_and(tf.less(global_step, self.steps[ind+1]),
tf.greater_equal(global_step, self.steps[ind])))
cond_last = tf.greater_equal(global_step, self.steps[-1])
cond_full = [cond_first]
cond_full.extend(cond_between)
cond_full.append(cond_last)
cond_vec = tf.stack(cond_full)
lr_vec = tf.stack(self.values)
learning_rate = tf.where(cond_vec, lr_vec, tf.zeros_like(lr_vec))
learning_rate = tf.reduce_sum(learning_rate)
return learning_rate
class EvalUtil:
def __init__(self, num_kp=21):
self.data = list()
self.num_kp = num_kp
for _ in range(num_kp):
self.data.append(list())
def feed(self, keypoint_gt, keypoint_vis, keypoint_pred):
keypoint_gt = np.squeeze(keypoint_gt)
keypoint_pred = np.squeeze(keypoint_pred)
keypoint_vis = np.squeeze(keypoint_vis).astype('bool')
assert len(keypoint_gt.shape) == 2
assert len(keypoint_pred.shape) == 2
assert len(keypoint_vis.shape) == 1
diff = keypoint_gt - keypoint_pred
euclidean_dist = np.sqrt(np.sum(np.square(diff), axis=1))
num_kp = keypoint_gt.shape[0]
for i in range(num_kp):
if keypoint_vis[i]:
self.data[i].append(euclidean_dist[i])
def _get_pck(self, kp_id, threshold):
if len(self.data[kp_id]) == 0:
return None
data = np.array(self.data[kp_id])
pck = np.mean((data <= threshold).astype('float'))
return pck
def _get_epe(self, kp_id):
if len(self.data[kp_id]) == 0:
return None, None
data = np.array(self.data[kp_id])
epe_mean = np.mean(data)
epe_median = np.median(data)
return epe_mean, epe_median
def get_measures(self, val_min, val_max, steps):
thresholds = np.linspace(val_min, val_max, steps)
thresholds = np.array(thresholds)
norm_factor = np.trapz(np.ones_like(thresholds), thresholds)
epe_mean_all = list()
epe_median_all = list()
auc_all = list()
pck_curve_all = list()
for part_id in range(self.num_kp):
mean, median = self._get_epe(part_id)
if mean is None:
continue
epe_mean_all.append(mean)
epe_median_all.append(median)
pck_curve = list()
for t in thresholds:
pck = self._get_pck(part_id, t)
pck_curve.append(pck)
pck_curve = np.array(pck_curve)
pck_curve_all.append(pck_curve)
auc = np.trapz(pck_curve, thresholds)
auc /= norm_factor
auc_all.append(auc)
epe_mean_all = np.mean(np.array(epe_mean_all))
epe_median_all = np.mean(np.array(epe_median_all))
auc_all = np.mean(np.array(auc_all))
pck_curve_all = np.mean(np.array(pck_curve_all), 0)
return epe_mean_all, epe_median_all, auc_all, pck_curve_all, thresholds
def load_weights_from_snapshot(session, checkpoint_path, discard_list=None, rename_dict=None):
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
if discard_list is not None:
num_disc = 0
var_to_shape_map_new = dict()
for k, v in var_to_shape_map.items():
good = True
for dis_str in discard_list:
if dis_str in k:
good = False
if good:
var_to_shape_map_new[k] = v
else:
num_disc += 1
var_to_shape_map = dict(var_to_shape_map_new)
print('Discarded %d items' % num_disc)
num_rename = 0
var_to_shape_map_new = dict()
for name in var_to_shape_map.keys():
new_name = name
if rename_dict is not None:
for rename_str in rename_dict.keys():
if rename_str in name:
new_name = new_name.replace(rename_str, rename_dict[rename_str])
num_rename += 1
var_to_shape_map_new[new_name] = reader.get_tensor(name)
var_to_shape_map = dict(var_to_shape_map_new)
init_op, init_feed = tf.contrib.framework.assign_from_values(var_to_shape_map)
session.run(init_op, init_feed)
print('Initialized %d variables from %s.' % (len(var_to_shape_map), checkpoint_path))
def calc_auc(x, y):
integral = np.trapz(y, x)
norm = np.trapz(np.ones_like(y), x)
return integral / norm
def get_stb_ref_curves():
curve_list = list()
thresh_mm = np.array([20.0, 25, 30, 35, 40, 45, 50])
pso_b1 = np.array([0.32236842, 0.53947368, 0.67434211, 0.75657895, 0.80921053, 0.86513158, 0.89473684])
curve_list.append((thresh_mm, pso_b1, 'PSO (AUC=%.3f)' % calc_auc(thresh_mm, pso_b1)))
icppso_b1 = np.array([ 0.51973684, 0.64473684, 0.71710526, 0.77302632, 0.80921053, 0.84868421, 0.86842105])
curve_list.append((thresh_mm, icppso_b1, 'ICPPSO (AUC=%.3f)' % calc_auc(thresh_mm, icppso_b1)))
chpr_b1 = np.array([ 0.56578947, 0.71710526, 0.82236842, 0.88157895, 0.91447368, 0.9375, 0.96052632])
curve_list.append((thresh_mm, chpr_b1, 'CHPR (AUC=%.3f)' % calc_auc(thresh_mm, chpr_b1)))
return curve_list
| true
| true
|
790a6aedd03c48f724e76f642cd7f5067c735804
| 805
|
py
|
Python
|
notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 13
|
2021-03-11T00:25:22.000Z
|
2022-03-19T00:19:23.000Z
|
notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 160
|
2021-04-26T19:04:15.000Z
|
2022-03-26T20:18:37.000Z
|
notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py
|
side-projects-42/INTERVIEW-PREP-COMPLETE
|
627a3315cee4bbc38a0e81c256f27f928eac2d63
|
[
"MIT"
] | 12
|
2021-04-26T19:43:01.000Z
|
2022-01-31T08:36:29.000Z
|
import math
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.min = math.inf
def push(self, x: int) -> None:
self.stack.append(x)
if x < self.min:
self.min = x
def pop(self) -> None:
t = self.stack.pop()
if t == self.min and len(self.stack):
self.min = min(self.stack)
elif t == self.min and not len(self.stack):
self.min = math.inf
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.min
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
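# Editor's sketch, not part of the original solution: a worked sequence showing
# the O(n) minimum recomputation on pop.
def _example_min_stack():
    s = MinStack()
    for v in (5, 2, 7):
        s.push(v)       # min becomes 5, then 2, then stays 2
    s.pop()             # removes 7; 2 is still the minimum
    s.pop()             # removes 2; min is recomputed as min([5]) == 5
    return s.getMin()   # == 5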
| 21.184211
| 63
| 0.532919
|
import math
class MinStack:
def __init__(self):
self.stack = []
self.min = math.inf
def push(self, x: int) -> None:
self.stack.append(x)
if x < self.min:
self.min = x
def pop(self) -> None:
t = self.stack.pop()
if t == self.min and len(self.stack):
self.min = min(self.stack)
elif t == self.min and not len(self.stack):
self.min = math.inf
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.min
| true
| true
|
790a6c616cbc8e212eb9b9128c574dddf371cb52
| 4,955
|
py
|
Python
|
postcipes/bfs.py
|
Mopolino8/postcipes
|
5d67b383aa3e314b581b5262ba95f734ecb6369f
|
[
"MIT"
] | null | null | null |
postcipes/bfs.py
|
Mopolino8/postcipes
|
5d67b383aa3e314b581b5262ba95f734ecb6369f
|
[
"MIT"
] | null | null | null |
postcipes/bfs.py
|
Mopolino8/postcipes
|
5d67b383aa3e314b581b5262ba95f734ecb6369f
|
[
"MIT"
] | 1
|
2019-11-24T17:11:16.000Z
|
2019-11-24T17:11:16.000Z
|
# This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
import h5py
__all__ = ["BackwardFacingStep"]
class BackwardFacingStep(Postcipe):
def __init__(self, path, nu, uRef):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.nu = nu
self.uRef = uRef
self.h = np.sum(tbl.edge_lengths(self.case, "stepB"))
self.H = np.sum(tbl.edge_lengths(self.case, "outletB")) - self.h
self.eRatio = (self.H + self.h)/self.H
self.tau1 = \
self.case.boundary_data("lowB1")[1]["wallShearStressMean"][:, 0]
self.tau2 = \
self.case.boundary_data("lowB2")[1]["wallShearStressMean"][:, 0]
self.tau = np.append(self.tau1, self.tau2)
self.x1 = self.case.boundary_data("lowB1")[0][:, 0]
self.x2 = self.case.boundary_data("lowB2")[0][:, 0]
self.x = np.append(self.x1, self.x2)
self.idx105h = np.argmin(np.abs(self.x1 + 1.05*self.h))
self.uTop = self.case.boundary_data("upB")[1]['UMean'][:, 0]
self.theta = None
self.delta99 = None
self.edgeU = None
def compute_delta99(self, u0='max', interpolate=True):
self.delta99 = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.delta99[i] = tbl.delta_99(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reDelta99 = self.delta99*self.edgeU/self.nu
self.reTau = self.delta99*np.sqrt(np.abs(self.tau1))/self.nu
self.delta99105h = self.delta99[self.idx105h]
return 0
def compute_theta(self, u0='max', interpolate=True):
self.theta = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.theta[i] = tbl.momentum_thickness(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reTheta = self.theta*self.edgeU/self.nu
self.reTheta105h = self.reTheta[self.idx105h]
return 0
def save(self, name):
f = h5py.File(name, 'w')
f.attrs["h"] = self.h
f.attrs["H"] = self.H
f.attrs["nu"] = self.nu
f.attrs["eRatio"] = self.eRatio
f.attrs["uRef"] = self.uRef
f.attrs["idx105h"] = self.idx105h
f.create_dataset("x1", data=self.x1)
f.create_dataset("x2", data=self.x2)
f.create_dataset("x", data=self.x)
f.create_dataset("uTop", data=self.uTop)
f.create_dataset("tau1", data=self.tau1)
f.create_dataset("tau2", data=self.tau2)
f.create_dataset("tau", data=self.tau)
if self.theta is None:
self.compute_theta()
if self.delta99 is None:
self.compute_delta99()
f.create_dataset("theta", data=self.theta)
f.create_dataset("delta99", data=self.delta99)
f.create_dataset("reTheta", data=self.reTheta)
f.create_dataset("reTau", data=self.reTau)
f.create_dataset("reDelta99", data=self.reDelta99)
f.close()
def load(self, name):
f = h5py.File(name, 'r')
self.h = f.attrs["h"]
self.H = f.attrs["H"]
self.nu = f.attrs["nu"]
self.eRatio = f.attrs["eRatio"]
self.uRef = f.attrs["uRef"]
self.idx105h = f.attrs["idx105h"]
self.x1 = f["x1"][:]
self.x2 = f["x2"][:]
self.x = f["x"][:]
self.uTop = f["uTop"][:]
self.tau1 = f["tau1"][:]
self.tau2 = f["tau2"][:]
self.tau = f["tau"][:]
self.theta = f["theta"][:]
self.delta99 = f["delta99"][:]
self.reTheta = f["reTheta"][:]
self.reTau = f["reTau"][:]
self.reDelta99 = f["reDelta99"][:]
f.close()
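# --- Editor's sketch, not part of the original file: a typical round trip. The
# case path and flow parameters below are illustrative assumptions only.
def _example_backward_facing_step():
    bfs = BackwardFacingStep("path/to/case", nu=1e-5, uRef=1.0)
    bfs.compute_delta99()
    bfs.compute_theta()
    bfs.save("bfs_postprocessed.h5")  # geometry, wall shear stress, BL metrics
    return bfs.reTheta105h            # Re_theta at x = -1.05 h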
| 33.707483
| 79
| 0.52775
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
import h5py
__all__ = ["BackwardFacingStep"]
class BackwardFacingStep(Postcipe):
def __init__(self, path, nu, uRef):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.nu = nu
self.uRef = uRef
self.h = np.sum(tbl.edge_lengths(self.case, "stepB"))
self.H = np.sum(tbl.edge_lengths(self.case, "outletB")) - self.h
self.eRatio = (self.H + self.h)/self.H
self.tau1 = \
self.case.boundary_data("lowB1")[1]["wallShearStressMean"][:, 0]
self.tau2 = \
self.case.boundary_data("lowB2")[1]["wallShearStressMean"][:, 0]
self.tau = np.append(self.tau1, self.tau2)
self.x1 = self.case.boundary_data("lowB1")[0][:, 0]
self.x2 = self.case.boundary_data("lowB2")[0][:, 0]
self.x = np.append(self.x1, self.x2)
self.idx105h = np.argmin(np.abs(self.x1 + 1.05*self.h))
self.uTop = self.case.boundary_data("upB")[1]['UMean'][:, 0]
self.theta = None
self.delta99 = None
self.edgeU = None
def compute_delta99(self, u0='max', interpolate=True):
self.delta99 = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.delta99[i] = tbl.delta_99(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reDelta99 = self.delta99*self.edgeU/self.nu
self.reTau = self.delta99*np.sqrt(np.abs(self.tau1))/self.nu
self.delta99105h = self.delta99[self.idx105h]
return 0
def compute_theta(self, u0='max', interpolate=True):
self.theta = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.theta[i] = tbl.momentum_thickness(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reTheta = self.theta*self.edgeU/self.nu
self.reTheta105h = self.reTheta[self.idx105h]
return 0
def save(self, name):
f = h5py.File(name, 'w')
f.attrs["h"] = self.h
f.attrs["H"] = self.H
f.attrs["nu"] = self.nu
f.attrs["eRatio"] = self.eRatio
f.attrs["uRef"] = self.uRef
f.attrs["idx105h"] = self.idx105h
f.create_dataset("x1", data=self.x1)
f.create_dataset("x2", data=self.x2)
f.create_dataset("x", data=self.x)
f.create_dataset("uTop", data=self.uTop)
f.create_dataset("tau1", data=self.tau1)
f.create_dataset("tau2", data=self.tau2)
f.create_dataset("tau", data=self.tau)
if self.theta is None:
self.compute_theta()
if self.delta99 is None:
self.compute_delta99()
f.create_dataset("theta", data=self.theta)
f.create_dataset("delta99", data=self.delta99)
f.create_dataset("reTheta", data=self.reTheta)
f.create_dataset("reTau", data=self.reTau)
f.create_dataset("reDelta99", data=self.reDelta99)
f.close()
def load(self, name):
f = h5py.File(name, 'r')
self.h = f.attrs["h"]
self.H = f.attrs["H"]
self.nu = f.attrs["nu"]
self.eRatio = f.attrs["eRatio"]
self.uRef = f.attrs["uRef"]
self.idx105h = f.attrs["idx105h"]
self.x1 = f["x1"][:]
self.x2 = f["x2"][:]
self.x = f["x"][:]
self.uTop = f["uTop"][:]
self.tau1 = f["tau1"][:]
self.tau2 = f["tau2"][:]
self.tau = f["tau"][:]
self.theta = f["theta"][:]
self.delta99 = f["delta99"][:]
self.reTheta = f["reTheta"][:]
self.reTau = f["reTau"][:]
self.reDelta99 = f["reDelta99"][:]
f.close()
| true
| true
|
790a6df13312576a919d632818686df6c911d77f
| 10,559
|
py
|
Python
|
pygazebo/msg/geometry_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
pygazebo/msg/geometry_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
pygazebo/msg/geometry_pb2.py
|
CryptoCopter/pygazebo
|
f16704f3b59cb50a1390ef92fde283558fd71f8f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: geometry.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import boxgeom_pb2 as boxgeom__pb2
import cylindergeom_pb2 as cylindergeom__pb2
import spheregeom_pb2 as spheregeom__pb2
import planegeom_pb2 as planegeom__pb2
import imagegeom_pb2 as imagegeom__pb2
import heightmapgeom_pb2 as heightmapgeom__pb2
import meshgeom_pb2 as meshgeom__pb2
import vector3d_pb2 as vector3d__pb2
import polylinegeom_pb2 as polylinegeom__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='geometry.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0egeometry.proto\x12\x0bgazebo.msgs\x1a\rboxgeom.proto\x1a\x12\x63ylindergeom.proto\x1a\x10spheregeom.proto\x1a\x0fplanegeom.proto\x1a\x0fimagegeom.proto\x1a\x13heightmapgeom.proto\x1a\x0emeshgeom.proto\x1a\x0evector3d.proto\x1a\x12polylinegeom.proto\"\xb5\x04\n\x08Geometry\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.gazebo.msgs.Geometry.Type\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.gazebo.msgs.BoxGeom\x12+\n\x08\x63ylinder\x18\x03 \x01(\x0b\x32\x19.gazebo.msgs.CylinderGeom\x12%\n\x05plane\x18\x04 \x01(\x0b\x32\x16.gazebo.msgs.PlaneGeom\x12\'\n\x06sphere\x18\x05 \x01(\x0b\x32\x17.gazebo.msgs.SphereGeom\x12%\n\x05image\x18\x06 \x01(\x0b\x32\x16.gazebo.msgs.ImageGeom\x12-\n\theightmap\x18\x07 \x01(\x0b\x32\x1a.gazebo.msgs.HeightmapGeom\x12#\n\x04mesh\x18\x08 \x01(\x0b\x32\x15.gazebo.msgs.MeshGeom\x12%\n\x06points\x18\t \x03(\x0b\x32\x15.gazebo.msgs.Vector3d\x12\'\n\x08polyline\x18\n \x03(\x0b\x32\x15.gazebo.msgs.Polyline\"\x93\x01\n\x04Type\x12\x07\n\x03\x42OX\x10\x01\x12\x0c\n\x08\x43YLINDER\x10\x02\x12\n\n\x06SPHERE\x10\x03\x12\t\n\x05PLANE\x10\x04\x12\t\n\x05IMAGE\x10\x05\x12\r\n\tHEIGHTMAP\x10\x06\x12\x08\n\x04MESH\x10\x07\x12\x10\n\x0cTRIANGLE_FAN\x10\x08\x12\x0e\n\nLINE_STRIP\x10\t\x12\x0c\n\x08POLYLINE\x10\n\x12\t\n\x05\x45MPTY\x10\x0b'
,
dependencies=[boxgeom__pb2.DESCRIPTOR,cylindergeom__pb2.DESCRIPTOR,spheregeom__pb2.DESCRIPTOR,planegeom__pb2.DESCRIPTOR,imagegeom__pb2.DESCRIPTOR,heightmapgeom__pb2.DESCRIPTOR,meshgeom__pb2.DESCRIPTOR,vector3d__pb2.DESCRIPTOR,polylinegeom__pb2.DESCRIPTOR,])
_GEOMETRY_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='gazebo.msgs.Geometry.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='BOX', index=0, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CYLINDER', index=1, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPHERE', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PLANE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMAGE', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HEIGHTMAP', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESH', index=6, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRIANGLE_FAN', index=7, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LINE_STRIP', index=8, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='POLYLINE', index=9, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EMPTY', index=10, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=610,
serialized_end=757,
)
_sym_db.RegisterEnumDescriptor(_GEOMETRY_TYPE)
_GEOMETRY = _descriptor.Descriptor(
name='Geometry',
full_name='gazebo.msgs.Geometry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='gazebo.msgs.Geometry.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='box', full_name='gazebo.msgs.Geometry.box', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cylinder', full_name='gazebo.msgs.Geometry.cylinder', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plane', full_name='gazebo.msgs.Geometry.plane', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sphere', full_name='gazebo.msgs.Geometry.sphere', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image', full_name='gazebo.msgs.Geometry.image', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='heightmap', full_name='gazebo.msgs.Geometry.heightmap', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mesh', full_name='gazebo.msgs.Geometry.mesh', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='points', full_name='gazebo.msgs.Geometry.points', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='polyline', full_name='gazebo.msgs.Geometry.polyline', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GEOMETRY_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=757,
)
_GEOMETRY.fields_by_name['type'].enum_type = _GEOMETRY_TYPE
_GEOMETRY.fields_by_name['box'].message_type = boxgeom__pb2._BOXGEOM
_GEOMETRY.fields_by_name['cylinder'].message_type = cylindergeom__pb2._CYLINDERGEOM
_GEOMETRY.fields_by_name['plane'].message_type = planegeom__pb2._PLANEGEOM
_GEOMETRY.fields_by_name['sphere'].message_type = spheregeom__pb2._SPHEREGEOM
_GEOMETRY.fields_by_name['image'].message_type = imagegeom__pb2._IMAGEGEOM
_GEOMETRY.fields_by_name['heightmap'].message_type = heightmapgeom__pb2._HEIGHTMAPGEOM
_GEOMETRY.fields_by_name['mesh'].message_type = meshgeom__pb2._MESHGEOM
_GEOMETRY.fields_by_name['points'].message_type = vector3d__pb2._VECTOR3D
_GEOMETRY.fields_by_name['polyline'].message_type = polylinegeom__pb2._POLYLINE
_GEOMETRY_TYPE.containing_type = _GEOMETRY
DESCRIPTOR.message_types_by_name['Geometry'] = _GEOMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Geometry = _reflection.GeneratedProtocolMessageType('Geometry', (_message.Message,), {
'DESCRIPTOR' : _GEOMETRY,
'__module__' : 'geometry_pb2'
# @@protoc_insertion_point(class_scope:gazebo.msgs.Geometry)
})
_sym_db.RegisterMessage(Geometry)
# @@protoc_insertion_point(module_scope)
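# --- Editor's sketch, not part of the generated file: building a Geometry
# message with the generated class. Assumes SphereGeom exposes a required
# 'radius' field, as in the stock Gazebo message definitions.
def _example_geometry_message():
    geom = Geometry()
    geom.type = Geometry.SPHERE
    geom.sphere.radius = 0.5
    return geom.SerializeToString()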
| 46.721239
| 1,282
| 0.762572
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
import boxgeom_pb2 as boxgeom__pb2
import cylindergeom_pb2 as cylindergeom__pb2
import spheregeom_pb2 as spheregeom__pb2
import planegeom_pb2 as planegeom__pb2
import imagegeom_pb2 as imagegeom__pb2
import heightmapgeom_pb2 as heightmapgeom__pb2
import meshgeom_pb2 as meshgeom__pb2
import vector3d_pb2 as vector3d__pb2
import polylinegeom_pb2 as polylinegeom__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='geometry.proto',
package='gazebo.msgs',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0egeometry.proto\x12\x0bgazebo.msgs\x1a\rboxgeom.proto\x1a\x12\x63ylindergeom.proto\x1a\x10spheregeom.proto\x1a\x0fplanegeom.proto\x1a\x0fimagegeom.proto\x1a\x13heightmapgeom.proto\x1a\x0emeshgeom.proto\x1a\x0evector3d.proto\x1a\x12polylinegeom.proto\"\xb5\x04\n\x08Geometry\x12(\n\x04type\x18\x01 \x01(\x0e\x32\x1a.gazebo.msgs.Geometry.Type\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.gazebo.msgs.BoxGeom\x12+\n\x08\x63ylinder\x18\x03 \x01(\x0b\x32\x19.gazebo.msgs.CylinderGeom\x12%\n\x05plane\x18\x04 \x01(\x0b\x32\x16.gazebo.msgs.PlaneGeom\x12\'\n\x06sphere\x18\x05 \x01(\x0b\x32\x17.gazebo.msgs.SphereGeom\x12%\n\x05image\x18\x06 \x01(\x0b\x32\x16.gazebo.msgs.ImageGeom\x12-\n\theightmap\x18\x07 \x01(\x0b\x32\x1a.gazebo.msgs.HeightmapGeom\x12#\n\x04mesh\x18\x08 \x01(\x0b\x32\x15.gazebo.msgs.MeshGeom\x12%\n\x06points\x18\t \x03(\x0b\x32\x15.gazebo.msgs.Vector3d\x12\'\n\x08polyline\x18\n \x03(\x0b\x32\x15.gazebo.msgs.Polyline\"\x93\x01\n\x04Type\x12\x07\n\x03\x42OX\x10\x01\x12\x0c\n\x08\x43YLINDER\x10\x02\x12\n\n\x06SPHERE\x10\x03\x12\t\n\x05PLANE\x10\x04\x12\t\n\x05IMAGE\x10\x05\x12\r\n\tHEIGHTMAP\x10\x06\x12\x08\n\x04MESH\x10\x07\x12\x10\n\x0cTRIANGLE_FAN\x10\x08\x12\x0e\n\nLINE_STRIP\x10\t\x12\x0c\n\x08POLYLINE\x10\n\x12\t\n\x05\x45MPTY\x10\x0b'
,
dependencies=[boxgeom__pb2.DESCRIPTOR,cylindergeom__pb2.DESCRIPTOR,spheregeom__pb2.DESCRIPTOR,planegeom__pb2.DESCRIPTOR,imagegeom__pb2.DESCRIPTOR,heightmapgeom__pb2.DESCRIPTOR,meshgeom__pb2.DESCRIPTOR,vector3d__pb2.DESCRIPTOR,polylinegeom__pb2.DESCRIPTOR,])
_GEOMETRY_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='gazebo.msgs.Geometry.Type',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='BOX', index=0, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CYLINDER', index=1, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SPHERE', index=2, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PLANE', index=3, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMAGE', index=4, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HEIGHTMAP', index=5, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MESH', index=6, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRIANGLE_FAN', index=7, number=8,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='LINE_STRIP', index=8, number=9,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='POLYLINE', index=9, number=10,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='EMPTY', index=10, number=11,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=610,
serialized_end=757,
)
_sym_db.RegisterEnumDescriptor(_GEOMETRY_TYPE)
_GEOMETRY = _descriptor.Descriptor(
name='Geometry',
full_name='gazebo.msgs.Geometry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='gazebo.msgs.Geometry.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='box', full_name='gazebo.msgs.Geometry.box', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='cylinder', full_name='gazebo.msgs.Geometry.cylinder', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='plane', full_name='gazebo.msgs.Geometry.plane', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sphere', full_name='gazebo.msgs.Geometry.sphere', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image', full_name='gazebo.msgs.Geometry.image', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='heightmap', full_name='gazebo.msgs.Geometry.heightmap', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mesh', full_name='gazebo.msgs.Geometry.mesh', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='points', full_name='gazebo.msgs.Geometry.points', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='polyline', full_name='gazebo.msgs.Geometry.polyline', index=9,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_GEOMETRY_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=757,
)
_GEOMETRY.fields_by_name['type'].enum_type = _GEOMETRY_TYPE
_GEOMETRY.fields_by_name['box'].message_type = boxgeom__pb2._BOXGEOM
_GEOMETRY.fields_by_name['cylinder'].message_type = cylindergeom__pb2._CYLINDERGEOM
_GEOMETRY.fields_by_name['plane'].message_type = planegeom__pb2._PLANEGEOM
_GEOMETRY.fields_by_name['sphere'].message_type = spheregeom__pb2._SPHEREGEOM
_GEOMETRY.fields_by_name['image'].message_type = imagegeom__pb2._IMAGEGEOM
_GEOMETRY.fields_by_name['heightmap'].message_type = heightmapgeom__pb2._HEIGHTMAPGEOM
_GEOMETRY.fields_by_name['mesh'].message_type = meshgeom__pb2._MESHGEOM
_GEOMETRY.fields_by_name['points'].message_type = vector3d__pb2._VECTOR3D
_GEOMETRY.fields_by_name['polyline'].message_type = polylinegeom__pb2._POLYLINE
_GEOMETRY_TYPE.containing_type = _GEOMETRY
DESCRIPTOR.message_types_by_name['Geometry'] = _GEOMETRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Geometry = _reflection.GeneratedProtocolMessageType('Geometry', (_message.Message,), {
'DESCRIPTOR' : _GEOMETRY,
'__module__' : 'geometry_pb2'
})
_sym_db.RegisterMessage(Geometry)
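# --- Hedged usage sketch (not part of the generated file) ---
# A minimal example of driving the generated Geometry class through the
# standard protobuf API. Only the enum field is touched so no field names of
# the nested *Geom messages have to be assumed; it runs only if the sibling
# *_pb2 modules imported at the top of this generated file are importable.
if __name__ == '__main__':
    geo = Geometry()
    geo.type = Geometry.BOX  # enum value declared in _GEOMETRY_TYPE above
    payload = geo.SerializeToString()
    clone = Geometry()
    clone.ParseFromString(payload)
    assert clone.type == Geometry.BOX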
| true
| true
|
790a6eb21c9b01b275319a92d0d0a4b26f43a172
| 1,413
|
py
|
Python
|
CNN/Dense.py
|
GreatGameDota/CNN-Numpy-1D-Images
|
6016701b54d7475b0c294355801bf8f6ce534852
|
[
"MIT"
] | null | null | null |
CNN/Dense.py
|
GreatGameDota/CNN-Numpy-1D-Images
|
6016701b54d7475b0c294355801bf8f6ce534852
|
[
"MIT"
] | null | null | null |
CNN/Dense.py
|
GreatGameDota/CNN-Numpy-1D-Images
|
6016701b54d7475b0c294355801bf8f6ce534852
|
[
"MIT"
] | null | null | null |
import numpy as np
class Dense():
def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', input_shape=None):
self._units = units
self._activation = activation
self._use_bias = use_bias
        self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias = np.zeros((units, 1))
def setPrevUnits(self, units):
self._prevUnits = units
self._weights = np.zeros((self._units, units))
self._weights = np.random.standard_normal(
size=self._weights.shape) * 0.01
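    # Forward pass below computes weights.dot(arr) + bias and applies the
    # optional activation; `arr` is a column vector with one column per sample
    # and `prev_units` rows, so the output has shape (units, n_samples).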
def forward(self, arr):
out = self._weights.dot(arr) + self._bias
if self._activation == "relu":
out[out <= 0] = 0
if self._activation == "softmax":
out = self.softmax(out)
return out
def backwardFirst(self, dout, z):
dw = dout.dot(z.T)
db = np.sum(dout, axis=1)
db = np.reshape(db, (db.shape[0], 1))
return dw, db
def backward(self, dout, next_weights, flat, z):
dz = next_weights.T.dot(dout)
if (self._activation == "relu"):
dz[z <= 0] = 0
dw = dz.dot(flat.T)
db = np.sum(dz, axis=1).reshape(self._bias.shape)
return dw, db, dz
def softmax(self, X):
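        # NOTE: np.exp can overflow for large inputs; subtracting np.max(X)
        # from X first would make this softmax numerically stable.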
out = np.exp(X)
return out/np.sum(out)
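# --- Hedged usage sketch (illustrative, not from the original repo) ---
# Wires two Dense layers together the way the class above expects:
# setPrevUnits() sizes the weight matrix, forward() consumes a column vector.
if __name__ == '__main__':
    np.random.seed(0)
    hidden = Dense(16, activation='relu')
    hidden.setPrevUnits(8)
    head = Dense(4, activation='softmax')
    head.setPrevUnits(16)
    x = np.random.standard_normal((8, 1))
    probs = head.forward(hidden.forward(x))
    print(probs.shape)             # (4, 1)
    print(float(np.sum(probs)))    # ~1.0 because of the softmax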
| 31.4
| 143
| 0.587403
|
import numpy as np
class Dense():
def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', input_shape=None):
self._units = units
self._activation = activation
self._use_bias = use_bias
        self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._bias = np.zeros((units, 1))
def setPrevUnits(self, units):
self._prevUnits = units
self._weights = np.zeros((self._units, units))
self._weights = np.random.standard_normal(
size=self._weights.shape) * 0.01
def forward(self, arr):
out = self._weights.dot(arr) + self._bias
if self._activation == "relu":
out[out <= 0] = 0
if self._activation == "softmax":
out = self.softmax(out)
return out
def backwardFirst(self, dout, z):
dw = dout.dot(z.T)
db = np.sum(dout, axis=1)
db = np.reshape(db, (db.shape[0], 1))
return dw, db
def backward(self, dout, next_weights, flat, z):
dz = next_weights.T.dot(dout)
if (self._activation == "relu"):
dz[z <= 0] = 0
dw = dz.dot(flat.T)
db = np.sum(dz, axis=1).reshape(self._bias.shape)
return dw, db, dz
def softmax(self, X):
out = np.exp(X)
return out/np.sum(out)
| true
| true
|
790a6fced3b1b9469c137f7c3e15dd786ec39a92
| 6,012
|
py
|
Python
|
main_server/common.py
|
yehan-xiao/SUCS
|
4110a9d858c9e484d35c08198d951c6ba734460f
|
[
"MIT"
] | null | null | null |
main_server/common.py
|
yehan-xiao/SUCS
|
4110a9d858c9e484d35c08198d951c6ba734460f
|
[
"MIT"
] | null | null | null |
main_server/common.py
|
yehan-xiao/SUCS
|
4110a9d858c9e484d35c08198d951c6ba734460f
|
[
"MIT"
] | null | null | null |
# Written by Shitao Tang
# --------------------------------------------------------
import connectDB
import time,hashlib,logging
def sign_up(username,password):
db=connectDB.database.getInstance()
if len(username)<=20:
return db.create_account(username,hashlib.sha224(password).hexdigest())
else:
return 'username must be less than 20 characters'
def account_authentication(username,password):
db=connectDB.database.getInstance()
result=db.authenticate_account(username,hashlib.sha224(password).hexdigest())
if result:
return hashlib.sha224(username+str(time.time())).hexdigest()
elif result ==False:
return None
else:
logging.error(result)
def check_keys(data,keys): # return the first key in keys that is missing from data, or None if all are present
for key in keys:
if key not in data:
return key
return None
def check_float(value,min_value,max_value): # convert value to float and return it if it lies in [min_value, max_value], otherwise None
try:
value=float(value)
if value>=min_value and value<=max_value:
return value
else:
return None
except ValueError:
return None
def decode_xml(object_name,xml): #get the bounding box of the object in an image
logging.info("begin to decode")
bounding_box=[]
import xml.etree.ElementTree as ET
try:
root=ET.fromstring(xml)
    except ET.ParseError:
return []
for obj in root.findall('object'):
if(obj.find('name').text==object_name):
score=float(obj.find("score").text)
bnd_box=obj.find('bndbox')
xmin=int((bnd_box).find('xmin').text)
ymin=int((bnd_box).find('ymin').text)
xmax=int((bnd_box).find('xmax').text)
ymax=int((bnd_box).find('ymax').text)
bounding_box.append((xmin,ymin,xmax,ymax,score))
return bounding_box
def coordinate_from_google_to_baidu(longitude,latitude):
return gcj02tobd09(longitude,latitude)
def coordinate_from_baidu_to_google(longitude,latitude):
return bd09togcj02(longitude,latitude)
def check_connection_of_image_analysis_server(address):
    response=requests.get(address+"/ok")
    print address,response.text
    if response.text=="OK":
return True
else:
return False
# The following code is copied from GitHub.
import json
import requests
import math
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def geocode(address):
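    # NOTE: 'key' (an AMap REST API key) is referenced below but is not
    # defined anywhere in this module; it must come from an enclosing scope.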
geocoding = {'s': 'rsv3',
'key': key,
'city': 'china',
'address': address}
res = requests.get(
"http://restapi.amap.com/v3/geocode/geo", params=geocoding)
if res.status_code == 200:
json = res.json()
status = json.get('status')
count = json.get('count')
if status == '1' and int(count) >= 1:
geocodes = json.get('geocodes')[0]
lng = float(geocodes.get('location').split(',')[0])
lat = float(geocodes.get('location').split(',')[1])
return [lng, lat]
else:
return None
else:
return None
def gcj02tobd09(lng, lat):
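    # GCJ-02 -> BD-09: perturb the point in polar form with the empirical
    # x_pi constant, then add Baidu's fixed offsets (0.0065, 0.006).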
z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi)
theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi)
bd_lng = z * math.cos(theta) + 0.0065
bd_lat = z * math.sin(theta) + 0.006
return [bd_lng, bd_lat]
def bd09togcj02(bd_lon, bd_lat):
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * x_pi)
theta = math.atan2(y, x) - 0.000003 * math.cos(x * x_pi)
gg_lng = z * math.cos(theta)
gg_lat = z * math.sin(theta)
return [gg_lng, gg_lat]
def wgs84togcj02(lng, lat):
"""
"""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
def gcj02towgs84(lng, lat):
"""
"""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
def transformlat(lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * pi) + 40.0 *
math.sin(lat / 3.0 * pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * pi) + 320 *
math.sin(lat * pi / 30.0)) * 2.0 / 3.0
return ret
def transformlng(lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * pi) + 40.0 *
math.sin(lng / 3.0 * pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *
math.sin(lng / 30.0 * pi)) * 2.0 / 3.0
return ret
def out_of_china(lng, lat):
"""
"""
return not (lng > 73.66 and lng < 135.05 and lat > 3.86 and lat < 53.55)
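# --- Hedged usage sketch (illustrative only) ---
# Round-trips an arbitrary point near Beijing through the BD-09 <-> GCJ-02
# helpers above; the two conversions are approximate inverses. Runs only if
# the module's own imports (connectDB, requests) resolve in your environment.
if __name__ == '__main__':
    lng, lat = 116.404, 39.915
    bd = coordinate_from_google_to_baidu(lng, lat)
    back = coordinate_from_baidu_to_google(bd[0], bd[1])
    print bd, back  # agrees with the input to within ~1e-6 degrees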
| 30.989691
| 122
| 0.570692
|
import connectDB
import time,hashlib,logging
def sign_up(username,password):
db=connectDB.database.getInstance()
if len(username)<=20:
return db.create_account(username,hashlib.sha224(password).hexdigest())
else:
return 'username must be less than 20 characters'
def account_authentication(username,password):
db=connectDB.database.getInstance()
result=db.authenticate_account(username,hashlib.sha224(password).hexdigest())
if result:
return hashlib.sha224(username+str(time.time())).hexdigest()
elif result ==False:
return None
else:
logging.error(result)
def check_keys(data,keys):
for key in keys:
if key not in data:
return key
return None
def check_float(value,min_value,max_value):
try:
value=float(value)
if value>=min_value and value<=max_value:
return value
else:
return None
except ValueError:
return None
def decode_xml(object_name,xml):
logging.info("begin to decode")
bounding_box=[]
import xml.etree.ElementTree as ET
try:
root=ET.fromstring(xml)
    except ET.ParseError:
return []
for obj in root.findall('object'):
if(obj.find('name').text==object_name):
score=float(obj.find("score").text)
bnd_box=obj.find('bndbox')
xmin=int((bnd_box).find('xmin').text)
ymin=int((bnd_box).find('ymin').text)
xmax=int((bnd_box).find('xmax').text)
ymax=int((bnd_box).find('ymax').text)
bounding_box.append((xmin,ymin,xmax,ymax,score))
return bounding_box
def coordinate_from_google_to_baidu(longitude,latitude):
return gcj02tobd09(longitude,latitude)
def coordinate_from_baidu_to_google(longitude,latitude):
return bd09togcj02(longitude,latitude)
def check_connection_of_image_analysis_server(address):
    response=requests.get(address+"/ok")
    print address,response.text
    if response.text=="OK":
return True
else:
return False
import json
import requests
import math
x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323
def geocode(address):
geocoding = {'s': 'rsv3',
'key': key,
'city': 'china',
'address': address}
res = requests.get(
"http://restapi.amap.com/v3/geocode/geo", params=geocoding)
if res.status_code == 200:
json = res.json()
status = json.get('status')
count = json.get('count')
if status == '1' and int(count) >= 1:
geocodes = json.get('geocodes')[0]
lng = float(geocodes.get('location').split(',')[0])
lat = float(geocodes.get('location').split(',')[1])
return [lng, lat]
else:
return None
else:
return None
def gcj02tobd09(lng, lat):
z = math.sqrt(lng * lng + lat * lat) + 0.00002 * math.sin(lat * x_pi)
theta = math.atan2(lat, lng) + 0.000003 * math.cos(lng * x_pi)
bd_lng = z * math.cos(theta) + 0.0065
bd_lat = z * math.sin(theta) + 0.006
return [bd_lng, bd_lat]
def bd09togcj02(bd_lon, bd_lat):
x = bd_lon - 0.0065
y = bd_lat - 0.006
z = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * x_pi)
theta = math.atan2(y, x) - 0.000003 * math.cos(x * x_pi)
gg_lng = z * math.cos(theta)
gg_lat = z * math.sin(theta)
return [gg_lng, gg_lat]
def wgs84togcj02(lng, lat):
"""
"""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglng, mglat]
def gcj02towgs84(lng, lat):
"""
"""
if out_of_china(lng, lat):
return lng, lat
dlat = transformlat(lng - 105.0, lat - 35.0)
dlng = transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [lng * 2 - mglng, lat * 2 - mglat]
def transformlat(lng, lat):
ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lat * pi) + 40.0 *
math.sin(lat / 3.0 * pi)) * 2.0 / 3.0
ret += (160.0 * math.sin(lat / 12.0 * pi) + 320 *
math.sin(lat * pi / 30.0)) * 2.0 / 3.0
return ret
def transformlng(lng, lat):
ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
math.sin(2.0 * lng * pi)) * 2.0 / 3.0
ret += (20.0 * math.sin(lng * pi) + 40.0 *
math.sin(lng / 3.0 * pi)) * 2.0 / 3.0
ret += (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *
math.sin(lng / 30.0 * pi)) * 2.0 / 3.0
return ret
def out_of_china(lng, lat):
"""
"""
return not (lng > 73.66 and lng < 135.05 and lat > 3.86 and lat < 53.55)
| false
| true
|
790a7017b19c51e6dda79dfec8bdb0f336c8a704
| 1,218
|
py
|
Python
|
setup.py
|
luttermann/pystadel
|
41f627ab08f70bc8151d1337f886dd9ee596ae44
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
luttermann/pystadel
|
41f627ab08f70bc8151d1337f886dd9ee596ae44
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
luttermann/pystadel
|
41f627ab08f70bc8151d1337f886dd9ee596ae44
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pystadel',
version='1.0.0',
description='Class for sending SMSes using Stadel SMS gateway',
long_description=long_description,
url='https://github.com/luttermann/pystadel',
author='Lasse Luttermann Poulsen',
author_email='lasse@poulsen.dk',
license='BSD-2-Clause',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
        # It might work in other versions, but these are not tested.
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='sms stadel',
py_modules=["stadel"],
)
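# --- Hedged usage note (not part of the original file) ---
# With this setup.py in place the package can be built and installed the
# usual way, e.g.:
#   python setup.py sdist   # build a source distribution
#   pip install .           # install from the working tree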
| 27.066667
| 68
| 0.644499
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pystadel',
version='1.0.0',
description='Class for sending SMSes using Stadel SMS gateway',
long_description=long_description,
url='https://github.com/luttermann/pystadel',
author='Lasse Luttermann Poulsen',
author_email='lasse@poulsen.dk',
license='BSD-2-Clause',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='sms stadel',
py_modules=["stadel"],
)
| true
| true
|
790a70a5736ebdf613d6c54c84ce4d4cc7768399
| 13,514
|
py
|
Python
|
colcon_package_information/verb/graph.py
|
christophebedard/colcon-package-information
|
295189f2f1d6362c331c6c3ff88e13ba28a94faa
|
[
"Apache-2.0"
] | 2
|
2018-02-01T05:57:34.000Z
|
2018-02-01T06:23:21.000Z
|
colcon_package_information/verb/graph.py
|
christophebedard/colcon-package-information
|
295189f2f1d6362c331c6c3ff88e13ba28a94faa
|
[
"Apache-2.0"
] | 12
|
2018-05-09T00:19:00.000Z
|
2021-08-09T07:04:57.000Z
|
colcon_package_information/verb/graph.py
|
christophebedard/colcon-package-information
|
295189f2f1d6362c331c6c3ff88e13ba28a94faa
|
[
"Apache-2.0"
] | 6
|
2018-09-10T23:33:48.000Z
|
2021-07-28T23:14:41.000Z
|
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from collections import defaultdict
from collections import OrderedDict
import itertools
import os
from pathlib import Path
from colcon_core.package_selection import add_arguments \
as add_packages_arguments
from colcon_core.package_selection import get_package_descriptors
from colcon_core.package_selection import select_package_decorators
from colcon_core.plugin_system import satisfies_version
from colcon_core.topological_order import topological_order_packages
from colcon_core.verb import VerbExtensionPoint
class GraphVerb(VerbExtensionPoint):
"""Generate a visual representation of the dependency graph."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(VerbExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def add_arguments(self, *, parser): # noqa: D102
# only added so that package selection arguments can be used
# which use the build directory to store state information
parser.add_argument(
'--build-base',
default='build',
help='The base path for all build directories (default: build)')
add_packages_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--dot',
action='store_true',
default=False,
help='Output topological graph in DOT '
'(e.g. pass the output to dot: ` | dot -Tpng -o graph.png`), '
'legend: blue=build, red=run, tan=test, dashed=indirect')
group.add_argument(
'--density',
action='store_true',
default=False,
help='Output density of the graph (only without --dot)')
parser.add_argument(
'--legend',
action='store_true',
default=False,
help='Output legend for the graph')
parser.add_argument(
'--dot-cluster',
action='store_true',
default=False,
help='Cluster packages by their filesystem path (only affects '
'--dot)')
parser.add_argument(
'--dot-include-skipped',
action='store_true',
default=False,
help='Also output skipped packages (only affects --dot)')
def main(self, *, context): # noqa: D102
args = context.args
descriptors = get_package_descriptors(args)
decorators = topological_order_packages(
descriptors, recursive_categories=('run', ))
select_package_decorators(args, decorators)
if not args.dot:
if args.legend:
print('+ marks when the package in this row can be processed')
print('* marks a direct dependency '
'from the package indicated by the + in the same column '
'to the package in this row')
print('. marks a transitive dependency')
print()
# draw dependency graph in ASCII
shown_decorators = list(filter(lambda d: d.selected, decorators))
max_length = max([
len(m.descriptor.name) for m in shown_decorators] + [0])
lines = [
m.descriptor.name.ljust(max_length + 2)
for m in shown_decorators]
depends = [
m.descriptor.get_dependencies() for m in shown_decorators]
rec_depends = [
m.descriptor.get_recursive_dependencies(
[d.descriptor for d in decorators],
recursive_categories=('run', ))
for m in shown_decorators]
empty_cells = 0
for i, decorator in enumerate(shown_decorators):
for j in range(len(lines)):
if j == i:
# package i is being processed
lines[j] += '+'
elif shown_decorators[j].descriptor.name in depends[i]:
# package i directly depends on package j
lines[j] += '*'
elif shown_decorators[j].descriptor.name in rec_depends[i]:
# package i recursively depends on package j
lines[j] += '.'
else:
# package i doesn't depend on package j
lines[j] += ' '
empty_cells += 1
if args.density:
empty_fraction = \
empty_cells / (len(lines) * (len(lines) - 1)) \
if len(lines) > 1 else 1.0
# normalize to 200% since half of the matrix should be empty
density_percentage = 200.0 * (1.0 - empty_fraction)
print('dependency density %.2f %%' % density_percentage)
print()
else: # --dot
lines = ['digraph graphname {']
decorators_by_name = defaultdict(set)
for deco in decorators:
decorators_by_name[deco.descriptor.name].add(deco)
selected_pkg_names = [
m.descriptor.name for m in decorators
if m.selected or args.dot_include_skipped]
has_duplicate_names = \
len(selected_pkg_names) != len(set(selected_pkg_names))
selected_pkg_names = set(selected_pkg_names)
# collect selected package decorators and their parent path
nodes = OrderedDict()
for deco in reversed(decorators):
if deco.selected or args.dot_include_skipped:
nodes[deco] = Path(deco.descriptor.path).parent
# collect direct dependencies
direct_edges = defaultdict(set)
for deco in reversed(decorators):
if (
not deco.selected and
not args.dot_include_skipped
):
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
if dep not in selected_pkg_names:
continue
# store the category of each dependency
# use the decorator
# since there might be packages with the same name
direct_edges[(deco, dep)].add(category)
# collect indirect dependencies
indirect_edges = defaultdict(set)
for deco in reversed(decorators):
if not deco.selected:
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
# ignore direct dependencies
if dep in selected_pkg_names:
continue
# ignore unknown dependencies
if dep not in decorators_by_name.keys():
continue
# iterate over recursive dependencies
for rdep in itertools.chain.from_iterable(
d.recursive_dependencies
for d in decorators_by_name[dep]
):
if rdep not in selected_pkg_names:
continue
# skip edges which are redundant to direct edges
if (deco, rdep) in direct_edges:
continue
indirect_edges[(deco, rdep)].add(category)
try:
# HACK Python 3.5 can't handle Path objects
common_path = os.path.commonpath(
[str(p) for p in nodes.values()])
except ValueError:
common_path = None
def get_node_data(decorator):
nonlocal args
nonlocal has_duplicate_names
if not has_duplicate_names:
# use name where possible so the dot code is easy to read
return decorator.descriptor.name, \
'' if (
decorator.selected or
not args.dot_include_skipped
) else '[color = "gray" fontcolor = "gray"]'
# otherwise append the descriptor id to make each node unique
descriptor_id = id(decorator.descriptor)
return (
'{decorator.descriptor.name}_{descriptor_id}'
.format_map(locals()),
' [label = "{decorator.descriptor.name}"]'
.format_map(locals()),
)
if not args.dot_cluster or common_path is None:
# output nodes
for deco in nodes.keys():
if (
not deco.selected and
not args.dot_include_skipped
):
continue
node_name, attributes = get_node_data(deco)
lines.append(
' "{node_name}"{attributes};'.format_map(locals()))
else:
# output clusters
clusters = defaultdict(set)
for deco, path in nodes.items():
clusters[path.relative_to(common_path)].add(deco)
            for i, cluster in enumerate(clusters.items()):
path, decos = cluster
if path.name:
# wrap cluster in subgraph
lines.append(
' subgraph cluster_{i} {{'.format_map(locals()))
lines.append(
' label = "{path}";'.format_map(locals()))
indent = ' '
else:
indent = ' '
for deco in decos:
node_name, attributes = get_node_data(deco)
lines.append(
'{indent}"{node_name}"{attributes};'
.format_map(locals()))
if path.name:
lines.append(' }')
# output edges
color_mapping = OrderedDict((
('build', '#0000ff'), # blue
('run', '#ff0000'), # red
('test', '#d2b48c'), # tan
))
for style, edges in zip(
('', ', style="dashed"'),
(direct_edges, indirect_edges),
):
for (deco_start, node_end), categories in edges.items():
start_name, _ = get_node_data(deco_start)
for deco in decorators_by_name[node_end]:
end_name, _ = get_node_data(deco)
edge_alpha = '' \
if deco_start.selected and deco.selected else '77'
colors = ':'.join([
color + edge_alpha
for category, color in color_mapping.items()
if category in categories])
lines.append(
' "{start_name}" -> "{end_name}" '
'[color="{colors}"{style}];'.format_map(locals()))
if args.legend:
lines.append(' subgraph cluster_legend {')
lines.append(' color=gray')
lines.append(' label="Legend";')
lines.append(' margin=0;')
# invisible nodes between the dependency edges
lines.append(' node [label="", shape=none];')
previous_node = '_legend_first'
# an edge for each dependency type
for dependency_type, color in color_mapping.items():
next_node = '_legend_' + dependency_type
lines.append(
' {previous_node} -> {next_node} '
'[label="{dependency_type} dep.", color="{color}"];'
.format_map(locals()))
previous_node = next_node
lines.append(
' {previous_node} -> _legend_last '
'[label="indirect dep.", style="dashed"];'
.format_map(locals()))
# layout all legend nodes on the same rank
lines.append(' {')
lines.append(' rank=same;')
lines.append(' _legend_first;')
for dependency_type in color_mapping.keys():
lines.append(
' _legend_{dependency_type};'
.format_map(locals()))
lines.append(' _legend_last;')
lines.append(' }')
lines.append(' }')
lines.append('}')
for line in lines:
print(line)
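# --- Hedged usage note (not part of the original file) ---
# As a colcon verb extension this class is normally invoked through the
# colcon CLI (assuming the extension is registered via its entry point)
# rather than imported directly, e.g.:
#   colcon graph                                  # ASCII dependency matrix
#   colcon graph --dot | dot -Tpng -o graph.png   # DOT output, see --legend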
| 42.496855
| 79
| 0.492896
|
from collections import defaultdict
from collections import OrderedDict
import itertools
import os
from pathlib import Path
from colcon_core.package_selection import add_arguments \
as add_packages_arguments
from colcon_core.package_selection import get_package_descriptors
from colcon_core.package_selection import select_package_decorators
from colcon_core.plugin_system import satisfies_version
from colcon_core.topological_order import topological_order_packages
from colcon_core.verb import VerbExtensionPoint
class GraphVerb(VerbExtensionPoint):
def __init__(self):
super().__init__()
satisfies_version(VerbExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def add_arguments(self, *, parser):
parser.add_argument(
'--build-base',
default='build',
help='The base path for all build directories (default: build)')
add_packages_arguments(parser)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'--dot',
action='store_true',
default=False,
help='Output topological graph in DOT '
'(e.g. pass the output to dot: ` | dot -Tpng -o graph.png`), '
'legend: blue=build, red=run, tan=test, dashed=indirect')
group.add_argument(
'--density',
action='store_true',
default=False,
help='Output density of the graph (only without --dot)')
parser.add_argument(
'--legend',
action='store_true',
default=False,
help='Output legend for the graph')
parser.add_argument(
'--dot-cluster',
action='store_true',
default=False,
help='Cluster packages by their filesystem path (only affects '
'--dot)')
parser.add_argument(
'--dot-include-skipped',
action='store_true',
default=False,
help='Also output skipped packages (only affects --dot)')
def main(self, *, context):
args = context.args
descriptors = get_package_descriptors(args)
decorators = topological_order_packages(
descriptors, recursive_categories=('run', ))
select_package_decorators(args, decorators)
if not args.dot:
if args.legend:
print('+ marks when the package in this row can be processed')
print('* marks a direct dependency '
'from the package indicated by the + in the same column '
'to the package in this row')
print('. marks a transitive dependency')
print()
shown_decorators = list(filter(lambda d: d.selected, decorators))
max_length = max([
len(m.descriptor.name) for m in shown_decorators] + [0])
lines = [
m.descriptor.name.ljust(max_length + 2)
for m in shown_decorators]
depends = [
m.descriptor.get_dependencies() for m in shown_decorators]
rec_depends = [
m.descriptor.get_recursive_dependencies(
[d.descriptor for d in decorators],
recursive_categories=('run', ))
for m in shown_decorators]
empty_cells = 0
for i, decorator in enumerate(shown_decorators):
for j in range(len(lines)):
if j == i:
lines[j] += '+'
elif shown_decorators[j].descriptor.name in depends[i]:
lines[j] += '*'
elif shown_decorators[j].descriptor.name in rec_depends[i]:
lines[j] += '.'
else:
lines[j] += ' '
empty_cells += 1
if args.density:
empty_fraction = \
empty_cells / (len(lines) * (len(lines) - 1)) \
if len(lines) > 1 else 1.0
# normalize to 200% since half of the matrix should be empty
density_percentage = 200.0 * (1.0 - empty_fraction)
print('dependency density %.2f %%' % density_percentage)
print()
else: # --dot
lines = ['digraph graphname {']
decorators_by_name = defaultdict(set)
for deco in decorators:
decorators_by_name[deco.descriptor.name].add(deco)
selected_pkg_names = [
m.descriptor.name for m in decorators
if m.selected or args.dot_include_skipped]
has_duplicate_names = \
len(selected_pkg_names) != len(set(selected_pkg_names))
selected_pkg_names = set(selected_pkg_names)
# collect selected package decorators and their parent path
nodes = OrderedDict()
for deco in reversed(decorators):
if deco.selected or args.dot_include_skipped:
nodes[deco] = Path(deco.descriptor.path).parent
# collect direct dependencies
direct_edges = defaultdict(set)
for deco in reversed(decorators):
if (
not deco.selected and
not args.dot_include_skipped
):
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
if dep not in selected_pkg_names:
continue
# store the category of each dependency
# use the decorator
# since there might be packages with the same name
direct_edges[(deco, dep)].add(category)
# collect indirect dependencies
indirect_edges = defaultdict(set)
for deco in reversed(decorators):
if not deco.selected:
continue
# iterate over dependency categories
for category, deps in deco.descriptor.dependencies.items():
# iterate over dependencies
for dep in deps:
# ignore direct dependencies
if dep in selected_pkg_names:
continue
# ignore unknown dependencies
if dep not in decorators_by_name.keys():
continue
# iterate over recursive dependencies
for rdep in itertools.chain.from_iterable(
d.recursive_dependencies
for d in decorators_by_name[dep]
):
if rdep not in selected_pkg_names:
continue
# skip edges which are redundant to direct edges
if (deco, rdep) in direct_edges:
continue
indirect_edges[(deco, rdep)].add(category)
try:
# HACK Python 3.5 can't handle Path objects
common_path = os.path.commonpath(
[str(p) for p in nodes.values()])
except ValueError:
common_path = None
def get_node_data(decorator):
nonlocal args
nonlocal has_duplicate_names
if not has_duplicate_names:
return decorator.descriptor.name, \
'' if (
decorator.selected or
not args.dot_include_skipped
) else '[color = "gray" fontcolor = "gray"]'
descriptor_id = id(decorator.descriptor)
return (
'{decorator.descriptor.name}_{descriptor_id}'
.format_map(locals()),
' [label = "{decorator.descriptor.name}"]'
.format_map(locals()),
)
if not args.dot_cluster or common_path is None:
for deco in nodes.keys():
if (
not deco.selected and
not args.dot_include_skipped
):
continue
node_name, attributes = get_node_data(deco)
lines.append(
' "{node_name}"{attributes};'.format_map(locals()))
else:
clusters = defaultdict(set)
for deco, path in nodes.items():
clusters[path.relative_to(common_path)].add(deco)
            for i, cluster in enumerate(clusters.items()):
path, decos = cluster
if path.name:
lines.append(
' subgraph cluster_{i} {{'.format_map(locals()))
lines.append(
' label = "{path}";'.format_map(locals()))
indent = ' '
else:
indent = ' '
for deco in decos:
node_name, attributes = get_node_data(deco)
lines.append(
'{indent}"{node_name}"{attributes};'
.format_map(locals()))
if path.name:
lines.append(' }')
color_mapping = OrderedDict((
('build', '#0000ff'),
('run', '#ff0000'),
('test', '#d2b48c'),
))
for style, edges in zip(
('', ', style="dashed"'),
(direct_edges, indirect_edges),
):
for (deco_start, node_end), categories in edges.items():
start_name, _ = get_node_data(deco_start)
for deco in decorators_by_name[node_end]:
end_name, _ = get_node_data(deco)
edge_alpha = '' \
if deco_start.selected and deco.selected else '77'
colors = ':'.join([
color + edge_alpha
for category, color in color_mapping.items()
if category in categories])
lines.append(
' "{start_name}" -> "{end_name}" '
'[color="{colors}"{style}];'.format_map(locals()))
if args.legend:
lines.append(' subgraph cluster_legend {')
lines.append(' color=gray')
lines.append(' label="Legend";')
lines.append(' margin=0;')
lines.append(' node [label="", shape=none];')
previous_node = '_legend_first'
for dependency_type, color in color_mapping.items():
next_node = '_legend_' + dependency_type
lines.append(
' {previous_node} -> {next_node} '
'[label="{dependency_type} dep.", color="{color}"];'
.format_map(locals()))
previous_node = next_node
lines.append(
' {previous_node} -> _legend_last '
'[label="indirect dep.", style="dashed"];'
.format_map(locals()))
lines.append(' {')
lines.append(' rank=same;')
lines.append(' _legend_first;')
for dependency_type in color_mapping.keys():
lines.append(
' _legend_{dependency_type};'
.format_map(locals()))
lines.append(' _legend_last;')
lines.append(' }')
lines.append(' }')
lines.append('}')
for line in lines:
print(line)
| true
| true
|
790a70eb22866d04959e1d316539871d41ec5268
| 11,843
|
py
|
Python
|
Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py
|
MicrosoftDocs/PowerShell-DSC-for-Linux
|
c7519c0b9166b4b0568cda75f05b5ad1cac72012
|
[
"MIT"
] | 2
|
2020-05-19T20:07:32.000Z
|
2020-08-08T00:58:15.000Z
|
Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py
|
MicrosoftDocs/PowerShell-DSC-for-Linux
|
c7519c0b9166b4b0568cda75f05b5ad1cac72012
|
[
"MIT"
] | null | null | null |
Providers/Scripts/2.4x-2.5x/Scripts/nxOMSPerfCounter.py
|
MicrosoftDocs/PowerShell-DSC-for-Linux
|
c7519c0b9166b4b0568cda75f05b5ad1cac72012
|
[
"MIT"
] | 4
|
2019-10-31T19:10:42.000Z
|
2022-03-15T07:42:03.000Z
|
#!/usr/bin/env python
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
import os
import imp
import re
import codecs
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
# backwards compatibility with pre-multi-homing bundles
conf_path = '/etc/opt/microsoft/omsagent/conf/omsagent.conf'
omi_map_path = '/etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json'
omi_map = None
multi_homed = None
non_mh_heartbeat_cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -b'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart'
def init_paths(WorkspaceID):
global conf_path
global omi_map_path
global multi_homed
omsagent_dir = '/etc/opt/microsoft/omsagent/'
mh_conf_dir = omsagent_dir + WorkspaceID + '/conf'
multi_homed = os.path.isdir(mh_conf_dir)
if multi_homed:
LG().Log('INFO', 'OMSAgent is multi-homed and resource is updating workspace ' + WorkspaceID)
conf_path = mh_conf_dir + '/omsagent.conf'
omi_map_path = mh_conf_dir + '/omsagent.d/omi_mapping.json'
def init_omi_map():
global omi_map
txt = codecs.open(omi_map_path, 'r', 'utf8').read()
omi_map = eval(txt)
def init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_paths(WorkspaceID)
init_omi_map()
if WorkspaceID is not None:
WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
else:
WorkspaceID = ''
if PerfCounterObject is not None:
for perf in PerfCounterObject:
new_perfs = []
if len(perf['PerformanceCounter'].value):
for perf_counter in perf['PerformanceCounter'].value:
new_perfs.append(perf_counter.encode('ascii', 'ignore'))
perf['PerformanceCounter'] = new_perfs
if perf['InstanceName'].value is None:
perf['InstanceName'] = ''
else:
perf['InstanceName'] = perf[
'InstanceName'].value.encode('ascii', 'ignore')
if perf['ObjectName'].value is None:
perf['ObjectName'] = ''
else:
perf['ObjectName'] = perf[
'ObjectName'].value.encode('ascii', 'ignore')
if perf['AllInstances'].value is None:
perf['AllInstances'] = False
else:
if perf['AllInstances'].value.value == 1:
perf['AllInstances'] = True
else:
perf['AllInstances'] = False
perf['IntervalSeconds'] = perf['IntervalSeconds'].value.value
def Set_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
def Test_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Test(HeartbeatIntervalSeconds, PerfCounterObject)
def Get_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
arg_names = list(locals().keys())
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
retval = 0
NewHeartbeatIntervalSeconds, NewPerf = Get(
HeartbeatIntervalSeconds, PerfCounterObject)
for perf in NewPerf:
if len(perf['PerformanceCounter']):
perf['PerformanceCounter'] = protocol.MI_StringA(
perf['PerformanceCounter'])
perf['ObjectName'] = protocol.MI_String(perf['ObjectName'])
perf['InstanceName'] = protocol.MI_String(perf['InstanceName'])
perf['AllInstances'] = protocol.MI_Boolean(perf['AllInstances'])
perf['IntervalSeconds'] = protocol.MI_Uint16(perf['IntervalSeconds'])
PerfCounterObject = protocol.MI_InstanceA(NewPerf)
HeartbeatIntervalSeconds = protocol.MI_Uint16(NewHeartbeatIntervalSeconds)
WorkspaceID = protocol.MI_String(WorkspaceID)
Name = protocol.MI_String(Name)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if Test(HeartbeatIntervalSeconds, PerfCounterObject) == [0]:
return [0]
if UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
return [0]
else:
return [-1]
def Test(HeartbeatIntervalSeconds, PerfCounterObject):
prune_perfs(PerfCounterObject)
NewHeartbeatIntervalSeconds, NewPerfs = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
if NewHeartbeatIntervalSeconds != HeartbeatIntervalSeconds:
return [-1]
PerfCounterObject.sort()
for perf in PerfCounterObject:
perf['PerformanceCounter'].sort()
perf['AllInstances'] = True
NewPerfs.sort()
for perf in NewPerfs:
perf['PerformanceCounter'].sort()
if PerfCounterObject != NewPerfs:
return [-1]
return [0]
def Get(HeartbeatIntervalSeconds, PerfCounterObject):
NewHeartbeatIntervalSeconds, NewPerf = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
return NewHeartbeatIntervalSeconds, NewPerf
def TranslatePerfs(object_name, perfs):
d = {}
for p in perfs:
for cname in omi_map:
for prop in cname['CimProperties']:
if (p == prop['CounterName'] or p == prop['CimPropertyName']) and cname['ObjectName'] == object_name:
if cname['ObjectName'] not in d.keys():
d[cname['ObjectName']] = [p]
else:
d[cname['ObjectName']].append(p)
return d
def ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject):
txt = ''
try:
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to read omsagent configuration ' + conf_path + '.')
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
m = heartbeat_srch.search(txt)
if m is not None:
interval = int(m.group(1)[:-1])
if m.group(1)[-1:] == 'm':
interval *= 60
else:
interval = None
new_heartbeat = interval
perf_src_srch_str = r'\n<source>\n type oms_omi.*?object_name "(.*?)".*?instance_regex "(.*?)".*?counter_name_regex "(.*?)".*?interval ([0-9]+[a-z]).*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
new_perfobj = []
sources = perf_src_srch.findall(txt)
inst = ''
interval = 0
for source in sources:
s_perf = []
if len(source[2]):
s_perf = source[2].strip('(').strip(')').split('|')
object_name = source[0]
interval = int(source[3][:-1])
if source[3][-1:] == 'm':
interval *= 60
inst = source[1]
inst = inst.replace('.*', '*')
new_perfobj.append({'PerformanceCounter': s_perf, 'InstanceName': inst,
'IntervalSeconds': interval, 'AllInstances': True, 'ObjectName': object_name})
return new_heartbeat, new_perfobj
def UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if os.path.exists(conf_path):
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
else:
LG().Log(
'INFO', 'No omsagent configuration file present. Will create new configuration file at ' + conf_path + '.')
txt = ''
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
heartbeat_cmd = non_mh_heartbeat_cmd
if multi_homed:
heartbeat_cmd = 'echo'
heartbeat_src = '<source>\n type exec\n tag heartbeat.output\n command ' + heartbeat_cmd + ' > /dev/null\n format tsv\n keys severity,message\n run_interval ' + \
str(HeartbeatIntervalSeconds) + 's\n</source>\n'
txt = heartbeat_srch.sub(heartbeat_src, txt)
d = {}
perf_src_srch_str = r'\n<source>\n type oms_omi.*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
for source in perf_src_srch.findall(txt):
txt = txt.replace(source, '')
new_source = ''
for perf in PerfCounterObject:
d = TranslatePerfs(perf['ObjectName'], perf['PerformanceCounter'])
for k in d.keys():
names = '(' + reduce(lambda x, y: x + '|' + y, d[k]) + ')'
instances = re.sub(r'([><]|>|<)', '', perf['InstanceName'])
instances = re.sub(r'([*])', '.*', instances)
new_source += '\n<source>\n type oms_omi\n object_name "' + k + '"\n instance_regex "' + instances + \
'"\n counter_name_regex "' + names + '"\n interval ' + \
str(perf['IntervalSeconds']) + 's\n</source>\n'
m = heartbeat_srch.search(txt)
if m is not None:
i = m.end(0) + 1
txt = txt[:i] + new_source + txt[i:]
else:
txt = new_source
try:
codecs.open(conf_path, 'w', 'utf8').write(txt)
LG().Log(
'INFO', 'Created omsagent configuration at ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to create omsagent configuration at ' + conf_path + '.')
return False
    global oms_restart_cmd
    restart_cmd = oms_restart_cmd  # base restart command; extended below when multi-homed
process_to_restart = 'omsagent'
if multi_homed:
restart_cmd += ' ' + WorkspaceID
process_to_restart += '-' + WorkspaceID
if os.system(restart_cmd) == 0:
LG().Log('INFO', 'Successfully restarted ' + process_to_restart + '.')
else:
LG().Log('ERROR', 'Error restarting ' + process_to_restart + '.')
return False
return True
def rm_unicode(obj):
if isinstance(obj, dict):
d = {}
for k, v in obj.iteritems():
d[rm_unicode(k)] = rm_unicode(v)
return d
elif isinstance(obj, list):
return [rm_unicode(i) for i in obj]
elif isinstance(obj, unicode):
return obj.encode('ascii', 'ignore')
else:
return obj
def prune_perfs(PerfCounterObject):
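    # Drops requested counters (and whole counter objects) that have no match
    # in omi_mapping.json, shrinking the list in place while indexing by i.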
l = len(PerfCounterObject)
i = 0
while i < l:
d = TranslatePerfs(PerfCounterObject[i]['ObjectName'], PerfCounterObject[i]['PerformanceCounter'])
if PerfCounterObject[i]['ObjectName'] in d.keys():
            for p in list(PerfCounterObject[i]['PerformanceCounter']):  # iterate a snapshot; items are removed below
if p not in d[PerfCounterObject[i]['ObjectName']]:
LG().Log('INFO', 'No match for PerformanceCounter \'' \
+ p + '\' in ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject[i]['PerformanceCounter'].remove(p)
if len(PerfCounterObject[i]['PerformanceCounter']) == 0:
PerfCounterObject.pop(i)
l -= 1
i -= 1
else:
LG().Log('INFO', 'No matches for ObjectName ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' and PerformanceCounter ' \
+ repr(PerfCounterObject[i]['PerformanceCounter']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject.pop(i)
l -= 1
i -= 1
i += 1
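# --- Hedged self-test sketch (illustrative, not from the original module) ---
# Exercises the heartbeat-interval regex used by ReadOMSAgentConf() against a
# hand-written config fragment, without touching conf_path on disk. It runs
# only if the imp.load_source imports at the top of this module resolve.
if __name__ == '__main__':
    sample = ('<source>\n  type exec\n  tag heartbeat.output\n'
              '  run_interval 5m\n</source>\n')
    m = re.search(
        r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n',
        sample, re.M | re.S)
    assert m is not None and m.group(1) == '5m'
    print('heartbeat interval token: ' + m.group(1))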
| 39.476667
| 172
| 0.606772
|
import os
import imp
import re
import codecs
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
conf_path = '/etc/opt/microsoft/omsagent/conf/omsagent.conf'
omi_map_path = '/etc/opt/microsoft/omsagent/conf/omsagent.d/omi_mapping.json'
omi_map = None
multi_homed = None
non_mh_heartbeat_cmd = '/opt/microsoft/omsagent/bin/omsadmin.sh -b'
oms_restart_cmd = 'sudo /opt/microsoft/omsagent/bin/service_control restart'
def init_paths(WorkspaceID):
global conf_path
global omi_map_path
global multi_homed
omsagent_dir = '/etc/opt/microsoft/omsagent/'
mh_conf_dir = omsagent_dir + WorkspaceID + '/conf'
multi_homed = os.path.isdir(mh_conf_dir)
if multi_homed:
LG().Log('INFO', 'OMSAgent is multi-homed and resource is updating workspace ' + WorkspaceID)
conf_path = mh_conf_dir + '/omsagent.conf'
omi_map_path = mh_conf_dir + '/omsagent.d/omi_mapping.json'
def init_omi_map():
global omi_map
txt = codecs.open(omi_map_path, 'r', 'utf8').read()
omi_map = eval(txt)
def init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_paths(WorkspaceID)
init_omi_map()
if WorkspaceID is not None:
WorkspaceID = WorkspaceID.encode('ascii', 'ignore')
else:
WorkspaceID = ''
if PerfCounterObject is not None:
for perf in PerfCounterObject:
new_perfs = []
if len(perf['PerformanceCounter'].value):
for perf_counter in perf['PerformanceCounter'].value:
new_perfs.append(perf_counter.encode('ascii', 'ignore'))
perf['PerformanceCounter'] = new_perfs
if perf['InstanceName'].value is None:
perf['InstanceName'] = ''
else:
perf['InstanceName'] = perf[
'InstanceName'].value.encode('ascii', 'ignore')
if perf['ObjectName'].value is None:
perf['ObjectName'] = ''
else:
perf['ObjectName'] = perf[
'ObjectName'].value.encode('ascii', 'ignore')
if perf['AllInstances'].value is None:
perf['AllInstances'] = False
else:
if perf['AllInstances'].value.value == 1:
perf['AllInstances'] = True
else:
perf['AllInstances'] = False
perf['IntervalSeconds'] = perf['IntervalSeconds'].value.value
def Set_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
def Test_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
return Test(HeartbeatIntervalSeconds, PerfCounterObject)
def Get_Marshall(Name, WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
arg_names = list(locals().keys())
init_vars(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject)
retval = 0
NewHeartbeatIntervalSeconds, NewPerf = Get(
HeartbeatIntervalSeconds, PerfCounterObject)
for perf in NewPerf:
if len(perf['PerformanceCounter']):
perf['PerformanceCounter'] = protocol.MI_StringA(
perf['PerformanceCounter'])
perf['ObjectName'] = protocol.MI_String(perf['ObjectName'])
perf['InstanceName'] = protocol.MI_String(perf['InstanceName'])
perf['AllInstances'] = protocol.MI_Boolean(perf['AllInstances'])
perf['IntervalSeconds'] = protocol.MI_Uint16(perf['IntervalSeconds'])
PerfCounterObject = protocol.MI_InstanceA(NewPerf)
HeartbeatIntervalSeconds = protocol.MI_Uint16(NewHeartbeatIntervalSeconds)
WorkspaceID = protocol.MI_String(WorkspaceID)
Name = protocol.MI_String(Name)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Set(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if Test(HeartbeatIntervalSeconds, PerfCounterObject) == [0]:
return [0]
if UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
return [0]
else:
return [-1]
def Test(HeartbeatIntervalSeconds, PerfCounterObject):
prune_perfs(PerfCounterObject)
NewHeartbeatIntervalSeconds, NewPerfs = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
if NewHeartbeatIntervalSeconds != HeartbeatIntervalSeconds:
return [-1]
PerfCounterObject.sort()
for perf in PerfCounterObject:
perf['PerformanceCounter'].sort()
perf['AllInstances'] = True
NewPerfs.sort()
for perf in NewPerfs:
perf['PerformanceCounter'].sort()
if PerfCounterObject != NewPerfs:
return [-1]
return [0]
def Get(HeartbeatIntervalSeconds, PerfCounterObject):
NewHeartbeatIntervalSeconds, NewPerf = ReadOMSAgentConf(
HeartbeatIntervalSeconds, PerfCounterObject)
return NewHeartbeatIntervalSeconds, NewPerf
def TranslatePerfs(object_name, perfs):
d = {}
for p in perfs:
for cname in omi_map:
for prop in cname['CimProperties']:
if (p == prop['CounterName'] or p == prop['CimPropertyName']) and cname['ObjectName'] == object_name:
if cname['ObjectName'] not in d.keys():
d[cname['ObjectName']] = [p]
else:
d[cname['ObjectName']].append(p)
return d
def ReadOMSAgentConf(HeartbeatIntervalSeconds, PerfCounterObject):
txt = ''
try:
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to read omsagent configuration ' + conf_path + '.')
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?run_interval ([0-9]+[a-z])\n</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
m = heartbeat_srch.search(txt)
if m is not None:
interval = int(m.group(1)[:-1])
if m.group(1)[-1:] == 'm':
interval *= 60
else:
interval = None
new_heartbeat = interval
perf_src_srch_str = r'\n<source>\n type oms_omi.*?object_name "(.*?)".*?instance_regex "(.*?)".*?counter_name_regex "(.*?)".*?interval ([0-9]+[a-z]).*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
new_perfobj = []
sources = perf_src_srch.findall(txt)
inst = ''
interval = 0
for source in sources:
s_perf = []
if len(source[2]):
s_perf = source[2].strip('(').strip(')').split('|')
object_name = source[0]
interval = int(source[3][:-1])
if source[3][-1:] == 'm':
interval *= 60
inst = source[1]
inst = inst.replace('.*', '*')
new_perfobj.append({'PerformanceCounter': s_perf, 'InstanceName': inst,
'IntervalSeconds': interval, 'AllInstances': True, 'ObjectName': object_name})
return new_heartbeat, new_perfobj
def UpdateOMSAgentConf(WorkspaceID, HeartbeatIntervalSeconds, PerfCounterObject):
if os.path.exists(conf_path):
txt = codecs.open(conf_path, 'r', 'utf8').read().encode(
'ascii', 'ignore')
LG().Log('INFO', 'Read omsagent configuration ' + conf_path + '.')
else:
LG().Log(
'INFO', 'No omsagent configuration file present. Will create new configuration file at ' + conf_path + '.')
txt = ''
heartbeat_srch_str = r'<source>.*?tag heartbeat.*?</source>\n'
heartbeat_srch = re.compile(heartbeat_srch_str, re.M | re.S)
heartbeat_cmd = non_mh_heartbeat_cmd
if multi_homed:
heartbeat_cmd = 'echo'
heartbeat_src = '<source>\n type exec\n tag heartbeat.output\n command ' + heartbeat_cmd + ' > /dev/null\n format tsv\n keys severity,message\n run_interval ' + \
str(HeartbeatIntervalSeconds) + 's\n</source>\n'
txt = heartbeat_srch.sub(heartbeat_src, txt)
d = {}
perf_src_srch_str = r'\n<source>\n type oms_omi.*?</source>\n'
perf_src_srch = re.compile(perf_src_srch_str, re.M | re.S)
for source in perf_src_srch.findall(txt):
txt = txt.replace(source, '')
new_source = ''
for perf in PerfCounterObject:
d = TranslatePerfs(perf['ObjectName'], perf['PerformanceCounter'])
for k in d.keys():
names = '(' + reduce(lambda x, y: x + '|' + y, d[k]) + ')'
instances = re.sub(r'([><]|>|<)', '', perf['InstanceName'])
instances = re.sub(r'([*])', '.*', instances)
new_source += '\n<source>\n type oms_omi\n object_name "' + k + '"\n instance_regex "' + instances + \
'"\n counter_name_regex "' + names + '"\n interval ' + \
str(perf['IntervalSeconds']) + 's\n</source>\n'
m = heartbeat_srch.search(txt)
if m is not None:
i = m.end(0) + 1
txt = txt[:i] + new_source + txt[i:]
else:
txt = new_source
try:
codecs.open(conf_path, 'w', 'utf8').write(txt)
LG().Log(
'INFO', 'Created omsagent configuration at ' + conf_path + '.')
except:
LG().Log(
'ERROR', 'Unable to create omsagent configuration at ' + conf_path + '.')
return False
    global oms_restart_cmd
    restart_cmd = oms_restart_cmd
process_to_restart = 'omsagent'
if multi_homed:
restart_cmd += ' ' + WorkspaceID
process_to_restart += '-' + WorkspaceID
if os.system(restart_cmd) == 0:
LG().Log('INFO', 'Successfully restarted ' + process_to_restart + '.')
else:
LG().Log('ERROR', 'Error restarting ' + process_to_restart + '.')
return False
return True
def rm_unicode(obj):
if isinstance(obj, dict):
d = {}
for k, v in obj.iteritems():
d[rm_unicode(k)] = rm_unicode(v)
return d
elif isinstance(obj, list):
return [rm_unicode(i) for i in obj]
elif isinstance(obj, unicode):
return obj.encode('ascii', 'ignore')
else:
return obj
def prune_perfs(PerfCounterObject):
l = len(PerfCounterObject)
i = 0
while i < l:
d = TranslatePerfs(PerfCounterObject[i]['ObjectName'], PerfCounterObject[i]['PerformanceCounter'])
if PerfCounterObject[i]['ObjectName'] in d.keys():
            # Iterate over a copy so items can be removed safely below.
            for p in PerfCounterObject[i]['PerformanceCounter'][:]:
if p not in d[PerfCounterObject[i]['ObjectName']]:
LG().Log('INFO', 'No match for PerformanceCounter \'' \
+ p + '\' in ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject[i]['PerformanceCounter'].remove(p)
if len(PerfCounterObject[i]['PerformanceCounter']) == 0:
PerfCounterObject.pop(i)
l -= 1
i -= 1
else:
LG().Log('INFO', 'No matches for ObjectName ' \
+ repr(PerfCounterObject[i]['ObjectName']) + ' and PerformanceCounter ' \
+ repr(PerfCounterObject[i]['PerformanceCounter']) + ' in omi_mapping.json, ignoring.')
PerfCounterObject.pop(i)
l -= 1
i -= 1
i += 1
# === core/domain/suggestion_registry_test.py (WebFlakyTest/oppia, Apache-2.0) ===
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for suggestion registry classes."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import os
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.domain import html_validation_service
from core.domain import question_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class MockInvalidSuggestion(suggestion_registry.BaseSuggestion):
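    """Mock suggestion that skips BaseSuggestion.__init__ so that the
    base class's NotImplementedError methods can be called directly."""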
def __init__(self): # pylint: disable=super-init-not-called
pass
class BaseSuggestionUnitTests(test_utils.GenericTestBase):
"""Tests for the BaseSuggestion class."""
def setUp(self):
super(BaseSuggestionUnitTests, self).setUp()
self.base_suggestion = MockInvalidSuggestion()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement accept.'):
self.base_suggestion.accept()
def test_base_class_get_change_list_for_accepting_suggestion_raises_error(
self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement '
'get_change_list_for_accepting_suggestion.'):
self.base_suggestion.get_change_list_for_accepting_suggestion()
def test_base_class_pre_accept_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_accept_validate.'):
self.base_suggestion.pre_accept_validate()
def test_base_class_populate_old_value_of_change_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' populate_old_value_of_change.'):
self.base_suggestion.populate_old_value_of_change()
def test_base_class_pre_update_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_update_validate.'):
self.base_suggestion.pre_update_validate({})
def test_base_class_get_all_html_content_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_all_html_content_strings.'):
self.base_suggestion.get_all_html_content_strings()
def test_base_class_get_target_entity_html_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_target_entity_html_strings.'):
self.base_suggestion.get_target_entity_html_strings()
def test_base_class_convert_html_in_suggestion_change(self):
def conversion_fn():
"""Temporary function."""
pass
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' convert_html_in_suggestion_change.'):
self.base_suggestion.convert_html_in_suggestion_change(
conversion_fn)
class SuggestionEditStateContentUnitTests(test_utils.GenericTestBase):
"""Tests for the SuggestionEditStateContent class."""
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionEditStateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': 'new suggestion content',
'old_value': None
},
'score_category': 'content.Algebra',
'language_code': None,
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_create_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'content')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be content'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be edit_state_property'
):
suggestion.validate()
def test_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.property_name = 'invalid_property'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected property_name to be content'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be None, received wrong_language_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_populate_old_value_of_change_with_invalid_state(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.state_name = 'invalid_state_name'
self.assertIsNone(suggestion.change.old_value)
suggestion.populate_old_value_of_change()
self.assertIsNone(suggestion.change.old_value)
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The following extra attributes are present: new_value, '
'old_value, property_name'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change property_name must be equal to content'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'invalid_state',
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to state_1'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_new_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
new_content = state_domain.SubtitledHtml(
'content', '<p>new suggestion html</p>').to_dict()
suggestion.change.new_value = new_content
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': new_content,
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError, 'The new html must not match the old html'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_non_equal_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to edit_state_property'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}))
def test_get_all_html_content_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [u'new suggestion content']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': '<p>suggestion</p>'
},
'old_value': {
'content_id': 'content',
'html': html_content
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.
add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.old_value['html'], expected_html_content)
def test_get_target_entity_html_strings_returns_expected_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': {
'content_id': 'content',
'html': 'Old content.'
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [u'Old content.']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_with_none_old_value(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
self.assertEqual(actual_outcome_list, [])
class SuggestionTranslateContentUnitTests(test_utils.GenericTestBase):
"""Tests for the SuggestionEditStateContent class."""
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionTranslateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
},
'score_category': 'translation.Algebra',
'language_code': 'hi',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_pre_update_validate_fails_for_invalid_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'Introduction'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to %s' % (
exp_domain.CMD_ADD_WRITTEN_TRANSLATION)
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to Introduction'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_language_code(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The language code must be equal to hi'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_content_html(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is the changed content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change content_html must be equal to <p>This is a ' +
'content.</p>'
):
suggestion.pre_update_validate(
exp_domain.ExplorationChange(change))
def test_create_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'translation')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be translation'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be add_written_translation'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_language_code = (
expected_suggestion_dict['change']['language_code']
)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be %s, '
'received wrong_language_code' % expected_language_code
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError, 'language_code cannot be None'
):
suggestion.validate()
def test_validate_change_with_invalid_language_code_fails_validation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.language_code = 'invalid_code'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_content_html(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.content_html = 'invalid content_html'
with self.assertRaisesRegexp(
utils.ValidationError,
'The Exploration content has changed since this translation '
'was submitted.'
):
suggestion.pre_accept_validate()
def test_accept_suggestion_adds_translation_in_exploration(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
    def test_accept_suggestion_with_pseudonymous_author_adds_translation(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.PSEUDONYMOUS_ID,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'<p>This is translated html.</p>', u'<p>This is a content.</p>']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_strings_returns_expected_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [self.suggestion_dict['change']['content_html']]
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': html_content,
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
}
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.content_html, expected_html_content)


class SuggestionAddQuestionTest(test_utils.GenericTestBase):
"""Tests for the SuggestionAddQuestion class."""
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionAddQuestionTest, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.topic_1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
def test_create_suggestion_add_question(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'question')
self.assertEqual(suggestion.get_score_sub_type(), 'topic_1')
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'content.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be "question"'
):
suggestion.validate()
def test_validate_change_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = 'invalid_change'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change to be an instance of QuestionSuggestionChange'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain cmd'
):
suggestion.validate()
def test_validate_change_cmd_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected cmd to be create_new_fully_specified_question'
):
suggestion.validate()
def test_validate_change_question_dict(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.question_dict = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain question_dict'
):
suggestion.validate()
def test_validate_change_question_state_data_schema_version(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
# We are not setting value in suggestion.change.question_dict
# directly since pylint produces unsupported-assignment-operation
# error. The detailed analysis for the same can be checked
# in this issue: https://github.com/oppia/oppia/issues/7008.
question_dict = suggestion.change.question_dict
question_dict['question_state_data_schema_version'] = 0
suggestion.change.question_dict = question_dict
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question state schema version to be %s, '
'received 0' % feconf.CURRENT_STATE_SCHEMA_VERSION
):
suggestion.validate()
def test_validate_change_skill_difficulty_none(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_difficulty'
):
suggestion.validate()
def test_validate_change_skill_difficulty_invalid_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = 0.4
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change skill_difficulty to be one of '
):
suggestion.validate()
def test_pre_accept_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_id'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_change_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError, 'The skill with the given id doesn\'t exist.'
):
suggestion.pre_accept_validate()
def test_get_change_list_for_accepting_suggestion(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.get_change_list_for_accepting_suggestion())
def test_populate_old_value_of_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.populate_old_value_of_change())
def test_cannot_accept_suggestion_with_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError,
'The skill with the given id doesn\'t exist.'
):
suggestion.accept('commit message')
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': question_domain.QUESTION_PROPERTY_LANGUAGE_CODE,
'new_value': 'bn',
'old_value': 'en'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to '
'create_new_fully_specified_question'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_2'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change skill_id must be equal to skill_1'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_complains_if_nothing_changed(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
with self.assertRaisesRegexp(
utils.ValidationError,
'At least one of the new skill_difficulty or question_dict '
'should be changed.'):
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change))
def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only(
self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.6
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_pre_update_validate_accepts_a_change_in_state_data_only(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'hi',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_question_dict = (
expected_suggestion_dict['change']['question_dict']
)
suggestion.validate()
expected_question_dict['language_code'] = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question language_code.wrong_language_code. to be same '
'as suggestion language_code.en.'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be en, received None'):
suggestion.validate()
    def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionAddQuestion(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
            self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'', u'<p>This is a hint.</p>', u'<p>This is a solution.</p>', u'']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
answer_group = {
'outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_1',
'html': ''
},
'labelled_as_correct': True,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 0
},
'rule_type': 'Equals'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}
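        # Minimal MultipleChoiceInput question state whose content HTML still
        # uses the legacy math RTE format.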
question_state_dict = {
'content': {
'content_id': 'content_1',
'html': html_content
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'written_translations': {
'translations_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'interaction': {
'answer_groups': [answer_group],
'confirmed_unclassified_answers': [],
'customization_args': {
'choices': {
'value': [{
'html': 'option 1',
'content_id': 'ca_choices_0'
}]
},
'showChoicesInShuffledOrder': {
'value': True
}
},
'default_outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_2',
'html': 'Correct Answer'
},
'param_changes': [],
'refresher_exploration_id': None,
'labelled_as_correct': True,
'missing_prerequisite_skill_id': None
},
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': 'Hint 1'
}
}],
'solution': {
'answer_is_exclusive': False,
'correct_answer': 0,
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>'
}
},
'id': 'MultipleChoiceInput'
},
'param_changes': [],
'solicit_answer_details': False,
'classifier_model_id': None
}
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
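        # The conversion should rewrite the legacy raw_latex-with-value
        # attribute into the newer math_content-with-value format.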
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.question_dict['question_state_data']['content'][
'html'], expected_html_content)
def test_accept_suggestion_with_images(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;img.svg&quot;}">'
'</oppia-noninteractive-math>')
question_state_dict = self._create_valid_question_data(
'default_state').to_dict()
question_state_dict['content']['html'] = html_content
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
image_context = feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS
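        # Save the image under the question-suggestion image context so that
        # it is available when the suggestion is accepted.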
fs_services.save_original_and_compressed_versions_of_image(
'img.svg', image_context, 'skill1',
raw_image, 'image', False)
self.save_new_skill('skill1', self.author_id, description='description')
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept('commit_message')
    def test_constructor_updates_state_schema_in_change_cmd(self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], 27)
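        # Constructing the suggestion should migrate the question state data
        # from schema version 27 up to the latest supported schema version.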
suggestion = suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1, suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, change, score_category, 'en', False,
self.fake_date)
self.assertEqual(
suggestion.change.question_dict[
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
    def test_constructor_raises_exception_for_invalid_state_schema_version(
            self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': None,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], None)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected state schema version to be in between 25'
):
suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1,
suggestion_models.STATUS_IN_REVIEW, self.author_id, None,
change, score_category, 'en', False, self.fake_date)


class MockInvalidVoiceoverApplication(
suggestion_registry.BaseVoiceoverApplication):
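    """Mock voiceover application that skips the base class __init__, used
    to exercise BaseVoiceoverApplication's NotImplementedError paths.
    """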
def __init__(self): # pylint: disable=super-init-not-called
pass


class BaseVoiceoverApplicationUnitTests(test_utils.GenericTestBase):
"""Tests for the BaseVoiceoverApplication class."""
def setUp(self):
super(BaseVoiceoverApplicationUnitTests, self).setUp()
self.base_voiceover_application = MockInvalidVoiceoverApplication()
def test_base_class_init_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement '
'__init__.'):
suggestion_registry.BaseVoiceoverApplication()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement accept.'):
self.base_voiceover_application.accept()
def test_base_class_reject_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement reject.'):
self.base_voiceover_application.reject()


class ExplorationVoiceoverApplicationUnitTest(test_utils.GenericTestBase):
"""Tests for the ExplorationVoiceoverApplication class."""
def setUp(self):
super(ExplorationVoiceoverApplicationUnitTest, self).setUp()
self.signup('author@example.com', 'author')
self.author_id = self.get_user_id_from_email('author@example.com')
self.signup('reviewer@example.com', 'reviewer')
self.reviewer_id = self.get_user_id_from_email('reviewer@example.com')
self.voiceover_application = (
suggestion_registry.ExplorationVoiceoverApplication(
'application_id', 'exp_id', suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, 'en', 'audio_file.mp3', '<p>Content</p>',
None))
def test_validation_with_invalid_target_type_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_type = 'invalid_target'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices, '
'received invalid_target'
):
self.voiceover_application.validate()
def test_validation_with_invalid_target_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_status_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected status to be among allowed choices, '
'received invalid_status'
):
self.voiceover_application.validate()
def test_validation_with_invalid_author_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.author_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_final_reviewer_id_raise_exception(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 123
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be None as the '
'voiceover application is not yet handled.'
):
self.voiceover_application.validate()
def test_validation_for_handled_application_with_invalid_final_review(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
self.voiceover_application.validate()
def test_validation_for_rejected_application_with_no_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_REJECTED
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be a string for a '
'rejected application'
):
self.voiceover_application.validate()
def test_validation_for_accepted_application_with_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
self.voiceover_application.rejection_message = 'Invalid message'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be None for the accepted '
'voiceover application, received Invalid message'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_type_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected language_code to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 'invalid language'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid language'
):
self.voiceover_application.validate()
def test_validation_with_invalid_filename_type_raise_exception(self):
self.assertEqual(self.voiceover_application.filename, 'audio_file.mp3')
self.voiceover_application.validate()
self.voiceover_application.filename = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected filename to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_content_type_raise_exception(self):
self.assertEqual(self.voiceover_application.content, '<p>Content</p>')
self.voiceover_application.validate()
self.voiceover_application.content = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected content to be a string'
):
self.voiceover_application.validate()
def test_to_dict_returns_correct_dict(self):
self.voiceover_application.accept(self.reviewer_id)
expected_dict = {
'voiceover_application_id': 'application_id',
'target_type': 'exploration',
'target_id': 'exp_id',
'status': 'accepted',
'author_name': 'author',
'final_reviewer_name': 'reviewer',
'language_code': 'en',
'content': '<p>Content</p>',
'filename': 'audio_file.mp3',
'rejection_message': None
}
self.assertEqual(
self.voiceover_application.to_dict(), expected_dict)
def test_is_handled_property_returns_correct_value(self):
self.assertFalse(self.voiceover_application.is_handled)
self.voiceover_application.accept(self.reviewer_id)
self.assertTrue(self.voiceover_application.is_handled)
def test_accept_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.accept(self.reviewer_id)
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'accepted')
def test_reject_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.reject(self.reviewer_id, 'rejection message')
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'rejected')
self.assertEqual(
self.voiceover_application.rejection_message, 'rejection message')


class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
"""Tests for the CommunityContributionStats class."""
translation_reviewer_counts_by_lang_code = {
'hi': 0,
'en': 1
}
translation_suggestion_counts_by_lang_code = {
'fr': 6,
'en': 5
}
question_reviewer_count = 1
question_suggestion_count = 4
negative_count = -1
non_integer_count = 'non_integer_count'
sample_language_code = 'en'
invalid_language_code = 'invalid'
def _assert_community_contribution_stats_is_in_default_state(self):
"""Checks if the community contribution stats is in its default
state.
"""
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
), {})
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {})
self.assertEqual(
community_contribution_stats.question_reviewer_count, 0)
self.assertEqual(
community_contribution_stats.question_suggestion_count, 0)
def test_initial_object_with_valid_arguments_has_correct_properties(self):
community_contribution_stats = (
suggestion_registry.CommunityContributionStats(
self.translation_reviewer_counts_by_lang_code,
self.translation_suggestion_counts_by_lang_code,
self.question_reviewer_count,
self.question_suggestion_count
)
)
community_contribution_stats.validate()
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
self.translation_reviewer_counts_by_lang_code)
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
self.translation_suggestion_counts_by_lang_code
)
self.assertEqual(
community_contribution_stats.question_reviewer_count,
self.question_reviewer_count
)
self.assertEqual(
community_contribution_stats.question_suggestion_count,
self.question_suggestion_count
)
def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_get_translation_language_codes_that_need_reviewers_for_one_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {self.sample_language_code})
def test_get_translation_language_codes_that_need_reviewers_for_multi_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code('hi', 1)
stats.set_translation_suggestion_count_for_language_code('fr', 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {'hi', 'fr'})
def test_get_translation_language_codes_that_need_reviewers_for_no_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, set())
def test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
self.assertTrue(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
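        # With at most one suggestion per reviewer, two suggestions and a
        # single reviewer mean that more reviewers are needed.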
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertTrue(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
    def test_translation_reviewers_not_needed_if_reviewers_and_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
    def test_translation_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_question_reviewers_are_needed_if_suggestions_zero_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
self.assertTrue(stats.are_question_reviewers_needed())
def test_question_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 1
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertTrue(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
    def test_question_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(stats.are_question_reviewers_needed())
def test_validate_translation_reviewer_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_non_integer_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation reviewer counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation suggestion counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()


class ReviewableSuggestionEmailInfoUnitTests(test_utils.GenericTestBase):
"""Tests for the ReviewableSuggestionEmailInfo class."""
suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION
language_code = 'en'
suggestion_content = 'sample question'
submission_datetime = datetime.datetime.utcnow()
def test_initial_object_with_valid_arguments_has_correct_properties(self):
reviewable_suggestion_email_info = (
suggestion_registry.ReviewableSuggestionEmailInfo(
self.suggestion_type, self.language_code,
self.suggestion_content, self.submission_datetime
)
)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_type,
self.suggestion_type)
self.assertEqual(
reviewable_suggestion_email_info.language_code,
self.language_code)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_content,
self.suggestion_content)
self.assertEqual(
reviewable_suggestion_email_info.submission_datetime,
self.submission_datetime)
| 42.685196
| 80
| 0.661365
|
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import os
from core.domain import config_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import fs_services
from core.domain import html_validation_service
from core.domain import question_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import suggestion_registry
from core.domain import suggestion_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
import utils
(suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion])
class MockInvalidSuggestion(suggestion_registry.BaseSuggestion):
def __init__(self):
pass
class BaseSuggestionUnitTests(test_utils.GenericTestBase):
def setUp(self):
super(BaseSuggestionUnitTests, self).setUp()
self.base_suggestion = MockInvalidSuggestion()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement accept.'):
self.base_suggestion.accept()
def test_base_class_get_change_list_for_accepting_suggestion_raises_error(
self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement '
'get_change_list_for_accepting_suggestion.'):
self.base_suggestion.get_change_list_for_accepting_suggestion()
def test_base_class_pre_accept_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_accept_validate.'):
self.base_suggestion.pre_accept_validate()
def test_base_class_populate_old_value_of_change_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' populate_old_value_of_change.'):
self.base_suggestion.populate_old_value_of_change()
def test_base_class_pre_update_validate_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' pre_update_validate.'):
self.base_suggestion.pre_update_validate({})
def test_base_class_get_all_html_content_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_all_html_content_strings.'):
self.base_suggestion.get_all_html_content_strings()
def test_base_class_get_target_entity_html_strings(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' get_target_entity_html_strings.'):
self.base_suggestion.get_target_entity_html_strings()
def test_base_class_convert_html_in_suggestion_change(self):
def conversion_fn():
pass
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseSuggestion should implement'
' convert_html_in_suggestion_change.'):
self.base_suggestion.convert_html_in_suggestion_change(
conversion_fn)
class SuggestionEditStateContentUnitTests(test_utils.GenericTestBase):
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionEditStateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': 'new suggestion content',
'old_value': None
},
'score_category': 'content.Algebra',
'language_code': None,
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
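    def _make_default_suggestion(self):
        # Convenience sketch, not used by the tests below (each test builds
        # its suggestion inline to stay self-contained); it shows the
        # constructor arguments that every test in this class repeats.
        d = self.suggestion_dict
        return suggestion_registry.SuggestionEditStateContent(
            d['suggestion_id'], d['target_id'],
            d['target_version_at_submission'], d['status'], self.author_id,
            self.reviewer_id, d['change'], d['score_category'],
            d['language_code'], False, self.fake_date)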
def test_create_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_edit_state_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'content')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
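    # self.PSEUDONYMOUS_ID is supplied by test_utils.GenericTestBase; it is
    # formatted like a real user ID, so validation accepts it even though
    # no signed-up user owns it.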
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = self.PSEUDONYMOUS_ID
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_content(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be content'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be edit_state_property'
):
suggestion.validate()
def test_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.property_name = 'invalid_property'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected property_name to be content'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be None, received wrong_language_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_populate_old_value_of_change_with_invalid_state(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.state_name = 'invalid_state_name'
self.assertIsNone(suggestion.change.old_value)
suggestion.populate_old_value_of_change()
self.assertIsNone(suggestion.change.old_value)
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The following extra attributes are present: new_value, '
'old_value, property_name'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_property_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_PARAM_CHANGES,
'state_name': suggestion.change.state_name,
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change property_name must be equal to content'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'invalid_state',
'new_value': 'new suggestion content',
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to state_1'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_new_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
new_content = state_domain.SubtitledHtml(
'content', '<p>new suggestion html</p>').to_dict()
suggestion.change.new_value = new_content
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': suggestion.change.state_name,
'new_value': new_content,
'old_value': None
}
with self.assertRaisesRegexp(
utils.ValidationError, 'The new html must not match the old html'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_non_equal_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionEditStateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to edit_state_property'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'Exploration 1 Albert title'
}))
def test_get_all_html_content_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [u'new suggestion content']
self.assertEqual(expected_outcome_list, actual_outcome_list)
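    # The conversion below exercises the math RTE schema migration: the
    # old raw_latex-with-value attribute becomes math_content-with-value,
    # carrying the raw_latex plus an (initially empty) svg_filename.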
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
change = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'Introduction',
'new_value': {
'content_id': 'content',
'html': '<p>suggestion</p>'
},
'old_value': {
'content_id': 'content',
'html': html_content
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
        suggestion.convert_html_in_suggestion_change(
            html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.old_value['html'], expected_html_content)
def test_get_target_entity_html_strings_returns_expected_strings(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': {
'content_id': 'content',
'html': 'Old content.'
}
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [u'Old content.']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_with_none_old_value(self):
change_dict = {
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'state_name': 'state_1',
'new_value': {
'content_id': 'content',
'html': 'new suggestion content'
},
'old_value': None
}
suggestion = suggestion_registry.SuggestionEditStateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
self.assertEqual(actual_outcome_list, [])
class SuggestionTranslateContentUnitTests(test_utils.GenericTestBase):
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionTranslateContentUnitTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'exploration.exp1.thread1',
'suggestion_type': (
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT),
'target_type': feconf.ENTITY_TYPE_EXPLORATION,
'target_id': 'exp1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
},
'score_category': 'translation.Algebra',
'language_code': 'hi',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
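    # In the constructor calls below, the positional arguments that follow
    # language_code are edited_by_reviewer (False) and last_updated
    # (self.fake_date), mirroring the corresponding dict keys above.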
def test_pre_update_validate_fails_for_invalid_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'Introduction'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to %s' % (
exp_domain.CMD_ADD_WRITTEN_TRANSLATION)
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_state_name(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'State 1',
'content_id': 'content',
'language_code': 'hi',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change state_name must be equal to Introduction'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_language_code(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is a content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The language code must be equal to hi'
):
suggestion.pre_update_validate(exp_domain.ExplorationChange(change))
def test_pre_update_validate_change_content_html(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
            expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'en',
'content_html': '<p>This is the changed content.</p>',
'translation_html': '<p>This is the updated translated html.</p>',
'data_format': 'html'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change content_html must be equal to <p>This is a ' +
'content.</p>'
):
suggestion.pre_update_validate(
exp_domain.ExplorationChange(change))
def test_create_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
def test_validate_suggestion_add_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'translation')
self.assertEqual(suggestion.get_score_sub_type(), 'Algebra')
def test_validate_suggestion_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.suggestion_type = 'invalid_suggestion_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected suggestion_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_type = 'invalid_target_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices'
):
suggestion.validate()
def test_validate_target_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
suggestion.validate()
def test_validate_target_version_at_submission(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.target_version_at_submission = 'invalid_version'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_version_at_submission to be an int'
):
suggestion.validate()
def test_validate_status(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be among allowed choices'
):
suggestion.validate()
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'
):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'
):
suggestion.validate()
def test_validate_score_category(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected score_category to be a string'
):
suggestion.validate()
def test_validate_score_category_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'score.score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
suggestion.score_category = 'invalid_score_category'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected score_category to be of the form'
' score_type.score_sub_type'
):
suggestion.validate()
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'invalid_score_type.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be among allowed'
' choices'
):
suggestion.validate()
def test_validate_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to be an ExplorationChange'
):
suggestion.validate()
def test_validate_score_type_translation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'question.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be translation'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected cmd to be add_written_translation'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_language_code = (
expected_suggestion_dict['change']['language_code']
)
suggestion.validate()
suggestion.language_code = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be %s, '
'received wrong_language_code' % expected_language_code
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError, 'language_code cannot be None'
):
suggestion.validate()
def test_validate_change_with_invalid_language_code_fails_validation(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.language_code = 'invalid_code'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid_code'
):
suggestion.validate()
def test_pre_accept_validate_state_name(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.state_name = 'invalid_state_name'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected invalid_state_name to be a valid state name'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_content_html(self):
self.save_new_default_exploration('exp1', self.author_id)
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
exp_services.update_exploration(
self.author_id, 'exp1', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'State A',
}),
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': '<p>This is a content.</p>'
},
'state_name': 'State A',
})
], 'Added state')
suggestion.change.state_name = 'State A'
suggestion.pre_accept_validate()
suggestion.change.content_html = 'invalid content_html'
with self.assertRaisesRegexp(
utils.ValidationError,
'The Exploration content has changed since this translation '
'was submitted.'
):
suggestion.pre_accept_validate()
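    # Accepting a translation suggestion commits the change to the target
    # exploration; get_translation_counts() then reports one 'hi'
    # translation.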
def test_accept_suggestion_adds_translation_in_exploration(self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
    def test_accept_suggestion_with_pseudonymous_author_adds_translation(
            self):
self.save_new_default_exploration('exp1', self.author_id)
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {})
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionTranslateContent(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.PSEUDONYMOUS_ID,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept(
'Accepted suggestion by translator: Add translation change.')
exploration = exp_fetchers.get_exploration_by_id('exp1')
self.assertEqual(exploration.get_translation_counts(), {
'hi': 1
})
def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'<p>This is translated html.</p>', u'<p>This is a content.</p>']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_get_target_entity_html_strings_returns_expected_strings(self):
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_target_entity_html_strings()
expected_outcome_list = [self.suggestion_dict['change']['content_html']]
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': 'Introduction',
'content_id': 'content',
'language_code': 'hi',
'content_html': html_content,
'translation_html': '<p>This is translated html.</p>',
'data_format': 'html'
}
suggestion = suggestion_registry.SuggestionTranslateContent(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, change_dict,
self.suggestion_dict['score_category'],
self.suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.content_html, expected_html_content)
class SuggestionAddQuestionTest(test_utils.GenericTestBase):
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_EMAIL = 'reviewer@example.com'
ASSIGNED_REVIEWER_EMAIL = 'assigned_reviewer@example.com'
fake_date = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
def setUp(self):
super(SuggestionAddQuestionTest, self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_EMAIL, 'reviewer')
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
self.suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.topic_1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date),
'edited_by_reviewer': False
}
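    # The change payload wraps a full question_dict; the valid question
    # state data comes from _create_valid_question_data on the test base
    # class, so the tests below exercise suggestion-level validation only.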
def test_create_suggestion_add_question(self):
expected_suggestion_dict = self.suggestion_dict
observed_suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertDictEqual(
observed_suggestion.to_dict(), expected_suggestion_dict)
    def test_validate_suggestion_add_question(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
def test_get_score_part_helper_methods(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertEqual(suggestion.get_score_type(), 'question')
self.assertEqual(suggestion.get_score_sub_type(), 'topic_1')
def test_validate_score_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.score_category = 'content.score_sub_type'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the first part of score_category to be "question"'
):
suggestion.validate()
def test_validate_change_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change = 'invalid_change'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change to be an instance of QuestionSuggestionChange'
):
suggestion.validate()
def test_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain cmd'
):
suggestion.validate()
def test_validate_change_cmd_type(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.cmd = 'invalid_cmd'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected cmd to be create_new_fully_specified_question'
):
suggestion.validate()
def test_validate_change_question_dict(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.question_dict = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain question_dict'
):
suggestion.validate()
def test_validate_change_question_state_data_schema_version(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
question_dict = suggestion.change.question_dict
question_dict['question_state_data_schema_version'] = 0
suggestion.change.question_dict = question_dict
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question state schema version to be %s, '
'received 0' % feconf.CURRENT_STATE_SCHEMA_VERSION
):
suggestion.validate()
def test_validate_change_skill_difficulty_none(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_difficulty'
):
suggestion.validate()
def test_validate_change_skill_difficulty_invalid_value(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.change.skill_difficulty = 0.4
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected change skill_difficulty to be one of '
):
suggestion.validate()
def test_pre_accept_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = None
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected change to contain skill_id'
):
suggestion.pre_accept_validate()
def test_pre_accept_validate_change_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
skill_id = skill_services.get_new_skill_id()
self.save_new_skill(skill_id, self.author_id, description='description')
suggestion.change.skill_id = skill_id
suggestion.pre_accept_validate()
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError, 'The skill with the given id doesn\'t exist.'
):
suggestion.pre_accept_validate()
def test_get_change_list_for_accepting_suggestion(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.get_change_list_for_accepting_suggestion())
def test_populate_old_value_of_change(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
self.assertIsNone(suggestion.populate_old_value_of_change())
def test_cannot_accept_suggestion_with_invalid_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.change.skill_id = skill_services.get_new_skill_id()
with self.assertRaisesRegexp(
utils.ValidationError,
'The skill with the given id doesn\'t exist.'
):
suggestion.accept('commit message')
def test_pre_update_validate_change_cmd(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
'property_name': question_domain.QUESTION_PROPERTY_LANGUAGE_CODE,
'new_value': 'bn',
'old_value': 'en'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change cmd must be equal to '
'create_new_fully_specified_question'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_change_skill_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_2'
}
with self.assertRaisesRegexp(
utils.ValidationError,
'The new change skill_id must be equal to skill_1'
):
suggestion.pre_update_validate(
question_domain.QuestionChange(change))
def test_pre_update_validate_complains_if_nothing_changed(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
with self.assertRaisesRegexp(
utils.ValidationError,
'At least one of the new skill_difficulty or question_dict '
'should be changed.'):
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change))
def test_pre_update_validate_accepts_a_change_in_skill_difficulty_only(
self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.6
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_pre_update_validate_accepts_a_change_in_state_data_only(self):
change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
suggestion = suggestion_registry.SuggestionAddQuestion(
'exploration.exp1.thread1', 'exp1', 1,
suggestion_models.STATUS_ACCEPTED, self.author_id,
self.reviewer_id, change,
            'question.topic_1', 'en', False, self.fake_date)
new_change = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': 'hi',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION)
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3
}
self.assertEqual(
suggestion.pre_update_validate(
question_domain.QuestionSuggestionChange(new_change)), None)
def test_validate_author_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'):
suggestion.validate()
def test_validate_author_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.author_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected author_id to be in a valid user ID format.'):
suggestion.validate()
def test_validate_final_reviewer_id(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'):
suggestion.validate()
def test_validate_final_reviewer_id_format(self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.final_reviewer_id = ''
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be in a valid user ID format'):
suggestion.validate()
def test_validate_language_code_fails_when_language_codes_do_not_match(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
expected_question_dict = (
expected_suggestion_dict['change']['question_dict']
)
suggestion.validate()
expected_question_dict['language_code'] = 'wrong_language_code'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected question language_code.wrong_language_code. to be same '
'as suggestion language_code.en.'
):
suggestion.validate()
def test_validate_language_code_fails_when_language_code_is_set_to_none(
self):
expected_suggestion_dict = self.suggestion_dict
suggestion = suggestion_registry.SuggestionAddQuestion(
expected_suggestion_dict['suggestion_id'],
expected_suggestion_dict['target_id'],
expected_suggestion_dict['target_version_at_submission'],
expected_suggestion_dict['status'], self.author_id,
self.reviewer_id, expected_suggestion_dict['change'],
expected_suggestion_dict['score_category'],
expected_suggestion_dict['language_code'], False, self.fake_date)
suggestion.validate()
suggestion.language_code = None
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be en, received None'):
suggestion.validate()
    def test_get_all_html_content_strings(self):
suggestion = suggestion_registry.SuggestionAddQuestion(
self.suggestion_dict['suggestion_id'],
self.suggestion_dict['target_id'],
self.suggestion_dict['target_version_at_submission'],
self.suggestion_dict['status'], self.author_id,
self.reviewer_id, self.suggestion_dict['change'],
self.suggestion_dict['score_category'],
            self.suggestion_dict['language_code'], False, self.fake_date)
actual_outcome_list = suggestion.get_all_html_content_strings()
expected_outcome_list = [
u'', u'<p>This is a hint.</p>', u'<p>This is a solution.</p>', u'']
self.assertEqual(expected_outcome_list, actual_outcome_list)
def test_convert_html_in_suggestion_change(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math raw_latex-with-value="&a'
'mp;quot;+,-,-,+&quot;"></oppia-noninteractive-math>')
expected_html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;&quot;}"></oppia'
'-noninteractive-math>')
answer_group = {
'outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_1',
'html': ''
},
'labelled_as_correct': True,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 0
},
'rule_type': 'Equals'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}
question_state_dict = {
'content': {
'content_id': 'content_1',
'html': html_content
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'written_translations': {
'translations_mapping': {
'content_1': {},
'feedback_1': {},
'feedback_2': {},
'hint_1': {},
'solution': {}
}
},
'interaction': {
'answer_groups': [answer_group],
'confirmed_unclassified_answers': [],
'customization_args': {
'choices': {
'value': [{
'html': 'option 1',
'content_id': 'ca_choices_0'
}]
},
'showChoicesInShuffledOrder': {
'value': True
}
},
'default_outcome': {
'dest': None,
'feedback': {
'content_id': 'feedback_2',
'html': 'Correct Answer'
},
'param_changes': [],
'refresher_exploration_id': None,
'labelled_as_correct': True,
'missing_prerequisite_skill_id': None
},
'hints': [{
'hint_content': {
'content_id': 'hint_1',
'html': 'Hint 1'
}
}],
'solution': {
'answer_is_exclusive': False,
'correct_answer': 0,
'explanation': {
'content_id': 'solution',
'html': '<p>This is a solution.</p>'
}
},
'id': 'MultipleChoiceInput'
},
'param_changes': [],
'solicit_answer_details': False,
'classifier_model_id': None
}
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': 'skill_1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
suggestion.convert_html_in_suggestion_change(
html_validation_service.add_math_content_to_math_rte_components)
self.assertEqual(
suggestion.change.question_dict['question_state_data']['content'][
'html'], expected_html_content)
def test_accept_suggestion_with_images(self):
html_content = (
'<p>Value</p><oppia-noninteractive-math math_content-with-value='
'"{&quot;raw_latex&quot;: &quot;+,-,-,+&quot;, &'
'amp;quot;svg_filename&quot;: &quot;img.svg&quot;}">'
'</oppia-noninteractive-math>')
question_state_dict = self._create_valid_question_data(
'default_state').to_dict()
question_state_dict['content']['html'] = html_content
with python_utils.open_file(
os.path.join(feconf.TESTS_DATA_DIR, 'test_svg.svg'),
'rb', encoding=None) as f:
raw_image = f.read()
image_context = feconf.IMAGE_CONTEXT_QUESTION_SUGGESTIONS
fs_services.save_original_and_compressed_versions_of_image(
'img.svg', image_context, 'skill1',
raw_image, 'image', False)
self.save_new_skill('skill1', self.author_id, description='description')
suggestion_dict = {
'suggestion_id': 'skill1.thread1',
'suggestion_type': feconf.SUGGESTION_TYPE_ADD_QUESTION,
'target_type': feconf.ENTITY_TYPE_SKILL,
'target_id': 'skill1',
'target_version_at_submission': 1,
'status': suggestion_models.STATUS_ACCEPTED,
'author_name': 'author',
'final_reviewer_id': self.reviewer_id,
'change': {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': question_state_dict,
'language_code': 'en',
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill1',
'skill_difficulty': 0.3,
},
'score_category': 'question.skill1',
'language_code': 'en',
'last_updated': utils.get_time_in_millisecs(self.fake_date)
}
suggestion = suggestion_registry.SuggestionAddQuestion(
suggestion_dict['suggestion_id'], suggestion_dict['target_id'],
suggestion_dict['target_version_at_submission'],
suggestion_dict['status'], self.author_id, self.reviewer_id,
suggestion_dict['change'], suggestion_dict['score_category'],
suggestion_dict['language_code'], False, self.fake_date)
suggestion.accept('commit_message')
    def test_constructor_updates_state_schema_in_change_cmd(self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': 27,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], 27)
suggestion = suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1, suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, change, score_category, 'en', False,
self.fake_date)
self.assertEqual(
suggestion.change.question_dict[
'question_state_data_schema_version'],
feconf.CURRENT_STATE_SCHEMA_VERSION)
    def test_constructor_raises_exception_for_invalid_state_schema_version(
            self):
score_category = (
suggestion_models.SCORE_TYPE_QUESTION +
suggestion_models.SCORE_CATEGORY_DELIMITER + 'skill_id')
change = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self.VERSION_27_STATE_DICT,
'question_state_data_schema_version': None,
'language_code': 'en',
'linked_skill_ids': ['skill_id'],
'inapplicable_skill_misconception_ids': []
},
'skill_id': 'skill_id',
'skill_difficulty': 0.3
}
self.assertEqual(
change['question_dict']['question_state_data_schema_version'], None)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected state schema version to be in between 25'
):
suggestion_registry.SuggestionAddQuestion(
'suggestionId', 'target_id', 1,
suggestion_models.STATUS_IN_REVIEW, self.author_id, None,
change, score_category, 'en', False, self.fake_date)
class MockInvalidVoiceoverApplication(
suggestion_registry.BaseVoiceoverApplication):
def __init__(self):
pass
class BaseVoiceoverApplicationUnitTests(test_utils.GenericTestBase):
def setUp(self):
super(BaseVoiceoverApplicationUnitTests, self).setUp()
self.base_voiceover_application = MockInvalidVoiceoverApplication()
def test_base_class_init_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement '
'__init__.'):
suggestion_registry.BaseVoiceoverApplication()
def test_base_class_accept_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement accept.'):
self.base_voiceover_application.accept()
def test_base_class_reject_raises_error(self):
with self.assertRaisesRegexp(
NotImplementedError,
'Subclasses of BaseVoiceoverApplication should implement reject.'):
self.base_voiceover_application.reject()
class ExplorationVoiceoverApplicationUnitTest(test_utils.GenericTestBase):
def setUp(self):
super(ExplorationVoiceoverApplicationUnitTest, self).setUp()
self.signup('author@example.com', 'author')
self.author_id = self.get_user_id_from_email('author@example.com')
self.signup('reviewer@example.com', 'reviewer')
self.reviewer_id = self.get_user_id_from_email('reviewer@example.com')
self.voiceover_application = (
suggestion_registry.ExplorationVoiceoverApplication(
'application_id', 'exp_id', suggestion_models.STATUS_IN_REVIEW,
self.author_id, None, 'en', 'audio_file.mp3', '<p>Content</p>',
None))
def test_validation_with_invalid_target_type_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_type = 'invalid_target'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected target_type to be among allowed choices, '
'received invalid_target'
):
self.voiceover_application.validate()
def test_validation_with_invalid_target_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.target_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected target_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_status_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.status = 'invalid_status'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected status to be among allowed choices, '
'received invalid_status'
):
self.voiceover_application.validate()
def test_validation_with_invalid_author_id_raise_exception(self):
self.voiceover_application.validate()
self.voiceover_application.author_id = 123
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected author_id to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_final_reviewer_id_raise_exception(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 123
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected final_reviewer_id to be None as the '
'voiceover application is not yet handled.'
):
self.voiceover_application.validate()
def test_validation_for_handled_application_with_invalid_final_review(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.voiceover_application.validate()
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected final_reviewer_id to be a string'
):
self.voiceover_application.validate()
def test_validation_for_rejected_application_with_no_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_REJECTED
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be a string for a '
'rejected application'
):
self.voiceover_application.validate()
def test_validation_for_accepted_application_with_message(self):
self.assertEqual(
self.voiceover_application.status,
suggestion_models.STATUS_IN_REVIEW)
self.assertEqual(self.voiceover_application.rejection_message, None)
self.voiceover_application.validate()
self.voiceover_application.final_reviewer_id = 'reviewer_id'
self.voiceover_application.status = suggestion_models.STATUS_ACCEPTED
self.voiceover_application.rejection_message = 'Invalid message'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected rejection_message to be None for the accepted '
'voiceover application, received Invalid message'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_type_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected language_code to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_language_code_raise_exception(self):
self.assertEqual(self.voiceover_application.language_code, 'en')
self.voiceover_application.validate()
self.voiceover_application.language_code = 'invalid language'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid language'
):
self.voiceover_application.validate()
def test_validation_with_invalid_filename_type_raise_exception(self):
self.assertEqual(self.voiceover_application.filename, 'audio_file.mp3')
self.voiceover_application.validate()
self.voiceover_application.filename = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected filename to be a string'
):
self.voiceover_application.validate()
def test_validation_with_invalid_content_type_raise_exception(self):
self.assertEqual(self.voiceover_application.content, '<p>Content</p>')
self.voiceover_application.validate()
self.voiceover_application.content = 1
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected content to be a string'
):
self.voiceover_application.validate()
def test_to_dict_returns_correct_dict(self):
self.voiceover_application.accept(self.reviewer_id)
expected_dict = {
'voiceover_application_id': 'application_id',
'target_type': 'exploration',
'target_id': 'exp_id',
'status': 'accepted',
'author_name': 'author',
'final_reviewer_name': 'reviewer',
'language_code': 'en',
'content': '<p>Content</p>',
'filename': 'audio_file.mp3',
'rejection_message': None
}
self.assertEqual(
self.voiceover_application.to_dict(), expected_dict)
def test_is_handled_property_returns_correct_value(self):
self.assertFalse(self.voiceover_application.is_handled)
self.voiceover_application.accept(self.reviewer_id)
self.assertTrue(self.voiceover_application.is_handled)
def test_accept_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.accept(self.reviewer_id)
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'accepted')
def test_reject_voiceover_application(self):
self.assertEqual(self.voiceover_application.final_reviewer_id, None)
self.assertEqual(self.voiceover_application.status, 'review')
self.voiceover_application.reject(self.reviewer_id, 'rejection message')
self.assertEqual(
self.voiceover_application.final_reviewer_id, self.reviewer_id)
self.assertEqual(self.voiceover_application.status, 'rejected')
self.assertEqual(
self.voiceover_application.rejection_message, 'rejection message')
class CommunityContributionStatsUnitTests(test_utils.GenericTestBase):
translation_reviewer_counts_by_lang_code = {
'hi': 0,
'en': 1
}
translation_suggestion_counts_by_lang_code = {
'fr': 6,
'en': 5
}
question_reviewer_count = 1
question_suggestion_count = 4
negative_count = -1
non_integer_count = 'non_integer_count'
sample_language_code = 'en'
invalid_language_code = 'invalid'
def _assert_community_contribution_stats_is_in_default_state(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
), {})
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {})
self.assertEqual(
community_contribution_stats.question_reviewer_count, 0)
self.assertEqual(
community_contribution_stats.question_suggestion_count, 0)
def test_initial_object_with_valid_arguments_has_correct_properties(self):
community_contribution_stats = (
suggestion_registry.CommunityContributionStats(
self.translation_reviewer_counts_by_lang_code,
self.translation_suggestion_counts_by_lang_code,
self.question_reviewer_count,
self.question_suggestion_count
)
)
community_contribution_stats.validate()
self.assertEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
self.translation_reviewer_counts_by_lang_code)
self.assertEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
self.translation_suggestion_counts_by_lang_code
)
self.assertEqual(
community_contribution_stats.question_reviewer_count,
self.question_reviewer_count
)
self.assertEqual(
community_contribution_stats.question_suggestion_count,
self.question_suggestion_count
)
def test_set_translation_reviewer_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_reviewer_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_reviewer_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_empty_dict(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
), {self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_updates_count_value(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {self.sample_language_code: 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{self.sample_language_code: 2}
)
def test_set_translation_suggestion_count_for_lang_code_adds_new_lang_key(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
self._assert_community_contribution_stats_is_in_default_state()
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
) = {'en': 1}
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code('hi', 2)
)
self.assertDictEqual(
(
community_contribution_stats
.translation_suggestion_counts_by_lang_code
),
{'en': 1, 'hi': 2}
)
def test_get_translation_language_codes_that_need_reviewers_for_one_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {self.sample_language_code})
def test_get_translation_language_codes_that_need_reviewers_for_multi_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code('hi', 1)
stats.set_translation_suggestion_count_for_language_code('fr', 1)
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, {'hi', 'fr'})
def test_get_translation_language_codes_that_need_reviewers_for_no_lang(
self):
stats = suggestion_services.get_community_contribution_stats()
language_codes_that_need_reviewers = (
stats.get_translation_language_codes_that_need_reviewers()
)
self.assertEqual(
language_codes_that_need_reviewers, set())
def test_translation_reviewers_are_needed_if_suggestions_but_no_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
self.assertTrue(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_translation_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertTrue(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 2)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
def test_translation_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_suggestion_count_for_language_code(
self.sample_language_code, 1)
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 2)
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = (
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
self.assertFalse(reviewers_are_needed)
    def test_translation_reviewers_not_needed_if_reviewers_and_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.set_translation_reviewer_count_for_language_code(
self.sample_language_code, 1)
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
    def test_translation_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(
stats.are_translation_reviewers_needed_for_lang_code(
self.sample_language_code))
def test_question_reviewers_are_needed_if_suggestions_zero_reviewers(
self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
self.assertTrue(stats.are_question_reviewers_needed())
def test_question_reviewers_are_needed_if_num_suggestions_past_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 1
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertTrue(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_eqs_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 2
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
def test_question_reviewers_not_needed_if_num_suggestions_less_max(self):
stats = suggestion_services.get_community_contribution_stats()
stats.question_suggestion_count = 1
stats.question_reviewer_count = 2
config_services.set_property(
'committer_id', 'max_number_of_suggestions_per_reviewer', 1)
reviewers_are_needed = stats.are_question_reviewers_needed()
self.assertFalse(reviewers_are_needed)
    def test_question_reviewers_not_needed_if_no_reviewers_no_suggestions(
self):
stats = suggestion_services.get_community_contribution_stats()
self._assert_community_contribution_stats_is_in_default_state()
self.assertFalse(stats.are_question_reviewers_needed())
def test_validate_translation_reviewer_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_negative_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.negative_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be non-negative for '
'%s language code, received: %s.' % (
self.sample_language_code, self.negative_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_negative_count(self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.negative_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be non-negative, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_non_integer_counts(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation reviewer count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.sample_language_code, self.non_integer_count)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the translation suggestion count to be an integer for '
'%s language code, received: %s.' % (
self.sample_language_code, self.non_integer_count)
):
community_contribution_stats.validate()
def test_validate_question_reviewer_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_reviewer_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question reviewer count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_reviewer_count)
):
community_contribution_stats.validate()
def test_validate_question_suggestion_count_fails_for_non_integer_count(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
community_contribution_stats.question_suggestion_count = (
self.non_integer_count
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected the question suggestion count to be an integer, '
'received: %s.' % (
community_contribution_stats.question_suggestion_count)
):
community_contribution_stats.validate()
def test_validate_translation_reviewer_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_reviewer_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation reviewer counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()
def test_validate_translation_suggestion_counts_fails_for_invalid_lang_code(
self):
community_contribution_stats = (
suggestion_services.get_community_contribution_stats()
)
(
community_contribution_stats
.set_translation_suggestion_count_for_language_code(
self.invalid_language_code, 1)
)
with self.assertRaisesRegexp(
utils.ValidationError,
'Invalid language code for the translation suggestion counts: '
'%s.' % self.invalid_language_code
):
community_contribution_stats.validate()
class ReviewableSuggestionEmailInfoUnitTests(test_utils.GenericTestBase):
suggestion_type = feconf.SUGGESTION_TYPE_ADD_QUESTION
language_code = 'en'
suggestion_content = 'sample question'
submission_datetime = datetime.datetime.utcnow()
def test_initial_object_with_valid_arguments_has_correct_properties(self):
reviewable_suggestion_email_info = (
suggestion_registry.ReviewableSuggestionEmailInfo(
self.suggestion_type, self.language_code,
self.suggestion_content, self.submission_datetime
)
)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_type,
self.suggestion_type)
self.assertEqual(
reviewable_suggestion_email_info.language_code,
self.language_code)
self.assertEqual(
reviewable_suggestion_email_info.suggestion_content,
self.suggestion_content)
self.assertEqual(
reviewable_suggestion_email_info.submission_datetime,
self.submission_datetime)
| true
| true
|
790a73da072748e571f8d467301618f7fd859cdd
| 13,949
|
py
|
Python
|
robovat/simulation/simulator.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 62
|
2020-04-08T11:26:24.000Z
|
2021-09-06T02:45:53.000Z
|
robovat/simulation/simulator.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 7
|
2020-04-12T13:10:10.000Z
|
2022-03-12T00:15:03.000Z
|
robovat/simulation/simulator.py
|
leobxpan/robovat
|
0d360c34c677cf018c4daab0b8e758943ae1d2c1
|
[
"MIT"
] | 17
|
2020-04-12T17:37:01.000Z
|
2021-09-07T01:51:46.000Z
|
"""The Simulator class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
"""The Simulator class."""
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
"""Initialize the simulator.
Args:
assets_dir: The assets directory.
physics_backend: Name of the physics engine backend.
time_step: Time step of the simulation.
gravity: The gravity as a 3-dimensional vector.
worker_id: The id of the multi-threaded simulation.
            use_visualizer: If True, render the simulation using the
                debugging visualizer.
"""
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
# Create the physics backend.
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
"""Delete the simulator."""
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
"""Reset the simulation."""
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
"""Start the simulation."""
self.physics.start()
self._num_steps = 0
def step(self):
"""Take a simulation step."""
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
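    # A minimal driver loop, as a sketch (the asset directory and step
    # count below are illustrative assumptions, not part of this module):
    #
    #   sim = Simulator(assets_dir='./assets', use_visualizer=True)
    #   sim.reset()
    #   sim.start()
    #   while sim.num_steps < 1000:
    #       sim.step()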
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
"""Add a body to the simulation.
Args:
            filename: The path to the URDF file to be loaded. If the path
                is not an absolute path, it will be joined with the assets
                directory.
pose: The initial pose as an instance of Pose.
scale: The scaling factor of the body.
is_static: If True, set the base of the body to be static.
is_controllable: If True, the body can apply motor controls.
name: Used as a reference of the body in this Simulator instance.
Returns:
An instance of Body.
"""
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
# Create the body.
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
# Add the body to the dictionary.
self._bodies[body.name] = body
return body
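    # Usage sketch (the URDF path, pose and name are illustrative
    # assumptions):
    #
    #   table = sim.add_body(
    #       'envs/table/table.urdf',
    #       pose=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
    #       is_static=True,
    #       name='table')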
def remove_body(self, name):
"""Remove the body.
Args:
            name: The name of the body to be removed.
"""
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
"""Add a constraint to the simulation.
Args:
parent: The parent entity as an instance of Entity.
child: The child entity as an instance of Entity.
joint_type: The type of the joint.
joint_axis: The axis of the joint.
parent_frame_pose: The pose of the joint in the parent frame.
child_frame_pose: The pose of the joint in the child frame.
            max_force: Maximum force the constraint can apply.
            max_linear_velocity: Maximum linear velocity.
            max_angular_velocity: Maximum angular velocity.
            is_controllable: If True, the constraint can apply motor controls.
            name: Used as a reference of the constraint in this Simulator
                instance.
Returns:
An instance of Constraint.
"""
# Create the constraint.
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
# Add the constraint to the dictionary.
self._constraints[constraint.name] = constraint
return constraint
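    # Usage sketch (the parent/child bodies and the name are illustrative
    # assumptions):
    #
    #   weld = sim.add_constraint(
    #       parent=gripper, child=tool, joint_type='fixed',
    #       max_force=100.0, name='gripper_tool_weld')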
def receive_robot_commands(self,
robot_command,
component_type='body'):
"""Receive a robot command.
Args:
robot_command: An instance of RobotCommand.
component_type: Either 'body' or 'constraint'.
"""
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
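    # Dispatch sketch: a RobotCommand whose component is registered as
    # 'arm' and whose command_type is 'set_target_positions' (an assumed
    # method name on the body) resolves, via getattr above, to:
    #
    #   self._bodies['arm'].set_target_positions(**robot_command.arguments)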
def check_contact(self, entity_a, entity_b=None):
"""Check if the loaded object is stable.
Args:
entity_a: The first entity.
entity_b: The second entity, None for any entities.
Returns:
True if they have contacts, False otherwise.
"""
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
"""Check if the loaded object is stable.
Args:
            body: An instance of Body.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
Returns:
is_stable: True if the linear velocity and the angular velocity are
almost zero; False otherwise.
"""
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
"""Wait until the objects are stable.
Args:
body: An instance of body or a list of bodies.
linear_velocity_threshold: Linear velocity threshold of being
stable.
angular_velocity_threshold: Angular velocity threshold of being
stable.
check_after_steps: Number of steps before checking.
min_stable_steps: Minimum number of steps required to be stable.
max_steps: Maximum steps to wait for objects being stable.
"""
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
# Check if all bodies are stable.
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
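    # Example (a sketch continuing the add_body example above; 'box.urdf' is
    # a hypothetical asset, and the thresholds shown are this method's
    # defaults):
    #
    #     box = sim.add_body('box.urdf', name='box')
    #     sim.wait_until_stable(box,
    #                           linear_velocity_threshold=0.005,
    #                           angular_velocity_threshold=0.005)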
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
"""Plot a 6-DoF pose or a frame in the debugging visualizer.
Args:
            pose: The pose to be plotted.
axis_length: The length of the axes.
text: Text showing up next to the frame.
text_size: Size of the text.
text_color: Color of the text.
"""
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
"""Plot a pose or a frame in the debugging visualizer.
Args:
start: Starting point of the line.
end: Ending point of the line.
line_color: Color of the line.
line_width: Width of the line.
"""
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
"""Clear all visualization items."""
pybullet.removeAllUserDebugItems()
| 31.136161
| 79
| 0.538533
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import pybullet
from robovat.math.pose import Pose
from robovat.simulation import physics
from robovat.simulation.body import Body
from robovat.simulation.controllable_body import ControllableBody
from robovat.simulation.constraint import Constraint
from robovat.simulation.controllable_constraint import ControllableConstraint
class Simulator(object):
def __init__(self,
assets_dir=None,
physics_backend='BulletPhysics',
time_step=1e-3,
gravity=[0, 0, -9.8],
worker_id=0,
use_visualizer=False):
self._assets_dir = os.path.abspath(assets_dir or './')
self._gravity = gravity
physics_class = getattr(physics, physics_backend)
self._physics = physics_class(
time_step=time_step,
use_visualizer=use_visualizer,
worker_id=worker_id)
self._num_steps = 0
def __del__(self):
del self._physics
@property
def assets_dir(self):
return self._assets_dir
@property
def physics(self):
return self._physics
@property
def bodies(self):
return self._bodies
@property
def num_steps(self):
return self._num_steps
@property
def time_step(self):
return self.physics.time_step
@property
def constraints(self):
return self._constraints
def reset(self):
self.physics.reset()
self.physics.set_gravity(self._gravity)
self._bodies = dict()
self._constraints = dict()
self._num_steps = 0
def start(self):
self.physics.start()
self._num_steps = 0
def step(self):
for body in self.bodies.values():
body.update()
for constraint in self.constraints.values():
constraint.update()
self.physics.step()
self._num_steps += 1
def add_body(self,
filename,
pose=None,
scale=1.0,
is_static=False,
is_controllable=False,
name=None):
if os.path.isabs(filename):
path = filename
else:
path = os.path.join(self._assets_dir, filename)
if pose is None:
pose = [[0, 0, 0], [0, 0, 0]]
if is_controllable:
body = ControllableBody(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
else:
body = Body(
simulator=self,
filename=path,
pose=pose,
scale=scale,
is_static=is_static,
name=name)
self._bodies[body.name] = body
return body
def remove_body(self, name):
self.physics.remove_body(self._bodies[name].uid)
del self._bodies[name]
def add_constraint(self,
parent,
child,
joint_type='fixed',
joint_axis=[0, 0, 0],
parent_frame_pose=None,
child_frame_pose=None,
max_force=None,
max_linear_velocity=None,
max_angular_velocity=None,
is_controllable=False,
name=None):
if is_controllable:
constraint = ControllableConstraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
max_linear_velocity=max_linear_velocity,
max_angular_velocity=max_angular_velocity,
name=name)
else:
assert max_linear_velocity is None
assert max_angular_velocity is None
constraint = Constraint(
parent,
child,
joint_type,
joint_axis,
parent_frame_pose,
child_frame_pose,
max_force=max_force,
name=name)
self._constraints[constraint.name] = constraint
return constraint
def receive_robot_commands(self,
robot_command,
component_type='body'):
if component_type == 'body':
component = self._bodies[robot_command.component]
elif component_type == 'constraint':
component = self._constraints[robot_command.component]
else:
raise ValueError('Unrecognized component type: %r' %
component_type)
command_method = getattr(component, robot_command.command_type)
command_method(**robot_command.arguments)
def check_contact(self, entity_a, entity_b=None):
def _check_contact(entity_a, entity_b=None):
a_uid = entity_a.uid
if entity_b is None:
b_uid = None
else:
b_uid = entity_b.uid
contact_points = self._physics.get_contact_points(
a_uid, b_uid)
has_contact = len(contact_points) > 0
return has_contact
if not isinstance(entity_a, (list, tuple)):
entities_a = [entity_a]
else:
entities_a = entity_a
if not isinstance(entity_b, (list, tuple)):
entities_b = [entity_b]
else:
entities_b = entity_b
        for a in entities_a:
            for b in entities_b:
                if _check_contact(a, b):
                    return True
        return False
def check_stable(self,
body,
linear_velocity_threshold,
angular_velocity_threshold):
linear_velocity = np.linalg.norm(body.linear_velocity)
angular_velocity = np.linalg.norm(body.angular_velocity)
if linear_velocity_threshold is None:
has_linear_velocity = False
else:
has_linear_velocity = (
linear_velocity >= linear_velocity_threshold)
if angular_velocity_threshold is None:
has_angular_velocity = False
else:
has_angular_velocity = (
angular_velocity >= angular_velocity_threshold)
is_stable = (not has_linear_velocity) and (not has_angular_velocity)
return is_stable
def wait_until_stable(self,
body,
linear_velocity_threshold=0.005,
angular_velocity_threshold=0.005,
check_after_steps=100,
min_stable_steps=100,
max_steps=2000):
if isinstance(body, (list, tuple)):
body_list = body
else:
body_list = [body]
num_steps = 0
num_stable_steps = 0
        while True:
self.step()
num_steps += 1
if num_steps < check_after_steps:
continue
all_stable = True
for b in body_list:
is_stable = self.check_stable(
b,
linear_velocity_threshold,
angular_velocity_threshold)
if not is_stable:
all_stable = False
break
if all_stable:
num_stable_steps += 1
if (num_stable_steps >= min_stable_steps or
num_steps >= max_steps):
break
def plot_pose(self,
pose,
axis_length=1.0,
text=None,
text_size=1.0,
text_color=[0, 0, 0]):
if not isinstance(pose, Pose):
pose = Pose(pose)
origin = pose.position
x_end = origin + np.dot([axis_length, 0, 0], pose.matrix3.T)
y_end = origin + np.dot([0, axis_length, 0], pose.matrix3.T)
z_end = origin + np.dot([0, 0, axis_length], pose.matrix3.T)
pybullet.addUserDebugLine(
origin,
x_end,
lineColorRGB=[1, 0, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
y_end,
lineColorRGB=[0, 1, 0],
lineWidth=2)
pybullet.addUserDebugLine(
origin,
z_end,
lineColorRGB=[0, 0, 1],
lineWidth=2)
if text is not None:
pybullet.addUserDebugText(
text,
origin,
text_color,
text_size)
def plot_line(self,
start,
end,
line_color=[0, 0, 0],
line_width=1):
pybullet.addUserDebugLine(
start,
end,
lineColorRGB=line_color,
lineWidth=line_width)
def clear_visualization(self):
pybullet.removeAllUserDebugItems()
| true
| true
|
790a73dc386e5f7248c4ddc6e27d71bea43bc2d5
| 2,363
|
py
|
Python
|
src/commercetools/platform/client/login/by_project_key_login_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 15
|
2018-11-02T14:35:52.000Z
|
2022-03-16T07:51:44.000Z
|
src/commercetools/platform/client/login/by_project_key_login_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 84
|
2018-11-02T12:50:32.000Z
|
2022-03-22T01:25:54.000Z
|
src/commercetools/platform/client/login/by_project_key_login_request_builder.py
|
labd/commercetools-python-sdk
|
d8ec285f08d56ede2e4cad45c74833f5b609ab5c
|
[
"MIT"
] | 13
|
2019-01-03T09:16:50.000Z
|
2022-02-15T18:37:19.000Z
|
# This file is automatically generated by the rmf-codegen project.
#
# The Python code generator is maintained by Lab Digital. If you want to
# contribute to this project then please do not edit this file directly
# but send a pull request to the Lab Digital fork of rmf-codegen at
# https://github.com/labd/rmf-codegen
import typing
import warnings
from ...models.customer import CustomerSignin, CustomerSignInResult
from ...models.error import ErrorResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyLoginRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def post(
self,
body: "CustomerSignin",
*,
        headers: typing.Optional[typing.Dict[str, str]] = None,
        options: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Optional["CustomerSignInResult"]:
"""Authenticate Customer (Sign In). Retrieves the authenticated
customer (a customer that matches the given email/password pair).
If used with an access token for Anonymous Sessions,
all orders and carts belonging to the anonymousId will be assigned to the newly created customer.
        If a cart is returned as part of the CustomerSignInResult,
        it has been recalculated: it will have up-to-date prices, taxes and
        discounts, and invalid line items will have been removed.
"""
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/login",
params={},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code in (201, 200):
return CustomerSignInResult.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
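    # Example usage (a sketch; the client construction and the CustomerSignin
    # payload fields shown are assumptions for illustration):
    #
    #     builder = ByProjectKeyLoginRequestBuilder("my-project", client)
    #     result = builder.post(
    #         CustomerSignin(email="user@example.com", password="secret")
    #     )
    #     if result is not None:
    #         print(result.customer)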
| 36.921875
| 105
| 0.660601
|
import typing
import warnings
from ...models.customer import CustomerSignin, CustomerSignInResult
from ...models.error import ErrorResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyLoginRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def post(
self,
body: "CustomerSignin",
*,
        headers: typing.Optional[typing.Dict[str, str]] = None,
        options: typing.Optional[typing.Dict[str, typing.Any]] = None,
) -> typing.Optional["CustomerSignInResult"]:
headers = {} if headers is None else headers
response = self._client._post(
endpoint=f"/{self._project_key}/login",
params={},
json=body.serialize(),
headers={"Content-Type": "application/json", **headers},
options=options,
)
if response.status_code in (201, 200):
return CustomerSignInResult.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
warnings.warn("Unhandled status code %d" % response.status_code)
| true
| true
|
790a742b6543de153cbeda83fe3f3076432fa81d
| 3,280
|
py
|
Python
|
snooty/test_specparser.py
|
rayangler/snooty-parser
|
3812adab1338ef78ff6f9aecae5e17d2ec5c5181
|
[
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 15
|
2019-04-25T15:38:31.000Z
|
2022-01-21T00:09:29.000Z
|
snooty/test_specparser.py
|
rayangler/snooty-parser
|
3812adab1338ef78ff6f9aecae5e17d2ec5c5181
|
[
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 69
|
2019-03-05T02:30:41.000Z
|
2022-03-02T17:51:08.000Z
|
snooty/test_specparser.py
|
rayangler/snooty-parser
|
3812adab1338ef78ff6f9aecae5e17d2ec5c5181
|
[
"Apache-2.0",
"CNRI-Python-GPL-Compatible"
] | 33
|
2019-02-05T21:18:17.000Z
|
2021-12-13T18:24:22.000Z
|
import pytest
from . import specparser
def test_load() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
[enum]
user_level = ["beginner", "intermediate", "advanced"]
[directive._parent]
content_type = "block"
options.foo = ["path", "uri"]
[directive.child]
inherit = "_parent"
argument_type = "user_level"
deprecated = true
[role._parent]
help = "test-role"
type = "text"
[role.child]
inherit = "_parent"
[rstobject._parent]
help = "test-rstobject"
[rstobject.child]
inherit = "_parent"
"""
)
assert spec.meta.version == 0
assert spec.enum["user_level"] == ["beginner", "intermediate", "advanced"]
assert spec.directive["child"] == specparser.Directive(
inherit="_parent",
example=None,
help=None,
content_type="block",
argument_type="user_level",
required_context=None,
deprecated=True,
domain=None,
options={"foo": [specparser.PrimitiveType.path, specparser.PrimitiveType.uri]},
name="child",
)
# Test these in the opposite order of the definition to ensure that each "type" of definition
# has a separate inheritance namespace
assert spec.rstobject["child"].help == "test-rstobject"
assert spec.role["child"].help == "test-role"
assert spec.role["child"].type == specparser.PrimitiveRoleType.text
validator = spec.get_validator(
[specparser.PrimitiveType.nonnegative_integer, "user_level"]
)
assert validator("10") == 10
assert validator("intermediate") == "intermediate"
with pytest.raises(ValueError):
validator("-10")
with pytest.raises(ValueError):
validator("foo")
def test_inheritance_cycle() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive.parent]
inherit = "child"
[directive.child]
inherit = "parent"
"""
)
def test_missing_parent() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive._parent]
content_type = "block"
[directive.child]
inherit = "parent"
"""
)
def test_bad_type() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
"""
)
with pytest.raises(ValueError):
spec.get_validator("gjriojwe")
def test_bad_version() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = -1"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 1"""
)
def test_bad_link() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/kotlin/latest/"}"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/%s/kotlin/latest/%s"}"""
)
| 21.721854
| 97
| 0.57561
|
import pytest
from . import specparser
def test_load() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
[enum]
user_level = ["beginner", "intermediate", "advanced"]
[directive._parent]
content_type = "block"
options.foo = ["path", "uri"]
[directive.child]
inherit = "_parent"
argument_type = "user_level"
deprecated = true
[role._parent]
help = "test-role"
type = "text"
[role.child]
inherit = "_parent"
[rstobject._parent]
help = "test-rstobject"
[rstobject.child]
inherit = "_parent"
"""
)
assert spec.meta.version == 0
assert spec.enum["user_level"] == ["beginner", "intermediate", "advanced"]
assert spec.directive["child"] == specparser.Directive(
inherit="_parent",
example=None,
help=None,
content_type="block",
argument_type="user_level",
required_context=None,
deprecated=True,
domain=None,
options={"foo": [specparser.PrimitiveType.path, specparser.PrimitiveType.uri]},
name="child",
)
assert spec.rstobject["child"].help == "test-rstobject"
assert spec.role["child"].help == "test-role"
assert spec.role["child"].type == specparser.PrimitiveRoleType.text
validator = spec.get_validator(
[specparser.PrimitiveType.nonnegative_integer, "user_level"]
)
assert validator("10") == 10
assert validator("intermediate") == "intermediate"
with pytest.raises(ValueError):
validator("-10")
with pytest.raises(ValueError):
validator("foo")
def test_inheritance_cycle() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive.parent]
inherit = "child"
[directive.child]
inherit = "parent"
"""
)
def test_missing_parent() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[directive._parent]
content_type = "block"
[directive.child]
inherit = "parent"
"""
)
def test_bad_type() -> None:
spec = specparser.Spec.loads(
"""
[meta]
version = 0
"""
)
with pytest.raises(ValueError):
spec.get_validator("gjriojwe")
def test_bad_version() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = -1"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 1"""
)
def test_bad_link() -> None:
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/kotlin/latest/"}"""
)
with pytest.raises(ValueError):
specparser.Spec.loads(
"""
[meta]
version = 0
[role."kotlin-sdk"]
type = {link = "https://docs.mongodb.com/realm-sdks/%s/kotlin/latest/%s"}"""
)
| true
| true
|
790a74a9becc2f129d39e41df4fd43afa655eb66
| 548
|
py
|
Python
|
src/app.py
|
budavariam/activity-visualizer
|
fd77e68eb7c50f4031865257740f00c26fcb9894
|
[
"MIT"
] | null | null | null |
src/app.py
|
budavariam/activity-visualizer
|
fd77e68eb7c50f4031865257740f00c26fcb9894
|
[
"MIT"
] | null | null | null |
src/app.py
|
budavariam/activity-visualizer
|
fd77e68eb7c50f4031865257740f00c26fcb9894
|
[
"MIT"
] | 1
|
2022-02-11T13:14:20.000Z
|
2022-02-11T13:14:20.000Z
|
import os
import layout
import callbacks # layout needs to be defined before creating callbacks
import routes
import appserver
server = appserver.app.server
if __name__ == "__main__":
    debug_mode = os.getenv("DEBUG", "false") == "true"
    if debug_mode:
        print("Initiating server. Debug mode enabled.")
        # appserver.app.enable_dev_tools(debug=True)
    else:
        print("Initiating server.")
appserver.app.run_server(
debug=debug_mode,
host="0.0.0.0",
port=5000
)
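# Example invocation (an assumption about how this module is started; the
# server then listens on 0.0.0.0:5000 as configured above):
#
#     DEBUG=true python src/app.py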
| 27.4
| 73
| 0.671533
|
import os
import layout
import callbacks
import routes
import appserver
server = appserver.app.server
if __name__ == "__main__":
    debug_mode = os.getenv("DEBUG", "false") == "true"
    if debug_mode:
        print("Initiating server. Debug mode enabled.")
    else:
        print("Initiating server.")
appserver.app.run_server(
debug=debug_mode,
host="0.0.0.0",
port=5000
)
| true
| true
|
790a75e7d4eb0149caf084cd7775eb6559a24545
| 148,921
|
py
|
Python
|
venv/lib/python3.9/site-packages/IPython/core/interactiveshell.py
|
CMU-IDS-2022/final-project-the-evaluators
|
3b9262ad1a0f7315208a94a05ea1ce38e679d01d
|
[
"BSD-3-Clause"
] | null | null | null |
venv/lib/python3.9/site-packages/IPython/core/interactiveshell.py
|
CMU-IDS-2022/final-project-the-evaluators
|
3b9262ad1a0f7315208a94a05ea1ce38e679d01d
|
[
"BSD-3-Clause"
] | null | null | null |
venv/lib/python3.9/site-packages/IPython/core/interactiveshell.py
|
CMU-IDS-2022/final-project-the-evaluators
|
3b9262ad1a0f7315208a94a05ea1ce38e679d01d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import dis
import functools
import inspect
import os
import re
import runpy
import subprocess
import sys
import tempfile
import traceback
import types
import warnings
from ast import stmt
from io import open as io_open
from logging import error
from pathlib import Path
from typing import Callable
from typing import List as ListType
from typing import Optional, Tuple
from warnings import warn
from pickleshare import PickleShareDB
from tempfile import TemporaryDirectory
from traitlets import (
Any,
Bool,
CaselessStrEnum,
Dict,
Enum,
Instance,
Integer,
List,
Type,
Unicode,
default,
observe,
validate,
)
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
import IPython.core.hooks
from IPython.core import magic, oinspect, page, prefilter, ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import InterruptiblePdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.events import EventManager, available_events
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.paths import get_ipython_dir
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize, io, openpy, py3compat
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
from IPython.utils.process import getoutput, system
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
sphinxify: Optional[Callable]
try:
import docrepr.sphinxify as sphx
def sphinxify(oinfo):
wrapped_docstring = sphx.wrap_main_docstring(oinfo)
def sphinxify_docstring(docstring):
with TemporaryDirectory() as dirname:
return {
"text/html": sphx.sphinxify(wrapped_docstring, dirname),
"text/plain": docstring,
}
return sphinxify_docstring
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
from ast import Module
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (
_asyncio_runner,
_curio_runner,
_pseudo_sync_runner,
_should_be_async,
_trio_runner,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec: Optional[BaseException] = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
        issued). This limit is defined because otherwise you'll spend more
        time re-flushing a too-small cache than working.
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
        For example, ipykernel currently uses
        `shell.input_splitter.check_complete`.
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
    last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = self.compiler_class()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
# The files here are stored with Path from Pathlib
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
        # Get system encoding at startup time. Certain terminals (like Emacs
        # under Win32) have it set to None, and we need to have a known valid
        # encoding to use in the raw_input() method.
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# implemented in subclasses, TerminalInteractiveShell does call
# colorama.init().
pass
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
        # This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
@staticmethod
def get_path_links(p: Path):
"""Gets path links including all symlinks
Examples
--------
In [1]: from IPython.core.interactiveshell import InteractiveShell
In [2]: import sys, pathlib
In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
In [4]: len(paths) == len(set(paths))
Out[4]: True
In [5]: bool(paths)
Out[5]: True
"""
paths = [p]
while p.is_symlink():
new_path = Path(os.readlink(p))
if not new_path.is_absolute():
new_path = p.parent / new_path
p = new_path
paths.append(p)
return paths
def init_virtualenv(self):
"""Add the current virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
elif os.environ["VIRTUAL_ENV"] == "":
warn("Virtual env path set to '', please check if this is intended.")
return
p = Path(sys.executable)
p_venv = Path(os.environ["VIRTUAL_ENV"])
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = self.get_path_links(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.parts[1] == "cygdrive":
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
if any(p_venv == p.parents[1] for p in paths):
# Our exe is inside or has access to the virtualenv, don't need to do anything.
return
if sys.platform == "win32":
virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
else:
virtual_env_path = Path(
os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
)
p_ver = sys.version_info[:2]
# Predict version from py[thon]-x.x in the $VIRTUAL_ENV
re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
if re_m:
predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
if predicted_path.exists():
p_ver = re_m.groups()
virtual_env = str(virtual_env_path).format(*p_ver)
warn(
"Attempting to work in a virtualenv. If you encounter problems, "
"please install IPython inside the virtualenv."
)
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
        # Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name, getattr(hooks, hook_name), 100)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if name in IPython.core.hooks.deprecated:
alternative = IPython.core.hooks.deprecated[name]
raise ValueError(
"Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
name, alternative
)
)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
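    # Example (a sketch; 'editor' is one of the hook names defined in
    # IPython.core.hooks, and the handler signature shown is an assumption
    # for illustration):
    #
    #     def my_editor(self, filename, linenum=None, wait=True):
    #         ...  # launch an external editor of your choice here
    #
    #     get_ipython().set_hook('editor', my_editor)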
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
raise ValueError(
"ip.register_post_execute is deprecated since IPython 1.0, use "
"ip.events.register('post_run_cell', func) instead."
)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
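    # Example (a sketch; assumes an exception has already populated
    # sys.last_traceback, e.g. after an error in the REPL):
    #
    #     ip = get_ipython()
    #     ip.debugger(force=True)  # enter pdb on the most recent traceback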
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument. This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <aleaxit@yahoo.com>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <hohn@hooknose.lbl.gov> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
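    # Sketch of the three call patterns described above (`shell` is assumed
    # to be an InteractiveShell instance; values are illustrative):
    #
    #   mod, ns = shell.prepare_user_module()            # fresh module + dict
    #   mod, ns = shell.prepare_user_module(user_ns={})  # dummy module wraps dict
    #   assert mod.__dict__ is ns                        # holds in both cases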
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
        # user_ns, and we sync those contents into user_ns_hidden so that these
        # initial variables aren't shown by %who. After the sync, we add the
        # rest of what we *do* want the user to see with %who even on a new
        # session (probably nothing, so they really only see their own stuff).
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True, aggressive=False):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
        if aggressive and not hasattr(self, "_sys_modules_keys"):
            print("Cannot restore sys.modules, no snapshot")
        elif aggressive:
            print("culling sys.modules...")
current_keys = set(sys.modules.keys())
for k in current_keys - self._sys_modules_keys:
if k.startswith("multiprocessing"):
continue
del sys.modules[k]
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
if cmd not in self.magics_manager.magics['line']:
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError as e:
raise NameError("name '%s' is not defined" % varname) from e
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
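    # Illustrative usage (a sketch; `shell` stands for an InteractiveShell):
    #
    #   shell.user_ns['data'] = [1, 2, 3]
    #   shell.del_var('data')               # drop all references to the object
    #   shell.del_var('tmp', by_name=True)  # drop the name in each namespace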
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
            variable names in the user's namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError as e:
raise TypeError('regex must be a string or compiled pattern') from e
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
            # Iterate over a copy of the keys, since entries are deleted from
            # the namespace as we go.
            for var in list(ns):
                if m.search(var):
                    del ns[var]
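    # Illustrative usage (a sketch): clear every user variable whose name
    # starts with 'tmp_', leaving everything else untouched:
    #
    #   shell.reset_selective(r'^tmp_')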
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
be used to give the variable names. If just the variable names are
            given (list/tuple/str), then the variable values are looked up in
            the caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
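    # Illustrative usage (a sketch; `shell` is an InteractiveShell instance):
    #
    #   shell.push({'x': 1, 'y': 2})    # dict form: plain namespace update
    #   a, b = 10, 20
    #   shell.push('a b')               # name form: values read from this frame
    #   shell.push({'k': 0}, interactive=False)   # hidden from %who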
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
If attrname represents a property, return it unevaluated (in case it has
        side effects or raises an error).
"""
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = (
sphinxify(self.object_inspect(oname)) if self.sphinxify_docstring else None
)
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw,
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
docformat = (
sphinxify(self.object_inspect(oname))
if self.sphinxify_docstring
else None
)
return self.inspector._get_info(
info.obj,
oname,
info=info,
detail_level=detail_level,
formatter=docformat,
omit_sections=omit_sections,
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = InterruptiblePdb
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
Notes
-----
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing.
"""
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
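    # Sketch of registering a custom handler (names are illustrative; the
    # handler must return a list of strings, a single string, or None):
    #
    #   def my_handler(self, etype, value, tb, tb_offset=None):
    #       return ['Handled: %s' % etype.__name__]
    #
    #   shell.set_custom_exc((ValueError,), my_handler)
    #   shell.set_custom_exc((), None)   # restore default behaviour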
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
if hasattr(value, "_render_traceback_"):
stb = value._render_traceback_()
else:
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset
)
except Exception:
print(
"Unexpected exception formatting exception. Falling back to standard exception"
)
traceback.print_exc()
return None
                # Actually show the traceback
                self._showtraceback(etype, value, stb)
                if self.call_pdb:
                    # drop into debugger
                    self.debugger(force=True)
                return
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
    def _showtraceback(self, etype, evalue, stb: list):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
val = self.InteractiveTB.stb2text(stb)
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
Example::
            In [1]: _ip.set_next_input("Hello World")
            In [2]: Hello World_  # cursor is here
"""
self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (
cd_completer,
magic_run_completer,
module_completer,
reset_completer,
)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
            A string of text to be completed on. It can be given as empty, in
            which case a line/cursor-position pair must be given instead. In
            this case, the completer itself will split the line like readline
            does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
Notes
-----
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Examples
--------
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
`completer` should have the following signature::
            def completion(self: Completer, text: str) -> List[str]:
                raise NotImplementedError
        It will be bound to the current Completer instance; it receives the
        text being completed and returns a list of completions to suggest to
        the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
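    # Sketch of a custom completer (hypothetical example; it is bound to the
    # Completer instance and must return a list of strings):
    #
    #   def fruit_completer(completer, text):
    #       return [w for w in ('apple', 'apricot') if w.startswith(text)]
    #
    #   shell.set_custom_completer(fruit_completer)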
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
        # even need a centralized colors management object.
self.run_line_magic('colors', self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(
func, magic_kind=magic_kind, magic_name=magic_name
)
def _find_with_lazy_load(self, /, type_, magic_name: str):
"""
Try to find a magic potentially lazy-loading it.
Parameters
----------
type_: "line"|"cell"
the type of magics we are trying to find/lazy load.
magic_name: str
            The name of the magic we are trying to find/lazy load.
            Note that lazy loading a magic may have side effects, since it can
            execute the extension's load code.
"""
finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
fn = finder(magic_name)
if fn is not None:
return fn
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy is None:
return None
self.run_line_magic("load_ext", lazy)
res = finder(magic_name)
return res
def run_line_magic(self, magic_name: str, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self._find_with_lazy_load("line", magic_name)
if fn is None:
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy:
self.run_line_magic("load_ext", lazy)
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.get_local_scope(stack_depth)
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
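    # Illustrative usage (a sketch): the programmatic equivalent of typing
    # `%who_ls int` at the prompt:
    #
    #   names = shell.run_line_magic('who_ls', 'int')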
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self._find_with_lazy_load("cell", magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.user_ns
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
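    # Illustrative usage (a sketch; the filename is an example only): the
    # programmatic equivalent of a `%%writefile` cell:
    #
    #   shell.run_cell_magic('writefile', 'out.txt', 'hello\n')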
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""
DEPRECATED
Deprecated since IPython 0.13 (warning added in
8.1), use run_line_magic(magic_name, parameter_s).
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
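    # Illustrative usage (a sketch): after this call, entering `greet` at the
    # prompt re-executes the stored source:
    #
    #   shell.define_macro('greet', 'print("hello")\n')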
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported). Should not be a command that expects input
            other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
# warn if there is an IPython magic alternative.
main_cmd = cmd.split()[0]
has_magic_alternatives = ("pip", "conda", "cd")
if main_cmd in has_magic_alternatives:
warnings.warn(
(
"You executed the system command !{0} which may not work "
"as expected. Try the IPython magic %{0} instead."
).format(main_cmd)
)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
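    # Exit-code convention sketch (posix): a command interrupted by control-c
    # (SIGINT, signal 2) exits with status 130, which the mapping above
    # stores as -2:
    #
    #   shell.system_raw('sleep 100')    # press control-c while it runs ...
    #   shell.user_ns['_exit_code']      # ... and this reads -2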
# use piped system by default, because it is better behaved
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
cmd : str
            Command to execute (cannot end in '&', as background processes are
            not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
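    # Illustrative usage (a sketch; 'ls' is an example command):
    #
    #   files = shell.getoutput('ls')               # SList, one entry per line
    #   text = shell.getoutput('ls', split=False)   # single LSString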
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
"status": "error",
"traceback": stb,
"ename": etype.__name__,
"evalue": py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
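    # Illustrative usage (a sketch): each expression is evaluated in the user
    # namespace and returned as rich display data:
    #
    #   out = shell.user_expressions({'total': '1 + 1'})
    #   out['total']['status']               # 'ok'
    #   out['total']['data']['text/plain']   # '2'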
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
*where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.suffix == ".ipynb":
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
yield fname.read_text(encoding="utf-8")
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
            If True, avoid side-effects, such as implicit displayhooks
            and logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
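    # Illustrative usage (a sketch): run a cell programmatically and inspect
    # the ExecutionResult it returns:
    #
    #   result = shell.run_cell('x = 1\nx + 1')
    #   result.success   # True when no error was raised
    #   result.result    # value of the last expression (2, under defaults)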
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool) -> ExecutionResult:
"""Internal method to run a complete IPython cell."""
        # We need to avoid calling self.transform_cell multiple times on the
        # same cell, so we store some results:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
        # run_cell_async is async, but may not actually need an eventloop.
        # When this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run and
        # %paste magics).
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell : str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded:: 7.0
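Illustrative sketch (assumes a live shell ``ip`` with autoawait enabled)::

>>> ip.should_run_async('await asyncio.sleep(0)')
True
>>> ip.should_run_async('x = 1')
False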
"""
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
warnings.warn(
"`should_run_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
else:
cell = transformed_cell
return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
silent : bool
If True, avoid side-effects, such as implicit displayhooks and
logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell : str
The cell after it has been passed through the input transformers.
preprocessing_exc_tuple : tuple, optional
Exception info tuple (as returned by ``sys.exc_info()``) if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
warnings.warn(
"`run_cell_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Store raw and processed history
if store_history and raw_cell.strip(" %") != "paste":
self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
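Example (illustrative; assumes a live shell ``ip``)::

>>> ip.transform_cell('%time 1+1')
"get_ipython().run_line_magic('time', '1+1')\n"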
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
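Examples
--------
Minimal sketch of a user-supplied transformer (``NegateNumbers`` is
hypothetical, not part of IPython)::

>>> import ast
>>> class NegateNumbers(ast.NodeTransformer):
...     def visit_Constant(self, node):
...         if isinstance(node.value, (int, float)):
...             return ast.copy_location(ast.Constant(-node.value), node)
...         return node
>>> ip.ast_transformers.append(NegateNumbers())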
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
def _update_code_co_name(self, code):
"""Python 3.10 changed the behaviour so that whenever a code object
is assembled in the compile(ast) the co_firstlineno would be == 1.
This makes pydevd/debugpy think that all cells invoked are the same
since it caches information based on (co_firstlineno, co_name, co_filename).
Given that, this function changes the code 'co_name' to be unique
based on the first real lineno of the code (which also has a nice
side effect of customizing the name so that it's not always <module>).
See: https://github.com/ipython/ipykernel/issues/841
"""
if not hasattr(code, "replace"):
# It may not be available on older versions of Python (only
# available for 3.8 onwards).
return code
try:
first_real_line = next(dis.findlinestarts(code))[1]
except StopIteration:
return code
return code.replace(co_name="<cell line: %s>" % (first_real_line,))
async def run_ast_nodes(
self,
nodelist: ListType[stmt],
cell_name: str,
interactivity="last_expr",
compiler=compile,
result=None,
):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr', 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed). 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
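For example, with ``interactivity='last_expr_or_assign'`` a cell ending
in ``x = 3`` also displays ``3``, as if ``x`` had been typed on the
following line.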
"""
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
def compare(code):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
return is_async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, "exec"))
for node in to_run_interactive:
to_run.append((node, "single"))
for node, mode in to_run:
if mode == "exec":
mod = Module([node], [])
elif mode == "single":
mod = ast.Interactive([node])
with compiler.extra_flags(
getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
if self.autoawait
else 0x0
):
code = compiler(mod, cell_name, mode)
code = self._update_code_co_name(code)
asy = compare(code)
if await self.run_code(code, result, async_=asy):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
async def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# special value to say that anything above is IPython and should be
# hidden.
__tracebackhide__ = "__ipython_bottom__"
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
if async_:
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
code : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
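Examples
--------
Illustrative sketch (assumes a live shell ``ip``; the exact indent
string depends on the active input transformers)::

>>> ip.check_complete('1 + 1')
('complete', '')
>>> ip.check_complete('for i in range(3):')
('incomplete', '    ')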
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from matplotlib_inline.backend_inline import configure_inline_support
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
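Examples
--------
Illustrative sketch (assumes ``name`` exists in the user namespace)::

>>> ip.user_ns['name'] = 'world'
>>> ip.var_expand('echo $name')
'echo world'
>>> ip.var_expand('echo {name}')
'echo world'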
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
This makes a call to tempfile.mkstemp (in a directory created via
tempfile.mkdtemp), and registers the created filename internally so IPython cleans it up
at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
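# Illustrative use (sketch): fname = ip.mktempfile(data="print('hi')")
# creates a fresh .py file under a new temp dir; both are registered in
# self.tempfiles / self.tempdirs and removed by atexit_operations().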
dir_path = Path(tempfile.mkdtemp(prefix=prefix))
self.tempdirs.append(dir_path)
handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
os.close(handle) # On Windows, there can only be one open handle on a file
file_path = Path(filename)
self.tempfiles.append(file_path)
if data:
file_path.write_text(data, encoding="utf-8")
return filename
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : str
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
If empty string is given, returns history of current session
without the last input.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
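Examples
--------
Illustrative sketch (assumes at least three inputs this session)::

>>> text = ip.extract_input_lines('1-3')  # inputs 1, 2 and 3 (closed endpoint)
>>> text = ip.extract_input_lines('1:3')  # inputs 1 and 2 (python slice form)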
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
text = "\n".join(x for _, _, x in lines)
# Skip the last line, as it's probably the magic that called this
if not range_str:
if "\n" not in text:
text = ""
else:
text = text[: text.rfind("\n")]
return text
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
If empty string is given, returns complete history of current
session, without the last line.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
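Examples
--------
Illustrative sketch; ``script.py`` and ``mymacro`` are hypothetical::

>>> code = ip.find_user_code('1-4')        # history slice
>>> code = ip.find_user_code('script.py')  # file contents
>>> code = ip.find_user_code('mymacro')    # Macro value from the user namespace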
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
# Deferred import
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target) from e
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt): # Read file
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target) from e
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception as e:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target) from e
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
def _atexit_once(self):
"""
At-exit operations that need to be called at most once.
A second call to this function on the same instance will do nothing.
"""
if not getattr(self, "_atexit_once_called", False):
self._atexit_once_called = True
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
self.history_manager = None
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
code that has the appropriate information, rather than trying to
clutter this method with platform-specific checks.
"""
self._atexit_once()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
tfile.unlink()
self.tempfiles.remove(tfile)
except FileNotFoundError:
pass
del self.tempfiles
for tdir in self.tempdirs:
try:
tdir.rmdir()
self.tempdirs.remove(tdir)
except FileNotFoundError:
pass
del self.tempdirs
# Restore user's cursor
if hasattr(self, "editing_mode") and self.editing_mode == "vi":
sys.stdout.write("\x1b[0 q")
sys.stdout.flush()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
import abc
import ast
import atexit
import builtins as builtin_mod
import dis
import functools
import inspect
import os
import re
import runpy
import subprocess
import sys
import tempfile
import traceback
import types
import warnings
from ast import stmt
from io import open as io_open
from logging import error
from pathlib import Path
from typing import Callable
from typing import List as ListType
from typing import Optional, Tuple
from warnings import warn
from pickleshare import PickleShareDB
from tempfile import TemporaryDirectory
from traitlets import (
Any,
Bool,
CaselessStrEnum,
Dict,
Enum,
Instance,
Integer,
List,
Type,
Unicode,
default,
observe,
validate,
)
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
import IPython.core.hooks
from IPython.core import magic, oinspect, page, prefilter, ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import InterruptiblePdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.events import EventManager, available_events
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.paths import get_ipython_dir
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize, io, openpy, py3compat
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
from IPython.utils.process import getoutput, system
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
sphinxify: Optional[Callable]
try:
import docrepr.sphinxify as sphx
def sphinxify(oinfo):
wrapped_docstring = sphx.wrap_main_docstring(oinfo)
def sphinxify_docstring(docstring):
with TemporaryDirectory() as dirname:
return {
"text/html": sphx.sphinxify(wrapped_docstring, dirname),
"text/plain": docstring,
}
return sphinxify_docstring
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
pass
from ast import Module
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
from .async_helpers import (
_asyncio_runner,
_curio_runner,
_pseudo_sync_runner,
_should_be_async,
_trio_runner,
)
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
@undoc
def softspace(file, newvalue):
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
class SeparateUnicode(Unicode):
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
__spec__ = None
class ExecutionInfo(object):
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
execution_count = None
error_before_exec = None
error_in_exec: Optional[BaseException] = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
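# Illustrative sketch of how callers typically consume an ExecutionResult
# (assumes a live shell ``ip``; not executed here):
#
#     result = ip.run_cell('1/0')
#     result.success                # False
#     type(result.error_in_exec)    # ZeroDivisionError
#     result.raise_error()          # re-raises the stored exception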
class InteractiveShell(SingletonConfigurable):
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
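# Illustrative configuration sketch (values as accepted by the validator
# above; 'mypkg.myrunner' is a hypothetical import path):
#
#     c.InteractiveShell.loop_runner = 'trio'            # also sets autoawait
#     c.InteractiveShell.loop_runner = 'mypkg.myrunner'  # imported, must be callable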
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
time re-flushing a too-small cache than working.
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr', 'none' or 'last_expr_or_assign', specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
_post_execute = Dict()
pylab_gui_select = None
last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
self.init_inspector()
self.raw_input_original = input
self.init_completer()
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
self.trio_runner = None
def get_ipython(self):
return self
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
self.compile = self.compiler_class()
self.meta = Struct()
self.tempfiles = []
self.tempdirs = []
self.starting_dir = os.getcwd()
self.indent_current_nsp = 0
self._post_execute = {}
def init_environment(self):
pass
def init_encoding(self):
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
pass
def init_pushd_popd_magic(self):
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_builtins(self):
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
pass
def init_prompts(self):
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
self.display_trap = DisplayTrap(hook=self.displayhook)
@staticmethod
def get_path_links(p: Path):
paths = [p]
while p.is_symlink():
new_path = Path(os.readlink(p))
if not new_path.is_absolute():
new_path = p.parent / new_path
p = new_path
paths.append(p)
return paths
def init_virtualenv(self):
if 'VIRTUAL_ENV' not in os.environ:
return
elif os.environ["VIRTUAL_ENV"] == "":
warn("Virtual env path set to '', please check if this is intended.")
return
p = Path(sys.executable)
p_venv = Path(os.environ["VIRTUAL_ENV"])
# The stdlib venv may symlink sys.executable, so realpath alone is not enough;
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# Instead, check every item in the symlink chain.
paths = self.get_path_links(p)
if p_venv.parts[1] == "cygdrive":
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
if any(p_venv == p.parents[1] for p in paths):
return
if sys.platform == "win32":
virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
else:
virtual_env_path = Path(
os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
)
p_ver = sys.version_info[:2]
# Predict version from py[thon]-x.x in the $VIRTUAL_ENV
re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
if re_m:
predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
if predicted_path.exists():
p_ver = re_m.groups()
virtual_env = str(virtual_env_path).format(*p_ver)
warn(
"Attempting to work in a virtualenv. If you encounter problems, "
"please install IPython inside the virtualenv."
)
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
# Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name, getattr(hooks, hook_name), 100)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if name in IPython.core.hooks.deprecated:
alternative = IPython.core.hooks.deprecated[name]
raise ValueError(
"Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
name, alternative
)
)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
dp = f
setattr(self.hooks,name, dp)
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
raise ValueError(
"ip.register_post_execute is deprecated since IPython 1.0, use "
"ip.events.register('post_run_cell', func) instead."
)
def _clear_warning_registry(self):
# Clear the warning registry, so that different code blocks with
# overlapping warnings get their warnings shown again (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# (Historically, __builtins__ at user level can be either a dict
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is that
# code needing the built-in namespace directly should import the
# ``builtins`` module, which is guaranteed to be a real module.)
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), Python's module teardown
# sets every variable in the module to None, so functions defined in the
# script later fail because their globals still hold references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
# those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
# This function works in two parts: first we seed user_ns and mirror it
# into user_ns_hidden so those entries are hidden from %who; then we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
self.user_ns_hidden.update(ns)
self.user_ns.update(ns)
@property
def all_ns_refs(self):
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True, aggressive=False):
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many objects' __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
self.init_user_ns()
if aggressive and not hasattr(self, "_sys_modules_keys"):
print("Cannot restore sys.module, no snapshot")
elif aggressive:
print("culling sys module...")
current_keys = set(sys.modules.keys())
for k in current_keys - self._sys_modules_keys:
if k.startswith("multiprocessing"):
continue
del sys.modules[k]
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in a
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
if cmd not in self.magics_manager.magics['line']:
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError as e:
raise NameError("name '%s' is not defined" % varname) from e
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
def reset_selective(self, regex=None):
if regex is not None:
try:
m = re.compile(regex)
except TypeError as e:
raise TypeError('regex must be a string or compiled pattern') from e
# Search for keys in each namespace that match the given regex
# If a match is found, delete the key/value pair.
for ns in self.all_ns_refs:
# Snapshot the keys first: deleting from a dict while iterating
# over it directly raises RuntimeError.
for var in list(ns):
if m.search(var):
del ns[var]
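# Illustrative usage sketch (comments only; `ip` stands for an assumed live
# InteractiveShell instance):
#   ip.user_ns.update({"tmp_a": 1, "tmp_b": 2, "keep_me": 3})
#   ip.reset_selective(r"^tmp_")   # removes tmp_a and tmp_b; keep_me survives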
def push(self, variables, interactive=True):
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
def drop_by_id(self, variables):
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
if not isinstance(obj, type):
try:
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
if isinstance(attr, property):
return attr
return getattr(obj, attrname)
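# Illustrative sketch of the lookup above (comments only; the Demo class is
# made up). Looking the attribute up on the *type* returns the property
# object itself, so its getter - and any side effects - never runs:
#   >>> class Demo:
#   ...     @property
#   ...     def noisy(self):
#   ...         print("side effect!"); return 42
#   >>> InteractiveShell._getattr_property(Demo(), "noisy")
#   <property object at 0x...>   # no "side effect!" printed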
def _object_find(self, oname, namespaces=None):
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
info = self._object_find(oname, namespaces)
docformat = (
sphinxify(self.object_inspect(oname)) if self.sphinxify_docstring else None
)
if info.found:
pmethod = getattr(self.inspector, meth)
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw,
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found'
def object_inspect(self, oname, detail_level=0):
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
docformat = (
sphinxify(self.object_inspect(oname))
if self.sphinxify_docstring
else None
)
return self.inspector._get_info(
info.obj,
oname,
info=info,
detail_level=detail_level,
formatter=docformat,
omit_sections=omit_sections,
)
else:
raise KeyError(oname)
def init_history(self):
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
debugger_cls = InterruptiblePdb
def init_traceback_handlers(self, custom_exceptions):
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
self.sys_excepthook = sys.excepthook
self.set_custom_exc(*custom_exceptions)
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
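# Illustrative registration sketch (comments only; the handler name is made
# up). A handler must accept the same signature as dummy_handler above and,
# per validate_stb, return a structured traceback - a list of strings:
#   def my_handler(shell, etype, value, tb, tb_offset=None):
#       print("caught:", etype.__name__, value)
#       return shell.InteractiveTB.structured_traceback(etype, value, tb)
#   get_ipython().set_custom_exc((ZeroDivisionError,), my_handler)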
def excepthook(self, etype, value, tb):
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input line,
# there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
if hasattr(value, "_render_traceback_"):
stb = value._render_traceback_()
else:
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset
)
except Exception:
print(
"Unexpected exception formatting exception. Falling back to standard exception"
)
traceback.print_exc()
return None
self._showtraceback(etype, value, stb)
if self.call_pdb:
self.debugger(force=True)
return
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
def _showtraceback(self, etype, evalue, stb: str):
val = self.InteractiveTB.stb2text(stb)
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
def showsyntaxerror(self, filename=None, running_compiled_code=False):
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
pass
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
def showindentationerror(self):
self.showsyntaxerror()
@skip_doctest
def set_next_input(self, s, replace=False):
self.rl_next_input = s
def _indent_current_str(self):
return self.input_splitter.get_indent_spaces() * ' '
def init_completer(self):
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (
cd_completer,
magic_run_completer,
module_completer,
reset_completer,
)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
def set_completer_frame(self, frame=None):
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
mman = self.magics_manager
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
self.run_line_magic('colors', self.colors)
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(
func, magic_kind=magic_kind, magic_name=magic_name
)
def _find_with_lazy_load(self, /, type_, magic_name: str):
finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
fn = finder(magic_name)
if fn is not None:
return fn
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy is None:
return None
self.run_line_magic("load_ext", lazy)
res = finder(magic_name)
return res
def run_line_magic(self, magic_name: str, line, _stack_depth=1):
fn = self._find_with_lazy_load("line", magic_name)
if fn is None:
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy:
self.run_line_magic("load_ext", lazy)
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.get_local_scope(stack_depth)
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
def get_local_scope(self, stack_depth):
return sys._getframe(stack_depth + 1).f_locals
def run_cell_magic(self, magic_name, line, cell):
fn = self._find_with_lazy_load("cell", magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.user_ns
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
def find_line_magic(self, magic_name):
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
cmd = self.var_expand(cmd, depth=1)
# warn if there is an IPython magic alternative.
main_cmd = cmd.split()[0]
has_magic_alternatives = ("pip", "conda", "cd")
if main_cmd in has_magic_alternatives:
warnings.warn(
(
"You executed the system command !{0} which may not work "
"as expected. Try the IPython magic %{0} instead."
).format(main_cmd)
)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# On POSIX, exit codes above 128 conventionally mean "terminated by
# signal N-128"; IPython reports those as negative numbers, so a run
# ended by Ctrl-C (signal 2, exit code 130) leaves the
# _exit_code variable reading -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
self.user_ns['_exit_code'] = ec
system = system_piped
def getoutput(self, cmd, split=True, depth=0):
if cmd.rstrip().endswith('&'):
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# For now, expose the main prefilter method directly (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
"status": "error",
"traceback": stb,
"ename": etype.__name__,
"evalue": py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
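# Illustrative usage sketch for ex/ev (comments only; `ip` is an assumed
# live shell). ex executes statements, ev evaluates expressions, both in
# the user namespace:
#   ip.ex("counter = 41")
#   ip.ev("counter + 1")   # -> 42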
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
def get_cells():
if fname.suffix == ".ipynb":
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
yield fname.read_text(encoding="utf-8")
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool) -> ExecutionResult:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
warnings.warn(
"`should_run_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
try:
cell = self.transform_cell(raw_cell)
except Exception:
return False
else:
cell = transformed_cell
return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
warnings.warn(
"`run_cell_async` will not call `transform_cell`"
" automatically in the future. Please pass the result to"
" `transformed_cell` argument and any exception that happen"
" during the"
"transform in `preprocessing_exc_tuple` in"
" IPython 7.17 and above.",
DeprecationWarning,
stacklevel=2,
)
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
if store_history and raw_cell.strip(" %") != "paste":
self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
with self.display_trap:
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
self.displayhook.exec_result = result
interactivity = "none" if silent else self.ast_node_interactivity
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
self.displayhook.exec_result = None
if store_history:
self.history_manager.store_output(self.execution_count)
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
with self.builtin_trap:
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
def transform_ast(self, node):
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
def _update_code_co_name(self, code):
if not hasattr(code, "replace"):
# It may not be available on older versions of Python (only
# available for 3.8 onwards).
return code
try:
first_real_line = next(dis.findlinestarts(code))[1]
except StopIteration:
return code
return code.replace(co_name="<cell line: %s>" % (first_real_line,))
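# Illustrative note (comments only): for code compiled from "x = 1\n",
# dis.findlinestarts(code) yields (offset, lineno) pairs whose first real
# line is 1, so the rename above produces co_name "<cell line: 1>" - the
# label that later shows up in traceback frames for that cell.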
async def run_ast_nodes(
self,
nodelist: ListType[stmt],
cell_name: str,
interactivity="last_expr",
compiler=compile,
result=None,
):
if not nodelist:
return
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
def compare(code):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
return is_async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, "exec"))
for node in to_run_interactive:
to_run.append((node, "single"))
for node, mode in to_run:
if mode == "exec":
mod = Module([node], [])
elif mode == "single":
mod = ast.Interactive([node])
with compiler.extra_flags(
getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
if self.autoawait
else 0x0
):
code = compiler(mod, cell_name, mode)
code = self._update_code_co_name(code)
asy = compare(code)
if await self.run_code(code, result, async_=asy):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
async def run_code(self, code_obj, result=None, *, async_=False):
__tracebackhide__ = "__ipython_bottom__"
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
outflag = True # happens in more places, so it's easier as default
try:
try:
if async_:
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
runcode = run_code
def check_complete(self, code: str) -> Tuple[str, str]:
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
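# Illustrative usage sketch (comments only; `ip` is an assumed live shell):
#   ip.check_complete("def f(x):")   # -> ("incomplete", "    ")
#   ip.check_complete("1 + 1")       # -> ("complete", "")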
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
from matplotlib_inline.backend_inline import configure_inline_support
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
configure_inline_support(self, backend)
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab from polluting the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if this is called directly rather than through the normal
# magic machinery.
pass
else:
ns.update(frame.f_locals)
try:
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
pass
return cmd
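# Illustrative usage sketch for var_expand (comments only; `ip` is an
# assumed live shell, and the caller's locals supply `name`):
#   name = "world"
#   ip.var_expand("echo $name")   # -> "echo world" (DollarFormatter syntax)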
def mktempfile(self, data=None, prefix='ipython_edit_'):
dir_path = Path(tempfile.mkdtemp(prefix=prefix))
self.tempdirs.append(dir_path)
handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
os.close(handle) # On Windows, there can only be one open handle on a file
file_path = Path(filename)
self.tempfiles.append(file_path)
if data:
file_path.write_text(data, encoding="utf-8")
return filename
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
text = "\n".join(x for _, _, x in lines)
# Skip the last line, as it's probably the magic that called this
if not range_str:
if "\n" not in text:
text = ""
else:
text = text[: text.rfind("\n")]
return text
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
code = self.extract_input_lines(target, raw=raw)
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
from urllib.request import urlopen
response = urlopen(target)
return response.read().decode('latin1')
raise ValueError(("'%s' seem to be unreadable.") % target) from e
potential_target = [target]
try :
potential_target.insert(0,get_py_filename(target))
except IOError:
pass
for tgt in potential_target :
if os.path.isfile(tgt):
try :
return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
if not py_only :
with io_open(tgt,'r', encoding='latin1') as f :
return f.read()
raise ValueError(("'%s' seem to be unreadable.") % target) from e
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try:
codeobj = eval(target, self.user_ns)
except Exception as e:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target) from e
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
def _atexit_once(self):
if not getattr(self, "_atexit_once_called", False):
self._atexit_once_called = True
self.reset(new_session=False)
self.history_manager.end_session()
self.history_manager = None
def atexit_operations(self):
self._atexit_once()
for tfile in self.tempfiles:
try:
tfile.unlink()
self.tempfiles.remove(tfile)
except FileNotFoundError:
pass
del self.tempfiles
for tdir in self.tempdirs:
try:
tdir.rmdir()
self.tempdirs.remove(tdir)
except FileNotFoundError:
pass
del self.tempdirs
if hasattr(self, "editing_mode") and self.editing_mode == "vi":
sys.stdout.write("\x1b[0 q")
sys.stdout.flush()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
| true
| true
|
790a75fd07c5de8baf26f61759be3a510f770be0
| 2,935
|
py
|
Python
|
pandas_ta/volatility/donchian.py
|
yssource/pandas-ta
|
0f975320684a91db3c04f6ea3dd739177dcb65aa
|
[
"MIT"
] | 2
|
2021-03-30T01:23:14.000Z
|
2021-04-02T18:04:51.000Z
|
pandas_ta/volatility/donchian.py
|
lukaszbinden/pandas-ta
|
98478f8bf049a4c8748d6f3c795f4f335ced05ca
|
[
"MIT"
] | 2
|
2021-03-08T14:28:33.000Z
|
2021-03-08T16:26:53.000Z
|
pandas_ta/volatility/donchian.py
|
witokondoria/pandas-ta
|
bf7e2b395596e8a75bed863e9ce0a0f34d14e829
|
[
"MIT"
] | 1
|
2021-05-08T14:28:45.000Z
|
2021-05-08T14:28:45.000Z
|
# -*- coding: utf-8 -*-
from pandas import DataFrame
from pandas_ta.utils import get_offset, verify_series
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
"""Indicator: Donchian Channels (DC)"""
# Validate arguments
high = verify_series(high)
low = verify_series(low)
lower_length = int(lower_length) if lower_length and lower_length > 0 else 20
upper_length = int(upper_length) if upper_length and upper_length > 0 else 20
lower_min_periods = int(kwargs["lower_min_periods"]) if "lower_min_periods" in kwargs and kwargs["lower_min_periods"] is not None else lower_length
upper_min_periods = int(kwargs["upper_min_periods"]) if "upper_min_periods" in kwargs and kwargs["upper_min_periods"] is not None else upper_length
offset = get_offset(offset)
# Calculate Result
lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
mid = 0.5 * (lower + upper)
# Handle fills
if "fillna" in kwargs:
lower.fillna(kwargs["fillna"], inplace=True)
mid.fillna(kwargs["fillna"], inplace=True)
upper.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
lower.fillna(method=kwargs["fill_method"], inplace=True)
mid.fillna(method=kwargs["fill_method"], inplace=True)
upper.fillna(method=kwargs["fill_method"], inplace=True)
# Offset
if offset != 0:
lower = lower.shift(offset)
mid = mid.shift(offset)
upper = upper.shift(offset)
# Name and Categorize it
lower.name = f"DCL_{lower_length}_{upper_length}"
mid.name = f"DCM_{lower_length}_{upper_length}"
upper.name = f"DCU_{lower_length}_{upper_length}"
mid.category = upper.category = lower.category = "volatility"
# Prepare DataFrame to return
data = {lower.name: lower, mid.name: mid, upper.name: upper}
dcdf = DataFrame(data)
dcdf.name = f"DC_{lower_length}_{upper_length}"
dcdf.category = mid.category
return dcdf
donchian.__doc__ = \
"""Donchian Channels (DC)
Donchian Channels are used to measure volatility, similar to
Bollinger Bands and Keltner Channels.
Sources:
https://www.tradingview.com/wiki/Donchian_Channels_(DC)
Calculation:
Default Inputs:
lower_length=upper_length=20
LOWER = low.rolling(lower_length).min()
UPPER = high.rolling(upper_length).max()
MID = 0.5 * (LOWER + UPPER)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
lower_length (int): The period for the lower channel (rolling low min). Default: 20
upper_length (int): The period for the upper channel (rolling high max). Default: 20
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: lower, mid, upper columns.
"""
| 35.361446
| 151
| 0.69983
|
from pandas import DataFrame
from pandas_ta.utils import get_offset, verify_series
def donchian(high, low, lower_length=None, upper_length=None, offset=None, **kwargs):
high = verify_series(high)
low = verify_series(low)
lower_length = int(lower_length) if lower_length and lower_length > 0 else 20
upper_length = int(upper_length) if upper_length and upper_length > 0 else 20
lower_min_periods = int(kwargs["lower_min_periods"]) if "lower_min_periods" in kwargs and kwargs["lower_min_periods"] is not None else lower_length
upper_min_periods = int(kwargs["upper_min_periods"]) if "upper_min_periods" in kwargs and kwargs["upper_min_periods"] is not None else upper_length
offset = get_offset(offset)
lower = low.rolling(lower_length, min_periods=lower_min_periods).min()
upper = high.rolling(upper_length, min_periods=upper_min_periods).max()
mid = 0.5 * (lower + upper)
if "fillna" in kwargs:
lower.fillna(kwargs["fillna"], inplace=True)
mid.fillna(kwargs["fillna"], inplace=True)
upper.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
lower.fillna(method=kwargs["fill_method"], inplace=True)
mid.fillna(method=kwargs["fill_method"], inplace=True)
upper.fillna(method=kwargs["fill_method"], inplace=True)
if offset != 0:
lower = lower.shift(offset)
mid = mid.shift(offset)
upper = upper.shift(offset)
lower.name = f"DCL_{lower_length}_{upper_length}"
mid.name = f"DCM_{lower_length}_{upper_length}"
upper.name = f"DCU_{lower_length}_{upper_length}"
mid.category = upper.category = lower.category = "volatility"
data = {lower.name: lower, mid.name: mid, upper.name: upper}
dcdf = DataFrame(data)
dcdf.name = f"DC_{lower_length}_{upper_length}"
dcdf.category = mid.category
return dcdf
donchian.__doc__ = \
"""Donchian Channels (DC)
Donchian Channels are used to measure volatility, similar to
Bollinger Bands and Keltner Channels.
Sources:
https://www.tradingview.com/wiki/Donchian_Channels_(DC)
Calculation:
Default Inputs:
lower_length=upper_length=20
LOWER = low.rolling(lower_length).min()
UPPER = high.rolling(upper_length).max()
MID = 0.5 * (LOWER + UPPER)
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
lower_length (int): The period for the lower channel (rolling low min). Default: 20
upper_length (int): The period for the upper channel (rolling high max). Default: 20
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: lower, mid, upper columns.
"""
| true
| true
|
790a760f4de71492819effa48f545d3511700fb5
| 3,088
|
py
|
Python
|
run.py
|
franciscoadasme/pdb-bench
|
90bc877d1a8ac0748a5058707ce37067d639ebc0
|
[
"MIT"
] | null | null | null |
run.py
|
franciscoadasme/pdb-bench
|
90bc877d1a8ac0748a5058707ce37067d639ebc0
|
[
"MIT"
] | null | null | null |
run.py
|
franciscoadasme/pdb-bench
|
90bc877d1a8ac0748a5058707ce37067d639ebc0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Benchmark of handling PDB files comparing multiple libraries."""
import argparse
import glob
import os
import re
import subprocess
import sys
from pathlib import Path
def gather_libs(selected_libs):
libs = []
for path in sorted(glob.iglob("bench/*")):
lib = os.path.basename(path)
if not os.path.isdir(path) or (selected_libs and lib not in selected_libs):
continue
libs.append(lib)
return libs
def gather_tests(libs, selected_tests):
tests = []
for lib in libs:
for filepath in sorted(glob.iglob(os.path.join("bench", lib, "*"))):
test, _ = os.path.splitext(os.path.basename(filepath))
if test in tests or (selected_tests and test not in selected_tests):
continue
tests.append(test)
return tests
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-t", "--tests", help="Test names to run.")
parser.add_argument("-l", "--libraries", help="Library names to test.")
opts = parser.parse_args()
if opts.tests:
opts.tests = opts.tests.split(",")
if opts.libraries:
opts.libraries = opts.libraries.split(",")
return vars(opts)
def run_test(filepath, pdbfile, repeats=10):
*_, dirname, filename = Path(filepath).parts
basename, _ = os.path.splitext(filename)
pdbid, _ = os.path.splitext(os.path.basename(pdbfile))
print(format(f"{dirname}/{basename}/{pdbid}", "<40"), end="", flush=True)
if "schrodinger" in filepath:
cmd = [
os.path.join(os.environ["SCHRODINGER"], "run"),
filepath,
pdbfile,
str(repeats),
]
elif filepath.endswith(".py"):
cmd = ["python3", filepath, pdbfile, str(repeats)]
elif filepath.endswith(".cr"):
cmd = ["crystal", "run", "--release", filepath, "--", pdbfile, str(repeats)]
elif filepath.endswith(".tcl"):
cmd = [
"vmd",
"-dispdev",
"none",
"-e",
filepath,
"-args",
pdbfile,
str(repeats),
]
else:
# Fail fast instead of hitting an UnboundLocalError on `cmd` below.
raise ValueError(f"don't know how to run {filepath}")
try:
output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
output = output.decode(sys.stdout.encoding).strip()
try:
elapsed = float(output)
except ValueError:
elapsed = float(re.findall(r"elapsed *= *([\d\.e\-]+)", output)[0])
print(format(elapsed, ".6f"))
except subprocess.CalledProcessError:
print("failed")
opts = parse_args(sys.argv[1:])
libs = gather_libs(opts["libraries"])
tests = gather_tests(libs, opts["tests"])
pdbs = list(map(os.path.abspath, glob.glob("data/*.pdb")))
for test in tests:
for pdbfile in pdbs if test.startswith("parse") else ["data/1ake.pdb"]:
for lib in libs:
paths = glob.glob(f"bench/{lib}/{test}.*")
if not paths:
continue
run_test(paths[0], pdbfile, repeats=10 if "1htq" not in pdbfile else 3)
print("")
| 29.409524
| 84
| 0.589702
|
import argparse
import glob
import os
import re
import subprocess
import sys
from pathlib import Path
def gather_libs(selected_libs):
libs = []
for path in sorted(glob.iglob("bench/*")):
lib = os.path.basename(path)
if not os.path.isdir(path) or (selected_libs and lib not in selected_libs):
continue
libs.append(lib)
return libs
def gather_tests(libs, selected_tests):
tests = []
for lib in libs:
for filepath in sorted(glob.iglob(os.path.join("bench", lib, "*"))):
test, _ = os.path.splitext(os.path.basename(filepath))
if test in tests or (selected_tests and test not in selected_tests):
continue
tests.append(test)
return tests
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-t", "--tests", help="Test names to run.")
parser.add_argument("-l", "--libraries", help="Library names to test.")
opts = parser.parse_args()
if opts.tests:
opts.tests = opts.tests.split(",")
if opts.libraries:
opts.libraries = opts.libraries.split(",")
return vars(opts)
def run_test(filepath, pdbfile, repeats=10):
*_, dirname, filename = Path(filepath).parts
basename, _ = os.path.splitext(filename)
pdbid, _ = os.path.splitext(os.path.basename(pdbfile))
print(format(f"{dirname}/{basename}/{pdbid}", "<40"), end="", flush=True)
if "schrodinger" in filepath:
cmd = [
os.path.join(os.environ["SCHRODINGER"], "run"),
filepath,
pdbfile,
str(repeats),
]
elif filepath.endswith(".py"):
cmd = ["python3", filepath, pdbfile, str(repeats)]
elif filepath.endswith(".cr"):
cmd = ["crystal", "run", "--release", filepath, "--", pdbfile, str(repeats)]
elif filepath.endswith(".tcl"):
cmd = [
"vmd",
"-dispdev",
"none",
"-e",
filepath,
"-args",
pdbfile,
str(repeats),
]
else:
raise ValueError(f"don't know how to run {filepath}")
try:
output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
output = output.decode(sys.stdout.encoding).strip()
try:
elapsed = float(output)
except ValueError:
elapsed = float(re.findall(r"elapsed *= *([\d\.e\-]+)", output)[0])
print(format(elapsed, ".6f"))
except subprocess.CalledProcessError:
print("failed")
opts = parse_args(sys.argv[1:])
libs = gather_libs(opts["libraries"])
tests = gather_tests(libs, opts["tests"])
pdbs = list(map(os.path.abspath, glob.glob("data/*.pdb")))
for test in tests:
for pdbfile in pdbs if test.startswith("parse") else ["data/1ake.pdb"]:
for lib in libs:
paths = glob.glob(f"bench/{lib}/{test}.*")
if not paths:
continue
run_test(paths[0], pdbfile, repeats=10 if "1htq" not in pdbfile else 3)
print("")
| true
| true
|
790a762ab44054288c5f9263fb0095ff7686f748
| 4,473
|
py
|
Python
|
ninopianino/markov_trainer.py
|
NinoDoko/nino_pianino
|
2a459695932abea720103c85f486daa1145af3b2
|
[
"MIT"
] | 2
|
2016-10-20T10:53:00.000Z
|
2020-11-16T03:18:08.000Z
|
ninopianino/markov_trainer.py
|
NinoDoko/nino_pianino
|
2a459695932abea720103c85f486daa1145af3b2
|
[
"MIT"
] | null | null | null |
ninopianino/markov_trainer.py
|
NinoDoko/nino_pianino
|
2a459695932abea720103c85f486daa1145af3b2
|
[
"MIT"
] | null | null | null |
import song_generator
from markov_gen import markov_generator
import os, json
nino_dir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])
import generator
generator.set_dir_write_note(nino_dir + '/trainer/generated_notes')
def gen_kwargs():
kwargs = {
#What the general scale for the song should be - chosen randomly from this list.
'song_scale' : ['major', 'minor'],
'block_same_note_range' : [0],
#How many segments the song has.
'number_of_segments_range' : [1],
'chord_exp_var': 2,
#The range of BPMs for each segment. Chooses randomly for each segment.
'bpm_range': [400],
#A range for beats per bar for each segment. Will choose randomly.
'beats_per_bar_range' : [4],
#A range for how many chords each segment should have. Chooses randomly from this list.
'chords_per_segment_range': [1],
#A list containing the program numbers of instruments that should be used. The program will choose randomly from these.
'instruments_range' : [1, 2],
#The number of instruments that will be active throughout the song.
'number_of_song_instruments_range' : [1],
#The bias_same_note chances for the main instruments. It's a list that is randomly chosen from.
'main_instrument_bias_same_note' : [0],
#The maximum pattern note length for the main instrument
'pattern_note_max_len_range' : 1,
#Each segment will have a different accent from the previous, determined by a random value from this list.
'accent_offset' : range(-5, 5),
#A list from accents from which segments will receive their base accent.
'default_accent_range' : range(70, 85),
#Volume to be added to percussions.
'percussion_accent_offset' : 10,
#Number of extra instruments per segment.
'no_segment_instruments_range' : [0],
#Range for the number of bars per segment. Will choose randomly from this list.
'number_segment_bars_range' : [32],
#Accent range offset for instrument specific blocks.
'block_default_accent_range' : range(-5, 5),
#Chance for each instrument to follow a pattern for the duration of the segment.
'segment_instrument_pattern_chance' : 1.0,
#Upper range for how long a pattern note can last. Should not be longer than the maximum amount of beats per bar for the instrument.
'pattern_note_len_range' : 1,
#And the lower range for how long the note can last. Should not be less than 1.
'pattern_note_min_len_range' : 1,
#The dir where the songs are saved.
'generate_dir' : nino_dir + '/trainer/',
#The directory for the soundfont. This is an example, and should be supplied for specific use cases.
'soundfont' : nino_dir + '/soundfonts/FluidR3_GM.sf2',
#The song generator will randomly repeat segments and then shuffle them. This is a range of the numbers of repeats for each segment.
'segment_shuffle_range' : [1],
#We may want to have segments with few instruments and no drums. This is the chance that there are drums when the number of instruments is below the defined threshold.
'segment_percussion_chance': 0.0,
#If there's less than this many instruments in a segment, there's a chance (defined above) that there will be no percussion for that segment.
'skip_percussion_treshold' : 3,
'get_mp3' : True,
'dir_write_note' : nino_dir + '/trainer/generated_notes',
'markov_values' : nino_dir + '/trainer/results.json',
}
return kwargs
def main():
kwargs = gen_kwargs()
song_generator.generate_song(**kwargs)
markov_weight = float(raw_input('Enter a weight for the markov values. '))
generated_notes = ''
with open(nino_dir + '/trainer/generated_notes') as f:
generated_notes = '[' + f.read()[2:] + ']'
generated_notes = json.loads(generated_notes)
old_results = ''
with open(nino_dir + '/trainer/results.json') as f:
old_results = json.loads(f.read())
new_results = markov_generator.markov_from_values(old_results, generated_notes, 4, weight = markov_weight)
with open(nino_dir + '/trainer/results.json', 'w') as f:
f.write(json.dumps(new_results))
if __name__ == '__main__' :
main()
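# Standalone sketch of the note-stream parsing used in main() above (sample
# string is made up; the function is defined but never called). This assumes
# the generated_notes file holds entries written as ", {...}, {...}" - a
# leading comma-space, then comma-separated JSON objects - so dropping the
# first two characters and wrapping the rest in brackets yields a JSON array.
def _demo_parse_generated_notes():
    raw = ', {"note": 60}, {"note": 62}'
    return json.loads('[' + raw[2:] + ']')  # [{'note': 60}, {'note': 62}]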
| 37.588235
| 177
| 0.667337
|
import song_generator
from markov_gen import markov_generator
import os, json
nino_dir = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])
import generator
generator.set_dir_write_note(nino_dir + '/trainer/generated_notes')
def gen_kwargs():
kwargs = {
'song_scale' : ['major', 'minor'],
'block_same_note_range' : [0],
'number_of_segments_range' : [1],
'chord_exp_var': 2,
'bpm_range': [400],
'beats_per_bar_range' : [4],
'chords_per_segment_range': [1],
'instruments_range' : [1, 2],
'number_of_song_instruments_range' : [1],
'main_instrument_bias_same_note' : [0],
#The maximum pattern note length for the main instrument
'pattern_note_max_len_range' : 1,
#Each segment will have a different accent from the previous, determined by a random value from this list.
'accent_offset' : range(-5, 5),
#A list from accents from which segments will receive their base accent.
'default_accent_range' : range(70, 85),
#Volume to be added to percussions.
'percussion_accent_offset' : 10,
#Number of extra instruments per segment.
'no_segment_instruments_range' : [0],
#Range for the number of bars per segment. Will choose randomly from this list.
'number_segment_bars_range' : [32],
#Accent range offset for instrument specific blocks.
'block_default_accent_range' : range(-5, 5),
#Chance for each instrument to follow a pattern for the duration of the segment.
'segment_instrument_pattern_chance' : 1.0,
#Upper range for how long a pattern note can last. Should not be longer than the maximum amount of beats per bar for the instrument.
'pattern_note_len_range' : 1,
#And the lower range for how long the note can last. Should not be less than 1.
'pattern_note_min_len_range' : 1,
#The dir where the songs are saved.
'generate_dir' : nino_dir + '/trainer/',
#The directory for the soundfont. This is an example, and should be supplied for specific use cases.
'soundfont' : nino_dir + '/soundfonts/FluidR3_GM.sf2',
#The song generator will randomly repeat segments and then shuffle them. This is a range of the numbers of repeats for each segment.
'segment_shuffle_range' : [1],
#We may want to have segments with few instruments and no drums. This is the chance that there are drums when the number of instruments is below the defined threshold.
'segment_percussion_chance': 0.0,
#If there's less than this many instruments in a segment, there's a chance (defined above) that there will be no percussion for that segment.
'skip_percussion_treshold' : 3,
'get_mp3' : True,
'dir_write_note' : nino_dir + '/trainer/generated_notes',
'markov_values' : nino_dir + '/trainer/results.json',
}
return kwargs
def main():
kwargs = gen_kwargs()
song_generator.generate_song(**kwargs)
markov_weight = float(raw_input('Enter a weight for the markov values. '))
generated_notes = ''
with open(nino_dir + '/trainer/generated_notes') as f:
generated_notes = '[' + f.read()[2:] + ']'
generated_notes = json.loads(generated_notes)
old_results = ''
with open(nino_dir + '/trainer/results.json') as f:
old_results = json.loads(f.read())
new_results = markov_generator.markov_from_values(old_results, generated_notes, 4, weight = markov_weight)
with open(nino_dir + '/trainer/results.json', 'w') as f:
f.write(json.dumps(new_results))
if __name__ == '__main__' :
main()
| true
| true
|
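A toy sketch of the weighted-fold step driven by main() above: the notes the generator just emitted are merged into the stored markov values with a user-chosen weight before results.json is rewritten. markov_generator.markov_from_values is opaque in this record, so the helper below, its name, and the flat count-dict shape are assumptions, not the real API.

def weighted_merge(old_counts, new_counts, weight):
    # Fold newly observed note counts into the existing ones, scaled by `weight`.
    merged = dict(old_counts)
    for note, count in new_counts.items():
        merged[note] = merged.get(note, 0) + weight * count
    return merged

print(weighted_merge({'C4': 3, 'E4': 1}, {'C4': 2, 'G4': 1}, weight=0.5))
# -> {'C4': 4.0, 'E4': 1, 'G4': 0.5}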
790a772a6b60a69994d9a5b92d33ae9d661576eb
| 1,110
|
py
|
Python
|
setup.py
|
akhti/torch-blocksparse
|
49d029660dfa0fcf350f0e20f820872e9973973e
|
[
"MIT"
] | 110
|
2020-03-05T18:50:16.000Z
|
2022-03-19T07:24:00.000Z
|
setup.py
|
akhti/torch-blocksparse
|
49d029660dfa0fcf350f0e20f820872e9973973e
|
[
"MIT"
] | 32
|
2020-03-23T17:01:20.000Z
|
2021-04-05T14:35:54.000Z
|
setup.py
|
akhti/torch-blocksparse
|
49d029660dfa0fcf350f0e20f820872e9973973e
|
[
"MIT"
] | 21
|
2020-04-06T16:50:32.000Z
|
2021-08-11T07:04:33.000Z
|
#!/usr/bin/env python
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension
cmdclass = {}
cmdclass['build_ext'] = BuildExtension
import setuptools
ext_modules = [
CppExtension(name='torch_blocksparse_cpp_utils',
sources=['csrc/utils.cpp'],
extra_compile_args={'cxx': ['-O2',
'-fopenmp']})
]
setuptools.setup(
name = 'torch-blocksparse',
version = '1.1.1',
description = 'Block-sparse primitives for PyTorch',
author = 'Philippe Tillet',
maintainer = 'Philippe Tillet',
maintainer_email = 'ptillet@g.harvard.edu',
install_requires = ['triton', 'torch'],
url = 'https://github.com/ptillet/torch-blocksparse',
test_suite = 'nose.collector',
tests_require = ['nose', 'parameterized'],
license = 'MIT',
packages = find_packages(exclude=["csrc"]),
ext_modules = ext_modules,
cmdclass = cmdclass
)
| 30
| 70
| 0.589189
|
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension
cmdclass = {}
cmdclass['build_ext'] = BuildExtension
import setuptools
ext_modules = [
CppExtension(name='torch_blocksparse_cpp_utils',
sources=['csrc/utils.cpp'],
extra_compile_args={'cxx': ['-O2',
'-fopenmp']})
]
setuptools.setup(
name = 'torch-blocksparse',
version = '1.1.1',
description = 'Block-sparse primitives for PyTorch',
author = 'Philippe Tillet',
maintainer = 'Philippe Tillet',
maintainer_email = 'ptillet@g.harvard.edu',
install_requires = ['triton', 'torch'],
url = 'https://github.com/ptillet/torch-blocksparse',
test_suite = 'nose.collector',
tests_require = ['nose', 'parameterized'],
license = 'MIT',
packages = find_packages(exclude=["csrc"]),
ext_modules = ext_modules,
cmdclass = cmdclass
)
| true
| true
|
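The setup.py above compiles csrc/utils.cpp into a C++ extension through BuildExtension. A hedged smoke test, assuming a working C++ toolchain and an installed torch: after `pip install .`, the module should be importable under the name passed to CppExtension.

# Hypothetical post-install check; torch must import first so the extension
# can resolve its symbols. Nothing below is part of the package itself.
import torch
import torch_blocksparse_cpp_utils

print(torch_blocksparse_cpp_utils.__file__)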
790a774ee823d211db822c5c0584efd5a0954fb1
| 473
|
py
|
Python
|
videos_id/provider/vimeo.py
|
RentFreeMedia/python-video-ids
|
077dffb9f26456e703fb4e396a81df883ae4d0e3
|
[
"MIT"
] | null | null | null |
videos_id/provider/vimeo.py
|
RentFreeMedia/python-video-ids
|
077dffb9f26456e703fb4e396a81df883ae4d0e3
|
[
"MIT"
] | null | null | null |
videos_id/provider/vimeo.py
|
RentFreeMedia/python-video-ids
|
077dffb9f26456e703fb4e396a81df883ae4d0e3
|
[
"MIT"
] | null | null | null |
import re
from videos_id.platform import Platform
class Vimeo(Platform):
def __init__(self):
self.platform = "Vimeo"
def check_url(self, url):
pattern = r'https?:\/\/(?:www\.|player\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/(?:[^\/]*)\/videos\/|album\/(?:\d+)\/video\/|video\/|)(\d+)(?:$|\/|\?)'
match = re.search(pattern, url, re.IGNORECASE)
if match:
return match.group(1)
else:
return None
| 27.823529
| 164
| 0.534884
|
import re
from videos_id.platform import Platform
class Vimeo(Platform):
def __init__(self):
self.platform = "Vimeo"
def check_url(self, url):
pattern = r'https?:\/\/(?:www\.|player\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/(?:[^\/]*)\/videos\/|album\/(?:\d+)\/video\/|video\/|)(\d+)(?:$|\/|\?)'
match = re.search(pattern, url, re.IGNORECASE)
if match:
return match.group(1)
else:
return None
| true
| true
|
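A minimal exercise of the Vimeo.check_url pattern above, showing that the plain, player, and channel URL shapes all yield the numeric video id (the sample URLs are illustrative):

import re

PATTERN = r'https?:\/\/(?:www\.|player\.)?vimeo.com\/(?:channels\/(?:\w+\/)?|groups\/(?:[^\/]*)\/videos\/|album\/(?:\d+)\/video\/|video\/|)(\d+)(?:$|\/|\?)'

for url in ('https://vimeo.com/76979871',
            'https://player.vimeo.com/video/76979871',
            'https://vimeo.com/channels/staffpicks/76979871'):
    match = re.search(PATTERN, url, re.IGNORECASE)
    print(url, '->', match.group(1) if match else None)  # 76979871 each time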
790a77f6a8f7fab10233c49133436ecca665996e
| 557
|
py
|
Python
|
day04/app04/urls.py
|
General-ITer/Django-Introduction
|
e88b12682f9abc46a90a0fc79e7443537230a506
|
[
"Apache-2.0"
] | null | null | null |
day04/app04/urls.py
|
General-ITer/Django-Introduction
|
e88b12682f9abc46a90a0fc79e7443537230a506
|
[
"Apache-2.0"
] | 1
|
2020-12-09T18:26:36.000Z
|
2020-12-09T18:26:36.000Z
|
day04/app04/urls.py
|
General-ITer/Django-Introduction
|
e88b12682f9abc46a90a0fc79e7443537230a506
|
[
"Apache-2.0"
] | null | null | null |
from django.conf.urls import url, include
from django.contrib import admin
from .views import *
urlpatterns = [
url(r'^index$',index,name='index'),
url(r'^langs$',langs,name='langs'),
url(r'^newindex$',new_index),
url(r'^myindex/(\d+)$',
myindex_with_param,
name='myindex_with_param'),
url(r'^v1_index/(?P<p2>\d+)$',
myindex_with_param_v1,
name='myindex_with_param_v1'),
url(r'new_reverse',
new_reverse,
name = 'new_reverse'),
url(r'^home$',home,name='home')
]
| 27.85
| 42
| 0.59246
|
from django.conf.urls import url, include
from django.contrib import admin
from .views import *
urlpatterns = [
url(r'^index$',index,name='index'),
url(r'^langs$',langs,name='langs'),
url(r'^newindex$',new_index),
url(r'^myindex/(\d+)$',
myindex_with_param,
name='myindex_with_param'),
url(r'^v1_index/(?P<p2>\d+)$',
myindex_with_param_v1,
name='myindex_with_param_v1'),
url(r'new_reverse',
new_reverse,
name = 'new_reverse'),
url(r'^home$',home,name='home')
]
| true
| true
|
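The urlconf above leans on regex capture groups to feed view arguments: url(r'^myindex/(\d+)$', ...) hands the captured digits to the view positionally. A minimal sketch of that matching, independent of Django's resolver:

import re

m = re.match(r'^myindex/(\d+)$', 'myindex/42')
print(m.group(1))  # '42' is what myindex_with_param would receive positionally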
790a782daf803fc78611f2f7186f5aabb4791def
| 5,072
|
py
|
Python
|
tests/unit/admin/views/test_journals.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 3,103
|
2015-01-30T00:24:10.000Z
|
2022-03-31T23:21:39.000Z
|
tests/unit/admin/views/test_journals.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 6,709
|
2015-01-05T01:23:20.000Z
|
2022-03-31T14:49:46.000Z
|
tests/unit/admin/views/test_journals.py
|
fairhopeweb/warehouse
|
7d8ef742e8fe6b401190c28ce56761848041c89f
|
[
"Apache-2.0"
] | 959
|
2015-01-12T22:22:40.000Z
|
2022-03-31T22:21:51.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
from pyramid.httpexceptions import HTTPBadRequest
from warehouse.admin.views import journals as views
from ....common.db.accounts import UserFactory
from ....common.db.packaging import JournalEntryFactory, ProjectFactory
class TestProjectList:
def test_no_query(self, db_request):
journals = sorted(
[JournalEntryFactory.create() for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
result = views.journals_list(db_request)
assert result == {"journals": journals[:25], "query": None}
def test_with_page(self, db_request):
journals = sorted(
[JournalEntryFactory.create() for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
db_request.GET["page"] = "2"
result = views.journals_list(db_request)
assert result == {"journals": journals[25:], "query": None}
def test_with_invalid_page(self):
request = pretend.stub(params={"page": "not an integer"})
with pytest.raises(HTTPBadRequest):
views.journals_list(request)
def test_query_basic(self, db_request):
project0 = ProjectFactory.create()
project1 = ProjectFactory.create()
journals0 = sorted(
[
JournalEntryFactory.create(name=project0.normalized_name)
for _ in range(30)
],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]
db_request.GET["q"] = "{}".format(project0.name)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "{}".format(project0.name),
}
def test_query_term_project(self, db_request):
project0 = ProjectFactory.create()
project1 = ProjectFactory.create()
journals0 = sorted(
[
JournalEntryFactory.create(name=project0.normalized_name)
for _ in range(30)
],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]
db_request.GET["q"] = "project:{}".format(project0.name)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "project:{}".format(project0.name),
}
def test_query_term_user(self, db_request):
user0 = UserFactory.create()
user1 = UserFactory.create()
journals0 = sorted(
[JournalEntryFactory.create(submitted_by=user0) for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(submitted_by=user1) for _ in range(30)]
db_request.GET["q"] = "user:{}".format(user0.username)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "user:{}".format(user0.username),
}
def test_query_term_version(self, db_request):
journals = [JournalEntryFactory.create() for _ in range(10)]
db_request.GET["q"] = "version:{}".format(journals[0].version)
result = views.journals_list(db_request)
assert result == {
"journals": [journals[0]],
"query": "version:{}".format(journals[0].version),
}
def test_query_term_ip(self, db_request):
ipv4 = "10.6.6.6"
ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
journals0 = sorted(
[JournalEntryFactory.create(submitted_from=ipv4) for _ in range(10)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
journals1 = sorted(
[JournalEntryFactory.create(submitted_from=ipv6) for _ in range(10)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
db_request.GET["q"] = "ip:{}".format(ipv4)
result = views.journals_list(db_request)
assert result == {"journals": journals0, "query": "ip:{}".format(ipv4)}
db_request.GET["q"] = "ip:{}".format(ipv6)
result = views.journals_list(db_request)
assert result == {"journals": journals1, "query": "ip:{}".format(ipv6)}
| 34.739726
| 86
| 0.610804
|
import pretend
import pytest
from pyramid.httpexceptions import HTTPBadRequest
from warehouse.admin.views import journals as views
from ....common.db.accounts import UserFactory
from ....common.db.packaging import JournalEntryFactory, ProjectFactory
class TestProjectList:
def test_no_query(self, db_request):
journals = sorted(
[JournalEntryFactory.create() for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
result = views.journals_list(db_request)
assert result == {"journals": journals[:25], "query": None}
def test_with_page(self, db_request):
journals = sorted(
[JournalEntryFactory.create() for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
db_request.GET["page"] = "2"
result = views.journals_list(db_request)
assert result == {"journals": journals[25:], "query": None}
def test_with_invalid_page(self):
request = pretend.stub(params={"page": "not an integer"})
with pytest.raises(HTTPBadRequest):
views.journals_list(request)
def test_query_basic(self, db_request):
project0 = ProjectFactory.create()
project1 = ProjectFactory.create()
journals0 = sorted(
[
JournalEntryFactory.create(name=project0.normalized_name)
for _ in range(30)
],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]
db_request.GET["q"] = "{}".format(project0.name)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "{}".format(project0.name),
}
def test_query_term_project(self, db_request):
project0 = ProjectFactory.create()
project1 = ProjectFactory.create()
journals0 = sorted(
[
JournalEntryFactory.create(name=project0.normalized_name)
for _ in range(30)
],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(name=project1.normalized_name) for _ in range(30)]
db_request.GET["q"] = "project:{}".format(project0.name)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "project:{}".format(project0.name),
}
def test_query_term_user(self, db_request):
user0 = UserFactory.create()
user1 = UserFactory.create()
journals0 = sorted(
[JournalEntryFactory.create(submitted_by=user0) for _ in range(30)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
[JournalEntryFactory.create(submitted_by=user1) for _ in range(30)]
db_request.GET["q"] = "user:{}".format(user0.username)
result = views.journals_list(db_request)
assert result == {
"journals": journals0[:25],
"query": "user:{}".format(user0.username),
}
def test_query_term_version(self, db_request):
journals = [JournalEntryFactory.create() for _ in range(10)]
db_request.GET["q"] = "version:{}".format(journals[0].version)
result = views.journals_list(db_request)
assert result == {
"journals": [journals[0]],
"query": "version:{}".format(journals[0].version),
}
def test_query_term_ip(self, db_request):
ipv4 = "10.6.6.6"
ipv6 = "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
journals0 = sorted(
[JournalEntryFactory.create(submitted_from=ipv4) for _ in range(10)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
journals1 = sorted(
[JournalEntryFactory.create(submitted_from=ipv6) for _ in range(10)],
key=lambda j: (j.submitted_date, j.id),
reverse=True,
)
db_request.GET["q"] = "ip:{}".format(ipv4)
result = views.journals_list(db_request)
assert result == {"journals": journals0, "query": "ip:{}".format(ipv4)}
db_request.GET["q"] = "ip:{}".format(ipv6)
result = views.journals_list(db_request)
assert result == {"journals": journals1, "query": "ip:{}".format(ipv6)}
| true
| true
|
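The tests above pin the view's paging contract: 30 entries sorted newest first, 25 per page, with ?page=2 returning the remainder. A toy restatement of that slicing (assumed; not warehouse code):

journals = list(range(30))  # stand-ins for sorted JournalEntry rows

def page(items, number, size=25):
    # Page numbers are 1-based, matching db_request.GET["page"] in the tests.
    start = (number - 1) * size
    return items[start:start + size]

print(len(page(journals, 1)), len(page(journals, 2)))  # 25 5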
790a7848a38af06836d6b558951873681cc2d1e3
| 257
|
py
|
Python
|
cff/models/record.py
|
cariad/cff
|
662164a29652bf242a3a4f6ce7986e8f187f571f
|
[
"MIT"
] | null | null | null |
cff/models/record.py
|
cariad/cff
|
662164a29652bf242a3a4f6ce7986e8f187f571f
|
[
"MIT"
] | 1
|
2021-11-02T16:19:19.000Z
|
2021-11-02T16:19:19.000Z
|
cff/models/record.py
|
cariad/cff
|
662164a29652bf242a3a4f6ce7986e8f187f571f
|
[
"MIT"
] | null | null | null |
from typing import TypedDict
from cff.models.cloudfront_event import CloudFrontEvent
class Record(TypedDict):
"""Record of an event that raised a Lambda event."""
cf: CloudFrontEvent
"""The CloudFront event that raised this Lambda event."""
| 23.363636
| 61
| 0.747082
|
from typing import TypedDict
from cff.models.cloudfront_event import CloudFrontEvent
class Record(TypedDict):
cf: CloudFrontEvent
| true
| true
|
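Record above is a TypedDict: a static-typing construct that stays an ordinary dict at runtime. A self-contained sketch, with CloudFrontEvent stubbed as a plain TypedDict (its real shape in cff.models is an assumption here):

from typing import TypedDict

class CloudFrontEvent(TypedDict):  # stub standing in for cff.models.cloudfront_event
    config: dict

class Record(TypedDict):
    cf: CloudFrontEvent

record: Record = {'cf': {'config': {'distributionId': 'EXAMPLE'}}}
print(record['cf']['config'])  # plain dict access; nothing is instantiated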
790a78e4612ecd1ab2460a50584e2f5845c3f3c2
| 1,688
|
py
|
Python
|
perceptron_np.py
|
oustar/scipylearn
|
f3f3223f1170b39dc420606bdf989b6fcb705410
|
[
"Apache-2.0"
] | null | null | null |
perceptron_np.py
|
oustar/scipylearn
|
f3f3223f1170b39dc420606bdf989b6fcb705410
|
[
"Apache-2.0"
] | null | null | null |
perceptron_np.py
|
oustar/scipylearn
|
f3f3223f1170b39dc420606bdf989b6fcb705410
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
class Perceptron(object):
def __init__(self, input_num, activator):
self.activator = activator
self.weights = np.zeros((input_num))
self.bias = 0.0
def __str__(self):
return 'weights\t:%s\nbias\t:%f\n' % (self.weights, self.bias)
def predict(self, input_vec):
return self.activator(np.dot(input_vec, self.weights) + self.bias)
def train(self, input_vecs, labels, iteration, rate):
for _ in range(iteration):
self._one_iteration(input_vecs, labels, rate)
def _one_iteration(self, input_vecs, labels, rate):
samples = zip(input_vecs, labels)
for input_vec, label in samples:
output = self.predict(input_vec)
self._update_weight(input_vec, output, label, rate)
def _update_weight(self, input_vec, output, label, rate):
        delta = label - output
        self.weights += rate * delta * input_vec
        self.bias += rate * delta
def f(x):
if x > 0: return 1
else: return 0
def get_train_dataset():
vecs = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
labels = np.array([1, 0, 0, 0])
return vecs, labels
def train_and_perceptron():
p = Perceptron(2, f)
input_vecs, labels = get_train_dataset()
p.train(input_vecs, labels, 10, 0.1)
return p
if __name__ == "__main__":
and_perceptron = train_and_perceptron()
print(and_perceptron)
print ('1 and 1 = ' , and_perceptron.predict([1, 1]))
print ('1 and 0 = ' , and_perceptron.predict([1, 0]))
print ('0 and 1 = ' , and_perceptron.predict([0, 1]))
print ('0 and 0 = ' , and_perceptron.predict([0, 0]))
| 22.210526
| 74
| 0.61019
|
import numpy as np
class Perceptron(object):
def __init__(self, input_num, activator):
self.activator = activator
self.weights = np.zeros((input_num))
self.bias = 0.0
def __str__(self):
return 'weights\t:%s\nbias\t:%f\n' % (self.weights, self.bias)
def predict(self, input_vec):
return self.activator(np.dot(input_vec, self.weights) + self.bias)
def train(self, input_vecs, labels, iteration, rate):
for _ in range(iteration):
self._one_iteration(input_vecs, labels, rate)
def _one_iteration(self, input_vecs, labels, rate):
samples = zip(input_vecs, labels)
for input_vec, label in samples:
output = self.predict(input_vec)
self._update_weight(input_vec, output, label, rate)
def _update_weight(self, input_vec, output, label, rate):
        delta = label - output
        self.weights += rate * delta * input_vec
        self.bias += rate * delta
def f(x):
if x > 0: return 1
else: return 0
def get_train_dataset():
vecs = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
labels = np.array([1, 0, 0, 0])
return vecs, labels
def train_and_perceptron():
p = Perceptron(2, f)
input_vecs, labels = get_train_dataset()
p.train(input_vecs, labels, 10, 0.1)
return p
if __name__ == "__main__":
and_perceptron = train_and_perceptron()
print(and_perceptron)
print ('1 and 1 = ' , and_perceptron.predict([1, 1]))
print ('1 and 0 = ' , and_perceptron.predict([1, 0]))
print ('0 and 1 = ' , and_perceptron.predict([0, 1]))
print ('0 and 0 = ' , and_perceptron.predict([0, 0]))
| true
| true
|
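The update in _update_weight above is the classic perceptron step: w += rate * (label - output) * x and b += rate * (label - output). A self-contained re-run of that rule on an OR gate (a variation for illustration; the record itself trains AND):

import numpy as np

def step(x):
    return 1 if x > 0 else 0

w, b, rate = np.zeros(2), 0.0, 0.1
X = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
y = np.array([1, 1, 1, 0])  # OR instead of the record's AND

for _ in range(10):  # same iteration count and rate as train_and_perceptron()
    for x, label in zip(X, y):
        delta = label - step(np.dot(x, w) + b)
        w += rate * delta * x
        b += rate * delta

print([step(np.dot(x, w) + b) for x in X])  # [1, 1, 1, 0]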
790a799b03ce196e8691ee4ca8e2fca0102db1c7
| 1,012
|
py
|
Python
|
src/openprocurement/agreement/cfaua/includeme.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 3
|
2020-03-13T06:44:23.000Z
|
2020-11-05T18:25:29.000Z
|
src/openprocurement/agreement/cfaua/includeme.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 2
|
2021-03-25T23:29:58.000Z
|
2022-03-21T22:18:37.000Z
|
src/openprocurement/agreement/cfaua/includeme.py
|
scrubele/prozorro-testing
|
42b93ea2f25d8cc40e66c596f582c7c05e2a9d76
|
[
"Apache-2.0"
] | 3
|
2020-10-16T16:25:14.000Z
|
2021-05-22T12:26:20.000Z
|
import os
import openprocurement.agreement.cfaua
from logging import getLogger
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.agreement.cfaua.interfaces import IClosedFrameworkAgreementUA
from openprocurement.agreement.cfaua.models.agreement import Agreement
from openprocurement.agreement.cfaua.adapters.configurator import CFAgreementUAConfigurator
from zope.configuration.xmlconfig import file as ZcmlFile
LOGGER = getLogger("openprocurement.agreement.cfaua")
def includeme(config):
LOGGER.info("Init agreement.cfaua plugin.")
config.add_agreement_type(Agreement)
config.registry.registerAdapter(
CFAgreementUAConfigurator, (IClosedFrameworkAgreementUA, IRequest), IContentConfigurator
)
config.scan("openprocurement.agreement.cfaua.views")
ZcmlFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "configure.zcml"),
package=openprocurement.agreement.cfaua,
)
| 32.645161
| 96
| 0.8083
|
import os
import openprocurement.agreement.cfaua
from logging import getLogger
from pyramid.interfaces import IRequest
from openprocurement.api.interfaces import IContentConfigurator
from openprocurement.agreement.cfaua.interfaces import IClosedFrameworkAgreementUA
from openprocurement.agreement.cfaua.models.agreement import Agreement
from openprocurement.agreement.cfaua.adapters.configurator import CFAgreementUAConfigurator
from zope.configuration.xmlconfig import file as ZcmlFile
LOGGER = getLogger("openprocurement.agreement.cfaua")
def includeme(config):
LOGGER.info("Init agreement.cfaua plugin.")
config.add_agreement_type(Agreement)
config.registry.registerAdapter(
CFAgreementUAConfigurator, (IClosedFrameworkAgreementUA, IRequest), IContentConfigurator
)
config.scan("openprocurement.agreement.cfaua.views")
ZcmlFile(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "configure.zcml"),
package=openprocurement.agreement.cfaua,
)
| true
| true
|
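includeme above registers CFAgreementUAConfigurator as a multi-adapter over (IClosedFrameworkAgreementUA, IRequest) providing IContentConfigurator. A minimal zope.interface sketch of that registration-and-lookup pattern; the interface and class names below are placeholders, not the openprocurement ones:

from zope.interface import Interface, implementer
from zope.interface.registry import Components

class IAgreement(Interface): pass
class IRequest(Interface): pass
class IConfigurator(Interface): pass

@implementer(IConfigurator)
class Configurator:
    def __init__(self, agreement, request):
        self.agreement, self.request = agreement, request

@implementer(IAgreement)
class Agreement: pass

@implementer(IRequest)
class Request: pass

registry = Components()
registry.registerAdapter(Configurator, (IAgreement, IRequest), IConfigurator)
adapter = registry.queryMultiAdapter((Agreement(), Request()), IConfigurator)
print(type(adapter).__name__)  # Configurator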
790a79f94693e2fdc88da8df0434a6b544b844e1
| 6,890
|
py
|
Python
|
custom_components/bureau_of_meteorology/PyBoM/collector.py
|
QziP22/HomeAssistantConfig
|
6b05fa20c8267222dc66f90e94f03f5d865c57a9
|
[
"Unlicense"
] | 4
|
2019-03-26T13:57:54.000Z
|
2021-11-04T04:55:49.000Z
|
custom_components/bureau_of_meteorology/PyBoM/collector.py
|
QziP22/HomeAssistantConfig
|
6b05fa20c8267222dc66f90e94f03f5d865c57a9
|
[
"Unlicense"
] | 1
|
2021-04-03T01:10:11.000Z
|
2021-04-03T01:10:11.000Z
|
custom_components/bureau_of_meteorology/PyBoM/collector.py
|
QziP22/HomeAssistantConfig
|
6b05fa20c8267222dc66f90e94f03f5d865c57a9
|
[
"Unlicense"
] | 2
|
2019-04-02T19:20:34.000Z
|
2019-08-13T16:39:52.000Z
|
"""BOM data 'collector' that downloads the observation data."""
import asyncio
import datetime
import aiohttp
import logging
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
BASE_URL = "https://api.weather.bom.gov.au"
DAILY_FORECASTS_URL = "/v1/locations/{}/forecasts/daily"
LOCATIONS_URL = "/v1/locations/{}"
MDI_ICON_MAP = {
"clear": "mdi:weather-night",
"cloudy": "mdi:weather-cloudy",
"cyclone": "mdi:weather-hurricane",
"dust": "mdi:weather-hazy",
"dusty": "mdi:weather-hazy",
"fog": "mdi:weather-fog",
"frost": "mdi:snowflake-melt",
"haze": "mdi:weather-hazy",
"hazy": "mdi:weather-hazy",
"heavy_shower": "mdi:weather-pouring",
"heavy_showers": "mdi:weather-pouring",
"light_rain": "mdi:weather-partly-rainy",
"light_shower": "mdi:weather-light-showers",
"light_showers": "mdi:weather-light-showers",
"mostly_sunny": "mdi:weather-sunny",
"partly_cloudy": "mdi:weather-partly-cloudy",
"rain": "mdi:weather-pouring",
"shower": "mdi:weather-rainy",
"showers": "mdi:weather-rainy",
"snow": "mdi:weather-snowy",
"storm": "mdi:weather-lightning-rainy",
"storms": "mdi:weather-lightning-rainy",
"sunny": "mdi:weather-sunny",
"tropical_cyclone": "mdi:weather-hurricane",
"wind": "mdi:weather-windy",
"windy": "mdi:weather-windy",
None: None,
}
OBSERVATIONS_URL = "https://api.weather.bom.gov.au/v1/locations/{}/observations"
UV_MAP = {
"extreme": "Extreme",
"veryhigh": "Very High",
"high": "High",
"moderate": "Moderate",
"low": "Low",
None: None,
}
class Collector:
"""Data collector for BOM integration."""
def __init__(self, latitude, longitude):
"""Init BOM data collector."""
self.observations_data = None
self.daily_forecasts_data = None
self.geohash = self.geohash_encode(latitude, longitude)
_LOGGER.debug(f"geohash: {self.geohash}")
async def get_location_name(self):
"""Get JSON location name from BOM API endpoint."""
url = BASE_URL + LOCATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
locations_data = await response.json()
self.location_name = locations_data["data"]["name"]
return True
async def get_observations_data(self):
"""Get JSON observations data from BOM API endpoint."""
url = OBSERVATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.observations_data = await response.json()
await self.format_observations_data()
async def format_observations_data(self):
"""Flatten out wind and gust data."""
flattened = {}
wind = self.observations_data["data"]["wind"]
flattened["wind_speed_kilometre"] = wind["speed_kilometre"]
flattened["wind_speed_knot"] = wind["speed_knot"]
flattened["wind_direction"] = wind["direction"]
if self.observations_data["data"]["gust"] is not None:
gust = self.observations_data["data"]["gust"]
flattened["gust_speed_kilometre"] = gust["speed_kilometre"]
flattened["gust_speed_knot"] = gust["speed_knot"]
else:
flattened["gust_speed_kilometre"] = None
flattened["gust_speed_knot"] = None
self.observations_data["data"].update(flattened)
async def get_daily_forecasts_data(self):
"""Get JSON daily forecasts data from BOM API endpoint."""
url = BASE_URL + DAILY_FORECASTS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.daily_forecasts_data = await response.json()
await self.format_forecast_data()
async def format_forecast_data(self):
"""Flatten out forecast data."""
flattened = {}
days = len(self.daily_forecasts_data["data"])
for day in range(0, days):
icon = self.daily_forecasts_data["data"][day]["icon_descriptor"]
flattened["mdi_icon"] = MDI_ICON_MAP[icon]
uv = self.daily_forecasts_data["data"][day]["uv"]
flattened["uv_category"] = UV_MAP[uv["category"]]
flattened["uv_max_index"] = uv["max_index"]
flattened["uv_start_time"] = uv["start_time"]
flattened["uv_end_time"] = uv["end_time"]
rain = self.daily_forecasts_data["data"][day]["rain"]
flattened["rain_chance"] = rain["chance"]
flattened["rain_amount_min"] = rain["amount"]["min"]
# When rain amount max is None, set as rain amount min
if rain["amount"]["max"] is None:
flattened["rain_amount_max"] = flattened["rain_amount_min"]
flattened["rain_amount_range"] = rain["amount"]["min"]
else:
flattened["rain_amount_max"] = rain["amount"]["max"]
flattened["rain_amount_range"] = "{} to {}".format(
rain["amount"]["min"],
rain["amount"]["max"],
)
self.daily_forecasts_data["data"][day].update(flattened)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Refresh the data on the collector object."""
await self.get_observations_data()
await self.get_daily_forecasts_data()
def geohash_encode(self, latitude, longitude, precision=6):
base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
lat_interval = (-90.0, 90.0)
lon_interval = (-180.0, 180.0)
geohash = []
bits = [16, 8, 4, 2, 1]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
| 36.648936
| 80
| 0.594194
|
import asyncio
import datetime
import aiohttp
import logging
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
BASE_URL = "https://api.weather.bom.gov.au"
DAILY_FORECASTS_URL = "/v1/locations/{}/forecasts/daily"
LOCATIONS_URL = "/v1/locations/{}"
MDI_ICON_MAP = {
"clear": "mdi:weather-night",
"cloudy": "mdi:weather-cloudy",
"cyclone": "mdi:weather-hurricane",
"dust": "mdi:weather-hazy",
"dusty": "mdi:weather-hazy",
"fog": "mdi:weather-fog",
"frost": "mdi:snowflake-melt",
"haze": "mdi:weather-hazy",
"hazy": "mdi:weather-hazy",
"heavy_shower": "mdi:weather-pouring",
"heavy_showers": "mdi:weather-pouring",
"light_rain": "mdi:weather-partly-rainy",
"light_shower": "mdi:weather-light-showers",
"light_showers": "mdi:weather-light-showers",
"mostly_sunny": "mdi:weather-sunny",
"partly_cloudy": "mdi:weather-partly-cloudy",
"rain": "mdi:weather-pouring",
"shower": "mdi:weather-rainy",
"showers": "mdi:weather-rainy",
"snow": "mdi:weather-snowy",
"storm": "mdi:weather-lightning-rainy",
"storms": "mdi:weather-lightning-rainy",
"sunny": "mdi:weather-sunny",
"tropical_cyclone": "mdi:weather-hurricane",
"wind": "mdi:weather-windy",
"windy": "mdi:weather-windy",
None: None,
}
OBSERVATIONS_URL = "https://api.weather.bom.gov.au/v1/locations/{}/observations"
UV_MAP = {
"extreme": "Extreme",
"veryhigh": "Very High",
"high": "High",
"moderate": "Moderate",
"low": "Low",
None: None,
}
class Collector:
def __init__(self, latitude, longitude):
self.observations_data = None
self.daily_forecasts_data = None
self.geohash = self.geohash_encode(latitude, longitude)
_LOGGER.debug(f"geohash: {self.geohash}")
async def get_location_name(self):
url = BASE_URL + LOCATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
locations_data = await response.json()
self.location_name = locations_data["data"]["name"]
return True
async def get_observations_data(self):
url = OBSERVATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.observations_data = await response.json()
await self.format_observations_data()
async def format_observations_data(self):
flattened = {}
wind = self.observations_data["data"]["wind"]
flattened["wind_speed_kilometre"] = wind["speed_kilometre"]
flattened["wind_speed_knot"] = wind["speed_knot"]
flattened["wind_direction"] = wind["direction"]
if self.observations_data["data"]["gust"] is not None:
gust = self.observations_data["data"]["gust"]
flattened["gust_speed_kilometre"] = gust["speed_kilometre"]
flattened["gust_speed_knot"] = gust["speed_knot"]
else:
flattened["gust_speed_kilometre"] = None
flattened["gust_speed_knot"] = None
self.observations_data["data"].update(flattened)
async def get_daily_forecasts_data(self):
url = BASE_URL + DAILY_FORECASTS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = await session.get(url)
if response is not None and response.status == 200:
self.daily_forecasts_data = await response.json()
await self.format_forecast_data()
async def format_forecast_data(self):
flattened = {}
days = len(self.daily_forecasts_data["data"])
for day in range(0, days):
icon = self.daily_forecasts_data["data"][day]["icon_descriptor"]
flattened["mdi_icon"] = MDI_ICON_MAP[icon]
uv = self.daily_forecasts_data["data"][day]["uv"]
flattened["uv_category"] = UV_MAP[uv["category"]]
flattened["uv_max_index"] = uv["max_index"]
flattened["uv_start_time"] = uv["start_time"]
flattened["uv_end_time"] = uv["end_time"]
rain = self.daily_forecasts_data["data"][day]["rain"]
flattened["rain_chance"] = rain["chance"]
flattened["rain_amount_min"] = rain["amount"]["min"]
if rain["amount"]["max"] is None:
flattened["rain_amount_max"] = flattened["rain_amount_min"]
flattened["rain_amount_range"] = rain["amount"]["min"]
else:
flattened["rain_amount_max"] = rain["amount"]["max"]
flattened["rain_amount_range"] = "{} to {}".format(
rain["amount"]["min"],
rain["amount"]["max"],
)
self.daily_forecasts_data["data"][day].update(flattened)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
await self.get_observations_data()
await self.get_daily_forecasts_data()
def geohash_encode(self, latitude, longitude, precision=6):
base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
lat_interval = (-90.0, 90.0)
lon_interval = (-180.0, 180.0)
geohash = []
bits = [16, 8, 4, 2, 1]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
| true
| true
|
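Collector above derives everything from a precision-6 geohash of the configured coordinates; the async fetchers then interpolate it into the BOM URLs. A hypothetical construction sketch (example coordinates; assumes the Collector class above is in scope, and the commented await would run inside an event loop):

collector = Collector(-37.8136, 144.9631)  # example: Melbourne
print(collector.geohash)                   # a 6-character base32 geohash
# asyncio.run(collector.async_update())    # would populate observations_data
#                                          # and daily_forecasts_data via aiohttp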
790a7a15d1e7dba1e510ec320a05921c9cba0383
| 5,199
|
py
|
Python
|
pyclient/zeroos/orchestrator/client/JobResult.py
|
5l1v3r1/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 3
|
2017-07-04T14:02:02.000Z
|
2019-07-06T23:34:08.000Z
|
pyclient/zeroos/orchestrator/client/JobResult.py
|
5l1v3r1/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 497
|
2017-05-31T07:55:40.000Z
|
2018-01-03T12:10:43.000Z
|
pyclient/zeroos/orchestrator/client/JobResult.py
|
zero-os/0-orchestrator
|
9373a4acb1517ff001df526925c224a7a93b3274
|
[
"Apache-2.0"
] | 8
|
2017-06-14T09:45:56.000Z
|
2021-02-01T18:12:55.000Z
|
"""
Auto-generated class for JobResult
"""
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from . import client_support
class JobResult(object):
"""
auto-generated. don't touch.
"""
@staticmethod
def create(data, id, level, name, startTime, state, stderr, stdout):
"""
:type data: str
:type id: str
:type level: int
:type name: EnumJobResultName
:type startTime: int
:type state: EnumJobResultState
:type stderr: str
:type stdout: str
:rtype: JobResult
"""
return JobResult(
data=data,
id=id,
level=level,
name=name,
startTime=startTime,
state=state,
stderr=stderr,
stdout=stdout,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'JobResult'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'data'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.data = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'level'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.level = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultName]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'startTime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.startTime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'state'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultState]
try:
self.state = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stderr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stderr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stdout'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stdout = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| 35.609589
| 107
| 0.601462
|
from .EnumJobResultName import EnumJobResultName
from .EnumJobResultState import EnumJobResultState
from . import client_support
class JobResult(object):
@staticmethod
def create(data, id, level, name, startTime, state, stderr, stdout):
return JobResult(
data=data,
id=id,
level=level,
name=name,
startTime=startTime,
state=state,
stderr=stderr,
stdout=stdout,
)
def __init__(self, json=None, **kwargs):
if json is None and not kwargs:
raise ValueError('No data or kwargs present')
class_name = 'JobResult'
create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
required_error = '{cls}: missing required property {prop}'
data = json or kwargs
property_name = 'data'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.data = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'id'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.id = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'level'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.level = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'name'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultName]
try:
self.name = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'startTime'
val = data.get(property_name)
if val is not None:
datatypes = [int]
try:
self.startTime = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'state'
val = data.get(property_name)
if val is not None:
datatypes = [EnumJobResultState]
try:
self.state = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stderr'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stderr = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
property_name = 'stdout'
val = data.get(property_name)
if val is not None:
datatypes = [str]
try:
self.stdout = client_support.val_factory(val, datatypes)
except ValueError as err:
raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
else:
raise ValueError(required_error.format(cls=class_name, prop=property_name))
def __str__(self):
return self.as_json(indent=4)
def as_json(self, indent=0):
return client_support.to_json(self, indent=indent)
def as_dict(self):
return client_support.to_dict(self)
| true
| true
|
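The generated __init__ above unrolls one identical validation block per property. A condensed restatement of that pattern (an editorial sketch, not the generator's output; the real client_support.val_factory also coerces values):

def validate(data, schema):
    # schema maps property name -> list of allowed types, mirroring `datatypes`
    # in the unrolled blocks above; every property here is required.
    out = {}
    for prop, datatypes in schema.items():
        val = data.get(prop)
        if val is None:
            raise ValueError('missing required property {}'.format(prop))
        if not isinstance(val, tuple(datatypes)):
            raise ValueError('unable to create {} from value: {!r}'.format(prop, val))
        out[prop] = val
    return out

print(validate({'id': 'job-1', 'level': 20}, {'id': [str], 'level': [int]}))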
790a7a5bf46496ccc644901f088550b7840a536b
| 16,556
|
py
|
Python
|
recipes/sdl2/all/conanfile.py
|
Rapatas/community
|
e776c5a50092ab4597b3ef20586d12dbcb3cec78
|
[
"MIT"
] | null | null | null |
recipes/sdl2/all/conanfile.py
|
Rapatas/community
|
e776c5a50092ab4597b3ef20586d12dbcb3cec78
|
[
"MIT"
] | null | null | null |
recipes/sdl2/all/conanfile.py
|
Rapatas/community
|
e776c5a50092ab4597b3ef20586d12dbcb3cec78
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class SDL2Conan(ConanFile):
# TODO: When porting to CCI rename this package to SDL (without 2)
name = "sdl2"
description = "Access to audio, keyboard, mouse, joystick, and graphics hardware via OpenGL, Direct3D and Vulkan"
topics = ("sdl2", "audio", "keyboard", "graphics", "opengl")
url = "https://github.com/bincrafters/conan-sdl2"
homepage = "https://www.libsdl.org"
license = "Zlib"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = ["cmake", "pkg_config"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"directx": [True, False],
"alsa": [True, False],
"jack": [True, False],
"pulse": [True, False],
"sndio": [True, False],
"nas": [True, False],
"esd": [True, False],
"arts": [True, False],
"x11": [True, False],
"xcursor": [True, False],
"xinerama": [True, False],
"xinput": [True, False],
"xrandr": [True, False],
"xscrnsaver": [True, False],
"xshape": [True, False],
"xvm": [True, False],
"wayland": [True, False],
"directfb": [True, False],
"iconv": [True, False],
"video_rpi": [True, False],
"sdl2main": [True, False],
"opengl": [True, False],
"opengles": [True, False],
"vulkan": [True, False],
"libunwind": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"directx": True,
"alsa": True,
"jack": True,
"pulse": True,
"sndio": False,
"nas": True,
"esd": False,
"arts": False,
"x11": True,
"xcursor": True,
"xinerama": True,
"xinput": True,
"xrandr": True,
"xscrnsaver": True,
"xshape": True,
"xvm": True,
"wayland": False,
"directfb": False,
"iconv": True,
"video_rpi": False,
"sdl2main": True,
"opengl": True,
"opengles": True,
"vulkan": True,
"libunwind": True,
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Linux":
del self.options.alsa
del self.options.jack
del self.options.pulse
del self.options.sndio
del self.options.nas
del self.options.esd
del self.options.arts
del self.options.x11
del self.options.xcursor
del self.options.xinerama
del self.options.xinput
del self.options.xrandr
del self.options.xscrnsaver
del self.options.xshape
del self.options.xvm
del self.options.wayland
del self.options.directfb
del self.options.video_rpi
del self.options.libunwind
if self.settings.os != "Windows":
del self.options.directx
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Macos" and not self.options.iconv:
raise ConanInvalidConfiguration("On macOS iconv can't be disabled")
def requirements(self):
if self.options.iconv:
self.requires("libiconv/1.16")
if self.settings.os == "Linux":
self.requires("xorg/system")
if self.options.alsa:
self.requires("libalsa/1.2.4")
if self.options.pulse:
self.requires("pulseaudio/13.0")
if self.options.opengl:
self.requires("opengl/system")
if self.options.get_safe("libunwind", False):
self.requires("libunwind/1.5.0")
def package_id(self):
del self.info.options.sdl2main
def build_requirements(self):
if self.settings.os == "Linux":
self.build_requires("pkgconf/1.7.3")
def system_requirements(self):
if self.settings.os == "Linux" and tools.os_info.is_linux:
if tools.os_info.with_apt or tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = []
packages_apt = []
packages_yum = []
packages_apt.append("libgbm-dev")
packages_yum.append("mesa-libgbm-devel")
if self.options.jack:
packages_apt.append("libjack-dev")
packages_yum.append("jack-audio-connection-kit-devel")
if self.options.sndio:
packages_apt.append("libsndio-dev")
if self.options.nas:
packages_apt.append("libaudio-dev")
packages_yum.append("nas-devel")
if self.options.esd:
packages_apt.append("libesd0-dev")
packages_yum.append("esound-devel")
if self.options.arts:
packages_apt.append("artsc0-dev")
if self.options.wayland:
packages_apt.extend(["libwayland-dev",
"wayland-protocols"])
packages_yum.extend(["wayland-devel",
"wayland-protocols-devel"])
if self.options.directfb:
packages_apt.append("libdirectfb-dev")
if tools.os_info.with_apt:
packages = packages_apt
elif tools.os_info.with_yum:
packages = packages_yum
for package in packages:
installer.install(package)
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if tools.Version(self.version) >= "2.0.14":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
'check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)',
'# check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)')
self._build_cmake()
def _check_pkg_config(self, option, package_name):
if option:
pkg_config = tools.PkgConfig(package_name)
if not pkg_config.provides:
raise ConanInvalidConfiguration("package %s is not available" % package_name)
def _check_dependencies(self):
if self.settings.os == "Linux":
self._check_pkg_config(self.options.jack, "jack")
self._check_pkg_config(self.options.esd, "esound")
self._check_pkg_config(self.options.wayland, "wayland-client")
self._check_pkg_config(self.options.wayland, "wayland-protocols")
self._check_pkg_config(self.options.directfb, "directfb")
def _configure_cmake(self):
if not self._cmake:
self._check_dependencies()
self._cmake = CMake(self)
            # FIXME: self.install_folder not defined? Necessary?
self._cmake.definitions["CONAN_INSTALL_FOLDER"] = self.install_folder
if self.settings.os != "Windows":
if not self.options.shared:
self._cmake.definitions["SDL_STATIC_PIC"] = self.options.fPIC
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self._cmake.definitions["HAVE_LIBC"] = True
self._cmake.definitions["SDL_SHARED"] = self.options.shared
self._cmake.definitions["SDL_STATIC"] = not self.options.shared
self._cmake.definitions["VIDEO_OPENGL"] = self.options.opengl
self._cmake.definitions["VIDEO_OPENGLES"] = self.options.opengles
self._cmake.definitions["VIDEO_VULKAN"] = self.options.vulkan
if self.settings.os == "Linux":
# See https://github.com/bincrafters/community/issues/696
self._cmake.definitions["SDL_VIDEO_DRIVER_X11_SUPPORTS_GENERIC_EVENTS"] = 1
self._cmake.definitions["ALSA"] = self.options.alsa
if self.options.alsa:
self._cmake.definitions["HAVE_ASOUNDLIB_H"] = True
self._cmake.definitions["HAVE_LIBASOUND"] = True
self._cmake.definitions["JACK"] = self.options.jack
self._cmake.definitions["PULSEAUDIO"] = self.options.pulse
self._cmake.definitions["SNDIO"] = self.options.sndio
self._cmake.definitions["NAS"] = self.options.nas
self._cmake.definitions["VIDEO_X11"] = self.options.x11
if self.options.x11:
self._cmake.definitions["HAVE_XEXT_H"] = True
self._cmake.definitions["VIDEO_X11_XCURSOR"] = self.options.xcursor
if self.options.xcursor:
self._cmake.definitions["HAVE_XCURSOR_H"] = True
self._cmake.definitions["VIDEO_X11_XINERAMA"] = self.options.xinerama
if self.options.xinerama:
self._cmake.definitions["HAVE_XINERAMA_H"] = True
self._cmake.definitions["VIDEO_X11_XINPUT"] = self.options.xinput
if self.options.xinput:
self._cmake.definitions["HAVE_XINPUT_H"] = True
self._cmake.definitions["VIDEO_X11_XRANDR"] = self.options.xrandr
if self.options.xrandr:
self._cmake.definitions["HAVE_XRANDR_H"] = True
self._cmake.definitions["VIDEO_X11_XSCRNSAVER"] = self.options.xscrnsaver
if self.options.xscrnsaver:
self._cmake.definitions["HAVE_XSS_H"] = True
self._cmake.definitions["VIDEO_X11_XSHAPE"] = self.options.xshape
if self.options.xshape:
self._cmake.definitions["HAVE_XSHAPE_H"] = True
self._cmake.definitions["VIDEO_X11_XVM"] = self.options.xvm
if self.options.xvm:
self._cmake.definitions["HAVE_XF86VM_H"] = True
self._cmake.definitions["VIDEO_WAYLAND"] = self.options.wayland
self._cmake.definitions["VIDEO_DIRECTFB"] = self.options.directfb
self._cmake.definitions["VIDEO_RPI"] = self.options.video_rpi
elif self.settings.os == "Windows":
self._cmake.definitions["DIRECTX"] = self.options.directx
self._cmake.definitions["HAVE_LIBUNWIND_H"] = self.options.get_safe("libunwind")
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def _build_cmake(self):
if self.options.get_safe("pulse"):
tools.rename("libpulse.pc", "libpulse-simple.pc")
lib_paths = [lib for dep in self.deps_cpp_info.deps for lib in self.deps_cpp_info[dep].lib_paths]
with tools.environment_append({"LIBRARY_PATH": os.pathsep.join(lib_paths)}):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "sdl2-config")
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "libdata"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def _add_libraries_from_pc(self, library, static=None):
if static is None:
static = not self.options.shared
pkg_config = tools.PkgConfig(library, static=static)
libs = [lib[2:] for lib in pkg_config.libs_only_l] # cut -l prefix
lib_paths = [lib[2:] for lib in pkg_config.libs_only_L] # cut -L prefix
self.cpp_info.components["libsdl2"].system_libs.extend(libs)
self.cpp_info.components["libsdl2"].libdirs.extend(lib_paths)
self.cpp_info.components["libsdl2"].sharedlinkflags.extend(pkg_config.libs_only_other)
self.cpp_info.components["libsdl2"].exelinkflags.extend(pkg_config.libs_only_other)
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "SDL2"
self.cpp_info.names["cmake_find_package_multi"] = "SDL2"
postfix = "d" if self.settings.build_type == "Debug" else ""
# SDL2
sdl2_cmake_target = "SDL2" if self.options.shared else "SDL2-static"
self.cpp_info.components["libsdl2"].names["cmake_find_package"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].names["cmake_find_package_multi"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].includedirs.append(os.path.join("include", "SDL2"))
self.cpp_info.components["libsdl2"].libs = ["SDL2" + postfix]
if self.options.iconv:
self.cpp_info.components["libsdl2"].requires.append("libiconv::libiconv")
if self.settings.os == "Linux":
self.cpp_info.components["libsdl2"].system_libs = ["dl", "rt", "pthread"]
self.cpp_info.components["libsdl2"].requires.append("xorg::xorg")
if self.options.alsa:
self.cpp_info.components["libsdl2"].requires.append("libalsa::libalsa")
if self.options.pulse:
self.cpp_info.components["libsdl2"].requires.append("pulseaudio::pulseaudio")
if self.options.opengl:
self.cpp_info.components["libsdl2"].requires.append("opengl::opengl")
if self.options.jack:
self._add_libraries_from_pc("jack")
if self.options.sndio:
self._add_libraries_from_pc("sndio")
if self.options.nas:
self.cpp_info.components["libsdl2"].system_libs.append("audio")
if self.options.esd:
self._add_libraries_from_pc("esound")
if self.options.directfb:
self._add_libraries_from_pc("directfb")
if self.options.video_rpi:
self.cpp_info.components["libsdl2"].system_libs.append("bcm_host")
self.cpp_info.components["libsdl2"].includedirs.extend([
"/opt/vc/include",
"/opt/vc/include/interface/vcos/pthreads",
"/opt/vc/include/interface/vmcs_host/linux"
])
self.cpp_info.components["libsdl2"].libdirs.append("/opt/vc/lib")
self.cpp_info.components["libsdl2"].sharedlinkflags.append("-Wl,-rpath,/opt/vc/lib")
self.cpp_info.components["libsdl2"].exelinkflags.append("-Wl,-rpath,/opt/vc/lib")
elif self.settings.os == "Macos":
self.cpp_info.components["libsdl2"].frameworks = ["Cocoa", "Carbon", "IOKit", "CoreVideo", "CoreAudio", "AudioToolbox", "ForceFeedback"]
if tools.Version(self.version) >= "2.0.14":
self.cpp_info.components["libsdl2"].frameworks.append("Metal")
elif self.settings.os == "Windows":
self.cpp_info.components["libsdl2"].system_libs = ["user32", "gdi32", "winmm", "imm32", "ole32", "oleaut32", "version", "uuid", "advapi32", "setupapi", "shell32"]
if self.settings.compiler == "gcc":
self.cpp_info.components["libsdl2"].system_libs.append("mingw32")
if self.options.get_safe("libunwind"):
self.cpp_info.components["libsdl2"].requires.append("libunwind::libunwind")
# SDL2main
if self.options.sdl2main:
self.cpp_info.components["sdl2main"].names["cmake_find_package"] = "SDL2main"
self.cpp_info.components["sdl2main"].names["cmake_find_package_multi"] = "SDL2main"
self.cpp_info.components["sdl2main"].libs = ["SDL2main" + postfix]
self.cpp_info.components["sdl2main"].requires = ["libsdl2"]
| 46.505618
| 174
| 0.59181
|
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class SDL2Conan(ConanFile):
name = "sdl2"
description = "Access to audio, keyboard, mouse, joystick, and graphics hardware via OpenGL, Direct3D and Vulkan"
topics = ("sdl2", "audio", "keyboard", "graphics", "opengl")
url = "https://github.com/bincrafters/conan-sdl2"
homepage = "https://www.libsdl.org"
license = "Zlib"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = ["cmake", "pkg_config"]
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"directx": [True, False],
"alsa": [True, False],
"jack": [True, False],
"pulse": [True, False],
"sndio": [True, False],
"nas": [True, False],
"esd": [True, False],
"arts": [True, False],
"x11": [True, False],
"xcursor": [True, False],
"xinerama": [True, False],
"xinput": [True, False],
"xrandr": [True, False],
"xscrnsaver": [True, False],
"xshape": [True, False],
"xvm": [True, False],
"wayland": [True, False],
"directfb": [True, False],
"iconv": [True, False],
"video_rpi": [True, False],
"sdl2main": [True, False],
"opengl": [True, False],
"opengles": [True, False],
"vulkan": [True, False],
"libunwind": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"directx": True,
"alsa": True,
"jack": True,
"pulse": True,
"sndio": False,
"nas": True,
"esd": False,
"arts": False,
"x11": True,
"xcursor": True,
"xinerama": True,
"xinput": True,
"xrandr": True,
"xscrnsaver": True,
"xshape": True,
"xvm": True,
"wayland": False,
"directfb": False,
"iconv": True,
"video_rpi": False,
"sdl2main": True,
"opengl": True,
"opengles": True,
"vulkan": True,
"libunwind": True,
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Linux":
del self.options.alsa
del self.options.jack
del self.options.pulse
del self.options.sndio
del self.options.nas
del self.options.esd
del self.options.arts
del self.options.x11
del self.options.xcursor
del self.options.xinerama
del self.options.xinput
del self.options.xrandr
del self.options.xscrnsaver
del self.options.xshape
del self.options.xvm
del self.options.wayland
del self.options.directfb
del self.options.video_rpi
del self.options.libunwind
if self.settings.os != "Windows":
del self.options.directx
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
if self.settings.os == "Macos" and not self.options.iconv:
raise ConanInvalidConfiguration("On macOS iconv can't be disabled")
def requirements(self):
if self.options.iconv:
self.requires("libiconv/1.16")
if self.settings.os == "Linux":
self.requires("xorg/system")
if self.options.alsa:
self.requires("libalsa/1.2.4")
if self.options.pulse:
self.requires("pulseaudio/13.0")
if self.options.opengl:
self.requires("opengl/system")
if self.options.get_safe("libunwind", False):
self.requires("libunwind/1.5.0")
def package_id(self):
del self.info.options.sdl2main
def build_requirements(self):
if self.settings.os == "Linux":
self.build_requires("pkgconf/1.7.3")
def system_requirements(self):
if self.settings.os == "Linux" and tools.os_info.is_linux:
if tools.os_info.with_apt or tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = []
packages_apt = []
packages_yum = []
packages_apt.append("libgbm-dev")
packages_yum.append("mesa-libgbm-devel")
if self.options.jack:
packages_apt.append("libjack-dev")
packages_yum.append("jack-audio-connection-kit-devel")
if self.options.sndio:
packages_apt.append("libsndio-dev")
if self.options.nas:
packages_apt.append("libaudio-dev")
packages_yum.append("nas-devel")
if self.options.esd:
packages_apt.append("libesd0-dev")
packages_yum.append("esound-devel")
if self.options.arts:
packages_apt.append("artsc0-dev")
if self.options.wayland:
packages_apt.extend(["libwayland-dev",
"wayland-protocols"])
packages_yum.extend(["wayland-devel",
"wayland-protocols-devel"])
if self.options.directfb:
packages_apt.append("libdirectfb-dev")
if tools.os_info.with_apt:
packages = packages_apt
elif tools.os_info.with_yum:
packages = packages_yum
for package in packages:
installer.install(package)
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if tools.Version(self.version) >= "2.0.14":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
'check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)',
'# check_library_exists(c iconv_open "" HAVE_BUILTIN_ICONV)')
self._build_cmake()
def _check_pkg_config(self, option, package_name):
if option:
pkg_config = tools.PkgConfig(package_name)
if not pkg_config.provides:
raise ConanInvalidConfiguration("package %s is not available" % package_name)
def _check_dependencies(self):
if self.settings.os == "Linux":
self._check_pkg_config(self.options.jack, "jack")
self._check_pkg_config(self.options.esd, "esound")
self._check_pkg_config(self.options.wayland, "wayland-client")
self._check_pkg_config(self.options.wayland, "wayland-protocols")
self._check_pkg_config(self.options.directfb, "directfb")
def _configure_cmake(self):
if not self._cmake:
self._check_dependencies()
self._cmake = CMake(self)
# FIXME: self.install_folder not defined? Necessary?
self._cmake.definitions["CONAN_INSTALL_FOLDER"] = self.install_folder
if self.settings.os != "Windows":
if not self.options.shared:
self._cmake.definitions["SDL_STATIC_PIC"] = self.options.fPIC
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self._cmake.definitions["HAVE_LIBC"] = True
self._cmake.definitions["SDL_SHARED"] = self.options.shared
self._cmake.definitions["SDL_STATIC"] = not self.options.shared
self._cmake.definitions["VIDEO_OPENGL"] = self.options.opengl
self._cmake.definitions["VIDEO_OPENGLES"] = self.options.opengles
self._cmake.definitions["VIDEO_VULKAN"] = self.options.vulkan
if self.settings.os == "Linux":
# See https://github.com/bincrafters/community/issues/696
self._cmake.definitions["SDL_VIDEO_DRIVER_X11_SUPPORTS_GENERIC_EVENTS"] = 1
self._cmake.definitions["ALSA"] = self.options.alsa
if self.options.alsa:
self._cmake.definitions["HAVE_ASOUNDLIB_H"] = True
self._cmake.definitions["HAVE_LIBASOUND"] = True
self._cmake.definitions["JACK"] = self.options.jack
self._cmake.definitions["PULSEAUDIO"] = self.options.pulse
self._cmake.definitions["SNDIO"] = self.options.sndio
self._cmake.definitions["NAS"] = self.options.nas
self._cmake.definitions["VIDEO_X11"] = self.options.x11
if self.options.x11:
self._cmake.definitions["HAVE_XEXT_H"] = True
self._cmake.definitions["VIDEO_X11_XCURSOR"] = self.options.xcursor
if self.options.xcursor:
self._cmake.definitions["HAVE_XCURSOR_H"] = True
self._cmake.definitions["VIDEO_X11_XINERAMA"] = self.options.xinerama
if self.options.xinerama:
self._cmake.definitions["HAVE_XINERAMA_H"] = True
self._cmake.definitions["VIDEO_X11_XINPUT"] = self.options.xinput
if self.options.xinput:
self._cmake.definitions["HAVE_XINPUT_H"] = True
self._cmake.definitions["VIDEO_X11_XRANDR"] = self.options.xrandr
if self.options.xrandr:
self._cmake.definitions["HAVE_XRANDR_H"] = True
self._cmake.definitions["VIDEO_X11_XSCRNSAVER"] = self.options.xscrnsaver
if self.options.xscrnsaver:
self._cmake.definitions["HAVE_XSS_H"] = True
self._cmake.definitions["VIDEO_X11_XSHAPE"] = self.options.xshape
if self.options.xshape:
self._cmake.definitions["HAVE_XSHAPE_H"] = True
self._cmake.definitions["VIDEO_X11_XVM"] = self.options.xvm
if self.options.xvm:
self._cmake.definitions["HAVE_XF86VM_H"] = True
self._cmake.definitions["VIDEO_WAYLAND"] = self.options.wayland
self._cmake.definitions["VIDEO_DIRECTFB"] = self.options.directfb
self._cmake.definitions["VIDEO_RPI"] = self.options.video_rpi
elif self.settings.os == "Windows":
self._cmake.definitions["DIRECTX"] = self.options.directx
self._cmake.definitions["HAVE_LIBUNWIND_H"] = self.options.get_safe("libunwind")
self._cmake.configure(build_dir=self._build_subfolder)
return self._cmake
def _build_cmake(self):
if self.options.get_safe("pulse"):
tools.rename("libpulse.pc", "libpulse-simple.pc")
lib_paths = [lib for dep in self.deps_cpp_info.deps for lib in self.deps_cpp_info[dep].lib_paths]
with tools.environment_append({"LIBRARY_PATH": os.pathsep.join(lib_paths)}):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "sdl2-config")
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "libdata"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def _add_libraries_from_pc(self, library, static=None):
if static is None:
static = not self.options.shared
pkg_config = tools.PkgConfig(library, static=static)
libs = [lib[2:] for lib in pkg_config.libs_only_l] # cut -l prefix
lib_paths = [lib[2:] for lib in pkg_config.libs_only_L] # cut -L prefix
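# e.g. (hypothetical pkg-config output) "-ljack" becomes "jack" and
# "-L/usr/lib" becomes "/usr/lib" after the slicing above.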
self.cpp_info.components["libsdl2"].system_libs.extend(libs)
self.cpp_info.components["libsdl2"].libdirs.extend(lib_paths)
self.cpp_info.components["libsdl2"].sharedlinkflags.extend(pkg_config.libs_only_other)
self.cpp_info.components["libsdl2"].exelinkflags.extend(pkg_config.libs_only_other)
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "SDL2"
self.cpp_info.names["cmake_find_package_multi"] = "SDL2"
postfix = "d" if self.settings.build_type == "Debug" else ""
# SDL2
sdl2_cmake_target = "SDL2" if self.options.shared else "SDL2-static"
self.cpp_info.components["libsdl2"].names["cmake_find_package"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].names["cmake_find_package_multi"] = sdl2_cmake_target
self.cpp_info.components["libsdl2"].includedirs.append(os.path.join("include", "SDL2"))
self.cpp_info.components["libsdl2"].libs = ["SDL2" + postfix]
if self.options.iconv:
self.cpp_info.components["libsdl2"].requires.append("libiconv::libiconv")
if self.settings.os == "Linux":
self.cpp_info.components["libsdl2"].system_libs = ["dl", "rt", "pthread"]
self.cpp_info.components["libsdl2"].requires.append("xorg::xorg")
if self.options.alsa:
self.cpp_info.components["libsdl2"].requires.append("libalsa::libalsa")
if self.options.pulse:
self.cpp_info.components["libsdl2"].requires.append("pulseaudio::pulseaudio")
if self.options.opengl:
self.cpp_info.components["libsdl2"].requires.append("opengl::opengl")
if self.options.jack:
self._add_libraries_from_pc("jack")
if self.options.sndio:
self._add_libraries_from_pc("sndio")
if self.options.nas:
self.cpp_info.components["libsdl2"].system_libs.append("audio")
if self.options.esd:
self._add_libraries_from_pc("esound")
if self.options.directfb:
self._add_libraries_from_pc("directfb")
if self.options.video_rpi:
self.cpp_info.components["libsdl2"].system_libs.append("bcm_host")
self.cpp_info.components["libsdl2"].includedirs.extend([
"/opt/vc/include",
"/opt/vc/include/interface/vcos/pthreads",
"/opt/vc/include/interface/vmcs_host/linux"
])
self.cpp_info.components["libsdl2"].libdirs.append("/opt/vc/lib")
self.cpp_info.components["libsdl2"].sharedlinkflags.append("-Wl,-rpath,/opt/vc/lib")
self.cpp_info.components["libsdl2"].exelinkflags.append("-Wl,-rpath,/opt/vc/lib")
elif self.settings.os == "Macos":
self.cpp_info.components["libsdl2"].frameworks = ["Cocoa", "Carbon", "IOKit", "CoreVideo", "CoreAudio", "AudioToolbox", "ForceFeedback"]
if tools.Version(self.version) >= "2.0.14":
self.cpp_info.components["libsdl2"].frameworks.append("Metal")
elif self.settings.os == "Windows":
self.cpp_info.components["libsdl2"].system_libs = ["user32", "gdi32", "winmm", "imm32", "ole32", "oleaut32", "version", "uuid", "advapi32", "setupapi", "shell32"]
if self.settings.compiler == "gcc":
self.cpp_info.components["libsdl2"].system_libs.append("mingw32")
if self.options.get_safe("libunwind"):
self.cpp_info.components["libsdl2"].requires.append("libunwind::libunwind")
# SDL2main
if self.options.sdl2main:
self.cpp_info.components["sdl2main"].names["cmake_find_package"] = "SDL2main"
self.cpp_info.components["sdl2main"].names["cmake_find_package_multi"] = "SDL2main"
self.cpp_info.components["sdl2main"].libs = ["SDL2main" + postfix]
self.cpp_info.components["sdl2main"].requires = ["libsdl2"]
| true
| true
|
790a7d984089ce5707a1f2ba9afe87dd9e67bac2
| 3,586
|
py
|
Python
|
datapack/data/scripts/quests/43_HelpTheSister/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
datapack/data/scripts/quests/43_HelpTheSister/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
datapack/data/scripts/quests/43_HelpTheSister/__init__.py
|
DigitalCoin1/L2SPERO
|
f9ec069804d7bf13f9c4bfb508db2eb6ce37ab94
|
[
"Unlicense"
] | null | null | null |
#quest by zerghase
import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "43_HelpTheSister"
COOPER=30829
GALLADUCCI=30097
CRAFTED_DAGGER=220
MAP_PIECE=7550
MAP=7551
PET_TICKET=7584
SPECTER=20171
SORROW_MAIDEN=20197
MAX_COUNT=30
MIN_LEVEL=26
class Quest (JQuest) :
def onEvent(self, event, st):
htmltext=event
if event=="1":
htmltext="30829-01.htm"
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event=="3" and st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-03.htm"
st.takeItems(CRAFTED_DAGGER,1)
st.set("cond","2")
elif event=="4" and st.getQuestItemsCount(MAP_PIECE)>=MAX_COUNT:
htmltext="30829-05.htm"
st.takeItems(MAP_PIECE,MAX_COUNT)
st.giveItems(MAP,1)
st.set("cond", "4")
elif event=="5" and st.getQuestItemsCount(MAP):
htmltext="30097-06.htm"
st.takeItems(MAP,1)
st.set("cond","5")
elif event=="7":
htmltext="30829-07.htm"
st.giveItems(PET_TICKET,1)
st.setState(COMPLETED)
st.exitQuest(0)
return htmltext
def onTalk(self, npc, player):
htmltext="<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId=npc.getNpcId()
id=st.getState()
if id==CREATED:
if player.getLevel()>=MIN_LEVEL:
htmltext="30829-00.htm"
else:
st.exitQuest(1)
htmltext="<html><body>This quest can only be taken by characters that have a minimum level of %s. Return when you are more experienced.</body></html>" % MIN_LEVEL
elif id==STARTED:
cond=st.getInt("cond")
if npcId==COOPER:
if cond==1:
if not st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-01a.htm"
else:
htmltext="30829-02.htm"
elif cond==2:
htmltext="30829-03a.htm"
elif cond==3:
htmltext="30829-04.htm"
elif cond==4:
htmltext="30829-05a.htm"
elif cond==5:
htmltext="30829-06.htm"
elif npcId==GALLADUCCI:
if cond==4 and st.getQuestItemsCount(MAP):
htmltext="30097-05.htm"
elif id==COMPLETED:
st.exitQuest(0)
htmltext="<html><body>This quest has already been completed.</body></html>"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
cond=st.getInt("cond")
if cond==2:
numItems,chance = divmod(100*Config.RATE_QUESTS_REWARD,100)
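# Drop scaling: e.g. with RATE_QUESTS_REWARD = 2.5, divmod(250, 100) gives
# numItems = 2 guaranteed map pieces plus chance = 50, i.e. the roll below
# grants one extra piece 50% of the time.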
if st.getRandom(100) < chance :
numItems = numItems +1
pieces=st.getQuestItemsCount(MAP_PIECE)
if pieces + numItems >= MAX_COUNT :
numItems = MAX_COUNT - pieces
if numItems != 0:
st.playSound("ItemSound.quest_middle")
st.set("cond", "3")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(MAP_PIECE,int(numItems))
return
QUEST=Quest(43,qn,"Help The Sister!")
CREATED=State('Start', QUEST)
STARTED=State('Started', QUEST)
COMPLETED=State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(COOPER)
QUEST.addTalkId(COOPER)
QUEST.addTalkId(GALLADUCCI)
QUEST.addKillId(SPECTER)
QUEST.addKillId(SORROW_MAIDEN)
| 28.919355
| 170
| 0.652259
|
import sys
from com.l2jfrozen import Config
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "43_HelpTheSister"
COOPER=30829
GALLADUCCI=30097
CRAFTED_DAGGER=220
MAP_PIECE=7550
MAP=7551
PET_TICKET=7584
SPECTER=20171
SORROW_MAIDEN=20197
MAX_COUNT=30
MIN_LEVEL=26
class Quest (JQuest) :
def onEvent(self, event, st):
htmltext=event
if event=="1":
htmltext="30829-01.htm"
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event=="3" and st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-03.htm"
st.takeItems(CRAFTED_DAGGER,1)
st.set("cond","2")
elif event=="4" and st.getQuestItemsCount(MAP_PIECE)>=MAX_COUNT:
htmltext="30829-05.htm"
st.takeItems(MAP_PIECE,MAX_COUNT)
st.giveItems(MAP,1)
st.set("cond", "4")
elif event=="5" and st.getQuestItemsCount(MAP):
htmltext="30097-06.htm"
st.takeItems(MAP,1)
st.set("cond","5")
elif event=="7":
htmltext="30829-07.htm"
st.giveItems(PET_TICKET,1)
st.setState(COMPLETED)
st.exitQuest(0)
return htmltext
def onTalk(self, npc, player):
htmltext="<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId=npc.getNpcId()
id=st.getState()
if id==CREATED:
if player.getLevel()>=MIN_LEVEL:
htmltext="30829-00.htm"
else:
st.exitQuest(1)
htmltext="<html><body>This quest can only be taken by characters that have a minimum level of %s. Return when you are more experienced.</body></html>" % MIN_LEVEL
elif id==STARTED:
cond=st.getInt("cond")
if npcId==COOPER:
if cond==1:
if not st.getQuestItemsCount(CRAFTED_DAGGER):
htmltext="30829-01a.htm"
else:
htmltext="30829-02.htm"
elif cond==2:
htmltext="30829-03a.htm"
elif cond==3:
htmltext="30829-04.htm"
elif cond==4:
htmltext="30829-05a.htm"
elif cond==5:
htmltext="30829-06.htm"
elif npcId==GALLADUCCI:
if cond==4 and st.getQuestItemsCount(MAP):
htmltext="30097-05.htm"
elif id==COMPLETED:
st.exitQuest(0)
htmltext="<html><body>This quest has already been completed.</body></html>"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
cond=st.getInt("cond")
if cond==2:
numItems,chance = divmod(100*Config.RATE_QUESTS_REWARD,100)
if st.getRandom(100) < chance :
numItems = numItems +1
pieces=st.getQuestItemsCount(MAP_PIECE)
if pieces + numItems >= MAX_COUNT :
numItems = MAX_COUNT - pieces
if numItems != 0:
st.playSound("ItemSound.quest_middle")
st.set("cond", "3")
else :
st.playSound("ItemSound.quest_itemget")
st.giveItems(MAP_PIECE,int(numItems))
return
QUEST=Quest(43,qn,"Help The Sister!")
CREATED=State('Start', QUEST)
STARTED=State('Started', QUEST)
COMPLETED=State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(COOPER)
QUEST.addTalkId(COOPER)
QUEST.addTalkId(GALLADUCCI)
QUEST.addKillId(SPECTER)
QUEST.addKillId(SORROW_MAIDEN)
| true
| true
|
790a7e29375efe0aee4b8f054dd71d6c77c4540b
| 7,709
|
py
|
Python
|
src/python/pants/reporting/reporting.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/reporting/reporting.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/reporting/reporting.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from six import StringIO
from pants.base.workunit import WorkUnitLabel
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.invalidation_report import InvalidationReport
from pants.reporting.plaintext_reporter import LabelFormat, PlainTextReporter, ToolOutputFormat
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report
from pants.reporting.reporter import ReporterDestination
from pants.reporting.reporting_server import ReportingServerManager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_mkdir
class Reporting(Subsystem):
options_scope = 'reporting'
@classmethod
def register_options(cls, register):
super(Reporting, cls).register_options(register)
register('--invalidation-report', type=bool,
help='Write a formatted report on the invalid objects to the specified path.')
register('--reports-dir', advanced=True, metavar='<dir>',
default=os.path.join(register.bootstrap.pants_workdir, 'reports'),
help='Write reports to this dir.')
register('--template-dir', advanced=True, metavar='<dir>', default=None,
help='Find templates for rendering in this dir.')
register('--console-label-format', advanced=True, type=dict,
default=PlainTextReporter.LABEL_FORMATTING,
help='Controls the printing of workunit labels to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=LabelFormat.keys()))
register('--console-tool-output-format', advanced=True, type=dict,
default=PlainTextReporter.TOOL_OUTPUT_FORMATTING,
help='Controls the printing of workunit tool output to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=ToolOutputFormat.keys()))
def initialize(self, run_tracker, start_time=None):
"""Initialize with the given RunTracker.
TODO: See `RunTracker.start`.
"""
run_id = run_tracker.initialize()
run_dir = os.path.join(self.get_options().reports_dir, run_id)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))
report = Report()
# Capture initial console reporting into a buffer. We'll do something with it once
# we know what the cmd-line flag settings are.
outfile = StringIO()
errfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(
outfile=outfile, errfile=errfile, log_level=Report.INFO,
color=False, indent=True, timing=False,
cache_stats=False,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
# Set up HTML reporting. We always want that.
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
html_dir=html_dir,
template_dir=self.get_options().template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
# Add some useful RunInfo.
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
port = ReportingServerManager().socket
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
# And start tracking the run.
run_tracker.start(report, start_time)
def _get_invalidation_report(self):
return InvalidationReport() if self.get_options().invalidation_report else None
@staticmethod
def _consume_stringio(f):
f.flush()
buffered_output = f.getvalue()
f.close()
return buffered_output
def update_reporting(self, global_options, is_quiet, run_tracker):
"""Updates reporting config once we've parsed cmd-line flags."""
# Get any output silently buffered in the old console reporter, and remove it.
removed_reporter = run_tracker.report.remove_reporter('capturing')
buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
buffered_err = self._consume_stringio(removed_reporter.settings.errfile)
log_level = Report.log_level_from_string(global_options.level or 'info')
# Ideally, we'd use terminfo or somesuch to discover whether a
# terminal truly supports color, but most terminals that don't support color set TERM=dumb.
color = global_options.colors and (os.getenv('TERM') != 'dumb')
timing = global_options.time
cache_stats = global_options.time # TODO: Separate flag for this?
if is_quiet:
console_reporter = QuietReporter(run_tracker,
QuietReporter.Settings(log_level=log_level, color=color,
timing=timing, cache_stats=cache_stats))
else:
# Set up the new console reporter.
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr,
color=color, indent=True, timing=timing, cache_stats=cache_stats,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if global_options.logdir:
# Also write plaintext logs to a file. This is completely separate from the html reports.
safe_mkdir(global_options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile,
color=False, indent=True, timing=True, cache_stats=True,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
invalidation_report = self._get_invalidation_report()
if invalidation_report:
run_id = run_tracker.run_info.get_info('id')
outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
invalidation_report.set_filename(outfile)
return invalidation_report
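# Rough lifecycle (sketch; the RunTracker and parsed options come from the
# pants runtime, assumed here): initialize() buffers early console output
# via the 'capturing' reporter, then update_reporting() swaps in the real
# console (or quiet) reporter once flags are known and replays the buffer.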
| 49.416667
| 109
| 0.705928
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from six import StringIO
from pants.base.workunit import WorkUnitLabel
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.invalidation_report import InvalidationReport
from pants.reporting.plaintext_reporter import LabelFormat, PlainTextReporter, ToolOutputFormat
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report
from pants.reporting.reporter import ReporterDestination
from pants.reporting.reporting_server import ReportingServerManager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_mkdir
class Reporting(Subsystem):
options_scope = 'reporting'
@classmethod
def register_options(cls, register):
super(Reporting, cls).register_options(register)
register('--invalidation-report', type=bool,
help='Write a formatted report on the invalid objects to the specified path.')
register('--reports-dir', advanced=True, metavar='<dir>',
default=os.path.join(register.bootstrap.pants_workdir, 'reports'),
help='Write reports to this dir.')
register('--template-dir', advanced=True, metavar='<dir>', default=None,
help='Find templates for rendering in this dir.')
register('--console-label-format', advanced=True, type=dict,
default=PlainTextReporter.LABEL_FORMATTING,
help='Controls the printing of workunit labels to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=LabelFormat.keys()))
register('--console-tool-output-format', advanced=True, type=dict,
default=PlainTextReporter.TOOL_OUTPUT_FORMATTING,
help='Controls the printing of workunit tool output to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=ToolOutputFormat.keys()))
def initialize(self, run_tracker, start_time=None):
run_id = run_tracker.initialize()
run_dir = os.path.join(self.get_options().reports_dir, run_id)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))
report = Report()
# we know what the cmd-line flag settings are.
outfile = StringIO()
errfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(
outfile=outfile, errfile=errfile, log_level=Report.INFO,
color=False, indent=True, timing=False,
cache_stats=False,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
# Set up HTML reporting. We always want that.
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
html_dir=html_dir,
template_dir=self.get_options().template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
# Add some useful RunInfo.
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
port = ReportingServerManager().socket
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
# And start tracking the run.
run_tracker.start(report, start_time)
def _get_invalidation_report(self):
return InvalidationReport() if self.get_options().invalidation_report else None
@staticmethod
def _consume_stringio(f):
f.flush()
buffered_output = f.getvalue()
f.close()
return buffered_output
def update_reporting(self, global_options, is_quiet, run_tracker):
# Get any output silently buffered in the old console reporter, and remove it.
removed_reporter = run_tracker.report.remove_reporter('capturing')
buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
buffered_err = self._consume_stringio(removed_reporter.settings.errfile)
log_level = Report.log_level_from_string(global_options.level or 'info')
# Ideally, we'd use terminfo or somesuch to discover whether a
# terminal truly supports color, but most terminals that don't support color set TERM=dumb.
color = global_options.colors and (os.getenv('TERM') != 'dumb')
timing = global_options.time
cache_stats = global_options.time # TODO: Separate flag for this?
if is_quiet:
console_reporter = QuietReporter(run_tracker,
QuietReporter.Settings(log_level=log_level, color=color,
timing=timing, cache_stats=cache_stats))
else:
# Set up the new console reporter.
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr,
color=color, indent=True, timing=timing, cache_stats=cache_stats,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if global_options.logdir:
# Also write plaintext logs to a file. This is completely separate from the html reports.
safe_mkdir(global_options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile,
color=False, indent=True, timing=True, cache_stats=True,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
invalidation_report = self._get_invalidation_report()
if invalidation_report:
run_id = run_tracker.run_info.get_info('id')
outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
invalidation_report.set_filename(outfile)
return invalidation_report
| true
| true
|
790a7e470fa6c89b16514513cfe402cd6f124ec4
| 375
|
py
|
Python
|
ContestAnalyzerOnline/utils/migrations/0007_delete_comment.py
|
rogercaminal/HamToolsManager
|
1c55db416d0caaa039a5251ed49c57d9d1766e53
|
[
"MIT"
] | 1
|
2017-10-02T15:48:13.000Z
|
2017-10-02T15:48:13.000Z
|
ContestAnalyzerOnline/utils/migrations/0007_delete_comment.py
|
rogercaminal/HamToolsManager
|
1c55db416d0caaa039a5251ed49c57d9d1766e53
|
[
"MIT"
] | null | null | null |
ContestAnalyzerOnline/utils/migrations/0007_delete_comment.py
|
rogercaminal/HamToolsManager
|
1c55db416d0caaa039a5251ed49c57d9d1766e53
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-15 15:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ContestAnalyzerOnline', '0006_auto_20171015_1445'),
]
operations = [
migrations.DeleteModel(
name='Comment',
),
]
| 19.736842
| 61
| 0.64
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ContestAnalyzerOnline', '0006_auto_20171015_1445'),
]
operations = [
migrations.DeleteModel(
name='Comment',
),
]
| true
| true
|
790a7ffd1e68d0e1b9faac9add257963cfa024e5
| 16,738
|
py
|
Python
|
libcheckers/movement.py
|
YuriyGuts/libcheckers
|
7466458485c2392732bba26bb7fc1ae08a85ef3b
|
[
"MIT"
] | 1
|
2017-10-25T13:29:29.000Z
|
2017-10-25T13:29:29.000Z
|
libcheckers/movement.py
|
YuriyGuts/libcheckers
|
7466458485c2392732bba26bb7fc1ae08a85ef3b
|
[
"MIT"
] | 1
|
2017-12-13T16:10:37.000Z
|
2017-12-13T16:10:37.000Z
|
libcheckers/movement.py
|
YuriyGuts/libcheckers
|
7466458485c2392732bba26bb7fc1ae08a85ef3b
|
[
"MIT"
] | 3
|
2017-10-30T12:28:41.000Z
|
2021-10-29T19:10:42.000Z
|
from abc import abstractmethod
from collections import deque
from copy import deepcopy
from libcheckers import BoardConfig, InvalidMoveException
from libcheckers.enum import Player, PieceClass, GameOverReason
from libcheckers.utils import (
index_to_coords,
coords_to_index,
get_indexes_between,
get_lines_of_sight,
is_black_home_row,
is_white_home_row,
)
class BaseMove(object):
"""
Represents a move a player can make in the checkers game.
"""
@abstractmethod
def apply(self, board):
"""
Apply a move to a board and retrieve the board produced by the move.
Parameters
----------
board
The board to apply the move to.
Returns
-------
Board
A new board that will be produced after applying this move.
"""
return board
@abstractmethod
def __eq__(self, other):
return False
@abstractmethod
def __repr__(self):
return super(BaseMove, self).__repr__()
class ForwardMove(BaseMove):
"""
Represents a free movement action (the one that does not capture any opponent pieces).
"""
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def apply(self, board):
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
is_backward_move = (
(board.owner[self.start_index] == Player.WHITE and self.end_index > self.start_index) or
(board.owner[self.start_index] == Player.BLACK and self.end_index < self.start_index)
)
if is_backward_move and board.piece_class[self.start_index] != PieceClass.KING:
msg = 'Cannot freely move backwards unless the piece is a king'
raise InvalidMoveException(msg)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
return new_board
def __eq__(self, other):
return (isinstance(other, ForwardMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Move: {0} -> {1}'.format(self.start_index, self.end_index)
class CaptureMove(BaseMove):
"""
Represents a move that captures a single opponent piece.
"""
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def find_opponent_square(self, board):
"""
Retrieve the index of the square that contains the enemy piece to be captured.
"""
path_indexes = get_indexes_between(self.start_index, self.end_index)
own_color = board.owner[self.start_index]
own_path_squares = [
index
for index in path_indexes
if board.owner[index] == own_color
]
opponent_path_squares = [
index
for index in path_indexes
if board.owner[index] and board.owner[index] != own_color
]
if len(own_path_squares) > 0:
msg = 'Cannot capture when own pieces are in the way: {0}'
raise InvalidMoveException(msg.format(', '.join(str(index) for index in own_path_squares)))
if len(opponent_path_squares) != 1:
msg = 'Cannot capture: must have exactly one opponent piece along the way'
raise InvalidMoveException(msg)
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
return opponent_path_squares[0]
def apply(self, board):
opponent_square = self.find_opponent_square(board)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
new_board.remove_piece(opponent_square)
return new_board
def __eq__(self, other):
return (isinstance(other, CaptureMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Capture: {0} -> {1}'.format(self.start_index, self.end_index)
class ComboCaptureMove(BaseMove):
"""
Represents a chain of capture moves.
"""
def __init__(self, moves):
self.moves = moves
def apply(self, board):
new_board = board
zombies_to_clear = []
for i, move in enumerate(self.moves):
# According to the rules, men should not be promoted when merely passing through
# the home row. They actually need to finish the move there to be promoted.
old_class = new_board.piece_class[move.start_index]
# Remove captured pieces only after the move is finished. Otherwise king moves
# like "forward, capture right, then capture left" would be allowed.
opponent_square = move.find_opponent_square(new_board)
zombies_to_clear.append(opponent_square)
new_board = move.apply(new_board)
new_board.owner[opponent_square] = Player.ZOMBIE
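# Player.ZOMBIE marks a captured-but-not-yet-removed piece: it still blocks
# jumps (see get_capturable_pieces), matching the rule that captured pieces
# stay on the board until the whole combo move ends.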
# Restore the piece class if it was "accidentally" promoted in between the moves.
if i < len(self.moves) - 1:
new_board.piece_class[move.end_index] = old_class
# Wipe the zombies.
for zombie in zombies_to_clear:
new_board.remove_piece(zombie)
return new_board
def __eq__(self, other):
return (isinstance(other, ComboCaptureMove) and
len(self.moves) == len(other.moves) and
all(self.moves[i] == other.moves[i] for i in range(len(self.moves))))
def __repr__(self):
return 'Combo x{0}: [{1}]'.format(len(self.moves), ', '.join(str(move) for move in self.moves))
class Board(object):
"""
Represents an international checkers game board and
contains the movement logic of the game pieces.
"""
def __init__(self):
self.owner = [None] * (BoardConfig.total_squares + 1)
self.piece_class = [None] * (BoardConfig.total_squares + 1)
def move_piece(self, start_index, end_index):
"""
Move an existing game piece from point A to point B.
"""
self.owner[end_index] = self.owner[start_index]
self.owner[start_index] = None
self.piece_class[end_index] = self.piece_class[start_index]
self.piece_class[start_index] = None
# Promote the piece if it has reached the opponent's home row.
if self.owner[end_index] == Player.WHITE and is_black_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
if self.owner[end_index] == Player.BLACK and is_white_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
def add_piece(self, index, player, piece_class):
"""
Place a new piece on the board with the specified owner and class.
"""
self.owner[index] = player
self.piece_class[index] = piece_class
def remove_piece(self, index):
"""
Clear the specified square from the board.
"""
self.owner[index] = None
self.piece_class[index] = None
def get_player_squares(self, player):
"""
Get all squares on the board owned by the specified player.
"""
return [
index
for index in range(1, BoardConfig.total_squares + 1)
if self.owner[index] == player
]
def get_free_movement_destinations(self, index):
"""
Get all allowed destinations for free movement for the piece at the specified square.
"""
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 1
lines_of_sight = get_lines_of_sight(index, visibility_range)
# Men can only move forward, and the direction of forward depends on the color.
if own_class == PieceClass.MAN and own_color == Player.WHITE:
lines_of_sight = lines_of_sight[:2]
if own_class == PieceClass.MAN and own_color == Player.BLACK:
lines_of_sight = lines_of_sight[-2:]
result = []
for line in lines_of_sight:
for i in range(0, len(line)):
# Cannot move beyond another piece if not capturing.
if self.owner[line[i]]:
break
result.append(line[i])
return result
def get_capturable_pieces(self, index):
"""
Get all squares that contain opponent's pieces capturable from the specified position.
"""
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 2
lines_of_sight = get_lines_of_sight(index, visibility_range)
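# A man only needs to see 2 squares along each diagonal (the enemy piece
# plus the landing square behind it); a king scans the whole diagonal.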
result = []
for line in lines_of_sight:
for i in range(0, len(line) - 1):
# Cannot jump over own pieces or previously captured pieces.
if self.owner[line[i]] in (own_color, Player.ZOMBIE):
break
# Cannot capture protected pieces.
if self.owner[line[i]] and self.owner[line[i + 1]]:
break
# Can only capture if the square following the piece is empty.
if self.owner[line[i]] and self.owner[line[i]] != own_color and not self.owner[line[i + 1]]:
result.append(line[i])
break
return result
def get_available_capture_landing_positions(self, attacker_index, capture_index):
"""
If the specified square is captured by the specified attacker,
get all possible squares the attacker can land on.
"""
own_class = self.piece_class[attacker_index]
attacker_row, attacker_col = index_to_coords(attacker_index)
capture_row, capture_col = index_to_coords(capture_index)
# Calculate the unit movement vector.
movement_row = (capture_row - attacker_row) // abs(capture_row - attacker_row)
movement_col = (capture_col - attacker_col) // abs(capture_col - attacker_col)
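# e.g. an attacker at (2, 2) capturing a piece at (4, 4) yields the unit
# vector (1, 1), so landing squares are probed at (5, 5), (6, 6), ...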
result = []
current_row = capture_row + movement_row
current_col = capture_col + movement_col
if own_class == PieceClass.MAN:
return [coords_to_index(current_row, current_col)]
# Kings can make arbitrarily long jumps as long as they capture only one piece.
while 1 <= current_row <= BoardConfig.board_dim and 1 <= current_col <= BoardConfig.board_dim:
current_index = coords_to_index(current_row, current_col)
if not self.owner[current_index]:
result.append(current_index)
current_row += movement_row
current_col += movement_col
else:
break
return result
def get_capture_sequence_candidates(self, player):
"""
Get all possible capture move sequences (not necessarily maximum ones)
starting from every piece owned by the specified player.
"""
player_squares = self.get_player_squares(player)
# Check if there are any pieces in our line of sight that can be captured.
attack_options = []
for attacker in player_squares:
attack_options.extend([
(attacker, target)
for target in self.get_capturable_pieces(attacker)
])
# Run a tree traversal (BFS) to find all capture sequences, and choose the longest ones.
capture_sequences = []
# Each item in the queue is a 3-tuple: (board, move, previous moves).
queue = deque()
# Initial queue items: first step in each possible sequence.
for attacker, target in attack_options:
queue.extend([
(self, CaptureMove(attacker, landing), [])
for landing in self.get_available_capture_landing_positions(attacker, target)
])
# Main search queue.
while queue:
board_before, move, prev_moves = queue.popleft()
# Do not allow promoting the piece if it does not finish the move on the home row.
class_before = board_before.piece_class[move.start_index]
# Keep the captured pieces because they cannot be removed till the end of turn.
opponent_square = move.find_opponent_square(board_before)
board_after = move.apply(board_before)
board_after.owner[opponent_square] = Player.ZOMBIE
board_after.piece_class[move.end_index] = class_before
next_attack_options = [
(move.end_index, target)
for target in board_after.get_capturable_pieces(move.end_index)
]
# Terminal position, nothing more to capture.
if not next_attack_options:
capture_sequences.append(prev_moves + [move])
# Search deeper for the consecutive captures.
for attacker, target in next_attack_options:
queue.extend([
(board_after, CaptureMove(attacker, landing), prev_moves + [move])
for landing in board_after.get_available_capture_landing_positions(attacker, target)
])
return capture_sequences
def get_available_moves(self, player):
"""
For the specified player, get the list of all allowed moves that are applicable
to this board according to the game rules.
"""
result = []
capture_sequences = self.get_capture_sequence_candidates(player)
if not capture_sequences:
# There are no pieces we must capture. Free movement is allowed.
for source in self.get_player_squares(player):
result.extend([
ForwardMove(source, destination)
for destination in self.get_free_movement_destinations(source)
])
else:
# There's a piece we must capture. Rules demand we capture as many as possible.
max_seq_length = max(len(seq) for seq in capture_sequences)
result.extend([
ComboCaptureMove(seq) if len(seq) > 1 else seq[0]
for seq in capture_sequences
if len(seq) == max_seq_length
])
return result
def check_game_over(self, player_turn):
"""
Check if the game board is in a terminal state from the specified player's point of view.
(e.g. a certain player has won or lost, or there is a draw).
"""
white_moves = self.get_available_moves(Player.WHITE)
black_moves = self.get_available_moves(Player.BLACK)
# If a player is unable to move, they lose.
if player_turn == Player.WHITE and not white_moves:
return GameOverReason.BLACK_WON
if player_turn == Player.BLACK and not black_moves:
return GameOverReason.WHITE_WON
# If both players have only one king left, the game is a draw.
white_squares = self.get_player_squares(Player.WHITE)
black_squares = self.get_player_squares(Player.BLACK)
only_one_king_each = (
len(white_squares) == 1 and
len(black_squares) == 1 and
self.piece_class[white_squares[0]] == PieceClass.KING and
self.piece_class[black_squares[0]] == PieceClass.KING and
not self.get_capturable_pieces(white_squares[0]) and
not self.get_capturable_pieces(black_squares[0])
)
if only_one_king_each:
return GameOverReason.DRAW
return None
def clone(self):
"""
Create an independent copy of this board.
"""
return deepcopy(self)
def __repr__(self):
return 'White: {0} | Black: {1}'.format(
', '.join(str(idx) for idx in self.get_player_squares(Player.WHITE)),
', '.join(str(idx) for idx in self.get_player_squares(Player.BLACK)),
)
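# Minimal usage sketch (indexes follow the 1..50 numbering implied by
# BoardConfig; the squares below are illustrative, not a known position):
#
#   board = Board()
#   board.add_piece(28, Player.WHITE, PieceClass.MAN)
#   board.add_piece(23, Player.BLACK, PieceClass.MAN)
#   moves = board.get_available_moves(Player.WHITE)
#   # If any capture exists, only the maximum-length capture moves are returned.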
| 36.308026
| 108
| 0.623193
|
from abc import abstractmethod
from collections import deque
from copy import deepcopy
from libcheckers import BoardConfig, InvalidMoveException
from libcheckers.enum import Player, PieceClass, GameOverReason
from libcheckers.utils import (
index_to_coords,
coords_to_index,
get_indexes_between,
get_lines_of_sight,
is_black_home_row,
is_white_home_row,
)
class BaseMove(object):
@abstractmethod
def apply(self, board):
return board
@abstractmethod
def __eq__(self, other):
return False
@abstractmethod
def __repr__(self):
return super(BaseMove, self).__repr__()
class ForwardMove(BaseMove):
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def apply(self, board):
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
is_backward_move = (
(board.owner[self.start_index] == Player.WHITE and self.end_index > self.start_index) or
(board.owner[self.start_index] == Player.BLACK and self.end_index < self.start_index)
)
if is_backward_move and board.piece_class[self.start_index] != PieceClass.KING:
msg = 'Cannot freely move backwards unless the piece is a king'
raise InvalidMoveException(msg)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
return new_board
def __eq__(self, other):
return (isinstance(other, ForwardMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Move: {0} -> {1}'.format(self.start_index, self.end_index)
class CaptureMove(BaseMove):
def __init__(self, start_index, end_index):
self.start_index = start_index
self.end_index = end_index
def find_opponent_square(self, board):
path_indexes = get_indexes_between(self.start_index, self.end_index)
own_color = board.owner[self.start_index]
own_path_squares = [
index
for index in path_indexes
if board.owner[index] == own_color
]
opponent_path_squares = [
index
for index in path_indexes
if board.owner[index] and board.owner[index] != own_color
]
if len(own_path_squares) > 0:
msg = 'Cannot capture when own pieces are in the way: {0}'
raise InvalidMoveException(msg.format(', '.join(str(index) for index in own_path_squares)))
if len(opponent_path_squares) != 1:
msg = 'Cannot capture: must have exactly one opponent piece along the way'
raise InvalidMoveException(msg)
if not board.owner[self.start_index]:
msg = 'Cannot move from an empty square ({0})'.format(self.start_index)
raise InvalidMoveException(msg)
if board.owner[self.end_index]:
msg = 'Cannot move to a non-empty square ({0})'.format(self.end_index)
raise InvalidMoveException(msg)
return opponent_path_squares[0]
def apply(self, board):
opponent_square = self.find_opponent_square(board)
new_board = board.clone()
new_board.move_piece(self.start_index, self.end_index)
new_board.remove_piece(opponent_square)
return new_board
def __eq__(self, other):
return (isinstance(other, CaptureMove) and
self.start_index == other.start_index and
self.end_index == other.end_index)
def __repr__(self):
return 'Capture: {0} -> {1}'.format(self.start_index, self.end_index)
class ComboCaptureMove(BaseMove):
def __init__(self, moves):
self.moves = moves
def apply(self, board):
new_board = board
zombies_to_clear = []
for i, move in enumerate(self.moves):
old_class = new_board.piece_class[move.start_index]
opponent_square = move.find_opponent_square(new_board)
zombies_to_clear.append(opponent_square)
new_board = move.apply(new_board)
new_board.owner[opponent_square] = Player.ZOMBIE
if i < len(self.moves) - 1:
new_board.piece_class[move.end_index] = old_class
for zombie in zombies_to_clear:
new_board.remove_piece(zombie)
return new_board
def __eq__(self, other):
return (isinstance(other, ComboCaptureMove) and
len(self.moves) == len(other.moves) and
all(self.moves[i] == other.moves[i] for i in range(len(self.moves))))
def __repr__(self):
return 'Combo x{0}: [{1}]'.format(len(self.moves), ', '.join(str(move) for move in self.moves))
class Board(object):
def __init__(self):
self.owner = [None] * (BoardConfig.total_squares + 1)
self.piece_class = [None] * (BoardConfig.total_squares + 1)
def move_piece(self, start_index, end_index):
self.owner[end_index] = self.owner[start_index]
self.owner[start_index] = None
self.piece_class[end_index] = self.piece_class[start_index]
self.piece_class[start_index] = None
if self.owner[end_index] == Player.WHITE and is_black_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
if self.owner[end_index] == Player.BLACK and is_white_home_row(end_index):
self.piece_class[end_index] = PieceClass.KING
def add_piece(self, index, player, piece_class):
self.owner[index] = player
self.piece_class[index] = piece_class
def remove_piece(self, index):
self.owner[index] = None
self.piece_class[index] = None
def get_player_squares(self, player):
return [
index
for index in range(1, BoardConfig.total_squares + 1)
if self.owner[index] == player
]
def get_free_movement_destinations(self, index):
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 1
lines_of_sight = get_lines_of_sight(index, visibility_range)
# Men can only move forward, and the direction of forward depends on the color.
if own_class == PieceClass.MAN and own_color == Player.WHITE:
lines_of_sight = lines_of_sight[:2]
if own_class == PieceClass.MAN and own_color == Player.BLACK:
lines_of_sight = lines_of_sight[-2:]
result = []
for line in lines_of_sight:
for i in range(0, len(line)):
# Cannot move beyond another piece if not capturing.
if self.owner[line[i]]:
break
result.append(line[i])
return result
def get_capturable_pieces(self, index):
own_color = self.owner[index]
own_class = self.piece_class[index]
visibility_range = BoardConfig.board_dim if own_class == PieceClass.KING else 2
lines_of_sight = get_lines_of_sight(index, visibility_range)
result = []
for line in lines_of_sight:
for i in range(0, len(line) - 1):
# Cannot jump over own pieces or previously captured pieces.
if self.owner[line[i]] in (own_color, Player.ZOMBIE):
break
# Cannot capture protected pieces.
if self.owner[line[i]] and self.owner[line[i + 1]]:
break
# Can only capture if the square following the piece is empty.
if self.owner[line[i]] and self.owner[line[i]] != own_color and not self.owner[line[i + 1]]:
result.append(line[i])
break
return result
def get_available_capture_landing_positions(self, attacker_index, capture_index):
own_class = self.piece_class[attacker_index]
attacker_row, attacker_col = index_to_coords(attacker_index)
capture_row, capture_col = index_to_coords(capture_index)
# Calculate the unit movement vector.
movement_row = (capture_row - attacker_row) // abs(capture_row - attacker_row)
movement_col = (capture_col - attacker_col) // abs(capture_col - attacker_col)
result = []
current_row = capture_row + movement_row
current_col = capture_col + movement_col
if own_class == PieceClass.MAN:
return [coords_to_index(current_row, current_col)]
# Kings can make arbitrarily long jumps as long as they capture only one piece.
while 1 <= current_row <= BoardConfig.board_dim and 1 <= current_col <= BoardConfig.board_dim:
current_index = coords_to_index(current_row, current_col)
if not self.owner[current_index]:
result.append(current_index)
current_row += movement_row
current_col += movement_col
else:
break
return result
def get_capture_sequence_candidates(self, player):
player_squares = self.get_player_squares(player)
# Check if there are any pieces in our line of sight that can be captured.
attack_options = []
for attacker in player_squares:
attack_options.extend([
(attacker, target)
for target in self.get_capturable_pieces(attacker)
])
# Run a tree traversal (BFS) to find all capture sequences, and choose the longest ones.
capture_sequences = []
# Each item in the queue is a 3-tuple: (board, move, previous moves).
queue = deque()
# Initial queue items: first step in each possible sequence.
for attacker, target in attack_options:
queue.extend([
(self, CaptureMove(attacker, landing), [])
for landing in self.get_available_capture_landing_positions(attacker, target)
])
# Main search queue.
while queue:
board_before, move, prev_moves = queue.popleft()
# Do not allow promoting the piece if it does not finish the move on the home row.
class_before = board_before.piece_class[move.start_index]
# Keep the captured pieces because they cannot be removed till the end of turn.
opponent_square = move.find_opponent_square(board_before)
board_after = move.apply(board_before)
board_after.owner[opponent_square] = Player.ZOMBIE
board_after.piece_class[move.end_index] = class_before
next_attack_options = [
(move.end_index, target)
for target in board_after.get_capturable_pieces(move.end_index)
]
# Terminal position, nothing more to capture.
if not next_attack_options:
capture_sequences.append(prev_moves + [move])
# Search deeper for the consecutive captures.
for attacker, target in next_attack_options:
queue.extend([
(board_after, CaptureMove(attacker, landing), prev_moves + [move])
for landing in board_after.get_available_capture_landing_positions(attacker, target)
])
return capture_sequences
def get_available_moves(self, player):
result = []
capture_sequences = self.get_capture_sequence_candidates(player)
if not capture_sequences:
# There are no pieces we must capture. Free movement is allowed.
for source in self.get_player_squares(player):
result.extend([
ForwardMove(source, destination)
for destination in self.get_free_movement_destinations(source)
])
else:
# There's a piece we must capture. Rules demand we capture as many as possible.
max_seq_length = max(len(seq) for seq in capture_sequences)
result.extend([
ComboCaptureMove(seq) if len(seq) > 1 else seq[0]
for seq in capture_sequences
if len(seq) == max_seq_length
])
return result
def check_game_over(self, player_turn):
white_moves = self.get_available_moves(Player.WHITE)
black_moves = self.get_available_moves(Player.BLACK)
if player_turn == Player.WHITE and not white_moves:
return GameOverReason.BLACK_WON
if player_turn == Player.BLACK and not black_moves:
return GameOverReason.WHITE_WON
white_squares = self.get_player_squares(Player.WHITE)
black_squares = self.get_player_squares(Player.BLACK)
only_one_king_each = (
len(white_squares) == 1 and
len(black_squares) == 1 and
self.piece_class[white_squares[0]] == PieceClass.KING and
self.piece_class[black_squares[0]] == PieceClass.KING and
not self.get_capturable_pieces(white_squares[0]) and
not self.get_capturable_pieces(black_squares[0])
)
if only_one_king_each:
return GameOverReason.DRAW
return None
def clone(self):
return deepcopy(self)
def __repr__(self):
return 'White: {0} | Black: {1}'.format(
', '.join(str(idx) for idx in self.get_player_squares(Player.WHITE)),
', '.join(str(idx) for idx in self.get_player_squares(Player.BLACK)),
)
| true
| true
|
790a80009f1236770c6e63048445d0da22b1ea8d
| 1,554
|
py
|
Python
|
day12/main.py
|
Floozutter/aoc-2019-speedrun
|
e5704b03afccc6edb8f43f19fdf3a338c43a3507
|
[
"Unlicense"
] | null | null | null |
day12/main.py
|
Floozutter/aoc-2019-speedrun
|
e5704b03afccc6edb8f43f19fdf3a338c43a3507
|
[
"Unlicense"
] | null | null | null |
day12/main.py
|
Floozutter/aoc-2019-speedrun
|
e5704b03afccc6edb8f43f19fdf3a338c43a3507
|
[
"Unlicense"
] | null | null | null |
INPUTPATH = "input.txt"
#INPUTPATH = "input-test.txt"
with open(INPUTPATH) as ifile:
raw = ifile.read()
from typing import Tuple
def line_to_pos(line: str) -> Tuple[int, ...]:
filtered = "".join(c for c in line if c.isdigit() or c in {"-", ","})
return tuple(map(int, filtered.split(",")))
starts = tuple(zip(*map(line_to_pos, raw.strip().split("\n"))))
from itertools import combinations
from typing import List, Iterable
class Axis:
poss: List[int]
vels: List[int]
def __init__(self, start_poss: Iterable[int]) -> None:
self.poss = list(start_poss)
self.vels = [0] * len(self.poss)
def __eq__(self, other) -> bool:
return self.poss == other.poss and self.vels == other.vels
def step(self) -> None:
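# Puzzle rules: every unordered pair pulls the two velocities one step
# toward each other, then each position advances by its velocity.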
for i, j in combinations(range(len(self.poss)), 2):
a, b = self.poss[i], self.poss[j]
diff = 1 if a < b else -1 if a > b else 0
self.vels[i] += diff
self.vels[j] -= diff
for i, vel in enumerate(self.vels):
self.poss[i] += vel
system = tuple(map(Axis, starts))
for axis in system:
for _ in range(1000):
axis.step()
pos_by_moon = zip(*(axis.poss for axis in system))
vel_by_moon = zip(*(axis.vels for axis in system))
print(sum(
sum(map(abs, pos)) * sum(map(abs, vel))
for pos, vel in zip(pos_by_moon, vel_by_moon)
))
def cycle_period(start_poss: Iterable[int]) -> int:
tort = Axis(start_poss) # Get some rest, buddy. :3
hare = Axis(tort.poss) # Up for a run? >:3c
hare.step()
steps = 1
while hare != tort:
hare.step()
steps += 1
return steps
from math import lcm
print(lcm(*map(cycle_period, starts)))
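# Editorial sketch (not part of the original file): the axes evolve
# independently, so the system period is the lcm of the per-axis periods.
# The toy periods below happen to match the puzzle's first sample system.
from math import lcm
axis_periods = [18, 28, 44]
print(lcm(*axis_periods))  # 2772: first step at which all three axes realign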
| 29.884615
| 70
| 0.664093
|
INPUTPATH = "input.txt"
with open(INPUTPATH) as ifile:
raw = ifile.read()
from typing import Tuple
def line_to_pos(line: str) -> Tuple[int, ...]:
filtered = "".join(c for c in line if c.isdigit() or c in {"-", ","})
return tuple(map(int, filtered.split(",")))
starts = tuple(zip(*map(line_to_pos, raw.strip().split("\n"))))
from itertools import combinations
from typing import List, Iterable
class Axis:
poss: List[int]
vels: List[int]
def __init__(self, start_poss: Iterable[int]) -> None:
self.poss = list(start_poss)
self.vels = [0] * len(self.poss)
def __eq__(self, other) -> bool:
return self.poss == other.poss and self.vels == other.vels
def step(self) -> None:
for i, j in combinations(range(len(self.poss)), 2):
a, b = self.poss[i], self.poss[j]
diff = 1 if a < b else -1 if a > b else 0
self.vels[i] += diff
self.vels[j] -= diff
for i, vel in enumerate(self.vels):
self.poss[i] += vel
system = tuple(map(Axis, starts))
for axis in system:
for _ in range(1000):
axis.step()
pos_by_moon = zip(*(axis.poss for axis in system))
vel_by_moon = zip(*(axis.vels for axis in system))
print(sum(
sum(map(abs, pos)) * sum(map(abs, vel))
for pos, vel in zip(pos_by_moon, vel_by_moon)
))
def cycle_period(start_poss: Iterable[int]) -> int:
tort = Axis(start_poss)
hare = Axis(tort.poss)
hare.step()
steps = 1
while hare != tort:
hare.step()
steps += 1
return steps
from math import lcm
print(lcm(*map(cycle_period, starts)))
| true
| true
|
790a8027876c66ca08f0327f7cfa1d16be2339db
| 1,206
|
py
|
Python
|
setup.py
|
fm100/simplefb
|
41f818f143fd384a96770a7db2513bcf7e3402fa
|
[
"MIT"
] | null | null | null |
setup.py
|
fm100/simplefb
|
41f818f143fd384a96770a7db2513bcf7e3402fa
|
[
"MIT"
] | null | null | null |
setup.py
|
fm100/simplefb
|
41f818f143fd384a96770a7db2513bcf7e3402fa
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name='simplefb',
version='0.2.0a1',
description='A simple facebook graph api and auth Mixins',
url='https://github.com/fm100/simplefb',
author='Freddie Park',
author_email='sorelove@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='facebook graph api auth',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
)
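# Editorial usage sketch (not part of the original file): with this setup.py at
# the project root, a local build-and-install round trip looks like
#   python -m pip install .
#   python -c "import simplefb"
# (assumes pip and setuptools are available in the environment).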
| 31.736842
| 77
| 0.631012
|
from setuptools import setup, find_packages
setup(
name='simplefb',
version='0.2.0a1',
description='A simple facebook graph api and auth Mixins',
url='https://github.com/fm100/simplefb',
author='Freddie Park',
author_email='sorelove@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='facebook graph api auth',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
)
| true
| true
|
790a807b3ac5ba7847c3dc635b9c5b34f29964d8
| 3,813
|
py
|
Python
|
Python/k-similar-strings.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/k-similar-strings.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/k-similar-strings.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(n * n!/(c_a!*...*c_z!), n is the length of A, B,
# c_a...c_z is the count of each alphabet,
# n = sum(c_a...c_z)
# Space: O(n * n!/(c_a!*...*c_z!)
# 854
# Strings A and B are K-similar (for some non-negative integer K)
# if we can swap the positions of two letters
# in A exactly K times so that the resulting string equals B.
#
# Given two anagrams A and B, return the smallest K for which A and B are
# K-similar.
#
# Example 1:
#
# Input: A = "ab", B = "ba"
# Output: 1
# Example 2:
#
# Input: A = "abc", B = "bca"
# Output: 2
# Example 3:
#
# Input: A = "abac", B = "baca"
# Output: 2
# Example 4:
#
# Input: A = "aabc", B = "abca"
# Output: 2
# Note:
# - 1 <= A.length == B.length <= 20
# - A and B contain only lowercase letters from
# the set {'a', 'b', 'c', 'd', 'e', 'f'}
# Solution Framework:
# The underlying graph of the problem is a graph with 6 nodes 'a', 'b', ..., 'f' and the edges A[i] -> B[i].
# Our goal is for this graph to have only self-edges (edges of the form a -> a.)
# If A = 'ca...' and B = 'ab...', then the first two edges of the underlying graph are c -> a and a -> b;
# and a swap between A[1] and A[0] changes these two edges to the single edge c -> b. Let's call this type
# of operation 'cutting corners'. Intuitively, our optimal swap schedule always increases the # of matches
# (A[i] == B[i]s) for each swap, so cutting corners is the only type of operation we need to consider.
# (This is essentially the happy swap assumption, proved in 765 - Couples Holding Hands)
#
# Now consider 'cycle decomposition' of the underlying graph. [This decomposition (or the # of cycles),
# is not necessarily unique.] Through operations of cutting corners, we'll delete all the (non-self) edges.
# Each cycle of length k requires k-1 operations to delete. Thus, the answer is just the minimum possible
# value of sum(C_k - 1), where C_1,... C_k are the lengths of the cycles in some cycle decomposition of
# the underlying graph. This can be re-written as (# of non-self edges) - (# of cycles).
# Hence, we want to maximize the # of cycles in a cycle decomposition of the underlying graph.
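# Editorial worked example (not part of the original file): for A = "abac",
# B = "baca" the non-self edges are a->b, b->a, a->c, c->a. They decompose
# into two 2-cycles, (a b) and (a c), so
#   K = (# of non-self edges) - (# of cycles) = 4 - 2 = 2,
# matching Example 3 above.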
import collections
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
def kSimilarity(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
# Perform a regular breadth-first search: the neighbors to each node string S are all the strings
# reachable with 1 swap to get the first unmatched character in S matched.
# we can prove that an optimal solution swaps the left-most unmatched character A[i] with an
# appropriate match A[j] which equals to B[i] (j > i), as this increases # of self-edges.
# Time complexity: This reduces the # of "neighbors" of a node (string state) from O(N^2) to O(N):
# O(N^2): swap any pair of chars in the string,
# O(N): only swap the first unmatched char.
def neighbors(s):
for i, c in enumerate(s):
if c != B[i]:
break
t = list(s)
for j in xrange(i+1, len(s)):
if t[j] == B[i]:
t[i], t[j] = t[j], t[i]
yield "".join(t)
t[j], t[i] = t[i], t[j]
q = collections.deque([A])
steps = {A:0} # we need a set to remove repeatedness anyway, so put 'steps' together
while q:
s = q.popleft()
if s == B:
return steps[s]
for t in neighbors(s):
if t not in steps:
steps[t] = steps[s] + 1
q.append(t)
print(Solution().kSimilarity('abac', 'baca'))
| 39.309278
| 108
| 0.586415
|
import collections
try:
    xrange
except NameError:
    xrange = range
class Solution(object):
    def kSimilarity(self, A, B):
        def neighbors(s):
            for i, c in enumerate(s):
if c != B[i]:
break
t = list(s)
for j in xrange(i+1, len(s)):
if t[j] == B[i]:
t[i], t[j] = t[j], t[i]
yield "".join(t)
t[j], t[i] = t[i], t[j]
q = collections.deque([A])
steps = {A:0}
while q:
s = q.popleft()
if s == B:
return steps[s]
for t in neighbors(s):
if t not in steps:
steps[t] = steps[s] + 1
q.append(t)
print(Solution().kSimilarity('abac', 'baca'))
| true
| true
|
790a80b2e52f62cdfc0af3c6e034400381d5e475
| 1,595
|
py
|
Python
|
test/test_input_output.py
|
ProcessMaker/pmio-sdk-python
|
49ddf9e6444c77a35ce51aa052059b254e0f5299
|
[
"Apache-2.0"
] | 2
|
2017-11-10T05:10:44.000Z
|
2020-05-14T14:20:01.000Z
|
test/test_input_output.py
|
ProcessMaker/pmio-sdk-python
|
49ddf9e6444c77a35ce51aa052059b254e0f5299
|
[
"Apache-2.0"
] | null | null | null |
test/test_input_output.py
|
ProcessMaker/pmio-sdk-python
|
49ddf9e6444c77a35ce51aa052059b254e0f5299
|
[
"Apache-2.0"
] | 4
|
2017-07-01T22:04:18.000Z
|
2020-05-14T14:33:41.000Z
|
# coding: utf-8
"""
ProcessMaker API
This ProcessMaker I/O API provides access to a BPMN 2.0 compliant workflow engine api that is designed to be used as a microservice to support enterprise cloud applications. The current Alpha 1.0 version supports most of the descriptive class of the BPMN 2.0 specification.
OpenAPI spec version: 1.0.0
Contact: support@processmaker.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ProcessMaker_PMIO
from ProcessMaker_PMIO.rest import ApiException
from ProcessMaker_PMIO.models.input_output import InputOutput
class TestInputOutput(unittest.TestCase):
""" InputOutput unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testInputOutput(self):
"""
Test InputOutput
"""
model = ProcessMaker_PMIO.models.input_output.InputOutput()
if __name__ == '__main__':
unittest.main()
| 29.537037
| 278
| 0.731661
|
from __future__ import absolute_import
import os
import sys
import unittest
import ProcessMaker_PMIO
from ProcessMaker_PMIO.rest import ApiException
from ProcessMaker_PMIO.models.input_output import InputOutput
class TestInputOutput(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testInputOutput(self):
model = ProcessMaker_PMIO.models.input_output.InputOutput()
if __name__ == '__main__':
unittest.main()
| true
| true
|
790a8150f946b50d2a5a2572e7c851230ff1f896
| 448
|
py
|
Python
|
excel2mysql/__main__.py
|
zxjsdp/excel2mysql
|
4e1ed08d58366dcebc86a08fb74d20feb5369b36
|
[
"Apache-2.0"
] | null | null | null |
excel2mysql/__main__.py
|
zxjsdp/excel2mysql
|
4e1ed08d58366dcebc86a08fb74d20feb5369b36
|
[
"Apache-2.0"
] | null | null | null |
excel2mysql/__main__.py
|
zxjsdp/excel2mysql
|
4e1ed08d58366dcebc86a08fb74d20feb5369b36
|
[
"Apache-2.0"
] | 2
|
2017-09-11T07:11:41.000Z
|
2021-11-26T06:20:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals,
absolute_import, with_statement)
import os
import sys
if __name__ == '__main__':
if __package__ is None:
dir_name = os.path.dirname(__file__)
sys.path.append(
os.path.abspath(
os.path.join(dir_name, '..')))
from excel2mysql.migrate import migrate
migrate()
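# Editorial sketch (not part of the original file): the same parent-directory
# fix in isolation. "python -m excel2mysql" sets __package__ and skips the
# patch; running this file directly leaves __package__ None, and appending the
# parent directory makes "from excel2mysql.migrate import migrate" resolvable.
import os
import sys
parent = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if parent not in sys.path:
    sys.path.append(parent)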
| 20.363636
| 57
| 0.609375
|
from __future__ import (print_function, unicode_literals,
absolute_import, with_statement)
import os
import sys
if __name__ == '__main__':
if __package__ is None:
dir_name = os.path.dirname(__file__)
sys.path.append(
os.path.abspath(
os.path.join(dir_name, '..')))
from excel2mysql.migrate import migrate
migrate()
| true
| true
|
790a8186fe8570e34da2dd80ded2622a912fd126
| 551
|
py
|
Python
|
01/01.py
|
PROxZIMA/AquaQ-Challenge-Hub
|
e52579264a2c69a0a0b1bccf98a60eabdbd22f95
|
[
"MIT"
] | null | null | null |
01/01.py
|
PROxZIMA/AquaQ-Challenge-Hub
|
e52579264a2c69a0a0b1bccf98a60eabdbd22f95
|
[
"MIT"
] | null | null | null |
01/01.py
|
PROxZIMA/AquaQ-Challenge-Hub
|
e52579264a2c69a0a0b1bccf98a60eabdbd22f95
|
[
"MIT"
] | null | null | null |
from os.path import realpath
def main():
inpString = open(f'{realpath(__file__)[:-2]}txt').read()
inpString += '0' * (3 - len(inpString) % 3) # Padding to make it divisible by 3
inp = list(inpString)
for i in range(len(inp)):
if inp[i] not in '0123456789abcdef':
inp[i] = '0'
inp = ''.join(inp)
v = len(inp)//3
for i in range(0, len(inp), v):
print(inp[i : i + 2], end='') # Print first 2 char of every 1/3rd part of input
if __name__ == '__main__':
main()
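# Editorial sketch (not part of the original file): the selection step on a toy
# string of length 9, where each third is visible by eye.
inp = "abcdefghi"  # thirds: "abc", "def", "ghi"
v = len(inp) // 3
print(''.join(inp[i:i + 2] for i in range(0, len(inp), v)))  # -> "abdegh"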
| 23.956522
| 105
| 0.53539
|
from os.path import realpath
def main():
inpString = open(f'{realpath(__file__)[:-2]}txt').read()
inpString += '0' * (3 - len(inpString) % 3)
inp = list(inpString)
for i in range(len(inp)):
if inp[i] not in '0123456789abcdef':
inp[i] = '0'
inp = ''.join(inp)
v = len(inp)//3
for i in range(0, len(inp), v):
print(inp[i : i + 2], end='')
if __name__ == '__main__':
main()
| true
| true
|
790a828f42fdd9c867267ea40e22c50c7668bbb0
| 2,189
|
py
|
Python
|
train_valid_split.py
|
njamalova/whale_tail_identifier
|
507ffe8838b42ca75dbd696c2faaa71252f417da
|
[
"MIT"
] | 2
|
2020-09-12T17:06:38.000Z
|
2020-09-14T08:27:53.000Z
|
train_valid_split.py
|
purrwhite/whale_tail_identifier
|
507ffe8838b42ca75dbd696c2faaa71252f417da
|
[
"MIT"
] | 8
|
2020-09-12T16:58:41.000Z
|
2020-09-12T22:19:01.000Z
|
train_valid_split.py
|
purrwhite/whale_tail_identifier
|
507ffe8838b42ca75dbd696c2faaa71252f417da
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import cv2
import tensorflow as tf
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
# In[3]:
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
# ### Load the Training Data
# In[4]:
# curwd = str(os.getcwd())
# targetwd = '\\data\\train'
# path_train = curwd + targetwd
path_train = '...s\\Documents\\whale_identification\\whale_identification\\data\\train\\'
train = [os.path.join(path_train,f) for f in os.listdir(path_train) if f.endswith('.jpg')]
# In[6]:
train_labels = pd.read_csv("df_train.csv")
# In[7]:
train_labels.head()
# In[8]:
unique_whales = train_labels['Id'].unique()
len(unique_whales)
# ### Train-Validation Split
# In[9]:
def train_valid_split(df):
# find unique categories of whales in our dataframe
unique_whales = train_labels['Id'].unique()
# map the images to categories
mapping = {}
for whale in unique_whales:
lst_of_images = list(train_labels[train_labels['Id'] == whale]['Image'].values)
mapping[whale] = lst_of_images
# perform manual train/validation split to ensure balanced data in both sets (i.e. all categories are represented)
train_revised = []
valid_revised = []
for v in mapping.values():
        cut = int(0.2*len(v)) # per-class sample: first 20% of images go to training, the next 5% to validation
cut2 = int(0.25*len(v))
tr = v[:cut]
val = v[cut:cut2]
train_revised.append(tr)
valid_revised.append(val)
return train_revised, valid_revised
def train_valid_dict_generator(train_list, valid_list, df):
# create a dictionary mapping new training set to correct labels
train_df = {}
for i in train_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
train_df[j] = lbl
# create a dictionary mapping new validation set to correct labels
valid_df = {}
for i in valid_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
valid_df[j] = lbl
return train_df, valid_df
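# Editorial sketch (not part of the original file): the per-class slicing with
# a toy mapping. Note that classes with fewer than five images end up empty in
# both splits, since int(0.2 * 2) == int(0.25 * 2) == 0.
mapping = {'whale_a': ['img%d.jpg' % i for i in range(20)], 'whale_b': ['x.jpg', 'y.jpg']}
train_revised, valid_revised = [], []
for v in mapping.values():
    cut, cut2 = int(0.2 * len(v)), int(0.25 * len(v))
    train_revised.append(v[:cut])
    valid_revised.append(v[cut:cut2])
print([len(t) for t in train_revised])  # [4, 0]
print([len(t) for t in valid_revised])  # [1, 0]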
| 20.082569
| 118
| 0.649612
|
import os
import cv2
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from tensorflow.keras import layers
path_train = '...s\\Documents\\whale_identification\\whale_identification\\data\\train\\'
train = [os.path.join(path_train,f) for f in os.listdir(path_train) if f.endswith('.jpg')]
train_labels = pd.read_csv("df_train.csv")
train_labels.head()
unique_whales = train_labels['Id'].unique()
len(unique_whales)
def train_valid_split(df):
    unique_whales = train_labels['Id'].unique()
mapping = {}
for whale in unique_whales:
lst_of_images = list(train_labels[train_labels['Id'] == whale]['Image'].values)
mapping[whale] = lst_of_images
train_revised = []
valid_revised = []
for v in mapping.values():
cut = int(0.2*len(v))
cut2 = int(0.25*len(v))
tr = v[:cut]
val = v[cut:cut2]
train_revised.append(tr)
valid_revised.append(val)
return train_revised, valid_revised
def train_valid_dict_generator(train_list, valid_list, df):
train_df = {}
for i in train_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
train_df[j] = lbl
valid_df = {}
for i in valid_list:
for j in i:
lbl = df[df['Image'] == j]['Id'].values[0]
valid_df[j] = lbl
return train_df, valid_df
| true
| true
|
790a8293486af5b1dc59b7d328b8a9d2a7643089
| 2,325
|
py
|
Python
|
siuba/sql/dialects/snowflake.py
|
Techzune/siuba
|
575bffe016c40ccd4045d800f1c542e43a77aa50
|
[
"MIT"
] | null | null | null |
siuba/sql/dialects/snowflake.py
|
Techzune/siuba
|
575bffe016c40ccd4045d800f1c542e43a77aa50
|
[
"MIT"
] | null | null | null |
siuba/sql/dialects/snowflake.py
|
Techzune/siuba
|
575bffe016c40ccd4045d800f1c542e43a77aa50
|
[
"MIT"
] | null | null | null |
from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
SqlTranslator,
extend_base,
sql_scalar,
sql_agg,
win_agg,
win_cumul,
annotate
)
#from .postgresql import PostgresqlColumn, PostgresqlColumnAgg
from .base import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
# Data ----
class SnowflakeColumn(SqlColumn): pass
class SnowflakeColumnAgg(SqlColumnAgg, SnowflakeColumn): pass
# Translations ================================================================
@_dt.sql_func_last_day_in_period.register
def sql_func_last_day_in_period(codata: SnowflakeColumn, col, period):
return _dt.date_trunc(codata, col, period) \
+ sql.text("interval '1 %s'" % period) \
- sql.text("interval '1 day'")
# Scalar ----
extend_base(
SnowflakeColumn,
__floordiv__ = lambda _, x, y: fn.floor(x / y),
__rfloordiv__ = lambda _, x, y: fn.floor(y / x),
# connector has a bug with %
# see: https://github.com/snowflakedb/snowflake-sqlalchemy/issues/246
__mod__ = lambda _, x, y: fn.mod(x, y),
__rmod__ = lambda _, x, y: fn.mod(y, x),
mod = lambda _, x, y: fn.mod(x,y),
rmod = lambda _, x, y: fn.mod(y,x),
# TODO: str.contains
)
# Window ----
extend_base(
SnowflakeColumn,
all = win_agg("booland_agg"),
any = win_agg("boolor_agg"),
count = win_agg("count"),
cumsum = annotate(win_cumul("sum"), result_type="variable"),
# note that the number of decimal places Snowflake returns, and whether
# the result is numeric depends on the input. mark as variable, so tests
# do not check dtype
# see https://community.snowflake.com/s/question/0D50Z000079hpxvSAA/numeric-calculations-truncated-to-3-decimal-places
mean = annotate(win_agg("avg"), result_type="variable"),
std = win_agg("stddev_samp"),
sum = annotate(win_agg("sum"), result_type="variable"),
var = win_agg("var_samp"),
# str.contains
# dt methods are more like base
)
# Agg ----
extend_base(
SnowflakeColumnAgg,
all = sql_agg("booland_agg"),
any = sql_agg("boolor_agg"),
count = sql_agg("count"),
std = sql_agg("stddev_samp"),
var = sql_agg("var_samp"),
)
translator = SqlTranslator.from_mappings(
SnowflakeColumn, SnowflakeColumnAgg
)
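# Editorial sketch (not part of the original file): what the mod workaround
# buys. Compiling both forms with plain SQLAlchemy shows the textual
# difference; this demonstrates rendering only, not a live Snowflake query.
from sqlalchemy import column, func
print(column("x") % 3)           # roughly "x % :x_1" -- the operator the connector mishandles
print(func.mod(column("x"), 3))  # "mod(x, :mod_1)" -- the safe function form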
| 27.034884
| 122
| 0.653333
|
from sqlalchemy.sql import func as fn
from sqlalchemy import sql
from ..translate import (
SqlTranslator,
extend_base,
sql_scalar,
sql_agg,
win_agg,
win_cumul,
annotate
)
from .base import SqlColumn, SqlColumnAgg
from . import _dt_generics as _dt
class SnowflakeColumn(SqlColumn): pass
class SnowflakeColumnAgg(SqlColumnAgg, SnowflakeColumn): pass
@_dt.sql_func_last_day_in_period.register
def sql_func_last_day_in_period(codata: SnowflakeColumn, col, period):
return _dt.date_trunc(codata, col, period) \
+ sql.text("interval '1 %s'" % period) \
- sql.text("interval '1 day'")
extend_base(
SnowflakeColumn,
__floordiv__ = lambda _, x, y: fn.floor(x / y),
__rfloordiv__ = lambda _, x, y: fn.floor(y / x),
__mod__ = lambda _, x, y: fn.mod(x, y),
__rmod__ = lambda _, x, y: fn.mod(y, x),
mod = lambda _, x, y: fn.mod(x,y),
rmod = lambda _, x, y: fn.mod(y,x),
)
extend_base(
SnowflakeColumn,
all = win_agg("booland_agg"),
any = win_agg("boolor_agg"),
count = win_agg("count"),
cumsum = annotate(win_cumul("sum"), result_type="variable"),
mean = annotate(win_agg("avg"), result_type="variable"),
std = win_agg("stddev_samp"),
sum = annotate(win_agg("sum"), result_type="variable"),
var = win_agg("var_samp"),
)
extend_base(
SnowflakeColumnAgg,
all = sql_agg("booland_agg"),
any = sql_agg("boolor_agg"),
count = sql_agg("count"),
std = sql_agg("stddev_samp"),
var = sql_agg("var_samp"),
)
translator = SqlTranslator.from_mappings(
SnowflakeColumn, SnowflakeColumnAgg
)
| true
| true
|
790a82a041116da64249af9b11c92b7703ff53ac
| 14,236
|
py
|
Python
|
homeassistant/components/xiaomi_miio/config_flow.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/xiaomi_miio/config_flow.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 77
|
2020-07-16T16:43:09.000Z
|
2022-03-31T06:14:37.000Z
|
homeassistant/components/xiaomi_miio/config_flow.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Config flow to configure Xiaomi Miio."""
import logging
from re import search
from micloud import MiCloud
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
# trigger re-auth flow
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Miio config flow."""
VERSION = 1
def __init__(self):
"""Initialize."""
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {}
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, user_input=None):
"""Perform reauth upon an authentication error or missing cloud credentials."""
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Dialog that informs the user that reauth is required."""
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(
step_id="reauth_confirm", data_schema=vol.Schema({})
)
async def async_step_import(self, conf: dict):
"""Import a configuration from config.yaml."""
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update(
{"title_placeholders": {"name": f"YAML import {self.host}"}}
)
return await self.async_step_connect()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_cloud()
async def async_step_zeroconf(self, discovery_info):
"""Handle zeroconf discovery."""
name = discovery_info.get("name")
self.host = discovery_info.get("host")
self.mac = discovery_info.get("properties", {}).get("mac")
if self.mac is None:
poch = discovery_info.get("properties", {}).get("poch", "")
result = search(r"mac=\w+", poch)
if result is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
# Check which device is discovered.
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
# Discovered device is not yet supported
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info):
"""Extract the cloud info."""
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(self, user_input=None):
"""Configure a xiaomi miio device through the Miio Cloud."""
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
parent_id = device.get("parent_id")
if not parent_id:
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(self, user_input=None):
"""Handle multiple cloud devices found."""
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(self, user_input=None):
"""Configure a xiaomi miio device Manually."""
errors = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(self, user_input=None):
"""Connect to a xiaomi miio device."""
errors = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
# Try to connect to a Xiaomi Device.
connect_device_class = ConnectXiaomiDevice(self.hass)
await connect_device_class.async_connect_device(self.host, self.token)
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
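# Editorial sketch (not part of the original file): the 32-character token rule
# from DEVICE_SETTINGS in isolation, using the same voluptuous validators.
import voluptuous as vol
token_schema = vol.Schema({vol.Required("token"): vol.All(str, vol.Length(min=32, max=32))})
print(token_schema({"token": "0" * 32}))  # accepted unchanged
try:
    token_schema({"token": "short"})
except vol.Invalid as err:
    print("rejected:", err)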
| 36.502564
| 88
| 0.606631
|
import logging
from re import search
from micloud import MiCloud
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
class OptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
def __init__(self):
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {}
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, user_input=None):
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(
step_id="reauth_confirm", data_schema=vol.Schema({})
)
async def async_step_import(self, conf: dict):
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update(
{"title_placeholders": {"name": f"YAML import {self.host}"}}
)
return await self.async_step_connect()
async def async_step_user(self, user_input=None):
return await self.async_step_cloud()
async def async_step_zeroconf(self, discovery_info):
name = discovery_info.get("name")
self.host = discovery_info.get("host")
self.mac = discovery_info.get("properties", {}).get("mac")
if self.mac is None:
poch = discovery_info.get("properties", {}).get("poch", "")
result = search(r"mac=\w+", poch)
if result is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info):
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(self, user_input=None):
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
parent_id = device.get("parent_id")
if not parent_id:
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(self, user_input=None):
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(self, user_input=None):
errors = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(self, user_input=None):
errors = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
connect_device_class = ConnectXiaomiDevice(self.hass)
await connect_device_class.async_connect_device(self.host, self.token)
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
| true
| true
|
790a83767a47c40fd71d6d81a8790a3f0bbeb16d
| 2,198
|
py
|
Python
|
colossalai/amp/__init__.py
|
jiangz17THU/ColossalAI
|
354b7954d1fa6b21a5ba566f0d5ec099280ad315
|
[
"Apache-2.0"
] | null | null | null |
colossalai/amp/__init__.py
|
jiangz17THU/ColossalAI
|
354b7954d1fa6b21a5ba566f0d5ec099280ad315
|
[
"Apache-2.0"
] | null | null | null |
colossalai/amp/__init__.py
|
jiangz17THU/ColossalAI
|
354b7954d1fa6b21a5ba566f0d5ec099280ad315
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
"""A helper function to wrap training components with Torch AMP modules.
Args:
        model (:class:`torch.nn.Module`): your model object.
optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
criterion (:class:`torch.nn.modules.loss._Loss`): your loss function object.
mode (:class:`colossalai.amp.AMP_TYPE`): amp mode.
amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration for different amp modes.
Returns:
A tuple (model, optimizer, criterion).
Note:
``amp_config`` may vary from different mode you choose. You should check the corresponding amp mode
for more details about ``amp_config``.
For ``apex_amp``, please check
`apex_amp config <https://nvidia.github.io/apex/amp.html?highlight=apex%20amp>`_.
For ``naive_amp``, please check
`naive_amp config <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/amp/naive_amp/_fp16_optimizer.py#L42>`_.
For ``torch_amp``, please check
`torch_amp config <https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py#L97>`_.
"""
assert isinstance(mode, AMP_TYPE), \
f'expected the argument mode be AMP_TYPE, but got {type(mode)}'
if amp_config is None:
amp_config = Config()
if mode == AMP_TYPE.TORCH:
model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config)
elif mode == AMP_TYPE.APEX:
model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
elif mode == AMP_TYPE.NAIVE:
model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)
return model, optimizer, criterion
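# Editorial usage sketch (not part of the original file): wiring ordinary torch
# objects through convert_to_amp. Running it needs colossalai and a CUDA
# device, so the final call is shown commented out.
import torch.nn as nn
from torch.optim import SGD
model = nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
criterion = nn.MSELoss()
# model, optimizer, criterion = convert_to_amp(model, optimizer, criterion, AMP_TYPE.TORCH)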
| 43.098039
| 128
| 0.708826
|
from .amp_type import AMP_TYPE
from colossalai.context import Config
import torch.nn as nn
from torch.optim import Optimizer
from torch.nn.modules.loss import _Loss
from .torch_amp import convert_to_torch_amp
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
assert isinstance(mode, AMP_TYPE), \
f'expected the argument mode be AMP_TYPE, but got {type(mode)}'
if amp_config is None:
amp_config = Config()
if mode == AMP_TYPE.TORCH:
model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config)
elif mode == AMP_TYPE.APEX:
model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
elif mode == AMP_TYPE.NAIVE:
model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)
return model, optimizer, criterion
| true
| true
|
790a8385a223910f9cc30d80db9f448879dcd1bd
| 1,169
|
py
|
Python
|
groupdocs/models/GetBillingAddressResponse.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
groupdocs/models/GetBillingAddressResponse.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
groupdocs/models/GetBillingAddressResponse.py
|
groupdocs-legacy-sdk/python
|
80e5ef5a9a14ac4a7815c6cf933b5b2997381455
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetBillingAddressResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'GetBillingAddressResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # GetBillingAddressResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
| 29.974359
| 77
| 0.656116
|
class GetBillingAddressResponse:
def __init__(self):
self.swaggerTypes = {
'result': 'GetBillingAddressResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None
self.status = None
self.error_message = None
self.composedOn = None
| true
| true
|
790a83be08a5197d889e4e751a9e8148d22046f2
| 93
|
py
|
Python
|
Chapter07/ch7_debugger3.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | 18
|
2020-11-27T22:41:12.000Z
|
2021-12-27T08:20:46.000Z
|
Chapter07/ch7_debugger3.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | null | null | null |
Chapter07/ch7_debugger3.py
|
PacktPublishing/Applied-Computational-Thinking-with-Python
|
fd9982383c5b473ffa1640998540d602876816e5
|
[
"MIT"
] | 8
|
2020-11-30T17:51:11.000Z
|
2021-12-25T05:23:02.000Z
|
number = 5
number2 = 'five'
print(number)
breakpoint()
print(str(number) + " " + number2)
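# Editorial note (not part of the original file): at the breakpoint() call the
# script drops into pdb, where "p number" prints a variable, "n" steps to the
# next line, "c" continues execution, and "q" quits the session.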
| 10.333333
| 34
| 0.645161
|
number = 5
number2 = 'five'
print(number)
breakpoint()
print(str(number) + " " + number2)
| true
| true
|
790a840e419c9466f82615cad59b857b5512ea66
| 95
|
py
|
Python
|
krcg_api/wsgi.py
|
lionel-panhaleux/krcg-api
|
95b25211dae152c20bba82b6bedfc7204b48692d
|
[
"MIT"
] | 2
|
2020-12-18T21:22:08.000Z
|
2021-01-14T18:07:17.000Z
|
krcg_api/wsgi.py
|
lionel-panhaleux/krcg-api
|
95b25211dae152c20bba82b6bedfc7204b48692d
|
[
"MIT"
] | 6
|
2021-07-20T00:25:43.000Z
|
2022-03-07T07:08:56.000Z
|
krcg_api/wsgi.py
|
lionel-panhaleux/krcg-api
|
95b25211dae152c20bba82b6bedfc7204b48692d
|
[
"MIT"
] | null | null | null |
"""Entrypoint for the WSGI app (web API)
"""
from . import api
application = api.create_app()
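# Editorial usage sketch (not part of the original file): a WSGI server looks
# for the module-level "application" callable, e.g. with gunicorn installed:
#   gunicorn krcg_api.wsgi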
| 15.833333
| 40
| 0.694737
|
from . import api
application = api.create_app()
| true
| true
|
790a845e3325bf565cd3b061ffc6a3a604aa0dfa
| 2,147
|
py
|
Python
|
gr-ieee802-15-4/python/qa_dqpsk_soft_demapper_cc.py
|
xueyuecanfeng/C-LQI
|
f489c6447428d6affb2159e9d8f895caab2868c7
|
[
"BSD-2-Clause"
] | 2
|
2021-11-30T02:35:48.000Z
|
2021-11-30T02:53:02.000Z
|
gr-ieee802-15-4/python/qa_dqpsk_soft_demapper_cc.py
|
xueyuecanfeng/C-LQI
|
f489c6447428d6affb2159e9d8f895caab2868c7
|
[
"BSD-2-Clause"
] | null | null | null |
gr-ieee802-15-4/python/qa_dqpsk_soft_demapper_cc.py
|
xueyuecanfeng/C-LQI
|
f489c6447428d6affb2159e9d8f895caab2868c7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Felix Wunsch, Communications Engineering Lab (CEL) / Karlsruhe Institute of Technology (KIT) <wunsch.felix@googlemail.com>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
import numpy as np
class qa_dqpsk_soft_demapper_cc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
pi=np.pi
data_in = [0, pi/2, pi, -pi/2, pi/2, -pi/2, -pi/2, 0, 0, pi, pi/2, pi/2]
data_in = [np.exp(1j*i) for i in data_in]
data_in = [i*np.exp(1j*pi/4) for i in data_in]
self.src = blocks.vector_source_c(data_in)
self.dqpsk = ieee802_15_4.dqpsk_soft_demapper_cc(framelen=6)
self.snk = blocks.vector_sink_c(1)
self.tb.connect(self.src, self.dqpsk, self.snk)
self.tb.run ()
# check data
data_out = self.snk.data()
ref = [0, pi/2, pi, -pi/2, pi/2, pi, -pi/2, 0, 0, pi, pi, pi/2]
ref = np.array([np.exp(1j*i) for i in ref])
print "angle in:", np.angle(data_in)/pi*180
print "angle out:", np.angle(data_out)/pi*180
print "angle ref:", np.angle(ref)/pi*180
self.assertFloatTuplesAlmostEqual(ref, data_out, 5)
if __name__ == '__main__':
gr_unittest.run(qa_dqpsk_soft_demapper_cc)
| 37.666667
| 140
| 0.670703
|
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import ieee802_15_4_swig as ieee802_15_4
import numpy as np
class qa_dqpsk_soft_demapper_cc (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
pi=np.pi
data_in = [0, pi/2, pi, -pi/2, pi/2, -pi/2, -pi/2, 0, 0, pi, pi/2, pi/2]
data_in = [np.exp(1j*i) for i in data_in]
data_in = [i*np.exp(1j*pi/4) for i in data_in]
self.src = blocks.vector_source_c(data_in)
self.dqpsk = ieee802_15_4.dqpsk_soft_demapper_cc(framelen=6)
self.snk = blocks.vector_sink_c(1)
self.tb.connect(self.src, self.dqpsk, self.snk)
self.tb.run ()
data_out = self.snk.data()
ref = [0, pi/2, pi, -pi/2, pi/2, pi, -pi/2, 0, 0, pi, pi, pi/2]
ref = np.array([np.exp(1j*i) for i in ref])
print "angle in:", np.angle(data_in)/pi*180
print "angle out:", np.angle(data_out)/pi*180
print "angle ref:", np.angle(ref)/pi*180
self.assertFloatTuplesAlmostEqual(ref, data_out, 5)
if __name__ == '__main__':
gr_unittest.run(qa_dqpsk_soft_demapper_cc)
| false
| true
|
790a85a8743405ab5506de0abc971a503424ebb9
| 1,261
|
py
|
Python
|
libalgs-py/data_structures/min_stack.py
|
tdudz/libalgs-py
|
9b2610de66217c0564193096702c47478de5db5e
|
[
"MIT"
] | null | null | null |
libalgs-py/data_structures/min_stack.py
|
tdudz/libalgs-py
|
9b2610de66217c0564193096702c47478de5db5e
|
[
"MIT"
] | null | null | null |
libalgs-py/data_structures/min_stack.py
|
tdudz/libalgs-py
|
9b2610de66217c0564193096702c47478de5db5e
|
[
"MIT"
] | null | null | null |
"""
Min Stack
-----
A LIFO abstract data type that serves as a collection of elements.
Supports retrieving the min from the stack in constant time.
"""
class MinStack(object):
def __init__(self):
"""
Attributes:
data (arr): data stored in the stack
minimum (arr): minimum values of data stored
"""
self.data = []
self.minimum = []
def empty(self):
"""
Returns whether or not the stack is empty.
Time Complexity: O(1)
Returns:
bool: whether or not the stack is empty
"""
return len(self.data) == 0
def push(self, x):
"""
Pushes an element onto the stack.
Time Complexity: O(1)
Args:
x: item to be added
"""
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
def pop(self):
"""
Pops an element off the stack.
Time Complexity: O(1)
Returns:
any: the last element on the stack
"""
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
"""
Returns the last item on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.data[-1]
def peek_min(self):
"""
Returns the min on the stack but doesn't remove it.
Time Complexity: O(1)
"""
return self.minimum[-1]
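# Editorial usage sketch (not part of the original file): constant-time minimum
# tracking across pushes and pops.
s = MinStack()
for x in [5, 2, 8, 2]:
    s.push(x)
print(s.peek_min())  # 2
s.pop()              # removes the top 2; an earlier 2 remains below
s.pop()              # removes the 8
print(s.peek_min())  # still 2
s.pop()
print(s.peek_min())  # 5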
| 16.592105
| 66
| 0.634417
|
class MinStack(object):
def __init__(self):
self.data = []
self.minimum = []
def empty(self):
return len(self.data) == 0
def push(self, x):
self.data.append(x)
if not self.minimum or x <= self.minimum[-1]:
self.minimum.append(x)
def pop(self):
x = self.data.pop()
if x == self.minimum[-1]:
self.minimum.pop()
return x
def peek(self):
return self.data[-1]
def peek_min(self):
return self.minimum[-1]
| true
| true
|
790a85d27fe4be9c3ed141d623bf5a9b1b66fffb
| 8,596
|
py
|
Python
|
nova/virt/vmwareapi/vim_util.py
|
bopopescu/nested_quota
|
6d8443287e29c2c9e03cd4e5c5757424314280ad
|
[
"Apache-2.0"
] | 2
|
2015-06-15T02:16:33.000Z
|
2022-02-23T07:10:38.000Z
|
nova/virt/vmwareapi/vim_util.py
|
bopopescu/nested_quota
|
6d8443287e29c2c9e03cd4e5c5757424314280ad
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/virt/vmwareapi/vim_util.py
|
bopopescu/nested_quota
|
6d8443287e29c2c9e03cd4e5c5757424314280ad
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API utility module.
"""
from oslo.config import cfg
from oslo.vmware import vim_util as vutil
import suds
from nova.i18n import _
from nova.openstack.common import log as logging
vmware_opts = cfg.IntOpt('maximum_objects', default=100,
help='The maximum number of ObjectContent data '
'objects that should be returned in a single '
'result. A positive value will cause the '
'operation to suspend the retrieval when the '
'count of objects reaches the specified '
'maximum. The server may still limit the count '
'to something less than the configured value. '
'Any remaining objects may be retrieved with '
'additional requests.')
CONF = cfg.CONF
CONF.register_opt(vmware_opts, 'vmware')
LOG = logging.getLogger(__name__)
def object_to_dict(obj, list_depth=1):
"""Convert Suds object into serializable format.
The calling function can limit the amount of list entries that
are converted.
"""
d = {}
for k, v in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object_to_dict(v, list_depth=list_depth)
elif isinstance(v, list):
d[k] = []
used = 0
for item in v:
used = used + 1
if used > list_depth:
break
if hasattr(item, '__keylist__'):
d[k].append(object_to_dict(item, list_depth=list_depth))
else:
d[k].append(item)
else:
d[k] = v
return d
def get_moref(value, type):
return vutil.get_moref(value, type)
def get_object_properties(vim, collector, mobj, type, properties):
"""Gets the properties of the Managed object specified."""
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec],
options=options)
def get_dynamic_property(vim, mobj, type, property_name):
"""Gets a particular property of the Managed Object."""
property_dict = get_dynamic_properties(vim, mobj, type, [property_name])
return property_dict.get(property_name)
def get_dynamic_properties(vim, mobj, type, property_names):
"""Gets the specified properties of the Managed Object."""
obj_content = get_object_properties(vim, None, mobj, type, property_names)
if obj_content is None:
return {}
if hasattr(obj_content, 'token'):
cancel_retrieve(vim, obj_content.token)
property_dict = {}
if obj_content.objects:
if hasattr(obj_content.objects[0], 'propSet'):
dynamic_properties = obj_content.objects[0].propSet
if dynamic_properties:
for prop in dynamic_properties:
property_dict[prop.name] = prop.val
# The object may have information useful for logging
if hasattr(obj_content.objects[0], 'missingSet'):
for m in obj_content.objects[0].missingSet:
LOG.warning(_("Unable to retrieve value for %(path)s "
"Reason: %(reason)s"),
{'path': m.path,
'reason': m.fault.localizedMessage})
return property_dict
def get_objects(vim, type, properties_to_collect=None, all=False):
"""Gets the list of objects of the type specified."""
return vutil.get_objects(vim, type, CONF.vmware.maximum_objects,
properties_to_collect, all)
def get_inner_objects(vim, base_obj, path, inner_type,
properties_to_collect=None, all=False):
"""Gets the list of inner objects of the type specified."""
client_factory = vim.client.factory
base_type = base_obj._type
traversal_spec = vutil.build_traversal_spec(client_factory, 'inner',
base_type, path, False, [])
object_spec = vutil.build_object_spec(client_factory,
base_obj,
[traversal_spec])
property_spec = vutil.build_property_spec(client_factory, type_=inner_type,
properties_to_collect=properties_to_collect,
all_properties=all)
property_filter_spec = vutil.build_property_filter_spec(client_factory,
[property_spec], [object_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(
vim.service_content.propertyCollector,
specSet=[property_filter_spec], options=options)
def cancel_retrieve(vim, token):
"""Cancels the retrieve operation."""
return vim.CancelRetrievePropertiesEx(
vim.service_content.propertyCollector,
token=token)
def continue_to_get_objects(vim, token):
"""Continues to get the list of objects of the type specified."""
return vim.ContinueRetrievePropertiesEx(
vim.service_content.propertyCollector,
token=token)
def get_prop_spec(client_factory, spec_type, properties):
"""Builds the Property Spec Object."""
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec
def get_obj_spec(client_factory, obj, select_set=None):
"""Builds the Object Spec object."""
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if select_set is not None:
obj_spec.selectSet = select_set
return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
"""Builds the Property Filter Spec Object."""
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
"""Gets the list of properties for the collection of
objects of the type specified.
"""
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(
vim.service_content.propertyCollector,
specSet=[prop_filter_spec], options=options)
def get_about_info(vim):
"""Get the About Info from the service content."""
return vim.service_content.about
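# Editor's hedged sketch (not part of the original module): how the paging
# helpers above are typically combined. `vim` is assumed to be the same
# authenticated session object used throughout this module; RetrieveResult
# objects expose `objects` and, when more pages remain, a `token` attribute.
def iterate_objects(vim, type, properties_to_collect=None):
    """Yield every ObjectContent, following continuation tokens."""
    retrieve_result = get_objects(vim, type, properties_to_collect)
    while retrieve_result is not None:
        for obj_content in retrieve_result.objects:
            yield obj_content
        token = getattr(retrieve_result, 'token', None)
        if not token:
            break
        retrieve_result = continue_to_get_objects(vim, token)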
| 39.431193
| 79
| 0.646696
|
from oslo.config import cfg
from oslo.vmware import vim_util as vutil
import suds
from nova.i18n import _
from nova.openstack.common import log as logging
vmware_opts = cfg.IntOpt('maximum_objects', default=100,
help='The maximum number of ObjectContent data '
'objects that should be returned in a single '
'result. A positive value will cause the '
'operation to suspend the retrieval when the '
'count of objects reaches the specified '
'maximum. The server may still limit the count '
'to something less than the configured value. '
'Any remaining objects may be retrieved with '
'additional requests.')
CONF = cfg.CONF
CONF.register_opt(vmware_opts, 'vmware')
LOG = logging.getLogger(__name__)
def object_to_dict(obj, list_depth=1):
d = {}
for k, v in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object_to_dict(v, list_depth=list_depth)
elif isinstance(v, list):
d[k] = []
used = 0
for item in v:
used = used + 1
if used > list_depth:
break
if hasattr(item, '__keylist__'):
d[k].append(object_to_dict(item, list_depth=list_depth))
else:
d[k].append(item)
else:
d[k] = v
return d
def get_moref(value, type):
return vutil.get_moref(value, type)
def get_object_properties(vim, collector, mobj, type, properties):
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.service_content.propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec],
options=options)
def get_dynamic_property(vim, mobj, type, property_name):
property_dict = get_dynamic_properties(vim, mobj, type, [property_name])
return property_dict.get(property_name)
def get_dynamic_properties(vim, mobj, type, property_names):
obj_content = get_object_properties(vim, None, mobj, type, property_names)
if obj_content is None:
return {}
if hasattr(obj_content, 'token'):
cancel_retrieve(vim, obj_content.token)
property_dict = {}
if obj_content.objects:
if hasattr(obj_content.objects[0], 'propSet'):
dynamic_properties = obj_content.objects[0].propSet
if dynamic_properties:
for prop in dynamic_properties:
property_dict[prop.name] = prop.val
if hasattr(obj_content.objects[0], 'missingSet'):
for m in obj_content.objects[0].missingSet:
LOG.warning(_("Unable to retrieve value for %(path)s "
"Reason: %(reason)s"),
{'path': m.path,
'reason': m.fault.localizedMessage})
return property_dict
def get_objects(vim, type, properties_to_collect=None, all=False):
return vutil.get_objects(vim, type, CONF.vmware.maximum_objects,
properties_to_collect, all)
def get_inner_objects(vim, base_obj, path, inner_type,
properties_to_collect=None, all=False):
client_factory = vim.client.factory
base_type = base_obj._type
traversal_spec = vutil.build_traversal_spec(client_factory, 'inner',
base_type, path, False, [])
object_spec = vutil.build_object_spec(client_factory,
base_obj,
[traversal_spec])
property_spec = vutil.build_property_spec(client_factory, type_=inner_type,
properties_to_collect=properties_to_collect,
all_properties=all)
property_filter_spec = vutil.build_property_filter_spec(client_factory,
[property_spec], [object_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(
vim.service_content.propertyCollector,
specSet=[property_filter_spec], options=options)
def cancel_retrieve(vim, token):
return vim.CancelRetrievePropertiesEx(
vim.service_content.propertyCollector,
token=token)
def continue_to_get_objects(vim, token):
return vim.ContinueRetrievePropertiesEx(
vim.service_content.propertyCollector,
token=token)
def get_prop_spec(client_factory, spec_type, properties):
prop_spec = client_factory.create('ns0:PropertySpec')
prop_spec.type = spec_type
prop_spec.pathSet = properties
return prop_spec
def get_obj_spec(client_factory, obj, select_set=None):
obj_spec = client_factory.create('ns0:ObjectSpec')
obj_spec.obj = obj
obj_spec.skip = False
if select_set is not None:
obj_spec.selectSet = select_set
return obj_spec
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec
def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = CONF.vmware.maximum_objects
return vim.RetrievePropertiesEx(
vim.service_content.propertyCollector,
specSet=[prop_filter_spec], options=options)
def get_about_info(vim):
return vim.service_content.about
| true
| true
|
790a8617f84c30e29c8e29dee32a41b23c4f3615
| 2,250
|
py
|
Python
|
RAMP/disposableredis/__init__.py
|
MPalarya/RAMP
|
9792879e52b49586b06ac3fc002eab0c21c11bc6
|
[
"BSD-2-Clause"
] | null | null | null |
RAMP/disposableredis/__init__.py
|
MPalarya/RAMP
|
9792879e52b49586b06ac3fc002eab0c21c11bc6
|
[
"BSD-2-Clause"
] | null | null | null |
RAMP/disposableredis/__init__.py
|
MPalarya/RAMP
|
9792879e52b49586b06ac3fc002eab0c21c11bc6
|
[
"BSD-2-Clause"
] | null | null | null |
import subprocess
import socket
import tempfile
import redis
import time
import os
import itertools
import sys
# Environment variable pointing to the redis executable
REDIS_PATH_ENVVAR = 'REDIS_PATH'
def get_random_port():
    sock = socket.socket()
    sock.bind(('', 0))  # bind to port 0 so the OS assigns a free ephemeral port
    sock.listen(0)
    _, port = sock.getsockname()
sock.close()
return port
class DisposableRedis(object):
def __init__(self, port=None, path='redis-server', **extra_args):
"""
:param port: port number to start the redis server on. Specify none to automatically generate
:type port: int|None
:param extra_args: any extra arguments kwargs will be passed to redis server as --key val
"""
self._port = port
# this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None
# in that case `port` is randomly generated
self.port = None
self.extra_args = list(itertools.chain(
*(('--%s'%k, v) for k, v in extra_args.items())
))
self.path = os.getenv(REDIS_PATH_ENVVAR, path)
def __enter__(self):
if self._port is None:
self.port = get_random_port()
else:
self.port = self._port
args = [self.path,
'--port', str(self.port),
'--dir', tempfile.gettempdir(),
'--save', ''] + self.extra_args
self.process = subprocess.Popen(
args,
#cwd=os.getcwd(),
stdin=subprocess.PIPE,
stdout=open(os.devnull, 'w')
# stdout=sys.stdout,
# env=os.environ.copy()
)
while True:
try:
self.client().ping()
break
except redis.ConnectionError:
self.process.poll()
if self.process.returncode is not None:
raise RuntimeError("Process has exited")
time.sleep(0.1)
return self.client()
def __exit__(self, exc_type, exc_val, exc_tb):
self.process.terminate()
def client(self):
"""
:rtype: redis.StrictRedis
"""
return redis.StrictRedis(port=self.port, decode_responses=True)
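# Editor's usage sketch (not part of the original module): the class is a
# context manager, so a throwaway server exists only for the `with` block.
# Assumes a `redis-server` binary is on PATH (or named by $REDIS_PATH).
if __name__ == "__main__":
    with DisposableRedis() as r:
        r.set("greeting", "hello")
        assert r.get("greeting") == "hello"
    # on exit, __exit__ has terminated the redis-server process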
| 27.439024
| 112
| 0.569333
|
import subprocess
import socket
import tempfile
import redis
import time
import os
import itertools
import sys
REDIS_PATH_ENVVAR = 'REDIS_PATH'
def get_random_port():
    sock = socket.socket()
    sock.bind(('', 0))
    sock.listen(0)
    _, port = sock.getsockname()
sock.close()
return port
class DisposableRedis(object):
def __init__(self, port=None, path='redis-server', **extra_args):
self._port = port
# in that case `port` is randomly generated
self.port = None
self.extra_args = list(itertools.chain(
*(('--%s'%k, v) for k, v in extra_args.items())
))
self.path = os.getenv(REDIS_PATH_ENVVAR, path)
def __enter__(self):
if self._port is None:
self.port = get_random_port()
else:
self.port = self._port
args = [self.path,
'--port', str(self.port),
'--dir', tempfile.gettempdir(),
'--save', ''] + self.extra_args
self.process = subprocess.Popen(
args,
#cwd=os.getcwd(),
stdin=subprocess.PIPE,
stdout=open(os.devnull, 'w')
# stdout=sys.stdout,
# env=os.environ.copy()
)
while True:
try:
self.client().ping()
break
except redis.ConnectionError:
self.process.poll()
if self.process.returncode is not None:
raise RuntimeError("Process has exited")
time.sleep(0.1)
return self.client()
def __exit__(self, exc_type, exc_val, exc_tb):
self.process.terminate()
def client(self):
return redis.StrictRedis(port=self.port, decode_responses=True)
| true
| true
|
790a863e1b7c7976c78fdf15265431950cd90024
| 5,163
|
py
|
Python
|
espnet2/gan_tts/espnet_model.py
|
actboy/espnet
|
c0ca15e9da6e89ff6df5fe70ed08654deeca2ac0
|
[
"Apache-2.0"
] | null | null | null |
espnet2/gan_tts/espnet_model.py
|
actboy/espnet
|
c0ca15e9da6e89ff6df5fe70ed08654deeca2ac0
|
[
"Apache-2.0"
] | 1
|
2021-08-11T08:35:36.000Z
|
2021-08-13T07:12:47.000Z
|
espnet2/gan_tts/espnet_model.py
|
shirayu/espnet
|
66f0f8382b0e1195bed7c280c29711f8436b3db4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B, 1).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
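# Editor's hedged sketch (not part of this module): the alternating GAN update
# implied by `forward_generator`. `model` is an ESPnetGANTTSModel, `batch` the
# keyword arguments of forward(), and `optimizers` a hypothetical pair of
# torch optimizers indexed by the returned `optim_idx` (0 for G, 1 for D).
def train_step(model, batch, optimizers):
    for forward_generator in (True, False):
        output = model(forward_generator=forward_generator, **batch)
        optimizer = optimizers[output["optim_idx"]]
        optimizer.zero_grad()
        output["loss"].backward()
        optimizer.step()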
| 35.363014
| 81
| 0.627929
|
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
@contextmanager
def autocast(enabled=True):
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
assert hasattr(
tts, "generator"
), "generator module must be resistered as tts.generator"
assert hasattr(
tts, "discriminator"
), "discriminator module must be resistered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
with autocast(False):
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
| true
| true
|
790a86809267d0bf3803ad8266f5d236462f5f31
| 4,626
|
py
|
Python
|
homeassistant/components/github/coordinator.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/github/coordinator.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | 24
|
2021-11-03T06:20:16.000Z
|
2022-03-31T06:23:17.000Z
|
homeassistant/components/github/coordinator.py
|
aomann/core
|
5e71e7b775461cd4849c36075c6a1459a7d0ad22
|
[
"Apache-2.0"
] | null | null | null |
"""Custom data update coordinators for the GitHub integration."""
from __future__ import annotations
from typing import Literal, TypedDict
from aiogithubapi import (
GitHubAPI,
GitHubCommitModel,
GitHubException,
GitHubReleaseModel,
GitHubRepositoryModel,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, T
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_UPDATE_INTERVAL, DOMAIN, LOGGER, IssuesPulls
CoordinatorKeyType = Literal["information", "release", "issue", "commit"]
class GitHubBaseDataUpdateCoordinator(DataUpdateCoordinator[T]):
"""Base class for GitHub data update coordinators."""
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
client: GitHubAPI,
repository: str,
) -> None:
"""Initialize GitHub data update coordinator base class."""
self.config_entry = entry
self.repository = repository
self._client = client
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_interval=DEFAULT_UPDATE_INTERVAL,
)
async def fetch_data(self) -> T:
"""Fetch data from GitHub API."""
async def _async_update_data(self) -> T:
try:
return await self.fetch_data()
except GitHubException as exception:
LOGGER.exception(exception)
raise UpdateFailed(exception) from exception
class RepositoryInformationDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubRepositoryModel]
):
"""Data update coordinator for repository information."""
async def fetch_data(self) -> GitHubRepositoryModel:
"""Get the latest data from GitHub."""
result = await self._client.repos.get(self.repository)
return result.data
class RepositoryReleaseDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubReleaseModel]
):
"""Data update coordinator for repository release."""
async def fetch_data(self) -> GitHubReleaseModel | None:
"""Get the latest data from GitHub."""
result = await self._client.repos.releases.list(
self.repository, **{"params": {"per_page": 1}}
)
if not result.data:
return None
for release in result.data:
if not release.prerelease:
return release
# Fall back to the latest release if no non-prerelease release is found
return result.data[0]
class RepositoryIssueDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[IssuesPulls]
):
"""Data update coordinator for repository issues."""
async def fetch_data(self) -> IssuesPulls:
"""Get the latest data from GitHub."""
base_issue_response = await self._client.repos.issues.list(
self.repository, **{"params": {"per_page": 1}}
)
pull_response = await self._client.repos.pulls.list(
self.repository, **{"params": {"per_page": 1}}
)
pulls_count = pull_response.last_page_number or 0
issues_count = (base_issue_response.last_page_number or 0) - pulls_count
issue_last = base_issue_response.data[0] if issues_count != 0 else None
if issue_last is not None and issue_last.pull_request:
issue_response = await self._client.repos.issues.list(self.repository)
for issue in issue_response.data:
if not issue.pull_request:
issue_last = issue
break
return IssuesPulls(
issues_count=issues_count,
issue_last=issue_last,
pulls_count=pulls_count,
pull_last=pull_response.data[0] if pulls_count != 0 else None,
)
class RepositoryCommitDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubCommitModel]
):
"""Data update coordinator for repository commit."""
async def fetch_data(self) -> GitHubCommitModel | None:
"""Get the latest data from GitHub."""
result = await self._client.repos.list_commits(
self.repository, **{"params": {"per_page": 1}}
)
return result.data[0] if result.data else None
class DataUpdateCoordinators(TypedDict):
"""Custom data update coordinators for the GitHub integration."""
information: RepositoryInformationDataUpdateCoordinator
release: RepositoryReleaseDataUpdateCoordinator
issue: RepositoryIssueDataUpdateCoordinator
commit: RepositoryCommitDataUpdateCoordinator
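# Editor's hedged sketch (not part of this module): wiring the four
# coordinators into the TypedDict above. `hass`, `entry`, `client`, and
# `repository` are supplied by the integration's setup code elsewhere.
def build_coordinators(
    hass: HomeAssistant, entry: ConfigEntry, client: GitHubAPI, repository: str
) -> DataUpdateCoordinators:
    return DataUpdateCoordinators(
        information=RepositoryInformationDataUpdateCoordinator(hass, entry, client, repository),
        release=RepositoryReleaseDataUpdateCoordinator(hass, entry, client, repository),
        issue=RepositoryIssueDataUpdateCoordinator(hass, entry, client, repository),
        commit=RepositoryCommitDataUpdateCoordinator(hass, entry, client, repository),
    )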
| 32.577465
| 88
| 0.676827
|
from __future__ import annotations
from typing import Literal, TypedDict
from aiogithubapi import (
GitHubAPI,
GitHubCommitModel,
GitHubException,
GitHubReleaseModel,
GitHubRepositoryModel,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, T
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_UPDATE_INTERVAL, DOMAIN, LOGGER, IssuesPulls
CoordinatorKeyType = Literal["information", "release", "issue", "commit"]
class GitHubBaseDataUpdateCoordinator(DataUpdateCoordinator[T]):
def __init__(
self,
hass: HomeAssistant,
entry: ConfigEntry,
client: GitHubAPI,
repository: str,
) -> None:
self.config_entry = entry
self.repository = repository
self._client = client
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_interval=DEFAULT_UPDATE_INTERVAL,
)
async def fetch_data(self) -> T:
async def _async_update_data(self) -> T:
try:
return await self.fetch_data()
except GitHubException as exception:
LOGGER.exception(exception)
raise UpdateFailed(exception) from exception
class RepositoryInformationDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubRepositoryModel]
):
async def fetch_data(self) -> GitHubRepositoryModel:
result = await self._client.repos.get(self.repository)
return result.data
class RepositoryReleaseDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubReleaseModel]
):
async def fetch_data(self) -> GitHubReleaseModel | None:
result = await self._client.repos.releases.list(
self.repository, **{"params": {"per_page": 1}}
)
if not result.data:
return None
for release in result.data:
if not release.prerelease:
return release
return result.data[0]
class RepositoryIssueDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[IssuesPulls]
):
async def fetch_data(self) -> IssuesPulls:
base_issue_response = await self._client.repos.issues.list(
self.repository, **{"params": {"per_page": 1}}
)
pull_response = await self._client.repos.pulls.list(
self.repository, **{"params": {"per_page": 1}}
)
pulls_count = pull_response.last_page_number or 0
issues_count = (base_issue_response.last_page_number or 0) - pulls_count
issue_last = base_issue_response.data[0] if issues_count != 0 else None
if issue_last is not None and issue_last.pull_request:
issue_response = await self._client.repos.issues.list(self.repository)
for issue in issue_response.data:
if not issue.pull_request:
issue_last = issue
break
return IssuesPulls(
issues_count=issues_count,
issue_last=issue_last,
pulls_count=pulls_count,
pull_last=pull_response.data[0] if pulls_count != 0 else None,
)
class RepositoryCommitDataUpdateCoordinator(
GitHubBaseDataUpdateCoordinator[GitHubCommitModel]
):
async def fetch_data(self) -> GitHubCommitModel | None:
result = await self._client.repos.list_commits(
self.repository, **{"params": {"per_page": 1}}
)
return result.data[0] if result.data else None
class DataUpdateCoordinators(TypedDict):
information: RepositoryInformationDataUpdateCoordinator
release: RepositoryReleaseDataUpdateCoordinator
issue: RepositoryIssueDataUpdateCoordinator
commit: RepositoryCommitDataUpdateCoordinator
| true
| true
|
790a87ee16722b26778f2467d7928438934aff0e
| 1,035
|
py
|
Python
|
examples/2in13-hello-world/test.py
|
piratebriggs/micropython-waveshare-epaper
|
cd3688656276fd1622f65cf3fae65017f4fe3cec
|
[
"MIT"
] | null | null | null |
examples/2in13-hello-world/test.py
|
piratebriggs/micropython-waveshare-epaper
|
cd3688656276fd1622f65cf3fae65017f4fe3cec
|
[
"MIT"
] | null | null | null |
examples/2in13-hello-world/test.py
|
piratebriggs/micropython-waveshare-epaper
|
cd3688656276fd1622f65cf3fae65017f4fe3cec
|
[
"MIT"
] | null | null | null |
import epaper2in13
from machine import Pin,SPI
from time import sleep_ms
# SPI #2 on ESP32
spi = SPI(2,baudrate=2000000, polarity=0, phase=0) # miso=Pin(12), mosi=Pin(23), sck=Pin(18))
cs = Pin(5)
dc = Pin(2)
rst = Pin(15)
busy = Pin(4)
e = epaper2in13.EPD(spi, cs, dc, rst, busy)
e.init(e.FULL_UPDATE)
y_start = 6 # Y addresses start at 6 due to the memory layout
import framebuf
buf = bytearray(e.width * e.height // 8)
fb = framebuf.FrameBuffer(buf, e.height, e.width, framebuf.MONO_VLSB)
# --------------------
fb.fill(0)
fb.text('MicroPython!', 2, y_start + 2, 0xffff)
fb.rect(0, y_start, 250, 122, 0xffff)
e.set_frame_memory(buf,0,0,e.width,e.height)
e.display_frame()
sleep_ms(2000) # wait for 2 seconds before doing a partial update
# --------------------
e.init(e.PART_UPDATE)
fb = framebuf.FrameBuffer(buf, 200, 32, framebuf.MONO_VLSB)
fb.fill(0x0)
for i in range(0,32//2-1,2):
fb.rect(i, i, 200-i*2, 32-i*2, 0xffff)
e.set_frame_memory(buf,8,32,32,200) # 8px from bottom, 25px from left
e.display_frame()
| 23
| 93
| 0.671498
|
import epaper2in13
from machine import Pin,SPI
from time import sleep_ms
spi = SPI(2,baudrate=2000000, polarity=0, phase=0)
cs = Pin(5)
dc = Pin(2)
rst = Pin(15)
busy = Pin(4)
e = epaper2in13.EPD(spi, cs, dc, rst, busy)
e.init(e.FULL_UPDATE)
y_start = 6
import framebuf
buf = bytearray(e.width * e.height // 8)
fb = framebuf.FrameBuffer(buf, e.height, e.width, framebuf.MONO_VLSB)
fb.fill(0)
fb.text('MicroPython!', 2, y_start + 2, 0xffff)
fb.rect(0, y_start, 250, 122, 0xffff)
e.set_frame_memory(buf,0,0,e.width,e.height)
e.display_frame()
sleep_ms(2000)
e.init(e.PART_UPDATE)
fb = framebuf.FrameBuffer(buf, 200, 32, framebuf.MONO_VLSB)
fb.fill(0x0)
for i in range(0,32//2-1,2):
fb.rect(i, i, 200-i*2, 32-i*2, 0xffff)
e.set_frame_memory(buf,8,32,32,200)
e.display_frame()
| true
| true
|
790a87ee4b1cbb7e426d112803d5040a6008abca
| 3,908
|
py
|
Python
|
spare/wallet/wallet_interested_store.py
|
Spare-Network/spare-blockchain
|
9ea2677c73570131cfd02447b9cdc64cf01e0909
|
[
"Apache-2.0"
] | 122
|
2021-06-18T23:51:22.000Z
|
2022-01-15T17:51:49.000Z
|
spare/wallet/wallet_interested_store.py
|
zcomputerwiz/spare-blockchain
|
c48fe41ac3b2aae2e76fce0e44ab0647530147ee
|
[
"Apache-2.0"
] | 165
|
2021-06-18T23:12:20.000Z
|
2021-11-14T06:02:04.000Z
|
spare/wallet/wallet_interested_store.py
|
zcomputerwiz/spare-blockchain
|
c48fe41ac3b2aae2e76fce0e44ab0647530147ee
|
[
"Apache-2.0"
] | 58
|
2021-06-18T23:10:50.000Z
|
2022-03-15T08:44:02.000Z
|
from typing import List, Tuple, Optional
import aiosqlite
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.util.db_wrapper import DBWrapper
class WalletInterestedStore:
"""
Stores coin ids that we are interested in receiving
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute("CREATE TABLE IF NOT EXISTS interested_coins(coin_name text PRIMARY KEY)")
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)"
)
await self.db_connection.commit()
return self
async def _clear_database(self):
        cursor = await self.db_connection.execute("DELETE FROM interested_puzzle_hashes")
await cursor.close()
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def get_interested_coin_ids(self) -> List[bytes32]:
cursor = await self.db_connection.execute("SELECT coin_name FROM interested_coins")
rows_hex = await cursor.fetchall()
return [bytes32(bytes.fromhex(row[0])) for row in rows_hex]
async def add_interested_coin_id(self, coin_id: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_coins VALUES (?)", (coin_id.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_interested_puzzle_hashes(self) -> List[Tuple[bytes32, int]]:
cursor = await self.db_connection.execute("SELECT puzzle_hash, wallet_id FROM interested_puzzle_hashes")
rows_hex = await cursor.fetchall()
return [(bytes32(bytes.fromhex(row[0])), row[1]) for row in rows_hex]
async def get_interested_puzzle_hash_wallet_id(self, puzzle_hash: bytes32) -> Optional[int]:
cursor = await self.db_connection.execute(
"SELECT wallet_id FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
if row is None:
return None
return row[0]
async def add_interested_puzzle_hash(
self, puzzle_hash: bytes32, wallet_id: int, in_transaction: bool = False
) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_puzzle_hashes VALUES (?, ?)", (puzzle_hash.hex(), wallet_id)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def remove_interested_puzzle_hash(self, puzzle_hash: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"DELETE FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
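# Editor's hedged usage sketch (not part of this module): a typical add/read
# round trip. The DBWrapper is assumed to be created by the wallet's startup
# code; the all-zero coin id below is purely illustrative.
async def _example_round_trip(wrapper: DBWrapper) -> None:
    store = await WalletInterestedStore.create(wrapper)
    coin_id = bytes32(b"\x00" * 32)
    await store.add_interested_coin_id(coin_id)
    assert coin_id in await store.get_interested_coin_ids()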
| 38.313725
| 115
| 0.651228
|
from typing import List, Tuple, Optional
import aiosqlite
from spare.types.blockchain_format.sized_bytes import bytes32
from spare.util.db_wrapper import DBWrapper
class WalletInterestedStore:
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute("pragma journal_mode=wal")
await self.db_connection.execute("pragma synchronous=2")
await self.db_connection.execute("CREATE TABLE IF NOT EXISTS interested_coins(coin_name text PRIMARY KEY)")
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)"
)
await self.db_connection.commit()
return self
async def _clear_database(self):
        cursor = await self.db_connection.execute("DELETE FROM interested_puzzle_hashes")
await cursor.close()
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def get_interested_coin_ids(self) -> List[bytes32]:
cursor = await self.db_connection.execute("SELECT coin_name FROM interested_coins")
rows_hex = await cursor.fetchall()
return [bytes32(bytes.fromhex(row[0])) for row in rows_hex]
async def add_interested_coin_id(self, coin_id: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_coins VALUES (?)", (coin_id.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_interested_puzzle_hashes(self) -> List[Tuple[bytes32, int]]:
cursor = await self.db_connection.execute("SELECT puzzle_hash, wallet_id FROM interested_puzzle_hashes")
rows_hex = await cursor.fetchall()
return [(bytes32(bytes.fromhex(row[0])), row[1]) for row in rows_hex]
async def get_interested_puzzle_hash_wallet_id(self, puzzle_hash: bytes32) -> Optional[int]:
cursor = await self.db_connection.execute(
"SELECT wallet_id FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
if row is None:
return None
return row[0]
async def add_interested_puzzle_hash(
self, puzzle_hash: bytes32, wallet_id: int, in_transaction: bool = False
) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_puzzle_hashes VALUES (?, ?)", (puzzle_hash.hex(), wallet_id)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def remove_interested_puzzle_hash(self, puzzle_hash: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"DELETE FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
| true
| true
|
790a88c4b4ff5f91bb3c1b5a2e0254f4d0d79507
| 732
|
pyw
|
Python
|
mPaintEditor/main.pyw
|
tbttfox/mPaintEditor
|
a62277a10088c384790bbe3a9cd93a09430abfd0
|
[
"MIT"
] | null | null | null |
mPaintEditor/main.pyw
|
tbttfox/mPaintEditor
|
a62277a10088c384790bbe3a9cd93a09430abfd0
|
[
"MIT"
] | null | null | null |
mPaintEditor/main.pyw
|
tbttfox/mPaintEditor
|
a62277a10088c384790bbe3a9cd93a09430abfd0
|
[
"MIT"
] | null | null | null |
##
# :namespace blurdev.mPaintEditor
#
# :remarks GUI to work with the paint in maya
#
# :author [author::email]
# :author [author::company]
# :date 03/22/17
#
# make sure this is being run as the main process
if __name__ in ("__main__", "__builtin__"):
# since this file is being executed in the main scope, we need to register the tool package to the sys.path
import blurdev
blurdev.registerScriptPath(__file__)
# depending on our environment, Python initializes the script differently for scope, so try both methods:
# importing from a sub-module
from mPaintEditor.paintEditorWidget import SkinPaintWin
blurdev.launch(SkinPaintWin, instance=True)
| 31.826087
| 112
| 0.688525
|
if __name__ in ("__main__", "__builtin__"):
import blurdev
blurdev.registerScriptPath(__file__)
from mPaintEditor.paintEditorWidget import SkinPaintWin
blurdev.launch(SkinPaintWin, instance=True)
| true
| true
|
790a89067bea003f40e96fb36818b84c4795b00a
| 17,078
|
py
|
Python
|
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
|
HackReborn/rqalpha
|
ed64335675f7229f069f23812839c4780d55df67
|
[
"Apache-2.0"
] | null | null | null |
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
|
HackReborn/rqalpha
|
ed64335675f7229f069f23812839c4780d55df67
|
[
"Apache-2.0"
] | null | null | null |
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
|
HackReborn/rqalpha
|
ed64335675f7229f069f23812839c4780d55df67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.model.base_position import BasePosition
from rqalpha.environment import Environment
from rqalpha.const import SIDE, POSITION_EFFECT, DEFAULT_ACCOUNT_TYPE
class FuturePosition(BasePosition):
__abandon_properties__ = []
def __init__(self, order_book_id):
super(FuturePosition, self).__init__(order_book_id)
self._buy_old_holding_list = []
self._sell_old_holding_list = []
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
self._buy_avg_open_price = 0.
self._sell_avg_open_price = 0.
def __repr__(self):
return 'FuturePosition({})'.format(self.__dict__)
def get_state(self):
return {
'order_book_id': self._order_book_id,
'buy_old_holding_list': self._buy_old_holding_list,
'sell_old_holding_list': self._sell_old_holding_list,
'buy_today_holding_list': self._buy_today_holding_list,
'sell_today_holding_list': self._sell_today_holding_list,
'buy_transaction_cost': self._buy_transaction_cost,
'sell_transaction_cost': self._sell_transaction_cost,
'buy_realized_pnl': self._buy_realized_pnl,
'sell_realized_pnl': self._sell_realized_pnl,
'buy_avg_open_price': self._buy_avg_open_price,
'sell_avg_open_price': self._sell_avg_open_price,
# margin rate may change
'margin_rate': self.margin_rate,
}
def set_state(self, state):
assert self._order_book_id == state['order_book_id']
self._buy_old_holding_list = state['buy_old_holding_list']
self._sell_old_holding_list = state['sell_old_holding_list']
self._buy_today_holding_list = state['buy_today_holding_list']
self._sell_today_holding_list = state['sell_today_holding_list']
self._buy_transaction_cost = state['buy_transaction_cost']
self._sell_transaction_cost = state['sell_transaction_cost']
self._buy_avg_open_price = state['buy_avg_open_price']
self._sell_avg_open_price = state['sell_avg_open_price']
@property
def type(self):
return DEFAULT_ACCOUNT_TYPE.FUTURE.name
@property
def margin_rate(self):
env = Environment.get_instance()
margin_info = env.data_proxy.get_margin_info(self.order_book_id)
margin_multiplier = env.config.base.margin_multiplier
return margin_info['long_margin_ratio'] * margin_multiplier
@property
def market_value(self):
return (self.buy_quantity - self.sell_quantity) * self.last_price * self.contract_multiplier
@property
def buy_market_value(self):
return self.buy_quantity * self.last_price * self.contract_multiplier
@property
def sell_market_value(self):
return self.sell_quantity * self.last_price * self.contract_multiplier
    # -- PNL related
@property
def contract_multiplier(self):
return Environment.get_instance().get_instrument(self.order_book_id).contract_multiplier
@property
def open_orders(self):
return Environment.get_instance().broker.get_open_orders(self.order_book_id)
@property
def buy_holding_pnl(self):
"""
[float] 买方向当日持仓盈亏
"""
return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_holding_pnl(self):
"""
[float] 卖方向当日持仓盈亏
"""
return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def buy_realized_pnl(self):
"""
[float] 买方向平仓盈亏
"""
return self._buy_realized_pnl
@property
def sell_realized_pnl(self):
"""
[float] 卖方向平仓盈亏
"""
return self._sell_realized_pnl
@property
def holding_pnl(self):
"""
[float] 当日持仓盈亏
"""
return self.buy_holding_pnl + self.sell_holding_pnl
@property
def realized_pnl(self):
"""
[float] 当日平仓盈亏
"""
return self.buy_realized_pnl + self.sell_realized_pnl
@property
def buy_daily_pnl(self):
"""
[float] 当日买方向盈亏
"""
return self.buy_holding_pnl + self.buy_realized_pnl
@property
def sell_daily_pnl(self):
"""
[float] 当日卖方向盈亏
"""
return self.sell_holding_pnl + self.sell_realized_pnl
@property
def daily_pnl(self):
"""
[float] 当日盈亏
"""
return self.holding_pnl + self.realized_pnl
@property
def buy_pnl(self):
"""
[float] 买方向累计盈亏
"""
return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_pnl(self):
"""
[float] 卖方向累计盈亏
"""
return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def pnl(self):
"""
[float] 累计盈亏
"""
return self.buy_pnl + self.sell_pnl
    # -- Quantity related
@property
def buy_open_order_quantity(self):
"""
[int] 买方向挂单量
"""
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)
@property
def sell_open_order_quantity(self):
"""
[int] 卖方向挂单量
"""
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)
@property
def buy_close_order_quantity(self):
"""
[int] 买方向挂单量
"""
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def sell_close_order_quantity(self):
"""
[int] 卖方向挂单量
"""
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def _buy_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _sell_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _closable_today_sell_quantity(self):
return self.sell_today_quantity - self._buy_close_today_order_quantity
@property
def _closable_today_buy_quantity(self):
return self.buy_today_quantity - self._sell_close_today_order_quantity
@property
def buy_old_quantity(self):
"""
[int] 买方向昨仓
"""
return sum(amount for price, amount in self._buy_old_holding_list)
@property
def sell_old_quantity(self):
"""
[int] 卖方向昨仓
"""
return sum(amount for price, amount in self._sell_old_holding_list)
@property
def buy_today_quantity(self):
"""
[int] 买方向今仓
"""
return sum(amount for price, amount in self._buy_today_holding_list)
@property
def sell_today_quantity(self):
"""
[int] 卖方向今仓
"""
return sum(amount for price, amount in self._sell_today_holding_list)
@property
def buy_quantity(self):
"""
[int] 买方向持仓
"""
return self.buy_old_quantity + self.buy_today_quantity
@property
def sell_quantity(self):
"""
[int] 卖方向持仓
"""
return self.sell_old_quantity + self.sell_today_quantity
@property
def closable_buy_quantity(self):
"""
[float] 可平买方向持仓
"""
return self.buy_quantity - self.sell_close_order_quantity
@property
def closable_sell_quantity(self):
"""
[float] 可平卖方向持仓
"""
return self.sell_quantity - self.buy_close_order_quantity
    # -- Margin related
@property
def buy_margin(self):
"""
[float] 买方向持仓保证金
"""
return self._buy_holding_cost * self.margin_rate
@property
def sell_margin(self):
"""
[float] 卖方向持仓保证金
"""
return self._sell_holding_cost * self.margin_rate
@property
def margin(self):
"""
[float] 保证金
"""
# TODO: 需要添加单向大边相关的处理逻辑
return self.buy_margin + self.sell_margin
@property
def buy_avg_holding_price(self):
"""
[float] 买方向持仓均价
"""
return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
@property
def sell_avg_holding_price(self):
"""
[float] 卖方向持仓均价
"""
return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier
@property
def _buy_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.buy_holding_list)
@property
def _sell_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.sell_holding_list)
@property
def buy_holding_list(self):
return self._buy_old_holding_list + self._buy_today_holding_list
@property
def sell_holding_list(self):
return self._sell_old_holding_list + self._sell_today_holding_list
@property
def buy_avg_open_price(self):
return self._buy_avg_open_price
@property
def sell_avg_open_price(self):
return self._sell_avg_open_price
@property
def buy_transaction_cost(self):
return self._buy_transaction_cost
@property
def sell_transaction_cost(self):
return self._sell_transaction_cost
@property
def transaction_cost(self):
return self._buy_transaction_cost + self._sell_transaction_cost
# -- Function
def cal_close_today_amount(self, trade_amount, trade_side):
if trade_side == SIDE.SELL:
close_today_amount = trade_amount - self.buy_old_quantity
else:
close_today_amount = trade_amount - self.sell_old_quantity
return max(close_today_amount, 0)
def apply_settlement(self):
env = Environment.get_instance()
data_proxy = env.data_proxy
trading_date = env.trading_dt.date()
settle_price = data_proxy.get_settle_price(self.order_book_id, trading_date)
self._buy_old_holding_list = [(settle_price, self.buy_quantity)]
self._sell_old_holding_list = [(settle_price, self.sell_quantity)]
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
def _margin_of(self, quantity, price):
env = Environment.get_instance()
instrument = env.data_proxy.instruments(self.order_book_id)
return quantity * instrument.contract_multiplier * price * self.margin_rate
def apply_trade(self, trade):
trade_quantity = trade.last_quantity
if trade.side == SIDE.BUY:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
self._buy_transaction_cost += trade.transaction_cost
self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._sell_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._sell_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
else:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
self._sell_transaction_cost += trade.transaction_cost
self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._buy_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._buy_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
def _close_holding(self, trade):
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
            # close overnight (old) holdings first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's holdings
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
            # close overnight (old) holdings first
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
            # then close today's holdings
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
left_quantity = 0
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
return delta
def _cal_realized_pnl(self, cost_price, trade_price, side, consumed_quantity):
if side == SIDE.BUY:
return (cost_price - trade_price) * consumed_quantity * self.contract_multiplier
else:
return (trade_price - cost_price) * consumed_quantity * self.contract_multiplier
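# Editor's worked example (not part of this module): a short position of 2
# lots opened at 100 with contract multiplier 10, bought back (SIDE.BUY) at
# 95, realizes (100 - 95) * 2 * 10 = 100 via _cal_realized_pnl above.
def _example_realized_pnl():
    cost_price, trade_price, consumed_quantity, contract_multiplier = 100.0, 95.0, 2, 10
    realized = (cost_price - trade_price) * consumed_quantity * contract_multiplier
    assert realized == 100.0
    return realized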
| 35.285124
| 120
| 0.641293
|
from rqalpha.model.base_position import BasePosition
from rqalpha.environment import Environment
from rqalpha.const import SIDE, POSITION_EFFECT, DEFAULT_ACCOUNT_TYPE
class FuturePosition(BasePosition):
__abandon_properties__ = []
def __init__(self, order_book_id):
super(FuturePosition, self).__init__(order_book_id)
self._buy_old_holding_list = []
self._sell_old_holding_list = []
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
self._buy_avg_open_price = 0.
self._sell_avg_open_price = 0.
def __repr__(self):
return 'FuturePosition({})'.format(self.__dict__)
def get_state(self):
return {
'order_book_id': self._order_book_id,
'buy_old_holding_list': self._buy_old_holding_list,
'sell_old_holding_list': self._sell_old_holding_list,
'buy_today_holding_list': self._buy_today_holding_list,
'sell_today_holding_list': self._sell_today_holding_list,
'buy_transaction_cost': self._buy_transaction_cost,
'sell_transaction_cost': self._sell_transaction_cost,
'buy_realized_pnl': self._buy_realized_pnl,
'sell_realized_pnl': self._sell_realized_pnl,
'buy_avg_open_price': self._buy_avg_open_price,
'sell_avg_open_price': self._sell_avg_open_price,
'margin_rate': self.margin_rate,
}
def set_state(self, state):
assert self._order_book_id == state['order_book_id']
self._buy_old_holding_list = state['buy_old_holding_list']
self._sell_old_holding_list = state['sell_old_holding_list']
self._buy_today_holding_list = state['buy_today_holding_list']
self._sell_today_holding_list = state['sell_today_holding_list']
        self._buy_transaction_cost = state['buy_transaction_cost']
        self._sell_transaction_cost = state['sell_transaction_cost']
        self._buy_realized_pnl = state.get('buy_realized_pnl', 0.)
        self._sell_realized_pnl = state.get('sell_realized_pnl', 0.)
        self._buy_avg_open_price = state['buy_avg_open_price']
        self._sell_avg_open_price = state['sell_avg_open_price']
@property
def type(self):
return DEFAULT_ACCOUNT_TYPE.FUTURE.name
@property
def margin_rate(self):
env = Environment.get_instance()
margin_info = env.data_proxy.get_margin_info(self.order_book_id)
margin_multiplier = env.config.base.margin_multiplier
return margin_info['long_margin_ratio'] * margin_multiplier
@property
def market_value(self):
return (self.buy_quantity - self.sell_quantity) * self.last_price * self.contract_multiplier
@property
def buy_market_value(self):
return self.buy_quantity * self.last_price * self.contract_multiplier
@property
def sell_market_value(self):
return self.sell_quantity * self.last_price * self.contract_multiplier
@property
def contract_multiplier(self):
return Environment.get_instance().get_instrument(self.order_book_id).contract_multiplier
@property
def open_orders(self):
return Environment.get_instance().broker.get_open_orders(self.order_book_id)
@property
def buy_holding_pnl(self):
return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_holding_pnl(self):
return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def buy_realized_pnl(self):
return self._buy_realized_pnl
@property
def sell_realized_pnl(self):
return self._sell_realized_pnl
@property
def holding_pnl(self):
return self.buy_holding_pnl + self.sell_holding_pnl
@property
def realized_pnl(self):
return self.buy_realized_pnl + self.sell_realized_pnl
@property
def buy_daily_pnl(self):
return self.buy_holding_pnl + self.buy_realized_pnl
@property
def sell_daily_pnl(self):
return self.sell_holding_pnl + self.sell_realized_pnl
@property
def daily_pnl(self):
return self.holding_pnl + self.realized_pnl
@property
def buy_pnl(self):
return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier
@property
def sell_pnl(self):
return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier
@property
def pnl(self):
return self.buy_pnl + self.sell_pnl
@property
def buy_open_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)
@property
def sell_open_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if
order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)
@property
def buy_close_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def sell_close_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
@property
def _buy_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _sell_close_today_order_quantity(self):
return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and
order.position_effect == POSITION_EFFECT.CLOSE_TODAY)
@property
def _closable_today_sell_quantity(self):
return self.sell_today_quantity - self._buy_close_today_order_quantity
@property
def _closable_today_buy_quantity(self):
return self.buy_today_quantity - self._sell_close_today_order_quantity
@property
def buy_old_quantity(self):
return sum(amount for price, amount in self._buy_old_holding_list)
@property
def sell_old_quantity(self):
return sum(amount for price, amount in self._sell_old_holding_list)
@property
def buy_today_quantity(self):
return sum(amount for price, amount in self._buy_today_holding_list)
@property
def sell_today_quantity(self):
return sum(amount for price, amount in self._sell_today_holding_list)
@property
def buy_quantity(self):
return self.buy_old_quantity + self.buy_today_quantity
@property
def sell_quantity(self):
return self.sell_old_quantity + self.sell_today_quantity
@property
def closable_buy_quantity(self):
return self.buy_quantity - self.sell_close_order_quantity
@property
def closable_sell_quantity(self):
return self.sell_quantity - self.buy_close_order_quantity
@property
def buy_margin(self):
return self._buy_holding_cost * self.margin_rate
@property
def sell_margin(self):
return self._sell_holding_cost * self.margin_rate
@property
def margin(self):
return self.buy_margin + self.sell_margin
@property
def buy_avg_holding_price(self):
return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
@property
def sell_avg_holding_price(self):
return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier
@property
def _buy_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.buy_holding_list)
@property
def _sell_holding_cost(self):
return sum(p * a * self.contract_multiplier for p, a in self.sell_holding_list)
@property
def buy_holding_list(self):
return self._buy_old_holding_list + self._buy_today_holding_list
@property
def sell_holding_list(self):
return self._sell_old_holding_list + self._sell_today_holding_list
@property
def buy_avg_open_price(self):
return self._buy_avg_open_price
@property
def sell_avg_open_price(self):
return self._sell_avg_open_price
@property
def buy_transaction_cost(self):
return self._buy_transaction_cost
@property
def sell_transaction_cost(self):
return self._sell_transaction_cost
@property
def transaction_cost(self):
return self._buy_transaction_cost + self._sell_transaction_cost
def cal_close_today_amount(self, trade_amount, trade_side):
if trade_side == SIDE.SELL:
close_today_amount = trade_amount - self.buy_old_quantity
else:
close_today_amount = trade_amount - self.sell_old_quantity
return max(close_today_amount, 0)
def apply_settlement(self):
env = Environment.get_instance()
data_proxy = env.data_proxy
trading_date = env.trading_dt.date()
settle_price = data_proxy.get_settle_price(self.order_book_id, trading_date)
self._buy_old_holding_list = [(settle_price, self.buy_quantity)]
self._sell_old_holding_list = [(settle_price, self.sell_quantity)]
self._buy_today_holding_list = []
self._sell_today_holding_list = []
self._buy_transaction_cost = 0.
self._sell_transaction_cost = 0.
self._buy_realized_pnl = 0.
self._sell_realized_pnl = 0.
def _margin_of(self, quantity, price):
env = Environment.get_instance()
instrument = env.data_proxy.instruments(self.order_book_id)
return quantity * instrument.contract_multiplier * price * self.margin_rate
def apply_trade(self, trade):
trade_quantity = trade.last_quantity
if trade.side == SIDE.BUY:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity +
trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity)
self._buy_transaction_cost += trade.transaction_cost
self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._sell_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._sell_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
else:
if trade.position_effect == POSITION_EFFECT.OPEN:
self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity +
trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity)
self._sell_transaction_cost += trade.transaction_cost
self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity))
return -1 * self._margin_of(trade_quantity, trade.last_price)
else:
old_margin = self.margin
self._buy_transaction_cost += trade.transaction_cost
delta_realized_pnl = self._close_holding(trade)
self._buy_realized_pnl += delta_realized_pnl
return old_margin - self.margin + delta_realized_pnl
def _close_holding(self, trade):
left_quantity = trade.last_quantity
delta = 0
if trade.side == SIDE.BUY:
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0:
old_price, old_quantity = self._sell_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._sell_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
else:
if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0:
old_price, old_quantity = self._buy_old_holding_list.pop()
if old_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)]
else:
consumed_quantity = old_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity)
while True:
if left_quantity <= 0:
break
oldest_price, oldest_quantity = self._buy_today_holding_list.pop()
if oldest_quantity > left_quantity:
consumed_quantity = left_quantity
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity))
left_quantity = 0
else:
consumed_quantity = oldest_quantity
left_quantity -= consumed_quantity
delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity)
return delta
def _cal_realized_pnl(self, cost_price, trade_price, side, consumed_quantity):
if side == SIDE.BUY:
return (cost_price - trade_price) * consumed_quantity * self.contract_multiplier
else:
return (trade_price - cost_price) * consumed_quantity * self.contract_multiplier
| true
| true
|
790a8923219f3926b9522e1f7e477bc3a38453de
| 809
|
py
|
Python
|
ServerlessController/profiles/admin.py
|
pacslab/ChainFaaS
|
f99dd3753de21a93c61cc411b88b7ab2c5da9efe
|
[
"Apache-2.0"
] | 7
|
2020-08-27T23:32:43.000Z
|
2022-02-18T12:08:50.000Z
|
ServerlessController/profiles/admin.py
|
pacslab/ChainFaaS
|
f99dd3753de21a93c61cc411b88b7ab2c5da9efe
|
[
"Apache-2.0"
] | 6
|
2020-11-02T07:03:22.000Z
|
2021-06-10T19:58:48.000Z
|
ServerlessController/profiles/admin.py
|
pacslab/ChainFaaS
|
f99dd3753de21a93c61cc411b88b7ab2c5da9efe
|
[
"Apache-2.0"
] | 2
|
2020-04-16T00:47:21.000Z
|
2021-04-27T07:45:52.000Z
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from profiles.models import Developer, Provider
class DeveloperInline(admin.StackedInline):
model = Developer
can_delete = False
verbose_name_plural = 'Developer'
fk_name = 'user'
class ProviderInline(admin.StackedInline):
model = Provider
can_delete = False
verbose_name_plural = 'Provider'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (DeveloperInline, ProviderInline, )
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
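For context, a hedged sketch of what profiles/models.py must roughly look like for the inlines above to work: fk_name='user' requires each profile model to expose a relation literally named user, typically a OneToOneField. The field shapes here are assumptions, not the actual ChainFaaS models:

# Hypothetical profiles/models.py shape implied by the admin above.
from django.conf import settings
from django.db import models

class Developer(models.Model):
    # StackedInline(fk_name='user') needs a relation named 'user'.
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

class Provider(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

Returning an empty list from get_inline_instances when obj is None hides both inlines on the "add user" page, so profile rows are only edited once the user exists.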
| 26.096774
| 78
| 0.74042
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from profiles.models import Developer, Provider
class DeveloperInline(admin.StackedInline):
model = Developer
can_delete = False
verbose_name_plural = 'Developer'
fk_name = 'user'
class ProviderInline(admin.StackedInline):
model = Provider
can_delete = False
verbose_name_plural = 'Provider'
fk_name = 'user'
class CustomUserAdmin(UserAdmin):
inlines = (DeveloperInline, ProviderInline, )
def get_inline_instances(self, request, obj=None):
if not obj:
return list()
return super(CustomUserAdmin, self).get_inline_instances(request, obj)
admin.site.unregister(User)
admin.site.register(User, CustomUserAdmin)
| true
| true
|
790a8b8bc48ff92dce1875f35046719308d25602
| 9,340
|
py
|
Python
|
src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py
|
c-rizz/autoencoding_rl
|
65775630e87184c8809a31a8ef980853d5b49f9f
|
[
"MIT"
] | null | null | null |
from typing import Tuple
import torch as th
import torch.nn as nn
from torchvision import transforms
from autoencoding_rl.latent_extractors.autoencoder.SimpleEncoder import SimpleEncoder
from autoencoding_rl.latent_extractors.autoencoder.SimpleDecoder import SimpleDecoder
from autoencoding_rl.utils import Transition
class DynAutoencoder(nn.Module):
def __init__(self, observation_width: int,
observation_height: int,
observation_channels_num: int,
dyn_encoding_size: int,
static_encoding_size: int,
action_size: int,
dynamics_nn_arch: Tuple[int, int]):
super().__init__()
self._observation_height = observation_height
self._observation_width = observation_width
self._dyn_encoding_size = dyn_encoding_size
self._static_encoding_size = static_encoding_size
self._action_size = action_size
self._observation_channels_num = observation_channels_num
self._dynamics_nn_arch = dynamics_nn_arch
self._dynEncoder = SimpleEncoder(encoding_size = self._dyn_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
if self._static_encoding_size != 0:
self._staticEncoder = SimpleEncoder(encoding_size = self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
else:
self._staticEncoder = None
self._dynamics_net = th.nn.Sequential( th.nn.Linear(self._dyn_encoding_size+self._action_size, self._dynamics_nn_arch[0]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[0], self._dynamics_nn_arch[1]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[1], self._dyn_encoding_size+1))
self._decoder = SimpleDecoder( encoding_size = self._dyn_encoding_size + self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_output_width = self._observation_width,
net_output_height = self._observation_height)
self._resizeToInput = transforms.Resize((self._observation_height,self._observation_width))
@property
def observation_height(self):
return self._observation_height
@property
def observation_width(self):
return self._observation_width
@property
def dyn_encoding_size(self):
return self._dyn_encoding_size
@property
def static_encoding_size(self):
return self._static_encoding_size
@property
def action_size(self):
return self._action_size
def forward(self, transition_batch : Transition):
observation_batch = transition_batch.observation
action_batch = transition_batch.action
assert action_batch.size()[0] == observation_batch.size()[0], \
f"Observation batch and action batch should have the same length. Action batch size = {action_batch.size()[0]}, observation batch size = {observation_batch.size()[0]}. Action tensor size = {action_batch.size()[0]}. Observation tensor size = {observation_batch.size()[0]}"
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
assert action_batch.size()[1] == self._action_size, \
f"Each action should have size {self._action_size}, not {action_batch.size()[1]}. Tensor has size {action_batch.size()}"
#Compute 'static' features encoding
state_s_0_batch = self.encode_static(observation_batch) #Gives a (batch_size, static_encoding_size) output
#Compute 'dynamic' features encoding
state_d_0_batch = self.encode_dynamic(observation_batch) #Gives a (batch_size, dyn_encoding_size) output
state_d_1_batch, reward_d_1_batch = self.predict_dynamics(state_d_0_batch, action_batch)
#state_d_1_batch now has size (batch_size, dyn_encoding_size)
#reward_d_1_batch now has size (batch_size, 1) (still 2-dimensional)
#Will now use 'static' features vectors and predicted states to predict the observation
observation_1_batch = self.decode(state_s_0_batch,state_d_1_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output
return observation_1_batch, reward_d_1_batch
def encode_dynamic(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
return self._dynEncoder(observation_batch)
def encode_static(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
if self._staticEncoder is not None:
return self._staticEncoder(observation_batch)
else:
return th.empty([observation_batch.size()[0],0]).to(observation_batch.device)
def decode(self, static_encoding_batch : th.Tensor, dynamic_encoding_batch : th.Tensor):
assert static_encoding_batch.size()[0] == dynamic_encoding_batch.size()[0], \
f"static encoding batch and dynamic encoding batch have different sizes, respectively {static_encoding_batch.size()[0]} and {dynamic_encoding_batch.size()[0]}"
assert dynamic_encoding_batch.size() == (dynamic_encoding_batch.size()[0], self._dyn_encoding_size), \
f"dynamic_encoding have wrong size, should be {(dynamic_encoding_batch.size()[0], self._dyn_encoding_size)}, but it's {dynamic_encoding_batch.size()}"
assert static_encoding_batch.size() == (static_encoding_batch.size()[0], self._static_encoding_size), \
f"static_encoding_batch have wrong size, should be {(static_encoding_batch.size()[0], self._static_encoding_size)}, but it's {static_encoding_batch.size()}"
#Combine the two vectors
state_batch = th.cat((static_encoding_batch, dynamic_encoding_batch), 1) #Gives a (batch_size, dyn_encoding_size+static_encoding_size) output
#Predict the observation
return self._decoder(state_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output
def predict_dynamics(self, state_batch : th.Tensor, action_batch : th.Tensor):
assert state_batch.size()[0] == action_batch.size()[0], \
f"state batch and action batch have different sizes, respectively {state_batch.size()[0]} and {action_batch.size()[0]}"
assert state_batch.size()[1] == self._dyn_encoding_size, \
f"States have wrong size, should be {self._dyn_encoding_size}, but it's {state_batch.size()[1]}"
assert action_batch.size()[1] == self._action_size, \
f"Actions have wrong size, should be {self._action_size} but it's {action_batch.size()[1]}"
#Concatenate states and actions
state_action_batch = th.cat((state_batch, action_batch), 1) #Gives a (batch_size, dyn_encoding_size+action_size) output
nextstate_reward_batch = self._dynamics_net(state_action_batch) #Gives a (batch_size, dyn_encoding_size+1) output
nextstate_batch, reward_batch = th.split(nextstate_reward_batch, [self._dyn_encoding_size, 1], 1)
#nextstate_batch now has size (batch_size, dyn_encoding_size)
#reward_batch now has size (batch_size, 1) (still 2-dimensional)
return nextstate_batch, reward_batch
def preprocess_observations(self, observation_batch : th.Tensor):
resized_batch = self._resizeToInput(observation_batch)
# Input should be in the [0,1] range, as this is what torchvision.transforms.ToTensor does
# We move it to [-1,1]
normalized = resized_batch*2 - 1
return normalized
#return resized_batch
def postprocess_observations(self, observation_batch : th.Tensor):
return (observation_batch + 1)/2
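A self-contained check of the value mapping and tensor split used above, with plain torch only; the sizes are illustrative, not taken from the package:

# Sketch of preprocess/postprocess ([0,1] <-> [-1,1]) and the dynamics split.
import torch as th
x = th.rand(2, 1, 8, 8)          # batch in [0,1], as ToTensor yields
pre = x * 2 - 1                  # preprocess_observations mapping
post = (pre + 1) / 2             # postprocess_observations inverts it
assert th.allclose(x, post)
# th.split along dim 1 mirrors predict_dynamics: assuming dyn_encoding_size=4,
# a (batch, 5) output splits into (batch, 4) next-state and (batch, 1) reward.
dyn, reward = th.split(th.zeros(2, 5), [4, 1], 1)
print(dyn.shape, reward.shape)   # torch.Size([2, 4]) torch.Size([2, 1])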
| 57.654321
| 287
| 0.678051
|
from typing import Tuple
import torch as th
import torch.nn as nn
from torchvision import transforms
from autoencoding_rl.latent_extractors.autoencoder.SimpleEncoder import SimpleEncoder
from autoencoding_rl.latent_extractors.autoencoder.SimpleDecoder import SimpleDecoder
from autoencoding_rl.utils import Transition
class DynAutoencoder(nn.Module):
def __init__(self, observation_width: int,
observation_height: int,
observation_channels_num: int,
dyn_encoding_size: int,
static_encoding_size: int,
action_size: int,
dynamics_nn_arch: Tuple[int, int]):
super().__init__()
self._observation_height = observation_height
self._observation_width = observation_width
self._dyn_encoding_size = dyn_encoding_size
self._static_encoding_size = static_encoding_size
self._action_size = action_size
self._observation_channels_num = observation_channels_num
self._dynamics_nn_arch = dynamics_nn_arch
self._dynEncoder = SimpleEncoder(encoding_size = self._dyn_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
if self._static_encoding_size != 0:
self._staticEncoder = SimpleEncoder(encoding_size = self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
else:
self._staticEncoder = None
self._dynamics_net = th.nn.Sequential( th.nn.Linear(self._dyn_encoding_size+self._action_size, self._dynamics_nn_arch[0]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[0], self._dynamics_nn_arch[1]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[1], self._dyn_encoding_size+1))
self._decoder = SimpleDecoder( encoding_size = self._dyn_encoding_size + self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_output_width = self._observation_width,
net_output_height = self._observation_height)
self._resizeToInput = transforms.Resize((self._observation_height,self._observation_width))
@property
def observation_height(self):
return self._observation_height
@property
def observation_width(self):
return self._observation_width
@property
def dyn_encoding_size(self):
return self._dyn_encoding_size
@property
def static_encoding_size(self):
return self._static_encoding_size
@property
def action_size(self):
return self._action_size
def forward(self, transition_batch : Transition):
observation_batch = transition_batch.observation
action_batch = transition_batch.action
assert action_batch.size()[0] == observation_batch.size()[0], \
f"Observation batch and action batch should have the same length. Action batch size = {action_batch.size()[0]}, observation batch size = {observation_batch.size()[0]}. Action tensor size = {action_batch.size()[0]}. Observation tensor size = {observation_batch.size()[0]}"
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
assert action_batch.size()[1] == self._action_size, \
f"Each action should have size {self._action_size}, not {action_batch.size()[1]}. Tensor has size {action_batch.size()}"
state_s_0_batch = self.encode_static(observation_batch)
state_d_0_batch = self.encode_dynamic(observation_batch)
state_d_1_batch, reward_d_1_batch = self.predict_dynamics(state_d_0_batch, action_batch)
observation_1_batch = self.decode(state_s_0_batch,state_d_1_batch)
return observation_1_batch, reward_d_1_batch
def encode_dynamic(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
return self._dynEncoder(observation_batch)
def encode_static(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
if self._staticEncoder is not None:
return self._staticEncoder(observation_batch)
else:
return th.empty([observation_batch.size()[0],0]).to(observation_batch.device)
def decode(self, static_encoding_batch : th.Tensor, dynamic_encoding_batch : th.Tensor):
assert static_encoding_batch.size()[0] == dynamic_encoding_batch.size()[0], \
f"static encoding batch and dynamic encoding batch have different sizes, respectively {static_encoding_batch.size()[0]} and {dynamic_encoding_batch.size()[0]}"
assert dynamic_encoding_batch.size() == (dynamic_encoding_batch.size()[0], self._dyn_encoding_size), \
f"dynamic_encoding have wrong size, should be {(dynamic_encoding_batch.size()[0], self._dyn_encoding_size)}, but it's {dynamic_encoding_batch.size()}"
assert static_encoding_batch.size() == (static_encoding_batch.size()[0], self._static_encoding_size), \
f"static_encoding_batch have wrong size, should be {(static_encoding_batch.size()[0], self._static_encoding_size)}, but it's {static_encoding_batch.size()}"
state_batch = th.cat((static_encoding_batch, dynamic_encoding_batch), 1)
return self._decoder(state_batch)
def predict_dynamics(self, state_batch : th.Tensor, action_batch : th.Tensor):
assert state_batch.size()[0] == action_batch.size()[0], \
f"state batch and action batch have different sizes, respectively {state_batch.size()[0]} and {action_batch.size()[0]}"
assert state_batch.size()[1] == self._dyn_encoding_size, \
f"States have wrong size, should be {self._dyn_encoding_size}, but it's {state_batch.size()[1]}"
assert action_batch.size()[1] == self._action_size, \
f"Actions have wrong size, should be {self._action_size} but it's {action_batch.size()[1]}"
state_action_batch = th.cat((state_batch, action_batch), 1)
nextstate_reward_batch = self._dynamics_net(state_action_batch)
nextstate_batch, reward_batch = th.split(nextstate_reward_batch, [self._dyn_encoding_size, 1], 1)
return nextstate_batch, reward_batch
def preprocess_observations(self, observation_batch : th.Tensor):
resized_batch = self._resizeToInput(observation_batch)
normalized = resized_batch*2 - 1
return normalized
def postprocess_observations(self, observation_batch : th.Tensor):
return (observation_batch + 1)/2
| true
| true
|
790a8bb29bb0f5679c10a2428bc9b7410d3a2916
| 90
|
py
|
Python
|
vae/kpmtest.py
|
vipavlovic/pyprobml
|
59a2edc682d0163955db5e2f27491ad772b60141
|
[
"MIT"
] | 4,895
|
2016-08-17T22:28:34.000Z
|
2022-03-31T17:07:15.000Z
|
vae/kpmtest.py
|
vipavlovic/pyprobml
|
59a2edc682d0163955db5e2f27491ad772b60141
|
[
"MIT"
] | 446
|
2016-09-17T14:35:29.000Z
|
2022-03-31T19:59:33.000Z
|
vae/kpmtest.py
|
vipavlovic/pyprobml
|
59a2edc682d0163955db5e2f27491ad772b60141
|
[
"MIT"
] | 1,160
|
2016-08-18T23:19:27.000Z
|
2022-03-31T12:44:07.000Z
|
import sys
sys.path.append('../scripts')
import pyprobml_utils as pml
pml.test()
print(42)
| 18
| 29
| 0.755556
|
import sys
sys.path.append('../scripts')
import pyprobml_utils as pml
pml.test()
print(42)
| true
| true
|
790a8c654a177355db1d8def9e1544123aa3fe09
| 517
|
py
|
Python
|
dp/climbing_stairs.py
|
vandesa003/leetcode_algo
|
8ebefef685cd25d8e149592f24e3552c8903504a
|
[
"MIT"
] | 1
|
2022-03-23T01:33:42.000Z
|
2022-03-23T01:33:42.000Z
|
dp/climbing_stairs.py
|
vandesa003/leetcode_algo
|
8ebefef685cd25d8e149592f24e3552c8903504a
|
[
"MIT"
] | null | null | null |
dp/climbing_stairs.py
|
vandesa003/leetcode_algo
|
8ebefef685cd25d8e149592f24e3552c8903504a
|
[
"MIT"
] | 1
|
2020-07-24T03:32:30.000Z
|
2020-07-24T03:32:30.000Z
|
"""
Leetcode 70.
Climbing Stairs.
DP.
Similar to the Fibonacci sequence.
Transition: f(n) = f(n-1) + f(n-2).
Time complexity: O(n).
Still not obvious at first how this is DP rather than plain recursion;
caching results in `res` makes it memoized (top-down) DP.
"""
class Solution:
def climbStairs(self, n: int) -> int:
res = [-1] * (n)
def dfs(n):
if n == 1:
return 1
if n == 2:
return 2
if res[n-1] == -1:
res[n-1] = dfs(n-1) + dfs(n-2)
return res[n-1]
else:
return res[n-1]
ans = dfs(n)
return ans
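Since the transition is just f(n) = f(n-1) + f(n-2), the same answer also falls out bottom-up in O(n) time and O(1) space; an illustrative alternative, not part of the original solution:

# Bottom-up version of the same transition.
def climb_stairs_iterative(n: int) -> int:
    a, b = 1, 2        # f(1), f(2)
    if n == 1:
        return a
    for _ in range(n - 2):
        a, b = b, a + b
    return b

assert climb_stairs_iterative(5) == 8      # sequence: 1, 2, 3, 5, 8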
| 19.148148
| 46
| 0.40619
|
class Solution:
def climbStairs(self, n: int) -> int:
res = [-1] * (n)
def dfs(n):
if n == 1:
return 1
if n == 2:
return 2
if res[n-1] == -1:
res[n-1] = dfs(n-1) + dfs(n-2)
return res[n-1]
else:
return res[n-1]
ans = dfs(n)
return ans
| true
| true
|
790a8d02c44b35e70b9a71ad5e8829cc4a08db09
| 545
|
py
|
Python
|
redditcli/api/account.py
|
gobins/python-oauth2
|
2e486ef139569a1eca9275a8a7aa4447e210b29d
|
[
"Apache-2.0"
] | 1
|
2015-07-31T03:05:52.000Z
|
2015-07-31T03:05:52.000Z
|
redditcli/api/account.py
|
gobins/python-oauth2
|
2e486ef139569a1eca9275a8a7aa4447e210b29d
|
[
"Apache-2.0"
] | null | null | null |
redditcli/api/account.py
|
gobins/python-oauth2
|
2e486ef139569a1eca9275a8a7aa4447e210b29d
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'Gobin'
from redditcli.api import base
class Account(base.Resource):
resource_name = 'Account'
class AccountManager(base.ResourceManager):
resource_class = Account
def me(self):
return self._get('/api/v1/me')
def getkarma(self):
return self._get('/api/v1/me/karma')
def getfriends(self):
return self._get('/api/v1/me/friends', 'data')
def getprefs(self):
return self._get('/api/v1/me/prefs')
def gettrophies(self):
return self._get('/api/v1/me/trophies')
| 20.961538
| 54
| 0.645872
|
__author__ = 'Gobin'
from redditcli.api import base
class Account(base.Resource):
resource_name = 'Account'
class AccountManager(base.ResourceManager):
resource_class = Account
def me(self):
return self._get('/api/v1/me')
def getkarma(self):
return self._get('/api/v1/me/karma')
def getfriends(self):
return self._get('/api/v1/me/friends', 'data')
def getprefs(self):
return self._get('/api/v1/me/prefs')
def gettrophies(self):
return self._get('/api/v1/me/trophies')
| true
| true
|
790a8d19143df62f0e2a574153286fa132a54004
| 860
|
py
|
Python
|
vc/migrations/0010_vcdomainprobvisioningconfig_vcfilter.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
vc/migrations/0010_vcdomainprobvisioningconfig_vcfilter.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
vc/migrations/0010_vcdomainprobvisioningconfig_vcfilter.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# vcdomainprovisioningconfig filter
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from django.db import models
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
VCFilter = self.db.mock_model(model_name="VCFilter", db_table="vc_vcfilter")
self.db.add_column(
"vc_vcdomainprovisioningconfig",
"vc_filter",
models.ForeignKey(
VCFilter, verbose_name="VC Filter", null=True, blank=True, on_delete=models.CASCADE
),
)
| 33.076923
| 99
| 0.495349
|
from django.db import models
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
VCFilter = self.db.mock_model(model_name="VCFilter", db_table="vc_vcfilter")
self.db.add_column(
"vc_vcdomainprovisioningconfig",
"vc_filter",
models.ForeignKey(
VCFilter, verbose_name="VC Filter", null=True, blank=True, on_delete=models.CASCADE
),
)
| true
| true
|
790a8d3e219ee2a9d7df7c77d6ce7c72b979a675
| 75
|
py
|
Python
|
by-session/ta-921/j5/list7.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | 2
|
2015-04-29T20:59:35.000Z
|
2018-09-26T13:33:43.000Z
|
by-session/ta-921/j5/list7.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | null | null | null |
by-session/ta-921/j5/list7.py
|
amiraliakbari/sharif-mabani-python
|
5d14a08d165267fe71c28389ddbafe29af7078c5
|
[
"MIT"
] | null | null | null |
a = [1, 1]
for i in range(10):
a.append(a[-1] + a[-2])
print(a)
| 10.714286
| 28
| 0.44
|
a = [1, 1]
for i in range(10):
a.append(a[-1] + a[-2])
print(a)
| false
| true
|
790a8f875b8cada0652f2a4c5df93f78340feb26
| 6,654
|
py
|
Python
|
dival/reconstructors/dip_ct_reconstructor.py
|
MBaltz/dival
|
b7c10ed471d05242312a7d4916900c92e0c36cdb
|
[
"MIT"
] | 42
|
2019-08-06T11:41:14.000Z
|
2022-03-21T08:57:41.000Z
|
dival/reconstructors/dip_ct_reconstructor.py
|
MBaltz/dival
|
b7c10ed471d05242312a7d4916900c92e0c36cdb
|
[
"MIT"
] | 17
|
2019-12-03T22:02:32.000Z
|
2021-09-04T07:29:46.000Z
|
dival/reconstructors/dip_ct_reconstructor.py
|
MBaltz/dival
|
b7c10ed471d05242312a7d4916900c92e0c36cdb
|
[
"MIT"
] | 8
|
2019-10-07T09:21:55.000Z
|
2022-02-24T09:08:01.000Z
|
from warnings import warn
from functools import partial
from tqdm import tqdm
import torch
import numpy as np
from torch.optim import Adam
from torch.nn import MSELoss
from odl.contrib.torch import OperatorModule
from dival.reconstructors import IterativeReconstructor
from dival.reconstructors.networks.unet import UNet
from dival.util.torch_losses import poisson_loss, tv_loss
from dival.util.constants import MU_MAX
MIN = -1000
MAX = 1000
class DeepImagePriorCTReconstructor(IterativeReconstructor):
"""
CT reconstructor applying DIP with TV regularization (see [2]_).
The DIP was introduced in [1]_.
References
----------
.. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018, "Deep Image Prior".
IEEE/CVF Conference on Computer Vision and Pattern Recognition.
https://doi.org/10.1109/CVPR.2018.00984
.. [2] D. Otero Baguer, J. Leuschner, M. Schmidt, 2020, "Computed
Tomography Reconstruction Using Deep Image Prior and Learned
Reconstruction Methods". Inverse Problems.
https://doi.org/10.1088/1361-6420/aba415
"""
HYPER_PARAMS = {
'lr':
{'default': 1e-3,
'range': [1e-5, 1e-1]},
'gamma':
{'default': 1e-4,
'range': [1e-7, 1e-0],
'grid_search_options': {'num_samples': 20}},
'scales':
{'default': 4,
'choices': [3, 4, 5, 6, 7]},
'channels':
{'default': [128] * 5},
'skip_channels':
{'default': [4] * 5},
'iterations':
{'default': 5000,
'range': [1, 50000]},
'loss_function':
{'default': 'mse',
'choices': ['mse', 'poisson']},
'photons_per_pixel': # used by 'poisson' loss function
{'default': 4096,
'range': [1000, 10000]},
'mu_max': # used by 'poisson' loss function
{'default': MU_MAX,
'range': [1., 10000.]}
}
def __init__(self, ray_trafo, callback_func=None,
callback_func_interval=100, show_pbar=True,
torch_manual_seed=10, **kwargs):
"""
Parameters
----------
ray_trafo : `odl.tomo.operators.RayTransform`
The forward operator
callback_func : callable, optional
Callable with signature
``callback_func(iteration, reconstruction, loss)`` that is called
after every `callback_func_interval` iterations, starting
after the first iteration. It is additionally called after the
last iteration.
Note that it differs from the inherited
`IterativeReconstructor.callback` (which is also supported) in that
the latter is of type :class:`odl.solvers.util.callback.Callback`,
which only receives the reconstruction, such that the loss would
have to be recomputed.
callback_func_interval : int, optional
Number of iterations between calls to `callback_func`.
Default: `100`.
show_pbar : bool, optional
Whether to show a tqdm progress bar during reconstruction.
torch_manual_seed : int, optional
Fixed seed to set by ``torch.manual_seed`` before reconstruction.
The default is `10`. It can be set to `None` or `False` to disable
the manual seed.
"""
super().__init__(
reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
**kwargs)
self.callback_func = callback_func
self.ray_trafo = ray_trafo
self.ray_trafo_module = OperatorModule(self.ray_trafo)
self.callback_func_interval = callback_func_interval
self.show_pbar = show_pbar
self.torch_manual_seed = torch_manual_seed
def get_activation(self, layer_index):
return self.model.layer_output(self.net_input, layer_index)
def _reconstruct(self, observation, *args, **kwargs):
if self.torch_manual_seed:
torch.random.manual_seed(self.torch_manual_seed)
output_depth = 1
input_depth = 1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net_input = 0.1 * \
torch.randn(input_depth, *self.reco_space.shape)[None].to(device)
self.model = UNet(
input_depth,
output_depth,
channels=self.channels[:self.scales],
skip_channels=self.skip_channels[:self.scales],
use_sigmoid=True,
use_norm=True).to(device)
self.optimizer = Adam(self.model.parameters(), lr=self.lr)
y_delta = torch.tensor(np.asarray(observation), dtype=torch.float32)
y_delta = y_delta.view(1, 1, *y_delta.shape)
y_delta = y_delta.to(device)
if self.loss_function == 'mse':
criterion = MSELoss()
elif self.loss_function == 'poisson':
criterion = partial(poisson_loss,
photons_per_pixel=self.photons_per_pixel,
mu_max=self.mu_max)
else:
warn('Unknown loss function, falling back to MSE')
criterion = MSELoss()
best_loss = np.inf
best_output = self.model(self.net_input).detach()
for i in tqdm(range(self.iterations),
desc='DIP', disable=not self.show_pbar):
self.optimizer.zero_grad()
output = self.model(self.net_input)
loss = criterion(self.ray_trafo_module(output),
y_delta) + self.gamma * tv_loss(output)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
self.optimizer.step()
for p in self.model.parameters():
p.data.clamp_(MIN, MAX)
if loss.item() < best_loss:
best_loss = loss.item()
best_output = output.detach()
if (self.callback_func is not None and
(i % self.callback_func_interval == 0
or i == self.iterations-1)):
self.callback_func(
iteration=i,
reconstruction=best_output[0, 0, ...].cpu().numpy(),
loss=best_loss)
if self.callback is not None:
self.callback(self.reco_space.element(
best_output[0, 0, ...].cpu().numpy()))
return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())
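The __init__ docstring above fixes the callback contract as callback_func(iteration, reconstruction, loss). A minimal logging callback matching that contract; constructing the reconstructor itself needs an odl RayTransform and is only hinted at here:

# Illustrative callback; names other than the three parameters are assumptions.
def log_progress(iteration, reconstruction, loss):
    # reconstruction arrives as a 2-D numpy array (see the call site above)
    print('iter {:5d}  loss {:.3e}  recon range [{:.3f}, {:.3f}]'.format(
        iteration, loss, reconstruction.min(), reconstruction.max()))

# reconstructor = DeepImagePriorCTReconstructor(ray_trafo,
#                                               callback_func=log_progress)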
| 37.382022
| 79
| 0.585512
|
from warnings import warn
from functools import partial
from tqdm import tqdm
import torch
import numpy as np
from torch.optim import Adam
from torch.nn import MSELoss
from odl.contrib.torch import OperatorModule
from dival.reconstructors import IterativeReconstructor
from dival.reconstructors.networks.unet import UNet
from dival.util.torch_losses import poisson_loss, tv_loss
from dival.util.constants import MU_MAX
MIN = -1000
MAX = 1000
class DeepImagePriorCTReconstructor(IterativeReconstructor):
HYPER_PARAMS = {
'lr':
{'default': 1e-3,
'range': [1e-5, 1e-1]},
'gamma':
{'default': 1e-4,
'range': [1e-7, 1e-0],
'grid_search_options': {'num_samples': 20}},
'scales':
{'default': 4,
'choices': [3, 4, 5, 6, 7]},
'channels':
{'default': [128] * 5},
'skip_channels':
{'default': [4] * 5},
'iterations':
{'default': 5000,
'range': [1, 50000]},
'loss_function':
{'default': 'mse',
'choices': ['mse', 'poisson']},
'photons_per_pixel':
{'default': 4096,
'range': [1000, 10000]},
'mu_max':
{'default': MU_MAX,
'range': [1., 10000.]}
}
def __init__(self, ray_trafo, callback_func=None,
callback_func_interval=100, show_pbar=True,
torch_manual_seed=10, **kwargs):
super().__init__(
reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
**kwargs)
self.callback_func = callback_func
self.ray_trafo = ray_trafo
self.ray_trafo_module = OperatorModule(self.ray_trafo)
self.callback_func_interval = callback_func_interval
self.show_pbar = show_pbar
self.torch_manual_seed = torch_manual_seed
def get_activation(self, layer_index):
return self.model.layer_output(self.net_input, layer_index)
def _reconstruct(self, observation, *args, **kwargs):
if self.torch_manual_seed:
torch.random.manual_seed(self.torch_manual_seed)
output_depth = 1
input_depth = 1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net_input = 0.1 * \
torch.randn(input_depth, *self.reco_space.shape)[None].to(device)
self.model = UNet(
input_depth,
output_depth,
channels=self.channels[:self.scales],
skip_channels=self.skip_channels[:self.scales],
use_sigmoid=True,
use_norm=True).to(device)
self.optimizer = Adam(self.model.parameters(), lr=self.lr)
y_delta = torch.tensor(np.asarray(observation), dtype=torch.float32)
y_delta = y_delta.view(1, 1, *y_delta.shape)
y_delta = y_delta.to(device)
if self.loss_function == 'mse':
criterion = MSELoss()
elif self.loss_function == 'poisson':
criterion = partial(poisson_loss,
photons_per_pixel=self.photons_per_pixel,
mu_max=self.mu_max)
else:
warn('Unknown loss function, falling back to MSE')
criterion = MSELoss()
best_loss = np.inf
best_output = self.model(self.net_input).detach()
for i in tqdm(range(self.iterations),
desc='DIP', disable=not self.show_pbar):
self.optimizer.zero_grad()
output = self.model(self.net_input)
loss = criterion(self.ray_trafo_module(output),
y_delta) + self.gamma * tv_loss(output)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
self.optimizer.step()
for p in self.model.parameters():
p.data.clamp_(MIN, MAX)
if loss.item() < best_loss:
best_loss = loss.item()
best_output = output.detach()
if (self.callback_func is not None and
(i % self.callback_func_interval == 0
or i == self.iterations-1)):
self.callback_func(
iteration=i,
reconstruction=best_output[0, 0, ...].cpu().numpy(),
loss=best_loss)
if self.callback is not None:
self.callback(self.reco_space.element(
best_output[0, 0, ...].cpu().numpy()))
return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())
| true
| true
|
790a90d9e219283d15ab2f1ac1cc2c1fb3f96444
| 31,894
|
py
|
Python
|
tests/filtered_relation/tests.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 2
|
2022-02-22T17:20:19.000Z
|
2022-03-01T16:06:56.000Z
|
tests/filtered_relation/tests.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | 1
|
2022-02-20T18:28:13.000Z
|
2022-02-20T18:32:04.000Z
|
tests/filtered_relation/tests.py
|
jpmallarino/django
|
659d2421c7adbbcd205604002d521d82d6b0b465
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
from datetime import date
from decimal import Decimal
from unittest import mock
from django.db import connection, transaction
from django.db.models import (
Case,
Count,
DecimalField,
F,
FilteredRelation,
Q,
Sum,
When,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from .models import (
Author,
Book,
BookDailySales,
Borrower,
Currency,
Editor,
ExchangeRate,
RentalSession,
Reservation,
Seller,
)
class FilteredRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name="Alice")
cls.author2 = Author.objects.create(name="Jane")
cls.editor_a = Editor.objects.create(name="a")
cls.editor_b = Editor.objects.create(name="b")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.book1.generic_author.set([cls.author2])
cls.book2 = Book.objects.create(
title="The book by Jane A",
editor=cls.editor_b,
author=cls.author2,
)
cls.book3 = Book.objects.create(
title="The book by Jane B",
editor=cls.editor_b,
author=cls.author2,
)
cls.book4 = Book.objects.create(
title="The book by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.author1.favorite_books.add(cls.book2)
cls.author1.favorite_books.add(cls.book3)
def test_select_related(self):
qs = (
Author.objects.annotate(
book_join=FilteredRelation("book"),
)
.select_related("book_join__editor")
.order_by("pk", "book_join__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.author1, self.book1, self.editor_a, self.author1),
(self.author1, self.book4, self.editor_a, self.author1),
(self.author2, self.book2, self.editor_b, self.author2),
(self.author2, self.book3, self.editor_b, self.author2),
],
lambda x: (x, x.book_join, x.book_join.editor, x.book_join.author),
)
def test_select_related_multiple(self):
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
editor_join=FilteredRelation("editor"),
)
.select_related("author_join", "editor_join")
.order_by("pk")
)
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1, self.editor_a),
(self.book2, self.author2, self.editor_b),
(self.book3, self.author2, self.editor_b),
(self.book4, self.author1, self.editor_a),
],
lambda x: (x, x.author_join, x.editor_join),
)
def test_select_related_with_empty_relation(self):
qs = (
Author.objects.annotate(
book_join=FilteredRelation("book", condition=Q(pk=-1)),
)
.select_related("book_join")
.order_by("pk")
)
self.assertSequenceEqual(qs, [self.author1, self.author2])
def test_select_related_foreign_key(self):
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
)
.select_related("author_join")
.order_by("pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
],
lambda x: (x, x.author_join),
)
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_select_related_foreign_key_for_update_of(self):
with transaction.atomic():
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
)
.select_related("author_join")
.select_for_update(of=("self",))
.order_by("pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
],
lambda x: (x, x.author_join),
)
def test_without_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
),
[self.author1, self.author2],
)
def test_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False),
[self.author1],
)
def test_with_exclude(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).exclude(book_alice__isnull=False),
[self.author2],
)
def test_with_join_and_complex_condition(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book",
condition=Q(
Q(book__title__iexact="poem by alice")
| Q(book__state=Book.RENTED)
),
),
).filter(book_alice__isnull=False),
[self.author1],
)
def test_internal_queryset_alias_mapping(self):
queryset = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
self.assertIn(
"INNER JOIN {} book_alice ON".format(
connection.ops.quote_name("filtered_relation_book")
),
str(queryset.query),
)
def test_with_multiple_filter(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_editor_a=FilteredRelation(
"book",
condition=Q(
book__title__icontains="book", book__editor_id=self.editor_a.pk
),
),
).filter(book_editor_a__isnull=False),
[self.author1],
)
def test_multiple_times(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_title_alice=FilteredRelation(
"book", condition=Q(book__title__icontains="alice")
),
)
.filter(book_title_alice__isnull=False)
.filter(book_title_alice__isnull=False)
.distinct(),
[self.author1],
)
def test_exclude_relation_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=~Q(book__title__icontains="alice")
),
)
.filter(book_alice__isnull=False)
.distinct(),
[self.author2],
)
def test_with_m2m(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__in=[self.book2]),
),
).filter(favorite_books_written_by_jane__isnull=False)
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_deep(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__author=self.author2),
),
).filter(favorite_books_written_by_jane__title="The book by Jane B")
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_multijoin(self):
qs = (
Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__author=self.author2),
)
)
.filter(favorite_books_written_by_jane__editor__name="b")
.distinct()
)
self.assertSequenceEqual(qs, [self.author1])
def test_values_list(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.values_list("book_alice__title", flat=True),
["Poem by Alice"],
)
def test_values(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.values(),
[
{
"id": self.author1.pk,
"name": "Alice",
"content_type_id": None,
"object_id": None,
}
],
)
def test_extra(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.extra(where=["1 = 1"]),
[self.author1],
)
@skipUnlessDBFeature("supports_select_union")
def test_union(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.union(qs2), [self.author1, self.author2])
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.intersection(qs2), [])
@skipUnlessDBFeature("supports_select_difference")
def test_difference(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.difference(qs2), [self.author1])
def test_select_for_update(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
)
.filter(book_jane__isnull=False)
.select_for_update(),
[self.author2],
)
def test_defer(self):
# One query for the list and one query for the deferred title.
with self.assertNumQueries(2):
self.assertQuerysetEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.select_related("book_alice")
.defer("book_alice__title"),
["Poem by Alice"],
lambda author: author.book_alice.title,
)
def test_only_not_supported(self):
msg = "only() is not supported with FilteredRelation."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False).select_related("book_alice").only(
"book_alice__state"
)
def test_as_subquery(self):
inner_qs = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs = Author.objects.filter(id__in=inner_qs)
self.assertSequenceEqual(qs, [self.author1])
def test_nested_foreign_key(self):
qs = (
Author.objects.annotate(
book_editor_worked_with=FilteredRelation(
"book__editor",
condition=Q(book__title__icontains="book by"),
),
)
.filter(
book_editor_worked_with__isnull=False,
)
.select_related(
"book_editor_worked_with",
)
.order_by("pk", "book_editor_worked_with__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.author1, self.editor_a),
(self.author2, self.editor_b),
(self.author2, self.editor_b),
],
lambda x: (x, x.book_editor_worked_with),
)
def test_nested_foreign_key_nested_field(self):
qs = (
Author.objects.annotate(
book_editor_worked_with=FilteredRelation(
"book__editor", condition=Q(book__title__icontains="book by")
),
)
.filter(
book_editor_worked_with__isnull=False,
)
.values(
"name",
"book_editor_worked_with__name",
)
.order_by("name", "book_editor_worked_with__name")
.distinct()
)
self.assertSequenceEqual(
qs,
[
{
"name": self.author1.name,
"book_editor_worked_with__name": self.editor_a.name,
},
{
"name": self.author2.name,
"book_editor_worked_with__name": self.editor_b.name,
},
],
)
def test_nested_foreign_key_filtered_base_object(self):
qs = (
Author.objects.annotate(
alice_editors=FilteredRelation(
"book__editor",
condition=Q(name="Alice"),
),
)
.values(
"name",
"alice_editors__pk",
)
.order_by("name", "alice_editors__name")
.distinct()
)
self.assertSequenceEqual(
qs,
[
{"name": self.author1.name, "alice_editors__pk": self.editor_a.pk},
{"name": self.author2.name, "alice_editors__pk": None},
],
)
def test_nested_m2m_filtered(self):
qs = (
Book.objects.annotate(
favorite_book=FilteredRelation(
"author__favorite_books",
condition=Q(author__favorite_books__title__icontains="book by"),
),
)
.values(
"title",
"favorite_book__pk",
)
.order_by("title", "favorite_book__title")
)
self.assertSequenceEqual(
qs,
[
{"title": self.book1.title, "favorite_book__pk": self.book2.pk},
{"title": self.book1.title, "favorite_book__pk": self.book3.pk},
{"title": self.book4.title, "favorite_book__pk": self.book2.pk},
{"title": self.book4.title, "favorite_book__pk": self.book3.pk},
{"title": self.book2.title, "favorite_book__pk": None},
{"title": self.book3.title, "favorite_book__pk": None},
],
)
def test_nested_chained_relations(self):
qs = (
Author.objects.annotate(
my_books=FilteredRelation(
"book",
condition=Q(book__title__icontains="book by"),
),
preferred_by_authors=FilteredRelation(
"my_books__preferred_by_authors",
condition=Q(my_books__preferred_by_authors__name="Alice"),
),
)
.annotate(
author=F("name"),
book_title=F("my_books__title"),
preferred_by_author_pk=F("preferred_by_authors"),
)
.order_by("author", "book_title", "preferred_by_author_pk")
)
self.assertQuerysetEqual(
qs,
[
("Alice", "The book by Alice", None),
("Jane", "The book by Jane A", self.author1.pk),
("Jane", "The book by Jane B", self.author1.pk),
],
lambda x: (x.author, x.book_title, x.preferred_by_author_pk),
)
def test_deep_nested_foreign_key(self):
qs = (
Book.objects.annotate(
author_favorite_book_editor=FilteredRelation(
"author__favorite_books__editor",
condition=Q(author__favorite_books__title__icontains="Jane A"),
),
)
.filter(
author_favorite_book_editor__isnull=False,
)
.select_related(
"author_favorite_book_editor",
)
.order_by("pk", "author_favorite_book_editor__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.editor_b),
(self.book4, self.editor_b),
],
lambda x: (x, x.author_favorite_book_editor),
)
def test_relation_name_lookup(self):
msg = (
"FilteredRelation's relation_name cannot contain lookups (got "
"'book__title__icontains')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_title=FilteredRelation(
"book__title__icontains",
condition=Q(book__title="Poem by Alice"),
),
)
def test_condition_outside_relation_name(self):
msg = (
"FilteredRelation's condition doesn't support relations outside "
"the 'book__editor' (got 'book__author__name__icontains')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_editor=FilteredRelation(
"book__editor",
condition=Q(book__author__name__icontains="book"),
),
)
def test_condition_deeper_relation_name(self):
msg = (
"FilteredRelation's condition doesn't support nested relations "
"deeper than the relation_name (got "
"'book__editor__name__icontains' for 'book')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_editor=FilteredRelation(
"book",
condition=Q(book__editor__name__icontains="b"),
),
)
def test_with_empty_relation_name_error(self):
with self.assertRaisesMessage(ValueError, "relation_name cannot be empty."):
FilteredRelation("", condition=Q(blank=""))
def test_with_condition_as_expression_error(self):
msg = "condition argument must be a Q() instance."
expression = Case(
When(book__title__iexact="poem by alice", then=True),
default=False,
)
with self.assertRaisesMessage(ValueError, msg):
FilteredRelation("book", condition=expression)
def test_with_prefetch_related(self):
msg = "prefetch_related() is not supported with FilteredRelation."
qs = Author.objects.annotate(
book_title_contains_b=FilteredRelation(
"book", condition=Q(book__title__icontains="b")
),
).filter(
book_title_contains_b__isnull=False,
)
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related("book_title_contains_b")
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related("book_title_contains_b__editor")
def test_with_generic_foreign_key(self):
self.assertSequenceEqual(
Book.objects.annotate(
generic_authored_book=FilteredRelation(
"generic_author", condition=Q(generic_author__isnull=False)
),
).filter(generic_authored_book__isnull=False),
[self.book1],
)
def test_eq(self):
self.assertEqual(
FilteredRelation("book", condition=Q(book__title="b")), mock.ANY
)
class FilteredRelationAggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name="Alice")
cls.editor_a = Editor.objects.create(name="a")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.borrower1 = Borrower.objects.create(name="Jenny")
cls.borrower2 = Borrower.objects.create(name="Kevin")
# borrower 1 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=RentalSession.STOPPED,
)
# borrower2 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=RentalSession.STOPPED,
)
def test_aggregate(self):
"""
filtered_relation() not only improves performance but also creates
correct results when aggregating with multiple LEFT JOINs.
Books can be reserved then rented by a borrower. Each reservation and
rental session are recorded with Reservation and RentalSession models.
Every time a reservation or a rental session is over, their state is
changed to 'stopped'.
Goal: Count number of books that are either currently reserved or
rented by borrower1 or available.
"""
qs = (
Book.objects.annotate(
is_reserved_or_rented_by=Case(
When(
reservation__state=Reservation.NEW,
then=F("reservation__borrower__pk"),
),
When(
rental_session__state=RentalSession.NEW,
then=F("rental_session__borrower__pk"),
),
default=None,
)
)
.filter(
Q(is_reserved_or_rented_by=self.borrower1.pk) | Q(state=Book.AVAILABLE)
)
.distinct()
)
self.assertEqual(qs.count(), 1)
        # Since qs.count() is 1, the same aggregation should also return 1,
        # but the duplicated LEFT JOIN rows make it return 4.
self.assertSequenceEqual(
qs.annotate(total=Count("pk")).values("total"), [{"total": 4}]
)
# With FilteredRelation, the result is as expected (1).
qs = (
Book.objects.annotate(
active_reservations=FilteredRelation(
"reservation",
condition=Q(
reservation__state=Reservation.NEW,
reservation__borrower=self.borrower1,
),
),
)
.annotate(
active_rental_sessions=FilteredRelation(
"rental_session",
condition=Q(
rental_session__state=RentalSession.NEW,
rental_session__borrower=self.borrower1,
),
),
)
.filter(
(
Q(active_reservations__isnull=False)
| Q(active_rental_sessions__isnull=False)
)
| Q(state=Book.AVAILABLE)
)
.distinct()
)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(
qs.annotate(total=Count("pk")).values("total"), [{"total": 1}]
)
class FilteredRelationAnalyticalAggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
author = Author.objects.create(name="Author")
editor = Editor.objects.create(name="Editor")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=editor,
author=author,
)
cls.book2 = Book.objects.create(
title="The book by Jane A",
editor=editor,
author=author,
)
cls.book3 = Book.objects.create(
title="The book by Jane B",
editor=editor,
author=author,
)
cls.seller1 = Seller.objects.create(name="Seller 1")
cls.seller2 = Seller.objects.create(name="Seller 2")
cls.usd = Currency.objects.create(currency="USD")
cls.eur = Currency.objects.create(currency="EUR")
cls.sales_date1 = date(2020, 7, 6)
cls.sales_date2 = date(2020, 7, 7)
ExchangeRate.objects.bulk_create(
[
ExchangeRate(
rate_date=cls.sales_date1,
from_currency=cls.usd,
to_currency=cls.eur,
rate=0.40,
),
ExchangeRate(
rate_date=cls.sales_date1,
from_currency=cls.eur,
to_currency=cls.usd,
rate=1.60,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.usd,
to_currency=cls.eur,
rate=0.50,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.eur,
to_currency=cls.usd,
rate=1.50,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.usd,
to_currency=cls.usd,
rate=1.00,
),
]
)
BookDailySales.objects.bulk_create(
[
BookDailySales(
book=cls.book1,
sale_date=cls.sales_date1,
currency=cls.usd,
sales=100.00,
seller=cls.seller1,
),
BookDailySales(
book=cls.book2,
sale_date=cls.sales_date1,
currency=cls.eur,
sales=200.00,
seller=cls.seller1,
),
BookDailySales(
book=cls.book1,
sale_date=cls.sales_date2,
currency=cls.usd,
sales=50.00,
seller=cls.seller2,
),
BookDailySales(
book=cls.book2,
sale_date=cls.sales_date2,
currency=cls.eur,
sales=100.00,
seller=cls.seller2,
),
]
)
def test_aggregate(self):
tests = [
Q(daily_sales__sale_date__gte=self.sales_date2),
~Q(daily_sales__seller=self.seller1),
]
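        # Both conditions select only the 2020-07-07 sales rows. Summing
        # recent_sales * the matching same-date rate into USD then gives
        # book2: 100 EUR * 1.50 = 150, book1: 50 USD * 1.00 = 50, and
        # book3, which has no sales at all, stays None.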
for condition in tests:
with self.subTest(condition=condition):
qs = (
Book.objects.annotate(
recent_sales=FilteredRelation(
"daily_sales", condition=condition
),
recent_sales_rates=FilteredRelation(
"recent_sales__currency__rates_from",
condition=Q(
recent_sales__currency__rates_from__rate_date=F(
"recent_sales__sale_date"
),
recent_sales__currency__rates_from__to_currency=(
self.usd
),
),
),
)
.annotate(
sales_sum=Sum(
F("recent_sales__sales") * F("recent_sales_rates__rate"),
output_field=DecimalField(),
),
)
.values("title", "sales_sum")
.order_by(
F("sales_sum").desc(nulls_last=True),
)
)
self.assertSequenceEqual(
qs,
[
{"title": self.book2.title, "sales_sum": Decimal(150.00)},
{"title": self.book1.title, "sales_sum": Decimal(50.00)},
{"title": self.book3.title, "sales_sum": None},
],
)
| 35.241989
| 87
| 0.512949
|
from datetime import date
from decimal import Decimal
from unittest import mock
from django.db import connection, transaction
from django.db.models import (
Case,
Count,
DecimalField,
F,
FilteredRelation,
Q,
Sum,
When,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from .models import (
Author,
Book,
BookDailySales,
Borrower,
Currency,
Editor,
ExchangeRate,
RentalSession,
Reservation,
Seller,
)
class FilteredRelationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name="Alice")
cls.author2 = Author.objects.create(name="Jane")
cls.editor_a = Editor.objects.create(name="a")
cls.editor_b = Editor.objects.create(name="b")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.book1.generic_author.set([cls.author2])
cls.book2 = Book.objects.create(
title="The book by Jane A",
editor=cls.editor_b,
author=cls.author2,
)
cls.book3 = Book.objects.create(
title="The book by Jane B",
editor=cls.editor_b,
author=cls.author2,
)
cls.book4 = Book.objects.create(
title="The book by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.author1.favorite_books.add(cls.book2)
cls.author1.favorite_books.add(cls.book3)
def test_select_related(self):
qs = (
Author.objects.annotate(
book_join=FilteredRelation("book"),
)
.select_related("book_join__editor")
.order_by("pk", "book_join__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.author1, self.book1, self.editor_a, self.author1),
(self.author1, self.book4, self.editor_a, self.author1),
(self.author2, self.book2, self.editor_b, self.author2),
(self.author2, self.book3, self.editor_b, self.author2),
],
lambda x: (x, x.book_join, x.book_join.editor, x.book_join.author),
)
def test_select_related_multiple(self):
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
editor_join=FilteredRelation("editor"),
)
.select_related("author_join", "editor_join")
.order_by("pk")
)
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1, self.editor_a),
(self.book2, self.author2, self.editor_b),
(self.book3, self.author2, self.editor_b),
(self.book4, self.author1, self.editor_a),
],
lambda x: (x, x.author_join, x.editor_join),
)
def test_select_related_with_empty_relation(self):
qs = (
Author.objects.annotate(
book_join=FilteredRelation("book", condition=Q(pk=-1)),
)
.select_related("book_join")
.order_by("pk")
)
self.assertSequenceEqual(qs, [self.author1, self.author2])
def test_select_related_foreign_key(self):
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
)
.select_related("author_join")
.order_by("pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
],
lambda x: (x, x.author_join),
)
@skipUnlessDBFeature("has_select_for_update", "has_select_for_update_of")
def test_select_related_foreign_key_for_update_of(self):
with transaction.atomic():
qs = (
Book.objects.annotate(
author_join=FilteredRelation("author"),
)
.select_related("author_join")
.select_for_update(of=("self",))
.order_by("pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.author1),
(self.book2, self.author2),
(self.book3, self.author2),
(self.book4, self.author1),
],
lambda x: (x, x.author_join),
)
def test_without_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
),
[self.author1, self.author2],
)
def test_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False),
[self.author1],
)
def test_with_exclude(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).exclude(book_alice__isnull=False),
[self.author2],
)
def test_with_join_and_complex_condition(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book",
condition=Q(
Q(book__title__iexact="poem by alice")
| Q(book__state=Book.RENTED)
),
),
).filter(book_alice__isnull=False),
[self.author1],
)
def test_internal_queryset_alias_mapping(self):
queryset = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
self.assertIn(
"INNER JOIN {} book_alice ON".format(
connection.ops.quote_name("filtered_relation_book")
),
str(queryset.query),
)
def test_with_multiple_filter(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_editor_a=FilteredRelation(
"book",
condition=Q(
book__title__icontains="book", book__editor_id=self.editor_a.pk
),
),
).filter(book_editor_a__isnull=False),
[self.author1],
)
def test_multiple_times(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_title_alice=FilteredRelation(
"book", condition=Q(book__title__icontains="alice")
),
)
.filter(book_title_alice__isnull=False)
.filter(book_title_alice__isnull=False)
.distinct(),
[self.author1],
)
def test_exclude_relation_with_join(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=~Q(book__title__icontains="alice")
),
)
.filter(book_alice__isnull=False)
.distinct(),
[self.author2],
)
def test_with_m2m(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__in=[self.book2]),
),
).filter(favorite_books_written_by_jane__isnull=False)
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_deep(self):
qs = Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__author=self.author2),
),
).filter(favorite_books_written_by_jane__title="The book by Jane B")
self.assertSequenceEqual(qs, [self.author1])
def test_with_m2m_multijoin(self):
qs = (
Author.objects.annotate(
favorite_books_written_by_jane=FilteredRelation(
"favorite_books",
condition=Q(favorite_books__author=self.author2),
)
)
.filter(favorite_books_written_by_jane__editor__name="b")
.distinct()
)
self.assertSequenceEqual(qs, [self.author1])
def test_values_list(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.values_list("book_alice__title", flat=True),
["Poem by Alice"],
)
def test_values(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.values(),
[
{
"id": self.author1.pk,
"name": "Alice",
"content_type_id": None,
"object_id": None,
}
],
)
def test_extra(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.extra(where=["1 = 1"]),
[self.author1],
)
@skipUnlessDBFeature("supports_select_union")
def test_union(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.union(qs2), [self.author1, self.author2])
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.intersection(qs2), [])
@skipUnlessDBFeature("supports_select_difference")
def test_difference(self):
qs1 = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs2 = Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
).filter(book_jane__isnull=False)
self.assertSequenceEqual(qs1.difference(qs2), [self.author1])
def test_select_for_update(self):
self.assertSequenceEqual(
Author.objects.annotate(
book_jane=FilteredRelation(
"book", condition=Q(book__title__iexact="the book by jane a")
),
)
.filter(book_jane__isnull=False)
.select_for_update(),
[self.author2],
)
def test_defer(self):
with self.assertNumQueries(2):
self.assertQuerysetEqual(
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
)
.filter(book_alice__isnull=False)
.select_related("book_alice")
.defer("book_alice__title"),
["Poem by Alice"],
lambda author: author.book_alice.title,
)
def test_only_not_supported(self):
msg = "only() is not supported with FilteredRelation."
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False).select_related("book_alice").only(
"book_alice__state"
)
def test_as_subquery(self):
inner_qs = Author.objects.annotate(
book_alice=FilteredRelation(
"book", condition=Q(book__title__iexact="poem by alice")
),
).filter(book_alice__isnull=False)
qs = Author.objects.filter(id__in=inner_qs)
self.assertSequenceEqual(qs, [self.author1])
def test_nested_foreign_key(self):
qs = (
Author.objects.annotate(
book_editor_worked_with=FilteredRelation(
"book__editor",
condition=Q(book__title__icontains="book by"),
),
)
.filter(
book_editor_worked_with__isnull=False,
)
.select_related(
"book_editor_worked_with",
)
.order_by("pk", "book_editor_worked_with__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.author1, self.editor_a),
(self.author2, self.editor_b),
(self.author2, self.editor_b),
],
lambda x: (x, x.book_editor_worked_with),
)
def test_nested_foreign_key_nested_field(self):
qs = (
Author.objects.annotate(
book_editor_worked_with=FilteredRelation(
"book__editor", condition=Q(book__title__icontains="book by")
),
)
.filter(
book_editor_worked_with__isnull=False,
)
.values(
"name",
"book_editor_worked_with__name",
)
.order_by("name", "book_editor_worked_with__name")
.distinct()
)
self.assertSequenceEqual(
qs,
[
{
"name": self.author1.name,
"book_editor_worked_with__name": self.editor_a.name,
},
{
"name": self.author2.name,
"book_editor_worked_with__name": self.editor_b.name,
},
],
)
def test_nested_foreign_key_filtered_base_object(self):
qs = (
Author.objects.annotate(
alice_editors=FilteredRelation(
"book__editor",
condition=Q(name="Alice"),
),
)
.values(
"name",
"alice_editors__pk",
)
.order_by("name", "alice_editors__name")
.distinct()
)
self.assertSequenceEqual(
qs,
[
{"name": self.author1.name, "alice_editors__pk": self.editor_a.pk},
{"name": self.author2.name, "alice_editors__pk": None},
],
)
def test_nested_m2m_filtered(self):
qs = (
Book.objects.annotate(
favorite_book=FilteredRelation(
"author__favorite_books",
condition=Q(author__favorite_books__title__icontains="book by"),
),
)
.values(
"title",
"favorite_book__pk",
)
.order_by("title", "favorite_book__title")
)
self.assertSequenceEqual(
qs,
[
{"title": self.book1.title, "favorite_book__pk": self.book2.pk},
{"title": self.book1.title, "favorite_book__pk": self.book3.pk},
{"title": self.book4.title, "favorite_book__pk": self.book2.pk},
{"title": self.book4.title, "favorite_book__pk": self.book3.pk},
{"title": self.book2.title, "favorite_book__pk": None},
{"title": self.book3.title, "favorite_book__pk": None},
],
)
def test_nested_chained_relations(self):
qs = (
Author.objects.annotate(
my_books=FilteredRelation(
"book",
condition=Q(book__title__icontains="book by"),
),
preferred_by_authors=FilteredRelation(
"my_books__preferred_by_authors",
condition=Q(my_books__preferred_by_authors__name="Alice"),
),
)
.annotate(
author=F("name"),
book_title=F("my_books__title"),
preferred_by_author_pk=F("preferred_by_authors"),
)
.order_by("author", "book_title", "preferred_by_author_pk")
)
self.assertQuerysetEqual(
qs,
[
("Alice", "The book by Alice", None),
("Jane", "The book by Jane A", self.author1.pk),
("Jane", "The book by Jane B", self.author1.pk),
],
lambda x: (x.author, x.book_title, x.preferred_by_author_pk),
)
def test_deep_nested_foreign_key(self):
qs = (
Book.objects.annotate(
author_favorite_book_editor=FilteredRelation(
"author__favorite_books__editor",
condition=Q(author__favorite_books__title__icontains="Jane A"),
),
)
.filter(
author_favorite_book_editor__isnull=False,
)
.select_related(
"author_favorite_book_editor",
)
.order_by("pk", "author_favorite_book_editor__pk")
)
with self.assertNumQueries(1):
self.assertQuerysetEqual(
qs,
[
(self.book1, self.editor_b),
(self.book4, self.editor_b),
],
lambda x: (x, x.author_favorite_book_editor),
)
def test_relation_name_lookup(self):
msg = (
"FilteredRelation's relation_name cannot contain lookups (got "
"'book__title__icontains')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_title=FilteredRelation(
"book__title__icontains",
condition=Q(book__title="Poem by Alice"),
),
)
def test_condition_outside_relation_name(self):
msg = (
"FilteredRelation's condition doesn't support relations outside "
"the 'book__editor' (got 'book__author__name__icontains')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_editor=FilteredRelation(
"book__editor",
condition=Q(book__author__name__icontains="book"),
),
)
def test_condition_deeper_relation_name(self):
msg = (
"FilteredRelation's condition doesn't support nested relations "
"deeper than the relation_name (got "
"'book__editor__name__icontains' for 'book')."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.annotate(
book_editor=FilteredRelation(
"book",
condition=Q(book__editor__name__icontains="b"),
),
)
def test_with_empty_relation_name_error(self):
with self.assertRaisesMessage(ValueError, "relation_name cannot be empty."):
FilteredRelation("", condition=Q(blank=""))
def test_with_condition_as_expression_error(self):
msg = "condition argument must be a Q() instance."
expression = Case(
When(book__title__iexact="poem by alice", then=True),
default=False,
)
with self.assertRaisesMessage(ValueError, msg):
FilteredRelation("book", condition=expression)
def test_with_prefetch_related(self):
msg = "prefetch_related() is not supported with FilteredRelation."
qs = Author.objects.annotate(
book_title_contains_b=FilteredRelation(
"book", condition=Q(book__title__icontains="b")
),
).filter(
book_title_contains_b__isnull=False,
)
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related("book_title_contains_b")
with self.assertRaisesMessage(ValueError, msg):
qs.prefetch_related("book_title_contains_b__editor")
def test_with_generic_foreign_key(self):
self.assertSequenceEqual(
Book.objects.annotate(
generic_authored_book=FilteredRelation(
"generic_author", condition=Q(generic_author__isnull=False)
),
).filter(generic_authored_book__isnull=False),
[self.book1],
)
def test_eq(self):
self.assertEqual(
FilteredRelation("book", condition=Q(book__title="b")), mock.ANY
)
class FilteredRelationAggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.author1 = Author.objects.create(name="Alice")
cls.editor_a = Editor.objects.create(name="a")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=cls.editor_a,
author=cls.author1,
)
cls.borrower1 = Borrower.objects.create(name="Jenny")
cls.borrower2 = Borrower.objects.create(name="Kevin")
# borrower 1 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower1,
book=cls.book1,
state=RentalSession.STOPPED,
)
# borrower2 reserves, rents, and returns book1.
Reservation.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=Reservation.STOPPED,
)
RentalSession.objects.create(
borrower=cls.borrower2,
book=cls.book1,
state=RentalSession.STOPPED,
)
def test_aggregate(self):
qs = (
Book.objects.annotate(
is_reserved_or_rented_by=Case(
When(
reservation__state=Reservation.NEW,
then=F("reservation__borrower__pk"),
),
When(
rental_session__state=RentalSession.NEW,
then=F("rental_session__borrower__pk"),
),
default=None,
)
)
.filter(
Q(is_reserved_or_rented_by=self.borrower1.pk) | Q(state=Book.AVAILABLE)
)
.distinct()
)
self.assertEqual(qs.count(), 1)
        # Since qs.count() is 1, the same aggregation should also return 1,
        # but the duplicated LEFT JOIN rows make it return 4.
self.assertSequenceEqual(
qs.annotate(total=Count("pk")).values("total"), [{"total": 4}]
)
# With FilteredRelation, the result is as expected (1).
qs = (
Book.objects.annotate(
active_reservations=FilteredRelation(
"reservation",
condition=Q(
reservation__state=Reservation.NEW,
reservation__borrower=self.borrower1,
),
),
)
.annotate(
active_rental_sessions=FilteredRelation(
"rental_session",
condition=Q(
rental_session__state=RentalSession.NEW,
rental_session__borrower=self.borrower1,
),
),
)
.filter(
(
Q(active_reservations__isnull=False)
| Q(active_rental_sessions__isnull=False)
)
| Q(state=Book.AVAILABLE)
)
.distinct()
)
self.assertEqual(qs.count(), 1)
self.assertSequenceEqual(
qs.annotate(total=Count("pk")).values("total"), [{"total": 1}]
)
class FilteredRelationAnalyticalAggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
author = Author.objects.create(name="Author")
editor = Editor.objects.create(name="Editor")
cls.book1 = Book.objects.create(
title="Poem by Alice",
editor=editor,
author=author,
)
cls.book2 = Book.objects.create(
title="The book by Jane A",
editor=editor,
author=author,
)
cls.book3 = Book.objects.create(
title="The book by Jane B",
editor=editor,
author=author,
)
cls.seller1 = Seller.objects.create(name="Seller 1")
cls.seller2 = Seller.objects.create(name="Seller 2")
cls.usd = Currency.objects.create(currency="USD")
cls.eur = Currency.objects.create(currency="EUR")
cls.sales_date1 = date(2020, 7, 6)
cls.sales_date2 = date(2020, 7, 7)
ExchangeRate.objects.bulk_create(
[
ExchangeRate(
rate_date=cls.sales_date1,
from_currency=cls.usd,
to_currency=cls.eur,
rate=0.40,
),
ExchangeRate(
rate_date=cls.sales_date1,
from_currency=cls.eur,
to_currency=cls.usd,
rate=1.60,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.usd,
to_currency=cls.eur,
rate=0.50,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.eur,
to_currency=cls.usd,
rate=1.50,
),
ExchangeRate(
rate_date=cls.sales_date2,
from_currency=cls.usd,
to_currency=cls.usd,
rate=1.00,
),
]
)
BookDailySales.objects.bulk_create(
[
BookDailySales(
book=cls.book1,
sale_date=cls.sales_date1,
currency=cls.usd,
sales=100.00,
seller=cls.seller1,
),
BookDailySales(
book=cls.book2,
sale_date=cls.sales_date1,
currency=cls.eur,
sales=200.00,
seller=cls.seller1,
),
BookDailySales(
book=cls.book1,
sale_date=cls.sales_date2,
currency=cls.usd,
sales=50.00,
seller=cls.seller2,
),
BookDailySales(
book=cls.book2,
sale_date=cls.sales_date2,
currency=cls.eur,
sales=100.00,
seller=cls.seller2,
),
]
)
def test_aggregate(self):
tests = [
Q(daily_sales__sale_date__gte=self.sales_date2),
~Q(daily_sales__seller=self.seller1),
]
for condition in tests:
with self.subTest(condition=condition):
qs = (
Book.objects.annotate(
recent_sales=FilteredRelation(
"daily_sales", condition=condition
),
recent_sales_rates=FilteredRelation(
"recent_sales__currency__rates_from",
condition=Q(
recent_sales__currency__rates_from__rate_date=F(
"recent_sales__sale_date"
),
recent_sales__currency__rates_from__to_currency=(
self.usd
),
),
),
)
.annotate(
sales_sum=Sum(
F("recent_sales__sales") * F("recent_sales_rates__rate"),
output_field=DecimalField(),
),
)
.values("title", "sales_sum")
.order_by(
F("sales_sum").desc(nulls_last=True),
)
)
self.assertSequenceEqual(
qs,
[
{"title": self.book2.title, "sales_sum": Decimal(150.00)},
{"title": self.book1.title, "sales_sum": Decimal(50.00)},
{"title": self.book3.title, "sales_sum": None},
],
)
| true
| true
|
790a9126e29b6c50e4391ee0400fa8815cca49d9
| 842
|
py
|
Python
|
api/tests/opentrons/data/testosaur_v3.py
|
Opentrons/protocol_framework
|
ebbd6b2fe984edd6ecfcbf1dbe040db7f7356b9f
|
[
"Apache-2.0"
] | 2
|
2015-11-10T17:49:51.000Z
|
2016-01-15T04:43:37.000Z
|
api/tests/opentrons/data/testosaur_v3.py
|
Opentrons/labware
|
e21d8db51eac5818477264a45ef12c0a2d15fb72
|
[
"Apache-2.0"
] | null | null | null |
api/tests/opentrons/data/testosaur_v3.py
|
Opentrons/labware
|
e21d8db51eac5818477264a45ef12c0a2d15fb72
|
[
"Apache-2.0"
] | null | null | null |
from opentrons import protocol_api, types
metadata = {
"protocolName": "Testosaur Version 3",
"author": "Opentrons <engineering@opentrons.com>",
"description": 'A variant on "Dinosaur" for testing with Protocol API v3',
"source": "Opentrons Repository",
"apiLevel": "3.0",
}
def run(ctx: protocol_api.ProtocolContext) -> None:
tip_rack = ctx.load_labware("opentrons_96_tiprack_300ul", 8)
source = ctx.load_labware("nest_12_reservoir_15ml", 1)
dest = ctx.load_labware("corning_96_wellplate_360ul_flat", 2)
pipette = ctx.load_instrument("p300_single_gen2", types.Mount.RIGHT, [])
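    # Transfer 50 uL from reservoir well A1 into each of the first four
    # plate wells, using a fresh tip per transfer and returning it to its
    # original position in the rack afterwards.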
for i in range(4):
pipette.pick_up_tip(tip_rack.wells()[i])
pipette.aspirate(50, source.wells_by_name()["A1"])
pipette.dispense(50, dest.wells()[i])
pipette.drop_tip(tip_rack.wells()[i])
| 35.083333
| 78
| 0.690024
|
from opentrons import protocol_api, types
metadata = {
"protocolName": "Testosaur Version 3",
"author": "Opentrons <engineering@opentrons.com>",
"description": 'A variant on "Dinosaur" for testing with Protocol API v3',
"source": "Opentrons Repository",
"apiLevel": "3.0",
}
def run(ctx: protocol_api.ProtocolContext) -> None:
tip_rack = ctx.load_labware("opentrons_96_tiprack_300ul", 8)
source = ctx.load_labware("nest_12_reservoir_15ml", 1)
dest = ctx.load_labware("corning_96_wellplate_360ul_flat", 2)
pipette = ctx.load_instrument("p300_single_gen2", types.Mount.RIGHT, [])
for i in range(4):
pipette.pick_up_tip(tip_rack.wells()[i])
pipette.aspirate(50, source.wells_by_name()["A1"])
pipette.dispense(50, dest.wells()[i])
pipette.drop_tip(tip_rack.wells()[i])
| true
| true
|
790a9147e54633c23691d9a2d89b436a975cd3e9
| 27,227
|
py
|
Python
|
openbci/wifi.py
|
kaherdin-reed/thinkingCap
|
069bbcb9167261d687ec37206d1d2f59ace137d8
|
[
"MIT"
] | null | null | null |
openbci/wifi.py
|
kaherdin-reed/thinkingCap
|
069bbcb9167261d687ec37206d1d2f59ace137d8
|
[
"MIT"
] | null | null | null |
openbci/wifi.py
|
kaherdin-reed/thinkingCap
|
069bbcb9167261d687ec37206d1d2f59ace137d8
|
[
"MIT"
] | null | null | null |
"""
Core OpenBCI object for handling connections and samples from the WiFi Shield
Note that the LIB will take care on its own to print incoming ASCII messages if any (FIXME, BTW).
EXAMPLE USE:
def handle_sample(sample):
print(sample.channels_data)
wifi = OpenBCIWifi()
wifi.start(handle_sample)
TODO: Cyton/Ganglion JSON
TODO: Ganglion Raw
TODO: Cyton Raw
"""
import asyncore
import atexit
import json
import logging
import re
import socket
import timeit
try:
import urllib2
except ImportError:
import urllib
import requests
import xmltodict
from openbci.utils import k, ParseRaw, OpenBCISample, ssdp
SAMPLE_RATE = 0 # Hz
'''
# Commands from the SDK
command_stop = "s";
command_startBinary = "b";
'''
class OpenBCIWiFi(object):
"""
Handle a connection to an OpenBCI wifi shield.
Args:
        ip_address: The IP address of the WiFi Shield; "None" to attempt auto-detect.
        shield_name: The unique name of the WiFi Shield, such as `OpenBCI-2AD4`; SSDP is still used to resolve the
            IP address. If both `shield_name` and `ip_address` are "None", connects to the first WiFi Shield found via SSDP.
        sample_rate: The sample rate to set the attached board to. If the requested rate is not one the attached
            board supports, e.g. sending 300 to a Cyton, an error is thrown.
        log:
        timeout: in seconds; disconnect / reconnect after a period without new data -- should be high during impedance checks.
        max_packets_to_skip: will try to disconnect / reconnect after too many packets are skipped.
"""
def __init__(self, ip_address=None, shield_name=None, sample_rate=None, log=True, timeout=3,
max_packets_to_skip=20, latency=10000, high_speed=True, ssdp_attempts=5,
num_channels=8, local_ip_address=None):
# these one are used
self.daisy = False
self.gains = None
self.high_speed = high_speed
self.impedance = False
self.ip_address = ip_address
self.latency = latency
self.log = log # print_incoming_text needs log
self.max_packets_to_skip = max_packets_to_skip
self.num_channels = num_channels
self.sample_rate = sample_rate
self.shield_name = shield_name
self.ssdp_attempts = ssdp_attempts
self.streaming = False
self.timeout = timeout
# might be handy to know API
self.board_type = "none"
# number of EEG channels
self.eeg_channels_per_sample = 0
self.read_state = 0
self.log_packet_count = 0
self.packets_dropped = 0
self.time_last_packet = 0
if self.log:
print("Welcome to OpenBCI Native WiFi Shield Driver - Please contribute code!")
self.local_ip_address = local_ip_address
if not self.local_ip_address:
self.local_ip_address = self._get_local_ip_address()
# Intentionally bind to port 0
self.local_wifi_server = WiFiShieldServer(self.local_ip_address, 0)
self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[1]
if self.log:
print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port))
if ip_address is None:
for i in range(ssdp_attempts):
try:
self.find_wifi_shield(wifi_shield_cb=self.on_shield_found)
break
except OSError:
# Try again
if self.log:
print("Did not find any WiFi Shields")
else:
self.on_shield_found(ip_address)
def on_shield_found(self, ip_address):
self.ip_address = ip_address
self.connect()
# Disconnects from board when terminated
atexit.register(self.disconnect)
def loop(self):
asyncore.loop()
def _get_local_ip_address(self):
"""
Gets the local ip address of this computer
@returns str Local IP address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip_address = s.getsockname()[0]
s.close()
return local_ip_address
def getBoardType(self):
""" Returns the version of the board """
return self.board_type
def setImpedance(self, flag):
""" Enable/disable impedance measure """
self.impedance = bool(flag)
def connect(self):
""" Connect to the board and configure it. Note: recreates various objects upon call. """
if self.ip_address is None:
raise ValueError('self.ip_address cannot be None')
if self.log:
print("Init WiFi connection with IP: " + self.ip_address)
"""
Docs on these HTTP requests and more are found:
https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0
"""
res_board = requests.get("http://%s/board" % self.ip_address)
if res_board.status_code == 200:
board_info = res_board.json()
if not board_info['board_connected']:
raise RuntimeError("No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide")
self.board_type = board_info['board_type']
self.eeg_channels_per_sample = board_info['num_channels']
if self.log:
print("Connected to %s with %s channels" % (self.board_type, self.eeg_channels_per_sample))
self.gains = None
if self.board_type == k.BOARD_CYTON:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = False
elif self.board_type == k.BOARD_DAISY:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = True
elif self.board_type == k.BOARD_GANGLION:
self.gains = [51, 51, 51, 51]
self.daisy = False
self.local_wifi_server.set_daisy(daisy=self.daisy)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
if self.high_speed:
output_style = 'raw'
else:
output_style = 'json'
res_tcp_post = requests.post("http://%s/tcp" % self.ip_address,
json={
'ip': self.local_ip_address,
'port': self.local_wifi_server_port,
'output': output_style,
'delimiter': True,
'latency': self.latency
})
if res_tcp_post.status_code == 200:
tcp_status = res_tcp_post.json()
if tcp_status['connected']:
if self.log:
print("WiFi Shield to Python TCP Socket Established")
else:
raise RuntimeWarning("WiFi Shield is not able to connect to local server. Please open an issue.")
def init_streaming(self):
""" Tell the board to record like crazy. """
res_stream_start = requests.get("http://%s/stream/start" % self.ip_address)
if res_stream_start.status_code == 200:
self.streaming = True
self.packets_dropped = 0
self.time_last_packet = timeit.default_timer()
else:
raise EnvironmentError("Unable to start streaming. Check API for status code %d on /stream/start" % res_stream_start.status_code)
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None):
"""Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege."""
if self.log:
print("Try to find WiFi shields on your local wireless network")
print("Scanning for %d seconds nearby devices..." % self.timeout)
list_ip = []
list_id = []
found_shield = False
def wifi_shield_found(response):
res = requests.get(response.location, verify=False).text
device_description = xmltodict.parse(res)
cur_shield_name = str(device_description['root']['device']['serialNumber'])
cur_base_url = str(device_description['root']['URLBase'])
cur_ip_address = re.findall(r'[0-9]+(?:\.[0-9]+){3}', cur_base_url)[0]
list_id.append(cur_shield_name)
list_ip.append(cur_ip_address)
found_shield = True
if shield_name is None:
print("Found WiFi Shield %s with IP Address %s" % (cur_shield_name, cur_ip_address))
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
else:
if shield_name == cur_shield_name:
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
ssdp_hits = ssdp.discover("urn:schemas-upnp-org:device:Basic:1", timeout=self.timeout, wifi_found_cb=wifi_shield_found)
nb_wifi_shields = len(list_id)
if nb_wifi_shields < 1:
print("No WiFi Shields found ;(")
raise OSError('Cannot find OpenBCI WiFi Shield with local name')
if nb_wifi_shields > 1:
print(
"Found " + str(nb_wifi_shields) +
", selecting first named: " + list_id[0] +
" with IPV4: " + list_ip[0])
return list_ip[0]
def wifi_write(self, output):
"""
Pass through commands from the WiFi Shield to the Carrier board
:param output:
:return:
"""
res_command_post = requests.post("http://%s/command" % self.ip_address,
json={'command': output})
if res_command_post.status_code == 200:
ret_val = res_command_post.text
if self.log:
print(ret_val)
return ret_val
else:
if self.log:
print("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
raise RuntimeError("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
def getSampleRate(self):
return self.sample_rate
def getNbEEGChannels(self):
"""Will not get new data on impedance check."""
return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=-1):
"""
Start handling streaming data from the board. Call a provided callback
for every single sample that is processed
Args:
callback: A callback function -- or a list of functions -- that will receive a single argument of the
OpenBCISample object captured.
"""
start_time = timeit.default_timer()
# Enclose callback function in a list if it comes alone
if not isinstance(callback, list):
self.local_wifi_server.set_callback(callback)
else:
self.local_wifi_server.set_callback(callback[0])
if not self.streaming:
self.init_streaming()
# while self.streaming:
# # should the board get disconnected and we could not wait for notification anymore, a reco should be attempted through timeout mechanism
# try:
# # at most we will get one sample per packet
# self.waitForNotifications(1. / self.getSampleRate())
# except Exception as e:
# print("Something went wrong while waiting for a new sample: " + str(e))
# # retrieve current samples on the stack
# samples = self.delegate.getSamples()
# self.packets_dropped = self.delegate.getMaxPacketsDropped()
# if samples:
# self.time_last_packet = timeit.default_timer()
# for call in callback:
# for sample in samples:
# call(sample)
#
# if (lapse > 0 and timeit.default_timer() - start_time > lapse):
# self.stop();
# if self.log:
# self.log_packet_count = self.log_packet_count + 1;
#
# # Checking connection -- timeout and packets dropped
# self.check_connection()
def test_signal(self, signal):
""" Enable / disable test signal """
if signal == 0:
self.warn("Disabling synthetic square wave")
try:
self.wifi_write(']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Enabling synthetic square wave")
try:
self.wifi_write('[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" % signal)
def set_channel(self, channel, toggle_position):
""" Enable / disable channels """
try:
if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
# Commands to set toggle to on position
if toggle_position == 1:
                if channel == 1:
                    self.wifi_write('!')
                if channel == 2:
                    self.wifi_write('@')
                if channel == 3:
                    self.wifi_write('#')
                if channel == 4:
                    self.wifi_write('$')
                if channel == 5:
                    self.wifi_write('%')
                if channel == 6:
                    self.wifi_write('^')
                if channel == 7:
                    self.wifi_write('&')
                if channel == 8:
                    self.wifi_write('*')
                if channel == 9:
                    self.wifi_write('Q')
                if channel == 10:
                    self.wifi_write('W')
                if channel == 11:
                    self.wifi_write('E')
                if channel == 12:
                    self.wifi_write('R')
                if channel == 13:
                    self.wifi_write('T')
                if channel == 14:
                    self.wifi_write('Y')
                if channel == 15:
                    self.wifi_write('U')
                if channel == 16:
                    self.wifi_write('I')
            # Commands to set toggle to off position
            elif toggle_position == 0:
                if channel == 1:
                    self.wifi_write('1')
                if channel == 2:
                    self.wifi_write('2')
                if channel == 3:
                    self.wifi_write('3')
                if channel == 4:
                    self.wifi_write('4')
                if channel == 5:
                    self.wifi_write('5')
                if channel == 6:
                    self.wifi_write('6')
                if channel == 7:
                    self.wifi_write('7')
                if channel == 8:
                    self.wifi_write('8')
                if channel == 9:
                    self.wifi_write('q')
                if channel == 10:
                    self.wifi_write('w')
                if channel == 11:
                    self.wifi_write('e')
                if channel == 12:
                    self.wifi_write('r')
                if channel == 13:
                    self.wifi_write('t')
                if channel == 14:
                    self.wifi_write('y')
                if channel == 15:
                    self.wifi_write('u')
                if channel == 16:
                    self.wifi_write('i')
except Exception as e:
print("Something went wrong while setting channels: " + str(e))
# See Cyton SDK for options
def set_channel_settings(self, channel, enabled=True, gain=24, input_type=0, include_bias=True, use_srb2=True, use_srb1=True):
try:
if channel > self.num_channels:
                raise ValueError('Cannot set non-existent channel')
if self.board_type == k.BOARD_GANGLION:
raise ValueError('Cannot use with Ganglion')
ch_array = list("12345678QWERTYUI")
            # defaults
command = list("x1060110X")
# Set channel
command[1] = ch_array[channel-1]
# Set power down if needed (default channel enabled)
if not enabled:
command[2] = '1'
# Set gain (default 24)
if gain == 1:
command[3] = '0'
if gain == 2:
command[3] = '1'
if gain == 4:
command[3] = '2'
if gain == 6:
command[3] = '3'
if gain == 8:
command[3] = '4'
if gain == 12:
command[3] = '5'
#TODO: Implement input type (default normal)
# Set bias inclusion (default include)
if not include_bias:
command[5] = '0'
# Set srb2 use (default use)
if not use_srb2:
command[6] = '0'
            # Set srb1 use (default don't use); SRB1 is the 8th field
            # (index 7) of the command string, after the SRB2 flag
            if use_srb1:
                command[7] = '1'
command_send = ''.join(command)
self.wifi_write(command_send)
            # Make sure to update the gain in the WiFi server
self.gains[channel-1] = gain
self.local_wifi_server.set_gains(gains=self.gains)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
except ValueError as e:
print("Something went wrong while setting channel settings: " + str(e))
def set_sample_rate(self, sample_rate):
""" Change sample rate """
try:
if self.board_type == k.BOARD_CYTON or self.board_type == k.BOARD_DAISY:
if sample_rate == 250:
self.wifi_write('~6')
elif sample_rate == 500:
self.wifi_write('~5')
elif sample_rate == 1000:
self.wifi_write('~4')
elif sample_rate == 2000:
self.wifi_write('~3')
elif sample_rate == 4000:
self.wifi_write('~2')
elif sample_rate == 8000:
self.wifi_write('~1')
elif sample_rate == 16000:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
elif self.board_type == k.BOARD_GANGLION:
if sample_rate == 200:
self.wifi_write('~7')
elif sample_rate == 400:
self.wifi_write('~6')
elif sample_rate == 800:
self.wifi_write('~5')
elif sample_rate == 1600:
self.wifi_write('~4')
elif sample_rate == 3200:
self.wifi_write('~3')
elif sample_rate == 6400:
self.wifi_write('~2')
elif sample_rate == 12800:
self.wifi_write('~1')
elif sample_rate == 25600:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
else:
print("Board type not supported for setting sample rate")
except Exception as e:
print("Something went wrong while setting sample rate: " + str(e))
def set_accelerometer(self, toggle_position):
""" Enable / disable accelerometer """
try:
if self.board_type == k.BOARD_GANGLION:
# Commands to set toggle to on position
if toggle_position == 1:
self.wifi_write('n')
# Commands to set toggle to off position
elif toggle_position == 0:
self.wifi_write('N')
else:
print("Board type not supported for setting accelerometer")
except Exception as e:
print("Something went wrong while setting accelerometer: " + str(e))
"""
Clean Up (atexit)
"""
def stop(self):
print("Stopping streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.wifi_write('Z')
else:
self.wifi_write('s')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if self.streaming:
self.stop()
# should not try to read/write anything after that, will crash
"""
SETTINGS AND HELPERS
"""
def warn(self, text):
if self.log:
            # log how many packets were sent successfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:' + str(self.log_packet_count))
self.log_packet_count = 0
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
""" Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost."""
# stop checking when we're no longer streaming
if not self.streaming:
return
        # check number of dropped packets and duration without new packets; disconnect/reconnect if too large
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
            # if error, attempt to reconnect
self.reconnect()
def reconnect(self):
""" In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost."""
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
class WiFiShieldHandler(asyncore.dispatcher_with_send):
def __init__(self, sock, callback=None, high_speed=True,
parser=None, daisy=False):
asyncore.dispatcher_with_send.__init__(self, sock)
self.callback = callback
self.daisy = daisy
self.high_speed = high_speed
self.last_odd_sample = OpenBCISample()
self.parser = parser if parser is not None else ParseRaw(gains=[24, 24, 24, 24, 24, 24, 24, 24])
def handle_read(self):
data = self.recv(3000) # 3000 is the max data the WiFi shield is allowed to send over TCP
if len(data) > 2:
if self.high_speed:
packets = int(len(data)/33)
raw_data_packets = []
for i in range(packets):
raw_data_packets.append(bytearray(data[i * k.RAW_PACKET_SIZE: i * k.RAW_PACKET_SIZE + k.RAW_PACKET_SIZE]))
samples = self.parser.transform_raw_data_packets_to_sample(raw_data_packets=raw_data_packets)
for sample in samples:
# if a daisy module is attached, wait to concatenate two samples (main board + daisy)
# before passing it to callback
if self.daisy:
# odd sample: daisy sample, save for later
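                        # (note: ~n % 2 evaluates truthy exactly when
                        # sample_number n is even, so this branch buffers
                        # even-numbered packets for pairing below)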
if ~sample.sample_number % 2:
self.last_odd_sample = sample
# even sample: concatenate and send if last sample was the first part, otherwise drop the packet
elif sample.sample_number - 1 == self.last_odd_sample.sample_number:
# the aux data will be the average between the two samples, as the channel
# samples themselves have been averaged by the board
daisy_sample = self.parser.make_daisy_sample_object_wifi(self.last_odd_sample, sample)
if self.callback is not None:
self.callback(daisy_sample)
else:
if self.callback is not None:
self.callback(sample)
else:
try:
possible_chunks = data.split('\r\n')
if len(possible_chunks) > 1:
possible_chunks = possible_chunks[:-1]
for possible_chunk in possible_chunks:
if len(possible_chunk) > 2:
chunk_dict = json.loads(possible_chunk)
if 'chunk' in chunk_dict:
for sample in chunk_dict['chunk']:
if self.callback is not None:
self.callback(sample)
else:
print("not a sample packet")
except ValueError as e:
print("failed to parse: %s" % data)
print(e)
except BaseException as e:
print(e)
class WiFiShieldServer(asyncore.dispatcher):
def __init__(self, host, port, callback=None, gains=None, high_speed=True, daisy=False):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.daisy = daisy
self.listen(5)
self.callback = None
self.handler = None
self.parser = ParseRaw(gains=gains)
self.high_speed = high_speed
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print('Incoming connection from %s' % repr(addr))
self.handler = WiFiShieldHandler(sock, self.callback, high_speed=self.high_speed,
parser=self.parser, daisy=self.daisy)
def set_callback(self, callback):
self.callback = callback
if self.handler is not None:
self.handler.callback = callback
def set_daisy(self, daisy):
self.daisy = daisy
if self.handler is not None:
self.handler.daisy = daisy
def set_gains(self, gains):
self.parser.set_ads1299_scale_factors(gains)
def set_parser(self, parser):
self.parser = parser
if self.handler is not None:
self.handler.parser = parser
| 39.516691
| 189
| 0.551585
|
import asyncore
import atexit
import json
import logging
import re
import socket
import timeit
try:
import urllib2
except ImportError:
import urllib
import requests
import xmltodict
from openbci.utils import k, ParseRaw, OpenBCISample, ssdp
SAMPLE_RATE = 0
class OpenBCIWiFi(object):
def __init__(self, ip_address=None, shield_name=None, sample_rate=None, log=True, timeout=3,
max_packets_to_skip=20, latency=10000, high_speed=True, ssdp_attempts=5,
num_channels=8, local_ip_address=None):
self.daisy = False
self.gains = None
self.high_speed = high_speed
self.impedance = False
self.ip_address = ip_address
self.latency = latency
self.log = log
self.max_packets_to_skip = max_packets_to_skip
self.num_channels = num_channels
self.sample_rate = sample_rate
self.shield_name = shield_name
self.ssdp_attempts = ssdp_attempts
self.streaming = False
self.timeout = timeout
self.board_type = "none"
self.eeg_channels_per_sample = 0
self.read_state = 0
self.log_packet_count = 0
self.packets_dropped = 0
self.time_last_packet = 0
if self.log:
print("Welcome to OpenBCI Native WiFi Shield Driver - Please contribute code!")
self.local_ip_address = local_ip_address
if not self.local_ip_address:
self.local_ip_address = self._get_local_ip_address()
self.local_wifi_server = WiFiShieldServer(self.local_ip_address, 0)
self.local_wifi_server_port = self.local_wifi_server.socket.getsockname()[1]
if self.log:
print("Opened socket on %s:%d" % (self.local_ip_address, self.local_wifi_server_port))
if ip_address is None:
for i in range(ssdp_attempts):
try:
self.find_wifi_shield(wifi_shield_cb=self.on_shield_found)
break
except OSError:
if self.log:
print("Did not find any WiFi Shields")
else:
self.on_shield_found(ip_address)
def on_shield_found(self, ip_address):
self.ip_address = ip_address
self.connect()
atexit.register(self.disconnect)
def loop(self):
asyncore.loop()
def _get_local_ip_address(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip_address = s.getsockname()[0]
s.close()
return local_ip_address
def getBoardType(self):
return self.board_type
def setImpedance(self, flag):
self.impedance = bool(flag)
def connect(self):
if self.ip_address is None:
raise ValueError('self.ip_address cannot be None')
if self.log:
print("Init WiFi connection with IP: " + self.ip_address)
res_board = requests.get("http://%s/board" % self.ip_address)
if res_board.status_code == 200:
board_info = res_board.json()
if not board_info['board_connected']:
raise RuntimeError("No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide")
self.board_type = board_info['board_type']
self.eeg_channels_per_sample = board_info['num_channels']
if self.log:
print("Connected to %s with %s channels" % (self.board_type, self.eeg_channels_per_sample))
self.gains = None
if self.board_type == k.BOARD_CYTON:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = False
elif self.board_type == k.BOARD_DAISY:
self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24]
self.daisy = True
elif self.board_type == k.BOARD_GANGLION:
self.gains = [51, 51, 51, 51]
self.daisy = False
self.local_wifi_server.set_daisy(daisy=self.daisy)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
if self.high_speed:
output_style = 'raw'
else:
output_style = 'json'
res_tcp_post = requests.post("http://%s/tcp" % self.ip_address,
json={
'ip': self.local_ip_address,
'port': self.local_wifi_server_port,
'output': output_style,
'delimiter': True,
'latency': self.latency
})
if res_tcp_post.status_code == 200:
tcp_status = res_tcp_post.json()
if tcp_status['connected']:
if self.log:
print("WiFi Shield to Python TCP Socket Established")
else:
raise RuntimeWarning("WiFi Shield is not able to connect to local server. Please open an issue.")
def init_streaming(self):
res_stream_start = requests.get("http://%s/stream/start" % self.ip_address)
if res_stream_start.status_code == 200:
self.streaming = True
self.packets_dropped = 0
self.time_last_packet = timeit.default_timer()
else:
raise EnvironmentError("Unable to start streaming. Check API for status code %d on /stream/start" % res_stream_start.status_code)
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None):
if self.log:
print("Try to find WiFi shields on your local wireless network")
print("Scanning for %d seconds nearby devices..." % self.timeout)
list_ip = []
list_id = []
found_shield = False
def wifi_shield_found(response):
res = requests.get(response.location, verify=False).text
device_description = xmltodict.parse(res)
cur_shield_name = str(device_description['root']['device']['serialNumber'])
cur_base_url = str(device_description['root']['URLBase'])
cur_ip_address = re.findall(r'[0-9]+(?:\.[0-9]+){3}', cur_base_url)[0]
list_id.append(cur_shield_name)
list_ip.append(cur_ip_address)
found_shield = True
if shield_name is None:
print("Found WiFi Shield %s with IP Address %s" % (cur_shield_name, cur_ip_address))
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
else:
if shield_name == cur_shield_name:
if wifi_shield_cb is not None:
wifi_shield_cb(cur_ip_address)
ssdp_hits = ssdp.discover("urn:schemas-upnp-org:device:Basic:1", timeout=self.timeout, wifi_found_cb=wifi_shield_found)
nb_wifi_shields = len(list_id)
if nb_wifi_shields < 1:
print("No WiFi Shields found ;(")
raise OSError('Cannot find OpenBCI WiFi Shield with local name')
if nb_wifi_shields > 1:
print(
"Found " + str(nb_wifi_shields) +
", selecting first named: " + list_id[0] +
" with IPV4: " + list_ip[0])
return list_ip[0]
def wifi_write(self, output):
res_command_post = requests.post("http://%s/command" % self.ip_address,
json={'command': output})
if res_command_post.status_code == 200:
ret_val = res_command_post.text
if self.log:
print(ret_val)
return ret_val
else:
if self.log:
print("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
raise RuntimeError("Error code: %d %s" % (res_command_post.status_code, res_command_post.text))
def getSampleRate(self):
return self.sample_rate
def getNbEEGChannels(self):
return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=-1):
start_time = timeit.default_timer()
if not isinstance(callback, list):
self.local_wifi_server.set_callback(callback)
else:
self.local_wifi_server.set_callback(callback[0])
if not self.streaming:
self.init_streaming()
n("Disabling synthetic square wave")
try:
self.wifi_write(']')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
elif signal == 1:
self.warn("Enabling synthetic square wave")
try:
self.wifi_write('[')
except Exception as e:
print("Something went wrong while setting signal: " + str(e))
else:
self.warn("%s is not a known test signal. Valid signal is 0-1" % signal)
def set_channel(self, channel, toggle_position):
try:
if channel > self.num_channels:
raise ValueError('Cannot set non-existent channel')
# Command characters indexed by channel number (channels 1-16).
enable_commands = list("!@#$%^&*QWERTYUI")
disable_commands = list("12345678qwertyui")
if toggle_position == 1:
self.wifi_write(enable_commands[channel - 1])
elif toggle_position == 0:
self.wifi_write(disable_commands[channel - 1])
except Exception as e:
print("Something went wrong while setting channels: " + str(e))
def set_channel_settings(self, channel, enabled=True, gain=24, input_type=0, include_bias=True, use_srb2=True, use_srb1=True):
try:
if channel > self.num_channels:
raise ValueError('Cannot set non-existent channel')
if self.board_type == k.BOARD_GANGLION:
raise ValueError('Cannot use with Ganglion')
ch_array = list("12345678QWERTYUI")
command = list("x1060110X")
command[1] = ch_array[channel-1]
if not enabled:
command[2] = '1'
if gain == 1:
command[3] = '0'
if gain == 2:
command[3] = '1'
if gain == 4:
command[3] = '2'
if gain == 6:
command[3] = '3'
if gain == 8:
command[3] = '4'
if gain == 12:
command[3] = '5'
if not include_bias:
command[5] = '0'
if not use_srb2:
command[6] = '0'
if use_srb1:
command[7] = '1'
command_send = ''.join(command)
self.wifi_write(command_send)
# Make sure to update gain in wifi
self.gains[channel-1] = gain
self.local_wifi_server.set_gains(gains=self.gains)
self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type))
except ValueError as e:
print("Something went wrong while setting channel settings: " + str(e))
def set_sample_rate(self, sample_rate):
try:
if self.board_type == k.BOARD_CYTON or self.board_type == k.BOARD_DAISY:
if sample_rate == 250:
self.wifi_write('~6')
elif sample_rate == 500:
self.wifi_write('~5')
elif sample_rate == 1000:
self.wifi_write('~4')
elif sample_rate == 2000:
self.wifi_write('~3')
elif sample_rate == 4000:
self.wifi_write('~2')
elif sample_rate == 8000:
self.wifi_write('~1')
elif sample_rate == 16000:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
elif self.board_type == k.BOARD_GANGLION:
if sample_rate == 200:
self.wifi_write('~7')
elif sample_rate == 400:
self.wifi_write('~6')
elif sample_rate == 800:
self.wifi_write('~5')
elif sample_rate == 1600:
self.wifi_write('~4')
elif sample_rate == 3200:
self.wifi_write('~3')
elif sample_rate == 6400:
self.wifi_write('~2')
elif sample_rate == 12800:
self.wifi_write('~1')
elif sample_rate == 25600:
self.wifi_write('~0')
else:
print("Sample rate not supported: " + str(sample_rate))
else:
print("Board type not supported for setting sample rate")
except Exception as e:
print("Something went wrong while setting sample rate: " + str(e))
def set_accelerometer(self, toggle_position):
try:
if self.board_type == k.BOARD_GANGLION:
# Commands to set toggle to on position
if toggle_position == 1:
self.wifi_write('n')
# Commands to set toggle to off position
elif toggle_position == 0:
self.wifi_write('N')
else:
print("Board type not supported for setting accelerometer")
except Exception as e:
print("Something went wrong while setting accelerometer: " + str(e))
def stop(self):
print("Stopping streaming...")
self.streaming = False
# connection might be already down here
try:
if self.impedance:
print("Stopping with impedance testing")
self.wifi_write('Z')
else:
self.wifi_write('s')
except Exception as e:
print("Something went wrong while asking the board to stop streaming: " + str(e))
if self.log:
logging.warning('sent <s>: stopped streaming')
def disconnect(self):
if self.streaming:
self.stop()
# should not try to read/write anything after that, will crash
def warn(self, text):
if self.log:
# log how many packets where sent succesfully in between warnings
if self.log_packet_count:
logging.info('Data packets received:' + str(self.log_packet_count))
self.log_packet_count = 0
logging.warning(text)
print("Warning: %s" % text)
def check_connection(self):
# stop checking when we're no longer streaming
if not self.streaming:
return
if self.packets_dropped > self.max_packets_to_skip:
self.warn("Too many packets dropped, attempt to reconnect")
self.reconnect()
elif self.timeout > 0 and timeit.default_timer() - self.time_last_packet > self.timeout:
self.warn("Too long since got new data, attempt to reconnect")
self.reconnect()
def reconnect(self):
self.warn('Reconnecting')
self.stop()
self.disconnect()
self.connect()
self.init_streaming()
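# Usage sketch (names are illustrative; assumes an instance of the enclosing
# driver class, constructed elsewhere in this module, bound to `shield`):
# def handle_sample(sample):
#     print(sample.channel_data)
# shield.set_sample_rate(1000)
# shield.start_streaming(handle_sample)
# shield.stop()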
class WiFiShieldHandler(asyncore.dispatcher_with_send):
def __init__(self, sock, callback=None, high_speed=True,
parser=None, daisy=False):
asyncore.dispatcher_with_send.__init__(self, sock)
self.callback = callback
self.daisy = daisy
self.high_speed = high_speed
self.last_odd_sample = OpenBCISample()
self.parser = parser if parser is not None else ParseRaw(gains=[24, 24, 24, 24, 24, 24, 24, 24])
def handle_read(self):
data = self.recv(3000)
if len(data) > 2:
if self.high_speed:
packets = len(data) // k.RAW_PACKET_SIZE  # 33-byte raw packets
raw_data_packets = []
for i in range(packets):
raw_data_packets.append(bytearray(data[i * k.RAW_PACKET_SIZE: i * k.RAW_PACKET_SIZE + k.RAW_PACKET_SIZE]))
samples = self.parser.transform_raw_data_packets_to_sample(raw_data_packets=raw_data_packets)
for sample in samples:
if self.daisy:
if ~sample.sample_number % 2:
self.last_odd_sample = sample
elif sample.sample_number - 1 == self.last_odd_sample.sample_number:
daisy_sample = self.parser.make_daisy_sample_object_wifi(self.last_odd_sample, sample)
if self.callback is not None:
self.callback(daisy_sample)
else:
if self.callback is not None:
self.callback(sample)
else:
try:
possible_chunks = data.split('\r\n')
if len(possible_chunks) > 1:
possible_chunks = possible_chunks[:-1]
for possible_chunk in possible_chunks:
if len(possible_chunk) > 2:
chunk_dict = json.loads(possible_chunk)
if 'chunk' in chunk_dict:
for sample in chunk_dict['chunk']:
if self.callback is not None:
self.callback(sample)
else:
print("not a sample packet")
except ValueError as e:
print("failed to parse: %s" % data)
print(e)
except BaseException as e:
print(e)
class WiFiShieldServer(asyncore.dispatcher):
def __init__(self, host, port, callback=None, gains=None, high_speed=True, daisy=False):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.daisy = daisy
self.listen(5)
self.callback = callback
self.handler = None
self.parser = ParseRaw(gains=gains)
self.high_speed = high_speed
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print('Incoming connection from %s' % repr(addr))
self.handler = WiFiShieldHandler(sock, self.callback, high_speed=self.high_speed,
parser=self.parser, daisy=self.daisy)
def set_callback(self, callback):
self.callback = callback
if self.handler is not None:
self.handler.callback = callback
def set_daisy(self, daisy):
self.daisy = daisy
if self.handler is not None:
self.handler.daisy = daisy
def set_gains(self, gains):
self.parser.set_ads1299_scale_factors(gains)
def set_parser(self, parser):
self.parser = parser
if self.handler is not None:
self.handler.parser = parser
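# Serving sketch (assumption: the caller drives asyncore's polling loop; the
# port below is illustrative):
# server = WiFiShieldServer('0.0.0.0', 9001, gains=[24] * 8)
# asyncore.loop(timeout=0.1)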
| true
| true
|
790a921f2ad52d53457d98e97130c1978705ad61
| 4,748
|
py
|
Python
|
python-data-analysis/2019-nCoV-global/global_map.py
|
meteor1993/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 83
|
2019-10-15T06:54:06.000Z
|
2022-03-28T14:08:21.000Z
|
python-data-analysis/2019-nCoV-global/global_map.py
|
wenxuefeng3930/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 1
|
2020-04-16T08:13:19.000Z
|
2020-07-14T01:52:46.000Z
|
python-data-analysis/2019-nCoV-global/global_map.py
|
wenxuefeng3930/python-learning
|
4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40
|
[
"MIT"
] | 74
|
2019-11-02T08:10:36.000Z
|
2022-02-19T12:23:36.000Z
|
from pyecharts import options as opts
from pyecharts.charts import Map
import pandas as pd
import namemap
def read_country_code():
"""
Build the country-name dictionary (Chinese -> English).
:return:
"""
country_dict = {}
for key, val in namemap.nameMap.items():  # invert the nameMap mapping (swap keys and values)
country_dict[val] = key
return country_dict
def read_csv():
"""
Read the data; return the list of English country names and the list of cumulative confirmed counts.
:return:
"""
country_dict = read_country_code()
data = pd.read_csv("2019-nCoV.csv", index_col=False)
countrys_names = list()
confirmed_count = list()
for x in range(len(data.index)):
if data['name'].iloc[x] in country_dict.keys():
countrys_names.append(country_dict[data['name'].iloc[x]])
confirmed_count.append(data['confirm'].iloc[x])
else:
print(data['name'].iloc[x])
return countrys_names, confirmed_count
def draw_map():
"""
Draw the world map.
A puzzling issue came up here: the map only rendered data when the two lists
were hard-coded; data returned from the functions above did not render.
:return:
"""
# Fix for the issue noted above: the ints in confirmed_count are numpy ints and
# must be converted to plain Python ints before being handed to pyecharts.
# Thanks to @李康伟 (a reader of the author's WeChat official account) for reporting this.
countrys_names, confirmed_count = read_csv()
confirmed_count_list = []
for item in confirmed_count:
confirmed_count_list.append(int(item))
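# Minimal illustration of why the cast is needed (assumption: pyecharts
# ultimately serializes the data pairs with json.dumps, which rejects numpy ints):
#   import json, numpy as np
#   json.dumps([np.int64(5)])        # raises TypeError: not JSON serializable
#   json.dumps([int(np.int64(5))])   # '[5]'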
# countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', "Côte d'Ivoire", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho']
#
# confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]
c = (
Map()
.add(
"确诊人数",
[list(z) for z in zip(countrys_names, confirmed_count_list)],
is_map_symbol_show=False,
maptype="world",
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
visualmap_opts=opts.VisualMapOpts(max_=1700000),
)
.render("map_world.html")
)
if __name__ == '__main__':
draw_map()
| 60.101266
| 1,829
| 0.624684
|
from pyecharts import options as opts
from pyecharts.charts import Map
import pandas as pd
import namemap
def read_country_code():
country_dict = {}
for key, val in namemap.nameMap.items():
country_dict[val] = key
return country_dict
def read_csv():
country_dict = read_country_code()
data = pd.read_csv("2019-nCoV.csv", index_col=False)
countrys_names = list()
confirmed_count = list()
for x in range(len(data.index)):
if data['name'].iloc[x] in country_dict.keys():
countrys_names.append(country_dict[data['name'].iloc[x]])
confirmed_count.append(data['confirm'].iloc[x])
else:
print(data['name'].iloc[x])
return countrys_names, confirmed_count
def draw_map():
countrys_names, confirmed_count = read_csv()
confirmed_count_list = []
for item in confirmed_count:
confirmed_count_list.append(int(item))
#
# confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]
c = (
Map()
.add(
"确诊人数",
[list(z) for z in zip(countrys_names, confirmed_count_list)],
is_map_symbol_show=False,
maptype="world",
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
visualmap_opts=opts.VisualMapOpts(max_=1700000),
)
.render("map_world.html")
)
if __name__ == '__main__':
draw_map()
| true
| true
|
790a9231b616b2ff50de6c1b6afc57bc29343260
| 2,186
|
py
|
Python
|
docs/image-registration.py
|
santosh653/dtcwt
|
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
|
[
"BSD-2-Clause"
] | 61
|
2015-01-04T09:21:29.000Z
|
2022-03-07T16:25:02.000Z
|
docs/image-registration.py
|
santosh653/dtcwt
|
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
|
[
"BSD-2-Clause"
] | 17
|
2015-04-02T13:37:07.000Z
|
2018-03-07T09:57:57.000Z
|
docs/image-registration.py
|
santosh653/dtcwt
|
01d9e87dc9abfa244a89c1f05aebf3dec6999f3a
|
[
"BSD-2-Clause"
] | 26
|
2015-04-16T06:22:16.000Z
|
2021-12-07T09:17:44.000Z
|
#!/usr/bin/env python
"""
An example of image registration via the DTCWT.
This script demonstrates some methods for image registration using the DTCWT.
"""
from __future__ import division, print_function
import itertools
import logging
import os
from matplotlib.pyplot import *
import numpy as np
import dtcwt
from dtcwt.numpy import Transform2d
import dtcwt.sampling
from dtcwt.registration import *
logging.basicConfig(level=logging.INFO)
import datasets
def register_frames(filename):
# Load test images
logging.info('Loading frames from "{0}"'.format(filename))
f1, f2 = datasets.regframes(filename)
# Take the DTCWT of both frames.
logging.info('Taking DTCWT')
nlevels = 6
trans = Transform2d()
t1 = trans.forward(f1, nlevels=nlevels)
t2 = trans.forward(f2, nlevels=nlevels)
# Solve for transform
logging.info('Finding flow')
avecs = estimatereg(t1, t2)
logging.info('Computing warped image')
warped_f1 = warp(f1, avecs, method='bilinear')
logging.info('Computing velocity field')
step = 16
X, Y = np.meshgrid(np.arange(f1.shape[1]), np.arange(f1.shape[0]))
vxs, vys = velocityfield(avecs, f1.shape, method='nearest')
vxs -= np.median(vxs.flat)
vys -= np.median(vys.flat)
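# The median subtraction above removes any global (whole-frame) translation,
# so the velocity plots below highlight local motion only.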
figure(figsize=(16,9))
subplot(221)
imshow(np.dstack((f1, f2, np.zeros_like(f1))))
title('Overlaid frames')
subplot(222)
imshow(np.dstack((warped_f1, f2, np.zeros_like(f2))))
title('Frame 1 warped to Frame 2 (image domain)')
subplot(223)
sc = 2
imshow(np.dstack((f1, f2, np.zeros_like(f2))))
quiver(X[::step,::step], Y[::step,::step],
-sc*vxs[::step,::step]*f1.shape[1], -sc*vys[::step,::step]*f1.shape[0],
color='b', angles='xy', scale_units='xy', scale=1)
title('Computed velocity field (median subtracted), x{0}'.format(sc))
subplot(224)
imshow(np.sqrt(vxs*vxs + vys*vys), interpolation='none', cmap=cm.hot)
colorbar()
title('Magnitude of computed velocity (median subtracted)')
# savefig(os.path.splitext(os.path.basename(filename))[0] + '-registration.png')
register_frames('traffic')
register_frames('tennis')
| 26.987654
| 84
| 0.677036
|
from __future__ import division, print_function
import itertools
import logging
import os
from matplotlib.pyplot import *
import numpy as np
import dtcwt
from dtcwt.numpy import Transform2d
import dtcwt.sampling
from dtcwt.registration import *
logging.basicConfig(level=logging.INFO)
import datasets
def register_frames(filename):
logging.info('Loading frames from "{0}"'.format(filename))
f1, f2 = datasets.regframes(filename)
logging.info('Taking DTCWT')
nlevels = 6
trans = Transform2d()
t1 = trans.forward(f1, nlevels=nlevels)
t2 = trans.forward(f2, nlevels=nlevels)
logging.info('Finding flow')
avecs = estimatereg(t1, t2)
logging.info('Computing warped image')
warped_f1 = warp(f1, avecs, method='bilinear')
logging.info('Computing velocity field')
step = 16
X, Y = np.meshgrid(np.arange(f1.shape[1]), np.arange(f1.shape[0]))
vxs, vys = velocityfield(avecs, f1.shape, method='nearest')
vxs -= np.median(vxs.flat)
vys -= np.median(vys.flat)
figure(figsize=(16,9))
subplot(221)
imshow(np.dstack((f1, f2, np.zeros_like(f1))))
title('Overlaid frames')
subplot(222)
imshow(np.dstack((warped_f1, f2, np.zeros_like(f2))))
title('Frame 1 warped to Frame 2 (image domain)')
subplot(223)
sc = 2
imshow(np.dstack((f1, f2, np.zeros_like(f2))))
quiver(X[::step,::step], Y[::step,::step],
-sc*vxs[::step,::step]*f1.shape[1], -sc*vys[::step,::step]*f1.shape[0],
color='b', angles='xy', scale_units='xy', scale=1)
title('Computed velocity field (median subtracted), x{0}'.format(sc))
subplot(224)
imshow(np.sqrt(vxs*vxs + vys*vys), interpolation='none', cmap=cm.hot)
colorbar()
title('Magnitude of computed velocity (median subtracted)')
register_frames('traffic')
register_frames('tennis')
| true
| true
|
790a92bb022245a3ec764ff48c44fb42a1a17a3b
| 41
|
py
|
Python
|
sklearn_plus/preprocessing/text/en/__init__.py
|
liuxiaoan8008/sklearn-plus
|
67258f6c9b833c82c2ffa2ec062fc2cc686b3004
|
[
"MIT"
] | null | null | null |
sklearn_plus/preprocessing/text/en/__init__.py
|
liuxiaoan8008/sklearn-plus
|
67258f6c9b833c82c2ffa2ec062fc2cc686b3004
|
[
"MIT"
] | null | null | null |
sklearn_plus/preprocessing/text/en/__init__.py
|
liuxiaoan8008/sklearn-plus
|
67258f6c9b833c82c2ffa2ec062fc2cc686b3004
|
[
"MIT"
] | null | null | null |
from punc_tokenizer import PuncTokenizer
| 20.5
| 40
| 0.902439
|
from punc_tokenizer import PuncTokenizer
| true
| true
|
790a92c9d40d91364e19a32e1de7ea417b16ae5e
| 1,227
|
py
|
Python
|
datahub/search/company/apps.py
|
reupen/data-hub-api
|
d854188f4c45da0e89075add132a15bb1227ff79
|
[
"MIT"
] | null | null | null |
datahub/search/company/apps.py
|
reupen/data-hub-api
|
d854188f4c45da0e89075add132a15bb1227ff79
|
[
"MIT"
] | 16
|
2020-04-01T15:25:35.000Z
|
2020-04-14T14:07:30.000Z
|
datahub/search/company/apps.py
|
reupen/data-hub-api
|
d854188f4c45da0e89075add132a15bb1227ff79
|
[
"MIT"
] | null | null | null |
from django.db.models import Max
from datahub.company.models import Company as DBCompany, CompanyPermission
from datahub.core.query_utils import get_aggregate_subquery
from datahub.search.apps import SearchApp
from datahub.search.company.models import Company
class CompanySearchApp(SearchApp):
"""SearchApp for company."""
name = 'company'
es_model = Company
view_permissions = (f'company.{CompanyPermission.view_company}',)
export_permission = f'company.{CompanyPermission.export_company}'
queryset = DBCompany.objects.select_related(
'archived_by',
'business_type',
'employee_range',
'export_experience_category',
'headquarter_type',
'one_list_account_owner',
'global_headquarters__one_list_account_owner',
'global_headquarters',
'address_country',
'registered_address_country',
'sector',
'sector__parent',
'sector__parent__parent',
'turnover_range',
'uk_region',
).prefetch_related(
'export_countries__country',
).annotate(
latest_interaction_date=get_aggregate_subquery(
DBCompany,
Max('interactions__date'),
),
)
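# Note (assumption about the datahub helper): get_aggregate_subquery wraps the
# Max() aggregate in a subquery, so annotating latest_interaction_date avoids a
# GROUP BY / join fan-out on the main company queryset.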
| 30.675
| 74
| 0.685412
|
from django.db.models import Max
from datahub.company.models import Company as DBCompany, CompanyPermission
from datahub.core.query_utils import get_aggregate_subquery
from datahub.search.apps import SearchApp
from datahub.search.company.models import Company
class CompanySearchApp(SearchApp):
name = 'company'
es_model = Company
view_permissions = (f'company.{CompanyPermission.view_company}',)
export_permission = f'company.{CompanyPermission.export_company}'
queryset = DBCompany.objects.select_related(
'archived_by',
'business_type',
'employee_range',
'export_experience_category',
'headquarter_type',
'one_list_account_owner',
'global_headquarters__one_list_account_owner',
'global_headquarters',
'address_country',
'registered_address_country',
'sector',
'sector__parent',
'sector__parent__parent',
'turnover_range',
'uk_region',
).prefetch_related(
'export_countries__country',
).annotate(
latest_interaction_date=get_aggregate_subquery(
DBCompany,
Max('interactions__date'),
),
)
| true
| true
|
790a9301f2baeb8df509ef35cb9ef110a6f784d6
| 196
|
py
|
Python
|
celery/loaders/app.py
|
frac/celery
|
b6b32ca9a951e81722c52412c3f8a1cff67109dd
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T02:49:59.000Z
|
2015-11-05T02:49:59.000Z
|
celery/loaders/app.py
|
frac/celery
|
b6b32ca9a951e81722c52412c3f8a1cff67109dd
|
[
"BSD-3-Clause"
] | null | null | null |
celery/loaders/app.py
|
frac/celery
|
b6b32ca9a951e81722c52412c3f8a1cff67109dd
|
[
"BSD-3-Clause"
] | null | null | null |
from celery.loaders.base import BaseLoader
class AppLoader(BaseLoader):
def on_worker_init(self):
self.import_default_modules()
def read_configuration(self):
return {}
| 17.818182
| 42
| 0.709184
|
from celery.loaders.base import BaseLoader
class AppLoader(BaseLoader):
def on_worker_init(self):
self.import_default_modules()
def read_configuration(self):
return {}
| true
| true
|
790a93b9dbec3f8bad62494a7ec8393de3b647ea
| 2,956
|
py
|
Python
|
desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/TLSA.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/TLSA.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/dnspython-1.15.0/dns/rdtypes/ANY/TLSA.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.rdata
import dns.rdatatype
class TLSA(dns.rdata.Rdata):
"""TLSA record
@ivar usage: The certificate usage
@type usage: int
@ivar selector: The selector field
@type selector: int
@ivar mtype: The 'matching type' field
@type mtype: int
@ivar cert: The 'Certificate Association Data' field
@type cert: string
@see: RFC 6698"""
__slots__ = ['usage', 'selector', 'mtype', 'cert']
def __init__(self, rdclass, rdtype, usage, selector,
mtype, cert):
super(TLSA, self).__init__(rdclass, rdtype)
self.usage = usage
self.selector = selector
self.mtype = mtype
self.cert = cert
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %d %s' % (self.usage,
self.selector,
self.mtype,
dns.rdata._hexify(self.cert,
chunksize=128))
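# Example presentation form produced by to_text (per RFC 6698; digest value is
# hypothetical):
#   "3 1 1 d2abde240d7cd3ee6b4b28c54df034b9"
# i.e. usage=3 (DANE-EE), selector=1 (SPKI), mtype=1 (SHA-256), then the hex
# certificate association data.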
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
usage = tok.get_uint8()
selector = tok.get_uint8()
mtype = tok.get_uint8()
cert_chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
cert_chunks.append(t.value.encode())
cert = b''.join(cert_chunks)
cert = binascii.unhexlify(cert)
return cls(rdclass, rdtype, usage, selector, mtype, cert)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
file.write(header)
file.write(self.cert)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BBB", wire[current: current + 3])
current += 3
rdlen -= 3
cert = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
| 35.614458
| 75
| 0.624154
|
import struct
import binascii
import dns.rdata
import dns.rdatatype
class TLSA(dns.rdata.Rdata):
__slots__ = ['usage', 'selector', 'mtype', 'cert']
def __init__(self, rdclass, rdtype, usage, selector,
mtype, cert):
super(TLSA, self).__init__(rdclass, rdtype)
self.usage = usage
self.selector = selector
self.mtype = mtype
self.cert = cert
def to_text(self, origin=None, relativize=True, **kw):
return '%d %d %d %s' % (self.usage,
self.selector,
self.mtype,
dns.rdata._hexify(self.cert,
chunksize=128))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
usage = tok.get_uint8()
selector = tok.get_uint8()
mtype = tok.get_uint8()
cert_chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
cert_chunks.append(t.value.encode())
cert = b''.join(cert_chunks)
cert = binascii.unhexlify(cert)
return cls(rdclass, rdtype, usage, selector, mtype, cert)
def to_wire(self, file, compress=None, origin=None):
header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
file.write(header)
file.write(self.cert)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
header = struct.unpack("!BBB", wire[current: current + 3])
current += 3
rdlen -= 3
cert = wire[current: current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
| true
| true
|
790a96999cf24260897ce867889a3d312cf4ed97
| 2,944
|
py
|
Python
|
operators/compositor_nodetree.py
|
MarcoHoo/RenderStackNode
|
e9624ccd4ebd4f72bd5b332205574bb053dbcb8d
|
[
"Apache-2.0"
] | null | null | null |
operators/compositor_nodetree.py
|
MarcoHoo/RenderStackNode
|
e9624ccd4ebd4f72bd5b332205574bb053dbcb8d
|
[
"Apache-2.0"
] | null | null | null |
operators/compositor_nodetree.py
|
MarcoHoo/RenderStackNode
|
e9624ccd4ebd4f72bd5b332205574bb053dbcb8d
|
[
"Apache-2.0"
] | null | null | null |
import bpy
from bpy.props import BoolProperty, StringProperty
import os
from ..preferences import get_pref
class RSN_OT_CreatCompositorNode(bpy.types.Operator):
bl_idname = "rsn.creat_compositor_node"
bl_label = "Separate Passes"
use_passes: BoolProperty(default=False)
view_layer: StringProperty(default="")
def set_context_layer(self):
nt = bpy.context.scene.node_tree
context_layer = None
for node in bpy.context.scene.node_tree.nodes:
if node.name == f'RSN {bpy.context.window.view_layer.name} Render Layers':
context_layer = node
if not context_layer:
context_layer = nt.nodes.new(type="CompositorNodeRLayers")
context_layer.name = f'RSN {bpy.context.window.view_layer.name} Render Layers'
try:
com = bpy.context.scene.node_tree.nodes['Composite']
nt.links.new(context_layer.outputs[0], com.inputs[0])
except Exception as e:
self.report({"ERROR"}, 'No Composite Node Found(Check its name must be "Composite") ')
def execute(self, context):
scn = context.scene
scn.use_nodes = True
nt = context.scene.node_tree
self.set_context_layer()
try:
render_layer_node = nt.nodes[f'RSN {self.view_layer} Render Layers']
except KeyError:
render_layer_node = nt.nodes.new(type="CompositorNodeRLayers")
render_layer_node.name = f'RSN {self.view_layer} Render Layers'
if self.view_layer != '':
render_layer_node.layer = self.view_layer
try:
nt.nodes.remove(nt.nodes[f'RSN {self.view_layer} Output'])
except Exception as e:
pass
if self.use_passes:
file_output_node = nt.nodes.new(type="CompositorNodeOutputFile")
file_output_node.name = f"RSN {self.view_layer} Output"
file_output_node.label = f"RSN {self.view_layer} Output"
file_output_node.base_path = os.path.join(context.scene.render.filepath, self.view_layer)
file_output_node.location = (400, -300)
file_output_node.width = 200
file_output_node.hide = True
nt = context.scene.node_tree
pref = get_pref()
separator = pref.node_file_path.file_path_separator
for i, output in enumerate(render_layer_node.outputs):
name = output.name
output_name = f"{self.view_layer}{separator}{name}{separator}"
if output_name not in file_output_node.file_slots:
file_output_node.file_slots.new(name=output_name)
nt.links.new(render_layer_node.outputs[name], file_output_node.inputs[output_name])
return {"FINISHED"}
def register():
bpy.utils.register_class(RSN_OT_CreatCompositorNode)
def unregister():
bpy.utils.unregister_class(RSN_OT_CreatCompositorNode)
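# Usage sketch: once registered, the operator is reachable through its
# bl_idname, e.g. from the Python console (layer name is illustrative):
# bpy.ops.rsn.creat_compositor_node(use_passes=True, view_layer="View Layer")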
| 35.047619
| 101
| 0.647758
|
import bpy
from bpy.props import BoolProperty, StringProperty
import os
from ..preferences import get_pref
class RSN_OT_CreatCompositorNode(bpy.types.Operator):
bl_idname = "rsn.creat_compositor_node"
bl_label = "Separate Passes"
use_passes: BoolProperty(default=False)
view_layer: StringProperty(default="")
def set_context_layer(self):
nt = bpy.context.scene.node_tree
context_layer = None
for node in bpy.context.scene.node_tree.nodes:
if node.name == f'RSN {bpy.context.window.view_layer.name} Render Layers':
context_layer = node
if not context_layer:
context_layer = nt.nodes.new(type="CompositorNodeRLayers")
context_layer.name = f'RSN {bpy.context.window.view_layer.name} Render Layers'
try:
com = bpy.context.scene.node_tree.nodes['Composite']
nt.links.new(context_layer.outputs[0], com.inputs[0])
except Exception as e:
self.report({"ERROR"}, 'No Composite Node Found(Check its name must be "Composite") ')
def execute(self, context):
scn = context.scene
scn.use_nodes = True
nt = context.scene.node_tree
self.set_context_layer()
try:
render_layer_node = nt.nodes[f'RSN {self.view_layer} Render Layers']
except KeyError:
render_layer_node = nt.nodes.new(type="CompositorNodeRLayers")
render_layer_node.name = f'RSN {self.view_layer} Render Layers'
if self.view_layer != '':
render_layer_node.layer = self.view_layer
try:
nt.nodes.remove(nt.nodes[f'RSN {self.view_layer} Output'])
except Exception as e:
pass
if self.use_passes:
file_output_node = nt.nodes.new(type="CompositorNodeOutputFile")
file_output_node.name = f"RSN {self.view_layer} Output"
file_output_node.label = f"RSN {self.view_layer} Output"
file_output_node.base_path = os.path.join(context.scene.render.filepath, self.view_layer)
file_output_node.location = (400, -300)
file_output_node.width = 200
file_output_node.hide = True
nt = context.scene.node_tree
pref = get_pref()
separator = pref.node_file_path.file_path_separator
for i, output in enumerate(render_layer_node.outputs):
name = output.name
output_name = f"{self.view_layer}{separator}{name}{separator}"
if output_name not in file_output_node.file_slots:
file_output_node.file_slots.new(name=output_name)
nt.links.new(render_layer_node.outputs[name], file_output_node.inputs[output_name])
return {"FINISHED"}
def register():
bpy.utils.register_class(RSN_OT_CreatCompositorNode)
def unregister():
bpy.utils.unregister_class(RSN_OT_CreatCompositorNode)
| true
| true
|
790a97a803f54aad4dd9e368e5882e838b11e592
| 1,141
|
py
|
Python
|
backend/permissions/roles/p30_normal_user.py
|
chalvern/Icarus
|
6b745f739d0c5ad218a97a05cd1e00669a5b2297
|
[
"Zlib"
] | null | null | null |
backend/permissions/roles/p30_normal_user.py
|
chalvern/Icarus
|
6b745f739d0c5ad218a97a05cd1e00669a5b2297
|
[
"Zlib"
] | null | null | null |
backend/permissions/roles/p30_normal_user.py
|
chalvern/Icarus
|
6b745f739d0c5ad218a97a05cd1e00669a5b2297
|
[
"Zlib"
] | null | null | null |
from permissions.roles.p10_visitor import merge_post_permissions_of_visitor
from permissions.roles.p20_inactive_user import inactive_user
from slim.base.permission import Ability, A, DataRecord
normal_user = Ability('user', {
'user': {
'nickname': (A.QUERY, A.READ),
'group': (A.READ,),
'biology': (A.QUERY, A.READ, A.WRITE),
'avatar': (A.QUERY, A.READ),
'type': (A.QUERY, A.READ, A.WRITE),
'url': (A.QUERY, A.READ, A.WRITE),
'location': (A.QUERY, A.READ, A.WRITE),
},
'topic': {
'title': (A.READ, A.CREATE, A.WRITE),
'board_id': (A.QUERY, A.READ, A.CREATE),
'content': (A.READ, A.CREATE, A.WRITE),
},
'comment': {
'related_id': (A.READ, A.CREATE,),
'related_type': (A.READ, A.CREATE,),
'reply_to_cmt_id': (A.READ, A.CREATE,),
'state': (A.READ, A.WRITE,),
'content': (A.READ, A.CREATE,),
},
'upload': merge_post_permissions_of_visitor({
'key': (A.READ, A.QUERY),
'size': (A.READ,),
'type_name': (A.READ, A.QUERY),
}),
'notif': A.ALL
}, based_on=inactive_user)
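# Note (assumption about slim's permission model): based_on=inactive_user means
# these per-table rules extend and override the inactive_user ability rather
# than replacing it wholesale.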
| 33.558824
| 75
| 0.560911
|
from permissions.roles.p10_visitor import merge_post_permissions_of_visitor
from permissions.roles.p20_inactive_user import inactive_user
from slim.base.permission import Ability, A, DataRecord
normal_user = Ability('user', {
'user': {
'nickname': (A.QUERY, A.READ),
'group': (A.READ,),
'biology': (A.QUERY, A.READ, A.WRITE),
'avatar': (A.QUERY, A.READ),
'type': (A.QUERY, A.READ, A.WRITE),
'url': (A.QUERY, A.READ, A.WRITE),
'location': (A.QUERY, A.READ, A.WRITE),
},
'topic': {
'title': (A.READ, A.CREATE, A.WRITE),
'board_id': (A.QUERY, A.READ, A.CREATE),
'content': (A.READ, A.CREATE, A.WRITE),
},
'comment': {
'related_id': (A.READ, A.CREATE,),
'related_type': (A.READ, A.CREATE,),
'reply_to_cmt_id': (A.READ, A.CREATE,),
'state': (A.READ, A.WRITE,),
'content': (A.READ, A.CREATE,),
},
'upload': merge_post_permissions_of_visitor({
'key': (A.READ, A.QUERY),
'size': (A.READ,),
'type_name': (A.READ, A.QUERY),
}),
'notif': A.ALL
}, based_on=inactive_user)
| true
| true
|
790a99e9b6fa5300ff649e42a1d71a3afc275e76
| 644
|
py
|
Python
|
jbank/migrations/0025_auto_20181101_1430.py
|
bachvtuan/django-jbank
|
1b384936d93b802d92442167efca292d2aaa2f47
|
[
"MIT"
] | null | null | null |
jbank/migrations/0025_auto_20181101_1430.py
|
bachvtuan/django-jbank
|
1b384936d93b802d92442167efca292d2aaa2f47
|
[
"MIT"
] | 2
|
2020-11-05T17:30:12.000Z
|
2021-02-24T23:54:35.000Z
|
jbank/migrations/0025_auto_20181101_1430.py
|
bachvtuan/django-jbank
|
1b384936d93b802d92442167efca292d2aaa2f47
|
[
"MIT"
] | 1
|
2021-12-16T09:27:04.000Z
|
2021-12-16T09:27:04.000Z
|
# Generated by Django 2.1.2 on 2018-11-01 14:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0024_auto_20180425_1704"),
]
operations = [
migrations.AddField(
model_name="payout",
name="reference",
field=models.CharField(blank=True, default="", max_length=32, verbose_name="recipient reference"),
),
migrations.AlterField(
model_name="payout",
name="messages",
field=models.TextField(blank=True, default="", verbose_name="recipient messages"),
),
]
| 26.833333
| 110
| 0.604037
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0024_auto_20180425_1704"),
]
operations = [
migrations.AddField(
model_name="payout",
name="reference",
field=models.CharField(blank=True, default="", max_length=32, verbose_name="recipient reference"),
),
migrations.AlterField(
model_name="payout",
name="messages",
field=models.TextField(blank=True, default="", verbose_name="recipient messages"),
),
]
| true
| true
|
790a9a8b489c409c977952f06eb57ba64fd7ab60
| 2,428
|
py
|
Python
|
config/config1_3_add_server_1.py
|
subincm/chain_replication
|
536522c011d2f0d31b6f0ce9c79d004723507aa8
|
[
"Apache-2.0"
] | null | null | null |
config/config1_3_add_server_1.py
|
subincm/chain_replication
|
536522c011d2f0d31b6f0ce9c79d004723507aa8
|
[
"Apache-2.0"
] | null | null | null |
config/config1_3_add_server_1.py
|
subincm/chain_replication
|
536522c011d2f0d31b6f0ce9c79d004723507aa8
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# 1) This is a test case to verify that the deposit case works fine.
# 2) It also checks whether duplicate requests are processed correctly.
###############################################################################
####################################
# Client Settings
# The client configuration is a dictionary where each key is a list of
# all the clients of a particular bank. Each entry in the list is a key:value
# pair of all the configurations of that client
####################################
client_conf = { 'CITI':
[
{'index':0, 'account_no': 9999,'client_time_out': 8, 'num_retransmits':3, 'resend_to_new_head':1, 'msg_loss_freq':0},
],}
#The clients will issue the following requests in that order to the servers
client_seq = [('getBalance', ('UID1', 8888)),
('deposit', ('UID1', 8888, 100)),
('deposit', ('UID2', 8888, 100)),
('deposit', ('UID3', 8888, 100)),
('deposit', ('UID4', 8888, 100)),
('deposit', ('UID5', 8888, 100)),
('withdraw', ('UID6', 8888, 100)),
('withdraw', ('UID7', 8888, 100)),
('withdraw', ('UID8', 8888, 100)),
('withdraw', ('UID9', 8888, 100)),
('withdraw', ('UID10', 8888, 100)),
('getBalance', ('UID1', 8888))
]
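# Net effect of the sequence above: five deposits of 100 followed by five
# withdrawals of 100 on account 8888, so the final getBalance should report
# the same balance as the first one (net change of 0).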
#random(seed, numReq, probGetBalance, probDeposit, probWithdraw, probTransfer)
#client_prob_conf = [
#{'index':0, 'seed':450, 'numReq':10, 'prob':[('getBalance',0.10), ('deposit',0.5), ('withdraw',0.4), ('transfer',0)]}
#]
####################################
# Server Settings
# The server configuration is a dictionary where each key is a list of
# all the servers of a particular bank. Each entry in the list is a key:value
# pair of all the configurations of that server
####################################
server_conf = { 'CITI':
[
{'index':0, 'startup_delay': 0, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1001, 'heartbeat_interval':1},
{'index':1, 'startup_delay': 13, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1002, 'heartbeat_interval':1},
{'index':2, 'startup_delay': 0, 'rcv_lifetime':1000, 'snd_lifetime':1000, 'ip_addr': '127.0.0.1', 'port': 1003, 'heartbeat_interval':1}
],}
master_conf = { 'master_interval':5}
| 47.607843
| 141
| 0.536656
| true
| true
|
|
790a9b877b44a2d9ca1215d426930ff3cc456e34
| 15,454
|
py
|
Python
|
setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 58
|
2015-01-11T09:05:15.000Z
|
2022-03-17T23:44:07.000Z
|
setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 1,467
|
2015-01-01T16:47:44.000Z
|
2022-02-28T16:51:20.000Z
|
setup.py
|
OCHA-DAP/hdx-ckan
|
202e0c44adc4ea8d0b90141e69365b65cce68672
|
[
"Apache-2.0"
] | 17
|
2015-05-06T14:04:21.000Z
|
2021-11-11T19:58:16.000Z
|
# encoding: utf-8
import os
import os.path
from pkg_resources import parse_version
# Avoid problem releasing to pypi from vagrant
if os.environ.get('USER', '') == 'vagrant':
del os.link
try:
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
from ckan import (__version__, __description__, __long_description__,
__license__)
#
# Check setuptools version
#
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:
setuptools_requirement = f.read().strip()
min_setuptools_version = parse_version(setuptools_requirement.split('==')[1])
if parse_version(setuptools_version) < min_setuptools_version:
raise AssertionError(
'setuptools version error\n'
'You need a newer version of setuptools.\n'
'Install the recommended version:\n'
' pip install -r requirement-setuptools.txt\n'
'and then try again to install ckan into your python environment.'
)
entry_points = {
'paste.app_factory': [
'main = ckan.config.middleware:make_app',
],
'paste.app_install': [
'main = ckan.config.install:CKANInstaller',
],
'console_scripts': [
'ckan = ckan.cli.cli:ckan',
],
'ckan.click_command': [
'datastore = ckanext.datastore.cli:datastore',
'datapusher = ckanext.datapusher.cli:datapusher',
],
'paste.paster_create_template': [
'ckanext = ckan.pastertemplates:CkanextTemplate',
],
'ckan.forms': [
'standard = ckan.forms.package:get_standard_fieldset',
'package = ckan.forms.package:get_standard_fieldset',
'group = ckan.forms.group:get_group_fieldset',
'package_group = ckan.forms.group:get_package_group_fieldset',
],
'ckan.search': [
'sql = ckan.lib.search.sql:SqlSearchBackend',
'solr = ckan.lib.search.solr_backend:SolrSearchBackend',
],
'ckan.plugins': [
'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',
'stats = ckanext.stats.plugin:StatsPlugin',
'publisher_form = ckanext.publisher_form.forms:PublisherForm',
'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',
'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',
'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',
'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',
'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',
'organizations = ckanext.organizations.forms:OrganizationForm',
'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',
'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',
'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',
'datastore = ckanext.datastore.plugin:DatastorePlugin',
'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',
'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',
'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',
'text_view = ckanext.textview.plugin:TextView',
'recline_view = ckanext.reclineview.plugin:ReclineView',
'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',
'datatables_view = ckanext.datatablesview.plugin:DataTablesView',
'image_view = ckanext.imageview.plugin:ImageView',
'audio_view = ckanext.audioview.plugin:AudioView',
'video_view = ckanext.videoview.plugin:VideoView',
'webpage_view = ckanext.webpageview.plugin:WebPageView',
# FIXME: Remove deprecated resource previews below. You should use the
# versions as *_view instead.
'text_preview = ckanext.textview.plugin:TextView',
'recline_preview = ckanext.reclineview.plugin:ReclineView',
'recline_grid = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map = ckanext.reclineview.plugin:ReclineMapView',
# End of deprecated previews
'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',
'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',
'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',
'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',
'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',
'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',
'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',
'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',
'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',
'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',
'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',
'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',
'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',
'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',
'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',
'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',
'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',
'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',
'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',
'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',
'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',
'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',
'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',
'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',
'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',
'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',
'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',
'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',
'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',
'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',
'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',
'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',
'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',
'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',
'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',
'example_theme_v22_fanstatic_and_webassets = ckanext.example_theme_docs.v22_fanstatic_and_webassets.plugin:ExampleThemePlugin',
'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',
'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',
'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',
'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',
'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',
'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',
'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',
'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',
'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',
'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',
'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',
'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',
'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',
'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',
'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',
'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',
],
'ckan.system_plugins': [
'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',
],
'ckan.test_plugins': [
'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',
'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',
'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',
'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',
'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',
'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',
'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',
'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',
'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',
'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',
'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',
'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',
'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',
'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',
'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',
'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',
'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',
'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',
'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',
'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',
'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',
'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',
'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',
],
'babel.extractors': [
'ckan = ckan.lib.extract:extract_ckan',
],
}
extras_require = {}
_extras_groups = [
('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'),
('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),
]
for group, filepath in _extras_groups:
with open(os.path.join(HERE, filepath), 'r') as f:
extras_require[group] = f.readlines()
setup(
name='ckan',
version=__version__,
author='https://github.com/ckan/ckan/graphs/contributors',
author_email='info@ckan.org',
license=__license__,
url='http://ckan.org/',
description=__description__,
keywords='data packaging component tool server',
long_description=__long_description__,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup']),
# namespace_packages=['ckanext', 'ckanext.stats'],
message_extractors={
'ckan': [
('templates/importer/**', 'ignore', None),
('templates/**.html', 'ckan', None),
('templates/**.txt', 'ckan', None),
('templates_legacy/**.html', 'ckan', None),
('public/base/test/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
],
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**.html', 'ckan', None),
('multilingual/solr/*.txt', 'ignore', None),
]
},
entry_points=entry_points,
# setup.py test command needs a TestSuite so does not work with py.test
# tests_require=[ 'py >= 0.8.0-alpha2' ]
extras_require=extras_require,
classifiers=[
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| 60.603922
| 151
| 0.752103
|
import os
import os.path
from pkg_resources import parse_version
if os.environ.get('USER', '') == 'vagrant':
del os.link
try:
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import (setup, find_packages,
__version__ as setuptools_version)
from ckan import (__version__, __description__, __long_description__,
__license__)
HERE = os.path.dirname(__file__)
with open(os.path.join(HERE, 'requirement-setuptools.txt')) as f:
setuptools_requirement = f.read().strip()
min_setuptools_version = parse_version(setuptools_requirement.split('==')[1])
if parse_version(setuptools_version) < min_setuptools_version:
raise AssertionError(
'setuptools version error\n'
'You need a newer version of setuptools.\n'
'Install the recommended version:\n'
' pip install -r requirement-setuptools.txt\n'
'and then try again to install ckan into your python environment.'
)
entry_points = {
'paste.app_factory': [
'main = ckan.config.middleware:make_app',
],
'paste.app_install': [
'main = ckan.config.install:CKANInstaller',
],
'console_scripts': [
'ckan = ckan.cli.cli:ckan',
],
'ckan.click_command': [
'datastore = ckanext.datastore.cli:datastore',
'datapusher = ckanext.datapusher.cli:datapusher',
],
'paste.paster_create_template': [
'ckanext = ckan.pastertemplates:CkanextTemplate',
],
'ckan.forms': [
'standard = ckan.forms.package:get_standard_fieldset',
'package = ckan.forms.package:get_standard_fieldset',
'group = ckan.forms.group:get_group_fieldset',
'package_group = ckan.forms.group:get_package_group_fieldset',
],
'ckan.search': [
'sql = ckan.lib.search.sql:SqlSearchBackend',
'solr = ckan.lib.search.solr_backend:SolrSearchBackend',
],
'ckan.plugins': [
'synchronous_search = ckan.lib.search:SynchronousSearchPlugin',
'stats = ckanext.stats.plugin:StatsPlugin',
'publisher_form = ckanext.publisher_form.forms:PublisherForm',
'publisher_dataset_form = ckanext.publisher_form.forms:PublisherDatasetForm',
'multilingual_dataset = ckanext.multilingual.plugin:MultilingualDataset',
'multilingual_group = ckanext.multilingual.plugin:MultilingualGroup',
'multilingual_tag = ckanext.multilingual.plugin:MultilingualTag',
'multilingual_resource = ckanext.multilingual.plugin:MultilingualResource',
'organizations = ckanext.organizations.forms:OrganizationForm',
'organizations_dataset = ckanext.organizations.forms:OrganizationDatasetForm',
'expire_api_token = ckanext.expire_api_token.plugin:ExpireApiTokenPlugin',
'chained_functions = ckanext.chained_functions.plugin:ChainedFunctionsPlugin',
'datastore = ckanext.datastore.plugin:DatastorePlugin',
'datapusher=ckanext.datapusher.plugin:DatapusherPlugin',
'test_tag_vocab_plugin = ckanext.test_tag_vocab_plugin:MockVocabTagsPlugin',
'resource_proxy = ckanext.resourceproxy.plugin:ResourceProxy',
'text_view = ckanext.textview.plugin:TextView',
'recline_view = ckanext.reclineview.plugin:ReclineView',
'recline_grid_view = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph_view = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map_view = ckanext.reclineview.plugin:ReclineMapView',
'datatables_view = ckanext.datatablesview.plugin:DataTablesView',
'image_view = ckanext.imageview.plugin:ImageView',
'audio_view = ckanext.audioview.plugin:AudioView',
'video_view = ckanext.videoview.plugin:VideoView',
'webpage_view = ckanext.webpageview.plugin:WebPageView',
'text_preview = ckanext.textview.plugin:TextView',
'recline_preview = ckanext.reclineview.plugin:ReclineView',
'recline_grid = ckanext.reclineview.plugin:ReclineGridView',
'recline_graph = ckanext.reclineview.plugin:ReclineGraphView',
'recline_map = ckanext.reclineview.plugin:ReclineMapView',
'example_itemplatehelpers = ckanext.example_itemplatehelpers.plugin:ExampleITemplateHelpersPlugin',
'example_idatasetform = ckanext.example_idatasetform.plugin:ExampleIDatasetFormPlugin',
'example_idatasetform_v1 = ckanext.example_idatasetform.plugin_v1:ExampleIDatasetFormPlugin',
'example_idatasetform_v2 = ckanext.example_idatasetform.plugin_v2:ExampleIDatasetFormPlugin',
'example_idatasetform_v3 = ckanext.example_idatasetform.plugin_v3:ExampleIDatasetFormPlugin',
'example_idatasetform_v4 = ckanext.example_idatasetform.plugin_v4:ExampleIDatasetFormPlugin',
'example_idatasetform_v5 = ckanext.example_idatasetform.plugin_v5:ExampleIDatasetFormPlugin',
'example_idatasetform_v6 = ckanext.example_idatasetform.plugin_v6:ExampleIDatasetFormPlugin',
'example_idatasetform_v7 = ckanext.example_idatasetform.plugin_v7:ExampleIDatasetFormPlugin',
'example_igroupform = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin',
'example_igroupform_v2 = ckanext.example_igroupform.plugin_v2:ExampleIGroupFormPlugin',
'example_igroupform_default_group_type = ckanext.example_igroupform.plugin:ExampleIGroupFormPlugin_DefaultGroupType',
'example_igroupform_organization = ckanext.example_igroupform.plugin:ExampleIGroupFormOrganizationPlugin',
'example_iauthfunctions_v1 = ckanext.example_iauthfunctions.plugin_v1:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v2 = ckanext.example_iauthfunctions.plugin_v2:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v3 = ckanext.example_iauthfunctions.plugin_v3:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v4 = ckanext.example_iauthfunctions.plugin_v4:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v5_custom_config_setting = ckanext.example_iauthfunctions.plugin_v5_custom_config_setting:ExampleIAuthFunctionsPlugin',
'example_iauthfunctions_v6_parent_auth_functions = ckanext.example_iauthfunctions.plugin_v6_parent_auth_functions:ExampleIAuthFunctionsPlugin',
'example_theme_v01_empty_extension = ckanext.example_theme_docs.v01_empty_extension.plugin:ExampleThemePlugin',
'example_theme_v02_empty_template = ckanext.example_theme_docs.v02_empty_template.plugin:ExampleThemePlugin',
'example_theme_v03_jinja = ckanext.example_theme_docs.v03_jinja.plugin:ExampleThemePlugin',
'example_theme_v04_ckan_extends = ckanext.example_theme_docs.v04_ckan_extends.plugin:ExampleThemePlugin',
'example_theme_v05_block = ckanext.example_theme_docs.v05_block.plugin:ExampleThemePlugin',
'example_theme_v06_super = ckanext.example_theme_docs.v06_super.plugin:ExampleThemePlugin',
'example_theme_v07_helper_function = ckanext.example_theme_docs.v07_helper_function.plugin:ExampleThemePlugin',
'example_theme_v08_custom_helper_function = ckanext.example_theme_docs.v08_custom_helper_function.plugin:ExampleThemePlugin',
'example_theme_v09_snippet = ckanext.example_theme_docs.v09_snippet.plugin:ExampleThemePlugin',
'example_theme_v10_custom_snippet = ckanext.example_theme_docs.v10_custom_snippet.plugin:ExampleThemePlugin',
'example_theme_v11_HTML_and_CSS = ckanext.example_theme_docs.v11_HTML_and_CSS.plugin:ExampleThemePlugin',
'example_theme_v12_extra_public_dir = ckanext.example_theme_docs.v12_extra_public_dir.plugin:ExampleThemePlugin',
'example_theme_v13_custom_css = ckanext.example_theme_docs.v13_custom_css.plugin:ExampleThemePlugin',
'example_theme_v14_more_custom_css = ckanext.example_theme_docs.v14_more_custom_css.plugin:ExampleThemePlugin',
'example_theme_v15_fanstatic = ckanext.example_theme_docs.v15_fanstatic.plugin:ExampleThemePlugin',
'example_theme_v16_initialize_a_javascript_module = ckanext.example_theme_docs.v16_initialize_a_javascript_module.plugin:ExampleThemePlugin',
'example_theme_v17_popover = ckanext.example_theme_docs.v17_popover.plugin:ExampleThemePlugin',
'example_theme_v18_snippet_api = ckanext.example_theme_docs.v18_snippet_api.plugin:ExampleThemePlugin',
'example_theme_v19_01_error = ckanext.example_theme_docs.v19_01_error.plugin:ExampleThemePlugin',
'example_theme_v19_02_error_handling = ckanext.example_theme_docs.v19_02_error_handling.plugin:ExampleThemePlugin',
'example_theme_v20_pubsub = ckanext.example_theme_docs.v20_pubsub.plugin:ExampleThemePlugin',
'example_theme_v21_custom_jquery_plugin = ckanext.example_theme_docs.v21_custom_jquery_plugin.plugin:ExampleThemePlugin',
'example_theme_v22_fanstatic_and_webassets = ckanext.example_theme_docs.v22_fanstatic_and_webassets.plugin:ExampleThemePlugin',
'example_theme_custom_config_setting = ckanext.example_theme_docs.custom_config_setting.plugin:ExampleThemePlugin',
'example_theme_custom_emails = ckanext.example_theme_docs.custom_emails.plugin:ExampleCustomEmailsPlugin',
'example_iresourcecontroller = ckanext.example_iresourcecontroller.plugin:ExampleIResourceControllerPlugin',
'example_ivalidators = ckanext.example_ivalidators.plugin:ExampleIValidatorsPlugin',
'example_iconfigurer = ckanext.example_iconfigurer.plugin:ExampleIConfigurerPlugin',
'example_itranslation = ckanext.example_itranslation.plugin:ExampleITranslationPlugin',
'example_iconfigurer_v1 = ckanext.example_iconfigurer.plugin_v1:ExampleIConfigurerPlugin',
'example_iconfigurer_v2 = ckanext.example_iconfigurer.plugin_v2:ExampleIConfigurerPlugin',
'example_flask_iblueprint = ckanext.example_flask_iblueprint.plugin:ExampleFlaskIBlueprintPlugin',
'example_flask_streaming = ckanext.example_flask_streaming.plugin:ExampleFlaskStreamingPlugin',
'example_iuploader = ckanext.example_iuploader.plugin:ExampleIUploader',
'example_idatastorebackend = ckanext.example_idatastorebackend.plugin:ExampleIDatastoreBackendPlugin',
'example_ipermissionlabels = ckanext.example_ipermissionlabels.plugin:ExampleIPermissionLabelsPlugin',
'example_iapitoken = ckanext.example_iapitoken.plugin:ExampleIApiTokenPlugin',
'example_iclick = ckanext.example_iclick.plugin:ExampleIClickPlugin',
'example_iauthenticator = ckanext.example_iauthenticator.plugin:ExampleIAuthenticatorPlugin',
],
'ckan.system_plugins': [
'domain_object_mods = ckan.model.modification:DomainObjectModificationExtension',
],
'ckan.test_plugins': [
'routes_plugin = tests.legacy.ckantestplugins:RoutesPlugin',
'mapper_plugin = tests.legacy.ckantestplugins:MapperPlugin',
'session_plugin = tests.legacy.ckantestplugins:SessionPlugin',
'mapper_plugin2 = tests.legacy.ckantestplugins:MapperPlugin2',
'authorizer_plugin = tests.legacy.ckantestplugins:AuthorizerPlugin',
'test_observer_plugin = tests.legacy.ckantestplugins:PluginObserverPlugin',
'action_plugin = tests.legacy.ckantestplugins:ActionPlugin',
'auth_plugin = tests.legacy.ckantestplugins:AuthPlugin',
'test_group_plugin = tests.legacy.ckantestplugins:MockGroupControllerPlugin',
'test_package_controller_plugin = tests.legacy.ckantestplugins:MockPackageControllerPlugin',
'test_resource_preview = tests.legacy.ckantestplugins:MockResourcePreviewExtension',
'test_json_resource_preview = tests.legacy.ckantestplugins:JsonMockResourcePreviewExtension',
'sample_datastore_plugin = ckanext.datastore.tests.sample_datastore_plugin:SampleDataStorePlugin',
'example_datastore_deleted_with_count_plugin = ckanext.datastore.tests.test_chained_action:ExampleDataStoreDeletedWithCountPlugin',
'example_data_store_search_sql_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleDataStoreSearchSQLPlugin',
'example_external_provider_plugin = ckanext.datastore.tests.test_chained_auth_functions:ExampleExternalProviderPlugin',
'test_datastore_view = ckan.tests.lib.test_datapreview:MockDatastoreBasedResourceView',
'test_datapusher_plugin = ckanext.datapusher.tests.test_interfaces:FakeDataPusherPlugin',
'test_routing_plugin = ckan.tests.config.test_middleware:MockRoutingPlugin',
'test_flash_plugin = ckan.tests.config.test_sessions:FlashMessagePlugin',
'test_helpers_plugin = ckan.tests.lib.test_helpers:TestHelpersPlugin',
'test_feed_plugin = ckan.tests.controllers.test_feed:MockFeedPlugin',
'test_js_translations_plugin = ckan.tests.lib.test_i18n:TestJSTranslationsPlugin',
'legacy_mock_search_plugin = ckan.tests.legacy.logic.test_action:MockPackageSearchPlugin',
],
'babel.extractors': [
'ckan = ckan.lib.extract:extract_ckan',
],
}
extras_require = {}
_extras_groups = [
('requirements', 'requirements.txt'), ('requirements-py2', 'requirements-py2.txt'),
('setuptools', 'requirement-setuptools.txt'), ('dev', 'dev-requirements.txt'),
]
for group, filepath in _extras_groups:
with open(os.path.join(HERE, filepath), 'r') as f:
extras_require[group] = f.readlines()
setup(
name='ckan',
version=__version__,
author='https://github.com/ckan/ckan/graphs/contributors',
author_email='info@ckan.org',
license=__license__,
url='http://ckan.org/',
description=__description__,
keywords='data packaging component tool server',
long_description=__long_description__,
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup']),
message_extractors={
'ckan': [
('templates/importer/**', 'ignore', None),
('templates/**.html', 'ckan', None),
('templates/**.txt', 'ckan', None),
('templates_legacy/**.html', 'ckan', None),
('public/base/test/**', 'ignore', None),
('**.py', 'python', None),
('**.js', 'javascript', None),
],
'ckanext': [
('**.py', 'python', None),
('**.js', 'javascript', None),
('**.html', 'ckan', None),
('multilingual/solr/*.txt', 'ignore', None),
]
},
entry_points=entry_points,
extras_require=extras_require,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| true
| true
|
790a9bc1921b8abaa9ff05bbcbd5651e42f1573d
| 4,952
|
py
|
Python
|
tqsdk/utils.py
|
aManOf502/tqsdk-python
|
70d0e30ad4ceda04f08a8125d9c45d4096542f4c
|
[
"Apache-2.0"
] | 1
|
2021-12-15T11:30:40.000Z
|
2021-12-15T11:30:40.000Z
|
tqsdk/utils.py
|
YountMan/tqsdk-python
|
b766b45bb82c89a0401a6a84e0e42600fa10e6f4
|
[
"Apache-2.0"
] | null | null | null |
tqsdk/utils.py
|
YountMan/tqsdk-python
|
b766b45bb82c89a0401a6a84e0e42600fa10e6f4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'yanqiong'
import random
import secrets
from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager
from tqsdk.ins_schema import ins_schema, _add_all_frags
RD = random.Random(secrets.randbits(128))  # initialize the random engine with a random seed, so that strategies launched at the same time do not end up sharing the same seed
def _generate_uuid(prefix=''):
return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}"
def _query_for_quote(symbol):
"""
Return the query_pack that requests instrument information for a given symbol
Calls to this function should all come from SDK code actively requesting instrument information
User requests for instrument information always start with PYSDK_api, because in backtesting they carry a timestamp parameter and must not go through this function
"""
symbol_list = symbol if isinstance(symbol, list) else [symbol]
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {
"aid": "ins_query",
"query_id": _generate_uuid(prefix='PYSDK_quote_'),
"query": op.__to_graphql__()
}
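# Minimal usage sketch (hypothetical symbol; the GraphQL text depends on
# ins_schema, so only the envelope fields are shown):
#     pack = _query_for_quote("SHFE.cu2112")
#     assert pack["aid"] == "ins_query"
#     assert pack["query_id"].startswith("PYSDK_quote_")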
def _query_for_init():
"""
Return the query for instruments of certain classes
todo: to stay compatible with the legacy api._data["quote"].items() style usage exposed to users, the exchanges should be restricted to ["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"]
"""
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
_add_all_frags(query)
return op.__to_graphql__()
night_trading_table = {
"DCE.a": ["21:00:00", "23:00:00"],
"DCE.b": ["21:00:00", "23:00:00"],
"DCE.c": ["21:00:00", "23:00:00"],
"DCE.cs": ["21:00:00", "23:00:00"],
"DCE.m": ["21:00:00", "23:00:00"],
"DCE.y": ["21:00:00", "23:00:00"],
"DCE.p": ["21:00:00", "23:00:00"],
"DCE.l": ["21:00:00", "23:00:00"],
"DCE.v": ["21:00:00", "23:00:00"],
"DCE.pp": ["21:00:00", "23:00:00"],
"DCE.j": ["21:00:00", "23:00:00"],
"DCE.jm": ["21:00:00", "23:00:00"],
"DCE.i": ["21:00:00", "23:00:00"],
"DCE.eg": ["21:00:00", "23:00:00"],
"DCE.eb": ["21:00:00", "23:00:00"],
"DCE.rr": ["21:00:00", "23:00:00"],
"DCE.pg": ["21:00:00", "23:00:00"],
"CZCE.CF": ["21:00:00", "23:00:00"],
"CZCE.CY": ["21:00:00", "23:00:00"],
"CZCE.SA": ["21:00:00", "23:00:00"],
"CZCE.SR": ["21:00:00", "23:00:00"],
"CZCE.TA": ["21:00:00", "23:00:00"],
"CZCE.OI": ["21:00:00", "23:00:00"],
"CZCE.MA": ["21:00:00", "23:00:00"],
"CZCE.FG": ["21:00:00", "23:00:00"],
"CZCE.RM": ["21:00:00", "23:00:00"],
"CZCE.ZC": ["21:00:00", "23:00:00"],
"CZCE.TC": ["21:00:00", "23:00:00"],
"SHFE.rb": ["21:00:00", "23:00:00"],
"SHFE.hc": ["21:00:00", "23:00:00"],
"SHFE.fu": ["21:00:00", "23:00:00"],
"SHFE.bu": ["21:00:00", "23:00:00"],
"SHFE.ru": ["21:00:00", "23:00:00"],
"SHFE.sp": ["21:00:00", "23:00:00"],
"INE.nr": ["21:00:00", "23:00:00"],
"SHFE.cu": ["21:00:00", "25:00:00"],
"SHFE.al": ["21:00:00", "25:00:00"],
"SHFE.zn": ["21:00:00", "25:00:00"],
"SHFE.pb": ["21:00:00", "25:00:00"],
"SHFE.ni": ["21:00:00", "25:00:00"],
"SHFE.sn": ["21:00:00", "25:00:00"],
"SHFE.ss": ["21:00:00", "25:00:00"],
"SHFE.au": ["21:00:00", "26:30:00"],
"SHFE.ag": ["21:00:00", "26:30:00"],
"INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
"""为 quotes 中应该有夜盘但是市价合约文件中没有夜盘的品种,添加夜盘时间"""
for symbol in quotes:
product_id = quotes[symbol].get("product_id")
if quotes[symbol].get("trading_time") and product_id:
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")):
quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
"""
Return the value at the index obtained from bisect_right(); when the insertion point is equidistant from its neighbours, priority decides whether the right or the left value is returned
a: must be a list already sorted in ascending order
bisect_right : Return the index where to insert item x in list a, assuming a is sorted.
"""
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
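# Behaviour sketch:
#     a = [0, 10, 20]
#     _bisect_value(a, 5)                   # -> 10, tie resolved to the right by default
#     _bisect_value(a, 5, priority="left")  # -> 0
#     _bisect_value(a, 12)                  # -> 10, closer to the left neighbour
#     _bisect_value(a, -3)                  # -> 0, clamped to the first element
#     _bisect_value(a, 99)                  # -> 20, clamped to the last element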
class BlockManagerUnconsolidated(BlockManager):
"""mock BlockManager for unconsolidated, 不会因为自动合并同类型的 blocks 而导致 k 线数据不更新"""
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = False
def _consolidate_inplace(self): pass
| 35.120567
| 108
| 0.577948
|
__author__ = 'yanqiong'
import random
import secrets
from bisect import bisect_right
from sgqlc.operation import Operation
from pandas.core.internals import BlockManager
from tqsdk.ins_schema import ins_schema, _add_all_frags
RD = random.Random(secrets.randbits(128))
def _generate_uuid(prefix=''):
return f"{prefix + '_' if prefix else ''}{RD.getrandbits(128):032x}"
def _query_for_quote(symbol):
symbol_list = symbol if isinstance(symbol, list) else [symbol]
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(instrument_id=symbol_list)
_add_all_frags(query)
return {
"aid": "ins_query",
"query_id": _generate_uuid(prefix='PYSDK_quote_'),
"query": op.__to_graphql__()
}
def _query_for_init():
op = Operation(ins_schema.rootQuery)
query = op.multi_symbol_info(class_=["FUTURE", "INDEX", "OPTION", "COMBINE", "CONT"],
exchange_id=["SHFE", "DCE", "CZCE", "INE", "CFFEX", "KQ"])
_add_all_frags(query)
return op.__to_graphql__()
night_trading_table = {
"DCE.a": ["21:00:00", "23:00:00"],
"DCE.b": ["21:00:00", "23:00:00"],
"DCE.c": ["21:00:00", "23:00:00"],
"DCE.cs": ["21:00:00", "23:00:00"],
"DCE.m": ["21:00:00", "23:00:00"],
"DCE.y": ["21:00:00", "23:00:00"],
"DCE.p": ["21:00:00", "23:00:00"],
"DCE.l": ["21:00:00", "23:00:00"],
"DCE.v": ["21:00:00", "23:00:00"],
"DCE.pp": ["21:00:00", "23:00:00"],
"DCE.j": ["21:00:00", "23:00:00"],
"DCE.jm": ["21:00:00", "23:00:00"],
"DCE.i": ["21:00:00", "23:00:00"],
"DCE.eg": ["21:00:00", "23:00:00"],
"DCE.eb": ["21:00:00", "23:00:00"],
"DCE.rr": ["21:00:00", "23:00:00"],
"DCE.pg": ["21:00:00", "23:00:00"],
"CZCE.CF": ["21:00:00", "23:00:00"],
"CZCE.CY": ["21:00:00", "23:00:00"],
"CZCE.SA": ["21:00:00", "23:00:00"],
"CZCE.SR": ["21:00:00", "23:00:00"],
"CZCE.TA": ["21:00:00", "23:00:00"],
"CZCE.OI": ["21:00:00", "23:00:00"],
"CZCE.MA": ["21:00:00", "23:00:00"],
"CZCE.FG": ["21:00:00", "23:00:00"],
"CZCE.RM": ["21:00:00", "23:00:00"],
"CZCE.ZC": ["21:00:00", "23:00:00"],
"CZCE.TC": ["21:00:00", "23:00:00"],
"SHFE.rb": ["21:00:00", "23:00:00"],
"SHFE.hc": ["21:00:00", "23:00:00"],
"SHFE.fu": ["21:00:00", "23:00:00"],
"SHFE.bu": ["21:00:00", "23:00:00"],
"SHFE.ru": ["21:00:00", "23:00:00"],
"SHFE.sp": ["21:00:00", "23:00:00"],
"INE.nr": ["21:00:00", "23:00:00"],
"SHFE.cu": ["21:00:00", "25:00:00"],
"SHFE.al": ["21:00:00", "25:00:00"],
"SHFE.zn": ["21:00:00", "25:00:00"],
"SHFE.pb": ["21:00:00", "25:00:00"],
"SHFE.ni": ["21:00:00", "25:00:00"],
"SHFE.sn": ["21:00:00", "25:00:00"],
"SHFE.ss": ["21:00:00", "25:00:00"],
"SHFE.au": ["21:00:00", "26:30:00"],
"SHFE.ag": ["21:00:00", "26:30:00"],
"INE.sc": ["21:00:00", "26:30:00"],
}
def _quotes_add_night(quotes):
for symbol in quotes:
product_id = quotes[symbol].get("product_id")
if quotes[symbol].get("trading_time") and product_id:
key = f"{quotes[symbol].get('exchange_id')}.{product_id}"
if key in night_trading_table and (not quotes[symbol]["trading_time"].get("night")):
quotes[symbol]["trading_time"]["night"] = [night_trading_table[key]]
def _bisect_value(a, x, priority="right"):
assert priority in ['left', 'right']
insert_index = bisect_right(a, x)
if 0 < insert_index < len(a):
left_dis = x - a[insert_index - 1]
right_dis = a[insert_index] - x
if left_dis == right_dis:
mid_index = insert_index - 1 if priority == "left" else insert_index
elif left_dis < right_dis:
mid_index = insert_index - 1
else:
mid_index = insert_index
else:
assert insert_index == 0 or insert_index == len(a)
mid_index = 0 if insert_index == 0 else (len(a) - 1)
return a[mid_index]
class BlockManagerUnconsolidated(BlockManager):
def __init__(self, *args, **kwargs):
BlockManager.__init__(self, *args, **kwargs)
self._is_consolidated = False
self._known_consolidated = False
def _consolidate_inplace(self): pass
| true
| true
|
790a9ef82489f2eb229ae3eda1e87ccd2595023f
| 314
|
py
|
Python
|
src/GUI/GUI.py
|
Steins7/MyLittleERP
|
f0220196136c1a0eff5dd7de469dfdb3466c1d5d
|
[
"MIT"
] | null | null | null |
src/GUI/GUI.py
|
Steins7/MyLittleERP
|
f0220196136c1a0eff5dd7de469dfdb3466c1d5d
|
[
"MIT"
] | null | null | null |
src/GUI/GUI.py
|
Steins7/MyLittleERP
|
f0220196136c1a0eff5dd7de469dfdb3466c1d5d
|
[
"MIT"
] | 1
|
2019-06-03T21:18:21.000Z
|
2019-06-03T21:18:21.000Z
|
import sys
from PySide2.QtWidgets import QApplication,QWidget,QMenuBar,QPushButton,QVBoxLayout,QMainWindow
from MainWindow import MainWindow
def start():
app = QApplication(sys.argv)
app.setApplicationName("My Little ERP")
mainWindow = MainWindow(app)
mainWindow.show()
sys.exit(app.exec_())
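# Minimal entry-point sketch: guard so the window only opens when the module
# is executed directly rather than imported.
if __name__ == '__main__':
    start()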
| 26.166667
| 95
| 0.761146
|
import sys
from PySide2.QtWidgets import QApplication,QWidget,QMenuBar,QPushButton,QVBoxLayout,QMainWindow
from MainWindow import MainWindow
def start():
app = QApplication(sys.argv)
app.setApplicationName("My Little ERP")
mainWindow = MainWindow(app)
mainWindow.show()
sys.exit(app.exec_())
| true
| true
|
790a9f8431be3a86d2d663827b7717d5cbf6b143
| 11,724
|
py
|
Python
|
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
|
cbbrainerd/WMCore
|
317969fdcfbfbb957e74aa1b45f92408d05a09a8
|
[
"Apache-2.0"
] | null | null | null |
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
|
cbbrainerd/WMCore
|
317969fdcfbfbb957e74aa1b45f92408d05a09a8
|
[
"Apache-2.0"
] | 1
|
2016-10-13T14:57:35.000Z
|
2016-10-13T14:57:35.000Z
|
src/python/WMCore/Services/PhEDEx/DataStructs/SubscriptionList.py
|
juztas/WMCore
|
f7e830a573d50fb1d7240797f18d809f994b934d
|
[
"Apache-2.0"
] | null | null | null |
"""
_SubscriptionList_
Module with data structures to handle PhEDEx subscriptions
in bulk.
"""
import logging
from WMCore.WMException import WMException
PhEDEx_VALID_SUBSCRIPTION_PRIORITIES = ['low', 'normal', 'high', 'reserved']
class PhEDExSubscriptionException(WMException):
"""
_PhEDExSubscriptionException_
Exception class for PhEDEx subscription errors
"""
pass
class PhEDExSubscription(object):
"""
_PhEDExSubscription_
Data structure which contains the PhEDEx fields for the
PhEDEx subscription data service
"""
def __init__(self, datasetPathList, nodeList, group, level = 'dataset',
priority = 'normal', move = 'n', static = 'n', custodial = 'n',
request_only = 'y', blocks = None, subscriptionId = -1, comments=""):
"""
Initialize the PhEDEx subscription with default values
"""
if isinstance(datasetPathList, basestring):
datasetPathList = [datasetPathList]
if isinstance(nodeList, basestring):
nodeList = [nodeList]
self.datasetPaths = set(datasetPathList)
self.nodes = set(nodeList)
self.level = level.lower()
self.priority = priority.lower()
self.move = move.lower()
self.static = static.lower()
self.group = group
self.custodial = custodial.lower()
self.request_only = request_only.lower()
self.requesterID = None
self.status = "New"
self.comments = comments
# Subscription id for internal accounting
self.subscriptionIds = set([subscriptionId])
# Optional blocks for non-dataset subscriptions
self.blocks = blocks
try:
# Validation checks on the subscription
for option in (self.static, self.custodial, self.request_only, self.move):
assert option in ('y', 'n')
assert self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES
assert self.level in ('dataset', 'block')
if self.level == 'block':
assert self.blocks is not None
except AssertionError:
msg = "The subscription is not a valid PhEDEx subscription.\n"
msg += "Check the options for this subscription: \n"
msg += "level: %s\n" % self.level
msg += "priority: %s\n" % self.priority
msg += "static: %s\n" % self.static
msg += "move: %s\n" % self.move
msg += "custodial: %s\n" % self.custodial
msg += "blocks: %s\n" % str(self.blocks)
raise PhEDExSubscriptionException(msg)
def __str__(self):
"""
Write out useful information for this object
:return:
"""
res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes,
'priority': self.priority, 'move': self.move,
'group': self.group, 'custodial': self.custodial,
'request_only': self.request_only, 'blocks': self.blocks}
return str(res)
def isEqualOptions(self, subscription):
return (self.level == subscription.level
and self.priority == subscription.priority
and self.request_only == subscription.request_only
and self.custodial == subscription.custodial
and self.group == subscription.group
and self.move == subscription.move
and self.static == subscription.static)
def isEqualDatasetPaths(self, subscription):
return (self.datasetPaths == subscription.datasetPaths
and self.isEqualOptions(subscription))
def isEqualNode(self, subscription):
return (self.nodes == subscription.nodes
and self.isEqualOptions(subscription))
def addDatasetPaths(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.datasetPaths = self.datasetPaths.union(subscription.datasetPaths)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def addNodes(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.nodes = self.nodes.union(subscription.nodes)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def getDatasetPaths(self):
return list(self.datasetPaths)
def getSubscriptionIds(self):
return list(self.subscriptionIds)
def getDatasetsAndBlocks(self):
"""
_getDatasetsAndBlocks_
Get the block structure
with datasets and blocks
"""
return self.blocks
def getNodes(self):
return list(self.nodes)
def getRequesterID(self):
return self.requesterID
def setRequesterID(self, requesterId):
if self.requesterID == None:
self.requesterID = requesterId
else:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
def matchesExistingTransferRequest(self, phedexDataSvc):
"""
_matchesExistingTransferRequest_
Check the given phedex data service to verify if an unapproved
transfer request equal to this subscription is already in the system.
"""
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingTransferRequest can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingTransferRequest is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
# Get the unapproved requests involving the node and dataset in this subscription
existingRequests = phedexDataSvc.getRequestList(dataset = dataset,
node = node,
decision = 'pending')['phedex']['request']
for request in existingRequests:
# Get the detailed information in the request
requestId = request['id']
requestInfo = phedexDataSvc.getTransferRequests(request = requestId)['phedex']['request']
if not requestInfo:
logging.error("Transfer request %s doesn't exist in PhEDEx", requestId)
continue # Strange, but let it go.
requestInfo = requestInfo[0] # It's a singleton
# Make sure that the node is in the destinations
destinations = requestInfo['destinations']['node']
for nodeInfo in destinations:
if nodeInfo['name'] == node:
break
else:
continue
# Create a subscription with this info
phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, self.level, requestInfo['priority'],
requestInfo['move'], requestInfo['static'],
requestInfo['custodial'], self.request_only)
if self.isEqualOptions(phedexRequest):
return True
return False
def matchesExistingSubscription(self, phedexDataSvc):
"""
_matchesExistingSubscription_
Check the given phedex data service to verify if a PhEDEx subscription
equal to this subscription is already in the system.
"""
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingSubscription can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingSubscription is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
# Check if the dataset has a subscription to the given node
existingSubscription = phedexDataSvc.subscriptions(dataset = dataset,
node = node)['phedex']['dataset']
if len(existingSubscription) < 1:
# No subscriptions
return False
datasetInfo = existingSubscription[0]
for subscriptionInfo in datasetInfo['subscription']:
# Check that the node in the subscription matches the current node
if node != subscriptionInfo['node']:
continue
# Create a subscription with the info
phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, subscriptionInfo['level'],
subscriptionInfo['priority'], subscriptionInfo['move'],
self.static, subscriptionInfo['custodial'],
self.request_only)
if self.isEqualOptions(phedexSub):
return True
return False
class SubscriptionList(object):
"""
_SubscriptionList_
Class representing a collection of subscriptions.
It organizes the subscriptions so as to minimize their number.
"""
def __init__(self):
self._subList = []
def addSubscription(self, subObj):
"""
_addSubscription_
Add a new subscription to the subscription policy.
If a subscription with equal options and nodes already exists, just merge in the dataset paths
"""
for subscription in self._subList:
if subscription.isEqualOptions(subObj):
if subscription.isEqualNode(subObj):
subscription.addDatasetPaths(subObj)
return
self._subList.append(subObj)
return
def compact(self):
"""
_compact_
Compact the subscription list by aggregating the subscriptions where the nodes
share a list of dataset paths.
"""
# Bag the subscriptions, keep indexes of bagged items to
# avoid modifying the list in place or copying the list
bags = []
baggedIndexes = set()
for i, subscriptionA in enumerate(self._subList):
if i in baggedIndexes:
continue
bags.append([subscriptionA])
for j, subscriptionB in enumerate(self._subList[i + 1:], i + 1):
if j in baggedIndexes:
continue
if subscriptionA.isEqualOptions(subscriptionB) and \
subscriptionA.isEqualDatasetPaths(subscriptionB):
bags[-1].append(subscriptionB)
baggedIndexes.add(j)
# Aggregate the subscriptions in the bags
newSubList = []
for bag in bags:
anchorSubscription = bag[0]
for subscription in bag[1:]:
anchorSubscription.addNodes(subscription)
newSubList.append(anchorSubscription)
self._subList = newSubList
def getSubscriptionList(self):
return self._subList
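# Usage sketch (dataset and node names are hypothetical):
#     subs = SubscriptionList()
#     subs.addSubscription(PhEDExSubscription('/A/B/RAW', 'T1_X_MSS', 'DataOps'))
#     subs.addSubscription(PhEDExSubscription('/A/B/RAW', 'T1_Y_MSS', 'DataOps'))
#     subs.compact()  # equal options and dataset paths, so the two requests
#                     # collapse into one subscription spanning both nodes
#     assert len(subs.getSubscriptionList()) == 1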
| 38.821192
| 101
| 0.594336
|
import logging
from WMCore.WMException import WMException
PhEDEx_VALID_SUBSCRIPTION_PRIORITIES = ['low', 'normal', 'high', 'reserved']
class PhEDExSubscriptionException(WMException):
pass
class PhEDExSubscription(object):
def __init__(self, datasetPathList, nodeList, group, level = 'dataset',
priority = 'normal', move = 'n', static = 'n', custodial = 'n',
request_only = 'y', blocks = None, subscriptionId = -1, comments=""):
if isinstance(datasetPathList, basestring):
datasetPathList = [datasetPathList]
if isinstance(nodeList, basestring):
nodeList = [nodeList]
self.datasetPaths = set(datasetPathList)
self.nodes = set(nodeList)
self.level = level.lower()
self.priority = priority.lower()
self.move = move.lower()
self.static = static.lower()
self.group = group
self.custodial = custodial.lower()
self.request_only = request_only.lower()
self.requesterID = None
self.status = "New"
self.comments = comments
self.subscriptionIds = set([subscriptionId])
self.blocks = blocks
try:
for option in (self.static, self.custodial, self.request_only, self.move):
assert option in ('y', 'n')
assert self.priority in PhEDEx_VALID_SUBSCRIPTION_PRIORITIES
assert self.level in ('dataset', 'block')
if self.level == 'block':
assert self.blocks is not None
except AssertionError:
msg = "The subscription is not a valid PhEDEx subscription.\n"
msg += "Check the options for this subscription: \n"
msg += "level: %s\n" % self.level
msg += "priority: %s\n" % self.priority
msg += "static: %s\n" % self.static
msg += "move: %s\n" % self.move
msg += "custodial: %s\n" % self.custodial
msg += "blocks: %s\n" % str(self.blocks)
raise PhEDExSubscriptionException(msg)
def __str__(self):
res = {'datasetPaths': self.datasetPaths, 'nodes': self.nodes,
'priority': self.priority, 'move': self.move,
'group': self.group, 'custodial': self.custodial,
'request_only': self.request_only, 'blocks': self.blocks}
return str(res)
def isEqualOptions(self, subscription):
return (self.level == subscription.level
and self.priority == subscription.priority
and self.request_only == subscription.request_only
and self.custodial == subscription.custodial
and self.group == subscription.group
and self.move == subscription.move
and self.static == subscription.static)
def isEqualDatasetPaths(self, subscription):
return (self.datasetPaths == subscription.datasetPaths
and self.isEqualOptions(subscription))
def isEqualNode(self, subscription):
return (self.nodes == subscription.nodes
and self.isEqualOptions(subscription))
def addDatasetPaths(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.datasetPaths = self.datasetPaths.union(subscription.datasetPaths)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def addNodes(self, subscription):
if self.requesterID != None:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
self.nodes = self.nodes.union(subscription.nodes)
self.subscriptionIds = self.subscriptionIds.union(subscription.subscriptionIds)
def getDatasetPaths(self):
return list(self.datasetPaths)
def getSubscriptionIds(self):
return list(self.subscriptionIds)
def getDatasetsAndBlocks(self):
return self.blocks
def getNodes(self):
return list(self.nodes)
def getRequesterID(self):
return self.requesterID
def setRequesterID(self, requesterId):
if self.requesterID == None:
self.requesterID = requesterId
else:
msg = """ PhEDEx subscription is already made with id: %s\n
Create a new subscription
""" % (self.requesterID)
raise Exception(msg)
def matchesExistingTransferRequest(self, phedexDataSvc):
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingTransferRequest can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingTransferRequest is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
existingRequests = phedexDataSvc.getRequestList(dataset = dataset,
node = node,
decision = 'pending')['phedex']['request']
for request in existingRequests:
requestId = request['id']
requestInfo = phedexDataSvc.getTransferRequests(request = requestId)['phedex']['request']
if not requestInfo:
logging.error("Transfer request %s doesn't exist in PhEDEx", requestId)
continue # Strange, but let it go.
requestInfo = requestInfo[0] # It's a singleton
destinations = requestInfo['destinations']['node']
for nodeInfo in destinations:
if nodeInfo['name'] == node:
break
else:
continue
phedexRequest = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, self.level, requestInfo['priority'],
requestInfo['move'], requestInfo['static'],
requestInfo['custodial'], self.request_only)
if self.isEqualOptions(phedexRequest):
return True
return False
def matchesExistingSubscription(self, phedexDataSvc):
if len(self.datasetPaths) != 1 or len(self.nodes) != 1:
msg = "matchesExistingSubscription can only run in single node/dataset subscriptions"
raise PhEDExSubscriptionException(msg)
if self.level != 'dataset':
msg = "matchesExistingSubscription is only supported by dataset subscriptions"
raise PhEDExSubscriptionException(msg)
node = next(iter(self.nodes))
dataset = next(iter(self.datasetPaths))
existingSubscription = phedexDataSvc.subscriptions(dataset = dataset,
node = node)['phedex']['dataset']
if len(existingSubscription) < 1:
return False
datasetInfo = existingSubscription[0]
for subscriptionInfo in datasetInfo['subscription']:
if node != subscriptionInfo['node']:
continue
phedexSub = PhEDExSubscription(self.datasetPaths, self.nodes,
self.group, subscriptionInfo['level'],
subscriptionInfo['priority'], subscriptionInfo['move'],
self.static, subscriptionInfo['custodial'],
self.request_only)
if self.isEqualOptions(phedexSub):
return True
return False
class SubscriptionList(object):
def __init__(self):
self._subList = []
def addSubscription(self, subObj):
for subscription in self._subList:
if subscription.isEqualOptions(subObj):
if subscription.isEqualNode(subObj):
subscription.addDatasetPaths(subObj)
return
self._subList.append(subObj)
return
def compact(self):
bags = []
baggedIndexes = set()
for i, subscriptionA in enumerate(self._subList):
if i in baggedIndexes:
continue
bags.append([subscriptionA])
for j, subscriptionB in enumerate(self._subList[i + 1:], i + 1):
if j in baggedIndexes:
continue
if subscriptionA.isEqualOptions(subscriptionB) and \
subscriptionA.isEqualDatasetPaths(subscriptionB):
bags[-1].append(subscriptionB)
baggedIndexes.add(j)
newSubList = []
for bag in bags:
anchorSubscription = bag[0]
for subscription in bag[1:]:
anchorSubscription.addNodes(subscription)
newSubList.append(anchorSubscription)
self._subList = newSubList
def getSubscriptionList(self):
return self._subList
| true
| true
|
790aa1f3d52f53fc6f2c40a5e5a4c043defca4a4
| 113,777
|
py
|
Python
|
dcase_util/datasets/tut.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/datasets/tut.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
dcase_util/datasets/tut.py
|
ankitshah009/dcase_util
|
738571ce78faf60b0fdfa1d59fd42f42c8944f3d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import collections
import hashlib
import os
import pickle
import sys
import numpy
import yaml
from six import iteritems
from tqdm import tqdm
from dcase_util.datasets import AcousticSceneDataset, SyntheticSoundEventDataset, SoundEventDataset
from dcase_util.containers import MetaDataContainer, MetaDataItem, OneToOneMappingContainer, \
DictContainer, ParameterContainer
from dcase_util.utils import Path
# =====================================================
# DCASE 2018
# =====================================================
class TUTUrbanAcousticScenes_2018_DevelopmentSet(AcousticSceneDataset):
"""TUT Urban Acoustic Scenes 2018 Development dataset
This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask A
"""
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-urban-acoustic-scenes-2018-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Zoom F8',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-development'
source_url = 'https://zenodo.org/record/1228142/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 10517,
'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 69272,
'remote_md5': 'e196065ee83c07af03a11a310364377d',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1657811579,
'remote_md5': '62f97087c447e29def8716204469bf89',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1783489370,
'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1809675304,
'remote_md5': '00d2020582a4535af5e65322fb2bad56',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1756582525,
'remote_md5': 'd691eb4271f83ba6ba9a28797accc497',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1724002546,
'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1645753049,
'remote_md5': '2f0feee78f216697eb19497714d97642',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1671903917,
'remote_md5': '07cfefe80a0731de6819181841239f3a',
'filename': filename_base + '.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.8.zip',
'remote_bytes': 1673304843,
'remote_md5': '213f3c012859c2e9dcb74aacc8558458',
'filename': filename_base + '.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.9.zip',
'remote_bytes': 1674839259,
'remote_md5': 'b724442b09abcb3bd095ebff497cef85',
'filename': filename_base + '.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.10.zip',
'remote_bytes': 1662932947,
'remote_md5': 'a27a32fa52e283ed8013375b8a16f269',
'filename': filename_base + '.audio.10.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.11.zip',
'remote_bytes': 1751473843,
'remote_md5': '7073a121e825ffef99832507f30d6644',
'filename': filename_base + '.audio.11.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.12.zip',
'remote_bytes': 1742332198,
'remote_md5': '6567aa61db12776568b6267ce122fb18',
'filename': filename_base + '.audio.12.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.13.zip',
'remote_bytes': 798990513,
'remote_md5': 'd00eeb2db0e093d8975521323a96c519',
'filename': filename_base + '.audio.13.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
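# Usage sketch, assuming the usual dcase_util workflow where initialize()
# chains download -> extract -> prepare:
#     db = TUTUrbanAcousticScenes_2018_DevelopmentSet(data_path='datasets')
#     db.initialize()                  # fetch packages, unpack, build meta.csv
#     for item in db.meta:
#         print(item.filename, item.scene_label)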
class TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet(AcousticSceneDataset):
"""TUT Urban Acoustic Scenes 2018 Mobile Development dataset
This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask B
"""
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-mobile-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-urban-acoustic-scenes-2018-mobile-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Various',
'microphone_model': 'Various',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development'
source_url = 'https://zenodo.org/record/1228235/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 12144,
'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 88425,
'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1692337547,
'remote_md5': 'd6f2671af84032b97f393354c124517d',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1769203601,
'remote_md5': 'db8b3603af5d4e559869a592930a7620',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1674610746,
'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1634599587,
'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1640894390,
'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1693974078,
'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1165383562,
'remote_md5': 'e182e5300867f4ed4b580389cc5b931e',
'filename': filename_base + '.audio.7.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
if not item.source_label:
item.source_label = os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[-1]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
# Load meta and cross validation
self.load()
return self
# =====================================================
# DCASE 2017
# =====================================================
class TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2017, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/400515/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.doc.zip',
'remote_bytes': 54796,
'remote_md5': '2065495aaf3f1103e795c9899e2af1df',
'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.meta.zip',
'remote_bytes': 104321,
'remote_md5': '9007fd4772d816590c5db5f5e9568f5d',
'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.error.zip',
'remote_bytes': 1432,
'remote_md5': '802c700b021769e52a2c1e3b9c117a1b',
'filename': 'TUT-acoustic-scenes-2017-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip',
'remote_bytes': 1071445248,
'remote_md5': '251325a9afaaad0326ad1c57f57d514a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip',
'remote_bytes': 1073453613,
'remote_md5': 'c26861e05147dc319b4250eb103d9d99',
'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip',
'remote_bytes': 1073077819,
'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip',
'remote_bytes': 1072822038,
'remote_md5': '1732b03afe8c53ef8bba80ba14766e57',
'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip',
'remote_bytes': 1072644652,
'remote_md5': '611be754a0c951185c6ae4b7643c19a0',
'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip',
'remote_bytes': 1072667888,
'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip',
'remote_bytes': 1073417661,
'remote_md5': 'c7d79db84264401c0f8680dcc36013ad',
'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip',
'remote_bytes': 1072381222,
'remote_md5': '35043f25123439392338c790494c7a19',
'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip',
'remote_bytes': 1072087738,
'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip',
'remote_bytes': 1046262120,
'remote_md5': '5df83a191295a04e290b125c634e13e7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
# Process, make sure each file is included only once.
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTAcousticScenes_2017_EvaluationSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2017, evaluation dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040168/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip',
'remote_bytes': 53687,
'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c',
'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip',
'remote_bytes': 4473,
'remote_md5': '200eee9493e8044403e1326e3d05cfde',
'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip',
'remote_bytes': 1071856687,
'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip',
'remote_bytes': 1073362972,
'remote_md5': '4085ef5fa286f2169074993a4e405953',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip',
'remote_bytes': 1071521152,
'remote_md5': 'cac432579e7cf2dff0aec7aaed248956',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip',
'remote_bytes': 382756463,
'remote_md5': '664bf09c3d24bd26c6b587f1d709de36',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'
},
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
filename_map : OneToOneMappingContainer
Filename map
Default value None
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if filename_map and item.filename in filename_map:
filename_mapped = filename_map.map(item.filename)
item.identifier = os.path.split(filename_mapped)[1].split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')):
meta_data = collections.OrderedDict()
# Read files in
data = MetaDataContainer(
filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')
).load()
# Load filename mapping
map_filename = os.path.join(self.evaluation_setup_path, 'map.txt')
if os.path.exists(map_filename):
filename_map = OneToOneMappingContainer(filename=map_filename).load()
else:
filename_map = {}
for item in data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False,
filename_map=filename_map
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTRareSoundEvents_2017_DevelopmentSet(SyntheticSoundEventDataset):
"""TUT Acoustic scenes 2017 development dataset
This dataset is used in DCASE2017 - Task 2, Rare sound event detection
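Examples
--------
A sketch of overriding the synthesis parameters; partial dictionaries are
merged over the defaults and the meta filename is derived from their hash
(values illustrative)::
db = TUTRareSoundEvents_2017_DevelopmentSet(
synth_parameters={
'train': {'mixtures_per_class': 100},
'test': {'mixtures_per_class': 100}
}
)
db.initialize()  # fetches source data; mixtures are synthesized from it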
"""
def __init__(self,
storage_name='TUT-rare-sound-events-2017-development',
data_path=None,
included_content_types=None,
synth_parameters=None,
dcase_compatibility=True,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-rare-sound-events-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
synth_parameters : dict
Data synthesis parameters.
Default value None
dcase_compatibility : bool
Ensure that dataset is generated same way than in DCASE2017 Challenge setup
Default value True
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['filelisthash_exclude_dirs'] = kwargs.get(
'filelisthash_exclude_dirs',
[os.path.join('data', 'mixture_data')]
)
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, development dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'https://zenodo.org/record/401395/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.doc.zip',
'remote_bytes': 21042,
'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783',
'filename': 'TUT-rare-sound-events-2017-development.doc.zip'
},
{
'content_type': 'code',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.code.zip',
'remote_bytes': 81518,
'remote_md5': '4cacdf0803daf924a60bf9daa573beb7',
'filename': 'TUT-rare-sound-events-2017-development.code.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip',
'remote_bytes': 1072175672,
'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip',
'remote_bytes': 1073378284,
'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip',
'remote_bytes': 1069766123,
'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip',
'remote_bytes': 1070042681,
'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip',
'remote_bytes': 1073380909,
'remote_md5': '84e70d855457a18115108e42ec04501a',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip',
'remote_bytes': 1073021941,
'remote_md5': '048ce898bd434097dd489027f7ba361d',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip',
'remote_bytes': 1069890239,
'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip',
'remote_bytes': 180860904,
'remote_md5': '69dcb81e70f4e6605e178693afcd7722',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip',
'remote_bytes': 639119477,
'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473',
'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'
}
]
kwargs['audio_paths'] = ['audio']
default_synth_parameters = DictContainer({
'train': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
},
'test': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
}
})
if synth_parameters is None:
synth_parameters = {}
# Override synth parameters
synth_parameters = default_synth_parameters.merge(synth_parameters)
# Meta filename depends on synth_parameters
kwargs['meta_filename'] = 'meta_'+synth_parameters.get_hash_for_path()+'.txt'
self.synth_parameters = synth_parameters
# Add parameter hash
self.synth_parameters['train']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['train']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'],
'ebrs': self.synth_parameters['train']['ebr_list'],
'seed': self.synth_parameters['train']['seed']
}
).encode('utf-8')).hexdigest()
self.synth_parameters['test']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['test']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'],
'ebrs': self.synth_parameters['test']['ebr_list'],
'seed': self.synth_parameters['test']['seed']
}
).encode('utf-8')).hexdigest()
self.dcase_compatibility = dcase_compatibility
# Initialize baseclass
super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
# Add code package to be downloaded always
if 'code' not in self.included_content_types and 'all' not in self.included_content_types:
self.included_content_types.append('code')
def event_labels(self, scene_label=None):
"""List of unique event labels in the meta data.
Parameters
----------
scene_label : str
Scene label. Not used, included for API compatibility.
Default value None
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
# Make sure evaluation_setup directory exists
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
return self
def synthesize(self):
"""Synthesize mixture recipes and mixture audio for the devtrain and devtest subsets."""
# Create __init__.py so the bundled synthesizer code can be imported as a package
if not os.path.exists(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py')):
open(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py'), 'a').close()
# Add synth code to the search path
sys.path.append(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer'))
from core import generate_mixture_recipes
from core import do_mixing
scene_label = 'synthetic'
subset_map = {'train': 'devtrain',
'test': 'devtest'}
data_path = os.path.join(os.path.abspath(self.local_path), 'data')
set_progress = tqdm(['train', 'test'],
desc="{0: <25s}".format('Set'),
file=sys.stdout,
leave=False,
disable=self.disable_progress_bar,
ascii=self.use_ascii_progress_bar)
for subset_label in set_progress:
if self.log_system_progress:
self.logger.info(' {title:<15s} [{subset_label:<30s}]'.format(
title='Set ',
subset_label=subset_label)
)
# Translated subset name
subset_name_on_disk = subset_map[subset_label]
# Get parameters
mixing_params = {
'event_presence_prob': self.synth_parameters[subset_label]['event_presence_prob'],
'mixtures_per_class': self.synth_parameters[subset_label]['mixtures_per_class'],
'ebrs': self.synth_parameters[subset_label]['ebr_list'],
'seed': self.synth_parameters[subset_label]['seed']
}
# Get parameter hash
param_hash = self.synth_parameters[subset_label]['param_hash']
# Save parameters
mixture_parameters = os.path.join(
self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'parameters.yaml'
)
if not os.path.isfile(mixture_parameters):
# Make sure directory exists
Path().makedirs(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash)
)
# Save
ParameterContainer(mixing_params).save(filename=mixture_parameters)
# Check whether mixture recipes need to be generated
recipes_exists = True
for event_label in self.event_labels():
recipe_filename = 'mixture_recipes_' + subset_name_on_disk + '_' + event_label + '.yaml'
if not os.path.isfile(os.path.join(self.local_path, 'data', 'mixture_data',
subset_name_on_disk, param_hash, 'meta', recipe_filename)):
recipes_exists = False
if not recipes_exists:
# Generate mixture recipes
generate_mixture_recipes(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
mixing_params=mixing_params
)
# Check whether mixture audio needs to be generated
mixture_audio_exists = True
audio_files = Path().file_list(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'audio'))
for event_label in self.event_labels():
event_audio = []
for f in audio_files:
if event_label in f:
event_audio.append(f)
if len(event_audio) != self.synth_parameters[subset_label]['mixtures_per_class']:
mixture_audio_exists = False
if not mixture_audio_exists:
# Generate mixture audio based on recipes
do_mixing(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
magic_anticlipping_factor=0.2,
param_hash=param_hash,
dcase_compatibility_mode=True
)
if not self.meta_container.exists():
# Collect meta data
meta_data = MetaDataContainer()
for class_label in self.event_labels():
for subset_label, subset_name_on_disk in iteritems(subset_map):
# Get parameter hash
param_hash = self.synth_parameters[subset_label]['param_hash']
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
# Evaluation setup filenames
train_filename = self.evaluation_setup_filename(
setup_part='train',
fold=1,
file_extension='txt'
)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=1,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=1,
file_extension='txt'
)
# Check that evaluation setup exists
evaluation_setup_exists = True
if not os.path.isfile(train_filename) or not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
# Get parameter hash
param_hash_train = self.synth_parameters['train']['param_hash']
mixture_meta_path_train = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'meta'
)
mixture_path_train = os.path.join(
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'audio'
)
# Get parameter hash
param_hash_test = self.synth_parameters['test']['param_hash']
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'audio'
)
train_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_train,
'event_list_' + subset_map['train'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_train, item.filename)
item.scene_label = scene_label
train_meta += current_meta
train_meta.save(filename=train_filename)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
# Load meta and cross validation
self.load()
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
"""Evaluation setup filename; the synthesis parameter hash of the matching subset is appended to the name."""
parts = []
if setup_part == 'test' or setup_part == 'evaluate':
subset_label = 'test'
else:
subset_label = 'train'
param_hash = self.synth_parameters[subset_label]['param_hash']
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '_' + param_hash + '.' + file_extension)
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of training items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str
String found in filename
Default value None
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of testing items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str
String found in filename
Default value None
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of evaluation items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str
String found in filename
Default value None
Returns
-------
list : list of dicts
List containing all meta data assigned to evaluation set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
class TUTRareSoundEvents_2017_EvaluationSet(SyntheticSoundEventDataset):
"""TUT Acoustic scenes 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 2, Rare sound event detection
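Examples
--------
A sketch of listing reference items per event class (assumes the dataset
has been downloaded; counts illustrative)::
db = TUTRareSoundEvents_2017_EvaluationSet()
db.initialize()
for event_label in db.event_labels():
print(event_label, len(db.eval(event_label=event_label)))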
"""
def __init__(self,
storage_name='TUT-rare-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-rare-sound-events-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['reference_data_present'] = True
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, evaluation dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1160455/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 11701,
'remote_md5': '36db98a94ce871c6bdc5bd5238383114',
'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'LICENSE.txt',
'remote_bytes': 0,
'remote_md5': '0707857098fc74d17beb824416fb74b1',
'filename': 'LICENSE.txt'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'FREESOUNDCREDITS.txt',
'remote_bytes': 0,
'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d',
'filename': 'FREESOUNDCREDITS.txt'
},
{
'content_type': ['audio', 'meta'],
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip',
'remote_bytes': 1071143794,
'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip',
'remote_bytes': 1071773516,
'remote_md5': 'e97d5842c46805cdb94e6d4017870cde',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip',
'remote_bytes': 1073505512,
'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip',
'remote_bytes': 1071132551,
'remote_md5': '5042cd00aed9af6b37a253e24f88554f',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip',
'remote_bytes': 308314939,
'remote_md5': '72180597ed5bfaa73491755f74b84738',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'
}
]
kwargs['audio_paths'] = ['audio']
# Initialize base class
super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
return ['synthetic']
def event_labels(self, scene_label=None):
"""List of unique event labels in the meta data.
Parameters
----------
scene_label : str
Scene label. Not used, included for API compatibility.
Default value None
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
scene_label = 'synthetic'
subset_map = {'test': 'evaltest'}
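# Fixed parameter hash matching the directory layout of the released evaluation mixture data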
param_hash = 'bbb81504db15a03680a0044474633b67'
# Make sure evaluation_setup directory exists
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
if not self.meta_container.exists() and self.reference_data_present:
# Collect meta data
meta_data = MetaDataContainer()
for class_label in self.event_labels():
for subset_label, subset_name_on_disk in iteritems(subset_map):
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
# Save meta
meta_data.save(filename=self.meta_file)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=None,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=None,
file_extension='txt'
)
# Check that evaluation setup exists
evaluation_setup_exists = True
if not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
# Get parameter hash
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash,
'audio'
)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
# Load meta and cross validation
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
"""Evaluation setup filename; this dataset uses no folds and no parameter hash in the name."""
parts = []
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '.' + file_extension)
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of training items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None"
event_label : str
Event label
Default value None"
filename_contains : str:
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to training set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of testing items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to testing set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
"""List of evaluation items.
Parameters
----------
fold : int
Fold id, if None all meta data is returned.
Default value None
scene_label : str
Scene label
Default value None
event_label : str
Event label
Default value None
filename_contains : str
String found in filename
Default value None
Returns
-------
list
List containing all meta data assigned to evaluation set for given fold.
"""
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
class TUTSoundEvents_2017_DevelopmentSet(SoundEventDataset):
"""TUT Sound events 2017 development dataset
This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
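Examples
--------
A fold-iteration sketch over the four-fold cross-validation setup
(assumes the dataset has been downloaded; counts illustrative)::
db = TUTSoundEvents_2017_DevelopmentSet()
db.initialize()
for fold in db.folds():
print(fold, len(db.train(fold=fold)), len(db.test(fold=fold)))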
"""
def __init__(self,
storage_name='TUT-sound-events-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2017-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2017, development dataset',
'url': 'https://zenodo.org/record/814831',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/814831/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-development.doc.zip',
'remote_bytes': 56150,
'remote_md5': 'aa6024e70f5bff3fe15d962b01753e23',
'filename': 'TUT-sound-events-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-development.meta.zip',
'remote_bytes': 140684,
'remote_md5': '50e870b3a89ed3452e2a35b508840929',
'filename': 'TUT-sound-events-2017-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.1.zip',
'remote_bytes': 1062653169,
'remote_md5': '6f1cd31592b8240a14be3ee513db6a23',
'filename': 'TUT-sound-events-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.2.zip',
'remote_bytes': 213232458,
'remote_md5': 'adcff03341b84dc8d35f035b93c1efa0',
'filename': 'TUT-sound-events-2017-development.audio.2.zip'
}
]
kwargs['audio_paths'] = [os.path.join('audio', 'street')]
super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2017_EvaluationSet(SoundEventDataset):
"""TUT Sound events 2017 evaluation dataset
This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
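Examples
--------
A sketch of listing evaluation items; this dataset contains a single
'street' scene (output illustrative)::
db = TUTSoundEvents_2017_EvaluationSet()
db.initialize()
print(db.scene_labels())  # ['street']
for item in db.eval():
print(item.filename)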
"""
def __init__(self,
storage_name='TUT-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2017-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2017, evaluation dataset',
'url': 'https://zenodo.org/record/1040179',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040179/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 54606,
'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9',
'filename': 'TUT-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.meta.zip',
'remote_bytes': 762,
'remote_md5': 'a951598abaea87296ca409e30fb0b379',
'filename': 'TUT-sound-events-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.audio.zip',
'remote_bytes': 388173790,
'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871',
'filename': 'TUT-sound-events-2017-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['street']
labels.sort()
return labels
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
scene_label=self.scene_labels()[0]
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
# Get meta data from evaluation file
meta_data = MetaDataContainer()
eval_file.load()
for item in eval_file:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += eval_file
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
elif os.path.isdir(os.path.join(self.local_path, 'meta')):
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
# Get meta data from annotation files
meta_data = MetaDataContainer()
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
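Examples
--------
A sketch of per-scene item counts for one training fold (assumes the
dataset has been downloaded; counts illustrative)::
db = TUTAcousticScenes_2016_DevelopmentSet()
db.initialize()
train_items = db.train(fold=1)
for scene_label in db.scene_labels():
print(scene_label, len(train_items.filter(scene_label=scene_label)))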
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2016-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, development dataset',
'url': 'https://zenodo.org/record/45739',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45739/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.doc.zip',
'remote_bytes': 69671,
'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926',
'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.meta.zip',
'remote_bytes': 28815,
'remote_md5': '779b33da2ebbf8bde494b3c981827251',
'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.error.zip',
'remote_bytes': 1283,
'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386',
'filename': 'TUT-acoustic-scenes-2016-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip',
'remote_bytes': 1070981236,
'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d',
'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip',
'remote_bytes': 1067186166,
'remote_md5': 'd36cf3253e2c041f68e937a3fe804807',
'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip',
'remote_bytes': 1073644405,
'remote_md5': '0393a9620ab882b1c26d884eccdcffdd',
'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip',
'remote_bytes': 1072111347,
'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce',
'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip',
'remote_bytes': 1069681513,
'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057',
'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip',
'remote_bytes': 1072890150,
'remote_md5': '591aad3219d1155342572cc1f6af5680',
'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip',
'remote_bytes': 1069265197,
'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab',
'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip',
'remote_bytes': 528461098,
'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8',
'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = {}
for fold in range(1, self.crossvalidation_folds + 1):
# Read train files in
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
# Read eval files in
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
class TUTAcousticScenes_2016_EvaluationSet(AcousticSceneDataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-acoustic-scenes-2016-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, evaluation dataset',
'url': 'https://zenodo.org/record/165995',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/165995/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip',
'remote_bytes': 69217,
'remote_md5': 'ef315bf912d1124050646888cc3ceba2',
'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip',
'remote_bytes': 5962,
'remote_md5': '0d5c131fc3f50c682de62e0e648aceba',
'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip',
'remote_bytes': 1067685684,
'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip',
'remote_bytes': 1068308900,
'remote_md5': '7930f1dc26707ab3ba9526073af87333',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip',
'remote_bytes': 538894804,
'remote_md5': '17187d633d6402aee4b481122a1b28f0',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if item.filename_original is not None:
raw_path, raw_filename = os.path.split(item.filename_original)
item.identifier = raw_filename.split('_')[0]
del item['filename_original']
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate'
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
eval_data = eval_file.load()
meta_data = {}
for item in eval_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
# Save meta
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2016_DevelopmentSet(SoundEventDataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
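Examples
--------
A sketch of selecting annotated events for one of the two scenes,
'home' and 'residential_area' (counts illustrative)::
db = TUTSoundEvents_2016_DevelopmentSet()
db.initialize()
events = db.train(fold=1).filter(scene_label='residential_area')
print(len(events))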
"""
def __init__(self,
storage_name='TUT-sound-events-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2016-development'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, development dataset',
'url': 'https://zenodo.org/record/45759',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45759/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-development.doc.zip',
'remote_bytes': 70918,
'remote_md5': '33fd26a895530aef607a07b08704eacd',
'filename': 'TUT-sound-events-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-development.meta.zip',
'remote_bytes': 122321,
'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d',
'filename': 'TUT-sound-events-2016-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-development.audio.zip',
'remote_bytes': 1014040667,
'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8',
'filename': 'TUT-sound-events-2016-development.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
"""Process single meta data item
Parameters
----------
item : MetaDataItem
Meta data item
absolute_path : bool
Convert file paths to be absolute
Default value True
"""
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
class TUTSoundEvents_2016_EvaluationSet(SoundEventDataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self,
storage_name='TUT-sound-events-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-sound-events-2016-evaluation'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
included_content_types : list of str or str
Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, evaluation dataset',
'url': 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/996424/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.doc.zip',
'remote_bytes': 69834,
'remote_md5': '0644b54d96f4cefd0ecb2c7ea9161aa9',
'filename': 'TUT-sound-events-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.meta.zip',
'remote_bytes': 41608,
'remote_md5': '91c266b0780ac619a0d74298a3805e9e',
'filename': 'TUT-sound-events-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.audio.zip',
'remote_bytes': 471072452,
'remote_md5': '29434e8c53bd51206df0234e6cf2238c',
'filename': 'TUT-sound-events-2016-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists() and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load(decimal='comma')
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
# =====================================================
# Others
# =====================================================
class TUT_SED_Synthetic_2016(SoundEventDataset):
"""TUT SED Synthetic 2016
"""
def __init__(self,
storage_name='TUT-SED-synthetic-2016',
data_path=None,
included_content_types=None,
**kwargs):
"""
Constructor
Parameters
----------
storage_name : str
Name to be used when storing dataset on disk
Default value 'TUT-SED-synthetic-2016'
data_path : str
Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
is used.
Default value None
        included_content_types : list of str or str
            Indicates what content types should be processed. One or multiple of ['all', 'audio', 'meta', 'code',
            'documentation']. If None is given, ['all'] is used. The parameter can also be given as a
            comma-separated string.
            Default value None
"""
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Emre Cakir',
'title': 'TUT-SED Synthetic 2016',
'url': 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/tut-sed-synthetic-2016',
'audio_source': 'Field recording',
'audio_type': 'Synthetic',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/datasets/TUT-SED-synthetic-2016/'
kwargs['package_list'] = [
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-SED-synthetic-2016.meta.zip',
'remote_bytes': 973618,
'remote_md5': 'e2ae895bdf39f2a359a97bb0bcf76101',
'filename': 'TUT-SED-synthetic-2016.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.1.zip',
'remote_bytes': 1026369647,
'remote_md5': 'ede8b9c6d1b0d1d64bfc5791404f58fb',
'filename': 'TUT-SED-synthetic-2016.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.2.zip',
'remote_bytes': 1018650039,
'remote_md5': 'cde647a377a58fc74e3012139d65c447',
'filename': 'TUT-SED-synthetic-2016.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.3.zip',
'remote_bytes': 1070239392,
'remote_md5': '5fc2824dcce442f441f4c6a975881789',
'filename': 'TUT-SED-synthetic-2016.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.4.zip',
'remote_bytes': 1040622610,
'remote_md5': '4ba016d949171ccc8493d3d274009825',
'filename': 'TUT-SED-synthetic-2016.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.5.zip',
'remote_bytes': 264812997,
'remote_md5': '6a44578dd7738bd4ba044d5d2b9a5448',
'filename': 'TUT-SED-synthetic-2016.audio.5.zip'
},
{
'content_type': 'features',
'remote_file': source_url + 'TUT-SED-synthetic-2016.features.zip',
'remote_bytes': 480894082,
'remote_md5': '66bc0abc19a276986964a6d4a2d2f6bc',
'filename': 'TUT-SED-synthetic-2016.features.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUT_SED_Synthetic_2016, self).__init__(**kwargs)
def prepare(self):
"""Prepare dataset for the usage.
Returns
-------
self
"""
if not self.meta_container.exists():
meta_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['txt'])
meta_data = MetaDataContainer()
for meta_filename in meta_files:
audio_filename = os.path.join('audio', os.path.split(meta_filename)[1].replace('.txt', '.wav'))
data = MetaDataContainer(filename=meta_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = 'synthetic'
item.source_label = 'm'
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
# Save meta
meta_data.save(filename=self.meta_file)
# Load meta and cross validation
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if scene_label:
parts.append(scene_label)
if fold:
parts.append('fold' + str(fold))
if setup_part == 'train':
return os.path.join(self.evaluation_setup_path, 'train+validate' + '.' + file_extension)
elif setup_part == 'test':
return os.path.join(self.evaluation_setup_path, 'test' + '.' + file_extension)
elif setup_part == 'validate':
return os.path.join(self.evaluation_setup_path, 'validate' + '.' + file_extension)
        elif setup_part == 'evaluate':
            return os.path.join(self.evaluation_setup_path, 'evaluate' + '.' + file_extension)
        else:
            message = '{name}: Unknown setup_part [{setup_part}]'.format(
                name=self.__class__.__name__,
                setup_part=setup_part
            )
            self.logger.exception(message)
            raise ValueError(message)
def validation_split(self, fold=None, scene_label=None, **kwargs):
validation_files = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='validate', fold=fold)
).load().unique_files
for index, filename in enumerate(validation_files):
validation_files[index] = self.relative_to_absolute_path(filename)
return validation_files
def file_features(self, filename):
"""Pre-calculated acoustic features for given file
Parameters
----------
filename : str
File name
Returns
-------
data : numpy.ndarray
Matrix containing acoustic features
"""
filename_ = self.absolute_to_relative_path(filename).replace('audio/', 'features/')
filename_ = os.path.splitext(filename_)[0] + '.cpickle'
if os.path.isfile(os.path.join(self.local_path, filename_)):
feature_data = pickle.load(open(os.path.join(self.local_path, filename_), "rb"))
return feature_data['feat']
else:
return None
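# A minimal usage sketch (assumptions: the dataset has already been downloaded
# and extracted, 'datasets' is an illustrative root path, and initialize() and
# audio_files come from the generic dcase_util Dataset API):
#
#     db = TUT_SED_Synthetic_2016(data_path='datasets')
#     db.initialize()                        # download, extract and prepare
#     features = db.file_features(db.audio_files[0])
#     if features is not None:               # None when no .cpickle file exists
#         print(features.shape)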
| 36.916613 | 124 | 0.543801 |
from __future__ import print_function, absolute_import
import collections
import hashlib
import os
import pickle
import sys
import numpy
import yaml
from six import iteritems
from tqdm import tqdm
from dcase_util.datasets import AcousticSceneDataset, SyntheticSoundEventDataset, SoundEventDataset
from dcase_util.containers import MetaDataContainer, MetaDataItem, OneToOneMappingContainer, \
DictContainer, ParameterContainer
from dcase_util.utils import Path
class TUTUrbanAcousticScenes_2018_DevelopmentSet(AcousticSceneDataset):
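    """TUT Urban Acoustic Scenes 2018 development dataset
    This dataset is used in DCASE2018 - Task 1, Acoustic scene classification
    """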
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Zoom F8',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
        kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-development'
source_url = 'https://zenodo.org/record/1228142/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 10517,
'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 69272,
'remote_md5': 'e196065ee83c07af03a11a310364377d',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1657811579,
'remote_md5': '62f97087c447e29def8716204469bf89',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1783489370,
'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1809675304,
'remote_md5': '00d2020582a4535af5e65322fb2bad56',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1756582525,
'remote_md5': 'd691eb4271f83ba6ba9a28797accc497',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1724002546,
'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1645753049,
'remote_md5': '2f0feee78f216697eb19497714d97642',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1671903917,
'remote_md5': '07cfefe80a0731de6819181841239f3a',
'filename': filename_base + '.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.8.zip',
'remote_bytes': 1673304843,
'remote_md5': '213f3c012859c2e9dcb74aacc8558458',
'filename': filename_base + '.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.9.zip',
'remote_bytes': 1674839259,
'remote_md5': 'b724442b09abcb3bd095ebff497cef85',
'filename': filename_base + '.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.10.zip',
'remote_bytes': 1662932947,
'remote_md5': 'a27a32fa52e283ed8013375b8a16f269',
'filename': filename_base + '.audio.10.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.11.zip',
'remote_bytes': 1751473843,
'remote_md5': '7073a121e825ffef99832507f30d6644',
'filename': filename_base + '.audio.11.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.12.zip',
'remote_bytes': 1742332198,
'remote_md5': '6567aa61db12776568b6267ce122fb18',
'filename': filename_base + '.audio.12.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.13.zip',
'remote_bytes': 798990513,
'remote_md5': 'd00eeb2db0e093d8975521323a96c519',
'filename': filename_base + '.audio.13.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
def prepare(self):
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
self.load()
return self
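# A minimal usage sketch (illustrative path; initialize() downloads, extracts
# and prepares the dataset via the generic dcase_util Dataset API):
#
#     db = TUTUrbanAcousticScenes_2018_DevelopmentSet(data_path='datasets')
#     db.initialize()
#     for item in db.train(fold=1):
#         print(item.filename, item.scene_label, item.identifier)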
class TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet(AcousticSceneDataset):
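    """TUT Urban Acoustic Scenes 2018 Mobile development dataset
    This dataset is used in DCASE2018 - Task 1, Acoustic scene classification, subtask B
    """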
def __init__(self,
storage_name='TUT-urban-acoustic-scenes-2018-mobile-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen',
'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Various',
'microphone_model': 'Various',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 1
kwargs['meta_filename'] = 'meta.csv'
filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development'
source_url = 'https://zenodo.org/record/1228235/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + filename_base + '.doc.zip',
'remote_bytes': 12144,
'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0',
'filename': filename_base + '.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + filename_base + '.meta.zip',
'remote_bytes': 88425,
'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4',
'filename': filename_base + '.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.1.zip',
'remote_bytes': 1692337547,
'remote_md5': 'd6f2671af84032b97f393354c124517d',
'filename': filename_base + '.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.2.zip',
'remote_bytes': 1769203601,
'remote_md5': 'db8b3603af5d4e559869a592930a7620',
'filename': filename_base + '.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.3.zip',
'remote_bytes': 1674610746,
'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0',
'filename': filename_base + '.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.4.zip',
'remote_bytes': 1634599587,
'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07',
'filename': filename_base + '.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.5.zip',
'remote_bytes': 1640894390,
'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca',
'filename': filename_base + '.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.6.zip',
'remote_bytes': 1693974078,
'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f',
'filename': filename_base + '.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + filename_base + '.audio.7.zip',
'remote_bytes': 1165383562,
'remote_md5': 'e182e5300867f4ed4b580389cc5b931e',
'filename': filename_base + '.audio.7.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if not item.identifier:
item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[1:-2])
if not item.source_label:
item.source_label = os.path.splitext(os.path.split(item.filename)[-1])[0].split('-')[-1]
def prepare(self):
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='train',
fold=fold
)
).load()
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(
setup_part='evaluate',
fold=fold
)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(
filename=self.meta_file
)
self.load()
return self
class TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset):
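    """TUT Acoustic Scenes 2017 development dataset
    This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
    """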
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2017, development dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/400515/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.doc.zip',
'remote_bytes': 54796,
'remote_md5': '2065495aaf3f1103e795c9899e2af1df',
'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.meta.zip',
'remote_bytes': 104321,
'remote_md5': '9007fd4772d816590c5db5f5e9568f5d',
'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.error.zip',
'remote_bytes': 1432,
'remote_md5': '802c700b021769e52a2c1e3b9c117a1b',
'filename': 'TUT-acoustic-scenes-2017-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip',
'remote_bytes': 1071445248,
'remote_md5': '251325a9afaaad0326ad1c57f57d514a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip',
'remote_bytes': 1073453613,
'remote_md5': 'c26861e05147dc319b4250eb103d9d99',
'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip',
'remote_bytes': 1073077819,
'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip',
'remote_bytes': 1072822038,
'remote_md5': '1732b03afe8c53ef8bba80ba14766e57',
'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip',
'remote_bytes': 1072644652,
'remote_md5': '611be754a0c951185c6ae4b7643c19a0',
'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip',
'remote_bytes': 1072667888,
'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip',
'remote_bytes': 1073417661,
'remote_md5': 'c7d79db84264401c0f8680dcc36013ad',
'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip',
'remote_bytes': 1072381222,
'remote_md5': '35043f25123439392338c790494c7a19',
'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip',
'remote_bytes': 1072087738,
'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a',
'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip',
'remote_bytes': 1046262120,
'remote_md5': '5df83a191295a04e290b125c634e13e7',
'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
if not self.meta_container.exists():
meta_data = collections.OrderedDict()
for fold in self.folds():
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
self.load()
return self
class TUTAcousticScenes_2017_EvaluationSet(AcousticSceneDataset):
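    """TUT Acoustic Scenes 2017 evaluation dataset
    This dataset is used in DCASE2017 - Task 1, Acoustic scene classification
    """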
def __init__(self,
storage_name='TUT-acoustic-scenes-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
            'title': 'TUT Acoustic Scenes 2017, evaluation dataset',
'url': None,
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040168/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip',
'remote_bytes': 53687,
'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c',
'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip',
'remote_bytes': 4473,
'remote_md5': '200eee9493e8044403e1326e3d05cfde',
'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip',
'remote_bytes': 1071856687,
'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip',
'remote_bytes': 1073362972,
'remote_md5': '4085ef5fa286f2169074993a4e405953',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip',
'remote_bytes': 1071521152,
'remote_md5': 'cac432579e7cf2dff0aec7aaed248956',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip',
'remote_bytes': 382756463,
'remote_md5': '664bf09c3d24bd26c6b587f1d709de36',
'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'
},
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if filename_map and item.filename in filename_map:
filename_mapped = filename_map.map(item.filename)
item.identifier = os.path.split(filename_mapped)[1].split('_')[0]
def prepare(self):
if not self.meta_container.exists():
            evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate')
            if os.path.isfile(evaluate_filename):
                meta_data = collections.OrderedDict()
                data = MetaDataContainer(
                    filename=evaluate_filename
                ).load()
map_filename = os.path.join(self.evaluation_setup_path, 'map.txt')
if os.path.exists(map_filename):
filename_map = OneToOneMappingContainer(filename=map_filename).load()
else:
filename_map = {}
for item in data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False,
filename_map=filename_map
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
self.load()
return self
class TUTRareSoundEvents_2017_DevelopmentSet(SyntheticSoundEventDataset):
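    """TUT Rare Sound Events 2017 development dataset
    This dataset is used in DCASE2017 - Task 2, Detection of rare sound events
    """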
def __init__(self,
storage_name='TUT-rare-sound-events-2017-development',
data_path=None,
included_content_types=None,
synth_parameters=None,
dcase_compatibility=True,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['filelisthash_exclude_dirs'] = kwargs.get(
'filelisthash_exclude_dirs',
[os.path.join('data', 'mixture_data')]
)
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, development dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'https://zenodo.org/record/401395/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.doc.zip',
'remote_bytes': 21042,
'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783',
'filename': 'TUT-rare-sound-events-2017-development.doc.zip'
},
{
'content_type': 'code',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.code.zip',
'remote_bytes': 81518,
'remote_md5': '4cacdf0803daf924a60bf9daa573beb7',
'filename': 'TUT-rare-sound-events-2017-development.code.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip',
'remote_bytes': 1072175672,
'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip',
'remote_bytes': 1073378284,
'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip',
'remote_bytes': 1069766123,
'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip',
'remote_bytes': 1070042681,
'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip',
'remote_bytes': 1073380909,
'remote_md5': '84e70d855457a18115108e42ec04501a',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip',
'remote_bytes': 1073021941,
'remote_md5': '048ce898bd434097dd489027f7ba361d',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip',
'remote_bytes': 1069890239,
'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip',
'remote_bytes': 180860904,
'remote_md5': '69dcb81e70f4e6605e178693afcd7722',
'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip',
'remote_bytes': 639119477,
'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473',
'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'
}
]
kwargs['audio_paths'] = ['audio']
default_synth_parameters = DictContainer({
'train': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
},
'test': {
'seed': 42,
'event_presence_prob': 0.5,
'mixtures_per_class': 500,
'ebr_list': [-6, 0, 6],
}
})
if synth_parameters is None:
synth_parameters = {}
synth_parameters = default_synth_parameters.merge(synth_parameters)
        kwargs['meta_filename'] = 'meta_' + synth_parameters.get_hash_for_path() + '.txt'
self.synth_parameters = synth_parameters
self.synth_parameters['train']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['train']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'],
'ebrs': self.synth_parameters['train']['ebr_list'],
'seed': self.synth_parameters['train']['seed']
}
).encode('utf-8')).hexdigest()
self.synth_parameters['test']['param_hash'] = hashlib.md5(
yaml.dump(
{
'event_presence_prob': self.synth_parameters['test']['event_presence_prob'],
'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'],
'ebrs': self.synth_parameters['test']['ebr_list'],
'seed': self.synth_parameters['test']['seed']
}
).encode('utf-8')).hexdigest()
self.dcase_compatibility = dcase_compatibility
super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
        if 'code' not in self.included_content_types and 'all' not in self.included_content_types:
            # The bundled synthesizer code is required by synthesize(), so make
            # sure the 'code' content type is always included.
            self.included_content_types.append('code')
def event_labels(self, scene_label=None):
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
return self
def synthesize(self):
        # Make sure the bundled synthesizer code is importable as a package
        if not os.path.exists(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py')):
            open(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer', '__init__.py'), 'a').close()
sys.path.append(os.path.join(self.local_path, 'TUT_Rare_sound_events_mixture_synthesizer'))
from core import generate_mixture_recipes
from core import do_mixing
scene_label = 'synthetic'
subset_map = {'train': 'devtrain',
'test': 'devtest'}
data_path = os.path.join(os.path.abspath(self.local_path), 'data')
set_progress = tqdm(['train', 'test'],
desc="{0: <25s}".format('Set'),
file=sys.stdout,
leave=False,
disable=self.disable_progress_bar,
ascii=self.use_ascii_progress_bar)
for subset_label in set_progress:
if self.log_system_progress:
self.logger.info(' {title:<15s} [{subset_label:<30s}]'.format(
title='Set ',
subset_label=subset_label)
)
subset_name_on_disk = subset_map[subset_label]
mixing_params = {
'event_presence_prob': self.synth_parameters[subset_label]['event_presence_prob'],
'mixtures_per_class': self.synth_parameters[subset_label]['mixtures_per_class'],
'ebrs': self.synth_parameters[subset_label]['ebr_list'],
'seed': self.synth_parameters[subset_label]['seed']
}
param_hash = self.synth_parameters[subset_label]['param_hash']
mixture_parameters = os.path.join(
self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'parameters.yaml'
)
if not os.path.isfile(mixture_parameters):
Path().makedirs(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash)
)
ParameterContainer(mixing_params).save(filename=mixture_parameters)
recipes_exists = True
for event_label in self.event_labels():
recipe_filename = 'mixture_recipes_' + subset_name_on_disk + '_' + event_label + '.yaml'
if not os.path.isfile(os.path.join(self.local_path, 'data', 'mixture_data',
subset_name_on_disk, param_hash, 'meta', recipe_filename)):
recipes_exists = False
if not recipes_exists:
generate_mixture_recipes(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
mixing_params=mixing_params
)
mixture_audio_exists = True
audio_files = Path().file_list(
path=os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'audio'))
for event_label in self.event_labels():
event_audio = []
for f in audio_files:
if event_label in f:
event_audio.append(f)
if len(event_audio) != self.synth_parameters[subset_label]['mixtures_per_class']:
mixture_audio_exists = False
if not mixture_audio_exists:
do_mixing(
data_path=data_path,
current_subsets=numpy.array([subset_name_on_disk]),
magic_anticlipping_factor=0.2,
param_hash=param_hash,
                    dcase_compatibility_mode=self.dcase_compatibility
)
if not self.meta_container.exists():
meta_data = MetaDataContainer()
for class_label in self.event_labels():
                for subset_label, subset_name_on_disk in iteritems(subset_map):
param_hash = self.synth_parameters[subset_label]['param_hash']
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
meta_data.save(filename=self.meta_file)
self.load()
train_filename = self.evaluation_setup_filename(
setup_part='train',
fold=1,
file_extension='txt'
)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=1,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=1,
file_extension='txt'
)
evaluation_setup_exists = True
if not os.path.isfile(train_filename) or not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
param_hash_train = self.synth_parameters['train']['param_hash']
mixture_meta_path_train = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'meta'
)
mixture_path_train = os.path.join(
'data',
'mixture_data',
subset_map['train'],
param_hash_train,
'audio'
)
param_hash_test = self.synth_parameters['test']['param_hash']
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash_test,
'audio'
)
train_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_train,
'event_list_' + subset_map['train'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_train, item.filename)
item.scene_label = scene_label
train_meta += current_meta
train_meta.save(filename=train_filename)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
self.load()
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if setup_part == 'test' or setup_part == 'evaluate':
subset_label = 'test'
else:
subset_label = 'train'
param_hash = self.synth_parameters[subset_label]['param_hash']
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '_' + param_hash + '.' + file_extension)
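    # Note: unlike the other datasets in this module, the evaluation setup
    # filenames embed the synthesis parameter hash (e.g. 'train_<param_hash>.txt'),
    # so differently parameterized mixture sets get separate setup lists.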
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
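# A minimal usage sketch (illustrative parameter values; synthesis requires the
# downloaded source data and the bundled synthesizer code, and for synthetic
# datasets initialize() also runs the mixture synthesis step):
#
#     db = TUTRareSoundEvents_2017_DevelopmentSet(
#         data_path='datasets',
#         synth_parameters={'train': {'mixtures_per_class': 10}}
#     )
#     db.initialize()
#     babycry_train = db.train(fold=1, event_label='babycry')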
class TUTRareSoundEvents_2017_EvaluationSet(SyntheticSoundEventDataset):
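    """TUT Rare Sound Events 2017 evaluation dataset
    This dataset is used in DCASE2017 - Task 2, Detection of rare sound events
    """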
def __init__(self,
storage_name='TUT-rare-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['reference_data_present'] = True
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Rare Sound Events 2017, evaluation dataset',
'url': None,
'audio_source': 'Synthetic',
'audio_type': 'Natural',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1160455/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 11701,
'remote_md5': '36db98a94ce871c6bdc5bd5238383114',
'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'LICENSE.txt',
'remote_bytes': 0,
'remote_md5': '0707857098fc74d17beb824416fb74b1',
'filename': 'LICENSE.txt'
},
{
'content_type': 'documentation',
'remote_file': source_url + 'FREESOUNDCREDITS.txt',
'remote_bytes': 0,
'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d',
'filename': 'FREESOUNDCREDITS.txt'
},
{
'content_type': ['audio', 'meta'],
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip',
'remote_bytes': 1071143794,
'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip',
'remote_bytes': 1071773516,
'remote_md5': 'e97d5842c46805cdb94e6d4017870cde',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip',
'remote_bytes': 1073505512,
'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip',
'remote_bytes': 1071132551,
'remote_md5': '5042cd00aed9af6b37a253e24f88554f',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip',
'remote_bytes': 308314939,
'remote_md5': '72180597ed5bfaa73491755f74b84738',
'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
return ['synthetic']
def event_labels(self, scene_label=None):
labels = ['babycry', 'glassbreak', 'gunshot']
labels.sort()
return labels
def prepare(self):
scene_label = 'synthetic'
subset_map = {'test': 'evaltest'}
param_hash = 'bbb81504db15a03680a0044474633b67'
Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder))
if not self.meta_container.exists() and self.reference_data_present:
meta_data = MetaDataContainer()
for class_label in self.event_labels():
                for subset_label, subset_name_on_disk in iteritems(subset_map):
mixture_path = os.path.join(
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'audio'
)
mixture_meta_path = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_name_on_disk,
param_hash,
'meta'
)
event_list_filename = os.path.join(
mixture_meta_path,
'event_list_' + subset_name_on_disk + '_' + class_label + '.csv'
)
if os.path.isfile(event_list_filename):
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path, item.filename)
item.scene_label = scene_label
meta_data += current_meta
meta_data.save(filename=self.meta_file)
test_filename = self.evaluation_setup_filename(
setup_part='test',
fold=None,
file_extension='txt'
)
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
fold=None,
file_extension='txt'
)
evaluation_setup_exists = True
if not os.path.isfile(test_filename) or not os.path.isfile(evaluate_filename):
evaluation_setup_exists = False
if not evaluation_setup_exists:
mixture_meta_path_test = os.path.join(
self.local_path,
'data',
'mixture_data',
subset_map['test'],
param_hash,
'meta'
)
mixture_path_test = os.path.join(
'data',
'mixture_data',
subset_map['test'],
param_hash,
'audio'
)
test_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
current_meta_ = MetaDataContainer()
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
current_meta_.append(MetaDataItem(
{
'filename': item.filename,
'scene_label': scene_label
}
))
test_meta += current_meta_
test_meta.save(filename=test_filename)
eval_meta = MetaDataContainer()
for class_label in self.event_labels():
event_list_filename = os.path.join(
mixture_meta_path_test,
'event_list_' + subset_map['test'] + '_' + class_label + '.csv'
)
current_meta = MetaDataContainer(
filename=event_list_filename
).load(
fields=['filename', 'onset', 'offset', 'event_label']
)
for item in current_meta:
item.filename = os.path.join(mixture_path_test, item.filename)
item.scene_label = scene_label
eval_meta += current_meta
eval_meta.save(filename=evaluate_filename)
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if setup_part == 'train':
parts.append('train')
elif setup_part == 'test':
parts.append('test')
elif setup_part == 'evaluate':
parts.append('evaluate')
else:
message = '{name}: Unknown setup_part [{setup_part}]'.format(
name=self.__class__.__name__,
setup_part=setup_part
)
self.logger.exception(message)
raise ValueError(message)
return os.path.join(self.evaluation_setup_path, '_'.join(parts) + '.' + file_extension)
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['train'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['test'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
if fold is None or fold == 0:
fold = 'all_data'
data = self.crossvalidation_data['evaluate'][fold]
if scene_label:
data = data.filter(scene_label=scene_label)
if event_label:
data = data.filter(event_label=event_label)
if filename_contains:
data_ = MetaDataContainer()
for item in data:
if filename_contains in item.filename:
data_.append(item)
data = data_
return data
class TUTSoundEvents_2017_DevelopmentSet(SoundEventDataset):
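    """TUT Sound Events 2017 development dataset
    This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
    """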
def __init__(self,
storage_name='TUT-sound-events-2017-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
            'title': 'TUT Sound Events 2017, development dataset',
            'url': 'https://zenodo.org/record/814831',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/814831/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-development.doc.zip',
'remote_bytes': 56150,
                'remote_md5': 'aa6024e70f5bff3fe15d962b01753e23',
'filename': 'TUT-sound-events-2017-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-development.meta.zip',
'remote_bytes': 140684,
                'remote_md5': '50e870b3a89ed3452e2a35b508840929',
'filename': 'TUT-sound-events-2017-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.1.zip',
'remote_bytes': 1062653169,
                'remote_md5': '6f1cd31592b8240a14be3ee513db6a23',
'filename': 'TUT-sound-events-2017-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-development.audio.2.zip',
'remote_bytes': 213232458,
                'remote_md5': 'adcff03341b84dc8d35f035b93c1efa0',
'filename': 'TUT-sound-events-2017-development.audio.2.zip'
}
]
kwargs['audio_paths'] = [os.path.join('audio', 'street')]
super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
def prepare(self):
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
meta_data.save(filename=self.meta_file)
self.load()
return self
class TUTSoundEvents_2017_EvaluationSet(SoundEventDataset):
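    """TUT Sound Events 2017 evaluation dataset
    This dataset is used in DCASE2017 - Task 3, Sound event detection in real life audio
    """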
def __init__(self,
storage_name='TUT-sound-events-2017-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
            'title': 'TUT Sound Events 2017, evaluation dataset',
            'url': 'https://zenodo.org/record/1040179',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/1040179/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.doc.zip',
'remote_bytes': 54606,
'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9',
'filename': 'TUT-sound-events-2017-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.meta.zip',
'remote_bytes': 762,
'remote_md5': 'a951598abaea87296ca409e30fb0b379',
'filename': 'TUT-sound-events-2017-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2017-evaluation.audio.zip',
'remote_bytes': 388173790,
'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871',
'filename': 'TUT-sound-events-2017-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['street']
labels.sort()
return labels
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate',
scene_label=self.scene_labels()[0]
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
meta_data = MetaDataContainer()
eval_file.load()
for item in eval_file:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += eval_file
meta_data.save(filename=self.meta_file)
self.load()
elif os.path.isdir(os.path.join(self.local_path, 'meta')):
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
meta_data = MetaDataContainer()
for annotation_filename in annotation_files:
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
meta_data.save(filename=self.meta_file)
self.load()
return self
class TUTAcousticScenes_2016_DevelopmentSet(AcousticSceneDataset):
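    """TUT Acoustic Scenes 2016 development dataset
    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """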
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, development dataset',
'url': 'https://zenodo.org/record/45739',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45739/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.doc.zip',
'remote_bytes': 69671,
'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926',
'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.meta.zip',
'remote_bytes': 28815,
'remote_md5': '779b33da2ebbf8bde494b3c981827251',
'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.error.zip',
'remote_bytes': 1283,
'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386',
'filename': 'TUT-acoustic-scenes-2016-development.error.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip',
'remote_bytes': 1070981236,
'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d',
'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip',
'remote_bytes': 1067186166,
'remote_md5': 'd36cf3253e2c041f68e937a3fe804807',
'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip',
'remote_bytes': 1073644405,
'remote_md5': '0393a9620ab882b1c26d884eccdcffdd',
'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip',
'remote_bytes': 1072111347,
'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce',
'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip',
'remote_bytes': 1069681513,
'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057',
'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip',
'remote_bytes': 1072890150,
'remote_md5': '591aad3219d1155342572cc1f6af5680',
'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip',
'remote_bytes': 1069265197,
'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab',
'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip',
'remote_bytes': 528461098,
'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8',
'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'
}
]
kwargs['audio_paths'] = [
'audio'
]
super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)
def prepare(self):
if not self.meta_container.exists():
meta_data = {}
            for fold in self.folds():
fold_data = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='train', fold=fold)
).load()
fold_data += MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)
).load()
for item in fold_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
self.load()
return self
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = raw_filename.split('_')[0]
class TUTAcousticScenes_2016_EvaluationSet(AcousticSceneDataset):
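    """TUT Acoustic Scenes 2016 evaluation dataset
    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """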
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'scene'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Acoustic Scenes 2016, evaluation dataset',
'url': 'https://zenodo.org/record/165995',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/165995/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip',
'remote_bytes': 69217,
'remote_md5': 'ef315bf912d1124050646888cc3ceba2',
'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip',
'remote_bytes': 5962,
'remote_md5': '0d5c131fc3f50c682de62e0e648aceba',
'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip',
'remote_bytes': 1067685684,
'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip',
'remote_bytes': 1068308900,
'remote_md5': '7930f1dc26707ab3ba9526073af87333',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip',
'remote_bytes': 538894804,
'remote_md5': '17187d633d6402aee4b481122a1b28f0',
'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
if item.filename_original is not None:
raw_path, raw_filename = os.path.split(item.filename_original)
item.identifier = raw_filename.split('_')[0]
del item['filename_original']
def prepare(self):
if not self.meta_container.exists():
evaluate_filename = self.evaluation_setup_filename(
setup_part='evaluate'
)
eval_file = MetaDataContainer(filename=evaluate_filename)
if eval_file.exists():
eval_data = eval_file.load()
meta_data = {}
for item in eval_data:
if item.filename not in meta_data:
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data[item.filename] = item
MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
self.load()
return self
class TUTSoundEvents_2016_DevelopmentSet(SoundEventDataset):
def __init__(self,
storage_name='TUT-acoustic-scenes-2016-development',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, development dataset',
'url': 'https://zenodo.org/record/45759',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = 4
source_url = 'https://zenodo.org/record/45759/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-development.doc.zip',
'remote_bytes': 70918,
'remote_md5': '33fd26a895530aef607a07b08704eacd',
'filename': 'TUT-sound-events-2016-development.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-development.meta.zip',
'remote_bytes': 122321,
'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d',
'filename': 'TUT-sound-events-2016-development.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-development.audio.zip',
'remote_bytes': 1014040667,
'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8',
'filename': 'TUT-sound-events-2016-development.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
if absolute_path:
item.filename = self.relative_to_absolute_path(item.filename)
else:
item.filename = self.absolute_to_relative_path(item.filename)
raw_path, raw_filename = os.path.split(item.filename)
item.identifier = os.path.splitext(raw_filename)[0]
item.source_label = 'mixture'
def prepare(self):
if not self.meta_container.exists():
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
meta_data.save(filename=self.meta_file)
self.load()
return self
class TUTSoundEvents_2016_EvaluationSet(SoundEventDataset):
def __init__(self,
storage_name='TUT-sound-events-2016-evaluation',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
'title': 'TUT Sound Events 2016, evaluation dataset',
'url': 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/',
'audio_source': 'Field recording',
'audio_type': 'Natural',
'recording_device_model': 'Roland Edirol R-09',
'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
'licence': 'free non-commercial'
}
kwargs['crossvalidation_folds'] = None
source_url = 'https://zenodo.org/record/996424/files/'
kwargs['package_list'] = [
{
'content_type': 'documentation',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.doc.zip',
'remote_bytes': 69834,
'remote_md5': '0644b54d96f4cefd0ecb2c7ea9161aa9',
'filename': 'TUT-sound-events-2016-evaluation.doc.zip'
},
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.meta.zip',
'remote_bytes': 41608,
'remote_md5': '91c266b0780ac619a0d74298a3805e9e',
'filename': 'TUT-sound-events-2016-evaluation.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-sound-events-2016-evaluation.audio.zip',
'remote_bytes': 471072452,
'remote_md5': '29434e8c53bd51206df0234e6cf2238c',
'filename': 'TUT-sound-events-2016-evaluation.audio.zip'
}
]
kwargs['audio_paths'] = [
os.path.join('audio', 'home'),
os.path.join('audio', 'residential_area')
]
super(TUTSoundEvents_2016_EvaluationSet, self).__init__(**kwargs)
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def prepare(self):
if not self.meta_container.exists() and os.path.isdir(os.path.join(self.local_path, 'meta')):
meta_data = MetaDataContainer()
annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
for annotation_filename in annotation_files:
scene_label = os.path.split(os.path.split(annotation_filename)[0])[1]
identifier = os.path.splitext(os.path.split(annotation_filename)[1])[0]
audio_filename = os.path.join('audio', scene_label, identifier + '.wav')
data = MetaDataContainer(filename=annotation_filename).load(decimal='comma')
for item in data:
item.filename = audio_filename
item.scene_label = scene_label
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
meta_data.save(filename=self.meta_file)
self.load()
return self
class TUT_SED_Synthetic_2016(SoundEventDataset):
def __init__(self,
storage_name='TUT-SED-synthetic-2016',
data_path=None,
included_content_types=None,
**kwargs):
kwargs['included_content_types'] = included_content_types
kwargs['data_path'] = data_path
kwargs['storage_name'] = storage_name
kwargs['dataset_group'] = 'event'
kwargs['dataset_meta'] = {
'authors': 'Emre Cakir',
'title': 'TUT-SED Synthetic 2016',
'url': 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/tut-sed-synthetic-2016',
'audio_source': 'Field recording',
'audio_type': 'Synthetic',
'recording_device_model': 'Unknown',
'microphone_model': 'Unknown',
}
kwargs['crossvalidation_folds'] = 1
source_url = 'http://www.cs.tut.fi/sgn/arg/taslp2017-crnn-sed/datasets/TUT-SED-synthetic-2016/'
kwargs['package_list'] = [
{
'content_type': 'meta',
'remote_file': source_url + 'TUT-SED-synthetic-2016.meta.zip',
'remote_bytes': 973618,
'remote_md5': 'e2ae895bdf39f2a359a97bb0bcf76101',
'filename': 'TUT-SED-synthetic-2016.meta.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.1.zip',
'remote_bytes': 1026369647,
'remote_md5': 'ede8b9c6d1b0d1d64bfc5791404f58fb',
'filename': 'TUT-SED-synthetic-2016.audio.1.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.2.zip',
'remote_bytes': 1018650039,
'remote_md5': 'cde647a377a58fc74e3012139d65c447',
'filename': 'TUT-SED-synthetic-2016.audio.2.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.3.zip',
'remote_bytes': 1070239392,
'remote_md5': '5fc2824dcce442f441f4c6a975881789',
'filename': 'TUT-SED-synthetic-2016.audio.3.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.4.zip',
'remote_bytes': 1040622610,
'remote_md5': '4ba016d949171ccc8493d3d274009825',
'filename': 'TUT-SED-synthetic-2016.audio.4.zip'
},
{
'content_type': 'audio',
'remote_file': source_url + 'TUT-SED-synthetic-2016.audio.5.zip',
'remote_bytes': 264812997,
'remote_md5': '6a44578dd7738bd4ba044d5d2b9a5448',
'filename': 'TUT-SED-synthetic-2016.audio.5.zip'
},
{
'content_type': 'features',
'remote_file': source_url + 'TUT-SED-synthetic-2016.features.zip',
'remote_bytes': 480894082,
'remote_md5': '66bc0abc19a276986964a6d4a2d2f6bc',
'filename': 'TUT-SED-synthetic-2016.features.zip'
}
]
kwargs['audio_paths'] = ['audio']
super(TUT_SED_Synthetic_2016, self).__init__(**kwargs)
def prepare(self):
if not self.meta_container.exists():
meta_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['txt'])
meta_data = MetaDataContainer()
for meta_filename in meta_files:
audio_filename = os.path.join('audio', os.path.split(meta_filename)[1].replace('.txt', '.wav'))
data = MetaDataContainer(filename=meta_filename).load()
for item in data:
item.filename = audio_filename
item.scene_label = 'synthetic'
item.source_label = 'm'
self.process_meta_item(
item=item,
absolute_path=False
)
meta_data += data
meta_data.save(filename=self.meta_file)
self.load()
return self
def evaluation_setup_filename(self, setup_part='train', fold=None, scene_label=None, file_extension='txt'):
parts = []
if scene_label:
parts.append(scene_label)
if fold:
parts.append('fold' + str(fold))
if setup_part == 'train':
return os.path.join(self.evaluation_setup_path, 'train+validate' + '.' + file_extension)
elif setup_part == 'test':
return os.path.join(self.evaluation_setup_path, 'test' + '.' + file_extension)
elif setup_part == 'validate':
return os.path.join(self.evaluation_setup_path, 'validate' + '.' + file_extension)
elif setup_part == 'evaluate':
return os.path.join(self.evaluation_setup_path, 'evaluate' + '.' + file_extension)
def validation_split(self, fold=None, scene_label=None, **kwargs):
validation_files = MetaDataContainer(
filename=self.evaluation_setup_filename(setup_part='validate', fold=fold)
).load().unique_files
for index, filename in enumerate(validation_files):
validation_files[index] = self.relative_to_absolute_path(filename)
return validation_files
def file_features(self, filename):
filename_ = self.absolute_to_relative_path(filename).replace('audio/', 'features/')
filename_ = os.path.splitext(filename_)[0] + '.cpickle'
if os.path.isfile(os.path.join(self.local_path, filename_)):
feature_data = pickle.load(open(os.path.join(self.local_path, filename_), "rb"))
return feature_data['feat']
else:
return None
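# --- Editorial sketch (assumption, not from the original source): file_features()
# above maps an audio path to a cached feature file by swapping 'audio/' for
# 'features/' and the extension for '.cpickle'. This standalone helper replays
# just that path convention so it can be checked without the dataset on disk.
def _audio_to_feature_path_sketch(relative_audio_path):
    feature_path = relative_audio_path.replace('audio/', 'features/')
    return os.path.splitext(feature_path)[0] + '.cpickle'

assert _audio_to_feature_path_sketch('audio/mix_001.wav') == 'features/mix_001.cpickle'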
| true
| true
|
790aa33d7aa90375177f9e9b7d434c02fd3dee2b
| 2,766
|
py
|
Python
|
cart/migrations/0001_initial.py
|
theonlykingpin/snapfoodclone
|
0c1a7839424e89d9bc7bfb55c150a92055759702
|
[
"MIT"
] | 11
|
2021-09-17T07:44:05.000Z
|
2022-02-06T08:33:30.000Z
|
cart/migrations/0001_initial.py
|
theonlykingpin/snapfoodclone
|
0c1a7839424e89d9bc7bfb55c150a92055759702
|
[
"MIT"
] | 1
|
2021-10-09T07:37:14.000Z
|
2021-10-09T07:37:14.000Z
|
cart/migrations/0001_initial.py
|
theonlykingpin/snapfoodclone
|
0c1a7839424e89d9bc7bfb55c150a92055759702
|
[
"MIT"
] | 3
|
2021-09-27T14:12:13.000Z
|
2021-10-18T12:21:37.000Z
|
# Generated by Django 3.2 on 2021-09-07 12:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('payment', '0002_alter_invoice_address'),
('item', '0002_alter_item_upc'),
('accounts', '0002_auto_20210831_0046'),
('service', '0008_service_available'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
('is_paid', models.BooleanField(default=False, verbose_name='is paid')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='carts', to='accounts.customer', verbose_name='customer')),
('invoice', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='cart', to='payment.invoice', verbose_name='invoice')),
('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='carts', to='service.service', verbose_name='service')),
],
options={
'verbose_name': 'Cart',
'verbose_name_plural': 'Carts',
'db_table': 'cart',
},
),
migrations.CreateModel(
name='CartLine',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='created time')),
('modified_time', models.DateTimeField(auto_now=True, verbose_name='modified time')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='quantity')),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines', to='cart.cart', verbose_name='cart')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='lines', to='item.item', verbose_name='item')),
],
options={
'verbose_name': 'Cart line',
'verbose_name_plural': 'Cart lines',
'db_table': 'cart_line',
'ordering': ('created_time', 'modified_time'),
'unique_together': {('item', 'cart')},
},
),
]
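# --- Editorial sketch (assumption): a plausible reconstruction of the models
# that would generate the CreateModel operations above. The shared
# created_time/modified_time pair suggests an abstract timestamped base model;
# that base and the exact app layout are hypothetical, so this stays commented.
#
# from django.db import models
#
# class Cart(models.Model):
#     created_time = models.DateTimeField(auto_now_add=True)
#     modified_time = models.DateTimeField(auto_now=True)
#     is_paid = models.BooleanField(default=False)
#     customer = models.ForeignKey('accounts.Customer', null=True, blank=True,
#                                  on_delete=models.CASCADE, related_name='carts')
#     invoice = models.OneToOneField('payment.Invoice', null=True, blank=True,
#                                    on_delete=models.PROTECT, related_name='cart')
#     service = models.ForeignKey('service.Service', null=True,
#                                 on_delete=models.PROTECT, related_name='carts')
#
#     class Meta:
#         db_table = 'cart'
#
# CartLine would mirror the second CreateModel the same way (quantity plus
# cart/item foreign keys, unique together on (item, cart)).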
| 51.222222
| 187
| 0.606652
|
| true
| true
|
790aa3522fd3ff8ce700562975062d5b973be574
| 193
|
py
|
Python
|
python/chap_1/1.4.3.py
|
RyodoTanaka/Cording_Matrix
|
7d357266c0b659495f226000418e9cdaee133ebf
|
[
"BSD-3-Clause"
] | null | null | null |
python/chap_1/1.4.3.py
|
RyodoTanaka/Cording_Matrix
|
7d357266c0b659495f226000418e9cdaee133ebf
|
[
"BSD-3-Clause"
] | null | null | null |
python/chap_1/1.4.3.py
|
RyodoTanaka/Cording_Matrix
|
7d357266c0b659495f226000418e9cdaee133ebf
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cplotting as cplot
S={2+2j, 3+2j, 1.75+1j, 2+1j, 2.25+1j, 2.5+1j, 2.75+1j, 3+1j, 3.25+1j}
cplot.plot({1+2j+z for z in S},4)
cplot.show()
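# --- Editorial sketch: the same translation rendered with matplotlib instead of
# the course-provided cplotting module (a swapped-in, widely used plotting
# library, not the original approach). Adding 1+2j shifts every point right by
# 1 and up by 2 in the complex plane.
def _plot_translation_sketch():
    import matplotlib.pyplot as plt
    original = {2+2j, 3+2j, 1.75+1j, 2+1j, 2.25+1j, 2.5+1j, 2.75+1j, 3+1j, 3.25+1j}
    translated = {1+2j + z for z in original}
    for points, marker in ((original, 'o'), (translated, 'x')):
        plt.scatter([z.real for z in points], [z.imag for z in points], marker=marker)
    plt.axis('equal')
    plt.show()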
| 19.3
| 70
| 0.590674
|
| true
| true
|
790aa38bfe25fb8993b276dd925cb65f95a11ac6
| 4,225
|
py
|
Python
|
flask_app/events/aws/sqs.py
|
andersoncontreira/flask-skeleton-python
|
4a3087cf94f387830850dc438338251da86c3cfb
|
[
"MIT"
] | 1
|
2021-08-11T21:29:50.000Z
|
2021-08-11T21:29:50.000Z
|
flask_app/events/aws/sqs.py
|
andersoncontreira/flask-skeleton-python
|
4a3087cf94f387830850dc438338251da86c3cfb
|
[
"MIT"
] | null | null | null |
flask_app/events/aws/sqs.py
|
andersoncontreira/flask-skeleton-python
|
4a3087cf94f387830850dc438338251da86c3cfb
|
[
"MIT"
] | null | null | null |
import json
import os
from flask_app.config import get_config
from flask_app.logging import get_logger
import boto3
class SQSEvents:
def __init__(self, logger=None, config=None):
# logger
self.logger = logger if logger is not None else get_logger()
# configurations
self.config = config if config is not None else get_config()
# last_exception
self.exception = None
def connect(self):
connection = None
try:
endpoint_url = self.config.SQS_ENDPOINT
profile = os.environ['AWS_PROFILE'] if 'AWS_PROFILE' in os.environ else None
self.logger.info('SQSEvents - profile: {}'.format(profile))
self.logger.info('SQSEvents - endpoint_url: {}'.format(endpoint_url))
self.logger.info('SQSEvents - self.config.REGION_NAME: {}'.format(self.config.REGION_NAME))
if profile:
session = boto3.session.Session(profile_name=profile)
connection = session.resource(
'sqs',
endpoint_url=endpoint_url,
region_name=self.config.REGION_NAME
)
else:
connection = boto3.resource(
'sqs',
endpoint_url=endpoint_url,
region_name=self.config.REGION_NAME
)
self.logger.info('SQSEvents - Connected')
except Exception as err:
self.logger.error(err)
return connection
def send_message(self, message, queue_url):
sqs = self.connect()
if queue_url is None:
            raise Exception('Queue name must be provided')
queue_name = os.path.basename(queue_url)
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
# Avoid double json encode
if not isinstance(message, str):
try:
message = json.dumps(message)
except Exception as err:
self.logger.error(err)
message = str(message)
# Create a new message
response = queue.send_message(MessageBody=message)
except Exception as err:
self.logger.error(err)
self.exception = err
response = None
return response
def get_message(self, queue_url):
sqs = self.connect()
if queue_url is None:
            raise Exception('Queue name must be provided')
queue_name = os.path.basename(queue_url)
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
# Create a new message
message = queue.receive_messages(
AttributeNames=[
'All'
],
MaxNumberOfMessages=1,
VisibilityTimeout=5,
WaitTimeSeconds=1
)
except Exception as err:
self.logger.error(err)
self.exception = err
message = None
return message
def create_queue(self, queue_name, attributes=None):
queue = None
if not attributes:
attributes = {'DelaySeconds': '5'}
sqs = self.connect()
try:
# Create the queue. This returns an SQS.Queue instance
queue = sqs.create_queue(QueueName=queue_name, Attributes=attributes)
except Exception as err:
self.logger.error(err)
self.exception = err
return queue
def delete_queue(self, queue_name):
result = True
sqs = self.connect()
try:
# Get the queue
queue = sqs.get_queue_by_name(QueueName=queue_name)
if queue is not None:
queue_url = queue.url
client = sqs.meta.client
client.delete_queue(QueueUrl=queue_url)
else:
                raise Exception('queue does not exist')
except Exception as err:
self.logger.error(err)
self.exception = err
result = False
return result
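# --- Editorial sketch: a minimal round trip through SQSEvents above. The queue
# name and payload are hypothetical; message.body and message.delete() come from
# the standard boto3 Message resource API.
def _sqs_events_usage_sketch():
    events = SQSEvents()
    queue = events.create_queue('orders-queue')  # hypothetical queue name
    if queue is not None:
        events.send_message({'order_id': 123}, queue.url)  # dicts get JSON-encoded
        for message in events.get_message(queue.url) or []:
            print(message.body)
            message.delete()  # acknowledge by deleting the message
        events.delete_queue('orders-queue')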
| 29.964539
| 103
| 0.550296
|
| true
| true
|
790aa405529a3e3ce8a0371e0b6ea0ba805d6c37
| 257,820
|
py
|
Python
|
zerver/lib/actions.py
|
luisogandob/zulip
|
f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/actions.py
|
luisogandob/zulip
|
f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6
|
[
"Apache-2.0"
] | null | null | null |
zerver/lib/actions.py
|
luisogandob/zulip
|
f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import itertools
import logging
import os
import platform
import time
from collections import defaultdict
from operator import itemgetter
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import ujson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import get_emoji_file_name
from zerver.lib.exceptions import (
ErrorCode,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MentionData, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import (
MessageDict,
access_message,
render_markdown,
truncate_body,
truncate_topic,
update_first_visible_message_id,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_for_send_message,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from zerver.models import (
MAX_MESSAGE_LENGTH,
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
query_for_ids,
realm_filters_for_realm,
stream_name_in_use,
validate_attachment_request,
)
from zerver.tornado.event_queue import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed
# Used to type-annotate parameters that accepted both str and unicode under
# Python 2 but only str under Python 3.
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {
'id': user_id,
'flags': ['read']
}
# Store an event in the log for re-importing messages
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
# return user ids of users who can access the attributes of
# a stream, such as its name/description.
if stream.is_public():
# For a public stream, this is everyone in the realm
# except unsubscribed guest users
return public_stream_user_ids(stream)
else:
# for a private stream, it's subscribers plus realm admins.
return private_stream_user_ids(
stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
UserProfile.ROLE_REALM_OWNER: 0,
UserProfile.ROLE_MEMBER: 0,
UserProfile.ROLE_GUEST: 0}
for value_dict in list(UserProfile.objects.filter(
realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))):
human_counts[value_dict['role']] = value_dict['role__count']
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
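# --- Editorial sketch: the aggregation above replayed over plain data, so the
# returned shape is visible without the ORM. The integer role codes and the
# 'humans'/'bots' keys are illustrative stand-ins for the UserProfile.ROLE_*
# and RealmAuditLog.ROLE_COUNT_* constants.
def _role_count_shape_sketch():
    from collections import Counter
    roles = [100, 200, 400, 400, 600]  # e.g. one owner, one admin, two members, one guest
    human_counts = dict(Counter(roles))  # role code -> number of active humans
    return {'humans': human_counts, 'bots': 3}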
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def notify_new_user(user_profile: UserProfile) -> None:
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email)
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
with override_language(user_profile.realm.default_language):
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"@_**{user_profile.full_name}|{user_profile.id}**",
user_count=user_count
)
internal_send_stream_message(
user_profile.realm,
sender,
signup_notifications_stream,
_("signups"),
message
)
# We also send a notification to the Zulip administrative realm
admin_realm = sender.realm
try:
# Check whether the stream exists
signups_stream = get_signups_stream(admin_realm)
with override_language(admin_realm.default_language):
# We intentionally use the same strings as above to avoid translation burden.
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>",
user_count=user_count
)
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
user_profile.realm.display_subdomain,
message
)
except Stream.DoesNotExist:
# If the signups stream hasn't been created in the admin
# realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in
user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
"""Give you the last ONBOARDING_TOTAL_MESSAGES messages on your public
streams, so you have something to look at in your home view once
you finish the tutorial. The most recent ONBOARDING_UNREAD_MESSAGES
are marked unread.
"""
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
date_sent__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list(
'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
if len(message_ids_to_use) == 0:
return
# Handle the race condition where a message arrives between
# bulk_add_subscriptions above and the Message query just above
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
# Mark the newest ONBOARDING_UNREAD_MESSAGES as unread.
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
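# --- Editorial sketch: the unread-window bookkeeping from add_new_user_history()
# isolated over plain ints. The newest `unread_cap` ids stay unread; older ones
# are pre-marked read. `already` stands in for UserMessage rows created by the
# race described above.
def _unread_window_sketch(message_ids, already, unread_cap=20):
    rows = []  # (message_id, is_read), visited newest first
    marked_unread = 0
    for message_id in reversed(message_ids):  # newest -> oldest
        if message_id in already:
            continue
        is_read = marked_unread >= unread_cap
        if not is_read:
            marked_unread += 1
        rows.append((message_id, is_read))
    return list(reversed(rows))  # oldest first, mirroring the bulk_create order

assert _unread_window_sketch([1, 2, 3, 4], already={4}, unread_cap=2) == \
    [(1, True), (2, False), (3, False)]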
# Does the processing for a new user account:
# * Subscribes to default/invitation streams
# * Fills in some recent historical messages
# * Notifies other users in realm and Zulip about the signup
# * Deactivates PreregistrationUser objects
# * subscribe the user to newsletter if newsletter_data is specified
def process_new_human_user(user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Mapping[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
realm_creation: bool=False) -> None:
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
prereg_user.status = confirmation_settings.STATUS_ACTIVE
prereg_user.save(update_fields=['status'])
streams = prereg_user.streams.all()
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
# If the user's invitation didn't explicitly list some streams, we
# add the default streams
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
# mit_beta_users don't have a referred_by field
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>")
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(
email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\
.update(status=confirmation_settings.STATUS_REVOKED)
if prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\
.update(status=confirmation_settings.STATUS_REVOKED)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if user_profile.realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
# We have an import loop here; it's intentional, because we want
# to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
# If the user was created automatically via the API, we may
# not want to register them for the newsletter
queue_json_publish(
"signups",
{
'email_address': user_profile.delivery_email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
person = format_user_row(user_profile.realm, user_profile, user_row,
# Since we don't know what the client
# supports at this point in the code, we
# just assume client_gravatar and
# user_avatar_url_field_optional = False :(
client_gravatar=False,
user_avatar_url_field_optional=False,
# We assume there's no custom profile
# field data for a new user; initial
# values are expected to be added in a
# later event.
custom_profile_field_data={})
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services = get_service_dicts_for_bot(user_profile.id),
)
# Set the owner key only when the bot has an owner.
# The default bots don't have an owner. So don't
# set the owner key while reactivating them.
if user_profile.bot_owner is not None:
bot['owner_id'] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
bot_type: Optional[int]=None, role: Optional[int]=None,
bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream]=None,
default_events_register_stream: Optional[Stream]=None,
default_all_public_streams: Optional[bool]=None,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
source_profile: Optional[UserProfile]=None,
realm_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> UserProfile:
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name,
role=role, bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
# Note that for bots, the caller will send an additional event
# with bot-specific info like services.
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation)
return user_profile
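# --- Editorial sketch: a typical call into do_create_user() as wired above. The
# subdomain and credentials are hypothetical; get_realm is assumed importable
# from zerver.models. The audit-log row, usage-stat increment, and realm_user
# "add" event are all emitted inside the function itself.
def _do_create_user_usage_sketch():
    from zerver.models import get_realm
    realm = get_realm('example')  # hypothetical realm subdomain
    return do_create_user(
        email='new-user@example.com',
        password=None,  # e.g. when an external auth backend owns the password
        realm=realm,
        full_name='New User',
    )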
def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
# Unlike do_activate_user, this is meant for re-activating existing users,
# so it doesn't reset their password, etc.
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any,
acting_user: Optional[UserProfile] = None) -> None:
"""Takes in a realm object, the name of an attribute to update, the
    value to update it to, and the user who initiated the update.
"""
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
f'Cannot update {name}: {value} is not an instance of {property_type}')
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
}))
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so a change
            # between the other visibility values does not require updating
            # that field, and we can save the work and return here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile, realm)
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
UserProfile.objects.bulk_update(user_profiles, ['email'])
for user_profile in user_profiles:
flush_user_profile(sender=UserProfile, instance=user_profile)
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool],
acting_user: Optional[UserProfile]=None) -> None:
old_value = realm.authentication_methods_dict()
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(),
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value}
}))
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
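# --- Editorial sketch: the bitfield update above replayed on a plain int, to
# make the set_bit semantics explicit. django-bitfield stores one flag per bit;
# the indices here are illustrative.
def _set_bit_sketch(flags, index, enabled):
    mask = 1 << index
    return (flags | mask) if enabled else (flags & ~mask)

assert _set_bit_sketch(0b101, 1, True) == 0b111   # enable bit 1
assert _set_bit_sketch(0b111, 2, False) == 0b011  # disable bit 2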
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.allow_community_topic_editing = allow_community_topic_editing
realm.save(update_fields=['allow_message_editing',
'allow_community_topic_editing',
'message_content_edit_limit_seconds',
],
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
allow_community_topic_editing=allow_community_topic_editing),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
message_content_delete_limit_seconds: int) -> None:
realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
realm.save(update_fields=['message_content_delete_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream],
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
"""
Deactivate this realm. Do NOT deactivate the users -- we need to be able to
tell the difference between users that were intentionally deactivated,
e.g. by a realm admin, and users who can't currently use Zulip because their
realm has been deactivated.
"""
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# bumped to the login screen, where they'll get a realm deactivation
# notice when they try to log in.
delete_user_sessions(user)
event = dict(type="realm", op="deactivated",
realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
# again; otherwise, other users won't get the correct behavior
# when trying to send messages to this person inside Zulip.
#
# Ideally, we need to also ensure their zephyr mirroring bot
# isn't running, but that's a separate issue.
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
clear_scheduled_emails([user_profile.id])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(type="realm_user", op="remove",
person=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None:
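    """Deactivate a stream: mark it deactivated and invite-only,
    deactivate all of its subscriptions, rename it with a
    !DEACTIVATED: prefix to free up the original name, remove it from
    default streams and default stream groups, and notify clients."""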
# Get the affected user ids *before* we deactivate everybody.
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
# Preserve as much as possible the original stream name while giving it a
# special prefix that both indicates that the stream is deactivated and
# frees up the original name for reuse.
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
            # A stream with this name was already deactivated; keep prepending
            # !s until we have a unique stream name or we've hit the rename limit.
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
    # If we don't have a unique name at this point, this will fail later in the
    # code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
# If this is a default stream, remove it, properly sending a
# notification to browser clients.
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
# Remove the old stream information from remote cache.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user,
modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time)
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id,
new_email=user_profile.email)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
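    """Change a user's delivery email.  If the realm's email address
    visibility policy makes addresses public, the public email field is
    updated too and other users are notified; otherwise only the target
    user is notified, since the delivery email is private."""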
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
# We notify just the target user (and eventually org admins, only
# when email_address_visibility=EMAIL_ADDRESS_VISIBILITY_ADMINS)
# about their new delivery email, since that field is private.
payload = dict(user_id=user_profile.id,
delivery_email=new_email)
event = dict(type='realm_user', op='update', person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
# If the user is using Gravatar to manage their email address,
# their Gravatar just changed, and we need to notify other
# clients.
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
# Additionally, if we're also changing the publicly visible
# email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url,
})
language = user_profile.default_language
send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language, context=context,
realm=user_profile.realm)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
email_to_fullname: Callable[[str], str]) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient_id = message['message'].sender.recipient_id
if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
content = (
_("Congratulations on your first reply!") +
" "
":tada:"
"\n"
"\n" +
_("Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
)
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender, content)
def render_incoming_message(message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False) -> str:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
            realm_alert_words_automaton=realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
push_notify_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int]=set(),
possible_wildcard_mention: bool=True) -> RecipientInfoResult:
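    """Compute the sets of user ids relevant to delivering a message to
    the given recipient: everyone who should receive it, plus the
    subsets needed for push/email notifications, UserMessage creation,
    soft-deactivation handling, and service bot event queues."""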
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
        assert len(message_to_user_ids) in [1, 2]
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
        assert stream_topic is not None
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = stream_topic.get_active_subscriptions().annotate(
user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
user_profile_wildcard_mentions_notify=F(
'user_profile__wildcard_mentions_notify'),
).values(
'user_profile_id',
'push_notifications',
'email_notifications',
'wildcard_mentions_notify',
'user_profile_email_notifications',
'user_profile_push_notifications',
'user_profile_wildcard_mentions_notify',
'is_muted',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
            # This implements the model where the UserProfile stream
            # notification settings are defaults, which can be overridden
            # by the stream-level settings (when those values are not null).
if row['is_muted']:
return False
if row['user_profile_id'] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row['user_profile_' + setting]
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send('push_notifications', row)
}
stream_email_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send('email_notifications', row)
}
if possible_wildcard_mention:
# If there's a possible wildcard mention, we need to
# determine which users would receive a wildcard mention
# notification for this message should the message indeed
# contain a wildcard mention.
#
# We don't have separate values for push/email
# notifications here; at this stage, we're just
# determining whether this wildcard mention should be
# treated as a mention (and follow the user's mention
# notification preferences) or a normal message.
wildcard_mention_user_ids = {
row['user_profile_id']
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
# Important note: Because we haven't rendered markdown yet, we
# don't yet know which of these possibly-mentioned users was
# actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
    # for our data structures not related to bots.
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
        # query_for_ids is highly optimized for large queries, and we
        # need this codepath to be fast (it's part of sending messages).
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id',
)
rows = list(query)
else:
# TODO: We should always have at least one user_id as a recipient
# of any message we send. Right now the exception to this
# rule is `notify_new_user`, which, at least in a possibly
# contrived test scenario, can attempt to send messages
# to an inactive bot. When we plug that hole, we can avoid
# this `else` clause and just `assert(user_ids)`.
#
# UPDATE: It's February 2020 (and a couple years after the above
# comment was written). We have simplified notify_new_user
# so that it should be a little easier to reason about.
# There is currently some cleanup to how we handle cross
# realm bots that is still under development. Once that
# effort is complete, we should be able to address this
# to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
"""Only includes users on the explicit message to line"""
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications'],
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle'],
)
# These two bot data structures need to filter from the full set
# of users who either are receiving the message or might have been
# mentioned in it, and so can't use get_ids_for.
#
# Further in the do_send_messages code path, once
# `mentioned_user_ids` has been computed via markdown, we'll filter
# these data structures for just those users who are either a
# direct recipient or were mentioned; for now, we're just making
# sure we have the data we need for that without extra database
# queries.
default_bot_user_ids = {
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
)
return info
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int], active_user_ids: Set[int],
recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
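    """Build the queue events (keyed by queue name) needed to notify
    any service bots that were mentioned in or directly received this
    message."""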
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s',
user_profile_id, bot_type,
)
return
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
# Thus, it is important to filter any users who aren't part of
# either mentioned_user_ids (the actual mentioned users) or
# active_user_ids (the actual recipients).
#
# So even though this is implied by the logic below, we filter
# these not-actually-mentioned users here, to help keep this
# function future-proof.
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
# Mention triggers, for stream messages
if is_stream and user_profile_id in mentioned_user_ids:
trigger = 'mention'
# PM triggers for personal and huddle messages
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
return
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
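    """Create ScheduledMessage rows (in bulk) for the given prepared
    messages and return the ids of the new rows."""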
scheduled_messages: List[ScheduledMessage] = []
for message in messages:
scheduled_message = ScheduledMessage()
scheduled_message.sender = message['message'].sender
scheduled_message.recipient = message['message'].recipient
topic_name = message['message'].topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = message['message'].content
scheduled_message.sending_client = message['message'].sending_client
scheduled_message.stream = message['stream']
scheduled_message.realm = message['realm']
scheduled_message.scheduled_timestamp = message['deliver_at']
if message['delivery_type'] == 'send_later':
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif message['delivery_type'] == 'remind':
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: bool=False,
mark_as_read: Sequence[int]=[]) -> List[int]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
# Filter out messages which didn't pass internal_prep_message properly
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids: List[int] = []
new_messages: List[MutableMapping[str, Any]] = []
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed: Set[str] = set()
    # For consistency, changes to the default values for these `get` calls
    # should also be applied to the default args in do_send_message.
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['stream_email_user_ids'] = info['stream_email_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = markdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if message['message'].mentions_wildcard:
message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
else:
message['wildcard_mention_user_ids'] = []
'''
Once we have the actual list of mentioned ids from message
rendering, we can patch in "default bots" (aka normal bots)
who were directly mentioned in this message as eligible to
get UserMessage rows.
'''
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
# Claim attachments in message
for message in messages:
if do_claim_attachments(message['message'],
message['message'].potential_attachment_path_ids):
message['message'].has_attachment = True
message['message'].save(update_fields=['has_attachment'])
ums: List[UserMessageLite] = []
for message in messages:
# Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
# they will be processed later.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
                stream_push_user_ids=message['stream_push_user_ids'],
                stream_email_user_ids=message['stream_email_user_ids'],
mentioned_user_ids=mentioned_user_ids,
mark_as_read=mark_as_read,
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
for message in messages:
do_widget_post_save_actions(message)
for message in messages:
realm_id: Optional[int] = None
if message['message'].is_stream_message():
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related().get(id=stream_id)
assert message['stream'] is not None # assert needed because stubs for django are missing
realm_id = message['stream'].realm_id
# Deliver events to the real-time push system, as well as
# enqueuing any additional processing triggered by the message.
wide_message_dict = MessageDict.wide_dict(message['message'], realm_id)
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
'''
TODO: We may want to limit user_ids to only those users who have
UserMessage rows, if only for minor performance reasons.
For now we queue events for all subscribers/sendees of the
message, since downstream code may still do notifications
that don't require UserMessage rows.
Our automated tests have gotten better on this codepath,
but we may have coverage gaps, so we should be careful
about changing the next line.
'''
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
stream_email_notify=(user_id in message['stream_email_user_ids']),
wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
# Note: This is where authorization for single-stream
# get_updates happens! We only attach stream data to the
# notify new_message request if it's a public stream,
# ensuring that in the tornado server, non-public stream
# messages are only associated to their subscribed users.
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['stream'].first_message_id is None:
message['stream'].first_message_id = message['message'].id
message['stream'].save(update_fields=["first_message_id"])
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(message['realm'], event, users)
if links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
},
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# mirror single zephyr messages at a time and don't otherwise
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
'''
The Django ORM is too slow for bulk operations. This class
is optimized for the simple use case of inserting a bunch of
rows into zerver_usermessage.
'''
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
def create_user_messages(message: Message,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read: Sequence[int] = []) -> List[UserMessageLite]:
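    """Build UserMessageLite rows for a message, computing each
    recipient's flags and lazily skipping zero-flag rows for long-term
    idle users, which the soft-reactivation logic can recreate later."""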
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=0,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the markdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if (um.user_profile_id == message.sender.id and
message.sent_by_human()) or \
um.user_profile_id in mark_as_read:
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
um.flags |= UserMessage.flags.is_private
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
#
# See https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html#soft-deactivation
# for details on this system.
user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
um.user_profile_id not in stream_push_user_ids and
um.user_profile_id not in stream_email_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
'''
Doing bulk inserts this way is much faster than using Django,
since we don't have any ORM overhead. Profiling with 1000
    users shows a speedup from 0.436 to 0.027 seconds, so we're
    talking about a roughly 16x speedup.
'''
if not ums:
return
vals = [
(um.user_profile_id, um.message_id, um.flags)
for um in ums
]
query = SQL('''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
''')
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
def do_add_submessage(realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
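    """Save a SubMessage (e.g. widget data attached to a message) and
    notify everyone who has a UserMessage row for the parent message."""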
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: str) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event: Dict[str, Any] = {
'type': 'reaction',
'op': op,
'user_id': user_profile.id,
# TODO: We plan to remove this redundant user_dict object once
        # clients are updated to support accessing the user_id field.  See
# https://github.com/zulip/zulip/pull/14711 for details.
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type,
}
# Update the cached message since new reaction is added.
update_to_dict_cache([message])
# Recipients for message update events, including reactions, are
# everyone who got the original message. This means reactions
# won't live-update in preview narrows, but it's the right
# performance tradeoff, since otherwise we'd need to send all
# reactions to public stream messages to every browser for every
# client in the organization, which doesn't scale.
#
# However, to ensure that reactions do live-update for any user
# who has actually participated in reacting to a message, we add a
# "historical" UserMessage row for any user who reacts to message,
# subscribing them to future notifications.
ums = UserMessage.objects.filter(message=message.id)
send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: str, emoji_code: str, reaction_type: str) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
try:
reaction.save()
except django.db.utils.IntegrityError: # nocoverage
# This can happen when a race results in the check in views
# code not catching an attempt to double-add a reaction, or
# perhaps if the emoji_name/emoji_code mapping is busted.
raise JsonableError(_("Reaction already exists."))
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: str, reaction_type: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm,
sender: UserProfile,
recipient_user_profiles: List[UserProfile],
operator: str) -> None:
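    """Send a `typing` event with the given operator ('start' or 'stop')
    to the active users among the recipients."""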
sender_dict = {'user_id': sender.id, 'email': sender.email}
# Include a list of recipients in the event body to help identify where the typing is happening
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
type='typing',
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
# Only deliver the notification to active user recipients
user_ids_to_notify = [
user.id
for user in recipient_user_profiles
if user.is_active
]
send_event(realm, event, user_ids_to_notify)
# check_send_typing_notification:
# Validates the typing notification request and sends it to the recipients.
def check_send_typing_notification(sender: UserProfile,
user_ids: List[int],
operator: str) -> None:
realm = sender.realm
if len(user_ids) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
    ''' The next chunk of code will go away when we upgrade old mobile
        users away from versions of the mobile app that send emails
        rather than user IDs.  For the small number of very outdated
        mobile clients, we do double work here in terms of fetching
        users, but this structure reduces lots of other unnecessary
        duplicated code and will make it convenient to mostly delete
        code when we desupport old versions of the app.'''
if sender.id not in user_ids:
user_ids.append(sender.id)
# If any of the user_ids being sent in are invalid, we will
# just reject the whole request, since a partial list of user_ids
# can create confusion related to huddles. Plus it's a good
# sign that a client is confused (or possibly even malicious) if
# we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
            # We include cross-realm bots as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(
user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def ensure_stream(realm: Realm,
stream_name: str,
invite_only: bool=False,
stream_description: str="",
acting_user: Optional[UserProfile]=None) -> Stream:
return create_stream_if_needed(realm, stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user)[0]
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
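    """Resolve a list of recipient profiles into a Recipient object: a
    personal Recipient for 1:1 messages, or a huddle Recipient
    (including the sender) for group private messages."""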
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {}
for user_profile in recipient_profiles:
recipient_profiles_map[user_profile.id] = user_profile
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
del recipient_profiles_map[sender.id]
assert len(recipient_profiles_map) != 0
if len(recipient_profiles_map) == 1:
user_profile = list(recipient_profiles_map.values())[0]
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
    user_ids: Set[int] = set(recipient_profiles_map)
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
sender: UserProfile,
allow_deactivated: bool=False) -> Sequence[UserProfile]:
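    """Check that all the recipients can receive a private message from
    the sender: every recipient must be active (or an allowed mirror
    dummy), and everyone except cross-realm bots must be in the
    sender's realm."""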
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy and
not allow_deactivated) or user_profile.realm.deactivated:
raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email))
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(user_profiles, sender,
allow_deactivated=allow_deactivated)
return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
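    """For mirrored messages (e.g. from the zephyr mirror), return the
    id of an identical already-sent message, or None if there is no
    duplicate."""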
if message.recipient.type == Recipient.HUDDLE:
# For huddle messages, we use a 10-second window because the
# timestamps aren't guaranteed to actually match between two
# copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
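    """Parse the stream indicated by an API request, which may be a raw
    stream name, a JSON-encoded stream name, or a stream id."""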
# Users can pass stream name as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
try:
data = ujson.loads(s)
except (ValueError, TypeError):
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
try:
data = ujson.loads(s)
except (ValueError, TypeError):
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
topic: str, body: str, realm: Optional[Realm]=None) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: str) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
# check_send_message:
# Returns the id of the sent message. Has same argspec as check_message.
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str, realm: Optional[Realm]=None,
forged: bool=False, forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id,
widget_content)
return do_send_messages([message])[0]
def check_schedule_message(sender: UserProfile, client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str], message_content: str,
delivery_type: str, deliver_at: datetime.datetime,
realm: Optional[Realm]=None,
forwarder_user_profile: Optional[UserProfile]=None,
) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm=realm,
forwarder_user_profile=forwarder_user_profile)
message['deliver_at'] = deliver_at
message['delivery_type'] = delivery_type
recipient = message['message'].recipient
if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and
recipient.type_id != sender.id)):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([message])[0]
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
))
    for char in group_name:
        if ord(char) == 0:
raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
realm: Realm,
content: str) -> None:
"""
Sends a PM error notification to a bot's owner if one hasn't already
been sent in the last 5 minutes.
"""
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
# Don't send these notifications for cross-realm bot messages
# (e.g. from EMAIL_GATEWAY_BOT) since the owner for
# EMAIL_GATEWAY_BOT is probably the server administrator, not
# the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
    # PMs on a misconfigured integration, re-using the
    # UserProfile.last_reminder field, which is otherwise unused for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str]=None,
stream_id: Optional[int]=None) -> None:
"""If a bot sends a message to a stream that doesn't exist or has no
subscribers, sends a notification to the bot owner (if not a
cross-realm bot) so that the owner can correct the issue."""
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _("Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID.").format(**arg_dict)
else:
                    assert stream_name is not None
content = _("Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it.").format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _("Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers.").format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
sender: UserProfile) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
sender: UserProfile) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
user_profiles: Sequence[UserProfile]) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns a message dict ready for sending with do_send_messages on success;
# raises JsonableError on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> Dict[str, Any]:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
recipient = stream.recipient
# This will raise JsonableError if there are problems.
if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT:
access_stream_for_send_message(
sender=sender,
stream=stream,
forwarder_user_profile=forwarder_user_profile)
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
check_private_message_policy(realm, sender, user_profiles)
# API Super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles,
forwarded_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
if widget_content is not None:
try:
widget_content = ujson.loads(widget_content)
except Exception:
raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
try:
check_widget_content(widget_content)
except ValidationError as error:
raise JsonableError(_('Widgets: {error_msg}').format(
error_msg=error.message,
))
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm,
'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str) -> Optional[Dict[str, Any]]:
"""
    Creates a message object and checks it, but doesn't send it or save it to the database.
The internal function that calls this can therefore batch send a bunch of created
messages together as one database query.
Call do_send_messages with a list of the return values of this method.
"""
    # Truncate the content rather than rejecting overly long messages outright.
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
# If we have a stream name, and the stream doesn't exist, we
# create it here (though this code path should probably be removed
# eventually, moving that responsibility to the caller). If
# addressee.stream_name() is None (i.e. we're sending to a stream
# by ID), we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
return None
def internal_prep_stream_message(
realm: Realm, sender: UserProfile,
stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[Dict[str, Any]]:
"""
See _internal_prep_message for details of how this works.
"""
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[int]:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
realm: Realm,
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool=False) -> Optional[int]:
message = internal_prep_stream_message(
realm, sender, stream,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message], email_gateway=email_gateway)
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm, sender, stream_name,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
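    """Pick a color for a new stream subscription, preferring palette
    colors the user isn't already using and cycling through the
    palette once every color has been used."""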
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
""" Validates whether the user can view the subscribers of a stream. Raises a JsonableError if:
* The user and the stream are in different realms
* The realm is MIT and the stream is not invite only.
* The stream is invite only, requesting_user is passed, and that user
does not subscribe to the stream.
"""
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id))
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
"""Helper for validate_user_access_to_subscribers that doesn't require
a full stream object. This function is a bit hard to read,
because it is carefully optimized for performance in the two code
paths we call it from:
* In `bulk_get_subscriber_user_ids`, we already know whether the
user was subscribed via `sub_dict`, and so we want to avoid a
database query at all (especially since it calls this in a loop);
* In `validate_user_access_to_subscribers`, we want to only check
if the user is subscribed when we absolutely have to, since it
costs a database query.
The `check_user_subscribed` argument is a function that reports
whether the user is subscribed to the stream.
Note also that we raise a ValidationError in cases where the
caller is doing the wrong thing (maybe these should be
AssertionErrors), and JsonableError for 400 type errors.
"""
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Guest users can access subscribed public stream's subscribers
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
        # We could put an AssertionError here; in theory, we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_recipient.populate_with(stream_id=stream_dict["id"],
recipient_id=stream_dict["recipient_id"])
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: sub_dict[stream_dict["id"]],
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
'''
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
'''
query = SQL('''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
''')
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
'''
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
'''
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
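# Illustrative shape of the result above (hypothetical ids): a dict mapping
# every input stream id to the active subscriber ids we may share, e.g.
# {42: [7, 9, 12], 43: []}; streams the user may not access keep an empty
# list rather than being omitted.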
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Build a query to get the subscribers list for a stream, raising a JsonableError if:
'realm' is optional in stream.
The caller can refine this query with select_related(), values(), etc. depending
on whether it wants objects or just certain fields
"""
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True,
)
return subscriptions
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
recent_traffic: Dict[int, int],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
sub_dicts = []
for (subscription, stream) in sub_pairs:
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
if field_name == "active":
# Skip the "active" field, it's implied by context
continue
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict['in_home_view'] = not subscription.is_muted
sub_dict['email_address'] = encode_email_address(stream, show_sender=True)
sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic)
sub_dict['subscribers'] = stream_user_ids(stream)
sub_dicts.append(sub_dict)
# Send a notification to the user who subscribed.
event = dict(type="subscription", op="add",
subscriptions=sub_dicts)
send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
'''
altered_user_ids is the user_ids that we are adding/removing
subscribed_user_ids is the already-subscribed user_ids
Based on stream policy, we notify the correct bystanders, while
not notifying altered_users (who get subscribers via another event)
'''
if stream.invite_only:
# PRIVATE STREAMS
# Realm admins can access all private stream subscribers. Send them an
# event even if they aren't subscribed to stream.
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
user_ids_to_notify = []
user_ids_to_notify.extend(realm_admin_ids)
user_ids_to_notify.extend(subscribed_user_ids)
return set(user_ids_to_notify) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
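# Worked example of the set arithmetic above (hypothetical ids): for a
# public stream whose active non-guest users are {1, 2, 3, 4}, subscribing
# users {2, 3} notifies the bystanders {1, 4}; the altered users {2, 3}
# learn about the change via their own subscription add/remove event.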
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list)
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max('id'))['id__max']
if last_id is None:
# During initial realm creation, there might be 0 messages in
# the database; in that case, the `aggregate` query returns
# None. Since we want an int for "beginning of time", use -1.
last_id = -1
return last_id
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str]={},
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams}
    recipient_ids: List[int] = list(recipients_map.values())
stream_map: Dict[int, Stream] = {}
for stream in streams:
stream_map[recipients_map[stream.id]] = stream
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
realm = users[0].realm
already_subscribed: List[Tuple[UserProfile, Stream]] = []
subs_to_activate: List[Tuple[Subscription, Stream]] = []
new_subs: List[Tuple[UserProfile, int, Stream]] = []
for user_profile in users:
needs_new_sub: Set[int] = set(recipient_ids)
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
# Mark the sub as active, without saving, so that
# pick_color will consider this to be an active
# subscription when picking colors
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add: List[Tuple[Subscription, Stream]] = []
for (user_profile, recipient_id, stream) in new_subs:
if stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
    # TODO: XXX: This transaction really needs to be done at the serializable
    # transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time))
    # Now that we have generated all the log objects, we can do a bulk insert.
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event: Dict[str, object] = dict(
type="stream",
op="occupy",
streams=[stream.to_dict() for stream in new_occupied_streams],
)
send_event(realm, event, active_user_ids(realm.id))
    # Notify all existing users on streams that users have joined.
    # First, get all users subscribed to the streams that we care about.
    # We fetch all subscription information upfront, as it's used throughout
    # the following code and we want to minimize DB queries.
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list)
new_streams: Set[Tuple[int, int]] = set()
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
if not stream.is_public():
            # Users newly added to invite-only streams need a `create`
            # notification, since they need the stream to exist before
            # they get the "subscribe" notification for it.
# Realm admins already have all created private streams.
realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
user.id not in realm_admin_ids]
send_stream_creation_event(stream, new_users_ids)
stream_ids = {stream.id for stream in streams}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
    # The second batch is events notifying the users themselves that
    # they were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
recent_traffic)
    # The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
stream_id=stream.id,
user_id=new_user_id)
send_event(realm, event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
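# Illustrative shape of the SubT value returned above: a pair of lists,
# ([(user, stream), ...] for subscriptions that were newly created or
# reactivated, [(user, stream), ...] for pairs that were already actively
# subscribed and are therefore unchanged).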
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path('audio/notification_sounds')
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if '.' in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == '.ogg':
available_notification_sounds.append(root)
return available_notification_sounds
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializable
    # transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
        ).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time))
    # Now that we have generated all the log objects, we can do a bulk insert.
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'client_id': acting_client.id,
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def send_peer_remove_event(stream: Stream) -> None:
if stream.is_in_zephyr_realm and not stream.invite_only:
return
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
stream_id=stream.id,
user_id=removed_user.id)
send_event(our_realm, event, peer_user_ids)
for stream in streams:
send_peer_remove_event(stream=stream)
    new_vacant_streams = list(set(occupied_streams_before) - set(occupied_streams_after))
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(our_realm, event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
stream: Stream, property_name: str, value: Any,
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
# For this property, is_muted is used in the database, but
# in_home_view in the API, since we haven't migrated the events
# API to the new name yet.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
log_subscription_property_change(user_profile.email, stream.name,
database_property_name, database_value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=event_property_name,
value=event_value,
stream_id=stream.id,
name=stream.name)
send_event(user_profile.realm, event, [user_profile.id])
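# Illustrative mapping for the legacy property above (hypothetical call):
# do_change_subscription_property(user, sub, stream, "in_home_view", False)
# stores is_muted=True in the database but reports the event with property
# "in_home_view" and value False; conversely, passing "is_muted" stores it
# directly and reports the inverted value under "in_home_view".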
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: str,
acting_user: Optional[UserProfile]) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time, extra_data=old_name)
payload = dict(user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> str:
"""Verifies that the user's proposed full name is valid. The caller
    is responsible for checking permissions. Returns the new
full name, which may differ from what was passed in (because this
function strips whitespace)."""
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
# Our web app will try to patch full_name even if the user didn't
# modify the name in the form. We just silently ignore those
# situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time)
update_users = bot_owner_user_ids(user_profile)
    # For realm admins, we send an update event rather than delete/add
    # events, since an admin's bot_data already contains all the bots
    # and none of them should be removed and re-added.
    # Delete the bot from the previous owner's bot data.
if previous_owner and not previous_owner.is_realm_admin:
send_event(user_profile.realm,
dict(type='realm_bot',
op="delete",
bot=dict(
user_id=user_profile.id,
)),
{previous_owner.id})
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id})
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
update_users)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event: Dict[str, Any] = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
api_key=new_api_key,
)),
bot_owner_user_ids(user_profile))
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
# Even clients using client_gravatar don't need the email,
# since we're sending the URL anyway.
user_id=user_profile.id,
)
send_event(user_profile.realm,
dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str,
skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={'avatar_source': avatar_source},
event_time=event_time, acting_user=acting_user)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(realm,
dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm, event_time=timezone_now(),
acting_user=acting_user)
event = dict(type='realm',
op='update_dict',
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night))
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=['plan_type'])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm, event_time=timezone_now(),
extra_data={'old_value': old_value, 'new_value': plan_type})
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
send_event(realm, event, active_user_ids(realm.id))
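# For example (values per the Realm constants referenced above), moving a
# realm to Realm.LIMITED restores the server-default daily invite cap,
# caps visible history at MESSAGE_VISIBILITY_LIMITED messages, shrinks the
# upload quota to UPLOAD_QUOTA_LIMITED, and then recomputes
# first_visible_message_id to apply the new limit.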
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
event = dict(type="realm_user", op="update",
person=dict(user_id=user_profile.id, role=user_profile.role))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None:
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
history_public_to_subscribers: Optional[bool]=None) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
stream.is_web_public = is_web_public
stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=['stream_post_policy'])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
new_name: str,
user_profile: UserProfile,
log: bool=True) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
    # clearer than trying to set them. display_recipient is the
    # out-of-date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT)
with override_language(stream.realm.default_language):
internal_send_stream_message(
stream.realm,
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format(
user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
# Even though the token doesn't change, the web client needs to update the
# email forwarding address to display the correctly-escaped new name.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=['description', 'rendered_description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None:
stream.message_retention_days = message_retention_days
stream.save(update_fields=['message_retention_days'])
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
emails_restricted_to_domains: Optional[bool]=None) -> Realm:
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
    # Create streams once the Realm object has been saved
notifications_stream = ensure_stream(
realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None)
realm.notifications_stream = notifications_stream
# With the current initial streams situation, the only public
# stream is the notifications_stream.
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.", acting_user=None)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
# Log the event
log_event({"type": "realm_created",
"string_id": string_id,
"emails_restricted_to_domains": emails_restricted_to_domains})
sender = get_system_bot(settings.NOTIFICATION_BOT)
admin_realm = sender.realm
# Send a notification to the admin realm
with override_language(admin_realm.default_language):
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist: # nocoverage
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it just to send this message; do nothing.
pass
return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
value: Union[bool, int, str], log: bool=True) -> None:
"""Takes in a UserProfile object, the name of a global notification
preference to update, and the value to update to
"""
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
f'Cannot update {name}: {value} is not an instance of {notification_setting_type}')
setattr(user_profile, name, value)
# Disabling digest emails should clear a user's email queue
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event['language_name'] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group {}').format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
description: str, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name))
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name))
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
if stream in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: str) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: str) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related().filter(realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
# This code isn't perfect, because with various races we might end
# up creating two overlapping intervals, but that shouldn't happen
# often, and can be corrected for in post-processing
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
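# Worked example of the merge rule above (hypothetical times, assuming a
# MIN_INTERVAL_LENGTH of 15 minutes): with a stored interval of
# [10:00, 10:20], a heartbeat at log_time 10:15 yields an effective new
# interval of [10:15, 10:30]; since 10:15 falls inside the old interval,
# we extend it in place to [10:00, 10:30] instead of creating a new row.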
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
client_id: int,
query: str,
count: int,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
defaults={'last_visit': log_time, 'count': count})
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
    # The web app reports a client as 'website', while the desktop app
    # reports itself as ZulipDesktop due to its custom user agent. We
    # want both to count as web users, so we alias ZulipDesktop to
    # 'website'.
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        defaults=defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
# If an object was created, it has already been saved.
#
# We suppress changes from ACTIVE to IDLE before stale_status is reached;
# this protects us from the user having two clients open: one active, the
# other idle. Without this check, we would constantly toggle their status
# between the two states.
    if not created and (stale_status or was_idle or status == presence.status):
# The following block attempts to only update the "status"
# field in the event that it actually changed. This is
# important to avoid flushing the UserPresence cache when the
# data it would return to a client hasn't actually changed
# (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
# user without delay. Note that we won't send an update here for a
# timestamp update, because we rely on the browser to ping us every 50
# seconds for realm-wide status updates, and those updates should have
# recent timestamps, which means the browser won't think active users
# have gone idle. If we were more aggressive in this function about
# sending timestamp updates, we could eliminate the ping responses, but
# that's not a high priority for now, considering that most of our non-MIT
# realms are pretty small.
send_presence_changed(user_profile, presence)
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
status: int, new_user_input: bool) -> None:
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int) -> None:
if away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
)
event = dict(
type='user_status',
user_id=user_profile.id,
)
if away is not None:
event['away'] = away
if status_text is not None:
event['status_text'] = status_text
send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event('bankruptcy')
# First, we clear mobile push notifications. This is safer in the
# event that the below logic times out and we're killed.
all_push_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list("message_id", flat=True)[0:10000]
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
def do_mark_stream_messages_as_read(user_profile: UserProfile,
client: Client,
stream: Stream,
topic_name: Optional[str]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
recipient = stream.recipient
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
def do_update_mobile_push_notification(message: Message,
prior_mention_user_ids: Set[int],
stream_push_user_ids: Set[int]) -> None:
# Called during the message edit code path to remove mobile push
# notifications for users who are no longer mentioned following
# the edit. See #15428 for details.
#
# A perfect implementation would also support updating the message
# in a sent notification if a message was edited to mention a
    # group rather than a user (or vice versa), though it is likely
# not worth the effort to do such a change.
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int],
message_ids: List[int]) -> None:
if len(message_ids) == 0:
return
# This function supports clearing notifications for several users
# only for the message-edit use case where we'll have a single message_id.
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list('user_profile_id', 'message_id'))
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish("missedmessage_mobile_notifications", {
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
})
def do_update_message_flags(user_profile: UserProfile,
client: Client,
operation: str,
flag: str,
messages: List[int]) -> int:
valid_flags = [item for item in UserMessage.flags
if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
# This next block allows you to star any message, even those you
# didn't receive (e.g. because you're looking at a public stream
# you're not subscribed to, etc.). The problem is that starring
    # is a boolean flag on UserMessage, and UserMessage rows are
# normally created only when you receive a message to support
# searching your personal history. So we need to create one. We
# add UserMessage.flags.historical, so that features that need
# "messages you actually received" can exclude these UserMessages.
if msgs.count() == 0:
        if len(messages) != 1:
raise JsonableError(_("Invalid message(s)"))
if flag != "starred":
raise JsonableError(_("Invalid message(s)"))
# Validate that the user could have read the relevant message
message = access_message(user_profile, messages[0])[0]
# OK, this is a message that you legitimately have access
# to via narrowing to the stream it is on, even though you
# didn't actually receive it. So we create a historical,
# read UserMessage message row for you to star.
UserMessage.objects.create(user_profile=user_profile,
message=message,
flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
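# Illustrative sketch (hypothetical `user`, `client`, and `message_id`):
# starring a message the user may never have received, which exercises the
# historical-UserMessage branch above:
#
#     count = do_update_message_flags(user, client, 'add', 'starred',
#                                     [message_id])
#
# Flags in NON_API_FLAGS or NON_EDITABLE_FLAGS are rejected with JsonableError.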
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
def notify_topic_moved_streams(user_profile: UserProfile,
old_stream: Stream, old_topic: str,
new_stream: Stream, new_topic: Optional[str],
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool) -> None:
# Since moving content between streams is highly disruptive,
    # it's worth adding a couple of tombstone messages showing what
# happened.
sender = get_system_bot(settings.NOTIFICATION_BOT)
if new_topic is None:
new_topic = old_topic
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if send_notification_to_new_thread:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
new_stream.realm, sender, new_stream, new_topic,
_("This topic was moved here from {old_location} by {user}").format(
old_location=old_topic_link, user=user_mention,
),
)
if send_notification_to_old_thread:
with override_language(old_stream.realm.default_language):
# Send a notification to the old stream that the topic was moved.
internal_send_stream_message(
old_stream.realm, sender, old_stream, old_topic,
_("This topic was moved by {user} to {new_location}").format(
user=user_mention, new_location=new_topic_link,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
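# Worked example of the mask check above, with illustrative (not actual) bit
# positions: if mentioned were 1 << 3 and wildcard_mentioned were 1 << 4, a
# row with flags == 0b01001 (read + mentioned) would pass `flags & mask`,
# while flags == 0b00001 (read only) would not.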
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]:
"""Updates the message as stored in the to_dict cache (for serving
messages)."""
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[str],
rendered_content: Optional[str]) -> None:
event: Dict[str, Any] = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id}
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
sender_id: int
recipient_id: int
topic: str
stream_id: int
# We use transaction.atomic to support select_for_update in the attachment codepath.
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message,
new_stream: Optional[Stream], topic_name: Optional[str],
propagate_mode: str, send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int:
"""
The main function for message editing. A message edit event can
modify:
* the message's content (in which case the caller will have
set both content and rendered_content),
* the topic, in which case the caller will have set topic_name
* or both
    With topic edits, propagate_mode determines whether other messages
    also have their topics edited.
"""
timestamp = timezone_now()
message.last_edit_time = timestamp
event: Dict[str, Any] = {
'type': 'update_message',
'user_id': user_profile.id,
'edit_timestamp': datetime_to_timestamp(timestamp),
'message_id': message.id,
}
edit_history_event: Dict[str, Any] = {
'user_id': user_profile.id,
'timestamp': event['edit_timestamp'],
}
changed_messages = [message]
stream_being_edited = None
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
event['stream_name'] = stream_being_edited.name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
assert rendered_content is not None
# mention_data is required if there's a content edit.
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in message.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
message.mentions_user_ids.update(members)
update_user_message_flags(message, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
# doesn't really make sense. We need to send the edit event
# to clients regardless, and a client already had access to
# the original/pre-edit content of the message anyway. That
# setting must be enforced on the client side, and making a
# change here simply complicates the logic for clients parsing
# edit history events.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
# message.has_image and message.has_link will have been
# already updated by markdown rendering in the caller.
message.has_attachment = check_attachment_reference_change(message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if message.mentions_wildcard:
event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
else:
event['wildcard_mention_user_ids'] = []
do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids'])
if topic_name is not None or new_stream is not None:
orig_topic_name = message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = message.recipient.type_id
if new_stream is not None:
assert content is None
assert message.is_stream_message()
assert stream_being_edited is not None
edit_history_event['prev_stream'] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
# When messages are moved from one stream to another, some
# users may lose access to those messages, including guest
# users and users not subscribed to the new stream (if it is a
# private stream). For those users, their experience is as
# though the messages were deleted, and we should send a
# delete_message event to them instead.
subscribers = get_active_subscriptions_for_stream_id(
stream_id).select_related("user_profile")
subs_to_new_stream = list(get_active_subscriptions_for_stream_id(
new_stream.id).select_related("user_profile"))
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
# Get users who aren't subscribed to the new_stream.
subs_losing_usermessages = [
sub for sub in subscribers
if sub.user_profile_id not in new_stream_sub_ids
]
            # Users who can no longer access the message without some action
            # from administrators.
#
# TODO: Extend this list to also contain users losing access
# due to the messages moving to a private stream they are not
# subscribed to.
subs_losing_access = [
sub for sub in subs_losing_usermessages
if sub.user_profile.is_guest
]
ums = ums.exclude(user_profile_id__in=[
sub.user_profile_id for sub in subs_losing_usermessages])
if topic_name is not None:
topic_name = truncate_topic(topic_name)
message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
message_ids = [msg.id for msg in changed_messages]
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in
subs_losing_usermessages],
message_id__in=message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
'message_type': 'stream',
'stream_id': stream_being_edited.id,
'topic': orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
# This does message.save(update_fields=[...])
save_message_for_edit_use_case(message=message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
    # The following block arranges that users who are subscribed to a
    # stream and can see history from before they subscribed get
    # live-updates when old messages are edited (e.g. if the user does
    # a topic edit themselves).
#
# We still don't send an update event to users who are not
# subscribed to this stream and don't have a UserMessage row. This
    # means if a non-subscriber is viewing the narrow, they won't get
    # real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expensive, and also not what we do for
# newly sent messages anyway) and having magical live-updates
# where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the id of users already
            # in the users_to_be_notified list. This is the case where a
            # user both has a UserMessage row and is a current
            # subscriber.
subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids)
# All users that are subscribed to the stream must be
# notified when a message is edited
subscriber_ids = [user.user_profile_id for user in subscribers]
if new_stream is not None:
                # TODO: Guest users don't see the new moved topic
                # unless the breadcrumb message for the new stream is
                # enabled. Excluding these users from receiving this
                # event helps us avoid an error traceback for our
                # clients. We should figure out a way to inform the
                # guest users of this new topic if sending a 'message'
                # event for these messages is not an option.
#
# Don't send this event to guest subs who are not
# subscribers of the old stream but are subscribed to
# the new stream; clients will be confused.
old_stream_unsubbed_guests = [
sub for sub in subs_to_new_stream
if sub.user_profile.is_guest
and sub.user_profile_id not in subscriber_ids
]
subscribers = subscribers.exclude(user_profile_id__in=[
sub.user_profile_id for sub in old_stream_unsubbed_guests])
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_be_notified += list(map(subscriber_info, subscriber_ids))
send_event(user_profile.realm, event, users_to_be_notified)
if (len(changed_messages) > 0 and new_stream is not None and
stream_being_edited is not None):
# Notify users that the topic was moved.
notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
new_stream, topic_name, send_notification_to_old_thread,
send_notification_to_new_thread)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # The messages in a delete_message event all belong to the same topic,
    # or are a single private message, as no other behaviour is possible
    # with the current callers of this method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
# TODO: We should plan to remove `sender_id` here.
event['recipient_id'] = sample_message.recipient_id
event['sender_id'] = sample_message.sender_id
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event['stream_id'] = stream_id
event['topic'] = sample_message.topic_name()
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_notify = list(map(subscriber_info, subscriber_ids))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event['message_type'] = message_type
send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id'))
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS['messages_in_stream:is_bot:day']
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property,
end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values('stream_id').annotate(value=Sum('value'))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
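# Worked example: round_to_2_significant_digits(1234) computes
# round(1234, 2 - 4) == round(1234, -2) == 1200, while a two-digit input is
# returned unchanged (round(87, 0) == 87). Python's round() uses banker's
# rounding, so e.g. round(1250, -2) == 1200.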
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
recent_traffic: Dict[int, int]) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
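# Worked examples with hypothetical traffic numbers: a stream created 100
# days ago with 457 recorded messages yields 457 // 4 == 114, rounded to 110;
# a 10-day-old stream with 30 messages yields 30 * 7 // 10 == 21; a 3-day-old
# stream returns None, being younger than
# STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS.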
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict['is_muted'] = False
stream_dict['color'] = get_next_color()
stream_dict['desktop_notifications'] = True
stream_dict['audible_notifications'] = True
stream_dict['push_notifications'] = True
stream_dict['email_notifications'] = True
stream_dict['pin_to_top'] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id,
stream.date_created,
{})
stream_dict['stream_weekly_traffic'] = stream_weekly_traffic
stream_dict['email_address'] = ''
subscribed.append(stream_dict)
return (subscribed, [], [])
# In general, it's better to avoid using .values() because it makes
# the code pretty ugly, but in this case, it has a significant
# performance impact on load times for users with large numbers of
# subscriptions, so it's worth optimizing.
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
*Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids: Set[int] = set()
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values(
*Stream.API_FIELDS,
# date_created is used as an input for the stream_weekly_traffic computed field.
"date_created",
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
# email_token isn't public to some users with access to
# the stream, so doesn't belong in API_FIELDS.
"email_token")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}
# Add never subscribed streams to streams_subscribed_map
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient,
)
else:
# If we're not including subscribers, always return None,
# which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
# We first construct a dictionary based on the standard Stream
# and Subscription models' API_FIELDS.
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
# Copy Subscription.API_FIELDS except for "active", which is
        # used to determine where to put the field.
for field_name in Subscription.API_FIELDS:
stream_dict[field_name] = sub[field_name]
# Backwards-compatibility for clients that haven't been
# updated for the in_home_view => is_muted API migration.
stream_dict['in_home_view'] = not stream_dict['is_muted']
# Backwards-compatibility for clients that haven't been
# updated for the is_announcement_only -> stream_post_policy
# migration.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
# Add a few computed fields not directly from the data models.
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
stream_dict['email_address'] = encode_email_address_helper(
stream["name"], stream["email_token"], show_sender=True)
# Construct and add subscribers data
subscribers: Optional[List[int]] = subscriber_map[stream["id"]]
        # Important: don't show the subscribers if the stream is invite-only
        # and this user isn't subscribed to it anymore, unless they are a
        # realm administrator.
if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
subscribers = None
# Guest users lose access to subscribers when they are unsubscribed.
if not sub["active"] and user_profile.is_guest:
subscribers = None
if subscribers is not None:
stream_dict['subscribers'] = subscribers
# is_active is represented in this structure by which list we include it in.
is_active = stream_dict.pop("active")
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
else:
never_subscribed_stream_ids = set()
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
# Backwards-compatibility addition of removed field.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
if is_public or user_profile.is_realm_admin:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, _ = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
if include_subscribers:
user_ids = set()
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([
email_dict[user_id] for user_id in sub['subscribers']
])
return (subscribed, unsubscribed)
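# Illustrative sketch (hypothetical `user`): with include_subscribers=True,
# each returned dict's 'subscribers' list is converted from user IDs to
# sorted email addresses:
#
#     subscribed, unsubscribed = gather_subscriptions(user,
#                                                     include_subscribers=True)
#     # subscribed[0]['subscribers'] -> e.g. ['alice@example.com', 'bob@example.com']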
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
'''
    Given a set of active_user_ids, we build up a subset
of those users who fit these criteria:
* They are likely to need notifications (either due
to mentions, alert words, or being PM'ed).
* They are no longer "present" according to the
UserPresence table.
'''
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags: Iterable[str] = user_flags.get(user_id, [])
mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags
private_message = is_pm and user_id != sender_id
alerted = 'has_alert_word' in flags
if mentioned or private_message or alerted:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
    # (and push notifications with
    # user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
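# Illustrative sketch with hypothetical user IDs: returns the sorted subset
# with no recent, non-mobile, active presence:
#
#     idle_ids = filter_presence_idle_user_ids({17, 23, 42})
#     # -> e.g. [17, 42], if only user 23 has an ACTIVE non-ZulipMobile
#     # presence row newer than OFFLINE_THRESHOLD_SECS.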
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> str:
"""
Send the confirmation/welcome e-mail to an invited user.
"""
activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
from_name = f"{referrer.full_name} (via Zulip)"
send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
from_address=FromAddress.tokenized_no_reply_address(),
language=referrer.realm.default_language, context=context,
realm=referrer.realm)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
sent_invitations: bool) -> None:
self._msg: str = msg
self.errors: List[Tuple[str, str, bool]] = errors
self.sent_invitations: bool = sent_invitations
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
'''An upper bound on the number of invites sent in the last `days` days'''
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
'''Discourage using invitation emails as a vector for carrying spam.'''
msg = _("You do not have enough remaining invites. "
"Please contact {email} to have your limit raised. "
"No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
# If a user is on a realm where we've bumped up
# max_invites, then we exempt them from invite limits.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
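# Hedged usage sketch: callers run this check before creating invitations and
# let any InvitationError propagate to the API layer; `realm` and
# `invitee_emails` are hypothetical stand-ins:
#
#     check_invite_limit(realm, len(invitee_emails))  # may raise InvitationError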
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
check_invite_limit(user_profile.realm, len(invitee_emails))
realm = user_profile.realm
if not realm.invite_required:
        # Guard against users joining an open realm just to send spam invitations.
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if (user_profile.date_joined > timezone_now() - min_age
and not user_profile.is_realm_admin):
raise InvitationError(
_("Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."),
[], sent_invitations=False)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == '':
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
    '''
    good_emails are emails that look OK so far,
    but we still need to make sure they don't
    conflict with existing users.
    '''
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
    # We do this here rather than in the invite queue processor since this
    # is used for rate limiting invitations, rather than for keeping track
    # of exactly when invitations were sent.
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as=invite_as,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False))
if not user_profile.is_realm_admin:
# We do not return multiuse invites to non-admin users.
return invites
lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
type=Confirmation.MULTIUSE_INVITE,
date_sent__gte=lowest_datetime)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
invites.append(dict(invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
id=invite.id,
link_url=confirmation_url(confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE),
invited_as=invite.invited_as,
is_multiuse=True))
return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
streams: Sequence[Stream] = []) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
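# Illustrative sketch (hypothetical `admin` and `stream` objects): create a
# multiuse invitation link that subscribes each new user to one stream:
#
#     link = do_create_multiuse_invite_link(
#         admin, PreregistrationUser.INVITE_AS['MEMBER'], streams=[stream])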
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
# Delete both the confirmation objects and the prereg_user object.
# TODO: Probably we actually want to set the confirmation objects
# to a "revoked" status so that we can give the invited user a better
# error message.
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type,
object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
    # These two assertions hold structurally for the caller's code path.
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: str,
author: UserProfile,
image_file: File) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
# The only user-controlled portion of 'emoji_file_name' is an extension,
    # which cannot contain '..', '/', or '\', making it difficult to exploit.
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=['file_name'])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
date_muted: Optional[datetime.datetime]=None) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
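# Hedged usage sketch: muting and unmuting are symmetric, except that the
# recipient is only needed when muting; `user` and `stream` are hypothetical:
#
#     do_mute_topic(user, stream, stream.recipient, "noisy topic")
#     do_unmute_topic(user, stream, "noisy topic")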
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
# NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
# RegExp syntax. In addition to JS-compatible syntax, the following features are available:
# * Named groups will be converted to numbered groups automatically
# * Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
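# Illustrative sketch of a linkifier with a named group, per the NOTE above;
# the pattern and URL template are examples, not shipped defaults:
#
#     filter_id = do_add_realm_filter(
#         realm, r"#(?P<id>[0-9]+)",
#         "https://github.com/zulip/zulip/issues/%(id)s")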
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
# We may eventually use memcached to speed this up, but the DB is fast.
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> RealmDomain:
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
# If this was the last realm domain, we mark the realm as no
# longer restricted to domain, because the feature doesn't do
# anything if there are no domains, and this is probably less
# confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
""" Get streams with subscribers """
exists_expression = Exists(
Subscription.objects.filter(active=True, user_profile__is_active=True,
user_profile__realm=realm,
recipient_id=OuterRef('recipient_id')),
)
occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \
.annotate(occupied=exists_expression).filter(occupied=True)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile, include_public: bool=True,
include_subscribed: bool=True, include_all_active: bool=False,
include_default: bool=False, include_owner_subscribed: bool=False,
) -> List[Dict[str, Any]]:
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if include_all_active:
streams = Stream.get_client_data(query)
else:
        # We construct a query as the OR (|) of the various sources
        # this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
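# Hedged usage sketch (hypothetical `user`): a typical request for the
# streams a user can see, annotated with which ones are realm defaults:
#
#     streams = do_get_streams(user, include_public=True,
#                              include_subscribed=True, include_default=True)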
def notify_attachment_update(user_profile: UserProfile, op: str,
attachment_dict: Dict[str, Any]) -> None:
event = {
'type': 'attachment',
'op': op,
'attachment': attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
# Technically, there are 2 cases here:
            # * The user put something in their message that has the form
            # of an upload, but doesn't correspond to a file that actually
            # exists. validate_attachment_request will return None.
# * The user is trying to send a link to a file they don't have permission to
# access themselves. validate_attachment_request will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id, path_id, message.id,
)
continue
claimed = True
attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
    # For an unsaved message edit (message.* has been updated, but not
# saved to the database), adjusts Attachment data to correspond to
# the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(message.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
op=operation,
fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
field_subtype: str) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
field = CustomProfileField(realm=realm, name=field_data['name'],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data['hint'],
field_data=ujson.dumps(dict(subtype=field_subtype)))
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
hint: str='',
field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
"""
Deleting a field will also delete the user profile data
associated with it in CustomProfileFieldValue model.
"""
field.delete()
notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: str, hint: str='',
field_data: Optional[ProfileFieldData]=None) -> None:
field.name = name
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    order_mapping = {field_id: index for index, field_id in enumerate(order)}
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for field in fields:
field.order = order_mapping[field.id]
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'update')
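# Worked example: for a realm whose field IDs are {7, 13, 42}, calling
# try_reorder_realm_custom_profile_fields(realm, [42, 7, 13]) assigns order 0
# to field 42, 1 to field 7, and 2 to field 13; an `order` list missing any
# of those IDs raises JsonableError.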
def notify_user_update_custom_profile_data(user_profile: UserProfile,
field: Dict[str, Union[int, str, List[int], None]]) -> None:
data = dict(id=field['id'])
if field['type'] == CustomProfileField.USER:
data["value"] = ujson.dumps(field['value'])
else:
data['value'] = field['value']
if field['rendered_value']:
data['rendered_value'] = field['rendered_value']
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]],
) -> None:
with transaction.atomic():
for field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile,
field_id=field['id'])
if not created and field_value.value == str(field['value']):
# If the field value isn't actually being changed to a different one,
# and always_notify is disabled, we have nothing to do here for this field.
# Note: field_value.value is a TextField() so we need to cast field['value']
# to a string for the comparison in this if.
continue
field_value.value = field['value']
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(str(field['value']))
field_value.save(update_fields=['value', 'rendered_value'])
else:
field_value.save(update_fields=['value'])
notify_user_update_custom_profile_data(user_profile, {
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
field_value.delete()
notify_user_update_custom_profile_data(user_profile, {'id': field_id,
'value': None,
'rendered_value': None,
'type': field.field_type})
except CustomProfileField.DoesNotExist:
raise JsonableError(_('Field id {id} not found.').format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
description: str) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=['name'])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
service_interface: int,
service_payload_url: str) -> None:
# TODO: First service is chosen because currently one bot can only have one service.
# Update this once multiple services are supported.
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(base_url=service.base_url,
interface=service.interface,
token=service.token)],
),
),
bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [{'config_data': get_bot_config(user_profile),
'service_name': services[0].name,
}]
# A ConfigError just means that there are no config entries for user_profile.
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif bot_type == UserProfile.EMBEDDED_BOT:
if bot_profile_id in embedded_bot_configs.keys():
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [{'config_data': bot_config,
'service_name': services[0].name,
}]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'bot_type': botdict['bot_type'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner_id': botdict['bot_owner__id'],
'avatar_url': avatar_url_from_dict(botdict),
'services': services_by_ids[botdict['id']],
}
for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
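# Illustrative sketch (not part of the original module): membership
# changes are applied in bulk, so each call below produces a single
# add_members or remove_members event rather than one event per user.
def _example_change_group_membership(user_group: UserGroup,
                                     joining: List[UserProfile],
                                     leaving: List[UserProfile]) -> None:
    bulk_add_members_to_user_group(user_group, joining)
    remove_members_from_user_group(user_group, leaving)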
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
context = {'confirmation_url': url,
'realm_uri': realm.uri,
'realm_name': realm.name}
language = realm.default_language
send_email_to_admins(
'zerver/emails/realm_reactivation', realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language, context=context)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm, dict(type="has_zoom_token", value=token is not None), [user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
# In the future, we may want to send this event to all realm admins.
event = dict(type='realm_export',
exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
# Give mypy a hint so it knows `ujson.loads`
# isn't being passed an `Optional[str]`.
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = ujson.loads(export_extra_data)
export_path = export_data.get('export_path')
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update({'deleted_timestamp': timezone_now().timestamp()})
export.extra_data = ujson.dumps(export_data)
export.save(update_fields=['extra_data'])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream,
topic_name: str) -> List[Message]:
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
| 43.178697
| 133
| 0.659801
|
import datetime
import itertools
import logging
import os
import platform
import time
from collections import defaultdict
from operator import itemgetter
from typing import (
AbstractSet,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import django.db.utils
import ujson
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import IntegrityError, connection, transaction
from django.db.models import Count, Exists, F, Max, OuterRef, Q, Sum
from django.db.models.query import QuerySet
from django.utils.html import escape
from django.utils.timezone import now as timezone_now
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from psycopg2.extras import execute_values
from psycopg2.sql import SQL
from typing_extensions import TypedDict
from analytics.lib.counts import COUNT_STATS, RealmCount, do_increment_logging_stat
from analytics.models import StreamCount
from confirmation import settings as confirmation_settings
from confirmation.models import (
Confirmation,
confirmation_url,
create_confirmation_link,
generate_key,
)
from zerver.decorator import statsd_increment
from zerver.lib import retention as retention
from zerver.lib.addressee import Addressee
from zerver.lib.alert_words import (
add_user_alert_words,
get_alert_word_automaton,
remove_user_alert_words,
)
from zerver.lib.avatar import avatar_url, avatar_url_from_dict
from zerver.lib.bot_config import ConfigError, get_bot_config, get_bot_configs, set_bot_config
from zerver.lib.bulk_create import bulk_create_users
from zerver.lib.cache import (
bot_dict_fields,
cache_delete,
cache_delete_many,
cache_set,
cache_set_many,
cache_with_key,
delete_user_profile_caches,
display_recipient_cache_key,
flush_user_profile,
to_dict_cache_key_id,
user_profile_by_api_key_cache_key,
user_profile_by_email_cache_key,
)
from zerver.lib.context_managers import lockfile
from zerver.lib.create_user import create_user, get_display_email_address
from zerver.lib.email_mirror_helpers import encode_email_address, encode_email_address_helper
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.email_validation import (
email_reserved_for_system_bots_error,
get_existing_user_errors,
get_realm_email_validator,
validate_email_is_valid,
)
from zerver.lib.emoji import get_emoji_file_name
from zerver.lib.exceptions import (
ErrorCode,
JsonableError,
MarkdownRenderingException,
StreamDoesNotExistError,
StreamWithIDDoesNotExistError,
)
from zerver.lib.export import get_realm_exports_serialized
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.i18n import get_language_name
from zerver.lib.markdown import MentionData, topic_links
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import (
MessageDict,
access_message,
render_markdown,
truncate_body,
truncate_topic,
update_first_visible_message_id,
)
from zerver.lib.pysa import mark_sanitized
from zerver.lib.queue import queue_json_publish
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.realm_logo import get_realm_logo_data
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.send_email import (
FromAddress,
clear_scheduled_emails,
clear_scheduled_invitation_emails,
send_email,
send_email_to_admins,
)
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.sessions import delete_user_sessions
from zerver.lib.storage import static_path
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
get_active_subscriptions_for_stream_ids,
get_bulk_stream_subscriber_info,
get_stream_subscriptions_for_user,
get_stream_subscriptions_for_users,
get_subscribed_stream_ids_for_user,
num_subscribers_for_stream_id,
)
from zerver.lib.stream_topic import StreamTopicTarget
from zerver.lib.streams import (
access_stream_for_send_message,
check_stream_name,
create_stream_if_needed,
get_default_value_for_history_public_to_subscribers,
render_stream_description,
send_stream_creation_event,
subscribed_to_stream,
)
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.topic import (
LEGACY_PREV_TOPIC,
ORIG_TOPIC,
TOPIC_LINKS,
TOPIC_NAME,
filter_by_exact_message_topic,
filter_by_topic_name_via_message,
save_message_for_edit_use_case,
update_messages_for_topic_edit,
)
from zerver.lib.topic_mutes import add_topic_mute, get_topic_mutes, remove_topic_mute
from zerver.lib.types import ProfileFieldData
from zerver.lib.upload import (
claim_attachment,
delete_avatar_image,
delete_export_tarball,
delete_message_image,
upload_emoji_image,
)
from zerver.lib.user_groups import access_user_group_by_id, create_user_group
from zerver.lib.user_status import update_user_status
from zerver.lib.users import (
check_bot_name_available,
check_full_name,
format_user_row,
get_api_key,
user_profile_to_user_row,
)
from zerver.lib.utils import generate_api_key, log_statsd_event
from zerver.lib.validator import check_widget_content
from zerver.lib.widget import do_widget_post_save_actions
from zerver.models import (
MAX_MESSAGE_LENGTH,
Attachment,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
DefaultStreamGroup,
EmailChangeStatus,
Message,
MultiuseInvite,
PreregistrationUser,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
ScheduledEmail,
ScheduledMessage,
Service,
Stream,
SubMessage,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
UserStatus,
active_non_guest_user_ids,
active_user_ids,
custom_profile_fields_for_realm,
filter_to_valid_prereg_users,
get_active_streams,
get_bot_dicts_in_realm,
get_bot_services,
get_client,
get_default_stream_groups,
get_huddle_recipient,
get_huddle_user_ids,
get_old_unclaimed_attachments,
get_stream,
get_stream_by_id_in_realm,
get_stream_cache_key,
get_system_bot,
get_user_by_delivery_email,
get_user_by_id_in_realm_including_cross_realm,
get_user_profile_by_id,
is_cross_realm_bot_email,
query_for_ids,
realm_filters_for_realm,
stream_name_in_use,
validate_attachment_request,
)
from zerver.tornado.event_queue import send_event
if settings.BILLING_ENABLED:
from corporate.lib.stripe import downgrade_now, update_license_ledger_if_needed
SizedTextIterable = Union[Sequence[str], AbstractSet[str]]
ONBOARDING_TOTAL_MESSAGES = 1000
ONBOARDING_UNREAD_MESSAGES = 20
STREAM_ASSIGNMENT_COLORS = [
"#76ce90", "#fae589", "#a6c7e5", "#e79ab5",
"#bfd56f", "#f4ae55", "#b0a5fd", "#addfe5",
"#f5ce6e", "#c2726a", "#94c849", "#bd86e5",
"#ee7e4a", "#a6dcbf", "#95a5fd", "#53a063",
"#9987e1", "#e4523d", "#c2c2c2", "#4f8de4",
"#c6a8ad", "#e7cc4d", "#c8bebf", "#a47462"]
def subscriber_info(user_id: int) -> Dict[str, Any]:
return {
'id': user_id,
'flags': ['read']
}
def log_event(event: MutableMapping[str, Any]) -> None:
if settings.EVENT_LOG_DIR is None:
return
if "timestamp" not in event:
event["timestamp"] = time.time()
if not os.path.exists(settings.EVENT_LOG_DIR):
os.mkdir(settings.EVENT_LOG_DIR)
template = os.path.join(settings.EVENT_LOG_DIR,
'%s.' + platform.node() +
timezone_now().strftime('.%Y-%m-%d'))
with lockfile(template % ('lock',)):
with open(template % ('events',), 'a') as log:
log.write(ujson.dumps(event) + '\n')
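# Illustrative sketch (not part of the original module): log_event is
# fire-and-forget; it adds a timestamp if the caller omits one and is a
# no-op when EVENT_LOG_DIR is unset. The event shape is hypothetical.
def _example_log_event(realm: Realm) -> None:
    log_event({'type': 'example_event', 'realm_id': realm.id})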
def can_access_stream_user_ids(stream: Stream) -> Set[int]:
if stream.is_public():
return public_stream_user_ids(stream)
else:
return private_stream_user_ids(
stream.id) | {user.id for user in stream.realm.get_admin_users_and_bots()}
def private_stream_user_ids(stream_id: int) -> Set[int]:
# TODO: Find similar queries elsewhere and de-duplicate this code.
subscriptions = get_active_subscriptions_for_stream_id(stream_id)
return {sub['user_profile_id'] for sub in subscriptions.values('user_profile_id')}
def public_stream_user_ids(stream: Stream) -> Set[int]:
guest_subscriptions = get_active_subscriptions_for_stream_id(
stream.id).filter(user_profile__role=UserProfile.ROLE_GUEST)
guest_subscriptions = {sub['user_profile_id'] for sub in guest_subscriptions.values('user_profile_id')}
return set(active_non_guest_user_ids(stream.realm_id)) | guest_subscriptions
def bot_owner_user_ids(user_profile: UserProfile) -> Set[int]:
is_private_bot = (
user_profile.default_sending_stream and
user_profile.default_sending_stream.invite_only or
user_profile.default_events_register_stream and
user_profile.default_events_register_stream.invite_only)
if is_private_bot:
return {user_profile.bot_owner_id}
else:
users = {user.id for user in user_profile.realm.get_human_admin_users()}
users.add(user_profile.bot_owner_id)
return users
def realm_user_count(realm: Realm) -> int:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False).count()
def realm_user_count_by_role(realm: Realm) -> Dict[str, Any]:
human_counts = {UserProfile.ROLE_REALM_ADMINISTRATOR: 0,
UserProfile.ROLE_REALM_OWNER: 0,
UserProfile.ROLE_MEMBER: 0,
UserProfile.ROLE_GUEST: 0}
for value_dict in list(UserProfile.objects.filter(
realm=realm, is_bot=False, is_active=True).values('role').annotate(Count('role'))):
human_counts[value_dict['role']] = value_dict['role__count']
bot_count = UserProfile.objects.filter(realm=realm, is_bot=True, is_active=True).count()
return {
RealmAuditLog.ROLE_COUNT_HUMANS: human_counts,
RealmAuditLog.ROLE_COUNT_BOTS: bot_count,
}
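# Illustrative sketch (not part of the original module): the returned
# dict keeps per-role human counts separate from a single aggregate bot
# count, so reading one role's count looks like this.
def _example_member_count(realm: Realm) -> int:
    counts = realm_user_count_by_role(realm)
    return counts[RealmAuditLog.ROLE_COUNT_HUMANS][UserProfile.ROLE_MEMBER]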
def get_signups_stream(realm: Realm) -> Stream:
# This one-liner helps us work around a lint rule.
return get_stream("signups", realm)
def notify_new_user(user_profile: UserProfile) -> None:
sender_email = settings.NOTIFICATION_BOT
sender = get_system_bot(sender_email)
user_count = realm_user_count(user_profile.realm)
signup_notifications_stream = user_profile.realm.get_signup_notifications_stream()
# Send notification to realm signup notifications stream if it exists
# Don't send notification for the first user in a realm
if signup_notifications_stream is not None and user_count > 1:
with override_language(user_profile.realm.default_language):
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"@_**{user_profile.full_name}|{user_profile.id}**",
user_count=user_count
)
internal_send_stream_message(
user_profile.realm,
sender,
signup_notifications_stream,
_("signups"),
message
)
admin_realm = sender.realm
try:
signups_stream = get_signups_stream(admin_realm)
with override_language(admin_realm.default_language):
message = _("{user} just signed up for Zulip. (total: {user_count})").format(
user=f"{user_profile.full_name} <`{user_profile.email}`>",
user_count=user_count
)
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
user_profile.realm.display_subdomain,
message
)
except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
pass
def notify_invites_changed(user_profile: UserProfile) -> None:
event = dict(type="invites_changed")
admin_ids = [user.id for user in
user_profile.realm.get_admin_users_and_bots()]
send_event(user_profile.realm, event, admin_ids)
def add_new_user_history(user_profile: UserProfile, streams: Iterable[Stream]) -> None:
one_week_ago = timezone_now() - datetime.timedelta(weeks=1)
recipient_ids = [stream.recipient_id for stream in streams if not stream.invite_only]
recent_messages = Message.objects.filter(recipient_id__in=recipient_ids,
date_sent__gt=one_week_ago).order_by("-id")
message_ids_to_use = list(reversed(recent_messages.values_list(
'id', flat=True)[0:ONBOARDING_TOTAL_MESSAGES]))
if len(message_ids_to_use) == 0:
return
already_ids = set(UserMessage.objects.filter(message_id__in=message_ids_to_use,
user_profile=user_profile).values_list("message_id",
flat=True))
marked_unread = 0
ums_to_create = []
for message_id in reversed(message_ids_to_use):
if message_id in already_ids:
continue
um = UserMessage(user_profile=user_profile, message_id=message_id)
if marked_unread < ONBOARDING_UNREAD_MESSAGES:
marked_unread += 1
else:
um.flags = UserMessage.flags.read
ums_to_create.append(um)
UserMessage.objects.bulk_create(reversed(ums_to_create))
def process_new_human_user(user_profile: UserProfile,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Mapping[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
realm_creation: bool=False) -> None:
mit_beta_user = user_profile.realm.is_zephyr_mirror_realm
if prereg_user is not None:
prereg_user.status = confirmation_settings.STATUS_ACTIVE
prereg_user.save(update_fields=['status'])
streams = prereg_user.streams.all()
acting_user: Optional[UserProfile] = prereg_user.referred_by
else:
streams = []
acting_user = None
if len(streams) == 0:
streams = get_default_subs(user_profile)
for default_stream_group in default_stream_groups:
default_stream_group_streams = default_stream_group.streams.all()
for stream in default_stream_group_streams:
if stream not in streams:
streams.append(stream)
bulk_add_subscriptions(streams, [user_profile], acting_user=acting_user)
add_new_user_history(user_profile, streams)
if not mit_beta_user and prereg_user is not None and prereg_user.referred_by is not None:
# This is a cross-realm private message.
with override_language(prereg_user.referred_by.default_language):
internal_send_private_message(
user_profile.realm,
get_system_bot(settings.NOTIFICATION_BOT),
prereg_user.referred_by,
_("{user} accepted your invitation to join Zulip!").format(user=f"{user_profile.full_name} <`{user_profile.email}`>")
)
# Mark any other PreregistrationUsers that are STATUS_ACTIVE as
# inactive so we can keep track of the PreregistrationUser we
# actually used for analytics
if prereg_user is not None:
PreregistrationUser.objects.filter(
email__iexact=user_profile.delivery_email).exclude(id=prereg_user.id)\
.update(status=confirmation_settings.STATUS_REVOKED)
if prereg_user.referred_by is not None:
notify_invites_changed(user_profile)
else:
PreregistrationUser.objects.filter(email__iexact=user_profile.delivery_email)\
.update(status=confirmation_settings.STATUS_REVOKED)
notify_new_user(user_profile)
# Clear any scheduled invitation emails to prevent them
# from being sent after the user is created.
clear_scheduled_invitation_emails(user_profile.delivery_email)
if user_profile.realm.send_welcome_emails:
enqueue_welcome_emails(user_profile, realm_creation)
    # We have an import loop here; it's intentional, because we want
    # to keep all the onboarding code in zerver/lib/onboarding.py.
from zerver.lib.onboarding import send_initial_pms
send_initial_pms(user_profile)
if newsletter_data is not None:
queue_json_publish(
"signups",
{
'email_address': user_profile.delivery_email,
'user_id': user_profile.id,
'merge_fields': {
'NAME': user_profile.full_name,
'REALM_ID': user_profile.realm_id,
'OPTIN_IP': newsletter_data["IP"],
'OPTIN_TIME': datetime.datetime.isoformat(timezone_now().replace(microsecond=0)),
},
},
lambda event: None)
def notify_created_user(user_profile: UserProfile) -> None:
user_row = user_profile_to_user_row(user_profile)
    person = format_user_row(user_profile.realm, user_profile, user_row,
                             # Since we don't know what the client
                             # supports at this point in the code, we
                             # just assume client_gravatar and
                             # user_avatar_url_field_optional = False :(
                             client_gravatar=False,
                             user_avatar_url_field_optional=False,
                             # We assume there's no custom profile
                             # field data for a new user.
                             custom_profile_field_data={})
event: Dict[str, Any] = dict(type="realm_user", op="add", person=person)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def created_bot_event(user_profile: UserProfile) -> Dict[str, Any]:
def stream_name(stream: Optional[Stream]) -> Optional[str]:
if not stream:
return None
return stream.name
default_sending_stream_name = stream_name(user_profile.default_sending_stream)
default_events_register_stream_name = stream_name(user_profile.default_events_register_stream)
bot = dict(email=user_profile.email,
user_id=user_profile.id,
full_name=user_profile.full_name,
bot_type=user_profile.bot_type,
is_active=user_profile.is_active,
api_key=get_api_key(user_profile),
default_sending_stream=default_sending_stream_name,
default_events_register_stream=default_events_register_stream_name,
default_all_public_streams=user_profile.default_all_public_streams,
avatar_url=avatar_url(user_profile),
services = get_service_dicts_for_bot(user_profile.id),
)
if user_profile.bot_owner is not None:
bot['owner_id'] = user_profile.bot_owner.id
return dict(type="realm_bot", op="add", bot=bot)
def notify_created_bot(user_profile: UserProfile) -> None:
event = created_bot_event(user_profile)
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]], bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def do_create_user(email: str, password: Optional[str], realm: Realm, full_name: str,
bot_type: Optional[int]=None, role: Optional[int]=None,
bot_owner: Optional[UserProfile]=None, tos_version: Optional[str]=None,
timezone: str="", avatar_source: str=UserProfile.AVATAR_FROM_GRAVATAR,
default_sending_stream: Optional[Stream]=None,
default_events_register_stream: Optional[Stream]=None,
default_all_public_streams: Optional[bool]=None,
prereg_user: Optional[PreregistrationUser]=None,
newsletter_data: Optional[Dict[str, str]]=None,
default_stream_groups: Sequence[DefaultStreamGroup]=[],
source_profile: Optional[UserProfile]=None,
realm_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> UserProfile:
user_profile = create_user(email=email, password=password, realm=realm,
full_name=full_name,
role=role, bot_type=bot_type, bot_owner=bot_owner,
tos_version=tos_version, timezone=timezone, avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams,
source_profile=source_profile)
event_time = user_profile.date_joined
if not acting_user:
acting_user = user_profile
RealmAuditLog.objects.create(
realm=user_profile.realm, acting_user=acting_user, modified_user=user_profile,
event_type=RealmAuditLog.USER_CREATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if bot_type is None:
process_new_human_user(user_profile, prereg_user=prereg_user,
newsletter_data=newsletter_data,
default_stream_groups=default_stream_groups,
realm_creation=realm_creation)
return user_profile
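# Illustrative sketch (not part of the original module): a minimal
# do_create_user call; the email, password, and name are hypothetical.
# Leaving bot_type=None routes the account through
# process_new_human_user above.
def _example_create_human_user(realm: Realm) -> UserProfile:
    return do_create_user(
        email='new-user@example.com',
        password='hypothetical-password',
        realm=realm,
        full_name='New User',
    )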
def do_activate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
user_profile.is_active = True
user_profile.is_mirror_dummy = False
user_profile.set_unusable_password()
user_profile.date_joined = timezone_now()
user_profile.tos_version = settings.TOS_VERSION
user_profile.save(update_fields=["is_active", "date_joined", "password",
"is_mirror_dummy", "tos_version"])
event_time = user_profile.date_joined
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
def do_reactivate_user(user_profile: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
user_profile.is_active = True
user_profile.save(update_fields=["is_active"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
notify_created_user(user_profile)
if user_profile.is_bot:
notify_created_bot(user_profile)
def active_humans_in_realm(realm: Realm) -> Sequence[UserProfile]:
return UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
def do_set_realm_property(realm: Realm, name: str, value: Any,
acting_user: Optional[UserProfile] = None) -> None:
property_type = Realm.property_types[name]
assert isinstance(value, property_type), (
f'Cannot update {name}: {value} is not an instance of {property_type}')
old_value = getattr(realm, name)
setattr(realm, name, value)
realm.save(update_fields=[name])
event = dict(
type='realm',
op='update',
property=name,
value=value,
)
send_event(realm, event, active_user_ids(realm.id))
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': name, 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': name, 'value': value}
}))
if name == "email_address_visibility":
if Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE not in [old_value, value]:
            # We use real email addresses on UserProfile.email only if
            # EMAIL_ADDRESS_VISIBILITY_EVERYONE is configured, so
            # changes between other values do not require updating
            # that field, and we can save work and return here.
return
user_profiles = UserProfile.objects.filter(realm=realm, is_bot=False)
for user_profile in user_profiles:
user_profile.email = get_display_email_address(user_profile, realm)
# TODO: Design a bulk event for this or force-reload all clients
send_user_email_update_event(user_profile)
UserProfile.objects.bulk_update(user_profiles, ['email'])
for user_profile in user_profiles:
flush_user_profile(sender=UserProfile, instance=user_profile)
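# Illustrative sketch (not part of the original module): the property
# name must be registered in Realm.property_types with a matching value
# type, or the assertion above fires; 'invite_required' is assumed here
# to be one such boolean property.
def _example_set_realm_property(realm: Realm, admin: UserProfile) -> None:
    do_set_realm_property(realm, 'invite_required', True, acting_user=admin)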
def do_set_realm_authentication_methods(realm: Realm,
authentication_methods: Dict[str, bool],
acting_user: Optional[UserProfile]=None) -> None:
old_value = realm.authentication_methods_dict()
for key, value in list(authentication_methods.items()):
index = getattr(realm.authentication_methods, key).number
realm.authentication_methods.set_bit(index, int(value))
realm.save(update_fields=['authentication_methods'])
updated_value = realm.authentication_methods_dict()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_PROPERTY_CHANGED, event_time=timezone_now(),
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: {'property': 'authentication_methods', 'value': old_value},
RealmAuditLog.NEW_VALUE: {'property': 'authentication_methods', 'value': updated_value}
}))
event = dict(
type="realm",
op="update_dict",
property='default',
data=dict(authentication_methods=updated_value),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_editing(realm: Realm,
allow_message_editing: bool,
message_content_edit_limit_seconds: int,
allow_community_topic_editing: bool) -> None:
realm.allow_message_editing = allow_message_editing
realm.message_content_edit_limit_seconds = message_content_edit_limit_seconds
realm.allow_community_topic_editing = allow_community_topic_editing
realm.save(update_fields=['allow_message_editing',
'allow_community_topic_editing',
'message_content_edit_limit_seconds',
],
)
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(allow_message_editing=allow_message_editing,
message_content_edit_limit_seconds=message_content_edit_limit_seconds,
allow_community_topic_editing=allow_community_topic_editing),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_message_deleting(realm: Realm,
message_content_delete_limit_seconds: int) -> None:
realm.message_content_delete_limit_seconds = message_content_delete_limit_seconds
realm.save(update_fields=['message_content_delete_limit_seconds'])
event = dict(
type="realm",
op="update_dict",
property="default",
data=dict(message_content_delete_limit_seconds=message_content_delete_limit_seconds),
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_notifications_stream(realm: Realm, stream: Optional[Stream], stream_id: int) -> None:
realm.notifications_stream = stream
realm.save(update_fields=['notifications_stream'])
event = dict(
type="realm",
op="update",
property="notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_set_realm_signup_notifications_stream(realm: Realm, stream: Optional[Stream],
stream_id: int) -> None:
realm.signup_notifications_stream = stream
realm.save(update_fields=['signup_notifications_stream'])
event = dict(
type="realm",
op="update",
property="signup_notifications_stream_id",
value=stream_id,
)
send_event(realm, event, active_user_ids(realm.id))
def do_deactivate_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
if realm.deactivated:
return
realm.deactivated = True
realm.save(update_fields=["deactivated"])
if settings.BILLING_ENABLED:
downgrade_now(realm)
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_DEACTIVATED, event_time=event_time,
acting_user=acting_user, extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
ScheduledEmail.objects.filter(realm=realm).delete()
for user in active_humans_in_realm(realm):
# Don't deactivate the users, but do delete their sessions so they get
# notice when they try to log in.
delete_user_sessions(user)
event = dict(type="realm", op="deactivated",
realm_id=realm.id)
send_event(realm, event, active_user_ids(realm.id))
def do_reactivate_realm(realm: Realm) -> None:
realm.deactivated = False
realm.save(update_fields=["deactivated"])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=realm, event_type=RealmAuditLog.REALM_REACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(realm),
}))
def do_change_realm_subdomain(realm: Realm, new_subdomain: str) -> None:
realm.string_id = new_subdomain
realm.save(update_fields=["string_id"])
def do_scrub_realm(realm: Realm, acting_user: Optional[UserProfile]=None) -> None:
users = UserProfile.objects.filter(realm=realm)
for user in users:
do_delete_messages_by_sender(user)
do_delete_avatar_image(user, acting_user=acting_user)
user.full_name = f"Scrubbed {generate_key()[:15]}"
scrubbed_email = f"scrubbed-{generate_key()[:15]}@{realm.host}"
user.email = scrubbed_email
user.delivery_email = scrubbed_email
user.save(update_fields=["full_name", "email", "delivery_email"])
do_remove_realm_custom_profile_fields(realm)
Attachment.objects.filter(realm=realm).delete()
RealmAuditLog.objects.create(realm=realm, event_time=timezone_now(),
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SCRUBBED)
def do_deactivate_user(user_profile: UserProfile,
acting_user: Optional[UserProfile]=None,
_cascade: bool=True) -> None:
if not user_profile.is_active:
return
if user_profile.realm.is_zephyr_mirror_realm: # nocoverage
# For zephyr mirror users, we need to make them a mirror dummy
        # again; otherwise, other users won't get the correct behavior
        # when trying to send messages to this person inside Zulip.
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save(update_fields=["is_active"])
delete_user_sessions(user_profile)
clear_scheduled_emails([user_profile.id])
event_time = timezone_now()
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_DEACTIVATED, event_time=event_time,
extra_data=ujson.dumps({
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
do_increment_logging_stat(user_profile.realm, COUNT_STATS['active_users_log:is_bot:day'],
user_profile.is_bot, event_time, increment=-1)
if settings.BILLING_ENABLED:
update_license_ledger_if_needed(user_profile.realm, event_time)
event = dict(type="realm_user", op="remove",
person=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
event = dict(type="realm_bot", op="remove",
bot=dict(user_id=user_profile.id,
full_name=user_profile.full_name))
send_event(user_profile.realm, event, bot_owner_user_ids(user_profile))
if _cascade:
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
for profile in bot_profiles:
do_deactivate_user(profile, acting_user=acting_user, _cascade=False)
def do_deactivate_stream(stream: Stream, log: bool=True, acting_user: Optional[UserProfile]=None) -> None:
affected_user_ids = can_access_stream_user_ids(stream)
get_active_subscriptions_for_stream_id(stream.id).update(active=False)
was_invite_only = stream.invite_only
stream.deactivated = True
stream.invite_only = True
old_name = stream.name
new_name = ("!DEACTIVATED:" + old_name)[:Stream.MAX_NAME_LENGTH]
for i in range(20):
if stream_name_in_use(new_name, stream.realm_id):
new_name = ("!" + new_name)[:Stream.MAX_NAME_LENGTH]
else:
break
    # If you don't have a unique name at this point, this will fail later in the
    # code path.
stream.name = new_name[:Stream.MAX_NAME_LENGTH]
stream.save(update_fields=['name', 'deactivated', 'invite_only'])
if DefaultStream.objects.filter(realm_id=stream.realm_id, stream_id=stream.id).exists():
do_remove_default_stream(stream)
default_stream_groups_for_stream = DefaultStreamGroup.objects.filter(streams__id=stream.id)
for group in default_stream_groups_for_stream:
do_remove_streams_from_default_stream_group(stream.realm, group, [stream])
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
cache_delete(old_cache_key)
stream_dict = stream.to_dict()
stream_dict.update(dict(name=old_name, invite_only=was_invite_only))
event = dict(type="stream", op="delete",
streams=[stream_dict])
send_event(stream.realm, event, affected_user_ids)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=stream.realm, acting_user=acting_user,
modified_stream=stream, event_type=RealmAuditLog.STREAM_DEACTIVATED,
event_time=event_time)
def send_user_email_update_event(user_profile: UserProfile) -> None:
payload = dict(user_id=user_profile.id,
new_email=user_profile.email)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def do_change_user_delivery_email(user_profile: UserProfile, new_email: str) -> None:
delete_user_profile_caches([user_profile])
user_profile.delivery_email = new_email
if user_profile.email_address_is_realm_public():
user_profile.email = new_email
user_profile.save(update_fields=["email", "delivery_email"])
else:
user_profile.save(update_fields=["delivery_email"])
payload = dict(user_id=user_profile.id,
delivery_email=new_email)
event = dict(type='realm_user', op='update', person=payload)
send_event(user_profile.realm, event, [user_profile.id])
if user_profile.avatar_source == UserProfile.AVATAR_FROM_GRAVATAR:
notify_avatar_url_change(user_profile)
if user_profile.email_address_is_realm_public():
        # Additionally, if we're also changing the publicly visible
        # email, we send a new_email event as well.
send_user_email_update_event(user_profile)
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_EMAIL_CHANGED,
event_time=event_time)
def do_start_email_change_process(user_profile: UserProfile, new_email: str) -> None:
old_email = user_profile.delivery_email
obj = EmailChangeStatus.objects.create(new_email=new_email, old_email=old_email,
user_profile=user_profile, realm=user_profile.realm)
activation_url = create_confirmation_link(obj, Confirmation.EMAIL_CHANGE)
from zerver.context_processors import common_context
context = common_context(user_profile)
context.update({
'old_email': old_email,
'new_email': new_email,
'activate_url': activation_url,
})
language = user_profile.default_language
send_email('zerver/emails/confirm_new_email', to_emails=[new_email],
from_name=FromAddress.security_email_from_name(language=language),
from_address=FromAddress.tokenized_no_reply_address(),
language=language, context=context,
realm=user_profile.realm)
def compute_irc_user_fullname(email: str) -> str:
return email.split("@")[0] + " (IRC)"
def compute_jabber_user_fullname(email: str) -> str:
return email.split("@")[0] + " (XMPP)"
@cache_with_key(lambda realm, email, f: user_profile_by_email_cache_key(email),
timeout=3600*24*7)
def create_mirror_user_if_needed(realm: Realm, email: str,
email_to_fullname: Callable[[str], str]) -> UserProfile:
try:
return get_user_by_delivery_email(email, realm)
except UserProfile.DoesNotExist:
try:
# Forge a user for this person
return create_user(
email=email,
password=None,
realm=realm,
full_name=email_to_fullname(email),
active=False,
is_mirror_dummy=True,
)
except IntegrityError:
return get_user_by_delivery_email(email, realm)
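# Illustrative sketch (not part of the original module): mirror users
# for IRC/XMPP bridges are looked up (or forged as mirror dummies) on
# demand; the address below is hypothetical.
def _example_irc_mirror_user(realm: Realm) -> UserProfile:
    return create_mirror_user_if_needed(
        realm, 'alice@irc.example.com', compute_irc_user_fullname)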
def send_welcome_bot_response(message: MutableMapping[str, Any]) -> None:
welcome_bot = get_system_bot(settings.WELCOME_BOT)
human_recipient_id = message['message'].sender.recipient_id
if Message.objects.filter(sender=welcome_bot, recipient_id=human_recipient_id).count() < 2:
content = (
_("Congratulations on your first reply!") +
" "
":tada:"
"\n"
"\n" +
_("Feel free to continue using this space to practice your new messaging "
"skills. Or, try clicking on some of the stream names to your left!")
)
internal_send_private_message(
message['realm'], welcome_bot, message['message'].sender, content)
def render_incoming_message(message: Message,
content: str,
user_ids: Set[int],
realm: Realm,
mention_data: Optional[MentionData]=None,
email_gateway: bool=False) -> str:
realm_alert_words_automaton = get_alert_word_automaton(realm)
try:
rendered_content = render_markdown(
message=message,
content=content,
realm=realm,
realm_alert_words_automaton = realm_alert_words_automaton,
mention_data=mention_data,
email_gateway=email_gateway,
)
except MarkdownRenderingException:
raise JsonableError(_('Unable to render message'))
return rendered_content
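# Illustrative sketch (not part of the original module): rendering an
# already-constructed (not yet saved) Message object; a Markdown failure
# surfaces to the caller as a JsonableError per the handler above.
def _example_render(message: Message, realm: Realm) -> str:
    return render_incoming_message(message, message.content, set(), realm)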
class RecipientInfoResult(TypedDict):
active_user_ids: Set[int]
push_notify_user_ids: Set[int]
stream_email_user_ids: Set[int]
stream_push_user_ids: Set[int]
wildcard_mention_user_ids: Set[int]
um_eligible_user_ids: Set[int]
long_term_idle_user_ids: Set[int]
default_bot_user_ids: Set[int]
service_bot_tuples: List[Tuple[int, int]]
def get_recipient_info(recipient: Recipient,
sender_id: int,
stream_topic: Optional[StreamTopicTarget],
possibly_mentioned_user_ids: AbstractSet[int]=set(),
possible_wildcard_mention: bool=True) -> RecipientInfoResult:
stream_push_user_ids: Set[int] = set()
stream_email_user_ids: Set[int] = set()
wildcard_mention_user_ids: Set[int] = set()
if recipient.type == Recipient.PERSONAL:
# The sender and recipient may be the same id, so
# de-duplicate using a set.
message_to_user_ids = list({recipient.type_id, sender_id})
assert(len(message_to_user_ids) in [1, 2])
elif recipient.type == Recipient.STREAM:
# Anybody calling us w/r/t a stream message needs to supply
# stream_topic. We may eventually want to have different versions
# of this function for different message types.
assert(stream_topic is not None)
user_ids_muting_topic = stream_topic.user_ids_muting_topic()
subscription_rows = stream_topic.get_active_subscriptions().annotate(
user_profile_email_notifications=F('user_profile__enable_stream_email_notifications'),
user_profile_push_notifications=F('user_profile__enable_stream_push_notifications'),
user_profile_wildcard_mentions_notify=F(
'user_profile__wildcard_mentions_notify'),
).values(
'user_profile_id',
'push_notifications',
'email_notifications',
'wildcard_mentions_notify',
'user_profile_email_notifications',
'user_profile_push_notifications',
'user_profile_wildcard_mentions_notify',
'is_muted',
).order_by('user_profile_id')
message_to_user_ids = [
row['user_profile_id']
for row in subscription_rows
]
def should_send(setting: str, row: Dict[str, Any]) -> bool:
# This implements the structure that the UserProfile stream notification settings
# are defaults, which can be overridden by the stream-level settings (if those
# values are not null).
if row['is_muted']:
return False
if row['user_profile_id'] in user_ids_muting_topic:
return False
if row[setting] is not None:
return row[setting]
return row['user_profile_' + setting]
stream_push_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_push_notify
if should_send('push_notifications', row)
}
stream_email_user_ids = {
row['user_profile_id']
for row in subscription_rows
# Note: muting a stream overrides stream_email_notify
if should_send('email_notifications', row)
}
if possible_wildcard_mention:
            # If there's a possible wildcard mention, we need to
            # determine which users should receive a wildcard-mention
            # notification. We don't have separate values for push/email
            # notifications here; at this stage, we're just
            # determining whether the wildcard mention should be
            # treated as a mention (subject to the user's mention
            # notification preferences) or a normal message.
wildcard_mention_user_ids = {
row['user_profile_id']
for row in subscription_rows
if should_send("wildcard_mentions_notify", row)
}
elif recipient.type == Recipient.HUDDLE:
message_to_user_ids = get_huddle_user_ids(recipient)
else:
raise ValueError('Bad recipient type')
message_to_user_id_set = set(message_to_user_ids)
user_ids = set(message_to_user_id_set)
    # Important note: Because we haven't rendered markdown yet, we
    # don't know which of these possibly-mentioned users was
    # actually mentioned in the message (in other words, the
# mention syntax might have been in a code block or otherwise
# escaped). `get_ids_for` will filter these extra user rows
# for our data structures not related to bots
user_ids |= possibly_mentioned_user_ids
if user_ids:
query = UserProfile.objects.filter(
is_active=True,
).values(
'id',
'enable_online_push_notifications',
'is_bot',
'bot_type',
'long_term_idle',
)
        # query_for_ids is highly optimized for large queries, and we
# need this codepath to be fast (it's part of sending messages)
query = query_for_ids(
query=query,
user_ids=sorted(list(user_ids)),
field='id',
)
rows = list(query)
else:
        # TODO: We should always have at least one user_id as a
        # recipient of any message we send. We have simplified
        # notify_new_user so that it should be a little easier to
        # reason about. There is currently some cleanup to how we
        # handle cross-realm bots that is still under development.
        # Once that effort is complete, we should be able to address
        # this to-do.
rows = []
def get_ids_for(f: Callable[[Dict[str, Any]], bool]) -> Set[int]:
return {
row['id']
for row in rows
if f(row)
} & message_to_user_id_set
def is_service_bot(row: Dict[str, Any]) -> bool:
return row['is_bot'] and (row['bot_type'] in UserProfile.SERVICE_BOT_TYPES)
active_user_ids = get_ids_for(lambda r: True)
push_notify_user_ids = get_ids_for(
lambda r: r['enable_online_push_notifications'],
)
# Service bots don't get UserMessage rows.
um_eligible_user_ids = get_ids_for(
lambda r: not is_service_bot(r),
)
long_term_idle_user_ids = get_ids_for(
lambda r: r['long_term_idle'],
)
    # These two bot data structures need to filter from the full set
    # of users who either are receiving the message or might have
    # been mentioned in it, and so can't use get_ids_for.
    #
    # Further in the do_send_messages code path, once
    # `mentioned_user_ids` has been computed via markdown, we'll filter
    # these data structures for just those users who are either a
    # direct recipient or were mentioned; for now, we're just making
    # sure we have the data we need for that without extra database
    # queries.
default_bot_user_ids = {
row['id']
for row in rows
if row['is_bot'] and row['bot_type'] == UserProfile.DEFAULT_BOT
}
service_bot_tuples = [
(row['id'], row['bot_type'])
for row in rows
if is_service_bot(row)
]
info: RecipientInfoResult = dict(
active_user_ids=active_user_ids,
push_notify_user_ids=push_notify_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
wildcard_mention_user_ids=wildcard_mention_user_ids,
um_eligible_user_ids=um_eligible_user_ids,
long_term_idle_user_ids=long_term_idle_user_ids,
default_bot_user_ids=default_bot_user_ids,
service_bot_tuples=service_bot_tuples,
)
return info
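# Illustrative sketch (not part of the original module): for a
# one-on-one private message, stream_topic is None and the resulting
# active_user_ids covers at most the sender and the recipient.
def _example_personal_recipient_info(recipient: Recipient,
                                     sender: UserProfile) -> Set[int]:
    assert recipient.type == Recipient.PERSONAL
    info = get_recipient_info(recipient, sender.id, stream_topic=None)
    return info['active_user_ids']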
def get_service_bot_events(sender: UserProfile, service_bot_tuples: List[Tuple[int, int]],
mentioned_user_ids: Set[int], active_user_ids: Set[int],
recipient_type: int) -> Dict[str, List[Dict[str, Any]]]:
event_dict: Dict[str, List[Dict[str, Any]]] = defaultdict(list)
# Avoid infinite loops by preventing messages sent by bots from generating
# Service events.
if sender.is_bot:
return event_dict
def maybe_add_event(user_profile_id: int, bot_type: int) -> None:
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
queue_name = 'outgoing_webhooks'
elif bot_type == UserProfile.EMBEDDED_BOT:
queue_name = 'embedded_bots'
else:
logging.error(
'Unexpected bot_type for Service bot id=%s: %s',
user_profile_id, bot_type,
)
return
is_stream = (recipient_type == Recipient.STREAM)
# Important note: service_bot_tuples may contain service bots
# who were not actually mentioned in the message (e.g. if
# mention syntax for that bot appeared in a code block).
        # Thus, it is important to filter out any users who aren't
        # part of either mentioned_user_ids (the actual mentions) or
        # active_user_ids (the actual recipients).
if user_profile_id not in mentioned_user_ids and user_profile_id not in active_user_ids:
return
if is_stream and user_profile_id in mentioned_user_ids:
trigger = 'mention'
elif (not is_stream) and (user_profile_id in active_user_ids):
trigger = 'private_message'
else:
return
event_dict[queue_name].append({
'trigger': trigger,
'user_profile_id': user_profile_id,
})
for user_profile_id, bot_type in service_bot_tuples:
maybe_add_event(
user_profile_id=user_profile_id,
bot_type=bot_type,
)
return event_dict
def do_schedule_messages(messages: Sequence[Mapping[str, Any]]) -> List[int]:
scheduled_messages: List[ScheduledMessage] = []
for message in messages:
scheduled_message = ScheduledMessage()
scheduled_message.sender = message['message'].sender
scheduled_message.recipient = message['message'].recipient
topic_name = message['message'].topic_name()
scheduled_message.set_topic_name(topic_name=topic_name)
scheduled_message.content = message['message'].content
scheduled_message.sending_client = message['message'].sending_client
scheduled_message.stream = message['stream']
scheduled_message.realm = message['realm']
scheduled_message.scheduled_timestamp = message['deliver_at']
if message['delivery_type'] == 'send_later':
scheduled_message.delivery_type = ScheduledMessage.SEND_LATER
elif message['delivery_type'] == 'remind':
scheduled_message.delivery_type = ScheduledMessage.REMIND
scheduled_messages.append(scheduled_message)
ScheduledMessage.objects.bulk_create(scheduled_messages)
return [scheduled_message.id for scheduled_message in scheduled_messages]
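# Illustrative sketch (not part of the original module): each mapping
# passed to do_schedule_messages must carry the keys read above
# ('message', 'stream', 'realm', 'deliver_at', 'delivery_type'), with
# 'message' being a pre-built Message object.
def _example_schedule(message_dict: Mapping[str, Any]) -> List[int]:
    assert message_dict['delivery_type'] in ('send_later', 'remind')
    return do_schedule_messages([message_dict])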
def do_send_messages(messages_maybe_none: Sequence[Optional[MutableMapping[str, Any]]],
email_gateway: bool=False,
mark_as_read: Sequence[int]=[]) -> List[int]:
messages = [message for message in messages_maybe_none if message is not None]
# Filter out zephyr mirror anomalies where the message was already sent
already_sent_ids: List[int] = []
new_messages: List[MutableMapping[str, Any]] = []
for message in messages:
if isinstance(message['message'], int):
already_sent_ids.append(message['message'])
else:
new_messages.append(message)
messages = new_messages
links_for_embed: Set[str] = set()
# For consistency, changes to the default values for these gets should also be applied
# to the default args in do_send_message
for message in messages:
message['rendered_content'] = message.get('rendered_content', None)
message['stream'] = message.get('stream', None)
message['local_id'] = message.get('local_id', None)
message['sender_queue_id'] = message.get('sender_queue_id', None)
message['realm'] = message.get('realm', message['message'].sender.realm)
mention_data = MentionData(
realm_id=message['realm'].id,
content=message['message'].content,
)
message['mention_data'] = mention_data
if message['message'].is_stream_message():
stream_id = message['message'].recipient.type_id
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=message['message'].topic_name(),
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message['message'].recipient,
sender_id=message['message'].sender_id,
stream_topic=stream_topic,
possibly_mentioned_user_ids=mention_data.get_user_ids(),
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
message['active_user_ids'] = info['active_user_ids']
message['push_notify_user_ids'] = info['push_notify_user_ids']
message['stream_push_user_ids'] = info['stream_push_user_ids']
message['stream_email_user_ids'] = info['stream_email_user_ids']
message['um_eligible_user_ids'] = info['um_eligible_user_ids']
message['long_term_idle_user_ids'] = info['long_term_idle_user_ids']
message['default_bot_user_ids'] = info['default_bot_user_ids']
message['service_bot_tuples'] = info['service_bot_tuples']
# Render our messages.
assert message['message'].rendered_content is None
rendered_content = render_incoming_message(
message['message'],
message['message'].content,
message['active_user_ids'],
message['realm'],
mention_data=message['mention_data'],
email_gateway=email_gateway,
)
message['message'].rendered_content = rendered_content
message['message'].rendered_content_version = markdown_version
links_for_embed |= message['message'].links_for_preview
# Add members of the mentioned user groups into `mentions_user_ids`.
for group_id in message['message'].mentions_user_group_ids:
members = message['mention_data'].get_group_members(group_id)
message['message'].mentions_user_ids.update(members)
# Only send data to Tornado about wildcard mentions if message
# rendering determined the message had an actual wildcard
# mention in it (and not e.g. wildcard mention syntax inside a
# code block).
if message['message'].mentions_wildcard:
message['wildcard_mention_user_ids'] = info['wildcard_mention_user_ids']
else:
message['wildcard_mention_user_ids'] = []
mentioned_user_ids = message['message'].mentions_user_ids
default_bot_user_ids = message['default_bot_user_ids']
mentioned_bot_user_ids = default_bot_user_ids & mentioned_user_ids
message['um_eligible_user_ids'] |= mentioned_bot_user_ids
# Save the message receipts in the database
user_message_flags: Dict[int, Dict[int, List[str]]] = defaultdict(dict)
with transaction.atomic():
Message.objects.bulk_create([message['message'] for message in messages])
# Claim attachments in message
for message in messages:
if do_claim_attachments(message['message'],
message['message'].potential_attachment_path_ids):
message['message'].has_attachment = True
message['message'].save(update_fields=['has_attachment'])
ums: List[UserMessageLite] = []
for message in messages:
        # Service bots (outgoing webhook bots and embedded bots) don't store UserMessage rows;
        # they are notified via the service queue events computed below instead.
mentioned_user_ids = message['message'].mentions_user_ids
user_messages = create_user_messages(
message=message['message'],
um_eligible_user_ids=message['um_eligible_user_ids'],
long_term_idle_user_ids=message['long_term_idle_user_ids'],
stream_push_user_ids = message['stream_push_user_ids'],
stream_email_user_ids = message['stream_email_user_ids'],
mentioned_user_ids=mentioned_user_ids,
mark_as_read=mark_as_read,
)
for um in user_messages:
user_message_flags[message['message'].id][um.user_profile_id] = um.flags_list()
ums.extend(user_messages)
message['message'].service_queue_events = get_service_bot_events(
sender=message['message'].sender,
service_bot_tuples=message['service_bot_tuples'],
mentioned_user_ids=mentioned_user_ids,
active_user_ids=message['active_user_ids'],
recipient_type=message['message'].recipient.type,
)
bulk_insert_ums(ums)
for message in messages:
do_widget_post_save_actions(message)
for message in messages:
realm_id: Optional[int] = None
if message['message'].is_stream_message():
if message['stream'] is None:
stream_id = message['message'].recipient.type_id
message['stream'] = Stream.objects.select_related().get(id=stream_id)
assert message['stream'] is not None
realm_id = message['stream'].realm_id
wide_message_dict = MessageDict.wide_dict(message['message'], realm_id)
user_flags = user_message_flags.get(message['message'].id, {})
sender = message['message'].sender
message_type = wide_message_dict['type']
presence_idle_user_ids = get_active_presence_idle_user_ids(
realm=sender.realm,
sender_id=sender.id,
message_type=message_type,
active_user_ids=message['active_user_ids'],
user_flags=user_flags,
)
event = dict(
type='message',
message=message['message'].id,
message_dict=wide_message_dict,
presence_idle_user_ids=presence_idle_user_ids,
)
user_ids = message['active_user_ids'] | set(user_flags.keys())
users = [
dict(
id=user_id,
flags=user_flags.get(user_id, []),
always_push_notify=(user_id in message['push_notify_user_ids']),
stream_push_notify=(user_id in message['stream_push_user_ids']),
stream_email_notify=(user_id in message['stream_email_user_ids']),
wildcard_mention_notify=(user_id in message['wildcard_mention_user_ids']),
)
for user_id in user_ids
]
if message['message'].is_stream_message():
            # We only attach stream metadata (realm_id, stream name) to
            # the event for public streams, ensuring that in the tornado
            # server, non-public stream messages are only associated to
            # their subscribed users.
assert message['stream'] is not None # assert needed because stubs for django are missing
if message['stream'].is_public():
event['realm_id'] = message['stream'].realm_id
event['stream_name'] = message['stream'].name
if message['stream'].invite_only:
event['invite_only'] = True
if message['stream'].first_message_id is None:
message['stream'].first_message_id = message['message'].id
message['stream'].save(update_fields=["first_message_id"])
if message['local_id'] is not None:
event['local_id'] = message['local_id']
if message['sender_queue_id'] is not None:
event['sender_queue_id'] = message['sender_queue_id']
send_event(message['realm'], event, users)
if links_for_embed:
event_data = {
'message_id': message['message'].id,
'message_content': message['message'].content,
'message_realm_id': message['realm'].id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
if message['message'].recipient.type == Recipient.PERSONAL:
welcome_bot_id = get_system_bot(settings.WELCOME_BOT).id
if (welcome_bot_id in message['active_user_ids'] and
welcome_bot_id != message['message'].sender_id):
send_welcome_bot_response(message)
for queue_name, events in message['message'].service_queue_events.items():
for event in events:
queue_json_publish(
queue_name,
{
"message": wide_message_dict,
"trigger": event['trigger'],
"user_profile_id": event["user_profile_id"],
},
)
# Note that this does not preserve the order of message ids
# returned. In practice, this shouldn't matter, as we only
# intermingle sending zephyr messages with other messages.
return already_sent_ids + [message['message'].id for message in messages]
class UserMessageLite:
def __init__(self, user_profile_id: int, message_id: int, flags: int) -> None:
self.user_profile_id = user_profile_id
self.message_id = message_id
self.flags = flags
def flags_list(self) -> List[str]:
return UserMessage.flags_list_for_flags(self.flags)
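# UserMessageLite is a lightweight stand-in for a UserMessage row on the
# bulk-send path; bulk_insert_ums below writes these rows with a single raw
# INSERT rather than per-object ORM saves. A flags bitmask such as
# UserMessage.flags.read | UserMessage.flags.mentioned round-trips through
# flags_list() as a list like ['read', 'mentioned'].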
def create_user_messages(message: Message,
um_eligible_user_ids: AbstractSet[int],
long_term_idle_user_ids: AbstractSet[int],
stream_push_user_ids: AbstractSet[int],
stream_email_user_ids: AbstractSet[int],
mentioned_user_ids: AbstractSet[int],
mark_as_read: Sequence[int] = []) -> List[UserMessageLite]:
ums_to_create = []
for user_profile_id in um_eligible_user_ids:
um = UserMessageLite(
user_profile_id=user_profile_id,
message_id=message.id,
flags=0,
)
ums_to_create.append(um)
# These properties on the Message are set via
# render_markdown by code in the markdown inline patterns
wildcard = message.mentions_wildcard
ids_with_alert_words = message.user_ids_with_alert_words
for um in ums_to_create:
if (um.user_profile_id == message.sender.id and
message.sent_by_human()) or \
um.user_profile_id in mark_as_read:
um.flags |= UserMessage.flags.read
if wildcard:
um.flags |= UserMessage.flags.wildcard_mentioned
if um.user_profile_id in mentioned_user_ids:
um.flags |= UserMessage.flags.mentioned
if um.user_profile_id in ids_with_alert_words:
um.flags |= UserMessage.flags.has_alert_word
if message.recipient.type in [Recipient.HUDDLE, Recipient.PERSONAL]:
um.flags |= UserMessage.flags.is_private
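    # For example, a long-term-idle user who is @-mentioned in a stream
    # message ends up with flags == UserMessage.flags.mentioned, which is
    # nonzero, so the filtering loop below still creates a row for them.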
# For long_term_idle (aka soft-deactivated) users, we are allowed
# to optimize by lazily not creating UserMessage rows that would
# have the default 0 flag set (since the soft-reactivation logic
# knows how to create those when the user comes back). We need to
# create the UserMessage rows for these long_term_idle users
# non-lazily in a few cases:
#
# * There are nonzero flags (e.g. the user was mentioned), since
# that case is rare and this saves a lot of complexity in
# soft-reactivation.
#
# * If the user is going to be notified (e.g. they get push/email
# notifications for every message on a stream), since in that
# case the notifications code will call `access_message` on the
# message to re-verify permissions, and for private streams,
# will get an error if the UserMessage row doesn't exist yet.
    user_messages = []
for um in ums_to_create:
if (um.user_profile_id in long_term_idle_user_ids and
um.user_profile_id not in stream_push_user_ids and
um.user_profile_id not in stream_email_user_ids and
message.is_stream_message() and
int(um.flags) == 0):
continue
user_messages.append(um)
return user_messages
def bulk_insert_ums(ums: List[UserMessageLite]) -> None:
if not ums:
return
vals = [
(um.user_profile_id, um.message_id, um.flags)
for um in ums
]
query = SQL('''
INSERT into
zerver_usermessage (user_profile_id, message_id, flags)
VALUES %s
''')
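    # execute_values (psycopg2) expands the single %s into a multi-row
    # VALUES list, e.g. VALUES (7, 1001, 1), (8, 1001, 0), so the whole
    # batch is inserted in one round trip.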
with connection.cursor() as cursor:
execute_values(cursor.cursor, query, vals)
def do_add_submessage(realm: Realm,
sender_id: int,
message_id: int,
msg_type: str,
content: str,
) -> None:
submessage = SubMessage(
sender_id=sender_id,
message_id=message_id,
msg_type=msg_type,
content=content,
)
submessage.save()
event = dict(
type="submessage",
msg_type=msg_type,
message_id=message_id,
submessage_id=submessage.id,
sender_id=sender_id,
content=content,
)
ums = UserMessage.objects.filter(message_id=message_id)
target_user_ids = [um.user_profile_id for um in ums]
send_event(realm, event, target_user_ids)
def notify_reaction_update(user_profile: UserProfile, message: Message,
reaction: Reaction, op: str) -> None:
user_dict = {'user_id': user_profile.id,
'email': user_profile.email,
'full_name': user_profile.full_name}
event: Dict[str, Any] = {
'type': 'reaction',
'op': op,
'user_id': user_profile.id,
'user': user_dict,
'message_id': message.id,
'emoji_name': reaction.emoji_name,
'emoji_code': reaction.emoji_code,
'reaction_type': reaction.reaction_type,
}
update_to_dict_cache([message])
    # Recipients for reaction events are exactly the users who received
    # the original message; anything broader would mean sending all
    # reactions to public stream messages to every browser for every
    # client in the organization, which doesn't scale.
ums = UserMessage.objects.filter(message=message.id)
send_event(user_profile.realm, event, [um.user_profile_id for um in ums])
def do_add_reaction(user_profile: UserProfile, message: Message,
emoji_name: str, emoji_code: str, reaction_type: str) -> None:
reaction = Reaction(user_profile=user_profile, message=message,
emoji_name=emoji_name, emoji_code=emoji_code,
reaction_type=reaction_type)
try:
reaction.save()
except django.db.utils.IntegrityError:
raise JsonableError(_("Reaction already exists."))
notify_reaction_update(user_profile, message, reaction, "add")
def do_remove_reaction(user_profile: UserProfile, message: Message,
emoji_code: str, reaction_type: str) -> None:
reaction = Reaction.objects.filter(user_profile=user_profile,
message=message,
emoji_code=emoji_code,
reaction_type=reaction_type).get()
reaction.delete()
notify_reaction_update(user_profile, message, reaction, "remove")
def do_send_typing_notification(
realm: Realm,
sender: UserProfile,
recipient_user_profiles: List[UserProfile],
operator: str) -> None:
sender_dict = {'user_id': sender.id, 'email': sender.email}
recipient_dicts = [{'user_id': profile.id, 'email': profile.email}
for profile in recipient_user_profiles]
event = dict(
type='typing',
op=operator,
sender=sender_dict,
recipients=recipient_dicts,
)
user_ids_to_notify = [
user.id
for user in recipient_user_profiles
if user.is_active
]
send_event(realm, event, user_ids_to_notify)
def check_send_typing_notification(sender: UserProfile,
user_ids: List[int],
operator: str) -> None:
realm = sender.realm
if len(user_ids) == 0:
raise JsonableError(_('Missing parameter: \'to\' (recipient)'))
elif operator not in ('start', 'stop'):
raise JsonableError(_('Invalid \'op\' value (should be start or stop)'))
if sender.id not in user_ids:
user_ids.append(sender.id)
    # We fetch the UserProfile for every recipient and fail loudly on
    # invalid IDs, since it's a sign that a client is confused (or
    # possibly even malicious) if we get bad user_ids.
user_profiles = []
for user_id in user_ids:
try:
# We include cross-bot realms as possible recipients,
# so that clients can know which huddle conversation
# is relevant here.
user_profile = get_user_by_id_in_realm_including_cross_realm(
user_id, sender.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("Invalid user ID {}").format(user_id))
user_profiles.append(user_profile)
do_send_typing_notification(
realm=realm,
sender=sender,
recipient_user_profiles=user_profiles,
operator=operator,
)
def ensure_stream(realm: Realm,
stream_name: str,
invite_only: bool=False,
stream_description: str="",
acting_user: Optional[UserProfile]=None) -> Stream:
return create_stream_if_needed(realm, stream_name,
invite_only=invite_only,
stream_description=stream_description,
acting_user=acting_user)[0]
def get_recipient_from_user_profiles(recipient_profiles: Sequence[UserProfile],
forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile) -> Recipient:
# Avoid mutating the passed in list of recipient_profiles.
recipient_profiles_map = {}
for user_profile in recipient_profiles:
recipient_profiles_map[user_profile.id] = user_profile
if forwarded_mirror_message:
# In our mirroring integrations with some third-party
# protocols, bots subscribed to the third-party protocol
# forward to Zulip messages that they received in the
# third-party service. The permissions model for that
# forwarding is that users can only submit to Zulip private
# messages they personally received, and here we do the check
# for whether forwarder_user_profile is among the private
# message recipients of the message.
assert forwarder_user_profile is not None
if forwarder_user_profile.id not in recipient_profiles_map:
raise ValidationError(_("User not authorized for this query"))
# If the private message is just between the sender and
# another person, force it to be a personal internally
if (len(recipient_profiles_map) == 2 and sender.id in recipient_profiles_map):
del recipient_profiles_map[sender.id]
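    # e.g. alice sending to {alice, bob} collapses to a personal message
    # to bob here, while alice sending to {bob, cathy} falls through to
    # the huddle path below, which re-adds alice.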
assert len(recipient_profiles_map) != 0
if len(recipient_profiles_map) == 1:
user_profile = list(recipient_profiles_map.values())[0]
return user_profile.recipient
# Otherwise, we need a huddle. Make sure the sender is included in huddle messages
recipient_profiles_map[sender.id] = sender
    user_ids: Set[int] = set(recipient_profiles_map)
return get_huddle_recipient(user_ids)
def validate_recipient_user_profiles(user_profiles: Sequence[UserProfile],
sender: UserProfile,
allow_deactivated: bool=False) -> Sequence[UserProfile]:
recipient_profiles_map: Dict[int, UserProfile] = {}
# We exempt cross-realm bots from the check that all the recipients
# are in the same realm.
realms = set()
if not is_cross_realm_bot_email(sender.email):
realms.add(sender.realm_id)
for user_profile in user_profiles:
if (not user_profile.is_active and not user_profile.is_mirror_dummy and
not allow_deactivated) or user_profile.realm.deactivated:
raise ValidationError(_("'{email}' is no longer using Zulip.").format(email=user_profile.email))
recipient_profiles_map[user_profile.id] = user_profile
if not is_cross_realm_bot_email(user_profile.email):
realms.add(user_profile.realm_id)
if len(realms) > 1:
raise ValidationError(_("You can't send private messages outside of your organization."))
return list(recipient_profiles_map.values())
def recipient_for_user_profiles(user_profiles: Sequence[UserProfile], forwarded_mirror_message: bool,
forwarder_user_profile: Optional[UserProfile],
sender: UserProfile, allow_deactivated: bool=False) -> Recipient:
recipient_profiles = validate_recipient_user_profiles(user_profiles, sender,
allow_deactivated=allow_deactivated)
return get_recipient_from_user_profiles(recipient_profiles, forwarded_mirror_message,
forwarder_user_profile, sender)
def already_sent_mirrored_message_id(message: Message) -> Optional[int]:
    if message.recipient.type == Recipient.HUDDLE:
        # For huddle messages, we use a 10-second window because the
        # timestamps aren't guaranteed to actually match between two
        # copies of the same message.
time_window = datetime.timedelta(seconds=10)
else:
time_window = datetime.timedelta(seconds=0)
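    # e.g. a mirrored huddle message dated 12:00:00 matches an existing
    # copy dated anywhere in [11:59:50, 12:00:10]; for other recipient
    # types the date_sent values must match exactly.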
query = Message.objects.filter(
sender=message.sender,
recipient=message.recipient,
content=message.content,
sending_client=message.sending_client,
date_sent__gte=message.date_sent - time_window,
date_sent__lte=message.date_sent + time_window)
messages = filter_by_exact_message_topic(
query=query,
message=message,
)
if messages.exists():
return messages[0].id
return None
def extract_stream_indicator(s: str) -> Union[str, int]:
# Users can pass stream name as either an id or a name,
# and if they choose to pass a name, they may JSON encode
# it for legacy reasons.
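    # Illustrative examples of the accepted formats:
    #   extract_stream_indicator('social')      -> 'social'  (raw name)
    #   extract_stream_indicator('"social"')    -> 'social'  (JSON-encoded name)
    #   extract_stream_indicator('["social"]')  -> 'social'  (legacy list form)
    #   extract_stream_indicator('42')          -> 42        (stream id)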
try:
data = ujson.loads(s)
except (ValueError, TypeError):
# If there was no JSON encoding, then we just
# have a raw stream name.
return s
# We should stop supporting this odd use case
# once we improve our documentation.
if isinstance(data, list):
if len(data) != 1: # nocoverage
raise JsonableError(_("Expected exactly one stream"))
data = data[0]
if isinstance(data, str):
# We had a JSON-encoded stream name.
return data
if isinstance(data, int):
# We had a stream id.
return data
raise JsonableError(_("Invalid data type for stream"))
def extract_private_recipients(s: str) -> Union[List[str], List[int]]:
# We try to accept multiple incoming formats for recipients.
# See test_extract_recipients() for examples of what we allow.
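    # Illustrative examples (results are deduplicated via sets, so
    # ordering is not guaranteed):
    #   'alice@zulip.com, bob@zulip.com' -> ['alice@zulip.com', 'bob@zulip.com']
    #   '["alice@zulip.com"]'            -> ['alice@zulip.com']
    #   '[11, 12, 12]'                   -> [11, 12]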
try:
data = ujson.loads(s)
except (ValueError, TypeError):
data = s
if isinstance(data, str):
data = data.split(',')
if not isinstance(data, list):
raise JsonableError(_("Invalid data type for recipients"))
if not data:
# We don't complain about empty message recipients here
return data
if isinstance(data[0], str):
return get_validated_emails(data)
if not isinstance(data[0], int):
raise JsonableError(_("Invalid data type for recipients"))
return get_validated_user_ids(data)
def get_validated_user_ids(user_ids: Iterable[int]) -> List[int]:
for user_id in user_ids:
if not isinstance(user_id, int):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(set(user_ids))
def get_validated_emails(emails: Iterable[str]) -> List[str]:
for email in emails:
if not isinstance(email, str):
raise JsonableError(_("Recipient lists may contain emails or user IDs, but not both."))
return list(filter(bool, {email.strip() for email in emails}))
def check_send_stream_message(sender: UserProfile, client: Client, stream_name: str,
topic: str, body: str, realm: Optional[Realm]=None) -> int:
addressee = Addressee.for_stream_name(stream_name, topic)
message = check_message(sender, client, addressee, body, realm)
return do_send_messages([message])[0]
def check_send_private_message(sender: UserProfile, client: Client,
receiving_user: UserProfile, body: str) -> int:
addressee = Addressee.for_user_profile(receiving_user)
message = check_message(sender, client, addressee, body)
return do_send_messages([message])[0]
def check_send_message(sender: UserProfile, client: Client, message_type_name: str,
message_to: Union[Sequence[int], Sequence[str]],
topic_name: Optional[str],
message_content: str, realm: Optional[Realm]=None,
forged: bool=False, forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm, forged, forged_timestamp,
forwarder_user_profile, local_id, sender_queue_id,
widget_content)
return do_send_messages([message])[0]
def check_schedule_message(sender: UserProfile, client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str], message_content: str,
delivery_type: str, deliver_at: datetime.datetime,
realm: Optional[Realm]=None,
forwarder_user_profile: Optional[UserProfile]=None,
) -> int:
addressee = Addressee.legacy_build(
sender,
message_type_name,
message_to,
topic_name)
message = check_message(sender, client, addressee,
message_content, realm=realm,
forwarder_user_profile=forwarder_user_profile)
message['deliver_at'] = deliver_at
message['delivery_type'] = delivery_type
recipient = message['message'].recipient
if (delivery_type == 'remind' and (recipient.type != Recipient.STREAM and
recipient.type_id != sender.id)):
raise JsonableError(_("Reminders can only be set for streams."))
return do_schedule_messages([message])[0]
def check_default_stream_group_name(group_name: str) -> None:
if group_name.strip() == "":
raise JsonableError(_("Invalid default stream group name '{}'").format(group_name))
if len(group_name) > DefaultStreamGroup.MAX_NAME_LENGTH:
raise JsonableError(_("Default stream group name too long (limit: {} characters)").format(
DefaultStreamGroup.MAX_NAME_LENGTH,
))
for i in group_name:
if ord(i) == 0:
raise JsonableError(_("Default stream group name '{}' contains NULL (0x00) characters.").format(
group_name,
))
def send_rate_limited_pm_notification_to_bot_owner(sender: UserProfile,
realm: Realm,
content: str) -> None:
if sender.realm.is_zephyr_mirror_realm or sender.realm.deactivated:
return
if not sender.is_bot or sender.bot_owner is None:
return
    # Don't send these notifications for cross-realm bot messages
    # (e.g. from EMAIL_GATEWAY_BOT) since the owner for
    # EMAIL_GATEWAY_BOT is probably the server administrator, not
    # the owner of the bot who could potentially fix the problem.
if sender.realm != realm:
return
# We warn the user once every 5 minutes to avoid a flood of
# PMs on a misconfigured integration, re-using the
# UserProfile.last_reminder field, which is not used for bots.
last_reminder = sender.last_reminder
waitperiod = datetime.timedelta(minutes=UserProfile.BOT_OWNER_STREAM_ALERT_WAITPERIOD)
if last_reminder and timezone_now() - last_reminder <= waitperiod:
return
internal_send_private_message(realm, get_system_bot(settings.NOTIFICATION_BOT),
sender.bot_owner, content)
sender.last_reminder = timezone_now()
sender.save(update_fields=['last_reminder'])
def send_pm_if_empty_stream(stream: Optional[Stream],
realm: Realm,
sender: UserProfile,
stream_name: Optional[str]=None,
stream_id: Optional[int]=None) -> None:
if not sender.is_bot or sender.bot_owner is None:
return
arg_dict = {
"bot_identity": f"`{sender.delivery_email}`",
"stream_id": stream_id,
"stream_name": f"#**{stream_name}**",
"new_stream_link": "#streams/new",
}
if sender.bot_owner is not None:
with override_language(sender.bot_owner.default_language):
if stream is None:
if stream_id is not None:
content = _("Your bot {bot_identity} tried to send a message to stream ID "
"{stream_id}, but there is no stream with that ID.").format(**arg_dict)
else:
                    assert stream_name is not None
content = _("Your bot {bot_identity} tried to send a message to stream "
"{stream_name}, but that stream does not exist. "
"Click [here]({new_stream_link}) to create it.").format(**arg_dict)
else:
if num_subscribers_for_stream_id(stream.id) > 0:
return
content = _("Your bot {bot_identity} tried to send a message to "
"stream {stream_name}. The stream exists but "
"does not have any subscribers.").format(**arg_dict)
send_rate_limited_pm_notification_to_bot_owner(sender, realm, content)
def validate_stream_name_with_pm_notification(stream_name: str, realm: Realm,
sender: UserProfile) -> Stream:
stream_name = stream_name.strip()
check_stream_name(stream_name)
try:
stream = get_stream(stream_name, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_name=stream_name)
raise StreamDoesNotExistError(escape(stream_name))
return stream
def validate_stream_id_with_pm_notification(stream_id: int, realm: Realm,
sender: UserProfile) -> Stream:
try:
stream = get_stream_by_id_in_realm(stream_id, realm)
send_pm_if_empty_stream(stream, realm, sender)
except Stream.DoesNotExist:
send_pm_if_empty_stream(None, realm, sender, stream_id=stream_id)
raise StreamWithIDDoesNotExistError(stream_id)
return stream
def check_private_message_policy(realm: Realm, sender: UserProfile,
user_profiles: Sequence[UserProfile]) -> None:
if realm.private_message_policy == Realm.PRIVATE_MESSAGE_POLICY_DISABLED:
if sender.is_bot or (len(user_profiles) == 1 and user_profiles[0].is_bot):
# We allow PMs only between users and bots, to avoid
# breaking the tutorial as well as automated
# notifications from system bots to users.
return
raise JsonableError(_("Private messages are disabled in this organization."))
# check_message:
# Returns message ready for sending with do_send_message on success or the error message (string) on error.
def check_message(sender: UserProfile, client: Client, addressee: Addressee,
message_content_raw: str, realm: Optional[Realm]=None, forged: bool=False,
forged_timestamp: Optional[float]=None,
forwarder_user_profile: Optional[UserProfile]=None,
local_id: Optional[str]=None,
sender_queue_id: Optional[str]=None,
widget_content: Optional[str]=None) -> Dict[str, Any]:
stream = None
message_content = message_content_raw.rstrip()
if len(message_content) == 0:
raise JsonableError(_("Message must not be empty"))
if '\x00' in message_content:
raise JsonableError(_("Message must not contain null bytes"))
message_content = truncate_body(message_content)
if realm is None:
realm = sender.realm
if addressee.is_stream():
topic_name = addressee.topic()
topic_name = truncate_topic(topic_name)
stream_name = addressee.stream_name()
stream_id = addressee.stream_id()
if stream_name is not None:
stream = validate_stream_name_with_pm_notification(stream_name, realm, sender)
elif stream_id is not None:
stream = validate_stream_id_with_pm_notification(stream_id, realm, sender)
else:
stream = addressee.stream()
assert stream is not None
recipient = stream.recipient
# This will raise JsonableError if there are problems.
if sender.bot_type != sender.OUTGOING_WEBHOOK_BOT:
access_stream_for_send_message(
sender=sender,
stream=stream,
forwarder_user_profile=forwarder_user_profile)
elif addressee.is_private():
user_profiles = addressee.user_profiles()
mirror_message = client and client.name in ["zephyr_mirror", "irc_mirror",
"jabber_mirror", "JabberMirror"]
check_private_message_policy(realm, sender, user_profiles)
# API Super-users who set the `forged` flag are allowed to
# forge messages sent by any user, so we disable the
# `forwarded_mirror_message` security check in that case.
forwarded_mirror_message = mirror_message and not forged
try:
recipient = recipient_for_user_profiles(user_profiles,
forwarded_mirror_message,
forwarder_user_profile, sender)
except ValidationError as e:
assert isinstance(e.messages[0], str)
raise JsonableError(e.messages[0])
else:
# This is defensive code--Addressee already validates
# the message type.
raise AssertionError("Invalid message type")
message = Message()
message.sender = sender
message.content = message_content
message.recipient = recipient
if addressee.is_stream():
message.set_topic_name(topic_name)
if forged and forged_timestamp is not None:
# Forged messages come with a timestamp
message.date_sent = timestamp_to_datetime(forged_timestamp)
else:
message.date_sent = timezone_now()
message.sending_client = client
# We render messages later in the process.
assert message.rendered_content is None
if client.name == "zephyr_mirror":
id = already_sent_mirrored_message_id(message)
if id is not None:
return {'message': id}
if widget_content is not None:
try:
widget_content = ujson.loads(widget_content)
except Exception:
raise JsonableError(_('Widgets: API programmer sent invalid JSON content'))
try:
check_widget_content(widget_content)
except ValidationError as error:
raise JsonableError(_('Widgets: {error_msg}').format(
error_msg=error.message,
))
return {'message': message, 'stream': stream, 'local_id': local_id,
'sender_queue_id': sender_queue_id, 'realm': realm,
'widget_content': widget_content}
def _internal_prep_message(realm: Realm,
sender: UserProfile,
addressee: Addressee,
content: str) -> Optional[Dict[str, Any]]:
    # Truncate over-long content rather than rejecting the message.
if len(content) > MAX_MESSAGE_LENGTH:
content = content[0:3900] + "\n\n[message was too long and has been truncated]"
    # If we have a stream name, and the stream doesn't exist, we
    # create it here. If we're instead addressing an existing stream
    # by ID, we skip this, as the stream object must already exist.
if addressee.is_stream():
stream_name = addressee.stream_name()
if stream_name is not None:
ensure_stream(realm, stream_name, acting_user=sender)
try:
return check_message(sender, get_client("Internal"), addressee,
content, realm=realm)
except JsonableError as e:
logging.exception("Error queueing internal message by %s: %s", sender.delivery_email, e.msg)
return None
def internal_prep_stream_message(
realm: Realm, sender: UserProfile,
stream: Stream, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
addressee = Addressee.for_stream(stream, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[Dict[str, Any]]:
addressee = Addressee.for_stream_name(stream_name, topic)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_prep_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[Dict[str, Any]]:
addressee = Addressee.for_user_profile(recipient_user)
return _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
def internal_send_private_message(realm: Realm,
sender: UserProfile,
recipient_user: UserProfile,
content: str) -> Optional[int]:
message = internal_prep_private_message(realm, sender, recipient_user, content)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_stream_message(
realm: Realm,
sender: UserProfile,
stream: Stream,
topic: str,
content: str,
email_gateway: bool=False) -> Optional[int]:
message = internal_prep_stream_message(
realm, sender, stream,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message], email_gateway=email_gateway)
return message_ids[0]
def internal_send_stream_message_by_name(
realm: Realm, sender: UserProfile,
stream_name: str, topic: str, content: str,
) -> Optional[int]:
message = internal_prep_stream_message_by_name(
realm, sender, stream_name,
topic, content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def internal_send_huddle_message(realm: Realm, sender: UserProfile, emails: List[str],
content: str) -> Optional[int]:
addressee = Addressee.for_private(emails, realm)
message = _internal_prep_message(
realm=realm,
sender=sender,
addressee=addressee,
content=content,
)
if message is None:
return None
message_ids = do_send_messages([message])
return message_ids[0]
def pick_color(user_profile: UserProfile, subs: Iterable[Subscription]) -> str:
# These colors are shared with the palette in subs.js.
used_colors = [sub.color for sub in subs if sub.active]
available_colors = [s for s in STREAM_ASSIGNMENT_COLORS if s not in used_colors]
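    # e.g. if only the first two palette colors are in use, the third is
    # returned; once every color is taken, the modulo below wraps around
    # deterministically.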
if available_colors:
return available_colors[0]
else:
return STREAM_ASSIGNMENT_COLORS[len(used_colors) % len(STREAM_ASSIGNMENT_COLORS)]
def validate_user_access_to_subscribers(user_profile: Optional[UserProfile],
stream: Stream) -> None:
validate_user_access_to_subscribers_helper(
user_profile,
{"realm_id": stream.realm_id,
"invite_only": stream.invite_only},
# We use a lambda here so that we only compute whether the
# user is subscribed if we have to
lambda user_profile: subscribed_to_stream(user_profile, stream.id))
def validate_user_access_to_subscribers_helper(
user_profile: Optional[UserProfile],
stream_dict: Mapping[str, Any],
check_user_subscribed: Callable[[UserProfile], bool],
) -> None:
if user_profile is None:
raise ValidationError("Missing user to validate access for")
if user_profile.realm_id != stream_dict["realm_id"]:
raise ValidationError("Requesting user not in given realm")
# Guest users can access subscribed public stream's subscribers
if user_profile.is_guest:
if check_user_subscribed(user_profile):
return
        # We could put an AssertionError here; in theory we don't have
        # any code paths that would allow a guest user to access other
        # streams in the first place.
if not user_profile.can_access_public_streams() and not stream_dict["invite_only"]:
raise JsonableError(_("Subscriber data is not available for this stream"))
# Organization administrators can view subscribers for all streams.
if user_profile.is_realm_admin:
return
if (stream_dict["invite_only"] and not check_user_subscribed(user_profile)):
raise JsonableError(_("Unable to retrieve subscribers for private stream"))
def bulk_get_subscriber_user_ids(stream_dicts: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
sub_dict: Mapping[int, bool],
stream_recipient: StreamRecipientMap) -> Dict[int, List[int]]:
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_recipient.populate_with(stream_id=stream_dict["id"],
recipient_id=stream_dict["recipient_id"])
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: sub_dict[stream_dict["id"]],
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
stream_ids = [stream['id'] for stream in target_stream_dicts]
recipient_ids = sorted([
stream_recipient.recipient_id_for(stream_id)
for stream_id in stream_ids
])
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
query = SQL('''
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
INNER JOIN zerver_userprofile ON
zerver_userprofile.id = zerver_subscription.user_profile_id
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_userprofile.is_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
''')
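    # The rows come back as (recipient_id, user_profile_id) pairs ordered
    # by recipient, e.g. [(5, 101), (5, 102), (9, 101)], which the groupby
    # below folds into {stream_id_for_5: [101, 102], stream_id_for_9: [101]}.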
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
recip_to_stream_id = stream_recipient.recipient_to_stream_id_dict()
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result
def get_subscribers_query(stream: Stream, requesting_user: Optional[UserProfile]) -> QuerySet:
# TODO: Make a generic stub for QuerySet
validate_user_access_to_subscribers(requesting_user, stream)
# Note that non-active users may still have "active" subscriptions, because we
# want to be able to easily reactivate them with their old subscriptions. This
# is why the query here has to look at the UserProfile.is_active flag.
subscriptions = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile__is_active=True,
)
return subscriptions
def get_subscriber_emails(stream: Stream,
requesting_user: Optional[UserProfile]=None) -> List[str]:
subscriptions_query = get_subscribers_query(stream, requesting_user)
subscriptions = subscriptions_query.values('user_profile__email')
return [subscription['user_profile__email'] for subscription in subscriptions]
def notify_subscriptions_added(user_profile: UserProfile,
sub_pairs: Iterable[Tuple[Subscription, Stream]],
stream_user_ids: Callable[[Stream], List[int]],
recent_traffic: Dict[int, int],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_added',
'user': user_profile.email,
'names': [stream.name for sub, stream in sub_pairs],
'realm': user_profile.realm.string_id})
sub_dicts = []
for (subscription, stream) in sub_pairs:
sub_dict = stream.to_dict()
for field_name in Subscription.API_FIELDS:
if field_name == "active":
# Skip the "active" field, it's implied by context
continue
sub_dict[field_name] = getattr(subscription, field_name)
sub_dict['in_home_view'] = not subscription.is_muted
sub_dict['email_address'] = encode_email_address(stream, show_sender=True)
sub_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream.id, stream.date_created, recent_traffic)
sub_dict['subscribers'] = stream_user_ids(stream)
sub_dicts.append(sub_dict)
event = dict(type="subscription", op="add",
subscriptions=sub_dicts)
send_event(user_profile.realm, event, [user_profile.id])
def get_peer_user_ids_for_stream_change(stream: Stream,
altered_user_ids: Iterable[int],
subscribed_user_ids: Iterable[int]) -> Set[int]:
if stream.invite_only:
realm_admin_ids = [user.id for user in stream.realm.get_admin_users_and_bots()]
user_ids_to_notify = []
user_ids_to_notify.extend(realm_admin_ids)
user_ids_to_notify.extend(subscribed_user_ids)
return set(user_ids_to_notify) - set(altered_user_ids)
else:
# PUBLIC STREAMS
# We now do "peer_add" or "peer_remove" events even for streams
# users were never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
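        # e.g. subscribing user 7 to a public stream notifies every
        # active non-guest user in the realm except user 7 itself.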
return set(active_non_guest_user_ids(stream.realm_id)) - set(altered_user_ids)
def get_user_ids_for_streams(streams: Iterable[Stream]) -> Dict[int, List[int]]:
stream_ids = [stream.id for stream in streams]
all_subs = get_active_subscriptions_for_stream_ids(stream_ids).filter(
user_profile__is_active=True,
).values(
'recipient__type_id',
'user_profile_id',
).order_by(
'recipient__type_id',
)
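    # Each row is a dict like {'recipient__type_id': <stream_id>,
    # 'user_profile_id': <user_id>}; the groupby below folds these into
    # {stream_id: [user_id, ...]}.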
get_stream_id = itemgetter('recipient__type_id')
all_subscribers_by_stream: Dict[int, List[int]] = defaultdict(list)
for stream_id, rows in itertools.groupby(all_subs, get_stream_id):
user_ids = [row['user_profile_id'] for row in rows]
all_subscribers_by_stream[stream_id] = user_ids
return all_subscribers_by_stream
def get_last_message_id() -> int:
# We generally use this function to populate RealmAuditLog, and
# the max id here is actually systemwide, not per-realm. I
# assume there's some advantage in not filtering by realm.
last_id = Message.objects.aggregate(Max('id'))['id__max']
if last_id is None:
last_id = -1
return last_id
SubT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_add_subscriptions(streams: Iterable[Stream],
users: Iterable[UserProfile],
color_map: Mapping[str, str]={},
from_stream_creation: bool=False,
acting_user: Optional[UserProfile]=None) -> SubT:
users = list(users)
recipients_map: Dict[int, int] = {stream.id: stream.recipient_id for stream in streams}
    recipient_ids: List[int] = list(recipients_map.values())
stream_map: Dict[int, Stream] = {}
for stream in streams:
stream_map[recipients_map[stream.id]] = stream
subs_by_user: Dict[int, List[Subscription]] = defaultdict(list)
all_subs_query = get_stream_subscriptions_for_users(users).select_related('user_profile')
for sub in all_subs_query:
subs_by_user[sub.user_profile_id].append(sub)
realm = users[0].realm
already_subscribed: List[Tuple[UserProfile, Stream]] = []
subs_to_activate: List[Tuple[Subscription, Stream]] = []
new_subs: List[Tuple[UserProfile, int, Stream]] = []
for user_profile in users:
needs_new_sub: Set[int] = set(recipient_ids)
for sub in subs_by_user[user_profile.id]:
if sub.recipient_id in needs_new_sub:
needs_new_sub.remove(sub.recipient_id)
if sub.active:
already_subscribed.append((user_profile, stream_map[sub.recipient_id]))
else:
subs_to_activate.append((sub, stream_map[sub.recipient_id]))
sub.active = True
for recipient_id in needs_new_sub:
new_subs.append((user_profile, recipient_id, stream_map[recipient_id]))
subs_to_add: List[Tuple[Subscription, Stream]] = []
for (user_profile, recipient_id, stream) in new_subs:
if stream.name in color_map:
color = color_map[stream.name]
else:
color = pick_color(user_profile, subs_by_user[user_profile.id])
sub_to_add = Subscription(user_profile=user_profile, active=True,
color=color, recipient_id=recipient_id)
subs_by_user[user_profile.id].append(sub_to_add)
subs_to_add.append((sub_to_add, stream))
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(realm))
Subscription.objects.bulk_create([sub for (sub, stream) in subs_to_add])
sub_ids = [sub.id for (sub, stream) in subs_to_activate]
Subscription.objects.filter(id__in=sub_ids).update(active=True)
occupied_streams_after = list(get_occupied_streams(realm))
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_add:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED,
event_time=event_time))
for (sub, stream) in subs_to_activate:
all_subscription_logs.append(RealmAuditLog(realm=realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_ACTIVATED,
event_time=event_time))
RealmAuditLog.objects.bulk_create(all_subscription_logs)
new_occupied_streams = [stream for stream in
set(occupied_streams_after) - set(occupied_streams_before)
if not stream.invite_only]
if new_occupied_streams and not from_stream_creation:
event: Dict[str, object] = dict(
type="stream",
op="occupy",
streams=[stream.to_dict() for stream in new_occupied_streams],
)
send_event(realm, event, active_user_ids(realm.id))
    # Fetch the subscriber lists for all of these streams up front,
    # since they're used repeatedly in the following code and we want
    # to minimize DB queries.
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def fetch_stream_subscriber_user_ids(stream: Stream) -> List[int]:
if stream.is_in_zephyr_realm and not stream.invite_only:
return []
user_ids = all_subscribers_by_stream[stream.id]
return user_ids
sub_tuples_by_user: Dict[int, List[Tuple[Subscription, Stream]]] = defaultdict(list)
new_streams: Set[Tuple[int, int]] = set()
for (sub, stream) in subs_to_add + subs_to_activate:
sub_tuples_by_user[sub.user_profile.id].append((sub, stream))
new_streams.add((sub.user_profile.id, stream.id))
# We now send several types of events to notify browsers. The
# first batch is notifications to users on invite-only streams
# that the stream exists.
for stream in streams:
if not stream.is_public():
            # Users newly added to invite-only streams need a stream
            # `create` notification: they need the stream to exist
            # before they get the "subscribe" notification, and the
            # `create` event lets them manage the new stream.
            # Realm admins already know about all created private streams.
realm_admin_ids = [user.id for user in realm.get_admin_users_and_bots()]
new_users_ids = [user.id for user in users if (user.id, stream.id) in new_streams and
user.id not in realm_admin_ids]
send_stream_creation_event(stream, new_users_ids)
stream_ids = {stream.id for stream in streams}
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
# The second batch is events for the users themselves that they
# were subscribed to the new streams.
for user_profile in users:
if len(sub_tuples_by_user[user_profile.id]) == 0:
continue
sub_pairs = sub_tuples_by_user[user_profile.id]
notify_subscriptions_added(user_profile, sub_pairs, fetch_stream_subscriber_user_ids,
recent_traffic)
    # The third batch is events for other users who are tracking the
# subscribers lists of streams in their browser; everyone for
# public streams and only existing subscribers for private streams.
for stream in streams:
if stream.is_in_zephyr_realm and not stream.invite_only:
continue
new_user_ids = [user.id for user in users if (user.id, stream.id) in new_streams]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=new_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for new_user_id in new_user_ids:
event = dict(type="subscription", op="peer_add",
stream_id=stream.id,
user_id=new_user_id)
send_event(realm, event, peer_user_ids)
return ([(user_profile, stream) for (user_profile, recipient_id, stream) in new_subs] +
[(sub.user_profile, stream) for (sub, stream) in subs_to_activate],
already_subscribed)
def get_available_notification_sounds() -> List[str]:
notification_sounds_path = static_path('audio/notification_sounds')
available_notification_sounds = []
for file_name in os.listdir(notification_sounds_path):
root, ext = os.path.splitext(file_name)
if '.' in root: # nocoverage
# Exclude e.g. zulip.abcd1234.ogg (generated by production hash-naming)
# to avoid spurious duplicates.
continue
if ext == '.ogg':
available_notification_sounds.append(root)
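    # e.g. a directory containing zulip.ogg, ding.wav, and
    # zulip.abcd1234.ogg yields just ['zulip']; only plain .ogg files
    # are offered as notification sounds.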
return available_notification_sounds
def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream],
no_log: bool=False) -> None:
if not no_log:
log_event({'type': 'subscription_removed',
'user': user_profile.email,
'names': [stream.name for stream in streams],
'realm': user_profile.realm.string_id})
payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams]
event = dict(type="subscription", op="remove",
subscriptions=payload)
send_event(user_profile.realm, event, [user_profile.id])
SubAndRemovedT = Tuple[List[Tuple[UserProfile, Stream]], List[Tuple[UserProfile, Stream]]]
def bulk_remove_subscriptions(users: Iterable[UserProfile],
streams: Iterable[Stream],
acting_client: Client,
acting_user: Optional[UserProfile]=None) -> SubAndRemovedT:
users = list(users)
streams = list(streams)
stream_dict = {stream.id: stream for stream in streams}
existing_subs_by_user = get_bulk_stream_subscriber_info(users, stream_dict)
def get_non_subscribed_tups() -> List[Tuple[UserProfile, Stream]]:
stream_ids = {stream.id for stream in streams}
not_subscribed: List[Tuple[UserProfile, Stream]] = []
for user_profile in users:
user_sub_stream_info = existing_subs_by_user[user_profile.id]
subscribed_stream_ids = {
stream.id
for (sub, stream) in user_sub_stream_info
}
not_subscribed_stream_ids = stream_ids - subscribed_stream_ids
for stream_id in not_subscribed_stream_ids:
stream = stream_dict[stream_id]
not_subscribed.append((user_profile, stream))
return not_subscribed
not_subscribed = get_non_subscribed_tups()
subs_to_deactivate: List[Tuple[Subscription, Stream]] = []
sub_ids_to_deactivate: List[int] = []
# This loop just flattens out our data into big lists for
# bulk operations.
for tup_list in existing_subs_by_user.values():
for (sub, stream) in tup_list:
subs_to_deactivate.append((sub, stream))
sub_ids_to_deactivate.append(sub.id)
our_realm = users[0].realm
    # TODO: XXX: This transaction really needs to be done at the serializable
    # transaction isolation level.
with transaction.atomic():
occupied_streams_before = list(get_occupied_streams(our_realm))
Subscription.objects.filter(
id__in=sub_ids_to_deactivate,
        ).update(active=False)
occupied_streams_after = list(get_occupied_streams(our_realm))
# Log Subscription Activities in RealmAuditLog
event_time = timezone_now()
event_last_message_id = get_last_message_id()
    all_subscription_logs: List[RealmAuditLog] = []
for (sub, stream) in subs_to_deactivate:
all_subscription_logs.append(RealmAuditLog(realm=sub.user_profile.realm,
acting_user=acting_user,
modified_user=sub.user_profile,
modified_stream=stream,
event_last_message_id=event_last_message_id,
event_type=RealmAuditLog.SUBSCRIPTION_DEACTIVATED,
event_time=event_time))
# Now since we have all log objects generated we can do a bulk insert
RealmAuditLog.objects.bulk_create(all_subscription_logs)
altered_user_dict: Dict[int, List[UserProfile]] = defaultdict(list)
streams_by_user: Dict[int, List[Stream]] = defaultdict(list)
for (sub, stream) in subs_to_deactivate:
streams_by_user[sub.user_profile_id].append(stream)
altered_user_dict[stream.id].append(sub.user_profile)
for user_profile in users:
if len(streams_by_user[user_profile.id]) == 0:
continue
notify_subscriptions_removed(user_profile, streams_by_user[user_profile.id])
event = {'type': 'mark_stream_messages_as_read',
'client_id': acting_client.id,
'user_profile_id': user_profile.id,
'stream_ids': [stream.id for stream in streams]}
queue_json_publish("deferred_work", event)
all_subscribers_by_stream = get_user_ids_for_streams(streams=streams)
def send_peer_remove_event(stream: Stream) -> None:
if stream.is_in_zephyr_realm and not stream.invite_only:
return
altered_users = altered_user_dict[stream.id]
altered_user_ids = [u.id for u in altered_users]
subscribed_user_ids = all_subscribers_by_stream[stream.id]
peer_user_ids = get_peer_user_ids_for_stream_change(
stream=stream,
altered_user_ids=altered_user_ids,
subscribed_user_ids=subscribed_user_ids,
)
if peer_user_ids:
for removed_user in altered_users:
event = dict(type="subscription",
op="peer_remove",
stream_id=stream.id,
user_id=removed_user.id)
send_event(our_realm, event, peer_user_ids)
for stream in streams:
send_peer_remove_event(stream=stream)
    new_vacant_streams = list(set(occupied_streams_before) -
                              set(occupied_streams_after))
new_vacant_private_streams = [stream for stream in new_vacant_streams
if stream.invite_only]
new_vacant_public_streams = [stream for stream in new_vacant_streams
if not stream.invite_only]
if new_vacant_public_streams:
event = dict(type="stream", op="vacate",
streams=[stream.to_dict()
for stream in new_vacant_public_streams])
send_event(our_realm, event, active_user_ids(our_realm.id))
if new_vacant_private_streams:
# Deactivate any newly-vacant private streams
for stream in new_vacant_private_streams:
do_deactivate_stream(stream, acting_user=acting_user)
return (
[(sub.user_profile, stream) for (sub, stream) in subs_to_deactivate],
not_subscribed,
)
def log_subscription_property_change(user_email: str, stream_name: str, property: str,
value: Any) -> None:
event = {'type': 'subscription_property',
'property': property,
'user': user_email,
'stream_name': stream_name,
'value': value}
log_event(event)
def do_change_subscription_property(user_profile: UserProfile, sub: Subscription,
stream: Stream, property_name: str, value: Any,
) -> None:
database_property_name = property_name
event_property_name = property_name
database_value = value
event_value = value
    # For this property, is_muted is used in the database, but
    # in_home_view in the API, since we haven't yet migrated the
    # events API to the new name.
if property_name == "in_home_view":
database_property_name = "is_muted"
database_value = not value
if property_name == "is_muted":
event_property_name = "in_home_view"
event_value = not value
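    # e.g. property_name="in_home_view" with value=False is stored as
    # is_muted=True in the database, while the outgoing event still
    # reports in_home_view=False to clients.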
setattr(sub, database_property_name, database_value)
sub.save(update_fields=[database_property_name])
log_subscription_property_change(user_profile.email, stream.name,
database_property_name, database_value)
event = dict(type="subscription",
op="update",
email=user_profile.email,
property=event_property_name,
value=event_value,
stream_id=stream.id,
name=stream.name)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_password(user_profile: UserProfile, password: str, commit: bool=True) -> None:
user_profile.set_password(password)
if commit:
user_profile.save(update_fields=["password"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile, event_type=RealmAuditLog.USER_PASSWORD_CHANGED,
event_time=event_time)
def do_change_full_name(user_profile: UserProfile, full_name: str,
acting_user: Optional[UserProfile]) -> None:
old_name = user_profile.full_name
user_profile.full_name = full_name
user_profile.save(update_fields=["full_name"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_FULL_NAME_CHANGED,
event_time=event_time, extra_data=old_name)
payload = dict(user_id=user_profile.id,
full_name=user_profile.full_name)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot', op='update', bot=payload),
bot_owner_user_ids(user_profile))
def check_change_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> str:
new_full_name = check_full_name(full_name_raw)
do_change_full_name(user_profile, new_full_name, acting_user)
return new_full_name
def check_change_bot_full_name(user_profile: UserProfile, full_name_raw: str,
acting_user: UserProfile) -> None:
new_full_name = check_full_name(full_name_raw)
if new_full_name == user_profile.full_name:
        # Our web app will try to patch full_name even if the user
        # didn't modify the name in the form. We just silently ignore
        # those situations.
return
check_bot_name_available(
realm_id=user_profile.realm_id,
full_name=new_full_name,
)
do_change_full_name(user_profile, new_full_name, acting_user)
def do_change_bot_owner(user_profile: UserProfile, bot_owner: UserProfile,
acting_user: UserProfile) -> None:
previous_owner = user_profile.bot_owner
user_profile.bot_owner = bot_owner
user_profile.save() # Can't use update_fields because of how the foreign key works.
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_BOT_OWNER_CHANGED,
event_time=event_time)
update_users = bot_owner_user_ids(user_profile)
if previous_owner and not previous_owner.is_realm_admin:
send_event(user_profile.realm,
dict(type='realm_bot',
op="delete",
bot=dict(
user_id=user_profile.id,
)),
{previous_owner.id})
# Do not send update event for previous bot owner.
update_users = update_users - {previous_owner.id}
# Notify the new owner that the bot has been added.
if not bot_owner.is_realm_admin:
add_event = created_bot_event(user_profile)
send_event(user_profile.realm, add_event, {bot_owner.id})
# Do not send update event for bot_owner.
update_users = update_users - {bot_owner.id}
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
owner_id=user_profile.bot_owner.id,
)),
update_users)
# Since `bot_owner_id` is included in the user profile dict we need
# to update the users dict with the new bot owner id
event: Dict[str, Any] = dict(
type="realm_user",
op="update",
person=dict(
user_id=user_profile.id,
bot_owner_id=user_profile.bot_owner.id,
),
)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_tos_version(user_profile: UserProfile, tos_version: str) -> None:
user_profile.tos_version = tos_version
user_profile.save(update_fields=["tos_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=user_profile,
modified_user=user_profile,
event_type=RealmAuditLog.USER_TOS_VERSION_CHANGED,
event_time=event_time)
def do_regenerate_api_key(user_profile: UserProfile, acting_user: UserProfile) -> str:
old_api_key = user_profile.api_key
new_api_key = generate_api_key()
user_profile.api_key = new_api_key
user_profile.save(update_fields=["api_key"])
# We need to explicitly delete the old API key from our caches,
# because the on-save handler for flushing the UserProfile object
# in zerver/lib/cache.py only has access to the new API key.
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, acting_user=acting_user,
modified_user=user_profile, event_type=RealmAuditLog.USER_API_KEY_CHANGED,
event_time=event_time)
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
api_key=new_api_key,
)),
bot_owner_user_ids(user_profile))
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
return new_api_key
def notify_avatar_url_change(user_profile: UserProfile) -> None:
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
avatar_url=avatar_url(user_profile),
)),
bot_owner_user_ids(user_profile))
payload = dict(
avatar_source=user_profile.avatar_source,
avatar_url=avatar_url(user_profile),
avatar_url_medium=avatar_url(user_profile, medium=True),
avatar_version=user_profile.avatar_version,
        # Even clients using client_gravatar don't need the email,
        # since we send the computed avatar URLs in this event anyway.
user_id=user_profile.id,
)
send_event(user_profile.realm,
dict(type='realm_user',
op='update',
person=payload),
active_user_ids(user_profile.realm_id))
def do_change_avatar_fields(user_profile: UserProfile, avatar_source: str,
skip_notify: bool=False, acting_user: Optional[UserProfile]=None) -> None:
user_profile.avatar_source = avatar_source
user_profile.avatar_version += 1
user_profile.save(update_fields=["avatar_source", "avatar_version"])
event_time = timezone_now()
RealmAuditLog.objects.create(realm=user_profile.realm, modified_user=user_profile,
event_type=RealmAuditLog.USER_AVATAR_SOURCE_CHANGED,
extra_data={'avatar_source': avatar_source},
event_time=event_time, acting_user=acting_user)
if not skip_notify:
notify_avatar_url_change(user_profile)
def do_delete_avatar_image(user: UserProfile, acting_user: Optional[UserProfile]=None) -> None:
do_change_avatar_fields(user, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=acting_user)
delete_avatar_image(user)
def do_change_icon_source(realm: Realm, icon_source: str, log: bool=True) -> None:
realm.icon_source = icon_source
realm.icon_version += 1
realm.save(update_fields=["icon_source", "icon_version"])
if log:
log_event({'type': 'realm_change_icon',
'realm': realm.string_id,
'icon_source': icon_source})
send_event(realm,
dict(type='realm',
op='update_dict',
property="icon",
data=dict(icon_source=realm.icon_source,
icon_url=realm_icon_url(realm))),
active_user_ids(realm.id))
def do_change_logo_source(realm: Realm, logo_source: str, night: bool, acting_user: Optional[UserProfile]=None) -> None:
if not night:
realm.logo_source = logo_source
realm.logo_version += 1
realm.save(update_fields=["logo_source", "logo_version"])
else:
realm.night_logo_source = logo_source
realm.night_logo_version += 1
realm.save(update_fields=["night_logo_source", "night_logo_version"])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_LOGO_CHANGED,
realm=realm, event_time=timezone_now(),
acting_user=acting_user)
event = dict(type='realm',
op='update_dict',
property="night_logo" if night else "logo",
data=get_realm_logo_data(realm, night))
send_event(realm, event, active_user_ids(realm.id))
def do_change_plan_type(realm: Realm, plan_type: int) -> None:
old_value = realm.plan_type
realm.plan_type = plan_type
realm.save(update_fields=['plan_type'])
RealmAuditLog.objects.create(event_type=RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
realm=realm, event_time=timezone_now(),
extra_data={'old_value': old_value, 'new_value': plan_type})
if plan_type == Realm.STANDARD:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.SELF_HOSTED:
realm.max_invites = None # type: ignore[assignment] # Apparent mypy bug with Optional[int] setter.
realm.message_visibility_limit = None
realm.upload_quota_gb = None
elif plan_type == Realm.STANDARD_FREE:
realm.max_invites = Realm.INVITES_STANDARD_REALM_DAILY_MAX
realm.message_visibility_limit = None
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_STANDARD
elif plan_type == Realm.LIMITED:
realm.max_invites = settings.INVITES_DEFAULT_REALM_DAILY_MAX
realm.message_visibility_limit = Realm.MESSAGE_VISIBILITY_LIMITED
realm.upload_quota_gb = Realm.UPLOAD_QUOTA_LIMITED
else:
raise AssertionError("Invalid plan type")
update_first_visible_message_id(realm)
realm.save(update_fields=['_max_invites', 'message_visibility_limit', 'upload_quota_gb'])
event = {'type': 'realm', 'op': 'update', 'property': 'plan_type', 'value': plan_type,
'extra_data': {'upload_quota': realm.upload_quota_bytes()}}
send_event(realm, event, active_user_ids(realm.id))
def do_change_default_sending_stream(user_profile: UserProfile, stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_sending_stream = stream
user_profile.save(update_fields=['default_sending_stream'])
if log:
log_event({'type': 'user_change_default_sending_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_sending_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_events_register_stream(user_profile: UserProfile,
stream: Optional[Stream],
log: bool=True) -> None:
user_profile.default_events_register_stream = stream
user_profile.save(update_fields=['default_events_register_stream'])
if log:
log_event({'type': 'user_change_default_events_register_stream',
'user': user_profile.email,
'stream': str(stream)})
if user_profile.is_bot:
if stream:
stream_name: Optional[str] = stream.name
else:
stream_name = None
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_events_register_stream=stream_name,
)),
bot_owner_user_ids(user_profile))
def do_change_default_all_public_streams(user_profile: UserProfile, value: bool,
log: bool=True) -> None:
user_profile.default_all_public_streams = value
user_profile.save(update_fields=['default_all_public_streams'])
if log:
log_event({'type': 'user_change_default_all_public_streams',
'user': user_profile.email,
'value': str(value)})
if user_profile.is_bot:
send_event(user_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=user_profile.id,
default_all_public_streams=user_profile.default_all_public_streams,
)),
bot_owner_user_ids(user_profile))
def do_change_user_role(user_profile: UserProfile, value: int, acting_user: Optional[UserProfile]=None) -> None:
old_value = user_profile.role
user_profile.role = value
user_profile.save(update_fields=["role"])
RealmAuditLog.objects.create(
realm=user_profile.realm, modified_user=user_profile, acting_user=acting_user,
event_type=RealmAuditLog.USER_ROLE_CHANGED, event_time=timezone_now(),
extra_data=ujson.dumps({
RealmAuditLog.OLD_VALUE: old_value,
RealmAuditLog.NEW_VALUE: value,
RealmAuditLog.ROLE_COUNT: realm_user_count_by_role(user_profile.realm),
}))
event = dict(type="realm_user", op="update",
person=dict(user_id=user_profile.id, role=user_profile.role))
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def do_change_is_api_super_user(user_profile: UserProfile, value: bool) -> None:
user_profile.is_api_super_user = value
user_profile.save(update_fields=["is_api_super_user"])
def do_change_stream_invite_only(stream: Stream, invite_only: bool,
history_public_to_subscribers: Optional[bool]=None) -> None:
history_public_to_subscribers = get_default_value_for_history_public_to_subscribers(
stream.realm,
invite_only,
history_public_to_subscribers,
)
stream.invite_only = invite_only
stream.history_public_to_subscribers = history_public_to_subscribers
stream.save(update_fields=['invite_only', 'history_public_to_subscribers'])
event = dict(
op="update",
type="stream",
property="invite_only",
value=invite_only,
history_public_to_subscribers=history_public_to_subscribers,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_web_public(stream: Stream, is_web_public: bool) -> None:
stream.is_web_public = is_web_public
stream.save(update_fields=['is_web_public'])
def do_change_stream_post_policy(stream: Stream, stream_post_policy: int) -> None:
stream.stream_post_policy = stream_post_policy
stream.save(update_fields=['stream_post_policy'])
event = dict(
op="update",
type="stream",
property="stream_post_policy",
value=stream_post_policy,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
# Backwards-compatibility code: We removed the
# is_announcement_only property in early 2020, but we send a
# duplicate event for legacy mobile clients that might want the
# data.
event = dict(
op="update",
type="stream",
property="is_announcement_only",
value=stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_rename_stream(stream: Stream,
new_name: str,
user_profile: UserProfile,
log: bool=True) -> Dict[str, str]:
old_name = stream.name
stream.name = new_name
stream.save(update_fields=["name"])
if log:
log_event({'type': 'stream_name_change',
'realm': stream.realm.string_id,
'new_name': new_name})
recipient_id = stream.recipient_id
messages = Message.objects.filter(recipient_id=recipient_id).only("id")
# Update the display recipient and stream, which are easy single
# items to set.
old_cache_key = get_stream_cache_key(old_name, stream.realm_id)
new_cache_key = get_stream_cache_key(stream.name, stream.realm_id)
if old_cache_key != new_cache_key:
cache_delete(old_cache_key)
cache_set(new_cache_key, stream)
cache_set(display_recipient_cache_key(recipient_id), stream.name)
# Delete cache entries for everything else, which is cheaper and
# clearer than trying to set them. display_recipient is the out of
# date field in all cases.
cache_delete_many(
to_dict_cache_key_id(message.id) for message in messages)
new_email = encode_email_address(stream, show_sender=True)
# We will tell our users to essentially
# update stream.name = new_name where name = old_name
# and update stream.email = new_email where name = old_name.
# We could optimize this by trying to send one message, but the
# client code really wants one property update at a time, and
# updating stream names is a pretty infrequent operation.
# More importantly, we want to key these updates by id, not name,
# since id is the immutable primary key, and obviously name is not.
data_updates = [
['email_address', new_email],
['name', new_name],
]
for property, value in data_updates:
event = dict(
op="update",
type="stream",
property=property,
value=value,
stream_id=stream.id,
name=old_name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
sender = get_system_bot(settings.NOTIFICATION_BOT)
with override_language(stream.realm.default_language):
internal_send_stream_message(
stream.realm,
sender,
stream,
Realm.STREAM_EVENTS_NOTIFICATION_TOPIC,
_('{user_name} renamed stream {old_stream_name} to {new_stream_name}.').format(
user_name=f"@_**{user_profile.full_name}|{user_profile.id}**",
old_stream_name=f"**{old_name}**",
new_stream_name=f"**{new_name}**",
),
)
    # Even though the token doesn't change, the web client needs to update the
    # email forwarding address it displays for the newly-renamed stream.
return {"email_address": new_email}
def do_change_stream_description(stream: Stream, new_description: str) -> None:
stream.description = new_description
stream.rendered_description = render_stream_description(new_description)
stream.save(update_fields=['description', 'rendered_description'])
event = dict(
type='stream',
op='update',
property='description',
name=stream.name,
stream_id=stream.id,
value=new_description,
rendered_description=stream.rendered_description,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_change_stream_message_retention_days(stream: Stream, message_retention_days: Optional[int]=None) -> None:
stream.message_retention_days = message_retention_days
stream.save(update_fields=['message_retention_days'])
event = dict(
op="update",
type="stream",
property="message_retention_days",
value=message_retention_days,
stream_id=stream.id,
name=stream.name,
)
send_event(stream.realm, event, can_access_stream_user_ids(stream))
def do_create_realm(string_id: str, name: str,
emails_restricted_to_domains: Optional[bool]=None) -> Realm:
if Realm.objects.filter(string_id=string_id).exists():
raise AssertionError(f"Realm {string_id} already exists!")
if not server_initialized():
logging.info("Server not yet initialized. Creating the internal realm first.")
create_internal_realm()
kwargs: Dict[str, Any] = {}
if emails_restricted_to_domains is not None:
kwargs['emails_restricted_to_domains'] = emails_restricted_to_domains
realm = Realm(string_id=string_id, name=name, **kwargs)
realm.save()
notifications_stream = ensure_stream(
realm, Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
stream_description="Everyone is added to this stream by default. Welcome! :octopus:", acting_user=None)
realm.notifications_stream = notifications_stream
DefaultStream.objects.create(stream=notifications_stream, realm=realm)
signup_notifications_stream = ensure_stream(
realm, Realm.INITIAL_PRIVATE_STREAM_NAME, invite_only=True,
stream_description="A private stream for core team members.", acting_user=None)
realm.signup_notifications_stream = signup_notifications_stream
realm.save(update_fields=['notifications_stream', 'signup_notifications_stream'])
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
log_event({"type": "realm_created",
"string_id": string_id,
"emails_restricted_to_domains": emails_restricted_to_domains})
sender = get_system_bot(settings.NOTIFICATION_BOT)
admin_realm = sender.realm
with override_language(admin_realm.default_language):
signup_message = _("Signups enabled")
try:
signups_stream = get_signups_stream(admin_realm)
topic = realm.display_subdomain
internal_send_stream_message(
admin_realm,
sender,
signups_stream,
topic,
signup_message,
)
except Stream.DoesNotExist:
        # If the signups stream hasn't been created in the admin
        # realm, don't auto-create it to send to it; just do nothing.
pass
return realm
def do_change_notification_settings(user_profile: UserProfile, name: str,
value: Union[bool, int, str], log: bool=True) -> None:
notification_setting_type = UserProfile.notification_setting_types[name]
assert isinstance(value, notification_setting_type), (
f'Cannot update {name}: {value} is not an instance of {notification_setting_type}')
setattr(user_profile, name, value)
if name == 'enable_digest_emails' and not value:
clear_scheduled_emails([user_profile.id], ScheduledEmail.DIGEST)
user_profile.save(update_fields=[name])
event = {'type': 'update_global_notifications',
'user': user_profile.email,
'notification_name': name,
'setting': value}
if log:
log_event(event)
send_event(user_profile.realm, event, [user_profile.id])
def do_change_enter_sends(user_profile: UserProfile, enter_sends: bool) -> None:
user_profile.enter_sends = enter_sends
user_profile.save(update_fields=["enter_sends"])
def do_set_user_display_setting(user_profile: UserProfile,
setting_name: str,
setting_value: Union[bool, str, int]) -> None:
property_type = UserProfile.property_types[setting_name]
assert isinstance(setting_value, property_type)
setattr(user_profile, setting_name, setting_value)
user_profile.save(update_fields=[setting_name])
event = {'type': 'update_display_settings',
'user': user_profile.email,
'setting_name': setting_name,
'setting': setting_value}
if setting_name == "default_language":
assert isinstance(setting_value, str)
event['language_name'] = get_language_name(setting_value)
send_event(user_profile.realm, event, [user_profile.id])
# Updates to the timezone display setting are sent to all users
if setting_name == "timezone":
payload = dict(email=user_profile.email,
user_id=user_profile.id,
timezone=user_profile.timezone)
send_event(user_profile.realm,
dict(type='realm_user', op='update', person=payload),
active_user_ids(user_profile.realm_id))
def lookup_default_stream_groups(default_stream_group_names: List[str],
realm: Realm) -> List[DefaultStreamGroup]:
default_stream_groups = []
for group_name in default_stream_group_names:
try:
default_stream_group = DefaultStreamGroup.objects.get(
name=group_name, realm=realm)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_('Invalid default stream group {}').format(group_name))
default_stream_groups.append(default_stream_group)
return default_stream_groups
def notify_default_streams(realm: Realm) -> None:
event = dict(
type="default_streams",
default_streams=streams_to_dicts_sorted(get_default_streams_for_realm(realm.id)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def notify_default_stream_groups(realm: Realm) -> None:
event = dict(
type="default_stream_groups",
default_stream_groups=default_stream_groups_to_dicts_sorted(get_default_stream_groups(realm)),
)
send_event(realm, event, active_non_guest_user_ids(realm.id))
def do_add_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
if not DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).exists():
DefaultStream.objects.create(realm_id=realm_id, stream_id=stream_id)
notify_default_streams(stream.realm)
def do_remove_default_stream(stream: Stream) -> None:
realm_id = stream.realm_id
stream_id = stream.id
DefaultStream.objects.filter(realm_id=realm_id, stream_id=stream_id).delete()
notify_default_streams(stream.realm)
def do_create_default_stream_group(realm: Realm, group_name: str,
description: str, streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group_name))
check_default_stream_group_name(group_name)
(group, created) = DefaultStreamGroup.objects.get_or_create(
name=group_name, realm=realm, description=description)
if not created:
raise JsonableError(_(
"Default stream group '{group_name}' already exists",
).format(group_name=group_name))
group.streams.set(streams)
notify_default_stream_groups(realm)
def do_add_streams_to_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
default_streams = get_default_streams_for_realm(realm.id)
for stream in streams:
if stream in default_streams:
raise JsonableError(_(
"'{stream_name}' is a default stream and cannot be added to '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
if stream in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is already present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.add(stream)
group.save()
notify_default_stream_groups(realm)
def do_remove_streams_from_default_stream_group(realm: Realm, group: DefaultStreamGroup,
streams: List[Stream]) -> None:
for stream in streams:
if stream not in group.streams.all():
raise JsonableError(_(
"Stream '{stream_name}' is not present in default stream group '{group_name}'",
).format(stream_name=stream.name, group_name=group.name))
group.streams.remove(stream)
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_name(realm: Realm, group: DefaultStreamGroup,
new_group_name: str) -> None:
if group.name == new_group_name:
raise JsonableError(_("This default stream group is already named '{}'").format(new_group_name))
if DefaultStreamGroup.objects.filter(name=new_group_name, realm=realm).exists():
raise JsonableError(_("Default stream group '{}' already exists").format(new_group_name))
group.name = new_group_name
group.save()
notify_default_stream_groups(realm)
def do_change_default_stream_group_description(realm: Realm, group: DefaultStreamGroup,
new_description: str) -> None:
group.description = new_description
group.save()
notify_default_stream_groups(realm)
def do_remove_default_stream_group(realm: Realm, group: DefaultStreamGroup) -> None:
group.delete()
notify_default_stream_groups(realm)
def get_default_streams_for_realm(realm_id: int) -> List[Stream]:
return [default.stream for default in
DefaultStream.objects.select_related().filter(realm_id=realm_id)]
def get_default_subs(user_profile: UserProfile) -> List[Stream]:
# Right now default streams are realm-wide. This wrapper gives us flexibility
# to some day further customize how we set up default streams for new users.
return get_default_streams_for_realm(user_profile.realm_id)
# Returns default streams in a JSON-serializable format.
def streams_to_dicts_sorted(streams: List[Stream]) -> List[Dict[str, Any]]:
return sorted([stream.to_dict() for stream in streams], key=lambda elt: elt["name"])
def default_stream_groups_to_dicts_sorted(groups: List[DefaultStreamGroup]) -> List[Dict[str, Any]]:
return sorted([group.to_dict() for group in groups], key=lambda elt: elt["name"])
def do_update_user_activity_interval(user_profile: UserProfile,
log_time: datetime.datetime) -> None:
effective_end = log_time + UserActivityInterval.MIN_INTERVAL_LENGTH
    # This code isn't perfect, because with various races we might end
    # up creating overlapping intervals; that shouldn't happen
    # often, and can be corrected for in post-processing.
try:
last = UserActivityInterval.objects.filter(user_profile=user_profile).order_by("-end")[0]
# There are two ways our intervals could overlap:
# (1) The start of the new interval could be inside the old interval
# (2) The end of the new interval could be inside the old interval
# In either case, we just extend the old interval to include the new interval.
if ((log_time <= last.end and log_time >= last.start) or
(effective_end <= last.end and effective_end >= last.start)):
last.end = max(last.end, effective_end)
last.start = min(last.start, log_time)
last.save(update_fields=["start", "end"])
return
except IndexError:
pass
# Otherwise, the intervals don't overlap, so we should make a new one
UserActivityInterval.objects.create(user_profile=user_profile, start=log_time,
end=effective_end)
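# Illustrative sketch, not part of the original module: how the overlap
# check above merges two nearby activity pings into one interval. The
# 15-minute value is an assumption standing in for the
# UserActivityInterval.MIN_INTERVAL_LENGTH model constant.
def _example_interval_merge_sketch() -> None:
    min_interval = datetime.timedelta(minutes=15)  # assumed constant
    first_ping = datetime.datetime(2020, 1, 1, 12, 0, 0)
    second_ping = first_ping + datetime.timedelta(seconds=30)
    start, end = first_ping, first_ping + min_interval
    # The second ping starts inside [start, end] (case 1 above), so the
    # existing interval is extended rather than a new row created.
    assert start <= second_ping <= end
    end = max(end, second_ping + min_interval)
    assert end == second_ping + min_interval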
@statsd_increment('user_activity')
def do_update_user_activity(user_profile_id: int,
client_id: int,
query: str,
count: int,
log_time: datetime.datetime) -> None:
(activity, created) = UserActivity.objects.get_or_create(
        user_profile_id=user_profile_id,
        client_id=client_id,
        query=query,
defaults={'last_visit': log_time, 'count': count})
if not created:
activity.count += count
activity.last_visit = log_time
activity.save(update_fields=["last_visit", "count"])
def send_presence_changed(user_profile: UserProfile, presence: UserPresence) -> None:
presence_dict = presence.to_dict()
event = dict(type="presence",
email=user_profile.email,
user_id=user_profile.id,
server_timestamp=time.time(),
presence={presence_dict['client']: presence_dict})
send_event(user_profile.realm, event, active_user_ids(user_profile.realm_id))
def consolidate_client(client: Client) -> Client:
if client.name in ['ZulipDesktop']:
return get_client('website')
else:
return client
@statsd_increment('user_presence')
def do_update_user_presence(user_profile: UserProfile,
client: Client,
log_time: datetime.datetime,
status: int) -> None:
client = consolidate_client(client)
defaults = dict(
timestamp=log_time,
status=status,
realm_id=user_profile.realm_id,
)
(presence, created) = UserPresence.objects.get_or_create(
        user_profile=user_profile,
        client=client,
        defaults=defaults,
)
stale_status = (log_time - presence.timestamp) > datetime.timedelta(minutes=1, seconds=10)
was_idle = presence.status == UserPresence.IDLE
became_online = (status == UserPresence.ACTIVE) and (stale_status or was_idle)
if not created and stale_status or was_idle or status == presence.status:
        # This block tries to include "status" in update_fields only when
        # it actually changed, to avoid flushing the UserPresence cache
        # when the data a client would receive hasn't actually changed
        # (see the UserPresence post_save hook for details).
presence.timestamp = log_time
update_fields = ["timestamp"]
if presence.status != status:
presence.status = status
update_fields.append("status")
presence.save(update_fields=update_fields)
if not user_profile.realm.presence_disabled and (created or became_online):
# Push event to all users in the realm so they see the new user
# appear in the presence list immediately, or the newly online
        # user without delay. Note that we won't send an update here for a
        # plain timestamp refresh: clients ping us regularly for realm-wide
        # presence data, and those responses carry recent timestamps, so
        # clients won't incorrectly conclude that active users
        # have gone idle. If we were more aggressive in this function about
        # sending timestamp updates, we could eliminate the ping responses, but
        # that's not a high priority for now, considering that most of our non-MIT
        # realms are pretty small.
send_presence_changed(user_profile, presence)
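# Illustrative sketch, not part of the original module: the staleness
# window above is 70 seconds, so a presence ping two minutes after the
# last recorded timestamp counts as stale, and an ACTIVE status then
# qualifies as having come online.
def _example_presence_staleness_sketch() -> None:
    last_timestamp = datetime.datetime(2020, 1, 1, 12, 0, 0)
    log_time = last_timestamp + datetime.timedelta(minutes=2)
    stale_status = (log_time - last_timestamp) > datetime.timedelta(minutes=1, seconds=10)
    assert stale_status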
def update_user_activity_interval(user_profile: UserProfile, log_time: datetime.datetime) -> None:
event = {'user_profile_id': user_profile.id,
'time': datetime_to_timestamp(log_time)}
queue_json_publish("user_activity_interval", event)
def update_user_presence(user_profile: UserProfile, client: Client, log_time: datetime.datetime,
status: int, new_user_input: bool) -> None:
event = {'user_profile_id': user_profile.id,
'status': status,
'time': datetime_to_timestamp(log_time),
'client': client.name}
queue_json_publish("user_presence", event)
if new_user_input:
update_user_activity_interval(user_profile, log_time)
def do_update_user_status(user_profile: UserProfile,
away: Optional[bool],
status_text: Optional[str],
client_id: int) -> None:
if away:
status = UserStatus.AWAY
else:
status = UserStatus.NORMAL
realm = user_profile.realm
update_user_status(
user_profile_id=user_profile.id,
status=status,
status_text=status_text,
client_id=client_id,
)
event = dict(
type='user_status',
user_id=user_profile.id,
)
if away is not None:
event['away'] = away
if status_text is not None:
event['status_text'] = status_text
send_event(realm, event, active_user_ids(realm.id))
def do_mark_all_as_read(user_profile: UserProfile, client: Client) -> int:
log_statsd_event('bankruptcy')
all_push_message_ids = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list("message_id", flat=True)[0:10000]
do_clear_mobile_push_notifications_for_ids([user_profile.id], all_push_message_ids)
msgs = UserMessage.objects.filter(
user_profile=user_profile,
).extra(
where=[UserMessage.where_unread()],
)
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=[], # we don't send messages, since the client reloads anyway
all=True,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
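# Illustrative sketch, not part of the original module: the min(1, count)
# increment above records at most one "interaction" per call, however
# many messages were actually marked as read.
def _example_interaction_increment_sketch() -> None:
    assert min(1, 0) == 0    # nothing was read, so no interaction is logged
    assert min(1, 250) == 1  # a bulk read still counts as one interaction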
def do_mark_stream_messages_as_read(user_profile: UserProfile,
client: Client,
stream: Stream,
topic_name: Optional[str]=None) -> int:
log_statsd_event('mark_stream_as_read')
msgs = UserMessage.objects.filter(
user_profile=user_profile,
)
recipient = stream.recipient
msgs = msgs.filter(message__recipient=recipient)
if topic_name:
msgs = filter_by_topic_name_via_message(
query=msgs,
topic_name=topic_name,
)
msgs = msgs.extra(
where=[UserMessage.where_unread()],
)
message_ids = list(msgs.values_list('message__id', flat=True))
count = msgs.update(
flags=F('flags').bitor(UserMessage.flags.read),
)
event = dict(
type='update_message_flags',
operation='add',
flag='read',
messages=message_ids,
all=False,
)
event_time = timezone_now()
send_event(user_profile.realm, event, [user_profile.id])
do_clear_mobile_push_notifications_for_ids([user_profile.id], message_ids)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
def do_update_mobile_push_notification(message: Message,
prior_mention_user_ids: Set[int],
stream_push_user_ids: Set[int]) -> None:
if not message.is_stream_message():
return
remove_notify_users = prior_mention_user_ids - message.mentions_user_ids - stream_push_user_ids
do_clear_mobile_push_notifications_for_ids(list(remove_notify_users), [message.id])
def do_clear_mobile_push_notifications_for_ids(user_profile_ids: List[int],
message_ids: List[int]) -> None:
if len(message_ids) == 0:
return
assert len(user_profile_ids) == 1 or len(message_ids) == 1
messages_by_user = defaultdict(list)
notifications_to_update = list(UserMessage.objects.filter(
message_id__in=message_ids,
user_profile_id__in=user_profile_ids,
).extra(
where=[UserMessage.where_active_push_notification()],
).values_list('user_profile_id', 'message_id'))
for (user_id, message_id) in notifications_to_update:
messages_by_user[user_id].append(message_id)
for (user_profile_id, event_message_ids) in messages_by_user.items():
queue_json_publish("missedmessage_mobile_notifications", {
"type": "remove",
"user_profile_id": user_profile_id,
"message_ids": event_message_ids,
})
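# Illustrative sketch, not part of the original module: the defaultdict
# grouping above turns (user_id, message_id) pairs into one queue event
# per user; the pairs below are made-up values.
def _example_group_by_user_sketch() -> None:
    pairs = [(1, 10), (1, 11), (2, 10)]
    by_user = defaultdict(list)
    for user_id, message_id in pairs:
        by_user[user_id].append(message_id)
    assert dict(by_user) == {1: [10, 11], 2: [10]}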
def do_update_message_flags(user_profile: UserProfile,
client: Client,
operation: str,
flag: str,
messages: List[int]) -> int:
valid_flags = [item for item in UserMessage.flags
if item not in UserMessage.NON_API_FLAGS]
if flag not in valid_flags:
raise JsonableError(_("Invalid flag: '{}'").format(flag))
if flag in UserMessage.NON_EDITABLE_FLAGS:
raise JsonableError(_("Flag not editable: '{}'").format(flag))
flagattr = getattr(UserMessage.flags, flag)
msgs = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=messages)
    # This next block allows you to star any message, even those you
    # didn't receive (e.g. because you're looking at a public stream
    # you're not subscribed to, etc.). The problem is that starring
    # is a flag on UserMessage, and UserMessage rows are normally
    # created only when you receive a message.
    if msgs.count() == 0:
        if len(messages) != 1:
            raise JsonableError(_("Invalid message(s)"))
        if flag != "starred":
            raise JsonableError(_("Invalid message(s)"))
        # Validate that the user could have read the relevant message.
        message = access_message(user_profile, messages[0])[0]
        # The user has access to the message, so we create a historical,
        # read UserMessage message row for you to star.
        UserMessage.objects.create(user_profile=user_profile,
                                   message=message,
                                   flags=UserMessage.flags.historical | UserMessage.flags.read)
if operation == 'add':
count = msgs.update(flags=F('flags').bitor(flagattr))
elif operation == 'remove':
count = msgs.update(flags=F('flags').bitand(~flagattr))
else:
raise AssertionError("Invalid message flags operation")
event = {'type': 'update_message_flags',
'operation': operation,
'flag': flag,
'messages': messages,
'all': False}
send_event(user_profile.realm, event, [user_profile.id])
if flag == "read" and operation == "add":
event_time = timezone_now()
do_clear_mobile_push_notifications_for_ids([user_profile.id], messages)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read::hour'],
None, event_time, increment=count)
do_increment_logging_stat(user_profile, COUNT_STATS['messages_read_interactions::hour'],
None, event_time, increment=min(1, count))
return count
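# Illustrative sketch, not part of the original module: the bitwise flag
# updates above, using plain ints in place of the BitField. The bit
# positions are assumptions chosen for the example.
def _example_flag_bit_ops_sketch() -> None:
    read = 1 << 0
    starred = 1 << 5
    flags = read
    flags |= starred   # operation == 'add' (bitor)
    assert flags & starred
    flags &= ~starred  # operation == 'remove' (bitand with complement)
    assert not flags & starred and flags & read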
class MessageUpdateUserInfoResult(TypedDict):
message_user_ids: Set[int]
mention_user_ids: Set[int]
def notify_topic_moved_streams(user_profile: UserProfile,
old_stream: Stream, old_topic: str,
new_stream: Stream, new_topic: Optional[str],
send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool) -> None:
# Since moving content between streams is highly disruptive,
    # it's worth adding a couple tombstone messages showing what
    # happened.
sender = get_system_bot(settings.NOTIFICATION_BOT)
if new_topic is None:
new_topic = old_topic
user_mention = f"@_**{user_profile.full_name}|{user_profile.id}**"
old_topic_link = f"#**{old_stream.name}>{old_topic}**"
new_topic_link = f"#**{new_stream.name}>{new_topic}**"
if send_notification_to_new_thread:
with override_language(new_stream.realm.default_language):
internal_send_stream_message(
new_stream.realm, sender, new_stream, new_topic,
_("This topic was moved here from {old_location} by {user}").format(
old_location=old_topic_link, user=user_mention,
),
)
if send_notification_to_old_thread:
with override_language(old_stream.realm.default_language):
internal_send_stream_message(
old_stream.realm, sender, old_stream, old_topic,
_("This topic was moved by {user} to {new_location}").format(
user=user_mention, new_location=new_topic_link,
),
)
def get_user_info_for_message_updates(message_id: int) -> MessageUpdateUserInfoResult:
query = UserMessage.objects.filter(
message=message_id,
flags=~UserMessage.flags.historical,
).values('user_profile_id', 'flags')
rows = list(query)
message_user_ids = {
row['user_profile_id']
for row in rows
}
mask = UserMessage.flags.mentioned | UserMessage.flags.wildcard_mentioned
mention_user_ids = {
row['user_profile_id']
for row in rows
if int(row['flags']) & mask
}
return dict(
message_user_ids=message_user_ids,
mention_user_ids=mention_user_ids,
)
def update_user_message_flags(message: Message, ums: Iterable[UserMessage]) -> None:
wildcard = message.mentions_wildcard
mentioned_ids = message.mentions_user_ids
ids_with_alert_words = message.user_ids_with_alert_words
changed_ums: Set[UserMessage] = set()
def update_flag(um: UserMessage, should_set: bool, flag: int) -> None:
if should_set:
if not (um.flags & flag):
um.flags |= flag
changed_ums.add(um)
else:
if (um.flags & flag):
um.flags &= ~flag
changed_ums.add(um)
for um in ums:
has_alert_word = um.user_profile_id in ids_with_alert_words
update_flag(um, has_alert_word, UserMessage.flags.has_alert_word)
mentioned = um.user_profile_id in mentioned_ids
update_flag(um, mentioned, UserMessage.flags.mentioned)
update_flag(um, wildcard, UserMessage.flags.wildcard_mentioned)
for um in changed_ums:
um.save(update_fields=['flags'])
def update_to_dict_cache(changed_messages: List[Message], realm_id: Optional[int]=None) -> List[int]:
items_for_remote_cache = {}
message_ids = []
changed_messages_to_dict = MessageDict.to_dict_uncached(changed_messages, realm_id)
for msg_id, msg in changed_messages_to_dict.items():
message_ids.append(msg_id)
key = to_dict_cache_key_id(msg_id)
items_for_remote_cache[key] = (msg,)
cache_set_many(items_for_remote_cache)
return message_ids
@transaction.atomic
def do_update_embedded_data(user_profile: UserProfile,
message: Message,
content: Optional[str],
rendered_content: Optional[str]) -> None:
event: Dict[str, Any] = {
'type': 'update_message',
'sender': user_profile.email,
'message_id': message.id}
changed_messages = [message]
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
update_user_message_flags(message, ums)
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
message.save(update_fields=["content", "rendered_content"])
event['message_ids'] = update_to_dict_cache(changed_messages)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
send_event(user_profile.realm, event, list(map(user_info, ums)))
class DeleteMessagesEvent(TypedDict, total=False):
type: str
message_ids: List[int]
message_type: str
sender_id: int
recipient_id: int
topic: str
stream_id: int
@transaction.atomic
def do_update_message(user_profile: UserProfile, message: Message,
new_stream: Optional[Stream], topic_name: Optional[str],
propagate_mode: str, send_notification_to_old_thread: bool,
send_notification_to_new_thread: bool, content: Optional[str],
rendered_content: Optional[str], prior_mention_user_ids: Set[int],
mention_user_ids: Set[int], mention_data: Optional[MentionData]=None) -> int:
timestamp = timezone_now()
message.last_edit_time = timestamp
event: Dict[str, Any] = {
'type': 'update_message',
'user_id': user_profile.id,
'edit_timestamp': datetime_to_timestamp(timestamp),
'message_id': message.id,
}
edit_history_event: Dict[str, Any] = {
'user_id': user_profile.id,
'timestamp': event['edit_timestamp'],
}
changed_messages = [message]
stream_being_edited = None
if message.is_stream_message():
stream_id = message.recipient.type_id
stream_being_edited = get_stream_by_id_in_realm(stream_id, user_profile.realm)
event['stream_name'] = stream_being_edited.name
ums = UserMessage.objects.filter(message=message.id)
if content is not None:
assert rendered_content is not None
assert mention_data is not None
# add data from group mentions to mentions_user_ids.
for group_id in message.mentions_user_group_ids:
members = mention_data.get_group_members(group_id)
message.mentions_user_ids.update(members)
update_user_message_flags(message, ums)
# One could imagine checking realm.allow_edit_history here and
# modifying the events based on that setting, but doing so
        # doesn't really make sense. We need to send the edit event
        # to clients regardless, and clients already had access to the
        # original content of the message anyway.
event['orig_content'] = message.content
event['orig_rendered_content'] = message.rendered_content
edit_history_event["prev_content"] = message.content
edit_history_event["prev_rendered_content"] = message.rendered_content
edit_history_event["prev_rendered_content_version"] = message.rendered_content_version
message.content = content
message.rendered_content = rendered_content
message.rendered_content_version = markdown_version
event["content"] = content
event["rendered_content"] = rendered_content
event['prev_rendered_content_version'] = message.rendered_content_version
event['is_me_message'] = Message.is_status_message(content, rendered_content)
message.has_attachment = check_attachment_reference_change(message)
if message.is_stream_message():
if topic_name is not None:
new_topic_name = topic_name
else:
new_topic_name = message.topic_name()
stream_topic: Optional[StreamTopicTarget] = StreamTopicTarget(
stream_id=stream_id,
topic_name=new_topic_name,
)
else:
stream_topic = None
info = get_recipient_info(
recipient=message.recipient,
sender_id=message.sender_id,
stream_topic=stream_topic,
possible_wildcard_mention=mention_data.message_has_wildcards(),
)
event['push_notify_user_ids'] = list(info['push_notify_user_ids'])
event['stream_push_user_ids'] = list(info['stream_push_user_ids'])
event['stream_email_user_ids'] = list(info['stream_email_user_ids'])
event['prior_mention_user_ids'] = list(prior_mention_user_ids)
event['mention_user_ids'] = list(mention_user_ids)
event['presence_idle_user_ids'] = filter_presence_idle_user_ids(info['active_user_ids'])
if message.mentions_wildcard:
event['wildcard_mention_user_ids'] = list(info['wildcard_mention_user_ids'])
else:
event['wildcard_mention_user_ids'] = []
do_update_mobile_push_notification(message, prior_mention_user_ids, info['stream_push_user_ids'])
if topic_name is not None or new_stream is not None:
orig_topic_name = message.topic_name()
event["propagate_mode"] = propagate_mode
event["stream_id"] = message.recipient.type_id
if new_stream is not None:
assert content is None
assert message.is_stream_message()
assert stream_being_edited is not None
edit_history_event['prev_stream'] = stream_being_edited.id
event[ORIG_TOPIC] = orig_topic_name
message.recipient_id = new_stream.recipient_id
event["new_stream_id"] = new_stream.id
event["propagate_mode"] = propagate_mode
subscribers = get_active_subscriptions_for_stream_id(
stream_id).select_related("user_profile")
subs_to_new_stream = list(get_active_subscriptions_for_stream_id(
new_stream.id).select_related("user_profile"))
new_stream_sub_ids = [user.user_profile_id for user in subs_to_new_stream]
subs_losing_usermessages = [
sub for sub in subscribers
if sub.user_profile_id not in new_stream_sub_ids
]
            # Users who can no longer access the message without some action
# from administrators.
#
# TODO: Extend this list to also contain users losing access
# due to the messages moving to a private stream they are not
# subscribed to.
subs_losing_access = [
sub for sub in subs_losing_usermessages
if sub.user_profile.is_guest
]
ums = ums.exclude(user_profile_id__in=[
sub.user_profile_id for sub in subs_losing_usermessages])
if topic_name is not None:
topic_name = truncate_topic(topic_name)
message.set_topic_name(topic_name)
# These fields have legacy field names.
event[ORIG_TOPIC] = orig_topic_name
event[TOPIC_NAME] = topic_name
event[TOPIC_LINKS] = topic_links(message.sender.realm_id, topic_name)
edit_history_event[LEGACY_PREV_TOPIC] = orig_topic_name
delete_event_notify_user_ids: List[int] = []
if propagate_mode in ["change_later", "change_all"]:
assert topic_name is not None or new_stream is not None
messages_list = update_messages_for_topic_edit(
message=message,
propagate_mode=propagate_mode,
orig_topic_name=orig_topic_name,
topic_name=topic_name,
new_stream=new_stream,
)
changed_messages += messages_list
if new_stream is not None:
assert stream_being_edited is not None
message_ids = [msg.id for msg in changed_messages]
# Delete UserMessage objects for users who will no
# longer have access to these messages. Note: This could be
# very expensive, since it's N guest users x M messages.
UserMessage.objects.filter(
user_profile_id__in=[sub.user_profile_id for sub in
subs_losing_usermessages],
message_id__in=message_ids,
).delete()
delete_event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
'message_type': 'stream',
'stream_id': stream_being_edited.id,
'topic': orig_topic_name,
}
delete_event_notify_user_ids = [sub.user_profile_id for sub in subs_losing_access]
send_event(user_profile.realm, delete_event, delete_event_notify_user_ids)
if message.edit_history is not None:
edit_history = ujson.loads(message.edit_history)
edit_history.insert(0, edit_history_event)
else:
edit_history = [edit_history_event]
message.edit_history = ujson.dumps(edit_history)
save_message_for_edit_use_case(message=message)
realm_id: Optional[int] = None
if stream_being_edited is not None:
realm_id = stream_being_edited.realm_id
event['message_ids'] = update_to_dict_cache(changed_messages, realm_id)
def user_info(um: UserMessage) -> Dict[str, Any]:
return {
'id': um.user_profile_id,
'flags': um.flags_list(),
}
    # The following logic arranges for clients of users who are
    # subscribed to this stream but don't have a UserMessage row to
    # also get real-time updates. This is a balance between sending
    # message-edit notifications for every public stream to every user
    # in the organization (too expansive, and also not what we do for
    # newly sent messages anyway) and having magical live-updates
    # where possible.
users_to_be_notified = list(map(user_info, ums))
if stream_being_edited is not None:
if stream_being_edited.is_history_public_to_subscribers:
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by
# definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
# Remove duplicates by excluding the id of users already
# in users_to_be_notified list. This is the case where a
# user both has a UserMessage row and is a current
# Subscriber
subscribers = subscribers.exclude(user_profile_id__in=[um.user_profile_id for um in ums])
if new_stream is not None:
assert delete_event_notify_user_ids is not None
subscribers = subscribers.exclude(user_profile_id__in=delete_event_notify_user_ids)
# All users that are subscribed to the stream must be
# notified when a message is edited
subscriber_ids = [user.user_profile_id for user in subscribers]
if new_stream is not None:
                # TODO: Guest users who were not subscribers of the old
                # stream but are subscribed to the new stream don't see
                # the moved topic; until that's fixed, sending them this
                # event would leave clients confused, so exclude them.
old_stream_unsubbed_guests = [
sub for sub in subs_to_new_stream
if sub.user_profile.is_guest
and sub.user_profile_id not in subscriber_ids
]
subscribers = subscribers.exclude(user_profile_id__in=[
sub.user_profile_id for sub in old_stream_unsubbed_guests])
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_be_notified += list(map(subscriber_info, subscriber_ids))
send_event(user_profile.realm, event, users_to_be_notified)
if (len(changed_messages) > 0 and new_stream is not None and
stream_being_edited is not None):
# Notify users that the topic was moved.
notify_topic_moved_streams(user_profile, stream_being_edited, orig_topic_name,
new_stream, topic_name, send_notification_to_old_thread,
send_notification_to_new_thread)
return len(changed_messages)
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
    # This function assumes that all messages in the delete_message event
    # belong to the same topic or are a single private message, as any
    # other behaviour is not possible with the current callers of this
    # method.
messages = list(messages)
message_ids = [message.id for message in messages]
if not message_ids:
return
event: DeleteMessagesEvent = {
'type': 'delete_message',
'message_ids': message_ids,
}
sample_message = messages[0]
message_type = "stream"
users_to_notify = []
if not sample_message.is_stream_message():
assert len(messages) == 1
message_type = "private"
ums = UserMessage.objects.filter(message_id__in=message_ids)
users_to_notify = [um.user_profile_id for um in ums]
# TODO: We should plan to remove `sender_id` here.
event['recipient_id'] = sample_message.recipient_id
event['sender_id'] = sample_message.sender_id
archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
if message_type == "stream":
stream_id = sample_message.recipient.type_id
event['stream_id'] = stream_id
event['topic'] = sample_message.topic_name()
subscribers = get_active_subscriptions_for_stream_id(stream_id)
# We exclude long-term idle users, since they by definition have no active clients.
subscribers = subscribers.exclude(user_profile__long_term_idle=True)
subscriber_ids = [user.user_profile_id for user in subscribers]
users_to_notify = list(map(subscriber_info, subscriber_ids))
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
event['message_type'] = message_type
send_event(realm, event, users_to_notify)
def do_delete_messages_by_sender(user: UserProfile) -> None:
message_ids = list(Message.objects.filter(sender=user).values_list('id', flat=True).order_by('id'))
if message_ids:
move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
def get_streams_traffic(stream_ids: Set[int]) -> Dict[int, int]:
stat = COUNT_STATS['messages_in_stream:is_bot:day']
traffic_from = timezone_now() - datetime.timedelta(days=28)
query = StreamCount.objects.filter(property=stat.property,
end_time__gt=traffic_from)
query = query.filter(stream_id__in=stream_ids)
traffic_list = query.values('stream_id').annotate(value=Sum('value'))
traffic_dict = {}
for traffic in traffic_list:
traffic_dict[traffic["stream_id"]] = traffic["value"]
return traffic_dict
def round_to_2_significant_digits(number: int) -> int:
return int(round(number, 2 - len(str(number))))
STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS = 7
def get_average_weekly_stream_traffic(stream_id: int, stream_date_created: datetime.datetime,
recent_traffic: Dict[int, int]) -> Optional[int]:
try:
stream_traffic = recent_traffic[stream_id]
except KeyError:
stream_traffic = 0
stream_age = (timezone_now() - stream_date_created).days
if stream_age >= 28:
average_weekly_traffic = int(stream_traffic // 4)
elif stream_age >= STREAM_TRAFFIC_CALCULATION_MIN_AGE_DAYS:
average_weekly_traffic = int(stream_traffic * 7 // stream_age)
else:
return None
if average_weekly_traffic == 0 and stream_traffic > 0:
average_weekly_traffic = 1
return round_to_2_significant_digits(average_weekly_traffic)
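# Illustrative sketch, not part of the original module, checking the
# traffic arithmetic above with concrete numbers: a 28-day-old stream
# divides by four weeks, a younger one extrapolates pro rata, and the
# result keeps two significant digits.
def _example_weekly_traffic_sketch() -> None:
    assert round_to_2_significant_digits(156 // 4) == 39      # 28+ days old
    assert round_to_2_significant_digits(30 * 7 // 10) == 21  # 10 days old
    assert round_to_2_significant_digits(1234) == 1200        # 2 significant digits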
SubHelperT = Tuple[List[Dict[str, Any]], List[Dict[str, Any]], List[Dict[str, Any]]]
def get_web_public_subs(realm: Realm) -> SubHelperT:
color_idx = 0
def get_next_color() -> str:
nonlocal color_idx
color = STREAM_ASSIGNMENT_COLORS[color_idx]
color_idx = (color_idx + 1) % len(STREAM_ASSIGNMENT_COLORS)
return color
subscribed = []
for stream in Stream.objects.filter(realm=realm, is_web_public=True, deactivated=False):
stream_dict = stream.to_dict()
# Add versions of the Subscription fields based on a simulated
# new user subscription set.
stream_dict['is_muted'] = False
stream_dict['color'] = get_next_color()
stream_dict['desktop_notifications'] = True
stream_dict['audible_notifications'] = True
stream_dict['push_notifications'] = True
stream_dict['email_notifications'] = True
stream_dict['pin_to_top'] = False
stream_weekly_traffic = get_average_weekly_stream_traffic(stream.id,
stream.date_created,
{})
stream_dict['stream_weekly_traffic'] = stream_weekly_traffic
stream_dict['email_address'] = ''
subscribed.append(stream_dict)
return (subscribed, [], [])
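# Illustrative sketch, not part of the original module: the get_next_color
# closure above cycles through the palette and wraps around; the palette
# values here are made up for the example.
def _example_color_cycle_sketch() -> None:
    palette = ["#76ce90", "#fae589", "#a6c7e5"]
    idx = 0
    def next_color() -> str:
        nonlocal idx
        color = palette[idx]
        idx = (idx + 1) % len(palette)
        return color
    assert [next_color() for _ in range(4)] == palette + [palette[0]]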
# In general, it's better to avoid using .values() because it makes
# the code harder to read, but here the performance benefit of fetching
# subscription data in bulk makes it worthwhile.
def gather_subscriptions_helper(user_profile: UserProfile,
include_subscribers: bool=True) -> SubHelperT:
sub_dicts = get_stream_subscriptions_for_user(user_profile).values(
*Subscription.API_FIELDS, "recipient_id").order_by("recipient_id")
sub_dicts = list(sub_dicts)
sub_recipient_ids = [
sub['recipient_id']
for sub in sub_dicts
]
stream_recipient = StreamRecipientMap()
stream_recipient.populate_for_recipient_ids(sub_recipient_ids)
stream_ids: Set[int] = set()
for sub in sub_dicts:
sub['stream_id'] = stream_recipient.stream_id_for(sub['recipient_id'])
stream_ids.add(sub['stream_id'])
recent_traffic = get_streams_traffic(stream_ids=stream_ids)
all_streams = get_active_streams(user_profile.realm).select_related(
"realm").values(
*Stream.API_FIELDS,
# date_created is used as an input for the stream_weekly_traffic computed field.
"date_created",
# The realm_id and recipient_id are generally not needed in the API.
"realm_id",
"recipient_id",
            # email_token isn't public to some users with access to
            # the stream, so it doesn't belong in Stream.API_FIELDS.
"email_token")
stream_dicts = [stream for stream in all_streams if stream['id'] in stream_ids]
stream_hash = {}
for stream in stream_dicts:
stream_hash[stream["id"]] = stream
all_streams_id = [stream["id"] for stream in all_streams]
subscribed = []
unsubscribed = []
never_subscribed = []
# Deactivated streams aren't in stream_hash.
streams = [stream_hash[sub["stream_id"]] for sub in sub_dicts
if sub["stream_id"] in stream_hash]
streams_subscribed_map = {sub["stream_id"]: sub["active"] for sub in sub_dicts}
streams_subscribed_map.update({stream['id']: False for stream in all_streams if stream not in streams})
if include_subscribers:
subscriber_map: Mapping[int, Optional[List[int]]] = bulk_get_subscriber_user_ids(
all_streams,
user_profile,
streams_subscribed_map,
stream_recipient,
)
else:
        # If we're not including subscribers, always return None,
        # which the below code needs to check for anyway.
subscriber_map = defaultdict(lambda: None)
sub_unsub_stream_ids = set()
for sub in sub_dicts:
sub_unsub_stream_ids.add(sub["stream_id"])
stream = stream_hash.get(sub["stream_id"])
if not stream:
# This stream has been deactivated, don't include it.
continue
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
        # Copy Subscription.API_FIELDS, including "active", which is
        # popped below and used to determine which list gets the stream.
for field_name in Subscription.API_FIELDS:
stream_dict[field_name] = sub[field_name]
        # Backwards-compatibility for clients that haven't been
        # updated for the in_home_view => is_muted API migration.
        stream_dict['in_home_view'] = not stream_dict['is_muted']
        # Backwards-compatibility for clients that haven't been
        # updated for the is_announcement_only -> stream_post_policy
        # migration.
        stream_dict['is_announcement_only'] = \
            stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
# Add a few computed fields not directly from the data models.
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
stream_dict['email_address'] = encode_email_address_helper(
stream["name"], stream["email_token"], show_sender=True)
# Construct and add subscribers data
subscribers: Optional[List[int]] = subscriber_map[stream["id"]]
        # Important: don't show the subscribers if the stream is invite-only
        # and the user isn't currently subscribed (unless they're a realm admin).
if stream["invite_only"] and not (sub["active"] or user_profile.is_realm_admin):
subscribers = None
# Guest users lose access to subscribers when they are unsubscribed.
if not sub["active"] and user_profile.is_guest:
subscribers = None
if subscribers is not None:
stream_dict['subscribers'] = subscribers
# is_active is represented in this structure by which list we include it in.
is_active = stream_dict.pop("active")
if is_active:
subscribed.append(stream_dict)
else:
unsubscribed.append(stream_dict)
all_streams_id_set = set(all_streams_id)
if user_profile.can_access_public_streams():
never_subscribed_stream_ids = all_streams_id_set - sub_unsub_stream_ids
else:
never_subscribed_stream_ids = set()
never_subscribed_streams = [ns_stream_dict for ns_stream_dict in all_streams
if ns_stream_dict['id'] in never_subscribed_stream_ids]
for stream in never_subscribed_streams:
is_public = (not stream['invite_only'])
if is_public or user_profile.is_realm_admin:
stream_dict = {}
for field_name in Stream.API_FIELDS:
if field_name == "id":
stream_dict['stream_id'] = stream["id"]
continue
stream_dict[field_name] = stream[field_name]
stream_dict['stream_weekly_traffic'] = get_average_weekly_stream_traffic(
stream["id"], stream["date_created"], recent_traffic)
# Backwards-compatibility addition of removed field.
stream_dict['is_announcement_only'] = \
stream['stream_post_policy'] == Stream.STREAM_POST_POLICY_ADMINS
if is_public or user_profile.is_realm_admin:
subscribers = subscriber_map[stream["id"]]
if subscribers is not None:
stream_dict['subscribers'] = subscribers
never_subscribed.append(stream_dict)
return (sorted(subscribed, key=lambda x: x['name']),
sorted(unsubscribed, key=lambda x: x['name']),
sorted(never_subscribed, key=lambda x: x['name']))
def gather_subscriptions(
user_profile: UserProfile,
include_subscribers: bool=False,
) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
subscribed, unsubscribed, _ = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
if include_subscribers:
user_ids = set()
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
for subscriber in sub['subscribers']:
user_ids.add(subscriber)
email_dict = get_emails_from_user_ids(list(user_ids))
for subs in [subscribed, unsubscribed]:
for sub in subs:
if 'subscribers' in sub:
sub['subscribers'] = sorted([
email_dict[user_id] for user_id in sub['subscribers']
])
return (subscribed, unsubscribed)
def get_active_presence_idle_user_ids(realm: Realm,
sender_id: int,
message_type: str,
active_user_ids: Set[int],
user_flags: Dict[int, List[str]]) -> List[int]:
if realm.presence_disabled:
return []
is_pm = message_type == 'private'
user_ids = set()
for user_id in active_user_ids:
flags: Iterable[str] = user_flags.get(user_id, [])
mentioned = 'mentioned' in flags or 'wildcard_mentioned' in flags
private_message = is_pm and user_id != sender_id
alerted = 'has_alert_word' in flags
if mentioned or private_message or alerted:
user_ids.add(user_id)
return filter_presence_idle_user_ids(user_ids)
def filter_presence_idle_user_ids(user_ids: Set[int]) -> List[int]:
# Given a set of user IDs (the recipients of a message), accesses
# the UserPresence table to determine which of these users are
# currently idle and should potentially get email notifications
    # (and push notifications with
# user_profile.enable_online_push_notifications=False).
#
# We exclude any presence data from ZulipMobile for the purpose of
# triggering these notifications; the mobile app can more
# effectively do its own client-side filtering of notification
# sounds/etc. for the case that the user is actively doing a PM
# conversation in the app.
if not user_ids:
return []
# Matches presence.js constant
OFFLINE_THRESHOLD_SECS = 140
recent = timezone_now() - datetime.timedelta(seconds=OFFLINE_THRESHOLD_SECS)
rows = UserPresence.objects.filter(
user_profile_id__in=user_ids,
status=UserPresence.ACTIVE,
timestamp__gte=recent,
).exclude(client__name="ZulipMobile").distinct('user_profile_id').values('user_profile_id')
active_user_ids = {row['user_profile_id'] for row in rows}
idle_user_ids = user_ids - active_user_ids
return sorted(list(idle_user_ids))
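# Illustrative sketch, not part of the original module: once the recently
# active users are known, the idle set is a plain set difference, returned
# sorted; the IDs below are made-up values.
def _example_idle_filter_sketch() -> None:
    recipient_ids = {1, 2, 3, 4}
    recently_active_ids = {2, 4}  # assumed recent ACTIVE presence rows
    assert sorted(recipient_ids - recently_active_ids) == [1, 3]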
def do_send_confirmation_email(invitee: PreregistrationUser,
referrer: UserProfile) -> str:
activation_url = create_confirmation_link(invitee, Confirmation.INVITATION)
context = {'referrer_full_name': referrer.full_name, 'referrer_email': referrer.delivery_email,
'activate_url': activation_url, 'referrer_realm_name': referrer.realm.name}
from_name = f"{referrer.full_name} (via Zulip)"
send_email('zerver/emails/invitation', to_emails=[invitee.email], from_name=from_name,
from_address=FromAddress.tokenized_no_reply_address(),
language=referrer.realm.default_language, context=context,
realm=referrer.realm)
return activation_url
def email_not_system_bot(email: str) -> None:
if is_cross_realm_bot_email(email):
msg = email_reserved_for_system_bots_error(email)
code = msg
raise ValidationError(
msg,
code=code,
params=dict(deactivated=False),
)
class InvitationError(JsonableError):
code = ErrorCode.INVITATION_FAILED
data_fields = ['errors', 'sent_invitations']
def __init__(self, msg: str, errors: List[Tuple[str, str, bool]],
sent_invitations: bool) -> None:
self._msg: str = msg
self.errors: List[Tuple[str, str, bool]] = errors
self.sent_invitations: bool = sent_invitations
def estimate_recent_invites(realms: Iterable[Realm], *, days: int) -> int:
recent_invites = RealmCount.objects.filter(
realm__in=realms,
property='invites_sent::day',
end_time__gte=timezone_now() - datetime.timedelta(days=days),
).aggregate(Sum('value'))['value__sum']
if recent_invites is None:
return 0
return recent_invites
def check_invite_limit(realm: Realm, num_invitees: int) -> None:
msg = _("You do not have enough remaining invites. "
"Please contact {email} to have your limit raised. "
"No invitations were sent.").format(email=settings.ZULIP_ADMINISTRATOR)
if not settings.OPEN_REALM_CREATION:
return
recent_invites = estimate_recent_invites([realm], days=1)
if num_invitees + recent_invites > realm.max_invites:
raise InvitationError(msg, [], sent_invitations=False)
default_max = settings.INVITES_DEFAULT_REALM_DAILY_MAX
newrealm_age = datetime.timedelta(days=settings.INVITES_NEW_REALM_DAYS)
if realm.date_created <= timezone_now() - newrealm_age:
# If this isn't a "newly-created" realm, we're done. The
# remaining code applies an aggregate limit across all
# "new" realms, to address sudden bursts of spam realms.
return
if realm.max_invites > default_max:
        # If a user is on a realm where we've bumped up
        # max_invites, then they're exempt from the aggregate limit below.
return
new_realms = Realm.objects.filter(
date_created__gte=timezone_now() - newrealm_age,
_max_invites__lte=default_max,
).all()
for days, count in settings.INVITES_NEW_REALM_LIMIT_DAYS:
recent_invites = estimate_recent_invites(new_realms, days=days)
if num_invitees + recent_invites > count:
raise InvitationError(msg, [], sent_invitations=False)
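# Illustrative sketch, not part of the original module, of the per-realm
# arithmetic in check_invite_limit: with a max of 100 invites/day and 95
# already sent, a batch of 6 trips the limit while a batch of 5 fits.
def _example_invite_limit_sketch() -> None:
    max_invites, recent_invites = 100, 95  # assumed values
    assert 6 + recent_invites > max_invites
    assert not (5 + recent_invites > max_invites)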
def do_invite_users(user_profile: UserProfile,
invitee_emails: SizedTextIterable,
streams: Iterable[Stream],
invite_as: int=PreregistrationUser.INVITE_AS['MEMBER']) -> None:
check_invite_limit(user_profile.realm, len(invitee_emails))
realm = user_profile.realm
if not realm.invite_required:
min_age = datetime.timedelta(days=settings.INVITES_MIN_USER_AGE_DAYS)
if (user_profile.date_joined > timezone_now() - min_age
and not user_profile.is_realm_admin):
raise InvitationError(
_("Your account is too new to send invites for this organization. "
"Ask an organization admin, or a more experienced user."),
[], sent_invitations=False)
good_emails: Set[str] = set()
errors: List[Tuple[str, str, bool]] = []
validate_email_allowed_in_realm = get_realm_email_validator(user_profile.realm)
for email in invitee_emails:
if email == '':
continue
email_error = validate_email_is_valid(
email,
validate_email_allowed_in_realm,
)
if email_error:
errors.append((email, email_error, False))
else:
good_emails.add(email)
error_dict = get_existing_user_errors(user_profile.realm, good_emails)
skipped: List[Tuple[str, str, bool]] = []
for email in error_dict:
msg, deactivated = error_dict[email]
skipped.append((email, msg, deactivated))
good_emails.remove(email)
validated_emails = list(good_emails)
if errors:
raise InvitationError(
_("Some emails did not validate, so we didn't send any invitations."),
errors + skipped, sent_invitations=False)
if skipped and len(skipped) == len(invitee_emails):
# All e-mails were skipped, so we didn't actually invite anyone.
raise InvitationError(_("We weren't able to invite anyone."),
skipped, sent_invitations=False)
# We do this here rather than in the invite queue processor since this
# is used for rate limiting invitations, rather than keeping track of
    # when exactly invitations were sent.
do_increment_logging_stat(user_profile.realm, COUNT_STATS['invites_sent::day'],
None, timezone_now(), increment=len(validated_emails))
# Now that we are past all the possible errors, we actually create
# the PreregistrationUser objects and trigger the email invitations.
for email in validated_emails:
# The logged in user is the referrer.
prereg_user = PreregistrationUser(email=email, referred_by=user_profile,
invited_as=invite_as,
realm=user_profile.realm)
prereg_user.save()
stream_ids = [stream.id for stream in streams]
prereg_user.streams.set(stream_ids)
event = {"prereg_id": prereg_user.id, "referrer_id": user_profile.id}
queue_json_publish("invites", event)
if skipped:
raise InvitationError(_("Some of those addresses are already using Zulip, "
"so we didn't send them an invitation. We did send "
"invitations to everyone else!"),
skipped, sent_invitations=True)
notify_invites_changed(user_profile)
def do_get_user_invites(user_profile: UserProfile) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by__realm=user_profile.realm)
)
else:
prereg_users = filter_to_valid_prereg_users(
PreregistrationUser.objects.filter(referred_by=user_profile)
)
invites = []
for invitee in prereg_users:
invites.append(dict(email=invitee.email,
invited_by_user_id=invitee.referred_by.id,
invited=datetime_to_timestamp(invitee.invited_at),
id=invitee.id,
invited_as=invitee.invited_as,
is_multiuse=False))
if not user_profile.is_realm_admin:
return invites
lowest_datetime = timezone_now() - datetime.timedelta(days=settings.INVITATION_LINK_VALIDITY_DAYS)
multiuse_confirmation_objs = Confirmation.objects.filter(realm=user_profile.realm,
type=Confirmation.MULTIUSE_INVITE,
date_sent__gte=lowest_datetime)
for confirmation_obj in multiuse_confirmation_objs:
invite = confirmation_obj.content_object
invites.append(dict(invited_by_user_id=invite.referred_by.id,
invited=datetime_to_timestamp(confirmation_obj.date_sent),
id=invite.id,
link_url=confirmation_url(confirmation_obj.confirmation_key,
user_profile.realm,
Confirmation.MULTIUSE_INVITE),
invited_as=invite.invited_as,
is_multiuse=True))
return invites
def do_create_multiuse_invite_link(referred_by: UserProfile, invited_as: int,
streams: Sequence[Stream] = []) -> str:
realm = referred_by.realm
invite = MultiuseInvite.objects.create(realm=realm, referred_by=referred_by)
if streams:
invite.streams.set(streams)
invite.invited_as = invited_as
invite.save()
notify_invites_changed(referred_by)
return create_confirmation_link(invite, Confirmation.MULTIUSE_INVITE)
def do_revoke_user_invite(prereg_user: PreregistrationUser) -> None:
email = prereg_user.email
content_type = ContentType.objects.get_for_model(PreregistrationUser)
Confirmation.objects.filter(content_type=content_type,
object_id=prereg_user.id).delete()
prereg_user.delete()
clear_scheduled_invitation_emails(email)
notify_invites_changed(prereg_user)
def do_revoke_multi_use_invite(multiuse_invite: MultiuseInvite) -> None:
content_type = ContentType.objects.get_for_model(MultiuseInvite)
Confirmation.objects.filter(content_type=content_type,
object_id=multiuse_invite.id).delete()
multiuse_invite.delete()
notify_invites_changed(multiuse_invite.referred_by)
def do_resend_user_invite_email(prereg_user: PreregistrationUser) -> int:
assert prereg_user.referred_by is not None
assert prereg_user.realm is not None
check_invite_limit(prereg_user.referred_by.realm, 1)
prereg_user.invited_at = timezone_now()
prereg_user.save()
do_increment_logging_stat(prereg_user.realm, COUNT_STATS['invites_sent::day'],
None, prereg_user.invited_at)
clear_scheduled_invitation_emails(prereg_user.email)
# We don't store the custom email body, so just set it to None
event = {"prereg_id": prereg_user.id, "referrer_id": prereg_user.referred_by.id, "email_body": None}
queue_json_publish("invites", event)
return datetime_to_timestamp(prereg_user.invited_at)
def notify_realm_emoji(realm: Realm) -> None:
event = dict(type="realm_emoji", op="update",
realm_emoji=realm.get_emoji())
send_event(realm, event, active_user_ids(realm.id))
def check_add_realm_emoji(realm: Realm,
name: str,
author: UserProfile,
image_file: File) -> Optional[RealmEmoji]:
realm_emoji = RealmEmoji(realm=realm, name=name, author=author)
realm_emoji.full_clean()
realm_emoji.save()
emoji_file_name = get_emoji_file_name(image_file.name, realm_emoji.id)
emoji_file_name = mark_sanitized(emoji_file_name)
emoji_uploaded_successfully = False
try:
upload_emoji_image(image_file, emoji_file_name, author)
emoji_uploaded_successfully = True
finally:
if not emoji_uploaded_successfully:
realm_emoji.delete()
return None
else:
realm_emoji.file_name = emoji_file_name
realm_emoji.save(update_fields=['file_name'])
notify_realm_emoji(realm_emoji.realm)
return realm_emoji
def do_remove_realm_emoji(realm: Realm, name: str) -> None:
emoji = RealmEmoji.objects.get(realm=realm, name=name, deactivated=False)
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
notify_realm_emoji(realm)
def notify_alert_words(user_profile: UserProfile, words: Iterable[str]) -> None:
event = dict(type="alert_words", alert_words=words)
send_event(user_profile.realm, event, [user_profile.id])
def do_add_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = add_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_remove_alert_words(user_profile: UserProfile, alert_words: Iterable[str]) -> None:
words = remove_user_alert_words(user_profile, alert_words)
notify_alert_words(user_profile, words)
def do_mute_topic(user_profile: UserProfile, stream: Stream, recipient: Recipient, topic: str,
date_muted: Optional[datetime.datetime]=None) -> None:
if date_muted is None:
date_muted = timezone_now()
add_topic_mute(user_profile, stream.id, recipient.id, topic, date_muted)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_unmute_topic(user_profile: UserProfile, stream: Stream, topic: str) -> None:
remove_topic_mute(user_profile, stream.id, topic)
event = dict(type="muted_topics", muted_topics=get_topic_mutes(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_mark_hotspot_as_read(user: UserProfile, hotspot: str) -> None:
UserHotspot.objects.get_or_create(user=user, hotspot=hotspot)
event = dict(type="hotspots", hotspots=get_next_hotspots(user))
send_event(user.realm, event, [user.id])
def notify_realm_filters(realm: Realm) -> None:
realm_filters = realm_filters_for_realm(realm.id)
event = dict(type="realm_filters", realm_filters=realm_filters)
send_event(realm, event, active_user_ids(realm.id))
def do_add_realm_filter(realm: Realm, pattern: str, url_format_string: str) -> int:
pattern = pattern.strip()
url_format_string = url_format_string.strip()
realm_filter = RealmFilter(
realm=realm, pattern=pattern,
url_format_string=url_format_string)
realm_filter.full_clean()
realm_filter.save()
notify_realm_filters(realm)
return realm_filter.id
def do_remove_realm_filter(realm: Realm, pattern: Optional[str]=None,
id: Optional[int]=None) -> None:
if pattern is not None:
RealmFilter.objects.get(realm=realm, pattern=pattern).delete()
else:
RealmFilter.objects.get(realm=realm, pk=id).delete()
notify_realm_filters(realm)
def get_emails_from_user_ids(user_ids: Sequence[int]) -> Dict[int, str]:
return UserProfile.emails_from_ids(user_ids)
def do_add_realm_domain(realm: Realm, domain: str, allow_subdomains: bool) -> (RealmDomain):
realm_domain = RealmDomain.objects.create(realm=realm, domain=domain,
allow_subdomains=allow_subdomains)
event = dict(type="realm_domains", op="add",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm, event, active_user_ids(realm.id))
return realm_domain
def do_change_realm_domain(realm_domain: RealmDomain, allow_subdomains: bool) -> None:
realm_domain.allow_subdomains = allow_subdomains
realm_domain.save(update_fields=['allow_subdomains'])
event = dict(type="realm_domains", op="change",
realm_domain=dict(domain=realm_domain.domain,
allow_subdomains=realm_domain.allow_subdomains))
send_event(realm_domain.realm, event, active_user_ids(realm_domain.realm_id))
def do_remove_realm_domain(realm_domain: RealmDomain, acting_user: Optional[UserProfile]=None) -> None:
realm = realm_domain.realm
domain = realm_domain.domain
realm_domain.delete()
if RealmDomain.objects.filter(realm=realm).count() == 0 and realm.emails_restricted_to_domains:
        # If this was the last realm domain, mark the realm as no
        # longer restricted to domains, because the feature doesn't do
        # anything if there are no domains, and this is probably less
        # confusing than the alternative.
do_set_realm_property(realm, 'emails_restricted_to_domains', False, acting_user=acting_user)
event = dict(type="realm_domains", op="remove", domain=domain)
send_event(realm, event, active_user_ids(realm.id))
def get_occupied_streams(realm: Realm) -> QuerySet:
# TODO: Make a generic stub for QuerySet
exists_expression = Exists(
Subscription.objects.filter(active=True, user_profile__is_active=True,
user_profile__realm=realm,
recipient_id=OuterRef('recipient_id')),
)
occupied_streams = Stream.objects.filter(realm=realm, deactivated=False) \
.annotate(occupied=exists_expression).filter(occupied=True)
return occupied_streams
def get_web_public_streams(realm: Realm) -> List[Dict[str, Any]]:
query = Stream.objects.filter(realm=realm, deactivated=False, is_web_public=True)
streams = Stream.get_client_data(query)
return streams
def do_get_streams(
user_profile: UserProfile, include_public: bool=True,
include_subscribed: bool=True, include_all_active: bool=False,
include_default: bool=False, include_owner_subscribed: bool=False,
) -> List[Dict[str, Any]]:
if include_all_active and not user_profile.is_api_super_user:
raise JsonableError(_("User not authorized for this query"))
include_public = include_public and user_profile.can_access_public_streams()
# Start out with all streams in the realm with subscribers
query = get_occupied_streams(user_profile.realm)
if include_all_active:
streams = Stream.get_client_data(query)
else:
# We construct a query as the or (|) of the various sources
# this user requested streams from.
query_filter: Optional[Q] = None
def add_filter_option(option: Q) -> None:
nonlocal query_filter
if query_filter is None:
query_filter = option
else:
query_filter |= option
if include_subscribed:
subscribed_stream_ids = get_subscribed_stream_ids_for_user(user_profile)
recipient_check = Q(id__in=set(subscribed_stream_ids))
add_filter_option(recipient_check)
if include_public:
invite_only_check = Q(invite_only=False)
add_filter_option(invite_only_check)
if include_owner_subscribed and user_profile.is_bot:
bot_owner = user_profile.bot_owner
assert bot_owner is not None
owner_stream_ids = get_subscribed_stream_ids_for_user(bot_owner)
owner_subscribed_check = Q(id__in=set(owner_stream_ids))
add_filter_option(owner_subscribed_check)
if query_filter is not None:
query = query.filter(query_filter)
streams = Stream.get_client_data(query)
else:
# Don't bother going to the database with no valid sources
streams = []
streams.sort(key=lambda elt: elt["name"])
if include_default:
is_default = {}
default_streams = get_default_streams_for_realm(user_profile.realm_id)
for default_stream in default_streams:
is_default[default_stream.id] = True
for stream in streams:
stream['is_default'] = is_default.get(stream["stream_id"], False)
return streams
def notify_attachment_update(user_profile: UserProfile, op: str,
attachment_dict: Dict[str, Any]) -> None:
event = {
'type': 'attachment',
'op': op,
'attachment': attachment_dict,
"upload_space_used": user_profile.realm.currently_used_upload_space_bytes(),
}
send_event(user_profile.realm, event, [user_profile.id])
def do_claim_attachments(message: Message, potential_path_ids: List[str]) -> bool:
claimed = False
for path_id in potential_path_ids:
user_profile = message.sender
is_message_realm_public = False
if message.is_stream_message():
is_message_realm_public = Stream.objects.get(id=message.recipient.type_id).is_public()
if not validate_attachment_request(user_profile, path_id):
            # Technically, there are two cases here:
            # * The user put something in their message that has the form
            #   of an upload, but it doesn't correspond to a file that
            #   exists; validate_attachment_request will return None.
            # * The user is trying to send a link to a file they don't have
            #   permission to access themselves; validate_attachment_request
            #   will return False.
#
# Either case is unusual and suggests a UI bug that got
# the user in this situation, so we log in these cases.
logging.warning(
"User %s tried to share upload %s in message %s, but lacks permission",
user_profile.id, path_id, message.id,
)
continue
claimed = True
attachment = claim_attachment(user_profile, path_id, message, is_message_realm_public)
notify_attachment_update(user_profile, "update", attachment.to_dict())
return claimed
def do_delete_old_unclaimed_attachments(weeks_ago: int) -> None:
old_unclaimed_attachments = get_old_unclaimed_attachments(weeks_ago)
for attachment in old_unclaimed_attachments:
delete_message_image(attachment.path_id)
attachment.delete()
def check_attachment_reference_change(message: Message) -> bool:
    # For an unsaved message edit (message.* has been updated, but not
    # yet saved to the database), adjust Attachment data to correspond
    # to the new content.
prev_attachments = {a.path_id for a in message.attachment_set.all()}
new_attachments = set(message.potential_attachment_path_ids)
if new_attachments == prev_attachments:
return bool(prev_attachments)
to_remove = list(prev_attachments - new_attachments)
if len(to_remove) > 0:
attachments_to_update = Attachment.objects.filter(path_id__in=to_remove).select_for_update()
message.attachment_set.remove(*attachments_to_update)
to_add = list(new_attachments - prev_attachments)
if len(to_add) > 0:
do_claim_attachments(message, to_add)
return message.attachment_set.exists()
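# Illustration of the set arithmetic above with toy path ids (not real
# Attachment rows): ids only in the old set get detached from the message,
# ids only in the new set get claimed for it.
_prev_ids = {"uploads/a.txt", "uploads/b.txt"}
_new_ids = {"uploads/b.txt", "uploads/c.txt"}
assert _prev_ids - _new_ids == {"uploads/a.txt"}   # would be removed
assert _new_ids - _prev_ids == {"uploads/c.txt"}   # would be claimed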
def notify_realm_custom_profile_fields(realm: Realm, operation: str) -> None:
fields = custom_profile_fields_for_realm(realm.id)
event = dict(type="custom_profile_fields",
op=operation,
fields=[f.as_dict() for f in fields])
send_event(realm, event, active_user_ids(realm.id))
def try_add_realm_default_custom_profile_field(realm: Realm,
field_subtype: str) -> CustomProfileField:
field_data = DEFAULT_EXTERNAL_ACCOUNTS[field_subtype]
field = CustomProfileField(realm=realm, name=field_data['name'],
field_type=CustomProfileField.EXTERNAL_ACCOUNT,
hint=field_data['hint'],
field_data=ujson.dumps(dict(subtype=field_subtype)))
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def try_add_realm_custom_profile_field(realm: Realm, name: str, field_type: int,
hint: str='',
field_data: Optional[ProfileFieldData]=None) -> CustomProfileField:
field = CustomProfileField(realm=realm, name=name, field_type=field_type)
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
field.order = field.id
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'add')
return field
def do_remove_realm_custom_profile_field(realm: Realm, field: CustomProfileField) -> None:
field.delete()
notify_realm_custom_profile_fields(realm, 'delete')
def do_remove_realm_custom_profile_fields(realm: Realm) -> None:
CustomProfileField.objects.filter(realm=realm).delete()
def try_update_realm_custom_profile_field(realm: Realm, field: CustomProfileField,
name: str, hint: str='',
field_data: Optional[ProfileFieldData]=None) -> None:
field.name = name
field.hint = hint
if (field.field_type == CustomProfileField.CHOICE or
field.field_type == CustomProfileField.EXTERNAL_ACCOUNT):
field.field_data = ujson.dumps(field_data or {})
field.save()
notify_realm_custom_profile_fields(realm, 'update')
def try_reorder_realm_custom_profile_fields(realm: Realm, order: List[int]) -> None:
    order_mapping = {field_id: index for index, field_id in enumerate(order)}
fields = CustomProfileField.objects.filter(realm=realm)
for field in fields:
if field.id not in order_mapping:
raise JsonableError(_("Invalid order mapping."))
for field in fields:
field.order = order_mapping[field.id]
field.save(update_fields=['order'])
notify_realm_custom_profile_fields(realm, 'update')
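# For illustration: with order=[12, 7, 9], the mapping above assigns each
# field id its position in the list (toy ids, not real CustomProfileField
# rows).
assert {field_id: index for index, field_id in enumerate([12, 7, 9])} == {
    12: 0, 7: 1, 9: 2,
}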
def notify_user_update_custom_profile_data(user_profile: UserProfile,
field: Dict[str, Union[int, str, List[int], None]]) -> None:
data = dict(id=field['id'])
if field['type'] == CustomProfileField.USER:
data["value"] = ujson.dumps(field['value'])
else:
data['value'] = field['value']
if field['rendered_value']:
data['rendered_value'] = field['rendered_value']
payload = dict(user_id=user_profile.id, custom_profile_field=data)
event = dict(type="realm_user", op="update", person=payload)
send_event(user_profile.realm, event, active_user_ids(user_profile.realm.id))
def do_update_user_custom_profile_data_if_changed(user_profile: UserProfile,
data: List[Dict[str, Union[int, str, List[int]]]],
) -> None:
with transaction.atomic():
for field in data:
field_value, created = CustomProfileFieldValue.objects.get_or_create(
user_profile=user_profile,
field_id=field['id'])
if not created and field_value.value == str(field['value']):
                # If the field value isn't actually being changed to a
                # different one, we have nothing to do here for this field.
continue
field_value.value = field['value']
if field_value.field.is_renderable():
field_value.rendered_value = render_stream_description(str(field['value']))
field_value.save(update_fields=['value', 'rendered_value'])
else:
field_value.save(update_fields=['value'])
notify_user_update_custom_profile_data(user_profile, {
"id": field_value.field_id,
"value": field_value.value,
"rendered_value": field_value.rendered_value,
"type": field_value.field.field_type})
def check_remove_custom_profile_field_value(user_profile: UserProfile, field_id: int) -> None:
try:
field = CustomProfileField.objects.get(realm=user_profile.realm, id=field_id)
field_value = CustomProfileFieldValue.objects.get(field=field, user_profile=user_profile)
field_value.delete()
notify_user_update_custom_profile_data(user_profile, {'id': field_id,
'value': None,
'rendered_value': None,
'type': field.field_type})
except CustomProfileField.DoesNotExist:
raise JsonableError(_('Field id {id} not found.').format(id=field_id))
except CustomProfileFieldValue.DoesNotExist:
pass
def do_send_create_user_group_event(user_group: UserGroup, members: List[UserProfile]) -> None:
event = dict(type="user_group",
op="add",
group=dict(name=user_group.name,
members=[member.id for member in members],
description=user_group.description,
id=user_group.id,
),
)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def check_add_user_group(realm: Realm, name: str, initial_members: List[UserProfile],
description: str) -> None:
try:
user_group = create_user_group(name, initial_members, realm, description=description)
do_send_create_user_group_event(user_group, initial_members)
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
def do_send_user_group_update_event(user_group: UserGroup, data: Dict[str, Any]) -> None:
event = dict(type="user_group", op='update', group_id=user_group.id, data=data)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def do_update_user_group_name(user_group: UserGroup, name: str) -> None:
try:
user_group.name = name
user_group.save(update_fields=['name'])
except django.db.utils.IntegrityError:
raise JsonableError(_("User group '{}' already exists.").format(name))
do_send_user_group_update_event(user_group, dict(name=name))
def do_update_user_group_description(user_group: UserGroup, description: str) -> None:
user_group.description = description
user_group.save(update_fields=['description'])
do_send_user_group_update_event(user_group, dict(description=description))
def do_update_outgoing_webhook_service(bot_profile: UserProfile,
service_interface: int,
service_payload_url: str) -> None:
service = get_bot_services(bot_profile.id)[0]
service.base_url = service_payload_url
service.interface = service_interface
service.save()
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(base_url=service.base_url,
interface=service.interface,
token=service.token)],
),
),
bot_owner_user_ids(bot_profile))
def do_update_bot_config_data(bot_profile: UserProfile,
config_data: Dict[str, str]) -> None:
for key, value in config_data.items():
set_bot_config(bot_profile, key, value)
updated_config_data = get_bot_config(bot_profile)
send_event(bot_profile.realm,
dict(type='realm_bot',
op='update',
bot=dict(user_id=bot_profile.id,
services = [dict(config_data=updated_config_data)],
),
),
bot_owner_user_ids(bot_profile))
def get_service_dicts_for_bot(user_profile_id: int) -> List[Dict[str, Any]]:
user_profile = get_user_profile_by_id(user_profile_id)
services = get_bot_services(user_profile_id)
service_dicts: List[Dict[str, Any]] = []
if user_profile.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif user_profile.bot_type == UserProfile.EMBEDDED_BOT:
try:
service_dicts = [{'config_data': get_bot_config(user_profile),
'service_name': services[0].name,
}]
except ConfigError:
pass
return service_dicts
def get_service_dicts_for_bots(bot_dicts: List[Dict[str, Any]],
realm: Realm) -> Dict[int, List[Dict[str, Any]]]:
bot_profile_ids = [bot_dict['id'] for bot_dict in bot_dicts]
bot_services_by_uid: Dict[int, List[Service]] = defaultdict(list)
for service in Service.objects.filter(user_profile_id__in=bot_profile_ids):
bot_services_by_uid[service.user_profile_id].append(service)
embedded_bot_ids = [bot_dict['id'] for bot_dict in bot_dicts
if bot_dict['bot_type'] == UserProfile.EMBEDDED_BOT]
embedded_bot_configs = get_bot_configs(embedded_bot_ids)
service_dicts_by_uid: Dict[int, List[Dict[str, Any]]] = {}
for bot_dict in bot_dicts:
bot_profile_id = bot_dict["id"]
bot_type = bot_dict["bot_type"]
services = bot_services_by_uid[bot_profile_id]
service_dicts: List[Dict[str, Any]] = []
if bot_type == UserProfile.OUTGOING_WEBHOOK_BOT:
service_dicts = [{'base_url': service.base_url,
'interface': service.interface,
'token': service.token,
}
for service in services]
elif bot_type == UserProfile.EMBEDDED_BOT:
if bot_profile_id in embedded_bot_configs.keys():
bot_config = embedded_bot_configs[bot_profile_id]
service_dicts = [{'config_data': bot_config,
'service_name': services[0].name,
}]
service_dicts_by_uid[bot_profile_id] = service_dicts
return service_dicts_by_uid
def get_owned_bot_dicts(user_profile: UserProfile,
include_all_realm_bots_if_admin: bool=True) -> List[Dict[str, Any]]:
if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
result = get_bot_dicts_in_realm(user_profile.realm)
else:
result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
bot_owner=user_profile).values(*bot_dict_fields)
services_by_ids = get_service_dicts_for_bots(result, user_profile.realm)
return [{'email': botdict['email'],
'user_id': botdict['id'],
'full_name': botdict['full_name'],
'bot_type': botdict['bot_type'],
'is_active': botdict['is_active'],
'api_key': botdict['api_key'],
'default_sending_stream': botdict['default_sending_stream__name'],
'default_events_register_stream': botdict['default_events_register_stream__name'],
'default_all_public_streams': botdict['default_all_public_streams'],
'owner_id': botdict['bot_owner__id'],
'avatar_url': avatar_url_from_dict(botdict),
'services': services_by_ids[botdict['id']],
}
for botdict in result]
def do_send_user_group_members_update_event(event_name: str,
user_group: UserGroup,
user_ids: List[int]) -> None:
event = dict(type="user_group",
op=event_name,
group_id=user_group.id,
user_ids=user_ids)
send_event(user_group.realm, event, active_user_ids(user_group.realm_id))
def bulk_add_members_to_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
memberships = [UserGroupMembership(user_group_id=user_group.id,
user_profile=user_profile)
for user_profile in user_profiles]
UserGroupMembership.objects.bulk_create(memberships)
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('add_members', user_group, user_ids)
def remove_members_from_user_group(user_group: UserGroup,
user_profiles: List[UserProfile]) -> None:
UserGroupMembership.objects.filter(
user_group_id=user_group.id,
user_profile__in=user_profiles).delete()
user_ids = [up.id for up in user_profiles]
do_send_user_group_members_update_event('remove_members', user_group, user_ids)
def do_send_delete_user_group_event(realm: Realm, user_group_id: int,
realm_id: int) -> None:
event = dict(type="user_group",
op="remove",
group_id=user_group_id)
send_event(realm, event, active_user_ids(realm_id))
def check_delete_user_group(user_group_id: int, user_profile: UserProfile) -> None:
user_group = access_user_group_by_id(user_group_id, user_profile)
user_group.delete()
do_send_delete_user_group_event(user_profile.realm, user_group_id, user_profile.realm.id)
def do_send_realm_reactivation_email(realm: Realm) -> None:
url = create_confirmation_link(realm, Confirmation.REALM_REACTIVATION)
context = {'confirmation_url': url,
'realm_uri': realm.uri,
'realm_name': realm.name}
language = realm.default_language
send_email_to_admins(
'zerver/emails/realm_reactivation', realm,
from_address=FromAddress.tokenized_no_reply_address(),
from_name=FromAddress.security_email_from_name(language=language),
language=language, context=context)
def do_set_zoom_token(user: UserProfile, token: Optional[Dict[str, object]]) -> None:
user.zoom_token = token
user.save(update_fields=["zoom_token"])
send_event(
user.realm, dict(type="has_zoom_token", value=token is not None), [user.id],
)
def notify_realm_export(user_profile: UserProfile) -> None:
event = dict(type='realm_export',
exports=get_realm_exports_serialized(user_profile))
send_event(user_profile.realm, event, [user_profile.id])
def do_delete_realm_export(user_profile: UserProfile, export: RealmAuditLog) -> None:
export_extra_data = export.extra_data
assert export_extra_data is not None
export_data = ujson.loads(export_extra_data)
export_path = export_data.get('export_path')
if export_path:
# Allow removal even if the export failed.
delete_export_tarball(export_path)
export_data.update({'deleted_timestamp': timezone_now().timestamp()})
export.extra_data = ujson.dumps(export_data)
export.save(update_fields=['extra_data'])
notify_realm_export(user_profile)
def get_topic_messages(user_profile: UserProfile, stream: Stream,
topic_name: str) -> List[Message]:
query = UserMessage.objects.filter(
user_profile=user_profile,
message__recipient=stream.recipient,
).order_by("id")
return [um.message for um in filter_by_topic_name_via_message(query, topic_name)]
| true
| true
|
790aa421218d62214a81cc86a6532080e6d6f121
| 1,625
|
py
|
Python
|
Semana10/Dia5/alumnos/alumnos1/models.py
|
GuidoTorres/codigo8
|
7fdff4f677f048de7d7877b96ec3a688d3dde163
|
[
"MIT"
] | null | null | null |
Semana10/Dia5/alumnos/alumnos1/models.py
|
GuidoTorres/codigo8
|
7fdff4f677f048de7d7877b96ec3a688d3dde163
|
[
"MIT"
] | 40
|
2021-03-10T16:58:17.000Z
|
2022-03-26T01:55:17.000Z
|
Semana10/Dia5/ejercicio/Alumnos1/alumnos/alumnos/models.py
|
GuidoTorres/codigo8
|
7fdff4f677f048de7d7877b96ec3a688d3dde163
|
[
"MIT"
] | null | null | null |
from django.db import models
class alumno(models.Model):
alum_id = models.AutoField(primary_key=True)
    alum_nom = models.CharField(max_length=100, help_text="Student's first name", unique=True)
    alum_ape = models.CharField(max_length=100, help_text="Student's last name", unique=True)
def __str__(self):
        return '{} {}'.format(self.alum_nom, self.alum_ape)
class Meta:
db_table = "alumno"
verbose_name_plural="alumnos"
class curso(models.Model):
cur_id = models.AutoField(primary_key=True)
    cur_nom = models.CharField(max_length=100, help_text="Course name", unique=True)
def __str__(self):
return '{}'.format(self.cur_nom)
class Meta:
db_table = "curso"
verbose_name_plural="cursos"
class alm_cur(models.Model):
almcur_id = models.AutoField(primary_key=True)
alum_id = models.ForeignKey(alumno, on_delete=models.CASCADE)
cur_id = models.ForeignKey(curso, on_delete=models.CASCADE)
def __str__(self):
return '{}'.format(self.almcur_id)
class Meta:
db_table = "alm_cur"
verbose_name_plural="alm_cursos"
class asistencia(models.Model):
asis_id = models.AutoField(primary_key=True)
    asis_fecha = models.DateField()
asis_est = models.BooleanField(default=False)
almcur_id = models.ForeignKey(alm_cur, on_delete=models.CASCADE)
def __str__(self):
        return '{} {}'.format(self.asis_fecha, self.asis_est)
class Meta:
db_table = "asistencia"
verbose_name_plural="asistencias"
| 27.083333
| 93
| 0.659692
| true
| true
|
790aa4b6524a5dc2dda6c02eb04c91967046a988
| 7,337
|
py
|
Python
|
python/ray/rllib/agents/ppo/ppo.py
|
FieldMrFive/ray
|
a22d6ef95594a3b95fac5b2eb17f7f21be2888e8
|
[
"Apache-2.0"
] | 3
|
2019-05-01T04:31:20.000Z
|
2021-03-01T09:25:36.000Z
|
python/ray/rllib/agents/ppo/ppo.py
|
FieldMrFive/ray
|
a22d6ef95594a3b95fac5b2eb17f7f21be2888e8
|
[
"Apache-2.0"
] | 2
|
2019-01-28T00:31:25.000Z
|
2019-11-26T16:57:06.000Z
|
python/ray/rllib/agents/ppo/ppo.py
|
FieldMrFive/ray
|
a22d6ef95594a3b95fac5b2eb17f7f21be2888e8
|
[
"Apache-2.0"
] | 2
|
2020-03-26T16:32:08.000Z
|
2021-02-05T17:04:11.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from ray.rllib.agents import Agent, with_common_config
from ray.rllib.agents.ppo.ppo_policy_graph import PPOPolicyGraph
from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer
from ray.rllib.utils.annotations import override
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# If true, use the Generalized Advantage Estimator (GAE)
# with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
"use_gae": True,
# GAE(lambda) parameter
"lambda": 1.0,
# Initial coefficient for KL divergence
"kl_coeff": 0.2,
# Size of batches collected from each worker
"sample_batch_size": 200,
# Number of timesteps collected for each SGD round
"train_batch_size": 4000,
# Total SGD batch size across all devices for SGD
"sgd_minibatch_size": 128,
# Number of SGD iterations in each outer loop
"num_sgd_iter": 30,
# Stepsize of SGD
"lr": 5e-5,
# Learning rate schedule
"lr_schedule": None,
# Share layers for value function
"vf_share_layers": False,
# Coefficient of the value function loss
"vf_loss_coeff": 1.0,
# Coefficient of the entropy regularizer
"entropy_coeff": 0.0,
# PPO clip parameter
"clip_param": 0.3,
# Clip param for the value function. Note that this is sensitive to the
# scale of the rewards. If your expected V is large, increase this.
"vf_clip_param": 10.0,
# If specified, clip the global norm of gradients by this amount
"grad_clip": None,
# Target value for KL divergence
"kl_target": 0.01,
# Whether to rollout "complete_episodes" or "truncate_episodes"
"batch_mode": "truncate_episodes",
# Which observation filter to apply to the observation
"observation_filter": "NoFilter",
# Uses the sync samples optimizer instead of the multi-gpu one. This does
# not support minibatches.
"simple_optimizer": False,
# (Deprecated) Use the sampling behavior as of 0.6, which launches extra
# sampling tasks for performance but can waste a large portion of samples.
"straggler_mitigation": False,
})
# __sphinx_doc_end__
# yapf: enable
class PPOAgent(Agent):
"""Multi-GPU optimized implementation of PPO in TensorFlow."""
_agent_name = "PPO"
_default_config = DEFAULT_CONFIG
_policy_graph = PPOPolicyGraph
@override(Agent)
def _init(self):
self._validate_config()
self.local_evaluator = self.make_local_evaluator(
self.env_creator, self._policy_graph)
self.remote_evaluators = self.make_remote_evaluators(
self.env_creator, self._policy_graph, self.config["num_workers"])
if self.config["simple_optimizer"]:
self.optimizer = SyncSamplesOptimizer(
self.local_evaluator, self.remote_evaluators, {
"num_sgd_iter": self.config["num_sgd_iter"],
"train_batch_size": self.config["train_batch_size"],
})
else:
self.optimizer = LocalMultiGPUOptimizer(
self.local_evaluator, self.remote_evaluators, {
"sgd_batch_size": self.config["sgd_minibatch_size"],
"num_sgd_iter": self.config["num_sgd_iter"],
"num_gpus": self.config["num_gpus"],
"sample_batch_size": self.config["sample_batch_size"],
"num_envs_per_worker": self.config["num_envs_per_worker"],
"train_batch_size": self.config["train_batch_size"],
"standardize_fields": ["advantages"],
"straggler_mitigation": (
self.config["straggler_mitigation"]),
})
@override(Agent)
def _train(self):
if "observation_filter" not in self.raw_user_config:
# TODO(ekl) remove this message after a few releases
logger.info(
"Important! Since 0.7.0, observation normalization is no "
"longer enabled by default. To enable running-mean "
"normalization, set 'observation_filter': 'MeanStdFilter'. "
"You can ignore this message if your environment doesn't "
"require observation normalization.")
prev_steps = self.optimizer.num_steps_sampled
fetches = self.optimizer.step()
if "kl" in fetches:
# single-agent
self.local_evaluator.for_policy(
lambda pi: pi.update_kl(fetches["kl"]))
else:
def update(pi, pi_id):
if pi_id in fetches:
pi.update_kl(fetches[pi_id]["kl"])
else:
logger.debug(
"No data for {}, not updating kl".format(pi_id))
# multi-agent
self.local_evaluator.foreach_trainable_policy(update)
res = self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"])
res.update(
timesteps_this_iter=self.optimizer.num_steps_sampled - prev_steps,
info=dict(fetches, **res.get("info", {})))
# Warn about bad clipping configs
if self.config["vf_clip_param"] <= 0:
rew_scale = float("inf")
elif res["policy_reward_mean"]:
rew_scale = 0 # punt on handling multiagent case
else:
rew_scale = round(
abs(res["episode_reward_mean"]) / self.config["vf_clip_param"],
0)
if rew_scale > 100:
logger.warning(
"The magnitude of your environment rewards are more than "
"{}x the scale of `vf_clip_param`. ".format(rew_scale) +
"This means that it will take more than "
"{} iterations for your value ".format(rew_scale) +
"function to converge. If this is not intended, consider "
"increasing `vf_clip_param`.")
return res
def _validate_config(self):
if self.config["sgd_minibatch_size"] > self.config["train_batch_size"]:
raise ValueError(
"Minibatch size {} must be <= train batch size {}.".format(
self.config["sgd_minibatch_size"],
self.config["train_batch_size"]))
if (self.config["batch_mode"] == "truncate_episodes"
and not self.config["use_gae"]):
raise ValueError(
"Episode truncation is not supported without a value "
"function. Consider setting batch_mode=complete_episodes.")
if (self.config["multiagent"]["policy_graphs"]
and not self.config["simple_optimizer"]):
logger.info(
"In multi-agent mode, policies will be optimized sequentially "
"by the multi-GPU optimizer. Consider setting "
"simple_optimizer=True if this doesn't work for you.")
if not self.config["vf_share_layers"]:
logger.warning(
"FYI: By default, the value function will not share layers "
"with the policy model ('vf_share_layers': False).")
| 42.410405
| 79
| 0.622189
| true
| true
|
790aa74297f52abe851147e32cdcf5b1a7552301
| 1,343
|
py
|
Python
|
src/snake/Entities/Globals.py
|
willemserruys/Snake
|
5302b02e6f7bf04561e97bf001f758e6ddbaa17b
|
[
"MIT"
] | null | null | null |
src/snake/Entities/Globals.py
|
willemserruys/Snake
|
5302b02e6f7bf04561e97bf001f758e6ddbaa17b
|
[
"MIT"
] | null | null | null |
src/snake/Entities/Globals.py
|
willemserruys/Snake
|
5302b02e6f7bf04561e97bf001f758e6ddbaa17b
|
[
"MIT"
] | null | null | null |
from odroid_go import GO
from .Block import Block
from .Snake import Snake
SNAKE_COLOR = GO.lcd.colors.GREEN
BACKGROUND_COLOR = GO.lcd.colors.BLACK
FOOD_COLOR = GO.lcd.colors.RED
BORDER_COLOR = GO.lcd.colors.WHITE
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
BLOCK_SIZE = 10
#Where borders are drawn
INIT_X = 0
INIT_Y = 20
#Initial position of snake; relative to borders
SNAKEINIT_X = 40
SNAKEINIT_Y = 10
#Initial direction of snake
INITIALDIRECTION = 4
#Directions
#1: Forward
#2: Backward
#3: Left
#4: Right
def FillRectangle(block,color):
GO.lcd.fill_rectangle(block.x,block.y,block.width,block.length,color)
def FillRectangles(blocks,color):
for block in blocks:
GO.lcd.fill_rectangle(block.x,block.y,block.width,block.length,color)
def DrawSnake(snake):
FillRectangle(snake.Head,SNAKE_COLOR)
    if snake.BlockBehindTail is not None:
FillRectangle(snake.BlockBehindTail,BACKGROUND_COLOR)
def LostTone():
GO.speaker.tone(300,0.2)
def FoodTone():
GO.speaker.tone(400,0.2)
def WinTone():
GO.speaker.tone(500,0.2)
CAR_BODY = GO.lcd.colors.WHITE
CAR_TIRES = GO.lcd.colors.GREEN
CAR_FRONT_LIGHTS =GO.lcd.colors.GREEN
INDICATION = GO.lcd.colors.RED
ALPHABET = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
ALPHABET_COUNT = len(ALPHABET)
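# Illustration only: Block's constructor is assumed here to be
# Block(x, y, width, length), inferred from the attributes used above; the
# calls are commented out because they draw to the ODROID-GO display.
#
#   food = Block(INIT_X + 5 * BLOCK_SIZE, INIT_Y + 3 * BLOCK_SIZE,
#                BLOCK_SIZE, BLOCK_SIZE)
#   FillRectangle(food, FOOD_COLOR)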
| 23.155172
| 116
| 0.711095
| true
| true
|
790aa754ceaf63de41c7c0932667cbb8a8307095
| 314
|
py
|
Python
|
gunicorn/gunicorn_config.py
|
techstreets/django-prod
|
93ecee5823ef2e0d9337dae0cba0879758d936c4
|
[
"MIT"
] | 1
|
2017-09-11T14:46:54.000Z
|
2017-09-11T14:46:54.000Z
|
gunicorn/gunicorn_config.py
|
techstreets/django-prod
|
93ecee5823ef2e0d9337dae0cba0879758d936c4
|
[
"MIT"
] | null | null | null |
gunicorn/gunicorn_config.py
|
techstreets/django-prod
|
93ecee5823ef2e0d9337dae0cba0879758d936c4
|
[
"MIT"
] | null | null | null |
import os
import multiprocessing
def num_cpus():
cpus = 0
try:
cpus = os.sysconf("SC_NPROCESSORS_ONLN")
    except Exception:
cpus = multiprocessing.cpu_count()
return cpus or 3
name = 'django'
bind = '0.0.0.0:8000'
workers = num_cpus() * 2 + 1
debug = True
daemon = False
loglevel = 'debug'
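# Sanity check of the worker formula above: on a 4-core box num_cpus()
# returns 4, so gunicorn starts 4 * 2 + 1 == 9 workers.
assert num_cpus() >= 1
# Launch command (shell, shown as a comment; "myproject" is a placeholder
# for your actual Django project):
#   gunicorn -c gunicorn_config.py myproject.wsgi:application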
| 15.7
| 48
| 0.633758
| true
| true
|
790aa7df83a824a6efe5541f4b8d3b59132333f9
| 2,679
|
py
|
Python
|
scripts/fsmt/fsmt-make-super-tiny-model.py
|
agemagician/transformers
|
666220fc6417505607148ffb19d172d5732e860a
|
[
"Apache-2.0"
] | 2
|
2020-11-30T11:30:40.000Z
|
2021-03-26T17:20:33.000Z
|
scripts/fsmt/fsmt-make-super-tiny-model.py
|
agemagician/transformers
|
666220fc6417505607148ffb19d172d5732e860a
|
[
"Apache-2.0"
] | 3
|
2021-06-08T23:15:29.000Z
|
2022-01-13T03:40:10.000Z
|
scripts/fsmt/fsmt-make-super-tiny-model.py
|
agemagician/transformers
|
666220fc6417505607148ffb19d172d5732e860a
|
[
"Apache-2.0"
] | 1
|
2020-11-17T02:48:00.000Z
|
2020-11-17T02:48:00.000Z
|
#!/usr/bin/env python
# coding: utf-8
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
build_dir = Path(tmpdirname)
src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp : fp.write("\n".join(merges))
tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size = len(vocab),
tgt_vocab_size = len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000, tgt_vocab_size=1000,
d_model=4,
encoder_layers=1, decoder_layers=1,
encoder_ffn_dim=4, decoder_ffn_dim=4,
encoder_attention_heads=1, decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
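# Reload sketch: after this script runs, the tiny model directory can be
# loaded back for tests (standard transformers API; the path is the
# mname_tiny directory written by save_pretrained() above):
#
#   model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)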
| 35.72
| 171
| 0.707353
| true
| true
|
790aa941cbd4163c635189738d50c89080fa22c8
| 379
|
py
|
Python
|
sympy/abc.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/abc.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/abc.py
|
matthew-brett/sympy
|
7b87b62144c28f2e734e9106897c72806b99d181
|
[
"BSD-3-Clause"
] | 1
|
2021-11-10T06:39:41.000Z
|
2021-11-10T06:39:41.000Z
|
from core import Symbol
_latin = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
# COSINEQ should not be imported as they clash
_greek = 'alpha beta gamma delta epsilon zeta eta theta iota kappa '\
'mu nu xi omicron pi rho sigma tau upsilon phi chi psi omega'.split(' ')
for _s in _latin + _greek:
exec "%s = Symbol('%s')" % (_s, _s)
del _latin, _greek, _s
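# Usage sketch (from user code, not from inside this module, which is what
# makes the generated names importable):
#
#   from sympy.abc import x, alpha
#   expr = x**2 + alpha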
| 31.583333
| 74
| 0.728232
| false
| true
|
790aa979795fd964972e943db06f0a39fc600935
| 4,666
|
py
|
Python
|
bokeh/client/states.py
|
lvcarlosja/bokeh
|
f258eb867515d3b493589886c4699a9ca032a6b0
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/client/states.py
|
lvcarlosja/bokeh
|
f258eb867515d3b493589886c4699a9ca032a6b0
|
[
"BSD-3-Clause"
] | 12
|
2020-08-26T20:19:29.000Z
|
2020-08-26T20:19:52.000Z
|
bokeh/client/states.py
|
lvcarlosja/bokeh
|
f258eb867515d3b493589886c4699a9ca032a6b0
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a set of objects to represent different stages of a connection
to a Bokeh server.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from enum import Enum, auto
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'CONNECTED_BEFORE_ACK',
'CONNECTED_AFTER_ACK',
'DISCONNECTED',
'ErrorReason',
'NOT_YET_CONNECTED',
'WAITING_FOR_REPLY',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ErrorReason(Enum):
NO_ERROR = auto()
HTTP_ERROR = auto()
NETWORK_ERROR = auto()
class NOT_YET_CONNECTED(object):
''' The ``ClientConnection`` is not yet connected.
'''
async def run(self, connection):
return await connection._connect_async()
class CONNECTED_BEFORE_ACK(object):
''' The ``ClientConnection`` connected to a Bokeh server, but has not yet
received an ACK from it.
'''
async def run(self, connection):
return await connection._wait_for_ack()
class CONNECTED_AFTER_ACK(object):
''' The ``ClientConnection`` connected to a Bokeh server, and has
received an ACK from it.
'''
async def run(self, connection):
return await connection._handle_messages()
class DISCONNECTED(object):
''' The ``ClientConnection`` was connected to a Bokeh server, but is
now disconnected.
'''
def __init__(self, reason=ErrorReason.NO_ERROR, error_code=None, error_detail=""):
        ''' Constructs a DISCONNECTED state with the given reason (an
        ``ErrorReason`` enum value), error code, and additional detail
        provided as a string.
        '''
self._error_code = error_code
self._error_detail = error_detail
self._error_reason = reason
@property
def error_reason(self):
''' The reason for the error encoded as an enumeration value.
'''
return self._error_reason
@property
def error_code(self):
''' Holds the error code, if any. None otherwise.
'''
return self._error_code
@property
def error_detail(self):
''' Holds the error message, if any. Empty string otherwise.
'''
return self._error_detail
async def run(self, connection):
return None
class WAITING_FOR_REPLY(object):
''' The ``ClientConnection`` has sent a message to the Bokeh Server which
should generate a paired reply, and is waiting for the reply.
'''
def __init__(self, reqid):
self._reqid = reqid
self._reply = None
@property
def reply(self):
''' The reply from the server. (``None`` until the reply arrives) '''
return self._reply
@property
def reqid(self):
''' The request ID of the originating message. '''
return self._reqid
async def run(self, connection):
message = await connection._pop_message()
if message is None:
return await connection._transition_to_disconnected()
elif 'reqid' in message.header and message.header['reqid'] == self.reqid:
self._reply = message
return await connection._transition(CONNECTED_AFTER_ACK())
else:
return await connection._next()
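# A toy driver (hypothetical; the real loop lives in ``ClientConnection``)
# showing how these states chain: each ``run()`` awaits a connection method
# that hands back the next state, until ``DISCONNECTED.run()`` returns
# ``None``. Commented out so importing this module stays side-effect free.
#
#   import asyncio
#
#   class FakeConnection:
#       async def _connect_async(self):
#           return CONNECTED_BEFORE_ACK()
#       async def _wait_for_ack(self):
#           return CONNECTED_AFTER_ACK()
#       async def _handle_messages(self):
#           return DISCONNECTED()
#
#   async def demo():
#       conn, state = FakeConnection(), NOT_YET_CONNECTED()
#       while state is not None:
#           print("state:", type(state).__name__)
#           state = await state.run(conn)
#
#   asyncio.run(demo())  # NOT_YET_CONNECTED ... DISCONNECTED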
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 30.496732
| 86
| 0.476211
| true
| true
|
790aaa4f76b53c29268e9f79837e1f90626a0d3e
| 3,486
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/lysinimicrobiumluteum.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/lysinimicrobiumluteum.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/lysinimicrobiumluteum.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Lysinimicrobium luteum.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def LysinimicrobiumLuteum(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Lysinimicrobium luteum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Lysinimicrobium luteum graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="LysinimicrobiumLuteum",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
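# A hedged usage sketch (assumes the compiled ensmallen package is
# installed; Graph accessors vary between ensmallen versions, so only the
# summary repr is printed here):
if __name__ == "__main__":
    graph = LysinimicrobiumLuteum()
    print(graph)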
| 33.2
| 223
| 0.679002
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def LysinimicrobiumLuteum(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="LysinimicrobiumLuteum",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
790aaa8934e8fdcda30462389446df95a33e1c77
| 4,687
|
py
|
Python
|
binauthz-attestation/parse_arguments.py
|
jcortejoso/cloud-builders-community
|
80bbc3dfbabf39d7594e08ed0ae57b1a2114cb99
|
[
"Apache-2.0"
] | 1,031
|
2017-10-24T14:35:13.000Z
|
2022-03-29T16:24:08.000Z
|
binauthz-attestation/parse_arguments.py
|
jcortejoso/cloud-builders-community
|
80bbc3dfbabf39d7594e08ed0ae57b1a2114cb99
|
[
"Apache-2.0"
] | 431
|
2017-10-28T22:03:36.000Z
|
2022-03-23T15:10:50.000Z
|
binauthz-attestation/parse_arguments.py
|
jcortejoso/cloud-builders-community
|
80bbc3dfbabf39d7594e08ed0ae57b1a2114cb99
|
[
"Apache-2.0"
] | 891
|
2017-10-24T14:39:17.000Z
|
2022-03-31T09:02:52.000Z
|
"""Parses the arguments passed to the bash script and returns them back to the bash script."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import sys
# Technique for printing custom error and help
# Source: https://stackoverflow.com/a/4042861/862857
class CustomParser(argparse.ArgumentParser):
def error(self, message):
print('{}: error: {}'.format(self.prog, message), file=sys.stderr)
self.print_help()
sys.exit(1)
parser = CustomParser(prog='create_binauthz_attestation')
# By default, arguments with "--" are optional, so we have
# to make our own argument group so they are required
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument(
'--artifact-url',
type=str,
help='Registry URL for container image',
required=True)
attestor_args = parser.add_argument_group('Attestor arguments')
attestor_args.add_argument(
'--attestor',
type=str,
help='Fully qualified attestor name or just the attestor name',
required=True)
attestor_args.add_argument(
'--attestor-project',
type=str,
help='The project that the attestor is a part of')
pgp_args = parser.add_argument_group('PGP key arguments')
pgp_args.add_argument(
'--pgp-key-fingerprint',
type=str,
help='The fingerprint of the PGP key you plan to use')
# If the user is using KMS, they should provide:
kms_args = parser.add_argument_group('KMS key arguments')
kms_args.add_argument(
'--keyversion',
type=str,
help='The fully qualified keyversion or the version number of the KMS key')
kms_args.add_argument(
'--keyversion-key', type=str, help='The name of the KMS key')
kms_args.add_argument(
'--keyversion-keyring', type=str, help='The keyring for the KMS key')
kms_args.add_argument(
'--keyversion-location', type=str, help='The location of the KMS key')
kms_args.add_argument(
'--keyversion-project',
type=str,
help='The project that the KMS key belongs to')
args = parser.parse_args()
# Validate and parse attestor resource flags.
if '/' not in args.attestor:
if not args.attestor_project:
parser.error('The --attestor-project option is required if '
'--attestor is not a fully qualified '
'Attestor resource identifier')
else:
args.attestor = 'projects/{project}/attestors/{attestor}'.format(
project=args.attestor_project, attestor=args.attestor)
attestor_regex = re.compile(r'^projects/[a-z0-9-]*/attestors/[a-zA-Z0-9-_]*$')
if not attestor_regex.search(args.attestor):
parser.error('Attestor "{attestor}" is not '
'a valid attestor name'.format(attestor=args.attestor))
# Enforce mutual exclusion of key flag types.
keyversion_args = [
args.keyversion, args.keyversion_key, args.keyversion_keyring,
args.keyversion_location, args.keyversion_project
]
if args.pgp_key_fingerprint and any(keyversion_args):
parser.error('You cannot set --pgp-key-fingerprint and --keyversion related'
' options at the same time.')
if not args.pgp_key_fingerprint and not any(keyversion_args):
parser.error('Either --pgp-key-fingerprint or --keyversion related'
' options must be set.')
# Validate and parse keyversion resource flags.
if args.keyversion is not None and '/' not in args.keyversion:
if not all(keyversion_args):
parser.error(
'The --keyversion-key, --keyversion-keyring, --keyversion-location, '
'and --keyversion-project options are required if --keyversion '
'is not a fully qualified KMS key resource identifier.')
else:
args.keyversion = (
'projects/{project}/locations/{location}/keyRings/{keyRing}/'
'cryptoKeys/{cryptoKey}/cryptoKeyVersions/{keyversion}').format(
project=args.keyversion_project,
location=args.keyversion_location,
keyRing=args.keyversion_keyring,
cryptoKey=args.keyversion_key,
keyversion=args.keyversion)
keyversion_regex = re.compile(r'^projects/[a-z0-9-]*/locations/[a-z0-9-]*'
r'/keyRings/[a-zA-Z0-9-_]*/cryptoKeys/'
r'[a-zA-Z0-9-_]*/cryptoKeyVersions/[1-9][0-9]*$')
if args.keyversion is not None and not keyversion_regex.search(args.keyversion):
parser.error('"{}" is not a valid fully qualified KMS key identifier.'.format(
args.keyversion))
arguments_list = []
for arg_name, value in args.__dict__.items():
arguments_list.append('[{name}]="{value}"'.format(
name=arg_name, value=value or ''))
print('\n'.join(arguments_list))
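# The [name]="value" lines printed above are shaped for a bash associative
# array; a hypothetical wrapper script (not part of this file) could
# consume them as:
#
#   declare -A ARGS="($(python3 parse_arguments.py \
#       --artifact-url "$URL" --attestor "$ATTESTOR" \
#       --pgp-key-fingerprint "$FINGERPRINT"))"
#   echo "${ARGS[artifact_url]}"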
| 36.905512
| 94
| 0.703008
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import sys
class CustomParser(argparse.ArgumentParser):
def error(self, message):
print('{}: error: {}'.format(self.prog, message), file=sys.stderr)
self.print_help()
sys.exit(1)
parser = CustomParser(prog='create_binauthz_attestation')
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument(
'--artifact-url',
type=str,
help='Registry URL for container image',
required=True)
attestor_args = parser.add_argument_group('Attestor arguments')
attestor_args.add_argument(
'--attestor',
type=str,
help='Fully qualified attestor name or just the attestor name',
required=True)
attestor_args.add_argument(
'--attestor-project',
type=str,
help='The project that the attestor is a part of')
pgp_args = parser.add_argument_group('PGP key arguments')
pgp_args.add_argument(
'--pgp-key-fingerprint',
type=str,
help='The fingerprint of the PGP key you plan to use')
kms_args = parser.add_argument_group('KMS key arguments')
kms_args.add_argument(
'--keyversion',
type=str,
help='The fully qualified keyversion or the version number of the KMS key')
kms_args.add_argument(
'--keyversion-key', type=str, help='The name of the KMS key')
kms_args.add_argument(
'--keyversion-keyring', type=str, help='The keyring for the KMS key')
kms_args.add_argument(
'--keyversion-location', type=str, help='The location of the KMS key')
kms_args.add_argument(
'--keyversion-project',
type=str,
help='The project that the KMS key belongs to')
args = parser.parse_args()
if '/' not in args.attestor:
if not args.attestor_project:
parser.error('The --attestor-project option is required if '
'--attestor is not a fully qualified '
'Attestor resource identifier')
else:
args.attestor = 'projects/{project}/attestors/{attestor}'.format(
project=args.attestor_project, attestor=args.attestor)
attestor_regex = re.compile(r'^projects/[a-z0-9-]*/attestors/[a-zA-Z0-9-_]*$')
if not attestor_regex.search(args.attestor):
parser.error('Attestor "{attestor}" is not '
'a valid attestor name'.format(attestor=args.attestor))
keyversion_args = [
args.keyversion, args.keyversion_key, args.keyversion_keyring,
args.keyversion_location, args.keyversion_project
]
if args.pgp_key_fingerprint and any(keyversion_args):
parser.error('You cannot set --pgp-key-fingerprint and --keyversion related'
' options at the same time.')
if not args.pgp_key_fingerprint and not any(keyversion_args):
parser.error('Either --pgp-key-fingerprint or --keyversion related'
' options must be set.')
if args.keyversion is not None and '/' not in args.keyversion:
if not all(keyversion_args):
parser.error(
'The --keyversion-key, --keyversion-keyring, --keyversion-location, '
'and --keyversion-project options are required if --keyversion '
'is not a fully qualified KMS key resource identifier.')
else:
args.keyversion = (
'projects/{project}/locations/{location}/keyRings/{keyRing}/'
'cryptoKeys/{cryptoKey}/cryptoKeyVersions/{keyversion}').format(
project=args.keyversion_project,
location=args.keyversion_location,
keyRing=args.keyversion_keyring,
cryptoKey=args.keyversion_key,
keyversion=args.keyversion)
keyversion_regex = re.compile(r'^projects/[a-z0-9-]*/locations/[a-z0-9-]*'
r'/keyRings/[a-zA-Z0-9-_]*/cryptoKeys/'
r'[a-zA-Z0-9-_]*/cryptoKeyVersions/[1-9][0-9]*$')
if args.keyversion is not None and not keyversion_regex.search(args.keyversion):
parser.error('"{}" is not a valid fully qualified KMS key identifier.'.format(
args.keyversion))
arguments_list = []
for arg_name, value in args.__dict__.items():
arguments_list.append('[{name}]="{value}"'.format(
name=arg_name, value=value or ''))
print('\n'.join(arguments_list))
| true
| true
|
790aac67e35d239a819e990bcf6586f8e77b8cfa
| 39,040
|
py
|
Python
|
env/lib/python3.7/site-packages/pygments/lexers/graphics.py
|
ritchadh/docs-like-code-demo
|
23d189e074b9ecf136b7b91df3826bcfa51cd124
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.7/site-packages/pygments/lexers/graphics.py
|
ritchadh/docs-like-code-demo
|
23d189e074b9ecf136b7b91df3826bcfa51cd124
|
[
"BSD-3-Clause"
] | null | null | null |
env/lib/python3.7/site-packages/pygments/lexers/graphics.py
|
ritchadh/docs-like-code-demo
|
23d189e074b9ecf136b7b91df3826bcfa51cd124
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer', 'HLSLShaderLexer']
class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
.. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(words((
# Storage qualifiers
'attribute', 'const', 'uniform', 'varying',
'buffer', 'shared', 'in', 'out',
# Layout qualifiers
'layout',
# Interpolation qualifiers
'flat', 'smooth', 'noperspective',
# Auxiliary qualifiers
'centroid', 'sample', 'patch',
# Parameter qualifiers. Some double as Storage qualifiers
'inout',
# Precision qualifiers
'lowp', 'mediump', 'highp', 'precision',
# Invariance qualifiers
'invariant',
# Precise qualifiers
'precise',
# Memory qualifiers
'coherent', 'volatile', 'restrict', 'readonly', 'writeonly',
# Statements
'break', 'continue', 'do', 'for', 'while', 'switch',
'case', 'default', 'if', 'else', 'subroutine',
'discard', 'return', 'struct'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
# Boolean values
'true', 'false'),
prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
# Miscellaneous types
'void', 'atomic_uint',
# Floating-point scalars and vectors
'float', 'vec2', 'vec3', 'vec4',
'double', 'dvec2', 'dvec3', 'dvec4',
# Integer scalars and vectors
'int', 'ivec2', 'ivec3', 'ivec4',
'uint', 'uvec2', 'uvec3', 'uvec4',
# Boolean scalars and vectors
'bool', 'bvec2', 'bvec3', 'bvec4',
# Matrices
'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4',
'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4',
'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3',
'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4',
# Floating-point samplers
'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
'sampler1DArray', 'sampler2DArray', 'samplerCubeArray',
'sampler2DRect', 'samplerBuffer',
'sampler2DMS', 'sampler2DMSArray',
# Shadow samplers
'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow',
'sampler1DArrayShadow', 'sampler2DArrayShadow',
'samplerCubeArrayShadow', 'sampler2DRectShadow',
# Signed integer samplers
'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube',
'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray',
'isampler2DRect', 'isamplerBuffer',
'isampler2DMS', 'isampler2DMSArray',
# Unsigned integer samplers
'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube',
'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray',
'usampler2DRect', 'usamplerBuffer',
'usampler2DMS', 'usampler2DMSArray',
# Floating-point image types
'image1D', 'image2D', 'image3D', 'imageCube',
'image1DArray', 'image2DArray', 'imageCubeArray',
'image2DRect', 'imageBuffer',
'image2DMS', 'image2DMSArray',
# Signed integer image types
'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube',
'iimage1DArray', 'iimage2DArray', 'iimageCubeArray',
'iimage2DRect', 'iimageBuffer',
'iimage2DMS', 'iimage2DMSArray',
# Unsigned integer image types
'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube',
'uimage1DArray', 'uimage2DArray', 'uimageCubeArray',
'uimage2DRect', 'uimageBuffer',
'uimage2DMS', 'uimage2DMSArray'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
# Reserved for future use.
'common', 'partition', 'active', 'asm', 'class',
'union', 'enum', 'typedef', 'template', 'this',
'resource', 'goto', 'inline', 'noinline', 'public',
'static', 'extern', 'external', 'interface', 'long',
'short', 'half', 'fixed', 'unsigned', 'superp', 'input',
'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3',
'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast',
'namespace', 'using'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# All names beginning with "gl_" are reserved.
(r'gl_\w*', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
class HLSLShaderLexer(RegexLexer):
"""
HLSL (Microsoft Direct3D Shader) lexer.
.. versionadded:: 2.3
"""
name = 'HLSL'
aliases = ['hlsl']
filenames = ['*.hlsl', '*.hlsli']
mimetypes = ['text/x-hlsl']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),.\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(r'"', String, 'string'),
(words((
'asm','asm_fragment','break','case','cbuffer','centroid','class',
'column_major','compile','compile_fragment','const','continue',
'default','discard','do','else','export','extern','for','fxgroup',
'globallycoherent','groupshared','if','in','inline','inout',
'interface','line','lineadj','linear','namespace','nointerpolation',
'noperspective','NULL','out','packoffset','pass','pixelfragment',
'point','precise','return','register','row_major','sample',
'sampler','shared','stateblock','stateblock_state','static',
'struct','switch','tbuffer','technique','technique10',
'technique11','texture','typedef','triangle','triangleadj',
'uniform','vertexfragment','volatile','while'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words(('true','false'), prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
'auto','catch','char','const_cast','delete','dynamic_cast','enum',
'explicit','friend','goto','long','mutable','new','operator',
'private','protected','public','reinterpret_cast','short','signed',
'sizeof','static_cast','template','this','throw','try','typename',
'union','unsigned','using','virtual'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(words((
'dword','matrix','snorm','string','unorm','unsigned','void','vector',
'BlendState','Buffer','ByteAddressBuffer','ComputeShader',
'DepthStencilState','DepthStencilView','DomainShader',
'GeometryShader','HullShader','InputPatch','LineStream',
'OutputPatch','PixelShader','PointStream','RasterizerState',
'RenderTargetView','RasterizerOrderedBuffer',
'RasterizerOrderedByteAddressBuffer',
'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D',
'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D',
'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D',
'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer',
'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray',
'RWTexture3D','SamplerState','SamplerComparisonState',
'StructuredBuffer','Texture1D','Texture1DArray','Texture2D',
'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D',
'TextureCube','TextureCubeArray','TriangleStream','VertexShader'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
'bool','double','float','int','half','min16float','min10float',
'min16int','min12int','min16uint','uint'),
prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'),
Keyword.Type), # vector and matrix types
(words((
'abort','abs','acos','all','AllMemoryBarrier',
'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer',
'asdouble','asfloat','asin','asint','asuint','asuint','atan',
'atan2','ceil','CheckAccessFullyMapped','clamp','clip',
'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits',
'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy',
'ddy_coarse','ddy_fine','degrees','determinant',
'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance',
'dot','dst','errorf','EvaluateAttributeAtCentroid',
'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp',
'exp2','f16tof32','f32tof16','faceforward','firstbithigh',
'firstbitlow','floor','fma','fmod','frac','frexp','fwidth',
'GetRenderTargetSampleCount','GetRenderTargetSamplePosition',
'GlobalOrderedCountIncrement','GroupMemoryBarrier',
'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd',
'InterlockedCompareExchange','InterlockedCompareStore',
'InterlockedExchange','InterlockedMax','InterlockedMin',
'InterlockedOr','InterlockedXor','isfinite','isinf','isnan',
'ldexp','length','lerp','lit','log','log10','log2','mad','max',
'min','modf','msad4','mul','noise','normalize','pow','printf',
'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax',
'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors',
'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax',
'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg',
'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin',
'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp',
'reflect','refract','reversebits','round','rsqrt','saturate',
'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan',
'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod',
'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod',
'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod',
'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad',
'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd',
'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor',
'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue',
'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex',
'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce',
'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane',
'WaveReadLaneAt'),
prefix=r'\b', suffix=r'\b'),
Name.Builtin), # built-in functions
(words((
'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1',
'SV_Culldistance','SV_CullDistance0','SV_CullDistance1',
'SV_Coverage','SV_Depth','SV_DepthGreaterEqual',
'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation',
'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID',
'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID',
'SV_IsFrontFace','SV_OutputControlPointID','SV_Position',
'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex',
'SV_StencilRef','SV_TessFactor','SV_VertexID',
'SV_ViewportArrayIndex'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # system-value semantics
(r'\bSV_Target[0-7]?\b', Name.Decorator),
(words((
'allow_uav_condition','branch','call','domain','earlydepthstencil',
'fastopt','flatten','forcecase','instance','loop','maxtessfactor',
'numthreads','outputcontrolpoints','outputtopology','partitioning',
'patchconstantfunc','unroll'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # attributes
(r'[a-zA-Z_]\w*', Name),
(r'\\$', Comment.Preproc), # backslash at end of line -- usually macro continuation
(r'\s+', Text),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
class PostScriptLexer(RegexLexer):
"""
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
(r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
# Is there an authoritative list anywhere that doesn't involve
# trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
# Conditionals / flow control
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
.. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can also be an asy-function-name,
            # the following tests whether the string " [a-zA-Z]" follows
            # the Keyword.Type. Of course it is not perfect!
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-names which are not asy-function-names
            # (except yours!); perhaps useless.
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String),
(r'\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
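# Illustration of the helpers above (the '$' marks the shortest allowed
# abbreviation point of a Gnuplot keyword):
#
#     _shortened('bi$nd')  ->  r'bind\b|bin\b|bi\b'
#
# i.e. the generated alternation accepts 'bi', 'bin' and 'bind', longest
# form first so shorter prefixes cannot shadow longer matches.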
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
.. versionadded:: 0.11
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
(r'([a-zA-Z_]\w*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
(r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
default('#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # normal backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
(r'[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
"box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
"data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
"fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
"hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
"la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
"mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
"rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
"mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
"nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
"mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
"pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
"poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
"st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
"ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
"v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
"yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
"yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
"x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
"zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
"x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
"noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
"xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
"noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
"cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
"y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
"vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
"zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
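# A minimal smoke test for the lexers above, using the standard Pygments
# API (highlight/TerminalFormatter); GLSL is taken as the example input:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    shader = "void main() { gl_FragColor = vec4(1.0); }"
    print(highlight(shader, GLShaderLexer(), TerminalFormatter()))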
| 49.923274
| 104
| 0.479662
|
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer', 'HLSLShaderLexer']
class GLShaderLexer(RegexLexer):
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator),
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(words((
'attribute', 'const', 'uniform', 'varying',
'buffer', 'shared', 'in', 'out',
'layout',
'flat', 'smooth', 'noperspective',
'centroid', 'sample', 'patch',
'inout',
'lowp', 'mediump', 'highp', 'precision',
'invariant',
'precise',
'coherent', 'volatile', 'restrict', 'readonly', 'writeonly',
'break', 'continue', 'do', 'for', 'while', 'switch',
'case', 'default', 'if', 'else', 'subroutine',
'discard', 'return', 'struct'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'true', 'false'),
prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
'void', 'atomic_uint',
'float', 'vec2', 'vec3', 'vec4',
'double', 'dvec2', 'dvec3', 'dvec4',
'int', 'ivec2', 'ivec3', 'ivec4',
'uint', 'uvec2', 'uvec3', 'uvec4',
'bool', 'bvec2', 'bvec3', 'bvec4',
'mat2', 'mat3', 'mat4', 'dmat2', 'dmat3', 'dmat4',
'mat2x2', 'mat2x3', 'mat2x4', 'dmat2x2', 'dmat2x3', 'dmat2x4',
'mat3x2', 'mat3x3', 'mat3x4', 'dmat3x2', 'dmat3x3',
'dmat3x4', 'mat4x2', 'mat4x3', 'mat4x4', 'dmat4x2', 'dmat4x3', 'dmat4x4',
'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
'sampler1DArray', 'sampler2DArray', 'samplerCubeArray',
'sampler2DRect', 'samplerBuffer',
'sampler2DMS', 'sampler2DMSArray',
'sampler1DShadow', 'sampler2DShadow', 'samplerCubeShadow',
'sampler1DArrayShadow', 'sampler2DArrayShadow',
'samplerCubeArrayShadow', 'sampler2DRectShadow',
'isampler1D', 'isampler2D', 'isampler3D', 'isamplerCube',
'isampler1DArray', 'isampler2DArray', 'isamplerCubeArray',
'isampler2DRect', 'isamplerBuffer',
'isampler2DMS', 'isampler2DMSArray',
'usampler1D', 'usampler2D', 'usampler3D', 'usamplerCube',
'usampler1DArray', 'usampler2DArray', 'usamplerCubeArray',
'usampler2DRect', 'usamplerBuffer',
'usampler2DMS', 'usampler2DMSArray',
'image1D', 'image2D', 'image3D', 'imageCube',
'image1DArray', 'image2DArray', 'imageCubeArray',
'image2DRect', 'imageBuffer',
'image2DMS', 'image2DMSArray',
'iimage1D', 'iimage2D', 'iimage3D', 'iimageCube',
'iimage1DArray', 'iimage2DArray', 'iimageCubeArray',
'iimage2DRect', 'iimageBuffer',
'iimage2DMS', 'iimage2DMSArray',
'uimage1D', 'uimage2D', 'uimage3D', 'uimageCube',
'uimage1DArray', 'uimage2DArray', 'uimageCubeArray',
'uimage2DRect', 'uimageBuffer',
'uimage2DMS', 'uimage2DMSArray'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
'common', 'partition', 'active', 'asm', 'class',
'union', 'enum', 'typedef', 'template', 'this',
'resource', 'goto', 'inline', 'noinline', 'public',
'static', 'extern', 'external', 'interface', 'long',
'short', 'half', 'fixed', 'unsigned', 'superp', 'input',
'output', 'hvec2', 'hvec3', 'hvec4', 'fvec2', 'fvec3',
'fvec4', 'sampler3DRect', 'filter', 'sizeof', 'cast',
'namespace', 'using'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(r'gl_\w*', Name.Builtin),
(r'[a-zA-Z_]\w*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
class HLSLShaderLexer(RegexLexer):
name = 'HLSL'
aliases = ['hlsl']
filenames = ['*.hlsl', '*.hlsli']
mimetypes = ['text/x-hlsl']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator),
(r'\bdefined\b', Operator),
(r'[;{}(),.\[\]]', Punctuation),
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?f?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?f?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(r'"', String, 'string'),
(words((
'asm','asm_fragment','break','case','cbuffer','centroid','class',
'column_major','compile','compile_fragment','const','continue',
'default','discard','do','else','export','extern','for','fxgroup',
'globallycoherent','groupshared','if','in','inline','inout',
'interface','line','lineadj','linear','namespace','nointerpolation',
'noperspective','NULL','out','packoffset','pass','pixelfragment',
'point','precise','return','register','row_major','sample',
'sampler','shared','stateblock','stateblock_state','static',
'struct','switch','tbuffer','technique','technique10',
'technique11','texture','typedef','triangle','triangleadj',
'uniform','vertexfragment','volatile','while'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words(('true','false'), prefix=r'\b', suffix=r'\b'),
Keyword.Constant),
(words((
'auto','catch','char','const_cast','delete','dynamic_cast','enum',
'explicit','friend','goto','long','mutable','new','operator',
'private','protected','public','reinterpret_cast','short','signed',
'sizeof','static_cast','template','this','throw','try','typename',
'union','unsigned','using','virtual'),
prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
(words((
'dword','matrix','snorm','string','unorm','unsigned','void','vector',
'BlendState','Buffer','ByteAddressBuffer','ComputeShader',
'DepthStencilState','DepthStencilView','DomainShader',
'GeometryShader','HullShader','InputPatch','LineStream',
'OutputPatch','PixelShader','PointStream','RasterizerState',
'RenderTargetView','RasterizerOrderedBuffer',
'RasterizerOrderedByteAddressBuffer',
'RasterizerOrderedStructuredBuffer','RasterizerOrderedTexture1D',
'RasterizerOrderedTexture1DArray','RasterizerOrderedTexture2D',
'RasterizerOrderedTexture2DArray','RasterizerOrderedTexture3D',
'RWBuffer','RWByteAddressBuffer','RWStructuredBuffer',
'RWTexture1D','RWTexture1DArray','RWTexture2D','RWTexture2DArray',
'RWTexture3D','SamplerState','SamplerComparisonState',
'StructuredBuffer','Texture1D','Texture1DArray','Texture2D',
'Texture2DArray','Texture2DMS','Texture2DMSArray','Texture3D',
'TextureCube','TextureCubeArray','TriangleStream','VertexShader'),
prefix=r'\b', suffix=r'\b'),
Keyword.Type),
(words((
'bool','double','float','int','half','min16float','min10float',
'min16int','min12int','min16uint','uint'),
prefix=r'\b', suffix=r'([1-4](x[1-4])?)?\b'),
Keyword.Type), # vector and matrix types
(words((
'abort','abs','acos','all','AllMemoryBarrier',
'AllMemoryBarrierWithGroupSync','any','AppendStructuredBuffer',
'asdouble','asfloat','asin','asint','asuint','asuint','atan',
'atan2','ceil','CheckAccessFullyMapped','clamp','clip',
'CompileShader','ConsumeStructuredBuffer','cos','cosh','countbits',
'cross','D3DCOLORtoUBYTE4','ddx','ddx_coarse','ddx_fine','ddy',
'ddy_coarse','ddy_fine','degrees','determinant',
'DeviceMemoryBarrier','DeviceMemoryBarrierWithGroupSync','distance',
'dot','dst','errorf','EvaluateAttributeAtCentroid',
'EvaluateAttributeAtSample','EvaluateAttributeSnapped','exp',
'exp2','f16tof32','f32tof16','faceforward','firstbithigh',
'firstbitlow','floor','fma','fmod','frac','frexp','fwidth',
'GetRenderTargetSampleCount','GetRenderTargetSamplePosition',
'GlobalOrderedCountIncrement','GroupMemoryBarrier',
'GroupMemoryBarrierWithGroupSync','InterlockedAdd','InterlockedAnd',
'InterlockedCompareExchange','InterlockedCompareStore',
'InterlockedExchange','InterlockedMax','InterlockedMin',
'InterlockedOr','InterlockedXor','isfinite','isinf','isnan',
'ldexp','length','lerp','lit','log','log10','log2','mad','max',
'min','modf','msad4','mul','noise','normalize','pow','printf',
'Process2DQuadTessFactorsAvg','Process2DQuadTessFactorsMax',
'Process2DQuadTessFactorsMin','ProcessIsolineTessFactors',
'ProcessQuadTessFactorsAvg','ProcessQuadTessFactorsMax',
'ProcessQuadTessFactorsMin','ProcessTriTessFactorsAvg',
'ProcessTriTessFactorsMax','ProcessTriTessFactorsMin',
'QuadReadLaneAt','QuadSwapX','QuadSwapY','radians','rcp',
'reflect','refract','reversebits','round','rsqrt','saturate',
'sign','sin','sincos','sinh','smoothstep','sqrt','step','tan',
'tanh','tex1D','tex1D','tex1Dbias','tex1Dgrad','tex1Dlod',
'tex1Dproj','tex2D','tex2D','tex2Dbias','tex2Dgrad','tex2Dlod',
'tex2Dproj','tex3D','tex3D','tex3Dbias','tex3Dgrad','tex3Dlod',
'tex3Dproj','texCUBE','texCUBE','texCUBEbias','texCUBEgrad',
'texCUBElod','texCUBEproj','transpose','trunc','WaveAllBitAnd',
'WaveAllMax','WaveAllMin','WaveAllBitOr','WaveAllBitXor',
'WaveAllEqual','WaveAllProduct','WaveAllSum','WaveAllTrue',
'WaveAnyTrue','WaveBallot','WaveGetLaneCount','WaveGetLaneIndex',
'WaveGetOrderedIndex','WaveIsHelperLane','WaveOnce',
'WavePrefixProduct','WavePrefixSum','WaveReadFirstLane',
'WaveReadLaneAt'),
prefix=r'\b', suffix=r'\b'),
Name.Builtin), # built-in functions
(words((
'SV_ClipDistance','SV_ClipDistance0','SV_ClipDistance1',
'SV_Culldistance','SV_CullDistance0','SV_CullDistance1',
'SV_Coverage','SV_Depth','SV_DepthGreaterEqual',
'SV_DepthLessEqual','SV_DispatchThreadID','SV_DomainLocation',
'SV_GroupID','SV_GroupIndex','SV_GroupThreadID','SV_GSInstanceID',
'SV_InnerCoverage','SV_InsideTessFactor','SV_InstanceID',
'SV_IsFrontFace','SV_OutputControlPointID','SV_Position',
'SV_PrimitiveID','SV_RenderTargetArrayIndex','SV_SampleIndex',
'SV_StencilRef','SV_TessFactor','SV_VertexID',
'SV_ViewportArrayIndex'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # system-value semantics
(r'\bSV_Target[0-7]?\b', Name.Decorator),
(words((
'allow_uav_condition','branch','call','domain','earlydepthstencil',
'fastopt','flatten','forcecase','instance','loop','maxtessfactor',
'numthreads','outputcontrolpoints','outputtopology','partitioning',
'patchconstantfunc','unroll'),
prefix=r'\b', suffix=r'\b'),
Name.Decorator), # attributes
(r'[a-zA-Z_]\w*', Name),
(r'\\$', Comment.Preproc), # backslash at end of line -- usually macro continuation
(r'\s+', Text),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
class PostScriptLexer(RegexLexer):
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
            (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
            # Is there an authoritative list anywhere that doesn't involve
            # trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
class AsymptoteLexer(RegexLexer):
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
],
'statements': [
(r'"(\\\\|\\"|[^"])*"', String),
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
            # Since an asy-type-name can also be an asy-function-name,
            # the following tests whether the string " [a-zA-Z]" follows
            # the Keyword.Type. Of course it is not perfect!
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
            # Now the asy-type-names which are not asy-function-names
            # (except yours!); perhaps useless.
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
(r'[a-zA-Z_]\w*:(?!:)', Name.Label),
(r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String),
(r'\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
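    # Post-process the token stream: plain Name tokens that match the
    # generated lists of Asymptote builtins are promoted to Name.Function
    # or Name.Variable (see pygments.lexers._asy_builtins).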
def get_tokens_unprocessed(self, text):
from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
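# A minimal usage sketch (illustrative, not part of the original module):
# assuming 'draw' appears in ASYFUNCNAME and 'unitcircle' in ASYVARNAME,
# the override above reclassifies them from plain Name:
#
#     lexer = AsymptoteLexer()
#     for _, tok, val in lexer.get_tokens_unprocessed('draw(unitcircle);'):
#         print(tok, repr(val))   # 'draw' -> Name.Function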
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
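# For example, _shortened('pa$use') expands to the alternation
# r'pause\b|paus\b|pau\b|pa\b', i.e. every abbreviation of "pause" down to
# the minimal prefix marked by the '$'; _shortened_many joins several such
# alternations with '|'.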
class GnuplotLexer(RegexLexer):
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
(r'([a-zA-Z_]\w*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
(r'([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
default('#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String),
(r'\n', String, '#pop'),
],
'sqstring': [
(r"''", String),
(r"'", String, '
(r"[^\\'\n]+", String),
(r'\\\n', String),
(r'\\', String),
(r'\n', String, '#pop'),
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
(r'[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
"box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
"data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
"fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
"hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
"la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
"mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
"rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
"mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
"nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
"mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
"pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
"poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
"st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
"ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
"v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
"yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
"yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
"x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
"zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
"x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
"noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
"xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
"noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
"cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
"y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
"vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
"zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
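# Sketch of the effect of the abbreviation helpers in GnuplotLexer
# (illustrative): both "se grid" and "set grid" tokenize the command word
# as Keyword, because _shortened('se$t') matches any prefix of "set" down
# to "se"; "grid" is then matched as Name.Builtin in the 'optionarg' state.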
class PovrayLexer(RegexLexer):
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
                'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
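
if __name__ == '__main__':
    # Minimal self-test sketch (not part of the original module): dump the
    # tokens each lexer produces for a tiny sample when this file is run
    # directly. Assumes the imports at the top of this file (RegexLexer,
    # token types, words, etc.) are available, as they must be for the
    # class definitions above.
    samples = [
        (AsymptoteLexer(), 'path p = unitcircle; draw(p);'),
        (GnuplotLexer(), 'set title "demo"\nplot sin(x)\n'),
        (PovrayLexer(), '#declare R = 1.5;\nsphere { <0, 0, 0>, R }\n'),
    ]
    for lexer, src in samples:
        print('---', lexer.name)
        for tok, val in lexer.get_tokens(src):
            print(tok, repr(val))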