hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8908d261e8e38c35bb6884e8180c3cd5e28a2b89 | 33 | py | Python | pricegen/__init__.py | mahiro/python-pricegen | 89fa697554c3fcf6dc488775ba894eba53415973 | [
"MIT"
] | null | null | null | pricegen/__init__.py | mahiro/python-pricegen | 89fa697554c3fcf6dc488775ba894eba53415973 | [
"MIT"
] | null | null | null | pricegen/__init__.py | mahiro/python-pricegen | 89fa697554c3fcf6dc488775ba894eba53415973 | [
"MIT"
] | null | null | null | from pricegen.generator import *
| 16.5 | 32 | 0.818182 |
5483cdf867cc7e39e862986f2417cf80907ffef8 | 2,694 | py | Python | ws4py/__init__.py | soulgalore/wptagent | a26b2b1135e34d458f9d332b8a338bc013d51203 | [
"Apache-2.0"
] | 2 | 2020-10-28T09:42:03.000Z | 2022-02-07T14:11:50.000Z | ws4py/__init__.py | soulgalore/wptagent | a26b2b1135e34d458f9d332b8a338bc013d51203 | [
"Apache-2.0"
] | 4 | 2021-07-22T10:28:37.000Z | 2022-03-10T13:56:51.000Z | ws4py/__init__.py | soulgalore/wptagent | a26b2b1135e34d458f9d332b8a338bc013d51203 | [
"Apache-2.0"
] | 1 | 2020-10-10T04:39:35.000Z | 2020-10-10T04:39:35.000Z | # -*- coding: utf-8 -*-
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of ws4py nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import logging.handlers as handlers
__author__ = "Sylvain Hellegouarch"
__version__ = "0.4.2.dev0"
__all__ = ['WS_KEY', 'WS_VERSION', 'configure_logger', 'format_addresses']
# Magic GUID appended to the client's Sec-WebSocket-Key during the opening
# handshake (RFC 6455, section 1.3).
WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
# Sec-WebSocket-Version values this library supports.
WS_VERSION = (8, 13)
def configure_logger(stdout=True, filepath=None, level=logging.INFO):
    """Configure and return the shared 'ws4py' logger.

    Parameters:
        stdout: when True, attach a StreamHandler writing to sys.stdout.
        filepath: when given, attach a 10 MB rotating file handler
            (3 backups) writing to this path.
        level: logging level applied to the logger and every handler.

    Note: handlers are appended on each call; calling this repeatedly
    accumulates handlers on the same logger instance.
    """
    ws_logger = logging.getLogger('ws4py')
    ws_logger.setLevel(level)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s %(message)s")

    if filepath:
        file_handler = handlers.RotatingFileHandler(
            filepath, maxBytes=10485760, backupCount=3)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        ws_logger.addHandler(file_handler)

    if stdout:
        import sys
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(level)
        stream_handler.setFormatter(formatter)
        ws_logger.addHandler(stream_handler)

    return ws_logger
def format_addresses(ws):
    """Return a human-readable description of a websocket's endpoints.

    When both local and peer addresses are (host, port) tuples, both ends
    are shown; otherwise only the local binding is reported.
    """
    local = ws.local_address
    remote = ws.peer_address
    if not (isinstance(local, tuple) and isinstance(remote, tuple)):
        return "[Bound to '%s']" % local
    local_ip, local_port = local
    remote_ip, remote_port = remote
    return "[Local => %s:%d | Remote => %s:%d]" % (local_ip, local_port,
                                                   remote_ip, remote_port)
| 39.617647 | 90 | 0.729027 |
e6c92cf1f30f96588c2fb9d4b6a8581756b65816 | 343 | py | Python | thoughts/commands/random.py | hofmanniac/thoughts | de3a63ac79a253139f014e89606dae6d98cfa80d | [
"MIT"
] | null | null | null | thoughts/commands/random.py | hofmanniac/thoughts | de3a63ac79a253139f014e89606dae6d98cfa80d | [
"MIT"
] | null | null | null | thoughts/commands/random.py | hofmanniac/thoughts | de3a63ac79a253139f014e89606dae6d98cfa80d | [
"MIT"
] | null | null | null | import random
from thoughts import context as ctx
def process(command, context):
    """Pick a uniformly random element from the command's "#random" list.

    The chosen item is stored into the context via ctx.Context.store_item
    and returned. Returns None when "#random" is not a non-empty list
    (the original crashed with ValueError on an empty list and silently
    returned None for non-lists).

    Fixes vs. original: no longer shadows the builtin `max`, uses
    isinstance() instead of `type(...) is list`, and uses random.choice
    instead of manual randint indexing.
    """
    random_set = command["#random"]
    if isinstance(random_set, list) and random_set:
        item = random.choice(random_set)
        ctx.Context.store_item(context, command, item)
        return item
    return None
| 24.5 | 54 | 0.629738 |
3f94259acf9da229dcaf85a9ad01ea8d89524920 | 641 | py | Python | phraseless/contrib/django/views.py | jocke-l/phraseless | 47b3dcc434a4a12e9cb668d7ae527c3a5478dada | [
"BSD-3-Clause"
] | null | null | null | phraseless/contrib/django/views.py | jocke-l/phraseless | 47b3dcc434a4a12e9cb668d7ae527c3a5478dada | [
"BSD-3-Clause"
] | null | null | null | phraseless/contrib/django/views.py | jocke-l/phraseless | 47b3dcc434a4a12e9cb668d7ae527c3a5478dada | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth import authenticate as authenticate_
from django.http import HttpResponse
from phraseless.contrib.django.forms import CertificateAuth
def authenticate(request):
    """Certificate-based authentication endpoint.

    Validates the posted CertificateAuth form and, when valid, attempts to
    authenticate with the supplied certificate chain and challenge
    signature. The result is reported via the X-Authenticated response
    header ('yes' / 'no').
    """
    form = CertificateAuth(request.POST)

    user = None
    if form.is_valid():
        data = form.cleaned_data
        user = authenticate_(
            request,
            certificate_chain=data['certificate_chain'],
            signature=data['challenge_signature'],
        )

    response = HttpResponse()
    response['X-Authenticated'] = 'yes' if user else 'no'
    return response
| 24.653846 | 69 | 0.667707 |
51ec7ee8d855982fba1536aa09dc8d909503147e | 153 | py | Python | src/selfpub/outp/__init__.py | groboclown/py-book-selfpub | 42060d8bca7c7d281801fe6836f238f41df872e3 | [
"Apache-2.0"
] | null | null | null | src/selfpub/outp/__init__.py | groboclown/py-book-selfpub | 42060d8bca7c7d281801fe6836f238f41df872e3 | [
"Apache-2.0"
] | null | null | null | src/selfpub/outp/__init__.py | groboclown/py-book-selfpub | 42060d8bca7c7d281801fe6836f238f41df872e3 | [
"Apache-2.0"
] | null | null | null |
from .output import OutputFile
from .mobi import MobiOutput
from .epub import EPubOutput
from .html import HtmlOutput
from .zip_gen import ZipGenOutput
| 21.857143 | 33 | 0.830065 |
be1d51e137b7a7db7ce68b037764241cc09f33f0 | 6,068 | py | Python | src/test.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | [
"Apache-2.0"
] | null | null | null | src/test.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | [
"Apache-2.0"
] | null | null | null | src/test.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils
import torchvision.datasets as dset
from torch.autograd import Variable
import utils
from model import NetworkCIFAR as Network
from genotypes import *
# test of retrained discrete architectures
# Command-line configuration for evaluating a retrained DARTS-style model.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='dataset', help='location of the data corpus')
parser.add_argument('--task', type=str, default='CIFAR10', help='task name')
parser.add_argument('--test_filter', type=int, default=0,
                    help='CIFAR100cf fine classes to filter per coarse class in test')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='EXP/model.pt', help='path of pretrained model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
args = parser.parse_args()

# Results are written next to the checkpoint, under a "test" subfolder.
args.save = os.path.join(utils.get_dir(), os.path.split(args.model_path)[0], "test")
utils.create_exp_dir(args.save)

# Log simultaneously to stdout and to <save>/testlog.txt.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'testlog.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

# Number of output classes for the selected task; CIFAR100cf groups the 100
# fine classes into 20 coarse ones.
if args.task == "CIFAR100":
    CIFAR_CLASSES = 100
elif args.task == "CIFAR100cf":
    CIFAR_CLASSES = 20
else:
    CIFAR_CLASSES = 10
def main():
    """Load a trained discrete architecture and report its test accuracy.

    Resolves the genotype from artifacts saved next to the checkpoint,
    rebuilds the network, loads the weights, and evaluates on the test
    split of the configured task.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # Fix every RNG we rely on so the evaluation is reproducible.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    torch.cuda.empty_cache()
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    # Resolve the genotype: prefer genotype.txt saved next to the checkpoint,
    # then genoname.txt naming a predefined genotype, else genotypes.ADMM.
    genotype_path = os.path.join(utils.get_dir(), os.path.split(args.model_path)[0], 'genotype.txt')
    print(genotype_path)
    if os.path.isfile(genotype_path):
        with open(genotype_path, "r") as f:
            geno_raw = f.read()
        # genotype.txt stores a Genotype repr produced at search time.
        genotype = eval(geno_raw)
    else:
        genoname = os.path.join(utils.get_dir(), os.path.split(args.model_path)[0], 'genoname.txt')
        if os.path.isfile(genoname):
            with open(genoname, "r") as f:
                args.arch = f.read()
            genotype = eval("genotypes.%s" % args.arch)
        else:
            genotype = eval("genotypes.ADMM")

    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, os.path.join(utils.get_dir(), args.model_path))
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    datapath = os.path.join(utils.get_dir(), args.data)
    # Build the test loader for the requested task. (The original code also
    # built an unconditional CIFAR-10 dataset/loader here that every branch
    # below immediately overwrote; that redundant download/setup is removed.)
    if args.task == "CIFAR100cf":
        _, test_transform = utils._data_transforms_cifar100(args)
        test_data = utils.CIFAR100C2F(root=datapath, train=False, download=True, transform=test_transform)
        # Keep only the configured number of fine classes per coarse class.
        test_indices = test_data.filter_by_fine(args.test_filter)
        test_queue = torch.utils.data.DataLoader(
            torch.utils.data.Subset(test_data, test_indices), batch_size=args.batch_size,
            shuffle=False, pin_memory=True, num_workers=2)
        # TODO: extend each epoch or multiply number of epochs by 20%*args.class_filter
    else:
        if args.task == "CIFAR100":
            _, test_transform = utils._data_transforms_cifar100(args)
            test_data = dset.CIFAR100(root=datapath, train=False, download=True, transform=test_transform)
        else:
            _, test_transform = utils._data_transforms_cifar10(args)
            test_data = dset.CIFAR10(root=datapath, train=False, download=True, transform=test_transform)
        test_queue = torch.utils.data.DataLoader(
            test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
def infer(test_queue, model, criterion):
    """Run one evaluation pass; return (top-1 accuracy avg, average loss)."""
    objs = utils.AverageMeter()
    top1 = utils.AverageMeter()
    model.eval()
    # IMPROVEMENT: disable autograd for the evaluation loop. model.eval()
    # alone does not stop gradient bookkeeping, so this saves memory and
    # time without changing the reported metrics.
    with torch.no_grad():
        for step, (input, target) in enumerate(test_queue):
            # Variable() is a no-op wrapper on modern PyTorch; kept for
            # consistency with the surrounding codebase.
            input = Variable(input).cuda()
            target = Variable(target).cuda(non_blocking=True)

            logits, _ = model(input)
            loss = criterion(logits, target)

            prec1 = utils.accuracy(logits, target, topk=(1,))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1[0].item(), n)

            if step % args.report_freq == 0:
                logging.info('test %03d %e %f', step, objs.avg, top1.avg)

    return top1.avg, objs.avg


if __name__ == '__main__':
    main()
4e17210ce1147c8d9eb9649ccae537f925a57dee | 1,492 | py | Python | asana_app/asana_utils.py | maksim-shaidulin/asana | bd23a0a794ce7aafe31e9df1de9c7606f32504f4 | [
"MIT"
] | null | null | null | asana_app/asana_utils.py | maksim-shaidulin/asana | bd23a0a794ce7aafe31e9df1de9c7606f32504f4 | [
"MIT"
] | null | null | null | asana_app/asana_utils.py | maksim-shaidulin/asana | bd23a0a794ce7aafe31e9df1de9c7606f32504f4 | [
"MIT"
] | null | null | null | import os
import asana
class AsanaApiUtils:
    """Thin convenience wrapper around the Asana Python client.

    Authenticates with the ASANA_PERSONAL_TOKEN environment variable and
    pins all operations to the first workspace visible to that user.
    """

    def __init__(self):
        # Token is read from the environment; API calls will fail later if
        # it is missing or invalid.
        self.client = asana.Client.access_token(os.environ.get('ASANA_PERSONAL_TOKEN'))
        self.workspace_gid = self.get_default_workspace()['gid']

    def get_default_workspace(self):
        """Return the first workspace of the authenticated user."""
        workspaces = self.client.workspaces.get_workspaces()
        return list(workspaces)[0]

    def get_projects(self):
        """Return all projects in the default workspace as a list."""
        projects = self.client.projects.get_projects(
            workspace=self.workspace_gid)
        return list(projects)

    def create_project(self, name):
        """Create a project with the given name; return the API response."""
        print(f'Creating project {name}')
        response = self.client.projects.create_project(
            {'name': name, 'workspace': self.workspace_gid})
        return response

    def update_project(self, gid, name):
        """Rename the project identified by gid."""
        print(f'Updating project {gid} to name {name}')
        self.client.projects.update_project(
            gid, {'name': name, 'workspace': self.workspace_gid})

    def get_users(self):
        """Return an iterator over users in the default workspace."""
        return self.client.users.get_users(workspace=self.workspace_gid)

    def create_task(self, project, name, assignee=None):
        """Create a task in the given project, optionally assigned.

        BUG FIX: the original accepted `assignee` but never sent it to the
        API; it is now included in the request when provided.
        """
        params = {'projects': project, 'name': name}
        if assignee is not None:
            params['assignee'] = assignee
        return self.client.tasks.create(params)

    def get_tasks(self):
        """Return tasks from every project in the workspace (flattened)."""
        tasks = []
        for project in self.get_projects():
            tasks_in_project = self.client.tasks.get_tasks_for_project(
                project['gid'], opt_fields=['name', 'assignee.gid', 'assignee.name'])
            tasks.extend(list(tasks_in_project))
        return tasks
| 34.697674 | 87 | 0.655496 |
e4fce052fbb8b0fdff7281439eb9bbe2eaeb9348 | 9,320 | py | Python | VQ3D/camera_pose_estimation/pnp_api.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 27 | 2021-10-16T02:39:17.000Z | 2022-03-31T11:16:11.000Z | VQ3D/camera_pose_estimation/pnp_api.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 5 | 2022-03-23T04:53:36.000Z | 2022-03-29T23:39:07.000Z | VQ3D/camera_pose_estimation/pnp_api.py | emulhall/episodic-memory | 27bafec6e09c108f0efe5ac899eabde9d1ac40cc | [
"MIT"
] | 13 | 2021-11-25T19:17:29.000Z | 2022-03-25T14:01:47.000Z | import torch
import fnmatch
import argparse
import numpy as np
from torch.utils.data.dataset import Dataset
import os
from torch.utils.data import DataLoader
from tqdm import tqdm
import cv2
from utils import WritePosesToPly
def PnP(x1s, f3ds, x2s, m1_ids, K1, K2, thres=1e-2):
    """Estimate a camera pose from 2D-3D correspondences via RANSAC PnP.

    Args:
        x1s: matterport-side 2D keypoints (unused by the computation; kept
            for call-site compatibility).
        f3ds: list of arrays of 3D points matched to the query keypoints.
        x2s: list of arrays of 2D keypoints in the query (ego) image, in
            pixel coordinates.
        m1_ids, K1: unused; kept for call-site compatibility.
        K2: 3x3 intrinsics of the query camera.
        thres: inlier threshold on reprojection error, measured in
            normalized (K2-inverse) image coordinates.

    Returns:
        (success, T, f2d_inliers, f3d_inliers) where T is a 4x4 transform
        (named Caz_T_Wmp: matterport world -> azure camera), or
        (0, None, None, None) on failure.
    """
    f2d = [] # keep only feature points with depth in the current frame
    f3d_new = []
    for k in range(len(f3ds)):
        x2 = np.array(x2s[k])
        f3d = np.array(f3ds[k])
        for i in range(f3d.shape[0]):
            # A near-zero 3D point means no depth was available there.
            if np.linalg.norm(f3d[i]) > 1e-2:
                f2d.append(x2[i, :])
                f3d_new.append(f3d[i, 0])
    f2d = np.array(f2d)
    f3d = np.array(f3d_new)
    # cv2.solvePnP* expects float32 arrays shaped (N, 1, 3) / (N, 1, 2).
    if f3d.ndim == 2:
        f3d = np.expand_dims(f3d.astype(np.float32), axis=1)
    if f2d.ndim == 2:
        f2d = np.expand_dims(f2d.astype(np.float32), axis=1)
    # PnP needs at least 4 correspondences.
    if (f3d.shape[0]<4) or (f2d.shape[0]<4):
        return 0, None, None, None
    # Robust initial estimate: EPnP inside RANSAC.
    ret = cv2.solvePnPRansac(f3d,
                             f2d,
                             K2,
                             distCoeffs=None,
                             flags=cv2.SOLVEPNP_EPNP)
    success = ret[0]
    rotation_vector = ret[1]
    translation_vector = ret[2]
    # Observations in normalized homogeneous image coordinates.
    f_2d = np.linalg.inv(K2) @ np.concatenate((f2d[:, 0],
                                               np.ones((f2d.shape[0], 1))), axis=1).T
    rotation_mat, _ = cv2.Rodrigues(rotation_vector)
    translation_vector = translation_vector.reshape(3)
    # Reproject the 3D points with the estimated pose and measure the error.
    proj = rotation_mat @ f3d[:, 0].T + translation_vector.reshape(3, -1)
    proj = proj[:2] / proj[2:]
    reproj_error = np.linalg.norm(f_2d[:2] - proj[:2], axis=0)
    reproj_inliers = reproj_error < thres
    reproj_inliers = reproj_inliers.reshape(-1)
    # Require at least 10 inliers to accept the RANSAC estimate.
    if success==0 or reproj_inliers.sum() < 10:
        return 0, None, None, None
    else:
        # Refine on the inlier set with the iterative solver.
        ret = cv2.solvePnP(f3d[reproj_inliers].reshape(reproj_inliers.sum(), 1, 3),
                           f2d[reproj_inliers].reshape(reproj_inliers.sum(), 1, 2),
                           K2,
                           distCoeffs=None,
                           flags=cv2.SOLVEPNP_ITERATIVE)
        success = ret[0]
        rotation_vector = ret[1]
        translation_vector = ret[2]
        rotation_mat, _ = cv2.Rodrigues(rotation_vector)
        translation_vector = translation_vector.reshape(3)
        # Assemble the homogeneous 4x4 transform.
        Caz_T_Wmp = np.eye(4)
        Caz_T_Wmp[:3, :3] = rotation_mat
        Caz_T_Wmp[:3, 3] = translation_vector
        rotation_mat, _ = cv2.Rodrigues(rotation_vector)
        translation_vector = translation_vector.reshape(3)
        proj = rotation_mat @ f3d[:, 0].T + translation_vector.reshape(3, -1)
        proj = proj[:2] / proj[2:]
        # Re-count inliers after refinement; reject the solution if more
        # than half of the RANSAC inliers were lost.
        reproj_error_refined = np.linalg.norm(f_2d[:2] - proj[:2], axis=0)
        reproj_error_refined = reproj_error_refined < thres
        reproj_error_refined = reproj_error_refined.reshape(-1)
        if reproj_error_refined.sum() < 0.5 * reproj_inliers.sum():
            return 0, None, None, None
        else:
            return success, Caz_T_Wmp, f2d[reproj_error_refined, 0], f3d[reproj_error_refined, 0]
class PosePnP_points_accumulation(Dataset):
    """Dataset that estimates one egocentric camera pose per image via PnP.

    For each ego frame, every SuperGlue match file against a matterport
    render is loaded, confident 2D-3D correspondences are collected, and
    the per-render inliers are accumulated into a single joint PnP solve.

    NOTE(review): __getitem__ depends on module-level globals (s_ego, s_mp,
    K_mp, K_ego, and the PnP function) defined in this file's script body.
    """

    def __init__(self, match_database='', img_desc_folder='', image_list=None):
        super(PosePnP_points_accumulation, self).__init__()
        self.img_desc_folder = img_desc_folder
        self.match_database = match_database
        self.num_images = len(image_list)
        # Per-image 3x4 pose and success flag (filled by consumers of
        # __getitem__; __getitem__ itself returns results, it does not
        # write these arrays).
        self.P = np.zeros((self.num_images, 3, 4))
        self.good_pose_pnp = np.zeros(self.num_images, dtype=bool)
        self.original_image_id_list = image_list

    def __getitem__(self, index):
        """Estimate the pose for one ego image.

        Returns a dict with 'img_idx' (int tensor), 'is_good_pose'
        (bool tensor) and 'solution' (3x4 double tensor; zeros when no
        pose was found).
        """
        azure_img_idx = self.original_image_id_list[index]
        # BUG FIX: the original called fnmatch.filIter, which does not
        # exist (AttributeError at runtime); fnmatch.filter is intended.
        matches_file_list = fnmatch.filter(os.listdir(self.match_database),
                                           'color_%07d_*_matches.npz' % azure_img_idx)
        # Default output: no pose found.
        output = {'img_idx': torch.tensor(index, dtype=torch.int),
                  'is_good_pose': torch.tensor([False]),
                  'solution': torch.zeros((3, 4), dtype=torch.double)}
        x2_all = []
        f3d_all = []
        total_inlier = 0
        for file_idx in range(len(matches_file_list)):
            # The matterport image id is embedded in the match file name.
            matterport_img_idx = int(matches_file_list[file_idx][20:26])
            matches_data = np.load(os.path.join(self.match_database, matches_file_list[file_idx]), 'r')
            image_descriptor = np.load(os.path.join(self.img_desc_folder, 'image_%06d_descriptors.npz' % matterport_img_idx), 'r')
            _x1 = []
            _f3d = []
            _x2 = []
            good_matches = 0
            # Keep confident SuperGlue correspondences, rescaling keypoints
            # by the global s_ego / s_mp resolution factors.
            for i in range(matches_data['keypoints0'].shape[0]):
                if matches_data['matches'][i] >= 0 and matches_data['match_confidence'][i] > 0.1:
                    _x2.append(matches_data['keypoints0'][i] * np.array(s_ego))
                    _x1.append(matches_data['keypoints1'][matches_data['matches'][i]] * np.array(s_mp))
                    _f3d.append(image_descriptor['XYZ'][matches_data['matches'][i]])
                    good_matches += 1
            if good_matches > 20:
                # Per-render PnP: used only to harvest inlier 2D-3D pairs.
                success, T, f2d_inlier, f3d_inlier = PnP([_x1], [_f3d], [_x2], [matterport_img_idx],
                                                         K_mp, K_ego, thres=3e-2)
                if success and f2d_inlier.shape[0] >= 10:
                    total_inlier += f2d_inlier.shape[0]
                    x2_all.append(f2d_inlier)
                    f3d_all.append(f3d_inlier)
        if total_inlier >= 30:
            # Joint PnP over inliers accumulated across all matched renders.
            f3d_all = np.concatenate(f3d_all, axis=0)
            f3d_all = np.expand_dims(f3d_all, axis=1)
            x2_all = np.concatenate(x2_all, axis=0)
            success, T, f2d_inlier, f3d_inlier = PnP(None, [f3d_all], [x2_all], None, K_mp, K_ego, thres=3e-2)
            if success and f2d_inlier.shape[0] > 20:
                output = {'img_idx': torch.tensor(index, dtype=torch.int),
                          'is_good_pose': torch.tensor([True]), 'solution': torch.tensor(T[:3])}
        return output

    def __len__(self):
        return self.num_images
# ---- command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(
    description='Image pair matching and pose evaluation with SuperGlue',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# NOTE(review): the help strings below look copy-pasted ("SuperGlue match
# threshold" does not describe these folder arguments) — confirm intent.
parser.add_argument(
    '--ego_dataset_folder', type=str, default='',
    help='SuperGlue match threshold')
parser.add_argument(
    '--matterport_descriptors_folder', type=str,
    help='Matterport descriptor folder')
parser.add_argument(
    '--output_dir', type=str,
    help='SuperGlue match threshold')
opt = parser.parse_args()

EGO_DATASET_FOLDER = opt.ego_dataset_folder
MATCH_DATABASE = os.path.join(EGO_DATASET_FOLDER, 'superglue_match_results')
IMAGE_DESC_FOLDER = opt.matterport_descriptors_folder
OUTPUT_DIR = opt.output_dir
if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

# Matterport render intrinsics: f=700 with principal point at the center of
# a 1920x1080 image (pixel-center convention, hence the -0.5).
K_mp = np.array([[700., 0., 960. - 0.5],
                 [0., 700., 540. - 0.5],
                 [0., 0., 1.]])
# Scale factors mapping 640x480 keypoint coordinates up to the full
# render/camera resolution (e.g. 1920/640 = 3 for the matterport renders).
s_mp = [(K_mp[0,2]+0.5)*2/640, (K_mp[1,2]+0.5)*2/480]
K_ego = np.loadtxt('%s/intrinsics.txt' % opt.ego_dataset_folder)
s_ego = [(K_ego[0,2]+0.5)*2/640, (K_ego[1,2]+0.5)*2/480]

# One entry per color frame of the ego recording.
original_image_id_list = np.arange(0, len(fnmatch.filter(os.listdir(EGO_DATASET_FOLDER + '/color/'), '*.jpg')), step=1)
num_images = original_image_id_list.shape[0]
P = np.zeros((num_images, 3, 4))
good_pose_pnp = np.zeros(num_images, dtype=bool)
batch_size = 8
ego_dataset = PosePnP_points_accumulation(match_database=MATCH_DATABASE,
                                          img_desc_folder=IMAGE_DESC_FOLDER,
                                          image_list=original_image_id_list)
# The DataLoader parallelizes the per-image PnP solves across 8 workers.
data_loader = DataLoader(dataset=ego_dataset,
                         num_workers=8, batch_size=batch_size,
                         shuffle=False,
                         pin_memory=True)
# Gather per-image results: store the 3x4 pose and flag frames with a
# successful solve.
for idx, output_batch in enumerate(tqdm(data_loader)):
    for ii in range(output_batch['is_good_pose'].shape[0]):
        if output_batch['is_good_pose'][ii]:
            P[int(output_batch['img_idx'][ii].item())] = output_batch['solution'][ii].numpy()
            good_pose_pnp[int(output_batch['img_idx'][ii].item())] = True
print('good pose found by pnp / total poses: ', np.sum(good_pose_pnp), '/', good_pose_pnp.shape[0])
np.save('%s/camera_poses_pnp.npy' % OUTPUT_DIR, P)
np.save('%s/good_pose_pnp.npy' % OUTPUT_DIR, good_pose_pnp)
print(good_pose_pnp.shape)
if np.sum(good_pose_pnp) > 0:
    # Export the recovered camera trajectory for visual inspection.
    WritePosesToPly(P[good_pose_pnp], '%s/cameras_pnp.ply' % OUTPUT_DIR)
else:
    print('No good poses found')
aedc08088f4b46e83fd19fbb1bda7745f8488eae | 7,368 | py | Python | allennlp/modules/seq2seq_encoders/intra_sentence_attention.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 65 | 2020-11-13T05:36:29.000Z | 2022-03-26T22:45:46.000Z | allennlp/modules/seq2seq_encoders/intra_sentence_attention.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 11 | 2021-05-26T16:22:17.000Z | 2022-03-02T04:03:18.000Z | allennlp/modules/seq2seq_encoders/intra_sentence_attention.py | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f | [
"MIT"
] | 10 | 2019-12-06T11:32:37.000Z | 2022-01-06T15:39:09.000Z | from overrides import overrides
import torch
from torch.nn import Linear
from allennlp.common.checks import ConfigurationError
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity, SimilarityFunction
from allennlp.modules.similarity_functions import MultiHeadedSimilarity
from allennlp.nn import util
@Seq2SeqEncoder.register("intra_sentence_attention")
class IntraSentenceAttentionEncoder(Seq2SeqEncoder):
    """
    An ``IntraSentenceAttentionEncoder`` is a :class:`Seq2SeqEncoder` that merges the original word
    representations with an attention (for each word) over other words in the sentence. As a
    :class:`Seq2SeqEncoder`, the input to this module is of shape ``(batch_size, num_tokens,
    input_dim)``, and the output is of shape ``(batch_size, num_tokens, output_dim)``.

    We compute the attention using a configurable :class:`SimilarityFunction`, which could have
    multiple attention heads. The operation for merging the original representations with the
    attended representations is also configurable (e.g., you can concatenate them, add them,
    multiply them, etc.).

    Parameters
    ----------
    input_dim : ``int``
        The dimension of the vector for each element in the input sequence;
        ``input_tensor.size(-1)``.
    projection_dim : ``int``, optional
        If given, we will do a linear projection of the input sequence to this dimension before
        performing the attention-weighted sum.
    similarity_function : ``SimilarityFunction``, optional
        The similarity function to use when computing attentions. Default is to use a dot product.
    num_attention_heads: ``int``, optional
        If this is greater than one (default is 1), we will split the input into several "heads" to
        compute multi-headed weighted sums. Must be used with a multi-headed similarity function,
        and you almost certainly want to do a projection in conjunction with the multiple heads.
    combination : ``str``, optional
        This string defines how we merge the original word representations with the result of the
        intra-sentence attention. This will be passed to
        :func:`~allennlp.nn.util.combine_tensors`; see that function for more detail on exactly how
        this works, but some simple examples are ``"1,2"`` for concatenation (the default),
        ``"1+2"`` for adding the two, or ``"2"`` for only keeping the attention representation.
    output_dim : ``int``, optional (default = None)
        The dimension of an optional output projection.
    """
    def __init__(self,
                 input_dim: int,
                 projection_dim: int = None,
                 similarity_function: SimilarityFunction = DotProductSimilarity(),
                 num_attention_heads: int = 1,
                 combination: str = '1,2',
                 output_dim: int = None) -> None:
        super(IntraSentenceAttentionEncoder, self).__init__()
        self._input_dim = input_dim
        # Optional projection applied before the attention-weighted sum;
        # identity when no projection_dim is given.
        if projection_dim:
            self._projection = torch.nn.Linear(input_dim, projection_dim)
        else:
            self._projection = lambda x: x
            projection_dim = input_dim
        self._matrix_attention = LegacyMatrixAttention(similarity_function)
        self._num_attention_heads = num_attention_heads
        # The number of heads must agree between this encoder and the
        # similarity function (both multi-headed, or both single-headed).
        if isinstance(similarity_function, MultiHeadedSimilarity):
            if num_attention_heads == 1:
                raise ConfigurationError("Similarity function has multiple heads but encoder doesn't")
            if num_attention_heads != similarity_function.num_heads:
                raise ConfigurationError("Number of heads don't match between similarity function "
                                         "and encoder: %d, %d" % (num_attention_heads,
                                                                  similarity_function.num_heads))
        elif num_attention_heads > 1:
            raise ConfigurationError("Encoder has multiple heads but similarity function doesn't")
        self._combination = combination
        combined_dim = util.get_combined_dim(combination, [input_dim, projection_dim])
        # Optional projection applied after combining the original and
        # attended representations; identity when no output_dim is given.
        if output_dim:
            self._output_projection = Linear(combined_dim, output_dim)
            self._output_dim = output_dim
        else:
            self._output_projection = lambda x: x
            self._output_dim = combined_dim

    @overrides
    def get_input_dim(self) -> int:
        return self._input_dim

    @overrides
    def get_output_dim(self) -> int:
        return self._output_dim

    @overrides
    def is_bidirectional(self):
        # Attention over the whole sentence has no directionality notion;
        # reported as unidirectional.
        return False

    @overrides
    def forward(self, tokens: torch.Tensor, mask: torch.Tensor):  # pylint: disable=arguments-differ
        batch_size, sequence_length, _ = tokens.size()
        # Shape: (batch_size, sequence_length, sequence_length)
        similarity_matrix = self._matrix_attention(tokens, tokens)

        if self._num_attention_heads > 1:
            # In this case, the similarity matrix actually has shape
            # (batch_size, sequence_length, sequence_length, num_heads). To make the rest of the
            # logic below easier, we'll permute this to
            # (batch_size, sequence_length, num_heads, sequence_length).
            similarity_matrix = similarity_matrix.permute(0, 1, 3, 2)

        # Shape: (batch_size, sequence_length, [num_heads,] sequence_length)
        intra_sentence_attention = util.masked_softmax(similarity_matrix.contiguous(), mask)

        # Shape: (batch_size, sequence_length, projection_dim)
        output_token_representation = self._projection(tokens)

        if self._num_attention_heads > 1:
            # We need to split and permute the output representation to be
            # (batch_size, num_heads, sequence_length, projection_dim / num_heads), so that we can
            # do a proper weighted sum with `intra_sentence_attention`.
            shape = list(output_token_representation.size())
            new_shape = shape[:-1] + [self._num_attention_heads, -1]
            # Shape: (batch_size, sequence_length, num_heads, projection_dim / num_heads)
            output_token_representation = output_token_representation.view(*new_shape)
            # Shape: (batch_size, num_heads, sequence_length, projection_dim / num_heads)
            output_token_representation = output_token_representation.permute(0, 2, 1, 3)

        # Shape: (batch_size, sequence_length, [num_heads,] projection_dim [/ num_heads])
        attended_sentence = util.weighted_sum(output_token_representation,
                                              intra_sentence_attention)

        if self._num_attention_heads > 1:
            # Here we concatenate the weighted representation for each head. We'll accomplish this
            # just with a resize.
            # Shape: (batch_size, sequence_length, projection_dim)
            attended_sentence = attended_sentence.view(batch_size, sequence_length, -1)

        # Shape: (batch_size, sequence_length, combination_dim)
        combined_tensors = util.combine_tensors(self._combination, [tokens, attended_sentence])
        return self._output_projection(combined_tensors)
| 53.007194 | 102 | 0.687296 |
3abdc091320cd9227eb93207da5e21b8bdef0665 | 6,697 | py | Python | sdk/search/azure-search-documents/azure/search/documents/__init__.py | SnehaGunda/azure-sdk-for-python | 88fae6ec703ac3641459b5d07158c9ed710524fe | [
"MIT"
] | 1 | 2020-05-12T23:29:15.000Z | 2020-05-12T23:29:15.000Z | sdk/search/azure-search-documents/azure/search/documents/__init__.py | SnehaGunda/azure-sdk-for-python | 88fae6ec703ac3641459b5d07158c9ed710524fe | [
"MIT"
] | null | null | null | sdk/search/azure-search-documents/azure/search/documents/__init__.py | SnehaGunda/azure-sdk-for-python | 88fae6ec703ac3641459b5d07158c9ed710524fe | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from ._index import (
AutocompleteQuery,
IndexAction,
IndexDocumentsBatch,
IndexingResult,
SearchClient,
SearchItemPaged,
SearchQuery,
SuggestQuery,
odata,
)
from ._service import (
ComplexField,
SearchableField,
SimpleField,
SearchServiceClient,
edm,
)
from ._service._generated.models import (
Analyzer,
AnalyzeRequest,
AnalyzeResult,
AsciiFoldingTokenFilter,
AzureActiveDirectoryApplicationCredentials,
CharFilter,
CjkBigramTokenFilter,
ClassicTokenizer,
CommonGramTokenFilter,
ConditionalSkill,
CorsOptions,
CustomAnalyzer,
DataSource,
DataSourceCredentials,
DataContainer,
DictionaryDecompounderTokenFilter,
DistanceScoringFunction,
DistanceScoringParameters,
EdgeNGramTokenFilter,
EdgeNGramTokenizer,
ElisionTokenFilter,
EncryptionKey,
EntityRecognitionSkill,
Field,
FreshnessScoringFunction,
FreshnessScoringParameters,
GetIndexStatisticsResult,
ImageAnalysisSkill,
Index,
Indexer,
IndexingSchedule,
IndexingParameters,
InputFieldMappingEntry,
KeepTokenFilter,
KeyPhraseExtractionSkill,
KeywordMarkerTokenFilter,
KeywordTokenizer,
LanguageDetectionSkill,
LengthTokenFilter,
LimitTokenFilter,
MagnitudeScoringFunction,
MagnitudeScoringParameters,
MappingCharFilter,
MergeSkill,
MicrosoftLanguageStemmingTokenizer,
MicrosoftLanguageTokenizer,
NGramTokenFilter,
NGramTokenizer,
OcrSkill,
OutputFieldMappingEntry,
PatternCaptureTokenFilter,
PatternReplaceCharFilter,
PatternReplaceTokenFilter,
PhoneticTokenFilter,
RegexFlags,
ScoringFunction,
ScoringProfile,
SentimentSkill,
ShaperSkill,
ShingleTokenFilter,
Skillset,
SnowballTokenFilter,
SplitSkill,
StandardAnalyzer,
StandardTokenizer,
StemmerOverrideTokenFilter,
StemmerTokenFilter,
StopAnalyzer,
StopwordsTokenFilter,
Suggester,
SynonymMap,
SynonymTokenFilter,
TagScoringFunction,
TagScoringParameters,
TextTranslationSkill,
TextWeights,
TokenFilter,
TokenInfo,
Tokenizer,
TruncateTokenFilter,
UaxUrlEmailTokenizer,
UniqueTokenFilter,
WebApiSkill,
WordDelimiterTokenFilter,
)
from ._service._models import PatternAnalyzer, PatternTokenizer
from ._service._datasources_client import SearchDataSourcesClient
from ._service._indexers_client import SearchIndexersClient
from ._service._indexes_client import SearchIndexesClient
from ._service._skillsets_client import SearchSkillsetsClient
from ._service._synonym_maps_client import SearchSynonymMapsClient
from ._version import VERSION
__version__ = VERSION  # re-export the package version string resolved from ._version
__all__ = (
"AnalyzeRequest",
"AnalyzeResult",
"Analyzer",
"AsciiFoldingTokenFilter",
"AutocompleteQuery",
"AzureActiveDirectoryApplicationCredentials",
"CharFilter",
"CjkBigramTokenFilter",
"ClassicTokenizer",
"CommonGramTokenFilter",
"ComplexField",
"ConditionalSkill",
"CorsOptions",
"CustomAnalyzer",
"DataSource",
"DataSourceCredentials",
"DataContainer",
"DictionaryDecompounderTokenFilter",
"DistanceScoringFunction",
"DistanceScoringParameters",
"EdgeNGramTokenFilter",
"EdgeNGramTokenizer",
"ElisionTokenFilter",
"EncryptionKey",
"EntityRecognitionSkill",
"Field",
"FreshnessScoringFunction",
"FreshnessScoringParameters",
"GetIndexStatisticsResult",
"ImageAnalysisSkill",
"Index",
"Indexer",
"IndexingSchedule",
"IndexingParameters",
"IndexAction",
"IndexDocumentsBatch",
"IndexingResult",
"InputFieldMappingEntry",
"KeepTokenFilter",
"KeyPhraseExtractionSkill",
"KeywordMarkerTokenFilter",
"KeywordTokenizer",
"LanguageDetectionSkill",
"LengthTokenFilter",
"LimitTokenFilter",
"MagnitudeScoringFunction",
"MagnitudeScoringParameters",
"MappingCharFilter",
"MergeSkill",
"MicrosoftLanguageStemmingTokenizer",
"MicrosoftLanguageTokenizer",
"NGramTokenFilter",
"NGramTokenizer",
"OcrSkill",
"OutputFieldMappingEntry",
"PatternAnalyzer",
"PatternCaptureTokenFilter",
"PatternReplaceCharFilter",
"PatternReplaceTokenFilter",
"PatternTokenizer",
"PhoneticTokenFilter",
"RegexFlags",
"ScoringFunction",
"ScoringProfile",
"SearchClient",
"SearchDataSourcesClient",
"SearchIndexersClient",
"SearchIndexesClient",
"SearchSkillsetsClient",
"SearchSynonymMapsClient",
"SearchItemPaged",
"SearchQuery",
"SearchServiceClient",
"SearchableField",
"SentimentSkill",
"ShaperSkill",
"ShingleTokenFilter",
"SimpleField",
"Skillset",
"SnowballTokenFilter",
"SplitSkill",
"StandardAnalyzer",
"StandardTokenizer",
"StemmerOverrideTokenFilter",
"StemmerTokenFilter",
"StopAnalyzer",
"StopwordsTokenFilter",
"SuggestQuery",
"Suggester",
"SynonymMap",
"SynonymTokenFilter",
"TagScoringFunction",
"TagScoringParameters",
"TextTranslationSkill",
"TextWeights",
"TokenFilter",
"TokenInfo",
"Tokenizer",
"TruncateTokenFilter",
"UaxUrlEmailTokenizer",
"UniqueTokenFilter",
"WebApiSkill",
"WordDelimiterTokenFilter",
"edm",
"odata",
)
| 26.895582 | 78 | 0.712409 |
8f7e95a0701c2f48fff14425fccd818f15c68d41 | 727 | py | Python | selenium_tests/Tests/TestSuiteWithinPage/test_LoginPage.py | avielfedida/RoboAdvisor | ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9 | [
"MIT"
] | null | null | null | selenium_tests/Tests/TestSuiteWithinPage/test_LoginPage.py | avielfedida/RoboAdvisor | ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9 | [
"MIT"
] | 3 | 2020-12-31T08:03:30.000Z | 2021-03-30T06:39:50.000Z | selenium_tests/Tests/TestSuiteWithinPage/test_LoginPage.py | avielfedida/RoboAdvisor | ca8ba3c479f5fd3ae1e468f11f09ecf08e2a0cf9 | [
"MIT"
] | 1 | 2021-06-20T09:13:03.000Z | 2021-06-20T09:13:03.000Z | import pytest
from selenium_tests.Data.SiteMessages import SiteMessages
from selenium_tests.Pages.LoginPage import LoginPage
from selenium_tests.utils import get_random_username_password
@pytest.mark.usefixtures("tear_up_down")
class Test_LoginPage:
def test_crucial_elements_visibility(self):
page = LoginPage(self.driver)
assert page.is_login_btn_exists()
assert page.is_email_field_exists()
assert page.is_password_field_exists()
def test_not_logged_in(self):
page = LoginPage(self.driver)
page.do_login(*get_random_username_password())
assert page.is_danger_presented()
assert page.is_danger_message_equal(SiteMessages.INVALID_LOGIN_CREDENTIALS)
| 33.045455 | 83 | 0.774415 |
368c1af3f06fa7c67aacd0c92fc5a91e2b30c148 | 13,595 | py | Python | stsci/skypac/region.py | mcara/stsci.skypac | b986e41a29c3c2536cea86a62802b39f71e5588c | [
"BSD-3-Clause"
] | 1 | 2019-01-17T08:02:17.000Z | 2019-01-17T08:02:17.000Z | stsci/skypac/region.py | mcara/stsci.skypac | b986e41a29c3c2536cea86a62802b39f71e5588c | [
"BSD-3-Clause"
] | 21 | 2017-02-03T05:17:49.000Z | 2021-06-07T14:47:29.000Z | stsci/skypac/region.py | mcara/stsci.skypac | b986e41a29c3c2536cea86a62802b39f71e5588c | [
"BSD-3-Clause"
] | 5 | 2016-03-29T19:57:14.000Z | 2018-06-13T14:04:07.000Z | """
Polygon filling algorithm.
:Authors: Nadezhda Dencheva, Mihai Cara
:License: :doc:`LICENSE`
"""
# Original author: Nadezhda Dencheva
#
# modifications by Mihai Cara: removed functionality not needed for the
# skymatch algorithm and modified the code to be able to work with polygons
# that have vertices with negative coordinates. Polygon vertices are now
# *internally* (to region.py) rounded to integers so that Polygon will not
# crash when input vertices are floats. Fixed a bug in _construct_ordered_GET
# that was causing varying polygon filling for different ordering of the
# vertices. Finally, modified the algorithm to fill the right-most pixels
# as well as top-most row of the polygon.
#
# NOTE: Algorithm description can be found, e.g., here:
# http://www.cs.rit.edu/~icss571/filling/how_to.html
# http://www.cs.uic.edu/~jbell/CourseNotes/ComputerGraphics/PolygonFilling.html
#
from collections import OrderedDict
import numpy as np
__all__ = ['Region', 'Edge', 'Polygon']
__taskname__ = 'region'
class ValidationError(Exception):
    """Raised when a region fails validation.

    Calling ``super().__init__`` (which the original omitted) stores the
    message in ``Exception.args`` as well, so the exception pickles
    correctly and ``repr`` shows the message.
    """
    def __init__(self, message):
        super().__init__(message)
        self._message = message  # kept for backward compatibility with __str__

    def __str__(self):
        return self._message
class Region(object):
    """Abstract base class for 2D regions.

    Parameters
    ----------
    rid: int or string
        Region ID.
    coordinate_system: astropy.wcs.CoordinateSystem instance or a string
        In the context of WCS this would be an instance of
        wcs.CoordinateSystem.
    """
    def __init__(self, rid, coordinate_system):
        self._rid = rid
        self._coordinate_system = coordinate_system

    def __contains__(self, x, y):
        """Return True when pixel ``(x, y)`` lies inside the region.

        Subclasses must override this method.
        """
        raise NotImplementedError("__contains__")

    def scan(self, mask):
        """Set mask values to the region ID for all pixels within the region.

        Subclasses must override this method.

        Parameters
        ----------
        mask: ndarray
            A byte array with the shape of the observation, used as a mask.

        Returns
        -------
        mask: array whose elements hold the region ID, or 0 for pixels
            not included in any region.
        """
        raise NotImplementedError("scan")
class Polygon(Region):
    """
    Represents a 2D polygon region with multiple vertices
    Parameters
    ----------
    rid: string
        polygon id
    vertices: list of (x,y) tuples or lists
        The list is ordered in such a way that when traversed in a
        counterclockwise direction, the enclosed area is the polygon.
        The last vertex must coincide with the first vertex, minimum
        4 vertices are needed to define a triangle.
    coord_system: string
        coordinate system
    """
    def __init__(self, rid, vertices, coord_system="Cartesian"):
        assert len(vertices) >= 4, ("Expected vertices to be "
                                    "a list of minimum 4 tuples (x,y)")
        super(Polygon, self).__init__(rid, coord_system)
        # self._shiftx & self._shifty are introduced to shift the bottom-left
        # corner of the polygon's bounding box to (0,0) as a (hopefully
        # temporary) workaround to a limitation of the original code that the
        # polygon must be completely contained in the image. It seems that the
        # code works fine if we make sure that the bottom-left corner of the
        # polygon's bounding box has non-negative coordinates.
        self._shiftx = 0
        self._shifty = 0
        # find the most negative x and y among the vertices (0 if none)
        for vertex in vertices:
            x, y = vertex
            if x < self._shiftx:
                self._shiftx = x
            if y < self._shifty:
                self._shifty = y
        # translate all vertices so the bounding box corner is at (0, 0)
        v = [(i - self._shiftx, j - self._shifty) for i, j in vertices]
        # convert to integer coordinates:
        self._vertices = np.asarray(list(map(_round_vertex, v)))
        self._shiftx = int(round(self._shiftx))
        self._shifty = int(round(self._shifty))
        self._bbox = self._get_bounding_box()
        # one scan line per integer y covered by the (shifted) bounding box
        self._scan_line_range = list(
            range(self._bbox[1], self._bbox[3] + self._bbox[1] + 1)
        )
        # constructs a Global Edge Table (GET) in bbox coordinates
        self._GET = self._construct_ordered_GET()
    def _get_bounding_box(self):
        """Return ``(xmin, ymin, width, height)`` of the shifted integer vertices."""
        x = self._vertices[:, 0].min()
        y = self._vertices[:, 1].min()
        w = self._vertices[:, 0].max() - x
        h = self._vertices[:, 1].max() - y
        return (x, y, w, h)
    def _construct_ordered_GET(self):
        """
        Construct a Global Edge Table (GET)
        The GET is an OrderedDict. Keys are scan line numbers,
        ordered from ``bbox.ymin`` to ``bbox.ymax``, where ``bbox`` is the
        bounding box of the polygon.
        Values are lists of edges for which ``edge.ymin==scan_line_number``.
        Returns
        -------
        GET: OrderedDict
            {scan_line: [edge1, edge2]}
        """
        # edges is a list of Edge objects which define a polygon
        # with these vertices
        edges = self.get_edges()
        GET = OrderedDict.fromkeys(self._scan_line_range)
        ymin = np.asarray([e._ymin for e in edges])
        for i in self._scan_line_range:
            # indices of the edges whose lowest y equals this scan line
            ymin_ind = (ymin == i).nonzero()[0]
            yminindlen, = ymin_ind.shape
            # NOTE(mcara): testing ymin_ind.any() (the original code) fails
            # when index 0 is present in ymin_ind, which caused incomplete
            # polygon filling that varied with vertex ordering; test the
            # number of matches instead.
            if yminindlen:
                GET[i] = [edges[ymin_ind[0]]]
                for j in ymin_ind[1:]:
                    GET[i].append(edges[j])
        return GET
    def get_edges(self):
        """
        Create a list of Edge objects from vertices
        """
        edges = []
        for i in range(1, len(self._vertices)):
            name = 'E' + str(i - 1)
            edges.append(
                Edge(name=name, start=self._vertices[i - 1],
                     stop=self._vertices[i])
            )
        return edges
    def scan(self, data):
        """
        This is the main function which scans the polygon and creates the mask
        Parameters
        ----------
        data: array
            the mask array
            it has all zeros initially, elements within a region are set to
            the region's ID
        Notes
        -----
        Algorithm summary:
        - Set the Global Edge Table (GET)
        - Set y to be the smallest y coordinate that has an entry in GET
        - Initialize the Active Edge Table (AET) to be empty
        - For each scan line:
          1. Add edges from GET to AET for which ymin==y
          2. Remove edges from AET for which ymax==y
          3. Compute the intersection of the current scan line with all
             edges in the AET
          4. Sort on X of intersection point
          5. Set elements between pairs of X in the AET to the Edge's ID
        """
        # # TODO:
        # # 1. This algorithm does not mark pixels in the top row and left
        # #    most column. Pad the initial pixel description on top and left
        # #    with 1 px to prevent this.
        # # 2. Currently it uses intersection of the scan line with edges.
        # #    If this is too slow it should use the 1/m increment (replace 3
        # #    above) (or the increment should be removed from the GET entry).
        # see comments in the __init__ function for the reason of introducing
        # polygon shifts (self._shiftx & self._shifty). Here we need to shift
        # it back.
        (ny, nx) = data.shape
        y = np.min(list(self._GET.keys()))
        AET = []
        scline = self._scan_line_range[-1]
        while y <= scline:
            if y < scline:
                AET = self.update_AET(y, AET)
            # degenerate (zero-width) bounding box: nothing to fill
            if self._bbox[2] <= 0:
                y += 1
                continue
            # horizontal edge spanning the bbox at height y
            scan_line = Edge('scan_line', start=[self._bbox[0], y],
                             stop=[self._bbox[0] + self._bbox[2], y])
            # x coordinates where active edges cross the scan line
            x = [int(np.ceil(e.compute_AET_entry(scan_line)[1]))
                 for e in AET if e is not None]
            xnew = np.sort(x)
            # skip scan lines that fall outside the image after un-shifting
            if y + self._shifty < 0 or y + self._shifty >= ny:
                y += 1
                continue
            # fill between consecutive pairs of crossings, clipped to the image
            for i, j in zip(xnew[::2], xnew[1::2]):
                xstart = i + self._shiftx if (i + self._shiftx) >= 0 else 0
                xend = j + self._shiftx if (j + self._shiftx) < nx else nx - 1
                data[y + self._shifty][xstart:xend + 1] = self._rid
            y += 1
        return data
    def update_AET(self, y, AET):
        """
        Update the Active Edge Table (AET)
        Add edges from GET to AET for which ymin of the edge is
        equal to the y of the scan line.
        Remove edges from AET for which ymax of the edge is
        equal to y of the scan line.
        """
        edge_cont = self._GET[y]
        if edge_cont is not None:
            for edge in edge_cont:
                # horizontal edges are never activated
                if edge._start[1] != edge._stop[1] and edge._ymin == y:
                    AET.append(edge)
        # iterate a reversed view so removal does not skip elements
        for edge in AET[::-1]:
            if edge is not None:
                if edge._ymax == y:
                    AET.remove(edge)
        return AET
    def __contains__(self, px):
        """Bounding-box membership test for pixel ``px``.

        NOTE: this only checks the polygon's bounding box; an even-odd (or
        winding-number) test would be needed for exact polygon membership.
        """
        return (
            px[0] >= self._bbox[0] and
            px[0] <= self._bbox[0] + self._bbox[2] and
            px[1] >= self._bbox[1] and
            px[1] <= self._bbox[1] + self._bbox[3]
        )
class Edge(object):
    """
    Edge representation

    An edge has a "start" and "stop" ``(x, y)`` vertices and an entry in the
    GET table of a polygon. The GET entry is a list of these values:

    ``[ymax, x_at_ymin, delta_x/delta_y, None]``
    """
    def __init__(self, name=None, start=None, stop=None, next=None):
        # NOTE: parameter name ``next`` shadows the builtin but is kept for
        # backward compatibility with existing keyword callers.
        self._start = None
        if start is not None:
            self._start = np.asarray(start)
        self._name = name
        self._stop = stop
        if stop is not None:
            self._stop = np.asarray(stop)
        self._next = next
        if self._stop is not None and self._start is not None:
            # cache extremes of the segment for the scan-fill algorithm
            if self._start[1] < self._stop[1]:
                self._ymin = self._start[1]
                self._yminx = self._start[0]
            else:
                self._ymin = self._stop[1]
                self._yminx = self._stop[0]
            self._ymax = max(self._start[1], self._stop[1])
            self._xmin = min(self._start[0], self._stop[0])
            # BUGFIX: the original computed max(self._start[0], self._stop[1]),
            # mixing an x coordinate with a y coordinate; the maximum x must
            # use self._stop[0].
            self._xmax = max(self._start[0], self._stop[0])
        else:
            self._ymin = None
            self._yminx = None
            self._ymax = None
            self._xmin = None
            self._xmax = None
        self.GET_entry = self.compute_GET_entry()

    @property
    def ymin(self):
        """Smallest y coordinate of the edge (None for an empty edge)."""
        return self._ymin

    @property
    def start(self):
        """Start vertex as a numpy array (or None)."""
        return self._start

    @property
    def stop(self):
        """Stop vertex as a numpy array (or None)."""
        return self._stop

    @property
    def ymax(self):
        """Largest y coordinate of the edge (None for an empty edge)."""
        return self._ymax

    def compute_GET_entry(self):
        """
        Compute the entry in the Global Edge Table

        ``[ymax, x@ymin, 1/m, None]``

        Returns None for an empty edge or a horizontal edge (horizontal
        edges never enter the GET).
        """
        if self._start is None:
            return None
        earr = np.asarray([self._start, self._stop])
        if np.diff(earr[:, 1]).item() == 0:
            # horizontal edge: no GET entry
            return None
        entry = [
            self._ymax,
            self._yminx,
            (np.diff(earr[:, 0]) / np.diff(earr[:, 1])).item(),
            None
        ]
        return entry

    def compute_AET_entry(self, edge):
        """
        Compute the entry for an edge in the current Active Edge Table

        ``[ymax, x_intersect, 1/m]``
        note: currently 1/m is not used

        Assumes this edge has a GET entry (i.e. is neither empty nor
        horizontal) -- horizontal edges never reach the AET.
        """
        x = self.intersection(edge)[0]
        return [self._ymax, x, self.GET_entry[2]]

    def __repr__(self):
        fmt = ""
        if self._name is not None:
            fmt += self._name
            ne = self.next_edge
            # follow the chain of linked edges: "E0-->E1-->..."
            while ne is not None:
                fmt += "-->"
                fmt += ne._name
                ne = ne.next_edge
        return fmt

    @property
    def next_edge(self):
        """The next edge in a chain of edges (or None)."""
        return self._next

    @next_edge.setter
    def next_edge(self, edge):
        if self._name is None:
            # this edge is empty: absorb the assigned edge entirely
            self._name = edge._name
            self._stop = edge._stop
            self._start = edge._start
            self._next = edge.next_edge
        else:
            self._next = edge

    def intersection(self, edge):
        """
        Return the intersection point of this edge's line with ``edge``'s.

        For (near-)parallel edges the start point of this edge is returned.
        """
        u = self._stop - self._start
        v = edge._stop - edge._start
        w = self._start - edge._start
        D = np.cross(u, v)
        # reuse D instead of recomputing the cross product for the test
        if np.allclose(D, 0, rtol=0, atol=1e2 * np.finfo(float).eps):
            return np.array(self._start)
        return np.cross(v, w) / D * u + self._start

    def is_parallel(self, edge):
        """True when this edge and ``edge`` have (anti)parallel directions."""
        u = self._stop - self._start
        v = edge._stop - edge._start
        return np.allclose(np.cross(u, v), 0, rtol=0,
                           atol=1e2 * np.finfo(float).eps)
def _round_vertex(v):
    """Round a single ``(x, y)`` vertex to integer coordinates."""
    vx, vy = v
    return (int(round(vx)), int(round(vy)))
| 30.897727 | 82 | 0.556602 |
ffa13826157da82f4e19f409b1d116a7600cd8bc | 1,141 | py | Python | nn_layers/ffn.py | alibalapour/HATNet | 9dc4a2203bf51c1d834e20500153402968bcf54e | [
"MIT"
] | 25 | 2020-07-26T12:26:26.000Z | 2022-01-24T11:06:28.000Z | nn_layers/ffn.py | alibalapour/HATNet | 9dc4a2203bf51c1d834e20500153402968bcf54e | [
"MIT"
] | 1 | 2021-11-29T09:54:17.000Z | 2022-01-08T13:40:40.000Z | nn_layers/ffn.py | sacmehta/HATNet | c4e50746f68140068bae75a6b07525046255d0b5 | [
"MIT"
] | 9 | 2020-11-18T18:38:21.000Z | 2021-12-11T01:31:50.000Z | '''
This file implements the Feed forward network
Adapted from OpenNMT-Py
'''
from torch import nn
class FFN(nn.Module):
    """Position-wise feed-forward block (pre-layer-norm) with an optional
    residual connection, applied only when the input and output widths
    match.  Adapted from OpenNMT-py.
    """

    def __init__(self, input_dim, scale, output_dim=None, p=0.1, expansion=False):
        super(FFN, self).__init__()
        if output_dim is None:
            output_dim = input_dim
        # hidden width is either widened or narrowed by `scale`
        if expansion:
            proj_features = input_dim * scale
        else:
            proj_features = input_dim // scale
        self.w_1 = nn.Linear(input_dim, proj_features)
        self.w_2 = nn.Linear(proj_features, output_dim)
        self.layer_norm = nn.LayerNorm(input_dim, eps=1e-6)
        self.dropout_1 = nn.Dropout(p)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(p)
        # residual addition is only well-defined when shapes agree
        self.residual = input_dim == output_dim

    def forward(self, x):
        """Apply the block to ``x`` of shape ``(batch_size, input_len, model_dim)``
        and return a tensor of the same leading shape."""
        hidden = self.dropout_1(self.relu(self.w_1(self.layer_norm(x))))
        projected = self.dropout_2(self.w_2(hidden))
        if self.residual:
            return projected + x
        return projected
5faae59fb8472b66ebcc0b0821a58d3b275c0dc2 | 642 | py | Python | Python Advanced/exams/fucntion-exam-01/flights_Retake Exam - 14 April 2021.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
] | null | null | null | Python Advanced/exams/fucntion-exam-01/flights_Retake Exam - 14 April 2021.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
] | null | null | null | Python Advanced/exams/fucntion-exam-01/flights_Retake Exam - 14 April 2021.py | bvoytash/Software-University | f2c6940cde093cea7b1c38bd88305206564c9947 | [
"MIT"
def flights(*args):
    """Aggregate passenger counts per destination.

    ``args`` alternates destination names (str) and passenger counts (int);
    processing stops as soon as the sentinel string "Finish" is seen.
    Returns a dict mapping each destination to its summed count.
    """
    totals = {}
    for position, item in enumerate(args):
        if item == "Finish":
            return totals
        if isinstance(item, str):
            # the count always follows its destination name
            totals[item] = totals.get(item, 0) + args[position + 1]
    return totals
# Demonstrate the three sample inputs from the exam statement.
for sample in (
    ('Vienna', 256, 'Vienna', 26, 'Morocco', 98, 'Paris', 115, 'Finish', 'Paris', 15),
    ('London', 0, 'New York', 9, 'Aberdeen', 215, 'Sydney', 2, 'New York', 300, 'Nice', 0, 'Finish'),
    ('Finish', 'New York', 90, 'Aberdeen', 300, 'Sydney', 0),
):
    print(flights(*sample))
19a8490275d3b994d1a49a88f89d996d1ef062c6 | 617 | py | Python | py_learning/python_demo/63/pachong2.py | flylei009/python_learning | 9d55a296dc56d6f2295d53f2c4be55724cc8fea3 | [
"MIT"
] | 1 | 2020-09-20T04:02:38.000Z | 2020-09-20T04:02:38.000Z | py_learning/python_demo/63/pachong2.py | flylei009/python_learning | 9d55a296dc56d6f2295d53f2c4be55724cc8fea3 | [
"MIT"
] | null | null | null | py_learning/python_demo/63/pachong2.py | flylei009/python_learning | 9d55a296dc56d6f2295d53f2c4be55724cc8fea3 | [
"MIT"
] | null | null | null | from urllib import parse
from urllib import request
data = bytes(parse.urlencode({'word':'hello'}),encoding='utf8')
# print(data)
response = request.urlopen('http://httpbin.org/post', data=data)
print(response.read().decode('utf-8'))
response2 = request.urlopen('http://httpbin.org/get', timeout=1)
print(response2.read())
# response3 = request.urlopen('http://httpbin.org/get', timeout=0.1)
import urllib
import socket
try:
response3 = urllib.request.urlopen('http://httpbin.org/get', timeout=0.1)
except urllib.error.URLError as e:
if isinstance(e.reason, socket.timeout):
print('TIME OUT') | 24.68 | 77 | 0.714749 |
a41283edede130c627554c607235c007e62c60c7 | 11,504 | py | Python | localProxy.py | Qogir-Chiu/swift-proxy | f3459db71eb1073e8fac22e1f69d3b9315dd6f48 | [
"MIT"
] | null | null | null | localProxy.py | Qogir-Chiu/swift-proxy | f3459db71eb1073e8fac22e1f69d3b9315dd6f48 | [
"MIT"
] | null | null | null | localProxy.py | Qogir-Chiu/swift-proxy | f3459db71eb1073e8fac22e1f69d3b9315dd6f48 | [
"MIT"
] | 1 | 2021-04-17T03:14:57.000Z | 2021-04-17T03:14:57.000Z | '''
Copyright (c)2020, by Qogir, JMJ, MA71
All rights reserved.
File Name: LocalProxy
System Name: SwiftProxy
Date: 2020-12-01
Version: 1.0
Description: 本地代理服务器。该模块主要依赖asyncio和websockets库,并使用协程/单线程异步IO的思想进行编程,是本程序的核心模块.
'''
import argparse
import asyncio
import ipaddress
import json
import logging
import signal
import struct
import sys
import traceback
import websockets
from enum import Enum
ReadMode = Enum('ReadMod', ('EXACT', 'LINE', 'MAX', 'UNTIL'))  # the four read modes: exact length, one line, up to max bytes, until separator (note: the enum's internal name string is misspelled 'ReadMod')
class MyError(Exception):  # single custom exception type raised on protocol/stream errors so failures are easy to trace
    pass
gSendByteCount = 0  # global counters: bytes sent since the last 1-second tick (reset by calcBandwidth)
gSendBandwidth = 0  # bytes/second sent, published once per second by calcBandwidth
gRecvByteCount = 0  # bytes received since the last 1-second tick
gRecvBandwidth = 0  # bytes/second received, published once per second
async def aioClose(w, *, logHint=None):
    """Close a StreamWriter and wait for the close to finish, logging the
    peer address; errors during close are ignored (best effort)."""
    if not w:
        # Nothing to close; yield once so callers can await us uniformly.
        await asyncio.sleep(0.001)
        return
    peerHost, peerPort, *_ = w.get_extra_info('peername')
    log.info(f'{logHint} close {peerHost} {peerPort}')
    try:
        w.close()
        await w.wait_closed()
    except Exception:
        # The peer may already have dropped the connection.
        pass
async def aioRead(r, mode, *, logHint=None, exactData=None, exactLen=None, maxLen=-1, untilSep=b'\r\n'):
    """Read from StreamReader *r* in one of the four ReadMode styles.

    Stream/connection errors and EOF are converted into MyError so callers
    only have to handle a single exception type.
    """
    data = None
    try:
        if ReadMode.EXACT == mode:
            # exactData doubles as both the expected length and the expected payload
            wantLen = len(exactData) if exactData else exactLen
            data = await r.readexactly(wantLen)
            if exactData and data != exactData:
                raise MyError(f'recvERR={data} {logHint}')
        elif ReadMode.LINE == mode:
            data = await r.readline()
        elif ReadMode.MAX == mode:
            data = await r.read(maxLen)
        elif ReadMode.UNTIL == mode:
            data = await r.readuntil(untilSep)
        else:
            log.error(f'INVALID mode={mode}')
            exit(1)
    except (asyncio.IncompleteReadError, ConnectionAbortedError, ConnectionResetError) as exc:
        # all three stream failures map to the same MyError message
        raise MyError(f'recvEXC={exc} {logHint}')
    if not data:
        raise MyError(f'EOF {logHint}')
    return data
async def aioWrite(w, data, *, logHint=''):
    """Write *data* to StreamWriter *w* and drain the transport buffer.

    Connection errors are converted into MyError.
    """
    try:
        w.write(data)
        await w.drain()  # flush the transport buffer before returning
    except (ConnectionAbortedError, ConnectionResetError) as exc:
        # BUGFIX: the ConnectionResetError branch previously labelled the
        # failure 'recvEXC' even though this is the send path; both branches
        # now report 'sendEXC' consistently.
        raise MyError(f'sendEXC={exc} {logHint}')
async def socks5ReadDstHost(r, atyp, *, logHint):
    """Read and decode the SOCKS5 destination address whose address-type
    byte is *atyp* (RFC 1928: 0x01=IPv4, 0x03=domain name, 0x04=IPv6)."""
    if atyp == b'\x01':
        # IPv4: four raw bytes
        raw = await aioRead(r, ReadMode.EXACT, exactLen=4, logHint=f'{logHint} ipv4')
        return str(ipaddress.ip_address(raw))
    if atyp == b'\x03':
        # Domain name: one length byte followed by that many characters
        fqdnLen = await aioRead(r, ReadMode.EXACT, exactLen=1, logHint=f'{logHint} fqdnLen')
        raw = await aioRead(r, ReadMode.EXACT, exactLen=fqdnLen[0], logHint=f'{logHint} fqdn')
        return raw.decode('utf8')
    if atyp == b'\x04':
        # IPv6: sixteen raw bytes
        raw = await aioRead(r, ReadMode.EXACT, exactLen=16, logHint=f'{logHint} ipv6')
        return str(ipaddress.ip_address(raw))
    raise MyError(f'RECV ERRATYP={atyp} {logHint}')
def socks5EncodeBindHost(bindHost):
    """Encode a SOCKS5 BND.ADDR field (RFC 1928).

    Returns ``(atyp, hostData)`` where atyp is b'\\x01' (IPv4),
    b'\\x04' (IPv6) or b'\\x03' (domain name) and hostData is the wire
    encoding of the address.
    """
    try:
        ipAddr = ipaddress.ip_address(bindHost)
    except ValueError:
        # Not an IP literal -> domain name: one length byte + the name.
        # BUGFIX: struct's 's' format requires bytes; the original passed the
        # str through unchanged, which raised struct.error for any hostname.
        hostBytes = bindHost if isinstance(bindHost, bytes) else bindHost.encode()
        hostData = struct.pack(f'!B{len(hostBytes)}s', len(hostBytes), hostBytes)
        return b'\x03', hostData
    if ipAddr.version == 4:
        return b'\x01', struct.pack('!L', int(ipAddr))
    return b'\x04', struct.pack('!16s', ipaddress.v6_int_to_packed(int(ipAddr)))
async def doClient(clientR, clientW): # handle one client connection; args are the asyncio stream reader/writer
    remoteR, remoteW = None, None
    try:
        clientHost, clientPort, *_ = clientW.get_extra_info('peername') # the client's address, used in all log lines
        logHint = f'{clientHost} {clientPort}'
        firstByte = await aioRead(clientR, ReadMode.EXACT, exactLen=1, logHint=f'1stByte') # first byte identifies the proxy protocol
        if b'\x05' == firstByte: # SOCKS5 protocol
            proxyType = 'SOCKS5' # SOCKS5 handshake below; see RFC 1928
            logHint = f'{logHint} {proxyType}'
            numMethods = await aioRead(clientR, ReadMode.EXACT, exactLen=1, logHint='nMethod') # one byte: number of auth methods offered
            await aioRead(clientR, ReadMode.EXACT, exactLen=numMethods[0], logHint='methods')
            await aioWrite(clientW, b'\x05\x00', logHint='method.noAuth') # reply: "no authentication required"
            await aioRead(clientR, ReadMode.EXACT, exactData=b'\x05\x01\x00', logHint='verCmdRsv')
            atyp = await aioRead(clientR, ReadMode.EXACT, exactLen=1, logHint='atyp') # destination address type
            dstHost = await socks5ReadDstHost(clientR, atyp, logHint='dstHost') # parse the typed destination address
            dstPort = await aioRead(clientR, ReadMode.EXACT, exactLen=2, logHint='dstPort')
            dstPort = int.from_bytes(dstPort, 'big') # port arrives big-endian
        else: # HTTP tunnel
            line = await aioRead(clientR, ReadMode.LINE, logHint='1stLine') # read the rest of the HTTP request line
            line = firstByte + line
            line = line.decode() # bytes -> str
            method, uri, proto, *_ = line.split() # e.g.: 'CONNECT streamline.t-mobile.com:22 HTTP/1.1'
            if 'connect' == method.lower(): # CONNECT supports https proxying
                proxyType = 'HTTPS'
                logHint = f'{logHint} {proxyType}'
                dstHost, dstPort, *_ = uri.split(':')
                data = await aioRead(clientR, ReadMode.UNTIL, untilSep=b'\r\n\r\n', logHint='msg')
            else:
                raise MyError(f'RECV INVALID={line.strip()} EXPECT=CONNECT') # non-CONNECT requests are rejected
        logHint = f'{logHint} {dstHost} {dstPort}'
        log.info(f'{logHint} connStart...')
        # Handshake done: open a TCP connection to the remote proxy
        remoteR, remoteW = await asyncio.open_connection(args.remoteHost, args.remotePort)
        firstDict = {'dst':dstHost, 'dport':dstPort, 'user':args.username, 'password':args.password}
        await aioWrite(remoteW, f'{json.dumps(firstDict)}\r\n'.encode(), logHint=f'1stLine') # send destination and credentials to the remote proxy
        firstLine = await aioRead(remoteR, ReadMode.LINE, logHint=f'1stLine')
        bindHost, bindPort, *_ = firstLine.decode().rstrip().split() # remote proxy bound the destination successfully
        log.info(f'{logHint} connSucc bind {bindHost} {bindPort}')
        if 'SOCKS5' == proxyType: # send the SOCKS5 reply back to the client
            atyp, hostData = socks5EncodeBindHost(bindHost)
            data = struct.pack(f'!ssss{len(hostData)}sH', b'\x05', b'\x00', b'\x00', atyp, hostData, int(bindPort))
            await aioWrite(clientW, data, logHint='reply')
        else:
            await aioWrite(clientW, f'{proto} 200 OK\r\n\r\n'.encode(), logHint='response')
        await asyncio.wait({ # relay both directions concurrently (full duplex)
            asyncio.create_task(xferData(None, clientR, remoteW, logHint=f'{logHint} fromClient', upDirect=True)),
            asyncio.create_task(xferData(None, remoteR, clientW, logHint=f'{logHint} fromRemote', upDirect=False))
        })
    except MyError as exc:
        log.info(f'{logHint} {exc}')
    except json.JSONDecodeError as exc:
        log.info(f'{logHint} {exc}')
    except OSError:
        log.info(f'{logHint} connFail')
    except ValueError as exc:
        log.info(f'{logHint} {exc}')
    except Exception as exc:
        log.error(f'{traceback.format_exc()}')
        exit(1)
    await aioClose(clientW, logHint=logHint)
    await aioClose(remoteW, logHint=logHint)
async def calcBandwidth():
    """Once per second, publish the byte counters as bandwidth figures and
    reset them for the next sampling window."""
    global gSendBandwidth
    global gRecvBandwidth
    global gSendByteCount
    global gRecvByteCount
    while True:
        await asyncio.sleep(1)  # 1-second sampling window
        gSendBandwidth, gSendByteCount = gSendByteCount, 0
        gRecvBandwidth, gRecvByteCount = gRecvByteCount, 0
async def localConsole(ws, path):
    """Serve one GUI console websocket: after a 'secret' handshake, push the
    current send/recv bandwidth once per second until the peer disconnects."""
    global gSendBandwidth
    global gRecvBandwidth
    try:
        handshake = await ws.recv()
        if handshake != 'secret':
            # wrong greeting -> refuse the console connection
            await ws.close()
            return
        while True:
            await asyncio.sleep(1)
            await ws.send(f'{gSendBandwidth} {gRecvBandwidth}')
    except (websockets.exceptions.ConnectionClosedError,
            websockets.exceptions.ConnectionClosedOK) as exc:
        log.error(f'{exc}')
    except Exception:
        log.error(f'{traceback.format_exc()}')
        exit(1)
async def localTask():
    """Start the optional GUI console websocket server and the TCP proxy
    listener, then serve forever."""
    if args.consolePort:
        # Console requested: serve bandwidth stats to the GUI over websockets
        wsServer = await websockets.serve(localConsole, '127.0.0.1', args.consolePort)
        log.info(f'CONSOLE LISTEN {wsServer.sockets[0].getsockname()}')
        asyncio.create_task(calcBandwidth())  # start the once-per-second bandwidth sampler
    proxyServer = await asyncio.start_server(doClient, host=args.listenHost, port=args.listenPort)
    boundAddrs = list([sock.getsockname() for sock in proxyServer.sockets])
    log.info(f'LISTEN {boundAddrs}')
    async with proxyServer:
        await proxyServer.serve_forever()
async def xferData(bucket, srcR, dstW, *, logHint=None, upDirect):
    """Pump bytes from srcR to dstW until EOF/error, counting traffic.

    *bucket* is an optional token bucket for rate limiting (the local proxy
    passes None and only counts bytes); *upDirect* selects which global
    counter (send vs recv) is incremented.
    """
    global gSendByteCount
    global gRecvByteCount
    try:
        while True:
            if bucket:
                tokenCount = await bucket.acquireToken(65535)
            else:
                tokenCount = 65535
            chunk = await aioRead(srcR, ReadMode.MAX, maxLen=tokenCount, logHint='')
            if bucket:
                unused = tokenCount - len(chunk)
                if unused:
                    # give back tokens we reserved but did not consume
                    bucket.releaseToken(unused)
            await aioWrite(dstW, chunk, logHint='')
            if upDirect:
                gSendByteCount += len(chunk)
            else:
                gRecvByteCount += len(chunk)
    except MyError as exc:
        log.info(f'{logHint} {exc}')
    await aioClose(dstW, logHint=logHint)
if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # let Ctrl-C kill the process with the default handler
    _logFmt = logging.Formatter('%(asctime)s %(levelname).1s %(lineno)-3d %(funcName)-20s %(message)s', datefmt='%H:%M:%S') # debug log format
    _consoleHandler = logging.StreamHandler()
    _consoleHandler.setLevel(logging.DEBUG)
    _consoleHandler.setFormatter(_logFmt)
    log = logging.getLogger(__file__)
    log.addHandler(_consoleHandler)
    log.setLevel(logging.DEBUG)
    _parser = argparse.ArgumentParser(description='socks5 https dual proxy') # command-line argument setup
    _parser.add_argument('-k', dest='consolePort', type=int, help='console listen port')
    _parser.add_argument('-l', dest='listenHost', help='proxy listen host default listen all interfaces')
    _parser.add_argument('-p', dest='listenPort', type=int, required=True, help='proxy listen port')
    _parser.add_argument('-u', dest='username', required=True, help='username')
    _parser.add_argument('-w', dest='password', help='password')
    _parser.add_argument('remoteHost', help='remote host')
    _parser.add_argument('remotePort', type=int, help='remote port')
    args = _parser.parse_args()
    if sys.platform == 'win32': # Windows needs the Proactor event loop for these socket APIs
        asyncio.set_event_loop(asyncio.ProactorEventLoop())
    asyncio.run(localTask()) # run the async entry point
| 40.364912 | 132 | 0.638039 |
96c06f66cf4fee3e395d16c70574d64bdd018e43 | 1,745 | py | Python | akshare/stock_feature/stock_a_high_low.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | 721 | 2021-09-21T12:10:33.000Z | 2022-03-31T09:47:01.000Z | akshare/stock_feature/stock_a_high_low.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | 135 | 2021-09-21T12:07:54.000Z | 2022-03-31T14:15:36.000Z | akshare/stock_feature/stock_a_high_low.py | J-Z-Z/akshare | 0a9ca71b381a272e2f56211e455ff2493dfed17a | [
"MIT"
] | 234 | 2021-09-21T12:16:27.000Z | 2022-03-31T09:47:04.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/19 13:32
Desc: 乐咕乐股-创新高、新低的股票数量
https://www.legulegu.com/stockdata/high-low-statistics
"""
import pandas as pd
import requests
def stock_a_high_low_statistics(symbol: str = "all") -> pd.DataFrame:
    """
    legulegu - number of stocks making new highs / new lows
    https://www.legulegu.com/stockdata/high-low-statistics
    :param symbol: choice of {"all", "sz50", "hs300", "zz500"}
    :type symbol: str
    :return: counts of stocks making new highs/lows, one row per date
    :rtype: pandas.DataFrame
    :raises ValueError: if symbol is not one of the supported choices
    """
    # The original if/elif chain built the identical f-string URL in all four
    # branches, and left ``url`` unassigned (NameError) for any other symbol.
    # Collapse to a single URL and validate the input explicitly instead.
    if symbol not in ("all", "sz50", "hs300", "zz500"):
        raise ValueError(
            "unsupported symbol %r; expected one of 'all', 'sz50', 'hs300', 'zz500'"
            % symbol
        )
    url = f"https://www.legulegu.com/stockdata/member-ship/get-high-low-statistics/{symbol}"
    r = requests.get(url)
    data_json = r.json()
    temp_df = pd.DataFrame(data_json)
    # "date" arrives as a millisecond epoch; keep only the calendar date
    temp_df["date"] = pd.to_datetime(temp_df["date"], unit="ms").dt.date
    # Drop server-side bookkeeping columns
    del temp_df["id"]
    del temp_df["indexCode"]
    return temp_df
if __name__ == '__main__':
    # Exercise every supported index universe and print the result.
    for _symbol in ("all", "sz50", "hs300", "zz500"):
        stock_a_high_low_statistics_df = stock_a_high_low_statistics(symbol=_symbol)
        print(stock_a_high_low_statistics_df)
| 34.9 | 96 | 0.706017 |
f989049ca1a2d92a5f2fad4af1f8254a3de43597 | 4,236 | py | Python | test_autofit/non_linear/grid/test_paths/test_indicators.py | rhayes777/AutoFit | f5d769755b85a6188ec1736d0d754f27321c2f06 | [
"MIT"
] | null | null | null | test_autofit/non_linear/grid/test_paths/test_indicators.py | rhayes777/AutoFit | f5d769755b85a6188ec1736d0d754f27321c2f06 | [
"MIT"
] | null | null | null | test_autofit/non_linear/grid/test_paths/test_indicators.py | rhayes777/AutoFit | f5d769755b85a6188ec1736d0d754f27321c2f06 | [
"MIT"
] | null | null | null | from pathlib import Path
import pytest
import autofit as af
from autoconf.conf import output_path_for_test
from autofit.database.aggregator.scrape import Scraper
# Directory used for search output during these tests; passed to the
# output_path_for_test decorator applied to the fixtures/tests below.
output_directory = Path(
    __file__
).parent / "output"
@pytest.fixture(name="parent_search")
@output_path_for_test(output_directory)
def make_parent_search(model_gaussian_x1):
    """A mock parent search whose paths carry the Gaussian model."""
    parent = af.m.MockSearch(name="parent")
    parent.paths.model = model_gaussian_x1
    return parent
@pytest.fixture(name="database_parent_search")
def make_database_parent_search(session):
    """A database-backed parent search bound to the test session."""
    return af.DynestyStatic(session=session)
def _make_grid_search(mapper, parent_search, session=None):
    """
    Build a two-step grid search over the component's tuple priors,
    run it against a mock analysis, and attach the given parent search.
    """
    grid_priors = [
        mapper.component.one_tuple.one_tuple_0,
        mapper.component.one_tuple.one_tuple_1,
    ]
    grid_search = af.SearchGridSearch(
        search=af.m.MockOptimizer(session=session),
        number_of_steps=2,
    )
    grid_search.fit(
        model=mapper,
        analysis=af.m.MockAnalysis(),
        grid_priors=grid_priors,
        parent=parent_search,
    )
    return grid_search
@pytest.fixture(name="grid_search")
def make_grid_search(mapper, parent_search):
    """A completed directory-backed grid search with its output saved."""
    grid = _make_grid_search(mapper, parent_search)
    grid.paths.save_all()
    return grid
@pytest.fixture(name="database_grid_search")
def make_database_grid_search(mapper, database_parent_search, session):
    """A grid search whose optimizer writes to the database session."""
    return _make_grid_search(
        mapper,
        database_parent_search,
        session=session,
    )
class TestMiscombination:
    def test_directory_for_database(self, parent_search, session, mapper):
        """
        Passing a session together with a directory-backed parent search
        is an invalid combination and must raise a TypeError.
        """
        with pytest.raises(TypeError):
            _make_grid_search(mapper, parent_search, session)
class TestDirectory:
    def test_parent_search(self, grid_search, parent_search):
        """The grid search records its parent both in memory and on disk."""
        grid_paths = grid_search.paths
        parent_paths = parent_search.paths
        assert grid_paths.parent is parent_paths
        with open(grid_paths._parent_identifier_path) as f:
            assert f.read() == parent_paths.identifier

    def test_is_grid_search(self, grid_search):
        """Grid-search paths identify themselves as such."""
        assert grid_search.paths.is_grid_search
@output_path_for_test(output_directory)
def test_scrape(grid_search, parent_search, model_gaussian_x1, session):
    """
    Scraping the output directory records the grid search in the database,
    links it to its parent, and marks the grid search complete.
    """
    grid_search.fit(
        model=model_gaussian_x1,
        analysis=af.m.MockAnalysis(),
        parent=parent_search,
        grid_priors=[model_gaussian_x1.centre],
    )
    parent_search.fit(
        model=model_gaussian_x1,
        analysis=af.m.MockAnalysis(),
    )
    parent_search.paths.save_all()

    Scraper(directory=output_directory, session=session).scrape()

    aggregator = af.Aggregator(session)
    matches = list(
        aggregator.query(aggregator.search.id == grid_search.paths.identifier)
    )
    assert matches[0].parent.id == parent_search.paths.identifier
    assert len(aggregator.values("max_log_likelihood")) > 0
    assert list(aggregator.grid_searches())[0].is_complete
@output_path_for_test(output_directory)
def test_incomplete(grid_search, session):
    """
    A grid search with only metadata saved must not be scraped in as
    a complete search.
    """
    grid_search.save_metadata()

    Scraper(directory=output_directory, session=session).scrape()
    session.commit()

    aggregator = af.Aggregator(session)
    complete = aggregator(aggregator.search.is_complete)
    assert len(complete) == 0
class TestDatabase:
    def test_parent_search(self, database_grid_search, database_parent_search):
        """Parent linkage is held in memory and recorded on the fit row."""
        parent_paths = database_parent_search.paths
        assert database_grid_search.paths.parent is parent_paths
        assert database_grid_search.paths.fit.parent_id == parent_paths.identifier

    def test_is_grid_search(self, database_grid_search):
        """Grid-search paths identify themselves as such."""
        assert database_grid_search.paths.is_grid_search
| 20.764706 | 82 | 0.631256 |
53744ab0a2a0b1760576ef3d90eb2c443dd2256c | 6,592 | py | Python | venv/Lib/hmac.py | rchangdar/Django-WebApp | 7f5e08ba5892b73f84c885ad805605985a5c542a | [
"MIT"
] | null | null | null | venv/Lib/hmac.py | rchangdar/Django-WebApp | 7f5e08ba5892b73f84c885ad805605985a5c542a | [
"MIT"
] | null | null | null | venv/Lib/hmac.py | rchangdar/Django-WebApp | 7f5e08ba5892b73f84c885ad805605985a5c542a | [
"MIT"
] | null | null | null | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
try:
import _hashlib as _hashopenssl
except ImportError:
_hashopenssl = None
_openssl_md_meths = None
else:
_openssl_md_meths = frozenset(_hashopenssl.openssl_md_meth_names)
import hashlib as _hashlib
# Translation tables: XOR every possible byte value with the RFC 2104
# opad (0x5C) and ipad (0x36) constants, so that padding a key reduces
# to a single bytes.translate() call.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.
    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.
    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.
        key: key for the keyed hash object (bytes or bytearray).
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A hash name suitable for hashlib.new().
                   Defaults to hashlib.md5.
                   Implicit default to hashlib.md5 is deprecated and will be
                   removed in Python 3.6.
        Note: key and msg must be a bytes or bytearray objects.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
        if digestmod is None:
            _warnings.warn("HMAC() without an explicit digestmod argument "
                           "is deprecated.", PendingDeprecationWarning, 2)
            digestmod = _hashlib.md5
        # Normalise digestmod into a one-argument hash constructor.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.new(d)
        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size
        # Determine the hash's input block size, warning on suspicious values.
        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize
        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize
        # RFC 2104 key preparation: keys longer than one block are hashed
        # down first, then the key is zero-padded to exactly one block.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()
        key = key.ljust(blocksize, b'\0')
        # Prime the outer/inner hashes with (key XOR opad) / (key XOR ipad).
        self.outer.update(key.translate(trans_5C))
        self.inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)
    @property
    def name(self):
        # e.g. "hmac-md5", "hmac-sha256"
        return "hmac-" + self.inner.name
    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        # Only the inner hash absorbs message data; the outer hash is
        # finalised lazily in _current().
        self.inner.update(msg)
    def copy(self):
        """Return a separate copy of this hashing object.
        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other
    def _current(self):
        """Return a hash object for the current state.
        To be used only internally with digest() and hexdigest().
        """
        # Work on a copy of the outer hash so the running state can keep
        # accepting updates after a digest has been taken.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h
    def digest(self):
        """Return the hash value of this hashing object.
        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()
    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg = None, digestmod = None):
    """Create a new hashing object and return it.
    key: The starting key for the hash (bytes or bytearray).
    msg: if available, will immediately be hashed into the object's starting
    state.
    You can now feed arbitrary strings into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    method.
    """
    # Thin convenience wrapper mirroring hashlib's module-level constructors.
    return HMAC(key, msg, digestmod)
def digest(key, msg, digest):
    """Fast inline implementation of HMAC
    key: key for the keyed hash object (bytes or bytearray).
    msg: input message
    digest: A hash name suitable for hashlib.new() for best performance. *OR*
            A hashlib constructor returning a new hash object. *OR*
            A module supporting PEP 247.
    Note: key and msg must be a bytes or bytearray objects.
    """
    # Fast path: let OpenSSL compute the whole HMAC in C when the digest is
    # named by a string the OpenSSL binding supports.
    if (_hashopenssl is not None and
            isinstance(digest, str) and digest in _openssl_md_meths):
        return _hashopenssl.hmac_digest(key, msg, digest)
    # Slow path mirrors HMAC.__init__: normalise `digest` into a constructor.
    if callable(digest):
        digest_cons = digest
    elif isinstance(digest, str):
        digest_cons = lambda d=b'': _hashlib.new(digest, d)
    else:
        digest_cons = lambda d=b'': digest.new(d)
    inner = digest_cons()
    outer = digest_cons()
    blocksize = getattr(inner, 'block_size', 64)
    # RFC 2104 key preparation: hash over-long keys, then zero-pad.
    if len(key) > blocksize:
        key = digest_cons(key).digest()
    key = key + b'\x00' * (blocksize - len(key))
    inner.update(key.translate(trans_36))
    outer.update(key.translate(trans_5C))
    inner.update(msg)
    # HMAC = H((key^opad) || H((key^ipad) || msg))
    outer.update(inner.digest())
    return outer.digest()
| 35.251337 | 97 | 0.60361 |
a369b50d9d9d2ed07d9522087cbc852ecc5b10e3 | 103,205 | py | Python | modules/s3db/msg.py | flavour/RMSYemen | 0fa7ecd3259e1d051193ef26a9d2999e67695b8f | [
"MIT"
] | 27 | 2015-01-13T23:52:49.000Z | 2016-01-11T08:08:45.000Z | modules/s3db/msg.py | flavour/RMSYemen | 0fa7ecd3259e1d051193ef26a9d2999e67695b8f | [
"MIT"
] | 140 | 2015-01-01T07:38:10.000Z | 2016-03-01T10:51:54.000Z | modules/s3db/msg.py | flavour/RMSYemen | 0fa7ecd3259e1d051193ef26a9d2999e67695b8f | [
"MIT"
] | 109 | 2015-01-02T17:36:17.000Z | 2016-02-07T14:49:28.000Z | # -*- coding: utf-8 -*-
""" Sahana Eden Messaging Model
@copyright: 2009-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Names exported by this module
__all__ = ("S3ChannelModel",
           "S3MessageModel",
           "S3MessageAttachmentModel",
           "S3MessageContactModel",
           "S3MessageTagModel",
           "S3EmailModel",
           "S3FacebookModel",
           "S3MCommonsModel",
           "S3GCMModel",
           "S3ParsingModel",
           "S3RSSModel",
           "S3SMSModel",
           "S3SMSOutboundModel",
           "S3TropoModel",
           "S3TwilioModel",
           "S3TwitterModel",
           "S3TwitterSearchModel",
           "S3XFormsModel",
           "S3BaseStationModel",
           )
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3ChannelModel(S3Model):
    """
        Messaging Channels
        - all Inbound & Outbound channels for messages are instances of this
        super-entity

        Defines the msg_channel super-entity plus helper tables, and the
        enable/disable/poll helpers used both from the CLI and as S3Methods.
    """
    names = ("msg_channel",
             "msg_channel_limit",
             "msg_channel_status",
             "msg_channel_id",
             "msg_channel_enable",
             "msg_channel_disable",
             "msg_channel_enable_interactive",
             "msg_channel_disable_interactive",
             "msg_channel_onaccept",
             )
    def model(self):
        T = current.T
        db = current.db
        define_table = self.define_table
        #----------------------------------------------------------------------
        # Super entity: msg_channel
        # - maps instance tablenames to human-readable channel type labels
        #
        channel_types = Storage(msg_email_channel = T("Email (Inbound)"),
                                msg_facebook_channel = T("Facebook"),
                                msg_gcm_channel = T("Google Cloud Messaging"),
                                msg_mcommons_channel = T("Mobile Commons (Inbound)"),
                                msg_rss_channel = T("RSS Feed"),
                                msg_sms_modem_channel = T("SMS Modem"),
                                msg_sms_webapi_channel = T("SMS WebAPI (Outbound)"),
                                msg_sms_smtp_channel = T("SMS via SMTP (Outbound)"),
                                msg_tropo_channel = T("Tropo"),
                                msg_twilio_channel = T("Twilio (Inbound)"),
                                msg_twitter_channel = T("Twitter"),
                                )
        tablename = "msg_channel"
        self.super_entity(tablename, "channel_id",
                          channel_types,
                          Field("name",
                                #label = T("Name"),
                                ),
                          Field("description",
                                #label = T("Description"),
                                ),
                          Field("enabled", "boolean",
                                default = True,
                                #label = T("Enabled?")
                                #represent = s3_yes_no_represent,
                                ),
                          # @ToDo: Indicate whether channel can be used for Inbound or Outbound
                          #Field("inbound", "boolean",
                          #      label = T("Inbound?")),
                          #Field("outbound", "boolean",
                          #      label = T("Outbound?")),
                          on_define = lambda table: \
                            [table.instance_type.set_attributes(readable = True),
                             ],
                          )
        # Reusable Field
        channel_id = S3ReusableField("channel_id", "reference %s" % tablename,
                                     label = T("Channel"),
                                     ondelete = "SET NULL",
                                     represent = S3Represent(lookup=tablename),
                                     requires = IS_EMPTY_OR(
                                        IS_ONE_OF_EMPTY(db, "msg_channel.id")),
                                     )
        self.add_components(tablename,
                            msg_channel_status = "channel_id",
                            )
        # ---------------------------------------------------------------------
        # Channel Limit
        # Used to limit the number of emails sent from the system
        # - works by simply recording an entry for the timestamp to be checked against
        #
        # - currently just used by msg.send_email()
        #
        tablename = "msg_channel_limit"
        define_table(tablename,
                     # @ToDo: Make it per-channel
                     #channel_id(),
                     *S3MetaFields.timestamps())
        # ---------------------------------------------------------------------
        # Channel Status
        # Used to record errors encountered in the Channel
        #
        tablename = "msg_channel_status"
        define_table(tablename,
                     channel_id(),
                     Field("status",
                           #label = T("Status"),
                           #represent = s3_yes_no_represent,
                           represent = lambda v: v or current.messages["NONE"],
                           ),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        return {"msg_channel_id": channel_id,
                "msg_channel_enable": self.channel_enable,
                "msg_channel_disable": self.channel_disable,
                "msg_channel_enable_interactive": self.channel_enable_interactive,
                "msg_channel_disable_interactive": self.channel_disable_interactive,
                "msg_channel_onaccept": self.channel_onaccept,
                "msg_channel_poll": self.channel_poll,
                }
    # -------------------------------------------------------------------------
    @staticmethod
    def channel_enable(tablename, channel_id):
        """
            Enable a Channel
            - Schedule a Poll for new messages
            - Enable all associated Parsers

            CLI API for shell scripts & to be called by S3Method

            @param tablename: the channel instance tablename
            @param channel_id: the msg_channel super-entity key
            @return: status string for display/logging
        """
        db = current.db
        s3db = current.s3db
        table = s3db.table(tablename)
        record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
                                                           table.enabled,
                                                           limitby=(0, 1),
                                                           ).first()
        if not record.enabled:
            # Flag it as enabled
            # Update Instance
            record.update_record(enabled = True)
            # Update Super
            s3db.update_super(table, record)
        # Enable all Parser tasks on this channel
        ptable = s3db.msg_parser
        query = (ptable.channel_id == channel_id) & \
                (ptable.deleted == False)
        parsers = db(query).select(ptable.id)
        for parser in parsers:
            s3db.msg_parser_enable(parser.id)
        # Do we have an existing Task?
        ttable = db.scheduler_task
        args = '["%s", %s]' % (tablename, channel_id)
        query = ((ttable.function_name == "msg_poll") & \
                 (ttable.args == args) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        exists = db(query).select(ttable.id,
                                  limitby=(0, 1)).first()
        if exists:
            return "Channel already enabled"
        else:
            # Schedule an unlimited 5-minute poll task for this channel
            current.s3task.schedule_task("msg_poll",
                                         args = [tablename, channel_id],
                                         period = 300,  # seconds
                                         timeout = 300, # seconds
                                         repeats = 0    # unlimited
                                         )
            return "Channel enabled"
    # -------------------------------------------------------------------------
    @staticmethod
    def channel_enable_interactive(r, **attr):
        """
            Enable a Channel
            - Schedule a Poll for new messages

            S3Method for interactive requests
        """
        tablename = r.tablename
        result = current.s3db.msg_channel_enable(tablename, r.record.channel_id)
        current.session.confirmation = result
        # Redirect back to the channel list (controller fn = tablename
        # without the "msg_" prefix)
        fn = tablename.split("_", 1)[1]
        redirect(URL(f=fn))
    # -------------------------------------------------------------------------
    @staticmethod
    def channel_disable(tablename, channel_id):
        """
            Disable a Channel
            - Remove schedule for Polling for new messages
            - Disable all associated Parsers

            CLI API for shell scripts & to be called by S3Method

            @param tablename: the channel instance tablename
            @param channel_id: the msg_channel super-entity key
            @return: status string for display/logging
        """
        db = current.db
        s3db = current.s3db
        table = s3db.table(tablename)
        record = db(table.channel_id == channel_id).select(table.id, # needed for update_record
                                                           table.enabled,
                                                           limitby=(0, 1),
                                                           ).first()
        if record.enabled:
            # Flag it as disabled
            # Update Instance
            record.update_record(enabled = False)
            # Update Super
            s3db.update_super(table, record)
        # Disable all Parser tasks on this channel
        ptable = s3db.msg_parser
        parsers = db(ptable.channel_id == channel_id).select(ptable.id)
        for parser in parsers:
            s3db.msg_parser_disable(parser.id)
        # Do we have an existing Task?
        ttable = db.scheduler_task
        args = '["%s", %s]' % (tablename, channel_id)
        query = ((ttable.function_name == "msg_poll") & \
                 (ttable.args == args) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        exists = db(query).select(ttable.id,
                                  limitby=(0, 1)).first()
        if exists:
            # Disable all
            db(query).update(status="STOPPED")
            return "Channel disabled"
        else:
            return "Channel already disabled"
    # --------------------------------------------------------------------------
    @staticmethod
    def channel_disable_interactive(r, **attr):
        """
            Disable a Channel
            - Remove schedule for Polling for new messages

            S3Method for interactive requests
        """
        tablename = r.tablename
        result = current.s3db.msg_channel_disable(tablename, r.record.channel_id)
        current.session.confirmation = result
        # Redirect back to the channel list (controller fn = tablename
        # without the "msg_" prefix)
        fn = tablename.split("_", 1)[1]
        redirect(URL(f=fn))
    # -------------------------------------------------------------------------
    @staticmethod
    def channel_onaccept(form):
        """
            Process the Enabled Flag
            - onaccept callback for channel instance tables: keeps the poll
              schedule in sync with the "enabled" field
        """
        form_vars = form.vars
        if form.record:
            # Update form
            # Process if changed
            if form.record.enabled and not form_vars.enabled:
                current.s3db.msg_channel_disable(form.table._tablename,
                                                 form_vars.channel_id)
            elif form_vars.enabled and not form.record.enabled:
                current.s3db.msg_channel_enable(form.table._tablename,
                                                form_vars.channel_id)
        else:
            # Create form
            # Process only if enabled
            if form_vars.enabled:
                current.s3db.msg_channel_enable(form.table._tablename,
                                                form_vars.channel_id)
    # -------------------------------------------------------------------------
    @staticmethod
    def channel_poll(r, **attr):
        """
            Poll a Channel for new messages

            S3Method for interactive requests
        """
        tablename = r.tablename
        # Run the poll in the background scheduler, not in this request
        current.s3task.run_async("msg_poll", args=[tablename, r.record.channel_id])
        current.session.confirmation = \
            current.T("The poll request has been submitted, so new messages should appear shortly - refresh to see them")
        # Redirect to the relevant inbox for this channel type
        if tablename == "msg_email_channel":
            fn = "email_inbox"
        elif tablename == "msg_mcommons_channel":
            fn = "sms_inbox"
        elif tablename == "msg_rss_channel":
            fn = "rss"
        elif tablename == "msg_twilio_channel":
            fn = "sms_inbox"
        elif tablename == "msg_twitter_channel":
            fn = "twitter_inbox"
        else:
            return "Unsupported channel: %s" % tablename
        redirect(URL(f=fn))
# =============================================================================
class S3MessageModel(S3Model):
    """
        Messages
        - the msg_message super-entity (all inbound & outbound messages)
          and the msg_outbox table (one entry per recipient)
    """
    names = ("msg_message",
             "msg_message_id",
             "msg_message_represent",
             "msg_outbox",
             )
    def model(self):
        T = current.T
        db = current.db
        UNKNOWN_OPT = current.messages.UNKNOWN_OPT
        configure = self.configure
        define_table = self.define_table
        # Message priority
        # NOTE(review): not referenced anywhere in this model - verify
        # whether it is still needed before removing
        msg_priority_opts = {3 : T("High"),
                             2 : T("Medium"),
                             1 : T("Low"),
                             }
        # ---------------------------------------------------------------------
        # Message Super Entity - all Inbound & Outbound Messages
        #
        message_types = Storage(msg_contact = T("Contact"),
                                msg_email = T("Email"),
                                msg_facebook = T("Facebook"),
                                msg_rss = T("RSS"),
                                msg_sms = T("SMS"),
                                msg_twitter = T("Twitter"),
                                msg_twitter_result = T("Twitter Search Results"),
                                )
        tablename = "msg_message"
        self.super_entity(tablename, "message_id",
                          message_types,
                          # Knowing which Channel Incoming Messages
                          # came in on allows correlation to Outbound
                          # messages (campaign_message, deployment_alert, etc)
                          self.msg_channel_id(),
                          s3_datetime(default="now"),
                          Field("body", "text",
                                label = T("Message"),
                                ),
                          Field("from_address",
                                label = T("From"),
                                ),
                          Field("to_address",
                                label = T("To"),
                                ),
                          Field("inbound", "boolean",
                                default = False,
                                label = T("Direction"),
                                represent = lambda direction: \
                                            (direction and [T("In")] or \
                                                           [T("Out")])[0],
                                ),
                          on_define = lambda table: \
                            [table.instance_type.set_attributes(readable = True,
                                                                writable = True,
                                                                ),
                             ],
                          )
        configure(tablename,
                  list_fields = ["instance_type",
                                 "from_address",
                                 "to_address",
                                 "body",
                                 "inbound",
                                 ],
                  )
        # Reusable Field
        message_represent = S3Represent(lookup=tablename, fields=["body"])
        message_id = S3ReusableField("message_id", "reference %s" % tablename,
                                     ondelete = "RESTRICT",
                                     represent = message_represent,
                                     requires = IS_EMPTY_OR(
                                        IS_ONE_OF_EMPTY(db, "msg_message.id")),
                                     )
        self.add_components(tablename,
                            msg_attachment = "message_id",
                            msg_tag = "message_id",
                            deploy_response = "message_id",
                            )
        # ---------------------------------------------------------------------
        # Outbound Messages
        #
        # Show only the supported messaging methods
        MSG_CONTACT_OPTS = current.msg.MSG_CONTACT_OPTS
        # Maximum number of retries to send a message
        MAX_SEND_RETRIES = current.deployment_settings.get_msg_max_send_retries()
        # Valid message outbox statuses
        MSG_STATUS_OPTS = {1 : T("Unsent"),
                           2 : T("Sent"),
                           3 : T("Draft"),
                           4 : T("Invalid"),
                           5 : T("Failed"),
                           }
        opt_msg_status = S3ReusableField("status", "integer",
                                         notnull=True,
                                         requires = IS_IN_SET(MSG_STATUS_OPTS,
                                                              zero=None),
                                         default = 1,
                                         label = T("Status"),
                                         represent = lambda opt: \
                                                     MSG_STATUS_OPTS.get(opt,
                                                                         UNKNOWN_OPT))
        # Outbox - needs to be separate to Message since a single message
        # sent needs different outbox entries for each recipient
        tablename = "msg_outbox"
        define_table(tablename,
                     # FK not instance
                     message_id(),
                     # Person/Group to send the message out to:
                     self.super_link("pe_id", "pr_pentity"),
                     # If set used instead of picking up from pe_id:
                     Field("address"),
                     Field("contact_method", length=32,
                           default = "EMAIL",
                           label = T("Contact Method"),
                           represent = lambda opt: \
                                       MSG_CONTACT_OPTS.get(opt, UNKNOWN_OPT),
                           requires = IS_IN_SET(MSG_CONTACT_OPTS,
                                                zero=None),
                           ),
                     opt_msg_status(),
                     # Used to loop through a PE to get it's members
                     Field("system_generated", "boolean",
                           default = False,
                           ),
                     # Give up if we can't send after MAX_RETRIES
                     Field("retries", "integer",
                           default = MAX_SEND_RETRIES,
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  list_fields = ["id",
                                 "message_id",
                                 "pe_id",
                                 "status",
                                 ],
                  orderby = "msg_outbox.created_on desc",
                  )
        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        return {"msg_message_id": message_id,
                "msg_message_represent": message_represent,
                }
    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Return safe defaults in case the model has been deactivated.
        """
        dummy = S3ReusableField("dummy_id", "integer",
                                readable = False,
                                writable = False)
        return {"msg_message_id": lambda **attr: dummy("message_id"),
                }
# =============================================================================
class S3MessageAttachmentModel(S3Model):
    """
        Message Attachments
        - link table between msg_message & doc_document
    """
    names = ("msg_attachment",)
    def model(self):
        """Define the msg_attachment link table."""
        self.define_table("msg_attachment",
                          # FK not instance
                          self.msg_message_id(ondelete="CASCADE"),
                          self.doc_document_id(),
                          *s3_meta_fields())
        # Nothing to pass back to global scope (s3.*)
        return {}
# =============================================================================
class S3MessageContactModel(S3Model):
    """
        Contact Form
        - inbound messages submitted via a public contact form
    """
    names = ("msg_contact",
             )
    def model(self):
        T = current.T
        # ---------------------------------------------------------------------
        # Contact Messages: InBox
        #
        # Should probably use project_task if this kind of functionality is desired:
        #priority_opts = {1: T("Low"),
        #                 2: T("Medium"),
        #                 3: T("High"),
        #                 }
        #status_opts = {1: T("New"),
        #               2: T("In-Progress"),
        #               3: T("Closed"),
        #               }
        tablename = "msg_contact"
        self.define_table(tablename,
                          # Instance
                          self.super_link("message_id", "msg_message"),
                          self.msg_channel_id(), # Unused
                          s3_datetime(default = "now"),
                          Field("subject", length=78,    # RFC 2822
                                label = T("Subject"),
                                requires = IS_LENGTH(78),
                                ),
                          Field("name",
                                label = T("Name"),
                                ),
                          Field("body", "text",
                                label = T("Message"),
                                ),
                          Field("phone",
                                label = T("Phone"),
                                requires = IS_EMPTY_OR(s3_phone_requires),
                                ),
                          Field("from_address",
                                label = T("Email"),
                                requires = IS_EMPTY_OR(IS_EMAIL()),
                                ),
                          #Field("priority", "integer",
                          #      default = 1,
                          #      label = T("Priority"),
                          #      represent = S3Represent(options = priority_opts),
                          #      requires = IS_IN_SET(priority_opts,
                          #                           zero = None),
                          #      ),
                          #Field("status", "integer",
                          #      default = 3,
                          #      label = T("Status"),
                          #      represent = S3Represent(options = status_opts),
                          #      requires = IS_IN_SET(status_opts,
                          #                           zero = None),
                          #      ),
                          Field("inbound", "boolean",
                                default = True,
                                label = T("Direction"),
                                represent = lambda direction: \
                                            (direction and [T("In")] or [T("Out")])[0],
                                readable = False,
                                writable = False,
                                ),
                          *s3_meta_fields())
        self.configure(tablename,
                       orderby = "msg_contact.date desc",
                       super_entity = "msg_message",
                       )
        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create=T("Contact Form"),
            title_display=T("Contact Details"),
            title_list=T("Contacts"),
            title_update=T("Edit Contact"),
            label_list_button=T("List Contacts"),
            label_delete_button=T("Delete Contact"),
            msg_record_created=T("Contact added"),
            msg_record_modified=T("Contact updated"),
            msg_record_deleted=T("Contact deleted"),
            msg_list_empty=T("No Contacts currently registered"))
        # ---------------------------------------------------------------------
        # Nothing to pass back to global scope (s3.*)
        return {}
# =============================================================================
class S3MessageTagModel(S3Model):
    """
        Message Tags
    """
    names = ("msg_tag",)
    def model(self):
        """Define the msg_tag key-value table."""
        T = current.T
        # Key-Value extensions for messages:
        # - can provide conversions to external systems (e.g. HXL, FTS)
        # - can act as a Triple Store for Semantic Web support
        # - can be used to add custom fields
        tablename = "msg_tag"
        self.define_table(tablename,
                          # FK not instance
                          self.msg_message_id(ondelete="CASCADE"),
                          # "key" is a reserved word in MySQL, hence "tag"
                          Field("tag",
                                label = T("Key"),
                                ),
                          Field("value",
                                label = T("Value"),
                                ),
                          s3_comments(),
                          *s3_meta_fields())
        # At most one value per (message, key) pair
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("message_id",
                                                            "tag",
                                                            ),
                                                 ),
                       )
        # Nothing to pass back to global scope (s3.*)
        return {}
# =============================================================================
class S3EmailModel(S3ChannelModel):
    """
        Email
            InBound Channels
                Outbound Email is currently handled via deployment_settings
            InBox/OutBox
    """
    names = ("msg_email_channel",
             "msg_email",
             )
    def model(self):
        T = current.T
        configure = self.configure
        define_table = self.define_table
        set_method = self.set_method
        super_link = self.super_link
        # ---------------------------------------------------------------------
        # Email Inbound Channels
        # - IMAP/POP3 account details for fetching mail
        #
        tablename = "msg_email_channel"
        define_table(tablename,
                     # Instance
                     super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     # Allows using different Inboxes for different Orgs/Branches
                     self.org_organisation_id(),
                     Field("enabled", "boolean",
                           default = True,
                           label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("server"),
                     Field("protocol",
                           requires = IS_IN_SET(["imap", "pop3"],
                                                zero=None),
                           ),
                     Field("use_ssl", "boolean"),
                     Field("port", "integer"),
                     Field("username"),
                     Field("password", "password", length=64,
                           readable = False,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(64),
                                       ],
                           widget = S3PasswordWidget(),
                           ),
                     # Set true to delete messages from the remote
                     # inbox after fetching them.
                     Field("delete_from_server", "boolean"),
                     *s3_meta_fields())
        configure(tablename,
                  # Keeps the poll schedule in sync with the "enabled" flag
                  onaccept = self.msg_channel_onaccept,
                  super_entity = "msg_channel",
                  )
        # Interactive enable/disable/poll actions (see S3ChannelModel)
        set_method("msg", "email_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "email_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "email_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # Email Messages: InBox & Outbox
        #
        sender = current.deployment_settings.get_mail_sender()
        tablename = "msg_email"
        define_table(tablename,
                     # Instance
                     super_link("message_id", "msg_message"),
                     self.msg_channel_id(),
                     s3_datetime(default = "now"),
                     Field("subject", length=78,    # RFC 2822
                           label = T("Subject"),
                           requires = IS_LENGTH(78),
                           ),
                     Field("body", "text",
                           label = T("Message"),
                           ),
                     Field("from_address", #notnull=True,
                           default = sender,
                           label = T("Sender"),
                           requires = IS_EMAIL(),
                           ),
                     Field("to_address",
                           label = T("To"),
                           requires = IS_EMAIL(),
                           ),
                     # Raw message source, kept for debugging/audit
                     Field("raw", "text",
                           label = T("Message Source"),
                           readable = False,
                           writable = False,
                           ),
                     Field("inbound", "boolean",
                           default = False,
                           label = T("Direction"),
                           represent = lambda direction: \
                                       (direction and [T("In")] or [T("Out")])[0],
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  orderby = "msg_email.date desc",
                  super_entity = "msg_message",
                  )
        # Components
        self.add_components(tablename,
                            # Used to link to custom tab deploy_response_select_mission:
                            deploy_mission = {"name": "select",
                                              "link": "deploy_response",
                                              "joinby": "message_id",
                                              "key": "mission_id",
                                              "autodelete": False,
                                              },
                            )
        # ---------------------------------------------------------------------
        # Nothing to pass back to global scope (s3.*)
        return {}
# =============================================================================
class S3FacebookModel(S3ChannelModel):
    """
        Facebook
            Channels
            InBox/OutBox
        https://developers.facebook.com/docs/graph-api
    """
    names = ("msg_facebook_channel",
             "msg_facebook",
             "msg_facebook_login",
             )
    def model(self):
        """
            Define msg_facebook_channel (accounts/apps) & msg_facebook
            (messages) - instances of the msg_channel / msg_message
            super-entities respectively
        """
        T = current.T
        configure = self.configure
        define_table = self.define_table
        set_method = self.set_method
        super_link = self.super_link
        # ---------------------------------------------------------------------
        # Facebook Channels
        #
        tablename = "msg_facebook_channel"
        define_table(tablename,
                     # Instance
                     super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("enabled", "boolean",
                           default = True,
                           label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     # Only one channel may have this flag set
                     # (enforced in msg_facebook_channel_onaccept)
                     Field("login", "boolean",
                           default = False,
                           label = T("Use for Login?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("app_id", "bigint",
                           requires = IS_INT_IN_RANGE(0, +1e16)
                           ),
                     Field("app_secret", "password", length=64,
                           readable = False,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(64),
                                       ],
                           widget = S3PasswordWidget(),
                           ),
                     # Optional
                     Field("page_id", "bigint",
                           requires = IS_INT_IN_RANGE(0, +1e16)
                           ),
                     Field("page_access_token"),
                     *s3_meta_fields())
        configure(tablename,
                  onaccept = self.msg_facebook_channel_onaccept,
                  super_entity = "msg_channel",
                  )
        set_method("msg", "facebook_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "facebook_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        #set_method("msg", "facebook_channel",
        #           method = "poll",
        #           action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # Facebook Messages: InBox & Outbox
        #
        tablename = "msg_facebook"
        define_table(tablename,
                     # Instance
                     super_link("message_id", "msg_message"),
                     self.msg_channel_id(),
                     s3_datetime(default = "now"),
                     Field("body", "text",
                           label = T("Message"),
                           ),
                     # @ToDo: Are from_address / to_address relevant in Facebook?
                     Field("from_address", #notnull=True,
                           #default = sender,
                           label = T("Sender"),
                           ),
                     Field("to_address",
                           label = T("To"),
                           ),
                     Field("inbound", "boolean",
                           default = False,
                           label = T("Direction"),
                           represent = lambda direction: \
                               (direction and [T("In")] or [T("Out")])[0],
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  orderby = "msg_facebook.date desc",
                  super_entity = "msg_message",
                  )
        # ---------------------------------------------------------------------
        return {"msg_facebook_login": self.msg_facebook_login,
                }
    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """ Safe defaults for model-global names if module is disabled """
        return {"msg_facebook_login": lambda: False,
                }
    # -------------------------------------------------------------------------
    @staticmethod
    def msg_facebook_channel_onaccept(form):
        """
            onaccept for msg_facebook_channel:
            - if this channel is flagged for Login, clear the flag on all
              other channels, then run standard channel onaccept processing
            @param form: the FORM (uses form.vars.login & form.vars.id)
        """
        if form.vars.login:
            # Ensure only a single account used for Login
            current.db(current.s3db.msg_facebook_channel.id != form.vars.id).update(login = False)
        # Normal onaccept processing
        S3ChannelModel.channel_onaccept(form)
    # -------------------------------------------------------------------------
    @staticmethod
    def msg_facebook_login():
        """
            Lookup the Facebook channel flagged for Login (and not deleted)
            @return: Row with app_id & app_secret, or None if no such channel
        """
        table = current.s3db.msg_facebook_channel
        query = (table.login == True) & \
                (table.deleted == False)
        c = current.db(query).select(table.app_id,
                                     table.app_secret,
                                     limitby=(0, 1)
                                     ).first()
        return c
# =============================================================================
class S3MCommonsModel(S3ChannelModel):
    """
        Mobile Commons Inbound SMS Settings
        - Outbound can use Web API
    """
    names = ("msg_mcommons_channel",)
    def model(self):
        """
            Define the msg_mcommons_channel table (instance of the
            msg_channel super-entity) & register the enable/disable/poll
            interactive methods for it
        """
        #T = current.T
        define_table = self.define_table
        set_method = self.set_method
        # ---------------------------------------------------------------------
        tablename = "msg_mcommons_channel"
        define_table(tablename,
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("enabled", "boolean",
                           default = True,
                           #label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("campaign_id", length=128, unique=True,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(128),
                                       ],
                           ),
                     Field("url",
                           default = \
                               "https://secure.mcommons.com/api/messages",
                           requires = IS_URL()
                           ),
                     Field("username",
                           requires = IS_NOT_EMPTY(),
                           ),
                     Field("password", "password",
                           readable = False,
                           requires = IS_NOT_EMPTY(),
                           widget = S3PasswordWidget(),
                           ),
                     Field("query"),
                     # NOTE(review): field name is "timestmp" (sic) -
                     # presumably the datetime of the last poll; confirm
                     # against the poller before documenting further
                     Field("timestmp", "datetime",
                           writable = False,
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       onaccept = self.msg_channel_onaccept,
                       super_entity = "msg_channel",
                       )
        set_method("msg", "mcommons_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "mcommons_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "mcommons_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3GCMModel(S3ChannelModel):
    """
        Google Cloud Messaging
            Channels
        https://developers.google.com/cloud-messaging/
    """
    names = ("msg_gcm_channel",
             )
    def model(self):
        """
            Define the msg_gcm_channel table (instance of the msg_channel
            super-entity) & register the enable/disable interactive methods
        """
        T = current.T
        set_method = self.set_method
        # ---------------------------------------------------------------------
        # GCM Channels
        #
        tablename = "msg_gcm_channel"
        self.define_table(tablename,
                          # Instance
                          self.super_link("channel_id", "msg_channel"),
                          Field("name"),
                          Field("description"),
                          # Only one channel may be enabled at a time
                          # (enforced in msg_gcm_channel_onaccept)
                          Field("enabled", "boolean",
                                default = True,
                                label = T("Enabled?"),
                                represent = s3_yes_no_represent,
                                ),
                          #Field("login", "boolean",
                          #      default = False,
                          #      label = T("Use for Login?"),
                          #      represent = s3_yes_no_represent,
                          #      ),
                          Field("api_key",
                                notnull = True,
                                ),
                          *s3_meta_fields())
        self.configure(tablename,
                       onaccept = self.msg_gcm_channel_onaccept,
                       super_entity = "msg_channel",
                       )
        set_method("msg", "gcm_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "gcm_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        #set_method("msg", "gcm_channel",
        #           method = "poll",
        #           action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        return {}
    # -------------------------------------------------------------------------
    @staticmethod
    def msg_gcm_channel_onaccept(form):
        """
            onaccept for msg_gcm_channel:
            - if this channel is enabled, disable all other GCM channels so
              that only a single one is active, then run standard channel
              onaccept processing
            @param form: the FORM (uses form.vars.enabled & form.vars.id)
        """
        if form.vars.enabled:
            # Ensure only a single account enabled
            current.db(current.s3db.msg_gcm_channel.id != form.vars.id).update(enabled = False)
        # Normal onaccept processing
        S3ChannelModel.channel_onaccept(form)
# =============================================================================
class S3ParsingModel(S3Model):
    """
        Message Parsing Model
        - links channels to parser functions, tracks the parsing status of
          messages & stores parsing-support data (sessions/keywords/senders)
    """
    names = ("msg_parser",
             "msg_parsing_status",
             "msg_session",
             "msg_keyword",
             "msg_sender",
             "msg_parser_enabled",
             "msg_parser_enable",
             "msg_parser_disable",
             "msg_parser_enable_interactive",
             "msg_parser_disable_interactive",
             )
    def model(self):
        """
            Define the msg_parser, msg_parsing_status, msg_session,
            msg_keyword & msg_sender tables
        """
        T = current.T
        define_table = self.define_table
        set_method = self.set_method
        channel_id = self.msg_channel_id
        message_id = self.msg_message_id
        # ---------------------------------------------------------------------
        # Link between Message Channels and Parsers in parser.py
        #
        tablename = "msg_parser"
        define_table(tablename,
                     # Source
                     channel_id(ondelete = "CASCADE"),
                     # Name of the parser function to apply to this channel
                     Field("function_name",
                           label = T("Parser"),
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       onaccept = self.msg_parser_onaccept,
                       )
        set_method("msg", "parser",
                   method = "enable",
                   action = self.parser_enable_interactive)
        set_method("msg", "parser",
                   method = "disable",
                   action = self.parser_disable_interactive)
        set_method("msg", "parser",
                   method = "parse",
                   action = self.parser_parse)
        # ---------------------------------------------------------------------
        # Message parsing status
        # - component to core msg_message table
        # - messages which need parsing are placed here & updated when parsed
        #
        tablename = "msg_parsing_status"
        define_table(tablename,
                     # Component, not Instance
                     message_id(ondelete = "CASCADE"),
                     # Source
                     channel_id(ondelete = "CASCADE"),
                     Field("is_parsed", "boolean",
                           default = False,
                           label = T("Parsing Status"),
                           represent = lambda parsed: \
                               (parsed and [T("Parsed")] or \
                                           [T("Not Parsed")])[0],
                           ),
                     # Link to the reply message produced by the parser
                     message_id("reply_id",
                                label = T("Reply"),
                                ondelete = "CASCADE",
                                ),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Login sessions for Message Parsing
        # - links a from_address with a login until expiry
        #
        tablename = "msg_session"
        define_table(tablename,
                     Field("from_address"),
                     Field("email"),
                     Field("created_datetime", "datetime",
                           default = current.request.utcnow,
                           ),
                     # Session lifetime (presumably seconds from
                     # created_datetime - confirm against parser.py)
                     Field("expiration_time", "integer"),
                     Field("is_expired", "boolean",
                           default = False,
                           ),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Keywords for Message Parsing
        #
        tablename = "msg_keyword"
        define_table(tablename,
                     Field("keyword",
                           label = T("Keyword"),
                           ),
                     # @ToDo: Move this to a link table
                     self.event_incident_type_id(),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # Senders for Message Parsing
        # - whitelist / blacklist / prioritise
        #
        tablename = "msg_sender"
        define_table(tablename,
                     Field("sender",
                           label = T("Sender"),
                           ),
                     # @ToDo: Make pe_id work for this
                     #self.super_link("pe_id", "pr_pentity"),
                     Field("priority", "integer",
                           label = T("Priority"),
                           ),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        return {"msg_parser_enabled": self.parser_enabled,
                "msg_parser_enable": self.parser_enable,
                "msg_parser_disable": self.parser_disable,
                }
    # -----------------------------------------------------------------------------
    @staticmethod
    def parser_parse(r, **attr):
        """
            Parse unparsed messages
            S3Method for interactive requests
            - schedules an async "msg_parse" task for the parser's channel
              & redirects back to the parser list with a confirmation
        """
        record = r.record
        current.s3task.run_async("msg_parse", args=[record.channel_id, record.function_name])
        current.session.confirmation = \
            current.T("The parse request has been submitted")
        redirect(URL(f="parser"))
    # -------------------------------------------------------------------------
    @staticmethod
    def parser_enabled(channel_id):
        """
            Helper function to see if there is a Parser connected to a Channel
            - used to determine whether to populate the msg_parsing_status table
            @param channel_id: the channel record ID
            @return: True if a parser record exists for the channel & is
                     enabled, else False
        """
        table = current.s3db.msg_parser
        # Only the first matching parser record is checked
        record = current.db(table.channel_id == channel_id).select(table.enabled,
                                                                   limitby=(0, 1),
                                                                   ).first()
        if record and record.enabled:
            return True
        else:
            return False
    # -------------------------------------------------------------------------
    @staticmethod
    def parser_enable(id):
        """
            Enable a Parser
            - Connect a Parser to a Channel
            CLI API for shell scripts & to be called by S3Method
            @param id: the msg_parser record ID
            @return: status string ("Parser enabled" or
                     "Parser already enabled")
            NOTE(review): the parameter name shadows the builtin id()
            @ToDo: Ensure only 1 Parser is connected to any Channel at a time
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_parser
        record = db(table.id == id).select(table.id, # needed for update_record
                                           table.enabled,
                                           table.channel_id,
                                           table.function_name,
                                           limitby=(0, 1),
                                           ).first()
        if not record.enabled:
            # Flag it as enabled
            record.update_record(enabled = True)
        channel_id = record.channel_id
        function_name = record.function_name
        # Do we have an existing Task?
        # NB args must be formatted exactly as the scheduler stores them
        # for the string comparison below to match
        ttable = db.scheduler_task
        args = '[%s, "%s"]' % (channel_id, function_name)
        query = ((ttable.function_name == "msg_parse") & \
                 (ttable.args == args) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        exists = db(query).select(ttable.id,
                                  limitby=(0, 1)).first()
        if exists:
            return "Parser already enabled"
        else:
            current.s3task.schedule_task("msg_parse",
                                         args = [channel_id, function_name],
                                         period = 300,  # seconds
                                         timeout = 300, # seconds
                                         repeats = 0    # unlimited
                                         )
            return "Parser enabled"
    # -------------------------------------------------------------------------
    @staticmethod
    def parser_enable_interactive(r, **attr):
        """
            Enable a Parser
            - Connect a Parser to a Channel
            S3Method for interactive requests
            - delegates to msg_parser_enable & shows the result as a
              confirmation message
        """
        result = current.s3db.msg_parser_enable(r.id)
        current.session.confirmation = result
        redirect(URL(f="parser"))
    # -------------------------------------------------------------------------
    @staticmethod
    def parser_disable(id):
        """
            Disable a Parser
            - Disconnect a Parser from a Channel
            CLI API for shell scripts & to be called by S3Method
            @param id: the msg_parser record ID
            @return: status string ("Parser disabled" or
                     "Parser already disabled")
            NOTE(review): the parameter name shadows the builtin id()
        """
        db = current.db
        s3db = current.s3db
        table = s3db.msg_parser
        record = db(table.id == id).select(table.id, # needed for update_record
                                           table.enabled,
                                           table.channel_id,
                                           table.function_name,
                                           limitby=(0, 1),
                                           ).first()
        if record.enabled:
            # Flag it as disabled
            record.update_record(enabled = False)
        # Do we have an existing Task?
        # NB args must be formatted exactly as the scheduler stores them
        ttable = db.scheduler_task
        args = '[%s, "%s"]' % (record.channel_id, record.function_name)
        query = ((ttable.function_name == "msg_parse") & \
                 (ttable.args == args) & \
                 (ttable.status.belongs(["RUNNING", "QUEUED", "ALLOCATED"])))
        exists = db(query).select(ttable.id,
                                  limitby=(0, 1)).first()
        if exists:
            # Disable all
            db(query).update(status="STOPPED")
            return "Parser disabled"
        else:
            return "Parser already disabled"
    # -------------------------------------------------------------------------
    @staticmethod
    def parser_disable_interactive(r, **attr):
        """
            Disable a Parser
            - Disconnect a Parser from a Channel
            S3Method for interactive requests
            - delegates to msg_parser_disable & shows the result as a
              confirmation message
        """
        result = current.s3db.msg_parser_disable(r.id)
        current.session.confirmation = result
        redirect(URL(f="parser"))
    # -------------------------------------------------------------------------
    @staticmethod
    def msg_parser_onaccept(form):
        """
            Process the Enabled Flag
            - start/stop the scheduled parse task when the enabled flag
              changes (update) or is set on creation
        """
        if form.record:
            # Update form
            # Process only if the flag changed
            if form.record.enabled and not form.vars.enabled:
                current.s3db.msg_parser_disable(form.vars.id)
            elif form.vars.enabled and not form.record.enabled:
                current.s3db.msg_parser_enable(form.vars.id)
        else:
            # Create form
            # Process only if enabled
            if form.vars.enabled:
                current.s3db.msg_parser_enable(form.vars.id)
# =============================================================================
class S3RSSModel(S3ChannelModel):
    """
        RSS channel
        - channel definitions, fetched feed entries & per-entry links
    """
    names = ("msg_rss_channel",
             "msg_rss",
             "msg_rss_link",
             )
    def model(self):
        """
            Define msg_rss_channel, msg_rss & msg_rss_link tables
        """
        T = current.T
        define_table = self.define_table
        set_method = self.set_method
        super_link = self.super_link
        # ---------------------------------------------------------------------
        # RSS Settings for an account
        #
        tablename = "msg_rss_channel"
        define_table(tablename,
                     # Instance
                     super_link("channel_id", "msg_channel"),
                     Field("name", length=255, unique=True,
                           label = T("Name"),
                           requires = IS_LENGTH(255),
                           ),
                     Field("description",
                           label = T("Description"),
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("url",
                           label = T("URL"),
                           requires = IS_URL(),
                           ),
                     Field("content_type", "boolean",
                           default = False,
                           label = T("Content-Type Override"),
                           represent = s3_yes_no_represent,
                           # Some feeds have text/html set which feedparser refuses to parse
                           comment = T("Force content-type to application/xml"),
                           ),
                     s3_datetime(label = T("Last Polled"),
                                 writable = False,
                                 ),
                     # HTTP ETag of the last fetch, to avoid re-downloading
                     # unchanged feeds
                     Field("etag",
                           label = T("ETag"),
                           writable = False
                           ),
                     # Enable this when required in the template
                     # Used by SAMBRO to separate the RSS for cap or cms
                     Field("type",
                           readable = False,
                           writable = False,
                           ),
                     Field("username",
                           label = T("Username"),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (T("Username"),
                                                           T("Optional username for HTTP Basic Authentication."))),
                           ),
                     Field("password", "password",
                           label = T("Password"),
                           readable = False,
                           widget = S3PasswordWidget(),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (T("Password"),
                                                           T("Optional password for HTTP Basic Authentication."))),
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       list_fields = ["name",
                                      "description",
                                      "enabled",
                                      "url",
                                      "date",
                                      "channel_status.status",
                                      ],
                       onaccept = self.msg_channel_onaccept,
                       super_entity = "msg_channel",
                       )
        set_method("msg", "rss_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "rss_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "rss_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # RSS Feed Posts
        #
        tablename = "msg_rss"
        define_table(tablename,
                     # Instance
                     super_link("message_id", "msg_message"),
                     self.msg_channel_id(),
                     s3_datetime(default="now",
                                 label = T("Published on"),
                                 ),
                     Field("title",
                           label = T("Title"),
                           ),
                     Field("body", "text",
                           label = T("Content"),
                           ),
                     # Stores the entry's link URL (also the deduplication key)
                     Field("from_address",
                           label = T("Link"),
                           ),
                     # http://pythonhosted.org/feedparser/reference-feed-author_detail.html
                     Field("author",
                           label = T("Author"),
                           ),
                     # http://pythonhosted.org/feedparser/reference-entry-tags.html
                     Field("tags", "list:string",
                           label = T("Tags"),
                           ),
                     self.gis_location_id(),
                     # Just present for Super Entity
                     Field("inbound", "boolean",
                           default = True,
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("from_address",),
                                                 ignore_case = False,
                                                 ),
                       list_fields = ["channel_id",
                                      "title",
                                      "from_address",
                                      "date",
                                      "body"
                                      ],
                       super_entity = current.s3db.msg_message,
                       )
        # Components
        self.add_components(tablename,
                            msg_rss_link = "rss_id",
                            )
        rss_represent = S3Represent(lookup = tablename,
                                    fields = ["title", "from_address",],
                                    field_sep = " - ")
        # Reusable FK to msg_rss for use by component tables
        rss_id = S3ReusableField("rss_id", "reference %s" % tablename,
                                 label = T("RSS Link"),
                                 ondelete = "CASCADE",
                                 represent = rss_represent,
                                 requires = IS_EMPTY_OR(
                                                IS_ONE_OF(current.db, "msg_rss.id",
                                                          rss_represent)),
                                 )
        # ---------------------------------------------------------------------
        # Links for RSS Feed
        #
        tablename = "msg_rss_link"
        define_table(tablename,
                     rss_id(),
                     Field("url",
                           requires = IS_EMPTY_OR(IS_URL()),
                           ),
                     Field("type",
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       deduplicate = S3Duplicate(primary = ("rss_id", "url"),
                                                 ),
                       )
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3SMSModel(S3Model):
    """
        SMS: Short Message Service
        These can be received through a number of different gateways
        - MCommons
        - Modem (@ToDo: Restore this)
        - Tropo
        - Twilio
    """
    names = ("msg_sms",)
    def model(self):
        """
            Define the msg_sms table (InBox & Outbox, instance of the
            msg_message super-entity)
        """
        #T = current.T
        user = current.auth.user
        if user and user.organisation_id:
            # SMS Messages need to be tagged to their org so that they can be sent through the correct gateway
            default = user.organisation_id
        else:
            default = None
        # ---------------------------------------------------------------------
        # SMS Messages: InBox & Outbox
        #
        tablename = "msg_sms"
        self.define_table(tablename,
                          # Instance
                          self.super_link("message_id", "msg_message"),
                          self.msg_channel_id(),
                          self.org_organisation_id(default = default),
                          s3_datetime(default="now"),
                          Field("body", "text",
                                # Allow multi-part SMS
                                #length = 160,
                                #label = T("Message"),
                                ),
                          Field("from_address",
                                #label = T("Sender"),
                                ),
                          Field("to_address",
                                #label = T("To"),
                                ),
                          Field("inbound", "boolean",
                                default = False,
                                #represent = lambda direction: \
                                # (direction and [T("In")] or \
                                # [T("Out")])[0],
                                #label = T("Direction")),
                                ),
                          # Used e.g. for Clickatell
                          Field("remote_id",
                                #label = T("Remote ID"),
                                ),
                          *s3_meta_fields())
        self.configure(tablename,
                       super_entity = "msg_message",
                       )
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3SMSOutboundModel(S3Model):
    """
        SMS: Short Message Service
        - Outbound Channels
        These can be sent through a number of different gateways
        - Modem
        - SMTP
        - Tropo
        - Web API (inc Clickatell, MCommons, mVaayoo)
    """
    names = ("msg_sms_outbound_gateway",
             "msg_sms_modem_channel",
             "msg_sms_smtp_channel",
             "msg_sms_webapi_channel",
             )
    def model(self):
        """
            Define the outbound-SMS gateway selection table & the channel
            tables for the Modem, SMTP & Web API gateway types
        """
        #T = current.T
        configure = self.configure
        define_table = self.define_table
        settings = current.deployment_settings
        # ---------------------------------------------------------------------
        # SMS Outbound Gateway
        # - select which gateway is in active use for which Organisation/Branch
        #
        country_code = settings.get_L10n_default_country_code()
        tablename = "msg_sms_outbound_gateway"
        define_table(tablename,
                     # Restricted to the 3 SMS-capable channel instance types
                     self.msg_channel_id(
                        requires = IS_ONE_OF(current.db, "msg_channel.channel_id",
                                             S3Represent(lookup="msg_channel"),
                                             instance_types = ("msg_sms_modem_channel",
                                                               "msg_sms_webapi_channel",
                                                               "msg_sms_smtp_channel",
                                                               ),
                                             sort = True,
                                             ),
                        ),
                     #Field("outgoing_sms_handler", length=32,
                     #      requires = IS_IN_SET(current.msg.GATEWAY_OPTS,
                     #                           zero = None),
                     #      ),
                     # Allow selection of different gateways based on Organisation/Branch
                     self.org_organisation_id(),
                     # @ToDo: Allow selection of different gateways based on destination Location
                     #self.gis_location_id(),
                     Field("default_country_code", "integer",
                           default = country_code,
                           ),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        # SMS Modem Channel
        #
        tablename = "msg_sms_modem_channel"
        define_table(tablename,
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("modem_port"),
                     Field("modem_baud", "integer",
                           default = 115200,
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           ),
                     # Maximum length of a single message (SMS limit)
                     Field("max_length", "integer",
                           default = 160,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  super_entity = "msg_channel",
                  )
        # ---------------------------------------------------------------------
        # SMS via SMTP Channel
        #
        tablename = "msg_sms_smtp_channel"
        define_table(tablename,
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("address", length=64,
                           requires = IS_LENGTH(64),
                           ),
                     Field("subject", length=64,
                           requires = IS_LENGTH(64),
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           ),
                     Field("max_length", "integer",
                           default = 160,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  super_entity = "msg_channel",
                  )
        # ---------------------------------------------------------------------
        # Settings for Web API services
        #
        # @ToDo: Simplified dropdown of services which prepopulates entries & provides nice prompts for the config options
        #        + Advanced mode for raw access to real fields
        #
        # https://www.twilio.com/docs/api/rest/sending-messages
        #
        tablename = "msg_sms_webapi_channel"
        define_table(tablename,
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("url",
                           #default = "http://sms1.cardboardfish.com:9001/HTTPSMS?", # Cardboardfish
                           default = "https://api.clickatell.com/http/sendmsg", # Clickatell
                           #default = "https://secure.mcommons.com/api/send_message", # Mobile Commons
                           #default = "https://www.textmagic.com/app/api", # Text Magic
                           #default = "http://bulkmessage-api.dhiraagu.com.mv/jsp/receiveSMS.jsp", # Dhiraagu (Maldives local provider)
                           #default = "https://api.twilio.com/2010-04-01/Accounts/{AccountSid}/Messages", # Twilio (Untested)
                           requires = IS_URL(),
                           ),
                     # Static query-string parameters appended to each request
                     Field("parameters",
                           #default = "S=H&UN=yourusername&P=yourpassword&SA=Sahana", # Cardboardfish
                           default = "user=yourusername&password=yourpassword&api_id=yourapiid", # Clickatell
                           #default = "campaign_id=yourid", # Mobile Commons
                           #default = "username=yourusername&password=yourpassword&cmd=send&unicode=1", # Text Magic
                           #default = "userid=yourusername&password=yourpassword", # Dhiraagu
                           #default = "From={RegisteredTelNumber}", # Twilio (Untested)
                           ),
                     # Name of the parameter carrying the message body
                     Field("message_variable", "string",
                           #default = "M", # Cardboardfish
                           default = "text", # Clickatell, Text Magic, Dhiraagu
                           #default = "body", # Mobile Commons
                           #default = "Body", # Twilio (Untested)
                           requires = IS_NOT_EMPTY(),
                           ),
                     # Name of the parameter carrying the recipient number
                     Field("to_variable", "string",
                           #default = "DA", # Cardboardfish
                           default = "to", # Clickatell, Dhiraagu
                           #default = "phone_number", # Mobile Commons
                           #default = "phone", # Text Magic
                           #default = "To", # Twilio (Untested)
                           requires = IS_NOT_EMPTY(),
                           ),
                     Field("max_length", "integer",
                           default = 480, # Clickatell concat 3
                           ),
                     # If using HTTP Auth (e.g. Mobile Commons)
                     Field("username"),
                     Field("password", "password",
                           readable = False,
                           widget = S3PasswordWidget(),
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  super_entity = "msg_channel",
                  )
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3TropoModel(S3Model):
    """
        Tropo can be used to send & receive SMS, Twitter & XMPP
        https://www.tropo.com
    """
    names = ("msg_tropo_channel",
             "msg_tropo_scratch",
             )
    def model(self):
        """
            Define the msg_tropo_channel table & the msg_tropo_scratch
            staging table for outbound messages
        """
        #T = current.T
        define_table = self.define_table
        set_method = self.set_method
        # ---------------------------------------------------------------------
        # Tropo Channels
        #
        tablename = "msg_tropo_channel"
        define_table(tablename,
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("enabled", "boolean",
                           default = True,
                           #label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("token_messaging"),
                     #Field("token_voice"),
                     *s3_meta_fields())
        self.configure(tablename,
                       super_entity = "msg_channel",
                       )
        set_method("msg", "tropo_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "tropo_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "tropo_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # Tropo Scratch pad for outbound messaging
        # NB no meta-fields: this is a transient work queue, not a
        # normal Eden resource
        #
        tablename = "msg_tropo_scratch"
        define_table(tablename,
                     Field("row_id", "integer"),
                     Field("message_id", "integer"),
                     Field("recipient"),
                     Field("message"),
                     Field("network"),
                     )
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3TwilioModel(S3ChannelModel):
    """
        Twilio Inbound SMS channel
        - for Outbound, use Web API
    """
    names = ("msg_twilio_channel",
             "msg_twilio_sid",
             )
    def model(self):
        """
            Define the msg_twilio_channel table & the msg_twilio_sid
            extension table used for download tracking
        """
        #T = current.T
        define_table = self.define_table
        set_method = self.set_method
        # ---------------------------------------------------------------------
        # Twilio Channels
        #
        tablename = "msg_twilio_channel"
        define_table(tablename,
                     # Instance
                     self.super_link("channel_id", "msg_channel"),
                     Field("name"),
                     Field("description"),
                     Field("enabled", "boolean",
                           default = True,
                           #label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("account_name", length=255, unique=True),
                     Field("url",
                           default = \
                               "https://api.twilio.com/2010-04-01/Accounts"
                           ),
                     Field("account_sid", length=64,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(64),
                                       ],
                           ),
                     Field("auth_token", "password", length=64,
                           readable = False,
                           requires = [IS_NOT_EMPTY(),
                                       IS_LENGTH(64),
                                       ],
                           widget = S3PasswordWidget(),
                           ),
                     *s3_meta_fields())
        self.configure(tablename,
                       onaccept = self.msg_channel_onaccept,
                       super_entity = "msg_channel",
                       )
        set_method("msg", "twilio_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "twilio_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "twilio_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # Twilio Message extensions
        # - store message sid to know which ones we've already downloaded
        #
        tablename = "msg_twilio_sid"
        define_table(tablename,
                     # Component not Instance
                     self.msg_message_id(ondelete = "CASCADE"),
                     # Twilio's unique identifier for the message
                     Field("sid"),
                     *s3_meta_fields())
        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3TwitterModel(S3Model):
    """
        Twitter channels (accounts) & messages (tweets, in & out)
    """
    names = ("msg_twitter_channel",
             "msg_twitter",
             )
    def model(self):
        """
            Define the msg_twitter_channel & msg_twitter tables
        """
        T = current.T
        db = current.db
        configure = self.configure
        define_table = self.define_table
        set_method = self.set_method
        # ---------------------------------------------------------------------
        # Twitter Channel
        #
        password_widget = S3PasswordWidget()
        tablename = "msg_twitter_channel"
        define_table(tablename,
                     # Instance
                     self.super_link("channel_id", "msg_channel"),
                     # @ToDo: Allow different Twitter accounts for different Orgs
                     #self.org_organisation_id(),
                     Field("name",
                           label = T("Name"),
                           ),
                     Field("description",
                           label = T("Description"),
                           ),
                     Field("enabled", "boolean",
                           default = True,
                           label = T("Enabled?"),
                           represent = s3_yes_no_represent,
                           ),
                     # Only one channel may have this flag set
                     # (enforced in twitter_channel_onaccept)
                     Field("login", "boolean",
                           default = False,
                           label = T("Use for Login?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("twitter_account",
                           label = T("Twitter Account"),
                           ),
                     # Get these from https://apps.twitter.com
                     Field("consumer_key", "password",
                           label = T("Consumer Key"),
                           readable = False,
                           widget = password_widget,
                           ),
                     Field("consumer_secret", "password",
                           label = T("Consumer Secret"),
                           readable = False,
                           widget = password_widget,
                           ),
                     Field("access_token", "password",
                           label = T("Access Token"),
                           readable = False,
                           widget = password_widget,
                           ),
                     Field("access_token_secret", "password",
                           label = T("Access Token Secret"),
                           readable = False,
                           widget = password_widget,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  onaccept = self.twitter_channel_onaccept,
                  #onvalidation = self.twitter_channel_onvalidation
                  super_entity = "msg_channel",
                  )
        set_method("msg", "twitter_channel",
                   method = "enable",
                   action = self.msg_channel_enable_interactive)
        set_method("msg", "twitter_channel",
                   method = "disable",
                   action = self.msg_channel_disable_interactive)
        set_method("msg", "twitter_channel",
                   method = "poll",
                   action = self.msg_channel_poll)
        # ---------------------------------------------------------------------
        # Twitter Messages: InBox & Outbox
        #
        tablename = "msg_twitter"
        define_table(tablename,
                     # Instance
                     self.super_link("message_id", "msg_message"),
                     self.msg_channel_id(),
                     s3_datetime(default = "now",
                                 label = T("Posted on"),
                                 ),
                     # Classic tweet length limit
                     Field("body", length=140,
                           label = T("Message"),
                           requires = IS_LENGTH(140),
                           ),
                     Field("from_address", #notnull=True,
                           label = T("From"),
                           represent = self.twitter_represent,
                           requires = IS_NOT_EMPTY(),
                           ),
                     Field("to_address",
                           label = T("To"),
                           represent = self.twitter_represent,
                           ),
                     Field("inbound", "boolean",
                           default = False,
                           label = T("Direction"),
                           represent = lambda direction: \
                               (direction and [T("In")] or \
                                              [T("Out")])[0],
                           ),
                     Field("msg_id", # Twitter Message ID
                           readable = False,
                           writable = False,
                           ),
                     *s3_meta_fields())
        configure(tablename,
                  list_fields = ["id",
                                 #"priority",
                                 #"category",
                                 "body",
                                 "from_address",
                                 "date",
                                 #"location_id",
                                 ],
                  #orderby = ~table.priority,
                  super_entity = "msg_message",
                  )
        # ---------------------------------------------------------------------
        return {}
    # -------------------------------------------------------------------------
    @staticmethod
    def twitter_represent(nickname, show_link=True):
        """
            Represent a Twitter account
            - look up the person with this Twitter handle as a pr_contact &
              show their name (optionally linked to their person record),
              else fall back to the raw nickname
            @param nickname: the Twitter handle
            @param show_link: render as a link to the pr_person record
        """
        if not nickname:
            return current.messages["NONE"]
        db = current.db
        s3db = current.s3db
        table = s3db.pr_contact
        query = (table.contact_method == "TWITTER") & \
                (table.value == nickname)
        row = db(query).select(table.pe_id,
                               limitby=(0, 1)).first()
        if row:
            # NOTE(review): local name shadows the builtin repr()
            repr = s3db.pr_pentity_represent(row.pe_id)
            if show_link:
                # Assume person
                ptable = s3db.pr_person
                row = db(ptable.pe_id == row.pe_id).select(ptable.id,
                                                           limitby=(0, 1)).first()
                if row:
                    link = URL(c="pr", f="person", args=[row.id])
                    return A(repr, _href=link)
            return repr
        else:
            return nickname
    # -------------------------------------------------------------------------
    @staticmethod
    def twitter_channel_onaccept(form):
        """
            onaccept for msg_twitter_channel:
            - if this channel is flagged for Login, clear the flag on all
              other channels, then run standard channel onaccept processing
        """
        if form.vars.login:
            # Ensure only a single account used for Login
            current.db(current.s3db.msg_twitter_channel.id != form.vars.id).update(login = False)
        # Normal onaccept processing
        S3ChannelModel.channel_onaccept(form)
    # -------------------------------------------------------------------------
    @staticmethod
    def twitter_channel_onvalidation(form):
        """
            Complete oauth: take tokens from session + pin from form,
            and do the 2nd API call to Twitter
            NOTE(review): relies on the pre-3.x tweepy API
            (oauth.access_token.key/.secret & API.me()) - verify against
            the installed tweepy version
        """
        T = current.T
        session = current.session
        settings = current.deployment_settings.msg
        s3 = session.s3
        form_vars = form.vars
        if form_vars.pin and s3.twitter_request_key and s3.twitter_request_secret:
            try:
                import tweepy
            except:
                raise HTTP(501, body=T("Can't import tweepy"))
            oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key,
                                        settings.twitter_oauth_consumer_secret)
            oauth.set_request_token(s3.twitter_request_key,
                                    s3.twitter_request_secret)
            try:
                oauth.get_access_token(form_vars.pin)
                form_vars.oauth_key = oauth.access_token.key
                form_vars.oauth_secret = oauth.access_token.secret
                twitter = tweepy.API(oauth)
                form_vars.twitter_account = twitter.me().screen_name
                form_vars.pin = "" # we won't need it anymore
                return
            except tweepy.TweepError:
                session.error = T("Settings were reset because authenticating with Twitter failed")
        # Either user asked to reset, or error - clear everything
        for k in ["oauth_key", "oauth_secret", "twitter_account"]:
            form_vars[k] = None
        for k in ["twitter_request_key", "twitter_request_secret"]:
            s3[k] = ""
# =============================================================================
class S3TwitterSearchModel(S3ChannelModel):
    """
        Twitter Searches
         - results can be fed to KeyGraph

        https://dev.twitter.com/docs/api/1.1/get/search/tweets
    """

    names = ("msg_twitter_search",
             "msg_twitter_result",
             )

    def model(self):
        """
            Defines the search-query and search-result tables and
            registers the poll/keygraph/timeline REST methods
        """

        T = current.T
        db = current.db

        configure = self.configure
        define_table = self.define_table
        set_method = self.set_method

        # ---------------------------------------------------------------------
        # Twitter Search Query
        #
        tablename = "msg_twitter_search"
        define_table(tablename,
                     Field("keywords", "text",
                           label = T("Keywords"),
                           ),
                     # @ToDo: Allow setting a Point & Radius for filtering by geocode
                     #self.gis_location_id(),
                     Field("lang",
                           # Set in controller
                           #default = current.response.s3.language,
                           label = T("Language"),
                           ),
                     Field("count", "integer",
                           default = 100,
                           label = T("# Results per query"),
                           ),
                     Field("include_entities", "boolean",
                           default = False,
                           label = T("Include Entity Information?"),
                           represent = s3_yes_no_represent,
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (T("Entity Information"),
                                                           T("This is required if analyzing with KeyGraph."))),
                           ),
                     # @ToDo: Rename or even move to Component Table
                     Field("is_processed", "boolean",
                           default = False,
                           label = T("Processed with KeyGraph?"),
                           represent = s3_yes_no_represent,
                           ),
                     Field("is_searched", "boolean",
                           default = False,
                           label = T("Searched?"),
                           represent = s3_yes_no_represent,
                           ),
                     *s3_meta_fields())

        configure(tablename,
                  list_fields = ["keywords",
                                 "lang",
                                 "count",
                                 #"include_entities",
                                 ],
                  )

        # Reusable Query ID
        represent = S3Represent(lookup=tablename, fields=["keywords"])
        search_id = S3ReusableField("search_id", "reference %s" % tablename,
                    label = T("Search Query"),
                    ondelete = "CASCADE",
                    represent = represent,
                    requires = IS_EMPTY_OR(
                                IS_ONE_OF_EMPTY(db, "msg_twitter_search.id")
                                ),
                    )

        # Custom REST methods (invoked from the msg controller)
        set_method("msg", "twitter_search",
                   method = "poll",
                   action = self.twitter_search_poll)

        set_method("msg", "twitter_search",
                   method = "keygraph",
                   action = self.twitter_keygraph)

        set_method("msg", "twitter_result",
                   method = "timeline",
                   action = self.twitter_timeline)

        # ---------------------------------------------------------------------
        # Twitter Search Results
        #
        # @ToDo: Store the places mentioned in the Tweet as linked Locations
        #
        tablename = "msg_twitter_result"
        define_table(tablename,
                     # Instance
                     self.super_link("message_id", "msg_message"),
                     # Just present for Super Entity
                     #self.msg_channel_id(),
                     search_id(),
                     s3_datetime(default="now",
                                 label = T("Tweeted on"),
                                 ),
                     Field("tweet_id",
                           label = T("Tweet ID")),
                     Field("lang",
                           label = T("Language")),
                     Field("from_address",
                           label = T("Tweeted by")),
                     Field("body",
                           label = T("Tweet")),
                     # @ToDo: Populate from Parser
                     #Field("category",
                     #      writable = False,
                     #      label = T("Category"),
                     #      ),
                     #Field("priority", "integer",
                     #      writable = False,
                     #      label = T("Priority"),
                     #      ),
                     self.gis_location_id(),
                     # Just present for Super Entity
                     #Field("inbound", "boolean",
                     #      default = True,
                     #      readable = False,
                     #      writable = False,
                     #      ),
                     *s3_meta_fields())

        configure(tablename,
                  list_fields = [#"category",
                                 #"priority",
                                 "body",
                                 "from_address",
                                 "date",
                                 "location_id",
                                 ],
                  #orderby=~table.priority,
                  super_entity = "msg_message",
                  )

        # ---------------------------------------------------------------------
        return {}

    # -----------------------------------------------------------------------------
    @staticmethod
    def twitter_search_poll(r, **attr):
        """
            Perform a Search of Twitter

            S3Method for interactive requests
        """

        id = r.id
        tablename = r.tablename
        # The actual search runs asynchronously in the scheduler
        current.s3task.run_async("msg_twitter_search", args=[id])
        current.session.confirmation = \
            current.T("The search request has been submitted, so new messages should appear shortly - refresh to see them")
        # Filter results to this Search
        redirect(URL(f="twitter_result",
                     vars={"~.search_id": id}))

    # -----------------------------------------------------------------------------
    @staticmethod
    def twitter_keygraph(r, **attr):
        """
            Process Search Results with KeyGraph

            S3Method for interactive requests
        """

        tablename = r.tablename
        current.s3task.run_async("msg_process_keygraph", args=[r.id])
        current.session.confirmation = \
            current.T("The search results are now being processed with KeyGraph")
        # @ToDo: Link to KeyGraph results
        redirect(URL(f="twitter_result"))

    # =============================================================================
    @staticmethod
    def twitter_timeline(r, **attr):
        """
            Display the Tweets on a Simile Timeline

            http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
        """

        if r.representation == "html" and r.name == "twitter_result":
            response = current.response
            s3 = response.s3
            appname = r.application

            # Add core Simile Code
            s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % appname)

            # Add our control script
            if s3.debug:
                s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % appname)
            else:
                s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % appname)

            # Add our data
            # @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
            # add in JS using S3.timeline.eventSource.addMany(events) where events is a []
            if r.record:
                # Single record
                rows = [r.record]
            else:
                # Multiple records
                # @ToDo: Load all records & sort to closest in time
                # http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
                rows = r.resource.select(["date", "body"], limit=2000, as_rows=True)

            data = {"dateTimeFormat": "iso8601",
                    }

            now = r.utcnow
            tl_start = tl_end = now
            events = []
            import re
            for row in rows:
                # Dates: track the earliest/latest tweet to size the timeline
                start = row.date or ""
                if start:
                    if start < tl_start:
                        tl_start = start
                    if start > tl_end:
                        tl_end = start
                    start = start.isoformat()

                # The regex strips @mentions and "RT" from the title
                title = (re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)|RT", "", row.body))
                if len(title) > 30:
                    title = title[:30]

                events.append({"start": start,
                               "title": title,
                               "description": row.body,
                               })
            data["events"] = events
            data = json.dumps(data, separators=SEPARATORS)

            code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))

            # Control our code in static/scripts/S3/s3.timeline.js
            s3.js_global.append(code)

            # Create the DIV
            item = DIV(_id="s3timeline", _class="s3-timeline")

            output = {"item": item}

            # Maintain RHeader for consistency
            if attr.get("rheader"):
                rheader = attr["rheader"](r)
                if rheader:
                    output["rheader"] = rheader

            output["title"] = current.T("Twitter Timeline")
            response.view = "timeline.html"
            return output

        else:
            r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class S3XFormsModel(S3Model):
    """
        XForms are used by the ODK Collect mobile client

        http://eden.sahanafoundation.org/wiki/BluePrint/Mobile#Android
    """

    names = ("msg_xforms_store",)

    def model(self):

        #T = current.T

        # ---------------------------------------------------------------------
        # SMS store for persistence and scratch pad for combining incoming xform chunks
        # (fileno/totalno/partno track reassembly of a multi-part SMS;
        #  message is one 160-character SMS fragment)
        tablename = "msg_xforms_store"
        self.define_table(tablename,
                          Field("sender", length=20),
                          Field("fileno", "integer"),
                          Field("totalno", "integer"),
                          Field("partno", "integer"),
                          Field("message", length=160)
                          )

        # ---------------------------------------------------------------------
        return {}
# =============================================================================
class S3BaseStationModel(S3Model):
    """
        Base Stations (Cell Towers) are a type of Site

        @ToDo: Calculate Coverage from Antenna Height, Radio Power and Terrain
               - see RadioMobile
    """

    names = ("msg_basestation",)

    def model(self):

        T = current.T

        # ---------------------------------------------------------------------
        # Base Stations (Cell Towers)
        #
        # Optionally enforce unique station codes (deployment setting)
        if current.deployment_settings.get_msg_basestation_code_unique():
            db = current.db
            code_requires = IS_EMPTY_OR([IS_LENGTH(10),
                                         IS_NOT_IN_DB(db, "msg_basestation.code")
                                         ])
        else:
            code_requires = IS_LENGTH(10)

        tablename = "msg_basestation"
        self.define_table(tablename,
                          self.super_link("site_id", "org_site"),
                          Field("name", notnull=True,
                                length=64, # Mayon Compatibility
                                label = T("Name"),
                                requires = [IS_NOT_EMPTY(),
                                            IS_LENGTH(64),
                                            ],
                                ),
                          Field("code", length=10, # Mayon compatibility
                                label = T("Code"),
                                requires = code_requires,
                                ),
                          self.org_organisation_id(
                              label = T("Operator"),
                              requires = self.org_organisation_requires(required=True,
                                                                        updateable=True),
                              #widget=S3OrganisationAutocompleteWidget(default_from_profile=True),
                              ),
                          self.gis_location_id(),
                          s3_comments(),
                          *s3_meta_fields())

        # CRUD strings
        current.response.s3.crud_strings[tablename] = Storage(
            label_create=T("Create Base Station"),
            title_display=T("Base Station Details"),
            title_list=T("Base Stations"),
            title_update=T("Edit Base Station"),
            title_upload=T("Import Base Stations"),
            title_map=T("Map of Base Stations"),
            label_list_button=T("List Base Stations"),
            label_delete_button=T("Delete Base Station"),
            msg_record_created=T("Base Station added"),
            msg_record_modified=T("Base Station updated"),
            msg_record_deleted=T("Base Station deleted"),
            msg_list_empty=T("No Base Stations currently registered"))

        self.configure(tablename,
                       deduplicate = S3Duplicate(),
                       super_entity = "org_site",
                       )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return {}
# END =========================================================================
| 39.542146 | 141 | 0.394458 |
8aa8c4ec1edd50aad7cce4dff8e8d3195f7e67cb | 4,122 | py | Python | app/classifiers/color_classifier/color_classifer.py | Stosan/api-ris | 41f40787d4e1eb44aaea019a99f9721ff38bd18a | [
"MIT"
] | null | null | null | app/classifiers/color_classifier/color_classifer.py | Stosan/api-ris | 41f40787d4e1eb44aaea019a99f9721ff38bd18a | [
"MIT"
] | null | null | null | app/classifiers/color_classifier/color_classifer.py | Stosan/api-ris | 41f40787d4e1eb44aaea019a99f9721ff38bd18a | [
"MIT"
] | null | null | null | import datetime
import base64
import json
import os
from io import BytesIO
import pandas as pd
import numpy as np
import PIL.Image as img
from colorthief import ColorThief
from keras.models import load_model
from pathlib import Path
class ColorClassifier:
    """
    Classifies the dominant colour of an uploaded image.

    Pipeline (see compute_prediction):
      1. decode the posted image bytes, resize to 100x100 and save a PNG copy,
      2. extract the dominant RGB colour with ColorThief,
      3. find the closest named colour in the reference CSV library,
      4. feed (R, G, B) to a pre-trained Keras model that predicts one of
         11 common colour classes.
    """

    # Common colour classes, indexed by the model's encoded label (0-10).
    # Replaces the original 11-branch if/elif ladder in postprocessing().
    COLOR_CLASSES = ("Red", "Green", "Blue", "Yellow", "Orange", "Pink",
                     "Purple", "Brown", "Grey", "Black", "White")

    def __init__(self):
        self.path_to_artifacts = "app/classifiers/color_classifier/data/"
        self.path_to_save = "app/classifiers/color_classifier/data/usage/"
        # Column names of the reference colour library CSV
        self.index = ["color", "color_name", "hex", "R", "G", "B"]
        self.col_lib_csv = pd.read_csv(self.path_to_artifacts + 'colors_library.csv',
                                       names=self.index, header=None)
        # Load saved classifier model, including its weights and the optimizer
        self.model = load_model(self.path_to_artifacts + 'classifier_model.h5')

    def base64_to_image(self, base64_str):
        """Decode image bytes, resize to 100x100, save as PNG; return its path.

        NOTE(review): despite the name, the argument is expected to be
        already-decoded image *bytes* (it is wrapped in BytesIO directly,
        never base64-decoded) - confirm against the caller.
        """
        date = datetime.datetime.now()
        folder = date.strftime("%d_%m_%Y_%H-%M")
        image_data = BytesIO(base64_str)
        img_data = img.open(image_data)
        new_image_data = img_data.resize((100, 100))
        image_path = Path(self.path_to_save, folder)
        image_path.mkdir(parents=True, exist_ok=True)
        image_data_path = self.path_to_save + folder + '/kobimdi.png'
        if image_path:
            new_image_data.save(image_data_path, "PNG")
        return str(Path(image_data_path))

    def color_stripping(self, input_data):
        """Return the dominant (r, g, b) colour of the image file at *input_data*."""
        # (leftover debug print of the path removed)
        colorthief = ColorThief(input_data)
        r, g, b = colorthief.get_color(quality=1)
        return r, g, b

    def preprocessing(self, R, G, B):
        """Return the library colour name closest to (R, G, B).

        Uses Manhattan (L1) distance over the reference CSV; ties resolve
        in favour of the later row (<=), matching the original behaviour.
        """
        minimum = 10000  # larger than the max possible L1 distance (3 * 255)
        cname = None
        for i in range(len(self.col_lib_csv)):
            d = abs(R - int(self.col_lib_csv.loc[i, "R"])) \
                + abs(G - int(self.col_lib_csv.loc[i, "G"])) \
                + abs(B - int(self.col_lib_csv.loc[i, "B"]))
            if d <= minimum:
                minimum = d
                cname = self.col_lib_csv.loc[i, "color_name"]
        return cname

    def predict(self, input_data):
        """Return the encoded class labels (argmax over the model outputs)."""
        pred = self.model.predict(input_data)
        return np.argmax(pred, axis=1)

    def postprocessing(self, input_data):
        """Map encoded labels (0-10) to common colour names.

        Labels outside the known range are skipped, matching the original
        if/elif ladder which appended nothing for unknown values.
        """
        classes = ColorClassifier.COLOR_CLASSES
        return [classes[label] for label in input_data
                if 0 <= label < len(classes)]

    def rgb2hex(self, r, g, b):
        """Return the '#rrggbb' hex string for an RGB triple."""
        return '#%02x%02x%02x' % (r, g, b)

    def compute_prediction(self, input_data):
        """Run the full pipeline on raw image bytes.

        Returns a dict with the common colour class, the closest specific
        colour name, its RGB components and hex code; on any failure it
        returns a status dict instead of raising (the API layer depends
        on this, hence the deliberately broad except).
        """
        try:
            img_res_data = self.base64_to_image(input_data)
            r, g, b = self.color_stripping(img_res_data)
            hexcode = self.rgb2hex(r, g, b)
            pred_df = pd.DataFrame({'r': [r], 'g': [g], 'b': [b]})
            Colorname = self.preprocessing(r, g, b) + ' R=' + str(r) + ' G=' + str(g) + ' B=' + str(b)
            prediction = self.predict(pred_df)
            prediction = self.postprocessing(prediction)
            spc_color = Colorname.split()[0]
            prediction = {"Color_common": prediction,
                          "Specific colour": spc_color,
                          "rgb_color_code": Colorname.split()[1:5],
                          "hex code": hexcode,
                          "status": "OK"}
        except Exception as e:
            return {"status": "Prediction Error", "message": str(e)}
        return prediction
| 37.472727 | 158 | 0.586851 |
9d5dbaa251ba6ceb296c5e450eb74e0924ec6df5 | 10,443 | py | Python | skika/data/hyper_plane_generator_redund.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 4 | 2020-04-29T03:36:36.000Z | 2021-09-01T02:46:19.000Z | skika/data/hyper_plane_generator_redund.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 2 | 2020-05-04T07:46:31.000Z | 2022-03-14T20:28:25.000Z | skika/data/hyper_plane_generator_redund.py | cpearce/scikit-ika | 01f90ac3e7963e4d05f73316a7d14de0d8f08d1e | [
"BSD-3-Clause"
] | 3 | 2020-02-21T00:27:32.000Z | 2020-05-04T07:04:35.000Z | # Modified version of scikit-multiflow code to include generation of redundant attributes
import numpy as np
from skmultiflow.data.base_stream import Stream
from skmultiflow.utils import check_random_state
import math
class HyperplaneGeneratorRedund(Stream):
    """ Hyperplane stream generator with redundant attributes.

    Modified version of scikit-multiflow's HyperplaneGenerator that can make
    a fraction of the attributes redundant (copies of other attributes).

    Generates a problem of predicting the class of a rotating hyperplane.
    A hyperplane in d-dimensional space is the set of points :math:`x` that
    satisfy :math:`\\sum^{d}_{i=1} w_i x_i = w_0 = \\sum^{d}_{i=1} w_i`.
    Examples for which :math:`\\sum^{d}_{i=1} w_i x_i > w_0` are labeled
    positive, otherwise negative. Concept drift is introduced by adding
    :math:`d \\sigma` to each drifting weight, where :math:`\\sigma` is the
    probability that the direction of change is reversed and :math:`d` is
    the change applied to every example.

    Parameters
    ----------
    random_state: int, RandomState instance or None, optional (default=None)
        Seed / generator for the internal random state.
    n_features: int (Default 10)
        The number of attributes to generate. Higher than 2.
    n_drift_features: int (Default: 2)
        The number of attributes with drift. Higher than 2.
    perc_redund_feature: float (Default: 0.0)
        The percentage of features to be redundant. From 0.0 to 1.0.
    mag_change: float (Default: 0.0)
        Magnitude of the change for every example. From 0.0 to 1.0.
    noise_percentage: float (Default: 0.05)
        Percentage of noise to add to the data. From 0.0 to 1.0.
    sigma_percentage: int (Default 0.1)
        Percentage of probability that the direction of change is reversed.
        From 0.0 to 1.0.

    References
    ----------
    .. [1] G. Hulten, L. Spencer, and P. Domingos. Mining time-changing data
       streams. In KDD'01, pages 97-106, San Francisco, CA, 2001. ACM Press.
    """

    def __init__(self, random_state=None, n_features=10, n_drift_features=2, perc_redund_feature = 0, mag_change=0.0,
                 noise_percentage=0.05, sigma_percentage=0.1):
        super().__init__()

        self.random_state = random_state
        self.n_num_features = n_features
        self.n_features = self.n_num_features
        # n_redund_features = floor(n_features * perc_redund_features)
        self.perc_redund_features = perc_redund_feature
        self.n_classes = 2
        self.n_drift_features = n_drift_features
        self.mag_change = mag_change
        self.sigma_percentage = sigma_percentage
        self.noise_percentage = noise_percentage
        self.n_targets = 1
        self._random_state = None   # This is the actual random_state object used internally
        self._next_class_should_be_zero = False
        self._weights = np.zeros(self.n_features)
        self._sigma = np.zeros(self.n_features)
        self.name = "Hyperplane Generator"

        self.__configure()

    def __configure(self):
        self.target_names = ["target_0"]
        self.feature_names = ["att_num_" + str(i) for i in range(self.n_features)]
        self.target_values = [i for i in range(self.n_classes)]
        self.n_redund_features = math.floor(self.n_features * self.perc_redund_features)
        self.n_not_redund_features = self.n_features - self.n_redund_features

    @property
    def n_drift_features(self):
        """ Retrieve the number of drift features.

        Returns
        -------
        int
            The total number of drift features.
        """
        return self._n_drift_features

    @n_drift_features.setter
    def n_drift_features(self, n_drift_features):
        """ Set the number of drift features
        """
        self._n_drift_features = n_drift_features

    @property
    def perc_redund_features(self):
        """ Retrieve the percentage of redundant features.

        Returns
        -------
        float
            The percentage of redundant features.
        """
        return self._perc_redund_features

    @perc_redund_features.setter
    def perc_redund_features(self, perc_redund_features):
        """ Set the percentage of redundant features (0.0..1.0)
        """
        if (0.0 <= perc_redund_features) and (perc_redund_features <= 1.0):
            self._perc_redund_features = perc_redund_features
        else:
            raise ValueError("percentage of redundant features should be in [0.0..1.0], {} was passed".format(perc_redund_features))

    @property
    def noise_percentage(self):
        """ Retrieve the value of the noise percentage

        Returns
        -------
        float
            percentage of the noise
        """
        return self._noise_percentage

    @noise_percentage.setter
    def noise_percentage(self, noise_percentage):
        """ Set the value of the noise percentage.

        Parameters
        ----------
        noise_percentage: float (0.0..1.0)
        """
        if (0.0 <= noise_percentage) and (noise_percentage <= 1.0):
            self._noise_percentage = noise_percentage
        else:
            raise ValueError("noise percentage should be in [0.0..1.0], {} was passed".format(noise_percentage))

    @property
    def mag_change(self):
        """ Retrieve the value of the magnitude of change.

        Returns
        -------
        float
            magnitude of change
        """
        return self._mag_change

    @mag_change.setter
    def mag_change(self, mag_change):
        """ Set the value of the magnitude of change

        Parameters
        ----------
        mag_change: float (0.0..1.0)
        """
        if (0.0 <= mag_change) and (mag_change <= 1.0):
            self._mag_change = mag_change
        else:
            raise ValueError("noise percentage should be in [0.0..1.0], {} was passed".format(mag_change))

    @property
    def sigma_percentage(self):
        """ Retrieve the value of the sigma percentage

        Returns
        -------
        float
            percentage of the sigma
        """
        return self._sigma_percentage

    @sigma_percentage.setter
    def sigma_percentage(self, sigma_percentage):
        """ Set the value of the sigma percentage.

        Parameters
        ----------
        sigma_percentage: float (0.0..1.0)
        """
        if (0.0 <= sigma_percentage) and (sigma_percentage <= 1.0):
            self._sigma_percentage = sigma_percentage
        else:
            raise ValueError("sigma percentage should be in [0.0..1.0], {} was passed".format(sigma_percentage))

    def prepare_for_use(self):
        """
        Prepares the stream for use.

        Notes
        -----
        This function should always be called after the stream initialization.
        """
        self._random_state = check_random_state(self.random_state)
        self._next_class_should_be_zero = False
        self.sample_idx = 0
        for i in range(self.n_features):
            self._weights[i] = self._random_state.rand()
            # Only the first n_drift_features are allowed to drift
            self._sigma[i] = 1 if (i < self.n_drift_features) else 0
        self.n_redund_features = math.floor(self.n_features * self.perc_redund_features)
        self.n_not_redund_features = self.n_features - self.n_redund_features

        # Initialise variables for redundancy: each redundant feature copies
        # a randomly chosen non-redundant feature.
        # BUGFIX: the original referenced self._sample_random_state, which is
        # never defined on this class (AttributeError at runtime); the
        # generator's own random state must be used instead.
        self.index_redund = [self._random_state.randint(0, (self.n_features - self.n_redund_features - 1))
                             for ind in range(self.n_redund_features)]
        self.coef_redund = [self._random_state.rand() + 0.1 for ind in range(self.n_redund_features)]

    def next_sample(self, batch_size=1):
        """ next_sample

        The features are generated with the random generator, initialized
        with the seed passed by the user. The classification function
        decides, as a function of the weighted sum, whether the instance
        belongs to class 0 or class 1. Noise is then optionally added,
        and drift is generated.

        Parameters
        ----------
        batch_size: int
            The number of samples to return.

        Returns
        -------
        tuple or tuple list
            A tuple with the features matrix and the labels matrix for
            the batch_size samples that were requested.
        """
        data = np.zeros([batch_size, self.n_features + 1])

        for j in range(batch_size):
            sum_weights = np.sum(self._weights)
            self.sample_idx += 1
            # Renamed from "sum" to avoid shadowing the builtin
            weighted_sum = 0
            for i in range(self.n_features):
                if i < self.n_not_redund_features:
                    data[j, i] = self._random_state.rand()
                    weighted_sum += self._weights[i] * data[j, i]
                else:
                    # Redundant feature: copy of a non-redundant one
                    data[j, i] = data[j, self.index_redund[i - self.n_not_redund_features]]
                    weighted_sum += self._weights[i] * data[j, i]

            group = 1 if weighted_sum >= sum_weights * 0.5 else 0

            # Flip the label with probability ~noise_percentage
            if 0.01 + self._random_state.rand() <= self.noise_percentage:
                group = 1 if (group == 0) else 0

            data[j, -1] = group

        self._generate_drift()

        self.current_sample_x = data[:, :self.n_features]
        self.current_sample_y = data[:, self.n_features:].flatten().astype(int)

        return self.current_sample_x, self.current_sample_y

    def _generate_drift(self):
        """
        Generate drift in the stream: shift each drifting weight and
        reverse its direction with probability ~sigma_percentage.
        """
        for i in range(self.n_drift_features):
            self._weights[i] += float(float(self._sigma[i]) * float(self.mag_change))
            if (0.01 + self._random_state.rand()) <= self.sigma_percentage:
                self._sigma[i] *= -1
af569e57ff54e852e1504f5cd095cadacb30058e | 1,863 | py | Python | tests/flexical/text_processing/test_bow.py | renanlage/flexical | 31143b17266eee4c4e9e7103c2ee7b81733f5fd5 | [
"MIT"
] | null | null | null | tests/flexical/text_processing/test_bow.py | renanlage/flexical | 31143b17266eee4c4e9e7103c2ee7b81733f5fd5 | [
"MIT"
] | null | null | null | tests/flexical/text_processing/test_bow.py | renanlage/flexical | 31143b17266eee4c4e9e7103c2ee7b81733f5fd5 | [
"MIT"
] | null | null | null | from unittest.case import TestCase
from flexical.text_processing.bow import BowGenerator
class BowTest(TestCase):
    """Unit tests for flexical's BowGenerator (bag-of-words builder)."""

    def test_return_expected_bow_if_mask_is_not_applied(self):
        """Without the SO-CAL mask, every token appears in the vocabulary."""
        docs = [u'grande coisa essa coisa nao muito grande se o mundo acabar'.split(), [u'doideira', u'isso']]
        bow_generator = BowGenerator(apply_socal_mask=False)
        bows = bow_generator.fit_transform(docs)
        vocabulary = bow_generator.get_feature_names()
        self.assertEqual([u'acabar', u'coisa', u'doideira', u'essa', u'grande', u'isso', u'muito', u'mundo',
                          u'nao', u'o', u'se'], vocabulary)
        self.assertEqual(bows.toarray().tolist(),
                         [[1, 2, 0, 1, 2, 0, 1, 1, 1, 1, 1],
                          [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0]])

    def test_return_expected_bow_if_mask_is_applied(self):
        """With the mask (default), the vocabulary shrinks and counts can be negative."""
        docs = [u'grande coisa essa coisa nao muito grande se o mundo acabar'.split(), [u'doideira', u'isso']]
        bow_generator = BowGenerator()
        bows = bow_generator.fit_transform(docs)
        vocabulary = bow_generator.get_feature_names()
        self.assertEqual([u'coisa', u'doideira', u'essa', u'grande', u'isso'], vocabulary)
        self.assertEqual(bows.toarray().tolist(),
                         [[2, 0, 1, -1, 0],
                          [0, 1, 0, 0, 1]])

    def test_ignore_stopwords_in_bows_and_vocabulary_if_provided(self):
        """Stopwords passed to the generator are excluded from vocabulary and counts."""
        docs = [u'grande coisa essa coisa nao muito grande se o mundo acabar'.split(), [u'doideira', u'isso']]
        bow_generator = BowGenerator(stopwords={u'coisa'})
        bows = bow_generator.fit_transform(docs)
        vocabulary = bow_generator.get_feature_names()
        self.assertEqual([u'doideira', u'essa', u'grande', u'isso'], vocabulary)
        self.assertEqual(bows.toarray().tolist(), [[0, 1, -1, 0], [1, 0, 0, 1]])
8b565d0c3fa6b86a5d81db7c44ace217267fb951 | 789 | py | Python | web/core/migrations/0142_auto_20210510_1046.py | MTES-MCT/biocarburants | ff084916e18cdbdc41400f36fa6cc76a5e05900e | [
"MIT"
] | 4 | 2020-03-22T18:13:12.000Z | 2021-01-25T10:33:31.000Z | web/core/migrations/0142_auto_20210510_1046.py | MTES-MCT/carbure | 2876756b760ab4866fa783bb40e61a046eebb1ab | [
"MIT"
] | 20 | 2020-07-06T14:33:14.000Z | 2022-03-15T16:54:17.000Z | web/core/migrations/0142_auto_20210510_1046.py | MTES-MCT/biocarburants | ff084916e18cdbdc41400f36fa6cc76a5e05900e | [
"MIT"
] | 4 | 2020-04-03T12:19:12.000Z | 2021-06-15T12:20:57.000Z | # Generated by Django 3.2 on 2021-05-10 08:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0141_auto_20210510_1044'),
]
operations = [
migrations.AlterField(
model_name='lottransaction',
name='delivery_status',
field=models.CharField(choices=[('N', 'En Attente'), ('A', 'Accepté'), ('R', 'Refusé'), ('AC', 'À corriger'), ('AA', 'Corrigé'), ('F', 'Déclaré')], default='N', max_length=64),
),
migrations.AlterField(
model_name='lotv2',
name='status',
field=models.CharField(choices=[('Draft', 'Brouillon'), ('Validated', 'Validé'), ('Declared', 'Déclaré')], default='Draft', max_length=64),
),
]
| 32.875 | 188 | 0.572877 |
93fa7b49050e541a808b9d96c46729249722d248 | 4,520 | py | Python | thonny/plugins/goto_definition.py | jharris1993/thonny | acc9ee88ebb9eebdfe72c543a39346a5256f53ff | [
"MIT"
] | 1 | 2022-02-09T06:25:31.000Z | 2022-02-09T06:25:31.000Z | thonny/plugins/goto_definition.py | jharris1993/thonny | acc9ee88ebb9eebdfe72c543a39346a5256f53ff | [
"MIT"
] | null | null | null | thonny/plugins/goto_definition.py | jharris1993/thonny | acc9ee88ebb9eebdfe72c543a39346a5256f53ff | [
"MIT"
] | null | null | null | from logging import getLogger
import os.path
import tkinter as tk
from tkinter import messagebox
from typing import cast, Set
from thonny import get_workbench, get_runner
from thonny.codeview import CodeViewText, SyntaxText
from thonny.common import InlineCommand
from thonny.editor_helpers import get_text_filename, get_relevant_source_and_cursor_position
from thonny.languages import tr
from thonny.misc_utils import running_on_mac_os
from thonny.ui_utils import control_is_pressed, command_is_pressed, get_hyperlink_cursor
logger = getLogger(__name__)
class GotoHandler:
    """Implements Ctrl/Cmd+click "go to definition" in Thonny's editor.

    Binds to editor text widgets: hovering with the modifier pressed
    underlines identifiers; clicking asks the backend for the definition
    and opens it.
    """

    def __init__(self):
        wb = get_workbench()
        wb.bind_class("EditorCodeViewText", "<1>", self.request_definitions, True)
        wb.bind_class("EditorCodeViewText", "<Any-Motion>", self.on_motion, True)
        wb.bind_class("EditorCodeViewText", "<Any-Leave>", self.remove_underline, True)
        # Releasing the modifier key should also clear the underline
        if running_on_mac_os():
            wb.bind_class("EditorCodeViewText", "<Command-KeyRelease>", self.remove_underline, True)
        else:
            wb.bind_class("EditorCodeViewText", "<Control-KeyRelease>", self.remove_underline, True)
        wb.bind("get_definitions_response", self.handle_definitions_response, True)

    def request_definitions(self, event=None):
        """On modifier+click, ask the backend for definitions at the cursor."""
        if not self.proper_modifier_is_pressed(event):
            return

        assert isinstance(event.widget, CodeViewText)
        text = event.widget
        source, row, column = get_relevant_source_and_cursor_position(text)
        filename = get_text_filename(text)

        # Backend may not be running yet
        if not get_runner() or not get_runner().get_backend_proxy():
            return
        get_runner().send_command(
            InlineCommand(
                "get_definitions", source=source, row=row, column=column, filename=filename
            )
        )

    def proper_modifier_is_pressed(self, event: tk.Event) -> bool:
        """Return True when the platform's goto modifier (Cmd/Ctrl) is down."""
        if running_on_mac_os():
            return command_is_pressed(event)
        else:
            return control_is_pressed(event)

    def handle_definitions_response(self, msg):
        """Open the (single) definition returned by the backend."""
        defs = msg.definitions
        if len(defs) != 1:
            messagebox.showerror(
                tr("Problem"), tr("Could not find definition"), master=get_workbench()
            )
            return

        # TODO: handle multiple results like PyCharm
        module_path = str(defs[0].module_path)
        if not os.path.isfile(module_path):
            logger.warning("%s is not a file", module_path)
            return
        module_name = defs[0].module_name
        row = defs[0].row
        if module_path and row is not None:
            get_workbench().get_editor_notebook().show_file(module_path, row)
        elif module_name == "" and row is not None:  # current editor
            get_workbench().get_editor_notebook().get_current_editor().select_range(row)

    def on_motion(self, event):
        """Underline the identifier under the mouse while the modifier is held."""
        text = cast(SyntaxText, event.widget)
        if self.proper_modifier_is_pressed(event):
            self.remove_underline(event)
            start_index = text.index(f"@{event.x},{event.y} wordstart")
            end_index = text.index(f"@{event.x},{event.y} wordend")
            # sometimes, start_index will contain wrong line number
            start_line, start_col = start_index.split(".")
            end_line, end_col = end_index.split(".")
            if start_line != end_line:
                start_index = end_line + "." + start_col
            word = text.get(start_index, end_index)
            # Only underline identifier-like words outside strings/comments
            if (
                word
                and (word[0].isalpha() or word[0] == "_")
                # and not iskeyword(word)
                and self._index_doesnt_have_tags(
                    text,
                    start_index,
                    {"string", "string3", "open_string", "open_string3", "comment"},
                )
            ):
                text.tag_add("name_link", start_index, end_index)
                text["cursor"] = get_hyperlink_cursor()
                text.underlined = True
        else:
            if getattr(text, "underlined", False):
                self.remove_underline(event)

    def _index_doesnt_have_tags(self, text, index, tags: Set[str]) -> bool:
        """Return True when none of *tags* is present at *index*."""
        return not (set(text.tag_names(index)) & tags)

    def remove_underline(self, event=None):
        """Remove the hyperlink underline and restore the default cursor."""
        text = cast(SyntaxText, event.widget)
        text.tag_remove("name_link", "1.0", "end")
        text["cursor"] = ""
        text.underlined = False
def load_plugin() -> None:
    """Thonny plugin entry point: instantiating the handler registers its
    workbench event bindings. The original stored the instance in a local
    variable that went out of scope immediately, so no reference is kept
    either way - the unused local has been removed."""
    GotoHandler()
| 38.305085 | 100 | 0.630752 |
e3374ba6d354cebd76b34dd7f64b2f415e9529d3 | 717 | py | Python | raw/day02/part1.py | aesdeef/advent-of-code-2021 | 4561bcf12ac03d360f5b28c48ef80134f97613b9 | [
"MIT"
] | 2 | 2021-12-03T06:18:27.000Z | 2021-12-06T11:28:33.000Z | raw/day02/part1.py | aesdeef/advent-of-code-2021 | 4561bcf12ac03d360f5b28c48ef80134f97613b9 | [
"MIT"
] | null | null | null | raw/day02/part1.py | aesdeef/advent-of-code-2021 | 4561bcf12ac03d360f5b28c48ef80134f97613b9 | [
"MIT"
] | null | null | null | hor = 0
# Advent of Code 2021, day 2 - submarine movement commands.
# Part 1: "forward" moves horizontally; "down"/"up" change depth directly.
hor = 0
dep = 0
with open("../../input/02.txt", "r") as f:
    for line in f:
        parts = line.strip().split()
        # Lines not shaped like "<command> <number>" are skipped,
        # matching the original match/case fall-through behaviour.
        if len(parts) == 2 and parts[0] in ("forward", "down", "up"):
            command = parts[0]
            amount = int(parts[1])
            if command == "forward":
                hor += amount
            elif command == "down":
                dep += amount
            else:
                dep -= amount
print(hor, dep, hor * dep)

# Part 2: "down"/"up" adjust the aim; "forward X" moves hor by X and
# depth by aim * X.
aim = 0
hor = 0
dep = 0
with open("../../input/02.txt", "r") as f:
    for line in f:
        parts = line.strip().split()
        if len(parts) == 2 and parts[0] in ("forward", "down", "up"):
            command = parts[0]
            amount = int(parts[1])
            if command == "forward":
                hor += amount
                dep += aim * amount
            elif command == "down":
                aim += amount
            else:
                aim -= amount
print(hor, dep, hor * dep)
| 23.9 | 42 | 0.404463 |
c56dc4ba101de02fd03464262394bf4095af5c5c | 15,067 | py | Python | ensembles/TrnBK_TstBK/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | 1 | 2020-11-29T12:42:30.000Z | 2020-11-29T12:42:30.000Z | ensembles/TrnBK_TstBK/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | ensembles/TrnBK_TstBK/ESNtrainCV.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Config
# biased_regress = True
# normal_equations = True
dataset = "training_1"
path = "../" + dataset +"/"
kfold_split = 10
nan_to_zero = True
mm = False
std = False
numpy_load = True
nanfill = False
## ESN parameters
N_def = [100] # Neurons
scale_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # scaling
mem_def = [0.001, 0.025, 0.050, 0.075, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] # memory
exponent_def = 1.0 # sigmoid exponent
# Script name struct for report
#script_name = 'ESNtrainCV'
#name_struct_meta = "_N_scale_mem"
#name_struct = '_{:03d}_{:1.3f}_{:1.3f}'.format(N_def, scale_def, mem_def)
## Imports
import numpy as np
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
#import matplotlib.pyplot as plt
import ESNtools
import GSK
#Needed for reporting
import platform
import time
# Fix boundary nans (replicate head/tail vals)
def nan_bounds(feats):
    """Fill NaN runs at the head and tail of 1-D array `feats` in place.

    Leading NaNs are replaced with the first finite value and trailing
    NaNs with the last finite value (boundary replication), so interior
    NaNs can afterwards be interpolated between finite neighbours.

    Assumes `feats` holds at least one finite value; callers check this
    before calling (an all-NaN input would loop past the array bounds).
    """
    nanidx = np.where(np.isnan(feats))[0]
    pointer_left = 0
    pointer_right = len(feats)-1
    fix_left = pointer_left in nanidx
    fix_right = pointer_right in nanidx
    # Walk right to the first finite value, then back-fill the head.
    while fix_left:
        if pointer_left in nanidx:
            pointer_left += 1
        else:
            val_left = feats[pointer_left]
            # np.float was removed in NumPy 1.24; use the builtin float dtype.
            feats[:pointer_left] = val_left*np.ones((1,pointer_left),dtype=float)
            fix_left = False
    # Walk left to the last finite value, then forward-fill the tail.
    while fix_right:
        if pointer_right in nanidx:
            pointer_right -= 1
        else:
            val_right = feats[pointer_right]
            feats[pointer_right+1:] = val_right*np.ones((1,len(feats)-pointer_right-1),dtype=float)
            fix_right = False
# nan interpolation
def nan_interpolate(feats):
    """Replace interior NaN runs of 1-D array `feats` in place.

    Each contiguous run of NaNs is overwritten with a constant derived from
    its finite neighbours (average of the values just left and right of the
    run). Assumes nan_bounds() already fixed boundary NaNs, so every run has
    a finite left neighbour and feats[nanpos+1] stays within range.
    """
    nanidx = np.where(np.isnan(feats))[0]
    nan_remain = len(nanidx)
    nanid = 0
    while nan_remain > 0:
        nanpos = nanidx[nanid]
        # Left neighbour of the run; finite because boundaries were fixed.
        nanval = feats[nanpos-1]
        nan_remain -= 1
        nandim = 1
        initpos = nanpos
        # Check whether it extends
        while nanpos+1 in nanidx:
            nanpos += 1
            nanid += 1
            nan_remain -= 1
            nandim += 1
        # Average sides
        if np.isfinite(feats[nanpos+1]):
            nanval = 0.5 * (nanval + feats[nanpos+1])
        # Single value average
        # NOTE(review): for a single-NaN run this folds in the right
        # neighbour a second time (result = 0.25*left + 0.75*right),
        # which looks unintentional -- confirm before changing.
        if nandim == 1:
            nanval = 0.5 * (nanval + feats[nanpos+1])
        feats[initpos:initpos+nandim] = nanval*np.ones((1,nandim),dtype=np.double)
        nanpos += 1
        nanid += 1
## Get sepsis patients
def get_sepsis_patients(sepsis_label, patient, n_patients=None):
    """Flag every record belonging to a patient that ever develops sepsis.

    Parameters
    ----------
    sepsis_label : 1-D array of 0/1 sepsis labels, one per record.
    patient : 1-D array of integer patient ids (0..n-1), one per record.
    n_patients : optional patient count; derived from `patient` when
        omitted (the original read the module-global `n` instead, a
        hidden dependency).

    Returns
    -------
    patient_sep : 1-D int array, 1 for every record of a septic patient.
    patient_sep_idx : indices of records belonging to septic patients.
    patient_healthy_idx : indices of records belonging to healthy patients.
    """
    if n_patients is None:
        n_patients = len(np.unique(patient))
    # np.int was removed in NumPy 1.24; use the builtin int dtype.
    patient_sep = np.zeros(len(sepsis_label), dtype=int)
    for i in range(n_patients):
        i_pat = np.where(patient == i)[0]
        # A patient is septic if any of their records carries the label.
        is_septic = int(np.sum(sepsis_label[i_pat]) > 0)
        patient_sep[i_pat] = is_septic * np.ones(len(i_pat), dtype=int)
    patient_sep_idx = np.where(patient_sep != 0)[0]
    patient_healthy_idx = np.where(patient_sep == 0)[0]
    return patient_sep, patient_sep_idx, patient_healthy_idx
## Create the feature matrix
# Either parse the raw challenge files (numpy_load == False) or load
# pre-extracted arrays from ../npy (numpy_load == True).
features = []
patient = []
sepsis_label = []
dataloaded = False
## Read data
if not numpy_load:
    ## Folder and files
    fnames = os.listdir(path)
    fnames.sort()
    if 'README.md' in fnames:
        fnames.remove('README.md')
    print('last file: ', fnames[-1])
    n = len(fnames)
    print(n, ' files present')
    ## read data
    for i in range(n):
        input_file = os.path.join(path, fnames[i])
        if i == 0:
            # Keep the column header once, from the first file.
            data, sep_lab, columns = ESNtools.read_challenge_data_label(input_file, return_header=True)
        else:
            data, sep_lab = ESNtools.read_challenge_data_label(input_file)
        features.append(data)
        sepsis_label.append(sep_lab)
        # One patient id per record row.
        # np.int was removed in NumPy 1.24; use the builtin int dtype.
        pat = i * np.ones((sep_lab.shape), dtype=int)
        patient.append(pat)
    feature_matrix = np.concatenate(features)
    del(features)
    sepsis_label = np.concatenate(sepsis_label)
    patient = np.concatenate(patient)
    dataloaded = True
else:
    npyfilename = "../npy/" + dataset + "_patient.npy"
    patient = np.load(npyfilename)
    print(npyfilename, " loaded")
    npyfilename = "../npy/" + dataset + "_Y.npy"
    sepsis_label = np.load(npyfilename)
    print(npyfilename, " loaded")
    # Add nanfill tag so the matching feature file variant is picked below.
    if nanfill:
        dataset = dataset + "_nanfill"
    if mm:
        npyfilename = "../npy/" + dataset + "_mm.npy"
        # Data is already min-max scaled on disk; skip re-scaling later.
        mm = False
        print(npyfilename, '(mm) to be loaded')
    else:
        npyfilename = "../npy/" + dataset + ".npy"
        print(npyfilename, '(not mm) to be loaded')
    n = len(np.unique(patient))
    print(n, ' files present')
    dataloaded = True
    feature_matrix = np.load(npyfilename)
## Flatten patient
patient = patient.flatten()
## Separate pointers
# NOTE: basic slicing returns views, so scaling feature_phys in place
# below also modifies the corresponding columns of feature_matrix.
feature_phys = feature_matrix[:,:-6] ## Physiology
feature_demog = feature_matrix[:,-6:] ## Demographics
## Normalize mm(all) or std (sepsis, phys) vals, feature-based
if mm:
    # Min-max scale each patient's records independently.
    scaler = MinMaxScaler()
    for i in range(n):
        i_pat = np.where(patient==i)[0]
        scaler.fit(feature_matrix[i_pat,:])
        feature_matrix[i_pat,:] = scaler.transform(feature_matrix[i_pat,:])
elif std:
    ## (Get sepsis patients)
    patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
    # Standardize physiology using statistics of healthy patients only.
    scaler = StandardScaler()
    scaler.fit(feature_phys[patient_healthy_idx,:])
    feature_phys[:,:] = scaler.transform(feature_phys[:,:])
## nan to zero
if nan_to_zero:
    feature_matrix[np.isnan(feature_matrix)]=0
    print("Changed nan to 0")
## Septic groups stratify
patient_sep, patient_sep_idx, patient_healthy_idx = get_sepsis_patients(sepsis_label, patient)
#healthy_patient_list = np.unique(patient[patient_healthy_idx])
#sep_patient_list = np.unique(patient[patient_sep_idx])
## Nonlinear mapping function
sigmoid_exponent = exponent_def
func = ESNtools.sigmoid
#SFK
#skf = StratifiedKFold(n_splits=kfold_split)
#skf.get_n_splits(X)
#GSKF
# Group-stratified K-fold: each patient's records stay in one fold, and
# septic/healthy patients are balanced across the 10 folds.
groups = patient
train_index, test_index = GSK.GroupStratifiedKFold(np.hstack([patient_sep.reshape(-1,1), groups.reshape(-1,1)]), 10)
#ENSEMBLES keep function
def keep_ensembles(i, ensemble_results, results, ensemble_target, target, ensemble_patient, patient):
    """Accumulate one ensemble member's CV predictions.

    Appends `results` on every call; `target` and `patient` are appended only
    for the first member (i == 0) since they are identical across members.

    Bug fix: the original returned the value of list.append(), which is
    always None, so the caller's accumulator lists were overwritten with
    None after the first call and the second iteration crashed. The lists
    themselves are returned instead.
    """
    ensemble_results.append(results)
    if i == 0:
        ensemble_target.append(target)
        ensemble_patient.append(patient)
    return ensemble_results, ensemble_target, ensemble_patient
#Gridsearch point function
def get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index):
    """Evaluate one (N, scale, mem) point of the ESN hyper-parameter grid.

    Feeds the per-patient features through an Echo State Network, re-running
    the NaN fill for each hour so only data up to that hour is used, trains
    linear readout weights on each group-stratified CV fold, pools the fold
    predictions, and writes a per-point report file.

    Parameters
    ----------
    feature_matrix : (records, features) input array.
    patient : per-record integer patient ids; a patient's records are contiguous.
    sepsis_label : per-record 0/1 targets.
    M, Mb : fixed random ESN input weight and bias masks.
    N : number of ESN neurons.
    scale, mem : ESN input scaling and memory parameters.
    sigmoid_exponent : exponent of the sigmoid nonlinearity.
    train_index, test_index : per-fold index arrays from GroupStratifiedKFold.
    """
    script_name = 'ESNtrainCV'
    name_struct_meta = "_N_scale_mem"
    name_struct = '_{:03d}_{:1.4f}_{:1.4f}'.format(N, scale, mem)
    ## ESN Generation parameters
    ## Perform ESN feed
    # End index (exclusive) of each patient's block of records.
    pat_shift = np.append(np.where(np.diff(patient)!=0)[0] + 1, [len(patient)])
    print("pat_shift: ",len(pat_shift))
    ## ENSEMBLE LOOP
    ensemble_results = []
    ensemble_target = []
    ensemble_patient = []
    get_ensembles = True
    if get_ensembles:
        ensemble_max = 10
    else:
        ensemble_max = 1
    for ESNi in range(ensemble_max):
        print('ESN: ')
        ##### BACKWARD INTERP FOR THE ESNs
        # Always take the pre-allocated code path below.
        allocateESN = True
        pat_ipos = 0
        if allocateESN:
            # One ESN state row per record.
            # np.float was removed in NumPy 1.24; use the builtin float dtype.
            ESN = np.ones((len(feature_matrix),N+1), dtype = float)
            for i in range(len(pat_shift)):
                patients_features = feature_matrix[pat_ipos:pat_shift[i]]
                for h, hour in enumerate(patients_features):
                    # Causal filling: only data up to the current hour.
                    features = patients_features[:h+1]
                    for f in range(features.shape[1]):
                        if np.sum(np.isnan(features[:, f])) < len(features[:, f]):
                            nan_bounds(features[:, f])
                            nan_interpolate(features[:, f])
                        else:
                            # All-NaN column: zero it (2nd arg is `copy`;
                            # NaNs become the default 0.0).
                            features[:, f] = np.nan_to_num(features[:, f], 0)
                    ESN[pat_ipos,:] = ESNtools.feedESN(features, N, M, Mb, scale, mem, func, sigmoid_exponent)[-1]
                    pat_ipos = pat_ipos + 1
        else:
            # Incremental variant: stack each patient's full ESN feed.
            for i in range(len(pat_shift)):
                patients_features = feature_matrix[pat_ipos:pat_shift[i]]
                for h, hour in enumerate(patients_features):
                    features = patients_features[:h+1]
                    for f in range(features.shape[1]):
                        if np.sum(np.isnan(features[:, f])) < len(features[:, f]):
                            nan_bounds(features[:, f])
                            nan_interpolate(features[:, f])
                        else:
                            features[:, f] = np.nan_to_num(features[:, f], 0)
                if i == 0:
                    ESN = ESNtools.feedESN(features, N, M, Mb, scale, mem, func, sigmoid_exponent)[-1]
                else:
                    ESN = np.vstack((ESN, ESNtools.feedESN(features, N, M, Mb, scale, mem, func, sigmoid_exponent)))
                pat_ipos = pat_shift[i]
        ## Divide in sets
        X = ESN
        X2 = ESN
        y = sepsis_label
        ## KFold
        results = []
        target = []
        kk = 0
        #for train_index, test_index in skf.split(X,y): #Stratified KFold
        for j in range(len(train_index)): #GSKF
            X_train, X_test = X[train_index[j]], X2[test_index[j]] #GSKF
            y_train, y_test = y[train_index[j]], y[test_index[j]] #GSKF
            patients_id_train, patients_id_test = patient[train_index[j]], patient[test_index[j]]
            # Closed-form (biased normal-equations) linear readout weights.
            w = ESNtools.get_weights_biasedNE(X_train, y_train)
            print("Start testing...", flush=True)
            Y_pred = (np.matmul(X_test,w))
            print(kk, ' realisation ')
            print("auc: ", roc_auc_score(y_test, Y_pred))
            kk +=1
            target.append(y_test)
            results.append(Y_pred)
        ## Evaluate results
        results = np.concatenate(results)
        target = np.concatenate(target)
        auc = roc_auc_score(target,results)
        print('auc: ', auc)
        ## ENSEMBLES keep
        ensemble_results, ensemble_target, ensemble_patient = keep_ensembles(ESNi, ensemble_results, results, ensemble_target, target, ensemble_patient, patient)
    ensemble_results = np.array(ensemble_results)
    ensemble_target = np.concatenate(ensemble_target)
    ensemble_patient = np.concatenate(ensemble_patient)
    auc = roc_auc_score(ensemble_target,ensemble_results) #ensembles substitute the original auc
    ## Threshold study
    th_i = np.min(results)
    th_f = np.max(results)
    ## AUC-based CV
    AUC_CV = True
    if AUC_CV:
        # AUC-only evaluation: skip the threshold sweep, zero the metrics.
        th_max = 0
        f1 = 0
        ACC = 0
        Pr = 0
        Re = 0
    else:
        # Sweep candidate thresholds over the prediction range, tracking F1.
        th_steps = 1000
        th_step = (th_f-th_i)/th_steps
        thsum = 0
        th = np.zeros((1000, 1), dtype = np.double)
        f1 =np.zeros((1000, 1), dtype = np.double)
        print("Threshold: Loop between ", th_i, th_i+th_step*th_steps)
        for i, j in enumerate(np.arange(th_i, th_f, th_step)):
            if j < th_steps:
                th[i] = j
                f1[i] = f1_score(target, results > th[i])
                thsum = thsum + th[i]
                if i%100 == 0:
                    print(i, th[i], f1[i])
                # Stop early once F1 collapses after a useful region.
                if f1[i] < 0.001 and np.abs(thsum) > 0:
                    th = th[:i]
                    f1 = f1[:i]
                    break
        ## Max Threshold
        th_max = th[np.argmax(f1)]
        ## Metrics
        Pr = precision_score(target, results > th_max)
        Re = recall_score(target, results > th_max)
        ACC = accuracy_score(target, results > th_max)
        auc = roc_auc_score(target, results)
        f1 = f1_score(target, results > th_max)
    user = platform.uname()[1] + '@' + platform.platform()
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # write to report file
    output_file = 'report_' + script_name + name_struct + '.txt'
    with open(output_file, 'w') as f:
        f.write(user + '\n')
        f.write(dir_path + '\n')
        f.write(__file__ + '\n')
        f.write(time.strftime("%Y-%m-%d %H:%M") + '\n')
        # f.write('Dataset: ' + path + '\n')
        f.write('{:03d} \t N \n'.format(N))
        f.write('{:1.3f} \t scale \n'.format(scale))
        f.write('{:1.3f} \t mem \n'.format(mem))
        f.write('%1.3f \t exp\n' % sigmoid_exponent)
        f.write('(%2.4f, %2.4f, %2.4f) \t th_i, th_f, *th_sc\n' % (th_i, th_f, th_f-th_i))
        f.write('%2.4f \t th\n' % th_max)
        f.write('%2.4f \t Pr\n' % Pr)
        f.write('%2.4f \t Re\n' % Re)
        f.write('%2.4f \t F1\n' % f1)
        f.write('%2.4f \t ACC\n' % ACC)
        f.write('%2.4f \t AUC\n' % auc)
    print(user)
    print(dir_path)
    print(__file__)
    print(time.strftime("%Y-%m-%d %H:%M"))
    print('Dataset: ' + path)
    print('N: {:03d}'.format(N))
    print('scale: {:1.3f}'.format(scale))
    print('mem: {:1.3f}'.format(mem))
    print('exp: %1.3f' % sigmoid_exponent)
    print('th_i, th_f, *th_sc: (%2.4f, %2.4f, %2.4f)' % (th_i, th_f, th_f-th_i))
    print('th: %2.4f' % th_max)
    print('Pr: %2.4f' % Pr)
    print('Re: %2.4f' % Re)
    print('F1: %2.4f' % f1)
    print('ACC: %2.4f' % ACC)
    print('AUC: %2.4f' % auc)
## Grid_search for loop
# Sweep every (N, scale, mem) combination; one report file per grid point.
for i_N in range(len(N_def)):
    N = N_def[i_N] # Neurons
    ## Random seed (fixed so every grid point uses the same input masks)
    np.random.seed(seed=0)
    ## Mask parameters: random input weights/biases in [-1, 1)
    M = 2*np.random.rand(np.shape(feature_matrix)[1],N)-1
    Mb = 2*np.random.rand(1,N)-1
    for i_scale in range(len(scale_def)):
        scale = scale_def[i_scale] # scaling factor
        for i_mem in range(len(mem_def)):
            mem = mem_def[i_mem] # memory
            try:
                get_gridsearchpoint(feature_matrix, patient, sepsis_label, M, Mb, N, scale, mem, sigmoid_exponent, train_index, test_index)
            # The original bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit (making the sweep impossible to abort) and hid
            # the failure cause; catch Exception and report it instead.
            except Exception as err:
                print("Error at ", N, scale, mem, ":", err)
| 33.706935 | 165 | 0.584655 |
b8d22fb7c58215b6af276e4f37580536cd6d230f | 5,472 | py | Python | examples/start2.py | beidongjiedeguang/manim-express | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 12 | 2021-06-14T07:28:29.000Z | 2022-02-25T02:49:49.000Z | examples/start2.py | beidongjiedeguang/manim-kunyuan | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 1 | 2022-02-01T12:30:14.000Z | 2022-02-01T12:30:14.000Z | examples/start2.py | beidongjiedeguang/manim-express | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 2 | 2021-05-13T13:24:15.000Z | 2021-05-18T02:56:22.000Z | from manimlib import *
import os
class SquareToCircle(Scene):
    """Minimal manim example: draw a square, then morph it into a circle."""
    def construct(self):
        # Target shape: translucent blue fill with a darker outline.
        circle = Circle()
        circle.set_fill(BLUE, opacity=0.5)
        circle.set_stroke(BLUE_E, width=4)
        square = Square()
        self.play(ShowCreation(square))
        self.wait()
        # self.play(ReplacementTransform(square, circle))
        self.play(Transform(square, circle))
        self.wait()
class SurfaceExample(Scene):
    """Demo scene: textured 3D surfaces morphing between a sphere and two
    tori, with camera-angle changes and a movable light source."""
    CONFIG = {
        "camera_class": ThreeDCamera,
    }
    def construct(self):
        surface_text = Text("For 3d scenes, try using surfaces")
        surface_text.fix_in_frame()
        surface_text.to_edge(UP)
        self.add(surface_text)
        self.wait(0.1)
        torus1 = Torus(r1=1, r2=1)
        torus2 = Torus(r1=3, r2=1)
        # Sphere reuses the torus resolution so the surfaces can be
        # transformed into one another point-for-point.
        sphere = Sphere(radius=3, resolution=torus1.resolution)
        # You can texture a surface with up to two images, which will
        # be interpreted as the side towards the light, and away from
        # the light. These can be either urls, or paths to a local file
        # in whatever you've set as the image directory in
        # the custom_config.yml file
        # day_texture = "EarthTextureMap"
        # night_texture = "NightEarthTextureMap"
        day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
        night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
        surfaces = [
            TexturedSurface(surface, day_texture, night_texture)
            for surface in [sphere, torus1, torus2]
        ]
        for mob in surfaces:
            mob.shift(IN)
            mob.mesh = SurfaceMesh(mob)
            mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
        # Set perspective
        frame = self.camera.frame
        frame.set_euler_angles(
            theta=-30 * DEGREES,
            phi=70 * DEGREES,
        )
        surface = surfaces[0]
        self.play(
            FadeIn(surface),
            ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
        )
        for mob in surfaces:
            mob.add(mob.mesh)
        surface.save_state()
        self.play(Rotate(surface, PI / 2), run_time=2)
        # Keep the not-yet-shown surfaces' orientation in sync with the
        # rotation just applied to the visible one.
        for mob in surfaces[1:]:
            mob.rotate(PI / 2)
        self.play(
            Transform(surface, surfaces[1]),
            run_time=3
        )
        self.play(
            Transform(surface, surfaces[2]),
            # Move camera frame during the transition
            frame.animate.increment_phi(-10 * DEGREES),
            frame.animate.increment_theta(-20 * DEGREES),
            run_time=3
        )
        # Add ambient rotation
        frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
        # Play around with where the light is
        light_text = Text("You can move around the light source")
        light_text.move_to(surface_text)
        light_text.fix_in_frame()
        self.play(FadeTransform(surface_text, light_text))
        light = self.camera.light_source
        self.add(light)
        light.save_state()
        self.play(light.animate.move_to(3 * IN), run_time=5)
        self.play(light.animate.shift(10 * OUT), run_time=5)
        drag_text = Text("Try moving the mouse while pressing d or s")
        drag_text.move_to(light_text)
        drag_text.fix_in_frame()
        self.play(FadeTransform(light_text, drag_text))
        self.wait()
class OpeningManimExample(Scene):
    """Intro scene: a linear (matrix) transformation of a grid, followed by
    the nonlinear complex map z -> z^2 on the complex plane."""
    def construct(self):
        intro_words = Text("""
            The original motivation for manim was to
            better illustrate mathematical functions
            as transformations.
        """)
        intro_words.to_edge(UP)
        self.play(Write(intro_words))
        self.wait(2)
        # Linear transform
        grid = NumberPlane((-10, 10), (-5, 5))
        # A shear matrix: x -> x + y, y -> y.
        matrix = [[1, 1], [0, 1]]
        linear_transform_words = VGroup(
            Text("This is what the matrix"),
            IntegerMatrix(matrix, include_background_rectangle=True),
            Text("looks like")
        )
        linear_transform_words.arrange(RIGHT)
        linear_transform_words.to_edge(UP)
        linear_transform_words.set_stroke(BLACK, 10, background=True)
        self.play(
            ShowCreation(grid),
            FadeTransform(intro_words, linear_transform_words)
        )
        self.wait()
        self.play(grid.animate.apply_matrix(matrix), run_time=3)
        self.wait()
        # Complex map
        c_grid = ComplexPlane()
        moving_c_grid = c_grid.copy()
        # Presumably subdivides the grid lines so they bend smoothly under
        # the nonlinear map below -- see manimlib docs to confirm.
        moving_c_grid.prepare_for_nonlinear_transform()
        c_grid.set_stroke(BLUE_E, 1)
        c_grid.add_coordinate_labels(font_size=24)
        complex_map_words = TexText("""
            Or thinking of the plane as $\\mathds{C}$,\\\\
            this is the map $z \\rightarrow z^2$
        """)
        complex_map_words.to_corner(UR)
        complex_map_words.set_stroke(BLACK, 5, background=True)
        self.play(
            FadeOut(grid),
            Write(c_grid, run_time=3),
            FadeIn(moving_c_grid),
            FadeTransform(linear_transform_words, complex_map_words),
        )
        self.wait()
        self.play(
            moving_c_grid.animate.apply_complex_function(lambda z: z**2),
            run_time=6,
        )
        self.wait(2)
if __name__ == "__main__":
os.system('manimgl start2.py OpeningManimExample') | 32.571429 | 156 | 0.606542 |
e60a492d6e6274ad455d3d829e092347263284d5 | 550 | py | Python | Python/5928.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/5928.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | Python/5928.py | GeneralLi95/leetcode | f42392f2283e19ec76273d81b2912944f9039568 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
class Solution:
    """LeetCode 2075 -- Decode the Slanted Transposition Cipher."""

    def decodeCiphertext(self, encodedText: str, rows: int) -> str:
        """Reverse the slanted transposition cipher.

        The plaintext was written diagonally (down-right) into a matrix of
        `rows` rows and len(encodedText) // rows columns, then read row by
        row to produce `encodedText`. Reading the matrix diagonal by
        diagonal recovers the original text; matrix cell (i, i + j) lives
        at flat index i * cols + i + j.

        Returns the plaintext with trailing padding spaces removed.
        """
        cols = len(encodedText) // rows
        chars = []
        for j in range(cols):        # starting column of each diagonal
            for i in range(rows):    # walk down-right along the diagonal
                if i + j < cols:
                    chars.append(encodedText[i * cols + i + j])
        # Join once (O(n)) instead of quadratic string concatenation.
        return ''.join(chars).rstrip()
a = Solution()
print(Solution.decodeCiphertext(a, "ch ie pr", 3)) | 22.916667 | 64 | 0.589091 |
602ecb66633af3f1c4c017c695a7f8b84fd3b7de | 4,197 | py | Python | contrib/seeds/generate-seeds.py | wavercoin/core-project | 48cc42ce5ad314c9672e6ccb854b497128d17781 | [
"MIT"
] | 2 | 2019-04-08T01:45:35.000Z | 2019-06-01T01:34:10.000Z | contrib/seeds/generate-seeds.py | wavercoin/core-project | 48cc42ce5ad314c9672e6ccb854b497128d17781 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | wavercoin/core-project | 48cc42ce5ad314c9672e6ccb854b497128d17781 | [
"MIT"
] | 1 | 2021-09-06T19:33:50.000Z | 2021-09-06T19:33:50.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Supported forms: <onion>.onion (OnionCat mapping), dotted IPv4
    (mapped into ::ffff:a.b.c.d), colon-separated IPv6, and the legacy
    0xDDBBCCAA little-endian IPv4 form. Raises ValueError for
    unparseable input.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # Bug fix: the original raised with the undefined name `s`,
            # turning this error path into a NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec string into a (16-byte host, port) pair."""
    bracketed = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed:  # [ipv6] with optional :port
        host, port = bracketed.group(1), bracketed.group(2)
    elif s.count(':') > 1:  # bare ipv6, no port
        host, port = s, ''
    else:  # host or host:port
        host, _, port = s.partition(':')
    # An absent/empty port (or None from the optional regex group)
    # falls back to the default.
    port = int(port) if port else defaultport
    return (name_to_ipv6(host), port)
def process_nodes(g, f, structname, defaultport):
    """Write the C array `structname` to g for every node listed in stream f."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    need_separator = False
    for line in f:
        # Drop any trailing '#' comment, then surrounding whitespace.
        spec = line.split('#', 1)[0].strip()
        if not spec:
            continue
        if need_separator:
            g.write(',\n')
        need_separator = True
        host, port = parse_spec(spec, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        g.write(' {{%s}, %i}' % (hoststr, port))
    g.write('\n};\n')
def main():
    """Generate the chainparamsseeds.h content on stdout.

    Expects one CLI argument: the directory containing nodes_main.txt.
    NOTE(review): the module docstring also promises a pnSeed6_test array
    built from nodes_test.txt, but only nodes_main.txt is processed here
    -- confirm whether that is intentional.
    """
    if len(sys.argv)<2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]
    # Header guard and banner of the generated C header.
    g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
    g.write('/**\n')
    g.write(' * List of fixed seed nodes for the bitcoin network\n')
    g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
    g.write(' *\n')
    g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
    g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
    g.write(' */\n')
    # 21501 is the default P2P port used when a node entry omits one.
    with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 21501)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
    main()
| 31.088889 | 98 | 0.580891 |
86f393e8676b4ebe60a9ce8976c44b09f96e3fff | 2,426 | py | Python | research/carls/context_test.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 939 | 2019-08-28T06:50:30.000Z | 2022-03-30T02:37:07.000Z | research/carls/context_test.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 80 | 2019-09-01T19:47:30.000Z | 2022-02-02T20:38:38.000Z | research/carls/context_test.py | srihari-humbarwadi/neural-structured-learning | 345b8d644dd7745179263bf6dc9aeb8a921528f4 | [
"Apache-2.0"
] | 196 | 2019-09-01T19:38:53.000Z | 2022-02-08T01:25:57.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_structured_learning.research.carls.context."""
from research.carls import context
from research.carls import dynamic_embedding_config_pb2 as de_config_pb2
import tensorflow as tf
class ContextTest(tf.test.TestCase):
  """Unit tests for the knowledge-bank collection helpers in context.py."""

  def setUp(self):
    super().setUp()
    # The collection is module-global state; reset it so each test case is
    # independent of execution order.
    context.clear_all_collection()

  def test_add_to_collection(self):
    """Adding entries, duplicates, and invalid names/configs/conflicts."""
    config = de_config_pb2.DynamicEmbeddingConfig(embedding_dimension=5)
    context.add_to_collection('first', config)
    context.add_to_collection('second', config)
    context.add_to_collection('first', config)  # ok to add twice.
    self.assertLen(context._knowledge_bank_collections, 2)

    # Empty name.
    with self.assertRaises(ValueError):
      context.add_to_collection('', config)

    # Wrong config type.
    with self.assertRaises(TypeError):
      context.add_to_collection('first', 'config')

    # Checks adding a different config with the same name is not allowed.
    config.embedding_dimension = 10
    with self.assertRaises(ValueError):
      context.add_to_collection('first', config)

  def test_get_all_collection(self):
    """get_all_collection returns every (name, config) pair added."""
    config = de_config_pb2.DynamicEmbeddingConfig(embedding_dimension=5)
    context.add_to_collection('first', config)
    context.add_to_collection('second', config)
    collections = context.get_all_collection()
    self.assertLen(collections, 2)
    for key, value in collections:
      self.assertIn(key, {'first', 'second'})
      self.assertProtoEquals(value, config)

  def test_clear_all_collection(self):
    """clear_all_collection empties the collection.

    Renamed from testClearAllCollection for consistency with the
    snake_case names of the sibling tests.
    """
    config = de_config_pb2.DynamicEmbeddingConfig(embedding_dimension=5)
    context.add_to_collection('first', config)
    context.add_to_collection('second', config)
    collections = context.get_all_collection()
    self.assertLen(collections, 2)
    context.clear_all_collection()
    collections = context.get_all_collection()
    self.assertLen(collections, 0)


if __name__ == '__main__':
  tf.test.main()
| 37.323077 | 74 | 0.755565 |
10de67d8b1bab9e6e2795ec03f9b0cb0cb351785 | 12,375 | py | Python | basinmaker/addattributes/addattributestocatchments.py | dustming/basinmaker | 9a49f4aa3daabf98fb1af61423e4c54309bd1417 | [
"Artistic-2.0"
] | 4 | 2021-04-07T19:26:55.000Z | 2022-03-26T01:03:54.000Z | basinmaker/addattributes/addattributestocatchments.py | dustming/basinmaker | 9a49f4aa3daabf98fb1af61423e4c54309bd1417 | [
"Artistic-2.0"
] | 4 | 2021-04-08T04:31:06.000Z | 2021-04-09T15:02:50.000Z | basinmaker/addattributes/addattributestocatchments.py | dustming/basinmaker | 9a49f4aa3daabf98fb1af61423e4c54309bd1417 | [
"Artistic-2.0"
] | null | null | null | from basinmaker.utilities.utilities import *
from basinmaker.func.pdtable import *
def add_attributes_to_catchments(
input_geo_names,
path_bkfwidthdepth="#",
bkfwd_attributes=[],
path_landuse="#",
path_landuse_info="#",
path_k_c_zone_polygon = '#',
k_in=-1,
c_in=-1,
out_cat_name="catchment_without_merging_lakes",
out_riv_name="river_without_merging_lakes",
grassdb="#",
grass_location="#",
qgis_prefix_path="#",
gis_platform="qgis",
projection="EPSG:3573",
obs_attributes=[],
lake_attributes=[],
outlet_obs_id=1,
path_sub_reg_outlets_v="#",
output_folder="#",
):
"""Calculate hydrological paramters
Calculate hydrological paramters for each subbasin generated by
"AutomatedWatershedsandLakesFilterToolset". The result generaed
by this tool can be used as inputs for Define_Final_Catchment
and other post processing tools
Parameters
----------
input_geo_names : dict
it is a dictionary that list the required input file names,should at
least indicate the name of following items:
sl_nonconnect_lake : raster
it is a raster represent all non connected lakes that are selected
by lake area threstholds
sl_connected_lake : raster
it is a raster represent allconnected lakes that are selected
by lake area threstholds
river_without_merging_lakes : raster/vector
it is the updated river segment for each subbasin
catchment_without_merging_lakes : raster/vector
it is a raster represent updated subbasins after adding lake inflow
and outflow points as new subbasin outlet.
snapped_obs_points : raster/vector
it is a name of the point gis file represent successfully sanpped
observation points
path_bkfwidthdepth : string
It is a string to indicate the full path of the
polyline shapefile that having bankfull width and
depth data
bkfwd_attributes :
the columns names that indicate following items has to be included
1) column name for the Bankfull width in m;
2) column name for the Bankfull depth in m;
3) column name for the annual mean discharge in m3/s;
path_landuse : string
It is a string to indicate the full path of the landuse data.
It will be used to estimate the floodplain roughness
coefficient. Should have the same projection with the DEM data
in ".tif" format.
path_landuse_info : string
It is a string to indicate the full path of the table in '.csv'
format.The table describe the floodplain roughness coefficient
correspond to a given landuse type. The table should have two
columns: RasterV and MannV. RasterV is the landuse value in the
landuse raster for each land use type and the MannV is the
roughness coefficient value for each landuse type.
gis_platform : string
It is a string indicate with gis platform is used:
'qgis' : the basinmaker is running within QGIS
'arcgis' : the basinmaker is running within ArcGIS
lake_attributes : list
the columns names that indicate following items has to be included
1) column name for the unique Id of each lake within the lake polygon shpfile;
2) column name for type of the lake should be integer;
3) column name for the volume of the lake in km3;
4) column name for the average depth of the lake in m;
5) column name for the area of the lake in km2.
obs_attributes : list
the columns names that indicate following items has to be included
1) column name for the unique Id of each observation point;
2) column name for the unique name of each observation point;
3) column name for the drainage area of each observation point in km3;
4) column name for the source of the observation point:
'CA' for observation in canada;
'US' for observation in US;
outlet_obs_id : int
It is one 'Obs_ID' in the provided observation gauge
shapefile. If it is larger than zero. Subbasins that
do not drainage to this gauge will be removed from
delineation result.
projection : string
It is a string indicate a projected coordinate system,
which wiil be used to calcuate area, slope and aspect.
output_folder : string
The path to a folder to save outputs
path_sub_reg_outlets_v : string
Notes
-------
Five vector files will be generated by this function. these files
can be used to define final routing structure by "Define_Final_Catchment"
or be used as input for other postprocessing tools. All files
are stored at self.OutputFolder
catchment_without_merging_lakes.shp : shapefile
It is the subbasin polygon before merging lakes catchments and
need to be processed before used.
river_without_merging_lakes.shp : shapefile
It is the subbasin river segment before merging lakes catchments and
need to be processed before used.
sl_connected_lake.shp : shapefile
It is the connected lake polygon. Connected lakes are lakes that
are connected by Path_final_riv.
sl_non_connected_lake.shp : shapefile
It is the non connected lake polygon. Connected lakes are lakes
that are not connected by Path_final_cat_riv or Path_final_riv.
obs_gauges : shapefile
It is the point shapefile that represent the observation gauge
after snap to river network.
Returns:
-------
None
Examples
-------
"""
columns = COLUMN_NAMES_CONSTANT
coltypes = COLUMN_TYPES_CONSTANT
# local geo file names
cat_ply_info = Internal_Constant_Names["cat_ply_info"]
cat_riv_info = Internal_Constant_Names["cat_riv_info"]
outlet_pt_info = Internal_Constant_Names["outlet_pt_info"]
if not os.path.exists(output_folder):
os.makedirs(output_folder)
if gis_platform == "qgis":
assert (
grassdb != "#"
), "grass database folder is needed, when gis_platform = qgis"
assert (
grass_location != "#"
), "grass location name is needed, when gis_platform = qgis"
assert (
qgis_prefix_path != "#"
), "qgis prefix path is needed, when gis_platform = qgis"
from basinmaker.addattributes.createattributestemplateqgis import (
create_catchments_attributes_template_table,
)
from basinmaker.addattributes.calculatebasicattributesqgis import (
calculate_basic_attributes,
)
from basinmaker.addattributes.addlakeattributesqgis import add_lake_attributes
from basinmaker.addattributes.joinpandastoattributesqgis import (
join_pandas_table_to_vector_attributes,
)
from basinmaker.addattributes.exportoutputsqgis import export_files_to_output_folder
from basinmaker.addattributes.addgaugeattributesqgis import add_gauge_attributes
from basinmaker.addattributes.calfloodmanningnqgis import calculate_flood_plain_manning_n
from basinmaker.addattributes.calbkfwidthdepthqgis import (
calculate_bankfull_width_depth_from_polyline,
)
attr_template = create_catchments_attributes_template_table(
grassdb=grassdb,
grass_location=grass_location,
columns=columns,
input_geo_names=input_geo_names,
)
attr_basic = calculate_basic_attributes(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
input_geo_names=input_geo_names,
projection=projection,
catinfo=attr_template,
cat_ply_info=cat_ply_info,
cat_riv_info=cat_riv_info,
outlet_pt_info=outlet_pt_info,
)
input_geo_names["cat_ply_info"] = cat_ply_info
input_geo_names["cat_riv_info"] = cat_riv_info
input_geo_names["outlet_pt_info"] = outlet_pt_info
if len(lake_attributes) > 0:
attr_lake = add_lake_attributes(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
input_geo_names=input_geo_names,
lake_attributes=lake_attributes,
catinfo=attr_basic,
)
else:
attr_lake = attr_basic
if len(obs_attributes) > 0:
attr_obs = add_gauge_attributes(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
input_geo_names=input_geo_names,
obs_attributes=obs_attributes,
catinfo=attr_lake,
)
else:
attr_obs = attr_lake
if outlet_obs_id > 0:
attr_select = return_interest_catchments_info(
catinfo=attr_obs,
outlet_obs_id=outlet_obs_id,
path_sub_reg_outlets_v=path_sub_reg_outlets_v,
)
else:
attr_select = attr_obs
if path_landuse != "#":
attr_landuse = calculate_flood_plain_manning_n(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
catinfo=attr_select,
input_geo_names=input_geo_names,
path_landuse=path_landuse,
path_landuse_info=path_landuse_info,
)
else:
attr_landuse = attr_select
attr_da = streamorderanddrainagearea(attr_landuse)
if path_bkfwidthdepth != "#" or k_in != -1 or path_k_c_zone_polygon != '#':
attr_bkf = calculate_bankfull_width_depth_from_polyline(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
path_bkfwidthdepth=path_bkfwidthdepth,
path_k_c_zone_polygon = path_k_c_zone_polygon,
bkfwd_attributes=bkfwd_attributes,
catinfo=attr_da,
input_geo_names=input_geo_names,
k_in=k_in,
c_in=k_in,
)
else:
attr_bkf = attr_da
attr_ncl = update_non_connected_catchment_info(attr_bkf)
attr_ncl.loc[attr_ncl['RivLength'] == -1.2345,'RivSlope'] = -1.2345
attr_ncl.loc[attr_ncl['RivLength'] == -1.2345,'FloodP_n'] = -1.2345
attr_ncl.loc[attr_ncl['RivLength'] == -1.2345,'Max_DEM'] = -1.2345
attr_ncl.loc[attr_ncl['RivLength'] == -1.2345,'Min_DEM'] = -1.2345
attr_ncl.loc[attr_ncl['RivLength'] == -1.2345,'Ch_n'] = -1.2345
join_pandas_table_to_vector_attributes(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
vector_name=cat_ply_info,
pd_table=attr_ncl,
column_types=coltypes,
columns_names=columns,
)
join_pandas_table_to_vector_attributes(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
vector_name=cat_riv_info,
pd_table=attr_ncl,
column_types=coltypes,
columns_names=columns,
)
export_files_to_output_folder(
grassdb=grassdb,
grass_location=grass_location,
qgis_prefix_path=qgis_prefix_path,
input_geo_names=input_geo_names,
output_riv=out_riv_name,
output_cat=out_cat_name,
output_folder=output_folder,
obs_attributes = obs_attributes,
)
| 41.25 | 97 | 0.632323 |
bae02accdd374bbe5af9e3100b09092ccd36caa4 | 23,872 | py | Python | litex/soc/interconnect/wishbone.py | umarcor/litex | 1d20bbcd01784158088e1eb85e219309cfce0248 | [
"ADSL"
] | 1 | 2021-12-25T13:49:55.000Z | 2021-12-25T13:49:55.000Z | litex/soc/interconnect/wishbone.py | umarcor/litex | 1d20bbcd01784158088e1eb85e219309cfce0248 | [
"ADSL"
] | null | null | null | litex/soc/interconnect/wishbone.py | umarcor/litex | 1d20bbcd01784158088e1eb85e219309cfce0248 | [
"ADSL"
] | 1 | 2021-12-25T13:49:57.000Z | 2021-12-25T13:49:57.000Z | #
# This file is part of LiteX.
#
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2018 Tim 'mithro' Ansell <me@mith.ro>
# Copytight (c) 2022 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
"""Wishbone Classic support for LiteX (Standard HandShaking/Synchronous Feedback)"""
from math import log2
from functools import reduce
from operator import or_
from migen import *
from migen.genlib import roundrobin
from migen.genlib.record import *
from migen.genlib.misc import split, displacer, chooser, WaitTimer
from litex.build.generic_platform import *
from litex.soc.interconnect import csr, csr_bus
# Wishbone Definition ------------------------------------------------------------------------------
# Wishbone bus layout: (signal name, width or width-parameter name, direction).
# Directions are from the master's point of view (DIR_M_TO_S: driven by the
# master, DIR_S_TO_M: driven by the slave).
_layout = [
    ("adr", "adr_width", DIR_M_TO_S),
    ("dat_w", "data_width", DIR_M_TO_S),
    ("dat_r", "data_width", DIR_S_TO_M),
    ("sel", "sel_width", DIR_M_TO_S),
    ("cyc", 1, DIR_M_TO_S),
    ("stb", 1, DIR_M_TO_S),
    ("ack", 1, DIR_S_TO_M),
    ("we", 1, DIR_M_TO_S),
    ("cti", 3, DIR_M_TO_S),
    ("bte", 2, DIR_M_TO_S),
    ("err", 1, DIR_S_TO_M)
]
# Cycle Type Identifier (cti) encodings for burst cycles, per the Wishbone B4
# registered-feedback specification.
CTI_BURST_NONE = 0b000
CTI_BURST_CONSTANT = 0b001
CTI_BURST_INCREMENTING = 0b010
CTI_BURST_END = 0b111
class Interface(Record):
    """Wishbone Classic bus interface (standard handshaking, synchronous
    feedback).

    Parameters
    ----------
    data_width : int
        Width of ``dat_r``/``dat_w`` in bits; ``sel`` carries one bit per
        byte lane (``data_width//8``).
    adr_width : int
        Width of the word address signal ``adr``.
    bursting : bool
        Whether burst cycles (``cti``/``bte``) are used on this interface.
    """
    def __init__(self, data_width=32, adr_width=30, bursting=False):
        self.data_width = data_width
        self.adr_width  = adr_width
        self.bursting   = bursting
        Record.__init__(self, set_layout_parameters(_layout,
            adr_width  = adr_width,
            data_width = data_width,
            sel_width  = data_width//8))
        # Address/data/sel carry no meaningful reset value; marking them
        # reset_less avoids useless reset fan-out.
        self.adr.reset_less   = True
        self.dat_w.reset_less = True
        self.dat_r.reset_less = True
        self.sel.reset_less   = True
    @staticmethod
    def like(other):
        """Return a new Interface with the same parameters as *other*.

        Bug fix: previously only the data width was mirrored, so the
        returned interface silently fell back to the default adr_width/
        bursting instead of matching *other*.
        """
        return Interface(
            data_width = len(other.dat_w),
            adr_width  = len(other.adr),
            bursting   = getattr(other, "bursting", False))
    def _do_transaction(self):
        # Simulation helper: assert cyc/stb, wait for ack, then release.
        yield self.cyc.eq(1)
        yield self.stb.eq(1)
        yield
        while not (yield self.ack):
            yield
        yield self.cyc.eq(0)
        yield self.stb.eq(0)
    def write(self, adr, dat, sel=None, cti=None, bte=None):
        """Simulation helper: perform a single write transaction."""
        if sel is None:
            sel = 2**len(self.sel) - 1  # default: enable all byte lanes
        yield self.adr.eq(adr)
        yield self.dat_w.eq(dat)
        yield self.sel.eq(sel)
        if cti is not None:
            yield self.cti.eq(cti)
        if bte is not None:
            yield self.bte.eq(bte)
        yield self.we.eq(1)
        yield from self._do_transaction()
    def read(self, adr, cti=None, bte=None):
        """Simulation helper: perform a single read transaction and return
        the read data."""
        yield self.adr.eq(adr)
        yield self.we.eq(0)
        if cti is not None:
            yield self.cti.eq(cti)
        if bte is not None:
            yield self.bte.eq(bte)
        yield from self._do_transaction()
        return (yield self.dat_r)
    def get_ios(self, bus_name="wb"):
        """Return platform IO constraints exposing this bus as top-level pads."""
        subsignals = []
        for name, width, direction in self.layout:
            subsignals.append(Subsignal(name, Pins(width)))
        ios = [(bus_name, 0) + tuple(subsignals)]
        return ios
    def connect_to_pads(self, pads, mode="master"):
        """Return combinatorial statements wiring this bus to *pads*.

        In "master" mode the M->S signals drive the pads; in "slave" mode
        the S->M signals do.
        """
        assert mode in ["slave", "master"]
        r = []
        for name, width, direction in self.layout:
            sig = getattr(self, name)
            pad = getattr(pads, name)
            if mode == "master":
                if direction == DIR_M_TO_S:
                    r.append(pad.eq(sig))
                else:
                    r.append(sig.eq(pad))
            else:
                if direction == DIR_S_TO_M:
                    r.append(pad.eq(sig))
                else:
                    r.append(sig.eq(pad))
        return r
# Wishbone Timeout ---------------------------------------------------------------------------------
class Timeout(Module):
    """Bus watchdog: if an access stays un-acked for *cycles* clock cycles,
    force an ack with all-ones data and raise ``self.error``."""
    def __init__(self, master, cycles):
        self.error = Signal()
        # # #
        timer = WaitTimer(int(cycles))
        self.submodules += timer
        self.comb += [
            # Count only while an access is pending and not yet acknowledged.
            timer.wait.eq(master.stb & master.cyc & ~master.ack),
            If(timer.done,
                master.dat_r.eq((2**len(master.dat_w))-1),
                master.ack.eq(1),
                self.error.eq(1)
            )
        ]
# Wishbone Interconnect ----------------------------------------------------------------------------
class InterconnectPointToPoint(Module):
    """Direct 1:1 wiring of a single master to a single slave."""
    def __init__(self, master, slave):
        self.comb += master.connect(slave)
class Arbiter(Module):
    """Round-robin arbiter: multiplexes several Wishbone masters onto a
    single *target* bus."""
    def __init__(self, masters, target):
        self.submodules.rr = roundrobin.RoundRobin(len(masters))
        # mux master->slave signals
        for name, size, direction in _layout:
            if direction == DIR_M_TO_S:
                choices = Array(getattr(m, name) for m in masters)
                self.comb += getattr(target, name).eq(choices[self.rr.grant])
        # connect slave->master signals
        for name, size, direction in _layout:
            if direction == DIR_S_TO_M:
                source = getattr(target, name)
                for i, m in enumerate(masters):
                    dest = getattr(m, name)
                    if name == "ack" or name == "err":
                        # ack/err must only reach the currently granted
                        # master; the other S->M signals fan out to all.
                        self.comb += dest.eq(source & (self.rr.grant == i))
                    else:
                        self.comb += dest.eq(source)
        # connect bus requests to round-robin selector
        reqs = [m.cyc for m in masters]
        self.comb += self.rr.request.eq(Cat(*reqs))
class Decoder(Module):
    """Address decoder: routes a single master to one of several slaves
    based on user-supplied address-match functions."""
    # slaves is a list of pairs:
    # 0) function that takes the address signal and returns a FHDL expression
    # that evaluates to 1 when the slave is selected and 0 otherwise.
    # 1) wishbone.Slave reference.
    # register adds flip-flops after the address comparators. Improves timing,
    # but breaks Wishbone combinatorial feedback.
    def __init__(self, master, slaves, register=False):
        ns = len(slaves)
        slave_sel = Signal(ns)
        slave_sel_r = Signal(ns)
        # decode slave addresses
        self.comb += [slave_sel[i].eq(fun(master.adr))
            for i, (fun, bus) in enumerate(slaves)]
        if register:
            self.sync += slave_sel_r.eq(slave_sel)
        else:
            self.comb += slave_sel_r.eq(slave_sel)
        # connect master->slaves signals except cyc
        for slave in slaves:
            for name, size, direction in _layout:
                if direction == DIR_M_TO_S and name != "cyc":
                    self.comb += getattr(slave[1], name).eq(getattr(master, name))
        # combine cyc with slave selection signals
        self.comb += [slave[1].cyc.eq(master.cyc & slave_sel[i])
            for i, slave in enumerate(slaves)]
        # generate master ack (resp. err) by ORing all slave acks (resp. errs)
        self.comb += [
            master.ack.eq(reduce(or_, [slave[1].ack for slave in slaves])),
            master.err.eq(reduce(or_, [slave[1].err for slave in slaves]))
        ]
        # mux (1-hot) slave data return
        masked = [Replicate(slave_sel_r[i], len(master.dat_r)) & slaves[i][1].dat_r for i in range(ns)]
        self.comb += master.dat_r.eq(reduce(or_, masked))
class InterconnectShared(Module):
    """Shared-bus interconnect: arbitrates *masters* onto one internal bus,
    then decodes that bus to *slaves*.  An optional Timeout watchdog acks
    accesses that no slave answers."""
    def __init__(self, masters, slaves, register=False, timeout_cycles=1e6):
        shared = Interface(data_width=masters[0].data_width)
        self.submodules.arbiter = Arbiter(masters, shared)
        self.submodules.decoder = Decoder(shared, slaves, register)
        if timeout_cycles is not None:
            self.submodules.timeout = Timeout(shared, timeout_cycles)
class Crossbar(Module):
    """Full crossbar: one Decoder per master and one Arbiter per slave,
    allowing distinct master/slave pairs to transfer concurrently."""
    def __init__(self, masters, slaves, register=False):
        matches, busses = zip(*slaves)
        # access[i][j]: internal bus from master i towards slave j.
        # NOTE(review): these use default Interface() widths - confirm they
        # match the master/slave data widths in this design.
        access = [[Interface() for j in slaves] for i in masters]
        # decode each master into its access row
        for row, master in zip(access, masters):
            row = list(zip(matches, row))
            self.submodules += Decoder(master, row, register)
        # arbitrate each access column onto its slave
        for column, bus in zip(zip(*access), busses):
            self.submodules += Arbiter(column, bus)
# Wishbone Data Width Converter --------------------------------------------------------------------
class DownConverter(Module):
    """DownConverter
    This module splits Wishbone accesses from a master interface to a smaller slave interface.
    Writes:
        Writes from master are splitted N writes to the slave. Access is acked when the last
        access is acked by the slave.
    Reads:
        Read from master are splitted in N reads to the the slave. Read datas from
        the slave are cached before being presented concatenated on the last access.
    """
    def __init__(self, master, slave):
        dw_from = len(master.dat_w)
        dw_to = len(slave.dat_w)
        ratio = dw_from//dw_to  # number of slave beats per master access
        # # #
        # skip: current beat has no byte lane selected, so no slave access
        # is issued for it (it is counted as done immediately).
        skip = Signal()
        counter = Signal(max=ratio)
        # Control Path
        fsm = FSM(reset_state="IDLE")
        fsm = ResetInserter()(fsm)
        self.submodules.fsm = fsm
        # Abort/reset the sequence as soon as the master drops cyc.
        self.comb += fsm.reset.eq(~master.cyc)
        fsm.act("IDLE",
            NextValue(counter, 0),
            If(master.stb & master.cyc,
                NextState("CONVERT"),
            )
        )
        fsm.act("CONVERT",
            slave.adr.eq(Cat(counter, master.adr)),
            Case(counter, {i: slave.sel.eq(master.sel[i*dw_to//8:]) for i in range(ratio)}),
            If(master.stb & master.cyc,
                skip.eq(slave.sel == 0),
                slave.we.eq(master.we),
                slave.cyc.eq(~skip),
                slave.stb.eq(~skip),
                If(slave.ack | skip,
                    NextValue(counter, counter + 1),
                    If(counter == (ratio - 1),
                        master.ack.eq(1),
                        NextState("IDLE")
                    )
                )
            )
        )
        # Write Datapath
        self.comb += Case(counter, {i: slave.dat_w.eq(master.dat_w[i*dw_to:]) for i in range(ratio)})
        # Read Datapath
        # Shift-register: each acked beat shifts the new slave word into the
        # top; after the last beat the full-width word is presented.
        dat_r = Signal(dw_from, reset_less=True)
        self.comb += master.dat_r.eq(Cat(dat_r[dw_to:], slave.dat_r))
        self.sync += If(slave.ack | skip, dat_r.eq(master.dat_r))
class UpConverter(Module):
    """UpConverter

    Adapts a narrow master to a wider slave: each master access maps to one
    lane of the wide slave word, selected by the low address bits.
    """
    def __init__(self, master, slave):
        dw_from = len(master.dat_w)
        dw_to = len(slave.dat_w)
        ratio = dw_to//dw_from  # number of master-width lanes per slave word
        # # #
        # Pass control signals straight through; adr/sel/data need lane muxing.
        self.comb += master.connect(slave, omit={"adr", "sel", "dat_w", "dat_r"})
        cases = {}
        for i in range(ratio):
            cases[i] = [
                slave.adr.eq(master.adr[int(log2(ratio)):]),
                slave.sel[i*dw_from//8:(i+1)*dw_from//8].eq(master.sel),
                slave.dat_w[i*dw_from:(i+1)*dw_from].eq(master.dat_w),
                master.dat_r.eq(slave.dat_r[i*dw_from:(i+1)*dw_from]),
            ]
        self.comb += Case(master.adr[:int(log2(ratio))], cases)
class Converter(Module):
    """Width-adapting wrapper around DownConverter/UpConverter.

    Instantiate this instead of a specific converter: it picks the right
    one from the master/slave data widths, or wires the two buses straight
    through when the widths already match.
    """
    def __init__(self, master, slave):
        self.master = master
        self.slave = slave
        # # #
        master_dw = len(master.dat_r)
        slave_dw = len(slave.dat_r)
        if master_dw == slave_dw:
            # Same width: direct connection, no conversion logic needed.
            self.comb += master.connect(slave)
        else:
            converter_cls = DownConverter if master_dw > slave_dw else UpConverter
            self.submodules += converter_cls(master, slave)
# Wishbone SRAM ------------------------------------------------------------------------------------
class SRAM(Module):
    """Wishbone-mapped SRAM backed by a migen Memory.

    mem_or_size is either an existing Memory (its width must fit the bus
    data width) or a size in bytes.  When the bus was created with
    bursting=True, incrementing bursts (with optional wrap, per bte) are
    served at one beat per cycle.
    """
    def __init__(self, mem_or_size, read_only=None, init=None, bus=None, name=None):
        if bus is None:
            bus = Interface()
        self.bus = bus
        bus_data_width = len(self.bus.dat_r)
        if isinstance(mem_or_size, Memory):
            assert(mem_or_size.width <= bus_data_width)
            self.mem = mem_or_size
        else:
            self.mem = Memory(bus_data_width, mem_or_size//(bus_data_width//8), init=init, name=name)
        if read_only is None:
            if hasattr(self.mem, "bus_read_only"):
                read_only = self.mem.bus_read_only
            else:
                read_only = False
        # # #
        adr_burst = Signal()
        # Burst support.
        # --------------
        if self.bus.bursting:
            # Wrap masks indexed by bte: linear, 4-, 8-, 16-beat wrap.
            adr_wrap_mask = Array((0b0000, 0b0011, 0b0111, 0b1111))
            adr_wrap_max = adr_wrap_mask[-1].bit_length()
            adr_burst_wrap = Signal()
            adr_latched = Signal()
            adr_counter = Signal(len(self.bus.adr))
            adr_counter_base = Signal(len(self.bus.adr))
            adr_counter_offset = Signal(adr_wrap_max)
            adr_offset_lsb = Signal(adr_wrap_max)
            adr_offset_msb = Signal(len(self.bus.adr))
            adr_next = Signal(len(self.bus.adr))
            # Only Incrementing Burts are supported.
            self.comb += [
                Case(self.bus.cti, {
                    # incrementing address burst cycle
                    CTI_BURST_INCREMENTING: adr_burst.eq(1),
                    # end current burst cycle
                    CTI_BURST_END: adr_burst.eq(0),
                    # unsupported burst cycle
                    "default": adr_burst.eq(0)
                }),
                adr_burst_wrap.eq(self.bus.bte[0] | self.bus.bte[1]),
                adr_counter_base.eq(
                    Cat(self.bus.adr & ~adr_wrap_mask[self.bus.bte],
                        self.bus.adr[adr_wrap_max:]
                    )
                )
            ]
            # Latch initial address (without wrapping bits and wrap offset).
            self.sync += [
                If(self.bus.cyc & self.bus.stb & adr_burst,
                    adr_latched.eq(1),
                    # Latch initial address, then increment it every clock cycle
                    If(adr_latched,
                        adr_counter.eq(adr_counter + 1)
                    ).Else(
                        adr_counter_offset.eq(self.bus.adr & adr_wrap_mask[self.bus.bte]),
                        # Reads prefetch one beat ahead (+1 when ~we) so the
                        # synchronous memory keeps up with 1-beat/cycle bursts.
                        adr_counter.eq(adr_counter_base +
                            Cat(~self.bus.we, Replicate(0, len(adr_counter)-1))
                        )
                    ),
                    If(self.bus.cti == CTI_BURST_END,
                        adr_latched.eq(0),
                        adr_counter.eq(0),
                        adr_counter_offset.eq(0)
                    )
                ).Else(
                    adr_latched.eq(0),
                    adr_counter.eq(0),
                    adr_counter_offset.eq(0)
                ),
            ]
            # Next Address = counter value without wrapped bits + wrapped counter bits with offset.
            self.comb += [
                adr_offset_lsb.eq((adr_counter + adr_counter_offset) & adr_wrap_mask[self.bus.bte]),
                adr_offset_msb.eq(adr_counter & ~adr_wrap_mask[self.bus.bte]),
                adr_next.eq(adr_offset_msb + adr_offset_lsb)
            ]
        # # #
        # Memory.
        # -------
        port = self.mem.get_port(write_capable=not read_only, we_granularity=8,
            mode=READ_FIRST if read_only else WRITE_FIRST)
        self.specials += self.mem, port
        # Generate write enable signal
        if not read_only:
            self.comb += [port.we[i].eq(self.bus.cyc & self.bus.stb & self.bus.we & self.bus.sel[i])
                for i in range(bus_data_width//8)]
        # Address and data
        self.comb += port.adr.eq(self.bus.adr[:len(port.adr)])
        if self.bus.bursting:
            # During an active burst, the internally computed next address
            # overrides the bus address.
            self.comb += If(adr_burst & adr_latched,
                port.adr.eq(adr_next[:len(port.adr)]),
            )
        self.comb += [
            self.bus.dat_r.eq(port.dat_r)
        ]
        if not read_only:
            self.comb += port.dat_w.eq(self.bus.dat_w),
        # Generate Ack.
        self.sync += [
            self.bus.ack.eq(0),
            If(self.bus.cyc & self.bus.stb & (~self.bus.ack | adr_burst), self.bus.ack.eq(1))
        ]
# Wishbone To CSR ----------------------------------------------------------------------------------
class Wishbone2CSR(Module):
    """Bridge exposing a CSR bus as a Wishbone slave.

    With register=True the address/data are registered before the CSR
    access (extra cycle of latency, better timing); with register=False
    the CSR access is driven combinatorially from the Wishbone request.
    A write is only performed when at least one sel byte lane is set.
    """
    def __init__(self, bus_wishbone=None, bus_csr=None, register=True):
        self.csr = bus_csr
        if self.csr is None:
            # If no CSR bus provided, create it with default parameters.
            self.csr = csr_bus.Interface()
        self.wishbone = bus_wishbone
        if self.wishbone is None:
            # If no Wishbone bus provided, create it with default parameters.
            self.wishbone = Interface()
        # # #
        if register:
            fsm = FSM(reset_state="IDLE")
            self.submodules += fsm
            fsm.act("IDLE",
                NextValue(self.csr.dat_w, self.wishbone.dat_w),
                If(self.wishbone.cyc & self.wishbone.stb,
                    NextValue(self.csr.adr, self.wishbone.adr),
                    NextValue(self.csr.we, self.wishbone.we & (self.wishbone.sel != 0)),
                    NextState("WRITE-READ")
                )
            )
            fsm.act("WRITE-READ",
                NextValue(self.csr.adr, 0),
                NextValue(self.csr.we, 0),
                NextState("ACK")
            )
            fsm.act("ACK",
                self.wishbone.ack.eq(1),
                self.wishbone.dat_r.eq(self.csr.dat_r),
                NextState("IDLE")
            )
        else:
            fsm = FSM(reset_state="WRITE-READ")
            self.submodules += fsm
            fsm.act("WRITE-READ",
                self.csr.dat_w.eq(self.wishbone.dat_w),
                If(self.wishbone.cyc & self.wishbone.stb,
                    self.csr.adr.eq(self.wishbone.adr),
                    self.csr.we.eq(self.wishbone.we & (self.wishbone.sel != 0)),
                    NextState("ACK")
                )
            )
            fsm.act("ACK",
                self.wishbone.ack.eq(1),
                self.wishbone.dat_r.eq(self.csr.dat_r),
                NextState("WRITE-READ")
            )
# Wishbone Cache -----------------------------------------------------------------------------------
class Cache(Module):
    """Cache
    This module is a write-back wishbone cache that can be used as a L2 cache.
    Cachesize (in 32-bit words) is the size of the data store and must be a power of 2

    Direct-mapped: each master address maps to exactly one line, identified
    by its tag.  On a miss, a dirty line is evicted to the slave before the
    new line is refilled from it.
    """
    def __init__(self, cachesize, master, slave, reverse=True):
        self.master = master
        self.slave = slave
        # # #
        dw_from = len(master.dat_r)
        dw_to = len(slave.dat_r)
        if dw_to > dw_from and (dw_to % dw_from) != 0:
            raise ValueError("Slave data width must be a multiple of {dw}".format(dw=dw_from))
        if dw_to < dw_from and (dw_from % dw_to) != 0:
            raise ValueError("Master data width must be a multiple of {dw}".format(dw=dw_to))
        # Split address:
        # TAG | LINE NUMBER | LINE OFFSET
        offsetbits = log2_int(max(dw_to//dw_from, 1))
        addressbits = len(slave.adr) + offsetbits
        linebits = log2_int(cachesize) - offsetbits
        tagbits = addressbits - linebits
        wordbits = log2_int(max(dw_from//dw_to, 1))
        adr_offset, adr_line, adr_tag = split(master.adr, offsetbits, linebits, tagbits)
        # word: index of the current slave-width beat within a line
        # (None when master and slave widths match).
        word = Signal(wordbits) if wordbits else None
        # Data memory
        data_mem = Memory(dw_to*2**wordbits, 2**linebits)
        data_port = data_mem.get_port(write_capable=True, we_granularity=8)
        self.specials += data_mem, data_port
        write_from_slave = Signal()
        if adr_offset is None:
            adr_offset_r = None
        else:
            # Registered offset: aligns the read-data mux with the
            # synchronous memory's one-cycle read latency.
            adr_offset_r = Signal(offsetbits, reset_less=True)
            self.sync += adr_offset_r.eq(adr_offset)
        self.comb += [
            data_port.adr.eq(adr_line),
            If(write_from_slave,
                displacer(slave.dat_r, word, data_port.dat_w),
                displacer(Replicate(1, dw_to//8), word, data_port.we)
            ).Else(
                data_port.dat_w.eq(Replicate(master.dat_w, max(dw_to//dw_from, 1))),
                If(master.cyc & master.stb & master.we & master.ack,
                    displacer(master.sel, adr_offset, data_port.we, 2**offsetbits, reverse=reverse)
                )
            ),
            chooser(data_port.dat_r, word, slave.dat_w),
            slave.sel.eq(2**(dw_to//8)-1),
            chooser(data_port.dat_r, adr_offset_r, master.dat_r, reverse=reverse)
        ]
        # Tag memory
        tag_layout = [("tag", tagbits), ("dirty", 1)]
        tag_mem = Memory(layout_len(tag_layout), 2**linebits)
        tag_port = tag_mem.get_port(write_capable=True)
        self.specials += tag_mem, tag_port
        tag_do = Record(tag_layout)
        tag_di = Record(tag_layout)
        self.comb += [
            tag_do.raw_bits().eq(tag_port.dat_r),
            tag_port.dat_w.eq(tag_di.raw_bits())
        ]
        self.comb += [
            tag_port.adr.eq(adr_line),
            tag_di.tag.eq(adr_tag)
        ]
        # Slave address is built from the *stored* tag so EVICT writes go
        # back to the line's original location.
        if word is not None:
            self.comb += slave.adr.eq(Cat(word, adr_line, tag_do.tag))
        else:
            self.comb += slave.adr.eq(Cat(adr_line, tag_do.tag))
        # slave word computation, word_clr and word_inc will be simplified
        # at synthesis when wordbits=0
        word_clr = Signal()
        word_inc = Signal()
        if word is not None:
            self.sync += \
                If(word_clr,
                    word.eq(0),
                ).Elif(word_inc,
                    word.eq(word+1)
                )
        def word_is_last(word):
            # True on the final beat of a line transfer (always true when
            # a line is a single slave word).
            if word is not None:
                return word == 2**wordbits-1
            else:
                return 1
        # Control FSM
        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
        fsm.act("IDLE",
            If(master.cyc & master.stb,
                NextState("TEST_HIT")
            )
        )
        fsm.act("TEST_HIT",
            word_clr.eq(1),
            If(tag_do.tag == adr_tag,
                master.ack.eq(1),
                If(master.we,
                    # Write hit: mark the line dirty for later write-back.
                    tag_di.dirty.eq(1),
                    tag_port.we.eq(1)
                ),
                NextState("IDLE")
            ).Else(
                If(tag_do.dirty,
                    NextState("EVICT")
                ).Else(
                    # Write the tag first to set the slave address
                    tag_port.we.eq(1),
                    word_clr.eq(1),
                    NextState("REFILL")
                )
            )
        )
        fsm.act("EVICT",
            slave.stb.eq(1),
            slave.cyc.eq(1),
            slave.we.eq(1),
            If(slave.ack,
                word_inc.eq(1),
                If(word_is_last(word),
                    # Write the tag first to set the slave address
                    tag_port.we.eq(1),
                    word_clr.eq(1),
                    NextState("REFILL")
                )
            )
        )
        fsm.act("REFILL",
            slave.stb.eq(1),
            slave.cyc.eq(1),
            slave.we.eq(0),
            If(slave.ack,
                write_from_slave.eq(1),
                word_inc.eq(1),
                If(word_is_last(word),
                    NextState("TEST_HIT"),
                ).Else(
                    NextState("REFILL")
                )
            )
        )
| 35.471025 | 103 | 0.522914 |
2211083a1a8e26f86e11508e893a4b7340372f23 | 5,313 | py | Python | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/action/ce.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/action/ce.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | ansible/my_env/lib/python2.7/site-packages/ansible/plugins/action/ce.py | otus-devops-2019-02/yyashkin_infra | 0cd0c003884155ac922e3e301305ac202de7028c | [
"MIT"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network.cloudengine.ce import ce_provider_spec
from ansible.module_utils.network.common.utils import load_provider
# Reuse the controller's global display object when running under the CLI;
# fall back to creating our own when imported standalone (e.g. in tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
# Modules that talk to the device over the CLI transport; everything else
# goes over netconf.
CLI_SUPPORTED_MODULES = ['ce_config', 'ce_command']
class ActionModule(_ActionModule):
    def run(self, tmp=None, task_vars=None):
        """Set up the persistent network_cli/netconf connection for a
        CloudEngine (ce_*) task, validate the connection/module pairing,
        make sure the CLI session is out of config mode, then delegate to
        the normal action-module execution."""
        del tmp  # tmp no longer has any effect
        socket_path = None
        if self._play_context.connection == 'local':
            # Legacy "local" connection: build a play context from the
            # task's provider dict and open the persistent connection here.
            provider = load_provider(ce_provider_spec, self._task.args)
            transport = provider['transport'] or 'cli'
            display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)
            if transport == 'cli':
                pc = copy.deepcopy(self._play_context)
                pc.connection = 'network_cli'
                pc.network_os = 'ce'
                pc.remote_addr = provider['host'] or self._play_context.remote_addr
                pc.port = int(provider['port'] or self._play_context.port or 22)
                pc.remote_user = provider['username'] or self._play_context.connection_user
                pc.password = provider['password'] or self._play_context.password
                command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
                # NOTE(review): dict.update() returns None, so this assigns
                # None to the 'provider' arg; it is overwritten with the
                # full provider dict after this if-block.
                self._task.args['provider'] = provider.update(
                    host=pc.remote_addr,
                    port=pc.port,
                    username=pc.remote_user,
                    password=pc.password
                )
                # Modules without CLI support are forced onto netconf.
                if self._task.action in ['ce_netconf'] or self._task.action not in CLI_SUPPORTED_MODULES:
                    pc.connection = 'netconf'
                display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
                connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
                connection.set_options(direct={'persistent_command_timeout': command_timeout})
                socket_path = connection.run()
                display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
                if not socket_path:
                    return {'failed': True,
                            'msg': 'unable to open shell. Please see: ' +
                            'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
                task_vars['ansible_socket'] = socket_path
            # make sure a transport value is set in args
            self._task.args['transport'] = transport
            self._task.args['provider'] = provider
        elif self._play_context.connection in ('netconf', 'network_cli'):
            # Connection is managed by ansible itself; a provider dict is
            # redundant and dropped with a warning.
            provider = self._task.args.get('provider', {})
            if any(provider.values()):
                display.warning('provider is unnessary whene using %s and will be ignored' % self._play_context.connection)
                del self._task.args['provider']
            if (self._play_context.connection == 'network_cli' and self._task.action not in CLI_SUPPORTED_MODULES) or \
                    (self._play_context.connection == 'netconf' and self._task.action in CLI_SUPPORTED_MODULES):
                return {'failed': True, 'msg': "Connection type '%s' is not valid for '%s' module."
                        % (self._play_context.connection, self._task.action)}
        if (self._play_context.connection == 'local' and transport == 'cli' and self._task.action in CLI_SUPPORTED_MODULES) \
                or self._play_context.connection == 'network_cli':
            # make sure we are in the right cli context whitch should be
            # enable mode and not config module
            if socket_path is None:
                socket_path = self._connection.socket_path
            conn = Connection(socket_path)
            out = conn.get_prompt()
            # A prompt ending with ']' indicates config/system view on
            # CloudEngine; back out until we reach the user view.
            while to_text(out, errors='surrogate_then_replace').strip().endswith(']'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                conn.send_command('exit')
                out = conn.get_prompt()
        result = super(ActionModule, self).run(task_vars=task_vars)
        return result
| 47.017699 | 126 | 0.645022 |
7f6466f347b26528f5e3d25e718e8d9fbb4b5522 | 814 | py | Python | Chapter02/py212privateProperty.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
] | null | null | null | Chapter02/py212privateProperty.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
] | null | null | null | Chapter02/py212privateProperty.py | csy1993/PythonQt | c100cd9e1327fc7731bf04c7754cafb8dd578fa5 | [
"Apache-2.0"
] | null | null | null | '''
* @Author: csy
* @Date: 2019-05-09 15:42:03
* @Last Modified by: csy
* @Last Modified time: 2019-05-09 15:42:03
'''
# -*- coding: utf-8 -*-
class MyCounter:
    """Demonstrates Python's private attributes: names prefixed with a
    double underscore are mangled (to _MyCounter__name) and therefore not
    directly reachable from outside the class."""
    __secretCount = 0  # private class attribute (name-mangled)
    publicCount = 0    # public class attribute
    def __privateCountFun(self):
        """Private helper: bumps both counters on the instance."""
        print('这是私有方法')
        self.__secretCount = self.__secretCount + 1
        self.publicCount = self.publicCount + 1
    def publicCountFun(self):
        """Public entry point that delegates to the private method."""
        print('这是公共方法')
        self.__privateCountFun()
if __name__ == "__main__":
    # Demo: the public method works and updates the instance counter,
    # while the class-level counter stays untouched.
    counter = MyCounter()
    counter.publicCountFun()
    counter.publicCountFun()
    print('instance publicCount=%d' % counter.publicCount)
    print('Class publicCount=%d' % MyCounter.publicCount)
    # Raises AttributeError: instances cannot access the private attribute
    # print (counter.__secretCount)
    # Raises AttributeError: instances cannot access the private method
    # counter.__privateCountFun()
| 22.611111 | 58 | 0.626536 |
0865b12c4cc75efcbb60119c26590f7141bebd54 | 1,927 | py | Python | Others/cisco_crack.py | OblackatO/OffensiveSecurity | 4b2c5f981cf41cc463791f279c3ad0115c28d738 | [
"MIT"
] | 1 | 2018-05-09T05:51:35.000Z | 2018-05-09T05:51:35.000Z | Others/cisco_crack.py | pegom96/OffensiveSecurity | 4b2c5f981cf41cc463791f279c3ad0115c28d738 | [
"MIT"
] | null | null | null | Others/cisco_crack.py | pegom96/OffensiveSecurity | 4b2c5f981cf41cc463791f279c3ad0115c28d738 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time,re
# NOTE(review): a `global` statement at module level is a no-op; the names
# below are module globals anyway.
global list_colors,list_animals,list_passwd
# Word lists combined into '<Color><Animal>-guest' password candidates.
list_colors = ['Green','Yellow','Black','White','Brown','Purple','Blue','Pink','Orange','Red','Grey',\
'Violet','Gold','Magenta']
list_animals = ['Pig','Fish','Monkey','Cat','Dog','Horse','Chicken','Sheep','Macropods','Ant','Tiger',\
'Penguim','Cattle','Wolf','Bird','Whale','Lion','Giraffe','Llame','Crocodile','Boar','Rabbit','Shark',\
'Cheetah','Fox','Elephant','Dolphin','Platypus','Goat','Goldfish','Snake','Chimpanzee','Bear','Bat',\
'Alligator','Gorilla','Squirrel','Eagle','Rhinoceros','Koala']
# Filled by create_dict() before the attack starts.
list_passwd = []
def create_dict():
    """Populate the module-level password dictionary.

    Builds every '<Color><Animal>-guest' combination from list_colors and
    list_animals into list_passwd, and records the total count in the
    module global total_passwords.
    """
    global total_passwords
    # Cartesian product of colors and animals, with the '-guest' suffix.
    list_passwd.extend(color + animal + '-guest'
                       for color in list_colors
                       for animal in list_animals)
    # len() replaces the original manual O(n) counting loop.
    total_passwords = len(list_passwd)
    print('[>]Total number of passwords:', total_passwords)
def crack_password(link):
    """Try every candidate from list_passwd against the guest login page
    at *link*, stopping at the first success.

    Success is detected by the page navigating away to Google
    (bi.title == 'Google') after a candidate is submitted.
    """
    print('[>]Starting the attack')
    bi = webdriver.Firefox(executable_path='/home/alyattes_Lion/Downloads/geckodriver')
    bi.get(link)
    wait = WebDriverWait(bi, 10)
    element = wait.until(ec.presence_of_element_located((By.ID, 'guest-pass')))
    for item in list_passwd:
        time.sleep(1)  # pace the attempts so the page can react
        element.send_keys(item, Keys.ENTER)
        print('[>]Tried password:'+item)
        if bi.title == 'Google':
            print('[>]Password Found:'+item)
            # Bug fix: the original kept submitting candidates after a hit.
            break
    else:
        # Bug fix: only report failure when the whole list was exhausted
        # without a success (the original printed this even when the last
        # candidate worked).
        print('[!]Password not in dictionary')
def main():
    """Build the candidate list, ask for the login page URL, and attack it."""
    create_dict()
    link = input('[>]Copy and Paste the link of the Log In Guest WebPage:')
    crack_password(link)
# Script entry point (runs unconditionally on import as well).
main()
| 34.410714 | 103 | 0.731707 |
d1e79a95138de4c7b1b06065f2ecc9f6224967cd | 192 | py | Python | painter/tests/runner.py | adam-thomas/imperial-painter | 092c858c35dce5b2d598f1c3c5845d4838d13944 | [
"MIT"
] | null | null | null | painter/tests/runner.py | adam-thomas/imperial-painter | 092c858c35dce5b2d598f1c3c5845d4838d13944 | [
"MIT"
] | 22 | 2015-09-23T10:44:08.000Z | 2022-03-09T21:34:03.000Z | painter/tests/runner.py | adam-thomas/imperial-painter | 092c858c35dce5b2d598f1c3c5845d4838d13944 | [
"MIT"
] | null | null | null | from colour_runner.django_runner import ColourRunnerMixin
from django.test.runner import DiscoverRunner
class TestRunner(ColourRunnerMixin, DiscoverRunner):
    """Django test runner that adds colourised result output (via the
    colour_runner mixin) to the default DiscoverRunner behavior."""
| 27.428571 | 57 | 0.817708 |
9f8118f7b679c15ee81e603711275c1ae4cb7d80 | 2,407 | py | Python | metadata-ingestion/src/datahub/ingestion/api/sink.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 3,586 | 2020-01-27T11:09:57.000Z | 2022-03-15T16:13:30.000Z | metadata-ingestion/src/datahub/ingestion/api/sink.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 1,678 | 2020-01-27T20:51:01.000Z | 2022-03-15T15:22:02.000Z | metadata-ingestion/src/datahub/ingestion/api/sink.py | zhoxie-cisco/datahub | 254a73e6ca9b1ec6002fcf013ed42cb6a754d1ad | [
"Apache-2.0"
] | 924 | 2020-01-28T20:10:50.000Z | 2022-03-15T10:01:23.000Z | from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from typing import Any, List
from datahub.ingestion.api.closeable import Closeable
from datahub.ingestion.api.common import PipelineContext, RecordEnvelope, WorkUnit
from datahub.ingestion.api.report import Report
@dataclass
class SinkReport(Report):
    """Run report for a Sink: counts written records and collects the
    warnings/failures encountered along the way."""
    records_written: int = 0
    warnings: List[Any] = field(default_factory=list)
    failures: List[Any] = field(default_factory=list)
    def report_record_written(self, record_envelope: RecordEnvelope) -> None:
        """Count one successfully written record."""
        self.records_written = self.records_written + 1
    def report_warning(self, info: Any) -> None:
        """Record a non-fatal issue."""
        self.warnings.append(info)
    def report_failure(self, info: Any) -> None:
        """Record a fatal issue."""
        self.failures.append(info)
class WriteCallback(metaclass=ABCMeta):
    """Receives the outcome of an asynchronous sink write."""

    @abstractmethod
    def on_success(
        self, record_envelope: RecordEnvelope, success_metadata: dict
    ) -> None:
        """Invoked when the record was written successfully."""

    @abstractmethod
    def on_failure(
        self,
        record_envelope: RecordEnvelope,
        failure_exception: Exception,
        failure_metadata: dict,
    ) -> None:
        """Invoked when writing the record raised an error."""
class NoopWriteCallback(WriteCallback):
    """A WriteCallback that deliberately ignores both success and failure."""

    def on_success(
        self, record_envelope: RecordEnvelope, success_metadata: dict
    ) -> None:
        """Do nothing on success."""

    def on_failure(
        self,
        record_envelope: RecordEnvelope,
        failure_exception: Exception,
        failure_metadata: dict,
    ) -> None:
        """Do nothing on failure."""
# See https://github.com/python/mypy/issues/5374 for why we suppress this mypy error.
@dataclass  # type: ignore[misc]
class Sink(Closeable, metaclass=ABCMeta):
    """Base class every sink must inherit; subclasses implement the abstract hooks."""

    ctx: PipelineContext

    @classmethod
    @abstractmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "Sink":
        """Build a sink instance from its raw configuration dict."""

    @abstractmethod
    def handle_work_unit_start(self, workunit: WorkUnit) -> None:
        """Called before records belonging to *workunit* start flowing."""

    @abstractmethod
    def handle_work_unit_end(self, workunit: WorkUnit) -> None:
        """Called once all records belonging to *workunit* have been sent."""

    @abstractmethod
    def write_record_async(
        self, record_envelope: RecordEnvelope, callback: WriteCallback
    ) -> None:
        """Write one record asynchronously; implementations must invoke *callback* when done."""

    @abstractmethod
    def get_report(self) -> SinkReport:
        """Return the statistics accumulated so far."""

    @abstractmethod
    def close(self) -> None:
        """Release any resources held by the sink."""
| 25.606383 | 85 | 0.680515 |
0abd949f6fd2cd0b42355f7274f56855ac9816be | 127 | py | Python | pysitra/__init__.py | marjanmo/pysitra | deaf2b84c8ccb72207a514a1adfc43d64b23b29d | [
"MIT"
] | 1 | 2017-11-29T22:37:46.000Z | 2017-11-29T22:37:46.000Z | pysitra/__init__.py | marjanmo/pysitra | deaf2b84c8ccb72207a514a1adfc43d64b23b29d | [
"MIT"
] | null | null | null | pysitra/__init__.py | marjanmo/pysitra | deaf2b84c8ccb72207a514a1adfc43d64b23b29d | [
"MIT"
] | null | null | null | from .trans import SloTransformation,trans_2R_4params,trans_2R_6params,trans_3R_7params,csv_transformation,shp_transformation
| 42.333333 | 125 | 0.913386 |
f31a0c9f4f02ed92439611e5844a932cb71b471b | 2,542 | py | Python | bank/cards/models.py | ruixy/bank | eb4e20f2dfae07ed41751f58288f0b7500935663 | [
"Apache-2.0"
] | null | null | null | bank/cards/models.py | ruixy/bank | eb4e20f2dfae07ed41751f58288f0b7500935663 | [
"Apache-2.0"
] | null | null | null | bank/cards/models.py | ruixy/bank | eb4e20f2dfae07ed41751f58288f0b7500935663 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
# django apps
from django.db import models
class CardStatus(models.Model):
    ''' Status of a bank card (lookup table referenced by Card.status). '''
    # Display name of the status; verbose_name holds the Chinese admin label.
    name = models.CharField(max_length=16, verbose_name='名称')
    # Optional free-form note about this status.
    remark = models.TextField(blank=True, verbose_name='备注')
    def __str__(self):
        return self.name
class CardOperateType(models.Model):
    ''' Type of bank-card operation (lookup table referenced by CardHistory.operate). '''
    # Display name of the operation type.
    name = models.CharField(max_length=16, verbose_name='名称')
    # Optional free-form note about this operation type.
    remark = models.TextField(blank=True, verbose_name='备注')
    def __str__(self):
        return self.name
class Card(models.Model):
    ''' Bank card: balances and current status. '''
    # All balances are stored as plain integers (no decimal/currency type here).
    balance = models.IntegerField(verbose_name='余额', default=0)
    balance_available = models.IntegerField(verbose_name='可用金额', default=0)
    balance_freeze = models.IntegerField(verbose_name='冻结金额', default=0)
    status = models.ForeignKey(
        'CardStatus',
        on_delete=models.CASCADE,
        verbose_name='状态',
    )
    def __str__(self):
        # Human-readable form: "<card id> - <balance>".
        return '{card_id} - {balance}'.format(
            card_id=self.id,
            balance=self.balance,
        )
    def name(self):
        # Holder name lives on the related CardInfo (one-to-one back-reference).
        return self.cardinfo.name
    name.short_description = '姓名'
    def to_json(self):
        # Serialize balances and the status id into a plain dict.
        info = {
            'id': self.id,
            'balance': self.balance,
            'balance_available': self.balance_available,
            'balance_freeze': self.balance_freeze,
            'status': self.status_id,
        }
        return info
class CardInfo(models.Model):
    ''' Card holder's personal information. '''
    name = models.CharField(max_length=64, verbose_name='姓名')
    phone = models.CharField(max_length=64, verbose_name='电话', blank=True)
    email = models.EmailField(blank=True)
    # Exactly one CardInfo per Card; reachable from Card as ``card.cardinfo``.
    card = models.OneToOneField(
        'Card',
        on_delete=models.DO_NOTHING,
    )
    def __str__(self):
        return self.name
class CardHistory(models.Model):
    ''' Transaction journal (audit trail) of a bank card. '''
    # Timestamp is set automatically when the row is created.
    time = models.DateTimeField(auto_now_add=True, verbose_name='时间')
    remark = models.TextField(verbose_name='说明')
    card = models.ForeignKey(
        'Card',
        on_delete=models.DO_NOTHING,
        verbose_name='银行卡',
    )
    operate = models.ForeignKey(
        'CardOperateType',
        on_delete=models.DO_NOTHING,
        verbose_name='操作类型',
    )
    def __str__(self):
        # BUG FIX: this previously read ``self.operator_type.name``, but no such
        # field exists -- the foreign key is named ``operate`` -- so every str()
        # call raised AttributeError.
        return '{time} - {card_id} - {operator}'.format(
            time=self.time.isoformat(),
            card_id=self.card.id,
            operator=self.operate.name,
        )
| 25.42 | 75 | 0.585759 |
5eada3b78019b21c3769283316e88b6391d47b19 | 20,098 | py | Python | qiskit_nature/algorithms/pes_samplers/extrapolator.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | 1 | 2022-02-15T08:34:51.000Z | 2022-02-15T08:34:51.000Z | qiskit_nature/algorithms/pes_samplers/extrapolator.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | null | null | null | qiskit_nature/algorithms/pes_samplers/extrapolator.py | jschuhmac/qiskit-nature | b8b1181d951cf8fa76fe0db9e5ea192dad5fb186 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""An implementation to extrapolate variational parameters."""
from abc import ABC, abstractmethod
from typing import Optional, List, Dict, Union, cast, Iterable
import numpy as np
from sklearn import linear_model
from sklearn.decomposition import PCA, KernelPCA
from qiskit_nature.exceptions import QiskitNatureError
class Extrapolator(ABC):
    """
    Base class for extrapolating variational-algorithm parameters.

    Given a mapping from points (e.g. interatomic distances) to previously
    optimized variational parameters, an extrapolator predicts a good set of
    starting parameters for new points, reducing the number of function
    evaluations the variational algorithm needs. External/wrapper extrapolators
    (window, PCA, sieve) restrict or transform the data before delegating to an
    internal extrapolator (polynomial or differential) that performs the actual
    fit. Once constructed, an extrapolator only needs the point(s) of interest
    and the (point, parameters) dictionary to produce predictions.
    """

    @abstractmethod
    def extrapolate(
        self, points: List[float], param_dict: Dict[float, List[float]]
    ) -> Dict[float, List[float]]:
        """
        Extrapolate the variational parameters at the requested point(s).

        Args:
            points: Point(s) at which parameters should be predicted. Can
                represent some degree of freedom, e.g. interatomic distance.
            param_dict: Mapping from each known point to its list of
                variational parameters.

        Returns:
            Mapping from each requested point to its predicted parameters.
        """
        raise NotImplementedError()

    @staticmethod
    def factory(mode: str, **kwargs) -> "Extrapolator":
        """
        Construct a concrete extrapolator by name.

        Args:
            mode: One of 'window', 'poly', 'diff_model', 'pca' or 'l1'.
            **kwargs: Forwarded to the chosen extrapolator's constructor.

        Returns:
            A newly created extrapolator instance.

        Raises:
            QiskitNatureError: if ``mode`` is not a known extrapolator name.
        """
        constructors = {
            "window": WindowExtrapolator,
            "poly": PolynomialExtrapolator,
            "diff_model": DifferentialExtrapolator,
            "pca": PCAExtrapolator,
            "l1": SieveExtrapolator,
        }
        if mode not in constructors:
            raise QiskitNatureError(f"No extrapolator called {mode}")
        return constructors[mode](**kwargs)
class PolynomialExtrapolator(Extrapolator):
    """
    Extrapolates each variational parameter independently by fitting its history
    to a polynomial of a user-chosen degree over the known points.

    WARNING: Should only be used with window. Using no window includes points
    after the point being extrapolated in the data window.
    """

    def __init__(self, degree: int = 1) -> None:
        """
        Args:
            degree: Degree of the polynomial used for fitting in extrapolation.
        """
        self._degree = degree

    def extrapolate(
        self, points: List[float], param_dict: Optional[Dict[float, List[float]]]
    ) -> Dict[float, List[float]]:
        """
        Predict parameters at the requested point(s) via per-parameter
        polynomial/spline fits of the known (point, parameter) data.

        Args:
            points: Point(s) at which parameters should be predicted. Can
                represent some degree of freedom, e.g. interatomic distance.
            param_dict: Mapping from each known point to its list of
                variational parameters.

        Returns:
            Mapping from each requested point to its predicted parameters.
        """
        known_points = list(param_dict.keys())
        # Transpose so each row holds the history of ONE parameter across points.
        per_param_history = np.transpose(list(param_dict.values()))
        predictions = [
            np.poly1d(np.polyfit(known_points, history, deg=self._degree))(points)
            for history in per_param_history
        ]
        # Transpose back so each row corresponds to one requested point.
        per_point = np.transpose(predictions).tolist()
        return dict(zip(points, cast(Iterable[List[float]], per_point)))
class DifferentialExtrapolator(Extrapolator):
    """
    An extrapolator based on treating each param set as a point in space, and fitting a
    Hamiltonian which evolves each point to the next. The user specifies the type of regression
    model to perform fitting, and a degree which adds derivatives to the values in the point
    vector; serving as features for the regression model.
    WARNING: Should only be used with window. Using no window includes points after the
    point being extrapolated in the data window.
    """
    def __init__(
        self,
        degree: int = 1,
        model: Optional[
            Union[
                linear_model.LinearRegression,
                linear_model.Ridge,
                linear_model.RidgeCV,
                linear_model.SGDRegressor,
            ]
        ] = None,
    ) -> None:
        """
        Constructor.
        Args:
            model: Regression model (from sklearn) to be used for fitting
                variational parameters. Currently supports the following models:
                LinearRegression(), Ridge(), RidgeCV(), and SGDRegressor().
            degree: Specifies (degree -1) derivatives to be added as
                'features' in regression model.
        """
        self._degree = degree
        # Default to ordinary least squares when no model is supplied.
        self._model = model or linear_model.LinearRegression()
    def extrapolate(
        self, points: List[float], param_dict: Optional[Dict[float, List[float]]]
    ) -> Dict[float, List[float]]:
        """
        Extrapolate at specified point of interest given a set of variational parameters.
        Each parameter list and list of numerical gradients is treated as a single point
        in vector space. The regression model tries to fit a Hamiltonian that describes
        the evolution from one parameter set (and its gradient features) at point r,
        to another parameter set at point, r + epsilon. The regression model is then
        used to predict the parameter set at the point of interest. Note that this
        extrapolation technique does not explicitly use the spacing of the points
        (step size) but rather infers it from the list of parameter values.
        Args:
            points: List of point(s) to be used for extrapolation. Can represent
                some degree of freedom, ex, interatomic distance.
            param_dict: Dictionary of variational parameters. Each key is the point
                and the value is a list of the variational parameters.
        Returns:
            Dictionary of variational parameters for extrapolated point(s).
        """
        # Regression targets: every parameter set except the first -- each set is
        # predicted from the features of its predecessor.
        response = list(param_dict.values())[1:]
        # Feature block 0 is the raw parameter sets themselves.
        features = [list(param_dict.values())]
        # Append (degree - 1) successive numerical derivatives as extra features.
        for i in range(self._degree - 1):
            grad = np.gradient(features[i], axis=0)
            features.append(list(grad))
        # Concatenate raw values and derivatives into one feature vector per point.
        features = cast(List[List[List[float]]], np.concatenate(features, axis=1))
        # Fit the one-step evolution: features at step k -> parameters at step k+1.
        self._model.fit(features[:-1], response)
        # Apply the learned evolution once, starting from the last observed point.
        next_params = np.asarray(self._model.predict([features[-1]])[0].tolist())
        # The same single-step prediction is reused for every requested point.
        ret_params = {point: next_params for point in points}
        return cast(Dict[float, List[float]], ret_params)
class WindowExtrapolator(Extrapolator):
    """
    An extrapolator which wraps another extrapolator, limiting the internal extrapolator's
    ground truth parameter set to a fixed window size.
    """
    def __init__(
        self,
        extrapolator: Union[PolynomialExtrapolator, DifferentialExtrapolator] = None,
        window: int = 2,
    ) -> None:
        """
        Constructor.
        Args:
            extrapolator: 'internal' extrapolator that performs extrapolation on
                variational parameters based on data window
            window: Number of previous points to use for extrapolation. A value of zero
                indicates that all previous points will be used for bootstrapping.
        """
        self._extrapolator = extrapolator
        self._window = window
    def extrapolate(
        self, points: List[float], param_dict: Optional[Dict[float, List[float]]]
    ) -> Dict[float, List[float]]:
        """
        Extrapolate at specified point of interest given a set of variational parameters.
        Based on the specified window, a subset of the data points will be used for
        extrapolation. A default window of 2 points is used, while a value of zero indicates
        that all previous points will be used for extrapolation. This method defines the
        data window before performing the internal extrapolation.
        Args:
            points: List of point(s) to be used for extrapolation. Can represent
                some degree of freedom, ex, interatomic distance.
            param_dict: Dictionary of variational parameters. Each key is the point
                and the value is a list of the variational parameters.
        Returns:
            Dictionary of variational parameters for extrapolated point(s).
        """
        ret_params = {}
        sorted_points = sorted(points)
        # Known points strictly below the largest requested point; these anchor
        # the data windows.
        reference_points = [pt for pt in sorted(param_dict.keys()) if pt < max(sorted_points)]
        for bottom_index, bottom in enumerate(reference_points):
            # The interval handled by this anchor extends to the next known
            # point (or to infinity for the last anchor).
            if bottom_index < len(reference_points) - 1:
                top = reference_points[bottom_index + 1]
            else:
                top = float("inf")
            # Requested points falling in the half-open interval (bottom, top].
            extrapolation_group = [pt for pt in sorted_points if bottom < pt <= top]
            # Data window: the last `self._window` known points up to and
            # including the anchor (all of them if window size is exceeded
            # by zero/negative settings).
            window_points = [pt for pt in reference_points if pt <= bottom]
            if len(window_points) > self._window:
                window_points = window_points[-self._window :]
            window_param_dict = {pt: param_dict[pt] for pt in window_points}
            if extrapolation_group:
                ret_params.update(
                    self._extrapolator.extrapolate(
                        extrapolation_group, param_dict=window_param_dict
                    )
                )
        return ret_params
    @property
    def extrapolator(self) -> Extrapolator:
        """Returns the internal extrapolator.
        Returns:
            The internal extrapolator.
        """
        return self._extrapolator
    @extrapolator.setter
    def extrapolator(
        self, extrapolator: Union[PolynomialExtrapolator, DifferentialExtrapolator]
    ) -> None:
        """Sets the internal extrapolator.
        Args:
            extrapolator: The internal extrapolator to set.
        """
        self._extrapolator = extrapolator
    @property
    def window(self) -> int:
        """Returns the size of the window.
        Returns:
            The size of the window.
        """
        return self._window
    @window.setter
    def window(self, window: int) -> None:
        """Set the size of the window
        Args:
            window: the size of the window to set.
        """
        self._window = window
class PCAExtrapolator(Extrapolator):
    """
    A wrapper extrapolator which reduces the points' dimensionality with PCA,
    performs extrapolation in the transformed pca space, and inverse transforms the
    results before returning.
    A user specifies the kernel within how the PCA transformation should be done.
    """
    def __init__(
        self,
        extrapolator: Optional[Union[PolynomialExtrapolator, DifferentialExtrapolator]] = None,
        kernel: Optional[str] = None,
        window: int = 2,
    ) -> None:
        """
        Constructor.
        Args:
            extrapolator: 'internal' extrapolator that performs extrapolation on
                variational parameters based on data window.
            kernel: Kernel (from sklearn) that specifies how dimensionality
                reduction should be done for PCA. Default value is None, and switches
                the extrapolation to standard PCA.
            window: Number of previous points to use for extrapolation.
        Raises:
            QiskitNatureError: if kernel is not defined in sklearn module.
        """
        self._extrapolator = WindowExtrapolator(extrapolator=extrapolator, window=window)
        self._kernel = kernel
        if self._kernel is None:
            self._pca_model = PCA()
        elif self._kernel in ["linear", "poly", "rbf", "sigmoid", "cosine"]:
            # fit_inverse_transform is required so results can be mapped back
            # from PCA space to parameter space below.
            self._pca_model = KernelPCA(kernel=self._kernel, fit_inverse_transform=True)
        else:
            raise QiskitNatureError(f"PCA kernel type {self._kernel} not found")
    def extrapolate(
        self, points: List[float], param_dict: Optional[Dict[float, List[float]]]
    ) -> Dict[float, List[float]]:
        """
        Extrapolate at specified point of interest given a set of variational parameters.
        This method transforms the parameters in PCA space before performing the internal
        extrapolation. The parameters are transformed back to regular space after extrapolation.
        Args:
            points: List of point(s) to be used for extrapolation. Can represent
                some degree of freedom, ex, interatomic distance.
            param_dict: Dictionary of variational parameters. Each key is the point
                and the value is a list of the variational parameters.
        Returns:
            Dictionary of variational parameters for extrapolated point(s).
        """
        # run pca fitting and extrapolate in pca space
        self._pca_model.fit(list(param_dict.values()))
        updated_params = {
            pt: self._pca_model.transform([param_dict[pt]])[0] for pt in list(param_dict.keys())
        }
        output_params = self._extrapolator.extrapolate(points, param_dict=updated_params)
        # BUG FIX: the condition was inverted ('if not param'), which returned an
        # empty list for every non-empty parameter set and raised a ValueError for
        # multi-element numpy arrays (ambiguous truth value). Only non-empty
        # parameter sets can be inverse-transformed back to parameter space.
        ret_params = {
            point: self._pca_model.inverse_transform(param) if len(param) else []
            for (point, param) in output_params.items()
        }
        return ret_params
class SieveExtrapolator(Extrapolator):
    """
    A wrapper extrapolator which clusters the parameter values - either before
    extrapolation, after, or both - into two large and small clusters, and sets the
    small clusters' parameters to zero.
    """
    def __init__(
        self,
        extrapolator: Optional[Union[PolynomialExtrapolator, DifferentialExtrapolator]] = None,
        window: int = 2,
        filter_before: bool = True,
        filter_after: bool = True,
    ) -> None:
        """
        Constructor.
        Args:
            extrapolator: 'internal' extrapolator that performs extrapolation on
                variational parameters based on data window.
            window: Number of previous points to use for extrapolation.
            filter_before: Keyword to perform clustering before extrapolation.
            filter_after: Keyword to perform clustering after extrapolation.
        """
        self._extrapolator = WindowExtrapolator(extrapolator=extrapolator, window=window)
        self._filter_before = filter_before
        self._filter_after = filter_after
    def extrapolate(
        self, points: List[float], param_dict: Optional[Dict[float, List[float]]]
    ) -> Dict[float, List[float]]:
        """
        Extrapolate at specified point of interest given a set of variational parameters.
        Based on the specified window, a subset of the data points will be used for
        extrapolation. A default window of 2 points is used, while a value of zero indicates
        that all previous points will be used for extrapolation. This method finds a cutoff distance
        based on the maximum average distance or 'gap' between the average values of the variational
        parameters. This cutoff distance is used as a criteria to divide the parameters into two
        clusters by setting all parameters that are below the cutoff distance to zero.
        Args:
            points: List of point(s) to be used for extrapolation. Can represent
                some degree of freedom, ex, interatomic distance.
            param_dict: Dictionary of variational parameters. Each key is the point
                and the value is a list of the variational parameters.
        Returns:
            Dictionary of variational parameters for extrapolated point(s).
        """
        # determine clustering cutoff
        param_arr = np.transpose(list(param_dict.values()))
        # Sorted averages of log10 parameter magnitudes (taken along axis 0 of
        # the transposed array).
        param_averages = np.array(sorted(np.average(np.log10(np.abs(param_arr)), axis=0)))
        # Largest gap between consecutive sorted averages separates the "small"
        # and "large" magnitude clusters.
        gaps = param_averages[1:] - param_averages[:-1]
        max_gap = int(np.argmax(gaps))
        # Cutoff is the geometric mean of the two averages bounding the max gap.
        sieve_cutoff = 10 ** np.average([param_averages[max_gap], param_averages[max_gap + 1]])
        if self._filter_before:
            # Zero out sub-cutoff parameters before extrapolating.
            filtered_dict = {
                point: list(map(lambda x: x if np.abs(x) > sieve_cutoff else 0, param))
                for (point, param) in param_dict.items()
            }
            output_params = self._extrapolator.extrapolate(points, param_dict=filtered_dict)
        else:
            output_params = self._extrapolator.extrapolate(points, param_dict=param_dict)
        if self._filter_after:
            # Zero out sub-cutoff parameters in the extrapolated result as well.
            ret_params = cast(
                Dict[float, List[float]],
                {
                    point: np.asarray(
                        list(map(lambda x: x if np.abs(x) > sieve_cutoff else 0, param))
                    )
                    for (point, param) in output_params.items()
                },
            )
        else:
            # NOTE(review): np.asarray applied to a dict yields a 0-d object
            # array, not a Dict[float, List[float]] -- looks unintended; confirm.
            ret_params = cast(Dict[float, List[float]], np.asarray(output_params))
        return ret_params
| 41.870833 | 100 | 0.651657 |
b9287c19264f8eb0d52f2af07d433453731a65b8 | 720 | py | Python | idact/detail/log/get_logger.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | idact/detail/log/get_logger.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | idact/detail/log/get_logger.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | """This module contains functions for getting a logger from a global provider.
"""
import logging
from idact.detail.log.logger_provider import LoggerProvider
def get_logger(name: str) -> logging.Logger:
    """Return a logger whose level is managed by the global :class:`.LoggerProvider`.

    :param name: Logger name, e.g. `__name__` of the caller.
    """
    provider = LoggerProvider()
    return provider.get_logger(name=name)
def get_debug_logger(name: str) -> logging.Logger:
    """Return a logger that logs everything at DEBUG level, via the global
    :class:`.LoggerProvider`.

    :param name: Logger name, e.g. `__name__` of the caller.
    """
    provider = LoggerProvider()
    return provider.get_debug_logger(name=name)
954fec0cc73f57b10e8f4be47ef469f3ce0e3355 | 58 | py | Python | source/documentModel/comparators/__init__.py | vilaras/PyINSECT | 4cc20659c47bd5f9c394d175cb041b729f6eb132 | [
"Apache-2.0"
] | 178 | 2016-09-21T19:51:28.000Z | 2021-09-07T17:37:06.000Z | source/documentModel/comparators/__init__.py | vilaras/PyINSECT | 4cc20659c47bd5f9c394d175cb041b729f6eb132 | [
"Apache-2.0"
] | 2 | 2021-05-27T19:47:25.000Z | 2021-05-28T17:11:23.000Z | source/documentModel/comparators/__init__.py | vilaras/PyINSECT | 4cc20659c47bd5f9c394d175cb041b729f6eb132 | [
"Apache-2.0"
] | 17 | 2016-10-21T02:11:13.000Z | 2020-10-07T19:11:54.000Z | from NGramGraphSimilarity import *
from Operator import *
| 19.333333 | 34 | 0.827586 |
7b7b6ed75f7330cdf32559780f7e4df7340e92c9 | 237 | py | Python | aws-boto3-demos/ec2/2_ec2_monitor_off.py | prafulpatel16/python-practice | 9a66f83e5d846c22c117faa4e0f1e8882de2193d | [
"Apache-2.0"
] | null | null | null | aws-boto3-demos/ec2/2_ec2_monitor_off.py | prafulpatel16/python-practice | 9a66f83e5d846c22c117faa4e0f1e8882de2193d | [
"Apache-2.0"
] | null | null | null | aws-boto3-demos/ec2/2_ec2_monitor_off.py | prafulpatel16/python-practice | 9a66f83e5d846c22c117faa4e0f1e8882de2193d | [
"Apache-2.0"
] | null | null | null | import sys
import boto3
ec2 = boto3.client('ec2')
if sys.argv[1] == 'ON':
response = ec2.monitor_instance(InstaceIds=['INSTANCE_ID'])
else:
response = ec2.monitor_instance(InstaceIds=['INSTANCE_ID'])
print (response)
| 21.545455 | 64 | 0.683544 |
bb60d549d217d51d21987bb3d61f405e47ec394b | 1,099 | py | Python | day_11/day11_test.py | mickeelm/aoc2019 | 7fd532d2237e1cf0686c9b331a2b97515ee94c03 | [
"Unlicense"
] | 1 | 2021-02-02T08:32:36.000Z | 2021-02-02T08:32:36.000Z | day_11/day11_test.py | mickeelm/aoc2019 | 7fd532d2237e1cf0686c9b331a2b97515ee94c03 | [
"Unlicense"
] | null | null | null | day_11/day11_test.py | mickeelm/aoc2019 | 7fd532d2237e1cf0686c9b331a2b97515ee94c03 | [
"Unlicense"
] | null | null | null | from day_11.day11 import Panel, next_panel, Direction, rotate, count_painted_panels, print_panels
def test_next_panel():
    # One expected neighbour coordinate per facing direction, from the origin.
    expectations = {
        Direction.UP: (0, 1),
        Direction.RIGHT: (1, 0),
        Direction.DOWN: (0, -1),
        Direction.LEFT: (-1, 0),
    }
    for direction, expected in expectations.items():
        assert next_panel(Panel(0, 0), direction) == expected
def test_rotate():
    # (current facing, turn instruction, expected new facing): 0 turns left, 1 turns right.
    cases = [
        (Direction.UP, 0, Direction.LEFT),
        (Direction.UP, 1, Direction.RIGHT),
        (Direction.RIGHT, 0, Direction.UP),
        (Direction.RIGHT, 1, Direction.DOWN),
        (Direction.DOWN, 0, Direction.RIGHT),
        (Direction.DOWN, 1, Direction.LEFT),
        (Direction.LEFT, 0, Direction.DOWN),
        (Direction.LEFT, 1, Direction.UP),
    ]
    for facing, turn, expected in cases:
        assert rotate(facing, turn) == expected
def test_part_1():
    # Intcode program is a single comma-separated line in the 'input' file.
    with open('input') as puzzle:
        program = [int(code) for code in puzzle.readline().split(',')]
    assert count_painted_panels(program) == 1732
def test_part_2():
    # Intcode program is a single comma-separated line in the 'input' file.
    with open('input') as puzzle:
        program = [int(code) for code in puzzle.readline().split(',')]
    assert print_panels(program)
| 36.633333 | 97 | 0.678799 |
a221ca36be25977300b21b6f9de2b6f4b00897dc | 1,124 | py | Python | src/shijing.py | hippieZhou/chinese-poetry-db | 8a1b111a1bdfc9b08a5447975c1fe01076e75d8c | [
"MIT"
] | 28 | 2019-07-18T09:47:05.000Z | 2022-03-31T05:26:19.000Z | src/shijing.py | hippieZhou/chinese-poetry-db | 8a1b111a1bdfc9b08a5447975c1fe01076e75d8c | [
"MIT"
] | 1 | 2019-05-03T02:54:21.000Z | 2019-05-03T02:54:21.000Z | src/shijing.py | hippieZhou/chinese-poetry-db | 8a1b111a1bdfc9b08a5447975c1fe01076e75d8c | [
"MIT"
] | 4 | 2019-07-25T04:44:10.000Z | 2021-06-20T11:28:25.000Z | """
类别:诗经
"""
import sqlite3
import os
import json
def make_db(db, path):
    """Create the ``shijing`` table in the sqlite database *db* and load it
    from the ``shijing.json`` file found under *path*.

    :param db: path of the sqlite database file to create/open.
    :param path: directory that should contain ``shijing.json``
        (a JSON list of objects with title/chapter/section/content keys).
    """
    sql = '''
    CREATE TABLE IF NOT EXISTS "shijing" (
        "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
        "title" text,
        "chapter" text,
        "section" TEXT,
        "content" TEXT
        );
    '''
    print('\r\n诗经 正在初始化...')
    # BUG FIX: conn must exist before the except/finally blocks touch it;
    # previously a failed sqlite3.connect() left it unbound (NameError).
    conn = None
    try:
        conn = sqlite3.connect(db)
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        shijing_data = os.path.join(path, 'shijing.json')
        # BUG FIX: os.path.exists() returns a bool, never None, so the old
        # ``is None`` comparison could never detect a missing data file.
        if not os.path.exists(shijing_data):
            print('诗经 数据文件不存在')
            return
        print('\t', shijing_data)
        with open(shijing_data, 'r', encoding='UTF-8') as f:
            data_dict = json.load(f)
            items = [(str(item['title']), str(item['chapter']), str(item['section']), str(item['content']))
                     for item in data_dict]
            cur.executemany(
                "insert into shijing(title, chapter, section, content) values (?,?,?,?)", items)
            conn.commit()
            print('诗经 数据处理完毕.')
    except Exception as e:
        print(e)
        if conn is not None:
            conn.rollback()
    finally:
        if conn is not None:
            conn.close()
| 26.139535 | 107 | 0.549822 |
f46c45b91e59e9781844981c3d0e7ad02019679c | 35,435 | py | Python | lifetimes/plotting.py | DaniGate/lifetimes | 514d69fbc1cb721f560489d917e37a2736508109 | [
"MIT"
] | null | null | null | lifetimes/plotting.py | DaniGate/lifetimes | 514d69fbc1cb721f560489d917e37a2736508109 | [
"MIT"
] | null | null | null | lifetimes/plotting.py | DaniGate/lifetimes | 514d69fbc1cb721f560489d917e37a2736508109 | [
"MIT"
] | 1 | 2018-04-16T14:27:31.000Z | 2018-04-16T14:27:31.000Z | import sys
import os
import numpy as np
import pandas as pd
import datetime as dt
from scipy import stats
from lifetimes.utils import calculate_alive_path, expected_cumulative_transactions
PROJECTSPATH = os.environ['PROJECTSPATH']
sys.path.insert(0, os.path.join(PROJECTSPATH,'ops-scripts'))
from common.settings import settings
glovo_green = settings['style']['colors']['glovo_green']
glovo_yellow = settings['style']['colors']['glovo_yellow']
glovo_gray = settings['style']['colors']['glovo_gray']
__all__ = [
'plot_period_transactions',
'plot_calibration_purchases_vs_holdout_purchases',
'plot_frequency_recency_matrix',
'plot_probability_alive_matrix',
'plot_expected_repeat_purchases',
'plot_history_alive',
'plot_cumulative_transactions',
'plot_incremental_transactions',
'plot_transaction_rate_heterogeneity',
'plot_dropout_rate_heterogeneity'
]
def coalesce(*args):
    """Return the first argument that is not None.

    Raises StopIteration when every argument is None (matching the original
    generator-based implementation).
    """
    for candidate in args:
        if candidate is not None:
            return candidate
    raise StopIteration
def plot_period_transactions(model,
                             max_frequency=7,
                             title='Frequency of Repeat Transactions',
                             xlabel='Number of Calibration Period Transactions',
                             ylabel='Customers',
                             **kwargs):
    """
    Plot a figure with period actual and predicted transactions.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    max_frequency: int, optional
        The maximum frequency to plot.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    kwargs
        Passed into the matplotlib.pyplot.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    # 'label' is consumed here so it is not forwarded to the pandas plot call.
    labels = kwargs.pop('label', ['Actual', 'Model'])
    # Simulate a dataset of the same size as the fitted data for comparison.
    n = model.data.shape[0]
    simulated_data = model.generate_new_data(size=n)
    # Histograms of repeat-transaction counts, truncated at max_frequency bins.
    model_counts = pd.DataFrame(model.data['frequency'].value_counts().sort_index().iloc[:max_frequency])
    simulated_counts = pd.DataFrame(simulated_data['frequency'].value_counts().sort_index().iloc[:max_frequency])
    # Outer join keeps frequencies present in only one histogram; missing bins become 0.
    combined_counts = model_counts.merge(simulated_counts, how='outer', left_index=True, right_index=True).fillna(0)
    combined_counts.columns = labels
    ax = combined_counts.plot(kind='bar', **kwargs)
    # Decorate the current pyplot figure (relies on global pyplot state).
    plt.legend()
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    return ax
def plot_calibration_purchases_vs_holdout_purchases(model,
                                                    calibration_holdout_matrix,
                                                    kind="frequency_cal",
                                                    n=7,
                                                    **kwargs):
    """
    Plot calibration purchases vs holdout.
    This currently relies too much on the lifetimes.util calibration_and_holdout_data function.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    calibration_holdout_matrix: pandas DataFrame
        Dataframe from calibration_and_holdout_data function.
    kind: str, optional
        x-axis :"frequency_cal". Purchases in calibration period,
               "recency_cal". Age of customer at last purchase,
               "T_cal". Age of customer at the end of calibration period,
               "time_since_last_purchase". Time since user made last purchase
    n: int, optional
        Number of ticks on the x axis
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    x_labels = {
        "frequency_cal": "Purchases in calibration period",
        "recency_cal": "Age of customer at last purchase",
        "T_cal": "Age of customer at the end of calibration period",
        "time_since_last_purchase": "Time since user made last purchase"
    }
    # Work on a copy so the caller's dataframe is not mutated.
    summary = calibration_holdout_matrix.copy()
    # NOTE(review): only row 0 is consulted, so this assumes every customer
    # shares the same holdout duration -- confirm against the data builder.
    duration_holdout = summary.iloc[0]['duration_holdout']
    # Per-customer expected purchases over the holdout period, from the model.
    summary['model_predictions'] = summary.apply(lambda r: model.conditional_expected_number_of_purchases_up_to_time(duration_holdout, r['frequency_cal'], r['recency_cal'], r['T_cal']), axis=1)
    if kind == "time_since_last_purchase":
        # Derived x-axis: gap between end of calibration and last purchase.
        summary["time_since_last_purchase"] = summary["T_cal"] - summary["recency_cal"]
        ax = summary.groupby(["time_since_last_purchase"])[['frequency_holdout', 'model_predictions']].mean().iloc[:n].plot(**kwargs)
    else:
        # Average actual vs predicted holdout purchases, bucketed by `kind`,
        # limited to the first n buckets.
        ax = summary.groupby(kind)[['frequency_holdout', 'model_predictions']].mean().iloc[:n].plot(**kwargs)
    plt.title('Actual Purchases in Holdout Period vs Predicted Purchases')
    plt.xlabel(x_labels[kind])
    plt.ylabel('Average of Purchases in Holdout Period')
    plt.legend()
    return ax
def plot_frequency_recency_population(summary_data,
                                      max_frequency=None,
                                      max_recency=None,
                                      title=None,
                                      xlabel="Customer's Historical Frequency",
                                      ylabel="Customer's Recency",
                                      ax=None,
                                      **kwargs):
    """
    Plot distribution of customers in a recency-frequency matrix as heatmap.
    Cell color encodes log10 of the number of customers with that
    (frequency_cal, recency_cal) combination; colorbar ticks are converted
    back to plain counts.
    Parameters
    ----------
    summary_data: RF dataframe
        Dataframe containing recency-frequency feature vectors for all users.
    max_frequency: int, optional
        The maximum frequency to plot. Default is max observed frequency.
    max_recency: int, optional
        The maximum recency to plot. This also determines the age of the customer.
        Default to max observed age.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the matplotlib.imshow command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    if max_frequency is None:
        max_frequency = int(summary_data['frequency_cal'].max())
    if max_recency is None:
        max_recency = int(summary_data['T_cal'].max())
    # Count customers per (frequency, recency) cell.
    population_matrix = summary_data.groupby(['frequency_cal', 'recency_cal']
                                             )['T_cal'].count(
                                             ).reset_index(
                                             ).rename(columns={'T_cal': 'num_customers'})
    # Log-scale the counts so sparsely populated cells stay visible.
    population_matrix['num_customers'] = np.log10(population_matrix['num_customers'])
    # Build the full recency x frequency grid via a cross join on a dummy key,
    # so cells without customers still exist in the heatmap (as NaN).
    Z = pd.merge(pd.DataFrame(np.transpose([list(range(max_recency)), [1] * max_recency]),
                              columns=['recency_cal', 'dummy']),
                 pd.DataFrame(np.transpose([list(range(max_frequency)), [1] * max_frequency]),
                              columns=['frequency_cal', 'dummy']),
                 on='dummy')
    # BUGFIX: DataFrame.drop('dummy', 1) passed `axis` positionally, which was
    # deprecated in pandas 1.0 and removed in 2.0; use the `columns` keyword.
    Z.drop(columns='dummy', inplace=True)
    Z = pd.merge(Z, population_matrix, on=['recency_cal', 'frequency_cal'], how='left')
    interpolation = kwargs.pop('interpolation', 'none')
    if ax is None:
        ax = plt.subplot(111)
    PCM = ax.imshow(Z.pivot(index='recency_cal',
                            columns='frequency_cal',
                            values='num_customers').values,
                    interpolation=interpolation, **kwargs)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if title is None:
        title = 'Number of Customers (log)\nby Frequency and Recency'
    ax.set_title(title)
    # turn matrix into square
    forceAspect(ax)
    # plot colorbar beside matrix; tick labels converted from log10 back to counts
    cb = plt.colorbar(PCM, ax=ax)
    cb.set_ticklabels([str(round(np.power(10, x), 1)) for x in cb.get_ticks()])
    cb.set_label('Number of customers')
    return ax
def plot_frequency_recency_matrix(model,
                                  T=1,
                                  max_frequency=None,
                                  max_recency=None,
                                  title=None,
                                  xlabel="Customer's Historical Frequency",
                                  ylabel="Customer's Recency",
                                  ax=None,
                                  **kwargs):
    """
    Plot recency-frequency matrix as heatmap.
    Plot a figure of expected transactions in T next units of time
    by a customer's frequency and recency.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    T: float, optional
        Next units of time to make predictions for
    max_frequency: int, optional
        The maximum frequency to plot. Default is max observed frequency.
    max_recency: int, optional
        The maximum recency to plot. This also determines the age of the
        customer. Default to max observed age.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the matplotlib.imshow command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    if max_frequency is None:
        max_frequency = int(model.data['frequency'].max())
    if max_recency is None:
        max_recency = int(model.data['T'].max())
    # Expected purchases in the next T periods for every (recency, frequency)
    # cell; note every cell is evaluated at age == max_recency.
    Z = np.zeros((max_recency + 1, max_frequency + 1))
    for i, recency in enumerate(np.arange(max_recency + 1)):
        for j, frequency in enumerate(np.arange(max_frequency + 1)):
            Z[i, j] = model.conditional_expected_number_of_purchases_up_to_time(
                T, frequency, recency, max_recency)
    interpolation = kwargs.pop('interpolation', 'none')
    if ax is None:
        ax = plt.subplot(111)
    PCM = ax.imshow(Z, interpolation=interpolation, **kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if title is None:
        # "s"[T == 1:] yields "" when T == 1 and "s" otherwise (pluralization).
        title = 'Expected Number of Future Purchases for {} Unit{} of Time,'. \
            format(T, "s"[T == 1:]) + '\nby Frequency and Recency of a Customer'
    plt.title(title)
    # turn matrix into square
    forceAspect(ax)
    # plot colorbar beside matrix
    plt.colorbar(PCM, ax=ax)
    return ax
def plot_rfm_matrix(model,
                    rfm_data,
                    horizon=1,
                    T=None,
                    revenue_type='gross',
                    max_frequency=None,
                    max_recency=None,
                    title=None,
                    xlabel="Customer's Historical Frequency",
                    ylabel="Customer's Recency",
                    ax=None,
                    log=True,
                    **kwargs):
    """
    Plot recency-frequency matrix as heatmap, with color indicating RLTV
    per customer.
    Plot a figure of expected transactions in `horizon` next units of time
    by a customer's frequency and recency, multiplied by the cell's mean
    monetary value (and mean margin when revenue_type == 'net').
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    rfm_data: pandas DataFrame
        Calibration RFM data with 'frequency_cal', 'recency_cal',
        'monetary_value_cal' and 'margin_cal' columns.
    horizon: float, optional
        Next units of time to make predictions for
    T: integer
        age of the user; defaults to max_recency when omitted
    revenue_type: str, optional
        'gross' plots revenue as-is; 'net' additionally multiplies by the
        cell's mean margin (interpreted as a percentage).
    max_frequency: int, optional
        The maximum frequency to plot. Default is max observed frequency.
    max_recency: int, optional
        The maximum recency to plot. This also determines the age of the
        customer. Default to max observed age.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    log: bool, optional
        When True color encodes log10 of revenue; cells <= 0.01 are blanked.
    kwargs
        Passed into the matplotlib.imshow command.
    Returns
    -------
    (axes, Z): matplotlib.AxesSubplot and the plotted matrix
    """
    from matplotlib import pyplot as plt
    if max_frequency is None:
        max_frequency = int(model.data['frequency'].max())
    if max_recency is None:
        max_recency = int(model.data['T'].max())
    # Mean observed spend and margin per (frequency, recency) cell.
    mean_monetary_value = rfm_data.groupby(['frequency_cal','recency_cal']
                                           )['monetary_value_cal'
                                             ].mean(
                                           ).reset_index()
    mean_margin = rfm_data.groupby(['frequency_cal','recency_cal']
                                   )['margin_cal'
                                     ].mean(
                                   ).reset_index()
    Z = np.zeros((max_recency + 1, max_frequency + 1))
    if T is None:
        T = max_recency
    for i, recency in enumerate(np.arange(max_recency + 1)):
        for j, frequency in enumerate(np.arange(max_frequency + 1)):
            exp_purchases = model.conditional_expected_number_of_purchases_up_to_time(
                horizon, frequency, recency, T)
            money = mean_monetary_value[
                (mean_monetary_value['frequency_cal']==frequency)&
                (mean_monetary_value['recency_cal']==recency)
                ]['monetary_value_cal']
            # Cells with no observed customers fall back to zero spend/margin.
            if not money.empty:
                money = money.values[0]
            else:
                money = 0.
            margin = mean_margin[
                (mean_margin['frequency_cal']==frequency)&
                (mean_margin['recency_cal']==recency)
                ]['margin_cal']
            if not margin.empty:
                margin = margin.values[0]
            else:
                margin = 0.
            Z[i, j] = exp_purchases * money
            if revenue_type == 'net':
                # margin is treated as a percentage (0-100).
                Z[i, j] *= 0.01 * margin
            if log:
                # Blank (NaN) cells below 0.01 so they render empty in the heatmap.
                if Z[i, j] > 0.01:
                    Z[i, j] = np.log10(Z[i, j])
                else:
                    Z[i, j] = None
            elif Z[i, j] <= 0.01:
                Z[i, j] = None
    interpolation = kwargs.pop('interpolation', 'none')
    if ax is None:
        ax = plt.subplot(111)
    PCM = ax.imshow(Z, interpolation=interpolation, **kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if title is None:
        revenue_type = revenue_type[0].upper()+revenue_type[1:].lower()
        # "s"[horizon == 1:] pluralizes "Unit" when horizon != 1.
        title = 'Expected {} Revenue for {} Unit{} of Time,'. \
            format(revenue_type, horizon, "s"[horizon == 1:]) + '\nby Frequency and Recency of a Customer'
    plt.title(title)
    # turn matrix into square
    forceAspect(ax)
    # plot colorbar beside matrix; tick labels converted from log10 back to revenue
    cb = plt.colorbar(PCM, ax=ax)
    cb.set_ticklabels([str(round(np.power(10,x),1)) for x in cb.get_ticks()])
    # NOTE(review): label hard-codes "@ 3 months" regardless of `horizon` - confirm.
    cb.set_label('Customer RLTV @ 3 months')
    # NOTE(review): unlike the sibling plot_* functions this one calls plt.show().
    plt.show()
    return ax, Z
def plot_integral_rfm_matrix(model,
                             rfm_data,
                             horizon=1,
                             revenue_type='gross',
                             max_frequency=None,
                             max_recency=None,
                             title=None,
                             xlabel="Customer's Historical Frequency",
                             ylabel="Customer's Recency",
                             ax=None,
                             log=True,
                             **kwargs):
    """
    Plot recency-frequency matrix as heatmap, aggregated over all customer
    ages according to the distribution in our user base, with color
    indicating total RLTV at the provided horizon.
    Each cell's expected purchases are summed over the observed age (T_cal)
    distribution of that (frequency, recency) cell, weighted by how many
    customers have each age, then multiplied by the cell's mean spend.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    rfm_data: pandas DataFrame
        Calibration RFM data (indexed by customer 'id') with
        'frequency_cal', 'recency_cal', 'T_cal', 'monetary_value_cal'
        and 'margin_cal' columns.
    horizon: float, optional
        Next units of time to make predictions for
    revenue_type: str, optional
        'gross' plots revenue as-is; 'net' additionally multiplies by the
        cell's mean margin (interpreted as a percentage).
    max_frequency: int, optional
        The maximum frequency to plot. Default is max observed frequency.
    max_recency: int, optional
        The maximum recency to plot. This also determines the age of the
        customer. Default to max observed age.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    log: bool, optional
        When True color encodes log10 of revenue; cells <= 0.01 are blanked.
    kwargs
        Passed into the matplotlib.imshow command.
    Returns
    -------
    (axes, Z): matplotlib.AxesSubplot and the plotted matrix
    """
    from matplotlib import pyplot as plt
    if max_frequency is None:
        max_frequency = int(model.data['frequency'].max())
    if max_recency is None:
        max_recency = int(model.data['T'].max())
    # Mean observed spend and margin per (frequency, recency) cell.
    mean_monetary_value = rfm_data.groupby(['frequency_cal','recency_cal']
                                           )['monetary_value_cal'
                                             ].mean(
                                           ).reset_index()
    mean_margin = rfm_data.groupby(['frequency_cal','recency_cal']
                                   )['margin_cal'
                                     ].mean(
                                   ).reset_index()
    # Customer counts per (frequency, recency, age) cell, used as weights.
    age_count = rfm_data.reset_index(
        ).groupby(['frequency_cal','recency_cal','T_cal']
        )['id'
        ].count(
        ).reset_index()
    # Total population size; kept for an optional normalization (currently unused).
    N_total = age_count['id'].sum()
    Z = np.zeros((max_recency + 1, max_frequency + 1))
    for i, recency in enumerate(np.arange(max_recency + 1)):
        for j, frequency in enumerate(np.arange(max_frequency + 1)):
            # Sum expected purchases over the cell's observed age distribution.
            exp_purchases = 0.
            partial_age_count = age_count[
                (age_count['frequency_cal']==frequency)&
                (age_count['recency_cal']==recency)
                ]
            for T, N in partial_age_count[['T_cal','id']].values:
                exp_purchases += N * model.conditional_expected_number_of_purchases_up_to_time(
                    horizon, frequency, recency, T)
            money = mean_monetary_value[
                (mean_monetary_value['frequency_cal']==frequency)&
                (mean_monetary_value['recency_cal']==recency)
                ]['monetary_value_cal']
            # Cells with no observed customers fall back to zero spend/margin.
            if not money.empty:
                money = money.values[0]
            else:
                money = 0.
            margin = mean_margin[
                (mean_margin['frequency_cal']==frequency)&
                (mean_margin['recency_cal']==recency)
                ]['margin_cal']
            if not margin.empty:
                margin = margin.values[0]
            else:
                margin = 0.
            Z[i, j] = exp_purchases * money
            if revenue_type == 'net':
                # margin is treated as a percentage (0-100).
                Z[i, j] *= 0.01 * margin
            if log:
                # Blank (NaN) cells below 0.01 so they render empty in the heatmap.
                if Z[i, j] > 0.01:
                    Z[i, j] = np.log10(Z[i, j])
                else:
                    Z[i, j] = None
            elif Z[i, j] <= 0.01:
                Z[i, j] = None
    interpolation = kwargs.pop('interpolation', 'none')
    if ax is None:
        ax = plt.subplot(111)
    PCM = ax.imshow(Z, interpolation=interpolation, **kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if title is None:
        revenue_type = revenue_type[0].upper()+revenue_type[1:].lower()
        # "s"[horizon == 1:] pluralizes "Unit" when horizon != 1.
        title = 'Expected {} Revenue for {} Unit{} of Time,'. \
            format(revenue_type, horizon, "s"[horizon == 1:]) + '\nby Frequency and Recency of a Customer'
    plt.title(title)
    # turn matrix into square
    forceAspect(ax)
    # plot colorbar beside matrix; tick labels converted from log10 back to revenue.
    # BUGFIX: removed two leftover debug print(cb.get_ticks()) statements.
    cb = plt.colorbar(PCM, ax=ax)
    cb.set_ticklabels([str(round(np.power(10,x),1)) for x in cb.get_ticks()])
    # NOTE(review): label hard-codes "@ 3 months" regardless of `horizon` - confirm.
    cb.set_label('Total RLTV @ 3 months')
    return ax, Z
def plot_probability_alive_matrix(model,
                                  max_frequency=None,
                                  max_recency=None,
                                  title='Probability Customer is Alive,\nby Frequency and Recency of a Customer',
                                  xlabel="Customer's Historical Frequency",
                                  ylabel="Customer's Recency",
                                  ax=None,
                                  **kwargs):
    """
    Plot probability alive matrix as heatmap.
    Plot a figure of the probability a customer is alive based on their
    frequency and recency.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    max_frequency: int, optional
        The maximum frequency to plot. Default is max observed frequency.
    max_recency: int, optional
        The maximum recency to plot. This also determines the age of the
        customer. Default to max observed age.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the matplotlib.imshow command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    # The model computes the full matrix itself (None lets it pick defaults).
    z = model.conditional_probability_alive_matrix(max_frequency, max_recency)
    interpolation = kwargs.pop('interpolation', 'none')
    if ax is None:
        ax = plt.subplot(111)
    PCM = ax.imshow(z, interpolation=interpolation, **kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    # turn matrix into square
    forceAspect(ax)
    # plot colorbar beside matrix
    cb = plt.colorbar(PCM, ax=ax)
    cb.set_label('Survival probability')
    return ax
def plot_expected_repeat_purchases(model,
                                   title='Expected Number of Repeat Purchases per Customer',
                                   xlabel='Time Since First Purchase',
                                   ax=None,
                                   label=None,
                                   **kwargs):
    """
    Plot expected repeat purchases on calibration period.
    Draws the model's expected cumulative repeat purchases as a solid line up
    to the maximum observed customer age, then extrapolates 50% further with
    a dashed line in the same color.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    label: str, optional
        Label for plot.
    kwargs
        Passed into the matplotlib.pyplot.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.subplot(111)
    # Pick one color from the axes' cycle so solid and dashed segments match,
    # unless the caller passed c=/color= explicitly.
    # NOTE(review): `_get_lines.prop_cycler` is a private matplotlib attribute
    # (removed in matplotlib 3.8) - confirm against the pinned matplotlib version.
    if plt.matplotlib.__version__ >= "1.5":
        color_cycle = ax._get_lines.prop_cycler
        color = coalesce(kwargs.pop('c', None),
                         kwargs.pop('color', None),
                         next(color_cycle)['color'])
    else:
        color_cycle = ax._get_lines.color_cycle
        color = coalesce(kwargs.pop('c', None),
                         kwargs.pop('color', None), next(color_cycle))
    max_T = model.data['T'].max()
    # Solid line over the observed range ...
    times = np.linspace(0, max_T, 100)
    ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, label=label, **kwargs)
    # ... dashed extrapolation beyond it.
    times = np.linspace(max_T, 1.5 * max_T, 100)
    ax.plot(times, model.expected_number_of_purchases_up_to_time(times), color=color, ls='--', **kwargs)
    plt.title(title)
    plt.xlabel(xlabel)
    if label is not None:
        plt.legend(loc='lower right')
    return ax
def plot_history_alive(model, t, transactions, datetime_col, freq='D',
                       start_date=None, end_date=None, ax=None,
                       title='Evolution of survival probability', **kwargs):
    """
    Draw a graph showing the probablility of being alive for a customer in time.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model.
    t: int
        the number of time units since the birth we want to draw the p_alive
    transactions: pandas DataFrame
        DataFrame containing the transactions history of the customer_id
    datetime_col: str
        The column in the transactions that denotes the datetime the purchase
        was made (expected as "%Y-%m-%d" strings)
    freq: str, optional
        Default 'D' for days. Other examples= 'W' for weekly
    start_date: datetime, optional
        Limit xaxis to start date; defaults to 7 days before the first purchase
    end_date: str
        End of the plotted range as "%Y-%m-%d".
        NOTE(review): despite the None default this is effectively required -
        passing None raises a TypeError at strptime below; confirm callers.
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    title: str, optional
        Title of the plot.
    kwargs
        Passed into the matplotlib.pyplot.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    first_transaction_date = transactions[datetime_col].min()
    if start_date is None:
        # Default: one week of padding before the first observed purchase.
        start_date = dt.datetime.strptime(first_transaction_date,"%Y-%m-%d") - \
            dt.timedelta(days=7)
    if ax is None:
        ax = plt.subplot(111)
    # Get purchasing history of user
    customer_history = transactions[[datetime_col]].copy()
    customer_history.index = pd.DatetimeIndex(customer_history[datetime_col])
    # Add transactions column
    customer_history['transactions'] = 1
    # One row per period with the number of purchases in that period.
    customer_history = customer_history.resample(freq).sum()
    current_date = dt.datetime.strftime(dt.datetime.now(),"%Y-%m-%d")
    # Date up to which the model has "seen" data: first purchase + t units.
    refresh_date = dt.datetime.strftime(
        dt.datetime.strptime(first_transaction_date,"%Y-%m-%d") + dt.timedelta(days=t),
        "%Y-%m-%d")
    periods_to_plot = (dt.datetime.strptime(end_date,"%Y-%m-%d") \
        - start_date
        ).days
    if freq == 'W':
        periods_to_plot = int(periods_to_plot/7.)
    # plot alive_path; the 7 leading Nones cover the padding before first purchase.
    path = pd.concat(
        [pd.Series([None]*7),
         calculate_alive_path(model, transactions, datetime_col, periods_to_plot, freq)*100.])
    path_dates = pd.date_range(start=start_date, periods=len(path), freq=freq)
    # Solid line up to the refresh date, dashed projection after it.
    past_dates_path = path_dates[path_dates<=refresh_date]
    past_path = path[path_dates<=refresh_date]
    future_dates_path = path_dates[path_dates>=refresh_date]
    future_path = path[path_dates>=refresh_date]
    plt.plot(past_dates_path, past_path, color=glovo_gray, ls='-', label='P_alive')
    plt.plot(future_dates_path, future_path, color=glovo_gray, ls='--')
    # plot buying dates
    payment_dates = customer_history[customer_history['transactions'] >= 1].index
    plt.vlines(payment_dates.values, ymin=0, ymax=100,
               colors=glovo_green, linestyles='dashed', label='Orders')
    plt.vlines(refresh_date, ymin=0, ymax=100,
               colors=glovo_yellow, linestyles='solid', label='Last refresh')
    plt.vlines(current_date, ymin=0, ymax=100,
               colors='red', linestyles='solid', label='Today')
    plt.ylim(0, 105)
    plt.yticks(np.arange(0, 110, 10))
    plt.xticks(rotation=-20.)
    plt.xlim(start_date, path_dates[-1])
    plt.legend(loc=3)
    plt.ylabel('P_alive (%)')
    plt.title(title)
    return ax
def plot_cumulative_transactions(model, transactions, datetime_col, customer_id_col, t, t_cal,
                                 datetime_format=None, freq='D', set_index_date=False,
                                 title='Tracking Cumulative Transactions',
                                 xlabel='day', ylabel='Cumulative Transactions',
                                 ax=None, **kwargs):
    """
    Plot a figure of the predicted and actual cumulative transactions of users.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model
    transactions: pandas DataFrame
        DataFrame containing the transactions history of the customer_id
    datetime_col: str
        The column in transactions that denotes the datetime the purchase was made.
    customer_id_col: str
        The column in transactions that denotes the customer_id
    t: float
        The number of time units since the begining of
        data for which we want to calculate cumulative transactions
    t_cal: float
        A marker used to indicate where the vertical line for plotting should be.
    datetime_format: str, optional
        A string that represents the timestamp format. Useful if Pandas
        can't understand the provided format.
    freq: str, optional
        Default 'D' for days, 'W' for weeks, 'M' for months... etc.
        Full list here:
        http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
    set_index_date: bool, optional
        When True set date as Pandas DataFrame index, default False - number of time units
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the pandas.DataFrame.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.subplot(111)
    # Actual vs predicted cumulative transactions, one column each.
    df_cum_transactions = expected_cumulative_transactions(model, transactions, datetime_col,
                                                           customer_id_col, t,
                                                           datetime_format=datetime_format, freq=freq,
                                                           set_index_date=set_index_date)
    ax = df_cum_transactions.plot(ax=ax, title=title, **kwargs)
    # Vertical line marking the end of the calibration period.
    if set_index_date:
        x_vline = df_cum_transactions.index[int(t_cal)]
        xlabel = 'date'
    else:
        x_vline = t_cal
    ax.axvline(x=x_vline, color='r', linestyle='--')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_incremental_transactions(model, transactions, datetime_col, customer_id_col, t, t_cal,
                                  datetime_format=None, freq='D', set_index_date=False,
                                  title='Tracking Daily Transactions',
                                  xlabel='day', ylabel='Transactions',
                                  ax=None, **kwargs):
    """
    Plot a figure of the predicted and actual incremental (per-period)
    transactions of users.
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model
    transactions: pandas DataFrame
        DataFrame containing the transactions history of the customer_id
    datetime_col: str
        The column in transactions that denotes the datetime the purchase was made.
    customer_id_col: str
        The column in transactions that denotes the customer_id
    t: float
        The number of time units since the begining of
        data for which we want to calculate cumulative transactions
    t_cal: float
        A marker used to indicate where the vertical line for plotting should be.
    datetime_format: str, optional
        A string that represents the timestamp format. Useful if Pandas
        can't understand the provided format.
    freq: str, optional
        Default 'D' for days, 'W' for weeks, 'M' for months... etc.
        Full list here:
        http://pandas.pydata.org/pandas-docs/stable/timeseries.html#dateoffset-objects
    set_index_date: bool, optional
        When True set date as Pandas DataFrame index, default False - number of time units
    title: str, optional
        Figure title
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the pandas.DataFrame.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.subplot(111)
    df_cum_transactions = expected_cumulative_transactions(model, transactions, datetime_col,
                                                           customer_id_col, t,
                                                           datetime_format=datetime_format, freq=freq,
                                                           set_index_date=set_index_date)
    # Convert cumulative counts into per-period increments.
    # DataFrame.diff() is the idiomatic, column-wise equivalent of x - x.shift(1).
    df_inc_transactions = df_cum_transactions.diff()
    ax = df_inc_transactions.plot(ax=ax, title=title, **kwargs)
    # Vertical line marking the end of the calibration period.
    if set_index_date:
        x_vline = df_inc_transactions.index[int(t_cal)]
        xlabel = 'date'
    else:
        x_vline = t_cal
    ax.axvline(x=x_vline, color='r', linestyle='--')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return ax
def plot_transaction_rate_heterogeneity(model,
                                        title='Heterogeneity in Transaction Rate',
                                        xlabel='Transaction Rate',
                                        ylabel='Density',
                                        suptitle_fontsize=14,
                                        ax=None,
                                        **kwargs):
    """
    Plot the estimated gamma distribution of lambda (customers' propensities to purchase).
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model, for now only for BG/NBD
    title: str, optional
        Figure title (the fitted mean/variance are appended on a second line)
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    suptitle_fontsize: int, optional
        Currently unused; kept for interface compatibility.
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the matplotlib.pyplot.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    #TODO Include a text with the PDF with the fitted values
    from matplotlib import pyplot as plt
    r, alpha = model._unload_params('r', 'alpha')
    # Mean and variance of a Gamma(shape=r, rate=alpha) distribution.
    rate_mean = r / alpha
    rate_var = r / alpha ** 2
    rv = stats.gamma(r, scale=1 / alpha)
    # Plot up to the 99th percentile to keep the tail from dominating the axis.
    lim = rv.ppf(0.99)
    x = np.linspace(0, lim, 100)
    if ax is None:
        fig, ax = plt.subplots(1)
    ax.set_title(title+'\nmean: {:.3f}, var: {:.3f}'.format(rate_mean, rate_var))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.plot(x, rv.pdf(x), **kwargs)
    return ax
def plot_dropout_rate_heterogeneity(model,
                                    title='Heterogeneity in Dropout Probability',
                                    xlabel='Dropout Probability p',
                                    ylabel='Density',
                                    suptitle_fontsize=14,
                                    ax=None,
                                    **kwargs):
    """
    Plot the estimated beta distribution of p.
    p - (customers' probability of dropping out immediately after a transaction).
    Parameters
    ----------
    model: lifetimes model
        A fitted lifetimes model, for now only for BG/NBD
    title: str, optional
        Figure title (the fitted mean/variance are appended on a second line)
    xlabel: str, optional
        Figure xlabel
    ylabel: str, optional
        Figure ylabel
    suptitle_fontsize: int, optional
        Currently unused; kept for interface compatibility.
    ax: matplotlib.AxesSubplot, optional
        Using user axes
    kwargs
        Passed into the matplotlib.pyplot.plot command.
    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    #TODO Include a text with the PDF with the fitted values
    from matplotlib import pyplot as plt
    a, b = model._unload_params('a', 'b')
    # Mean and variance of a Beta(a, b) distribution.
    beta_mean = a / (a + b)
    beta_var = a * b / ((a + b) ** 2) / (a + b + 1)
    rv = stats.beta(a, b)
    # Plot up to the 99th percentile to keep the tail from dominating the axis.
    lim = rv.ppf(0.99)
    x = np.linspace(0, lim, 100)
    if ax is None:
        fig, ax = plt.subplots(1)
    ax.set_title(title+'\nmean: {:.3f}, var: {:.3f}'.format(beta_mean, beta_var))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # fig.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.plot(x, rv.pdf(x), **kwargs)
    return ax
def forceAspect(ax, aspect=1):
    """Force the axes' data aspect ratio so the heatmap renders as a square.

    Reads the extent of the first image drawn on `ax` (raises IndexError if
    none) and sets the aspect to width/height divided by `aspect`.
    """
    x_min, x_max, y_min, y_max = ax.get_images()[0].get_extent()
    width_over_height = abs((x_max - x_min) / (y_max - y_min))
    ax.set_aspect(width_over_height / aspect)
| 35.15377 | 193 | 0.584366 |
39ccef52633c9a1ecf36491a4baa0431b35496bf | 24,427 | py | Python | src/containerapp/azext_containerapp/_utils.py | zoharHenMicrosoft/azure-cli-extensions | 4b9b8d19d029f2f736c54b263e8a9f7894499dc3 | [
"MIT"
] | 1 | 2022-01-25T07:33:18.000Z | 2022-01-25T07:33:18.000Z | src/containerapp/azext_containerapp/_utils.py | zoharHenMicrosoft/azure-cli-extensions | 4b9b8d19d029f2f736c54b263e8a9f7894499dc3 | [
"MIT"
] | null | null | null | src/containerapp/azext_containerapp/_utils.py | zoharHenMicrosoft/azure-cli-extensions | 4b9b8d19d029f2f736c54b263e8a9f7894499dc3 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument
from urllib.parse import urlparse
from azure.cli.command_modules.appservice.custom import (_get_acr_cred)
from azure.cli.core.azclierror import (ValidationError, RequiredArgumentMissingError)
from azure.cli.core.commands.client_factory import get_subscription_id
from knack.log import get_logger
from msrestazure.tools import parse_resource_id
from ._clients import ContainerAppClient
from ._client_factory import handle_raw_exception, providers_client_factory, cf_resource_groups, log_analytics_client_factory, log_analytics_shared_key_client_factory
logger = get_logger(__name__)
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    groups_client = cf_resource_groups(cli_ctx)
    return groups_client.get(resource_group_name).location
def _validate_subscription_registered(cmd, resource_provider, subscription_id=None):
    """Raise ValidationError if the subscription is not registered for the
    given resource provider.

    Any failure other than the validation itself (e.g. the providers client
    being unreachable) is deliberately swallowed - this is a best-effort check.
    """
    if not subscription_id:
        subscription_id = get_subscription_id(cmd.cli_ctx)
    try:
        providers_client = providers_client_factory(cmd.cli_ctx, subscription_id)
        provider = providers_client.get(resource_provider)
        state = getattr(provider, 'registration_state', "NotRegistered")
        if not state or state.lower() != 'registered':
            raise ValidationError('Subscription {} is not registered for the {} resource provider. Please run \"az provider register -n {} --wait\" to register your subscription.'.format(
                subscription_id, resource_provider, resource_provider))
    except ValidationError:
        raise
    except Exception:  # pylint: disable=broad-except
        pass
def _ensure_location_allowed(cmd, location, resource_provider, resource_type):
    """Raise ValidationError if `location` is not a supported location for the
    given resource provider/type; any other failure is silently ignored
    (best-effort check)."""
    providers_client = None
    try:
        providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx))
        if providers_client is not None:
            resource_types = getattr(providers_client.get(resource_provider), 'resource_types', [])
            res_locations = []
            # Find the locations list of the matching resource type
            # (the last match wins if several share the same type name).
            for res in resource_types:
                if res and getattr(res, 'resource_type', "") == resource_type:
                    res_locations = getattr(res, 'locations', [])
            # Normalize "East US (Stage)" -> "eastusstage" for comparison.
            res_locations = [res_loc.lower().replace(" ", "").replace("(", "").replace(")", "") for res_loc in res_locations if res_loc.strip()]
            location_formatted = location.lower().replace(" ", "")
            if location_formatted not in res_locations:
                raise ValidationError("Location '{}' is not currently supported. To get list of supported locations, run `az provider show -n {} --query \"resourceTypes[?resourceType=='{}'].locations\"`".format(
                    location, resource_provider, resource_type))
    except ValidationError as ex:
        raise ex
    except Exception:  # pylint: disable=broad-except
        pass
def parse_env_var_flags(env_list, is_update_containerapp=False):
    """
    Parse "<key>=<value>" / "<key>=secretref:<secret name>" strings into the
    environment-variable definitions expected by the Container Apps API.

    Parameters
    ----------
    env_list: list of str
        Raw "key=value" pairs from the command line.
    is_update_containerapp: bool, optional
        Kept for interface compatibility; the error message is identical on
        both the create and update paths.

    Returns
    -------
    list of dict
        One {"name": ..., "value": ...} or {"name": ..., "secretRef": ...}
        entry per input pair, in input order.

    Raises
    ------
    ValidationError
        On a malformed pair or a duplicate variable name.
    """
    env_pairs = {}
    for pair in env_list:
        # Split on the first '=' only, so values may themselves contain '='.
        key_val = pair.split('=', 1)
        if len(key_val) != 2:
            # BUGFIX: the original raised the identical message in both branches
            # of an `if is_update_containerapp` check; a single raise suffices.
            raise ValidationError("Environment variables must be in the format \"<key>=<value> <key>=secretref:<value> ...\".")
        if key_val[0] in env_pairs:
            raise ValidationError("Duplicate environment variable {env} found, environment variable names must be unique.".format(env=key_val[0]))
        # NOTE(review): splitting on the substring treats any value containing
        # "secretref:" (not just a "secretref:" prefix) as a secret reference -
        # preserved as-is for backward compatibility.
        value = key_val[1].split('secretref:')
        env_pairs[key_val[0]] = value
    env_var_def = []
    for key, value in env_pairs.items():
        if len(value) == 2:
            env_var_def.append({
                "name": key,
                "secretRef": value[1]
            })
        else:
            env_var_def.append({
                "name": key,
                "value": value[0]
            })
    return env_var_def
def parse_secret_flags(secret_list):
    """
    Parse "<key>=<value>" secret pairs into a list of {"name", "value"} dicts.

    Values may contain '=' (only the first one separates key from value).
    Raises ValidationError on a malformed pair or a duplicate secret name.
    """
    parsed = {}
    for entry in secret_list:
        parts = entry.split('=', 1)
        if len(parts) != 2:
            raise ValidationError("Secrets must be in format \"<key>=<value> <key>=<value> ...\".")
        name, value = parts
        if name in parsed:
            raise ValidationError("Duplicate secret \"{secret}\" found, secret names must be unique.".format(secret=name))
        parsed[name] = value
    return [{"name": name, "value": value} for name, value in parsed.items()]
def _update_revision_env_secretrefs(containers, name):
for container in containers:
if "env" in container:
for var in container["env"]:
if "secretRef" in var:
var["secretRef"] = var["secretRef"].replace("{}-".format(name), "")
def _update_revision_env_secretrefs(containers, name):
for container in containers:
if "env" in container:
for var in container["env"]:
if "secretRef" in var:
var["secretRef"] = var["secretRef"].replace("{}-".format(name), "")
def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_server, registry_pass, update_existing_secret=False):
    """
    Resolve the registry password to a secret name, storing it if needed.

    If `registry_pass` is of the form "secretref:<name>", validate that the
    named secret already exists in `secrets_list` and return its name.
    Otherwise derive a secret name from the server hostname and user, store
    (or, with update_existing_secret, overwrite) the password in
    `secrets_list`, and return that name. Mutates `secrets_list` in place.
    """
    if registry_pass.startswith("secretref:"):
        # Caller referenced an existing secret by name.
        pieces = registry_pass.split("secretref:")
        if len(pieces) <= 1:
            raise ValidationError("Invalid registry password secret. Value must be a non-empty value starting with \'secretref:\'.")
        secret_name = ''.join(pieces[1:])
        if not any(secret for secret in secrets_list if secret['name'].lower() == secret_name.lower()):
            raise ValidationError("Registry password secret with name '{}' does not exist. Add the secret using --secrets".format(secret_name))
        return secret_name
    # Caller supplied the raw password: derive "<server>-<user>" secret name.
    hostname = urlparse(registry_server).hostname
    if hostname is not None:
        registry_secret_name = "{server}-{user}".format(server=hostname.replace('.', ''), user=registry_user.lower())
    else:
        registry_secret_name = "{server}-{user}".format(server=registry_server.replace('.', ''), user=registry_user.lower())
    for secret in secrets_list:
        if secret['name'].lower() != registry_secret_name.lower():
            continue
        if secret['value'].lower() != registry_pass.lower():
            if update_existing_secret:
                secret['value'] = registry_pass
            else:
                raise ValidationError('Found secret with name \"{}\" but value does not equal the supplied registry password.'.format(registry_secret_name))
        return registry_secret_name
    logger.warning('Adding registry password as a secret with name \"{}\"'.format(registry_secret_name))  # pylint: disable=logging-format-interpolation
    secrets_list.append({
        "name": registry_secret_name,
        "value": registry_pass
    })
    return registry_secret_name
def parse_list_of_strings(comma_separated_string):
    """Split a comma-separated string into a list of whitespace-stripped tokens."""
    return [token.strip() for token in comma_separated_string.split(',')]
def raise_missing_token_suggestion():
    """Raise RequiredArgumentMissingError explaining how to supply a GitHub PAT."""
    pat_documentation = "https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line"
    message = ("GitHub access token is required to authenticate to your repositories. "
               "If you need to create a Github Personal Access Token, "
               "please run with the '--login-with-github' flag or follow "
               "the steps found at the following link:\n{0}".format(pat_documentation))
    raise RequiredArgumentMissingError(message)
def _get_default_log_analytics_location(cmd):
    """Return the first location the subscription supports for Log Analytics
    workspaces, normalized (lowercased, spaces/parentheses removed);
    falls back to "eastus" on any failure."""
    default_location = "eastus"
    providers_client = None
    try:
        providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx))
        resource_types = getattr(providers_client.get("Microsoft.OperationalInsights"), 'resource_types', [])
        res_locations = []
        # Find the locations of the "workspaces" resource type
        # (the last match wins if several share the same type name).
        for res in resource_types:
            if res and getattr(res, 'resource_type', "") == "workspaces":
                res_locations = getattr(res, 'locations', [])
        if len(res_locations) > 0:
            # Normalize "East US (Stage)" -> "eastusstage".
            location = res_locations[0].lower().replace(" ", "").replace("(", "").replace(")", "")
            if location:
                return location
    except Exception:  # pylint: disable=broad-except
        # Best-effort lookup: any client error falls back to the default.
        return default_location
    return default_location
# Generate random 4 character string
def _new_tiny_guid():
import random
import string
return ''.join(random.choices(string.ascii_letters + string.digits, k=4))
# Follow same naming convention as Portal
def _generate_log_analytics_workspace_name(resource_group_name):
import re
prefix = "workspace"
suffix = _new_tiny_guid()
alphaNumericRG = resource_group_name
alphaNumericRG = re.sub(r'[^0-9a-z]', '', resource_group_name)
maxLength = 40
name = "{}-{}{}".format(
prefix,
alphaNumericRG,
suffix
)
if len(name) > maxLength:
name = name[:maxLength]
return name
def _generate_log_analytics_if_not_provided(cmd, logs_customer_id, logs_key, location, resource_group_name):
    """Resolve a Log Analytics (customer id, shared key) pair for a new environment.

    - Neither value supplied: create a new workspace and use its credentials.
    - Only ``logs_customer_id`` supplied: find the matching workspace and fetch
      its primary shared key.
    - Only ``logs_key`` supplied: usage error (customer id is required with it).

    Returns the (logs_customer_id, logs_key) tuple.
    """
    if logs_customer_id is None and logs_key is None:
        logger.warning("No Log Analytics workspace provided.")
        try:
            _validate_subscription_registered(cmd, "Microsoft.OperationalInsights")
            log_analytics_client = log_analytics_client_factory(cmd.cli_ctx)
            log_analytics_shared_key_client = log_analytics_shared_key_client_factory(cmd.cli_ctx)
            log_analytics_location = location
            try:
                _ensure_location_allowed(cmd, log_analytics_location, "Microsoft.OperationalInsights", "workspaces")
            except Exception:  # pylint: disable=broad-except
                # requested region has no workspace support; fall back to a default region
                log_analytics_location = _get_default_log_analytics_location(cmd)
            from azure.cli.core.commands import LongRunningOperation
            from azure.mgmt.loganalytics.models import Workspace
            workspace_name = _generate_log_analytics_workspace_name(resource_group_name)
            workspace_instance = Workspace(location=log_analytics_location)
            logger.warning("Generating a Log Analytics workspace with name \"{}\"".format(workspace_name))  # pylint: disable=logging-format-interpolation
            poller = log_analytics_client.begin_create_or_update(resource_group_name, workspace_name, workspace_instance)
            log_analytics_workspace = LongRunningOperation(cmd.cli_ctx)(poller)
            logs_customer_id = log_analytics_workspace.customer_id
            logs_key = log_analytics_shared_key_client.get_shared_keys(
                workspace_name=workspace_name,
                resource_group_name=resource_group_name).primary_shared_key
        except Exception as ex:
            raise ValidationError("Unable to generate a Log Analytics workspace. You can use \"az monitor log-analytics workspace create\" to create one and supply --logs-customer-id and --logs-key") from ex
    elif logs_customer_id is None:
        raise ValidationError("Usage error: Supply the --logs-customer-id associated with the --logs-key")
    elif logs_key is None:  # Try finding the logs-key
        log_analytics_client = log_analytics_client_factory(cmd.cli_ctx)
        log_analytics_shared_key_client = log_analytics_shared_key_client_factory(cmd.cli_ctx)
        log_analytics_name = None
        log_analytics_rg = None
        log_analytics = log_analytics_client.list()
        # scan every workspace in the subscription for a matching customer id
        for la in log_analytics:
            if la.customer_id and la.customer_id.lower() == logs_customer_id.lower():
                log_analytics_name = la.name
                parsed_la = parse_resource_id(la.id)
                log_analytics_rg = parsed_la['resource_group']
        if log_analytics_name is None:
            raise ValidationError('Usage error: Supply the --logs-key associated with the --logs-customer-id')
        shared_keys = log_analytics_shared_key_client.get_shared_keys(workspace_name=log_analytics_name, resource_group_name=log_analytics_rg)
        if not shared_keys or not shared_keys.primary_shared_key:
            raise ValidationError('Usage error: Supply the --logs-key associated with the --logs-customer-id')
        logs_key = shared_keys.primary_shared_key
    return logs_customer_id, logs_key
def _get_existing_secrets(cmd, resource_group_name, name, containerapp_def):
    """Fill containerapp_def's secrets with the values stored on the service.

    When the payload has a secrets section, the values are fetched via the
    listSecrets API (presumably because the regular GET response omits secret
    values -- confirm against the service contract) and written back into
    containerapp_def in place.
    """
    if "secrets" not in containerapp_def["properties"]["configuration"]:
        containerapp_def["properties"]["configuration"]["secrets"] = []
    else:
        secrets = []
        try:
            secrets = ContainerAppClient.list_secrets(cmd=cmd, resource_group_name=resource_group_name, name=name)
        except Exception as e:  # pylint: disable=broad-except
            handle_raw_exception(e)
        containerapp_def["properties"]["configuration"]["secrets"] = secrets["value"]
def _ensure_identity_resource_id(subscription_id, resource_group, resource):
    """Return a full user-assigned identity resource ID.

    If ``resource`` is already a valid ARM resource ID it is returned as-is;
    otherwise it is treated as a bare identity name and expanded under
    Microsoft.ManagedIdentity/userAssignedIdentities.
    """
    from msrestazure.tools import resource_id, is_valid_resource_id
    if not is_valid_resource_id(resource):
        resource = resource_id(subscription=subscription_id,
                               resource_group=resource_group,
                               namespace='Microsoft.ManagedIdentity',
                               type='userAssignedIdentities',
                               name=resource)
    return resource
def _add_or_update_secrets(containerapp_def, add_secrets):
if "secrets" not in containerapp_def["properties"]["configuration"]:
containerapp_def["properties"]["configuration"]["secrets"] = []
for new_secret in add_secrets:
is_existing = False
for existing_secret in containerapp_def["properties"]["configuration"]["secrets"]:
if existing_secret["name"].lower() == new_secret["name"].lower():
is_existing = True
existing_secret["value"] = new_secret["value"]
break
if not is_existing:
containerapp_def["properties"]["configuration"]["secrets"].append(new_secret)
def _remove_registry_secret(containerapp_def, server, username):
    """Remove the auto-generated registry-password secret for server/username.

    The secret name mirrors the one built when the registry was added:
    "<server-without-dots>-<lowercased-username>".
    """
    hostname = urlparse(server).hostname
    server_part = hostname if hostname is not None else server
    registry_secret_name = "{server}-{user}".format(server=server_part.replace('.', ''), user=username.lower())
    _remove_secret(containerapp_def, secret_name=registry_secret_name)
def _remove_secret(containerapp_def, secret_name):
if "secrets" not in containerapp_def["properties"]["configuration"]:
containerapp_def["properties"]["configuration"]["secrets"] = []
for index, value in enumerate(containerapp_def["properties"]["configuration"]["secrets"]):
existing_secret = value
if existing_secret["name"].lower() == secret_name.lower():
containerapp_def["properties"]["configuration"]["secrets"].pop(index)
break
def _add_or_update_env_vars(existing_env_vars, new_env_vars, is_add=False):
for new_env_var in new_env_vars:
# Check if updating existing env var
is_existing = False
for existing_env_var in existing_env_vars:
if existing_env_var["name"].lower() == new_env_var["name"].lower():
is_existing = True
if is_add:
logger.warning("Environment variable {} already exists. Replacing environment variable value.".format(new_env_var["name"])) # pylint: disable=logging-format-interpolation
if "value" in new_env_var:
existing_env_var["value"] = new_env_var["value"]
else:
existing_env_var["value"] = None
if "secretRef" in new_env_var:
existing_env_var["secretRef"] = new_env_var["secretRef"]
else:
existing_env_var["secretRef"] = None
break
# If not updating existing env var, add it as a new env var
if not is_existing:
if not is_add:
logger.warning("Environment variable {} does not exist. Adding as new environment variable.".format(new_env_var["name"])) # pylint: disable=logging-format-interpolation
existing_env_vars.append(new_env_var)
def _remove_env_vars(existing_env_vars, remove_env_vars):
    """Remove, in place, each env var whose name appears in ``remove_env_vars``."""
    # NOTE(review): an identical re-definition of _remove_env_vars appears
    # immediately below (only the loop-variable name differs) and shadows this
    # one at import time; one of the two copies should be deleted.
    for old_env_var in remove_env_vars:
        # look for a matching (case-insensitive) variable name
        is_existing = False
        for i, value in enumerate(existing_env_vars):
            existing_env_var = value
            if existing_env_var["name"].lower() == old_env_var.lower():
                is_existing = True
                existing_env_vars.pop(i)
                break
        # warn when the variable was not found
        if not is_existing:
            logger.warning("Environment variable {} does not exist.".format(old_env_var))  # pylint: disable=logging-format-interpolation
def _remove_env_vars(existing_env_vars, remove_env_vars):
for old_env_var in remove_env_vars:
# Check if updating existing env var
is_existing = False
for index, value in enumerate(existing_env_vars):
existing_env_var = value
if existing_env_var["name"].lower() == old_env_var.lower():
is_existing = True
existing_env_vars.pop(index)
break
# If not updating existing env var, add it as a new env var
if not is_existing:
logger.warning("Environment variable {} does not exist.".format(old_env_var)) # pylint: disable=logging-format-interpolation
def _add_or_update_tags(containerapp_def, tags):
if 'tags' not in containerapp_def:
if tags:
containerapp_def['tags'] = tags
else:
containerapp_def['tags'] = {}
else:
for key in tags:
containerapp_def['tags'][key] = tags[key]
def _object_to_dict(obj):
import json
import datetime
def default_handler(x):
if isinstance(x, datetime.datetime):
return x.isoformat()
return x.__dict__
return json.loads(json.dumps(obj, default=default_handler))
def _to_camel_case(snake_str):
components = snake_str.split('_')
return components[0] + ''.join(x.title() for x in components[1:])
def _convert_object_from_snake_to_camel_case(o):
    """Recursively rename all dict keys from snake_case to camelCase.

    Lists are walked element-by-element; non-container values are left alone.
    """
    if isinstance(o, list):
        return [_convert_object_from_snake_to_camel_case(item)
                if isinstance(item, (dict, list)) else item
                for item in o]
    converted = {}
    for key, value in o.items():
        if isinstance(value, (dict, list)):
            value = _convert_object_from_snake_to_camel_case(value)
        converted[_to_camel_case(key)] = value
    return converted
def _remove_additional_attributes(o):
if isinstance(o, list):
for i in o:
_remove_additional_attributes(i)
elif isinstance(o, dict):
if "additionalProperties" in o:
del o["additionalProperties"]
for key in o:
_remove_additional_attributes(o[key])
def _remove_readonly_attributes(containerapp_def):
unneeded_properties = [
"id",
"name",
"type",
"systemData",
"provisioningState",
"latestRevisionName",
"latestRevisionFqdn",
"customDomainVerificationId",
"outboundIpAddresses",
"fqdn"
]
for unneeded_property in unneeded_properties:
if unneeded_property in containerapp_def:
del containerapp_def[unneeded_property]
elif unneeded_property in containerapp_def['properties']:
del containerapp_def['properties'][unneeded_property]
def _remove_dapr_readonly_attributes(daprcomponent_def):
unneeded_properties = [
"id",
"name",
"type",
"systemData",
"provisioningState",
"latestRevisionName",
"latestRevisionFqdn",
"customDomainVerificationId",
"outboundIpAddresses",
"fqdn"
]
for unneeded_property in unneeded_properties:
if unneeded_property in daprcomponent_def:
del daprcomponent_def[unneeded_property]
def update_nested_dictionary(orig_dict, new_dict):
    """Recursively merge ``new_dict`` into ``orig_dict`` (in place) and return it.

    Mappings are merged key-by-key; a non-empty list replaces the old list
    wholesale (an empty list leaves the original untouched); any other value
    overwrites the original unless it is None.
    """
    from collections.abc import Mapping
    for key, new_value in new_dict.items():
        if isinstance(new_value, Mapping):
            orig_dict[key] = update_nested_dictionary(orig_dict.get(key, {}), new_value)
        elif isinstance(new_value, list):
            if new_value:
                orig_dict[key] = new_value
        elif new_value is not None:
            orig_dict[key] = new_value
    return orig_dict
def _is_valid_weight(weight):
try:
n = int(weight)
if 0 <= n <= 100:
return True
return False
except ValueError:
return False
def _update_traffic_weights(containerapp_def, list_weights):
    """Replace the ingress traffic section with the supplied revision weights.

    Each entry of ``list_weights`` must look like "<revision>=<weight>" with an
    integer weight between 0 and 100; raises ValidationError otherwise.

    Fix: removed the dead ``is_existing`` flag (it was never set to True, so
    its guard was always taken) and made the reset condition's operator
    precedence explicit with parentheses (behavior unchanged).
    """
    # Reset the traffic list when it is missing or when new weights were supplied.
    if "traffic" not in containerapp_def["properties"]["configuration"]["ingress"] or (list_weights and len(list_weights)):
        containerapp_def["properties"]["configuration"]["ingress"]["traffic"] = []
    for new_weight in list_weights:
        key_val = new_weight.split('=', 1)
        if len(key_val) != 2:
            raise ValidationError('Traffic weights must be in format \"<revision>=weight <revision2>=<weigh2> ...\"')
        if not _is_valid_weight(key_val[1]):
            raise ValidationError('Traffic weights must be integers between 0 and 100')
        containerapp_def["properties"]["configuration"]["ingress"]["traffic"].append({
            "revisionName": key_val[0],
            "weight": int(key_val[1])
        })
def _get_app_from_revision(revision):
if not revision:
raise ValidationError('Invalid revision. Revision must not be empty')
revision = revision.split('--')
revision.pop()
revision = "--".join(revision)
return revision
def _infer_acr_credentials(cmd, registry_server):
    """Look up admin credentials for an Azure Container Registry.

    Only works for *.azurecr.io servers; raises RequiredArgumentMissingError
    for any other registry, or when the credential lookup fails.  Returns a
    (username, password) tuple.
    """
    # If registry is Azure Container Registry, we can try inferring credentials
    if '.azurecr.io' not in registry_server:
        raise RequiredArgumentMissingError('Registry username and password are required if not using Azure Container Registry.')
    logger.warning('No credential was provided to access Azure Container Registry. Trying to look up credentials...')
    parsed = urlparse(registry_server)
    # the server may arrive with or without a scheme; the first DNS label is the registry name
    registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
    try:
        registry_user, registry_pass = _get_acr_cred(cmd.cli_ctx, registry_name)
        return (registry_user, registry_pass)
    except Exception as ex:
        raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry {}. Please provide the registry username and password'.format(registry_name)) from ex
def _registry_exists(containerapp_def, registry_server):
exists = False
if "properties" in containerapp_def and "configuration" in containerapp_def["properties"] and "registries" in containerapp_def["properties"]["configuration"]:
for registry in containerapp_def["properties"]["configuration"]["registries"]:
if "server" in registry and registry["server"] and registry["server"].lower() == registry_server.lower():
exists = True
break
return exists
| 41.542517 | 211 | 0.661113 |
64f00a575c42ebccef6be7ddfd0b9177177099fd | 45 | py | Python | instantmailer/__init__.py | Pythonastics/pymail | a7659cdff09ac3a77bc1c06638a00352168e006c | [
"MIT"
] | null | null | null | instantmailer/__init__.py | Pythonastics/pymail | a7659cdff09ac3a77bc1c06638a00352168e006c | [
"MIT"
] | null | null | null | instantmailer/__init__.py | Pythonastics/pymail | a7659cdff09ac3a77bc1c06638a00352168e006c | [
"MIT"
] | null | null | null | from .mail import Mail
__version__ = "1.0.0" | 15 | 22 | 0.711111 |
5e8d1f671a412b9cc300c1d4d1ef7c7b6cad37f1 | 376 | py | Python | setup.py | eranroz/BotMisparim | 4553cc7679e7c04ddf3eb7920e0cd5166cb57087 | [
"MIT"
] | 1 | 2021-11-02T10:09:01.000Z | 2021-11-02T10:09:01.000Z | setup.py | eranroz/BotMisparim | 4553cc7679e7c04ddf3eb7920e0cd5166cb57087 | [
"MIT"
] | null | null | null | setup.py | eranroz/BotMisparim | 4553cc7679e7c04ddf3eb7920e0cd5166cb57087 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name='botMisparim',
version='1.0',
packages=[''],
scripts=['misparim.py'],
url='https://github.com/eranroz/BotMisparim',
license='MIT',
author='eranroz',
author_email='eranroz@cs.huji.ac.il',
description='Bot for scanning common grammar mistakes in Hebrew',
requires=['HspellPy', 'pywikibot']
)
| 25.066667 | 69 | 0.659574 |
f14038c3af25510ac2db7cc0b2d4866fb27f4657 | 391 | py | Python | homebar/homebar/asgi.py | cdriehuys/home-bar | 22e408c00f3bae0b9b7c0de6ebba36e211c32828 | [
"MIT"
] | null | null | null | homebar/homebar/asgi.py | cdriehuys/home-bar | 22e408c00f3bae0b9b7c0de6ebba36e211c32828 | [
"MIT"
] | null | null | null | homebar/homebar/asgi.py | cdriehuys/home-bar | 22e408c00f3bae0b9b7c0de6ebba36e211c32828 | [
"MIT"
] | null | null | null | """
ASGI config for homebar project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'homebar.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
d244e7d77fc3187f3712ffc2e750f00a0b556877 | 663 | py | Python | setup.py | pratik-m/OracleERPIntegrationServiceWrapper | dd3af6d92c9665c662163179f35b9b6e06638a5b | [
"MIT"
] | null | null | null | setup.py | pratik-m/OracleERPIntegrationServiceWrapper | dd3af6d92c9665c662163179f35b9b6e06638a5b | [
"MIT"
] | null | null | null | setup.py | pratik-m/OracleERPIntegrationServiceWrapper | dd3af6d92c9665c662163179f35b9b6e06638a5b | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
name="OracleSaaSApiPy",
version="1.0.0.2",
author="Pratik Munot",
description="An API wrapper for Oracle SaaS Webservice APIs",
long_description="An API wrapper for Oracle SaaS Webservice APIs",
url="https://github.com/pratik-m/OracleERPIntegrationServiceWrapper",
#package_dir={'':'OracleSaaSApiPy'},
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=['requests'],
python_requires='>=3.6',
)
| 31.571429 | 74 | 0.651584 |
7f701bc357ddbeaa907d1226ef42aea535a70943 | 300 | py | Python | CreeDictionary/tests/utils_tests/test_profiling.py | aaronfay/cree-intelligent-dictionary | 5119aa819a48d0ce21c002af284bd2a28a77b15b | [
"Apache-2.0"
] | null | null | null | CreeDictionary/tests/utils_tests/test_profiling.py | aaronfay/cree-intelligent-dictionary | 5119aa819a48d0ce21c002af284bd2a28a77b15b | [
"Apache-2.0"
] | null | null | null | CreeDictionary/tests/utils_tests/test_profiling.py | aaronfay/cree-intelligent-dictionary | 5119aa819a48d0ce21c002af284bd2a28a77b15b | [
"Apache-2.0"
] | null | null | null | import time
from utils.profiling import timed
def test_timed_decorator(capsys):
@timed(msg="{func_name} finished in {second:.1f} seconds")
def quick_nap():
time.sleep(0.1)
quick_nap()
out, err = capsys.readouterr()
assert "quick_nap finished in 0.1 seconds\n" == out
| 20 | 62 | 0.673333 |
c975a464a180f698babd3c2866f9886e1512f8c7 | 39,710 | py | Python | Bio/Phylo/TreeConstruction.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | 1 | 2018-12-27T08:43:52.000Z | 2018-12-27T08:43:52.000Z | Bio/Phylo/TreeConstruction.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | null | null | null | Bio/Phylo/TreeConstruction.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | 1 | 2018-12-27T08:43:42.000Z | 2018-12-27T08:43:42.000Z | # Copyright (C) 2013 by Yanbo Ye (yeyanbo289@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Classes and methods for tree construction"""
import itertools
import copy
from Bio.Phylo import BaseTree
from Bio.Align import MultipleSeqAlignment
from Bio.SubsMat import MatrixInfo
from Bio import _py3k
def _is_numeric(x):
    """Return True for float/complex values and for ints (incl. Python 2 long)."""
    return isinstance(x, (float, complex)) or _py3k._is_int_or_long(x)
class _Matrix(object):
    """Base class for distance matrix or scoring matrix

    Accepts a list of names and a lower triangular matrix.::

        matrix = [[0],
                  [1, 0],
                  [2, 3, 0],
                  [4, 5, 6, 0]]
        represents the symmetric matrix of

        [0,1,2,4]
        [1,0,3,5]
        [2,3,0,6]
        [4,5,6,0]

    :Parameters:
        names : list
            names of elements, used for indexing
        matrix : list
            nested list of numerical lists in lower triangular format

    Example
    -------

    >>> from Bio.Phylo.TreeConstruction import _Matrix
    >>> names = ['Alpha', 'Beta', 'Gamma', 'Delta']
    >>> matrix = [[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]]
    >>> m = _Matrix(names, matrix)
    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [1, 0], [2, 3, 0], [4, 5, 6, 0]])

    You can use two indices to get or assign an element in the matrix.

    >>> m[1,2]
    3
    >>> m['Beta','Gamma']
    3
    >>> m['Beta','Gamma'] = 4
    >>> m['Beta','Gamma']
    4

    Furthermore, you can use one index to get or assign a list of elements related to that index.

    >>> m[0]
    [0, 1, 2, 4]
    >>> m['Alpha']
    [0, 1, 2, 4]
    >>> m['Alpha'] = [0, 7, 8, 9]
    >>> m[0]
    [0, 7, 8, 9]
    >>> m[0,1]
    7

    Also you can delete or insert a column&row of elements by index.

    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])
    >>> del m['Alpha']
    >>> m
    _Matrix(names=['Beta', 'Gamma', 'Delta'], matrix=[[0], [4, 0], [5, 6, 0]])
    >>> m.insert('Alpha', [0, 7, 8, 9] , 0)
    >>> m
    _Matrix(names=['Alpha', 'Beta', 'Gamma', 'Delta'], matrix=[[0], [7, 0], [8, 4, 0], [9, 5, 6, 0]])

    """

    def __init__(self, names, matrix=None):
        """Initialize matrix by a list of names and a list of
        lower triangular matrix data"""
        # check names: must be a list of unique strings
        if isinstance(names, list) and all(isinstance(s, str) for s in names):
            if len(set(names)) == len(names):
                self.names = names
            else:
                raise ValueError("Duplicate names found")
        else:
            raise TypeError("'names' should be a list of strings")
        # check matrix
        if matrix is None:
            # create a new one with 0 if matrix is not assigned
            matrix = [[0] * i for i in range(1, len(self) + 1)]
            self.matrix = matrix
        else:
            # check if all elements are numbers
            if (isinstance(matrix, list) and
                all(isinstance(l, list) for l in matrix) and
                all(_is_numeric(n) for n in [item for sublist in matrix
                                             for item in sublist])):
                # check if the same length with names
                if len(matrix) == len(names):
                    # check if is lower triangle format
                    if [len(m) for m in matrix] == list(range(1, len(self) + 1)):
                        self.matrix = matrix
                    else:
                        raise ValueError(
                            "'matrix' should be in lower triangle format")
                else:
                    raise ValueError(
                        "'names' and 'matrix' should be the same size")
            else:
                raise TypeError("'matrix' should be a list of numerical lists")

    def __getitem__(self, item):
        """Access value(s) by the index(s) or name(s).

        For a _Matrix object 'dm'::

            dm[i]                   get a value list from the given 'i' to others;
            dm[i, j]                get the value between 'i' and 'j';
            dm['name']              map name to index first
            dm['name1', 'name2']    map name to index first
        """
        # Handle single indexing
        if isinstance(item, (int, str)):
            index = None
            if isinstance(item, int):
                index = item
            elif isinstance(item, str):
                if item in self.names:
                    index = self.names.index(item)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if index > len(self) - 1:
                raise IndexError("Index out of range.")
            # stitch the full row together from the stored lower triangle:
            # entries left of the diagonal come from this row, the remainder
            # from the same column of the rows at and below 'index'
            return [self.matrix[index][i] for i in range(0, index)] + [self.matrix[i][index] for i in range(index, len(self))]
        # Handle double indexing
        elif len(item) == 2:
            row_index = None
            col_index = None
            if all(isinstance(i, int) for i in item):
                row_index, col_index = item
            elif all(isinstance(i, str) for i in item):
                row_name, col_name = item
                if row_name in self.names and col_name in self.names:
                    row_index = self.names.index(row_name)
                    col_index = self.names.index(col_name)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if row_index > len(self) - 1 or col_index > len(self) - 1:
                raise IndexError("Index out of range.")
            # only the lower triangle is stored, so order the two indices
            if row_index > col_index:
                return self.matrix[row_index][col_index]
            else:
                return self.matrix[col_index][row_index]
        else:
            raise TypeError("Invalid index type.")

    def __setitem__(self, item, value):
        """Set value by the index(s) or name(s).

        Similar to __getitem__::

            dm[1] = [1, 0, 3, 4]    set values from '1' to others;
            dm[i, j] = 2            set the value from 'i' to 'j'
        """
        # Handle single indexing
        if isinstance(item, (int, str)):
            index = None
            if isinstance(item, int):
                index = item
            elif isinstance(item, str):
                if item in self.names:
                    index = self.names.index(item)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if index > len(self) - 1:
                raise IndexError("Index out of range.")
            # check and assign value: must be a full row of numbers
            if isinstance(value, list) and all(_is_numeric(n) for n in value):
                if len(value) == len(self):
                    # scatter the row back into the lower-triangle storage
                    for i in range(0, index):
                        self.matrix[index][i] = value[i]
                    for i in range(index, len(self)):
                        self.matrix[i][index] = value[i]
                else:
                    raise ValueError("Value not the same size.")
            else:
                raise TypeError("Invalid value type.")
        # Handle double indexing
        elif len(item) == 2:
            row_index = None
            col_index = None
            if all(isinstance(i, int) for i in item):
                row_index, col_index = item
            elif all(isinstance(i, str) for i in item):
                row_name, col_name = item
                if row_name in self.names and col_name in self.names:
                    row_index = self.names.index(row_name)
                    col_index = self.names.index(col_name)
                else:
                    raise ValueError("Item not found.")
            else:
                raise TypeError("Invalid index type.")
            # check index
            if row_index > len(self) - 1 or col_index > len(self) - 1:
                raise IndexError("Index out of range.")
            # check and assign value into the lower triangle
            if _is_numeric(value):
                if row_index > col_index:
                    self.matrix[row_index][col_index] = value
                else:
                    self.matrix[col_index][row_index] = value
            else:
                raise TypeError("Invalid value type.")
        else:
            raise TypeError("Invalid index type.")

    def __delitem__(self, item):
        """Delete related distances by the index or name"""
        index = None
        if isinstance(item, int):
            index = item
        elif isinstance(item, str):
            index = self.names.index(item)
        else:
            raise TypeError("Invalid index type.")
        # remove distances related to index: the column entries stored in the
        # rows below, then the row itself
        for i in range(index + 1, len(self)):
            del self.matrix[i][index]
        del self.matrix[index]
        # remove name
        del self.names[index]

    def insert(self, name, value, index=None):
        """Insert distances given the name and value.

        :Parameters:
            name : str
                name of a row/col to be inserted
            value : list
                a row/col of values to be inserted
        """
        if isinstance(name, str):
            # insert at the given index or at the end
            if index is None:
                index = len(self)
            if not isinstance(index, int):
                raise TypeError("Invalid index type.")
            # insert name
            self.names.insert(index, name)
            # insert elements of 0, to be assigned
            self.matrix.insert(index, [0] * index)
            for i in range(index, len(self)):
                self.matrix[i].insert(index, 0)
            # assign value via __setitem__, which scatters the full row
            self[index] = value
        else:
            raise TypeError("Invalid name type.")

    def __len__(self):
        """Matrix length"""
        return len(self.names)

    def __repr__(self):
        return self.__class__.__name__ \
            + "(names=%s, matrix=%s)" \
            % tuple(map(repr, (self.names, self.matrix)))

    def __str__(self):
        """Get a lower triangular matrix string"""
        # one tab-separated line per row, then a footer line of names
        matrix_string = '\n'.join(
            [self.names[i] + "\t" + "\t".join([str(n) for n in self.matrix[i]])
             for i in range(0, len(self))])
        matrix_string = matrix_string + "\n\t" + "\t".join(self.names)
        return matrix_string
class _DistanceMatrix(_Matrix):
    """Distance matrix class that can be used for distance based tree algorithms.

    All diagonal elements will be zero no matter what the users provide.
    """

    def __init__(self, names, matrix=None):
        _Matrix.__init__(self, names, matrix)
        self._set_zero_diagonal()

    def __setitem__(self, item, value):
        _Matrix.__setitem__(self, item, value)
        self._set_zero_diagonal()

    def _set_zero_diagonal(self):
        """Force every diagonal entry of the lower-triangle storage to zero."""
        # row i of the lower triangle has length i + 1, so row[i] is the diagonal
        for i, row in enumerate(self.matrix):
            row[i] = 0
class DistanceCalculator(object):
    """Class to calculate the distance matrix from a DNA or Protein
    Multiple Sequence Alignment(MSA) and the given name of the
    substitution model.

    Currently only scoring matrices are used.

    :Parameters:
        model : str
            Name of the model matrix to be used to calculate distance.
            The attribute `dna_matrices` contains the available model
            names for DNA sequences and `protein_matrices` for protein
            sequences.

    Example
    -------

    >>> from Bio.Phylo.TreeConstruction import DistanceCalculator
    >>> from Bio import AlignIO
    >>> aln = AlignIO.read(open('Tests/TreeConstruction/msa.phy'), 'phylip')
    >>> print(aln)
    SingleLetterAlphabet() alignment with 5 rows and 13 columns
    AACGTGGCCACAT Alpha
    AAGGTCGCCACAC Beta
    GAGATTTCCGCCT Delta
    GAGATCTCCGCCC Epsilon
    CAGTTCGCCACAA Gamma

    DNA calculator with 'identity' model::

        >>> calculator = DistanceCalculator('identity')
        >>> dm = calculator.get_distance(aln)
        >>> print(dm)
        Alpha   0
        Beta    0.230769230769  0
        Gamma   0.384615384615  0.230769230769  0
        Delta   0.538461538462  0.538461538462  0.538461538462  0
        Epsilon 0.615384615385  0.384615384615  0.461538461538  0.153846153846  0
                Alpha           Beta            Gamma           Delta           Epsilon

    Protein calculator with 'blosum62' model::

        >>> calculator = DistanceCalculator('blosum62')
        >>> dm = calculator.get_distance(aln)
        >>> print(dm)
        Alpha   0
        Beta    0.369047619048  0
        Gamma   0.493975903614  0.25            0
        Delta   0.585365853659  0.547619047619  0.566265060241  0
        Epsilon 0.7             0.355555555556  0.488888888889  0.222222222222  0
                Alpha           Beta            Gamma           Delta           Epsilon

    """

    # nucleotide alphabet used to index the DNA scoring matrices below
    dna_alphabet = ['A', 'T', 'C', 'G']

    # BLAST nucleic acid scoring matrix
    blastn = [[5],
              [-4, 5],
              [-4, -4, 5],
              [-4, -4, -4, 5]]

    # transition/transversion scoring matrix
    trans = [[6],
             [-5, 6],
             [-5, -1, 6],
             [-1, -5, -5, 6]]

    # amino-acid alphabet (incl. ambiguity codes B, X, Z) for protein matrices
    protein_alphabet = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
                        'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y',
                        'Z']

    # matrices available
    dna_matrices = {'blastn': blastn, 'trans': trans}
    protein_models = MatrixInfo.available_matrices
    protein_matrices = dict((name, getattr(MatrixInfo, name))
                            for name in protein_models)

    dna_models = list(dna_matrices.keys())

    models = ['identity'] + dna_models + protein_models

    def __init__(self, model='identity'):
        """Initialize with a distance model"""
        if model == 'identity':
            # no scoring matrix: plain character-identity scoring is used
            self.scoring_matrix = None
        elif model in self.dna_models:
            self.scoring_matrix = _Matrix(self.dna_alphabet,
                                          self.dna_matrices[model])
        elif model in self.protein_models:
            # SubsMat matrices are dicts keyed by residue pairs; convert once
            self.scoring_matrix = self._build_protein_matrix(
                self.protein_matrices[model])
        else:
            raise ValueError("Model not supported. Available models: " +
                             ", ".join(self.models))

    def _pairwise(self, seq1, seq2):
        """Calculate pairwise distance from two sequences.

        Returns a value between 0 (identical sequences) and 1 (completely
        different, or seq1 is an empty string.)
        """
        score = 0
        max_score = 0
        if self.scoring_matrix:
            # substitution-matrix scoring; gap '-' and stop '*' columns are skipped
            max_score1 = 0
            max_score2 = 0
            skip_letters = ['-', '*']
            for i in range(0, len(seq1)):
                l1 = seq1[i]
                l2 = seq2[i]
                if l1 in skip_letters or l2 in skip_letters:
                    continue
                if l1 not in self.scoring_matrix.names:
                    raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
                                     % (l1, seq1.id, i))
                if l2 not in self.scoring_matrix.names:
                    raise ValueError("Bad alphabet '%s' in sequence '%s' at position '%s'"
                                     % (l2, seq2.id, i))
                # self-scores give the best score each sequence could achieve
                max_score1 += self.scoring_matrix[l1, l1]
                max_score2 += self.scoring_matrix[l2, l2]
                score += self.scoring_matrix[l1, l2]
            # Take the higher score if the matrix is asymmetrical
            max_score = max(max_score1, max_score2)
        else:
            # Score by character identity, not skipping any special letters
            for i in range(0, len(seq1)):
                l1 = seq1[i]
                l2 = seq2[i]
                if l1 == l2:
                    score += 1
            max_score = len(seq1)
        if max_score == 0:
            return 1  # max possible scaled distance
        return 1 - (score * 1.0 / max_score)

    def get_distance(self, msa):
        """Return a _DistanceMatrix for MSA object

        :Parameters:
            msa : MultipleSeqAlignment
                DNA or Protein multiple sequence alignment.

        """
        if not isinstance(msa, MultipleSeqAlignment):
            raise TypeError("Must provide a MultipleSeqAlignment object.")

        names = [s.id for s in msa]
        dm = _DistanceMatrix(names)
        # fill the lower triangle with pairwise distances for every record pair
        for seq1, seq2 in itertools.combinations(msa, 2):
            dm[seq1.id, seq2.id] = self._pairwise(seq1, seq2)
        return dm

    def _build_protein_matrix(self, subsmat):
        """Convert matrix from SubsMat format to _Matrix object"""
        protein_matrix = _Matrix(self.protein_alphabet)
        for k, v in subsmat.items():
            aa1, aa2 = k
            protein_matrix[aa1, aa2] = v
        return protein_matrix
class TreeConstructor(object):
    """Base class for all tree constructors."""

    def build_tree(self, msa):
        """Build a tree from a MultipleSeqAlignment object.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class DistanceTreeConstructor(TreeConstructor):
"""Distance based tree constructor.
:Parameters:
method : str
Distance tree construction method, 'nj'(default) or 'upgma'.
distance_calculator : DistanceCalculator
The distance matrix calculator for multiple sequence alignment.
It must be provided if `build_tree` will be called.
Example
--------
>>> from TreeConstruction import DistanceTreeConstructor
>>> constructor = DistanceTreeConstructor()
UPGMA Tree:
>>> upgmatree = constructor.upgma(dm)
>>> print upgmatree
Tree(rooted=True)
Clade(name='Inner4')
Clade(branch_length=0.171955155115, name='Inner1')
Clade(branch_length=0.111111111111, name='Epsilon')
Clade(branch_length=0.111111111111, name='Delta')
Clade(branch_length=0.0673103855608, name='Inner3')
Clade(branch_length=0.0907558806655, name='Inner2')
Clade(branch_length=0.125, name='Gamma')
Clade(branch_length=0.125, name='Beta')
Clade(branch_length=0.215755880666, name='Alpha')
NJ Tree:
>>> njtree = constructor.nj(dm)
>>> print njtree
Tree(rooted=False)
Clade(name='Inner3')
Clade(branch_length=0.0142054862889, name='Inner2')
Clade(branch_length=0.239265540676, name='Inner1')
Clade(branch_length=0.0853101915988, name='Epsilon')
Clade(branch_length=0.136912030623, name='Delta')
Clade(branch_length=0.292306275042, name='Alpha')
Clade(branch_length=0.0747705106139, name='Beta')
Clade(branch_length=0.175229489386, name='Gamma')
"""
methods = ['nj', 'upgma']
def __init__(self, distance_calculator=None, method="nj"):
    """Initialize the distance-based tree constructor.

    :Parameters:
        distance_calculator : DistanceCalculator
            Calculator used by ``build_tree`` to derive a distance matrix
            from an alignment.  May be None when only ``nj``/``upgma`` are
            called directly with a precomputed matrix.
        method : str
            Tree construction method, 'nj' (default) or 'upgma'.
    """
    if (distance_calculator is None or
            isinstance(distance_calculator, DistanceCalculator)):
        self.distance_calculator = distance_calculator
    else:
        raise TypeError("Must provide a DistanceCalculator object.")
    if isinstance(method, str) and method in self.methods:
        self.method = method
    else:
        raise TypeError("Bad method: " + method +
                        ". Available methods: " + ", ".join(self.methods))
def build_tree(self, msa):
    """Construct a tree for the given alignment using the configured method.

    Requires ``self.distance_calculator`` to have been supplied at
    construction time; raises TypeError otherwise.
    """
    if self.distance_calculator:
        dm = self.distance_calculator.get_distance(msa)
        tree = None
        if self.method == 'upgma':
            tree = self.upgma(dm)
        else:
            tree = self.nj(dm)
        return tree
    else:
        raise TypeError("Must provide a DistanceCalculator object.")
def upgma(self, distance_matrix):
    """Construct and return an UPGMA tree.

    Constructs and returns an Unweighted Pair Group Method
    with Arithmetic mean (UPGMA) tree.

    :Parameters:
        distance_matrix : _DistanceMatrix
            The distance matrix for tree construction.
    """
    if not isinstance(distance_matrix, _DistanceMatrix):
        raise TypeError("Must provide a _DistanceMatrix object.")
    # make a copy of the distance matrix to be used
    # (the algorithm shrinks the matrix in place at every merge)
    dm = copy.deepcopy(distance_matrix)
    # init terminal clades
    clades = [BaseTree.Clade(None, name) for name in dm.names]
    # init minimum index
    min_i = 0
    min_j = 0
    inner_count = 0
    # agglomerate the closest pair repeatedly until one clade (the root) is left
    while len(dm) > 1:
        min_dist = dm[1, 0]
        # find minimum index
        # (scan the lower triangle; '>=' keeps the last of equally close pairs)
        for i in range(1, len(dm)):
            for j in range(0, i):
                if min_dist >= dm[i, j]:
                    min_dist = dm[i, j]
                    min_i = i
                    min_j = j
        # create clade
        clade1 = clades[min_i]
        clade2 = clades[min_j]
        inner_count += 1
        inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
        inner_clade.clades.append(clade1)
        inner_clade.clades.append(clade2)
        # assign branch length
        # UPGMA trees are ultrametric: each child hangs at half the pair
        # distance, minus the height already accumulated below an inner child.
        if clade1.is_terminal():
            clade1.branch_length = min_dist * 1.0 / 2
        else:
            clade1.branch_length = min_dist * \
                1.0 / 2 - self._height_of(clade1)
        if clade2.is_terminal():
            clade2.branch_length = min_dist * 1.0 / 2
        else:
            clade2.branch_length = min_dist * \
                1.0 / 2 - self._height_of(clade2)
        # update node list
        clades[min_j] = inner_clade
        del clades[min_i]
        # rebuild distance matrix,
        # set the distances of new node at the index of min_j
        # NOTE(review): plain average of the two old rows is the WPGMA
        # update; true UPGMA weights by cluster sizes -- confirm which
        # variant is intended before relying on exact branch lengths.
        for k in range(0, len(dm)):
            if k != min_i and k != min_j:
                dm[min_j, k] = (dm[min_i, k] + dm[min_j, k]) * 1.0 / 2
        dm.names[min_j] = "Inner" + str(inner_count)
        del dm[min_i]
    # the root of the rooted UPGMA tree carries no branch length of its own
    inner_clade.branch_length = 0
    return BaseTree.Tree(inner_clade)
def nj(self, distance_matrix):
    """Construct and return an Neighbor Joining tree.

    :Parameters:
        distance_matrix : _DistanceMatrix
            The distance matrix for tree construction.
    """
    if not isinstance(distance_matrix, _DistanceMatrix):
        raise TypeError("Must provide a _DistanceMatrix object.")
    # make a copy of the distance matrix to be used
    # (it is shrunk in place as nodes are joined)
    dm = copy.deepcopy(distance_matrix)
    # init terminal clades
    clades = [BaseTree.Clade(None, name) for name in dm.names]
    # init node distance
    # node_dist[i] is the average divergence of node i, used by the
    # neighbor-joining selection criterion below
    node_dist = [0] * len(dm)
    # init minimum index
    min_i = 0
    min_j = 0
    inner_count = 0
    # join pairs until only two nodes remain; those are attached afterwards
    while len(dm) > 2:
        # calculate nodeDist
        for i in range(0, len(dm)):
            node_dist[i] = 0
            for j in range(0, len(dm)):
                node_dist[i] += dm[i, j]
            node_dist[i] = node_dist[i] / (len(dm) - 2)
        # find minimum distance pair
        # (minimizes d(i, j) - r_i - r_j, the Saitou & Nei criterion)
        min_dist = dm[1, 0] - node_dist[1] - node_dist[0]
        min_i = 0
        min_j = 1
        for i in range(1, len(dm)):
            for j in range(0, i):
                temp = dm[i, j] - node_dist[i] - node_dist[j]
                if min_dist > temp:
                    min_dist = temp
                    min_i = i
                    min_j = j
        # create clade
        clade1 = clades[min_i]
        clade2 = clades[min_j]
        inner_count += 1
        inner_clade = BaseTree.Clade(None, "Inner" + str(inner_count))
        inner_clade.clades.append(clade1)
        inner_clade.clades.append(clade2)
        # assign branch length
        # standard NJ branch-length formulas; the two lengths sum to d(i, j)
        clade1.branch_length = (dm[min_i, min_j] + node_dist[min_i] -
                                node_dist[min_j]) / 2.0
        clade2.branch_length = dm[min_i, min_j] - clade1.branch_length
        # update node list
        clades[min_j] = inner_clade
        del clades[min_i]
        # rebuild distance matrix,
        # set the distances of new node at the index of min_j
        for k in range(0, len(dm)):
            if k != min_i and k != min_j:
                dm[min_j, k] = (dm[min_i, k] + dm[min_j, k] -
                                dm[min_i, min_j]) / 2.0
        dm.names[min_j] = "Inner" + str(inner_count)
        del dm[min_i]
    # set the last clade as one of the child of the inner_clade
    root = None
    if clades[0] == inner_clade:
        clades[0].branch_length = 0
        clades[1].branch_length = dm[1, 0]
        clades[0].clades.append(clades[1])
        root = clades[0]
    else:
        clades[0].branch_length = dm[1, 0]
        clades[1].branch_length = 0
        clades[1].clades.append(clades[0])
        root = clades[1]
    return BaseTree.Tree(root, rooted=False)
def _height_of(self, clade):
    """Calculate clade height -- the longest path from *clade* to any terminal.

    The height of a terminal is 0; the height of an internal clade is the
    largest (child branch length + child height) over its children.

    Bug fixed: the previous implementation took
    ``max(self._height_of(c) for c in clade.clades)`` without adding the
    child's own ``branch_length`` (compensating only via the terminal case
    returning ``clade.branch_length``).  That under-estimates the height as
    soon as both children of a merge are internal nodes, which produced
    wrong UPGMA branch lengths for five or more taxa.  ``upgma`` only calls
    this on non-terminal clades, so callers are unaffected by the changed
    terminal base case.
    """
    if clade.is_terminal():
        return 0
    # height through each child = that child's branch plus its own height
    return max(self._height_of(c) + c.branch_length for c in clade.clades)
# #################### Tree Scoring and Searching Classes #####################
class Scorer(object):
    """Abstract base for objects that assign a numeric score to a tree."""

    def get_score(self, tree, alignment):
        """Return the score of *tree* for the given *alignment*.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class TreeSearcher(object):
    """Abstract base for tree searching strategies."""

    def search(self, starting_tree, alignment):
        """Search for the best tree, starting from *starting_tree*.

        Concrete subclasses must override this method.
        """
        raise NotImplementedError("Method not implemented!")
class NNITreeSearcher(TreeSearcher):
    """Tree searching with Nearest Neighbor Interchanges (NNI) algorithm.

    :Parameters:
        scorer : ParsimonyScorer
            parsimony scorer to calculate the parsimony score of
            different trees during NNI algorithm.
    """

    def __init__(self, scorer):
        # only a Scorer (e.g. ParsimonyScorer) is accepted; anything else
        # is a programming error reported immediately
        if isinstance(scorer, Scorer):
            self.scorer = scorer
        else:
            raise TypeError("Must provide a Scorer object.")

    def search(self, starting_tree, alignment):
        """Implement the TreeSearcher.search method.

        :Parameters:
            starting_tree : Tree
                starting tree of NNI method.
            alignment : MultipleSeqAlignment
                multiple sequence alignment used to calculate parsimony
                score of different NNI trees.
        """
        return self._nni(starting_tree, alignment)

    def _nni(self, starting_tree, alignment):
        """Search for the best parsimony tree using the NNI algorithm."""
        # hill climbing: score every NNI neighbor of the current best tree,
        # move to the best improving one, stop at a local minimum
        best_tree = starting_tree
        while True:
            best_score = self.scorer.get_score(best_tree, alignment)
            temp = best_score
            for t in self._get_neighbors(best_tree):
                score = self.scorer.get_score(t, alignment)
                if score < best_score:
                    best_score = score
                    best_tree = t
            # stop if no smaller score exist
            if best_score >= temp:
                break
        return best_tree

    def _get_neighbors(self, tree):
        """Get all neighbor trees of the given tree.

        Currently only for binary rooted trees.

        The tree is mutated in place to produce each NNI rearrangement,
        deep-copied into the result list, then mutated back to its original
        topology before continuing -- statement order below is significant.
        """
        # make child to parent dict
        parents = {}
        for clade in tree.find_clades():
            if clade != tree.root:
                node_path = tree.get_path(clade)
                # cannot get the parent if the parent is root. Bug?
                if len(node_path) == 1:
                    parents[clade] = tree.root
                else:
                    parents[clade] = node_path[-2]
        neighbors = []
        root_childs = []
        for clade in tree.get_nonterminals(order="level"):
            if clade == tree.root:
                # the root's trifurcation-like case: swap grandchildren
                # across the two root children (when both are internal)
                left = clade.clades[0]
                right = clade.clades[1]
                root_childs.append(left)
                root_childs.append(right)
                if not left.is_terminal() and not right.is_terminal():
                    # make changes around the left_left clade
                    # left_left = left.clades[0]
                    left_right = left.clades[1]
                    right_left = right.clades[0]
                    right_right = right.clades[1]
                    # neightbor 1 (left_left + right_right)
                    del left.clades[1]
                    del right.clades[1]
                    left.clades.append(right_right)
                    right.clades.append(left_right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (left_left + right_left)
                    del left.clades[1]
                    del right.clades[0]
                    left.clades.append(right_left)
                    right.clades.append(right_right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (left_left + left_right)
                    del left.clades[1]
                    del right.clades[0]
                    left.clades.append(left_right)
                    right.clades.insert(0, right_left)
            elif clade in root_childs:
                # skip root child
                # (its rearrangements were already produced at the root)
                continue
            else:
                # method for other clades
                # make changes around the parent clade: exchange this
                # clade's sister with each of its two children in turn
                left = clade.clades[0]
                right = clade.clades[1]
                parent = parents[clade]
                if clade == parent.clades[0]:
                    sister = parent.clades[1]
                    # neighbor 1 (parent + right)
                    del parent.clades[1]
                    del clade.clades[1]
                    parent.clades.append(right)
                    clade.clades.append(sister)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (parent + left)
                    del parent.clades[1]
                    del clade.clades[0]
                    parent.clades.append(left)
                    clade.clades.append(right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (parent + sister)
                    del parent.clades[1]
                    del clade.clades[0]
                    parent.clades.append(sister)
                    clade.clades.insert(0, left)
                else:
                    sister = parent.clades[0]
                    # neighbor 1 (parent + right)
                    del parent.clades[0]
                    del clade.clades[1]
                    parent.clades.insert(0, right)
                    clade.clades.append(sister)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # neighbor 2 (parent + left)
                    del parent.clades[0]
                    del clade.clades[0]
                    parent.clades.insert(0, left)
                    clade.clades.append(right)
                    temp_tree = copy.deepcopy(tree)
                    neighbors.append(temp_tree)
                    # change back (parent + sister)
                    del parent.clades[0]
                    del clade.clades[0]
                    parent.clades.insert(0, sister)
                    clade.clades.insert(0, left)
        return neighbors
# ######################## Parsimony Classes ##########################
class ParsimonyScorer(Scorer):
    """Parsimony scorer with a scoring matrix.

    This is a combination of Fitch algorithm and Sankoff algorithm.
    See ParsimonyTreeConstructor for usage.

    :Parameters:
        matrix : _Matrix
            scoring matrix used in parsimony score calculation.
            When omitted, the unit-cost Fitch algorithm is used instead.
    """

    def __init__(self, matrix=None):
        # accept no matrix (Fitch) or a _Matrix instance (Sankoff)
        if not matrix or isinstance(matrix, _Matrix):
            self.matrix = matrix
        else:
            raise TypeError("Must provide a _Matrix object.")

    def get_score(self, tree, alignment):
        """Calculate and return the parsimony score given a tree and
        the MSA using the Fitch algorithm without the penalty matrix
        the Sankoff algorithm with the matrix

        Side effects: may root the tree at its midpoint and sorts the
        alignment in place so terminals and records can be matched.
        """
        # make sure the tree is rooted and bifurcating
        if not tree.is_bifurcating():
            raise ValueError("The tree provided should be bifurcating.")
        if not tree.rooted:
            tree.root_at_midpoint()
        # sort tree terminals and alignment
        # (both sorted by name, so they line up pairwise below)
        terms = tree.get_terminals()
        terms.sort(key=lambda term: term.name)
        alignment.sort()
        if not all(t.name == a.id for t, a in zip(terms, alignment)):
            raise ValueError(
                "Taxon names of the input tree should be the same with the alignment.")
        # term_align = dict(zip(terms, alignment))
        score = 0
        for i in range(len(alignment[0])):
            # parsimony score for column_i
            score_i = 0
            # get column
            column_i = alignment[:, i]
            # skip non-informative column
            # (all characters identical => no change needed anywhere)
            if column_i == len(column_i) * column_i[0]:
                continue
            # start calculating score_i using the tree and column_i
            # Fitch algorithm without the penalty matrix
            if not self.matrix:
                # init by mapping terminal clades and states in column_i
                clade_states = dict(zip(terms, [set([c]) for c in column_i]))
                # post-order pass: intersect the child state sets; when the
                # intersection is empty take the union and count one change
                for clade in tree.get_nonterminals(order="postorder"):
                    clade_childs = clade.clades
                    left_state = clade_states[clade_childs[0]]
                    right_state = clade_states[clade_childs[1]]
                    state = left_state & right_state
                    if not state:
                        state = left_state | right_state
                        score_i = score_i + 1
                    clade_states[clade] = state
            # Sankoff algorithm with the penalty matrix
            else:
                inf = float('inf')
                # init score arrays for terminal clades
                # (cost 0 for the observed character, infinity elsewhere)
                alphabet = self.matrix.names
                length = len(alphabet)
                clade_scores = {}
                for j in range(len(column_i)):
                    array = [inf] * length
                    index = alphabet.index(column_i[j])
                    array[index] = 0
                    clade_scores[terms[j]] = array
                # bottom up calculation
                # array[m] = cheapest cost of the subtree if this clade has
                # state m: min over child states of (penalty + child cost)
                for clade in tree.get_nonterminals(order="postorder"):
                    clade_childs = clade.clades
                    left_score = clade_scores[clade_childs[0]]
                    right_score = clade_scores[clade_childs[1]]
                    array = []
                    for m in range(length):
                        min_l = inf
                        min_r = inf
                        for n in range(length):
                            sl = self.matrix[
                                alphabet[m], alphabet[n]] + left_score[n]
                            sr = self.matrix[
                                alphabet[m], alphabet[n]] + right_score[n]
                            if min_l > sl:
                                min_l = sl
                            if min_r > sr:
                                min_r = sr
                        array.append(min_l + min_r)
                    clade_scores[clade] = array
                # minimum from root score
                # (after the post-order loop `array` is the root's cost vector)
                score_i = min(array)
            # TODO: resolve internal states
            score = score + score_i
        return score
class ParsimonyTreeConstructor(TreeConstructor):
    """Parsimony tree constructor.

    Searches for the most parsimonious tree of a multiple sequence
    alignment, starting from a user-supplied tree or, by default, from a
    UPGMA tree built with an 'identity' distance calculator.

    :Parameters:
        searcher : TreeSearcher
            tree searcher used to look for the best parsimony tree.
        starting_tree : Tree
            optional starting tree handed to the searcher.
    """

    def __init__(self, searcher, starting_tree=None):
        self.searcher = searcher
        self.starting_tree = starting_tree

    def build_tree(self, alignment):
        """Build and return the parsimony tree for *alignment*.

        :Parameters:
            alignment : MultipleSeqAlignment
                multiple sequence alignment the parsimony score is
                computed against.
        """
        # Without an explicit starting point, bootstrap the search from a
        # UPGMA tree computed with the 'identity' scoring matrix.
        if self.starting_tree is None:
            default_constructor = DistanceTreeConstructor(
                DistanceCalculator("identity"), "upgma")
            self.starting_tree = default_constructor.build_tree(alignment)
        return self.searcher.search(self.starting_tree, alignment)
| 37.356538 | 126 | 0.531629 |
b776deb37789da6405558e8c16dce2bd6c24b574 | 1,803 | py | Python | septentrion/lib.py | peopledoc/septentrion | 7c91890f525878ed50ccb8be482d093b93ef77f2 | [
"MIT"
] | 7 | 2020-02-11T15:24:09.000Z | 2021-12-08T12:24:59.000Z | septentrion/lib.py | peopledoc/septentrion | 7c91890f525878ed50ccb8be482d093b93ef77f2 | [
"MIT"
] | 124 | 2018-12-06T14:42:13.000Z | 2022-03-14T21:37:09.000Z | septentrion/lib.py | peopledoc/septentrion | 7c91890f525878ed50ccb8be482d093b93ef77f2 | [
"MIT"
] | 1 | 2020-02-11T15:24:14.000Z | 2020-02-11T15:24:14.000Z | import logging
from typing import Iterable
from septentrion import core, db, files, migration, style, versions
logger = logging.getLogger(__name__)
def initialize(settings_kwargs):
    """Turn raw CLI kwargs into initialized settings plus an output stylist."""
    wants_quiet = settings_kwargs.pop("quiet", False)
    chosen_stylist = style.stylist if not wants_quiet else style.noop_stylist
    return {
        "settings": core.initialize(**settings_kwargs),
        "stylist": chosen_stylist,
    }
def show_migrations(**settings_kwargs):
    """Describe the migration plan on the configured output."""
    core.describe_migration_plan(**initialize(settings_kwargs))
def migrate(**settings_kwargs):
    """Apply every pending migration to the target database."""
    migration.migrate(**initialize(settings_kwargs))
def is_schema_initialized(**settings_kwargs):
    """Tell whether the target database schema has already been set up."""
    settings = initialize(settings_kwargs)["settings"]
    return db.is_schema_initialized(settings=settings)
def build_migration_plan(**settings_kwargs):
    """Compute the migration plan for the best available schema version."""
    settings = initialize(settings_kwargs)["settings"]
    best_version = core.get_best_schema_version(settings=settings)
    return core.build_migration_plan(
        settings=settings, schema_version=best_version
    )
def fake(version: str, **settings_kwargs):
    """Record migrations up to *version* as applied without running them."""
    lib_kwargs = initialize(settings_kwargs)
    migration.create_fake_entries(
        version=versions.Version.from_string(version), **lib_kwargs
    )
def load_fixtures(version: str, **settings_kwargs) -> None:
    """Load the fixtures attached to *version* into the target database."""
    lib_kwargs = initialize(settings_kwargs)
    migration.load_fixtures(
        init_version=versions.Version.from_string(version), **lib_kwargs
    )
def get_known_versions(**settings_kwargs) -> Iterable[str]:
    """List every known migration version as its original string form."""
    settings = initialize(settings_kwargs)["settings"]
    found = files.get_known_versions(settings=settings)
    return [v.original_string for v in found]
| 32.196429 | 82 | 0.77371 |
95244e95d6ba3b806abed4478cf34bbe039320a9 | 1,153 | py | Python | compute/api/create_instance_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 4 | 2018-12-23T18:17:14.000Z | 2020-01-05T19:13:58.000Z | compute/api/create_instance_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 16 | 2019-06-15T00:02:56.000Z | 2021-03-25T23:22:38.000Z | compute/api/create_instance_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 4 | 2018-06-03T14:43:25.000Z | 2019-11-24T04:05:18.000Z | # Copyright 2015, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from gcp_devrel.testing.flaky import flaky
from create_instance import main
# Read at import time, so test collection fails fast when the GCP test
# environment variables are not configured.
PROJECT = os.environ['GCLOUD_PROJECT']
BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
@flaky
def test_main(capsys):
    """End-to-end smoke test: run create_instance.main and check its output."""
    main(
        PROJECT,
        BUCKET,
        'us-central1-f',
        'test-instance',
        wait=False)
    captured, _ = capsys.readouterr()
    pattern = re.compile(
        r'Instances in project .* and zone us-central1-.* - test-instance'
        r'.*Deleting instance.*done..$',
        re.DOTALL,
    )
    assert pattern.search(captured)
| 27.452381 | 75 | 0.702515 |
e8675f0ad03972825dcf990efa9bc8bddd65047f | 4,102 | py | Python | src/olympe/utils/format.py | bkbilly/olympe | 77cdb634d4221a6311369ddf3b68b7eb46b2b741 | [
"BSD-3-Clause"
] | null | null | null | src/olympe/utils/format.py | bkbilly/olympe | 77cdb634d4221a6311369ddf3b68b7eb46b2b741 | [
"BSD-3-Clause"
] | null | null | null | src/olympe/utils/format.py | bkbilly/olympe | 77cdb634d4221a6311369ddf3b68b7eb46b2b741 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (C) 2019-2021 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import subprocess
def columns(strs, col_nb=None, aligns='<', vsep='', hsep=None):
    """
    Format a collection of strings (strs) into multiple columns.

    If the number of columns (col_nb) is unspecified, the current terminal
    width is used to determine the maximum number of columns without line
    split that it is possible to format.

    @param strs: input list of string to format
    @param col_nb: the number of column, the default depends on the input
        and the current terminal size
    @param aligns: the alignment for each column, defaults to left alignment
    @param vsep: optional vertical column separator
    @param hsep: optional horizontal row separator
    """
    col_nb, cols_size, line_width = _columns_param(
        strs, col_nb=col_nb, vsep=vsep)
    # normalize the alignment string to hold one character per column,
    # repeating the last given character for unspecified columns
    if not aligns:
        aligns = '<'
    if len(aligns) < col_nb:
        aligns = aligns + aligns[-1] * (col_nb - len(aligns))
    # row separator: a bare newline, or a horizontal rule between newlines
    hsep = '\n' if hsep is None else '\n{}\n'.format(hsep * line_width)
    # one '{:<N}'-style placeholder per column, joined by the separator
    row_fmt = vsep.join(
        '{{:{}{}}}'.format(aligns[i], cols_size[i]) for i in range(col_nb))
    # consume the input col_nb items at a time, padding the last row
    cells = iter(strs)
    rows = []
    for first in cells:
        rest = [next(cells, "") for _ in range(col_nb - 1)]
        rows.append(row_fmt.format(first, *rest))
    return hsep.join(rows)
def _term_width(default=200):
    """Return the terminal width in columns, or *default* if it cannot be read."""
    proc = subprocess.Popen('stty size', stdout=subprocess.PIPE, shell=True)
    proc.wait()
    if proc.returncode != 0:
        # no controlling terminal (or no stty): fall back to the default
        return default
    return int(proc.stdout.read().split()[1])
def _columns_param(strs, col_nb, vsep):
    """Choose the layout parameters for :func:`columns`.

    Returns a ``(col_nb, cols_size, line_width)`` tuple where ``cols_size``
    maps each column index to its width and ``line_width`` is the total
    formatted row width.  When ``col_nb`` is None, increasing column counts
    are tried and the last layout still fitting the terminal is kept.
    """
    max_width = _term_width()
    col_nb_max = len(strs)
    params = None
    if col_nb is not None:
        # a fixed column count was requested; never exceed the item count
        col_nb_candidates = [min(col_nb_max, col_nb)]
    else:
        col_nb_candidates = list(range(1, col_nb_max + 1))
    for col_nb in col_nb_candidates:
        # width of column i: longest of the cells landing in it when the
        # items are laid out row by row (hence the [i::col_nb] stride),
        # plus one space and the separator width
        cols_size = {
            i: max(list(map(len, strs[i::col_nb]))) + 1 + len(vsep)
            for i in range(col_nb)
        }
        line_width = sum(cols_size.values()) + (col_nb - 1) * len(vsep)
        if params is not None and line_width > max_width:
            # one more column no longer fits: keep the previous best
            # (the very first candidate is always kept, even if too wide)
            break
        if params is None or params[2] < line_width:
            params = (col_nb, cols_size, line_width)
    return params
| 36.954955 | 80 | 0.670161 |
d301d588d6b3ccbe6d394594bc26913888a21e6f | 156 | py | Python | ai/corrections/ml_d01/ex05.py | PoCFrance/security-pool-2018 | acabc082808ade8ceccc395736a337059c0650de | [
"MIT"
] | 8 | 2018-02-27T09:31:04.000Z | 2019-05-09T14:42:51.000Z | ai/corrections/ml_d01/ex05.py | PoCFrance/security-pool-2018 | acabc082808ade8ceccc395736a337059c0650de | [
"MIT"
] | null | null | null | ai/corrections/ml_d01/ex05.py | PoCFrance/security-pool-2018 | acabc082808ade8ceccc395736a337059c0650de | [
"MIT"
] | 2 | 2018-02-27T09:31:05.000Z | 2018-07-27T14:07:53.000Z | import numpy as np
import matplotlib.pyplot as plt
# 30 evenly spaced sample points spanning [-10, 10].
x = np.linspace(-10, 10, 30)
# Logistic (sigmoid) function evaluated element-wise: 1 / (1 + e^(-x)).
y = 1 / (1 + np.exp(-x))
# NOTE(review): z is assigned but never used afterwards.
z = y
plt.xlim(-10, 10)
# Draw the sigmoid curve and display the figure window (blocks until closed).
plt.plot(x, y)
plt.show()
| 15.6 | 31 | 0.621795 |
8595f22701c3ef1b2feb3c211d27012b2d943501 | 13,370 | py | Python | config/settings/base.py | leonardoo/cb_backend | 3b92c12856961539fee55ac6c05adbfec4ee5784 | [
"MIT"
] | null | null | null | config/settings/base.py | leonardoo/cb_backend | 3b92c12856961539fee55ac6c05adbfec4ee5784 | [
"MIT"
] | null | null | null | config/settings/base.py | leonardoo/cb_backend | 3b92c12856961539fee55ac6c05adbfec4ee5784 | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# cb_backend/
APPS_DIR = ROOT_DIR / "cb_backend"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# https://docs.djangoproject.com/en/stable/ref/settings/#std:setting-DEFAULT_AUTO_FIELD
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"crispy_bootstrap5",
"allauth",
"allauth.account",
"allauth.socialaccount",
"django_celery_beat",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"cb_backend.users",
# Your stuff: custom apps go here
"cb_backend.eye",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "cb_backend.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#dirs
"DIRS": [str(APPS_DIR / "templates")],
# https://docs.djangoproject.com/en/dev/ref/settings/#app-dirs
"APP_DIRS": True,
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"cb_backend.users.context_processors.allauth_settings",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap5"
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND",
default="django.core.mail.backends.smtp.EmailBackend",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""leonardo orozco""", "leonardo-orozco@example.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# Celery
# ------------------------------------------------------------------------------
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env("CELERY_BROKER_URL")
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ["json"]
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = "json"
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERY_TASK_SOFT_TIME_LIMIT = 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#beat-scheduler
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "cb_backend.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "cb_backend.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
| 42.444444 | 100 | 0.651085 |
ba1409c730c2bf926e4d8a2554b69400ba75caad | 3,815 | py | Python | segment.py | hakril/midap | 726c62b583192b8ebb4d217bd5d13b318df0bc0f | [
"BSD-3-Clause"
] | 1 | 2017-08-25T12:23:10.000Z | 2017-08-25T12:23:10.000Z | segment.py | hakril/midap | 726c62b583192b8ebb4d217bd5d13b318df0bc0f | [
"BSD-3-Clause"
] | null | null | null | segment.py | hakril/midap | 726c62b583192b8ebb4d217bd5d13b318df0bc0f | [
"BSD-3-Clause"
] | null | null | null | import elt
import idc
import idaapi
class IDASegment(elt.IDANamedSizedElt):
    """An IDA segment, constructible from any address inside it.

    `start`/`end` are re-queried from IDA on every access, so the object
    stays valid if the segment is resized.
    """
    # Segment from any addr ? : yep
    def __init__(self, addr):
        start = idc.SegStart(addr)
        end = idc.SegEnd(addr)
        # TODO: when implementing segment relocation: be carrefull with self.addr going out of the new segment
        super(IDASegment, self).__init__(start, end)

    @property
    def start(self):
        """Start address of the segment (queried live from IDA)."""
        return idc.SegStart(self.addr)

    @property
    def end(self):
        """End address of the segment (queried live from IDA)."""
        return idc.SegEnd(self.addr)

    # make start and end descriptor writable to move segment ? (seems dangerous)
    def set_bound(self, startea=None, endea=None, flags=idc.SEGMOD_KEEP):
        """
        Change segment boundaries

        @param startea: new start address of the segment (default: keep current)
        @param endea: new end address of the segment (default: keep current)
        @param flags: combination of SEGMOD_... flags
        @return: boolean success
        """
        if startea is None:
            startea = self.start
        if endea is None:
            endea = self.end
        return idc.SetSegBounds(self.start, startea, endea, flags)

    def get_name(self):
        return idc.SegName(self.addr)

    def set_name(self, value):
        return idc.RenameSeg(self.addr, value)

    name = property(get_name, set_name, None, "Name of the segment")

    def get_class(self):
        seg = idaapi.getseg(self.addr)
        if not seg:
            return None
        # BUGFIX: get_segm_class() requires the segment as argument;
        # it was called with no arguments before
        return idaapi.get_segm_class(seg)

    def set_class(self, value):
        return idc.SetSegClass(self.addr, value)

    # BUGFIX: the doc string is property()'s 4th positional argument;
    # it was previously passed as the deleter (fdel)
    sclass = property(get_class, set_class, None, "class of the segment")

    def get_type(self):
        seg = idaapi.getseg(self.addr)
        if not seg:
            return None
        return seg.type

    def set_type(self, value):
        return idc.SetSegmentType(self.addr, value)

    # BUGFIX: same fdel-vs-doc mixup as `sclass` above
    type = property(get_type, set_type, None, "type of the segment")

    def get_addressing(self):
        seg = idaapi.getseg(self.start)
        return seg.bitness

    def set_addressing(self, value):
        return idc.SetSegAddressing(self.start, value)

    addressing_doc = """addressing bitness of the segment\n0: 16bit\n1: 32bit\n2: 64bit"""
    addressing = property(get_addressing, set_addressing, None, addressing_doc)

    def move(self, to, flags):
        """
        Move the segment to a new address; all information is relocated and
        address-sensitive data fixed up by the kernel.

        BUGFIX: `self` was missing from the signature although the body
        uses `self.start`.

        @param to: new segment start address
        @param flags: combination of MSF_... constants
        @returns: MOVE_SEGM_... error code (MOVE_SEGM_OK on success)
        """
        return idc.MoveSegm(self.start, to, flags)
| 33.173913 | 110 | 0.613893 |
ef0cfce2c5c2cc0028302538686c3c744d63853e | 749 | py | Python | soni_f110_ws/build/f110-skeletons-spring2020/simulator/racecar_simulator/catkin_generated/pkg.develspace.context.pc.py | 4-legends/Paresh-Soni-F110-2020 | 24b8d6ee654d4b0f78dc2bf643f0f850e5c0ca85 | [
"MIT"
] | null | null | null | soni_f110_ws/build/f110-skeletons-spring2020/simulator/racecar_simulator/catkin_generated/pkg.develspace.context.pc.py | 4-legends/Paresh-Soni-F110-2020 | 24b8d6ee654d4b0f78dc2bf643f0f850e5c0ca85 | [
"MIT"
] | null | null | null | soni_f110_ws/build/f110-skeletons-spring2020/simulator/racecar_simulator/catkin_generated/pkg.develspace.context.pc.py | 4-legends/Paresh-Soni-F110-2020 | 24b8d6ee654d4b0f78dc2bf643f0f850e5c0ca85 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/graspinglab/carla-ros-bridge/Paresh-Soni-F110-2020/soni_f110_ws/src/f110-skeletons-spring2020/simulator/racecar_simulator/include".split(';') if "/home/graspinglab/carla-ros-bridge/Paresh-Soni-F110-2020/soni_f110_ws/src/f110-skeletons-spring2020/simulator/racecar_simulator/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lracecar_simulator".split(';') if "-lracecar_simulator" != "" else []
PROJECT_NAME = "racecar_simulator"
PROJECT_SPACE_DIR = "/home/graspinglab/carla-ros-bridge/Paresh-Soni-F110-2020/soni_f110_ws/devel"
PROJECT_VERSION = "0.0.0"
| 83.222222 | 337 | 0.795728 |
21a04012fc37b26cc06fefeb33d9bb0d1706fc07 | 571 | py | Python | pypy/module/__pypy__/bytebuffer.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | pypy/module/__pypy__/bytebuffer.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/module/__pypy__/bytebuffer.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | #
# A convenient read-write buffer. Located here for want of a better place.
#
from pypy.interpreter.buffer import RWBuffer
from pypy.interpreter.gateway import ObjSpace
class ByteBuffer(RWBuffer):
    """Fixed-length read-write buffer backed by a list of one-char strings."""

    def __init__(self, len):
        # NOTE: `len` shadows the builtin; kept for interface compatibility.
        # Buffer is zero-filled on creation.
        self.data = ['\x00'] * len

    def getlength(self):
        # total capacity in bytes
        return len(self.data)

    def getitem(self, index):
        return self.data[index]

    def setitem(self, index, char):
        self.data[index] = char

def bytebuffer(space, length):
    # applevel entry point: allocate a zero-filled buffer of `length` bytes
    return space.wrap(ByteBuffer(length))
bytebuffer.unwrap_spec = [ObjSpace, int]
| 21.148148 | 75 | 0.683012 |
39f24a6307619d5874e1acf788d4e6b6d05b2e00 | 30,356 | py | Python | frappe/utils/data.py | jch4nni/frappe | 64387632ff64ff0c3e2fa5e793d1e7b3b6e2c27e | [
"MIT"
] | null | null | null | frappe/utils/data.py | jch4nni/frappe | 64387632ff64ff0c3e2fa5e793d1e7b3b6e2c27e | [
"MIT"
] | 4 | 2019-11-10T20:43:56.000Z | 2021-04-22T13:52:53.000Z | frappe/utils/data.py | jch4nni/frappe | 64387632ff64ff0c3e2fa5e793d1e7b3b6e2c27e | [
"MIT"
] | 1 | 2021-03-10T10:58:28.000Z | 2021-03-10T10:58:28.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import subprocess
import operator
import re, datetime, math, time
import babel.dates
from babel.core import UnknownLocaleError
from dateutil import parser
from num2words import num2words
from six.moves import html_parser as HTMLParser
from six.moves.urllib.parse import quote, urljoin
from html2text import html2text
from markdown2 import markdown, MarkdownError
from six import iteritems, text_type, string_types, integer_types
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S.%f"
DATETIME_FORMAT = DATE_FORMAT + " " + TIME_FORMAT
# datetime functions
def getdate(string_date=None):
	"""Convert a date string (yyyy-mm-dd), date or datetime to datetime.date.

	Falsy input returns today's date (user timezone); the sentinel
	"0001-01-01" returns None.
	"""
	if not string_date:
		return get_datetime().date()
	if isinstance(string_date, datetime.datetime):
		return string_date.date()
	elif isinstance(string_date, datetime.date):
		return string_date

	# dateutil parser does not agree with dates like 0001-01-01
	if not string_date or string_date=="0001-01-01":
		return None
	return parser.parse(string_date).date()
def get_datetime(datetime_str=None):
	"""Coerce various inputs to a datetime.

	- None/falsy: current datetime in the user's timezone
	- datetime/timedelta: returned unchanged
	- list/tuple: positional datetime components, e.g. (2020, 5, 17)
	- date: midnight of that day
	- str: parsed with DATETIME_FORMAT, falling back to dateutil
	The sentinels "0001-01-01" / "0000-00-00" return None.
	"""
	if not datetime_str:
		return now_datetime()

	if isinstance(datetime_str, (datetime.datetime, datetime.timedelta)):
		return datetime_str
	elif isinstance(datetime_str, (list, tuple)):
		# BUGFIX: components must be unpacked; datetime.datetime(seq) raises TypeError
		return datetime.datetime(*datetime_str)
	elif isinstance(datetime_str, datetime.date):
		return datetime.datetime.combine(datetime_str, datetime.time())

	# dateutil parser does not agree with dates like "0001-01-01" or "0000-00-00"
	if not datetime_str or (datetime_str or "").startswith(("0001-01-01", "0000-00-00")):
		return None

	try:
		return datetime.datetime.strptime(datetime_str, DATETIME_FORMAT)
	except ValueError:
		return parser.parse(datetime_str)
def to_timedelta(time_str):
	"""Convert an "hh:mm:ss[.ffffff]" string to datetime.timedelta; non-strings pass through."""
	if isinstance(time_str, string_types):
		t = parser.parse(time_str)
		return datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)

	else:
		return time_str
def add_to_date(date, years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, as_string=False, as_datetime=False):
	"""Return *date* shifted by the given offsets (via dateutil.relativedelta).

	date: date/datetime/string; None means "now".
	as_string: return a formatted string instead of a date object.
	as_datetime: include the time part in the result (implied when `hours`
	is non-zero or the input string contains a time component).
	"""
	from dateutil.relativedelta import relativedelta

	# BUGFIX(idiom): identity check instead of `date == None`
	if date is None:
		date = now_datetime()

	if hours:
		as_datetime = True

	if isinstance(date, string_types):
		as_string = True
		if " " in date:
			as_datetime = True
		date = parser.parse(date)

	date = date + relativedelta(years=years, months=months, weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)

	if as_string:
		if as_datetime:
			return date.strftime(DATETIME_FORMAT)
		else:
			return date.strftime(DATE_FORMAT)
	else:
		return date
def add_days(date, days):
	# convenience wrapper over add_to_date
	return add_to_date(date, days=days)

def add_months(date, months):
	return add_to_date(date, months=months)

def add_years(date, years):
	return add_to_date(date, years=years)

def date_diff(string_ed_date, string_st_date):
	# whole days between the two dates (end - start)
	return (getdate(string_ed_date) - getdate(string_st_date)).days

def month_diff(string_ed_date, string_st_date):
	# NOTE: counts months inclusive of both end points, hence the trailing +1
	ed_date = getdate(string_ed_date)
	st_date = getdate(string_st_date)
	return (ed_date.year - st_date.year) * 12 + ed_date.month - st_date.month + 1

def time_diff(string_ed_date, string_st_date):
	# datetime.timedelta between the two inputs
	return get_datetime(string_ed_date) - get_datetime(string_st_date)

def time_diff_in_seconds(string_ed_date, string_st_date):
	return time_diff(string_ed_date, string_st_date).total_seconds()

def time_diff_in_hours(string_ed_date, string_st_date):
	return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)

def now_datetime():
	# current UTC time converted to the user's timezone, returned tz-naive
	dt = convert_utc_to_user_timezone(datetime.datetime.utcnow())
	return dt.replace(tzinfo=None)

def get_timestamp(date):
	# POSIX timestamp (float) for midnight of the given date
	return time.mktime(getdate(date).timetuple())

def get_eta(from_time, percent_complete):
	# linear extrapolation of the remaining time, rendered as "H:MM:SS"
	diff = time_diff(now_datetime(), from_time).total_seconds()
	return str(datetime.timedelta(seconds=(100 - percent_complete) / percent_complete * diff))

def _get_time_zone():
	# uncached lookup; historical default zone kept for backward compatibility
	return frappe.db.get_system_setting('time_zone') or 'Asia/Kolkata' # Default to India ?!

def get_time_zone():
	# tests bypass the cache so time-zone changes take effect immediately
	if frappe.local.flags.in_test:
		return _get_time_zone()

	return frappe.cache().get_value("time_zone", _get_time_zone)

def convert_utc_to_user_timezone(utc_timestamp):
	from pytz import timezone, UnknownTimeZoneError
	utcnow = timezone('UTC').localize(utc_timestamp)
	try:
		return utcnow.astimezone(timezone(get_time_zone()))
	except UnknownTimeZoneError:
		# misconfigured zone: fall back to UTC rather than failing
		return utcnow

def now():
	"""return current datetime as yyyy-mm-dd hh:mm:ss"""
	# frappe.flags.current_date lets tests freeze the date part
	if frappe.flags.current_date:
		return getdate(frappe.flags.current_date).strftime(DATE_FORMAT) + " " + \
			now_datetime().strftime(TIME_FORMAT)
	else:
		return now_datetime().strftime(DATETIME_FORMAT)

def nowdate():
	"""return current date as yyyy-mm-dd"""
	return now_datetime().strftime(DATE_FORMAT)

def today():
	# alias of nowdate()
	return nowdate()

def nowtime():
	"""return current time in hh:mm:ss.ffffff (TIME_FORMAT)"""
	return now_datetime().strftime(TIME_FORMAT)
def get_first_day(dt, d_years=0, d_months=0):
	"""
	Returns the first day of the month for the date specified by date object
	Also adds `d_years` and `d_months` if specified
	"""
	dt = getdate(dt)

	# work in zero-based months so divmod carries month overflow into years
	carried_years, zero_based_month = divmod(dt.month + d_months - 1, 12)
	target_year = dt.year + d_years + carried_years

	return datetime.date(target_year, zero_based_month + 1, 1)
def get_first_day_of_week(dt):
	# Monday of the week containing dt (datetime.weekday() is 0 for Monday)
	return dt - datetime.timedelta(days=dt.weekday())

def get_last_day(dt):
	"""
	Returns last day of the month using:
	`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
	"""
	return get_first_day(dt, 0, 1) + datetime.timedelta(-1)

def get_time(time_str):
	# coerce datetime/time/timedelta/str to a datetime.time
	if isinstance(time_str, datetime.datetime):
		return time_str.time()
	elif isinstance(time_str, datetime.time):
		return time_str
	else:
		if isinstance(time_str, datetime.timedelta):
			# timedeltas round-trip through their string form, e.g. "1:02:03"
			time_str = str(time_str)
		return parser.parse(time_str).time()

def get_datetime_str(datetime_obj):
	# normalize a datetime (or parseable string) to DATETIME_FORMAT
	if isinstance(datetime_obj, string_types):
		datetime_obj = get_datetime(datetime_obj)

	return datetime_obj.strftime(DATETIME_FORMAT)

def get_user_format():
	# user's date format default, cached on frappe.local; ISO order fallback
	if getattr(frappe.local, "user_format", None) is None:
		frappe.local.user_format = frappe.db.get_default("date_format")

	return frappe.local.user_format or "yyyy-mm-dd"
def formatdate(string_date=None, format_string=None):
	"""
	Converts the given string date to :data:`user_format`
	User format specified in defaults

	Examples:

	* dd-mm-yyyy
	* mm-dd-yyyy
	* dd/mm/yyyy
	"""
	if not string_date:
		return ''

	date = getdate(string_date)
	if not format_string:
		format_string = get_user_format()
	# babel uses "MM" for months; the stored user format uses "mm"
	format_string = format_string.replace("mm", "MM")
	try:
		formatted_date = babel.dates.format_date(date, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
	except UnknownLocaleError:
		# unknown locale: fall back to strftime with an equivalent format
		format_string = format_string.replace("MM", "%m").replace("dd", "%d").replace("yyyy", "%Y")
		formatted_date = date.strftime(format_string)
	return formatted_date

def format_time(txt):
	# localized time via babel, strftime fallback on unknown locale
	try:
		formatted_time = babel.dates.format_time(get_time(txt), locale=(frappe.local.lang or "").replace("-", "_"))
	except UnknownLocaleError:
		formatted_time = get_time(txt).strftime("%H:%M:%S")
	return formatted_time

def format_datetime(datetime_string, format_string=None):
	# localized datetime; returns None for falsy input
	if not datetime_string:
		return

	# NOTE: this local name shadows the `datetime` module inside this function
	datetime = get_datetime(datetime_string)
	if not format_string:
		format_string = get_user_format().replace("mm", "MM") + " HH:mm:ss"

	try:
		formatted_datetime = babel.dates.format_datetime(datetime, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
	except UnknownLocaleError:
		formatted_datetime = datetime.strftime('%Y-%m-%d %H:%M:%S')
	return formatted_datetime
def get_weekdays():
	"""Weekday names, Monday-first, aligned with datetime.weekday() indices."""
	return [
		"Monday",
		"Tuesday",
		"Wednesday",
		"Thursday",
		"Friday",
		"Saturday",
		"Sunday",
	]
def get_weekday(datetime=None):
	"""Return the weekday name ("Monday"...) of the given datetime (default: now)."""
	# NOTE: the parameter shadows the `datetime` module inside this function
	if not datetime:
		datetime = now_datetime()
	weekdays = get_weekdays()
	return weekdays[datetime.weekday()]

def global_date_format(date, format="long"):
	"""returns localized date in the form of January 1, 2012"""
	date = getdate(date)
	formatted_date = babel.dates.format_date(date, locale=(frappe.local.lang or "en").replace("-", "_"), format=format)
	return formatted_date
def has_common(l1, l2):
	"""Returns truthy value if there are common elements in lists l1 and l2"""
	return set(l1).intersection(l2)
def flt(s, precision=None):
	"""Convert to float, ignoring commas; invalid input yields 0.

	precision: optional number of decimals to round to (via `rounded`).
	"""
	if isinstance(s, string_types):
		s = s.replace(',','')

	try:
		num = float(s)
		if precision is not None:
			num = rounded(num, precision)
	except (ValueError, TypeError, OverflowError):
		# narrowed from a broad `except Exception`: only conversion/rounding
		# failures should mean "not a number"
		num = 0

	return num
def get_wkhtmltopdf_version():
	"""Return the installed wkhtmltopdf binary's version string, cached; '0' if unavailable."""
	wkhtmltopdf_version = frappe.cache().hget("wkhtmltopdf_version", None)

	if not wkhtmltopdf_version:
		try:
			res = subprocess.check_output(["wkhtmltopdf", "--version"])
			# output looks like "wkhtmltopdf <version> ..."; take the 2nd token
			wkhtmltopdf_version = res.decode('utf-8').split(" ")[1]
			frappe.cache().hset("wkhtmltopdf_version", None, wkhtmltopdf_version)
		except Exception:
			# deliberate best-effort: the binary may be missing; caller gets '0'
			pass

	return (wkhtmltopdf_version or '0')
def cint(s):
	"""Convert to int via float (so "3.7" -> 3); invalid input yields 0."""
	try:
		num = int(float(s))
	except (ValueError, TypeError, OverflowError):
		# narrowed from a bare `except`: only conversion failures mean
		# "not a number" (OverflowError covers int(float('inf')))
		num = 0
	return num
def floor(s):
	"""
	A number representing the largest integer less than or equal to the specified number

	Parameters
	----------
	s : int or str or Decimal object
		The mathematical value to be floored

	Returns
	-------
	int
		number representing the largest integer less than or equal to the specified number

	"""
	try:
		num = cint(math.floor(flt(s)))
	except (ValueError, TypeError, OverflowError):
		# narrowed from a bare except; flt/cint already coerce bad input to 0
		num = 0
	return num

def ceil(s):
	"""
	The smallest integer greater than or equal to the given number

	Parameters
	----------
	s : int or str or Decimal object
		The mathematical value to be ceiled

	Returns
	-------
	int
		smallest integer greater than or equal to the given number

	"""
	try:
		num = cint(math.ceil(flt(s)))
	except (ValueError, TypeError, OverflowError):
		# narrowed from a bare except; flt/cint already coerce bad input to 0
		num = 0
	return num
def cstr(s, encoding='utf-8'):
	# unicode-safe str(): delegates to frappe.as_unicode
	return frappe.as_unicode(s, encoding)

def rounded(num, precision=0):
	"""round method for round halfs to nearest even algorithm aka banker's rounding - compatible with python3"""
	# NOTE(review): an exact .5 rounds half-to-even only when precision == 0;
	# with precision > 0 the scaled half rounds up (floor + 1) — confirm intended
	precision = cint(precision)
	multiplier = 10 ** precision

	# avoid rounding errors
	num = round(num * multiplier if precision else num, 8)

	# NOTE: this local shadows the module-level floor() helper
	floor = math.floor(num)
	decimal_part = num - floor

	if not precision and decimal_part == 0.5:
		num = floor if (floor % 2 == 0) else floor + 1
	else:
		if decimal_part == 0.5:
			num = floor + 1
		else:
			num = round(num)

	return (num / multiplier) if precision else num
def remainder(numerator, denominator, precision=2):
	"""Return numerator % denominator, evaluated at the given decimal precision."""
	precision = cint(precision)
	scale = 10 ** precision

	if precision:
		# scale both operands so the modulo happens on the shifted values
		_remainder = ((numerator * scale) % (denominator * scale)) / scale
	else:
		_remainder = numerator % denominator

	return flt(_remainder, precision)

def safe_div(numerator, denominator, precision=2):
	"""
	SafeMath division that returns zero when divided by zero.
	"""
	precision = cint(precision)
	quotient = 0.0 if denominator == 0 else float(numerator) / denominator
	return flt(quotient, precision)
def round_based_on_smallest_currency_fraction(value, currency, precision=2):
	"""Round *value* to the currency's smallest tradable fraction (e.g. 0.05)."""
	smallest_currency_fraction_value = flt(frappe.db.get_value("Currency",
		currency, "smallest_currency_fraction_value", cache=True))

	if smallest_currency_fraction_value:
		remainder_val = remainder(value, smallest_currency_fraction_value, precision)
		# round to the nearer multiple of the smallest fraction
		if remainder_val > (smallest_currency_fraction_value / 2):
			value += smallest_currency_fraction_value - remainder_val
		else:
			value -= remainder_val
	else:
		value = rounded(value)

	return flt(value, precision)

def encode(obj, encoding="utf-8"):
	"""Encode unicode strings (including inside a list) to bytes; others pass through."""
	if isinstance(obj, list):
		out = []
		for o in obj:
			if isinstance(o, text_type):
				out.append(o.encode(encoding))
			else:
				out.append(o)
		return out
	elif isinstance(obj, text_type):
		return obj.encode(encoding)
	else:
		return obj

def parse_val(v):
	"""Converts to simple datatypes from SQL query results"""
	if isinstance(v, (datetime.date, datetime.datetime)):
		v = text_type(v)
	elif isinstance(v, datetime.timedelta):
		# keep only "hh:mm" of the timedelta's string form
		v = ":".join(text_type(v).split(":")[:2])
	elif isinstance(v, integer_types):
		v = int(v)
	return v
def fmt_money(amount, precision=None, currency=None):
	"""
	Convert to string with commas for thousands, millions etc
	"""
	number_format = frappe.db.get_default("number_format") or "#,###.##"
	if precision is None:
		precision = cint(frappe.db.get_default('currency_precision')) or None

	decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)

	if precision is None:
		precision = number_format_precision

	# 40,000 -> 40,000.00
	# 40,000.00000 -> 40,000.00
	# 40,000.23000 -> 40,000.23

	if isinstance(amount, string_types):
		amount = flt(amount, precision)

	if decimal_str:
		decimals_after = str(round(amount % 1, precision))
		parts = decimals_after.split('.')
		parts = parts[1] if len(parts) > 1 else parts[0]
		decimals = parts
		if precision > 2:
			if len(decimals) < 3:
				if currency:
					# derive the precision from the currency's fraction units
					fraction = frappe.db.get_value("Currency", currency, "fraction_units", cache=True) or 100
					precision = len(cstr(fraction)) - 1
				else:
					precision = number_format_precision
			elif len(decimals) < precision:
				precision = len(decimals)

	amount = '%.*f' % (precision, round(flt(amount), precision))

	if amount.find('.') == -1:
		decimals = ''
	else:
		decimals = amount.split('.')[1]

	parts = []
	minus = ''
	if flt(amount) < 0:
		minus = '-'

	amount = cstr(abs(flt(amount))).split('.')[0]

	if len(amount) > 3:
		parts.append(amount[-3:])
		amount = amount[:-3]

		# Indian-style lakh/crore grouping uses 2-digit groups after the first 3
		val = number_format=="#,##,###.##" and 2 or 3

		while len(amount) > val:
			parts.append(amount[-val:])
			amount = amount[:-val]

	parts.append(amount)

	parts.reverse()

	amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
	if amount != '0':
		amount = minus + amount

	if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
		symbol = frappe.db.get_value("Currency", currency, "symbol", cache=True) or currency
		amount = symbol + " " + amount

	return amount

# number format -> (decimal separator, thousands separator, default precision)
number_format_info = {
	"#,###.##": (".", ",", 2),
	"#.###,##": (",", ".", 2),
	"# ###.##": (".", " ", 2),
	"# ###,##": (",", " ", 2),
	"#'###.##": (".", "'", 2),
	"#, ###.##": (".", ", ", 2),
	"#,##,###.##": (".", ",", 2),
	"#,###.###": (".", ",", 3),
	"#.###": ("", ".", 0),
	"#,###": ("", ",", 0)
}

def get_number_format_info(format):
	# unknown formats fall back to the "#,###.##" convention
	return number_format_info.get(format) or (".", ",", 2)
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
	"""
	Returns string in words with currency and fraction currency.

	Returns "" for negative or unparseable input.
	"""
	from frappe.utils import get_defaults
	_ = frappe._

	try:
		# note: `flt` returns 0 for invalid input and we don't want that
		number = float(number)
	except ValueError:
		return ""

	number = flt(number)
	if number < 0:
		return ""

	d = get_defaults()
	if not main_currency:
		main_currency = d.get('currency', 'INR')
	if not fraction_currency:
		fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction", cache=True) or _("Cent")

	number_format = frappe.db.get_value("Currency", main_currency, "number_format", cache=True) or \
		frappe.db.get_default("number_format") or "#,###.##"

	fraction_length = get_number_format_info(number_format)[2]

	# render with the currency's fraction precision, then split main/fraction
	n = "%.{0}f".format(fraction_length) % number

	numbers = n.split('.')
	main, fraction = numbers if len(numbers) > 1 else [n, '00']

	if len(fraction) < fraction_length:
		zeros = '0' * (fraction_length - len(fraction))
		fraction += zeros

	# the Indian number format implies lakh/crore wording instead of millions
	in_million = True
	if number_format == "#,##,###.##": in_million = False

	# 0.00
	if main == '0' and fraction in ['00', '000']:
		out = "{0} {1}".format(main_currency, _('Zero'))
	# 0.XX
	elif main == '0':
		out = _(in_words(fraction, in_million).title()) + ' ' + fraction_currency
	else:
		out = main_currency + ' ' + _(in_words(main, in_million).title())
		if cint(fraction):
			out = out + ' ' + _('and') + ' ' + _(in_words(fraction, in_million).title()) + ' ' + fraction_currency

	return out + ' ' + _('only.')
#
# convert number to words
#
def in_words(integer, in_million=True):
	"""
	Returns string in words for the given integer.

	Uses the en_IN locale (lakh/crore wording) when `in_million` is falsy,
	otherwise the session language; falls back to English when the locale is
	unsupported or the number is out of range for it.
	"""
	locale = 'en_IN' if not in_million else frappe.local.lang
	integer = int(integer)
	try:
		ret = num2words(integer, lang=locale)
	except (NotImplementedError, OverflowError):
		# both fallback branches were identical; merged into one handler
		ret = num2words(integer, lang='en')
	return ret.replace('-', ' ')
def is_html(text):
	"""Truthy (a regex match) if *text* contains an HTML-like "<tag>"; non-strings are never HTML."""
	if not isinstance(text, frappe.string_types):
		return False
	return re.search('<[^>]+>', text)
def is_image(filepath):
	"""True if the path/URL (query string ignored) has an image mimetype."""
	from mimetypes import guess_type

	# filepath can be https://example.com/bed.jpg?v=129 — drop the query string
	base = filepath.split('?', 1)[0]
	mimetype = guess_type(base)[0] or ""
	return mimetype.startswith("image/")
# from Jinja2 code
# matches HTML comments and any <...> tag
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')

def strip_html(text):
	"""removes anything enclosed in and including <> (tags and HTML comments)"""
	return _striptags_re.sub("", text)
def escape_html(text):
	"""Return *text* with the five HTML-special characters replaced by entities."""
	entities = {
		"&": "&amp;",
		'"': "&quot;",
		"'": "&apos;",
		">": "&gt;",
		"<": "&lt;",
	}
	# single per-character pass, so '&' inside replacements is never re-escaped
	return "".join(entities.get(ch, ch) for ch in text)
def pretty_date(iso_datetime):
	"""
	Takes an ISO time and returns a string representing how
	long ago the date represents.
	Ported from PrettyDate by John Resig
	"""
	from frappe import _
	if not iso_datetime: return ''
	import math

	if isinstance(iso_datetime, string_types):
		iso_datetime = datetime.datetime.strptime(iso_datetime, DATETIME_FORMAT)
	now_dt = datetime.datetime.strptime(now(), DATETIME_FORMAT)
	dt_diff = now_dt - iso_datetime

	# available only in python 2.7+
	# dt_diff_seconds = dt_diff.total_seconds()

	dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds

	dt_diff_days = math.floor(dt_diff_seconds / 86400.0)

	# differnt cases
	if dt_diff_seconds < 60.0:
		return _('just now')
	elif dt_diff_seconds < 120.0:
		return _('1 minute ago')
	elif dt_diff_seconds < 3600.0:
		return _('{0} minutes ago').format(cint(math.floor(dt_diff_seconds / 60.0)))
	elif dt_diff_seconds < 7200.0:
		return _('1 hour ago')
	elif dt_diff_seconds < 86400.0:
		return _('{0} hours ago').format(cint(math.floor(dt_diff_seconds / 3600.0)))
	elif dt_diff_days == 1.0:
		return _('Yesterday')
	elif dt_diff_days < 7.0:
		return _('{0} days ago').format(cint(dt_diff_days))
	elif dt_diff_days < 12:
		return _('1 week ago')
	elif dt_diff_days < 31.0:
		return _('{0} weeks ago').format(cint(math.ceil(dt_diff_days / 7.0)))
	elif dt_diff_days < 46:
		return _('1 month ago')
	elif dt_diff_days < 365.0:
		return _('{0} months ago').format(cint(math.ceil(dt_diff_days / 30.0)))
	elif dt_diff_days < 550.0:
		return _('1 year ago')
	else:
		# NOTE(review): unlike every branch above, this string is not wrapped
		# in _() and so is never translated — confirm whether intentional
		return '{0} years ago'.format(cint(math.floor(dt_diff_days / 365.0)))

def comma_or(some_list, add_quotes=True):
	# "a, b or c"
	return comma_sep(some_list, frappe._("{0} or {1}"), add_quotes)

def comma_and(some_list ,add_quotes=True):
	# "a, b and c"
	return comma_sep(some_list, frappe._("{0} and {1}"), add_quotes)

def comma_sep(some_list, pattern, add_quotes=True):
	"""Join a list using *pattern* for the final separator; non-lists pass through."""
	if isinstance(some_list, (list, tuple)):
		# list(some_list) is done to preserve the existing list
		some_list = [text_type(s) for s in list(some_list)]

		if not some_list:
			return ""

		elif len(some_list) == 1:
			return some_list[0]

		else:
			some_list = ["'%s'" % s for s in some_list] if add_quotes else ["%s" % s for s in some_list]
			return pattern.format(", ".join(frappe._(s) for s in some_list[:-1]), some_list[-1])

	else:
		return some_list
def new_line_sep(some_list):
	"""Join list/tuple items with "\\n " separators; other values pass through.

	An empty list yields "", a single item is returned as-is.
	"""
	if isinstance(some_list, (list, tuple)):
		# list(some_list) is done to preserve the existing list
		some_list = [text_type(s) for s in list(some_list)]

		if not some_list:
			return ""

		elif len(some_list) == 1:
			return some_list[0]

		else:
			# the previous `format(...)` wrapper and "%s" re-stringify were
			# no-ops (items are already text); join directly
			return "\n ".join(some_list)

	else:
		return some_list
def filter_strip_join(some_list, sep):
	"""given a list, filter None values, strip spaces and join"""
	stripped = (cstr(entry).strip() for entry in some_list if entry)
	return cstr(sep).join(stripped)
def get_url(uri=None, full_address=False):
	"""get app url from request

	Host resolution order: site config host_name/hostname, the incoming
	request's host, the site name (with http/https per SSL config), the
	Website Settings subdomain, then http://localhost.
	"""
	host_name = frappe.local.conf.host_name or frappe.local.conf.hostname

	# absolute URIs are returned untouched
	if uri and (uri.startswith("http://") or uri.startswith("https://")):
		return uri

	if not host_name:
		request_host_name = get_host_name_from_request()

		if request_host_name:
			host_name = request_host_name

		elif frappe.local.site:
			protocol = 'http://'

			if frappe.local.conf.ssl_certificate:
				protocol = 'https://'

			elif frappe.local.conf.wildcard:
				domain = frappe.local.conf.wildcard.get('domain')
				if domain and frappe.local.site.endswith(domain) and frappe.local.conf.wildcard.get('ssl_certificate'):
					protocol = 'https://'

			host_name = protocol + frappe.local.site

		else:
			host_name = frappe.db.get_value("Website Settings", "Website Settings",
				"subdomain")

			if not host_name:
				host_name = "http://localhost"

	if host_name and not (host_name.startswith("http://") or host_name.startswith("https://")):
		host_name = "http://" + host_name

	if not uri and full_address:
		uri = frappe.get_request_header("REQUEST_URI", "")

	port = frappe.conf.http_port or frappe.conf.webserver_port

	# append the dev-server port unless running behind a production supervisor/systemd setup
	if not (frappe.conf.restart_supervisor_on_update or frappe.conf.restart_systemd_on_update) and host_name and not url_contains_port(host_name) and port:
		host_name = host_name + ':' + str(port)

	url = urljoin(host_name, uri) if uri else host_name

	return url

def get_host_name_from_request():
	"""Scheme + host of the current HTTP request, honoring X-Forwarded-Proto; None outside a request."""
	if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
		protocol = 'https://' if 'https' == frappe.get_request_header('X-Forwarded-Proto', "") else 'http://'
		return protocol + frappe.local.request.host
def url_contains_port(url):
	"""True if *url* carries an explicit port, e.g. "http://host:8000" (two ':' with a scheme)."""
	return url.count(':') > 1
def get_host_name():
	# host (and optional port) portion of the site URL, scheme stripped
	return get_url().rsplit("//", 1)[-1]

def get_link_to_form(doctype, name, label=None):
	"""Return an HTML anchor to the desk form of the given document (label defaults to the name)."""
	if not label: label = name

	return """<a href="{0}">{1}</a>""".format(get_url_to_form(doctype, name), label)
def get_link_to_report(name, label=None, report_type=None, doctype=None, filters=None):
	"""Return an HTML anchor to a report, optionally with filters in the query string.

	filters: dict mapping fieldname to either a plain value (k=v) or a list of
	[operator, value] pairs rendered as k=["op","value"].
	"""
	if not label: label = name

	if filters:
		conditions = []
		for k, v in iteritems(filters):
			if isinstance(v, list):
				for value in v:
					# BUGFIX: a misplaced parenthesis previously applied str() to
					# `value[0] + '"' + ...`, crashing on non-string filter values
					conditions.append('{0}=["{1}","{2}"]'.format(k, value[0], value[1]))
			else:
				conditions.append('{0}={1}'.format(k, v))

		filters = "&".join(conditions)
		return """<a href='{0}'>{1}</a>""".format(get_url_to_report_with_filters(name, filters, report_type, doctype), label)
	else:
		return """<a href='{0}'>{1}</a>""".format(get_url_to_report(name, report_type, doctype), label)
def get_absolute_url(doctype, name):
    """Return the desk-relative route (not a full URL) to a document's form."""
    route = "desk#Form/{0}/{1}".format(quoted(doctype), quoted(name))
    return route
def get_url_to_form(doctype, name):
    """Return the absolute URL of a document's desk form view."""
    route = "desk#Form/{0}/{1}".format(quoted(doctype), quoted(name))
    return get_url(uri=route)
def get_url_to_list(doctype):
    """Return the absolute URL of a doctype's desk list view."""
    route = "desk#List/{0}".format(quoted(doctype))
    return get_url(uri=route)
def get_url_to_report(name, report_type=None, doctype=None):
    """Return the absolute URL of a report.

    Report Builder reports route by doctype + name; all other report types
    route as query reports by name alone.
    """
    if report_type == "Report Builder":
        route = "desk#Report/{0}/{1}".format(quoted(doctype), quoted(name))
    else:
        route = "desk#query-report/{0}".format(quoted(name))
    return get_url(uri=route)
def get_url_to_report_with_filters(name, filters, report_type=None, doctype=None):
    """Return the absolute URL of a report with a pre-encoded filter query string."""
    if report_type == "Report Builder":
        route = "desk#Report/{0}?{1}".format(quoted(doctype), filters)
    else:
        route = "desk#query-report/{0}?{1}".format(quoted(name), filters)
    return get_url(uri=route)
# Map of filter-operator strings to two-argument predicates.
operator_map = {
    # prefix match (None-safe on the left operand)
    "^": lambda a, b: (a or "").startswith(b),
    # membership in a collection
    "in": lambda a, b: a in b,
    "not in": lambda a, b: a not in b,
    # plain comparisons, delegated straight to the operator module
    "=": operator.eq,
    "!=": operator.ne,
    ">": operator.gt,
    "<": operator.lt,
    ">=": operator.ge,
    "<=": operator.le,
    # truthiness tests; the second operand is ignored
    "not None": lambda a, b: bool(a),
    "None": lambda a, b: not a
}
def evaluate_filters(doc, filters):
    '''Return True if ``doc`` matches every condition in ``filters``.

    ``filters`` may be a dict of {fieldname: value} pairs or a list/tuple of
    filter specs understood by get_filter(). Evaluation is lazy and stops at
    the first failing condition.
    '''
    if isinstance(filters, dict):
        conditions = ({key: value} for key, value in iteritems(filters))
    elif isinstance(filters, (list, tuple)):
        conditions = iter(filters)
    else:
        conditions = iter(())

    for condition in conditions:
        parsed = get_filter(None, condition)
        if not compare(doc.get(parsed.fieldname), parsed.operator, parsed.value):
            return False
    return True
def compare(val1, condition, val2):
    """Apply ``condition`` (a key of operator_map) to the two values.

    Unknown operators evaluate to False rather than raising.
    """
    evaluator = operator_map.get(condition)
    if evaluator is None:
        return False
    return evaluator(val1, val2)
def get_filter(doctype, f):
    """Returns a _dict like

    {
        "doctype":
        "fieldname":
        "operator":
        "value":
    }
    """
    from frappe.model import default_fields, optional_fields

    # Normalize a {key: value} dict into a filter tuple first.
    if isinstance(f, dict):
        key, value = next(iter(f.items()))
        f = make_filter_tuple(doctype, key, value)

    if not isinstance(f, (list, tuple)):
        frappe.throw(frappe._("Filter must be a tuple or list (in a list)"))

    # Pad/trim to exactly (doctype, fieldname, operator, value).
    if len(f) == 3:
        f = (doctype, f[0], f[1], f[2])
    elif len(f) > 4:
        f = f[0:4]
    elif len(f) != 4:
        frappe.throw(frappe._("Filter must have 4 values (doctype, fieldname, operator, value): {0}").format(str(f)))

    f = frappe._dict(doctype=f[0], fieldname=f[1], operator=f[2], value=f[3])

    # Guard against SQL injection through the fieldname.
    sanitize_column(f.fieldname)

    if not f.operator:
        # if operator is missing
        f.operator = "="

    valid_operators = ("=", "!=", ">", "<", ">=", "<=", "like", "not like", "in", "not in", "is",
        "between", "descendants of", "ancestors of", "not descendants of", "not ancestors of", "previous", "next")
    if f.operator.lower() not in valid_operators:
        frappe.throw(frappe._("Operator must be one of {0}").format(", ".join(valid_operators)))

    if f.doctype and (f.fieldname not in default_fields + optional_fields):
        # verify fieldname belongs to the doctype
        meta = frappe.get_meta(f.doctype)
        if not meta.has_field(f.fieldname):
            # try and match the doctype name from child tables
            for df in meta.get_table_fields():
                if frappe.get_meta(df.options).has_field(f.fieldname):
                    f.doctype = df.options
                    break

    return f
def make_filter_tuple(doctype, key, value):
    '''return a filter tuple like [doctype, key, operator, value]'''
    if isinstance(value, (list, tuple)):
        # Value already carries its own [operator, operand] pair.
        filter_operator, operand = value[0], value[1]
    else:
        filter_operator, operand = "=", value
    return [doctype, key, filter_operator, operand]
def make_filter_dict(filters):
    '''convert this [[doctype, key, operator, value], ..]
    to this { key: (operator, value), .. }
    '''
    mapped = frappe._dict()
    for row in filters:
        # row[0] (the doctype) is intentionally dropped.
        mapped[row[1]] = (row[2], row[3])
    return mapped
def sanitize_column(column_name):
    """Reject column names that could be abused for SQL injection.

    Raises frappe.DataError when ``column_name`` contains quoting/statement
    characters (, ' ( ) ;) combined with blacklisted SQL keywords.
    """
    from frappe import _
    regex = re.compile("^.*[,'();].*")
    blacklisted_keywords = ['select', 'create', 'insert', 'delete', 'drop', 'update', 'case', 'and', 'or']

    def _raise_exception():
        frappe.throw(_("Invalid field name {0}").format(column_name), frappe.DataError)

    if 'ifnull' in column_name:
        if regex.match(column_name):
            # Flag blacklisted keywords appearing as space-delimited words.
            # The previous check tested "' and '" for membership in the
            # split() token list; split tokens never contain spaces, so that
            # check could never fire (dead code). Test the raw string instead.
            if any(' {0} '.format(keyword) in column_name for keyword in blacklisted_keywords):
                _raise_exception()

            # to avoid select, delete, drop, update and case
            elif any(keyword in column_name.split() for keyword in blacklisted_keywords):
                _raise_exception()
    elif regex.match(column_name):
        _raise_exception()
def scrub_urls(html):
    """Expand relative URLs in ``html`` into absolute site URLs."""
    # URL quoting is intentionally left to the composer.
    # html = quote_urls(html)
    return expand_relative_urls(html)
def expand_relative_urls(html):
    # expand relative urls
    url = get_url()
    if url.endswith("/"): url = url[:-1]

    def _expand_relative_urls(match):
        to_expand = list(match.groups())
        # mailto: and data: targets are not site-relative; leave them alone.
        if not to_expand[2].startswith('mailto') and not to_expand[2].startswith('data:'):
            if not to_expand[2].startswith("/"):
                to_expand[2] = "/" + to_expand[2]
            # Splice the absolute site URL in front of the relative path.
            to_expand.insert(2, url)

            if 'url' in to_expand[0] and to_expand[1].startswith('(') and to_expand[-1].endswith(')'):
                # background-image: url('/assets/...') - workaround for wkhtmltopdf print-media-type
                to_expand.append(' !important')

        return "".join(to_expand)

    # href/src attributes whose value does not start with "http".
    html = re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)

    # background-image: url('/assets/...')
    html = re.sub('(:[\s]?url)(\([\'"]?)((?!http)[^\'" >]+)([\'"]?\))', _expand_relative_urls, html)
    return html
def quoted(url):
    """Percent-encode ``url``, leaving characters commonly valid in URLs intact."""
    encoded = encode(url)
    return cstr(quote(encoded, safe=b"~@#$&()*!+=:;,.?/'"))
def quote_urls(html):
    """Percent-encode the target of every absolute http(s) href/src in ``html``."""
    def _quote_url(match):
        parts = list(match.groups())
        parts[2] = quoted(parts[2])
        return "".join(parts)

    pattern = r'(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)'
    return re.sub(pattern, _quote_url, html)
def unique(seq):
    """use this instead of list(set()) to preserve order of the original list.
    Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
    seen = set()
    deduped = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            deduped.append(item)
    return deduped
def strip(val, chars=None):
    """Strip ``val`` like str.strip, after removing zero-width characters.

    \ufeff is the zero-width no-break space (BOM); \u200b is the zero-width
    space. ``val`` may be None, which is treated as the empty string.
    """
    cleaned = val or ""
    for zero_width in ("\ufeff", "\u200b"):
        cleaned = cleaned.replace(zero_width, "")
    return cleaned.strip(chars)
def to_markdown(html):
    """Convert HTML to markdown text; returns None when parsing fails."""
    try:
        return html2text(html or '')
    except HTMLParser.HTMLParseError:
        return None
def md_to_html(markdown_text):
    """Render markdown to HTML with the standard set of extras.

    Returns None when the markdown renderer raises MarkdownError.
    """
    extras = {
        'fenced-code-blocks': None,
        'tables': None,
        'header-ids': None,
        'highlightjs-lang': None,
        'html-classes': {
            'table': 'table table-bordered',
            'img': 'screenshot'
        }
    }
    try:
        return markdown(markdown_text or '', extras=extras)
    except MarkdownError:
        return None
def get_source_value(source, key):
    '''Get value from source (object or dict) based on key'''
    # Dicts use .get (missing key -> None); objects use attribute access
    # (missing attribute raises AttributeError, matching prior behavior).
    if isinstance(source, dict):
        return source.get(key)
    return getattr(source, key)
def is_subset(list_a, list_b):
    '''Returns whether list_a is a subset of list_b

    Note: duplicates in list_a make this False even when every value occurs
    in list_b, because the intersection size is compared to len(list_a).
    '''
    overlap = set(list_a).intersection(list_b)
    return len(overlap) == len(list_a)
| 28.800759 | 152 | 0.696403 |
a6d60b6565129b8838a86e98b5a368f656a6a31b | 1,292 | py | Python | test/functional/p2p_mempool.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/p2p_mempool.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | test/functional/p2p_mempool.py | cryptomiles/cryptomiles | d3815eaf7716fbca9459f4162ae7ba4714298d27 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Cryptomiles Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import *
class P2PMempoolTests(CryptomilesTestFramework):
    """Verify a peer sending 'mempool' is dropped when bloom filters are off."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Disable bloom filter support on the node under test.
        self.extra_args = [["-peerbloomfilters=0"]]

    def run_test(self):
        # Attach a mininode peer to the node.
        peer = NodeConnCB()
        connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], peer)
        peer.add_connection(connection)
        NetworkThread().start()
        peer.wait_for_verack()

        # Requesting the mempool must get this peer disconnected.
        peer.send_message(msg_mempool())
        peer.wait_for_disconnect()

        # The node should no longer report any connected peers.
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    P2PMempoolTests().main()
| 33.128205 | 75 | 0.712074 |
a84e7e02c60daff2c50bf1eb09781365997c1468 | 13,181 | py | Python | tf_agents/experimental/examples/ppo/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | 1 | 2021-04-19T02:28:24.000Z | 2021-04-19T02:28:24.000Z | tf_agents/experimental/examples/ppo/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | null | null | null | tf_agents/experimental/examples/ppo/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train and Eval PPOClipAgent in the Mujoco environments.
All hyperparameters come from the PPO paper
https://arxiv.org/abs/1707.06347.pdf
"""
import os
from absl import logging
import gin
import reverb
import tensorflow.compat.v2 as tf
from tf_agents.agents.ppo import ppo_actor_network
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_mujoco
from tf_agents.metrics import py_metrics
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import ppo_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
@gin.configurable
def train_eval(
    root_dir,
    env_name='HalfCheetah-v2',
    # Training params
    num_iterations=1600,
    actor_fc_layers=(64, 64),
    value_fc_layers=(64, 64),
    learning_rate=3e-4,
    collect_sequence_length=2048,
    minibatch_size=64,
    num_epochs=10,
    # Agent params
    importance_ratio_clipping=0.2,
    lambda_value=0.95,
    discount_factor=0.99,
    entropy_regularization=0.,
    value_pred_loss_coef=0.5,
    use_gae=True,
    use_td_lambda_return=True,
    gradient_clipping=0.5,
    value_clipping=None,
    # Replay params
    reverb_port=None,
    replay_capacity=10000,
    # Others
    policy_save_interval=5000,
    summary_interval=1000,
    eval_interval=10000,
    eval_episodes=100,
    debug_summaries=False,
    summarize_grads_and_vars=False):
  """Trains and evaluates PPO (Importance Ratio Clipping).

  Args:
    root_dir: Main directory path where checkpoints, saved_models, and summaries
      will be written to.
    env_name: Name for the Mujoco environment to load.
    num_iterations: The number of iterations to perform collection and training.
    actor_fc_layers: List of fully_connected parameters for the actor network,
      where each item is the number of units in the layer.
    value_fc_layers: : List of fully_connected parameters for the value network,
      where each item is the number of units in the layer.
    learning_rate: Learning rate used on the Adam optimizer.
    collect_sequence_length: Number of steps to take in each collect run.
    minibatch_size: Number of elements in each mini batch. If `None`, the entire
      collected sequence will be treated as one batch.
    num_epochs: Number of iterations to repeat over all collected data per data
      collection step. (Schulman,2017) sets this to 10 for Mujoco, 15 for
      Roboschool and 3 for Atari.
    importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective. For
      more detail, see explanation at the top of the doc.
    lambda_value: Lambda parameter for TD-lambda computation.
    discount_factor: Discount factor for return computation. Default to `0.99`
      which is the value used for all environments from (Schulman, 2017).
    entropy_regularization: Coefficient for entropy regularization loss term.
      Default to `0.0` because no entropy bonus was used in (Schulman, 2017).
    value_pred_loss_coef: Multiplier for value prediction loss to balance with
      policy gradient loss. Default to `0.5`, which was used for all
      environments in the OpenAI baseline implementation. This parameters is
      irrelevant unless you are sharing part of actor_net and value_net. In that
      case, you would want to tune this coeeficient, whose value depends on the
      network architecture of your choice.
    use_gae: If True (default False), uses generalized advantage estimation for
      computing per-timestep advantage. Else, just subtracts value predictions
      from empirical return.
    use_td_lambda_return: If True (default False), uses td_lambda_return for
      training value function; here: `td_lambda_return = gae_advantage +
      value_predictions`. `use_gae` must be set to `True` as well to enable TD
      -lambda returns. If `use_td_lambda_return` is set to True while
      `use_gae` is False, the empirical return will be used and a warning will
      be logged.
    gradient_clipping: Norm length to clip gradients.
    value_clipping: Difference between new and old value predictions are clipped
      to this threshold. Value clipping could be helpful when training
      very deep networks. Default: no clipping.
    reverb_port: Port for reverb server, if None, use a randomly chosen unused
      port.
    replay_capacity: The maximum number of elements for the replay buffer. Items
      will be wasted if this is smalled than collect_sequence_length.
    policy_save_interval: How often, in train_steps, the policy will be saved.
    summary_interval: How often to write data into Tensorboard.
    eval_interval: How often to run evaluation, in train_steps.
    eval_episodes: Number of episodes to evaluate over.
    debug_summaries: Boolean for whether to gather debug summaries.
    summarize_grads_and_vars: If true, gradient summaries will be written.
  """
  # Separate environments for data collection and for evaluation.
  collect_env = suite_mujoco.load(env_name)
  eval_env = suite_mujoco.load(env_name)
  num_environments = 1

  observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
      spec_utils.get_tensor_specs(collect_env))
  # TODO(b/172267869): Remove this conversion once TensorNormalizer stops
  # converting float64 inputs to float32.
  observation_tensor_spec = tf.TensorSpec(
      dtype=tf.float32, shape=observation_tensor_spec.shape)

  train_step = train_utils.create_train_step()
  actor_net = ppo_actor_network.create_sequential_actor_net(
      actor_fc_layers, action_tensor_spec)
  value_net = value_network.ValueNetwork(
      observation_tensor_spec,
      fc_layer_params=value_fc_layers,
      kernel_initializer=tf.keras.initializers.Orthogonal())

  current_iteration = tf.Variable(0, dtype=tf.int64)

  def learning_rate_fn():
    # Linearly decay the learning rate.
    return learning_rate * (1 - current_iteration / num_iterations)

  agent = ppo_clip_agent.PPOClipAgent(
      time_step_tensor_spec,
      action_tensor_spec,
      optimizer=tf.keras.optimizers.Adam(
          learning_rate=learning_rate_fn, epsilon=1e-5),
      actor_net=actor_net,
      value_net=value_net,
      importance_ratio_clipping=importance_ratio_clipping,
      lambda_value=lambda_value,
      discount_factor=discount_factor,
      entropy_regularization=entropy_regularization,
      value_pred_loss_coef=value_pred_loss_coef,
      # This is a legacy argument for the number of times we repeat the data
      # inside of the train function, incompatible with mini batch learning.
      # We set the epoch number from the replay buffer and tf.Data instead.
      num_epochs=1,
      use_gae=use_gae,
      use_td_lambda_return=use_td_lambda_return,
      gradient_clipping=gradient_clipping,
      value_clipping=value_clipping,
      # TODO(b/150244758): Default compute_value_and_advantage_in_train to False
      # after Reverb open source.
      compute_value_and_advantage_in_train=False,
      # Skips updating normalizers in the agent, as it's handled in the learner.
      update_normalizers_in_train=False,
      debug_summaries=debug_summaries,
      summarize_grads_and_vars=summarize_grads_and_vars,
      train_step_counter=train_step)
  agent.initialize()

  # Two FIFO tables: one feeds training, one feeds observation normalization.
  reverb_server = reverb.Server(
      [
          reverb.Table(  # Replay buffer storing experience for training.
              name='training_table',
              sampler=reverb.selectors.Fifo(),
              remover=reverb.selectors.Fifo(),
              rate_limiter=reverb.rate_limiters.MinSize(1),
              max_size=replay_capacity,
              max_times_sampled=1,
          ),
          reverb.Table(  # Replay buffer storing experience for normalization.
              name='normalization_table',
              sampler=reverb.selectors.Fifo(),
              remover=reverb.selectors.Fifo(),
              rate_limiter=reverb.rate_limiters.MinSize(1),
              max_size=replay_capacity,
              max_times_sampled=1,
          )
      ],
      port=reverb_port)

  # Create the replay buffer.
  reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
      agent.collect_data_spec,
      sequence_length=collect_sequence_length,
      table_name='training_table',
      server_address='localhost:{}'.format(reverb_server.port),
      # The only collected sequence is used to populate the batches.
      max_cycle_length=1,
      rate_limiter_timeout_ms=1000)
  reverb_replay_normalization = reverb_replay_buffer.ReverbReplayBuffer(
      agent.collect_data_spec,
      sequence_length=collect_sequence_length,
      table_name='normalization_table',
      server_address='localhost:{}'.format(reverb_server.port),
      # The only collected sequence is used to populate the batches.
      max_cycle_length=1,
      rate_limiter_timeout_ms=1000)

  # One observer writes each collected sequence into both tables.
  rb_observer = reverb_utils.ReverbTrajectorySequenceObserver(
      reverb_replay_train.py_client, ['training_table', 'normalization_table'],
      sequence_length=collect_sequence_length,
      stride_length=collect_sequence_length)

  saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
  collect_env_step_metric = py_metrics.EnvironmentSteps()
  learning_triggers = [
      triggers.PolicySavedModelTrigger(
          saved_model_dir,
          agent,
          train_step,
          interval=policy_save_interval,
          metadata_metrics={
              triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
          }),
      triggers.StepPerSecondLogTrigger(train_step, interval=summary_interval),
  ]

  def training_dataset_fn():
    return reverb_replay_train.as_dataset(
        sample_batch_size=num_environments,
        sequence_preprocess_fn=agent.preprocess_sequence)

  def normalization_dataset_fn():
    return reverb_replay_normalization.as_dataset(
        sample_batch_size=num_environments,
        sequence_preprocess_fn=agent.preprocess_sequence)

  agent_learner = ppo_learner.PPOLearner(
      root_dir,
      train_step,
      agent,
      experience_dataset_fn=training_dataset_fn,
      normalization_dataset_fn=normalization_dataset_fn,
      num_batches=1,
      num_epochs=num_epochs,
      minibatch_size=minibatch_size,
      shuffle_buffer_size=collect_sequence_length,
      triggers=learning_triggers)

  tf_collect_policy = agent.collect_policy
  collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
      tf_collect_policy, use_tf_function=True)

  collect_actor = actor.Actor(
      collect_env,
      collect_policy,
      train_step,
      steps_per_run=collect_sequence_length,
      observers=[rb_observer],
      metrics=actor.collect_metrics(buffer_size=10) + [collect_env_step_metric],
      reference_metrics=[collect_env_step_metric],
      summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),
      summary_interval=summary_interval)

  eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
      agent.policy, use_tf_function=True)

  if eval_interval:
    logging.info('Intial evaluation.')
    eval_actor = actor.Actor(
        eval_env,
        eval_greedy_policy,
        train_step,
        metrics=actor.eval_metrics(eval_episodes),
        reference_metrics=[collect_env_step_metric],
        summary_dir=os.path.join(root_dir, 'eval'),
        episodes_per_run=eval_episodes)

    eval_actor.run_and_log()

  logging.info('Training on %s', env_name)
  last_eval_step = 0
  for i in range(num_iterations):
    # Collect one sequence, then train on it; buffers are cleared each cycle.
    collect_actor.run()
    # TODO(b/159615593): Update to use observer.flush.
    # Reset the reverb observer to make sure the data collected is flushed and
    # written to the RB.
    rb_observer.reset()
    agent_learner.run()
    reverb_replay_train.clear()
    reverb_replay_normalization.clear()
    current_iteration.assign_add(1)

    # Eval only if `eval_interval` has been set. Then, eval if the current train
    # step is equal or greater than the `last_eval_step` + `eval_interval` or if
    # this is the last iteration. This logic exists because agent_learner.run()
    # does not return after every train step.
    if (eval_interval and
        (agent_learner.train_step_numpy >= eval_interval + last_eval_step
         or i == num_iterations - 1)):
      logging.info('Evaluating.')
      eval_actor.run_and_log()
      last_eval_step = agent_learner.train_step_numpy

  rb_observer.close()
  reverb_server.stop()
| 40.682099 | 80 | 0.741749 |
e365cd41f656a991643fae46e5405642557b8adb | 13,070 | py | Python | django/utils/html.py | szuprefix/django | 4803834aaad99a92e65b5c70ccdbcf4d07ea9b03 | [
"PSF-2.0",
"BSD-3-Clause"
] | 4 | 2020-04-08T17:57:46.000Z | 2021-11-08T08:56:16.000Z | django/utils/html.py | guilhermej/django | 1d8cfa36089f2d1295abad03a99fc3c259bde6b5 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/utils/html.py | guilhermej/django | 1d8cfa36089f2d1295abad03a99fc3c259bde6b5 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | """HTML utilities suitable for global use."""
import re
from html.parser import HTMLParser
from urllib.parse import (
parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit,
)
from django.utils.encoding import force_text
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.safestring import SafeData, SafeText, mark_safe
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION_RE = re.compile(
    '^'           # Beginning of word
    '(.*?)'       # The URL in word
    '([.,:;!]+)'  # Allowed non-wrapping, trailing punctuation
    '$'           # End of word
)

# Wrapping punctuation pairs: raw characters plus their HTML-entity forms,
# since urlize input may arrive already entity-encoded. The entity pair
# ('&lt;', '&gt;') had been corrupted to a duplicate raw ('<', '>') pair.
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]

# List of possible strings used for bullets in bulleted lists.
# (Entity forms restored; they had been decoded to raw characters.)
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'''([\s<>"']+)''')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
@keep_lazy(str, SafeText)
def escape(text):
    """
    Return the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.

    Always escape input, even if it's already escaped and marked as such.
    This may result in double-escaping. If this is a concern, use
    conditional_escape() instead.
    """
    # '&' must be replaced first so the '&' introduced by later entity
    # replacements is not escaped again. The replacement targets had been
    # corrupted to identical no-op pairs (e.g. replace('&', '&')), which
    # left the output unescaped; the real entities are restored here.
    return mark_safe(
        str(text).replace('&', '&amp;').replace('<', '&lt;')
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
    )
# Translation table for escapejs(): characters that are dangerous inside
# <script> string literals, mapped to \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    ord('`'): '\\u0060',
    # U+2028/U+2029 are valid JavaScript line terminators and would break a
    # string literal if emitted raw.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}

# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
@keep_lazy(str, SafeText)
def escapejs(value):
    """Hex encode characters for use in JavaScript strings."""
    # Single translate() pass over the _js_escapes table.
    return mark_safe(str(value).translate(_js_escapes))
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.

    This function relies on the __html__ convention used both by Django's
    SafeData class and by third-party libraries like markupsafe.
    """
    if isinstance(text, Promise):
        text = str(text)
    html_method = getattr(text, '__html__', None)
    if html_method is not None:
        return html_method()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but pass all arguments through conditional_escape(),
    and call mark_safe() on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    escaped_args = [conditional_escape(arg) for arg in args]
    escaped_kwargs = {key: conditional_escape(value) for key, value in kwargs.items()}
    return mark_safe(format_string.format(*escaped_args, **escaped_kwargs))
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    pieces = (format_html(format_string, *tuple(args)) for args in args_generator)
    return mark_safe(conditional_escape(sep).join(pieces))
@keep_lazy_text
def linebreaks(value, autoescape=False):
    """Convert newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    paragraphs = re.split('\n{2,}', str(value))
    if autoescape:
        paragraphs = [escape(p) for p in paragraphs]
    # Single newlines become <br />; double newlines delimit paragraphs.
    wrapped = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paragraphs]
    return '\n\n'.join(wrapped)
class MLStripper(HTMLParser):
    """HTMLParser subclass that accumulates only text content.

    Used by strip_tags() to drop markup while keeping character data plus
    entity and character references verbatim.
    """
    def __init__(self):
        # convert_charrefs=False so entity/char refs reach the handlers
        # below instead of being decoded by the parser.
        HTMLParser.__init__(self, convert_charrefs=False)
        self.reset()
        self.fed = []

    def handle_data(self, data):
        self.fed.append(data)

    def handle_entityref(self, name):
        # Re-emit named references (e.g. &amp;) unchanged.
        self.fed.append('&%s;' % name)

    def handle_charref(self, name):
        # Re-emit numeric references (e.g. &#65;) unchanged.
        self.fed.append('&#%s;' % name)

    def get_data(self):
        return ''.join(self.fed)
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.
    """
    stripper = MLStripper()
    stripper.feed(value)
    stripper.close()
    return stripper.get_data()
@keep_lazy_text
def strip_tags(value):
    """Return the given HTML with all tags stripped."""
    value = force_text(value)
    # Typically one pass suffices; the loop handles tags revealed by a
    # previous pass (e.g. "<<b>script>").
    while '<' in value and '>' in value:
        stripped = _strip_once(value)
        if len(stripped) >= len(value):
            # _strip_once was not able to detect more tags
            break
        value = stripped
    return value
@keep_lazy_text
def strip_spaces_between_tags(value):
    """Return the given HTML with spaces between tags removed."""
    text = force_text(value)
    return re.sub(r'>\s+<', '><', text)
def smart_urlquote(url):
    """Quote a URL if it isn't already quoted."""
    def unquote_quote(segment):
        # Unquote first so already-quoted input is not double-quoted.
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~')
        return force_text(segment)

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)

    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)

    path = unquote_quote(path)
    fragment = unquote_quote(fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Convert any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, truncate the URLs in the link text longer
    than this limit to trim_url_limit-3 characters and append an ellipsis.

    If nofollow is True, give the links a rel="nofollow" attribute.

    If autoescape is True, autoescape the link text and URLs.
    """
    safe_input = isinstance(text, SafeData)

    def trim_url(x, limit=trim_url_limit):
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]

    def unescape(text, trail):
        """
        If input URL is HTML-escaped, unescape it so that it can be safely fed
        to smart_urlquote. For example:
        http://example.com?x=1&amp;y=&lt;2&gt; => http://example.com?x=1&y=<2>
        """
        # NOTE: the entity source strings below had been corrupted into
        # identical no-op pairs (e.g. replace('&', '&')); the real
        # HTML-escaped forms are restored here.
        unescaped = (text + trail).replace(
            '&amp;', '&').replace('&lt;', '<').replace(
            '&gt;', '>').replace('&quot;', '"').replace('&#39;', "'")
        if trail and unescaped.endswith(trail):
            # Remove trail for unescaped if it was not consumed by unescape
            unescaped = unescaped[:-len(trail)]
        elif trail == ';':
            # Trail was consumed by unescape (as end-of-entity marker), move it to text
            text += trail
            trail = ''
        return text, unescaped, trail

    def trim_punctuation(lead, middle, trail):
        """
        Trim trailing and wrapping punctuation from `middle`. Return the items
        of the new state.
        """
        # Continue trimming until middle remains unchanged.
        trimmed_something = True
        while trimmed_something:
            trimmed_something = False
            # Trim trailing punctuation.
            match = TRAILING_PUNCTUATION_RE.match(middle)
            if match:
                middle = match.group(1)
                trail = match.group(2) + trail
                trimmed_something = True
            # Trim wrapping punctuation.
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead += opening
                    trimmed_something = True
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing) and
                        middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
                    trimmed_something = True
        return lead, middle, trail

    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # lead: Current punctuation trimmed from the beginning of the word.
            # middle: Current state of the word.
            # trail: Current punctuation trimmed from the end of the word.
            lead, middle, trail = '', word, ''
            # Deal with punctuation.
            lead, middle, trail = trim_punctuation(lead, middle, trail)

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote(middle_unescaped)
            elif simple_url_2_re.match(middle):
                middle, middle_unescaped, trail = unescape(middle, trail)
                url = smart_urlquote('http://%s' % middle_unescaped)
            elif ':' not in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    trimmed = escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    non_breaking_space = "\u00a0"
    return value.replace(" ", non_breaking_space)
def html_safe(klass):
    """
    A decorator that defines the __html__ method. This helps non-Django
    templates to detect classes whose __str__ methods return SafeText.
    """
    cls_dict = klass.__dict__
    # Refuse classes that already define __html__ ...
    if '__html__' in cls_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it defines "
            "__html__()." % klass.__name__
        )
    # ... and classes without their own __str__ to wrap.
    if '__str__' not in cls_dict:
        raise ValueError(
            "can't apply @html_safe to %s because it doesn't "
            "define __str__()." % klass.__name__
        )
    original_str = klass.__str__
    klass.__str__ = lambda self: mark_safe(original_str(self))
    klass.__html__ = lambda self: str(self)
    return klass
| 35.710383 | 110 | 0.599923 |
72e8553a951ffa8b378dab8a952903a9801d6e4d | 11,242 | py | Python | tf_agents/environments/time_step.py | YoussefBenDhieb/agents | 7ff8119dbb10f2a250e458e82f78b1818951f535 | [
"Apache-2.0"
] | 1 | 2019-10-28T08:39:04.000Z | 2019-10-28T08:39:04.000Z | tf_agents/environments/time_step.py | Akshay22121995/agents | 1455410dffed3cfdede793b87c179965cdd27d22 | [
"Apache-2.0"
] | null | null | null | tf_agents/environments/time_step.py | Akshay22121995/agents | 1455410dffed3cfdede793b87c179965cdd27d22 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TimeStep representing a step in the environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import numpy as np
import tensorflow as tf
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
_as_float32_array = functools.partial(np.asarray, dtype=np.float32)
class TimeStep(
        collections.namedtuple('TimeStep',
                               ['step_type', 'reward', 'discount', 'observation'])):
    """Returned with every call to `step` and `reset` on an environment.

    A `TimeStep` contains the data emitted by an environment at each step of
    interaction. A `TimeStep` holds a `step_type`, an `observation` (typically a
    NumPy array or a dict or list of arrays), and an associated `reward` and
    `discount`.

    The first `TimeStep` in a sequence will equal `StepType.FIRST`. The final
    `TimeStep` will equal `StepType.LAST`. All other `TimeStep`s in a sequence
    will equal `StepType.MID.

    Attributes:
      step_type: A scalar mapping to `StepType` enum value.
      reward: A scalar, or `None` if `step_type` equals `StepType.FIRST`, i.e. at
        the start of a sequence.
      discount: A discount value in the range `[0, 1]`.
      observation: A NumPy array, or a nested dict, list or tuple of arrays.
    """
    __slots__ = ()

    def is_first(self):
        """True (or a boolean Tensor) iff this is the first step of an episode."""
        # `step_type` may be a Tensor in graph mode; use a TF op in that case.
        if tf.is_tensor(self.step_type):
            return tf.equal(self.step_type, StepType.FIRST)
        return self.step_type == StepType.FIRST

    def is_mid(self):
        """True (or a boolean Tensor) iff this is an intermediate step."""
        if tf.is_tensor(self.step_type):
            return tf.equal(self.step_type, StepType.MID)
        return self.step_type == StepType.MID

    def is_last(self):
        """True (or a boolean Tensor) iff this is the last step of an episode."""
        if tf.is_tensor(self.step_type):
            return tf.equal(self.step_type, StepType.LAST)
        return self.step_type == StepType.LAST

    def __hash__(self):
        # TODO(oars): Explore performance impact and consider converting dicts in
        # the observation into ordered dicts in __new__ call.
        return hash(tuple(tf.nest.flatten(self)))
class StepType(object):
    """Defines the status of a `TimeStep` within a sequence."""

    # Denotes the first `TimeStep` in a sequence.
    FIRST = np.asarray(0, dtype=np.int32)
    # Denotes any `TimeStep` in a sequence that is not FIRST or LAST.
    MID = np.asarray(1, dtype=np.int32)
    # Denotes the last `TimeStep` in a sequence.
    LAST = np.asarray(2, dtype=np.int32)

    def __new__(cls, value):
        """Add ability to create StepType constants from a value."""
        # Compare against the canonical constants in order and return the
        # shared constant object rather than a fresh array.
        for constant in (cls.FIRST, cls.MID, cls.LAST):
            if value == constant:
                return constant
        raise ValueError('No known conversion for `%r` into a StepType' % value)
def restart(observation, batch_size=None):
    """Returns a `TimeStep` with `step_type` set equal to `StepType.FIRST`.

    Args:
      observation: A NumPy array, tensor, or a nested dict, list or tuple of
        arrays or tensors.
      batch_size: (Optional) A python or tensorflow integer scalar.

    Returns:
      A `TimeStep`.
    """
    leading = tf.nest.flatten(observation)[0]
    if tf.is_tensor(leading):
        # Tensor observations: emit TF ops so the result is usable in graph mode.
        # TODO(sguada,kbanoop): Check leading dimension of first_observation
        # against batch_size if all are known statically.
        shape = () if batch_size is None else (batch_size,)
        return TimeStep(
            step_type=tf.fill(shape, StepType.FIRST, name='step_type'),
            reward=tf.fill(shape, _as_float32_array(0.0), name='reward'),
            discount=tf.fill(shape, _as_float32_array(1.0), name='discount'),
            observation=observation)
    # NumPy observations: build the fields eagerly.
    if batch_size is None:
        return TimeStep(StepType.FIRST,
                        _as_float32_array(0.0),
                        _as_float32_array(1.0),
                        observation)
    return TimeStep(np.tile(StepType.FIRST, batch_size),
                    np.zeros(batch_size, dtype=np.float32),
                    np.ones(batch_size, dtype=np.float32),
                    observation)
def transition(observation, reward, discount=1.0):
    """Returns a `TimeStep` with `step_type` set equal to `StepType.MID`.

    For TF transitions, the batch size is inferred from the shape of `reward`.

    If `discount` is a scalar, and `observation` contains Tensors,
    then `discount` will be broadcasted to match `reward.shape`.

    Args:
      observation: A NumPy array, tensor, or a nested dict, list or tuple of
        arrays or tensors.
      reward: A scalar, or 1D NumPy array, or tensor.
      discount: (optional) A scalar, or 1D NumPy array, or tensor.

    Returns:
      A `TimeStep`.

    Raises:
      ValueError: If observations are tensors but reward's statically known rank
        is not `0` or `1`.
    """
    first_observation = tf.nest.flatten(observation)[0]
    if not tf.is_tensor(first_observation):
        # Eager/NumPy path: no TF ops; the batch shape is taken from `reward`.
        reward = _as_float32_array(reward)
        discount = _as_float32_array(discount)
        if reward.shape:
            step_type = np.tile(StepType.MID, reward.shape)
        else:
            step_type = StepType.MID
        return TimeStep(step_type, reward, discount, observation)

    # TODO(sguada, kbanoop): If reward.shape.ndims == 2, and static
    # batch sizes are available for both first_observation and reward,
    # check that these match.
    reward = tf.convert_to_tensor(value=reward, dtype=tf.float32, name='reward')
    if reward.shape.ndims is None or reward.shape.ndims > 1:
        raise ValueError('Expected reward to be a scalar or vector; saw shape: %s' %
                         reward.shape)
    if reward.shape.ndims == 0:
        shape = []
    else:
        # Batched reward: its leading dimension must be compatible with the
        # observation's leading (batch) dimension.
        first_observation.shape[:1].assert_is_compatible_with(reward.shape)
        shape = [
            tf.compat.dimension_value(reward.shape[0]) or tf.shape(input=reward)[0]
        ]
    step_type = tf.fill(shape, StepType.MID, name='step_type')
    discount = tf.convert_to_tensor(
        value=discount, dtype=tf.float32, name='discount')
    if discount.shape.ndims == 0:
        # Scalar discount is broadcast to the batch shape inferred above.
        discount = tf.fill(shape, discount, name='discount_fill')
    else:
        reward.shape.assert_is_compatible_with(discount.shape)
    return TimeStep(step_type, reward, discount, observation)
def termination(observation, reward):
    """Returns a `TimeStep` with `step_type` set to `StepType.LAST`.

    Args:
      observation: A NumPy array, tensor, or a nested dict, list or tuple of
        arrays or tensors.
      reward: A scalar, or 1D NumPy array, or tensor.

    Returns:
      A `TimeStep`.

    Raises:
      ValueError: If observations are tensors but reward's statically known rank
        is not `0` or `1`.
    """
    first_observation = tf.nest.flatten(observation)[0]
    if not tf.is_tensor(first_observation):
        # Eager/NumPy path: terminal steps always carry a zero discount.
        reward = _as_float32_array(reward)
        if reward.shape:
            step_type = np.tile(StepType.LAST, reward.shape)
            discount = np.zeros_like(reward, dtype=np.float32)
            return TimeStep(step_type, reward, discount, observation)
        else:
            return TimeStep(StepType.LAST, reward, _as_float32_array(0.0),
                            observation)

    # TODO(sguada, kbanoop): If reward.shape.ndims == 2, and static
    # batch sizes are available for both first_observation and reward,
    # check that these match.
    reward = tf.convert_to_tensor(value=reward, dtype=tf.float32, name='reward')
    if reward.shape.ndims is None or reward.shape.ndims > 1:
        raise ValueError('Expected reward to be a scalar or vector; saw shape: %s' %
                         reward.shape)
    if reward.shape.ndims == 0:
        shape = []
    else:
        first_observation.shape[:1].assert_is_compatible_with(reward.shape)
        shape = [
            tf.compat.dimension_value(reward.shape[0]) or tf.shape(input=reward)[0]
        ]
    step_type = tf.fill(shape, StepType.LAST, name='step_type')
    # Terminal step: discount is filled with zeros.
    discount = tf.fill(shape, _as_float32_array(0.0), name='discount')
    return TimeStep(step_type, reward, discount, observation)
def truncation(observation, reward, discount=1.0):
    """Returns a `TimeStep` with `step_type` set to `StepType.LAST`.

    If `discount` is a scalar, and `observation` contains Tensors,
    then `discount` will be broadcasted to match `reward.shape`.

    Args:
      observation: A NumPy array, tensor, or a nested dict, list or tuple of
        arrays or tensors.
      reward: A scalar, or 1D NumPy array, or tensor.
      discount: (optional) A scalar, or 1D NumPy array, or tensor.

    Returns:
      A `TimeStep`.

    Raises:
      ValueError: If observations are tensors but reward's statically known rank
        is not `0` or `1`.
    """
    first_observation = tf.nest.flatten(observation)[0]
    if not tf.is_tensor(first_observation):
        # Eager/NumPy path. Unlike `termination`, the caller-provided discount
        # is preserved rather than forced to zero.
        reward = _as_float32_array(reward)
        discount = _as_float32_array(discount)
        if reward.shape:
            step_type = np.tile(StepType.LAST, reward.shape)
        else:
            step_type = StepType.LAST
        return TimeStep(step_type, reward, discount, observation)
    reward = tf.convert_to_tensor(value=reward, dtype=tf.float32, name='reward')
    if reward.shape.ndims is None or reward.shape.ndims > 1:
        raise ValueError('Expected reward to be a scalar or vector; saw shape: %s' %
                         reward.shape)
    if reward.shape.ndims == 0:
        shape = []
    else:
        first_observation.shape[:1].assert_is_compatible_with(reward.shape)
        shape = [
            tf.compat.dimension_value(reward.shape[0]) or tf.shape(input=reward)[0]
        ]
    step_type = tf.fill(shape, StepType.LAST, name='step_type')
    discount = tf.convert_to_tensor(
        value=discount, dtype=tf.float32, name='discount')
    if discount.shape.ndims == 0:
        # Scalar discount is broadcast to the batch shape inferred above.
        discount = tf.fill(shape, discount, name='discount_fill')
    else:
        reward.shape.assert_is_compatible_with(discount.shape)
    return TimeStep(step_type, reward, discount, observation)
def time_step_spec(observation_spec=None):
    """Returns a `TimeStep` spec given the observation_spec."""
    if observation_spec is None:
        # No observation spec: every field of the TimeStep spec is empty.
        return TimeStep(step_type=(), reward=(), discount=(), observation=())
    leading_spec = tf.nest.flatten(observation_spec)[0]
    tensor_spec_types = (tensor_spec.TensorSpec, tensor_spec.BoundedTensorSpec)
    if isinstance(leading_spec, tensor_spec_types):
        # Tensor-based specs -> build matching scalar tensor specs.
        return TimeStep(
            step_type=tensor_spec.TensorSpec([], tf.int32, name='step_type'),
            reward=tensor_spec.TensorSpec([], tf.float32, name='reward'),
            discount=tensor_spec.BoundedTensorSpec(
                [], tf.float32, minimum=0.0, maximum=1.0, name='discount'),
            observation=observation_spec)
    # Otherwise fall back to NumPy-based array specs.
    return TimeStep(
        step_type=array_spec.ArraySpec([], np.int32, name='step_type'),
        reward=array_spec.ArraySpec([], np.float32, name='reward'),
        discount=array_spec.BoundedArraySpec(
            [], np.float32, minimum=0.0, maximum=1.0, name='discount'),
        observation=observation_spec)
| 36.859016 | 80 | 0.701832 |
bc1b118bde727d06efb225ba46e4a9cf34703e68 | 593 | py | Python | main.py | Raffson/Heroku-Example | 6084f39948e5885005a3174153ca6dc912a1a354 | [
"MIT"
] | null | null | null | main.py | Raffson/Heroku-Example | 6084f39948e5885005a3174153ca6dc912a1a354 | [
"MIT"
] | null | null | null | main.py | Raffson/Heroku-Example | 6084f39948e5885005a3174153ca6dc912a1a354 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
import datetime as dt
app = Flask(__name__)
@app.route("/")
def index():
some_text = "Message from the index-handler."
current_year = dt.datetime.now().year
cities = ["Boston", "Vienna", "Paris", "Berlin"]
return render_template("index.html", some_text_name=some_text,
current_year=current_year)
@app.route("/about")
def about_me():
return render_template("about.html")
@app.route("/portfolio")
def portfolio():
return render_template("portfolio.html")
if __name__ == '__main__':
    # Start Flask's built-in development server when run as a script.
    app.run()
2a8f065d9168dd387fb6454d3db39f4c002727b1 | 21,936 | py | Python | src/torch/nn/parallel/distributed.py | warcraft12321/Hyperfoods | b995cd7afe10fcbd338158c80f53ce637bfffc0c | [
"MIT"
] | 51 | 2020-01-26T23:32:57.000Z | 2022-03-20T14:49:57.000Z | src/torch/nn/parallel/distributed.py | warcraft12321/Hyperfoods | b995cd7afe10fcbd338158c80f53ce637bfffc0c | [
"MIT"
] | 2 | 2020-12-19T20:00:28.000Z | 2021-03-03T20:22:45.000Z | src/torch/nn/parallel/distributed.py | warcraft12321/Hyperfoods | b995cd7afe10fcbd338158c80f53ce637bfffc0c | [
"MIT"
] | 33 | 2020-02-18T16:15:48.000Z | 2022-03-24T15:12:05.000Z | import sys
import math
import threading
import copy
import torch
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors, \
_take_tensors
from torch.cuda.comm import broadcast_coalesced
from torch.cuda import nccl
import torch.distributed as dist
from ..modules import Module
from .replicate import replicate
from .scatter_gather import scatter_kwargs, gather
from .parallel_apply import parallel_apply
if sys.version_info[0] == 3:
import queue
else:
import Queue as queue
class DistributedDataParallel(Module):
    r"""Implements distributed data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. The module is replicated on each machine and each device, and
    each such replica handles a portion of the input. During the backwards
    pass, gradients from each node are averaged.

    The batch size should be larger than the number of GPUs used locally. It
    should also be an integer multiple of the number of GPUs so that each chunk
    is the same size (so that each GPU processes the same number of samples).

    See also: :ref:`distributed-basics` and :ref:`cuda-nn-dataparallel-instead`.
    The same constraints on input as in :class:`torch.nn.DataParallel` apply.

    Creation of this class requires the distributed package to be already
    initialized in the process group mode
    (see :func:`torch.distributed.init_process_group`).

    .. warning::
        This module works only with the ``nccl`` and ``gloo`` backends.

    .. warning::
        Constructor, forward method, and differentiation of the output (or a
        function of the output of this module) is a distributed synchronization
        point. Take that into account in case different processes might be
        executing different code.

    .. warning::
        This module assumes all parameters are registered in the model by the
        time it is created. No parameters should be added nor removed later.
        Same applies to buffers.

    .. warning::
        This module assumes all buffers and gradients are dense.

    .. warning::
        This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
        only work if gradients are to be accumulated in ``.grad`` attributes of
        parameters).

    .. warning::
        If you plan on using this module with a ``nccl`` backend or a ``gloo``
        backend (that uses Infiniband), together with a DataLoader that uses
        multiple workers, please change the multiprocessing start method to
        ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
        Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
        likely experience deadlocks if you don't change this setting.

    .. note::
        Parameters are never broadcast between processes. The module performs
        an all-reduce step on gradients and assumes that they will be modified
        by the optimizer in all processes in the same way. Buffers
        (e.g. BatchNorm stats) are broadcast from the module in process of rank
        0, to all other replicas in the system in every iteration.

    .. warning::
        Forward and backward hooks defined on :attr:`module` and its submodules
        won't be invoked anymore, unless the hooks are initialized in the
        :meth:`forward` method.

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])
        broadcast_buffers: flag that enables syncing (broadcasting) buffers of
                           the module at beginning of the forward function.
                           (default: True)

    Attributes:
        module (Module): the module to be parallelized

    Example::

        >>> torch.distributed.init_process_group(world_size=4, init_method='...')
        >>> net = torch.nn.DistributedDataParallel(model)
    """

    def __init__(self, module, device_ids=None, output_device=None, dim=0,
                 broadcast_buffers=True):
        """Broadcasts the initial module state from rank 0, replicates the
        module across the local devices, and sets up the backend-specific
        gradient-reduction machinery (hooks + threads for gloo, a single
        execution-engine callback for nccl)."""
        super(DistributedDataParallel, self).__init__()
        if dist._backend not in (dist.dist_backend.NCCL, dist.dist_backend.GLOO):
            raise ValueError('Invalid backend, only NCCL and GLOO backends are supported by DistributedDataParallel')
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        self.output_device = output_device
        self.broadcast_buffers = broadcast_buffers

        # Flag used by the NCCL backend to make sure we only reduce gradients
        # one time in the execution engine
        self.need_reduction = False

        MB = 1024 * 1024
        # used for intra-node param sync and inter-node sync as well
        self.broadcast_bucket_size = 10 * MB
        self.nccl_reduce_bucket_size = 256 * MB

        # Sync params and buffers
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)

        if len(device_ids) > 1:
            # TODO: we don't need to replicate params in here. they're always going to
            # be broadcasted using larger blocks in broadcast_coalesced, so it might be
            # better to not pollute the caches with these small blocks
            self._module_copies = replicate(self.module, self.device_ids, detach=True)
            self._module_copies[0] = self.module
            for module_copy in self._module_copies[1:]:
                for param, copy_param in zip(self.module.parameters(), module_copy.parameters()):
                    copy_param.requires_grad = param.requires_grad
        else:
            self._module_copies = [self.module]

        # For NCCL backend, since every single NCCL call is asynchoronous, we
        # therefore directly enqueue all the NCCL reduction calls to the
        # default CUDA stream without spawning up other reduction threads.
        # This achieves the best performance.
        if dist._backend == dist.dist_backend.NCCL:
            self._register_nccl_grad_hook()
            return

        bucket_bytes_cap = 1 * MB

        # This is a triply-nested list where the "dimensions" are: devices, buckets, bucket_elems
        param_buckets = []
        # Split the parameters into buckets and by types as well
        for dev_idx, module in enumerate(self._module_copies):
            param_buckets.append(list(_take_tensors(module.parameters(), bucket_bytes_cap)))

        self.bucket_sizes = []
        self.bucket_map = {}

        # We transpose param_buckets, so the loop is over buckets.
        # param_buckets_tuple is a doubly-nested list with "dims": devices, bucket_elems
        for bucket_idx, param_buckets_tuple in enumerate(zip(*param_buckets)):
            self.bucket_sizes.append(0)
            # Now, we transpose again, so we iterate over bucket_elems, but getting tuples
            # of params from each device.
            for idx, param_tuple in enumerate(zip(*param_buckets_tuple)):
                if idx == 0:
                    # Bucket parameter type tracking
                    bucket_param_type = param_tuple[0].type()
                    # Only gloo and nccl support half-precision
                    if bucket_param_type == torch.cuda.HalfTensor and \
                            dist._backend != dist.dist_backend.GLOO:
                        raise RuntimeError("DistributedDataParallel currently only "
                                           "supports half precision parameters "
                                           "with Nccl and Gloo backend")
                if not param_tuple[0].requires_grad:
                    continue
                for p in param_tuple:
                    self.bucket_map[p] = bucket_idx
                self.bucket_sizes[bucket_idx] += 1

        # buckets[bucket_idx][device_idx] accumulates ready gradients;
        # bucket_events mirrors it with per-device CUDA events.
        self.buckets = [[[] for _ in range(len(self.device_ids))] for _ in range(len(self.bucket_sizes))]
        self.bucket_events = [[None] * len(self.device_ids) for _ in range(len(self.bucket_sizes))]
        self.reduced = [False] * len(self.bucket_sizes)

        self._register_grad_hooks()

        self.dispatch_lock = threading.Lock()
        self._start_reduction_threads()

    def __getstate__(self):
        """Return picklable state: threads, queues and CUDA streams of the
        gloo reduction pipeline are dropped (they are rebuilt in
        __setstate__)."""
        attrs = copy.copy(self.__dict__)
        if dist._backend != dist.dist_backend.NCCL:
            del attrs['_grad_accs'], attrs['_reduction_queues'], \
                attrs['_reduction_streams'], attrs['_reduction_threads'], \
                attrs['_nccl_streams'], attrs['_default_streams']
        return attrs

    def __setstate__(self, state):
        """Restore state and re-create the backend-specific reduction hooks."""
        super(DistributedDataParallel, self).__setstate__(state)
        if dist._backend == dist.dist_backend.NCCL:
            self._register_nccl_grad_hook()
        else:
            self._register_grad_hooks()
            self._start_reduction_threads()

    def forward(self, *inputs, **kwargs):
        """Scatter inputs to local devices, sync params/buffers, run the
        replicas in parallel and gather their outputs onto `output_device`."""
        # Arm the gradient reduction for the upcoming backward pass.
        self.need_reduction = True
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
        self._sync_params()
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def scatter(self, inputs, kwargs, device_ids):
        """Chunk `inputs`/`kwargs` along `self.dim` across `device_ids`."""
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def parallel_apply(self, replicas, inputs, kwargs):
        """Run each replica on its own device with its chunk of the input."""
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        """Concatenate per-device outputs along `self.dim` on `output_device`."""
        return gather(outputs, output_device, dim=self.dim)

    def train(self, mode=True):
        """Switch train/eval mode on the wrapped module and every local replica."""
        super(DistributedDataParallel, self).train(mode)
        for module in self._module_copies[1:]:
            module.train(mode)

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        """
        Broadcast a sequence of tensors to the default group from rank 0.
        Small tensors are first coalesced into a buffer to reduce the number of
        broadcasts.

        tensors (sequence): tensors to broadcast. Each tensor needs to be on the
            same GPU.
        buffer_size (int): maximum size of the buffer for coalescing
        """
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for tensor, synced in zip(tensors,
                                      _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)

    def _sync_params(self):
        """Synchronize parameters across local devices and buffers across
        both nodes and local devices before a forward pass."""
        if len(self.device_ids) > 1:
            # intra-node parameter sync
            params = [p.data for p in self.module.parameters()]
            result = broadcast_coalesced(params, self.device_ids, self.broadcast_bucket_size)
            for tensors, module in zip(result[1:], self._module_copies[1:]):
                for tensor, param in zip(tensors, module.parameters()):
                    param.data.set_(tensor)

        # module buffer sync
        if self.broadcast_buffers:
            buffers = [b.data for b in self.module._all_buffers()]
            if len(buffers) > 0:
                # cross-node buffer sync
                self._dist_broadcast_coalesced(buffers, self.broadcast_bucket_size)

                if len(self.device_ids) > 1:
                    # intra-node buffer sync
                    result = broadcast_coalesced(buffers, self.device_ids, self.broadcast_bucket_size)
                    for tensors, module in zip(result[1:], self._module_copies[1:]):
                        for tensor, buf in zip(tensors, module._all_buffers()):
                            buf.data.set_(tensor)

    def _register_grad_hooks(self):
        """Attach a post-accumulation hook to every trainable parameter; the
        hooks feed the bucketed reduction pipeline (gloo path)."""
        self._grad_accs = []  # need to keep them in scope
        for device_idx, module in enumerate(self._module_copies):
            for p in module.parameters():
                if p.requires_grad:
                    # `expand_as` yields a view whose grad_fn leads to the
                    # AccumulateGrad node for `p`; hook on that node.
                    p_tmp = p.expand_as(p)
                    grad_acc = p_tmp.grad_fn.next_functions[0][0]
                    grad_acc.register_hook(self._make_param_hook(p, device_idx))
                    self._grad_accs.append(grad_acc)

    def _register_nccl_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the
        default CUDA stream. Therefore, no synchronization is needed.
        """
        # Creating a new group
        self.nccl_reduction_group_id = dist.new_group()

        def reduction_fn_nccl():
            # This function only needs to be called once
            if not self.need_reduction:
                return

            self.need_reduction = False
            all_grads = [[] for _ in range(len(self._module_copies))]
            all_grads_buckets_iters = []

            # Bucketing all the gradients
            for dev_idx, module in enumerate(self._module_copies):
                for param in module.parameters():
                    if not param.requires_grad or param.grad is None:
                        continue
                    if param.grad.requires_grad:
                        raise RuntimeError("DistributedDataParallel only works "
                                           "with gradients that don't require "
                                           "grad")
                    # Adding the gradients for reduction
                    all_grads[dev_idx].append(param.grad.data)

                # Now bucketing the parameters
                dev_grads_buckets = _take_tensors(all_grads[dev_idx],
                                                  self.nccl_reduce_bucket_size)

                all_grads_buckets_iters.append(dev_grads_buckets)

            # Now reduce each bucket one after another
            for grads_batch in zip(*all_grads_buckets_iters):
                grads_batch_coalesced = []
                # Coalesce each bucket
                for dev_idx, dev_grads_batch in enumerate(grads_batch):
                    dev_id = self.device_ids[dev_idx]
                    with torch.cuda.device(dev_id):
                        dev_grads_batch_coalesced = _flatten_dense_tensors(dev_grads_batch)
                        grads_batch_coalesced.append(dev_grads_batch_coalesced)

                # We will only use device 0's results, but this single op should be
                # faster than doing the following two operation sequentially:
                # (1) intra-node reduce to lead GPU, followed by
                # (2) inter-node allreduce for all the first lead GPUs in all nodes
                dist.all_reduce_multigpu(grads_batch_coalesced,
                                         group=self.nccl_reduction_group_id)

                # Now only work on the first device of self.device_ids, uncoalesce
                # the gradients for each bucket
                grads_batch_coalesced[0] /= dist.get_world_size()
                grads_batch_reduced = _unflatten_dense_tensors(grads_batch_coalesced[0], grads_batch[0])
                for grad, reduced in zip(grads_batch[0], grads_batch_reduced):
                    grad.copy_(reduced)

            # clear the gradients and save memory for replicas
            for module in self._module_copies[1:]:
                for param in module.parameters():
                    if param.requires_grad:
                        param.grad = None
                        param.data.set_()

        # Now register the reduction hook on the parameters
        for p in self.module.parameters():
            if not p.requires_grad:
                continue

            def allreduce_hook(*unused):
                Variable._execution_engine.queue_callback(reduction_fn_nccl)

            p.register_hook(allreduce_hook)

    def _make_param_hook(self, param, device_idx):
        """Build the per-parameter hook that drops a ready gradient into its
        bucket and queues the bucket for reduction once it is full."""
        bucket_idx = self.bucket_map[param]

        def distributed_data_parallel_hook(*unused):
            if param.grad.requires_grad:
                raise RuntimeError("DistributedDataParallel only works with "
                                   "gradients that don't require grad")
            bucket = self.buckets[bucket_idx][device_idx]
            bucket.append(param.grad.data)

            # We can flush these and save memory for replicas
            if device_idx > 0:
                param.grad = None
                param.data.set_()

            # Current device's bucket is full
            if len(bucket) == self.bucket_sizes[bucket_idx]:
                with torch.cuda.device(self.device_ids[device_idx]):
                    event = torch.cuda.Event()
                    event.record()
                with self.dispatch_lock:
                    self.bucket_events[bucket_idx][device_idx] = event
                    self._queue_reduction(bucket_idx)

        return distributed_data_parallel_hook

    def _queue_reduction(self, bucket_idx):
        """If every device's bucket is full, hand the bucket to a reduction
        thread and make the backward pass wait for it; once all buckets are
        reduced, re-sync the default streams with the reduction streams.
        Caller must hold `self.dispatch_lock`."""
        dev_buckets = self.buckets[bucket_idx]
        dev_events = self.bucket_events[bucket_idx]

        # Check if it's ready
        if any(evt is None for evt in dev_events):
            return

        # Queue the reduction and make sure backward waits for it
        event = threading.Event()
        self._reduction_queues[bucket_idx].put((dev_buckets, dev_events, event))
        Variable._execution_engine.queue_callback(lambda: event.wait())

        # Reset bucket state
        self.buckets[bucket_idx] = [[] for _ in range(len(self.device_ids))]
        self.bucket_events[bucket_idx] = [None] * len(self.device_ids)
        self.reduced[bucket_idx] = True
        if all(self.reduced):
            self.reduced = [False] * len(self.bucket_sizes)

            def sync_reduction_streams():
                # We only have to sync with the first one, but it's safer to do it this way
                # in case we change the way in which we paralellize work
                r_streams = zip(*self._reduction_streams)
                for dev_id, default_stream, dev_r_streams in zip(self.device_ids, self._default_streams, r_streams):
                    with torch.cuda.device(dev_id):
                        for reduction_stream in dev_r_streams:
                            default_stream.wait_stream(reduction_stream)

            Variable._execution_engine.queue_callback(sync_reduction_streams)

    def _start_reduction_threads(self):
        """Spawn one daemon reduction thread per bucket, each with its own
        queue, per-device reduction streams and process group."""
        num_buckets = len(self.bucket_sizes)
        self._reduction_queues = [queue.Queue() for _ in range(num_buckets)]
        self._reduction_threads = []
        self._reduction_streams = [[] for _ in range(num_buckets)]
        self._nccl_streams = []
        self._default_streams = []
        for dev_id in self.device_ids:
            with torch.cuda.device(dev_id):
                # TODO: don't assume we're on a default stream
                self._default_streams.append(torch.cuda.current_stream())
                self._nccl_streams.append(torch.cuda.Stream())
        for reduction_queue, reduction_streams in zip(self._reduction_queues, self._reduction_streams):
            for dev_id in self.device_ids:
                with torch.cuda.device(dev_id):
                    reduction_streams.append(torch.cuda.Stream())
            # We only use the first device for distributed reductions
            dist._register_stream(reduction_streams[0])

            group_id = dist.new_group()

            self._reduction_threads.append(threading.Thread(
                target=self._reduction_thread_fn,
                args=(reduction_queue, group_id, self.device_ids, reduction_streams, self._nccl_streams)))
            self._reduction_threads[-1].daemon = True
            self._reduction_threads[-1].start()

    @staticmethod
    def _reduction_thread_fn(queue, group_id, device_ids, reduction_streams, nccl_streams):
        """Thread body: forever take a bucket off `queue`, reduce it locally
        across devices via NCCL, then all-reduce and average the result across
        processes on the first device, and signal completion."""

        def _process_batch():
            dev_grad_batch, dev_events, job_event = queue.get()
            dev_coalesced = []
            # Coalesce the tensors on all devices and start a local reduction
            for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch, dev_events, reduction_streams):
                with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                    stream.wait_event(event)
                    coalesced = _flatten_dense_tensors(grad_batch)
                    dev_coalesced.append(coalesced)
            # Wait for all copies to complete before starting the NCCL kernel
            for stream in reduction_streams:
                stream.synchronize()
            nccl.reduce(dev_coalesced, root=0, streams=nccl_streams)

            # From now on we're only going to work on the first device (from device_ids)
            grad_batch = dev_grad_batch[0]
            coalesced = dev_coalesced[0]
            reduce_stream = reduction_streams[0]
            with torch.cuda.stream(reduce_stream):
                reduce_stream.wait_stream(nccl_streams[0])
                coalesced /= dist.get_world_size()
                dist.all_reduce(coalesced, group=group_id)
                for grad, reduced in zip(grad_batch, _unflatten_dense_tensors(coalesced, grad_batch)):
                    grad.copy_(reduced)
            job_event.set()

        with torch.cuda.device(device_ids[0]):
            while True:
                _process_batch()  # just to have a clear scope
aa0d945385b47dc6d41e6b72f9e979a57cb126cb | 2,227 | py | Python | galaxy_milkyway_files/tools/wohl-proteomics/msgf2pin/mass.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | galaxy_milkyway_files/tools/wohl-proteomics/msgf2pin/mass.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | galaxy_milkyway_files/tools/wohl-proteomics/msgf2pin/mass.py | wohllab/milkyway_proteomics | 622969f7f4a5955ae2bff299ae7b08572d422814 | [
"MIT"
] | null | null | null | import re
from orangecontrib.bio.ontology import OBOParser
import orangecontrib
from pyteomics import mass
import sys
# Regex matching the square brackets that delimit UNIMOD tags in a peptide
# string, e.g. "S[UNIMOD:21]".
mod_bracket_re='\[|\]'
# Cache of UNIMOD accession -> monoisotopic delta mass (parsed lazily from
# the OBO file so each accession is looked up at most once).
mod_dict={}
# Hard-coded example peptide carrying two UNIMOD:21 modifications.
pepseq="VS[UNIMOD:21]KLKNWEY[UNIMOD:21]R"
mod_obo="/galaxy-central/tools/wohl-proteomics/MSGFcrux/unimod.obo"
with open(mod_obo,"r") as unimod_obo:
    obo_parser=orangecontrib.bio.ontology.OBOParser(unimod_obo)
    if "UNIMOD" in pepseq:
        print pepseq,"working on this..."
        # Split on '[' and ']' so plain residue runs alternate with
        # "UNIMOD:<n>" tag strings.
        split_peptide=re.split(mod_bracket_re,pepseq)
        #print split_peptide
        new_peptide=[]
        mod_mass=0.0
        unmod_pepmass=0.0
        for each_split in split_peptide:
            if "UNIMOD" in each_split:
                if each_split in mod_dict:
                    # Already resolved this accession; reuse the cached mass.
                    mod_mass+=mod_dict[each_split]
                else:
                    thismod_mass=""
                    print each_split," was not in the dictionary!"
                    trigger=False
                    # Rewind and rescan the OBO file for this accession's
                    # delta_mono_mass entry.
                    unimod_obo.seek(0)
                    for event,value in obo_parser:
                        if "TAG_VALUE" in event and not trigger:
                            print event,value
                            if each_split in value[1]:
                                # Found the term header; the mass follows in a
                                # later tag-value pair.
                                trigger=True
                        elif trigger:
                            if "delta_mono_mass" in value[1]:
                                print value[1],"val1 should be the mass"
                                # Mass is the quoted value in the xref line.
                                thismod_mass=value[1].split("\"")[1]
                                trigger=False
                                break
                            else:
                                continue
                    if thismod_mass=="":
                        print "ERROR: ERROR: ERROR: THE MASS FOR THIS MOD WAS NOT FOUND IN THE UNIMOD OBO FILE..."
                        sys.exit(2)
                    mod_dict[each_split]=float(thismod_mass)
                    mod_mass+=mod_dict[each_split]
                #print each_split,"with unimod..."
            else:
                # Plain residues contribute to the unmodified peptide sequence.
                new_peptide.extend(each_split)
        # Monoisotopic mass of the stripped sequence plus summed mod deltas.
        unmod_pepmass=mass.fast_mass(new_peptide)
        print mod_mass,"mod mass"
        print unmod_pepmass+mod_mass,"This is the combined mass..."
        print new_peptide
bad65b4f8be9808a1fe00e86655ca2cfe8898d04 | 200 | py | Python | google_play_scraper/__init__.py | kluhan/google-play-scraper | 478d7461a5c3c4557949585647f75ac9682e9eee | [
"MIT"
] | null | null | null | google_play_scraper/__init__.py | kluhan/google-play-scraper | 478d7461a5c3c4557949585647f75ac9682e9eee | [
"MIT"
] | null | null | null | google_play_scraper/__init__.py | kluhan/google-play-scraper | 478d7461a5c3c4557949585647f75ac9682e9eee | [
"MIT"
] | null | null | null | from .constants.google_play import Sort
from .features.app import app
from .features.reviews import reviews, reviews_all
from .features.permissions import permissions
VERSION = __version__ = "0.2.1"
| 28.571429 | 50 | 0.81 |
9130c1e3b84bca8e13150bae8dbd14a2637101e0 | 3,428 | py | Python | application/test_app.py | martibook/SemanPhone | fbd895f61000f0d5e3ce9f5d3d6b7c2078620281 | [
"MIT"
] | null | null | null | application/test_app.py | martibook/SemanPhone | fbd895f61000f0d5e3ce9f5d3d6b7c2078620281 | [
"MIT"
] | 2 | 2018-03-22T21:38:44.000Z | 2018-04-19T20:57:15.000Z | application/test_app.py | martibook/SemanPhone | fbd895f61000f0d5e3ce9f5d3d6b7c2078620281 | [
"MIT"
] | 1 | 2018-04-19T10:41:03.000Z | 2018-04-19T10:41:03.000Z | from flask import Flask, render_template, redirect, url_for, request, session
from flask_bootstrap import Bootstrap
from application.tools.exceptions import NoResults
from application.tools.database import get_asso_words, get_definitions, pick_words_4experiment, pick_words_4control, \
get_quiz_info, increase_corrate, decrease_corrate, pick_words
test_app = Flask(__name__)
# NOTE(review): secret key is hardcoded in source — move to an environment
# variable or config file before deploying.
test_app.config['SECRET_KEY'] = 'XMDidks2hal89JHNhdk93049dKndOpP'
bootstrap = Bootstrap(test_app)
@test_app.route('/')
def index():
    """Render the study's landing page."""
    return render_template('test_welcome.html')
@test_app.route('/thankyou')
def thankyou():
    """Render the page shown once a participant has finished."""
    return render_template('test_thankyou.html')
@test_app.route('/semanphone')
def api_semanphone():
    """
    url_format: http://semanphone.fun/semanphone?word=word_argument

    Render the word's dictionary definitions together with its associated
    ("semanphone") words.  Raises NoResults when the ``word`` query
    parameter is missing.
    """
    word = request.args.get('word', None)
    if word is not None:
        definition_list = get_definitions(word)
        asso_word_list = get_asso_words(word)
        return render_template('semanphone.html', word=word, asso_word_list=asso_word_list, definition_list=definition_list)
    else:
        raise NoResults()
@test_app.route('/dictionary')
def api_dictionary():
    """
    url_format: http://semanphone.fun/dictionary?word=word_argument

    Dictionary-only view: renders just the definitions of the query word.
    Raises NoResults when the ``word`` query parameter is missing.
    """
    word = request.args.get('word', None)
    if word is not None:
        definition_list = get_definitions(word)
        return render_template('dictionary.html', word=word, definition_list=definition_list)
    else:
        raise NoResults()
@test_app.route('/experiment')
def experiment():
    """Show the experiment-group word list and remember it in the session."""
    # NOTE(review): random per-participant selection is disabled below in
    # favor of a fixed list — presumably so every participant sees the same
    # words; confirm before re-enabling.
    # random_words = pick_words_4experiment()
    # session["exp_random_words"] = random_words
    fixed_words = pick_words('experiment')
    session["fixed_words"] = fixed_words
    return render_template('experiment.html', random_words=fixed_words)
@test_app.route('/control')
def control():
    """Show the control-group word list and remember it in the session."""
    # random_words = pick_words_4control()
    # session["con_random_words"] = random_words
    fixed_words = pick_words("control")
    session["fixed_words"] = fixed_words
    return render_template('control.html', random_words=fixed_words)
@test_app.route('/quiz/<group>')
def quiz(group):
    """Render the quiz for the words previously stored in the session.

    The word list is placed in the session by /experiment or /control.
    ``group`` is kept in the URL for symmetry with those pages but is not
    needed here — the session already holds the chosen words.

    Fix: the original indexed ``session["fixed_words"]`` directly, which
    raises KeyError (HTTP 500) when the page is visited without going
    through /experiment or /control first; ``session.get`` degrades to an
    empty quiz instead.
    """
    fixed_words = session.get("fixed_words") or []
    quiz_info = get_quiz_info(fixed_words)
    return render_template('quiz.html', quiz_info=quiz_info)
@test_app.route('/increase/<word>/<ref_word>/<group>')
def increase(word, ref_word, group):
    """
    increase <group> correct time of the word
    """
    increase_corrate(word=word, ref_word=ref_word, group=group)
    # plain-text acknowledgement for the caller
    return "success"
@test_app.route('/decrease/<word>/<ref_word>/<group>')
def decrease(word, ref_word, group):
    """
    decrease <group> correct time of the word
    """
    decrease_corrate(word=word, ref_word=ref_word, group=group)
    # plain-text acknowledgement for the caller
    return "success"
@test_app.errorhandler(404)
def page_not_found(e):
    """Custom 404 page."""
    return render_template('404.html'), 404
@test_app.errorhandler(500)
def internal_server_error(e):
    """Custom 500 page."""
    return render_template('500.html'), 500
@test_app.errorhandler(NoResults)
def handle_no_results(error):
    """Render the page configured on the NoResults exception itself."""
    return render_template(error.render_page)
if __name__ == '__main__':
    # NOTE(review): debug=True is for local development only — never in
    # production (it enables the interactive debugger).
    test_app.run(debug=True)
| 27.424 | 124 | 0.712952 |
6ea10d4c8ec0abf73fb1a6ce08dc319076a7ffad | 362 | py | Python | avatar/urls.py | sterliakov/django-avatar | e8be928d4f7bd442822f1dcc576f6c0909360475 | [
"BSD-3-Clause"
] | null | null | null | avatar/urls.py | sterliakov/django-avatar | e8be928d4f7bd442822f1dcc576f6c0909360475 | [
"BSD-3-Clause"
] | null | null | null | avatar/urls.py | sterliakov/django-avatar | e8be928d4f7bd442822f1dcc576f6c0909360475 | [
"BSD-3-Clause"
] | null | null | null | from django.urls import path
from avatar import views
urlpatterns = [
path(r'add/', views.add, name='avatar_add'),
path(r'change/', views.change, name='avatar_change'),
path(r'delete/', views.delete, name='avatar_delete'),
path(r'render_primary/<slug:user>/<int:size>/',
views.render_primary,
name='avatar_render_primary'),
]
| 27.846154 | 57 | 0.668508 |
af6c0f9dbe2c19a699d569c8f4693ecebe07dd63 | 257 | py | Python | Projects/Languages/Python/Labs/Unit 00/Inputs/Input.py | mikozera/Computer-Science | 52d2a317d95e2df2a82f54e837e43b6e6873a400 | [
"MIT"
] | null | null | null | Projects/Languages/Python/Labs/Unit 00/Inputs/Input.py | mikozera/Computer-Science | 52d2a317d95e2df2a82f54e837e43b6e6873a400 | [
"MIT"
] | 3 | 2020-05-01T01:36:59.000Z | 2020-05-05T05:38:33.000Z | Projects/Languages/Python/Labs/Unit 00/Inputs/Input.py | mikozera/Computer-Science | 52d2a317d95e2df2a82f54e837e43b6e6873a400 | [
"MIT"
] | 1 | 2019-11-26T09:02:51.000Z | 2019-11-26T09:02:51.000Z | # intergers
interger = int(input("Enter a Interger :: "))
# floats
decimal = float(input("Enter a Float :: "))
# strings
input("Enter a String :: ")
# Print
print("\nInterger: " + str(interger) + "\nFloat: " +
str(decimal) + "\nString: " + string)
| 19.769231 | 52 | 0.607004 |
1162542f8e60ff57716f8d458d62249972395197 | 2,381 | py | Python | setup.py | yuokada/csv2athena_schema | ef583bd109c8de1b8b7e849f5afb0a34291c91b2 | [
"MIT"
] | null | null | null | setup.py | yuokada/csv2athena_schema | ef583bd109c8de1b8b7e849f5afb0a34291c91b2 | [
"MIT"
] | null | null | null | setup.py | yuokada/csv2athena_schema | ef583bd109c8de1b8b7e849f5afb0a34291c91b2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
REQUIRES = [
'docopt',
]
class PyTest(TestCommand):
    # setuptools "test" command that delegates to pytest, so that
    # ``python setup.py test`` runs the pytest suite.
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []      # no extra command-line args for pytest
        self.test_suite = True   # tell setuptools a test suite exists
    def run_tests(self):
        # imported here so pytest is only required when tests are run
        import pytest
        errcode = pytest.main(self.test_args)
        sys.exit(errcode)
def find_version(fname):
    '''Return the version string assigned to ``__version__`` in file *fname*.

    Only lines that begin with ``__version__ = '...'`` (single or double
    quotes) are considered; the first match wins.

    Raises RuntimeError if no such assignment is found.
    '''
    version_re = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    with open(fname, 'r') as fp:
        for line in fp:
            match = version_re.match(line)
            if match:
                # return directly instead of break + flag variable
                return match.group(1)
    raise RuntimeError('Cannot find version information')
__version__ = find_version("csv2athena_schema.py")
def read(fname):
    """Return the entire contents of *fname* as a string."""
    with open(fname) as handle:
        return handle.read()
setup(
    name='csv2athena_schema',
    # Fix: use the version parsed out of the module (``__version__`` above)
    # instead of a hard-coded "0.1.1", so the two values can never drift.
    version=__version__,
    description='A Python Script to build a athena create table from csv file',
    long_description=read("README.rst"),
    author='Yukihiro Okada',
    author_email='callistoiv+pypi@gmail.com',
    url='https://github.com/yuokada/csv2athena_schema',
    install_requires=REQUIRES,
    license=read("LICENSE"),
    zip_safe=False,
    keywords='csv2athena_schema',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    py_modules=["csv2athena_schema"],
    entry_points={
        'console_scripts': [
            "csv2athena_schema = csv2athena_schema:main"
        ]
    },
    tests_require=['pytest', 'coverage', 'coveralls'],
    cmdclass={'test': PyTest}
)
| 28.686747 | 79 | 0.612768 |
e2688489d68036e9499e7489f9c6e8d2f8f22b46 | 178 | py | Python | Python-Std-Libraries/click/click_commit.py | MiracleWong/PythonPractice | 40aecd84045ad18f6aff95d5b8be8e352ca0a726 | [
"MIT"
] | null | null | null | Python-Std-Libraries/click/click_commit.py | MiracleWong/PythonPractice | 40aecd84045ad18f6aff95d5b8be8e352ca0a726 | [
"MIT"
] | null | null | null | Python-Std-Libraries/click/click_commit.py | MiracleWong/PythonPractice | 40aecd84045ad18f6aff95d5b8be8e352ca0a726 | [
"MIT"
] | null | null | null | import click
@click.command()
@click.option('--message', '-m', multiple=True)
def commit(message):
    # each repeated -m value becomes its own line in the echoed output
    click.echo('\n'.join(message))
if __name__ == '__main__':
    commit()
| 14.833333 | 47 | 0.646067 |
cbcd2c6fc84d502ca29c6ee8cf00c7ac92f511a4 | 791 | py | Python | more_threads/channel.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | more_threads/channel.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | more_threads/channel.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | # channel.py
#
# A minimal object that implements a message channel over a pair
# of file descriptors (like a pipe)
import cPickle as pickle
class Channel(object):
def __init__(self,out_f,in_f):
self.out_f = out_f
self.in_f = in_f
def send(self,item):
pickle.dump(item,self.out_f)
self.out_f.flush()
def recv(self):
return pickle.load(self.in_f)
# Example of using the channel
if __name__ == '__main__':
import subprocess
p = subprocess.Popen(['python','child.py'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
ch = Channel(p.stdin,p.stdout)
ch.send("Hello World")
print ch.recv()
ch.send(42)
print ch.recv()
ch.send([1,2,3,4,5])
print ch.recv()
| 23.969697 | 64 | 0.605563 |
530455636bdd1a62b4194b06685d4852e6aea132 | 12,794 | py | Python | library/azure_rm_trafficmanagerprofile_facts.py | wray/azure_modules | af2d84ffc4a0061f5ab4ed7e621faa0bbdbb2da5 | [
"MIT"
] | null | null | null | library/azure_rm_trafficmanagerprofile_facts.py | wray/azure_modules | af2d84ffc4a0061f5ab4ed7e621faa0bbdbb2da5 | [
"MIT"
] | null | null | null | library/azure_rm_trafficmanagerprofile_facts.py | wray/azure_modules | af2d84ffc4a0061f5ab4ed7e621faa0bbdbb2da5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_trafficmanagerprofile_facts
version_added: "2.7"
short_description: Get Azure Traffic Manager profile facts
description:
- Get facts for a Azure specific Traffic Manager profile or all Traffic Manager profiles.
options:
name:
description:
- Limit results to a specific Traffic Manager profile.
resource_group:
description:
- The resource group to search for the desired Traffic Manager profile
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Hai Cao (@caohai) <t-haicao@microsoft.com>"
- "Yunge Zhu (@yungezz) <yungez@microsoft.com>"
'''
EXAMPLES = '''
- name: Get facts for one Traffic Manager profile
azure_rm_trafficmanager_facts:
name: Testing
resource_group: TestRG
- name: Get facts for all Traffic Manager profiles
azure_rm_trafficmanager_facts:
- name: Get facts by tags
azure_rm_trafficmanager_facts:
tags:
- Environment:Test
'''
RETURN = '''
tms:
description: List of Traffic Manager profiles.
returned: always
type: complex
contains:
resource_group:
description:
- Name of a resource group where the Traffic Manager profile exists.
returned: always
type: str
sample: testGroup
name:
description:
- Name of the Traffic Manager profile.
returned: always
type: str
sample: testTm
state:
description:
- The state of the Traffic Manager profile.
type: str
sample: present
location:
description:
- Location of the Traffic Manager profile.
type: str
sample: global
profile_status:
description:
- The status of the Traffic Manager profile.
type: str
sample: Enabled
routing_method:
description:
- The traffic routing method of the Traffic Manager profile.
type: str
sample: performance
dns_config:
description:
- The DNS settings of the Traffic Manager profile.
type: complex
sample:
relative_name: testTm
fqdn: testTm.trafficmanager.net
ttl: 60
monitor_config:
description:
- The endpoint monitoring settings of the Traffic Manager profile.
type: complex
contains:
protocol:
description:
- The protocol (HTTP, HTTPS or TCP) used to probe for endpoint health.
type: str
sample: HTTP
port:
description:
- The TCP port used to probe for endpoint health.
type: int
sample: 80
path:
description:
- The path relative to the endpoint domain name used to probe for endpoint health.
type: str
sample: /
interval:
description:
- The monitor interval for endpoints in this profile in seconds.
type: int
sample: 10
timeout:
description:
- The monitor timeout for endpoints in this profile in seconds.
type: int
sample: 30
tolerated_failures:
description:
- The number of consecutive failed health check before declaring an endpoint Degraded after the next failed health check.
type: int
sample: 3
endpoints:
description:
- The list of endpoints in the Traffic Manager profile.
type: list
element: complex
contains:
id:
description:
- Fully qualified resource Id for the resource.
type: str
sample: /subscriptions/XXXXXX...XXXXXXXXX/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest/externalEndpoints/e1
name:
description:
- The name of the endpoint.
type: str
sample: e1
type:
description:
- The type of the endpoint.
type: str
sample: external_endpoints
target_resource_id:
description:
- The Azure Resource URI of the of the endpoint.
type: str
sample: /subscriptions/XXXXXX...XXXXXXXXX/resourceGroups/vscjavaci/providers/Microsoft.ClassicCompute/domainNames/vscjavaci
target:
description:
- The fully-qualified DNS name of the endpoint.
type: str
sample: 8.8.8.8
status:
description:
- The status of the endpoint.
type: str
sample: Enabled
weight:
description:
- The weight of this endpoint when the profile has routing_method C(weighted).
type: int
sample: 10
priority:
description:
- The priority of this endpoint when the profile has routing_method C(priority).
type: str
sample: 3
location:
description:
- The location of endpoints when type is C(external_endpoints) or C(nested_endpoints), and profile routing_method is (performance).
type: str
sample: East US
min_child_endpoints:
description:
- The minimum number of endpoints that must be available in the child profile to make the parent profile available.
type: int
sample: 3
geo_mapping:
description:
- The list of countries/regions mapped to this endpoint when the profile has routing_method C(geographic).
type: list
sample: [
"GEO-NA",
"GEO-AS"
]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _camel_to_snake
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
except:
# handled in azure_rm_common
pass
import re
AZURE_OBJECT_CLASS = 'trafficManagerProfiles'
def serialize_endpoint(endpoint):
    """Convert a Traffic Manager endpoint object into a plain dict.

    The endpoint's ``type`` (when present) is reduced to its last path
    segment and converted to snake_case.
    """
    serialized = {
        'id': endpoint.id,
        'name': endpoint.name,
        'target_resource_id': endpoint.target_resource_id,
        'target': endpoint.target,
        'status': endpoint.endpoint_status,
        'weight': endpoint.weight,
        'priority': endpoint.priority,
        'location': endpoint.endpoint_location,
        'min_child_endpoints': endpoint.min_child_endpoints,
        'geo_mapping': endpoint.geo_mapping,
    }
    if endpoint.type:
        serialized['type'] = _camel_to_snake(endpoint.type.split("/")[-1])
    return serialized
class AzureRMTrafficManagerProfileFacts(AzureRMModuleBase):
    """Utility class to get Azure Traffic Manager profile facts"""
    def __init__(self):
        # module argument spec: optional profile name, resource group and
        # tag filter
        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )
        self.results = dict(
            changed=False,
            tms=[]
        )
        self.name = None
        self.resource_group = None
        self.tags = None
        super(AzureRMTrafficManagerProfileFacts, self).__init__(
            derived_arg_spec=self.module_args,
            supports_tags=False,
            facts_module=True
        )
    def exec_module(self, **kwargs):
        """Dispatch to get/list depending on which filters were supplied."""
        for key in self.module_args:
            setattr(self, key, kwargs[key])
        # a profile name alone is ambiguous — it only identifies a profile
        # together with its resource group
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")
        if self.name:
            self.results['tms'] = self.get_item()
        elif self.resource_group:
            self.results['tms'] = self.list_resource_group()
        else:
            self.results['tms'] = self.list_all()
        return self.results
    def get_item(self):
        """Get a single Azure Traffic Manager profile"""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        result = []
        try:
            item = self.traffic_manager_management_client.profiles.get(
                self.resource_group, self.name)
        except CloudError:
            # not found: facts modules report an empty list, not a failure
            pass
        if item and self.has_tags(item.tags, self.tags):
            result = [self.serialize_tm(item)]
        return result
    def list_resource_group(self):
        """Get all Azure Traffic Managers profiles within a resource group"""
        self.log('List all Azure Traffic Managers within a resource group')
        try:
            response = self.traffic_manager_management_client.profiles.list_by_resource_group(
                self.resource_group)
        except AzureHttpError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))
        results = []
        for item in response:
            # apply the optional tag filter client-side
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_tm(item))
        return results
    def list_all(self):
        """Get all Azure Traffic Manager profiles within a subscription"""
        self.log('List all Traffic Manager profiles within a subscription')
        try:
            response = self.traffic_manager_management_client.profiles.list_by_subscription()
        except Exception as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_tm(item))
        return results
    def serialize_tm(self, tm):
        '''
        Convert a Traffic Manager profile object to dict.
        :param tm: Traffic Manager profile object
        :return: dict
        '''
        result = self.serialize_obj(tm, AZURE_OBJECT_CLASS)
        new_result = {}
        new_result['id'] = tm.id
        # extract the resource group name out of the resource id
        # (.../resourceGroups/<name>/...)
        new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', result['id']))
        new_result['name'] = tm.name
        new_result['state'] = 'present'
        new_result['location'] = tm.location
        new_result['profile_status'] = tm.profile_status
        new_result['routing_method'] = tm.traffic_routing_method.lower()
        new_result['dns_config'] = dict(
            relative_name=tm.dns_config.relative_name,
            fqdn=tm.dns_config.fqdn,
            ttl=tm.dns_config.ttl
        )
        new_result['monitor_config'] = dict(
            profile_monitor_status=tm.monitor_config.profile_monitor_status,
            protocol=tm.monitor_config.protocol,
            port=tm.monitor_config.port,
            path=tm.monitor_config.path,
            interval=tm.monitor_config.interval_in_seconds,
            timeout=tm.monitor_config.timeout_in_seconds,
            tolerated_failures=tm.monitor_config.tolerated_number_of_failures
        )
        new_result['endpoints'] = [serialize_endpoint(endpoint) for endpoint in tm.endpoints]
        new_result['tags'] = tm.tags
        return new_result
return new_result
def main():
    """Main module execution code path"""
    # NOTE(review): constructing the facts object appears to drive the whole
    # module run (argument parsing/execution in the AzureRMModuleBase base
    # class) — nothing else is invoked here.
    AzureRMTrafficManagerProfileFacts()
if __name__ == '__main__':
    main()
| 33.757256 | 159 | 0.560419 |
389b43d9f61c25cb1bc9a46d29298eecb6051544 | 2,257 | py | Python | jinahub/encoders/image/ImageTorchEncoder/tests/integration/test_flow_integration.py | Gikiman/executors | 98658b4136859164390cfccbde8cf0f7cf843593 | [
"Apache-2.0"
] | null | null | null | jinahub/encoders/image/ImageTorchEncoder/tests/integration/test_flow_integration.py | Gikiman/executors | 98658b4136859164390cfccbde8cf0f7cf843593 | [
"Apache-2.0"
] | null | null | null | jinahub/encoders/image/ImageTorchEncoder/tests/integration/test_flow_integration.py | Gikiman/executors | 98658b4136859164390cfccbde8cf0f7cf843593 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize('arr_in', [
    (np.ones((224, 224, 3), dtype=np.uint8)),
    (np.ones((100, 100, 3), dtype=np.uint8)),
    (np.ones((50, 40, 3), dtype=np.uint8))
])
def test_no_batch(arr_in: np.ndarray):
    """A single image (various HxW, 3 channels) yields one 512-d embedding."""
    flow = Flow().add(uses=ImageTorchEncoder)
    with flow:
        resp = flow.post(
            on='/test',
            inputs=[Document(blob=arr_in)],
            return_results=True
        )
        results_arr = DocumentArray(resp[0].data.docs)
        assert len(results_arr) == 1
        assert results_arr[0].embedding is not None
        assert results_arr[0].embedding.shape == (512, )
def test_with_batch():
    """25 identical images sent through the flow produce 25 embeddings."""
    flow = Flow().add(uses=ImageTorchEncoder)
    with flow:
        resp = flow.post(
            on='/test',
            inputs=(Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)),
            return_results=True
        )
        assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
    ['docs', 'docs_per_path', 'traversal_paths'],
    [
        (pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
        (pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 11], ['cc', 0]], 'c'),
        (pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 11]], 'cc')
    ]
)
def test_traversal_paths(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str):
    """Embeddings are added only along the requested traversal path."""
    def validate_traversal(expected_docs_per_path: List[List[str]]):
        def validate(res):
            # Fix: the original returned from inside the loop, so only the
            # FIRST expected path was ever checked.  Validate every path.
            return all(
                len(DocumentArray(res[0].docs)
                    .traverse_flat([path])
                    .get_attributes('embedding')) == count
                for path, count in expected_docs_per_path
            )
        return validate

    flow = Flow().add(uses=ImageTorchEncoder)
    with flow:
        resp = flow.post(
            on='/test',
            inputs=docs,
            parameters={'traversal_paths': [traversal_paths]},
            return_results=True
        )
    assert validate_traversal(docs_per_path)(resp)
23caa65f25b9b2815e2d5248244a713e5b9c9ef7 | 4,983 | py | Python | students/K33422/practical_works/Novatorova_Irina/django_project_novatorova/project_first_app/migrations/0002_auto_20210122_1935.py | reeeena/ITMO_ICT_WebDevelopment_2020-2021 | 9f6d3cdf48effd8976958c676ef99e323031d57c | [
"MIT"
] | null | null | null | students/K33422/practical_works/Novatorova_Irina/django_project_novatorova/project_first_app/migrations/0002_auto_20210122_1935.py | reeeena/ITMO_ICT_WebDevelopment_2020-2021 | 9f6d3cdf48effd8976958c676ef99e323031d57c | [
"MIT"
] | null | null | null | students/K33422/practical_works/Novatorova_Irina/django_project_novatorova/project_first_app/migrations/0002_auto_20210122_1935.py | reeeena/ITMO_ICT_WebDevelopment_2020-2021 | 9f6d3cdf48effd8976958c676ef99e323031d57c | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2021-01-22 19:35
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: turns ``driver`` into a full Django auth
    user model (password, username, groups, permissions, status flags, ...)
    and rebuilds the primary-key fields on ``car``, ``driver`` and
    ``driverlicence``.
    """
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
        ('project_first_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='driver',
            options={'verbose_name': 'user', 'verbose_name_plural': 'users'},
        ),
        migrations.AlterModelManagers(
            name='driver',
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.AddField(
            model_name='car',
            name='usage',
            field=models.ManyToManyField(through='project_first_app.Ownership', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='driver',
            name='address',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='driver',
            name='date_joined',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined'),
        ),
        migrations.AddField(
            model_name='driver',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='email address'),
        ),
        migrations.AddField(
            model_name='driver',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='driver',
            name='is_active',
            field=models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active'),
        ),
        migrations.AddField(
            model_name='driver',
            name='is_staff',
            field=models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status'),
        ),
        migrations.AddField(
            model_name='driver',
            name='is_superuser',
            field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
        ),
        migrations.AddField(
            model_name='driver',
            name='last_login',
            field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
        ),
        migrations.AddField(
            model_name='driver',
            name='nationality',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='driver',
            name='passport',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='driver',
            name='password',
            field=models.CharField(default=1, max_length=128, verbose_name='password'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='driver',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
        migrations.AddField(
            model_name='driver',
            name='username',
            field=models.CharField(default=1, error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username'),
            preserve_default=False,
        ),
        # drop and re-create car.id so it becomes an explicit AutoField
        migrations.RemoveField(
            model_name='car',
            name='id',
        ),
        migrations.AddField(
            model_name='car',
            name='id',
            field=models.AutoField(default=1, primary_key=True, serialize=False, unique=True),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='driver',
            name='id',
            field=models.AutoField(default=1, primary_key=True, serialize=False, unique=True),
        ),
        migrations.AlterField(
            model_name='driverlicence',
            name='id',
            field=models.AutoField(primary_key=True, serialize=False, unique=True),
        ),
    ]
eaeec606236e8bb412417b9872108d402145373c | 5,215 | py | Python | samples/transpose/transpose_opentuner.py | abhisekkundu-intel/libxsmm | 66d97cb86c192ca727afd9ddf42ad8c80addf6e1 | [
"BSD-3-Clause"
] | 1 | 2021-05-23T21:25:05.000Z | 2021-05-23T21:25:05.000Z | samples/transpose/transpose_opentuner.py | abhisekkundu-intel/libxsmm | 66d97cb86c192ca727afd9ddf42ad8c80addf6e1 | [
"BSD-3-Clause"
] | null | null | null | samples/transpose/transpose_opentuner.py | abhisekkundu-intel/libxsmm | 66d97cb86c192ca727afd9ddf42ad8c80addf6e1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# Copyright (c) Intel Corporation - All rights reserved. #
# This file is part of the LIBXSMM library. #
# #
# For information on the license, see the LICENSE file. #
# Further information: https://github.com/hfp/libxsmm/ #
# SPDX-License-Identifier: BSD-3-Clause #
###############################################################################
# Hans Pabst (Intel Corp.)
###############################################################################
#
# This script is based on OpenTuner's tutorial:
# "Optimizing Block Matrix Multiplication".
#
import opentuner
from opentuner import ConfigurationManipulator
from opentuner import IntegerParameter
from opentuner import MeasurementInterface
from opentuner import Result
import time
import sys
import re
class TransposeTune(MeasurementInterface):
    """OpenTuner interface that searches for the best LIBXSMM transpose
    tile size (M x N) over a user-supplied matrix-size range.
    """
    def manipulator(self):
        """
        Define the search space by creating a
        ConfigurationManipulator
        """
        self.mintilesize = 2
        self.granularity = 1
        assert(0 < self.granularity)
        # Fix: use floor division so the bounds stay integers under
        # Python 3 as well — "/" would produce floats and corrupt the
        # IntegerParameter bounds (behavior unchanged under Python 2).
        minsize = max(self.mintilesize // self.granularity, 1)
        maxsize = minsize + self.granularity
        m_max = max(min(self.args.maxm, self.args.end), maxsize)
        n_max = max(min(self.args.maxn, self.args.end), maxsize)
        # ceiling division by the granularity
        m_max = (m_max + self.granularity - 1) // self.granularity
        n_max = (n_max + self.granularity - 1) // self.granularity
        m_param = IntegerParameter("M", minsize, m_max)
        n_param = IntegerParameter("N", minsize, n_max)
        manipulator = ConfigurationManipulator()
        manipulator.add_parameter(m_param)
        manipulator.add_parameter(n_param)
        return manipulator
    def seed_configurations(self):
        """Seed the search with the user-supplied initial tile size, if any."""
        # if only one of m/n was given, use it for both dimensions
        m_seed = [self.args.n, self.args.m][0 != self.args.m]
        n_seed = [self.args.m, self.args.n][0 != self.args.n]
        if 0 == m_seed or 0 == n_seed:
            return []
        else:
            return [{"M": max(m_seed, self.mintilesize),
                     "N": max(n_seed, self.mintilesize)}]
    def objective(self):
        # maximize measured frequency, prefer smaller kernels on ties
        return opentuner.search.objective.MaximizeAccuracyMinimizeSize()
    def run(self, desired_result, input, limit):
        """
        Compile and run a given configuration then
        return performance
        """
        cfg = desired_result.configuration.data
        nruns = max(self.args.nruns, 1)
        begin = max(self.args.begin, self.mintilesize)
        end = max(self.args.end, self.mintilesize)
        run_cmd = (
            "CHECK=-1"  # repeatable runs
            " LIBXSMM_XCOPY_M=" + str(self.granularity * cfg["M"]) +
            " LIBXSMM_XCOPY_N=" + str(self.granularity * cfg["N"]) +
            " ./transpose.sh o" + " " + str(end) + " " + str(end) +
            " " + str(end) + " " + str(end) + " " + str(nruns) +
            " -" + str(begin))
        run_result = self.call_program(run_cmd)
        if (0 == run_result["returncode"]):
            # parse the benchmark's "duration: <ms>" line
            match = re.search(
                "\\s*duration:\\s+([0-9]+(\\.[0-9]*)*)",
                run_result["stdout"])
            assert(match is not None)
            mseconds = float(match.group(1)) / nruns
            assert(0 < mseconds)
            frequency = 1000.0 / mseconds
            kernelsize = (self.granularity**2) * cfg["M"] * cfg["N"]
            return Result(time=mseconds, accuracy=frequency, size=kernelsize)
        else:
            sys.tracebacklimit = 0
            raise RuntimeError("Execution failed for \"" + run_cmd + "\"!")
    def save_final_config(self, configuration):
        """
        called at the end of tuning
        """
        filename = (
            "transpose-" + str(max(self.args.begin, 1)) +
            "_" + str(max(self.args.end, 1)) +
            "_" + str(max(self.args.nruns, 1)) +
            time.strftime("-%Y%m%d-%H%M%S") + ".json")
        print("Optimal block size written to " + filename +
              ": ", configuration.data)
        self.manipulator().save_to_file(configuration.data, filename)
if __name__ == "__main__":
    # start from OpenTuner's standard argument parser and add the
    # benchmark-specific positional arguments
    argparser = opentuner.default_argparser()
    argparser.add_argument(
        "begin", type=int,
        help="Begin of the range (min. M and N)")
    argparser.add_argument(
        "end", type=int,
        help="End of the range (max. M and N)")
    argparser.add_argument(
        "nruns", type=int, default=100, nargs='?',
        help="Number of experiments per epoch")
    argparser.add_argument(
        "m", type=int, default=0, nargs='?',
        help="Initial tile size (M)")
    argparser.add_argument(
        "n", type=int, default=0, nargs='?',
        help="Initial tile size (N)")
    argparser.add_argument(
        "maxm", type=int, default=160, nargs='?',
        help="Max. tile size (M)")
    argparser.add_argument(
        "maxn", type=int, default=160, nargs='?',
        help="Max. tile size (N)")
    TransposeTune.main(argparser.parse_args())
f7fe4fbbf04ddc7f858ba1868ed2188887e30a06 | 316 | py | Python | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | __main__.py | HsOjo/QiniuSyncer | 92e21179cd97ef1b5baf294d04dbfb7ce3db7aa9 | [
"MIT"
] | null | null | null | import sys
from app import Application
from app.util import pyinstaller
from app.util.log import Log
# When bundled by PyInstaller the interpreter sets sys.frozen; apply the
# encoding workaround before anything is written to stdout/stderr.
if getattr(sys, 'frozen', False):
    # is run at pyinstaller
    pyinstaller.fix_encoding_in_pyinstaller()

Log.init_app()

app = Application(sys.argv)
try:
    app.run()
except Exception:
    # A bare `except:` here would also trap SystemExit and
    # KeyboardInterrupt, turning a normal exit or Ctrl-C into a crash
    # report; catch only genuine errors and hand them to the
    # application's exception hook.
    app.callback_exception()
| 17.555556 | 45 | 0.731013 |
06d770d24fe8467ecea6b1df6f01ed227bed81b5 | 24,355 | py | Python | packages/dcos-integration-test/extra/test_mesos.py | timgates42/dcos | 9d7e4d65b797d67fcd32c626f8026f28d2dece90 | [
"Apache-2.0"
] | 2,577 | 2016-04-19T09:57:39.000Z | 2022-03-17T10:34:25.000Z | packages/dcos-integration-test/extra/test_mesos.py | timgates42/dcos | 9d7e4d65b797d67fcd32c626f8026f28d2dece90 | [
"Apache-2.0"
] | 7,410 | 2016-04-19T21:19:31.000Z | 2022-01-21T20:14:21.000Z | packages/dcos-integration-test/extra/test_mesos.py | timgates42/dcos | 9d7e4d65b797d67fcd32c626f8026f28d2dece90 | [
"Apache-2.0"
] | 625 | 2016-04-19T10:09:35.000Z | 2022-03-16T10:53:45.000Z | import copy
import json
import logging
import os
import textwrap
import uuid
from typing import Any, Generator
import pytest
import retrying
import test_helpers
from dcos_test_utils import marathon, recordio
from dcos_test_utils.dcos_api import DcosApiSession
__maintainer__ = 'Gilbert88'
__contact__ = 'core-team@mesosphere.io'
def input_streamer(nested_container_id: dict) -> Generator:
    """Yield the recordio-encoded frames of an ATTACH_CONTAINER_INPUT call.

    Produces three frames: the initial message selecting the nested
    container, a data frame carrying 'meow' on STDIN, and an empty data
    frame that signals end-of-stream to the agent.
    """
    encode = recordio.Encoder(
        lambda s: bytes(json.dumps(s, ensure_ascii=False), "UTF-8")).encode

    def frame(payload: dict) -> dict:
        return {'type': 'ATTACH_CONTAINER_INPUT',
                'attach_container_input': payload}

    def stdin_data(text: str) -> dict:
        return {'type': 'PROCESS_IO',
                'process_io': {'type': 'DATA',
                               'data': {'type': 'STDIN', 'data': text}}}

    yield encode(frame({'type': 'CONTAINER_ID',
                        'container_id': nested_container_id}))
    yield encode(frame(stdin_data('meow')))
    # An empty data chunk tells the server we reached EOF on stdin.
    yield encode(frame(stdin_data('')))
def test_if_marathon_app_can_be_debugged(dcos_api_session: DcosApiSession) -> None:
    """End-to-end check of the Mesos agent debug API.

    Deploys a basic Marathon app, locates its container and agent from the
    master state, launches a nested `cat` debug container inside it,
    attaches to the container's input and output streams, writes 'meow'
    on stdin and asserts it is echoed on both the launch-session and the
    attached output streams.
    """
    # Launch a basic marathon app (no image), so we can debug into it!
    # All attach/IO calls below run inside the deploy context, while the
    # app's task is still alive.
    app, test_uuid = test_helpers.marathon_test_app()
    app_id = 'integration-test-{}'.format(test_uuid)
    with dcos_api_session.marathon.deploy_and_cleanup(app):
        # Fetch the mesos master state once the task is running
        master_ip = dcos_api_session.masters[0]
        r = dcos_api_session.get('/state', host=master_ip, port=5050)
        assert r.status_code == 200
        state = r.json()
        # Find the agent_id and container_id from master state
        container_id = None
        agent_id = None
        for framework in state['frameworks']:
            for task in framework['tasks']:
                if app_id in task['id']:
                    container_id = task['statuses'][0]['container_status']['container_id']['value']
                    agent_id = task['slave_id']
        assert container_id is not None, 'Container ID not found for instance of app_id {}'.format(app_id)
        assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(app_id)
        # Find hostname and URL from agent_id
        agent_hostname = None
        for agent in state['slaves']:
            if agent['id'] == agent_id:
                agent_hostname = agent['hostname']
        assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(agent_id)
        logging.debug('Located %s with containerID %s on agent %s', app_id, container_id, agent_hostname)

        # Helper: POST to the agent's operator API (port 5051) and require
        # a 200 response. NOTE: the `json` parameter intentionally shadows
        # the json module inside this helper to mirror requests' API.
        def _post_agent(url: str, headers: Any, json: Any = None, data: Any = None, stream: bool = False) -> Any:
            r = dcos_api_session.post(
                url,
                host=agent_hostname,
                port=5051,
                headers=headers,
                json=json,
                data=data,
                stream=stream)
            assert r.status_code == 200
            return r

        # Prepare nested container id data
        nested_container_id = {
            'value': 'debug-%s' % str(uuid.uuid4()),
            'parent': {'value': '%s' % container_id}}

        # Launch debug session and attach to output stream of debug container
        output_headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/recordio',
            'Message-Accept': 'application/json'
        }
        lncs_data = {
            'type': 'LAUNCH_NESTED_CONTAINER_SESSION',
            'launch_nested_container_session': {
                'command': {'value': 'cat'},
                'container_id': nested_container_id}}
        launch_output = _post_agent('/api/v1', output_headers, json=lncs_data, stream=True)

        # Attach to output stream of nested container
        attach_out_data = {
            'type': 'ATTACH_CONTAINER_OUTPUT',
            'attach_container_output': {'container_id': nested_container_id}}
        attached_output = _post_agent('/api/v1', output_headers, json=attach_out_data, stream=True)

        # Attach to input stream of debug container and stream a message
        input_headers = {
            'Content-Type': 'application/recordio',
            'Message-Content-Type': 'application/json',
            'Accept': 'application/json',
            'Transfer-Encoding': 'chunked'
        }
        _post_agent('/api/v1', input_headers, data=input_streamer(nested_container_id))

        # Verify the streamed output from the launch session
        meowed = False
        decoder = recordio.Decoder(lambda s: json.loads(s.decode("UTF-8")))
        for chunk in launch_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data']['data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read launch output without seeing meow.'

        meowed = False
        # Verify the message from the attached output stream
        for chunk in attached_output.iter_content():
            for r in decoder.decode(chunk):
                if r['type'] == 'DATA':
                    logging.debug('Extracted data chunk: %s', r['data'])
                    assert r['data']['data'] == 'meow', 'Output did not match expected'
                    meowed = True
        assert meowed, 'Read output stream without seeing meow.'
def test_files_api(dcos_api_session: DcosApiSession) -> None:
    """Verify a Mesos task's stdout and stderr are readable and non-empty.

    The default test app writes nothing to standard output, so the command
    is prefixed with an echo to guarantee stdout has content before the
    sandbox files are fetched.
    """
    app, test_uuid = test_helpers.marathon_test_app()
    app['cmd'] = 'echo $DCOS_TEST_UUID && ' + app['cmd']

    with dcos_api_session.marathon.deploy_and_cleanup(app):
        info = dcos_api_session.marathon.get('/v2/info').json()
        framework_id = info['frameworkId']
        tasks = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks']
        task = tasks[0]

        for sandbox_file in ('stdout', 'stderr'):
            content = dcos_api_session.mesos_sandbox_file(
                task['slaveId'], framework_id, task['id'], sandbox_file)

            assert content, 'File {} should not be empty'.format(sandbox_file)
def test_if_ucr_app_runs_in_new_pid_namespace(dcos_api_session: DcosApiSession) -> None:
    """Check the UCR pid-namespace isolator by counting visible PIDs."""
    # We run a marathon app instead of a metronome job because metronome
    # doesn't support running docker images with the UCR. We need this
    # functionality in order to test that the pid namespace isolator
    # is functioning correctly.
    app, test_uuid = test_helpers.marathon_test_app(container_type=marathon.Container.MESOS)
    ps_output_file = 'ps_output'
    # Dump the PIDs visible inside the container, then idle so the task
    # stays alive while we fetch the file from its sandbox.
    app['cmd'] = 'ps ax -o pid= > {}; sleep 1000'.format(ps_output_file)
    with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
        marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]
        # There is a short delay between the `app_task` starting and it writing
        # its output to the `ps_output_file`. Because of this, we wait up to 10
        # seconds for this file to appear before throwing an exception.
        @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
        def get_ps_output() -> Any:
            return dcos_api_session.mesos_sandbox_file(
                app_task['slaveId'], marathon_framework_id, app_task['id'], ps_output_file)
        # In a private pid namespace only the container's own handful of
        # processes should be visible; more than 4 PIDs indicates the
        # isolator leaked the host namespace.
        assert len(get_ps_output().split()) <= 4, 'UCR app has more than 4 processes running in its pid namespace'
def test_memory_profiling(dcos_api_session: DcosApiSession) -> None:
    """Exercise the Mesos master memory profiler end to end.

    Starts a profiling run, stops it, then downloads the raw profile,
    expecting an HTTP 200 from each endpoint in turn.
    """
    master_ip = dcos_api_session.masters[0]
    for endpoint in ('/memory-profiler/start',
                     '/memory-profiler/stop',
                     '/memory-profiler/download/raw'):
        response = dcos_api_session.get(endpoint, host=master_ip, port=5050)
        assert response.status_code == 200, response.text
def test_containerizer_debug_endpoint(dcos_api_session: DcosApiSession) -> None:
    """Poll the agent's `/containerizer/debug` endpoint and verify that it
    answers 200 with an empty list of pending containerizer operations."""
    first_agent = dcos_api_session.slaves[0]
    response = dcos_api_session.get('/containerizer/debug', host=first_agent, port=5051)
    assert response.status_code == 200
    assert response.json() == {'pending': []}
def test_blkio_stats(dcos_api_session: DcosApiSession) -> None:
    """Verify cgroups blkio throttling statistics for a UCR command task.

    Deploys a Marathon app performing a known number of synchronous disk
    writes, then checks that the agent's /monitor/statistics entry for that
    executor reports at least that much blkio throttling activity.
    """
    expanded_config = test_helpers.get_expanded_config()
    if expanded_config['provider'] == 'azure' or expanded_config.get('platform') == 'azure':
        pytest.skip('See: https://jira.mesosphere.com/browse/DCOS-49023')
    # Launch a Marathon application to do some disk writes, and then verify that
    # the cgroups blkio statistics of the application can be correctly retrieved.
    app, test_uuid = test_helpers.marathon_test_app(container_type=marathon.Container.MESOS)
    app_id = 'integration-test-{}'.format(test_uuid)
    # The application will generate a 10k file with 10 disk writes.
    #
    # TODO(qianzhang): In some old platforms (CentOS 6 and Ubuntu 14),
    # the first disk write of a blkio cgroup will always be missed in
    # the blkio throttling statistics, so here we run two `dd` commands,
    # the first one which does only one disk write will be missed on
    # those platforms, and the second one will be recorded in the blkio
    # throttling statistics. When we drop the CentOS 6 and Ubuntu 14
    # support in future, we should remove the first `dd` command.
    marker_file = 'marker'
    app['cmd'] = ('dd if=/dev/zero of=file bs=1024 count=1 oflag=dsync && '
                  'dd if=/dev/zero of=file bs=1024 count=10 oflag=dsync && '
                  'echo -n done > {} && sleep 1000').format(marker_file)
    with dcos_api_session.marathon.deploy_and_cleanup(app, check_health=False):
        marathon_framework_id = dcos_api_session.marathon.get('/v2/info').json()['frameworkId']
        app_task = dcos_api_session.marathon.get('/v2/apps/{}/tasks'.format(app['id'])).json()['tasks'][0]
        # Wait up to 10 seconds for the marker file to appear which
        # indicates the disk writes via `dd` command are done.
        @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
        def get_marker_file_content() -> Any:
            return dcos_api_session.mesos_sandbox_file(
                app_task['slaveId'], marathon_framework_id, app_task['id'], marker_file)
        assert get_marker_file_content() == 'done'
        # Fetch the Mesos master state
        master_ip = dcos_api_session.masters[0]
        r = dcos_api_session.get('/state', host=master_ip, port=5050)
        assert r.status_code == 200
        state = r.json()
        # Find the agent_id from master state
        agent_id = None
        for framework in state['frameworks']:
            for task in framework['tasks']:
                if app_id in task['id']:
                    agent_id = task['slave_id']
        assert agent_id is not None, 'Agent ID not found for instance of app_id {}'.format(app_id)
        # Find hostname from agent_id
        agent_hostname = None
        for agent in state['slaves']:
            if agent['id'] == agent_id:
                agent_hostname = agent['hostname']
        assert agent_hostname is not None, 'Agent hostname not found for agent_id {}'.format(agent_id)
        logging.debug('Located %s on agent %s', app_id, agent_hostname)
        # Fetch the Mesos agent statistics
        r = dcos_api_session.get('/monitor/statistics', host=agent_hostname, port=5051)
        assert r.status_code == 200
        stats = r.json()
        total_io_serviced = None
        total_io_service_bytes = None
        for stat in stats:
            # Find the statistic for the Marathon application that we deployed. Since what that
            # Marathon application launched is a Mesos command task (i.e., using Mesos built-in
            # command executor), the executor ID will be same as the task ID, so if we find the
            # `app_id` in an executor ID of a statistic, that must be the statistic entry
            # corresponding to the application that we deployed.
            if app_id in stat['executor_id']:
                # We only care about the blkio throttle statistics but not the blkio cfq statistics,
                # because in the environment where the disk IO scheduler is not `cfq`, all the cfq
                # statistics may be 0.
                throttle_stats = stat['statistics']['blkio_statistics']['throttling']
                for throttle_stat in throttle_stats:
                    # The entry without a 'device' key aggregates totals
                    # across all devices.
                    if 'device' not in throttle_stat:
                        total_io_serviced = throttle_stat['io_serviced'][0]['value']
                        total_io_service_bytes = throttle_stat['io_service_bytes'][0]['value']
        assert total_io_serviced is not None, ('Total blkio throttling IO serviced not found '
                                               'for app_id {}'.format(app_id))
        assert total_io_service_bytes is not None, ('Total blkio throttling IO service bytes '
                                                    'not found for app_id {}'.format(app_id))
        # We expect the statistics retrieved from Mesos agent are equal or greater than what we
        # did with the `dd` command (i.e., 10 and 10240), because:
        # 1. Besides the disk writes done by the `dd` command, the statistics may also include
        #    some disk reads, e.g., to load the necessary executable binary and libraries.
        # 2. In the environment where RAID is enabled, there may be multiple disk writes to
        #    different disks for a single `dd` write.
        assert int(total_io_serviced) >= 10, ('Total blkio throttling IO serviced for app_id {} '
                                              'are less than 10'.format(app_id))
        assert int(total_io_service_bytes) >= 10240, ('Total blkio throttling IO service bytes for '
                                                      'app_id {} are less than 10240'.format(app_id))
def get_region_zone(domain: dict) -> tuple:
    """Extract the (region, zone) names from a Mesos fault-domain dict.

    Asserts that the expected nesting is present:
    ``domain['fault_domain']['region']['name']`` and
    ``domain['fault_domain']['zone']['name']``.
    """
    assert isinstance(domain, dict), 'input must be dict'
    assert 'fault_domain' in domain, 'fault_domain is missing. {}'.format(domain)

    fault_domain = domain['fault_domain']

    # Validate and collect both names with one shared check per level.
    names = []
    for key in ('region', 'zone'):
        assert key in fault_domain, 'missing {}. {}'.format(key, domain)
        assert 'name' in fault_domain[key], 'missing {}. {}'.format(key, domain)
        names.append(fault_domain[key]['name'])

    return names[0], names[1]
def test_fault_domain(dcos_api_session: DcosApiSession) -> None:
    """Check that the fault domain (region/zone) the master was started
    with is reflected consistently in the leader info and on every agent
    reported by /state."""
    expanded_config = test_helpers.get_expanded_config()
    if expanded_config['fault_domain_enabled'] == 'false':
        pytest.skip('fault domain is not set')
    master_ip = dcos_api_session.masters[0]
    r = dcos_api_session.get('/state', host=master_ip, port=5050)
    assert r.status_code == 200
    state = r.json()
    # check flags and get the domain parameters mesos master was started with.
    assert 'flags' in state, 'missing flags in state json'
    assert 'domain' in state['flags'], 'missing domain in state json flags'
    # The --domain flag value is itself a JSON document embedded in a string.
    cli_flag = json.loads(state['flags']['domain'])
    expected_region, expected_zone = get_region_zone(cli_flag)
    # check master top level keys
    assert 'leader_info' in state, 'leader_info is missing in state json'
    assert 'domain' in state['leader_info'], 'domain is missing in state json'
    leader_region, leader_zone = get_region_zone(state['leader_info']['domain'])
    assert leader_region == expected_region, 'expect region {}. Got {}'.format(expected_region, leader_region)
    assert leader_zone == expected_zone, 'expect zone {}. Got {}'.format(expected_zone, leader_zone)
    # Every agent must share the master's region; zones may legitimately
    # differ per agent, so only non-emptiness is checked there.
    for agent in state['slaves']:
        assert 'domain' in agent, 'missing domain field for agent. {}'.format(agent)
        agent_region, agent_zone = get_region_zone(agent['domain'])
        assert agent_region == expected_region, 'expect region {}. Got {}'.format(expected_region, agent_region)
        # agent_zone might be different on agents, so we just make sure it's a sane value
        assert agent_zone, 'agent_zone cannot be empty'
@pytest.fixture
def reserved_disk(dcos_api_session: DcosApiSession) -> Generator:
    """
    Set up an agent with one disk in a role.

    Reserve a chunk of `disk` resources on an agent for a role, and the
    remaining resources to another role. With that a framework in the first
    role will only be offered `disk` resources.

    Yields the session with `principal` and `role` attributes set; all
    reservations are removed again on teardown (in reverse order).
    """
    # Setup.
    def principal() -> str:
        # On enterprise clusters reservations must be made as the
        # authenticated user; otherwise any principal string will do.
        is_enterprise = os.getenv('DCOS_ENTERPRISE', 'false').lower() == 'true'
        if is_enterprise:
            uid = dcos_api_session.auth_user.uid  # type: str
            return uid
        else:
            return 'reserved_disk_fixture_principal'
    dcos_api_session.principal = principal()
    # Keep track of all reservations we created so we can clean them up on
    # teardown or on error paths.
    reserved_resources = []
    try:
        # Get the ID of a private agent. We assume that resources on that
        # agent are unreserved.
        r = dcos_api_session.get('/mesos/slaves')
        assert r.status_code == 200, r.text
        response = json.loads(r.text)
        slaves = [
            slave['id'] for slave in response['slaves']
            if 'public_ip' not in slave['attributes']]
        assert slaves, 'Could not find any private agents'
        slave_id = slaves[0]
        # Create a unique role to reserve the disk to. The test framework should
        # register in this role.
        dcos_api_session.role = 'disk-' + uuid.uuid4().hex
        resources1 = {
            'agent_id': {'value': slave_id},
            'resources': [
                {
                    'type': 'SCALAR',
                    'name': 'disk',
                    'reservations': [
                        {
                            'type': 'DYNAMIC',
                            'role': dcos_api_session.role,
                            'principal': dcos_api_session.principal,
                        }
                    ],
                    'scalar': {'value': 32}
                }
            ]
        }
        request = {'type': 'RESERVE_RESOURCES', 'reserve_resources': resources1}
        r = dcos_api_session.post('/mesos/api/v1', json=request)
        # The master acknowledges reservation operations with 202 Accepted.
        assert r.status_code == 202, r.text
        reserved_resources.append(resources1)
        # Reserve the remaining agent resources for another role. We let the Mesos
        # master perform the calculation of the unreserved resources on the agent
        # which requires another query.
        r = dcos_api_session.get('/mesos/slaves')
        assert r.status_code == 200, r.text
        response = json.loads(r.text)
        unreserved = [
            slave['unreserved_resources_full'] for slave in response['slaves']
            if slave['id'] == slave_id]
        assert len(unreserved) == 1
        unreserved = unreserved[0]
        another_role = uuid.uuid4().hex
        # Rewrite each still-unreserved resource into a dynamic reservation
        # for the throwaway role.
        for resource in unreserved:
            resource['reservations'] = [
                {
                    'type': 'DYNAMIC',
                    'role': another_role,
                    'principal': dcos_api_session.principal,
                }
            ]
            resource.pop('role')
        resources2 = copy.deepcopy(resources1)
        resources2['resources'] = unreserved
        request = {'type': 'RESERVE_RESOURCES', 'reserve_resources': resources2}
        r = dcos_api_session.post('/mesos/api/v1', json=request)
        assert r.status_code == 202, r.text
        reserved_resources.append(resources2)
        yield dcos_api_session
    finally:
        # Teardown.
        #
        # Remove all reservations this fixture has created in reverse order.
        for resources in reversed(reserved_resources):
            request = {
                'type': 'UNRESERVE_RESOURCES',
                'unreserve_resources': resources}
            r = dcos_api_session.post('/mesos/api/v1', json=request)
            assert r.status_code == 202, r.text
def test_executor_uses_domain_socket(dcos_api_session: DcosApiSession) -> None:
    """
    This test validates that by default executors connect with the agent over domain sockets.

    The test launches a Marathon app with a health check which validates that
    the `mesos-executor` process of the task is connected to the agent socket.

    We do not validate that any actual data is passed over the socket
    connection. Once the app is healthy the test has succeeded.
    """
    expanded_config = test_helpers.get_expanded_config()
    if expanded_config.get('security') == 'strict':
        pytest.skip('Cannot detect domain sockets with EE strict mode enabled')

    task_id = 'domain-socket-{}'.format(uuid.uuid4())
    # The health-check script runs inside the task's container; it asserts
    # that pid 2 is the mesos-executor and that exactly two of its unix
    # socket peers belong to the agent's executors socket.
    check = textwrap.dedent('''\
        #!/bin/bash
        set -o nounset -o pipefail
        set -ex

        export PATH=/usr/sbin/:$PATH

        MESOS_EXECUTORS_SOCK="/var/run/mesos/mesos-executors.sock"

        # In the container's PID namespace the containerizer will have pid=1. Since the
        # first process it launches is the executor, the executor has pid=2.
        EXECUTOR_PID=2
        grep -q '^mesos-executor' /proc/$EXECUTOR_PID/cmdline || exit 2

        declare -i socket_connections
        socket_connections=0
        for peer in $(ss -xp | grep "pid=$EXECUTOR_PID" | awk '{print $8}'); do
            # We cannot see the mesos-agent process, but can make sure
            # the executor's socket is related to the agent socket.
            if ss -xp | grep "$peer" | grep -q "$MESOS_EXECUTORS_SOCK"; then
                ((socket_connections+=1))
            fi
        done

        if [ $socket_connections -ne 2 ]; then
            echo "expected 2 socket connections, got $socket_connections"
            exit 1
        fi''')

    # Minimal UCR app that idles; the COMMAND health check above does the
    # actual validation.
    app = {
        "id": task_id,
        "cpus": 0.1,
        "mem": 32,
        "disk": 32,
        "cmd": "sleep 10000",
        "container": {
            "type": "MESOS",
            "volumes": []
        },
        "instances": 1,
        "healthChecks": [{
            "gracePeriodSeconds": 5,
            "intervalSeconds": 60,
            "maxConsecutiveFailures": 1,
            "timeoutSeconds": 20,
            "delaySeconds": 1,
            "protocol": "COMMAND",
            "command": {
                "value": check
            }
        }],
    }
    with dcos_api_session.marathon.deploy_and_cleanup(app,
                                                      check_health=False,
                                                      timeout=60):
        # Retry collecting the health check result since its availability might be delayed.
        @retrying.retry(wait_fixed=1000, stop_max_delay=10000)
        def assert_app_is_healthy() -> None:
            assert dcos_api_session.marathon.check_app_instances(
                app_id=task_id,
                app_instances=1,
                check_health=True,
                ignore_failed_tasks=False)
        assert_app_is_healthy()
| 43.882883 | 114 | 0.627961 |
068c50c300f3e0a0ecf4595aa4e36f2dd8cea7d5 | 5,860 | py | Python | Python3/Tornado/apps/pg/PG_Collection/src/api/handlers/handler_base.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 24 | 2018-11-01T03:36:43.000Z | 2022-03-28T08:20:30.000Z | Python3/Tornado/apps/pg/PG_Deposit/src/api/handlers/handler_base.py | songning4/QBlockChainNotes | d65ede073f5a20f728f41cc6850409693820cdb1 | [
"MIT"
] | 57 | 2019-12-04T08:26:47.000Z | 2022-03-08T07:35:15.000Z | Python3/Tornado/apps/pg/PG_Collection/src/api/handlers/handler_base.py | youngqqcn/QBlockChainNotes | 85122049024dc5555705bf016312491a51966621 | [
"MIT"
] | 11 | 2019-01-04T08:41:57.000Z | 2022-03-16T03:51:36.000Z | import redis
import tornado.web
import tornado.httpserver
import time
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from tornado.httpclient import HTTPError
import json
import time
from ed25519 import SigningKey, VerifyingKey
from src.config.constant import REDIS_HOST, REDIS_PORT, REDIS_API_KEY_DB_NAME, MYSQL_CONNECT_INFO
from src.model.model import Project
class BaseHandler(tornado.web.RequestHandler):
    """Common base for the API handlers.

    Provides permissive CORS headers, uniform success/error JSON
    envelopes, and helpers around the api_key -> (verify_key, pro_id)
    cache kept in Redis.
    """
    # Request headers clients are allowed to send in CORS requests.
    AllowHeaders = set(['X-Requested-With', 'PG_API_KEY', 'PG_API_TIMESTAMP', 'PG_API_SIGNATURE'])
    def __init__(self, application, request, **kwargs):
        super().__init__(application=application, request=request, **kwargs)
    # def prepare(self):
    #     pass
    def set_default_headers(self):
        # Allow any origin and the custom auth headers used by apiauth.
        self.set_header("Access-Control-Allow-Origin", "*")
        self.set_header("Access-Control-Allow-Methods", "GET, POST")
        self.set_header("Access-Control-Allow-Headers", ",".join(hd for hd in BaseHandler.AllowHeaders))
    def options(self):
        # CORS preflight: answer 204 with the default headers, no body.
        self.set_status(204)
        self.finish()
    @staticmethod
    def success_ret_with_data(data):
        """Build the standard success envelope around *data* with a
        millisecond timestamp."""
        t = int(time.time() * 1000)
        ret = {
            "err_code": 0,
            "err_msg": None,
            "timestamp": t,
            "data" : data
        }
        return ret
    @staticmethod
    def error_ret_with_data(err_code : int, err_msg : str, data):
        """Build the standard error envelope with *err_code*, *err_msg*
        and optional *data*, plus a millisecond timestamp."""
        t = int(time.time() * 1000)
        ret = {
            "err_code": err_code ,
            "err_msg" : err_msg,
            "timestamp": t,
            "data" : data
        }
        return ret
    @classmethod
    def loading_api_key_to_redis(cls):
        """Load every project's api_key from MySQL into Redis.

        Each key maps api_key -> "<client_verify_key>,<pro_id>" so that
        get_verify_key() can resolve both in one lookup.
        """
        engine = create_engine(MYSQL_CONNECT_INFO,
                               max_overflow=0,
                               pool_size=5)
        SessionCls = sessionmaker(bind=engine,
                                  autoflush=False, # on autoflush semantics see https://www.jianshu.com/p/b219c3dd4d1e
                                  autocommit=True # commit automatically
                                  )
        session = SessionCls()
        rds = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_API_KEY_DB_NAME, decode_responses=True)
        projects = session.query(Project).all()
        if not (projects is not None and len(projects) > 0):
            print('not found any api_key from database to loading into redis')
        else:
            print(f'loading {len(projects)} api_key into redis ')
            for pro in projects:
                assert isinstance(pro, Project), 'pro is not Project instance'
                pro_id = pro.pro_id
                api_key = pro.api_key
                verify_key = pro.client_verify_key
                # Value format consumed by apiauth: "<verify_key>,<pro_id>".
                rds.set(api_key, verify_key + ',' + str(pro_id))
            print(f'successfully loaded {len(projects)} api_key into redis ')
        pass
    def get_argument_from_json(self, str):
        # NOTE(review): the parameter shadows the builtin `str`, and a
        # missing key yields False (not None) -- callers appear to rely on
        # a falsy sentinel; confirm before changing.
        str2dict = json.loads(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False
    def get_verify_key(self, api_key : str):
        """Return the cached "<verify_key>,<pro_id>" string for *api_key*,
        or None when the key is unknown."""
        rds = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_API_KEY_DB_NAME, decode_responses=True)
        value = rds.get(name=api_key)
        return value
def sign_msg(sign_key : str, msg : bytes) -> any:
    """Sign *msg* with the hex-encoded Ed25519 signing key *sign_key* and
    return the hex-encoded signature."""
    signing_key = SigningKey(sk_s=sign_key.encode('latin1'), prefix='', encoding='hex')
    return signing_key.sign(msg=msg, prefix='', encoding='hex')
def verify_sig(verify_key : str, sig : str, msg : bytes) -> bool:
    """Return True iff *sig* is a valid hex-encoded Ed25519 signature of
    *msg* under the hex-encoded *verify_key*; any verification failure
    yields False (a malformed key still raises from the constructor, as
    before)."""
    key = VerifyingKey(vk_s=verify_key, prefix='', encoding='hex')
    try:
        key.verify(sig=sig, msg=msg, prefix='', encoding='hex')
        return True
    except Exception:
        return False
def apiauth(fun):
    """Decorator that authenticates a signed API request before invoking
    the wrapped handler method.

    The request must carry the PG_API_SIGNATURE, PG_API_TIMESTAMP and
    PG_API_KEY headers. The timestamp must be within two minutes of
    server time, the api_key must resolve to a registered project in
    Redis, the JSON body's pro_id must match that project, and the
    Ed25519 signature over "timestamp|api_name|compact-sorted-json-body"
    must verify against the project's verify key.

    On any failure a 403 JSON envelope with a deliberately generic
    message is written -- error details must not be exposed to callers.
    """
    def wrapper(self, *args, **kwargs):
        try:
            if not ('PG_API_SIGNATURE' in self.request.headers
                    and 'PG_API_TIMESTAMP' in self.request.headers
                    and 'PG_API_KEY' in self.request.headers):
                raise Exception()
            sig = self.request.headers['PG_API_SIGNATURE']
            timestamp = self.request.headers['PG_API_TIMESTAMP']
            api_key = self.request.headers['PG_API_KEY']
            # Shape checks: 128 hex chars for the signature, 64 for the key.
            if not (len(sig) == 128 and str(sig).isalnum() and str(timestamp).isnumeric()
                    and len(api_key) == 64 and str(api_key).isalnum()):
                raise Exception()
            # Absolute value: reject timestamps too far in either direction.
            if abs(int(time.time() * 1000) - int(timestamp)) > 2 * 60 * 1000:
                raise Exception('timestamp expired')
            value = self.get_verify_key(api_key)
            if value is None:
                raise Exception()
            items = value.split(',')
            verify_key = items[0]
            pro_id = items[1]
            msg = json.loads(self.request.body)
            # Check that pro_id in the body matches the api_key's project.
            if not ('pro_id' in msg and msg['pro_id'] == int(pro_id)):
                raise Exception()
            # Canonical form signed by the client: compact, key-sorted JSON.
            strdata = json.dumps(msg, separators=(',', ':'), sort_keys=True)
            api_name = self.request.uri[self.request.uri.rfind('/') + 1:]
            param = '|'.join([timestamp, api_name, strdata])
            # TODO: replace this debug print with proper logging; it dumps
            # the full signed payload to stdout.
            print(param)
            msg = param.encode('utf8')
            if not verify_sig(verify_key=verify_key, sig=sig, msg=msg):
                raise Exception()
            return fun(self, *args, **kwargs)
        except Exception:
            # Deliberately generic: per the contract above, authentication
            # failures must not leak which check failed (the previous code
            # interpolated the exception text into the response).
            self.write(self.error_ret_with_data(err_code=403, err_msg='invalid auth', data=None))
            return
    return wrapper
if __name__ == '__main__':
    # Intentionally empty: this module only provides BaseHandler and the
    # auth helpers and is not meant to be run directly.
    pass
| 32.021858 | 109 | 0.568089 |
cb44490b58c7f876ed265f982392278b292f2cec | 2,176 | py | Python | src/models/fmow.py | YugeTen/fish | 333efa24572d99da0a4107ab9cc4af93a915d2a9 | [
"MIT"
] | 71 | 2021-07-12T12:43:57.000Z | 2022-03-29T04:40:22.000Z | src/models/fmow.py | YugeTen/fish | 333efa24572d99da0a4107ab9cc4af93a915d2a9 | [
"MIT"
] | 2 | 2021-07-15T06:48:33.000Z | 2022-02-23T13:34:15.000Z | src/models/fmow.py | YugeTen/fish | 333efa24572d99da0a4107ab9cc4af93a915d2a9 | [
"MIT"
] | 6 | 2021-07-15T11:13:14.000Z | 2022-03-21T16:34:51.000Z | import os
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.models import densenet121
from wilds.common.data_loaders import get_eval_loader
from wilds.datasets.fmow_dataset import FMoWDataset
from .datasets import FMoW_Batched_Dataset
# Input image height in pixels; defined for reference (not referenced in
# this module's visible code).
IMG_HEIGHT = 224
# Number of output classes for the FMoW classifier head.
NUM_CLASSES = 62
class Model(nn.Module):
    """DenseNet-121 based classifier for the WILDS FMoW benchmark.

    Uses the ImageNet-pretrained DenseNet-121 feature extractor followed
    by a single linear head over NUM_CLASSES outputs.
    """
    def __init__(self, args, weights):
        """Build the network.

        :param args: experiment arguments (unused here; kept for a shared
            constructor signature with sibling models -- TODO confirm).
        :param weights: optional state dict; when given, the model is
            initialized from a deep copy of it.
        """
        super(Model, self).__init__()
        self.num_classes = NUM_CLASSES
        # Pretrained convolutional trunk; `.features` drops DenseNet's own
        # classifier so the task-specific head below can be attached.
        self.enc = densenet121(pretrained=True).features
        # The head expects 1024 inputs (DenseNet-121's final channel count).
        self.classifier = nn.Linear(1024, self.num_classes)
        if weights is not None:
            self.load_state_dict(deepcopy(weights))
    def reset_weights(self, weights):
        # Restore all parameters from a deep copy of the given state dict.
        self.load_state_dict(deepcopy(weights))
    @staticmethod
    def getDataLoaders(args, device):
        """Construct the train loader and the per-split eval loaders.

        Returns (train_loader, {split_name: eval_loader}) built from the
        WILDS FMoW dataset rooted at `<args.data_dir>/wilds` (downloaded
        on demand).
        """
        dataset = FMoWDataset(root_dir=os.path.join(args.data_dir, 'wilds'), download=True)
        # get all train data
        transform = transforms.Compose([
            transforms.ToTensor(),
            # ImageNet channel statistics, matching the pretrained trunk.
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
        ])
        train_sets = FMoW_Batched_Dataset(dataset, 'train', args.batch_size, transform)
        datasets = {}
        for split in dataset.split_dict:
            if split != 'train':
                datasets[split] = dataset.get_subset(split, transform=transform)
        # get the loaders
        kwargs = {'num_workers': 4, 'pin_memory': True, 'drop_last': False} \
            if device.type == "cuda" else {}
        train_loaders = DataLoader(train_sets, batch_size=args.batch_size, shuffle=True, **kwargs)
        tv_loaders = {}
        # NOTE(review): the loop variable shadows the outer `dataset`; it
        # works because `dataset` is not used afterwards.
        for split, dataset in datasets.items():
            tv_loaders[split] = get_eval_loader('standard', dataset, batch_size=256)
        return train_loaders, tv_loaders
    def forward(self, x):
        """Return class logits for a batch of images.

        Assumes x is a (batch, 3, H, W) float tensor normalized as in
        getDataLoaders -- TODO confirm against callers.
        """
        features = self.enc(x)
        out = F.relu(features, inplace=True)
        # Global average pool to (batch, 1024, 1, 1), then flatten.
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out
| 35.672131 | 98 | 0.654412 |
8163ac516c0c652e794d28faf6be48a577853ccc | 1,124 | py | Python | 06_P19.py | wiphoo/computer_programing_101 | 59013d774f6ec6e019829d73ce163cf5766c6a48 | [
"MIT"
] | null | null | null | 06_P19.py | wiphoo/computer_programing_101 | 59013d774f6ec6e019829d73ce163cf5766c6a48 | [
"MIT"
] | null | null | null | 06_P19.py | wiphoo/computer_programing_101 | 59013d774f6ec6e019829d73ce163cf5766c6a48 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Read the input file name from stdin.
inputFileName = input()

# read input file: each non-empty line is "<fruit> <person>"
with open( inputFileName, 'r' ) as inputFile:
    inputList = [ line.split() for line in inputFile ]

# Group people by their favorite fruit, preserving first-seen fruit
# order. setdefault replaces the previous get()/`== None` dance.
fruitNameToPersonsListDict = {}
for fruitName, person in inputList:
    fruitNameToPersonsListDict.setdefault( fruitName, [] ).append( person )

# The most favorite fruit is the one with the most people; max() keeps
# the first maximum, matching the original strict-greater-than scan
# (stays None for empty input).
maxFaveriteFruitName = None
if fruitNameToPersonsListDict:
    maxFaveriteFruitName = max(
        fruitNameToPersonsListDict,
        key=lambda name: len( fruitNameToPersonsListDict[name] ) )

# Render "[['fruit', [persons...]], ...]" exactly as before, but with a
# single join instead of quadratic string concatenation.
outputStr = '[' + ', '.join(
    '[\'{}\', {}]'.format( fruitName, personsList )
    for fruitName, personsList in fruitNameToPersonsListDict.items() ) + ']'

print( ' fruitNameToPersonsListDict = {}'.format( fruitNameToPersonsListDict ) )
print( '{}'.format( outputStr ) )
print( 'The most favorite fruit is a {}'.format( maxFaveriteFruitName ) )
34eae15425bebf2fe815704d4ad028e14e88a637 | 2,996 | py | Python | lib/rucio/tests/test_daemons.py | llwang00/rucio | f49c5c9599e147823110dc6da22a0bc33a881f8e | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_daemons.py | llwang00/rucio | f49c5c9599e147823110dc6da22a0bc33a881f8e | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_daemons.py | llwang00/rucio | f49c5c9599e147823110dc6da22a0bc33a881f8e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
#
# PY3K COMPATIBLE
import sys
import pytest
import rucio.db.sqla.util
from rucio.common import exception
from rucio.daemons.abacus import account, collection_replica, rse
from rucio.daemons.atropos import atropos
from rucio.daemons.automatix import automatix
from rucio.daemons.badreplicas import minos, minos_temporary_expiration, necromancer
from rucio.daemons.c3po import c3po
from rucio.daemons.cache import consumer
from rucio.daemons.conveyor import finisher, fts_throttler, poller, poller_latest, receiver, stager, submitter, throttler
from rucio.daemons.follower import follower
from rucio.daemons.hermes import hermes, hermes2
from rucio.daemons.judge import cleaner, evaluator, injector, repairer
from rucio.daemons.oauthmanager import oauthmanager
from rucio.daemons.reaper import dark_reaper, light_reaper, reaper, reaper2
from rucio.daemons.replicarecoverer import suspicious_replica_recoverer
from rucio.daemons.sonar.distribution import distribution_daemon
from rucio.daemons.tracer import kronos
from rucio.daemons.transmogrifier import transmogrifier
from rucio.daemons.undertaker import undertaker
if sys.version_info >= (3, 3):
from unittest import mock
else:
import mock
# Every daemon module under test. Each exposes a run() entry point that is
# expected to refuse to start when the database schema is outdated.
DAEMONS = [
    account,
    collection_replica,
    rse,
    atropos,
    automatix,
    minos,
    minos_temporary_expiration,
    necromancer,
    c3po,
    consumer,
    finisher,
    fts_throttler,
    poller,
    poller_latest,
    receiver,
    stager,
    submitter,
    throttler,
    follower,
    hermes,
    hermes2,
    cleaner,
    evaluator,
    injector,
    repairer,
    oauthmanager,
    dark_reaper,
    light_reaper,
    reaper,
    reaper2,
    suspicious_replica_recoverer,
    distribution_daemon,
    # sonar_v3_dev_daemon, -- lib/rucio/common/config.py:55: NoSectionError: No section: 'sonar'
    kronos,
    transmogrifier,
    undertaker,
]
@pytest.mark.parametrize('daemon', DAEMONS)
@mock.patch('rucio.db.sqla.util.is_old_db')
def test_fail_on_old_database(mock_is_old_db, daemon):
    """ DAEMON: Test daemon failure on old database """
    # Force the schema check to report an outdated database for every caller.
    mock_is_old_db.return_value = True
    assert rucio.db.sqla.util.is_old_db() is True
    # The daemon must abort with DatabaseException before doing any work.
    with pytest.raises(exception.DatabaseException, match='Database was not updated, daemon won\'t start'):
        daemon.run()
    # > 1 because the sanity assert above already consumed one call; the daemon
    # itself must have performed at least one additional check.
    assert mock_is_old_db.call_count > 1
| 29.372549 | 121 | 0.757343 |
a65907ca4c57136bb93eecae920b809c4386c78f | 5,289 | py | Python | visual_cloze.py | linbojun/Deep-Learning-Final-Project | ce9233eac73b13338d19cd03405847d4e58e37d8 | [
"Apache-2.0"
] | null | null | null | visual_cloze.py | linbojun/Deep-Learning-Final-Project | ce9233eac73b13338d19cd03405847d4e58e37d8 | [
"Apache-2.0"
] | null | null | null | visual_cloze.py | linbojun/Deep-Learning-Final-Project | ce9233eac73b13338d19cd03405847d4e58e37d8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
from preprocessrecipeqa import *
class Model(tf.keras.Model):
    """Visual-cloze classifier: given recipe text, three question images and
    four candidate images, predict which candidate fills the blank (4 classes).
    """

    def __init__(self, l_embed):
        super(Model, self).__init__()
        # Shared CNN encoder applied to every image (question and choices).
        self.image_conv1 = tf.keras.layers.Conv2D(filters=4, kernel_size=3, strides=(2, 2), padding='valid')
        self.image_conv2 = tf.keras.layers.Conv2D(filters=8, kernel_size=3, strides=(2, 2), padding='valid')
        self.image_conv3 = tf.keras.layers.Conv2D(filters=16, kernel_size=3, strides=(2, 2), padding='valid')
        self.image_dense = tf.keras.layers.Dense(units=100, activation="relu")
        # Pre-trained word embedding layer supplied by the caller.
        self.word_embedding = l_embed
        self.text_dense1 = tf.keras.layers.Dense(units=100, activation="relu")
        self.text_dense2 = tf.keras.layers.Dense(units=100, activation="relu")
        self.qdense = tf.keras.layers.Dense(units=100, activation="relu")
        # Fuses one choice embedding with the text and question embeddings.
        self.text_image_embedding1 = tf.keras.layers.Dense(units=100, activation="relu")
        self.text_image_embedding2 = tf.keras.layers.Dense(units=100, activation="relu")
        # Classification head over the four fused choice representations.
        self.class_dense1 = tf.keras.layers.Dense(units=100, activation="relu")
        self.class_dense2 = tf.keras.layers.Dense(units=4)
        self.optimizer = tf.keras.optimizers.Adam(1e-3)

    def _encode_image(self, images):
        """Run the shared conv stack and flatten to (batch, features)."""
        token = self.image_conv3(self.image_conv2(self.image_conv1(images)))
        return tf.reshape(token, (token.shape[0], -1))

    def call(self, Xs):
        """Forward pass.

        Xs: list of recipe dicts with 'context' (steps with 'body' word ids),
        'question' (3 images) and 'choice_list' (4 candidate images).
        Returns logits of shape (batch, 4).
        """
        textlist = []
        choicelist = []
        qlist = []
        for recipe in Xs:
            # Mean-pool the word embeddings over all step bodies.
            text = []
            for step in recipe['context']:
                text += step['body']
            text = tf.reduce_mean(self.word_embedding(tf.convert_to_tensor(text)), axis=0)
            textlist.append(text)
            choicelist.append(recipe['choice_list'])
            qlist.append(recipe['question'])
        textlist = tf.convert_to_tensor(textlist)
        qlist = tf.convert_to_tensor(qlist)

        # Encode the three question images and fuse them into one vector.
        q_features = [self._encode_image(qlist[:, i]) for i in range(3)]
        qout = self.qdense(tf.concat(q_features, axis=-1))

        # Query (text) embedding.
        text_embedding = self.text_dense2(self.text_dense1(textlist))

        # Encode each of the four choices and fuse with text + question.
        choice_image = tf.convert_to_tensor(choicelist)
        fused = []
        for i in range(4):
            choice_embedding = self.image_dense(self._encode_image(choice_image[:, i]))
            token = tf.concat([choice_embedding, text_embedding, qout], axis=-1)
            fused.append(self.text_image_embedding2(self.text_image_embedding1(token)))

        token = self.class_dense1(tf.concat(fused, axis=-1))
        logit = self.class_dense2(token)
        return logit

    def loss(self, logits, labels):
        """Mean sparse softmax cross-entropy over the batch."""
        return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels, logits))
# Examples per batch for every split.
batch_size = 50
# preprocess() returns four iterators per split — one per RecipeQA task type.
(train_iter1, train_iter2, train_iter3, train_iter4), (test_iter1, test_iter2, test_iter3, test_iter4), (val_iter1, val_iter2, val_iter3, val_iter4), embedding_index, word_index = preprocess(batch_size)
# Embedding layer built from the pre-trained index; passed into Model below.
l_embed = get_embedding_layer(word_index, embedding_index)
def train(model, iter):
    """Run one pass over the batched iterator, updating model weights.

    `iter` yields (Xs, Ys) batches; note the parameter shadows the builtin.
    """
    for Xs, Ys in iter:
        with tf.GradientTape() as tape:
            logits = model(Xs)
            loss = model.loss(logits, Ys)
        print(loss)  # per-batch training loss
        gradients = tape.gradient(loss, model.trainable_variables)
        model.optimizer.apply_gradients(zip(gradients, model.trainable_variables))
def test(model, iter):
    """Print and return classification accuracy over the iterator."""
    n = 0  # total examples seen
    m = 0  # correct predictions
    for Xs, Ys in iter:
        n += len(Xs)
        probs = model(Xs)
        m += sum(np.argmax(probs,-1)==Ys)
    print(m/n)
    return m/n
# Train and evaluate on task type 2 (the visual-cloze split) only.
model = Model(l_embed)
train(model, train_iter2)
test(model, test_iter2)
| 40.684615 | 202 | 0.715825 |
50d91de586950bcb5f025bef188fbecc24f754ec | 24,247 | py | Python | lib/modes/mode_starcraft.py | okonomichiyaki/parrot.py | f8a0cee117be3ca80dd16ecb475dee02a2602ce4 | [
"MIT"
] | null | null | null | lib/modes/mode_starcraft.py | okonomichiyaki/parrot.py | f8a0cee117be3ca80dd16ecb475dee02a2602ce4 | [
"MIT"
] | null | null | null | lib/modes/mode_starcraft.py | okonomichiyaki/parrot.py | f8a0cee117be3ca80dd16ecb475dee02a2602ce4 | [
"MIT"
] | null | null | null | from lib.detection_strategies import *
import threading
import numpy as np
import pyautogui
from lib.input_manager import InputManager
from time import sleep
from subprocess import call
from lib.system_toggles import toggle_eyetracker, turn_on_sound, mute_sound, toggle_speechrec
from lib.pattern_detector import PatternDetector
from config.config import *
import os
import pythoncom
from lib.overlay_manipulation import update_overlay_image
from lib.grammar.chat_grammar import *
from lib.grammar.replay_grammar import *
class StarcraftMode:
    def __init__(self, modeSwitcher, is_testing=False):
        """Set up input handling, speech grammar and the voice-pattern detector.

        modeSwitcher: parent object used to switch between application modes.
        is_testing: forwarded to InputManager (presumably suppresses real
        key/mouse events in tests — TODO confirm against InputManager).
        """
        self.inputManager = InputManager(is_testing=is_testing)
        if( SPEECHREC_ENABLED == True ):
            # Speech grammar rules; chat/replay-quit rules toggle speech mode back off.
            self.grammar = Grammar("Starcraft")
            self.chatCommandRule = ChatCommandRule()
            self.chatCommandRule.set_callback( self.toggle_speech )
            self.replayCommandRule = ReplaySpeechCommand()
            self.toggleEyetracker = ToggleEyetrackerCommand()
            self.quitReplayCommand = QuitReplayCommand()
            self.quitReplayCommand.set_callback( self.toggle_speech )
            self.grammar.add_rule( self.chatCommandRule )
            self.grammar.add_rule( self.replayCommandRule )
            self.grammar.add_rule( self.toggleEyetracker )
            self.grammar.add_rule( self.quitReplayCommand )

        self.mode = "regular"
        self.modeSwitcher = modeSwitcher
        # Maps a named action to a sound-detection strategy with tuned
        # thresholds (percentage/intensity/power) and a per-action throttle.
        self.detector = PatternDetector({
            'select': {
                'strategy': 'continuous',
                'sound': 'sibilant_s',
                'percentage': 95,
                'intensity': 1400,
                'lowest_percentage': 50,
                'lowest_intensity': 1000,
                'throttle': 0
            },
            'rapidclick': {
                'strategy': 'continuous_power',
                'sound': 'thrill_thr',
                'percentage': 80,
                'lowest_percentage': 40,
                'power': 20000,
                'lowest_power': 15000,
                'throttle': 0
            },
            'click': {
                'strategy': 'frequency_threshold',
                'sound': 'click_alveolar',
                'percentage': 90,
                'above_frequency': 58,
                'power': 20000,
                'throttle': 0.2
            },
            'movement': {
                'strategy': 'rapid_power',
                'sound': 'sound_whistle',
                'percentage': 80,
                'power': 23000,
                'throttle': 0.3
            },
            'secondary_movement': {
                'strategy': 'rapid_power',
                'sound': 'sound_finger_snap',
                'percentage': 65,
                'power': 100000,
                'throttle': 0.3
            },
            'control': {
                'strategy': 'rapid_power',
                'sound': 'vowel_oh',
                'percentage': 80,
                'ratio': 0.01,
                'power': 20000,
                'throttle': 0.2
            },
            'secondary_control': {
                'strategy': 'combined_power',
                'sound': 'sibilant_z',
                'secondary_sound': 'fricative_v',
                'percentage': 90,
                'power': 20000,
                'ratio': 0,
                'throttle': 0.2
            },
            'shift': {
                'strategy': 'rapid_power',
                'sound': 'sibilant_sh',
                'percentage': 90,
                'power': 20000,
                'throttle': 0.4
            },
            'alt': {
                'strategy': 'rapid_power',
                'sound': 'fricative_v',
                'percentage': 95,
                'power': 20000,
                'throttle': 0.4
            },
            'camera': {
                'strategy': 'combined_power',
                'sound': 'vowel_y',
                'secondary_sound': 'vowel_u',
                'ratio': 4,
                'percentage': 85,
                'power': 15000,
                'throttle': 0.25
            },
            'camera_secondary': {
                'strategy': 'combined_power',
                'sound': 'vowel_eu',
                'secondary_sound': 'vowel_y',
                'percentage': 60,
                'ratio': 0,
                'power': 20000,
                'throttle': 0.18
            },
            'first_ability': {
                'strategy': 'combined',
                'sound': 'vowel_ow',
                'secondary_sound': 'vowel_u',
                'ratio': 0.3,
                'percentage': 90,
                'intensity': 1000,
                'throttle': 0.3
            },
            'second_ability': {
                'strategy': 'rapid_power',
                'sound': 'vowel_ae',
                'percentage': 90,
                'power': 25000,
                'throttle': 0
            },
            'third_ability': {
                'strategy': 'rapid_power',
                'sound': 'approximant_r',
                'percentage': 95,
                'power': 80000,
                'throttle': 0
            },
            'r': {
                'strategy': 'rapid_power',
                'sound': 'fricative_f',
                'percentage': 90,
                'power': 20000,
                'throttle': 0.4
            },
            'grid_ability': {
                'strategy': 'combined_continuous',
                'sound': 'vowel_ah',
                'secondary_sound': 'vowel_aa',
                'ratio': 0,
                'percentage': 90,
                'intensity': 1500,
                'lowest_percentage': 12,
                'lowest_intensity': 1000
            },
            'numbers': {
                'strategy': 'combined_power',
                'sound': 'vowel_iy',
                'secondary_sound': 'vowel_ih',
                'ratio': 4,
                'percentage': 80,
                'power': 25000,
                'throttle': 0.18
            },
            'numbers_secondary': {
                'strategy': 'combined_power',
                'sound': 'vowel_y',
                'secondary_sound': 'vowel_ih',
                'percentage': 60,
                'ratio': 0,
                'power': 20000,
                'throttle': 0.18
            },
            'menu': {
                'strategy': 'rapid_power',
                'sound': 'sound_call_bell',
                'percentage': 80,
                'power': 100000,
                'throttle': 0.5
            }
        })

        # Minimum hold time before a hold-to-repeat key fires again (seconds).
        self.KEY_DELAY_THROTTLE = 0.5

        # Mutable interaction state.
        self.pressed_keys = []
        self.should_follow = False
        self.should_drag = False           # left mouse button held for box select
        self.last_control_group = -1
        self.ability_selected = False      # an ability awaits a target click
        self.last_ability_selected = None  # 'first' | 'second' | 'third'
        self.ctrlKey = False
        self.shiftKey = False
        self.altKey = False
        self.hold_down_start_timer = 0
        self.hold_down_key_timer = 0
        self.last_key_timestamp = 0
        self.hold_key = ""
def toggle_speech( self, with_enter=True ):
self.release_hold_keys()
if( self.mode != "speech" ):
self.mode = "speech"
self.grammar.load()
if( with_enter ):
self.inputManager.press('enter')
else:
self.mode = "regular"
self.grammar.unload()
toggle_speechrec()
    # Used in case the speech recognition is triggered accidentally
    def reset_mode( self ):
        """Abort speech mode without sending the typed chat (presses ESC)."""
        if( self.mode == "speech" ):
            self.grammar.unload()
            toggle_speechrec()
        # Close the chat box / cancel whatever is pending, then return to regular mode.
        self.inputManager.press("esc")
        self.mode = "regular"
    def start( self ):
        """Enter this mode: mute system sound, enable eyetracking, reset overlay."""
        mute_sound()
        toggle_eyetracker()
        update_overlay_image( "default" )
    def cast_ability( self, ability ):
        """Press an ability key and mark that it now awaits a target click.

        Order matters: press_ability() calls release_hold_keys(), which clears
        ability_selected, so the flag must be set afterwards.
        """
        self.press_ability( ability )
        self.ability_selected = True
def hold_shift( self, shift ):
if( self.shiftKey != shift ):
if( shift == True ):
self.inputManager.keyDown('shift')
self.shiftKey = shift
self.update_overlay()
else:
self.inputManager.keyUp('shift')
self.shiftKey = shift
self.update_overlay()
def hold_alt( self, alt ):
if( self.altKey != alt ):
if( alt == True ):
self.altKey = alt
self.update_overlay()
self.detector.deactivate_for( 'first_ability', 0.1 )
self.detector.deactivate_for( 'second_ability', 0.1 )
else:
self.altKey = alt
self.update_overlay()
self.detector.deactivate_for( 'first_ability', 0.3 )
self.detector.deactivate_for( 'second_ability', 0.15 )
def hold_control( self, ctrlKey ):
if( self.ctrlKey != ctrlKey ):
if( ctrlKey == True ):
self.inputManager.keyDown('ctrl')
self.ctrlKey = ctrlKey
self.update_overlay()
else:
self.inputManager.keyUp('ctrl')
self.ctrlKey = ctrlKey
self.update_overlay()
def release_hold_keys( self ):
self.ability_selected = False
self.hold_control( False )
self.hold_shift( False )
self.hold_alt( False )
self.update_overlay()
    def handle_input( self, dataDicts ):
        """Per-frame dispatcher: feed audio data to the detector, handle the
        global menu sound, then route to speech or quick-command handling.

        Returns the detector's tick actions for logging/overlay purposes.
        """
        self.detector.tick( dataDicts )

        # Always allow switching between speech and regular mode
        if( self.detector.detect( "menu" ) ):
            self.release_hold_keys()
            # The 3x3 screen quadrant under the cursor picks the menu action.
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            if( quadrant3x3 == 9 ):
                self.press_ability( 'f10' )
            elif( quadrant3x3 == 2 ):
                self.release_hold_keys()
                self.toggle_speech( False )
            elif( quadrant3x3 == 1 ):
                self.reset_mode()
            elif( quadrant3x3 == 7 ):
                self.mode = "ignore_commands"
            elif( quadrant3x3 == 3 ):
                self.release_hold_keys()
                self.toggle_speech()
            else:
                self.press_ability( 'esc' )
            return self.detector.tickActions
        # Recognize speech commands in speech mode
        elif( self.mode == "speech" ):
            # Pump COM messages so the dragonfly/speech engine keeps processing.
            pythoncom.PumpWaitingMessages()
            return self.detector.tickActions
        # Regular quick command mode
        elif( self.mode == "regular" ):
            self.handle_quick_commands( dataDicts )
            return self.detector.tickActions
    def handle_quick_commands( self, dataDicts ):
        """Map detected voice patterns to game inputs for one frame.

        NOTE(review): the branch ordering below is deliberate (earlier sounds
        pre-empt later ones) — do not reorder.
        """
        # Early escape for performance
        if( self.detector.detect_silence() ):
            self.drag_mouse( False )
            self.hold_down_start_timer = 0
            return

        # Below this intensity, hold-to-repeat timing is reset but detection continues.
        if( self.detector.detect_below_threshold( 800 ) ):
            self.hold_down_start_timer = 0

        # Selecting units
        rapidclick = self.detector.detect("rapidclick")
        selecting = self.detector.detect( "select" )
        if( self.ability_selected and selecting ):
            # Confirm the pending ability on the current cursor position.
            self.inputManager.click(button='left')
            self.ability_selected = False

            # Clear the throttles for abilities
            self.detector.clear_throttle('camera')
            self.detector.clear_throttle('first_ability')
            self.detector.clear_throttle('second_ability')
            self.detector.clear_throttle('third_ability')
            self.detector.deactivate_for('select', 0.3)
        elif( self.ability_selected and rapidclick ):
            # Rapid-fire the grid key (z/x/c) matching the last selected ability.
            if( self.last_ability_selected == 'first' ):
                self.cast_ability_throttled('z', 0.05)
            elif( self.last_ability_selected == 'second' ):
                self.cast_ability_throttled('x', 0.05)
            elif( self.last_ability_selected == 'third' ):
                self.cast_ability_throttled('c', 0.05)

            # Prevent some misclassifying errors when using the thr sound
            self.detector.deactivate_for( 'control', 0.3 )
            self.detector.deactivate_for( 'click', 0.1 )
            self.detector.deactivate_for( 'grid_ability', 0.3 )
        else:
            # Hold/release the left button for box selection.
            self.drag_mouse( selecting )

        ## Press Grid ability
        if( self.detector.detect("grid_ability") and not rapidclick ):
            # A 4x3 screen grid mirrors the in-game command card layout.
            quadrant4x3 = self.detector.detect_mouse_quadrant( 4, 3 )
            if( time.time() - self.hold_down_start_timer > self.KEY_DELAY_THROTTLE ):
                self.use_ability_throttled( quadrant4x3, 0.03 )
                self.release_hold_keys()
                self.hold_shift( False )
            if( self.hold_down_start_timer == 0 ):
                self.hold_down_start_timer = time.time()
            self.detector.deactivate_for( 'control', 0.15 )
            if( selecting ):
                self.ability_selected = False
                self.hold_control( False )
        elif( self.detector.detect( "click" ) ):
            # Cast selected ability or Ctrl+click
            if( self.detect_command_area() or self.ability_selected == True or self.ctrlKey == True or self.altKey == True or ( self.shiftKey and self.detect_selection_tray() ) ):
                self.inputManager.click(button='left')
            else:
                self.inputManager.click(button='right')
            self.detector.deactivate_for( 'grid_ability', 0.2 )
            self.detector.deactivate_for( 'secondary_movement', 0.2 )

            # Release the held keys - except when shift clicking units in the selection tray ( for easy removing from the unit group )
            if( not( self.shiftKey and self.detect_selection_tray() ) ):
                self.release_hold_keys()
        elif( ( self.detector.is_throttled('camera') or self.detector.is_throttled("first_ability") or self.detector.is_throttled('second_ability') ) and self.detector.detect( "rapidclick" ) ):
            # Quick-confirm right after a camera jump or ability key.
            self.inputManager.click(button='left')
            self.ability_selected = False

            # Clear the throttles for abilities
            self.detector.clear_throttle('camera')
            self.detector.clear_throttle('first_ability')
            self.detector.clear_throttle('second_ability')
        # CTRL KEY holding
        elif( self.detector.detect( "control" ) ):
            self.hold_control( True )
        elif( self.detector.detect( "secondary_control" ) ):
            self.hold_control( True )
            self.detector.deactivate_for( 'select', 0.2 )
            self.detector.deactivate_for( 'camera', 0.2 )
        # SHIFT KEY holding / toggling
        elif( self.detector.detect( "shift" ) ):
            self.hold_shift( not self.shiftKey )
        # ALT KEY holding / toggling
        elif( self.detector.detect( "alt" ) ):
            self.hold_alt( not self.altKey )
        ## Primary movement options
        elif( self.detector.detect( "movement" ) ):
            self.cast_ability( 'a' )
            self.detector.deactivate_for( 'control', 0.4 )
            self.hold_shift( False )
        ## Secondary movement options
        elif( self.detector.detect( "secondary_movement" ) ):
            # Upper two thirds of the screen => hold position, bottom => patrol.
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            if( quadrant3x3 <= 6 ):
                self.cast_ability( 'h' )
            elif( quadrant3x3 > 6 ):
                self.press_ability( 'p' )
            self.hold_shift( False )
        ## Press Q
        elif( self.detector.detect( "first_ability" ) ):
            self.ability_selected = True
            self.detector.clear_throttle('rapidclick')
            self.last_ability_selected = 'first'
            self.inputManager.press( 'q' )
        ## Press W
        elif( self.detector.detect( "second_ability") ):
            self.ability_selected = True
            self.detector.clear_throttle('rapidclick')
            self.last_ability_selected = 'second'
            if( time.time() - self.hold_down_key_timer > self.KEY_DELAY_THROTTLE ):
                self.press_ability_throttled( 'w', 0.1 )
            if( self.hold_down_key_timer == 0 ):
                self.hold_down_key_timer = time.time()
        ## Press E
        elif( self.detector.detect( "third_ability") ):
            self.ability_selected = True
            self.detector.clear_throttle('rapidclick')
            self.last_ability_selected = 'third'
            if( time.time() - self.hold_down_key_timer > self.KEY_DELAY_THROTTLE ):
                self.press_ability_throttled( 'e', 0.1 )
            if( self.hold_down_key_timer == 0 ):
                self.hold_down_key_timer = time.time()
        ## Press R ( Burrow )
        elif( self.detector.detect( "r") ):
            self.last_ability_selected = 'third'
            self.inputManager.press( 'r' )
        ## Move the camera
        elif( self.detector.detect( "camera" ) ):
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            self.camera_movement( quadrant3x3 )
            self.hold_control( False )
            self.hold_shift( False )
            self.hold_alt( False )
        elif( self.ctrlKey == True and self.detector.is_throttled('control') and self.detector.detect("camera_secondary") ):
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            self.camera_movement( quadrant3x3 )
            self.hold_control( False )
            self.hold_shift( False )
            self.hold_alt( False )
            self.detector.deactivate_for('camera', 0.3)
            self.detector.deactivate_for('numbers', 0.3)
        ## Press control group ( only allow CTRL and SHIFT )
        elif( self.detector.detect( "numbers" ) ):
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            self.use_control_group( quadrant3x3 )
            self.hold_alt( False )
            self.hold_control( False )
            self.hold_shift( False )
            self.detector.deactivate_for('camera', 0.3)
        elif( ( ( self.ctrlKey == True and self.detector.is_throttled('secondary_control') ) or self.shiftKey == True ) and self.detector.detect("numbers_secondary") ):
            quadrant3x3 = self.detector.detect_mouse_quadrant( 3, 3 )
            self.use_control_group( quadrant3x3 )
            self.hold_alt( False )
            self.hold_control( False )
            self.hold_shift( False )
            self.detector.deactivate_for('camera', 0.3)
            self.detector.deactivate_for('numbers', 0.3)
        else:
            # No hold-to-repeat sound this frame: reset the repeat timer.
            self.hold_down_key_timer = 0

        return
def use_control_group( self, quadrant ):
if( quadrant == 1 ):
self.press_ability('1')
elif( quadrant == 2 ):
self.press_ability('2')
elif( quadrant == 3 ):
self.press_ability('3')
elif( quadrant == 4 ):
self.press_ability('4')
elif( quadrant == 5 ):
self.press_ability('5')
elif( quadrant == 6 ):
self.press_ability('6')
elif( quadrant == 7 ):
self.press_ability('7')
elif( quadrant == 8 ):
self.press_ability('8')
elif( quadrant == 9 ):
self.press_ability('9')
self.last_control_group = quadrant
def use_ability( self, quadrant ):
if( quadrant == 1 ):
self.press_ability('q')
elif( quadrant == 2 ):
self.press_ability('w')
elif( quadrant == 3 ):
self.press_ability('e')
elif( quadrant == 4 ):
self.press_ability('r')
elif( quadrant == 5 ):
self.press_ability('a')
elif( quadrant == 6 ):
self.press_ability('s')
elif( quadrant == 7 ):
self.press_ability('d')
elif( quadrant == 8 ):
self.press_ability('f')
elif( quadrant == 9 ):
self.press_ability('z')
elif( quadrant == 10 ):
self.press_ability('x')
elif( quadrant == 11 ):
self.press_ability('c')
elif( quadrant == 12 ):
self.press_ability('v')
def use_ability_throttled( self, quadrant, throttle ):
if( time.time() - self.last_key_timestamp > throttle ):
self.last_key_timestamp = time.time()
self.use_ability( quadrant )
    def press_ability( self, key ):
        """Press a key, record it as a tick action, then release all modifiers.

        Order matters: release_hold_keys() must run after the press so the
        key fires with the modifiers still held.
        """
        self.inputManager.press( key )
        self.detector.add_tick_action( key )
        self.release_hold_keys()
def cast_ability_throttled( self, key, throttle ):
if( time.time() - self.last_key_timestamp > throttle ):
self.last_key_timestamp = time.time()
self.cast_ability( key )
def press_ability_throttled( self, key, throttle ):
if( time.time() - self.last_key_timestamp > throttle ):
self.last_key_timestamp = time.time()
self.press_ability( key )
def camera_movement( self, quadrant ):
## Move camera to kerrigan when looking above the UI
if( quadrant == 1 ):
self.inputManager.press( "f1" )
elif( quadrant == 2 ):
self.inputManager.press( "f2" )
elif( quadrant == 3 ):
self.inputManager.press( "f3" )
elif( quadrant == 4 ):
self.inputManager.press( "f5" )
elif( quadrant == 5 ):
self.inputManager.press( "backspace" )
## Camera hotkeys
elif( quadrant == 6 ):
self.inputManager.press( "f6" )
## Camera hotkey
elif( quadrant == 7 ):
self.inputManager.press( "f7" )
## Camera hotkey
elif( quadrant == 8 ):
self.inputManager.press( "f8" )
## Camera hotkey
elif( quadrant == 9 ):
self.inputManager.press( "f9" )
    # Detect when the cursor is inside the command area
    def detect_command_area( self ):
        # Rectangle (x, y, width, height) of the bottom-right command card —
        # presumably screen pixels; confirm against PatternDetector.
        return self.detector.detect_inside_minimap( 1521, 815, 396, 266 )
    # Detect when the cursor is inside the unit selection tray (bottom-centre UI)
    def detect_selection_tray( self ):
        # Rectangle (x, y, width, height) — presumably screen pixels; confirm.
        return self.detector.detect_inside_minimap( 360, 865, 1000, 215 )
# Drag mouse for selection purposes
def drag_mouse( self, should_drag ):
if( self.should_drag != should_drag ):
if( should_drag == True ):
self.inputManager.mouseDown()
else:
self.inputManager.mouseUp()
self.should_drag = should_drag
def update_overlay( self ):
if( not( self.ctrlKey or self.shiftKey or self.altKey ) ):
update_overlay_image( "default" )
else:
modes = []
if( self.ctrlKey ):
modes.append( "ctrl" )
if( self.shiftKey ):
modes.append( "shift" )
if( self.altKey ):
modes.append( "alt" )
update_overlay_image( "mode-starcraft-%s" % ( "-".join( modes ) ) )
    def exit( self ):
        """Leave this mode: close speech mode if active, release modifiers,
        restore sound/overlay and toggle the eyetracker back."""
        if( self.mode == "speech" ):
            self.toggle_speech()
        self.release_hold_keys()
        self.mode = "regular"
        turn_on_sound()
        update_overlay_image( "default" )
        toggle_eyetracker()
| 37.767913 | 193 | 0.509011 |
4e2f4728f865e671877a9c34e0648bcffca07e3b | 16,000 | py | Python | discord/coffee.py | korea3500/Toy | 3577489e9d8556a24ebd2c13df81a317b8f83d92 | [
"Apache-2.0"
] | null | null | null | discord/coffee.py | korea3500/Toy | 3577489e9d8556a24ebd2c13df81a317b8f83d92 | [
"Apache-2.0"
] | null | null | null | discord/coffee.py | korea3500/Toy | 3577489e9d8556a24ebd2c13df81a317b8f83d92 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from bs4 import BeautifulSoup
import time, os
import requests
from datetime import datetime
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
import discord
import asyncio
import re
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
from youtube_dl import YoutubeDL
import logging
import time
import nest_asyncio
import glob
import numpy as np
# def item_croll(soup) :
# regexp = re.compile('[+\d]+\s[가-힣+\s]+')
# item = soup.select('span.d-block.text-grade5') # 유뮬
# item_p = []
# for idx, i in enumerate(item) :
# if regexp.findall(str(item[idx])) :
# item_p.append(",".join(regexp.findall(str(item[idx]))))
# item2 = soup.select('span.d-block.text-grade6') # 고대
# item2_p = []
# for idx, i in enumerate(item2) :
# if regexp.findall(str(item2[idx])) :
# item2_p.append(",".join(regexp.findall(str(item2[idx]))))
# return item_p + item2_p # 장착 유물 방어구
def item_croll(soup):
    """Return the six equipped item names from a loawa.com character page.

    Order: helmet, shoulder, chest, legs, gloves, weapon.
    """
    # equip-N slot indices on the page, in display order.
    slot_order = (1, 5, 2, 3, 4, 0)
    selector = '#tab-equips > div > a.char-equip.equip-%d > div > div > h4 > span'
    return tuple(soup.select_one(selector % slot).get_text() for slot in slot_order)
def stat_croll(soup):
    """Scrape the eight combat stats from a loawa.com character page.

    Returns display strings in the order:
    (공격력, 최대생명력, 제압, 신속, 인내, 숙련, 치명, 특화).
    """
    base = '#qul-box-1 > div.qul-box-1-wrap.pt-2.pb-2.ps-1.pe-1.rounded.shadow-sm.bg-theme-4.text-left > div > '

    def grab(suffix):
        return soup.select_one(base + suffix).get_text()

    공격력 = grab('div.row.pt-1.pb-0.ps-0.pe-0.m-0.mb-2 > div:nth-child(1) > span > span.text-grade5')
    최대생명력 = grab('div.row.pt-1.pb-0.ps-0.pe-0.m-0.mb-2 > div:nth-child(2) > span.text-grade5')
    # The six tuning stats sit in a 3x2 grid: rows 4-6, columns 1-2.
    치명, 특화, 제압, 신속, 인내, 숙련 = [
        grab('div:nth-child(%d) > div:nth-child(%d) > span > span.text-grade5' % (row, col))
        for row in (4, 5, 6)
        for col in (1, 2)
    ]
    return 공격력, 최대생명력, 제압, 신속, 인내, 숙련, 치명, 특화
def engraving(soup):
    """Return the character's engraving effects as one concatenated string."""
    raw = soup.select('#qul-box-3 > div > div > div')[0].get_text()
    # The page separates effects with '\n\xa0'; strip those separators.
    return ''.join(raw.split('\n\xa0'))
def kill_thread(thread):
    """Forcefully raise SystemExit inside a running thread.

    thread: a threading.Thread object. Uses the CPython C API via ctypes;
    if the exception was delivered to more than one thread state, the
    injection is rolled back.
    """
    import ctypes  # local import: ctypes is not imported at module level
    thread_id = thread.ident
    # PyThreadState_SetAsyncExc expects the thread id as a C long.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), ctypes.py_object(SystemExit))
    if res > 1:
        # More than one thread affected: undo by clearing the pending exception.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread_id), 0)
        print('Exception raise failure')
def URL_croll(keyword, driver):
    """Search YouTube for *keyword* and return (url, title) of the first video.

    driver: a selenium WebDriver. Raises IndexError when no video result is
    found (the caller catches IndexError to show an error message).
    """
    link = 'https://www.youtube.com/results?search_query=' + keyword
    driver.get(link)
    soup = BeautifulSoup(driver.page_source, 'html.parser')

    titles = []
    urls = []
    for anchor in soup.select('h3 > a'):
        href = anchor.get('href')
        # Guard against anchors without an href (the original crashed on None),
        # and keep only real video links.
        if href and href[:7] == '/watch?':
            titles.append(anchor.text)
            urls.append(href)

    # First result only — no need to build a DataFrame just to read element 0
    # (the old code used the deprecated DataFrame._get_value for this).
    return urls[0], titles[0]
def queue(id):  # 음악 재생용 큐
    """Start the next queued player for guild *id*, if any is pending."""
    pending = que[id]
    if pending:
        player = pending.pop(0)
        playerlist[id] = player
        del playlist[0]
        player.start()
nest_asyncio.apply()

now = time.localtime()
# NOTE: the original stored this in a variable named `datetime`, shadowing the
# imported datetime class for the rest of the module — renamed to avoid that.
date_str = "%04d%02d%02d" % (now.tm_year, now.tm_mon, now.tm_mday)

##### for logging
file = "./log/" + date_str + ".log"  # 로깅
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename=file, encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
#####


def _build_key_index(paths):
    """Map each file's trailing `_`-separated name token (alphanumerics only)
    to its path, as a DataFrame with 'path' and 'key' columns."""
    df = pd.DataFrame(paths, columns=['path'])
    for idx in range(len(df.index)):
        token = df._get_value(idx, col='path').split('.')[0].split('_')[-1]
        df._set_value(idx, 'key', ''.join(filter(str.isalnum, token)))
    return df


##### for emo
listdir = os.listdir("C:/Users/kyeongmin/Desktop/labs/private/discord/images/로스트아크_환영해요_로아콘!/image")
image_df = _build_key_index(listdir)
#####
##### for tip
tip_listdir = os.listdir("C:/Users/kyeongmin/Desktop/labs/private/discord/images/팁")
tip_df = _build_key_index(tip_listdir)
#####

app = commands.Bot(command_prefix="!")
@app.event
async def on_ready():
    """Log the bot's session identity and publish its presence."""
    print("다음으로 로그인합니다 : ")  # 봇 디스코드 세션 로그인
    print(app.user.name)
    print(app.user.id)
    print("==========")
    presence = discord.Game("!명령어")  # 봇 현재 상태
    await app.change_presence(status=discord.Status.online, activity=presence)  # 활동 상태 표시
@app.event
async def on_message(message) :
    # print(message.content)
    # Forward every message to the command processor so prefix commands still fire
    # (overriding on_message suppresses default command handling otherwise).
    await app.process_commands(message)
@app.command(pass_context = True)
async def 검색(ctx, char_id):
    """Look up a Lost Ark character on loawa.com and post gear, stats and
    engravings as a Discord embed."""
    try:
        loawa_url = 'https://loawa.com/char/'
        html = requests.get(loawa_url + char_id)
        soup = BeautifulSoup(html.text, 'html.parser')
    except discord.ext.commands.errors.MissingRequiredArgument:
        # NOTE(review): missing-argument errors are raised by the command
        # framework before this body runs, so this handler likely never fires.
        await ctx.send("검색할 모험가명을 입력해 주세요.")

    stat = []
    # Keep digits only. (Was '[\d+]', a character class that also matched a
    # literal '+' — now matches digits alone.)
    p = re.compile(r'\d')
    try:
        # stat_croll returns (공격력, 최대생명력, 제압, 신속, 인내, 숙련, 치명, 특화).
        for value in stat_croll(soup):
            stat.append(''.join(p.findall(value)))
        item_text = '\n'.join(item_croll(soup))
        embed = discord.Embed(title=char_id, url="https://loawa.com", description=item_text, color=0x369140)
        embed.set_author(name="로아와 검색", url=loawa_url + char_id)
        embed.add_field(name='\n==========================\n', value='\u200b', inline=False)
        embed.add_field(name="신속", value=stat[3], inline=True)
        embed.add_field(name="치명", value=stat[6], inline=True)
        # BUGFIX: 숙련 previously read stat[4], which is 인내 — the embed showed
        # the wrong stat. 숙련 is index 5 in the stat_croll return order.
        embed.add_field(name="숙련", value=stat[5], inline=True)
        embed.add_field(name="특화", value=stat[7], inline=True)
        embed.add_field(name="제압", value=stat[2], inline=True)
        embed.add_field(name="인내", value=stat[4], inline=True)
        embed.add_field(name="공격력", value=stat[0], inline=True)
        embed.add_field(name="최대생명력", value=stat[1], inline=True)
        embed.add_field(name='\n==========================\n', value='\u200b', inline=False)
        embed.add_field(name="각인 효과", value=engraving(soup), inline=False)
        embed.set_footer(text="")
        await ctx.send(embed=embed)
    except Exception:
        # Narrowed from a bare except; parse failures mean the page had no
        # character data for this name.
        await ctx.send('존재하지 않는 모험가입니다.')
@app.command(pass_context = True)
async def 재생(ctx, *, char) :
    """Search YouTube for *char*, join the caller's voice channel and stream
    the first result's audio; '끄기' disconnects instead."""
    print(char)
    try:
        # A fresh Chrome driver per invocation — heavyweight; used only for the search.
        driver = webdriver.Chrome(ChromeDriverManager().install())
        url, title = URL_croll(char, driver)
        print(url, title)
        url = 'https://www.youtube.com' + url
        url1 = re.match('(https?://)?(www\.)?((youtube\.(com))/watch\?v=([-\w]+)|youtu\.be/([-\w]+))', url) #정규 표현식을 사용해 url 검사
        if url1 == None:
            await ctx.send(embed=discord.Embed(title=":no_entry_sign: url을 제대로 입력해주세요.",colour = 0x2EFEF7, description = url1))
        else :
            if ctx.author.voice and ctx.author.voice.channel:
                channel = ctx.author.voice.channel
                await ctx.send(embed=discord.Embed(title = title, colour = 0x2EFEF7, description = url))
                await channel.connect()
            else:
                await ctx.send("초대할 음성채널에 먼저 입장해 주세요!")
    except IndexError:
        # URL_croll found no results.
        await ctx.send(embed=discord.Embed(title=":no_entry_sign: url을 입력해주세요.",colour = 0x2EFEF7))
    # Stream the audio via youtube-dl + FFmpeg into the connected voice client.
    YDL_OPTIONS = {'format': 'bestaudio', 'noplaylist':'True'}
    FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}
    voice = get(app.voice_clients, guild=ctx.guild) #discord.utils.get
    if not voice.is_playing():
        with YoutubeDL(YDL_OPTIONS) as ydl:
            info = ydl.extract_info(url, download=False)
            URL = info['formats'][0]['url']
        voice.play(discord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS))
        voice.is_playing()
    else:
        await ctx.send("노래 종료 후 사용해 주세요. !노래끄기")
    if char == "끄기" :
        await app.voice_clients[0].disconnect()
@app.command(pass_context = True)
async def 노래끄기(ctx) :
    """Stop playback by disconnecting the bot's first voice client."""
    # NOTE(review): raises IndexError when no voice client exists — confirm.
    await app.voice_clients[0].disconnect()
@app.command(pass_context = True)
async def 삭제(ctx, char) :
    """Bulk-delete `char` messages from the current channel.

    Needs the manage-messages permission; non-numeric input and missing
    permission are reported back to the channel.
    """
    # user = discord.utils.get(message.guild.members, name = name) NOT WORKING (TODO : get user information)
    try :
        await ctx.message.channel.purge(limit = int(char))
        # await ctx.send(user.mension + " " + char + "개의 메시지를 삭제했습니다.")
        await ctx.send(char + "개의 메시지를 삭제했습니다.")
    except ValueError:
        await ctx.send("해당 명령어는 !삭제 {줄}로 사용 가능합니다.")
    except discord.errors.Forbidden :
        await ctx.send("권한이 부족합니다. 관리자에게 문의하세요.")
@app.command(pass_context = True)
async def 이모티콘(ctx, char) :
    """Post the sticker image whose key in image_df matches `char`, deleting the invoking message."""
    # NOTE(review): machine-specific absolute path — consider making it configurable.
    dir_path = "C:/Users/kyeongmin/Desktop/labs/private/discord/images/로스트아크_환영해요_로아콘!/image/"
    if len(image_df[image_df['key'] == char]) > 0 :
        file_name = image_df[image_df['key'] == char]['path'].tolist()[0]
        # print(file_name)
        file = discord.File(dir_path + file_name, filename = char + ".png")
        await ctx.message.channel.purge(limit = int(1))  # remove the "!이모티콘 ..." invocation itself
        await ctx.send(file = file)
    else :
        await ctx.send("현재는 사용할 수 없는 이모티콘입니다.")
@app.command(pass_context = True)
async def 팁(ctx, char) :
    """Post the tip image whose key in tip_df matches `char`."""
    # NOTE(review): machine-specific absolute path — consider making it configurable.
    dir_path = "C:/Users/kyeongmin/Desktop/labs/private/discord/images/팁/"
    if len(tip_df[tip_df['key'] == char]) > 0 :
        file_name = tip_df[tip_df['key'] == char]['path'].tolist()[0]
        file = discord.File(dir_path + file_name, filename = char + ".png")
        await ctx.send(file = file)
    else :
        await ctx.send("존재하지 않는 이미지입니다.")
@app.command(pass_context = True)
async def 각인계산기(ctx) :
    """Reply with a link to the external (icepeng) engraving calculator."""
    url = 'https://loa.icepeng.com/imprinting'
    await ctx.send(embed=discord.Embed(title = "icepeng 각인계산기", colour = 0x2EFEF7, description = url))
@app.command(pass_context = True)
async def 명령어(ctx, char) :
    """Help command; the full guide link is sent by 명령어_error when no argument is given."""
    url = 'https://korea3500.notion.site/Coffee-5bb06765136f49e39f645b1f61e37651'  # NOTE(review): unused in this branch — confirm
    if "이모티콘" in char :
        await ctx.send("작업 중")
@app.command(pass_context= True)
async def 총쏘는진첩이(ctx) :
    """Easter-egg command: fixed reply."""
    await ctx.send("건슬링어 짱짱쌔요!")
@app.command(pass_context= True)
async def 디에스공(ctx) :
    """Easter-egg command: fixed reply."""
    await ctx.send("고백살인마")
@app.command(pass_context = True)
async def 계산기(ctx, char) :
    """Auction-split calculator: given a gold amount, show the break-even bid
    for a 4-member party and an 8-member raid."""
    quadro_optimal_value = 0  # NOTE(review): these zero initialisations are dead — overwritten below
    octa_optimal_value = 0
    discount_factor = 0.95  # 5% auction-house fee
    char = int(char)  # ValueError for non-numeric input is reported by 계산기_error
    quadro_optimal_value = np.round(char * discount_factor * 3/4, 2)  # your share: 3 of 4 members bid
    octa_optimal_value = np.round(char * discount_factor * 7/8, 2)    # your share: 7 of 8 members bid
    ### generating https://cog-creators.github.io/discord-embed-sandbox/ ###
    embed=discord.Embed(title="로아 경매 이득금 계산기", url="http://github.com/korea3500", description="입력한 금액 : " + str(char))
    embed.add_field(name="4인 파티 기준", value = quadro_optimal_value, inline=True)
    embed.add_field(name="8인 공격대 기준", value = octa_optimal_value, inline=True)
    await ctx.send(embed=embed)
@app.command(pass_context = True)
async def 사사게(ctx, *, char) :
    """Search the Inven 사사게 board for `char` and reply with the matching post titles/links."""
    # print(char)
    search_id = char.replace(' ', '+')  # url-encode spaces for the query string
    inven_url = 'https://www.inven.co.kr/board/lostark/5355?query=list&p=1&sterm=&name=subject&keyword='
    html = requests.get(inven_url + search_id)
    soup = BeautifulSoup(html.text, 'html.parser')
    result = soup.find_all("a", class_="subject-link")
    title_list = []
    url_list = []
    for idx, i in enumerate(result) :
        title = i.get_text()
        title_list.append(''.join(title.replace(' ', '').split('\n')))  # strip whitespace/newlines from the title
        url_list.append(result[idx]['href'])
    # print(title_list, url_list)
    if len(title_list) == 1 :
        # Only the pinned policy post matched: treat as "no results".
        embed=discord.Embed(title="인벤 사사게 검색", url= inven_url + search_id, description="검색한 키워드 : " + char)
        embed.add_field(name = "검색 결과가 없습니다!", value = url_list[0], inline=False)
        await ctx.send(embed=embed)
    else :
        embed=discord.Embed(title="인벤 사사게 검색", url= inven_url + search_id, description="검색한 키워드 : " + char)
        for idx in range(1, len(title_list)) : # start at 1 to skip the pinned board-policy post
            # print(title_list[idx])
            embed.add_field(name = title_list[idx], value = url_list[idx], inline=False)
        # embed.set_footer(text = "add footer")
        await ctx.send(embed=embed)
@명령어.error
async def 명령어_error(ctx, error) :
    """On any 명령어 failure (e.g. missing argument) send the Notion command guide."""
    url = 'https://korea3500.notion.site/Coffee-5bb06765136f49e39f645b1f61e37651'
    await ctx.send(embed=discord.Embed(title="Coffee guide",colour = 0x2EFEF7, description = url))
@삭제.error
async def 삭제_error(ctx, error) :
    """When !삭제 is called without an argument, explain its usage."""
    if isinstance(error, commands.MissingRequiredArgument) :
        await ctx.send("해당 명령어는 !삭제 {줄}로 사용 가능합니다.")
@이모티콘.error
async def 이모티콘_error(ctx, error):
    """When !이모티콘 is called without an argument, list the available sticker keys."""
    if isinstance(error, commands.MissingRequiredArgument):
        # await ctx.send("사용할 이모티콘의 이름을 입력 해주세요!")
        await ctx.send(', '.join(image_df['key']))
@팁.error
async def 팁_error(ctx, error):
    """When !팁 is called without an argument, list the available tip keys."""
    if isinstance(error, commands.MissingRequiredArgument):
        tip_listdir = os.listdir("C:/Users/kyeongmin/Desktop/labs/private/discord/images/팁")  # NOTE(review): unused — the reply below uses tip_df['key']
        await ctx.send("사용할 팁의 이름을 입력 해주세요! !팁 {팁}\n\n사용가능한 팁 : \n" + ', '.join(tip_df['key']))
@계산기.error
async def 계산기_error(ctx, error) :
    """Report missing or non-numeric input for !계산기."""
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("경매 금액을 입력해 주세요!")
    if isinstance(error, commands.CommandInvokeError) :
        # int(char) raised inside the command body (non-numeric amount).
        await ctx.send("경매 금액은 반드시 숫자여야 합니다.\nex) !계산기 6000")
@사사게.error
async def 사사게_error(ctx, error) :
    """Report a missing search keyword for !사사게."""
    if isinstance(error, commands.MissingRequiredArgument) :
        await ctx.send("검색 키워드를 입력해 주세요!\nex) !사사게 커피왜캐맛있음")
# @app.error
# async def app_error(ctx, error) :
# url = 'https://korea3500.notion.site/Coffee-5bb06765136f49e39f645b1f61e37651'
# await ctx.send("등록되지 않은 명령어입니다. !help 를 확인해주세요!")
# await ctx.send(embed=discord.Embed(title="Coffee guide",colour = 0x2EFEF7, description = url))
# NOTE(review): keep the real bot token out of source control (env var or config file).
token = '__TOKEN__'
app.run(token)  # blocking: starts the bot's event loop
846934cd7f7342ca1d67bfe5d5a216b411841700 | 4,178 | py | Python | questions/linked_list/questions.py | alexkeating/computer_science | e464300fd56d8998f3619a0acbc254321b8c18ce | [
"MIT"
] | null | null | null | questions/linked_list/questions.py | alexkeating/computer_science | e464300fd56d8998f3619a0acbc254321b8c18ce | [
"MIT"
] | null | null | null | questions/linked_list/questions.py | alexkeating/computer_science | e464300fd56d8998f3619a0acbc254321b8c18ce | [
"MIT"
] | null | null | null | """
This module contains funstions for each question in the linked list
section of cracking the coding interview.
"""
def remove_dups(linked_list):
    """
    Write code to remove duplicates from an unsorted linked list.
    How would you solve this problem if a temporary buffer is not
    allowed?
    O(N^2) - with no buffer
    O(N) with buffer
    """
    # Relies on the project's linked-list API: search(data) presumably returns
    # the FIRST node holding `data`, and delete(data) removes one matching
    # node — TODO confirm; which duplicate survives depends on those semantics.
    done = False
    list_ = linked_list
    node = list_.head
    while done is False:
        if not node:
            done = True
        else:
            new_node = list_.search(node.data)
            if new_node != node:
                # A different node holds the same data: drop one copy.
                list_.delete(node.data)
            node = node.get_next_node()
    return list_
def return_kth_to_last(linked_list, k=2):
    """
    Return the kth-to-last node of a singly linked list (k=1 is the last node).

    The original implementation always returned the second-to-last node, so
    the new `k` parameter defaults to 2 to preserve that behaviour.  As before,
    a single-node list returns its head, and None is returned for an empty or
    missing list.  Out-of-range k also yields None.
    """
    if not linked_list:
        return None
    # Single pass collecting node references, then index from the end.
    nodes = []
    node = linked_list.head
    while node:
        nodes.append(node)
        node = node.get_next_node()
    if not nodes:
        return None  # list object with an empty head
    if len(nodes) == 1:
        return nodes[0]  # original special case: a one-node list returns its head
    index = len(nodes) - k  # 0-based position of the kth-to-last node
    if index < 0 or index >= len(nodes):
        return None
    return nodes[index]
def delete_middle_node(node):
    """
    Implement an algorithm to delete a node in the middle (i.e., any
    node but the first and last node, not necessarily the exact middle) of
    a singly linked list, given only access to that node.

    Works by copying the successor's data into `node` and unlinking the
    successor.  Fix: the original dereferenced the successor unconditionally
    and raised AttributeError when handed the tail node; now the tail (and a
    missing node) are rejected by returning None, matching the existing
    None-input behaviour.
    """
    if not node:
        return None
    next_node = node.get_next_node()
    if not next_node:
        return None  # tail node cannot be removed with this technique
    node.data = next_node.data
    node.next_node = next_node.get_next_node()
def partition(value):
    """
    Write code to partition a linked list around a value x, such that all
    nodes less than x come before all nodes greater than or equal to x. If
    x is contained within a list, the values of x only need to be after the
    elements less than x. The partition element x can appear anywhere in the
    "right partition", it does not need to appear between the left and right
    partitions.
    """
    # Not implemented yet; note the signature takes only the pivot value and
    # not the list to partition — revisit when implementing.
    pass
def sum_lists(linked_list1, linked_list2):
    """
    You have two numbers represented by a linked list, where each node contains
    a single digit stored in reverse order (the 1's digit is at the head).
    Add the two numbers and return the sum.

    NOTE: per the exercise statement the sum should be returned as a linked
    list, but this implementation returns it as a plain integer.
    """
    def _to_int(linked_list):
        # Reconstruct the integer: head is the 1's place, each step is x10.
        total = 0
        place = 1
        current = linked_list.head
        while current is not None:
            total += current.data * place
            place *= 10
            current = current.get_next_node()
        return total

    return _to_int(linked_list1) + _to_int(linked_list2)
def palidrome(linked_list):
    """
    Implement a function to check if a linked list is a palindrome.
    """
    # Not implemented yet. (The misspelled name 'palidrome' is kept to avoid
    # breaking any existing callers.)
    pass
def intersection(linked_list1, linked_list2):
    """
    Given two (singly) linked lists, determine if the two lists intersect. Return
    the intersecting node. Note that the intersection is defined based on reference
    not value. That is, if the kth node of the first linked list is the same exact node
    (by reference) as the jth node of the second linked list, then they are intersecting.
    """
    # Not implemented yet.
    pass
def loop_detection(linked_list):
    """
    Given a circular linked list, return the node at the beginning of the loop,
    or False when the list is missing/empty or contains no loop.

    Circular linked list: a (corrupt) linked list in which a node's next
    pointer points to an earlier node, making a loop.

    Walks the list recording every visited node; the first node seen twice is
    the loop's entry point.  O(n) time, O(n) extra space.

    Fixes over the original: the dead `if not node` branch inside the loop is
    removed (the while condition already guarantees node is truthy), the
    dict-of-True is replaced by a set, and a loop-free list now returns False
    explicitly instead of falling off the end and returning None.
    """
    if not linked_list:
        return False
    seen = set()
    node = linked_list.head
    while node:
        if node in seen:
            return node  # first revisited node == start of the cycle
        seen.add(node)
        node = node.get_next_node()
    return False  # reached the tail: no cycle
| 28.616438 | 89 | 0.633317 |
2af607853686504317fefa9b5635a9a434769353 | 7,013 | py | Python | doc_ref/NLP/word2vec-nlp-tutorial/DeepLearningMovies/Word2Vec_AverageVectors.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | 19 | 2015-06-24T00:04:11.000Z | 2021-02-28T16:55:44.000Z | doc_ref/NLP/word2vec-nlp-tutorial/DeepLearningMovies/Word2Vec_AverageVectors.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | null | null | null | doc_ref/NLP/word2vec-nlp-tutorial/DeepLearningMovies/Word2Vec_AverageVectors.py | gtesei/fast-furious | b974e6b71be92ad8892864794af57631291ebac1 | [
"MIT"
] | 4 | 2016-10-11T17:36:44.000Z | 2019-08-16T10:03:04.000Z | #!/usr/bin/env python
# Author: Angela Chapman
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Parts 2 and 3 of the tutorial, which cover how to
# train a model using Word2Vec.
#
# *************************************** #
# ****** Read the two training sets and the test set
#
import pandas as pd
import os
from nltk.corpus import stopwords
import nltk.data
import logging
import numpy as np # Make sure that numpy is imported
from gensim.models import Word2Vec
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
# ****** Define functions to create average word vectors
#
def makeFeatureVec(words, model, num_features):
    """Average the word vectors of a single review (a list of word strings).

    words        -- tokenized review.
    model        -- trained Word2Vec model (pre-gensim-4 API: exposes
                    `index2word` and `model[word]` lookup — TODO confirm
                    against the installed gensim version).
    num_features -- dimensionality of the word vectors.

    Returns a float32 vector of length num_features.  Fix: the original
    divided unconditionally, producing a NaN vector (and a RuntimeWarning)
    when NO word of the review is in the model's vocabulary; now the zero
    vector is returned in that case.
    """
    featureVec = np.zeros((num_features,), dtype="float32")
    # index2word holds the model's vocabulary; a set makes membership O(1).
    index2word_set = set(model.index2word)
    nwords = 0
    # Accumulate the vectors of in-vocabulary words only.
    for word in words:
        if word in index2word_set:
            nwords += 1
            featureVec += model[word]
    if nwords > 0:
        featureVec /= nwords  # average; guard avoids 0/0 -> NaN
    return featureVec
def hash32(value):
    """Return hash(value) reduced to an unsigned 32-bit integer.

    Passed to Word2Vec as `hashfxn` so that hashing is reproducible within
    a 32-bit range.
    """
    return hash(value) % (1 << 32)
def getAvgFeatureVecs(reviews, model, num_features):
    """Compute the average word vector for every review.

    reviews      -- list of tokenized reviews (each a list of words).
    model        -- trained Word2Vec model handed on to makeFeatureVec.
    num_features -- word-vector dimensionality.

    Returns a (len(reviews), num_features) float32 array.  Fix: the original
    kept the counter as a float (`counter = 0.`) and used it directly as a
    numpy row index — float indexing raises IndexError on modern numpy (and
    the `%d` print formatted a float).  The counter is now the integer
    produced by enumerate.
    """
    # Preallocate the result array for speed.
    reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
    for counter, review in enumerate(reviews):
        # Progress message every 1000th review.
        if counter % 1000 == 0:
            print("Review %d of %d" % (counter, len(reviews)))
        reviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)
    return reviewFeatureVecs
def getCleanReviews(reviews):
    """Tokenize every entry of the "review" column into a stop-word-free word list.

    reviews -- DataFrame with a "review" column of raw review texts.
    Returns a list of word lists, one per review.
    """
    return [
        KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=True)
        for review in reviews["review"]
    ]
if __name__ == '__main__':
    # Pipeline: load Kaggle data -> split into sentences -> train Word2Vec ->
    # average word vectors per review -> random forest -> write predictions.
    # Working directory differs per developer machine ('posix' covers macOS/Linux).
    if os.name=='posix':
        print('>>> Loading Mac OSX env ...')
        os.chdir('/Users/gino/kaggle/fast-furious/gitHub/fast-furious/doc_ref/NLP/word2vec-nlp-tutorial/')
    else:
        print('>>> Loading Windows env ...')
        os.chdir('C:/Machine_Learning/git/fast-furious/doc_ref/NLP/word2vec-nlp-tutorial/')
    # Read data from files
    train = pd.read_csv('labeledTrainData.tsv', header=0, delimiter="\t", quoting=3)
    test = pd.read_csv('testData.tsv', header=0, delimiter="\t", quoting=3 )
    unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
    # Verify the number of reviews that were read (100,000 in total)
    print("Read %d labeled train reviews, %d labeled test reviews, "
     "and %d unlabeled reviews\n" % (train["review"].size,
     test["review"].size, unlabeled_train["review"].size))
    # Load the punkt tokenizer
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    # ****** Split the labeled and unlabeled training sets into clean sentences
    #
    sentences = []  # Initialize an empty list of sentences
    print("Parsing sentences from training set")
    for review in train["review"]:
        sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
    print("Parsing sentences from unlabeled set")
    for review in unlabeled_train["review"]:
        sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
    # ****** Set parameters and train the word2vec model
    #
    # Import the built-in logging module and configure it so that Word2Vec
    # creates nice output messages
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
    # Set values for various parameters
    num_features = 300    # Word vector dimensionality
    min_word_count = 40   # Minimum word count
    num_workers = 4       # Number of threads to run in parallel
    context = 10          # Context window size
    downsampling = 1e-3   # Downsample setting for frequent words
    # Initialize and train the model (this will take some time)
    print("Training Word2Vec model...")
    # hashfxn=hash32 keeps hashing reproducible across runs (see hash32 above).
    model = Word2Vec(sentences, workers=num_workers,
                size=num_features, min_count = min_word_count,
                window = context, sample=downsampling, seed=1 , hashfxn=hash32)
    # If you don't plan to train the model any further, calling
    # init_sims will make the model much more memory-efficient.
    model.init_sims(replace=True)
    # It can be helpful to create a meaningful model name and
    # save the model for later use. You can load it later using Word2Vec.load()
    model_name = "300features_40minwords_10context.tsv"
    model.save(model_name)
    # Qualitative sanity checks on the learned embeddings (results to stdout).
    model.doesnt_match("man woman child kitchen".split())
    model.doesnt_match("france england germany berlin".split())
    model.doesnt_match("paris berlin london austria".split())
    model.most_similar("man")
    model.most_similar("queen")
    model.most_similar("awful")
    # ****** Create average vectors for the training and test sets
    #
    print("Creating average feature vecs for training reviews")
    trainDataVecs = getAvgFeatureVecs( getCleanReviews(train), model, num_features )
    print("Creating average feature vecs for test reviews")
    testDataVecs = getAvgFeatureVecs( getCleanReviews(test), model, num_features )
    # ****** Fit a random forest to the training set, then make predictions
    #
    # Fit a random forest to the training data, using 100 trees
    forest = RandomForestClassifier( n_estimators = 100 )
    print("Fitting a random forest to labeled training data...")
    forest = forest.fit(trainDataVecs, train["sentiment"])
    # Test & extract results
    result = forest.predict(testDataVecs)
    # Write the test results
    output = pd.DataFrame( data={"id":test["id"], "sentiment":result})
    output.to_csv("Word2Vec_AverageVectors.tsv", index=False, quoting=3 )
    print("Wrote Word2Vec_AverageVectors.csv")
dfd74c944116ab6f5951f0f6362300c23a653f83 | 64,464 | py | Python | ros/src/util/packages/runtime_manager/scripts/rtmgr.py | yukkysaito/autoware_thesis | db6e2091b5f518f49e47ebafb18db1a208f46704 | [
"BSD-3-Clause"
] | 7 | 2016-12-19T14:52:59.000Z | 2021-02-10T09:54:49.000Z | ros/src/util/packages/runtime_manager/scripts/rtmgr.py | yukkysaito/autoware_thesis | db6e2091b5f518f49e47ebafb18db1a208f46704 | [
"BSD-3-Clause"
] | null | null | null | ros/src/util/packages/runtime_manager/scripts/rtmgr.py | yukkysaito/autoware_thesis | db6e2091b5f518f49e47ebafb18db1a208f46704 | [
"BSD-3-Clause"
] | 3 | 2016-12-19T14:53:01.000Z | 2020-11-17T14:18:41.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.6.8 on Wed Aug 3 11:50:19 2016
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.notebook_1 = wx.Notebook(self, wx.ID_ANY, style=0)
self.tab_qs = wx.Panel(self.notebook_1, wx.ID_ANY)
self.button_map_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Map"))
self.panel_map_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_map_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_sensing_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Sensing"))
self.panel_sensing_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_sensing_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_localization_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Localization"))
self.panel_localization_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_localization_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_detection_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Detection"))
self.panel_detection_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_detection_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_mission_planning_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Mission Planning"))
self.panel_mission_planning_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_mission_planning_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_motion_planning_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Motion Planning"))
self.panel_motion_planning_qs = wx.Panel(self.tab_qs, wx.ID_ANY)
self.label_motion_planning_qs = wx.StaticText(self.tab_qs, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.button_android_tablet_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Android Tablet"))
self.button_oculus_rift_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Oculus Rift"))
self.button_vehicle_gateway_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Vehicle Gateway"))
self.button_cloud_data_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Cloud Data"))
self.sizer_45_staticbox = wx.StaticBox(self.tab_qs, wx.ID_ANY, "")
self.button_auto_pilot_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("Auto Pilot"))
self.button_rosbag_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("RViz"))
self.button_rqt_qs = wx.ToggleButton(self.tab_qs, wx.ID_ANY, _("RQT"))
self.tab_setup = wx.Panel(self.notebook_1, wx.ID_ANY)
self.radio_box_localizer = wx.RadioBox(self.tab_setup, wx.ID_ANY, _("Localizer"), choices=[_("Velodyne"), _("Hokuyo 3D URG")], majorDimension=2, style=wx.RA_SPECIFY_COLS)
self.button_setup_tf = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("TF"))
self.panel_setup_tf = wx.Panel(self.tab_setup, wx.ID_ANY)
self.sizer_42_staticbox = wx.StaticBox(self.tab_setup, wx.ID_ANY, _("Baselink to Localizer"))
self.button_vehicle_model = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("Vehicle Model"))
self.panel_vehicle_model = wx.Panel(self.tab_setup, wx.ID_ANY)
self.sizer_43_staticbox = wx.StaticBox(self.tab_setup, wx.ID_ANY, _("Vehicle Model"))
self.button_vehicle_info = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("Vehicle Info"))
self.panel_vehicle_info = wx.Panel(self.tab_setup, wx.ID_ANY)
self.sizer_43_copy_staticbox = wx.StaticBox(self.tab_setup, wx.ID_ANY, _("Vehicle Info"))
self.button_rosbag_setup = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_setup = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("RViz"))
self.button_rqt_setup = wx.ToggleButton(self.tab_setup, wx.ID_ANY, _("RQT"))
self.tab_map = wx.Panel(self.notebook_1, wx.ID_ANY)
self.button_point_cloud = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("Point Cloud"))
self.panel_point_cloud = wx.Panel(self.tab_map, wx.ID_ANY)
self.checkbox_auto_update = wx.CheckBox(self.tab_map, wx.ID_ANY, _("Auto Update"))
self.choice_scene_num = wx.Choice(self.tab_map, wx.ID_ANY, choices=[_("1x1"), _("3x3"), _("5x5"), _("7x7"), _("9x9")])
self.button_area_lists = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("Area Lists"))
self.label_9 = wx.StaticText(self.tab_map, wx.ID_ANY, _("Area List :"))
self.panel_area_lists = wx.Panel(self.tab_map, wx.ID_ANY)
self.label_point_cloud_bar = wx.StaticText(self.tab_map, wx.ID_ANY, _("Loading bar ... 82%"), style=wx.ALIGN_CENTRE)
self.label_point_cloud = wx.StaticText(self.tab_map, wx.ID_ANY, "", style=wx.ALIGN_CENTRE)
self.static_line_4 = wx.StaticLine(self.tab_map, wx.ID_ANY)
self.button_vector_map = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("Vector Map"))
self.panel_vector_map = wx.Panel(self.tab_map, wx.ID_ANY)
self.static_line_5 = wx.StaticLine(self.tab_map, wx.ID_ANY)
self.button_tf = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("TF"))
self.panel_tf = wx.Panel(self.tab_map, wx.ID_ANY)
self.sizer_61_staticbox = wx.StaticBox(self.tab_map, wx.ID_ANY, "")
self.button_pcd_filter = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("PCD Filter"))
self.panel_pcd_filter = wx.Panel(self.tab_map, wx.ID_ANY)
self.static_line_5_copy = wx.StaticLine(self.tab_map, wx.ID_ANY)
self.button_pcd_binarizer = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("PCD Binarizer"))
self.panel_pcd_binarizer = wx.Panel(self.tab_map, wx.ID_ANY)
self.sizer_39_staticbox = wx.StaticBox(self.tab_map, wx.ID_ANY, _("Map Tools"))
self.button_rosbag_map = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_map = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("RViz"))
self.button_rqt_map = wx.ToggleButton(self.tab_map, wx.ID_ANY, _("RQT"))
self.tab_sensing = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_sensing = wx.ScrolledWindow(self.tab_sensing, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.tree_ctrl_sense = wx.TreeCtrl(self.tab_sensing, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER)
self.button_calibration_toolkit = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("Calibration Tool Kit"))
self.button_calibration_publisher = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("Calibration Publisher"))
self.sizer_69_staticbox = wx.StaticBox(self.tab_sensing, wx.ID_ANY, "")
self.button_points_image = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("Points Image"))
self.button_virtual_scan_image = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("Virtual Scan Image"))
self.button_scan_image = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("Scan Image"))
self.sizer_70_staticbox = wx.StaticBox(self.tab_sensing, wx.ID_ANY, "")
self.button_rosbag_sensing = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_sensing = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("RViz"))
self.button_rqt_sensing = wx.ToggleButton(self.tab_sensing, wx.ID_ANY, _("RQT"))
self.tab_computing = wx.Panel(self.notebook_1, wx.ID_ANY)
self.tree_ctrl_0 = wx.TreeCtrl(self.tab_computing, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER)
self.tree_ctrl_1 = wx.TreeCtrl(self.tab_computing, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER)
self.button_synchronization = wx.ToggleButton(self.tab_computing, wx.ID_ANY, _("Synchronization"))
self.button_rosbag_computing = wx.ToggleButton(self.tab_computing, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_computing = wx.ToggleButton(self.tab_computing, wx.ID_ANY, _("RViz"))
self.button_rqt_computing = wx.ToggleButton(self.tab_computing, wx.ID_ANY, _("RQT"))
self.tab_interface = wx.Panel(self.notebook_1, wx.ID_ANY)
self.button_android_tablet_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("Android Tablet"))
self.button_oculus_rift_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("Oculus Rift"))
self.button_vehicle_gateway_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("Vehicle Gateway"))
self.sizer_25_staticbox = wx.StaticBox(self.tab_interface, wx.ID_ANY, "")
self.checkbox_sound = wx.CheckBox(self.tab_interface, wx.ID_ANY, _("Sound"))
self.sizer_12_staticbox = wx.StaticBox(self.tab_interface, wx.ID_ANY, "")
self.button_auto_pilot_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("Auto Pilot"))
self.label_5 = wx.StaticText(self.tab_interface, wx.ID_ANY, _("Lamp"))
self.button_statchk_lamp_l = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("L"))
self.button_statchk_lamp_r = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("R"))
self.label_5_copy = wx.StaticText(self.tab_interface, wx.ID_ANY, _("Indicator"))
self.button_statchk_indi_l = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("L"))
self.button_statchk_indi_r = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("R"))
self.button_statchk_d = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("D"))
self.button_statchk_r = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("R"))
self.button_statchk_b = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("B"))
self.button_statchk_n = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("N"))
self.panel_interface_cc = wx.Panel(self.tab_interface, wx.ID_ANY)
self.sizer_26_staticbox = wx.StaticBox(self.tab_interface, wx.ID_ANY, "")
self.button_rosbag_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("RViz"))
self.button_rqt_interface = wx.ToggleButton(self.tab_interface, wx.ID_ANY, _("RQT"))
self.tab_database = wx.Panel(self.notebook_1, wx.ID_ANY)
self.tree_ctrl_data = wx.TreeCtrl(self.tab_database, wx.ID_ANY, style=wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER)
self.text_ctrl_query = wx.TextCtrl(self.tab_database, wx.ID_ANY, _("sentence"))
self.button_query = wx.Button(self.tab_database, wx.ID_ANY, _("Query"))
self.list_ctrl_sql = wx.ListCtrl(self.tab_database, wx.ID_ANY, style=wx.LC_REPORT | wx.SUNKEN_BORDER)
self.button_rosbag_database = wx.ToggleButton(self.tab_database, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_database = wx.ToggleButton(self.tab_database, wx.ID_ANY, _("RViz"))
self.button_rqt_database = wx.ToggleButton(self.tab_database, wx.ID_ANY, _("RQT"))
self.tab_simulation = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_rosbag_play = wx.Panel(self.tab_simulation, wx.ID_ANY)
self.sizer_79_staticbox = wx.StaticBox(self.tab_simulation, wx.ID_ANY, "")
self.button_play_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Play"))
self.button_stop_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Stop"))
self.button_pause_rosbag_play = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("Pause"))
self.label_rosbag_play_bar = wx.StaticText(self.tab_simulation, wx.ID_ANY, _("Playing ... 82 %"))
self.label_rosbag_play_pos = wx.StaticText(self.tab_simulation, wx.ID_ANY, "")
self.static_line_3 = wx.StaticLine(self.tab_simulation, wx.ID_ANY)
self.label_rosbag_play_total = wx.StaticText(self.tab_simulation, wx.ID_ANY, "")
self.panel_5 = wx.ScrolledWindow(self.tab_simulation, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.label_rosbag_info = wx.StaticText(self.panel_5, wx.ID_ANY, "")
self.button_rosbag_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("RViz"))
self.button_rqt_simulation = wx.ToggleButton(self.tab_simulation, wx.ID_ANY, _("RQT"))
self.tab_status = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_3 = wx.ScrolledWindow(self.tab_status, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.label_top_cmd = wx.StaticText(self.panel_3, wx.ID_ANY, "")
self.sizer_86_staticbox = wx.StaticBox(self.tab_status, wx.ID_ANY, "")
self.panel_4 = wx.ScrolledWindow(self.tab_status, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.label_node_time = wx.StaticText(self.panel_4, wx.ID_ANY, "")
self.sizer_87_staticbox = wx.StaticBox(self.panel_4, wx.ID_ANY, "")
self.checkbox_stdout = wx.CheckBox(self.tab_status, wx.ID_ANY, _("Stdout"))
self.checkbox_stderr = wx.CheckBox(self.tab_status, wx.ID_ANY, _("Stderr"))
self.text_ctrl_stdout = wx.TextCtrl(self.tab_status, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
self.sizer_stdout_staticbox = wx.StaticBox(self.tab_status, wx.ID_ANY, "")
self.button_system_monitor = wx.ToggleButton(self.tab_status, wx.ID_ANY, _("System Monitor"))
self.button_rosbag_status = wx.ToggleButton(self.tab_status, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_status = wx.ToggleButton(self.tab_status, wx.ID_ANY, _("RViz"))
self.button_rqt_status = wx.ToggleButton(self.tab_status, wx.ID_ANY, _("RQT"))
self.tab_topics = wx.Panel(self.notebook_1, wx.ID_ANY)
self.panel_topics_list = wx.ScrolledWindow(self.tab_topics, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.checkbox_topics_echo = wx.CheckBox(self.tab_topics, wx.ID_ANY, _("Echo"))
self.text_ctrl_topics_echo = wx.TextCtrl(self.tab_topics, wx.ID_ANY, "", style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL)
self.panel_topics_info = wx.ScrolledWindow(self.tab_topics, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
self.label_topics_info = wx.StaticText(self.panel_topics_info, wx.ID_ANY, "")
self.sizer_36_staticbox = wx.StaticBox(self.tab_topics, wx.ID_ANY, _("Info"))
self.button_refresh_topics = wx.Button(self.tab_topics, wx.ID_ANY, _("Refresh"))
self.button_rosbag_topics = wx.ToggleButton(self.tab_topics, wx.ID_ANY, _("ROSBAG"))
self.button_rviz_topics = wx.ToggleButton(self.tab_topics, wx.ID_ANY, _("RViz"))
self.button_rqt_topics = wx.ToggleButton(self.tab_topics, wx.ID_ANY, _("RQT"))
self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, wx.NullBitmap)
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_map_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_sensing_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_localization_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_detection_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_mission_planning_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_motion_planning_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_android_tablet_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_oculus_rift_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_vehicle_gateway_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_cloud_data_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnAutoPilot, self.button_auto_pilot_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_qs)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_qs)
self.Bind(wx.EVT_RADIOBOX, self.OnSetupLocalizer, self.radio_box_localizer)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_setup_tf)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_vehicle_model)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_vehicle_info)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_setup)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_setup)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_setup)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnSelector, self.button_point_cloud)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_area_lists)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_vector_map)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_tf)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_pcd_filter)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_pcd_binarizer)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_map)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_map)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_map)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_calibration_toolkit)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnCalibrationPublisher, self.button_calibration_publisher)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_points_image)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_virtual_scan_image)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_scan_image)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_sensing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_sensing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_sensing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_synchronization)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_computing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_computing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_computing)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_android_tablet_interface)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_oculus_rift_interface)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_vehicle_gateway_interface)
self.Bind(wx.EVT_CHECKBOX, self.OnLaunchKill, self.checkbox_sound)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnAutoPilot, self.button_auto_pilot_interface)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLamp, self.button_statchk_lamp_l)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLamp, self.button_statchk_lamp_r)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnIndi, self.button_statchk_indi_l)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnIndi, self.button_statchk_indi_r)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGear, self.button_statchk_d)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGear, self.button_statchk_r)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGear, self.button_statchk_b)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnGear, self.button_statchk_n)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_interface)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_interface)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_interface)
self.Bind(wx.EVT_BUTTON, self.OnQuery, self.button_query)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_database)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_database)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_database)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagPlay, self.button_play_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagPlay, self.button_stop_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagPlay, self.button_pause_rosbag_play)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_simulation)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_system_monitor)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_status)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_status)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_status)
self.Bind(wx.EVT_CHECKBOX, self.OnEcho, self.checkbox_topics_echo)
self.Bind(wx.EVT_BUTTON, self.OnRefreshTopics, self.button_refresh_topics)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnRosbagRecord, self.button_rosbag_topics)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rviz_topics)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnLaunchKill, self.button_rqt_topics)
# end wxGlade
def __set_properties(self):
    """Apply static widget properties (sizes, initial selections, scroll rates).

    Generated by wxGlade (see the begin/end markers); manual edits between the
    markers may be overwritten if the GUI is regenerated.
    """
    # begin wxGlade: MyFrame.__set_properties
    self.SetTitle(_("Runtime Manager"))
    self.SetSize((806, 584))
    # Quick Start tab: uniform button (125x29) and label (100x17) sizes.
    self.button_map_qs.SetMinSize((125, 29))
    self.label_map_qs.SetMinSize((100, 17))
    self.button_sensing_qs.SetMinSize((125, 29))
    self.label_sensing_qs.SetMinSize((100, 17))
    self.button_localization_qs.SetMinSize((125, 29))
    self.label_localization_qs.SetMinSize((100, 17))
    self.button_detection_qs.SetMinSize((125, 29))
    self.label_detection_qs.SetMinSize((100, 17))
    self.button_mission_planning_qs.SetMinSize((125, 29))
    self.label_mission_planning_qs.SetMinSize((100, 17))
    self.button_motion_planning_qs.SetMinSize((125, 29))
    self.label_motion_planning_qs.SetMinSize((100, 17))
    self.button_auto_pilot_qs.SetMinSize((135, 29))
    # Setup / Map tabs: default selections and sizes.
    self.radio_box_localizer.SetSelection(0)
    self.button_point_cloud.SetMinSize((125, 29))
    self.choice_scene_num.SetSelection(0)
    self.button_area_lists.SetMinSize((125, 29))
    self.button_area_lists.Hide()
    self.label_point_cloud.SetMinSize((100, 17))
    self.button_vector_map.SetMinSize((125, 29))
    self.button_tf.SetMinSize((125, 29))
    self.button_pcd_filter.SetMinSize((125, 29))
    self.button_pcd_binarizer.SetMinSize((125, 29))
    self.panel_sensing.SetScrollRate(10, 10)
    # Interface tab: small 32x29 status-check toggle buttons; gear "B" starts pressed.
    self.button_statchk_lamp_l.SetMinSize((32, 29))
    self.button_statchk_lamp_r.SetMinSize((32, 29))
    self.button_statchk_indi_l.SetMinSize((32, 29))
    self.button_statchk_indi_r.SetMinSize((32, 29))
    self.button_statchk_d.SetMinSize((32, 29))
    self.button_statchk_r.SetMinSize((32, 29))
    self.button_statchk_b.SetMinSize((32, 29))
    self.button_statchk_b.SetValue(1)
    self.button_statchk_n.SetMinSize((32, 29))
    # Simulation tab: rosbag play starts in the "stopped" state.
    self.button_stop_rosbag_play.Enable(False)
    self.button_stop_rosbag_play.SetValue(1)
    self.button_pause_rosbag_play.Enable(False)
    self.label_rosbag_play_pos.SetMinSize((32, 17))
    self.label_rosbag_play_total.SetMinSize((32, 17))
    # Scrollable panels share a 10px scroll rate in both directions.
    self.panel_5.SetScrollRate(10, 10)
    self.panel_3.SetScrollRate(10, 10)
    self.panel_4.SetScrollRate(10, 10)
    self.panel_topics_list.SetScrollRate(10, 10)
    self.panel_topics_info.SetScrollRate(10, 10)
    # end wxGlade
def __do_layout(self):
    """Build the sizer hierarchy for every notebook tab and the top-level frame.

    Generated by wxGlade (see the begin/end markers); the statement order is
    significant (sizers are created first, then populated bottom-up), so manual
    edits between the markers may be overwritten if the GUI is regenerated.
    """
    # begin wxGlade: MyFrame.__do_layout
    # Create all sizers before populating them.
    self.sizer_1 = wx.BoxSizer(wx.VERTICAL)
    sizer_29 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_cpuinfo = wx.BoxSizer(wx.HORIZONTAL)
    sizer_85_copy = wx.BoxSizer(wx.VERTICAL)
    sizer_51_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_1_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_60_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_32 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_34 = wx.BoxSizer(wx.VERTICAL)
    self.sizer_36_staticbox.Lower()
    sizer_36 = wx.StaticBoxSizer(self.sizer_36_staticbox, wx.HORIZONTAL)
    sizer_topics_info = wx.BoxSizer(wx.VERTICAL)
    sizer_35 = wx.BoxSizer(wx.VERTICAL)
    self.sizer_topics_list = wx.BoxSizer(wx.VERTICAL)
    sizer_85 = wx.BoxSizer(wx.VERTICAL)
    sizer_51_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_60_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_20 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_stdout_staticbox.Lower()
    sizer_stdout = wx.StaticBoxSizer(self.sizer_stdout_staticbox, wx.VERTICAL)
    sizer_38 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_87_staticbox.Lower()
    sizer_87 = wx.StaticBoxSizer(self.sizer_87_staticbox, wx.HORIZONTAL)
    self.sizer_86_staticbox.Lower()
    sizer_86 = wx.StaticBoxSizer(self.sizer_86_staticbox, wx.HORIZONTAL)
    sizer_19 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_78 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_37 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_80 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_82 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_83 = wx.BoxSizer(wx.VERTICAL)
    sizer_81 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_79_staticbox.Lower()
    sizer_79 = wx.StaticBoxSizer(self.sizer_79_staticbox, wx.VERTICAL)
    sizer_10 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_11 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_17 = wx.BoxSizer(wx.VERTICAL)
    sizer_18 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_24 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_26_staticbox.Lower()
    sizer_26 = wx.StaticBoxSizer(self.sizer_26_staticbox, wx.VERTICAL)
    sizer_77 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_66 = wx.BoxSizer(wx.VERTICAL)
    sizer_72 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_75 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_76_copy = wx.BoxSizer(wx.VERTICAL)
    sizer_54_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_76 = wx.BoxSizer(wx.VERTICAL)
    sizer_54 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_9 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_12_staticbox.Lower()
    sizer_12 = wx.StaticBoxSizer(self.sizer_12_staticbox, wx.VERTICAL)
    self.sizer_25_staticbox.Lower()
    sizer_25 = wx.StaticBoxSizer(self.sizer_25_staticbox, wx.HORIZONTAL)
    sizer_71 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_47 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_27 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_68 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_7 = wx.BoxSizer(wx.VERTICAL)
    self.sizer_70_staticbox.Lower()
    sizer_70 = wx.StaticBoxSizer(self.sizer_70_staticbox, wx.VERTICAL)
    self.sizer_69_staticbox.Lower()
    sizer_69 = wx.StaticBoxSizer(self.sizer_69_staticbox, wx.VERTICAL)
    sizer_33 = wx.BoxSizer(wx.VERTICAL)
    sizer_4 = wx.BoxSizer(wx.VERTICAL)
    sizer_62 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_39_staticbox.Lower()
    sizer_39 = wx.StaticBoxSizer(self.sizer_39_staticbox, wx.VERTICAL)
    sizer_53_copy_3_copy_2_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_3_copy_2 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_61_staticbox.Lower()
    sizer_61 = wx.StaticBoxSizer(self.sizer_61_staticbox, wx.VERTICAL)
    sizer_53_copy_3_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_4 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_8 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_64 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_3_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_63 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_3 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_40 = wx.BoxSizer(wx.VERTICAL)
    sizer_62_copy_copy_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52_copy_copy_copy_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_46 = wx.BoxSizer(wx.VERTICAL)
    self.sizer_43_copy_staticbox.Lower()
    sizer_43_copy = wx.StaticBoxSizer(self.sizer_43_copy_staticbox, wx.HORIZONTAL)
    self.sizer_43_staticbox.Lower()
    sizer_43 = wx.StaticBoxSizer(self.sizer_43_staticbox, wx.HORIZONTAL)
    self.sizer_42_staticbox.Lower()
    sizer_42 = wx.StaticBoxSizer(self.sizer_42_staticbox, wx.HORIZONTAL)
    sizer_16 = wx.BoxSizer(wx.VERTICAL)
    sizer_51 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_52 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_60 = wx.BoxSizer(wx.HORIZONTAL)
    self.sizer_45_staticbox.Lower()
    sizer_45 = wx.StaticBoxSizer(self.sizer_45_staticbox, wx.VERTICAL)
    sizer_59 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_2_copy = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_2 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
    sizer_53 = wx.BoxSizer(wx.HORIZONTAL)
    # --- Quick Start tab ---
    sizer_53.Add(self.button_map_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53.Add(self.panel_map_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53.Add(self.label_map_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53, 1, wx.EXPAND, 0)
    sizer_53_copy_1.Add(self.button_sensing_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_1.Add(self.panel_sensing_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53_copy_1.Add(self.label_sensing_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53_copy_1, 1, wx.EXPAND, 0)
    sizer_53_copy_2.Add(self.button_localization_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_2.Add(self.panel_localization_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53_copy_2.Add(self.label_localization_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53_copy_2, 1, wx.EXPAND, 0)
    sizer_53_copy_2_copy.Add(self.button_detection_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_2_copy.Add(self.panel_detection_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53_copy_2_copy.Add(self.label_detection_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53_copy_2_copy, 1, wx.EXPAND, 0)
    sizer_53_copy_copy.Add(self.button_mission_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_copy.Add(self.panel_mission_planning_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53_copy_copy.Add(self.label_mission_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53_copy_copy, 1, wx.EXPAND, 0)
    sizer_53_copy.Add(self.button_motion_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy.Add(self.panel_motion_planning_qs, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_53_copy.Add(self.label_motion_planning_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_53_copy, 1, wx.EXPAND, 0)
    sizer_59.Add(self.button_android_tablet_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_59.Add(self.button_oculus_rift_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_59.Add(self.button_vehicle_gateway_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_59.Add(self.button_cloud_data_qs, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_45.Add(sizer_59, 1, wx.EXPAND, 0)
    sizer_16.Add(sizer_45, 1, wx.ALL | wx.EXPAND, 4)
    sizer_60.Add(self.button_auto_pilot_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51.Add(sizer_60, 1, wx.EXPAND, 0)
    sizer_52.Add(self.button_rosbag_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52.Add(self.button_rviz_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52.Add(self.button_rqt_qs, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51.Add(sizer_52, 0, wx.EXPAND, 0)
    sizer_16.Add(sizer_51, 0, wx.EXPAND, 0)
    self.tab_qs.SetSizer(sizer_16)
    # --- Setup tab ---
    sizer_46.Add(self.radio_box_localizer, 0, wx.ALL, 4)
    sizer_42.Add(self.button_setup_tf, 0, wx.ALL, 4)
    sizer_42.Add(self.panel_setup_tf, 1, wx.ALL, 4)
    sizer_46.Add(sizer_42, 0, wx.ALL | wx.EXPAND, 4)
    sizer_43.Add(self.button_vehicle_model, 0, wx.ALL, 4)
    sizer_43.Add(self.panel_vehicle_model, 1, wx.ALL, 4)
    sizer_46.Add(sizer_43, 0, wx.ALL | wx.EXPAND, 4)
    sizer_43_copy.Add(self.button_vehicle_info, 0, wx.ALL, 4)
    sizer_43_copy.Add(self.panel_vehicle_info, 1, wx.ALL, 4)
    sizer_46.Add(sizer_43_copy, 0, wx.ALL | wx.EXPAND, 10)
    sizer_40.Add(sizer_46, 1, wx.EXPAND, 0)
    sizer_62_copy_copy_copy_1.Add((20, 20), 1, 0, 0)
    sizer_52_copy_copy_copy_copy_1.Add(self.button_rosbag_setup, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_1.Add(self.button_rviz_setup, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_1.Add(self.button_rqt_setup, 0, wx.ALL, 4)
    sizer_62_copy_copy_copy_1.Add(sizer_52_copy_copy_copy_copy_1, 0, wx.EXPAND, 0)
    sizer_40.Add(sizer_62_copy_copy_copy_1, 0, wx.EXPAND, 0)
    self.tab_setup.SetSizer(sizer_40)
    # --- Map tab ---
    sizer_53_copy_3.Add(self.button_point_cloud, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3.Add(self.panel_point_cloud, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_63.Add(sizer_53_copy_3, 1, wx.EXPAND, 0)
    sizer_61.Add(sizer_63, 0, wx.TOP | wx.EXPAND, 4)
    sizer_64.Add(self.checkbox_auto_update, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_64.Add(self.choice_scene_num, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy.Add(self.button_area_lists, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy.Add(self.label_9, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy.Add(self.panel_area_lists, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_64.Add(sizer_53_copy_3_copy, 1, wx.BOTTOM | wx.EXPAND, 4)
    sizer_61.Add(sizer_64, 0, wx.BOTTOM | wx.EXPAND, 4)
    sizer_8.Add(self.label_point_cloud_bar, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_8.Add(self.label_point_cloud, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_61.Add(sizer_8, 1, wx.ALL | wx.EXPAND, 4)
    sizer_61.Add(self.static_line_4, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
    sizer_53_copy_4.Add(self.button_vector_map, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_4.Add(self.panel_vector_map, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_61.Add(sizer_53_copy_4, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
    sizer_61.Add(self.static_line_5, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
    sizer_53_copy_3_copy_1.Add(self.button_tf, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy_1.Add(self.panel_tf, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_61.Add(sizer_53_copy_3_copy_1, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
    sizer_4.Add(sizer_61, 0, wx.ALL | wx.EXPAND, 4)
    sizer_53_copy_3_copy_2.Add(self.button_pcd_filter, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy_2.Add(self.panel_pcd_filter, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_39.Add(sizer_53_copy_3_copy_2, 0, wx.EXPAND, 0)
    sizer_39.Add(self.static_line_5_copy, 0, wx.TOP | wx.BOTTOM | wx.EXPAND, 4)
    sizer_53_copy_3_copy_2_copy.Add(self.button_pcd_binarizer, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_53_copy_3_copy_2_copy.Add(self.panel_pcd_binarizer, 1, wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_39.Add(sizer_53_copy_3_copy_2_copy, 0, wx.EXPAND, 0)
    sizer_4.Add(sizer_39, 0, wx.LEFT | wx.RIGHT | wx.EXPAND, 4)
    sizer_62.Add((20, 20), 1, 0, 0)
    sizer_52_copy.Add(self.button_rosbag_map, 0, wx.ALL, 4)
    sizer_52_copy.Add(self.button_rviz_map, 0, wx.ALL, 4)
    sizer_52_copy.Add(self.button_rqt_map, 0, wx.ALL, 4)
    sizer_62.Add(sizer_52_copy, 0, wx.EXPAND, 0)
    sizer_4.Add(sizer_62, 0, wx.EXPAND, 0)
    self.tab_map.SetSizer(sizer_4)
    # --- Sensing tab ---
    sizer_33.Add(self.panel_sensing, 1, wx.EXPAND, 0)
    sizer_5.Add(sizer_33, 1, wx.ALL | wx.EXPAND, 4)
    sizer_7.Add(self.tree_ctrl_sense, 2, wx.EXPAND, 0)
    sizer_69.Add(self.button_calibration_toolkit, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_69.Add(self.button_calibration_publisher, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_7.Add(sizer_69, 2, wx.TOP | wx.EXPAND, 8)
    sizer_70.Add(self.button_points_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_70.Add(self.button_virtual_scan_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_70.Add(self.button_scan_image, 1, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_7.Add(sizer_70, 3, wx.TOP | wx.BOTTOM | wx.EXPAND, 8)
    sizer_5.Add(sizer_7, 1, wx.ALL | wx.EXPAND, 4)
    sizer_68.Add(sizer_5, 1, wx.EXPAND, 0)
    sizer_62_copy.Add((20, 20), 1, 0, 0)
    sizer_52_copy_copy.Add(self.button_rosbag_sensing, 0, wx.ALL, 4)
    sizer_52_copy_copy.Add(self.button_rviz_sensing, 0, wx.ALL, 4)
    sizer_52_copy_copy.Add(self.button_rqt_sensing, 0, wx.ALL, 4)
    sizer_62_copy.Add(sizer_52_copy_copy, 0, wx.EXPAND, 0)
    sizer_68.Add(sizer_62_copy, 0, wx.EXPAND, 0)
    self.tab_sensing.SetSizer(sizer_68)
    # --- Computing tab ---
    sizer_27.Add(self.tree_ctrl_0, 1, wx.EXPAND, 0)
    sizer_27.Add(self.tree_ctrl_1, 1, wx.EXPAND, 0)
    sizer_71.Add(sizer_27, 1, wx.EXPAND, 0)
    sizer_47.Add(self.button_synchronization, 0, wx.ALL, 4)
    sizer_62_copy_copy.Add(sizer_47, 1, wx.EXPAND, 0)
    sizer_52_copy_copy_copy.Add(self.button_rosbag_computing, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy.Add(self.button_rviz_computing, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy.Add(self.button_rqt_computing, 0, wx.ALL, 4)
    sizer_62_copy_copy.Add(sizer_52_copy_copy_copy, 0, wx.EXPAND, 0)
    sizer_71.Add(sizer_62_copy_copy, 0, wx.EXPAND, 0)
    self.tab_computing.SetSizer(sizer_71)
    # --- Interface tab ---
    sizer_25.Add(self.button_android_tablet_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_25.Add(self.button_oculus_rift_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_25.Add(self.button_vehicle_gateway_interface, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_9.Add(sizer_25, 3, wx.ALL | wx.EXPAND, 4)
    sizer_12.Add(self.checkbox_sound, 1, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_9.Add(sizer_12, 1, wx.ALL | wx.EXPAND, 4)
    sizer_24.Add(sizer_9, 1, wx.EXPAND, 0)
    sizer_72.Add(self.button_auto_pilot_interface, 1, wx.ALL | wx.EXPAND, 4)
    sizer_75.Add((20, 20), 1, 0, 0)
    sizer_76.Add(self.label_5, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_54.Add(self.button_statchk_lamp_l, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_54.Add(self.button_statchk_lamp_r, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_76.Add(sizer_54, 1, wx.EXPAND, 0)
    sizer_75.Add(sizer_76, 1, 0, 0)
    sizer_76_copy.Add(self.label_5_copy, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_54_copy.Add(self.button_statchk_indi_l, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_54_copy.Add(self.button_statchk_indi_r, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_76_copy.Add(sizer_54_copy, 1, wx.EXPAND, 0)
    sizer_75.Add(sizer_76_copy, 1, 0, 0)
    sizer_75.Add((20, 20), 1, 0, 0)
    sizer_72.Add(sizer_75, 1, 0, 0)
    sizer_26.Add(sizer_72, 0, wx.EXPAND, 0)
    sizer_66.Add(self.button_statchk_d, 0, wx.ALL | wx.EXPAND, 4)
    sizer_66.Add(self.button_statchk_r, 0, wx.ALL | wx.EXPAND, 4)
    sizer_66.Add(self.button_statchk_b, 0, wx.ALL | wx.EXPAND, 4)
    sizer_66.Add(self.button_statchk_n, 0, wx.ALL | wx.EXPAND, 4)
    sizer_77.Add(sizer_66, 0, wx.ALL | wx.EXPAND, 4)
    sizer_77.Add(self.panel_interface_cc, 1, wx.ALL, 4)
    sizer_26.Add(sizer_77, 0, wx.EXPAND, 0)
    sizer_24.Add(sizer_26, 0, wx.ALL | wx.EXPAND, 4)
    sizer_24.Add((20, 20), 1, 0, 0)
    sizer_62_copy_copy_copy.Add((20, 20), 1, 0, 0)
    sizer_52_copy_copy_copy_copy.Add(self.button_rosbag_interface, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy.Add(self.button_rviz_interface, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy.Add(self.button_rqt_interface, 0, wx.ALL, 4)
    sizer_62_copy_copy_copy.Add(sizer_52_copy_copy_copy_copy, 0, wx.EXPAND, 0)
    sizer_24.Add(sizer_62_copy_copy_copy, 0, wx.EXPAND, 0)
    self.tab_interface.SetSizer(sizer_24)
    # --- Database tab ---
    sizer_11.Add(self.tree_ctrl_data, 1, wx.ALL | wx.EXPAND, 4)
    sizer_18.Add(self.text_ctrl_query, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_18.Add(self.button_query, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_17.Add(sizer_18, 0, wx.EXPAND, 0)
    sizer_17.Add(self.list_ctrl_sql, 1, wx.ALL | wx.EXPAND, 4)
    sizer_17.Add((85, 29), 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
    sizer_11.Add(sizer_17, 1, wx.ALL | wx.EXPAND, 4)
    sizer_10.Add(sizer_11, 1, wx.ALL | wx.EXPAND, 4)
    sizer_62_copy_copy_copy_copy.Add((20, 20), 1, 0, 0)
    sizer_52_copy_copy_copy_copy_copy.Add(self.button_rosbag_database, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_copy.Add(self.button_rviz_database, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_copy.Add(self.button_rqt_database, 0, wx.ALL, 4)
    sizer_62_copy_copy_copy_copy.Add(sizer_52_copy_copy_copy_copy_copy, 0, wx.EXPAND, 0)
    sizer_10.Add(sizer_62_copy_copy_copy_copy, 0, wx.EXPAND, 0)
    self.tab_database.SetSizer(sizer_10)
    # --- Simulation tab ---
    sizer_79.Add(self.panel_rosbag_play, 1, wx.ALL | wx.EXPAND, 4)
    sizer_78.Add(sizer_79, 0, wx.ALL | wx.EXPAND, 4)
    sizer_81.Add(self.button_play_rosbag_play, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_81.Add(self.button_stop_rosbag_play, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_81.Add(self.button_pause_rosbag_play, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_80.Add(sizer_81, 1, wx.EXPAND, 0)
    sizer_82.Add(self.label_rosbag_play_bar, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_83.Add(self.label_rosbag_play_pos, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_83.Add(self.static_line_3, 0, wx.EXPAND, 0)
    sizer_83.Add(self.label_rosbag_play_total, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 0)
    sizer_82.Add(sizer_83, 0, wx.ALL | wx.EXPAND | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_80.Add(sizer_82, 1, wx.EXPAND, 0)
    sizer_78.Add(sizer_80, 0, wx.ALL | wx.EXPAND, 4)
    sizer_37.Add(self.label_rosbag_info, 1, wx.ALL | wx.EXPAND, 4)
    self.panel_5.SetSizer(sizer_37)
    sizer_78.Add(self.panel_5, 1, wx.EXPAND, 0)
    sizer_62_copy_copy_copy_copy_copy.Add((20, 20), 1, 0, 0)
    sizer_52_copy_copy_copy_copy_copy_copy.Add(self.button_rosbag_simulation, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_copy_copy.Add(self.button_rviz_simulation, 0, wx.ALL, 4)
    sizer_52_copy_copy_copy_copy_copy_copy.Add(self.button_rqt_simulation, 0, wx.ALL, 4)
    sizer_62_copy_copy_copy_copy_copy.Add(sizer_52_copy_copy_copy_copy_copy_copy, 0, wx.EXPAND, 0)
    sizer_78.Add(sizer_62_copy_copy_copy_copy_copy, 0, wx.EXPAND, 0)
    self.tab_simulation.SetSizer(sizer_78)
    # --- Status tab ---
    sizer_19.Add(self.label_top_cmd, 1, wx.ALL | wx.EXPAND, 4)
    self.panel_3.SetSizer(sizer_19)
    sizer_86.Add(self.panel_3, 1, wx.ALL | wx.EXPAND, 4)
    sizer_85.Add(sizer_86, 1, wx.ALL | wx.EXPAND, 4)
    sizer_87.Add(self.label_node_time, 1, wx.ALL, 4)
    self.panel_4.SetSizer(sizer_87)
    sizer_20.Add(self.panel_4, 1, wx.EXPAND, 0)
    sizer_38.Add(self.checkbox_stdout, 0, wx.LEFT, 4)
    sizer_38.Add(self.checkbox_stderr, 0, wx.LEFT, 4)
    sizer_stdout.Add(sizer_38, 0, wx.EXPAND, 0)
    sizer_stdout.Add(self.text_ctrl_stdout, 1, wx.ALL | wx.EXPAND, 4)
    sizer_20.Add(sizer_stdout, 1, wx.EXPAND, 0)
    sizer_85.Add(sizer_20, 1, wx.EXPAND, 0)
    sizer_60_copy.Add(self.button_system_monitor, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51_copy.Add(sizer_60_copy, 1, wx.EXPAND, 0)
    sizer_52_copy_1.Add(self.button_rosbag_status, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52_copy_1.Add(self.button_rviz_status, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52_copy_1.Add(self.button_rqt_status, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51_copy.Add(sizer_52_copy_1, 0, wx.EXPAND, 0)
    sizer_85.Add(sizer_51_copy, 0, wx.EXPAND, 0)
    self.tab_status.SetSizer(sizer_85)
    # --- Topics tab ---
    self.panel_topics_list.SetSizer(self.sizer_topics_list)
    sizer_32.Add(self.panel_topics_list, 1, wx.EXPAND, 0)
    sizer_35.Add(self.checkbox_topics_echo, 0, wx.LEFT, 4)
    sizer_35.Add(self.text_ctrl_topics_echo, 1, wx.ALL | wx.EXPAND, 4)
    sizer_34.Add(sizer_35, 1, wx.EXPAND, 0)
    sizer_topics_info.Add(self.label_topics_info, 0, 0, 0)
    self.panel_topics_info.SetSizer(sizer_topics_info)
    sizer_36.Add(self.panel_topics_info, 1, wx.EXPAND, 0)
    sizer_34.Add(sizer_36, 1, wx.EXPAND, 0)
    sizer_32.Add(sizer_34, 1, wx.EXPAND, 0)
    sizer_85_copy.Add(sizer_32, 1, wx.EXPAND, 0)
    sizer_60_copy_copy.Add(self.button_refresh_topics, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51_copy_copy.Add(sizer_60_copy_copy, 1, wx.EXPAND, 0)
    sizer_52_copy_1_copy.Add(self.button_rosbag_topics, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52_copy_1_copy.Add(self.button_rviz_topics, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_52_copy_1_copy.Add(self.button_rqt_topics, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
    sizer_51_copy_copy.Add(sizer_52_copy_1_copy, 0, wx.EXPAND, 0)
    sizer_85_copy.Add(sizer_51_copy_copy, 0, wx.EXPAND, 0)
    self.tab_topics.SetSizer(sizer_85_copy)
    # --- Assemble notebook pages and the top-level frame sizer ---
    self.notebook_1.AddPage(self.tab_qs, _("Quick Start"))
    self.notebook_1.AddPage(self.tab_setup, _("Setup"))
    self.notebook_1.AddPage(self.tab_map, _("Map"))
    self.notebook_1.AddPage(self.tab_sensing, _("Sensing"))
    self.notebook_1.AddPage(self.tab_computing, _("Computing"))
    self.notebook_1.AddPage(self.tab_interface, _("Interface"))
    self.notebook_1.AddPage(self.tab_database, _("Database"))
    self.notebook_1.AddPage(self.tab_simulation, _("Simulation"))
    self.notebook_1.AddPage(self.tab_status, _("Status"))
    self.notebook_1.AddPage(self.tab_topics, _("Topics"))
    self.sizer_1.Add(self.notebook_1, 1, wx.EXPAND, 0)
    sizer_29.Add((0, 100), 0, wx.EXPAND, 0)
    sizer_29.Add(self.sizer_cpuinfo, 1, wx.EXPAND, 0)
    self.sizer_1.Add(sizer_29, 0, wx.EXPAND, 0)
    self.sizer_1.Add(self.bitmap_logo, 0, 0, 0)
    self.SetSizer(self.sizer_1)
    self.Layout()
    # end wxGlade
def OnLaunchKill(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for launch/kill toggle buttons; prints a notice and lets the event propagate."""
    # Fix: Python-2-only `print "..."` statement replaced by print(...),
    # which behaves identically on Python 2 and is valid on Python 3.
    print("Event handler 'OnLaunchKill' not implemented!")
    event.Skip()
def OnAutoPilot(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the auto-pilot toggle buttons; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnAutoPilot' not implemented!")
    event.Skip()
def OnRosbagRecord(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the per-tab ROSBAG record buttons; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnRosbagRecord' not implemented!")
    event.Skip()
def OnSetupLocalizer(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the localizer radio box; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnSetupLocalizer' not implemented!")
    event.Skip()
def OnSelector(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the point-cloud selector button; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnSelector' not implemented!")
    event.Skip()
def OnCalibrationPublisher(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the calibration publisher button; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnCalibrationPublisher' not implemented!")
    event.Skip()
def OnLamp(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the lamp L/R status toggles; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnLamp' not implemented!")
    event.Skip()
def OnIndi(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the indicator L/R status toggles; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnIndi' not implemented!")
    event.Skip()
def OnGear(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the gear D/R/B/N status toggles; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnGear' not implemented!")
    event.Skip()
def OnQuery(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the database Query button; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnQuery' not implemented!")
    event.Skip()
def OnRosbagPlay(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the rosbag play/stop/pause buttons; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnRosbagPlay' not implemented!")
    event.Skip()
def OnEcho(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the topics Echo checkbox; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnEcho' not implemented!")
    event.Skip()
def OnRefreshTopics(self, event):  # wxGlade: MyFrame.<event_handler>
    """Stub handler for the topics Refresh button; prints a notice and lets the event propagate."""
    # Fix: Python-2-only print statement -> cross-version print() call.
    print("Event handler 'OnRefreshTopics' not implemented!")
    event.Skip()
# end of class MyFrame
class MyDialog(wx.Dialog):
    """wxGlade-generated modal dialog: a four-option radio box ("0".."3") with OK/Cancel buttons.

    The only code change versus the generated original is converting the
    Python-2-only `print` statements in the stub handlers to print() calls,
    which behave identically on Python 2 and are valid on Python 3.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialog.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.panel_2 = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.radio_box = wx.RadioBox(self.panel_2, wx.ID_ANY, "", choices=[_("0"), _("1"), _("2"), _("3")], majorDimension=0, style=wx.RA_SPECIFY_ROWS)
        self.button_1 = wx.Button(self, wx.ID_ANY, _("OK"))
        self.button_1_copy = wx.Button(self, wx.ID_ANY, _("Cancel"))
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_BUTTON, self.OnOk, self.button_1)
        self.Bind(wx.EVT_BUTTON, self.OnCancel, self.button_1_copy)
        # end wxGlade

    def __set_properties(self):
        """Fixed dialog size, default radio selection, panel scroll rate (wxGlade-generated)."""
        # begin wxGlade: MyDialog.__set_properties
        self.SetSize((258, 212))
        self.radio_box.SetSelection(0)
        self.panel_2.SetScrollRate(10, 10)
        # end wxGlade

    def __do_layout(self):
        """Radio box on a scrolled panel above a centered OK/Cancel row (wxGlade-generated)."""
        # begin wxGlade: MyDialog.__do_layout
        sizer_13 = wx.BoxSizer(wx.VERTICAL)
        sizer_15 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_14 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_14.Add(self.radio_box, 1, wx.LEFT | wx.RIGHT | wx.TOP | wx.ALIGN_CENTER_HORIZONTAL, 4)
        self.panel_2.SetSizer(sizer_14)
        sizer_13.Add(self.panel_2, 1, wx.EXPAND, 0)
        sizer_15.Add(self.button_1, 0, wx.ALL, 4)
        sizer_15.Add(self.button_1_copy, 0, wx.ALL, 4)
        sizer_13.Add(sizer_15, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        self.SetSizer(sizer_13)
        self.Layout()
        # end wxGlade

    def OnOk(self, event):  # wxGlade: MyDialog.<event_handler>
        """Stub OK handler; prints a notice and lets the event propagate."""
        print("Event handler 'OnOk' not implemented!")
        event.Skip()

    def OnCancel(self, event):  # wxGlade: MyDialog.<event_handler>
        """Stub Cancel handler; prints a notice and lets the event propagate."""
        print("Event handler 'OnCancel' not implemented!")
        event.Skip()
# end of class MyDialog
class MyDialogParam(wx.Dialog):
    """wxGlade-generated parameter dialog: an empty scrolled panel whose
    contents are added at runtime through ``self.sizer_v``, plus OK/Cancel
    buttons. Sections between ``begin/end wxGlade`` markers are regenerated
    by the designer; avoid hand-editing them."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialogParam.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.panel_v = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.button_1 = wx.Button(self, wx.ID_ANY, _("OK"))
        self.button_1_copy = wx.Button(self, wx.ID_ANY, _("Cancel"))

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.OnOk, self.button_1)
        self.Bind(wx.EVT_BUTTON, self.OnCancel, self.button_1_copy)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialogParam.__set_properties
        self.SetTitle(_("dialog_3"))
        self.SetSize((470, 300))
        self.panel_v.SetScrollRate(10, 10)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialogParam.__do_layout
        sizer_30 = wx.BoxSizer(wx.VERTICAL)
        sizer_31 = wx.BoxSizer(wx.HORIZONTAL)
        # sizer_v is kept on self so callers can populate the panel later.
        self.sizer_v = wx.BoxSizer(wx.VERTICAL)
        self.panel_v.SetSizer(self.sizer_v)
        sizer_30.Add(self.panel_v, 1, wx.EXPAND, 0)
        sizer_31.Add(self.button_1, 0, wx.ALL, 4)
        sizer_31.Add(self.button_1_copy, 0, wx.ALL, 4)
        sizer_30.Add(sizer_31, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
        self.SetSizer(sizer_30)
        self.Layout()
        # end wxGlade

    def OnOk(self, event):  # wxGlade: MyDialogParam.<event_handler>
        print "Event handler 'OnOk' not implemented!"
        event.Skip()

    def OnCancel(self, event):  # wxGlade: MyDialogParam.<event_handler>
        print "Event handler 'OnCancel' not implemented!"
        event.Skip()
# end of class MyDialogParam
class MyDialogRosbagRecord(wx.Dialog):
    """wxGlade-generated dialog for rosbag recording: output file field with
    a Ref (browse) button, optional size-based bag splitting, Start/Stop
    buttons, and a scrolled topic area (populated via ``self.sizer_topic``)
    with a Refresh button. Sections between ``begin/end wxGlade`` markers
    are regenerated by the designer."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialogRosbagRecord.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, "")
        self.button_ref = wx.Button(self, wx.ID_ANY, _("Ref"))
        self.checkbox_split = wx.CheckBox(self, wx.ID_ANY, _("split"))
        self.label_2 = wx.StaticText(self, wx.ID_ANY, _("size"))
        self.text_ctrl_size = wx.TextCtrl(self, wx.ID_ANY, "")
        self.label_2_copy = wx.StaticText(self, wx.ID_ANY, _("MB"))
        self.button_start = wx.Button(self, wx.ID_ANY, _("Start"))
        self.button_stop = wx.Button(self, wx.ID_ANY, _("Stop"))
        self.panel_1 = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.button_refresh = wx.Button(self, wx.ID_ANY, _("Refresh"))

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.OnRef, self.button_ref)
        self.Bind(wx.EVT_BUTTON, self.OnStart, self.button_start)
        self.Bind(wx.EVT_BUTTON, self.OnStop, self.button_stop)
        self.Bind(wx.EVT_BUTTON, self.OnRefresh, self.button_refresh)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialogRosbagRecord.__set_properties
        self.SetTitle(_("ROSBAG Record"))
        self.SetSize((300, 430))
        self.button_ref.SetMinSize((40, 29))
        self.text_ctrl_size.SetMinSize((50, 27))
        # Stop starts disabled: nothing is being recorded yet.
        self.button_stop.Enable(False)
        self.panel_1.SetScrollRate(10, 10)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialogRosbagRecord.__do_layout
        sizer_41 = wx.BoxSizer(wx.VERTICAL)
        # sizer_topic is kept on self so topic checkboxes can be added later.
        self.sizer_topic = wx.BoxSizer(wx.VERTICAL)
        sizer_44 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_22 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_23 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_28_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_28_copy_1.Add(self.text_ctrl, 1, wx.LEFT | wx.TOP, 4)
        sizer_28_copy_1.Add(self.button_ref, 0, wx.LEFT | wx.RIGHT | wx.TOP, 4)
        sizer_41.Add(sizer_28_copy_1, 0, wx.EXPAND, 0)
        sizer_22.Add(self.checkbox_split, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_23.Add(self.label_2, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_23.Add(self.text_ctrl_size, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_23.Add(self.label_2_copy, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_22.Add(sizer_23, 1, wx.LEFT | wx.EXPAND, 20)
        sizer_41.Add(sizer_22, 0, wx.EXPAND, 0)
        sizer_44.Add(self.button_start, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_44.Add(self.button_stop, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        sizer_41.Add(sizer_44, 0, wx.EXPAND, 0)
        self.panel_1.SetSizer(self.sizer_topic)
        sizer_41.Add(self.panel_1, 1, wx.EXPAND, 0)
        sizer_41.Add(self.button_refresh, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
        self.SetSizer(sizer_41)
        self.Layout()
        # end wxGlade

    def OnRef(self, event):  # wxGlade: MyDialogRosbagRecord.<event_handler>
        print "Event handler 'OnRef' not implemented!"
        event.Skip()

    def OnStart(self, event):  # wxGlade: MyDialogRosbagRecord.<event_handler>
        print "Event handler 'OnStart' not implemented!"
        event.Skip()

    def OnStop(self, event):  # wxGlade: MyDialogRosbagRecord.<event_handler>
        print "Event handler 'OnStop' not implemented!"
        event.Skip()

    def OnRefresh(self, event):  # wxGlade: MyDialogRosbagRecord.<event_handler>
        print "Event handler 'OnRefresh' not implemented!"
        event.Skip()
# end of class MyDialogRosbagRecord
class MyDialogLaneStop(wx.Dialog):
def __init__(self, *args, **kwds):
# begin wxGlade: MyDialogLaneStop.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.button_red_light = wx.Button(self, wx.ID_ANY, _("Red Light"))
self.button_green_light = wx.Button(self, wx.ID_ANY, _("Green Light"))
self.sizer_3_staticbox = wx.StaticBox(self, wx.ID_ANY, _("Traffic Light"))
self.button_left_lane = wx.Button(self, wx.ID_ANY, _("<< Left Lane"))
self.button_right_lane = wx.Button(self, wx.ID_ANY, _("Right Lane >>"))
self.sizer_3_copy_staticbox = wx.StaticBox(self, wx.ID_ANY, _("Lane Change"))
self.button_keep_at = wx.Button(self, wx.ID_ANY, _("Keep at"))
self.text_keep_at = wx.TextCtrl(self, wx.ID_ANY, _("60"))
self.label_1 = wx.StaticText(self, wx.ID_ANY, _("km/h"))
self.button_stop_in = wx.Button(self, wx.ID_ANY, _("Stop in"))
self.text_ctrl_stop_in = wx.TextCtrl(self, wx.ID_ANY, _("5.0"))
self.label_1_copy = wx.StaticText(self, wx.ID_ANY, _("m"))
self.sizer_6_staticbox = wx.StaticBox(self, wx.ID_ANY, _("Currnet Lane"))
self.checkbox_lane_stop = wx.CheckBox(self, wx.ID_ANY, _("Use traffic light recognition result"))
self.sizer_47_staticbox = wx.StaticBox(self, wx.ID_ANY, _("topic:/config/lane_stop"))
self.button_1 = wx.Button(self, wx.ID_ANY, _("OK"))
self.button_1_copy = wx.Button(self, wx.ID_ANY, _("Cancel"))
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_BUTTON, self.OnTrafficRedLight, self.button_red_light)
self.Bind(wx.EVT_BUTTON, self.OnTrafficGreenLight, self.button_green_light)
self.Bind(wx.EVT_CHECKBOX, self.OnTrafficLightRecognition, self.checkbox_lane_stop)
self.Bind(wx.EVT_BUTTON, self.OnOk, self.button_1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, self.button_1_copy)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyDialogLaneStop.__set_properties
self.SetTitle(_("lane_stop"))
self.button_red_light.SetBackgroundColour(wx.Colour(255, 0, 0))
self.button_green_light.SetBackgroundColour(wx.Colour(0, 255, 0))
self.text_keep_at.SetMinSize((40, 27))
self.text_ctrl_stop_in.SetMinSize((40, 27))
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyDialogLaneStop.__do_layout
sizer_30 = wx.BoxSizer(wx.VERTICAL)
sizer_31 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_47_staticbox.Lower()
sizer_47 = wx.StaticBoxSizer(self.sizer_47_staticbox, wx.VERTICAL)
self.sizer_v = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_6_staticbox.Lower()
sizer_6 = wx.StaticBoxSizer(self.sizer_6_staticbox, wx.VERTICAL)
sizer_21_copy = wx.BoxSizer(wx.HORIZONTAL)
sizer_21 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_3_copy_staticbox.Lower()
sizer_3_copy = wx.StaticBoxSizer(self.sizer_3_copy_staticbox, wx.VERTICAL)
self.sizer_3_staticbox.Lower()
sizer_3 = wx.StaticBoxSizer(self.sizer_3_staticbox, wx.VERTICAL)
sizer_3.Add(self.button_red_light, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_3.Add(self.button_green_light, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_2.Add(sizer_3, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_3_copy.Add(self.button_left_lane, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_3_copy.Add(self.button_right_lane, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_2.Add(sizer_3_copy, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_21.Add(self.button_keep_at, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_21.Add(self.text_keep_at, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_21.Add(self.label_1, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_6.Add(sizer_21, 0, wx.EXPAND, 0)
sizer_21_copy.Add(self.button_stop_in, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
sizer_21_copy.Add(self.text_ctrl_stop_in, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_21_copy.Add(self.label_1_copy, 0, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_6.Add(sizer_21_copy, 0, wx.EXPAND, 0)
sizer_2.Add(sizer_6, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 4)
self.sizer_v.Add(sizer_2, 1, wx.EXPAND, 0)
sizer_30.Add(self.sizer_v, 1, wx.EXPAND, 0)
sizer_47.Add(self.checkbox_lane_stop, 0, 0, 0)
sizer_30.Add(sizer_47, 0, wx.ALL | wx.EXPAND, 4)
sizer_31.Add(self.button_1, 0, wx.ALL, 4)
sizer_31.Add(self.button_1_copy, 0, wx.ALL, 4)
sizer_30.Add(sizer_31, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.SetSizer(sizer_30)
sizer_30.Fit(self)
self.Layout()
# end wxGlade
def OnTrafficRedLight(self, event): # wxGlade: MyDialogLaneStop.<event_handler>
print "Event handler 'OnTrafficRedLight' not implemented!"
event.Skip()
def OnTrafficGreenLight(self, event): # wxGlade: MyDialogLaneStop.<event_handler>
print "Event handler 'OnTrafficGreenLight' not implemented!"
event.Skip()
def OnTrafficLightRecognition(self, event): # wxGlade: MyDialogLaneStop.<event_handler>
print "Event handler 'OnTrafficLightRecognition' not implemented!"
event.Skip()
def OnOk(self, event): # wxGlade: MyDialogLaneStop.<event_handler>
print "Event handler 'OnOk' not implemented!"
event.Skip()
def OnCancel(self, event): # wxGlade: MyDialogLaneStop.<event_handler>
print "Event handler 'OnCancel' not implemented!"
event.Skip()
# end of class MyDialogLaneStop
class MyDialogNdtMapping(wx.Dialog):
    """wxGlade-generated dialog for ndt_mapping: a runtime parameter panel,
    a PCD output path with Ref (browse) button, a choice between voxel
    "Filter Resolution" and "Original" output, a "PCD OUTPUT" action button,
    and Close. Sections between ``begin/end wxGlade`` markers are
    regenerated by the designer."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialogNdtMapping.__init__
        kwds["style"] = wx.CAPTION
        wx.Dialog.__init__(self, *args, **kwds)
        self.panel_v = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.static_line_1 = wx.StaticLine(self, wx.ID_ANY)
        self.text_ctrl_path = wx.TextCtrl(self, wx.ID_ANY, "")
        self.button_ref_path = wx.Button(self, wx.ID_ANY, _("Ref"))
        self.radio_btn_filter_resolution = wx.RadioButton(self, wx.ID_ANY, _("Filter Resolution"))
        self.text_ctrl_filter_resolution = wx.TextCtrl(self, wx.ID_ANY, _("0.2"))
        self.radio_btn_original = wx.RadioButton(self, wx.ID_ANY, _("Original"))
        self.button_3 = wx.Button(self, wx.ID_ANY, _("PCD OUTPUT"))
        self.static_line_2 = wx.StaticLine(self, wx.ID_ANY)
        self.button_1 = wx.Button(self, wx.ID_ANY, _("Close"))

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.OnRef, self.button_ref_path)
        # Both radio buttons share a single handler.
        self.Bind(wx.EVT_RADIOBUTTON, self.OnRadio, self.radio_btn_filter_resolution)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnRadio, self.radio_btn_original)
        self.Bind(wx.EVT_BUTTON, self.OnPcdOutput, self.button_3)
        self.Bind(wx.EVT_BUTTON, self.OnOk, self.button_1)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialogNdtMapping.__set_properties
        self.SetTitle(_("ndt_mapping"))
        self.SetSize((352, 341))
        self.panel_v.SetScrollRate(10, 10)
        self.button_ref_path.SetMinSize((40, 29))
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialogNdtMapping.__do_layout
        sizer_30 = wx.BoxSizer(wx.VERTICAL)
        sizer_48 = wx.BoxSizer(wx.VERTICAL)
        sizer_49 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_28_copy_1 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_30.Add(self.panel_v, 1, wx.EXPAND, 0)
        sizer_30.Add(self.static_line_1, 0, wx.EXPAND, 0)
        sizer_28_copy_1.Add(self.text_ctrl_path, 1, wx.LEFT | wx.TOP, 4)
        sizer_28_copy_1.Add(self.button_ref_path, 0, wx.LEFT | wx.RIGHT | wx.TOP, 4)
        sizer_48.Add(sizer_28_copy_1, 0, wx.ALL | wx.EXPAND, 4)
        sizer_49.Add(self.radio_btn_filter_resolution, 0, wx.ALL, 4)
        sizer_49.Add(self.text_ctrl_filter_resolution, 0, wx.ALL, 4)
        sizer_49.Add(self.radio_btn_original, 0, wx.ALL, 4)
        sizer_48.Add(sizer_49, 0, wx.ALL | wx.EXPAND, 4)
        sizer_48.Add(self.button_3, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
        sizer_30.Add(sizer_48, 0, wx.EXPAND, 0)
        sizer_30.Add(self.static_line_2, 0, wx.EXPAND, 0)
        sizer_30.Add(self.button_1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 4)
        self.SetSizer(sizer_30)
        self.Layout()
        # end wxGlade

    def OnRef(self, event):  # wxGlade: MyDialogNdtMapping.<event_handler>
        print "Event handler 'OnRef' not implemented!"
        event.Skip()

    def OnRadio(self, event):  # wxGlade: MyDialogNdtMapping.<event_handler>
        print "Event handler 'OnRadio' not implemented!"
        event.Skip()

    def OnPcdOutput(self, event):  # wxGlade: MyDialogNdtMapping.<event_handler>
        print "Event handler 'OnPcdOutput' not implemented!"
        event.Skip()

    def OnOk(self, event):  # wxGlade: MyDialogNdtMapping.<event_handler>
        print "Event handler 'OnOk' not implemented!"
        event.Skip()
# end of class MyDialogNdtMapping
class MyDialogDpm(wx.Dialog):
    """wxGlade-generated DPM detection dialog: parameter panel (populated
    at runtime via ``self.sizer_v``), a "Detection Start" button, and links
    to the car / pedestrian parameter-tuning dialogs."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialogDpm.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.panel_v = wx.ScrolledWindow(self, wx.ID_ANY, style=wx.TAB_TRAVERSAL)
        self.button_1 = wx.Button(self, wx.ID_ANY, _("Detection Start"))
        self.hyperlink_car = wx.HyperlinkCtrl(self, wx.ID_ANY, _("car_param_tuning"), "")
        self.hyperlink_pedestrian = wx.HyperlinkCtrl(self, wx.ID_ANY, _("pedestrian_param_tuning"), "")

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.OnOk, self.button_1)
        # Both links route through the same handler.
        self.Bind(wx.EVT_HYPERLINK, self.OnLink, self.hyperlink_car)
        self.Bind(wx.EVT_HYPERLINK, self.OnLink, self.hyperlink_pedestrian)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialogDpm.__set_properties
        self.SetTitle(_("dialog_6"))
        self.SetSize((470, 350))
        self.panel_v.SetScrollRate(10, 10)
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialogDpm.__do_layout
        sizer_30 = wx.BoxSizer(wx.VERTICAL)
        sizer_31 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_28 = wx.BoxSizer(wx.VERTICAL)
        self.sizer_v = wx.BoxSizer(wx.VERTICAL)
        self.panel_v.SetSizer(self.sizer_v)
        sizer_30.Add(self.panel_v, 1, wx.EXPAND, 0)
        sizer_31.Add(self.button_1, 1, wx.ALL | wx.EXPAND, 4)
        sizer_28.Add(self.hyperlink_car, 0, wx.ALL, 4)
        sizer_28.Add(self.hyperlink_pedestrian, 0, wx.ALL, 4)
        sizer_31.Add(sizer_28, 1, wx.EXPAND, 0)
        sizer_30.Add(sizer_31, 0, wx.EXPAND, 0)
        self.SetSizer(sizer_30)
        self.Layout()
        # end wxGlade

    def OnOk(self, event):  # wxGlade: MyDialogDpm.<event_handler>
        print "Event handler 'OnOk' not implemented!"
        event.Skip()

    def OnLink(self, event):  # wxGlade: MyDialogDpm.<event_handler>
        print "Event handler 'OnLink' not implemented!"
        event.Skip()
# end of class MyDialogDpm
class MyDialogCarPedestrian(wx.Dialog):
    """wxGlade-generated chooser dialog offering two links: car or
    pedestrian parameter tuning (both handled by ``OnLink``)."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialogCarPedestrian.__init__
        kwds["style"] = wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.hyperlink_car = wx.HyperlinkCtrl(self, wx.ID_ANY, _("parameter tuning for car"), "")
        self.hyperlink_pedestrian = wx.HyperlinkCtrl(self, wx.ID_ANY, _("parameter tuning for pedestrian"), "")

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_HYPERLINK, self.OnLink, self.hyperlink_car)
        self.Bind(wx.EVT_HYPERLINK, self.OnLink, self.hyperlink_pedestrian)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialogCarPedestrian.__set_properties
        self.SetTitle(_("dialog_7"))
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialogCarPedestrian.__do_layout
        sizer_28 = wx.BoxSizer(wx.VERTICAL)
        sizer_28.Add(self.hyperlink_car, 0, wx.ALL, 4)
        sizer_28.Add(self.hyperlink_pedestrian, 0, wx.ALL, 4)
        self.SetSizer(sizer_28)
        sizer_28.Fit(self)
        self.Layout()
        # end wxGlade

    def OnLink(self, event):  # wxGlade: MyDialogCarPedestrian.<event_handler>
        print "Event handler 'OnLink' not implemented!"
        event.Skip()
# end of class MyDialogCarPedestrian
class MyApp(wx.App):
    """wxGlade-generated application bootstrap: creates and shows MyFrame."""

    def OnInit(self):
        wx.InitAllImageHandlers()
        frame_1 = MyFrame(None, wx.ID_ANY, "")
        self.SetTopWindow(frame_1)
        frame_1.Show()
        # Truthy return tells wxPython that initialisation succeeded.
        return 1
# end of class MyApp
if __name__ == "__main__":
    # Install gettext's _() into builtins before any dialog builds its labels.
    gettext.install("app") # replace with the appropriate catalog name
    app = MyApp(0)
    app.MainLoop()
b2cc5b75bb7d9fb7c83df05839b6156d0de9f7be | 988 | py | Python | setup.py | erickrf/unitex-lemmatizer | 2f3df6dc978001458e9c03e90727dc7c7aa69269 | [
"MIT"
] | 1 | 2017-01-24T15:04:11.000Z | 2017-01-24T15:04:11.000Z | setup.py | erickrf/unitex-lemmatizer | 2f3df6dc978001458e9c03e90727dc7c7aa69269 | [
"MIT"
] | null | null | null | setup.py | erickrf/unitex-lemmatizer | 2f3df6dc978001458e9c03e90727dc7c7aa69269 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

"""
Installation setup for unitexlemmatizer
"""

from setuptools import setup, find_packages

# Read the long description for PyPI. Decode explicitly: on Python 3,
# reading in binary mode yields bytes, and setuptools expects
# ``long_description`` to be a string.
with open('README.rst', 'rb') as f:
    long_desc = f.read().decode('utf-8')

setup(name='unitexlemmatizer',
      version='1.0.0',
      description='A simple lemmatizer based on Unitex word lists',
      long_description=long_desc,
      author='Erick Fonseca',
      author_email='erickrfonseca@gmail.com',
      url='https://github.com/erickrf/unitex-lemmatizer',
      license='MIT',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
      ],
      keywords=['nlp', 'lemmatizer'],
      packages=find_packages()
      )
| 29.058824 | 67 | 0.633603 |
fc00db39ae9ab9e0ac5381964dcbd23f1e7708c3 | 802 | py | Python | Question_01_10/Max Pooling.py | SHK2018/Gasyori100knock | 3fab0a2906ac99a37281269e1618e8ac74629dfa | [
"MIT"
] | 5 | 2021-06-08T16:09:01.000Z | 2021-12-10T09:42:43.000Z | Question_01_10/Max Pooling.py | SHK2018/Gasyori100knock | 3fab0a2906ac99a37281269e1618e8ac74629dfa | [
"MIT"
] | null | null | null | Question_01_10/Max Pooling.py | SHK2018/Gasyori100knock | 3fab0a2906ac99a37281269e1618e8ac74629dfa | [
"MIT"
] | 1 | 2021-05-24T04:14:27.000Z | 2021-05-24T04:14:27.000Z | # -*- coding: utf-8 -*-
import cv2
import numpy as np
def max_pooling(img, ksize):
    """Max-pool ``img`` over non-overlapping ``ksize`` x ``ksize`` windows.

    Every pixel in a window is replaced by the window's per-channel
    maximum, so the output keeps the input's shape.

    Args:
        img: H x W x C array.
        ksize: pooling window edge length in pixels.

    Returns:
        float32 array of shape (H, W, C).
    """
    H, W, C = img.shape
    out = np.zeros_like(img, dtype=np.float32)

    for i in range(0, H, ksize):
        # BUG FIX: the inner loop previously ranged over H instead of W,
        # which crashed or left zero-filled columns on non-square images.
        for j in range(0, W, ksize):
            # Reduce over both spatial axes of the tile; the per-channel
            # maxima broadcast back over the whole tile.
            out[i:i + ksize, j:j + ksize, :] = np.max(
                img[i:i + ksize, j:j + ksize, :], axis=(0, 1))

    return out
# Read image as float32 for the pooling arithmetic.
img = cv2.imread("imori.jpg").astype(np.float32)

# Max pooling with an 8x8 window; cast back to uint8 for display/saving.
out = max_pooling(img, 8).astype(np.uint8)

# Save, then show the result until a key is pressed.
cv2.imwrite("Myresult/out8.jpg", out)
cv2.namedWindow("result",0);
cv2.resizeWindow("result", 256, 256);
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 23.588235 | 77 | 0.600998 |
7b394a15dac43de723f5da1437a7bd8cd13016fb | 494 | py | Python | mmf/__init__.py | sisilmehta2000/mmf | ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T05:46:04.000Z | 2022-01-14T05:46:04.000Z | mmf/__init__.py | sisilmehta2000/mmf | ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4 | [
"BSD-3-Clause"
] | null | null | null | mmf/__init__.py | sisilmehta2000/mmf | ac1bb736f281ffbde367cfe9cf6f4f78fc890fc4 | [
"BSD-3-Clause"
] | 1 | 2021-11-27T23:29:10.000Z | 2021-11-27T23:29:10.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# isort:skip_file
# flake8: noqa: F401
# Package initializer for mmf. transformers must be patched BEFORE the
# submodules below are imported, hence the mid-file call — this is also
# why isort and flake8 (unused-import) are silenced for this file.
from mmf.utils.patch import patch_transformers

patch_transformers()

from mmf import utils, common, modules, datasets, models
from mmf.modules import losses, schedulers, optimizers, metrics, poolers
from mmf.version import __version__

# Public API of the top-level ``mmf`` package.
__all__ = [
    "utils",
    "common",
    "modules",
    "datasets",
    "models",
    "losses",
    "poolers",
    "schedulers",
    "optimizers",
    "metrics",
]
| 19.76 | 72 | 0.686235 |
4ff8543cff289f53855e11f9e6567321b99b5572 | 658 | py | Python | python/BubbleSort.py | Rebaiahmed/hacktoberfest-2020 | b60cf24dc1d761e261ad5a00486c2a5b75d1fcf6 | [
"MIT"
] | 51 | 2020-09-27T15:28:05.000Z | 2021-09-29T02:07:25.000Z | python/BubbleSort.py | Rebaiahmed/hacktoberfest-2020 | b60cf24dc1d761e261ad5a00486c2a5b75d1fcf6 | [
"MIT"
] | 158 | 2020-09-26T12:57:12.000Z | 2020-11-01T19:34:31.000Z | python/BubbleSort.py | Rebaiahmed/hacktoberfest-2020 | b60cf24dc1d761e261ad5a00486c2a5b75d1fcf6 | [
"MIT"
] | 453 | 2020-09-27T12:34:35.000Z | 2021-10-16T08:33:33.000Z | # Python program for implementation of Bubble Sort
def bubbleSort(arr):
    """Sort ``arr`` in place in ascending order using bubble sort.

    Improvement: a pass with no swaps means the list is already sorted,
    so we stop early — best case (sorted input) becomes O(n) instead of
    the unconditional O(n^2).
    """
    n = len(arr)
    # After pass i, the largest i+1 elements occupy their final slots.
    for i in range(n):
        swapped = False
        for j in range(0, n - i - 1):
            # Bubble the larger neighbour toward the end of the list.
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        if not swapped:
            # No swap in a full pass: the remainder is already ordered.
            break
# Driver code to test above (bubbleSort sorts in place).
arr = [64, 34, 25, 12, 22, 11, 90]
bubbleSort(arr)
print ("Sorted array is:")
for i in range(len(arr)):
    print ("%d" %arr[i])
| 24.37037 | 52 | 0.534954 |
0a467d570d4285e7189fea05fd375a74a3a688dc | 1,685 | py | Python | terminology/snomed_ct/terminology.py | drgriffis/text-essence | db3cacc484d31cbb7c3e9120643120e18a91c3af | [
"BSD-3-Clause"
] | 7 | 2021-03-26T07:10:40.000Z | 2021-08-09T14:24:56.000Z | terminology/snomed_ct/terminology.py | drgriffis/text-essence | db3cacc484d31cbb7c3e9120643120e18a91c3af | [
"BSD-3-Clause"
] | 1 | 2021-08-09T15:31:48.000Z | 2021-08-09T15:32:20.000Z | terminology/snomed_ct/terminology.py | drgriffis/text-essence | db3cacc484d31cbb7c3e9120643120e18a91c3af | [
"BSD-3-Clause"
] | null | null | null | from .parsers import *
from hedgepig_logger import log
class SnomedTerminology:
    """In-memory SNOMED CT terminology: loads concepts, then attaches their
    descriptions and text definitions, from the three RF2 release files.

    Iterating the object yields Concept records (see ``parsers``).
    """

    def __init__(self, concepts_file, descriptions_file, definitions_file, language_codes=None, verbose=False):
        concepts_by_ID = {}
        # Progress logging is a no-op unless verbose was requested.
        if verbose:
            status = lambda s: log.writeln('[SnomedTerminology] %s' % s)
        else:
            status = lambda s: None

        status('Loading Concepts...')
        with ConceptParser(concepts_file, language_codes=language_codes) as parser:
            for concept in parser:
                concepts_by_ID[concept.ID] = concept
        status('Loaded {0:,} concepts.'.format(len(concepts_by_ID)))

        status('Loading Descriptions...')
        ctr = 0
        with DescriptionParser(descriptions_file, language_codes=language_codes) as parser:
            for description in parser:
                # Descriptions referencing unknown concepts are dropped.
                if description.concept_ID in concepts_by_ID:
                    concepts_by_ID[description.concept_ID].descriptions.append(description)
                # NOTE(review): indentation was lost in this copy of the file;
                # ctr may belong inside the `if` (counting only attached
                # records) — confirm against the original source.
                ctr += 1
        status('Loaded {0:,} Descriptions.'.format(ctr))

        status('Loading Definitions...')
        ctr = 0
        with TextDefinitionParser(definitions_file, language_codes=language_codes) as parser:
            for definition in parser:
                if definition.concept_ID in concepts_by_ID:
                    # Last definition seen for a concept wins.
                    concepts_by_ID[definition.concept_ID].definition = definition
                # NOTE(review): same nesting ambiguity as above.
                ctr += 1
        status('Loaded {0:,} Definitions.'.format(ctr))

        self.concepts = list(concepts_by_ID.values())

    def __iter__(self):
        return iter(self.concepts)

    def __len__(self):
        return len(self.concepts)
| 34.387755 | 111 | 0.623739 |
9e359a7006040cc85de22a60bc84c4eb5c105450 | 2,413 | py | Python | pingmystuff/main.py | larssont/PingMyStuff | 22fb1aa12b88b2a8de37a2fc35ec3367f364f1c5 | [
"MIT"
] | null | null | null | pingmystuff/main.py | larssont/PingMyStuff | 22fb1aa12b88b2a8de37a2fc35ec3367f364f1c5 | [
"MIT"
] | null | null | null | pingmystuff/main.py | larssont/PingMyStuff | 22fb1aa12b88b2a8de37a2fc35ec3367f364f1c5 | [
"MIT"
] | null | null | null | import requests
import yaml
import argparse
from cerberus import Validator
from schema_config import schema
config = {}
def load_config(file):
    """Load the YAML file at ``file`` into the module-level ``config`` dict."""
    global config
    with open(file) as file:  # NOTE: rebinds the parameter to the file object
        config = yaml.load(file, Loader=yaml.FullLoader)
def write_config(file):
    """Dump the (possibly updated) ``config`` dict back to ``file`` as YAML."""
    with open(file, "w") as file:
        yaml.dump(config, file)
def validate_conf():
    """Validate the global ``config`` against the Cerberus ``schema``.

    Raises:
        Exception: carrying the validator's error map when validation fails.
    """
    v = Validator(schema)
    if not v.validate(config):
        raise Exception(v.errors)
def insert_str_vars(str_, str_vars):
    """Render ``{{ name }}`` placeholders in ``str_`` from ``str_vars``.

    Each key in ``str_vars`` replaces every occurrence of the literal
    placeholder ``{{ key }}``; values are stringified first. Placeholders
    without a matching key are left untouched.
    """
    rendered = str_
    for name, value in str_vars.items():
        rendered = rendered.replace("{{ %s }}" % name, str(value))
    return rendered
def send_notification(notifier_opt, status, site):
    """POST a status-change message for ``site`` to one notifier endpoint.

    The message template ("up"/"down") is rendered with the site's name,
    address and new status, stored under the notifier's ``msgDataKey`` in
    the POST payload.
    """
    site_opt = config["sites"][site]

    # BUG FIX: previously `data` aliased the dict stored in the global
    # config, so the rendered message was injected into `config` and then
    # persisted to the YAML file by write_config(). Work on a copy instead.
    data = dict(notifier_opt.get("data") or {})

    conf_vars = {"name": site, "address": site_opt["address"], "status": status}
    msg = get_status_text(status, config["sites"][site]["consider_up"])
    msg = insert_str_vars(msg, conf_vars)
    data[notifier_opt["msgDataKey"]] = msg

    address = notifier_opt["address"]
    requests.post(address, data=data)
def call_notifiers(site, status):
    """Fan the status change for ``site`` out to every notifier that lists it."""
    for notifier_opt in config["notifiers"].values():
        if site in notifier_opt["sites"]:
            send_notification(notifier_opt, status, site)
def get_status(address):
    """Return the HTTP status code from GET-ing ``address``.

    NOTE(review): network failures (timeouts, DNS errors) propagate as
    requests exceptions instead of being reported as "down" — confirm
    whether that is intended.
    """
    return requests.get(address).status_code
def get_status_text(status, status_list):
    """Select the configured message template for ``status``.

    Returns the "up" template when ``status`` is one of the codes in
    ``status_list``, otherwise the "down" template.
    """
    key = "up" if status in status_list else "down"
    return config["message"][key]
def has_status_changed(site_opt, new_status):
    """Report whether a site transitioned between "up" and "down".

    A change is only significant when the status differs from the stored
    one AND at least one of the two codes counts as "up" — i.e. flapping
    between two "down" codes is ignored. A missing stored status counts
    as different from any new status.
    """
    up_codes = site_opt["consider_up"]
    previous = site_opt.get("status")
    if new_status != previous and (previous in up_codes or new_status in up_codes):
        return True
    return False
def get_args():
    """Parse command-line arguments (one positional: the YAML config path)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("config", help="YAML config file path")
    return parser.parse_args()
def main():
    """Entry point: check every configured site and notify on up/down transitions."""
    args = get_args()
    load_config(args.config)
    validate_conf()
    sites = config["sites"]
    for site, site_opt in sites.items():
        new_status = get_status(site_opt["address"])
        if has_status_changed(site_opt, new_status):
            call_notifiers(site, new_status)
        # Record the latest status so the next run can detect a transition.
        # NOTE(review): indentation was lost in this copy — confirm this
        # assignment sits at loop level (not inside the `if`) in the original.
        sites[site]["status"] = new_status
    # Persist statuses: the config file doubles as the state store.
    write_config(args.config)


if __name__ == "__main__":
    main()
| 24.13 | 80 | 0.663489 |
e9126de03f747fc365be94fcbb96e4f118c456a3 | 1,007 | py | Python | property/migrations/0010_owner.py | A1exander-Pro/real_estate_lesson | 95a7fe12338e39baf23204605b8c37cb53c43f81 | [
"MIT"
] | null | null | null | property/migrations/0010_owner.py | A1exander-Pro/real_estate_lesson | 95a7fe12338e39baf23204605b8c37cb53c43f81 | [
"MIT"
] | null | null | null | property/migrations/0010_owner.py | A1exander-Pro/real_estate_lesson | 95a7fe12338e39baf23204605b8c37cb53c43f81 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2021-02-09 14:06
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('property', '0009_auto_20210209_1430'),
]
operations = [
migrations.CreateModel(
name='Owner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=200, verbose_name='ФИО Владельца')),
('phone_number', models.CharField(max_length=20, verbose_name='Номер владельца')),
('pure_phone_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None, verbose_name='Нормализованный номер владельца')),
('owned_apartments', models.ManyToManyField(related_name='owners', to='property.Flat', verbose_name='Квартиры в собственности')),
],
),
]
| 40.28 | 175 | 0.658391 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.