content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
from itertools import product


def find_flag(bits, flag_marker=b'CCTF{', max_pad=10):
    """Recover a flag from a bit string that may have lost leading/trailing 1-bits.

    Tries every combination of prepending ``i`` and appending ``j`` '1' bits
    (0 <= i, j < max_pad), decodes the result as a big-endian byte string and
    returns the first candidate containing ``flag_marker``.

    :param bits: string of '0'/'1' characters.
    :param flag_marker: byte marker identifying a valid decode.
    :param max_pad: exclusive upper bound on the pad length tried on each side.
    :return: the decoded bytes containing the marker, or None if no padding works.
    """
    for i, j in product(range(max_pad), repeat=2):
        padded = '1' * i + bits + '1' * j
        try:
            hx = f'{int(padded, 2):x}'
        except ValueError:  # was a bare `except:` — only int() can fail here
            continue
        # bytes.fromhex requires an even number of digits; the original bare
        # except silently discarded every odd-length candidate.
        if len(hx) % 2:
            hx = '0' + hx
        candidate = bytes.fromhex(hx)
        if flag_marker in candidate:
            return candidate
    return None


if __name__ == '__main__':
    with open('output.txt') as f:
        s = f.read().strip()
    flag = find_flag(s)
    if flag is not None:
        print(flag)
nilq/baby-python
python
class Solution:
    def setZeroes(self, matrix):
        """
        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.
        """
        # First pass: record which rows/columns contain a zero.  Sets give
        # O(1) membership tests (the original used lists, making the second
        # pass O(rows*cols*zeros)), and the original's `row`/`column` names
        # held the opposite of what they said.
        zero_rows = set()
        zero_cols = set()
        for i, row in enumerate(matrix):
            for j, val in enumerate(row):
                if val == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        # Second pass: blank out every cell in a marked row or column.
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0
nilq/baby-python
python
import pytest
from numpy.testing import assert_array_almost_equal
from Auto import *


class Test_AutoSample:
    """Smoke test: AutoSample draws a sample of the requested shape."""

    @classmethod
    def setup_method(cls):
        # Fixed seed keeps the sampler deterministic between runs.
        np.random.seed(123)
        # Target density: exp(-x) truncated to the non-negative half-line.
        cls.target = lambda x: np.where(x < 0, 0, np.exp(-x))
        cls.shape = (1,)
        cls.njobs = 1
        cls.algo = AutoSample(target=cls.target, shape=cls.shape, njobs=cls.njobs)

    @pytest.mark.filterwarnings("ignore::UserWarning")
    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
    def test_sample(self):
        drawn = self.algo.sample(size=1, chains=1)
        # One chain of one draw over a scalar target -> (1, 1).
        assert drawn.shape == (1, 1)
nilq/baby-python
python
#!python
# This generates a java source file by taking each method that has a
# parameters (String s, int off, int end) and generating a copy that
# takes (char[] s, int off, int end).

# Fix emacs syntax highlighting "
src = r"""
// Copyright (C) 2011 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.autoesc;

import java.io.IOException;
import java.io.Writer;

import javax.annotation.Nullable;

/** XML contains utilities for dealing with XML contexts. */
class XML {
  static final ReplacementTable REPLACEMENT_TABLE = new ReplacementTable()
      .add('`', "&#96;")
      .add('<', "&lt;")
      .add('>', "&gt;")
      .add('+', "&#43;")
      .add('\'', "&#39;")
      .add('&', "&amp;")
      .add('"', "&#34;")
      // XML cannot contain NULs even if encoded, so treat NUL as an error case
      // and replace it with U+FFFD, the replacement character.
      .add((char) 0, "\ufffd");
  static final ReplacementTable NORM_REPLACEMENT_TABLE
      = new ReplacementTable(REPLACEMENT_TABLE)
      .add('&', null);

  /** escapeOnto escapes for inclusion in XML text. */
  static void escapeOnto(@Nullable Object o, Writer out) throws IOException {
    String safe = ContentType.Markup.derefSafeContent(o);
    if (safe != null) {
      out.write(safe);
      return;
    }
    REPLACEMENT_TABLE.escapeOnto(o, out);
  }

  /** escapeOnto escapes for inclusion in XML text. */
  static void escapeOnto(String s, int off, int end, Writer out)
      throws IOException {
    REPLACEMENT_TABLE.escapeOnto(s, off, end, out);
  }

  /**
   * normalizeOnto escapes for inclusion in XML text but does not break
   * existing entities.
   */
  static void normalizeOnto(@Nullable Object o, Writer out)
      throws IOException {
    String safe = ContentType.Markup.derefSafeContent(o);
    if (safe != null) {
      out.write(safe);
      return;
    }
    NORM_REPLACEMENT_TABLE.escapeOnto(o, out);
  }

  /**
   * normalizeOnto escapes for inclusion in XML text but does not break
   * existing entities.
   */
  static void normalizeOnto(String s, int off, int end, Writer out)
      throws IOException {
    NORM_REPLACEMENT_TABLE.escapeOnto(s, off, end, out);
  }

  /**
   * escapeCDATAOnto emits the text unchanged assuming it will go inside a
   * {@code <![CDATA[...]]>} block unless the string contains {@code "]]>"} or
   * starts or ends with a prefix or suffix thereof in which case it splits the
   * CDATA section around that chunk and resumes on the other side:
   * {@code "foo]]>bar"} &rarr; {@code "foo]]]]><![CDATA[>bar"}.
   * Any buggy regex based XML parsers that allow CDATA sections to contain
   * {@code "]]>"} by using surrounding tags as boundaries (e.g. looking for
   * {@code /<tag><!\[CDATA\[(.*?)\]\]><\/tag>/} can simply remove all
   * all occurrences of {@code "]]><![CDATA["}.
   */
  static void escapeCDATAOnto(String s, int offset, int end, Writer out)
      throws IOException {
    if (offset >= end) { return; }
    int off = offset;
    // Elide all NULs which are not strictly allowed in XML.
    for (int i = off; i < end; ++i) {
      if (s.charAt(i) == 0) {
        StringBuilder sb = new StringBuilder(end - off);
        for (i = off; i < end; ++i) {
          char ch = s.charAt(i);
          if (ch != 0) {
            sb.append(ch);
          }
        }
        escapeCDATAOnto(sb.toString(), 0, sb.length(), out);
        return;
      }
    }
    // Make sure the start of the string can't combine with any characters
    // already on out to break out of the CDATA section.
    {
      char ch0 = s.charAt(off);
      if (ch0 == '>'
          || (ch0 == ']' && off + 1 < end && s.charAt(off + 1) == '>')) {
        out.write("]]><![CDATA[");
      }
    }
    for (int i = off; i < end - 2; ++i) {
      if (s.charAt(i)== ']' && s.charAt(i + 1) == ']'
          && s.charAt(i + 2) == '>') {
        out.write(s, off, i - off);
        out.write("]]]]><![CDATA[>");
        i += 2;
        off = i + 1;
      }
    }
    out.write(s, off, end - off);
    // Prevent the next character written to out from combining with trailing
    // characters from s to form "]]>".
    if (s.charAt(end - 1) == ']') {
      out.write("]]><![CDATA[");
    }
  }

  /**
   * escapeCDATAOnto escapes for inclusion in an XML {@code <![CDATA[...]]>}
   * section.
   */
  static void escapeCDATAOnto(@Nullable Object o, Writer out)
      throws IOException {
    if (o == null) { return; }
    if (o instanceof char[]) {
      char[] chars = (char[]) o;
      escapeCDATAOnto(chars, 0, chars.length, out);
    } else {
      String s = o.toString();
      escapeCDATAOnto(s, 0, s.length(), out);
    }
  }
}
"""
# Fix emacs syntax highlighting "

import dupe_methods
# BUG FIX: `print dupe_methods.dupe(src)` is Python 2 statement syntax and is
# a SyntaxError under Python 3; the call form works on both interpreters.
print(dupe_methods.dupe(src))
nilq/baby-python
python
def print_me(y):
    """Return *y* offset by ten."""
    return y + 10  # pragma: no cover


def return_val(val):
    """Return *val* incremented by one."""
    return val + 1


def return_val2(val):
    """Return *val* incremented by one (mirror of return_val)."""
    return val + 1
nilq/baby-python
python
""" endpoint schemas for knoweng """
nilq/baby-python
python
"""Integration tests for dice_roller.py""" import unittest import dice_roller class DiceRollerIntegrationTests(unittest.TestCase): """ Integration tests for DiceRoller that check that history() and clear() are working """ def test_no_history(self): """ test that .history() returns {} when no rolls have been made """ dice_roller_instance = dice_roller.DiceRoller() self.assertEqual(dice_roller_instance.history(), {}) def test_history(self): """ test .history() returns the correct output after running .roll() """ dice_roller_instance = dice_roller.DiceRoller() # run 4 rolls, save the results so we can get the roll result for the assert below result_0 = dice_roller_instance.roll((1, 20)) result_1 = dice_roller_instance.roll((1, 20), (2, 10), (1, 100)) result_2 = dice_roller_instance.roll((1, 20), (2, 10), (1, 100)) result_3 = dice_roller_instance.roll((10, 1)) self.assertEqual( dice_roller_instance.history(), { 'roll_0': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}}, 'result': result_0[0], 'min': 1, 'max': 20, 'median': 10.5}, 'roll_1': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}, 'dice_1': {'number_of_dice': 2, 'number_of_sides': 10}, 'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}}, 'result': result_1[0], 'min': 4, 'max': 140, 'median': 72.0}, 'roll_2': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}, 'dice_1': {'number_of_dice': 2, 'number_of_sides': 10}, 'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}}, 'result': result_2[0], 'min': 4, 'max': 140, 'median': 72.0}, 'roll_3': {'dice': {'dice_0': {'number_of_dice': 10, 'number_of_sides': 1}}, 'result': result_3[0], 'min': 10, 'max': 10, 'median': 10.0} } ) def test_history_with_invalid_inputs(self): """ test that .history() is not messed up by invalid rolls """ dice_roller_instance = dice_roller.DiceRoller() result_0 = dice_roller_instance.roll((1, 15)) result_1 = dice_roller_instance.roll((1, 30), (2, 10), (1, 100)) try: 
dice_roller_instance.roll((0, 5)) except ValueError: pass try: dice_roller_instance.roll((10, 0)) except ValueError: pass result_2 = dice_roller_instance.roll((1, 20), (2, 10)) result_3 = dice_roller_instance.roll((5, 4)) result_4 = dice_roller_instance.roll() self.assertEqual( dice_roller_instance.history(), { 'roll_0': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 15}}, 'result': result_0[0], 'min': 1, 'max': 15, 'median': 8.0}, 'roll_1': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 30}, 'dice_1': {'number_of_dice': 2, 'number_of_sides': 10}, 'dice_2': {'number_of_dice': 1, 'number_of_sides': 100}}, 'result': result_1[0], 'min': 4, 'max': 150, 'median': 77.0}, 'roll_2': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}, 'dice_1': {'number_of_dice': 2, 'number_of_sides': 10}}, 'result': result_2[0], 'min': 3, 'max': 40, 'median': 21.5}, 'roll_3': {'dice': {'dice_0': {'number_of_dice': 5, 'number_of_sides': 4}}, 'result': result_3[0], 'min': 5, 'max': 20, 'median': 12.5}, 'roll_4': {'dice': {'dice_0': {'number_of_dice': 1, 'number_of_sides': 20}}, 'result': result_4[0], 'min': 1, 'max': 20, 'median': 10.5}, } ) def test_clear_history(self): """ test that .clear() empties the instance's record and that .history() returns {} """ dice_roller_instance = dice_roller.DiceRoller() dice_roller_instance.roll((1, 200)) dice_roller_instance.roll((2, 20), (2, 13), (1, 100)) dice_roller_instance.roll((1, 20), (2, 6), (1, 10)) dice_roller_instance.roll((10, 2)) dice_roller_instance.clear() self.assertEqual(dice_roller_instance.history(), {})
nilq/baby-python
python
from presentation.models import Liked, Author
from django.shortcuts import get_object_or_404
from presentation.Serializers.liked_serializer import LikedSerializer
from presentation.Serializers.author_serializer import AuthorSerializer
from rest_framework import viewsets, status
from rest_framework.response import Response
from urllib.parse import urlparse
from . import urlutil


def getAuthorIDFromRequestURL(request, id):
    """Build the absolute author-id URL for *id* on the requesting host."""
    host = urlutil.getSafeURL(request.build_absolute_uri())
    author_id = f"{host}/author/{id}"
    return author_id


class LikedViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for an author's 'liked' collection."""
    serializer_class = LikedSerializer
    queryset = Liked.objects.all()

    def list(self, request, *args, **kwargs):
        """Return the author's liked items, creating an empty record on first access."""
        author_id = getAuthorIDFromRequestURL(request, self.kwargs['author_id'])
        # 404 early when the author does not exist.
        author_ = get_object_or_404(Author, id=author_id)
        items = Liked.objects.filter(author=author_id)
        if items.exists():
            for item in items:
                item.id = None
            # BUG FIX: the original returned JsonResponse(items, safe=False)
            # but JsonResponse was never imported (NameError at runtime);
            # DRF's Response serializes the list just as well.
            return Response(list(items.values()))
        else:
            Liked.objects.create(author=author_id)
            return Response({
                'type': 'liked',
                'author': author_id,
                'items': []
            })

    def retrieve(self, request, *args, **kwargs):
        """Return the single Liked record for the author in the URL."""
        author_id = getAuthorIDFromRequestURL(request, self.kwargs['author_id'])
        liked = Liked.objects.get(author=author_id)
        # BUG FIX: the original built `LikedViewSet(queryset)` — instantiating
        # the viewset itself instead of the serializer.
        serializer = LikedSerializer(liked)
        return Response(serializer.data)
nilq/baby-python
python
"""Train/attack driver for GAFNC: loads a citation dataset, trains/loads a
GCN, runs a node-injection attack (GNNAttack), applies the learned structure
and feature masks, retrains, and records target-attack results."""
import argparse
import sys
import os.path as osp
import os

# Make the project root importable when running from a nested folder.
sys.path.insert(1, osp.abspath(osp.join(os.getcwd(), *('..',)*2)))

from dataset_preprocess import CoraDataset, PlanetoidDataset
from attack.models import *
import torch
import pandas as pd
from tqdm.notebook import tqdm
from attack.GAFNC import GNNAttack
from torch_geometric.utils.loop import add_self_loops, remove_self_loops
import utils
import numpy as np
import pickle

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def index_to_mask(index, size):
    """Return a boolean mask of length *size* that is True at *index*."""
    mask = torch.zeros(size, dtype=torch.bool, device=index.device)
    mask[index] = 1
    return mask


def split_dataset(dataset, new_nodes, train_percent=0.7):
    """Split *dataset* into train/val/test masks, per-class stratified.

    The last *new_nodes* nodes (injected attack nodes) are excluded from the
    split; remaining nodes are shuffled per class, *train_percent* go to
    train, the rest is halved between val and test.  Mutates *dataset*
    (masks, index attributes, collated data) and returns it.
    """
    indices = []
    _size = dataset.data.num_nodes - new_nodes
    y = dataset.data.y[:_size]
    for i in range(dataset.num_classes):
        index = (y == i).nonzero().view(-1)
        index = index[torch.randperm(index.size(0))]
        indices.append(index)
    train_index = torch.cat([i[:int(len(i) * train_percent)] for i in indices], dim=0)
    rest_index = torch.cat([i[int(len(i) * train_percent):] for i in indices], dim=0)
    rest_index = rest_index[torch.randperm(rest_index.size(0))]
    dataset.data.train_mask = index_to_mask(train_index, size=dataset.data.num_nodes)
    dataset.data.val_mask = index_to_mask(rest_index[:len(rest_index) // 2], size=dataset.data.num_nodes)
    dataset.data.test_mask = index_to_mask(rest_index[len(rest_index) // 2:], size=dataset.data.num_nodes)
    dataset.train_index = train_index[:]
    dataset.val_index = rest_index[:len(rest_index) // 2]
    dataset.test_index = rest_index[len(rest_index) // 2:]
    dataset.data, dataset.slices = dataset.collate([dataset.data])
    return dataset


def build_args():
    """Parse and return the command-line arguments for the attack run."""
    def str2bool(v):
        # argparse-friendly boolean parser ('yes'/'no', 't'/'f', '1'/'0', ...).
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='cora', help='name of dataset_preprocess')
    # dataset_name = ['cora', 'citeseer', 'pubmed']
    parser.add_argument('--attack_graph', type=str2bool, default=True, help='global attack')
    parser.add_argument('--node_idx', type=int, default=None, help='no target idx')
    parser.add_argument('--structure_attack', type=str2bool, default=True, help='with structure attack')
    parser.add_argument('--feature_attack', type=str2bool, default=False, help='with feature attack')
    parser.add_argument('--added_node_num', type=int, default=20, help='num of new nodes')
    parser.add_argument('--train_percent', type=float, default=0.7, help='train percent')
    parser.add_argument('--fix_sparsity', type=str2bool, default=True, help='control the attack sparsity')
    parser.add_argument('--sparsity', type=float, default=0.5, help='sparsity')
    parser.add_argument('--feat_sparsity', type=float, default=0.5, help='feat_sparsity')
    parser.add_argument('--random_structure', type=str2bool, default=False, help='random mask')
    parser.add_argument('--random_feature', type=str2bool, default=False, help='random mask of feature')
    parser.add_argument('--edge_size', type=float, default=1e-5, help='edge_size')
    parser.add_argument('--edge_ent', type=float, default=1.0, help='edge_ent')
    parser.add_argument('--node_feat_size', type=float, default=1e-5, help='edge_size')
    parser.add_argument('--node_feat_ent', type=float, default=1.0, help='edge_ent')
    parser.add_argument('--train_epochs', type=int, default=300, help='epochs for training a GNN model')
    parser.add_argument('--attack_epochs', type=int, default=600, help='epochs for attacking a GNN model')
    parser.add_argument('--retrain_epochs', type=int, default=10, help='epochs for retraining a GNN model with new graph')
    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument('--desired_class', type=int, default=None, help='attack specific node to desired class')
    parser.add_argument('--model_name', type=str, default="baseline", help='model variants name')
    parser.add_argument('--indirect_level', type=int, default=0, help='target indirect attack level')
    args = parser.parse_args()
    return args


def fix_random_seed(seed):
    """Seed torch/cuda/numpy and force deterministic cuDNN behaviour."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # multi gpu
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    np.random.seed(seed)


def eval_all(model, data):
    """Evaluate *model* on train/val/test masks.

    Returns [train_loss, test_loss, val_loss, train_acc, test_acc, val_acc]
    (note the loss ordering differs from the accuracy ordering).
    """
    train_loss, train_acc = utils.evaluate(model, data, data.train_mask)
    val_loss, val_acc = utils.evaluate(model, data, data.val_mask)
    test_loss, test_acc = utils.evaluate(model, data, data.test_mask)
    return [train_loss, test_loss, val_loss, train_acc, test_acc, val_acc]


if __name__ == '__main__':
    args = build_args()
    print("args", args)
    fix_random_seed(seed=args.seed)
    ADD_ZERO = 0  # baseline dataset has no injected nodes
    # step 1: load baseline dataset_preprocess
    data_name = args.dataset_name
    if data_name in ["cora", 'photo']:
        baseline = CoraDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
    else:
        # for dataset_preprocess pubmed, and citeseer
        baseline = PlanetoidDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
    split_dataset_name = "baseline_"+data_name+"_split"
    split_path = osp.join('./datasets', split_dataset_name, 'train_percent', str(args.train_percent), 'added_node', str(ADD_ZERO))
    # Reuse a cached split when one exists for this configuration.
    if not osp.isdir(split_path):
        dataset = split_dataset(baseline, ADD_ZERO, train_percent=args.train_percent)
        os.makedirs(split_path)
        torch.save(baseline, osp.join(split_path, 'split_data.pt'))
    else:
        baseline = torch.load(osp.join(split_path, 'split_data.pt'))
    dim_node = baseline.num_node_features
    dim_edge = baseline.num_edge_features
    num_classes = baseline.num_classes
    baseline_model_ckpt_path = osp.join('checkpoints', data_name, str(args.train_percent), 'GCN_2l', 'seed', '0', 'GCN_2l_best.ckpt')
    # step 2: attack
    # add new nodes to origin dataset_preprocess
    added_node_num = args.added_node_num
    added_data_name = data_name + "-added"
    if data_name in ["cora", 'photo']:
        added_dataset = CoraDataset('./datasets', added_data_name, added_new_nodes=added_node_num)
    else:
        added_dataset = PlanetoidDataset('./datasets', added_data_name, added_new_nodes=added_node_num)
    if args.feature_attack:
        # Injected nodes start with all-ones features when feature attack is on.
        added_dataset.data.x[-added_node_num:] = 1
        print("feature attack ", added_dataset.data.x[-added_node_num:])
    # Injected nodes are appended after the baseline nodes and join the train set.
    added_indices = torch.as_tensor(list(range(baseline.data.num_nodes, baseline.data.num_nodes+added_node_num)))
    add_train_index = torch.cat((baseline.train_index, added_indices), dim=0)
    added_dataset.data.train_mask = index_to_mask(add_train_index, size=added_dataset.data.num_nodes)
    added_dataset.data.val_mask = index_to_mask(baseline.val_index, size=added_dataset.data.num_nodes)
    added_dataset.data.test_mask = index_to_mask(baseline.test_index, size=added_dataset.data.num_nodes)
    added_dataset.data, added_dataset.slices = added_dataset.collate([added_dataset.data])
    # step 2.1: load model
    print(" step 2.1: loading base model for attack")
    model = GCN_2l(model_level='node', dim_node=dim_node, dim_hidden=16, num_classes=num_classes)
    model.to(device)
    model.load_state_dict(torch.load(baseline_model_ckpt_path, map_location=device)['state_dict'])
    # step 2.2 attack
    attack_graph = args.attack_graph
    if attack_graph:
        # Global (untargeted) attack.
        print(" args.structure_attack", args.structure_attack)
        attacker = GNNAttack(model, new_node_num=added_node_num, epochs=args.attack_epochs, lr=0.005,
                             attack_graph=attack_graph, mask_features=args.feature_attack,
                             mask_structure=args.structure_attack, indirect_level=args.indirect_level,
                             random_structure=args.random_structure, random_feature=args.random_feature, args=args)
    else:
        # Targeted attack: requires --node_idx and --desired_class.
        # print(" random choise one id from test part of the datasete")
        # print(" test index is", baseline.test_index[0])
        # args.node_idx =
        print(" node idx is", args.node_idx)
        # args.node_idx = baseline.test_index[0].item()
        origin_label = baseline.data.y[args.node_idx]
        # args.desired_class = 2
        print(" target id is ", args.node_idx, " origin label is", origin_label, "desired label is ", args.desired_class)
        if args.node_idx == None and args.desired_class == None:
            print(" target attack, please input your target node id, and desired class id")
            exit(-1)
        attacker = GNNAttack(model, new_node_num=added_node_num, epochs=args.attack_epochs, lr=0.005,
                             attack_graph=attack_graph, mask_features=args.feature_attack,
                             mask_structure=args.structure_attack, indirect_level=args.indirect_level,
                             random_structure=args.random_structure, random_feature=args.random_feature, args=args)
    attacker.to(device)
    sparsity = args.sparsity
    feat_sparsity = args.feat_sparsity
    fix_sparsity = args.fix_sparsity
    data = added_dataset.data
    data.to(device)
    print(" input file args is", args)
    walks, structure_masks, feature_masks, structure_sp, feature_sp = attacker(
        data.x, data.edge_index, data.y,
        fix_sparsity=fix_sparsity, sparsity=sparsity, feat_sparsity=feat_sparsity,
        num_classes=num_classes)
    print(" strucutre sparisty =", structure_sp, " feature sparsity = ", feature_sp)
    # check train dataset predict shift
    # model.eval()
    # tmp_list = []
    # with torch.no_grad():
    #     output = model(baseline.data.x, baseline.data.edge_index, None)
    #     pred_class = torch.argmax(output[args.node_idx], dim=0).item()
    # path = f'results/{data_name}/target_attack/added_node_{added_node_num}/train_percent_{args.train_percent}/desired_class_{args.desired_class}'
    # if not osp.isdir(path):
    #     os.makedirs(path)
    # print(" pred class is, ", pred_class)
    # file = f'{path}/train_model_res.csv'
    # cols=["ID", "desired_class", "pred_class", "pred_score"]
    # tmp_list.append([args.node_idx, args.desired_class, pred_class, output[args.node_idx]])
    # df = pd.DataFrame(tmp_list, columns=cols)
    # if not os.path.isfile(file):
    #     df.to_csv(file, index=False)
    # else:
    #     prev_res = pd.read_csv(file)
    #     final_res = pd.concat([df, prev_res],ignore_index=True)
    #     final_res.reset_index()
    #     final_res.to_csv(file, index=False)
    # exit(-2)
    # step 2.3 apply learned mask to added_dataset
    # step 2.3.1 apply structure mask to dataset_preprocess
    print("mask dim", added_dataset.data.num_nodes)
    print(" edge index", added_dataset.data.edge_index.shape)
    print(" structur mask is", structure_masks)
    # NOTE(review): edges whose mask value is +inf are the ones kept — TODO
    # confirm against GNNAttack's mask convention.
    filter_indices = (structure_masks[0] == float('inf')).nonzero(as_tuple=True)[0]
    print(" filter indices = ", filter_indices)
    print(" filter indeices", filter_indices.shape)
    edge_index_with_loop, _ = add_self_loops(added_dataset.data.edge_index, num_nodes=added_dataset.data.num_nodes)
    added_dataset.data.edge_index = edge_index_with_loop
    print("dataset_preprocess.data.edge_index", added_dataset.data.edge_index.shape)
    added_dataset.data.edge_index = torch.index_select(added_dataset.data.edge_index, 1, filter_indices.to(device))
    print("after filter dataset_preprocess.data.edge_index", added_dataset.data.edge_index.shape)
    # step 2.3.2 apply feature mask to added_dataset
    if attacker.mask_features:
        added_dataset.data.x[-added_node_num:] *= feature_masks[0]
    # step 3: retrain model in changed dataset_preprocess
    del model
    model = GCN_2l(model_level='node', dim_node=added_dataset.num_node_features, dim_hidden=16, num_classes=added_dataset.num_classes)
    model.to(device)
    attack_ckpt_fold = osp.join('attack_checkpoints', data_name, str(added_node_num), 'GCN_2l')
    if not osp.isdir(attack_ckpt_fold):
        os.makedirs(attack_ckpt_fold)
    attack_ckpt_path = osp.join(attack_ckpt_fold, 'GCN_2l_best.ckpt')
    utils.train(model, added_dataset.data, attack_ckpt_path, lr=0.005, epochs=args.train_epochs, verbose=True)
    # [_, _, _, train_acc, test_acc, val_acc] = eval_all(model, added_dataset.data)
    if not args.attack_graph:
        # Targeted attack: record whether the target node now predicts the
        # desired class and, on success, dump data for k-hop visualisation.
        path = f'results/target_attack/{data_name}/added_node_{added_node_num}/train_percent_{args.train_percent}/desired_class_{args.desired_class}'
        if not osp.isdir(path):
            os.makedirs(path)
        model.eval()
        with torch.no_grad():
            output = model(added_dataset.data.x, added_dataset.data.edge_index, None)
        success = None
        print(" node idx = ", args.node_idx)
        print(" output shape ", output.shape, output[args.node_idx], type(output[args.node_idx]))
        pred_class = torch.argmax(output[args.node_idx], dim=0).item()
        print(" pred class = ", pred_class)
        origin = added_dataset.data.y[args.node_idx]
        print(" origin", origin, "desired class", args.desired_class)
        cols = ["id", "pred_class", "desired_class", "success", "vis_path", "structure_sp", "feature_sp", "pred_score"]
        tmp_list = []
        vis_file = None
        if pred_class == args.desired_class:
            success = True
            vis_file = f'{path}/target_attack_{str(args.node_idx)}_{str(structure_sp)}_feature_sparsity_{str(feature_sp)}_dataset.pkl'
            utils.save_to_file([added_dataset.data.edge_index.to('cpu'), torch.argmax(output.to('cpu'), dim=1), added_dataset.data.x[:]], vis_file)
            baseline_fold = osp.join('./results/target_attack', data_name)
            baseline_vis_file = f'{baseline_fold}/train_percent_{args.train_percent}_baseline_A_X_res.pkl'
            # plot 1-hop and 2-hop figures center by node id
            with open(baseline_vis_file, 'rb') as f:
                edge_indx, pred, att = pickle.load(f)
            utils.viz_k_hop_op(edge_indx, pred, args.node_idx, 1, path, f'origin_center_node_{str(args.node_idx)}_hops_{str(1)}')
            utils.viz_k_hop_op(edge_indx, pred, args.node_idx, 2, path, f'origin_center_node_{str(args.node_idx)}_hops_{str(2)}')
            with open(vis_file, 'rb') as f:
                attack_edge_indx, attack_pred, attack_att = pickle.load(f)
            utils.viz_k_hop_op(attack_edge_indx, attack_pred, args.node_idx, 1, path, f'attack_center_node_{str(args.node_idx)}_hops_{str(1)}')
            utils.viz_k_hop_op(attack_edge_indx, attack_pred, args.node_idx, 2, path, f'attack_center_node_{str(args.node_idx)}_hops_{str(2)}')
        else:
            success = False
        tmp_list.append([args.node_idx, pred_class, args.desired_class, success, vis_file, structure_sp, feature_sp, output[args.node_idx]])
        df = pd.DataFrame(tmp_list, columns=cols)
        file = f'{path}/res.csv'
        # Prepend the new result row to any existing results file.
        if not os.path.isfile(file):
            df.to_csv(file, index=False)
        else:
            prev_res = pd.read_csv(file)
            final_res = pd.concat([df, prev_res], ignore_index=True)
            final_res.reset_index()
            final_res.to_csv(file, index=False)
nilq/baby-python
python
import sympy class Curtis: type = 0 # module for computing zUy and UxU deodhar = 0 # Bruhat form bruhat = 0 # the Chevalley group group = 0 # the Weyl group weyl = 0 # standard parabolics para = 0 # distinguished expressions for standard parabolics dist_expr_p = 0 # Deodhar cells D = 0 # Deodhar cells DI-form DI = 0 # Deodhar cells in zUyi form zUyi = 0 # Deodhar cells in UxU form UxU = 0 # the toral elements for the basis of the Hecke algebra of a GG-rep # given explicitly in derived classes tori = [] # a second list of the same tori with "primed" variables tori2 = [] # a third list of the same tori with "double primed" variables tori3 = [] def __init__(self, t): self.type = t self.deodhar = self.type.deodhar self.bruhat = self.type.bruhat self.group = self.type.group self.weyl = self.type.weyl self.para = self.type.parabolics self.dist_expr_p = self.extract_para_dist_expr() # needs dist_expr_p: # self.load_cells() """ Selecting those distinguished expressions corresponding to standard parabolic subgroups """ def extract_para_dist_expr(self): de = self.weyl.dist_expr w0w = self.para.w0w result = [] for i in range(len(de)): e = de[i] if e[0][0] in w0w and \ e[0][1] in w0w and \ e[0][2] in w0w: result.append(e + [i]) return result """ Select cells corresponding to dist_expr_p --- needs dist_expr_p """ def load_cells(self): dep = self.dist_expr_p self.D = [] self.DI = [] self.zUyi = [] self.UxU = [] for e in dep: pos = e[len(e) - 1] tmpD = [] tmpDI = [] tmpzUyi = [] tmpUxU = [] for j in range(len(e[1])): # D and zUyi uyiu = self.deodhar.cell_UyiU(pos, j) tmpzUyi.append(uyiu) # DI and UxU uxu = self.deodhar.cell_Ux(pos, j) tmpUxU.append(uxu) self.D.append(tmpD) self.DI.append(tmpDI) self.zUyi.append(tmpzUyi) self.UxU.append(tmpUxU) """ prepare the two forms of the cell """ def prepare_zUy_UxU(self, ii, j): de = self.weyl.dist_expr x = de[ii][0][0] y = de[ii][0][1] z = de[ii][0][2] nx = self.group.w_to_n(self.weyl.word(x)) ny = self.group.w_to_n(self.weyl.word(y)) nz = 
self.group.w_to_n(self.weyl.word(z)) ty = self.para.w0w.index(y) ty = self.tori2[ty] tyi = self.group.invert(ty) ytyi = self.group.conjugate_left(ny, tyi) tz = self.para.w0w.index(z) tz = self.tori3[tz] ztz = self.group.conjugate_left(nz, tz) uyiu = self.deodhar.cell_UyiU(ii, j) uxu = self.deodhar.cell_Ux(ii, j) uyiu = self.bruhat.split_strict_Bruhat(uyiu, n_coef=-1) ytyi0 = ytyi + self.group.invert(uyiu[2]) uxu = self.bruhat.split_strict_Bruhat(uxu) uxu[0] = self.group.conjugate_left(ztz, uxu[0]) ztzx = self.group.conjugate_right(ztz, nx) if nx != uxu[1]: print("curtis.prepare_zUy_UxU: this should not be!") uxu[3] = uxu[3] + self.group.invert(uyiu[3]) uxu[3] = self.group.conjugate_right(uxu[3], ytyi0) uxu[2] = uxu[2] + ztzx + ytyi0 uy = uyiu[0] + uyiu[1] uxu = uxu[0] + uxu[1] + self.group.canonic_th(uxu[2]) + self.group.canonic_u(uxu[3]) for i in range(len(uy)): uy[i] = [uy[i][0], uy[i][1], sympy.simplify(uy[i][2])] for i in range(len(uxu)): uxu[i] = [uxu[i][0], uxu[i][1], sympy.simplify(uxu[i][2])] return [uy, uxu] """ Get condition for toral elements to represent the same cell --- we need t0 in zUyi*t0 --- we need t00 in Uxt00U [z*tz][U][(y*ty)^-1]t = [tz^(z^-1)][z][U][y^-1][(ty^-1)^(y^-1)] = [tz^(z^-1)][zUyi][t0^-1][(ty^-1)^(y^-1)] = [tz^(z^-1)][UxU][t0^-1][(ty^-1)^(y^-1)] = [tz^(z^-1)][U][x][t00][U][t0^-1][(ty^-1)^(y^-1)] """ def structure_equation(self, i, j): x = self.dist_expr_p[i][0][0] y = self.dist_expr_p[i][0][1] z = self.dist_expr_p[i][0][2] # copiem ca sa nu modificam zUyi = [list(e) for e in self.zUyi[i][j]] UxU = [list(e) for e in self.UxU[i][j]] xx = self.weyl.word(x) xx = self.group.w_to_n(xx) yy = self.weyl.word(y) yy = self.group.w_to_n(yy) zz = self.weyl.word(z) zz = self.group.w_to_n(zz) # # toral part for y # # the order is important # this is the correct order to get t0 on the right t0 = yy + zUyi[1] + zUyi[2] t0 = self.group.canonic_nt(t0) if not self.group.all_t(t0): print("curtis.structure_equation: This should not be! 
(t0)") # # toral part for x # xxi = self.group.invert(xx) # the order is important # this is the correct order to get t0 on the right t00 = xxi + UxU[1] + UxU[2] t00 = self.group.canonic_nt(t00) if not self.group.all_t(t00): print("curtis.structure_equation: This should not be! (t00)") # # tz and ty # tz = self.para.w0w.index(z) # use the second set of variables for z tz = self.tori2[tz] ty = self.para.w0w.index(y) ty = self.tori[ty] # bring to other form # left U zztz = self.group.conjugate_left(zz, tz) UxU[0] = self.group.conjugate_left(zztz, UxU[0]) xxizztz = self.group.conjugate_right(zztz, xxi) # right U t0i = self.group.invert(t0) UxU[3] = self.group.conjugate_right(UxU[3], t0i) tyi = self.group.invert(ty) yytyi = self.group.conjugate_left(yy, tyi) UxU[3] = self.group.conjugate_right(UxU[3], yytyi) tt = xxizztz + t00 + t0i + yytyi tt = self.group.canonic_t(tt) return [tt, zUyi, UxU] """ Truncate the unipotent part and bring the two forms of the cells in the right form for the structure constants of the Hecke algebra of a GG-rep """ def Hecke_GG_form(self, i, j): [tt, zUyi, UxU] = self.structure_equation(i, j) Uyz = self.group.truncate_u_sr(zUyi[0]) # # just added !!! 
non-standard # # no Uyz=self.group.invert(Uyz) # no Uyz=self.group.canonic_u(Uyz) # no Uyz=self.group.truncate_u_sr(Uyz) Ux_left = self.group.truncate_u_sr(UxU[0]) Ux_right = self.group.truncate_u_sr(UxU[3]) Ux = Ux_left + Ux_right Ux = self.group.invert(Ux) Ux = self.group.canonic_u(Ux) Ux = self.group.truncate_u_sr(Ux) U = Ux + Uyz U = self.group.canonic_u(U) U = self.group.truncate_u_sr(U) return [tt, zUyi, UxU, U] """ Produce a report for the j-th cell in the i-th case """ def report(self, i, j): [uy, uxu] = self.prepare_zUy_UxU(i, j) uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1) uxu = self.bruhat.split_strict_Bruhat(uxu) de = self.weyl.dist_expr[i] word = self.weyl.word latex = self.group.latex truncate = self.group.truncate_u_sr print("############################") print("CASE: ", i, j) print("CONFIGURATION: ", de[0]) print("DIST EXPR: ", de[1][j]) print("------------------") print("Z: ", word(de[0][2])) print("Y: ", word(de[0][1])) print("X: ", word(de[0][0])) print("------------------") print("U in zUyi:") print("U1: ", latex(truncate(uy[0]))) print("U in UxU:") print(uxu) print("U2: ", latex(truncate(uxu[0]))) print("U3: ", latex(truncate(uxu[3]))) print("------------------") print("Condition on toral element:") print("A) ", latex(uxu[2])) print("------------------") print("U to evaluate psi on:") Ux_left = truncate(uxu[0]) Ux_right = truncate(uxu[3]) Ux = Ux_left + Ux_right Ux = self.group.invert(Ux) Ux = self.group.canonic_u(Ux) Ux = truncate(Ux) U = Ux + uy[0] U = self.group.canonic_u(U) U = truncate(U) U = self.group.simplify_params(U) print(U) print(latex(U)) print("############################") """ Produce a report for the j-th cell in the i-th case """ def report_file(self, i, j): f_name = "data/" + self.type.label + "/reports/" + str(i) + str(j) + ".rep" f_name = f_name.lower() f = open(f_name, "w") # [tt,zUyi,UxU,U]=self.Hecke_GG_form(i,j) [uy, uxu] = self.prepare_zUy_UxU(i, j) uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1) uxu = 
self.bruhat.split_strict_Bruhat(uxu) de = self.weyl.dist_expr[i] word = self.weyl.word latex = self.group.latex truncate = self.group.truncate_u_sr f.write("############################\n") f.write("CASE: " + str(i) + str(j) + "\n") f.write("CONFIGURATION: " + str(de[0]) + "\n") f.write("DIST EXPR: " + str(de[1][j]) + "\n") f.write("------------------") f.write("Z: " + str(word(de[0][2])) + "\n") # f.write("Y^-1t0: ",zUyi[1]+zUyi[2]) f.write("Y: " + str(word(de[0][1])) + "\n") # f.write("Xt00: ",UxU[1]+UxU[2]) f.write("X: " + str(word(de[0][0])) + "\n") f.write("------------------\n") f.write("U in zUyi:") f.write("U1: " + latex(truncate(uy[0])) + "\n") f.write("U2: " + latex(truncate(uxu[0])) + "\n") f.write("U in UxU:") f.write("U3: " + latex(truncate(uxu[3])) + "\n") f.write("------------------\n") f.write("Condition on toral element:\n") f.write("A) " + latex(uxu[2]) + "\n") f.write("------------------\n") f.write("U to evaluate psi on:\n") Ux_left = truncate(uxu[0]) Ux_right = truncate(uxu[3]) Ux = Ux_left + Ux_right Ux = self.group.invert(Ux) Ux = self.group.canonic_u(Ux) Ux = truncate(Ux) U = Ux + uy[0] U = self.group.canonic_u(U) U = truncate(U) U = self.group.simplify_params(U) f.write(latex(U) + "\n") f.write("############################\n") f.close() """ Returns the index in the list dist_expr_p of the case c """ def index(self, c): de = self.dist_expr_p tmp = [i[0] for i in de] return tmp.index(c) def latex_dist_expr(self, i, j): de = self.weyl.dist_expr[i][1][j] result = "$" + str([i + 1 for i in de[0]]) + "$" result += " (of type " t = "" vari = "" for k in range(len(de[0])): if k in de[1][0]: t += "A" vari += "$x_{" + str(k + 1) + "}\in k$, " elif k in de[1][1]: t += "B" vari += "$x_{" + str(k + 1) + "}\in k^{\\ast}$, " elif k in de[1][2]: t += "C" vari += "$x_{" + str(k + 1) + "}=1$, " else: print("curtis.latex_dist_expr: this should not be!") return result += t + ") " + vari return result """ Produce a report for the j-th cell in the i-th case """ 
def report_latex(self, i):
    """Write a LaTeX subsection for the i-th parabolic case.

    Creates ``latex/<type label>/<case>.tex`` and, for every cell j of
    ``dist_expr_p[i]``, writes the distinguished-expression header followed
    by the body produced by :meth:`report_latex_sub`.
    """
    # Index of this case in the full (unpermuted) dist_expr list.
    ii = self.dist_expr_p[i][2]
    w0w = list(self.para.w0w)
    # NOTE: deliberately swap the last two representatives here
    # (w0w[2] <-> w0w[3]); the file name is built from indices into this
    # permuted list.  (Original comment was in Romanian: "attention, I
    # swap the last element with the first here".)
    tmp = w0w[3]
    w0w[3] = w0w[2]
    w0w[2] = tmp
    case = [w0w.index(k) for k in self.dist_expr_p[i][0]]
    case_str = "".join([str(k) for k in case])
    fname = "latex/" + self.type.label + "/" + case_str + ".tex"
    f = open(fname, "w+")
    f.write("\subsection{" + case_str + "}\n")
    f.write("\label{" + case_str + "}\n")
    for j in range(len(self.dist_expr_p[i][1])):
        f.write(self.latex_dist_expr(ii, j) + ":\n")
        # report_latex_sub expects indices into the *unpermuted* w0w list,
        # hence the recomputation instead of reusing `case` above.
        self.report_latex_sub(ii, j, f,
                              [self.para.w0w.index(k)
                               for k in self.dist_expr_p[i][0]])  # case)
    # Cross-reference the "mirror" case obtained by swapping the first
    # two digits of the label.
    other_case = case_str[1] + case_str[0] + case_str[2]
    f.write("Should equal \eqref{" + other_case + "}\n")
    f.close()

def report_latex_sub(self, i, j, f, case):
    """Write the body (toral condition and character sum) for cell (i, j).

    ``case`` holds indices into ``self.para.w0w`` / ``self.tori``.
    """
    # [tt,zUyi,UxU,U]=self.Hecke_GG_form(i,j)
    [uy, uxu] = self.prepare_zUy_UxU(i, j)
    uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
    uxu = self.bruhat.split_strict_Bruhat(uxu)
    latex = self.group.latex
    truncate = self.group.truncate_u_sr
    # Condition on the toral element for this case.
    f.write("$$" + latex(self.tori[case[0]]) + "=" +
            latex(uxu[2]) + "$$\n")
    # Assemble the unipotent element psi is evaluated on:
    # invert and canonicalize the product of the two U-parts of UxU,
    # then multiply by the U-part of zUy.
    Ux_left = truncate(uxu[0])
    Ux_right = truncate(uxu[3])
    Ux = Ux_left + Ux_right
    Ux = self.group.invert(Ux)
    Ux = self.group.canonic_u(Ux)
    Ux = truncate(Ux)
    U = Ux + uy[0]
    U = self.group.canonic_u(U)
    U = truncate(U)
    U = self.group.simplify_params(U)
    f.write("$$\sum\psi(" + latex(U) + ")$$\n")

def report_latex_files(self):
    """Return the list of ``\\input{...}`` lines for all generated .tex files."""
    w0w = list(self.para.w0w)
    # Same deliberate swap of the last two representatives as in
    # report_latex, so the file names match.
    tmp = w0w[3]
    w0w[3] = w0w[2]
    w0w[2] = tmp
    result = []
    for i in range(len(self.dist_expr_p)):
        case = [w0w.index(k) for k in self.dist_expr_p[i][0]]
        case_str = "".join([str(k) for k in case])
        result.append("\\input{" + self.type.label + "/" +
                      case_str + ".tex}\n")
    return result

def report_latex_all(self):
    """Generate the LaTeX report file for every parabolic case."""
    for i in range(len(self.dist_expr_p)):
        self.report_latex(i)

def report_poly(self, ii, j):
    """Return the symbolic data for cell (ii, j) instead of writing LaTeX.

    Returns a list: first a pair [torus representative, toral condition],
    then the list of third parameters of the unipotent element psi is
    evaluated on (the same U as in report_latex_sub).
    """
    i = self.dist_expr_p[ii][2]
    [uy, uxu] = self.prepare_zUy_UxU(i, j)
    uy = self.bruhat.split_strict_Bruhat(uy, n_coef=-1)
    uxu = self.bruhat.split_strict_Bruhat(uxu)
    truncate = self.group.truncate_u_sr
    result = []
    result += [[self.tori[self.para.w0w.index(self.dist_expr_p[ii][0][0])],
                uxu[2]]]
    # Same unipotent-element assembly as in report_latex_sub.
    Ux_left = truncate(uxu[0])
    Ux_right = truncate(uxu[3])
    Ux = Ux_left + Ux_right
    Ux = self.group.invert(Ux)
    Ux = self.group.canonic_u(Ux)
    Ux = truncate(Ux)
    U = Ux + uy[0]
    U = self.group.canonic_u(U)
    U = truncate(U)
    U = self.group.simplify_params(U)
    poly = []
    for u in U:
        poly += [u[2]]
    result += [poly]
    return result
nilq/baby-python
python
"""Views for the main app: static pages, auth, profile and registration."""
from django.http import HttpResponse, Http404
from django.shortcuts import render, get_object_or_404
from django.template import TemplateDoesNotExist
from django.template.loader import get_template
# BUGFIX: LogoutView must be imported -- BBLogoutView previously (and
# wrongly) subclassed LoginView, which showed the login form instead of
# logging the user out.
from django.contrib.auth.views import LoginView, LogoutView, PasswordChangeView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.edit import UpdateView, CreateView
from django.views.generic.base import TemplateView
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django.core.signing import BadSignature

from .models import AdvUser
from .forms import ChangeUserInfoForm, RegisterUserForm
from .utilities import signer


def index(request):
    """Render the site's main page."""
    return render(request, 'main/index.html')


def other_page(request, page):
    """Render a static page ``main/<page>.html``; 404 if no such template."""
    try:
        template = get_template('main/' + page + '.html')
    except TemplateDoesNotExist:
        raise Http404
    return HttpResponse(template.render(request=request))


class BBLoginView(LoginView):
    """Sign-in page."""
    template_name = 'main/login.html'


@login_required
def profile(request):
    """Current user's profile page (login required)."""
    return render(request, 'main/profile.html')


class BBLogoutView(LoginRequiredMixin, LogoutView):
    """Sign-out page.

    BUGFIX: was ``class BBLogoutView(LoginRequiredMixin, LoginView)``,
    which rendered the login view and never terminated the session.
    """
    template_name = 'main/logout.html'


class ChangeUserInfoView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile data."""
    model = AdvUser
    template_name = 'main/change_user_info.html'
    form_class = ChangeUserInfoForm
    success_url = reverse_lazy('main:profile')
    # BUGFIX: attribute was misspelled 'success_massage', so
    # SuccessMessageMixin never displayed the message.
    success_message = 'Личные данные пользователя изменены'

    def dispatch(self, request, *args, **kwargs):
        # Remember the requesting user's pk so get_object always edits
        # the current user, never someone else's record.
        self.user_id = request.user.pk
        return super().dispatch(request, *args, **kwargs)

    def get_object(self, queryset=None):
        if not queryset:
            queryset = self.get_queryset()
        return get_object_or_404(queryset, pk=self.user_id)


class BBPasswordChangeView(SuccessMessageMixin, LoginRequiredMixin,
                           PasswordChangeView):
    """Password change page."""
    template_name = 'main/password_change.html'
    success_url = reverse_lazy('main:profile')
    success_message = 'Пароль пользователя изменен'


class RegisterUserView(CreateView):
    """New user sign-up page."""
    model = AdvUser
    template_name = 'main/register_user.html'
    form_class = RegisterUserForm
    success_url = reverse_lazy('main:register_done')


class RegisterDoneView(TemplateView):
    """Page shown after a successful registration."""
    template_name = 'main/register_done.html'


def user_activate(request, sign):
    """Activate a user account from a signed activation link.

    Shows a "bad signature" page for tampered links, an "already
    activated" page for repeat visits, and otherwise marks the account
    active.  NOTE(review): the save is performed only on first
    activation, which matches the two-template branching above.
    """
    try:
        username = signer.unsign(sign)
    except BadSignature:
        return render(request, 'main/bad_signature.html')
    user = get_object_or_404(AdvUser, username=username)
    if user.is_activated:
        template = 'main/user_is_activated.html'
    else:
        template = 'main/activation_done.html'
        user.is_active = True
        user.is_activated = True
        user.save()
    return render(request, template)
nilq/baby-python
python
"""Updating max length of s3_name in account table Revision ID: 1727fb4309d8 Revises: 51170afa2b48 Create Date: 2015-07-06 12:29:48.859104 """ # revision identifiers, used by Alembic. revision = '1727fb4309d8' down_revision = '51170afa2b48' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.alter_column('account', 's3_name', type_=sa.VARCHAR(64), existing_type=sa.VARCHAR(length=32), nullable=True) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.alter_column('account', 's3_name', type_=sa.VARCHAR(32), existing_type=sa.VARCHAR(length=64), nullable=True) ### end Alembic commands ###
nilq/baby-python
python
# Fibonacci sequence: each term is the sum of the two preceding terms.


def fibonacci(n):
    """Return a list with the first *n* Fibonacci terms, starting 0, 1.

    For n <= 0 an empty list is returned.  (Fixes the original script,
    which always printed the first two terms even when fewer were asked
    for.)
    """
    terms = []
    a, b = 0, 1
    for _ in range(max(n, 0)):
        terms.append(a)
        a, b = b, a + b
    return terms


def main():
    """Prompt for a term count and print the sequence, '->'-separated."""
    # Banner typo fixed ("Finonacci" -> "Fibonacci").
    print('Sequencia de Fibonacci')
    print('--' * 20)
    n = int(input('Quantos termos voce quer mostrar: '))
    # Same output format as before: "0 -> 1 -> ... -> Fim".
    print(' -> '.join(str(t) for t in fibonacci(n)), end='')
    print(' -> Fim')


if __name__ == '__main__':
    main()
nilq/baby-python
python
from org.transcrypt.stubs.browser import *
import random

# Numbers produced by generate(); shared with the sort exercises.
array = []


def gen_random_int(number, seed):
    """Return 0..number-1 shuffled deterministically by *seed*."""
    my_list = [i for i in range(number)]
    random.seed(seed)
    random.shuffle(my_list)
    return my_list


def bubble_sort(nums):
    """Sort *nums* in place with bubble sort and return it."""
    n = len(nums)
    for _ in range(1, n):
        for j in range(1, n):
            if nums[j - 1] > nums[j]:
                nums[j - 1], nums[j] = nums[j], nums[j - 1]
    return nums


def generate():
    """Fill the global array with 10 shuffled numbers and display them."""
    global array
    number = 10
    seed = 200
    # BUGFIX: gen_random_int was called twice and the first result
    # discarded; one call is enough (seeding makes it deterministic).
    array = gen_random_int(number, seed)
    # Comma-separated string of the numbers, ending with a full stop.
    array_str = ','.join([str(i) for i in array]) + '.'
    console.log(array, "\n", array_str)
    # Place the string into the HTML div with id "generate".
    document.getElementById("generate").innerHTML = array_str


def sortnumber1():
    """Exercise 1: sort the generated numbers and display them."""
    bubble_sort(array)
    array_str = ','.join([str(i) for i in array]) + '.'
    document.getElementById("sorted").innerHTML = array_str


def sortnumber2():
    """Exercise 2: sort numbers typed into the "numbers" text input.

    BUGFIX: the original parsed the input into ``value`` and then sorted
    the global ``array`` instead, never converting the entries to
    numbers; it also contained a meaningless ``if array == ""`` check.
    """
    value = document.getElementsByName("numbers")[0].value
    # Throw alert and stop if nothing in the text input.
    if value == "":
        window.alert("Your textbox is empty")
        return
    # Convert the comma-separated entries to numbers, as the exercise
    # asks, then sort them.
    nums = [int(v) for v in value.split(",")]
    bubble_sort(nums)
    array_str = ','.join([str(i) for i in nums]) + '.'
    document.getElementById("sorted").innerHTML = array_str
nilq/baby-python
python
import math
import random
import itertools
import collections
import numpy as np


def grouper(lst, num):
    """Chunk *lst* into tuples of length *num*, padding the tail with None."""
    chunks = itertools.zip_longest(*([iter(lst)] * num), fillvalue=None)
    return list(chunks)


def get_batch(batch_data, config, rot='_rot'):
    """Given a batch of data, determine the input and ground truth.

    Returns (inputs, pred_gt): inputs is a list holding the observed
    trajectories (plus optical flow when config.add_social is set), and
    pred_gt is the float32 array of future trajectories.
    """
    obs_seqs = batch_data['obs_traj_rel' + rot]
    pred_seqs = batch_data['pred_traj_rel' + rot]
    num_seqs = len(obs_seqs)
    point_dim = config.P
    if hasattr(config, 'flow_size'):
        flow_dim = config.flow_size

    inputs = []
    obs_gt = np.zeros([num_seqs, config.obs_len, point_dim], dtype='float32')
    pred_gt = np.zeros([num_seqs, config.pred_len, point_dim], dtype='float32')

    # Copy each (x, y) step of every sequence into the dense arrays.
    for seq_idx, (obs_seq, pred_seq) in enumerate(zip(obs_seqs, pred_seqs)):
        for step, xy in enumerate(obs_seq):
            obs_gt[seq_idx, step, :] = xy
        for step, xy in enumerate(pred_seq):
            pred_gt[seq_idx, step, :] = xy
    inputs.append(obs_gt)

    # Social component (through optical flow), only when enabled.
    if hasattr(config, 'add_social') and config.add_social:
        obs_flow = np.zeros((num_seqs, config.obs_len, flow_dim),
                            dtype='float32')
        for seq_idx, flow_seq in enumerate(batch_data['obs_optical_flow']):
            for step, flow_step in enumerate(flow_seq):
                obs_flow[seq_idx, step, :] = flow_step
        inputs.append(obs_flow)

    return inputs, pred_gt
nilq/baby-python
python
"""This program searches through an email file and returns the sender email and date of sending """ user_input = input('Enter filename: ') fhand = open(user_input) for line in fhand: line = line.rstrip() if not line.startswith('From '): continue words = line.split() # print(words) print(words[1:5], words[6])
nilq/baby-python
python
"""example1 service entry points: ctl client, foreground daemon, and a
python-daemon Service wrapper with syslog logging and restart support."""
import oi
import os
import sys
import logging
from logging.handlers import SysLogHandler
import time
import service

try:
    import config
except ImportError:
    import example1.config as config


def stop_function():
    """Ask a running daemon to stop via the oi control socket."""
    ctl = oi.CtlProgram('ctl program', config.ctl_url)
    ctl.call('stop')
    ctl.client.close()


class Service(service.Service):
    """Daemonized wrapper that runs the oi program and logs to syslog."""

    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)
        # Route all root-logger output to the local syslog daemon.
        self.syslog_handler = SysLogHandler(
            address=service.find_syslog(),
            facility=SysLogHandler.LOG_DAEMON
        )
        formatter = logging.Formatter(
            '%(name)s - %(levelname)s - %(message)s')
        self.syslog_handler.setFormatter(formatter)
        logging.getLogger().addHandler(self.syslog_handler)

    def run(self):
        try:
            from scheduler import setup_scheduler, scheduler
        except ImportError:
            from example1.scheduler import setup_scheduler, scheduler
        while not self.got_sigterm():
            logging.info("Starting")
            self.program = oi.Program('example1', config.ctl_url)
            self.program.logger = self.logger
            self.program.add_command('ping', lambda: 'pong')
            self.program.add_command('state', lambda: self.program.state)

            def restart():
                # Signal the loop below that a restart was requested.
                logging.warning('Restarting')
                self.program.continue_event.set()

            self.program.restart = restart
            setup_scheduler(self.program)
            if hasattr(config, 'register_hook'):
                config.register_hook(
                    ctx=dict(
                        locals=locals(),
                        globals=globals(),
                        program=self.program
                    )
                )
            self.program.run()
            logging.warning("Stopping")
            scheduler.shutdown()
            # continue_event set => restart requested: re-exec ourselves.
            # NOTE(review): control-flow reconstructed from a
            # whitespace-mangled source -- confirm against upstream.
            if not self.program.continue_event.wait(0.1):
                break
            self.stop()
            os.unlink('/tmp/demo.pid')
            os.execl(sys.executable, sys.argv[0], 'start')
        if self.got_sigterm():
            self.program.stop_function()


def main_ctl():
    """Run the interactive control client."""
    ctl = oi.CtlProgram('ctl program', config.ctl_url)
    ctl.run()


def main_d():
    """Run the daemon program in the foreground (no daemonization)."""
    program = oi.Program('example1', config.ctl_url)
    program.add_command('ping', lambda: 'pong')
    program.add_command('state', lambda: program.state)
    try:
        from scheduler import setup_scheduler, scheduler
    except ImportError:
        from example1.scheduler import setup_scheduler, scheduler
    setup_scheduler(program)
    if hasattr(config, 'register_hook'):
        config.register_hook(
            ctx=dict(
                locals=locals(),
                globals=globals(),
                program=program
            )
        )
    program.run()
    scheduler.shutdown()


def main_svc():
    """Command-line driver for the daemonized Service (start/stop/...)."""
    import sys
    if len(sys.argv) < 2:
        sys.exit('Syntax: %s COMMAND' % sys.argv[0])
    cmd = sys.argv[1]
    sys.argv.remove(cmd)
    service = Service('example1', pid_dir='/tmp')
    if cmd == 'start':
        service.start()
    elif cmd == 'stop':
        service.stop()
        stop_function()
    elif cmd == 'restart':
        service.stop()
        stop_function()
        while service.is_running():
            time.sleep(0.1)
        service.start()
    elif cmd == 'status':
        # BUGFIX: these were Python-2-only print statements
        # (`print "..."`), a syntax error under Python 3 while the rest
        # of the module is 2/3-compatible.
        if service.is_running():
            print("Service is running.")
        else:
            print("Service is not running.")
    else:
        sys.exit('Unknown command "%s".' % cmd)


def main():
    """Dispatch on the executable name: *svc, *d, or the ctl client."""
    prog_name = sys.argv[0].lower()
    if prog_name.endswith('.exe'):
        prog_name = prog_name[:-4]
    if prog_name.endswith('svc'):
        main_svc()
    elif prog_name.endswith('d'):
        main_d()
    else:
        main_ctl()


if __name__ == '__main__':
    if hasattr(config, 'main_hook'):
        if not config.main_hook(
            ctx=dict(
                locals=locals(),
                globals=globals()
            )
        ):
            main()
    else:
        main()
nilq/baby-python
python
# (C) Datadog, Inc. 2020-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) METRICS = ( 'hazelcast.instance.managed_executor_service.completed_task_count', 'hazelcast.instance.managed_executor_service.is_shutdown', 'hazelcast.instance.managed_executor_service.is_terminated', 'hazelcast.instance.managed_executor_service.maximum_pool_size', 'hazelcast.instance.managed_executor_service.pool_size', 'hazelcast.instance.managed_executor_service.queue_size', 'hazelcast.instance.managed_executor_service.remaining_queue_capacity', 'hazelcast.instance.member_count', 'hazelcast.instance.partition_service.active_partition_count', 'hazelcast.instance.partition_service.is_cluster_safe', 'hazelcast.instance.partition_service.is_local_member_safe', 'hazelcast.instance.partition_service.partition_count', 'hazelcast.instance.running', 'hazelcast.mc.license_expiration_time', 'hazelcast.member.accepted_socket_count', 'hazelcast.member.active_count', 'hazelcast.member.active_members', 'hazelcast.member.active_members_commit_index', 'hazelcast.member.async_operations', 'hazelcast.member.available_processors', 'hazelcast.member.backup_timeout_millis', 'hazelcast.member.backup_timeouts', 'hazelcast.member.bytes_read', 'hazelcast.member.bytes_received', 'hazelcast.member.bytes_send', 'hazelcast.member.bytes_transceived', 'hazelcast.member.bytes_written', 'hazelcast.member.call_timeout_count', 'hazelcast.member.client_count', 'hazelcast.member.closed_count', 'hazelcast.member.cluster_start_time', 'hazelcast.member.cluster_time', 'hazelcast.member.cluster_time_diff', 'hazelcast.member.cluster_up_time', 'hazelcast.member.commit_count', 'hazelcast.member.committed_heap', 'hazelcast.member.committed_native', 'hazelcast.member.committed_virtual_memory_size', 'hazelcast.member.completed_count', 'hazelcast.member.completed_migrations', 'hazelcast.member.completed_operation_batch_count', 'hazelcast.member.completed_operation_count', 
'hazelcast.member.completed_packet_count', 'hazelcast.member.completed_partition_specific_runnable_count', 'hazelcast.member.completed_runnable_count', 'hazelcast.member.completed_task_count', 'hazelcast.member.completed_tasks', 'hazelcast.member.completed_total_count', 'hazelcast.member.connection_listener_count', 'hazelcast.member.connection_type', 'hazelcast.member.count', 'hazelcast.member.created_count', 'hazelcast.member.daemon_thread_count', 'hazelcast.member.delayed_execution_count', 'hazelcast.member.destroyed_count', 'hazelcast.member.destroyed_group_ids', 'hazelcast.member.elapsed_destination_commit_time', 'hazelcast.member.elapsed_migration_operation_time', 'hazelcast.member.elapsed_migration_time', 'hazelcast.member.error_count', 'hazelcast.member.event_count', 'hazelcast.member.event_queue_size', 'hazelcast.member.events_processed', 'hazelcast.member.exception_count', 'hazelcast.member.failed_backups', 'hazelcast.member.frames_transceived', 'hazelcast.member.free_heap', 'hazelcast.member.free_memory', 'hazelcast.member.free_native', 'hazelcast.member.free_physical', 'hazelcast.member.free_physical_memory_size', 'hazelcast.member.free_space', 'hazelcast.member.free_swap_space_size', 'hazelcast.member.generic_priority_queue_size', 'hazelcast.member.generic_queue_size', 'hazelcast.member.generic_thread_count', 'hazelcast.member.groups', 'hazelcast.member.heartbeat_broadcast_period_millis', 'hazelcast.member.heartbeat_packets_received', 'hazelcast.member.heartbeat_packets_sent', 'hazelcast.member.idle_time_millis', 'hazelcast.member.idle_time_ms', 'hazelcast.member.imbalance_detected_count', 'hazelcast.member.in_progress_count', 'hazelcast.member.invocation_scan_period_millis', 'hazelcast.member.invocation_timeout_millis', 'hazelcast.member.invocations.last_call_id', 'hazelcast.member.invocations.pending', 'hazelcast.member.invocations.used_percentage', 'hazelcast.member.io_thread_id', 'hazelcast.member.last_heartbeat', 
'hazelcast.member.last_repartition_time', 'hazelcast.member.listener_count', 'hazelcast.member.loaded_classes_count', 'hazelcast.member.local_clock_time', 'hazelcast.member.local_partition_count', 'hazelcast.member.major_count', 'hazelcast.member.major_time', 'hazelcast.member.max_backup_count', 'hazelcast.member.max_cluster_time_diff', 'hazelcast.member.max_file_descriptor_count', 'hazelcast.member.max_heap', 'hazelcast.member.max_memory', 'hazelcast.member.max_metadata', 'hazelcast.member.max_native', 'hazelcast.member.maximum_pool_size', 'hazelcast.member.member_groups_size', 'hazelcast.member.migration_active', 'hazelcast.member.migration_completed_count', 'hazelcast.member.migration_queue_size', 'hazelcast.member.minor_count', 'hazelcast.member.minor_time', 'hazelcast.member.missing_members', 'hazelcast.member.monitor_count', 'hazelcast.member.nodes', 'hazelcast.member.normal_frames_read', 'hazelcast.member.normal_frames_written', 'hazelcast.member.normal_pending_count', 'hazelcast.member.normal_timeouts', 'hazelcast.member.open_file_descriptor_count', 'hazelcast.member.opened_count', 'hazelcast.member.operation_timeout_count', 'hazelcast.member.owner_id', 'hazelcast.member.packets_received', 'hazelcast.member.packets_send', 'hazelcast.member.park_queue_count', 'hazelcast.member.partition_thread_count', 'hazelcast.member.peak_thread_count', 'hazelcast.member.planned_migrations', 'hazelcast.member.pool_size', 'hazelcast.member.priority_frames_read', 'hazelcast.member.priority_frames_transceived', 'hazelcast.member.priority_frames_written', 'hazelcast.member.priority_pending_count', 'hazelcast.member.priority_queue_size', 'hazelcast.member.priority_write_queue_size', 'hazelcast.member.process_count', 'hazelcast.member.process_cpu_load', 'hazelcast.member.process_cpu_time', 'hazelcast.member.proxy_count', 'hazelcast.member.publication_count', 'hazelcast.member.queue_capacity', 'hazelcast.member.queue_size', 'hazelcast.member.rejected_count', 
'hazelcast.member.remaining_queue_capacity', 'hazelcast.member.replica_sync_requests_counter', 'hazelcast.member.replica_sync_semaphore', 'hazelcast.member.response_queue_size', 'hazelcast.member.responses.backup_count', 'hazelcast.member.responses.error_count', 'hazelcast.member.responses.missing_count', 'hazelcast.member.responses.normal_count', 'hazelcast.member.responses.timeout_count', 'hazelcast.member.retry_count', 'hazelcast.member.rollback_count', 'hazelcast.member.running_count', 'hazelcast.member.running_generic_count', 'hazelcast.member.running_partition_count', 'hazelcast.member.scheduled', 'hazelcast.member.selector_i_o_exception_count', 'hazelcast.member.selector_rebuild_count', 'hazelcast.member.selector_recreate_count', 'hazelcast.member.size', 'hazelcast.member.start_count', 'hazelcast.member.started_migrations', 'hazelcast.member.state_version', 'hazelcast.member.sync_delivery_failure_count', 'hazelcast.member.system_cpu_load', 'hazelcast.member.system_load_average', 'hazelcast.member.task_queue_size', 'hazelcast.member.terminated_raft_node_group_ids', 'hazelcast.member.text_count', 'hazelcast.member.thread_count', 'hazelcast.member.total_completed_migrations', 'hazelcast.member.total_elapsed_destination_commit_time', 'hazelcast.member.total_elapsed_migration_operation_time', 'hazelcast.member.total_elapsed_migration_time', 'hazelcast.member.total_failure_count', 'hazelcast.member.total_loaded_classes_count', 'hazelcast.member.total_memory', 'hazelcast.member.total_parked_operation_count', 'hazelcast.member.total_physical', 'hazelcast.member.total_physical_memory_size', 'hazelcast.member.total_registrations', 'hazelcast.member.total_space', 'hazelcast.member.total_started_thread_count', 'hazelcast.member.total_swap_space_size', 'hazelcast.member.unknown_time', 'hazelcast.member.unloaded_classes_count', 'hazelcast.member.uptime', 'hazelcast.member.usable_space', 'hazelcast.member.used_heap', 'hazelcast.member.used_memory', 
'hazelcast.member.used_metadata', 'hazelcast.member.used_native', 'hazelcast.member.write_queue_size', 'jvm.buffer_pool.direct.capacity', 'jvm.buffer_pool.direct.count', 'jvm.buffer_pool.direct.used', 'jvm.buffer_pool.mapped.capacity', 'jvm.buffer_pool.mapped.count', 'jvm.buffer_pool.mapped.used', 'jvm.cpu_load.process', 'jvm.cpu_load.system', 'jvm.gc.cms.count', 'jvm.gc.eden_size', 'jvm.gc.old_gen_size', 'jvm.gc.parnew.time', 'jvm.gc.survivor_size', 'jvm.heap_memory', 'jvm.heap_memory_committed', 'jvm.heap_memory_init', 'jvm.heap_memory_max', 'jvm.loaded_classes', 'jvm.non_heap_memory', 'jvm.non_heap_memory_committed', 'jvm.non_heap_memory_init', 'jvm.non_heap_memory_max', 'jvm.os.open_file_descriptors', 'jvm.thread_count', )
nilq/baby-python
python
import time
import json

# arduino = serial.Serial(port='COM14', baudrate=115200, timeout=0)


def parse_request_body(body_bytes):
    """Decode a raw POST body (bytes) and parse it as JSON.

    Replaces the original str(bytes) + strip-'b' + eval() round-trip:
    eval() on network-supplied data is arbitrary code execution.
    """
    return json.loads(body_bytes.decode('utf-8'))


def process_post_request(request, *args, **kwargs):
    """Webhook handler: read the POST body, parse the JSON, and log it."""
    length = int(request.headers.get("Content-Length", 0))
    raw = request.body.read(length) if length > 0 else b""
    req = parse_request_body(raw)
    print(req)
    # Process the request!
    # ...
    return


def main():
    """Start the webhook listener and keep the process alive."""
    # Imported here so the module can be used (and tested) without the
    # third-party listener installed; also keeps import side-effect free.
    import webhook_listener
    webhooks = webhook_listener.Listener(
        handlers={"POST": process_post_request})
    webhooks.start()
    while True:
        print("Still alive...")
        time.sleep(300)


if __name__ == '__main__':
    main()
nilq/baby-python
python
#!/bin/env python # # Copyright 2013-2014 Graham McVicker and Bryce van de Geijn # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # """ This program reads BAM files and counts the number of reads that match the alternate and reference allele at every SNP position in the provided SNP HDF5 data files. The read counts are stored in specified HDF5 output files. Additionally counts of all reads are stored in another track (at the left-most position of the reads). This program does not perform filtering of reads based on mappability. It is assumed that the inpute BAM files are filtered appropriately prior to calling this script. Reads that overlap known indels are not included in allele-specific counts. usage: bam2h5.py OPTIONS BAM_FILE1 [BAM_FILE2 ...] BAM Files: Aligned reads are read from one or more BAM files. The provided BAM files must be sorted and indexed. Input Options: --chrom CHROM_TXT_FILE [required] Path to chromInfo.txt file (may be gzipped) with list of chromosomes for the relevant genome assembly. Each line in file should contain tab-separated chromosome name and chromosome length (in basepairs). chromInfo.txt files can be downloaded from the UCSC genome browser. For example, a chromInfo.txt.gz file for hg19 can be downloaded from http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/ --snp_index SNP_INDEX_H5_FILE [required] Path to HDF5 file containing SNP index. 
The SNP index is used to convert the genomic position of a SNP to its corresponding row in the haplotype and snp_tab HDF5 files. --snp_tab SNP_TABLE_H5_FILE [required] Path to HDF5 file to read SNP information from. Each row of SNP table contains SNP name (rs_id), position, allele1, allele2. --haplotype HAPLOTYPE_H5_FILE [optional] Path to HDF5 file to read phased haplotypes from. If supplied, when read overlaps multiple SNPs counts are randomly assigned to ONE of the overlapping HETEROZYGOUS SNPs; if not supplied counts are randomly assigned to ONE of overlapping SNPs (regardless of their genotype). --individual INDIVIDUAL [optional] Identifier for individual, used to determine which SNPs are heterozygous. Must be provided if --haplotype argument is provided and must match one of the samples in the haplotype HDF5 file. Output Options: --data_type uint8|uint16 Data type of stored counts; uint8 takes up less disk space but has a maximum value of 255 (default=uint16). --ref_as_counts REF_AS_COUNT_H5_FILE [required] Path to HDF5 file to write counts of reads that match reference allele. Allele-specific counts are stored at the position of the SNP. --alt_as_counts ALT_AS_COUNT_H5_FILE [required] Path to HDF5 file to write counts of reads that match alternate allele. Allele-specific counts are stored at the position of the SNP. --other_as_counts OTHER_AS_COUNT_H5_FILE [required] Path to HDF5 file to write counts of reads that match neither reference nor alternate allele. Allele-specific counts are stored at the position of the SNP. --read_counts READ_COUNT_H5_FILE [required] Path to HDF5 file to write counts of all reads, regardless of whether they overlap a SNP. Read counts are stored at the left-most position of the mapped read. --txt_counts COUNTS_TXT_FILE [optional] Path to text file to write ref, alt, and other counts of reads. 
The text file will have columns: <chromosome> <snp_position> <ref_allele> <alt_allele> <genotype> <ref_allele_count> <alt_allele_count> <other_count> """ import sys import os import gzip import warnings import tables import argparse import numpy as np import pysam import chromosome import chromstat import util sys.path.insert(0, os.path.dirname(os.path.realpath(__file__))+"/../mapping/") import snptable # codes used by pysam for aligned read CIGAR strings BAM_CMATCH = 0 # M BAM_CINS = 1 # I BAM_CDEL = 2 # D BAM_CREF_SKIP = 3 # N BAM_CSOFT_CLIP = 4 # S BAM_CHARD_CLIP = 5 # H BAM_CPAD = 6 # P BAM_CEQUAL = 7 # = BAM_CDIFF = 8 # X BAM_CIGAR_DICT = {0 : "M", 1 : "I", 2 : "D", 3 : "N", 4 : "S", 5 : "H", 6 : "P", 7 : "=", 8 : "X"} SNP_UNDEF = -1 MAX_UINT8_COUNT = 255 MAX_UINT16_COUNT = 65535 unimplemented_CIGAR = [0, set()] def create_carray(h5f, chrom, data_type): if data_type == "uint8": atom = tables.UInt8Atom(dflt=0) elif data_type == "uint16": atom = tables.UInt16Atom(dflt=0) else: raise NotImplementedError("unsupported datatype %s" % data_type) zlib_filter = tables.Filters(complevel=1, complib="zlib") # create CArray for this chromosome shape = [chrom.length] carray = h5f.create_carray(h5f.root, chrom.name, atom, shape, filters=zlib_filter) return carray def get_carray(h5f, chrom): return h5f.get_node("/%s" % chrom) def is_indel(snp): if (len(snp['allele1']) != 1) or (len(snp['allele2'])) != 1: return True def dump_read(f, read): cigar_str = " ".join(["%s:%d" % (BAM_CIGAR_DICT[c[0]], c[1]) for c in read.cigar]) f.write("pos: %d\n" "aend: %d\n" "alen (len of aligned portion of read on genome): %d\n" "qstart: %d\n" "qend: %d\n" "qlen (len of aligned qry seq): %d\n" "rlen (read len): %d\n" "tlen (insert size): %d\n" "cigar: %s\n" "seq: %s\n" % (read.pos, read.aend, read.alen, read.qstart, read.qend, read.qlen, read.rlen, read.tlen, cigar_str, read.seq)) def get_sam_iter(samfile, chrom): try: sam_iter = samfile.fetch(reference=chrom.name, start=1, end=chrom.length) 
except ValueError as ve: sys.stderr.write("%s\n" % str(ve)) # could not find chromosome, try stripping leading 'chr' # E.g. for drosophila, sometimes 'chr2L' is used but # othertimes just '2L' is used. Annoying! chrom_name = chrom.name.replace("chr", "") sys.stderr.write("WARNING: %s does not exist in BAM file, " "trying %s instead\n" % (chrom.name, chrom_name)) try: sam_iter = samfile.fetch(reference=chrom_name, start=1, end=chrom.length) except ValueError: # fetch can fail because chromosome is missing or because # BAM has not been indexed sys.stderr.write("WARNING: %s does not exist in BAM file, " "or BAM file has not been sorted and indexed.\n" " Use 'samtools sort' and 'samtools index' to " "index BAM files before running bam2h5.py.\n" " Skipping chromosome %s.\n" % (chrom.name, chrom.name)) sam_iter = iter([]) return sam_iter def choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx): """Picks out a single SNP from those that the read overlaps. Returns a tuple containing 4 elements: [0] the index of the SNP in the SNP table, [1] the offset into the read sequence, [2] flag indicating whether the read was 'split' (i.e. was a spliced read), [3] flag indicating whether read overlaps known indel. If there are no overlapping SNPs or the read cannot be processed, (None, None, is_split, overlap_indel) is returned instead. 
""" read_offsets = [] snp_idx = [] read_start_idx = 0 genome_start_idx = read.pos n_match_segments = 0 is_split = False overlap_indel = False for cig in read.cigar: op = cig[0] op_len = cig[1] if op == BAM_CMATCH: # this is a block of match/mismatch in read alignment read_end = read_start_idx + op_len genome_end = genome_start_idx + op_len # get offsets of any SNPs that this read overlaps idx = snp_index_array[genome_start_idx:genome_end] is_def = np.where(idx != SNP_UNDEF)[0] read_offsets.extend(read_start_idx + is_def) snp_idx.extend(idx[is_def]) read_start_idx = read_end genome_start_idx = genome_end n_match_segments += 1 elif op == BAM_CREF_SKIP: # spliced read, skip over this region of genome genome_start_idx += op_len is_split = True elif op == BAM_CSOFT_CLIP: # end of read is soft-clipped, which means it is # present in read, but not used in alignment read_start_idx += op_len elif op == BAM_CINS: # Dealing with insertion read_start_idx += op_len elif op == BAM_CDEL: # Dealing with deletion genome_start_idx += op_len elif op == BAM_CHARD_CLIP: # end of read is hard-clipped, so not present # in read and not used in alignment pass else: unimplemented_CIGAR[0] += 1 unimplemented_CIGAR[1].add(BAM_CIGAR_DICT[op]) # sys.stderr.write("skipping because contains CIGAR code %s " # " which is not currently implemented\n" % # BAM_CIGAR_DICT[op]) return (None, None, is_split, overlap_indel) # are any of the SNPs indels? If so, discard. 
for i in snp_idx: if is_indel(snp_tab[i]): overlap_indel = True return (None, None, is_split, overlap_indel) n_overlap_snps = len(read_offsets) if n_overlap_snps == 0: # no SNPs overlap this read return (None, None, is_split, overlap_indel) if hap_tab: # genotype info is provided by haplotype table # pull out subset of overlapping SNPs that are heterozygous # in this individual het_read_offsets = [] het_snp_idx = [] for (i, read_offset) in zip(snp_idx, read_offsets): haps = hap_tab[i, (ind_idx*2):(ind_idx*2 + 2)] if ind_idx*2 > hap_tab.shape[1]: raise ValueError("index of individual (%d) is >= number of " "individuals in haplotype_tab (%d)." % (ind_idx, hap_tab.shape[1]/2)) if haps[0] != haps[1]: # this is a het het_read_offsets.append(read_offset) het_snp_idx.append(i) n_overlap_hets = len(het_read_offsets) if n_overlap_hets == 0: # none of the overlapping SNPs are hets return (None, None, is_split, overlap_indel) if n_overlap_hets == 1: # only one overlapping SNP is a het return (het_snp_idx[0], het_read_offsets[0], is_split, overlap_indel) # choose ONE overlapping HETEROZYGOUS SNP randomly to add counts to # we don't want to count same read multiple times r = np.random.randint(0, n_overlap_hets) return (het_snp_idx[r], het_read_offsets[r], is_split, overlap_indel) else: # We don't have haplotype tab, so we don't know which SNPs are # heterozygous in this individual. But we can still tell # whether read sequence matches reference or non-reference # allele. 
Choose ONE overlapping SNP randomly to add counts to if n_overlap_snps == 1: return (snp_idx[0], read_offsets[0], is_split, overlap_indel) else: r = np.random.randint(0, n_overlap_snps) return (snp_idx[r], read_offsets[r], is_split, overlap_indel) def add_read_count(read, chrom, ref_array, alt_array, other_array, read_count_array, snp_index_array, snp_tab, hap_tab, warned_pos, max_count, ind_idx): # pysam positions start at 0 start = read.pos+1 end = read.aend if start < 1 or end > chrom.length: sys.stderr.write("WARNING: skipping read aligned past end of " "chromosome. read: %d-%d, %s:1-%d\n" % (start, end, chrom.name, chrom.length)) return if read.qlen != read.rlen: sys.stderr.write("WARNING skipping read: handling of " "partially mapped reads not implemented\n") return # look for SNPs that overlap mapped read position, and if there # are more than one, choose one at random snp_idx, read_offset, is_split, overlap_indel = \ choose_overlap_snp(read, snp_tab, snp_index_array, hap_tab, ind_idx) if overlap_indel: return # store counts of reads at start position if read_count_array[start-1] < max_count: read_count_array[start-1] += 1 else: if not start in warned_pos: sys.stderr.write("WARNING read count at position %d " "exceeds max %d\n" % (start, max_count)) warned_pos[start] = True if snp_idx is None: return snp = snp_tab[snp_idx] allele1 = snp['allele1'].decode("utf-8") allele2 = snp['allele2'].decode("utf-8") base = read.seq[read_offset] snp_pos = snp['pos'] if base == allele1: # matches reference allele if ref_array[snp_pos-1] < max_count: ref_array[snp_pos-1] += 1 elif not snp_pos in warned_pos: sys.stderr.write("WARNING ref allele count at position %d " "exceeds max %d\n" % (snp_pos, max_count)) warned_pos[snp_pos] = True elif base == allele2: # matches alternate allele if alt_array[snp_pos-1] < max_count: alt_array[snp_pos-1] += 1 elif not snp_pos in warned_pos: sys.stderr.write("WARNING alt allele count at position %d " "exceeds max %d\n" % (snp_pos, 
max_count)) warned_pos[snp_pos] = True else: # matches neither if other_array[snp_pos-1] < max_count: other_array[snp_pos-1] += 1 elif not snp_pos in warned_pos: sys.stderr.write("WARNING other allele count at position %d " "exceeds max %d\n" % (snp_pos, max_count)) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--chrom", help="Path to chromInfo.txt file (may be gzipped) " "with list of chromosomes for the relevant genome " "assembly. Each line in file should contain " "tab-separated chromosome name and chromosome length " "(in basepairs). chromInfo.txt files can be " "downloaded from the UCSC genome browser. For " "example, a chromInfo.txt.gz file for hg19 can " "be downloaded from " "http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/", metavar="CHROM_TXT_FILE", required=True) parser.add_argument("--test_chrom", help="Run only on this chromosome", metavar="CHROM_NAME", required=False) parser.add_argument("--snp_index", help="Path to HDF5 file containing SNP index. The " "SNP index is used to convert the genomic position " "of a SNP to its corresponding row in the haplotype " "and snp_tab HDF5 files.", metavar="SNP_INDEX_H5_FILE", required=True) parser.add_argument("--snp_tab", help="Path to HDF5 file to read SNP information " "from. Each row of SNP table contains SNP name " "(rs_id), position, allele1, allele2.", metavar="SNP_TABLE_H5_FILE", required=True) parser.add_argument("--haplotype", help=" Path to HDF5 file to read phased haplotypes " "from. If supplied, when read overlaps multiple SNPs " "counts are randomly assigned to ONE of the " "overlapping HETEROZYGOUS SNPs; if not supplied " "counts are randomly assigned to ONE of overlapping " "SNPs (regardless of their genotype).", metavar="HAPLOTYPE_H5_FILE", default=None) parser.add_argument("--individual", help="Identifier for individual, used to determine " "which SNPs are heterozygous. 
Must be provided if " "--haplotype argument is provided and must match one " "of the samples in the haplotype HDF5 file.", metavar="INDIVIDUAL", default=None) parser.add_argument("--data_type", help="Data type of counts stored in HDF5 files. " "uint8 requires less disk space but has a " "maximum value of 255." "(default=uint8)", choices=("uint8", "uint16"), default="uint16") parser.add_argument("--ref_as_counts", help="Path to HDF5 file to write counts of reads " "that match reference allele. Allele-specific counts " "are stored at the position of the SNP." "that match reference", metavar="REF_AS_COUNT_H5_FILE", required=True) parser.add_argument("--alt_as_counts", help="Path to HDF5 file to write counts of reads " "that match alternate allele. Allele-specific counts " "are stored at the position of the SNP.", metavar="ALT_AS_COUNT_H5_FILE", required=True) parser.add_argument("--other_as_counts", help="Path to HDF5 file to write counts of reads " "that match neither reference nor alternate allele. " "Allele-specific counts are stored at the position " "of the SNP.", metavar="OTHER_COUNT_H5_FILE", required=True) parser.add_argument("--read_counts", help="Path to HDF5 file to write counts of all " "reads, regardless of whether they overlap a SNP. " "Read counts are stored at the left-most position " "of the mapped read.", metavar="READ_COUNT_H5_FILE", required=True) parser.add_argument("--txt_counts", help="Path to text file to write ref, alt, and other " "counts of reads. The text file will have columns: " "<chromosome> <snp_position> <ref_allele> <alt_allele>" " <genotype> <ref_allele_count> <alt_allele_count> " "<other_count>", metavar="COUNTS_TXT_FILE", default=None) parser.add_argument("bam_filenames", action="store", nargs="+", help="BAM file(s) to read mapped reads from. 
" "BAMs must be sorted and indexed.") args = parser.parse_args() if args.haplotype and (args.individual is None): parser.error("--indidivual argument " "must also be provided when --haplotype argument " "is provided") return args def write_txt_file(out_file, chrom, snp_tab, hap_tab, ind_idx, ref_array, alt_array, other_array): i = 0 # get out genotypes for this individual hap = hap_tab[:, (ind_idx*2, ind_idx*2+1)] for row in snp_tab: if (hap[i,0] > -1) and (hap[i,1] > -1): # genotype is defined geno = "%d|%d" % (hap[i,0], hap[i,1]) else: geno = "NA" pos = row['pos'] out_file.write(" ".join([chrom.name, "%d" % pos, row['allele1'].decode("utf-8"), row['allele2'].decode("utf-8"), geno, "%d" % ref_array[pos-1], "%d" % alt_array[pos-1], "%d" % other_array[pos-1]]) + "\n") i += 1 def main(): args = parse_args() sys.stderr.write("command line: %s\n" % " ".join(sys.argv)) sys.stderr.write("python version: %s\n" % sys.version) sys.stderr.write("pysam version: %s\n" % pysam.__version__) sys.stderr.write("pytables version: %s\n" % tables.__version__) util.check_pysam_version() util.check_pytables_version() # disable warnings that come from pytables when chromosome # names are like 1, 2, 3 (instead of chr1, chr2, chr3) warnings.filterwarnings('ignore', category=tables.NaturalNameWarning) snp_tab_h5 = tables.open_file(args.snp_tab, "r") snp_index_h5 = tables.open_file(args.snp_index, "r") if args.haplotype: hap_h5 = tables.open_file(args.haplotype, "r") else: hap_h5 = None ref_count_h5 = tables.open_file(args.ref_as_counts, "w") alt_count_h5 = tables.open_file(args.alt_as_counts, "w") other_count_h5 = tables.open_file(args.other_as_counts, "w") read_count_h5 = tables.open_file(args.read_counts, "w") output_h5 = [ref_count_h5, alt_count_h5, other_count_h5, read_count_h5] chrom_dict = {} # initialize every chromosome in output files chrom_list = chromosome.get_all_chromosomes(args.chrom) for chrom in chrom_list: for out_file in output_h5: create_carray(out_file, chrom, 
args.data_type) chrom_dict[chrom.name] = chrom count = 0 dtype = None if args.data_type == "uint8": max_count = MAX_UINT8_COUNT dtype = np.uint8 elif args.data_type == "uint16": max_count = MAX_UINT16_COUNT dtype = np.uint16 else: raise NotImplementedError("unsupported datatype %s" % args.data_type) # create a txt file to also holds the counts if args.txt_counts is not None: if os.path.splitext(args.txt_counts)[1] == ".gz": txt_counts = gzip.open(args.txt_counts, 'wt+') else: txt_counts = open(args.txt_counts, 'w+') for chrom in chrom_list: sys.stderr.write("%s\n" % chrom.name) if args.test_chrom: if chrom.name != args.test_chrom: sys.stderr.write("skipping because not test chrom\n") continue warned_pos = {} # fetch SNP info for this chromosome if chrom.name not in snp_tab_h5.root: # no SNPs for this chromosome sys.stderr.write("skipping %s because chromosome with this name " "not found in SNP table\n" % chrom.name) continue sys.stderr.write("fetching SNPs\n") snp_tab = snp_tab_h5.get_node("/%s" % chrom.name) snp_index_array = snp_index_h5.get_node("/%s" % chrom.name)[:] if hap_h5: hap_tab = hap_h5.get_node("/%s" % chrom.name) ind_dict, ind_idx = snptable.SNPTable().get_h5_sample_indices( hap_h5, chrom.name, [args.individual]) if len(ind_idx) == 1: ind_idx = ind_idx[0] sys.stderr.write("index for individual %s is %d\n" % (args.individual, ind_idx)) else: raise ValueError("got sample indices for %d individuals, " "but expected to get index for one " "individual (%s)" % (len(ind_idx), args.individual)) hap_tab = None ind_idx = None else: hap_tab = None ind_idx = None # initialize count arrays for this chromosome to 0 ref_carray = get_carray(ref_count_h5, chrom) alt_carray = get_carray(alt_count_h5, chrom) other_carray = get_carray(other_count_h5, chrom) read_count_carray = get_carray(read_count_h5, chrom) ref_array = np.zeros(chrom.length, dtype) alt_array = np.zeros(chrom.length, dtype) other_array = np.zeros(chrom.length, dtype) read_count_array = 
np.zeros(chrom.length, dtype) # loop over all BAM files, pulling out reads # for this chromosome for bam_filename in args.bam_filenames: sys.stderr.write("reading from file %s\n" % bam_filename) samfile = pysam.Samfile(bam_filename, "rb") for read in get_sam_iter(samfile, chrom): count += 1 if count == 10000: sys.stderr.write(".") count = 0 add_read_count(read, chrom, ref_array, alt_array, other_array, read_count_array, snp_index_array, snp_tab, hap_tab, warned_pos, max_count, ind_idx) # store results for this chromosome ref_carray[:] = ref_array alt_carray[:] = alt_array other_carray[:] = other_array read_count_carray[:] = read_count_array sys.stderr.write("\n") # write data to numpy arrays, so that they can be written to a txt # file later # columns are: # chrom, pos, ref, alt, genotype, ref_count, alt_count, other_count if args.txt_counts is not None: write_txt_file(txt_counts, chrom, snp_tab, hap_tab, ind_idx, ref_array, alt_array, other_array) samfile.close() if args.txt_counts: # close the open txt file handler txt_counts.close() # check if any of the reads contained an unimplemented CIGAR if unimplemented_CIGAR[0] > 0: sys.stderr.write("WARNING: Encountered " + str(unimplemented_CIGAR[0]) + " instances of CIGAR codes: " + str(unimplemented_CIGAR[1]) + ". Reads with these " "CIGAR codes were skipped because they " "are currently unimplemented.\n") # set track statistics and close HDF5 files sys.stderr.write("setting statistics for each chromosome\n") for h5f in output_h5: chromstat.set_stats(h5f, chrom_list) h5f.close() snp_tab_h5.close() snp_index_h5.close() if hap_h5: hap_h5.close() sys.stderr.write("done\n") main()
nilq/baby-python
python
#!/bin/python # -*- coding: utf-8 -*- import requests CITY = "787657" API_KEY = "yourapikey(can be registered on openweathermap.org)" UNITS = "Metric" LANG = "en" REQ = requests.get("http://api.openweathermap.org/data/2.5/weather?id={}&lang={}&appid={}&units={}".format(CITY, LANG, API_KEY, UNITS)) try: if REQ.status_code == 200: CURRENT = REQ.json()["weather"][0]["description"].capitalize() TEMP = int(float(REQ.json()["main"]["temp"])) print("{}°".format(TEMP)) else: print("Error: BAD HTTP STATUS CODE " + str(REQ.status_code)) except (ValueError, IOError): print("Error: Unable print the data")
nilq/baby-python
python
#Build In import os import sys import pickle import copy import random # Installed import numpy as np from scipy.spatial.transform import Rotation as R from pathlib import Path import torch import spconv from argoverse.data_loading.argoverse_tracking_loader import ArgoverseTrackingLoader # Local from pcdet.utils import box_utils, object3d_utils, calibration, common_utils from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils from pcdet.config import cfg from pcdet.datasets.data_augmentation.dbsampler import DataBaseSampler from pcdet.datasets import DatasetTemplate def shuffle_log(subset, log:ArgoverseTrackingLoader): index = np.arange(log.num_lidar_frame) random.shuffle(index) for idx in index: lidar = log.get_lidar(idx) label = log.get_label_object(idx) yield idx, subset, lidar, label, log class BaseArgoDataset(DatasetTemplate): def __init__(self, root_path, subsets:list): super().__init__() self.root_path = root_path self.atls = {subset:ArgoverseTrackingLoader(Path(self.root_path) / subset) for subset in subsets} self._len = 0 pass def __len__(self): if self._len is 0: for atl in self.atls.values(): for log in iter(atl): self._len += log.num_lidar_frame return self._len def __iter__(self): for subset, atl in self.atls.items(): for log in iter(atl): for idx in range(atl.num_lidar_frame): lidar = log.get_lidar(idx) label = log.get_label_object(idx) yield idx, subset, lidar, label, log pass def shuffle(self, seed=0): random.seed = seed generators = [(shuffle_log(subset, log) for log in iter(atl)) for subset, atl in self.atls.items()] random.shuffle(generators) has_next = True while has_next: has_next = False for generator in generators: item = next(generator, False) if item is not False: has_next = True yield item def create_gt_parts(self, root=None): if root is None: root = Path(self.root_path) for idx, subset, lidar, label, log in iter(self): save_path = root / subset / log.current_log / 'gt_parts' save_path.mkdir(parents=True, exist_ok=True) gt_boxes = 
np.zeros((len(label), 7)) for i, obj in enumerate(label): loc = obj.translation quat = obj.quaternion dim = (obj.width, obj.length, obj.height) rot = R.from_quat(quat).as_euler('zyx') gt_boxes[i] = np.hstack((loc, dim, rot[0])) point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(torch.from_numpy(lidar[:, :3]), torch.from_numpy(gt_boxes)).numpy() for i, obj in enumerate(label): filename = save_path / '{}_{}_{}.bin'.format(idx, obj.label_class, obj.track_id) gt_points = lidar[point_indices[i] > 0] if len(gt_points) >= 10: gt_points -= gt_points.mean(axis=0) with open(filename, 'wb') as f: gt_points.tofile(f) class ArgoDataset(BaseArgoDataset): def __init__(self, root_path, subsets:list, class_names:dict, training=True): """ :param root_path: ARGO AI data path :param split: """ super().__init__(root_path, subsets) self.class_names = class_names self.training = training self.mode = 'TRAIN' if self.training else 'TEST' # Support spconv 1.0 and 1.1 try: VoxelGenerator = spconv.utils.VoxelGeneratorV2 except: VoxelGenerator = spconv.utils.VoxelGenerator vg_cfg = cfg.DATA_CONFIG.VOXEL_GENERATOR self.voxel_generator = VoxelGenerator( voxel_size=vg_cfg.VOXEL_SIZE, point_cloud_range=vg_cfg.DATA_CONFIG.POINT_CLOUD_RANGE, max_num_points=vg_cfg.MAX_POINTS_PER_VOXEL, max_voxels=cfg.DATA_CONFIG[self.mode].MAX_NUMBER_OF_VOXELS ) pass def __getitem__(self, index): def create_input_dict(log, subset, idx): label = [] for obj in log.get_label_object(idx): if obj.label_class in self.class_names.keys(): obj.class_id = self.class_names[obj.label_class] label.append(obj) points = log.get_lidar(idx) gt_boxes = np.zeros((len(label), 7)) occluded = np.zeros(len(label), dtype=int) for i, obj in enumerate(label): loc = obj.translation quat = obj.quaternion dim = (obj.width, obj.length, obj.height) rot = R.from_quat(quat).as_euler('zyx') gt_boxes[i] = np.hstack((loc, dim, rot[0], obj.class_id)) occluded[i] = obj.occlusion voxel_grid = self.voxel_generator.generate(points) if 
isinstance(voxel_grid, dict): voxels = voxel_grid["voxels"] coordinates = voxel_grid["coordinates"] num_points = voxel_grid["num_points_per_voxel"] else: voxels, coordinates, num_points = voxel_grid voxel_centers = (coordinates[:, ::-1] + 0.5) * self.voxel_generator.voxel_size + self.voxel_generator.point_cloud_range[:3] return { 'voxels': voxels, 'voxel_senters': voxel_centers, 'coordinates': coordinates, 'num_points': num_points, 'points': points, 'subset': subset, 'sample_idx': idx, 'occluded': occluded, 'gt_names': np.array([obj.label_class for obj in label]), 'gt_box2d': None, 'gt_boxes': gt_boxes } for subset, atl in self.atls.items(): for log in iter(atl): if index < log.num_lidar_frame: input_dict = create_input_dict(log, subset, index) break else: index -= log.num_lidar_frame return input_dict def create_argo_infos(data_path, save_path, subsets, workers=4): dataset = BaseArgoDataset(data_path, subsets) #print('---------------Start to generate data infos---------------') #for subset in subsets: # filename = save_path / subset / 'argo_infos.pkl' # # argo_infos = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True) # with open(filename, 'wb') as f: # pickle.dump(argo_infos, f) # print('ArgoAI info {} file is saved to {}'.format(subset, filename)) print('---------------Start create groundtruth database for data augmentation---------------') dataset.create_gt_parts(save_path) print('---------------Data preparation Done---------------') if __name__ == '__main__': from argparse import ArgumentParser parser = ArgumentParser(description='Generates a database of Parts') parser.add_argument('data_path', help='root path of the dataset') parser.add_argument('--save_path', default=None, help='path for saving the parts') parser.add_argument('--subsets', nargs='+', default=['train1','train2','train3','train4'], help='List of database subsets') args = parser.parse_args() if args.save_path is None: args.save_path = args.data_path 
create_argo_infos(Path(args.data_path), Path(args.save_path), args.subsets)
nilq/baby-python
python
import pytest from app.core.enums import CaseStatus from app.entities import RecordOnAppeal, Court def test_roa_from_district_case(simple_case) -> None: ''' It should create an record of appeal for this case, set the original_case_id. ''' court = Court.from_id('ca9') roa = simple_case.create_record_on_appeal(court) assert isinstance(roa, RecordOnAppeal) assert roa.original_case_id == simple_case.id assert roa.receiving_court == 'ca9' assert roa.court == simple_case.court def test_roa_from_district_case_no_appellate_court(simple_case) -> None: ''' It should not set the receiving court automatically. ''' roa = simple_case.create_record_on_appeal() assert roa.receiving_court == None assert roa.court == simple_case.court def test_district_case_status_roa(simple_case) -> None: ''' It should change status of original case to submitted_for_appeal. ''' _ = simple_case.create_record_on_appeal() assert simple_case.status == CaseStatus.submitted_for_appeal def test_validates_roa(simple_case) -> None: ''' It should raise an exception if an record of appeal is created when one exists. ''' _ = simple_case.create_record_on_appeal() assert simple_case.status == CaseStatus.submitted_for_appeal with pytest.raises(ValueError): _ = simple_case.create_record_on_appeal() def test_send_roa(simple_case) -> None: ''' If should set the receiving court on the record on appeal. ''' roa = simple_case.create_record_on_appeal() roa.send_to_court(Court.from_id('ca9')) assert roa.receiving_court == 'ca9'
nilq/baby-python
python
import asyncio # 获取事件循环 import time loop = asyncio.get_event_loop() async def main(): await asyncio.sleep(10) print("main coroutine running") print(time.time_ns()) # 运行一个协程函数 loop.run_until_complete(main()) print(time.time_ns()) # 在线程池中运行一个协程函数 # loop.run_in_executor() # 运行一个事件循环 loop.run_forever()
nilq/baby-python
python
""" ga2vcf cli """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import ga4gh.converters.cli as cli import ga4gh.converters.converters as converters import ga4gh.common.cli as common_cli import ga4gh.client.cli as cli_client class Ga2VcfRunner(cli_client.SearchVariantsRunner): """ Runner class for the ga2vcf """ def __init__(self, args): super(Ga2VcfRunner, self).__init__(args) self._outputFile = args.outputFile self._binaryOutput = False if args.outputFormat == "bcf": self._binaryOutput = True def run(self): variantSet = self._client.get_variant_set(self._variantSetId) iterator = self._client.search_variants( start=self._start, end=self._end, reference_name=self._referenceName, variant_set_id=self._variantSetId, call_set_ids=self._callSetIds) # do conversion vcfConverter = converters.VcfConverter( variantSet, iterator, self._outputFile, self._binaryOutput) vcfConverter.convert() def getGa2VcfParser(): parser = common_cli.createArgumentParser(( "GA4GH VCF conversion tool. Converts variant information " "stored in a GA4GH repository into VCF format.")) cli_client.addClientGlobalOptions(parser) cli.addOutputFileArgument(parser) cli_client.addUrlArgument(parser) parser.add_argument("variantSetId", help="The variant set to convert") parser.add_argument( "--outputFormat", "-O", choices=['vcf', 'bcf'], default="vcf", help=( "The format for object output. Currently supported are " "'vcf' (default), which is a text-based format and " "'bcf', which is the binary equivalent")) cli_client.addReferenceNameArgument(parser) cli_client.addCallSetIdsArgument(parser) cli_client.addStartArgument(parser) cli_client.addEndArgument(parser) cli_client.addPageSizeArgument(parser) return parser def ga2vcf_main(): parser = getGa2VcfParser() args = parser.parse_args() if "baseUrl" not in args: parser.print_help() else: runner = Ga2VcfRunner(args) runner.run()
nilq/baby-python
python
#MenuTitle: Angularizzle # -*- coding: utf-8 -*- __doc__=""" Creates angular versions of glyphs made up of cubic paths. """ import math import vanilla import copy import GlyphsApp f = Glyphs.font masterlen = len(f.masters) # Script name by Type Overlord Florian Horatio Runge of Flensborre @FlorianRunge class Angela( object ): def __init__( self ): windowWidth = 222 windowHeight = 130 self.w = vanilla.FloatingWindow( ( windowWidth, windowHeight ), "Angularizzle Yo", autosaveName = "com.LNP.Angela.mainwindow" ) self.w.titlesize = vanilla.TextBox((20, 20, -10, 17), "Min plane:") self.w.inputSize = vanilla.EditText( (100, 20, 100, 20), "80", sizeStyle = 'small') self.w.checkBox = vanilla.CheckBox((20, 50, -10, 17), "Keep detail", value=True) self.w.cancelButton = vanilla.Button((20, 80, 85, 30), "Cancel", sizeStyle='regular', callback=self.CloseApp ) self.w.runButton = vanilla.Button((120, 80, 85, 30), "Process", sizeStyle='regular', callback=self.DoIt ) self.w.setDefaultButton (self.w.runButton) # Load Settings: Save/Load settings by Toschi Omagari if not self.LoadP(): pass #print "Could not load preferences. 
Will resort to defaults" self.w.open() self.w.makeKey() global font font = Glyphs.font global selectedGlyphs selectedGlyphs = [ l.parent for l in font.selectedLayers ] # if single glyph save state if len(selectedGlyphs)==1: thisgl = font.selectedLayers[0] global GlyphStartPaths GlyphStartPaths = copy.deepcopy(thisgl.paths) def CloseApp(self, sender): thisgl = font.selectedLayers[0] self.ClearScreen(thisgl) for p in GlyphStartPaths: thisgl.paths.append(p) self.w.close() def SaveP( self, sender ): try: Glyphs.defaults["com.LNP.Angela.inputSize"] = self.w.inputSize.get() Glyphs.defaults["com.LNP.Angela.checkBox"] = self.w.checkbox.get() except: return False return True def LoadP( self ): try: self.w.inputSize.set( Glyphs.defaults["com.LNP.Angela.inputSize"] ) self.w.checkbox.set( Glyphs.defaults["com.LNP.Angela.checkbox"] ) except: return False return True def MainAngela( self, asize, detail ): if asize.isdigit()==True: global stepnum, tStepSize asize = int(asize) stepnum=130 tStepSize = 1.0/stepnum # !impt font = Glyphs.font angsize = int(asize) font.disableUpdateInterface() for glyph in selectedGlyphs: thisgl = font.glyphs[glyph.name].layers[0] if thisgl.paths==0: continue thisgl.color = 8 #purple if len(selectedGlyphs)>1: ang = self.ReturnNodesAlongPath(thisgl.paths, angsize) else: ang = self.ReturnNodesAlongPath(GlyphStartPaths, angsize) if detail==False: ang = self.StripDetail(ang, asize) if ang: #thisgl = font.selectedLayers[0] self.ClearScreen(thisgl) for n in ang: pts = n[2] isclosed = n[1] outline = self.ListToPath(pts, isclosed) thisgl.paths.append( outline ) font.enableUpdateInterface() if not self.SaveP( self ): pass #print "Could not save preferences." 
if len(selectedGlyphs)>1: self.w.close() def StripDetail (self, nlist, asize): newList = list() for s in nlist: newnodes = list() length = s[0] isclosed = s[1] nlist = s[2] p1x = nlist[0][0] p1y = nlist[0][1] for n in range(1, len(nlist)-1): p2x = nlist[n][0] p2y = nlist[n][1] dist = math.hypot(p2x - p1x, p2y - p1y) if dist > asize: newnodes.append([p1x, p1y]) p1x = p2x p1y = p2y else: continue nl = [length, isclosed, newnodes] newList.append(nl) return newList def DoIt( self, sender ): asize = self.w.inputSize.get() detail = self.w.checkBox.get() if int(asize) > 4: self.MainAngela(asize, detail) else: pass # Remove any duplicate points from list def RemoveDuplicatePts(self, ptlist): ptl = [] for i in ptlist: if i not in ptl: ptl.append(i) ptl.append(ptlist[-1]) return ptl # the main return t postion on curve script p0,1,2,3 is segment def GetPoint(self, p0, p1, p2, p3, t): ax = self.lerp( [p0[0], p1[0]], t ) ay = self.lerp( [p0[1], p1[1]], t ) bx = self.lerp( [p1[0], p2[0]], t ) by = self.lerp( [p1[1], p2[1]], t ) cx = self.lerp( [p2[0], p3[0]], t ) cy = self.lerp( [p2[1], p3[1]], t ) dx = self.lerp( [ax, bx], t ) dy = self.lerp( [ay, by], t ) ex = self.lerp( [bx, cx], t ) ey = self.lerp( [by, cy], t ) pointx = self.lerp( [dx, ex], t ) pointy = self.lerp( [dy, ey], t ) calc = [pointx,pointy] return calc # Put all the xy coords of linear t GetPoint() increments in list def CreatePointList(self,p0,p1,p2,p3): pl = list() tmp=0 while tmp<1: t = tmp calc = self.GetPoint(p0,p1,p2,p3,tmp) pl.append(calc) tmp = tmp + tStepSize return pl #Clear layer except components def ClearScreen(self, clearlayer): for i in range( len( clearlayer.paths ))[::-1]: del clearlayer.paths[i] def lerp(self, v, d): return v[0] * (1 - d) + v[1] * d # create distance look up list from pointlist so we can determine a % position along spine # each item represents cumulative distances from beginning of segments def CreateDistList(self, pointlist): lookup = list() totallength = 0 for tp in range 
(0,len(pointlist)-1): p1x = pointlist[tp][0] p1y = pointlist[tp][1] p2x = pointlist[tp+1][0] p2y = pointlist[tp+1][1] dist = math.hypot(p2x - p1x, p2y - p1y) totallength += dist lookup.append(totallength) lookup.insert(0,0) return lookup #find at which index the desired length matches to determine nearest t step value #return new precise t value between the two indexes desiredlen falls def FindPosInDistList(self, lookup, newlen): #newlen = length along curve for s in range (0,len(lookup)-1): b1 = lookup[s] b2 = lookup[s+1] if b1 <= newlen <= b2: if b1==0: newt=0 else: percentb = ( 100 / (b2 - b1) ) * (newlen - b1) newt = (s*tStepSize) + ( tStepSize * (percentb/100) ) return (newt) # Draw new angular path from list def ListToPath(self, ptlist, isopen): np = GSPath() if isopen == True and len(ptlist)>2: del ptlist[-1] if len(ptlist)>2: #so counters don't devolve completely for pt in ptlist: newnode = GSNode() newnode.type = GSLINE newnode.position = (pt[0], pt[1]) np.nodes.append( newnode ) np.closed = isopen return np def PointToPointSteps(self, tp0, tp1, spacebetween): n1x, n1y, n2x, n2y = tp0[0], tp0[1], tp1[0], tp1[1] tmplist = list() dist = math.hypot(n2x - n1x, n2y - n1y) currentx = n1x currenty = n1y psteps = int(math.ceil(dist/spacebetween)) stepx = (n2x-n1x) / psteps stepy = (n2y-n1y) / psteps for n in range(psteps): tmplist.append([currentx, currenty]) currentx+=stepx currenty+=stepy return tmplist # returns nodes along a curve at intervals of space between def ReturnNodesAlongPath(self, GlyphStartPaths, spacebetween): allPaths = list() for path in GlyphStartPaths: pathTotalLength = 0 allpointslist = [] scount=0 if path.closed==False: continue for segment in path.segments: nodenum = len(segment) scount+=1 if segment.type=="move": continue # if straight segment if nodenum==2: if scount<1: continue tp0 = (segment[0].x, segment[0].y) tp1 = (segment[1].x, segment[1].y) dist = math.hypot(tp1[0] - tp0[0], tp1[1] - tp0[1]) pathTotalLength+=dist straightlinepts = 
self.PointToPointSteps(tp0,tp1,spacebetween) for sl in straightlinepts: allpointslist.append(sl) # if bezier curve segment if nodenum==4: tp0 = (segment[0].x, segment[0].y) tp1 = (segment[1].x, segment[1].y) tp2 = (segment[2].x, segment[2].y) tp3 = (segment[3].x, segment[3].y) pointlist = self.CreatePointList(tp0, tp1, tp2, tp3) lookup = self.CreateDistList(pointlist) totallength = lookup[-1] pathTotalLength += totallength # check that the distance of curve segment is at least as big as spacebetween jump if totallength > spacebetween: steps = 20 stepinc = totallength / steps steps = int(math.floor(totallength/spacebetween)) stepinc = totallength / steps dlen=0 # distance to check in list of distances for s in range(0,steps+1): if s==0: newt=0 elif s==steps: newt=1 else: newt = self.FindPosInDistList(lookup,dlen) calc = self.GetPoint(tp0,tp1,tp2,tp3,newt) allpointslist.append(calc) dlen+=stepinc else: allpointslist.append([tp0[0],tp0[1]]) allpointslist.append([tp3[0],tp3[1]]) if allpointslist: allpointslist = self.RemoveDuplicatePts(allpointslist) pathdata = [pathTotalLength, path.closed, allpointslist] allPaths.append(pathdata) return allPaths Angela()
nilq/baby-python
python
from django.contrib import admin from .models import ContactQuery # Register your models here. admin.site.register(ContactQuery)
nilq/baby-python
python
######### Third-party software locations ######### hmmer_dir = "./hmmer_linux/bin/" phobius_dir = "./phobius/" #these can be overriden by the --hmerdir, --phobiusdir and -wp options phobius_url = "https://phobius.sbc.su.se/cgi-bin/predict.pl" ######### Profile HMM locations ######### PTKhmm_dir = "./pHMMs/" JM_dir = "./pHMMs/JM/" Pfam_dir = "./pHMMs/Pfam" ### DO NOT CHANGE THEM!!!!!!!!!!!
nilq/baby-python
python
import logging

from quarkchain.evm.slogging import get_logger, configure_logging

"""
slogging module used by ethereum is configured via a comman-separated string,
and each named logger will receive custom level (defaults to INFO)

examples:
':info'
':info,p2p.discovery:debug'

because of the way that configure_logging() is written, we cannot call
configure_logging() after cluster_config is loaded;
so the best way to configure slogging is to change SLOGGING_CONFIGURATION here
"""
# Applied once at import time — see the note above about why this cannot be
# reconfigured later.
SLOGGING_CONFIGURATION = ":info"
configure_logging(SLOGGING_CONFIGURATION)

# Smoke test: emit a structured warning through the slogging logger.
if __name__ == "__main__":
    logging.basicConfig()
    log = get_logger("test")
    # NOTE(review): `warn` is a deprecated alias in stdlib logging; slogging's
    # custom logger may only support kwargs via `warn` — confirm before renaming.
    log.warn("miner.new_block", block_hash="abcdef123", nonce=2234231)
nilq/baby-python
python
""" The module opens the camera capture a point cloud and: - mesh the point cloud and give back a water-tight mesh """ import copy import sys from tomlkit import key if sys.version_info[0] == 2: # the tkinter library changed it's name from Python 2 to 3. import Tkinter tkinter = Tkinter #I decided to use a library reference to avoid potential naming conflicts with people's programs. else: import tkinter from PIL import Image from PIL import ImageTk import pymeshlab # keep on top as first import (why?) import pyzed.sl as sl import numpy as np import open3d as o3d import tifffile from sklearn.cluster import KMeans from scipy.spatial import ConvexHull import threading ## Imports for function: convert_roit_meter_pixel import os import yaml from util import terminal import distance_map sys.path.append('/usr/local/lib/python3.8/dist-packages') import cv2 #TODO: set wall scanning 1.5 x 0.7 m dimension area ROI = [0.7,1.5] CENTER = [250,750] # CENTER = [360,680] # Number of frames taken for the point cloud acquisition. NUMBER_OF_AVERAGE_FRAMES = 1 # Scaling factor when cropping the live stream cloud on keypoints CLUSTER_REDUCTION_FACTOR = 0.4 def rotationMatrix(r): """ Simple 3D Matrix rotation function, obtained from following sources: https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula Args: -r: a rotation vector, with rotation value in x, y and z direction. """ ## Parameter for the rotationmatrix function rotationAngleDegThreshold = 0.00001 # its length is the rotation angle rotationAngleDeg = np.linalg.norm(r) if rotationAngleDeg > rotationAngleDegThreshold: # its direction is the rotation axis. rotationAxis = r / rotationAngleDeg # positive angle is clockwise K = np.array([[ 0, -rotationAxis[2], rotationAxis[1]], [ rotationAxis[2], 0, -rotationAxis[0]], [-rotationAxis[1], rotationAxis[0], 0 ]]) # Note the np.dot is very important. 
R = np.eye(3) + (np.sin(np.deg2rad(rotationAngleDeg)) * K) + \ ((1.0 - np.cos(np.deg2rad(rotationAngleDeg))) * np.dot(K, K)) tmp = np.eye(4) tmp[0:3, 0:3] = R else: R = np.eye(3) return R def load_transformation_matrix(): _root_file = os.path.dirname(__file__) _calib_information_path = os.path.join(_root_file, "calib/utils/calibration_info.yaml") # Test if the file exist as it is supposed when runing calib function entirely if not os.path.exists(_calib_information_path): terminal.error_print( f"No Calibration Data has been found in: {_calib_information_path}" ) exit() else: ## Load the transformation matrix # Opening YAML file with open(_calib_information_path) as yaml_file: data = yaml.load(yaml_file, Loader=yaml.FullLoader) # extracting information matrix_data = data["3D_2D_Matrix"] s, f, u0, v0, dX, dY, dZ, m_x, m_y, gamma, r0, r1, r2 = ( matrix_data["s"], matrix_data["f"], matrix_data["u0"], matrix_data["v0"], matrix_data["dX"], matrix_data["dY"], matrix_data["dZ"], matrix_data["m_x"], matrix_data["m_y"], matrix_data["gamma"], matrix_data["r0"], matrix_data["r1"], matrix_data["r2"], ) Rt = np.zeros((4, 4)) R = rotationMatrix(np.array([r0, r1, r2])) Rt[0:3, 0:3] = R Rt[:, -1] = np.array([dX, dY, dZ, 1]) K = np.array([[f*m_x, gamma, u0, 0], [0, f*m_y, v0, 0], [0, 0, 1, 0]]) transformation_matrix = np.dot(K,Rt)/s return transformation_matrix def convert_roi_meter_pixel(roi,center): """ This function is returning a rectangular Region Of Interest in pixel slices, centered in the middle of the image. And take as an input an array of the width and the length of the ROI in meters. :param roi: Array of the width and the length of the ROI in meters. center: center of the image in pixel. 
""" _root_file = os.path.dirname(__file__) _calib_information_path = os.path.join(_root_file, "calib/utils/calibration_info.yaml") # Test if the file exist as it is supposed when runing calib function entirely if not os.path.exists(_calib_information_path): terminal.error_print(f"No Calibration Data has been found in: {_calib_information_path}") exit() else: # Opening YAML file with open(_calib_information_path) as yaml_file: data = yaml.load(yaml_file,Loader=yaml.FullLoader) roi_info = data["ROI_info"] distance_m = roi_info["Distance_m"] distance_px = roi_info["Distance_px"] convert_m_px = distance_px/distance_m roi_px = np.array(roi) * convert_m_px ## We suppose the camera used is the zed camera, with an image acquisition of 1280x720 pixels ## the center is (360,640) slice_roi = [slice(int(center[0]-roi_px[0]/2),int(center[0]+roi_px[0]/2)), slice(int(center[1]-roi_px[1]/2),int(center[1]+roi_px[1]/2))] return slice_roi def set_up_zed(): """ This function is setting up the zed camera for depth capture return: The initialized camera, and the zed point cloud format/host """ # Set ZED params init = sl.InitParameters(camera_resolution=sl.RESOLUTION.HD720, # HD720 | 1280*720 camera_fps=30, # available framerates: 15, 30, 60 fps depth_mode=sl.DEPTH_MODE.QUALITY, # posible mods: sl.DEPTH_MODE.PERFORMANCE/.QUALITY/.ULTRA coordinate_units=sl.UNIT.METER, coordinate_system=sl.COORDINATE_SYSTEM.RIGHT_HANDED_Y_UP, # sl.COORDINATE_SYSTEM.LEFT_HANDED_Y_UP sdk_verbose = True, # Enable verbose logging depth_minimum_distance=0.3, # Enable capture from 30 cm depth_maximum_distance=3.0 # Enable capture up to 3m ) # Open ZED and catch error zed = sl.Camera() status = zed.open(init) if status != sl.ERROR_CODE.SUCCESS: print(repr(status)) exit() camera_info = zed.get_camera_information() print("ZED camera opened, serial number: {0}".format(camera_info.serial_number)) # Setting an empty point cloud point_cloud = sl.Mat(zed.get_camera_information().camera_resolution.width, 
zed.get_camera_information().camera_resolution.height, sl.MAT_TYPE.F32_C4, sl.MEM.CPU) return zed, point_cloud def close_up_zed(zed_cam): """ If zed it is open it closes the camera. :param zed_cam: the camera zed to close """ zed_cam.close() def get_median_cloud(zed, point_cloud, medianFrames, roi_m,center): """ This function is giving an average value of X, Y and Z obtained by a certain number of sequentialy acquired frames. This helps to stabilize the coordinates acquired, in case of flickering for instance. :param zed: initialized and opened zed camera :param point_cloud: initialized point cloud of the zed Camera :param medianFrames: Number of sequentialy acquired Frames for the average value generation :param components: List of values 0,1 or 2 for respectively X,Y and Z coordinates. return: The median point clouds xyz (no RGB) of the acquired frames in shape (n,3) """ # Get multiple frames and stack_of_images = [] for n in range(medianFrames): if zed.grab() == sl.ERROR_CODE.SUCCESS: zed.retrieve_measure(point_cloud, sl.MEASURE.XYZRGBA,sl.MEM.CPU, zed.get_camera_information().camera_resolution) point_cloud_np = point_cloud.get_data() stack_of_images.append(point_cloud_np) else: print(":(") return None stack_of_images = np.array(stack_of_images) stack_of_images[not np.isfinite] = np.nan # Convert the ROI value from meters to pixels and into a slice object. 
roi_px = convert_roi_meter_pixel(roi_m,center) # roi_px = ROI # Crop the point cloud following the ROI stack_of_images = stack_of_images[:, roi_px[0], roi_px[1], :] # Median the point clouds median = np.nanmedian(stack_of_images, axis=0) # Get rid of colors from point cloud median = median[:, :, :3] # Change shape of numpy to (n,3) for latter o3d transformation median = median.reshape((-1, 3)) # Archive: Transform nan in zeros (median[np.isnan(median)] = 0) # Remove nan values from cloud median = median[~np.isnan(median).any(axis=1)] return median def np_pcd2o3d_mesh(np_pcd, n_target_downasample=None): """ Mesh point cloud in format numpy in mesh format open3d. If the downsample parameter is input it downsize the cloud before meshing. Meshing and downsampling is done with pymeshlab, which offers a clean, water-tight meshing method. !!! No COLORS !!! :param np_pcd: point cloud in format numpy vector (n,3) :param n_target_downsample: int of target points after point cloud unifrom downsample return: o3d mesh """ # Create a new pymeshlab mesh and meshset pyml_m_pcd = pymeshlab.Mesh(np_pcd) pyml_ms = pymeshlab.MeshSet() pyml_ms.add_mesh(pyml_m_pcd) # Downsample the cloud if (n_target_downasample is None): pyml_ms.generate_simplified_point_cloud(samplenum=0) else: if (isinstance(n_target_downasample, int)): pyml_ms.generate_simplified_point_cloud(samplenum=n_target_downasample) else: print("The target for the downsample should be an int") exit() # Compute normals and mesh the point cloud pyml_ms.compute_normal_for_point_clouds(flipflag=True,viewpos=[0,0,0]) pyml_ms.generate_surface_reconstruction_screened_poisson(preclean=True) # Return the mesh from the dataset try: pyml_m = pyml_ms.current_mesh() except: print("Error!", sys.exc_info()[0], "occurred.") sys.exit("The pymeshlab MeshSet does not contain any active mesh") # Convert from pyml mesh to o3d mesh (n.b.: colors set to 0,0,0) pyml_vertices = pyml_m.vertex_matrix().astype(np.float64) pyml_vertices_normals = 
pyml_m.vertex_normal_matrix().astype(np.float64) pyml_faces = pyml_m.face_matrix() pyml_faces_normals = pyml_m.face_normal_matrix().astype(np.float64) # print(f'pyml mesh\n', # f'vertices shape: {pyml_vertices.shape}\n', # f'vertices dtype: {pyml_vertices.dtype}\n', # f'vertices normals shape: {pyml_vertices_normals.shape}\n', # f'vertices normals dtype: {pyml_vertices_normals.dtype}\n', # f'faces shape: {pyml_faces.shape}\n', # f'faces dtype: {pyml_faces.dtype}\n', # f'faces normals shape: {pyml_faces_normals.shape}\n', # f'faces normals dtype: {pyml_faces_normals.dtype}\n') o3d_m = o3d.geometry.TriangleMesh() o3d_m.vertices = o3d.utility.Vector3dVector(pyml_vertices) o3d_m_vertices = np.asarray(o3d_m.vertices) o3d_m.vertex_normals = o3d.utility.Vector3dVector(pyml_vertices_normals) o3d_m_vertex_normals = np.asarray(o3d_m.vertex_normals) o3d_m.vertex_colors = o3d.utility.Vector3dVector(np.zeros(pyml_vertices.shape)) o3d_m_vertex_clr = np.asarray(o3d_m.vertex_colors) o3d_m.triangles = o3d.utility.Vector3iVector(pyml_faces) o3d_m_triangles = np.asarray(o3d_m.triangles) o3d_m.triangle_normals = o3d.utility.Vector3dVector(pyml_faces_normals) o3d_m_triangles_normals = np.asarray(o3d_m.triangle_normals) # print(f'o3d mesh:\n', # f'vertices shape: {o3d_m_vertices.shape}\n', # f'vertices dtype: {o3d_m_vertices.dtype}\n', # f'vertices normals shape: {o3d_m_vertex_normals.shape}\n', # f'vertices normals dtype: {o3d_m_vertex_normals.dtype}\n', # f'vertices colors shape: {o3d_m_vertex_clr.shape}\n', # f'vertices colors dtype: {o3d_m_vertex_clr.dtype}\n', # f'triangles shape: {o3d_m_triangles.shape}\n', # f'triangles dtype: {o3d_m_triangles.dtype}\n', # f'triangles normals shape: {o3d_m_triangles_normals.shape}\n', # f'triangles normals dtype: {o3d_m_triangles_normals.dtype}\n') # Check the sanity of the mesh err_msg = 'ERROR:WrongMeshConvertion: The mesh convert between pymeshlab and open3d is wrong.' 
assert len(o3d_m_vertices) == len(pyml_vertices), err_msg assert len(o3d_m_vertex_normals) == len(pyml_vertices_normals), err_msg assert len(o3d_m_triangles) == len(pyml_faces), err_msg return o3d_m def get_mesh_scene(n_target_downasample): """ Main method to get point cloud and mesh :param n_target_downasample: target number of points to downsample cloud return: mesh in open3d format """ # Set up the zed parameters and initialize zed, point_cloud = set_up_zed() # Average point cloud from frames np_median_pcd = get_median_cloud(zed,point_cloud,NUMBER_OF_AVERAGE_FRAMES, ROI,CENTER) # From point cloud to pymeshlab mesh set + downsapling o3d_m = np_pcd2o3d_mesh(np_median_pcd, n_target_downasample=n_target_downasample) # TODO: clean up this code ~ condense # Crop mesh according to ROI pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(np_median_pcd) bbox = pcd.get_axis_aligned_bounding_box() o3d_m = o3d_m.crop(bbox) # Close the camera close_up_zed(zed) return o3d_m def get_pcd_scene(n_target_downsample, zed, point_cloud): """ Main method to get point cloud :param n_target_downasample: target number of points to downsample cloud :param zed: initilaized camera and point cloud from the camera return: point cloud in open3d format """ # Capture the average point cloud from frames np_median_pcd = get_median_cloud(zed,point_cloud,NUMBER_OF_AVERAGE_FRAMES, ROI, CENTER) # Convert numpy to o3d cloud pcd = o3d.geometry.PointCloud() pcd.points = o3d.utility.Vector3dVector(np_median_pcd) return pcd class Live_stream(object): """ This is the class creating the tkinter window with the live stream of the position of the stone. """ def __init__(self,Live_3D_space,image_drawer): self.tk = tkinter.Tk() self.tk.title('projector_window') self.w, self.h = self.tk.winfo_screenwidth(), self.tk.winfo_screenheight() self.tk.geometry("%dx%d+-50+-50" % (self.w, self.h)) self.state = False self.tk.attributes('-zoomed', True) # This just maximizes it so we can see the window. 
It's nothing to do with fullscreen. self.tk.bind('<Escape>', self._end_stream) self.tk.attributes("-fullscreen", True) self.lmain = tkinter.Label(self.tk) self.lmain.pack() self.Live_3D_space = Live_3D_space self.image_drawer = image_drawer def _end_stream(self,event=None): """ Function to end the stream, linked with the escape key in __init__. """ self.tk.quit() self.tk.destroy() def _toggle_fullscreen(self, event=None): """ Function to toggle fullscreen, linked with the F11 key in __init__. """ self.state = not self.state # Just toggling the boolean self.tk.attributes("-fullscreen", self.state) def _show_frame(self): """ Function which is called in the run function, whihch is the loop of tkinter. It updates the tkinter image with the acquired live stream image. """ self.frame = self._get_live_stream() self.imgtk = ImageTk.PhotoImage(image=Image.fromarray(self.frame, mode="RGB")) self.lmain.configure(image=self.imgtk) self.lmain.after(10, self._show_frame) def _get_live_stream(self): """ Function which updates the new image, by getting an update of the 3D Space. This function is using the class Live_3D_space. """ # Update the 3D space, with new capture points and all the distance measures self.Live_3D_space.update_3D_space() # Draw the new image for live stream img = self.image_drawer.draw_image_from_3D_space(self.Live_3D_space) return img def run(self): self._show_frame() self.tk.mainloop() class Live_3D_space(object): """ This class is containing the 3D space, where the acquired pcd is processed. It allows us to process the convex hull once and then update the pcd distances. """ def __init__(self,rock_mesh,zed,point_cloud): self.point_cloud = point_cloud self.rock_mesh = rock_mesh self.zed = zed self.upper_pcd_from_mesh = self._get_upper_pcd() self.list_mesh_cluster, self.key_points = self._get_mesh_cluster() def _get_upper_pcd(self): """ This function returns the upper pcd from the rock_mesh. 
""" # Create shifted point cloud mesh = copy.deepcopy(self.rock_mesh) subsampled_mesh = mesh.sample_points_poisson_disk(1000) subsampled_mesh = subsampled_mesh.translate((0, 0, 0.01)) # Crop point cloud cropped_pcd = self._crop_pcd_by_occupancy(mesh.scale(1.1,mesh.get_center()),subsampled_mesh) return cropped_pcd def _crop_pcd_by_occupancy(self,mesh,pcd): """ This function is returning a cropped point cloud. It will return the inverse of a crop of the pcd, using the mesh as the bounding box. If the points are inside the mesh, they will be removed. """ # Load mesh and convert to open3d.t.geometry.TriangleMesh mesh = o3d.t.geometry.TriangleMesh.from_legacy(mesh) #Create the scene scene = o3d.t.geometry.RaycastingScene() _ = scene.add_triangles(mesh) # Compute occupancy map occupancy = scene.compute_occupancy(np.asarray(pcd.points, dtype=np.float32)) cropped_pcd = o3d.geometry.PointCloud() outside_points = [] for i,point in enumerate(np.asarray(pcd.points)): if occupancy[i] == 0: outside_points.append(point) if len(outside_points) == 0: cropped_pcd.points = o3d.utility.Vector3dVector(np.array([[0,0,-2]])) else: cropped_pcd.points = o3d.utility.Vector3dVector(np.array(outside_points)) return cropped_pcd def _get_mesh_cluster(self): """ This function returns both the clusters and the centers of the clusters, of the rock mesh. Those centers are our fixed keypoints. This function is using the K-mean algorithm, which give random results. 
""" # Get the points of the point cloud Points = np.asarray(self.upper_pcd_from_mesh.points) # Use of K-mean for detecting 3 points in the upper point cloud kmeans = KMeans(n_clusters=3, random_state=0).fit(Points) key_points = kmeans.cluster_centers_ pcd_labels = kmeans.labels_ list_cluster = [] for j in range(0,3): pcd_cluster = o3d.geometry.PointCloud() cluster = [] for i,label in enumerate(pcd_labels): if label == j: cluster.append(Points[i]) pcd_cluster.points = o3d.utility.Vector3dVector(np.array(cluster)) list_cluster.append(pcd_cluster) return list_cluster, key_points def _column_crop(self,captured_pcd,mesh,scale=1.5): """ This function is returning a cropped point cloud, using as a bounding box, the boundig box of the mesh, scaled with a given scale, and tranlsated along z axis. """ # Translate the mesh mesh_down = copy.deepcopy(mesh).translate((0, 0, -10)) mesh_up = copy.deepcopy(mesh).translate((0, 0, 10)) # Union of the two meshes mesh_down_up = mesh_down + mesh_up # Get Axis-aligned bounding box bbox = mesh_down_up.get_axis_aligned_bounding_box() bbox = bbox.scale(scale,bbox.get_center()) crop_captured_pcd = captured_pcd.crop(bbox) return crop_captured_pcd def _crop_pcd_on_cluster(self,pcd,list_of_mesh): """ This function is returing a list of cropped points, and the centers of all the cropped point clouds. Each cropped point cloud is cropped using a given mesh. 
""" list_pcds = [] centers = [] for mesh in list_of_mesh: cropped_cluster = self._column_crop(pcd,mesh,scale=CLUSTER_REDUCTION_FACTOR) list_pcds.append(cropped_cluster) center = cropped_cluster.get_center() centers.append(center) return list_pcds,np.array(centers) ## Getters def get_list_mesh_cluster(self): return self.list_mesh_cluster def get_upper_pcd(self): return self.upper_pcd_from_mesh def get_distances(self): return self.distances def get_centers(self): return self.centers def get_key_points(self): return self.key_points def update_3D_space(self): # Get point cloud from camera pcd = get_pcd_scene(2000, self.zed, self.point_cloud) #TODO: check param 2000 ## Crop the pcd from a column cropped_pcd = self._column_crop(pcd,self.rock_mesh,scale=1) ## Get keypoints and cluster pcd from the upper_pcd_from_mesh list_mesh_clusters = self.get_list_mesh_cluster() keypoints = self.get_key_points() ## Get captured pcd clusters captured_pcd_clusters,self.centers = self._crop_pcd_on_cluster(cropped_pcd,list_mesh_clusters) ## Compute distance distances = (np.array(keypoints)[:,2] - self.centers[:,2])*1000 # To convert in milimeters # clip the distances for i,distance in enumerate(distances): if np.abs(distance) < 5: distances[i] = np.sign(distance)*5 if np.abs(distance) > 400: distances[i] = np.sign(distance)*400 self.distances = distances class Image_drawer(object): """ This class is creating an object which will allow us to list a certain number of pixels, with different caracteristiques, that we can at the end get into a 2D image. """ def __init__(self,Live_3D_space): self.width = 1920 self.height = 1080 self.image = np.zeros((self.height, self.width, 3),dtype=np.uint8) self.pixels = [] self.transform_3D_2D = load_transformation_matrix() self.Live_3D_space = Live_3D_space def _3D_to_2D(self,x,y,z): """ This function is transforming a 3D point into a 2D point. 
""" point_2D = np.dot(self.transform_3D_2D, np.array([[x], [y], [z],[1]])) point_2D = point_2D[0:2] return point_2D def _add_3D_point_to_image(self,x,y,z,color,size): """ This function is taking as an input x,y,z coordinates from a point in space, and caracteristiques of the pixel, like color and size. And if the coordinate is in the image range, we add the pixel to the list of pixels. """ if not np.isnan(x) and not np.isnan(y) and not np.isnan(z): pixel_coord = self._3D_to_2D(x,y,z) pixel = [int(pixel_coord[1][0]),int(pixel_coord[0][0]),color,size] j,i = pixel_coord if i > 0 and i < self.height and j > 0 and j < self.width: self.pixels.append(pixel) return 1 else: # print(f"X,Y,Z: {x},{y},{z}, giving Pixel: {i}, {j} are out of bounds for image of size {self.height}, {self.width}") return 0 else: # print(f"point: [{x},{y},{z}] is not admissible") return 0 def _add_pcd_to_image(self,pcd,size=2,color=[255,0,255]): """ This function is adding an entire point cloud to the image. It takes as an input an o3d point cloud, and the caracteristiques of the pixel, like color and size. """ npy_pts = np.asarray(pcd.points) npy_colors = np.asarray(pcd.colors) pixl_count = 0 if len(npy_pts) == 0: print("pcd is empty") else: if len(npy_colors) < len(npy_pts): for _,point in enumerate(npy_pts): pixl_count +=self._add_3D_point_to_image(point[0],point[1],point[2],color,size) else: for i,point in enumerate(npy_pts): pixl_count +=self._add_3D_point_to_image(point[0],point[1],point[2],npy_colors[i],size) if pixl_count > 0.1*len(npy_pts): return True else: return False def _draw_convex_hull_on_image(self,color,size): """ This function is creating a convex hull out of all the pixels added in the pixels list. It will draw the convex hull on the image using cv2.line. 
""" if len(self.pixels) < 3: # print("Not enough points to create hull") return False else: Y = np.asarray(self.pixels,dtype=object)[:,0] X = np.asarray(self.pixels,dtype=object)[:,1] YX = np.array([Y,X]) self.hull = ConvexHull(YX.T) for simplex in self.hull.simplices: cv2.line(self.image,(self.pixels[simplex[0]][:2][1],self.pixels[simplex[0]][:2][0]),(self.pixels[simplex[1]][:2][1],self.pixels[simplex[1]][:2][0]),color,size) return True def _draw_pixels(self): """ This function is drawing all the pixels declared in pixel list on the image. """ for pixel in self.pixels: i,j,color,size = pixel self.image[i-size:i+size,j-size:j+size,:] = color def _empty_pixels(self): """ This function is emptying the pixels list. """ self.pixels = [] def _mm_2_pxl(self,distance): """ This function is converting the distance in milimeters to pixels. It is doing a linear transformation, with a slope of: a = (MAX_pxl_length-min_pxl_length)/(MAX_mm_length - min_mm_length) """ ## PARAMS: min_pxl_length = 5 MAX_pxl_length = 50 min_mm_length = 5 MAX_mm_length = 400 a = (MAX_pxl_length-min_pxl_length)/(MAX_mm_length - min_mm_length) b = min_pxl_length -a*min_mm_length return a*distance +b def clear_image(self): """ This funcion is setting the image to black. """ self.image = np.zeros((self.height, self.width, 3),dtype=np.uint8) def draw_image_from_3D_space(self,Live_3D_space): """ This function is drawing the image from the 3D space. 
""" # Taking the updated version of the 3D space self.Live_3D_space = Live_3D_space # Clearing all old pixels self._empty_pixels() # Empty the image self.clear_image() # Drawing the convex hull upper_pcd = self.Live_3D_space.get_upper_pcd() is_pcd_valid = self._add_pcd_to_image(upper_pcd) is_convex_valid = self._draw_convex_hull_on_image(color=[0,255,0],size=4) # Removing the points used to create the convex hull self._empty_pixels() if is_convex_valid and is_pcd_valid: keypoints = self.Live_3D_space.get_key_points() distances = self.Live_3D_space.get_distances() ## Add points to image for i,distance in enumerate(distances): radius = self._mm_2_pxl(np.abs(distance)) # Adding points from point cloud with updated distance if distance > 0: self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(255,0,0),int(radius)) else: self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(0,0,255),int(radius)) # Adding points from keypoints self._add_3D_point_to_image(keypoints[i][0],keypoints[i][1],keypoints[i][2],(255,255,255),5) else: # Draw magenta image self.image = np.ones((self.height, self.width, 3),dtype=np.uint8)*[255,0,255] terminal.error_print("ERROR: the stone is outside the 3D scene") terminal.error_print('Press Esc on the projector_window to continue ... /n)\n>>> ') self._draw_pixels() return self.image def get_image(self): return self.image
nilq/baby-python
python
# # Copyright (C) [2020] Futurewei Technologies, Inc. # # FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR # FIT FOR A PARTICULAR PURPOSE. # See the License for the specific language governing permissions and # limitations under the License. # from riscv.EnvRISCV import EnvRISCV from riscv.GenThreadRISCV import GenThreadRISCV from VectorTestSequence import VectorTestSequence ## This test verifies that whole register load and store instructions can be generated and executed # successfully. class MainSequence(VectorTestSequence): def __init__(self, aGenThread, aName=None): super().__init__(aGenThread, aName) # TODO(Noah): Add additional load/store whole register instructions when they are supported # by Handcar. self._mInstrList = ( 'VL1R.V##RISCV', 'VS1R.V##RISCV', ) ## Return a list of test instructions to randomly choose from. def _getInstructionList(self): return self._mInstrList ## Get allowed exception codes. # # @param aInstr The name of the instruction. def _getAllowedExceptionCodes(self, aInstr): allowed_except_codes = set() # TODO(Noah): Remove the line below permitting store page fault exceptions when the page # descriptor generation is improved. Currently, we are generating read-only pages for load # instructions, which is causing subsequent store instructions to the same page to fault. allowed_except_codes.add(0xF) return allowed_except_codes MainSequenceClass = MainSequence GenThreadClass = GenThreadRISCV EnvClass = EnvRISCV
nilq/baby-python
python
#!/usr/bin/env python3
# encoding: utf-8

import sys


def trace_calls_and_returns(frame, event, arg):
    """Global trace hook: report call/return events for this module's functions.

    Returning itself from a 'call' event enables line/return tracing for
    that frame; returning None leaves the frame untraced.
    """
    code_obj = frame.f_code
    func_name = code_obj.co_name
    if func_name == 'write':
        # Ignore write() calls from printing
        return
    filename = code_obj.co_filename
    if not filename.endswith('sys_settrace_return.py'):
        # Ignore calls not in this module
        return
    line_no = frame.f_lineno
    if event == 'call':
        print('* Call to {} on line {} of {}'.format(
            func_name, line_no, filename))
        return trace_calls_and_returns
    if event == 'return':
        print('* {} => {}'.format(func_name, arg))
    return None


def b():
    """Innermost call; returns a fixed marker string."""
    print('inside b()')
    return 'response_from_b '


def a():
    """Calls b() and doubles its result."""
    print('inside a()')
    return b() * 2


sys.settrace(trace_calls_and_returns)
a()
nilq/baby-python
python
from django.contrib import admin

from .models import Customer

# Register your models here.
# Expose Customer in the Django admin using the default ModelAdmin options.
admin.site.register(Customer)
nilq/baby-python
python
"""main API module.""" from __future__ import annotations import dataclasses from dataclasses import dataclass from enum import Enum from typing import Any, Union, cast import aiohttp from siyuanhelper import exceptions data_type = Union[dict, list, None] class Siyuan: """Siyuan Helper Instance.""" def __init__(self, base_url: str = "http://127.0.0.1:6806", token: str = ""): """Init a Siyuan Helper. Args: base_url (str, optional): the url to invoke requests. Defaults to "http://127.0.0.1:6806". token (str, optional): API token, none if unused. Defaults to "". Raises: exceptions.SiyuanAuthFailedException: raised if Authorization Failed. """ self.base_url = base_url self.token = token self.session = aiohttp.ClientSession( self.base_url, headers={ "Authorization": f"Token {token}", "Content-Type": "application/json", }, ) async def close(self) -> None: """Close Siyuan Helper Session, should be explicitly called after use.""" await self.session.close() async def _post(self, url: str, **params: Any) -> data_type: async with self.session.post(url=url, json=params) as resp: ret = SiyuanResponse(**(await resp.json())) if ret.code == 0: return ret.data if ret.code == -1 and ret.msg == "Auth failed": raise exceptions.SiyuanAuthFailedException((self, ret)) else: raise exceptions.SiyuanApiException((self, ret)) async def get_block_by_id(self, block_id: str, full: bool = True) -> SiyuanBlock: """Get SiyuanBlock by block id. Args: block_id (str): the desired block id. full (bool): whether to fetch all the informations. Defaults to True. Returns: SiyuanBlock: the block with all fields. """ if not full: return SiyuanBlock(id=block_id, source=self) return SiyuanBlock( id=block_id, source=self, raw=await self._get_raw_block_by_id(block_id) ) async def get_blocks_by_sql( self, cond: str, full: bool = True ) -> list[SiyuanBlock]: """Get a list of SiyuanBlock by sql. Args: cond (str): the conditions to apply, typically `where id = ''` or so. 
full (bool, optional): whether to fetch all the informations of the block. Defaults to True. Returns: list[SiyuanBlock]: result blocks """ if not full: ret = await self.sql_query(f"SELECT id from BLOCKS {cond}") return [SiyuanBlock(id=x.id, source=self) for x in ret] ret = await self.sql_query(f"SELECT * from BLOCKS {cond}") return [ SiyuanBlock(id=x["id"], source=self, raw=self._gen_block_by_sql_result(x)) for x in ret ] def _gen_block_by_sql_result(self, result: dict) -> RawSiyuanBlock: # use block_fields filter to avoid compatibility issues. return RawSiyuanBlock(**{key: result[key] for key in block_fields}) async def _get_raw_block_by_id(self, block_id: str) -> RawSiyuanBlock: """Generally, you should not use this function unless you know what you're doing. Get RawSiyuanBlock by block id. Args: block_id (str): the desired block id. Returns: RawSiyuanBlock: raw Siyuan Block, with only data fields defined. """ ret = await self.sql_query(f"SELECT * from BLOCKS where ID = '{block_id}'") if type(ret) != list: raise exceptions.SiyuanApiTypeException(ret) if len(ret) == 0: raise exceptions.SiyuanNoResultException(ret) return self._gen_block_by_sql_result(ret[0]) async def get_attrs_by_id(self, block_id: str) -> dict[str, str]: """Get attribute dictionary by block id. Args: block_id (str): target block. Returns: dict[str, str]: key-value dict, note that custom attributes starts with `custom-` """ ret = await self._post("/api/attr/getBlockAttrs", id=block_id) if type(ret) != dict: raise exceptions.SiyuanApiTypeException return ret async def set_attrs_by_id(self, block_id: str, attrs: dict[str, str]) -> None: """Update the attributes of the block with given id. Won't delete attrs not given in the dict. Args: block_id (str): target block id attrs (dict[str, str]): block attrs dict to update """ await self._post("/api/attr/setBlockAttrs", id=block_id, attrs=attrs) async def sql_query(self, sql: str) -> data_type: """Query SQL. 
        Args:
            sql (str): the executed SQL string

        Returns:
            data_type: usually a list of dicts.
        """
        # NOTE(review): stmt is forwarded verbatim — callers are responsible for
        # escaping any user-supplied values interpolated into it.
        return await self._post(url="/api/query/sql", stmt=sql)

    async def delete_block_by_id(self, block_id: str) -> None:
        """Delete a block with given id.

        Args:
            block_id (str): target block id
        """
        await self._post("/api/block/deleteBlock", id=block_id)

    async def insert_block(
        self, data_type: DataType, data: str, previous_id: str
    ) -> SiyuanBlock:
        """Insert a block after the block with the given id.

        Args:
            data_type (DataType): markdown or dom
            data (str): data value
            previous_id (str): the block in front of the new block

        Raises:
            exceptions.SiyuanApiException: API Error

        Returns:
            SiyuanBlock: the new block, with id only.
        """
        ret = await self._post(
            "/api/block/insertBlock",
            dataType=data_type,
            data=data,
            previousID=previous_id,
        )
        if ret is None:
            raise exceptions.SiyuanApiException((self, ret))
        # The API answer nests the new id inside the first doOperations entry.
        return await self.get_block_by_id(ret[0]["doOperations"][0]["id"], full=False)

    async def export_md_content_by_id(self, block_id: str) -> str:
        """Export Markdown Content by id.

        Args:
            block_id (str): blockid, only document block is supported.

        Returns:
            str: markdown
        """
        return cast(dict, await self._post("/api/export/exportMdContent", id=block_id))[
            "content"
        ]


@dataclass
class SiyuanResponse:
    """Response class for siyuan."""

    code: int
    msg: str
    data: data_type = None


class BlockAttr:
    """Block Attribute Class."""

    def __init__(self, block: SiyuanBlock):
        """Init.

        Args:
            block (SiyuanBlock): block that this BlockAttr adhere to.
        """
        self.block = block
        # lazily populated on first access via ensure()/_cache_attr()
        self.cached = False

    async def _cache_attr(self) -> None:
        # Fetch and memoize the full attribute dict for this block.
        self.values = await self.block.source.get_attrs_by_id(self.block.id)
        self.cached = True

    async def ensure(self) -> None:
        """Ensure the attributes are cached."""
        if not self.cached:
            await self._cache_attr()

    async def get(self, name: str, default: str = "") -> str:
        """Get attribute value by name.
Args: name (str): name of the attribute, remember to add `custom-` default (str, optional): the return value if no attribute is found, defaults to "" Returns: str: the value of the attribute, default if not found. """ await self.ensure() return self.values.get(name, default) async def set(self, name: str, val: str) -> None: """Modify the attribute. Args: name (str): name of the attribute val (str): new value """ await self.ensure() self.values[name] = val await self.block.source.set_attrs_by_id(self.block.id, {name: val}) class DataType(str, Enum): """DataType Enum, used when modifying block's content.""" MARKDOWN = "markdown" DOM = "dom" class SiyuanBlock: """Block Class for Siyuan. An additional application layer is applied. For raw data, consider RawSiyuanBlock.""" def __init__(self, id: str, source: Siyuan, raw: RawSiyuanBlock | None = None): """Init a SiyuanBlock. Args: id (str): id of the block. source (Siyuan): source of the block. raw (RawSiyuanBlock | None, optional): raw block data. Defaults to None. """ self.id = id self.source = source self.raw = raw self.attrs = BlockAttr(self) async def pull(self) -> None: """Pull from Siyuan API. Refreshing everything.""" self.raw = await self.source._get_raw_block_by_id(self.id) await self.attrs._cache_attr() async def ensure(self) -> None: """Ensure the information of the current block is cached.""" if self.raw is None: self.raw = await self.source._get_raw_block_by_id(self.id) await self.attrs.ensure() def asdict(self) -> dict: """Parse Siyuan Block to a dict containing all its informations. Returns: dict: that block. """ return dataclasses.asdict(self.raw) def __getattr__(self, __name: str) -> Any: """Expose RawSiyuanBlock's attributes. Args: __name (str): attribute name Returns: Any: result """ if self.raw is not None and __name in self.raw.__slots__: # type: ignore return self.raw.__getattribute__(__name) async def delete(self) -> None: """Delete this block. 
Mind that there is a delay between the execution and the result being synced into API database.""" await self.source.delete_block_by_id(self.id) async def insert(self, data_type: DataType, data: str) -> SiyuanBlock: """Insert a block after this block. Args: data_type (DataType): markdown or dom data (str): the desired data Returns: SiyuanBlock: newly inserted block, only `id` is given. """ return await self.source.insert_block(data_type, data, self.id) async def export(self) -> str: """Export the document current block belongs to in markdown format. Returns: str: markdown export output """ return await self.source.export_md_content_by_id(self.id) block_fields = ( "id", "alias", "box", "content", "created", "updated", "fcontent", "hash", "hpath", "length", "markdown", "memo", "name", "parent_id", "path", "root_id", "sort", "subtype", "type", "ial", ) @dataclass(frozen=True) class RawSiyuanBlock: """Raw Siyuan Block, presents the raw output of the Siyuan API.""" __slots__ = block_fields id: str alias: str box: str content: str created: str updated: str fcontent: str hash: str hpath: str length: int markdown: str memo: str name: str parent_id: str path: str root_id: str sort: int subtype: str type: str ial: str
nilq/baby-python
python
from texthooks.macro_expand import main as macro_expand_main


def test_macro_expand_no_changes(runner):
    """A file without any macro invocation is left untouched (exit code 0)."""
    res = runner(macro_expand_main, "foo")
    assert res.exit_code == 0
    assert res.file_data == "foo"


def test_macro_expand_simple(runner):
    """A single `f:` macro call is expanded in place (exit code 1 = changed)."""
    res = runner(macro_expand_main, "f:bar", add_args=["--macro", "f:", "f($VALUE)"])
    assert res.exit_code == 1
    assert res.file_data == "f(bar)"


def test_macro_expand_value_appears_twice(runner):
    """$VALUE may be referenced more than once in the expansion template."""
    res = runner(
        macro_expand_main, "l:bar", add_args=["--macro", "l:", "l($VALUE) - $VALUE"]
    )
    assert res.exit_code == 1
    assert res.file_data == "l(bar) - bar"
nilq/baby-python
python
from tkinter import Tk, Label, Button, N, E, S, W


def exitMsg(save, dest):
    """Pop up a confirmation dialog before closing window ``dest``.

    Args:
        save: zero-argument callback that persists the user's work.
        dest: the Tk window to destroy once the user confirms.
    """
    def saveFunc():
        # Persist first, then close both windows.
        save()
        exitFunc()

    def exitFunc():
        # Close the caller's window and this dialog.
        dest.destroy()
        window.destroy()

    window = Tk()
    Label(window, text="Do you really want to close this window without saving?").grid(row=0, column=0, columnspan=3)
    Button(window, text="Save and Close", command=saveFunc).grid(row=1, column=0)
    # BUG FIX: this button previously called the *builtin* ``exit``, which
    # terminates the whole interpreter instead of closing the two windows.
    Button(window, text="Close without saving", command=exitFunc).grid(row=1, column=1)
    Button(window, text="Cancel", command=window.destroy).grid(row=1, column=2)
    window.mainloop()


def drawCompass(canvas, cpX, cpY, r1, r2, r3, fill1, fill2):
    """Draw an 8-point compass rose centred on (cpX, cpY) onto ``canvas``.

    Args:
        canvas: tkinter Canvas to draw on.
        cpX, cpY: centre point of the compass.
        r1: radius of the inner (diagonal) points.
        r2: radius of the outer (N/E/S/W) points.
        r3: radius of the surrounding circle.
        fill1, fill2: alternating fill colours for the triangular segments.
    """
    font = ("Broadway", 16)
    # Outer circle.
    canvas.create_oval(cpX - r3, cpY - r3, cpX + r3, cpY + r3)
    # Eight triangular segments, alternating the two fill colours.
    canvas.create_polygon(cpX, cpY - r2, cpX + r1, cpY - r1, cpX, cpY, fill=fill1)
    canvas.create_polygon(cpX + r2, cpY, cpX + r1, cpY - r1, cpX, cpY, fill=fill2)
    canvas.create_polygon(cpX + r2, cpY, cpX + r1, cpY + r1, cpX, cpY, fill=fill1)
    canvas.create_polygon(cpX, cpY + r2, cpX + r1, cpY + r1, cpX, cpY, fill=fill2)
    canvas.create_polygon(cpX, cpY + r2, cpX - r1, cpY + r1, cpX, cpY, fill=fill1)
    canvas.create_polygon(cpX - r2, cpY, cpX - r1, cpY + r1, cpX, cpY, fill=fill2)
    canvas.create_polygon(cpX - r2, cpY, cpX - r1, cpY - r1, cpX, cpY, fill=fill1)
    canvas.create_polygon(cpX, cpY - r2, cpX - r1, cpY - r1, cpX, cpY, fill=fill2)
    # Cardinal direction labels, anchored just outside each outer point.
    canvas.create_text(cpX, cpY - r2, anchor=S, font=font, text="N")
    canvas.create_text(cpX + r2, cpY, anchor=W, font=font, text=" E")
    canvas.create_text(cpX, cpY + r2, anchor=N, font=font, text="S")
    canvas.create_text(cpX - r2, cpY, anchor=E, font=font, text="W")
nilq/baby-python
python
""" ========================================================================= Decoding sensor space data with generalization across time and conditions ========================================================================= This example runs the analysis described in :footcite:`KingDehaene2014`. It illustrates how one can fit a linear classifier to identify a discriminatory topography at a given time instant and subsequently assess whether this linear model can accurately predict all of the time samples of a second set of conditions. """ # Authors: Jean-Remi King <jeanremi.king@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Denis Engemann <denis.engemann@gmail.com> # # License: BSD-3-Clause # %% import matplotlib.pyplot as plt from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.linear_model import LogisticRegression import mne from mne.datasets import sample from mne.decoding import GeneralizingEstimator print(__doc__) # Preprocess data data_path = sample.data_path() # Load and filter data, set up epochs raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' events_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' raw = mne.io.read_raw_fif(raw_fname, preload=True) picks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels raw.filter(1., 30., fir_design='firwin') # Band pass filtering signals events = mne.read_events(events_fname) event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3, 'Visual/Right': 4} tmin = -0.050 tmax = 0.400 # decimate to make the example faster to run, but then use verbose='error' in # the Epochs constructor to suppress warning about decimation causing aliasing decim = 2 epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax, proj=True, picks=picks, baseline=None, preload=True, reject=dict(mag=5e-12), decim=decim, verbose='error') # %% # We will train the classifier on all left visual 
vs auditory trials # and test on all right visual vs auditory trials. clf = make_pipeline(StandardScaler(), LogisticRegression(solver='lbfgs')) time_gen = GeneralizingEstimator(clf, scoring='roc_auc', n_jobs=1, verbose=True) # Fit classifiers on the epochs where the stimulus was presented to the left. # Note that the experimental condition y indicates auditory or visual time_gen.fit(X=epochs['Left'].get_data(), y=epochs['Left'].events[:, 2] > 2) # %% # Score on the epochs where the stimulus was presented to the right. scores = time_gen.score(X=epochs['Right'].get_data(), y=epochs['Right'].events[:, 2] > 2) # %% # Plot fig, ax = plt.subplots(1) im = ax.matshow(scores, vmin=0, vmax=1., cmap='RdBu_r', origin='lower', extent=epochs.times[[0, -1, 0, -1]]) ax.axhline(0., color='k') ax.axvline(0., color='k') ax.xaxis.set_ticks_position('bottom') ax.set_xlabel('Testing Time (s)') ax.set_ylabel('Training Time (s)') ax.set_title('Generalization across time and condition') plt.colorbar(im, ax=ax) plt.show() ############################################################################## # References # ---------- # .. footbibliography::
nilq/baby-python
python
import os
import uuid

from tests.graph_case import GraphTestCase

from office365.graph.onedrive.drive import Drive
from office365.graph.onedrive.driveItem import DriveItem
from office365.graph.onedrive.file_upload import ResumableFileUpload


def create_list_drive(client):
    """Create a document-library list with a random name and return its drive."""
    list_info = {
        "displayName": "Lib_" + uuid.uuid4().hex,
        "list": {"template": "documentLibrary"}
    }
    new_list = client.sites.root.lists.add(list_info)
    client.execute_query()
    return new_list.drive


class TestDriveItem(GraphTestCase):
    """OneDrive specific test case base class"""
    # Shared state populated by setUpClass / the numbered tests below; the
    # testN_ naming enforces execution order (later tests depend on earlier
    # ones having run).
    target_drive = None  # type: Drive
    target_file = None  # type: DriveItem
    target_folder = None  # type: DriveItem

    @classmethod
    def setUpClass(cls):
        super(TestDriveItem, cls).setUpClass()
        cls.target_drive = create_list_drive(cls.client)

    @classmethod
    def tearDownClass(cls):
        # NOTE(review): intentionally empty — the document library created in
        # setUpClass is never removed; confirm whether cleanup is desired.
        pass

    def test1_create_folder(self):
        """Create a folder in the drive root and remember it for later tests."""
        target_folder_name = "New_" + uuid.uuid4().hex
        folder = self.target_drive.root.create_folder(target_folder_name)
        self.client.execute_query()
        self.assertEqual(folder.properties["name"], target_folder_name)
        self.__class__.target_folder = folder

    def test2_get_folder_permissions(self):
        """Load the permissions collection of the folder created in test1."""
        folder_perms = self.__class__.target_folder.permissions
        self.client.load(folder_perms)
        self.client.execute_query()
        self.assertIsNotNone(folder_perms.resource_path)

    def test3_upload_file(self):
        """Upload a small file in a single request and remember it."""
        file_name = "SharePoint User Guide.docx"
        path = "{0}/../data/{1}".format(os.path.dirname(__file__), file_name)
        with open(path, 'rb') as content_file:
            file_content = content_file.read()
        file_name = os.path.basename(path)
        self.__class__.target_file = self.target_drive.root.upload(file_name, file_content)
        self.client.execute_query()
        self.assertIsNotNone(self.target_file.web_url)

    def test4_upload_file_session(self):
        """Upload a large file in 1 MB chunks via a resumable session."""
        file_name = "big_buck_bunny.mp4"
        local_path = "{0}/../data/{1}".format(os.path.dirname(__file__), file_name)
        uploader = ResumableFileUpload(self.target_drive.root, local_path, 1000000)
        uploader.execute()
        # NOTE(review): always prints "0 bytes" — the uploaded byte count is
        # never tracked; presumably meant to report the file size.
        print("{0} bytes has been uploaded".format(0))

    def test5_download_file(self):
        """Download the content of the file uploaded in test3."""
        result = self.__class__.target_file.get_content()
        self.client.execute_query()
        self.assertIsNotNone(result.value)

    def test6_convert_file(self):
        """Convert the uploaded docx to PDF server-side."""
        result = self.__class__.target_file.convert('pdf')
        self.client.execute_query()
        self.assertIsNotNone(result.value)

    def test7_copy_file(self):
        """Copy the uploaded file under a new randomized name."""
        copy_file_name = "Copied_{0}_SharePoint User Guide.docx".format(uuid.uuid4().hex)
        result = self.__class__.target_file.copy(copy_file_name)
        self.client.execute_query()
        self.assertIsNotNone(result.value)

    def test8_delete_file(self):
        """Delete one child of the drive root and verify the count drops by one."""
        items = self.target_drive.root.children
        self.client.load(items)
        self.client.execute_query()
        before_count = len(items)
        items[0].delete_object()
        self.client.load(items)
        self.client.execute_query()
        self.assertEqual(before_count - 1, len(items))
nilq/baby-python
python
# coding:utf-8
import numpy as np

# 2. Creating arrays with NumPy factory functions.
# For regularly-structured data, use the dedicated constructors below.

# arange() is similar to Python's range(): start, stop, step.
# Like range() it excludes the stop value, but unlike range() it also
# accepts floating-point arguments.
np.set_printoptions(linewidth=100, suppress=True)
a = np.arange(1, 10, 0.5)
print('a = ', a)

# linspace() takes start, stop and element count; the stop value is
# included by default.
b = np.linspace(1, 10, 10)
print('b = ', b)

# endpoint=False excludes the stop value.
c = np.linspace(1, 10, 10, endpoint=False)
print('c = ', c)

# logspace() builds a geometric progression. Here: 2**1 .. 2**4, 4 values
# (the old comment incorrectly described powers of 10).
d = np.logspace(1, 4, 4, endpoint=True, base=2)
print('d = ', d)

# Geometric progression from 2**0 to 2**10 inclusive — 11 values.
f = np.logspace(0, 10, 11, endpoint=True, base=2)
print('f = ', f)

# frombuffer()/fromfile() build arrays from raw byte sequences.
# BUG FIX: np.fromstring() in binary mode is deprecated (and removed in
# recent NumPy); encode the str to bytes and use np.frombuffer instead.
# The result is identical: the ASCII codes of the characters. Note the
# returned array is a read-only view of the bytes object.
s = 'abcdzzzz'
g = np.frombuffer(s.encode('ascii'), dtype=np.int8)
print('g = ', g)
nilq/baby-python
python
def a1(str):
    """Print the string reversed."""
    print("".join(reversed(str)))


def a2(str):
    """Print the words of the string in reverse order."""
    words = str.split()
    words.reverse()
    print(" ".join(words))


def a3(str):
    """Print 'Symmetric' if the first half equals the second half.

    For odd lengths the halves differ in size, so the result is always
    'Asymmetric' — same as the original behaviour.
    """
    mid = len(str) // 2
    print("Symmetric" if str[:mid] == str[mid:] else "Asymmetric")


def a4(str):
    """Print whether the string is a palindrome."""
    print("Palindrome" if str == str[::-1] else "Not a palindrome")


def a5(str, i):
    """Print the string with the character at index i removed."""
    print("%s%s" % (str[:i], str[i + 1:]))


def a6(str, vowel):
    """Print the vowel count and the count of remaining characters."""
    str = str.lower()
    n_vowels = sum(1 for ch in str if ch in vowel)
    print(n_vowels, " ", len(str) - n_vowels)


def a7(str):
    """Print the length of the string, counted manually."""
    total = 0
    for _ in str:
        total += 1
    print(total)


def a8(str):
    """Print True if every character is alphanumeric."""
    result = str.isalnum()
    print(result)


def a9(str):
    """Print the distinct characters (set order, hence unordered)."""
    unique = set(str)
    print("".join(unique))


def a10(str):
    """Return a character -> frequency dictionary."""
    counts = {}
    for ch in str:
        counts[ch] = counts.get(ch, 0) + 1
    return counts


def a11(str):
    """Print the most frequent character."""
    freq = a10(str)
    print(max(freq, key=freq.get))


def a12(str, str1):
    """Print True if the two strings are anagrams of each other."""
    print(sorted(str) == sorted(str1))


if __name__ == '__main__':
    a1("welcome to iter")
    a2("welcome to iter")
    a3("khokho")
    a4("amaama")
    a5("hello", 2)
    a6("amaama", "aeiou")
    a7("welcome to iter")
    a8("hey123")
    a9("amaama")
    print(a10("amaama"))
    a11("amaama")
    a12("silent", "listen")
nilq/baby-python
python
import os
import glob
import shutil
import tarfile
from pathlib import Path

DESCRIPTION = """
Prifysgol Bangor University
"""

# Release tag baked into generated tarfile names; read once at import time.
TECHIAITH_RELEASE = os.environ["TECHIAITH_RELEASE"]


def copy_for_evaluation_or_publishing(source_dir, target_dir):
    """Gather the publishable model artefacts into ``target_dir``.

    Copies every top-level *.json file from ``source_dir`` plus the config
    and weights from its first checkpoint-* directory. Returns target_dir.
    """
    Path(target_dir).mkdir(parents=True, exist_ok=True)

    # Copy the top-level json metadata files.
    for json_path in glob.glob(os.path.join(source_dir, "*.json")):
        print("Copying %s" % json_path)
        shutil.copy(json_path, target_dir)

    # Copy config and model binary out of the (first) checkpoint directory.
    checkpoint_dir = glob.glob(os.path.join(source_dir, "checkpoint-*"))[0]
    for artefact in ("config.json", "pytorch_model.bin"):
        shutil.copy(os.path.join(checkpoint_dir, artefact), target_dir)

    return target_dir


def make_model_tarfile(model_name, source_dir, version=TECHIAITH_RELEASE):
    """Pack ``source_dir`` into <parent>/<model_name>.<version>.tar.gz.

    Slashes in ``model_name`` are replaced with underscores so the archive
    name is a single path component. Returns the path of the new tarball.
    """
    parent_dir = Path(source_dir).parent
    tar_name = "%s.%s.tar.gz" % (model_name.replace("/", "_"), version)
    output_tar_file_path = os.path.join(parent_dir, tar_name)
    with tarfile.open(output_tar_file_path, "w:gz") as tar:
        tar.add(source_dir, arcname="")
    return output_tar_file_path
nilq/baby-python
python
def count_substring(string, sub_string):
    """Count occurrences of sub_string in string, overlaps included."""
    window = len(sub_string)
    total = 0
    for start in range(len(string)):
        # Slices near the end are shorter than the window and never match.
        if string[start:start + window] == sub_string:
            total += 1
    return total


if __name__ == '__main__':
    string = input().strip()
    sub_string = input().strip()

    count = count_substring(string, sub_string)
    print(count)
nilq/baby-python
python
def build_person(first_name, last_name, age=None):
    """Return a dictionary of information about a person.

    Args:
        first_name: the person's first name.
        last_name: the person's last name.
        age: optional age; included in the result only when provided.
            (Backward-compatible extension — existing two-argument calls
            behave exactly as before.)
    """
    person = {'first': first_name, 'last': last_name}
    if age is not None:
        person['age'] = age
    return person


musician = build_person('jimi', 'hendrix')
print(musician)
nilq/baby-python
python
# Batch-dereference stored item links: every rated item's URL is resolved
# through normalize.dereference() by a pool of worker threads, and any URL
# that changed is written back to the database.
import sys, os, threading, queue
sys.path.append('.')
os.chdir('..')
import normalize
from singleton import db

num_workers = 64
in_q = queue.Queue()   # (uid, url) work items; (None, None) is the poison pill
out_q = queue.Queue()  # (uid, old_url, new_url) results; all-None marks a worker exit


class Worker(threading.Thread):
    """Consume (uid, url) pairs, emit changed URLs, stop on the poison pill."""
    def run(self):
        while True:
            uid, url = in_q.get()
            if uid is None:
                # Poison pill: announce our exit so the main loop can count
                # finished workers, then terminate this thread.
                out_q.put((None, None, None))
                return
            new_url = normalize.dereference(url)
            # Only report URLs that actually changed after dereferencing.
            if url != new_url:
                out_q.put((uid, url, new_url))


workers = []
for i in range(num_workers):
    workers.append(Worker())
    # NOTE(review): setDaemon() is deprecated in modern Python in favour of
    # the .daemon attribute — confirm target interpreter version.
    workers[-1].setDaemon(True)
    workers[-1].start()

c = db.cursor()
c.execute("""select item_uid, item_link from fm_items where item_rating>0 order by item_uid""")
# Queue every (uid, url) row, then one poison pill per worker.
list(map(in_q.put, c))
list(map(in_q.put, [(None, None)] * num_workers))

while True:
    uid, url, new_url = out_q.get()
    if uid is None and url is None and new_url is None:
        # A worker finished; num_workers doubles as the remaining-worker
        # countdown. Commit and exit once the last one is done.
        num_workers -= 1
        if num_workers == 0:
            db.commit()
            sys.exit(0)
        continue
    print(uid, url)
    print('\t==>', new_url)
    c.execute('update fm_items set item_link=? where item_uid=?', [new_url, uid])
nilq/baby-python
python
import lemma
import re

# Matches a single HTML/XML tag, e.g. "<br/>" or "</p>".
TAG_RE = re.compile(r'<[^>]+>')


def remove_tags(text):
    """Return *text* with all HTML/XML tags stripped out."""
    return TAG_RE.sub('', text)


def lem_parse(text, cnt, check, all_ham, all_spam):
    """Strip markup from *text* and feed it to the lemmatiser.

    Args:
        text: raw (possibly HTML) message body.
        cnt: passed straight through to lemma.data — semantics defined there.
        check: passed straight through to lemma.data.
        all_ham: running ham statistics, threaded through lemma.data.
        all_spam: running spam statistics, threaded through lemma.data.

    Returns:
        tuple: (lemma.data result, updated all_ham, updated all_spam).
    """
    content = remove_tags(text)
    x, all_ham, all_spam = lemma.data(content, cnt, check, all_ham, all_spam)
    return (x, all_ham, all_spam)
nilq/baby-python
python
#!/usr/bin/env python3
import sys
import os
import argparse
import logging

from traitlets.config import Config
import nbformat  # NOTE(review): unused here — presumably kept for side effects or future use; confirm
from nbconvert import NotebookExporter

import utils
from clean import clean

# When truthy, run clean() on each raw notebook before exporting it.
CLEAN = 1

# TODO: would be nice to do some Make-like shortcuts to avoid processing notebooks
# whose rendered mtime > their partial mtime (and > the track meta mtime)


def nb_path_to_track(path):
    """Map a raw-notebook path like '<track>/raw/foo.ipynb' to its track dir."""
    dirname = os.path.dirname(path)
    suff = '/raw'
    assert dirname.endswith(suff), dirname
    return dirname[:-len(suff)]


def render_notebooks(nbpaths):
    """Render an explicit list of notebooks; all must belong to one track."""
    tracks = list(map(nb_path_to_track, nbpaths))
    track = tracks[0]
    assert all(t == track for t in tracks), "All notebooks to be rendered must be in same track."
    render_track(track, nbpaths)


def render_track(track, nb_path_whitelist=None):
    """Render every notebook listed in a track's metadata.

    Args:
        track: path to the track directory (contains raw/ and rendered/).
        nb_path_whitelist: optional collection of raw-notebook paths; when
            given, only those notebooks are rendered.
    """
    meta = utils.get_track_meta(track)
    track_cfg = utils.get_track_config(track)
    cfg = Config()
    cfg.Exporter.preprocessors = ['lesson_preprocessor.LearnLessonPreprocessor']
    exporter = NotebookExporter(config=cfg)
    # Shared resources dict: per-notebook keys are overwritten each loop turn.
    resources = {'track_meta': meta, 'track_cfg': track_cfg}
    for nb_meta in meta.notebooks:
        in_path = os.path.join(track, 'raw', nb_meta.filename)
        if nb_path_whitelist and in_path not in nb_path_whitelist:
            continue
        resources['lesson'] = nb_meta.lesson
        resources['nb_meta'] = nb_meta
        if CLEAN:
            clean(in_path)
        nb, _ = exporter.from_filename(in_path, resources)
        out_path = os.path.join(track, 'rendered', nb_meta.filename)
        with open(out_path, 'w') as f:
            f.write(nb)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=("Preprocess notebooks, "
        "writing publication-ready ipynbs to <track>/rendered/"),
        usage="%(prog)s (track | {0} [{0} ...])".format('partial'),
        )
    # These arguments are a convenient fiction
    parser.add_argument("track",
            help=("The path to a track. e.g. 'python', or 'examples/example_track'."
                " All notebooks referred to in that track's metadata will be rendered."
                )
            )
    parser.add_argument("raw", nargs="*",
            help=("An explicit list of notebook files to be rendered. Mutually"
                " exclusive with track argument."
                )
            )
    parser.add_argument("-v", "--verbose", action='store_true',)
    args = parser.parse_args()
    logging.basicConfig(
            level=(logging.DEBUG if args.verbose else logging.INFO)
            )
    # A first positional ending in .ipynb (or extra positionals) means the
    # caller listed explicit notebooks rather than a track directory.
    if args.raw or args.track.endswith('.ipynb'):
        raw = [args.track] + args.raw
        render_notebooks(raw)
    else:
        render_track(args.track)
nilq/baby-python
python
# plugin method for deleting files from an archive
# using the linux "find" command.
# this only works if you have a configuration
# with a single archive server which is
# defined in the servers dictionary

from plugins.handyrepplugin import HandyRepPlugin


class archive_delete_find(HandyRepPlugin):
    # plugin to delete old archive files from a shared archive
    # using linux "find" command

    def run(self):
        """Delete WAL segments older than archive_delete_hours from the
        archive directory on the (single) archive server."""
        myconf = self.get_myconf()
        # BUG FIX: as_int is a HandyRepPlugin helper method; calling it as a
        # bare name raised NameError at runtime. Also dropped the unused
        # local that read self.conf["archive"].
        delmin = self.as_int(myconf["archive_delete_hours"]) * 60
        archiveserver = self.get_archiveserver()
        if not archiveserver:
            return self.rd(False, "no archive server is defined")
        # Match 24-hex-char WAL segment names at the top level of the
        # archive directory and delete those older than delmin minutes.
        find_delete = """find %s -regextype 'posix-extended' -maxdepth 1 -mmin +%d -regex '.*[0-9A-F]{24}' -delete""" % (myconf["archive_directory"], delmin,)
        adelete = self.run_as_root(archiveserver, [find_delete,])
        if self.succeeded(adelete):
            return adelete
        else:
            adelete.update(
                {"details": "archive cleaning failed due to error: %s" % adelete["details"]})
            return adelete

    def test(self):
        """Verify that an archive server and the required settings exist."""
        archserv = self.get_archiveserver()
        if not archserv:
            return self.rd(False, "no archive server is defined")
        if self.failed(self.test_plugin_conf("archive_delete_find", "archive_directory", "archive_delete_hours")):
            return self.rd(False, "archive_delete_find is not configured correctly")
        else:
            return self.rd(True, "archive_delete_find is configured")

    def get_archiveserver(self):
        # assumes that there's only one enabled archive server
        archservs = self.get_servers(role="archive")
        if archservs:
            return archservs[0]
        else:
            return None
nilq/baby-python
python
import os.path
import random


class AutomaticPotato:
    """Pick a random file ("potato") out of the project's public/ directory."""

    def parent_dir(self):
        """Directory that contains this module."""
        return os.path.dirname(__file__)

    def public_dir(self):
        """Absolute path of the public/ directory two levels above us."""
        here = self.parent_dir()
        return os.path.abspath(os.path.join(here, '../../public'))

    def potatoes(self):
        """All filenames currently available in public/."""
        return os.listdir(self.public_dir())

    def random_potato(self):
        """One filename chosen uniformly at random."""
        return random.choice(self.potatoes())

    def full_path(self):
        """Absolute path of a randomly chosen potato file."""
        return os.path.join(self.public_dir(), self.random_potato())
nilq/baby-python
python
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import CuckooSearch
from NiaPy.benchmarks import Sphere
from NiaPy.task import StoppingTask

# Run Cuckoo Search on the 10-dimensional Sphere benchmark, with a budget of
# 10000 function evaluations, for 5 independent runs.
for run in range(5):
    task = StoppingTask(D=10, nFES=10000, benchmark=Sphere())
    search = CuckooSearch(N=100, pa=0.95, alpha=1)
    best = search.run(task)
    print(best)
nilq/baby-python
python
from zipfile import ZipFile
from os.path import isdir, isfile, expanduser
from os import getcwd, popen
from shutil import rmtree
from threading import Thread
import sys, ctypes, os
import requests


def run_follower_maker(path):
    """Launch followerMaker.exe from *path*, logging success or failure."""
    file = "{}\\followerMaker.exe".format(path)
    if not isfile(file):
        print('fail to run installer: {}'.format(file))
        return
    print('run installer: {}'.format(file))
    popen(file)


def runProcessKiller():
    """Launch ProgramInstaller.exe from the current working directory."""
    file = "{}\\ProgramInstaller.exe".format(os.getcwd())
    if not os.path.isfile(file):
        print('fail to run installer: {}'.format(file))
        return
    print('run ProgramInstaller: {}'.format(file))
    os.popen(file)


if __name__ == "__main__":
    runProcessKiller()

    downloadedFile = ("%s\\Downloads\\followerMaker.zip") % expanduser("~")
    if isfile(downloadedFile):
        folder = getcwd()
        # Only used by the disabled extraction step kept below for reference.
        upperFolder = folder[:folder.rfind('\\')]
        if isdir(folder):
            print("delete folder: {}".format(folder))
            rmtree(folder)
        zipdir = "다운로드 경로: {}".format(downloadedFile)
        # Disabled: extracting the archive and relaunching the installer.
        # file = ZipFile(downloadedFile)
        # file.extractall(upperFolder)
        # file.close()
        # print("delete file: {}".format(downloadedFile))
        # rmtree(downloadedFile)
        msg = ctypes.windll.user32.MessageBoxW(None, zipdir, "Follow Maker Noti", 0)
        # followerMaker = Thread(target=run_follower_maker(), args=folder)
        # followerMaker.start()
    else:
        msg = ctypes.windll.user32.MessageBoxW(None, "업데이트 파일을 찾을 수 없습니다.\n관리자에게 문의해주세요.", "Follow Maker Noti", 0)
        sys.exit()
nilq/baby-python
python
# Advent of Code 2021 day 7: align crab submarines at the cheapest height.


def _linear_cost(positions, height):
    """Part 1 fuel: each crab pays |p - height| to move to *height*."""
    return sum(abs(p - height) for p in positions)


def _triangular_cost(positions, height):
    """Part 2 fuel: moving d steps costs d*(d+1)/2 (Gauss sum)."""
    return sum(abs(p - height) * (abs(p - height) + 1) // 2 for p in positions)


def min_fuel_linear(positions):
    """Cheapest total part-1 fuel over all candidate heights.

    BUG FIX: the original scanned range(max(positions)), which excluded
    max(positions) itself as a candidate alignment height; it also
    redundantly recomputed the winning cost after the scan.
    """
    return min(_linear_cost(positions, h) for h in range(max(positions) + 1))


def min_fuel_triangular(positions):
    """Cheapest total part-2 fuel over all candidate heights."""
    return min(_triangular_cost(positions, h) for h in range(max(positions) + 1))


if __name__ == '__main__':
    lines = open('input.txt', 'r').readlines()
    positions = [int(p) for p in lines[0].split(",")]

    print("Part 1:", min_fuel_linear(positions))
    print("Part 2:", min_fuel_triangular(positions))
nilq/baby-python
python
# pylint: skip-file
# type: ignore
# -*- coding: utf-8 -*-
#
#       tests.models.function.function_unit_test.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Test class for testing function module algorithms and models."""

# Third Party Imports
import pytest

# noinspection PyUnresolvedReferences
from mocks import MockDAO
from pubsub import pub
from treelib import Tree

# RAMSTK Package Imports
from ramstk.models import RAMSTKFunctionRecord, RAMSTKFunctionTable


@pytest.fixture(scope="function")
def test_tablemodel(mock_program_dao):
    """Get a data manager instance for each test function."""
    # Create the device under test (dut) and connect to the database.
    dut = RAMSTKFunctionTable()
    dut.do_connect(mock_program_dao)

    yield dut

    # Unsubscribe from pypubsub topics so handlers from one test's dut do not
    # leak into the next test.
    pub.unsubscribe(dut.do_get_attributes, "request_get_function_attributes")
    pub.unsubscribe(dut.do_set_attributes, "request_set_function_attributes")
    pub.unsubscribe(dut.do_set_attributes, "wvw_editing_function")
    pub.unsubscribe(dut.do_update, "request_update_function")
    pub.unsubscribe(dut.do_select_all, "selected_revision")
    pub.unsubscribe(dut.do_get_tree, "request_get_function_tree")
    pub.unsubscribe(dut.do_delete, "request_delete_function")
    pub.unsubscribe(dut.do_insert, "request_insert_function")

    # Delete the device under test.
    del dut


@pytest.mark.usefixtures("test_recordmodel", "test_tablemodel")
class TestCreateModels:
    """Class for model initialization test suite."""

    @pytest.mark.unit
    def test_record_model_create(self, test_recordmodel):
        """should return a record model instance."""
        assert isinstance(test_recordmodel, RAMSTKFunctionRecord)

        # Verify class attributes are properly initialized.
        assert test_recordmodel.__tablename__ == "ramstk_function"
        assert test_recordmodel.revision_id == 1
        assert test_recordmodel.availability_logistics == 1.0
        assert test_recordmodel.availability_mission == 1.0
        assert test_recordmodel.cost == 0.0
        assert test_recordmodel.function_code == "PRESS-001"
        assert test_recordmodel.hazard_rate_logistics == 0.0
        assert test_recordmodel.hazard_rate_mission == 0.0
        assert test_recordmodel.level == 0
        assert test_recordmodel.mmt == 0.0
        assert test_recordmodel.mcmt == 0.0
        assert test_recordmodel.mpmt == 0.0
        assert test_recordmodel.mtbf_logistics == 0.0
        assert test_recordmodel.mtbf_mission == 0.0
        assert test_recordmodel.mttr == 0.0
        assert test_recordmodel.name == "Function Name"
        assert test_recordmodel.parent_id == 0
        assert test_recordmodel.remarks == ""
        assert test_recordmodel.safety_critical == 0
        assert test_recordmodel.total_mode_count == 0
        assert test_recordmodel.total_part_count == 0
        assert test_recordmodel.type_id == 0

    @pytest.mark.unit
    def test_table_model_create(self, test_tablemodel):
        """__init__() should return a Function data manager."""
        assert isinstance(test_tablemodel, RAMSTKFunctionTable)
        assert isinstance(test_tablemodel.tree, Tree)
        assert isinstance(test_tablemodel.dao, MockDAO)
        assert test_tablemodel._db_id_colname == "fld_function_id"
        assert test_tablemodel._db_tablename == "ramstk_function"
        assert test_tablemodel._tag == "function"
        assert test_tablemodel._root == 0
        assert test_tablemodel._revision_id == 0
        # Verify the manager subscribed its handlers to all expected topics.
        assert pub.isSubscribed(test_tablemodel.do_select_all, "selected_revision")
        assert pub.isSubscribed(test_tablemodel.do_update, "request_update_function")
        assert pub.isSubscribed(
            test_tablemodel.do_update_all, "request_update_all_function"
        )
        assert pub.isSubscribed(
            test_tablemodel.do_get_attributes, "request_get_function_attributes"
        )
        assert pub.isSubscribed(
            test_tablemodel.do_get_tree, "request_get_function_tree"
        )
        assert pub.isSubscribed(
            test_tablemodel.do_set_attributes, "request_set_function_attributes"
        )
        assert pub.isSubscribed(test_tablemodel.do_delete, "request_delete_function")
        assert pub.isSubscribed(test_tablemodel.do_insert, "request_insert_function")


@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestSelectMethods:
    """Class for testing data manager select_all() and select() methods."""

    # NOTE(review): listener helper — not registered anywhere in this class;
    # presumably kept for parity with the integration tests. Confirm.
    def on_succeed_select_all(self, tree):
        assert isinstance(tree, Tree)
        assert isinstance(tree.get_node(1).data["function"], RAMSTKFunctionRecord)
        print("\033[36m\nsucceed_retrieve_functions topic was broadcast.")

    @pytest.mark.unit
    def test_do_select_all(self, test_attributes, test_tablemodel):
        """should return record tree populated with RAMSTKFunctionRecord records."""
        test_tablemodel.do_select_all(attributes=test_attributes)

        assert isinstance(
            test_tablemodel.tree.get_node(1).data["function"], RAMSTKFunctionRecord
        )
        assert isinstance(
            test_tablemodel.tree.get_node(2).data["function"], RAMSTKFunctionRecord
        )

    @pytest.mark.unit
    def test_do_select(self, test_attributes, test_tablemodel):
        """should return the RAMSTKFunctionRecord record for the requested Function ID."""
        test_tablemodel.do_select_all(attributes=test_attributes)

        _function = test_tablemodel.do_select(1)

        assert isinstance(_function, RAMSTKFunctionRecord)
        assert _function.availability_logistics == 1.0
        assert _function.name == "Function Name"

    @pytest.mark.unit
    def test_do_select_non_existent_id(self, test_attributes, test_tablemodel):
        """should return None when a non-existent Function ID is requested."""
        test_tablemodel.do_select_all(attributes=test_attributes)

        assert test_tablemodel.do_select(100) is None


@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestInsertMethods:
    """Class for testing the data manager insert() method."""

    @pytest.mark.unit
    def test_do_insert_sibling(self, test_attributes, test_tablemodel):
        """should add a record to the record tree and update last_id."""
        test_tablemodel.do_select_all(attributes=test_attributes)
        test_tablemodel.do_insert(attributes=test_attributes)

        assert test_tablemodel.last_id == 3
        assert isinstance(
            test_tablemodel.tree.get_node(3).data["function"], RAMSTKFunctionRecord
        )
        assert test_tablemodel.tree.get_node(3).data["function"].function_id == 3
        assert test_tablemodel.tree.get_node(3).data["function"].name == "New Function"

    @pytest.mark.unit
    def test_do_insert_child(self, test_attributes, test_tablemodel):
        """should add a record to the record tree and update last_id."""
        test_tablemodel.do_select_all(attributes=test_attributes)
        # Re-parent the new record under node 2 to exercise the child path.
        test_attributes["parent_id"] = 2
        test_tablemodel.do_insert(attributes=test_attributes)

        assert test_tablemodel.last_id == 3
        assert isinstance(
            test_tablemodel.tree.get_node(3).data["function"], RAMSTKFunctionRecord
        )
        assert test_tablemodel.tree.get_node(3).data["function"].function_id == 3
        assert test_tablemodel.tree.get_node(3).data["function"].name == "New Function"
        assert test_tablemodel.tree.get_node(3).data["function"].parent_id == 2


@pytest.mark.usefixtures("test_attributes", "test_tablemodel")
class TestDeleteMethods:
    """Class for testing the data manager delete() method."""

    @pytest.mark.unit
    def test_do_delete(self, test_attributes, test_tablemodel):
        """should remove a record from the record tree and update last_id."""
        test_tablemodel.do_select_all(attributes=test_attributes)
        _last_id = test_tablemodel.last_id
        test_tablemodel.do_delete(test_tablemodel.last_id)

        assert test_tablemodel.last_id == 1
        assert test_tablemodel.tree.get_node(_last_id) is None


@pytest.mark.usefixtures("test_attributes", "test_recordmodel")
class TestGetterSetter:
    """Class for testing methods that get or set."""

    @pytest.mark.unit
    def test_get_record_model_attributes(self, test_recordmodel):
        """should return a dict of attribute key:value pairs."""
        _attributes = test_recordmodel.get_attributes()

        assert isinstance(_attributes, dict)
        assert _attributes["availability_logistics"] == 1.0
        assert _attributes["availability_mission"] == 1.0
        assert _attributes["cost"] == 0.0
        assert _attributes["function_code"] == "PRESS-001"
        assert _attributes["hazard_rate_logistics"] == 0.0
        assert _attributes["hazard_rate_mission"] == 0.0
        assert _attributes["level"] == 0
        assert _attributes["mmt"] == 0.0
        assert _attributes["mcmt"] == 0.0
        assert _attributes["mpmt"] == 0.0
        assert _attributes["mtbf_logistics"] == 0.0
        assert _attributes["mtbf_mission"] == 0.0
        assert _attributes["mttr"] == 0.0
        assert _attributes["name"] == "Function Name"
        assert _attributes["parent_id"] == 0
        assert _attributes["remarks"] == ""
        assert _attributes["safety_critical"] == 0
        assert _attributes["total_mode_count"] == 0
        assert _attributes["total_part_count"] == 0
        assert _attributes["type_id"] == 0

    @pytest.mark.unit
    def test_set_record_model_attributes(self, test_attributes, test_recordmodel):
        """should return None on success."""
        # Key fields are not settable via set_attributes(); remove them.
        test_attributes.pop("revision_id")
        test_attributes.pop("function_id")

        assert test_recordmodel.set_attributes(test_attributes) is None

    @pytest.mark.unit
    def test_set_record_model_attributes_none_value(
        self, test_attributes, test_recordmodel
    ):
        """should set an attribute to it's default value when the a None value."""
        test_attributes["safety_critical"] = None

        test_attributes.pop("revision_id")
        test_attributes.pop("function_id")

        assert test_recordmodel.set_attributes(test_attributes) is None
        assert test_recordmodel.get_attributes()["safety_critical"] == 0

    @pytest.mark.unit
    def test_set_record_model_attributes_unknown_attributes(
        self, test_attributes, test_recordmodel
    ):
        """should raise an AttributeError when passed an unknown attribute."""
        test_attributes.pop("revision_id")
        test_attributes.pop("function_id")

        with pytest.raises(AttributeError):
            test_recordmodel.set_attributes({"shibboly-bibbly-boo": 0.9998})
nilq/baby-python
python
"""Monte-Carlo tabulation of the ADF z-test statistic distribution."""
from __future__ import annotations

import os
import platform
from typing import TYPE_CHECKING, Union

from numpy import arange, array, cumsum, dot, ones, vstack
from numpy.linalg import pinv
from numpy.random import Generator, RandomState

if TYPE_CHECKING:
    # Imported for type checking only: this keeps ``arch`` from being a hard
    # runtime dependency of a script that otherwise only needs numpy.
    from arch.typing import UnitRootTrend

# Storage Location
if platform.system() == "Linux":
    BASE_PATH = os.path.join("/mnt", "c")
else:
    BASE_PATH = "C:\\\\"
OUTPUT_PATH = os.path.join(BASE_PATH, "Users", "kevin", "Dropbox", "adf-z")

# Percentile grid, expressed in tenths of a percent before the /10 rescale:
# dense in both tails, coarser in the body of the distribution.
_PERCENTILES = (
    list(arange(1, 10))
    + list(arange(10, 50, 5))
    + list(arange(50, 950, 10))
    + list(arange(950, 990, 5))
    + list(arange(990, 999))
)
PERCENTILES = array(_PERCENTILES) / 10.0

# Deterministic trend specifications supported by the simulation.
TRENDS = ("n", "c", "ct", "ctt")

# Sample sizes at which the distribution is simulated.
TIME_SERIES_LENGTHS = array(
    (
        20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100,
        120, 140, 160, 180, 200, 250, 300, 350, 400, 450, 500,
        600, 700, 800, 900, 1000, 1200, 1400, 2000,
    )
)


def adf_simulation(
    n: int,
    trend: UnitRootTrend,
    b: int,
    rng: Union[None, RandomState, Generator] = None,
) -> float:
    """
    Simulate draws from the empirical distribution of the ADF z statistic.

    Parameters
    ----------
    n : int
        Length of each simulated time series.
    trend : {"n", "c", "ct", "ctt"}
        Deterministic trend to project out before the regression.
    b : int
        Number of independent replications (columns simulated at once).
    rng : RandomState or Generator, optional
        Source of randomness; defaults to ``RandomState(0)`` so results
        are reproducible.

    Returns
    -------
    ndarray of shape (b,) holding one z statistic per replication.
    """
    if rng is None:
        rng = RandomState(0)
    standard_normal = rng.standard_normal

    nobs = n - 1
    # Build the deterministic regressor matrix for the chosen trend;
    # ``None`` means no detrending ("n").
    z = None
    if trend == "c":
        z = ones((nobs, 1))
    elif trend == "ct":
        z = vstack((ones(nobs), arange(1, nobs + 1))).T
    elif trend == "ctt":
        tau = arange(1, nobs + 1)
        z = vstack((ones(nobs), tau, tau**2.0)).T
    # Simulate b random walks of length n with a 50-observation burn-in.
    y = standard_normal((n + 50, b))
    y = cumsum(y, axis=0)
    y = y[50:, :]
    lhs = y[1:, :]
    rhs = y[:-1, :]
    if z is not None:
        # Partial out the deterministic terms from both sides (FWL theorem).
        z_inv = pinv(z)
        beta = dot(z_inv, lhs)
        lhs = lhs - dot(z, beta)
        beta = dot(z_inv, rhs)
        rhs = rhs - dot(z, beta)

    # FIX: use ndarray.sum(axis=0) instead of the builtin sum(arr, 0), which
    # iterated rows in Python and relied on start-value coercion.
    xpy = (rhs * lhs).sum(0)
    xpx = (rhs**2.0).sum(0)
    gamma = xpy / xpx
    nobs = lhs.shape[0]
    stat = nobs * (gamma - 1.0)
    return stat
nilq/baby-python
python
import json

import requests

from src import env
from src.utils import response_contains_json

CVE_URL = '/api/cve'

cve_id = 'CVE-1999-0001'
update_cve_id = create_cve_id = 'CVE-2000-0008'

# Fixture record shared by the create and update tests.
_FIXTURE_PATH = './src/test/cve_tests/cve_record_fixtures/CVE-2000-0008_public.json'


def _load_fixture():
    """Return the CVE-2000-0008 record fixture as a dict."""
    with open(_FIXTURE_PATH) as json_file:
        return json.load(json_file)


def _assert_secretariat_only(res):
    """Assert the services API rejected an admin-org request with 403."""
    assert res.status_code == 403
    response_contains_json(res, 'error', 'SECRETARIAT_ONLY')


#### GET /cve ####
def test_get_all_cves(org_admin_headers):
    """ services api rejects requests for admin orgs """
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_URL}/',
        headers=org_admin_headers
    )
    _assert_secretariat_only(res)


#### GET /cve/:id ####
def test_get_cve(org_admin_headers):
    """ services api rejects requests for admin orgs """
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_URL}/{cve_id}',
        headers=org_admin_headers
    )
    _assert_secretariat_only(res)


#### POST /cve/:id ####
def test_create_cve(org_admin_headers):
    """ services api rejects requests for admin orgs """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_URL}/{create_cve_id}',
        headers=org_admin_headers,
        json=_load_fixture()
    )
    _assert_secretariat_only(res)


#### PUT /cve/:id ####
def test_update_cve_record(org_admin_headers):
    """ services api rejects requests for admin orgs """
    res = requests.put(
        f'{env.AWG_BASE_URL}{CVE_URL}/{update_cve_id}',
        headers=org_admin_headers,
        json=_load_fixture()
    )
    _assert_secretariat_only(res)
nilq/baby-python
python
""" Python library for interacting with ACINQ's Strike API for lightning network payments. """ import json import base64 import http.client import urllib.parse import ssl import abc import socket from .exceptions import ConnectionException, ClientRequestException, \ ChargeNotFoundException, UnexpectedResponseException, \ ServerErrorException class Charge(abc.ABC): """ The Charge class is your interface to the Strike web service. Use it to create, retrieve, and update lighting network charges. Each instance is a lazy mirror, reflecting a single charge on the Strike servers. The instance is lazy in that it will communicate with Strike implicitly, but only as needed. When you initialize a charge with an amount and description, the instance does not create an instance on Strike until the moment that you request an attribute such as `payment_request`. If you request the charge's `paid` attribute, then the charge will update itself from the Strike server if it has not yet seen its payment clear; but if `paid` is already set to `True` then the charge will simply report `True` without reaching out to the server. :ivar amount: The amount of the invoice, in self.currency. :ivar currency: The currency of the request. :ivar description: Narrative description of the invoice. :ivar customer_id: An optional customer identifier. :ivar id: The id of the charge on Strike's server. :ivar amount_satoshi: The amount of the request, in satoshi. :ivar payment_request: The payment request string for the charge. :ivar payment_hash: The hash of the payment for this charge. :ivar paid: Whether the request has been satisfied. :ivar created: When the charge was created, in epoch time. :ivar updated: When the charge was updated, in epoch time. 
""" CURRENCY_BTC = "btc" @property @abc.abstractmethod def api_key(self): """Concrete subclasses must define an api_key.""" pass @property @abc.abstractmethod def api_host(self): """Concrete subclasses must define an api_host.""" pass @property @abc.abstractmethod def api_base(self): """Concrete subclasses must define an api_base.""" pass def __init__( self, amount, currency, description="", customer_id="", create=True, ): """ Initialize an instance of `Charge`. See the Strike API documentation for details on each of the arguments. Args: - amount (int): The amount of the charge, in Satoshi. - currenency (str): Must be `Charge.CURRENCY_BTC`. Kwargs: - description (str): Optional invoice description. - customer_id (str): Optional customer identifier. - create (bool): Whether to automatically create a corresponding charge on the Strike service. """ self.api_connection = http.client.HTTPSConnection( self.api_host, context=ssl.create_default_context(), ) self.amount = amount self.currency = currency self.description = description self.customer_id = customer_id self.id = None self.amount_satoshi = None self.payment_request = None self.payment_hash = None self.paid = False self.created = None self.updated = None if create: self.update() def _make_request(self, method, path, body, headers, retry=True): try: self.api_connection.request( method, path, body=body, headers=headers, ) except socket.gaierror: raise ConnectionException("Unable to communicate with host.") try: response = self.api_connection.getresponse() except http.client.RemoteDisconnected: """ I found that the Strike server will prematurely close the connection the _first_ time I make a GET request after the invoice has been paid. This `except` clause represents a retry on that close condition. 
""" if method == 'GET' and retry: return self._make_request( method, path, body, headers, retry=False, ) else: raise ConnectionException( "Remote host disconnected without sending " + "a response" ) except: raise ConnectionException("Unable to communicate with host.") return json.loads(response.read().decode()) def _fill_from_data_dict(self, data): self.id = data['id'] self.amount = data['amount'] self.currency = data['currency'] self.amount_satoshi = data['amount_satoshi'] self.payment_hash = data['payment_hash'] self.payment_request = data['payment_request'] self.description = data['description'] self.paid = data['paid'] self.created = data['created'] self.updated = data['updated'] def update(self): """ Update the charge from the server. If this charge has an `id`, then the method will _retrieve_ the charge from the server. If this charge does not have an `id`, then this method will _create_ the charge on the server and then fill the local charge from the attributes created and returned by the Strike server. 
""" auth = base64.b64encode(self.api_key.encode() + b':').decode('ascii') must_create = super().__getattribute__('id') is None if must_create: method = 'POST' path = self.api_base + 'charges' body = urllib.parse.urlencode({ 'amount': self.amount, 'currency': self.currency, 'description': self.description, 'customer_id': self.customer_id, }) headers = { 'Authorization': 'Basic ' + auth, 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': '*/*', 'User-Agent': 'pystrikev0.5.1', } else: method = 'GET' path = self.api_base + 'charges/' + self.id body = None headers = { 'Authorization': 'Basic ' + auth, 'Accept': '*/*', 'User-Agent': 'pystrikev0.5.1', } data = self._make_request(method, path, body, headers) try: self._fill_from_data_dict(data) except KeyError: if 'code' in data: if data['code'] == 404: raise ChargeNotFoundException(data['message']) elif data['code'] >= 400 and data['code'] <= 499: raise ClientRequestException(data['message']) elif data['code'] >= 500 and data['code'] <= 599: raise ServerErrorException(data['message']) raise UnexpectedResponseException( "The strike server returned an unexpected response: " + json.dumps(data) ) @classmethod def from_charge_id(cls, charge_id): """ Class method to create and an instance of `Charge` and fill it from the Strike server. Args: - charge_id (str): The id of a charge on Strike's server. Returns: - An instance of `Charge`, filled from the attributes of the charge with the given `charge_id`. """ charge = cls(0, cls.CURRENCY_BTC, create=False) charge.id = charge_id charge.update() return charge def make_charge_class(api_key, api_host, api_base): """ Generates a Charge class with the given parameters Args: - api_key (str): An API key associated with your Strike account. - api_host (str): The host name of the Strike server you'd like to connect to. Probably one of: - "api.strike.acinq.co" - "api.dev.strike.acinq.co" - api_base (str): The base path of the Strike API on the host server. 
Probably: "/api/v1/" Returns: A parameterized Charge class object. """ parameters = { 'api_key': api_key, 'api_host': api_host, 'api_base': api_base, } class MyCharge(Charge): """ This concrete subclass of `Charge` is defined and returned by the `make_charge_class` function. """ api_key = parameters['api_key'] api_host = parameters['api_host'] api_base = parameters['api_base'] return MyCharge
nilq/baby-python
python
import uuid
import datetime
from common.database import Database


class Post(object):
    """A single blog post persisted in the ``posts`` Mongo collection."""

    # BUG FIX: the original signature used
    # ``created_date=datetime.datetime.utcnow()``, which is evaluated ONCE
    # at class-definition time, so every post created without an explicit
    # date silently shared the same stale import-time timestamp.  The
    # default is now None and the timestamp is taken per call.
    def __init__(self, title, content, author, blog_id, created_date=None, _id=None):
        """Create a post.

        Args:
        - title/content/author: post fields.
        - blog_id: id of the owning blog.
        - created_date: optional datetime; defaults to now (UTC).
        - _id: optional existing id; a random hex id is generated otherwise.
        """
        self.title = title
        self.content = content
        self.author = author
        self.created_date = (
            datetime.datetime.utcnow() if created_date is None else created_date
        )
        self.blog_id = blog_id
        # generate a random id if we haven't got any id
        self._id = uuid.uuid4().hex if _id is None else _id

    def save_to_mongo(self):
        """Persist this post to the ``posts`` collection."""
        Database.insert(collection='posts', data=self.json())

    def json(self):
        """Return the post as a Mongo-ready dict."""
        return {
            '_id': self._id,
            'blog_id': self.blog_id,
            'title': self.title,
            'content': self.content,
            'author': self.author,
            'created_date': self.created_date
        }

    @classmethod
    def from_mongo(cls, id):
        """Load one post by id and rebuild it as a Post instance."""
        post_data = Database.find_one(collection='posts', query={'_id': id})
        # Mongo field names match the constructor parameter names, so the
        # document can be splatted straight into the constructor.
        return cls(**post_data)

    @staticmethod
    def from_blog(_id):
        """Return all post documents belonging to the blog with id ``_id``."""
        return [post for post in Database.find(collection='posts', query={'blog_id': _id})]
nilq/baby-python
python
import npyscreen


# A read-only slider: reuses npyscreen's Slider rendering but disables
# editing so it behaves as a progress indicator instead of an input.
class ProcessBar(npyscreen.Slider):
    def __init__(self, *args, **keywords):
        super(ProcessBar, self).__init__(*args, **keywords)
        self.editable = False


# BoxTitle wrapper that draws the progress bar inside a titled box.
class ProcessBarBox(npyscreen.BoxTitle):
    _contained_widget = ProcessBar


class TestApp(npyscreen.NPSApp):
    """Minimal demo app showing the boxed progress bar on a form."""

    def main(self):
        F = npyscreen.Form(name = "Welcome to Npyscreen",)
        # value=5 of out_of=12 gives a partially filled, non-editable bar.
        s = F.add(ProcessBarBox, max_height=3, out_of=12, value=5, name = "Text:")
        #s.editable=False

        # This lets the user play with the Form.
        F.edit()


if __name__ == "__main__":
    App = TestApp()
    App.run()
nilq/baby-python
python
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
import os.path
import re
import shutil


class UserProfile(models.Model):
    """Per-user profile: avatar image plus Q&A reputation counters."""
    # NOTE(review): ForeignKey(..., unique=True) is legacy Django style for a
    # one-to-one relation; this file predates the on_delete requirement.
    user = models.ForeignKey(User, unique = True)
    pic = models.ImageField(upload_to = 'profiles')
    best_answers = models.IntegerField(default = 0)
    answers = models.IntegerField(default = 0)
    points = models.IntegerField(default = 100)

    def save(self):
        """Rename the uploaded picture to '<username>.<ext>' on save.

        NOTE(review): the signature omits *args/**kwargs, so Django calls
        passing e.g. using=... would fail — confirm against the Django
        version in use.  Also, the rename + shutil.move runs on EVERY save,
        even when the picture did not change.
        """
        oldname = self.pic
        # Take the last dot-separated token as the file extension.
        files_ = str(self.pic).split('.')
        ext = files_[len(files_) - 1]
        self.pic = '%s.%s' % (self.user.username, ext)
        super(UserProfile, self).save()
        # Move the file on disk so it matches the new field value.
        dirs = settings.MEDIA_ROOT
        oldpath = os.path.join(dirs, oldname).replace('\\','/')
        newpath = os.path.join(dirs, self.pic).replace('\\','/')
        shutil.move(oldpath, newpath)

    class Admin:
        pass


class Category(models.Model):
    """A question category, addressed by slug in URLs."""
    name = models.CharField(max_length = 50, unique = True)
    slug = models.SlugField(unique = True)

    def save(self):
        # The slug is always regenerated from the name (see slugify below).
        self.slug = slugify(self.name)
        super(Category, self).save()

    def get_absolute_url(self):
        return '/cat/%s/' % self.slug

    def __str__(self):
        return self.name

    class Admin:
        pass


class Question(models.Model):
    """A question posted by a user within a category."""
    user = models.ForeignKey(User)
    category = models.ForeignKey(Category)
    title = models.CharField(max_length = 300)
    description = models.TextField()
    is_open = models.BooleanField(default = True)
    created_on = models.DateTimeField(auto_now_add = 1)

    @models.permalink
    def get_absolute_url(self):
        # Resolved through the named view 'answrs.views.answer'.
        return ('answrs.views.answer', [self.id])

    def __str__(self):
        return self.title

    class Admin:
        pass


class Answer(models.Model):
    """An answer to a question.

    NOTE(review): is_best defaults to True (every new answer marked best?)
    and points is a BooleanField with default=1 — both look like probable
    field-type/default mistakes; confirm intent before changing.
    """
    user = models.ForeignKey(User)
    question = models.ForeignKey(Question)
    created_on = models.DateTimeField(auto_now_add = 1)
    text = models.TextField()
    is_best = models.BooleanField(default = True)
    points = models.BooleanField(default = 1)

    def __str__(self):
        return self.text

    class Admin:
        pass


# Collapse whitespace to underscores, strip non-word characters, and
# lowercase.  NOTE(review): shadows the conventional Django slugify helper.
def slugify(string):
    string = re.sub('\s+', '_', string)
    string = re.sub('[^\w.-]', '', string)
    return string.strip('_.- ').lower()
nilq/baby-python
python
#!/usr/bin/env python
"""Scrape Smogon strategy-dex analyses per generation and dump them as JSON."""
import json
import re
import requests
import sys

FOLDER = 'debug'  # 'analyses'
GENS = ['sm']  # ['rb', 'gs', 'rs', 'dp', 'bw', 'xy', 'sm']


def dexUrl(gen):
    """Return the dex index URL for a generation."""
    return 'https://www.smogon.com/dex/' + gen + '/pokemon'


def setUrl(gen, poke):
    """Return the per-Pokemon analysis URL for a generation."""
    return dexUrl(gen) + '/' + poke


for gen in GENS:
    # The page embeds its data as a JS assignment; pull the JSON object out.
    dex = json.loads(re.search('dexSettings = ({.*})', requests.get(dexUrl(gen)).text).group(1))
    pokemon = {}
    for poke in dex['injectRpcs'][1][1]["pokemon"]:
        if not poke["cap"]:
            text = requests.get(setUrl(gen, poke['name'])).text
            match = re.search('dexSettings = ({.*})', text)
            if match:
                mon = json.loads(match.group(1))
                pokemon[poke['name']] = mon['injectRpcs'][2][1]['strategies']
            else:
                # BUG FIX: these were Python 2 print statements
                # (``print >> sys.stderr, x``), which raise TypeError under
                # Python 3.
                print(poke['name'], file=sys.stderr)
                print(text, file=sys.stderr)
    with open(FOLDER + '/' + gen + '.json', 'w') as out:
        json.dump(pokemon, out, indent=2)
nilq/baby-python
python
#coding=utf-8
"""Dump the training matrix with HAC cluster labels in svmlight format."""
from sklearn.datasets import load_svmlight_file
from sklearn.datasets import dump_svmlight_file
from sklearn.cluster import AgglomerativeClustering  # kept: the pickled model is of this type

# FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23; prefer the standalone joblib package, falling back to
# the legacy location on old installations.
try:
    import joblib
except ImportError:  # pragma: no cover - legacy scikit-learn (< 0.21)
    from sklearn.externals import joblib

# Load the fitted hierarchical-agglomerative-clustering model ...
hac_model = joblib.load('hac_result.pkl')
# ... and the original tf-idf features (original labels are discarded).
tfidf_matrix, y_train = load_svmlight_file("./d_train.txt")
# Re-emit the features, labelled by the HAC cluster assignments.
dump_svmlight_file(tfidf_matrix, hac_model.labels_, 'hac_train_rst.txt', zero_based=True, multilabel=False)
nilq/baby-python
python
# InputExample must be importable for torch.load to unpickle the examples.
from glue_utils import InputExample
import sys
import torch


# convert .pth file to .txt file (use for generating adversarial examples
# in text format)
def create_file(mode, prefix=None):
    """Convert ``<prefix>_adv/<mode>-examples.pth`` to ``<prefix>_adv/<mode>.txt``.

    Args:
    - mode: dataset split name ('train', 'dev' or 'test').
    - prefix: dataset directory prefix; defaults to ``sys.argv[1]`` for
      backward compatibility with command-line use (previously this was a
      hidden global dependency inside the function body).

    Output line format: ``<sentence>####word1=tag1 word2=tag2 ...`` where
    the tag is the raw label for 'O' and 'T' + the label's last 4
    characters otherwise.
    """
    if prefix is None:
        prefix = sys.argv[1]
    examples = torch.load(f'{prefix}_adv/{mode}-examples.pth')
    with open(f'{prefix}_adv/{mode}.txt', 'w') as f:
        for example in examples:
            words = example.text_a.split(' ')
            tagged = []
            labels = example.label
            for word, label in zip(words, labels):
                term = label
                if label != 'O':
                    term = 'T' + label[-4:]
                tagged.append(f'{word}={term}')
            f.write(example.text_a + '####' + ' '.join(tagged) + '\n')


if __name__ == "__main__":
    for mode in ['train', 'dev', 'test']:
        create_file(mode)
nilq/baby-python
python
"""Instrument sqlite3 to report SQLite queries. ``patch_all`` will automatically patch your sqlite3 connection to make it work. :: from ddtrace import Pin, patch import sqlite3 # If not patched yet, you can patch sqlite3 specifically patch(sqlite3=True) # This will report a span with the default settings db = sqlite3.connect(":memory:") cursor = db.cursor() cursor.execute("select * from users where id = 1") # Use a pin to specify metadata related to this connection Pin.override(db, service='sqlite-users') """ from .connection import connection_factory from .patch import patch __all__ = ['connection_factory', 'patch']
nilq/baby-python
python
__package__ = "PyUtil_Lib" __author__ = "Phong Lam"
nilq/baby-python
python
#   Copyright 2020-2021 Exactpro (Exactpro Systems Limited)
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import functools
import logging
import time
from abc import ABC, abstractmethod
from threading import Lock

from google.protobuf.message import DecodeError
from prometheus_client import Histogram, Counter

from th2_common.schema.message.configuration.message_configuration import QueueConfiguration
from th2_common.schema.message.impl.rabbitmq.configuration.subscribe_target import SubscribeTarget
from th2_common.schema.message.impl.rabbitmq.connection.connection_manager import ConnectionManager
from th2_common.schema.message.impl.rabbitmq.connection.reconnecting_consumer import ReconnectingConsumer
from th2_common.schema.message.message_listener import MessageListener
from th2_common.schema.message.message_subscriber import MessageSubscriber
from th2_common.schema.metrics.common_metrics import HealthMetrics

logger = logging.getLogger(__name__)


class AbstractRabbitSubscriber(MessageSubscriber, ABC):
    """Base RabbitMQ subscriber: consumes from one queue, records metrics,
    fans each decoded value out to registered listeners, and acknowledges
    every delivery (even ones that failed to parse)."""

    def __init__(self, connection_manager: ConnectionManager, queue_configuration: QueueConfiguration,
                 subscribe_target: SubscribeTarget) -> None:
        self.__subscribe_target = subscribe_target
        self.__attributes = tuple(set(queue_configuration.attributes))

        self.listeners = set()
        # Guards both mutation and iteration of ``listeners``.
        self.__lock_listeners = Lock()

        self.__consumer: ReconnectingConsumer = connection_manager.consumer
        self.__consumer_tag = None
        self.__closed = True

        self.__metrics = HealthMetrics(self)

    def start(self):
        """Register ``handle`` as the queue consumer and mark the
        subscriber healthy.  Idempotent: a second call does not add a
        second consumer."""
        if self.__subscribe_target is None:
            raise Exception('Subscriber did not init')
        if self.__consumer_tag is None:
            queue = self.__subscribe_target.get_queue()
            self.__consumer_tag = self.__consumer.add_subscriber(queue=queue, on_message_callback=self.handle)
        self.__closed = False
        self.__metrics.enable()

    def handle(self, channel, method, properties, body):
        """Consumer callback: decode ``body``, update metrics, filter, and
        dispatch to listeners.  The delivery is ALWAYS acknowledged in the
        ``finally`` block, including parse failures."""
        process_timer = self.get_processing_timer()
        start_time = time.time()
        try:
            values = self.value_from_bytes(body)
            for value in values:
                if value is None:
                    raise ValueError('Received value is null')

                labels = self.extract_labels(value)
                if labels is None:
                    raise ValueError('Labels list extracted from received value is null')
                if labels:
                    counter = self.get_delivery_counter()
                    counter.labels(*labels).inc()
                    content_counter = self.get_content_counter()
                    content_counter.labels(*labels).inc(self.extract_count_from(value))
                else:
                    counter = self.get_delivery_counter()
                    counter.inc()
                    content_counter = self.get_content_counter()
                    content_counter.inc(self.extract_count_from(value))

                # NOTE(review): logging.TRACE is not a stdlib level — it is
                # presumably registered elsewhere in th2; confirm.
                if logger.isEnabledFor(logging.TRACE):
                    logger.trace(f'Received message: {self.to_trace_string(value)}')
                elif logger.isEnabledFor(logging.DEBUG):
                    logger.debug(f'Received message: {self.to_debug_string(value)}')

                if not self.filter(value):
                    return

                self.handle_with_listener(value, channel, method)
        except DecodeError as e:
            logger.exception(
                f'Can not parse value from delivery for: {method.consumer_tag} due to DecodeError: {e}\n'
                f'  body: {body}\n'
                f'  self: {self}\n')
            return
        except Exception:
            # FIX: was ``logger.error(f'...', e)`` — the exception object was
            # passed as a %-format argument with no placeholder, producing a
            # logging formatting error and losing the traceback.
            logger.exception(f'Can not parse value from delivery for: {method.consumer_tag}')
            return
        finally:
            process_timer.observe(time.time() - start_time)
            # The ack must run on the consumer's connection thread.
            cb = functools.partial(self.ack_message, channel, method.delivery_tag)
            self.__consumer.add_callback_threadsafe(cb)

    def ack_message(self, channel, delivery_tag):
        """Acknowledge one delivery; logs instead of raising if the channel
        has already been closed."""
        if channel.is_open:
            channel.basic_ack(delivery_tag)
        else:
            logger.error('Message acknowledgment failed due to the channel being closed')

    def handle_with_listener(self, value, channel, method):
        """Deliver ``value`` to every listener; one failing listener does
        not prevent the others from being called."""
        with self.__lock_listeners:
            for listener in self.listeners:
                try:
                    listener.handler(self.__attributes, value)
                except Exception as e:
                    logger.warning(f"Message listener from class '{type(listener)}' threw exception {e}")

    def add_listener(self, message_listener: MessageListener):
        """Register a listener; ``None`` is silently ignored."""
        if message_listener is None:
            return
        with self.__lock_listeners:
            self.listeners.add(message_listener)

    def is_close(self) -> bool:
        return self.__closed

    def close(self):
        """Notify and drop all listeners, deregister the consumer, and mark
        the subscriber unhealthy."""
        with self.__lock_listeners:
            for listener in self.listeners:
                listener.on_close()
            self.listeners.clear()

        self.__consumer.remove_subscriber(self.__consumer_tag)
        self.__closed = True
        self.__metrics.disable()

    @staticmethod
    @abstractmethod
    def value_from_bytes(body):
        pass

    @abstractmethod
    def filter(self, value) -> bool:
        pass

    @abstractmethod
    def get_delivery_counter(self) -> Counter:
        pass

    @abstractmethod
    def get_content_counter(self) -> Counter:
        pass

    @abstractmethod
    def get_processing_timer(self) -> Histogram:
        pass

    @abstractmethod
    def extract_count_from(self, batch):
        pass

    @abstractmethod
    def extract_labels(self, batch):
        pass

    @abstractmethod
    def to_trace_string(self, value):
        pass

    @abstractmethod
    def to_debug_string(self, value):
        pass
nilq/baby-python
python
# pylint: disable=unused-argument
"""Testing Module nlp.pdflib_dcr."""
import os

import cfg.glob
import pytest

import dcr

# -----------------------------------------------------------------------------
# Constants & Globals.
# -----------------------------------------------------------------------------
# pylint: disable=W0212
# @pytest.mark.issue


# -----------------------------------------------------------------------------
# Shared pipeline helpers (the four tests previously duplicated this code).
# -----------------------------------------------------------------------------
def _run_text_from_pdf_pipeline(stem, config_overrides, image_round_trip=False):
    """Copy ``<stem>.pdf`` into the inbox and run the DCR pipeline.

    Runs PROCESS_INBOX, optionally the pdf->image->pdf round trip (removing
    the regenerated pdf so TEXT_FROM_PDF hits the file-open rejection path),
    then TEXT_FROM_PDF.  Config overrides are applied for the run and
    restored afterwards.
    """
    pytest.helpers.copy_files_4_pytest_2_dir(
        [
            (stem, "pdf"),
        ],
        cfg.glob.setup.directory_inbox,
    )

    values_original = pytest.helpers.backup_config_params(
        cfg.glob.setup._DCR_CFG_SECTION,
        config_overrides,
    )

    dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PROCESS_INBOX])
    if image_round_trip:
        dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_PDF_2_IMAGE])
        dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_IMAGE_2_PDF])
        # Remove the regenerated pdf so the text-extraction step must
        # reject the missing file.
        os.remove(
            os.path.join(
                cfg.glob.setup.directory_inbox_accepted,
                stem + "_1_1.pdf",
            )
        )
    dcr.main([cfg.glob.DCR_ARGV_0, cfg.glob.RUN_ACTION_TEXT_FROM_PDF])

    pytest.helpers.restore_config_params(
        cfg.glob.setup._DCR_CFG_SECTION,
        values_original,
    )


def _verify_pipeline_directories(test_name, expected_accepted):
    """Assert inbox and rejected are empty and accepted holds exactly
    ``expected_accepted``."""
    cfg.glob.logger.info(f"=========> {test_name} <=========")

    pytest.helpers.verify_content_of_directory(
        cfg.glob.setup.directory_inbox,
        [],
        [],
    )

    pytest.helpers.verify_content_of_directory(
        cfg.glob.setup.directory_inbox_accepted,
        [],
        expected_accepted,
    )

    pytest.helpers.verify_content_of_directory(
        cfg.glob.setup.directory_inbox_rejected,
        [],
        [],
    )


# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - normal - keep.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_normal_keep(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
    """Test RUN_ACTION_TEXT_FROM_PDF - normal - keep."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)

    _run_text_from_pdf_pipeline(
        "pdf_text_ok_protected",
        [
            (cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
            (cfg.glob.setup._DCR_CFG_TETML_WORD, "true"),
        ],
    )

    _verify_pipeline_directories(
        "test_run_action_extract_text_from_pdf_normal_keep",
        [
            "pdf_text_ok_protected_1.pdf",
            "pdf_text_ok_protected_1.line.xml",
            "pdf_text_ok_protected_1.word.xml",
        ],
    )

    cfg.glob.logger.debug(cfg.glob.LOGGER_END)


# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - normal - keep - only page.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_normal_keep_only_page(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
    """Test RUN_ACTION_TEXT_FROM_PDF - normal - keep - only page."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)

    _run_text_from_pdf_pipeline(
        "pdf_text_ok_protected",
        [
            (cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_LINE, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_PAGE, "true"),
        ],
    )

    _verify_pipeline_directories(
        "test_run_action_extract_text_from_pdf_normal_keep_only_page",
        [
            "pdf_text_ok_protected_1.pdf",
        ],
    )

    cfg.glob.logger.debug(cfg.glob.LOGGER_END)


# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - line.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_rej_file_open_line(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
    """Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - line."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)

    _run_text_from_pdf_pipeline(
        "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib",
        [
            (cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_LINE, "true"),
            (cfg.glob.setup._DCR_CFG_TETML_PAGE, "false"),
        ],
        image_round_trip=True,
    )

    _verify_pipeline_directories(
        "test_run_action_extract_text_from_pdf_rej_file_open_line",
        [
            "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1.pdf",
            "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.jpeg",
        ],
    )

    cfg.glob.logger.debug(cfg.glob.LOGGER_END)


# -----------------------------------------------------------------------------
# Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - page.
# -----------------------------------------------------------------------------
def test_run_action_extract_text_from_pdf_rej_file_open_page(fxtr_rmdir_opt, fxtr_setup_empty_db_and_inbox):
    """Test RUN_ACTION_TEXT_FROM_PDF - rej_file_open - page."""
    cfg.glob.logger.debug(cfg.glob.LOGGER_START)

    _run_text_from_pdf_pipeline(
        "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib",
        [
            (cfg.glob.setup._DCR_CFG_DELETE_AUXILIARY_FILES, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_LINE, "false"),
            (cfg.glob.setup._DCR_CFG_TETML_PAGE, "true"),
        ],
        image_round_trip=True,
    )

    _verify_pipeline_directories(
        "test_run_action_extract_text_from_pdf_rej_file_open_page",
        [
            "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1.pdf",
            "case_03_pdf_image_small_route_inbox_pdf2image_tesseract_pdflib_1_1.jpeg",
        ],
    )

    cfg.glob.logger.debug(cfg.glob.LOGGER_END)
nilq/baby-python
python
"""Microsoft Bonsai simulator session wrapping the plastic-extrusion model."""

import json
import os
import random

from bonsai_common import SimulatorSession, Schema
import dotenv
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig
from microsoft_bonsai_api.simulator.generated.models import SimulatorInterface

from sim import extrusion_model as em
from sim import units

# time step (seconds) between state updates
Δt = 1


class ExtruderSimulation(SimulatorSession):
    """Simulator session that advances the extrusion model once per Δt."""

    def reset(
        self,
        ω0_s: float = 1e-6,
        Δω0_s: float = 0,
        f0_c: float = 1e-6,
        Δf0_c: float = 0,
        T: float = units.celsius_to_kelvin(190),
        L0: float = 1 * 12 * units.METERS_PER_INCH,
        ε: float = 0.1 * units.METERS_PER_INCH,
    ):
        """
        Extruder model for simulation.

        Parameters
        ----------
        ω0_s : float, optional
            Initial screw angular speed (radians / second).
        Δω0_s : float, optional
            Initial change in screw angular speed (radians / second^2).
        f0_c : float, optional
            Initial cutter frequency (hertz).
        Δf0_c : float, optional
            Initial change in cutter frequency (1 / second^2).
        T : float, optional
            Initial temperature (Kelvin).
        L0 : float, optional
            Initial product length (meters).
        ε : float, optional
            Product tolerance (meters).
        """

        # angular speed of the extruder screw (radians / second)
        self.ω_s = ω0_s

        # change in angular speed of the extruder screw (radians / second^2)
        self.Δω_s = Δω0_s
        self.Δω_eff = self.Δω_s

        # frequency of the cutter (hertz)
        self.f_c = f0_c

        # change in cutter frequency (1 / second^2)
        self.Δf_c = Δf0_c
        self.Δf_eff = self.Δf_c

        # temperature (Kelvin)
        self.T = T

        self.L0 = L0
        self.ε = ε

        model = em.ExtrusionModel(
            ω=self.ω_s, Δω=self.Δω_s, f_c=self.f_c, T=self.T, Δt=Δt
        )

        self.T += model.ΔT

        # material flow rate (meters^3 / second)
        self.Q = model.Q_op

        # product length (meters)
        self.L = model.L

        # manufacturing yield, defined as the number of good parts
        # per iteration (dimensionless)
        self.yield_ = model.yield_

    def episode_start(self, config: Schema) -> None:
        """Reset the model from the per-episode Bonsai configuration.

        NOTE(review): L0 and ε are not read from the config, so every episode
        uses their defaults — confirm this is intentional.
        """
        self.reset(
            ω0_s=config.get("initial_screw_angular_speed"),
            Δω0_s=config.get("initial_screw_angular_acceleration"),
            f0_c=config.get("initial_cutter_frequency"),
            Δf0_c=config.get("initial_cutter_acceleration"),
            T=config.get("initial_temperature"),
        )

    def step(self):
        """Advance the simulation state by one Δt time step."""

        # add a small amount of random noise to the actions to avoid
        # the trivial solution of simply applying zero acceleration
        # on each iteration
        σ_max = 0.0001
        σ_s = random.uniform(-σ_max, σ_max)
        σ_c = random.uniform(-σ_max, σ_max)

        self.Δω_eff = self.Δω_s * (1 + σ_s)
        self.ω_s += Δt * self.Δω_eff

        self.Δf_eff = self.Δf_c * (1 + σ_c)
        self.f_c += Δt * self.Δf_eff

        model = em.ExtrusionModel(
            ω=self.ω_s, Δω=self.Δω_eff, f_c=self.f_c, T=self.T, Δt=Δt
        )

        self.T += model.ΔT

        # material flow rate (meters^3 / second)
        self.Q = model.Q_op

        # product length (meters)
        self.L = model.L

        # manufacturing yield, defined as the number of good parts
        # per iteration (dimensionless)
        self.yield_ = model.yield_

    def episode_step(self, action: Schema) -> None:
        """Apply the brain's accelerations, then advance one step."""
        self.Δω_s = action.get("screw_angular_acceleration")
        self.Δf_c = action.get("cutter_acceleration")
        self.step()

    def get_state(self):
        """Return the observable state dictionary reported to the platform."""
        return {
            "screw_angular_speed": self.ω_s,
            "screw_angular_acceleration": self.Δω_eff,
            "cutter_frequency": self.f_c,
            "cutter_acceleration": self.Δf_eff,
            "temperature": self.T,
            "product_length": self.L,
            "flow_rate": self.Q,
            "yield": self.yield_,
        }

    def halted(self) -> bool:
        # The simulator never requests early episode termination.
        return False

    def get_interface(self) -> SimulatorInterface:
        """Register sim interface."""
        with open("interface.json", "r") as infile:
            interface = json.load(infile)

        return SimulatorInterface(
            name=interface["name"],
            timeout=interface["timeout"],
            simulator_context=self.get_simulator_context(),
            description=interface["description"],
        )


def main():
    """Connect to the Bonsai platform and run the simulation loop forever.

    Raises
    ------
    ValueError
        If the workspace ID or access key is not configured.
    """

    # values in `.env`, if they exist, take priority over environment variables.
    # BUGFIX: load_dotenv must run BEFORE the os.getenv calls below, otherwise
    # values supplied only via `.env` are never seen.
    dotenv.load_dotenv(".env", override=True)

    workspace = os.getenv("SIM_WORKSPACE")
    access_key = os.getenv("SIM_ACCESS_KEY")

    if workspace is None:
        raise ValueError("The Bonsai workspace ID is not set.")
    if access_key is None:
        raise ValueError("The access key for the Bonsai workspace is not set.")

    config = BonsaiClientConfig(workspace=workspace, access_key=access_key)

    extruder_sim = ExtruderSimulation(config)

    extruder_sim.reset()

    while extruder_sim.run():
        continue


if __name__ == "__main__":
    main()
nilq/baby-python
python
# flake8: noqa pylint: skip-file
"""Tests for the TelldusLive config flow."""
import asyncio
from unittest.mock import Mock, patch

import pytest

from homeassistant import data_entry_flow
from homeassistant.components.tellduslive import (
    APPLICATION_NAME, DOMAIN, KEY_SCAN_INTERVAL, SCAN_INTERVAL, config_flow)
from homeassistant.const import CONF_HOST

from tests.common import MockConfigEntry, MockDependency, mock_coro


def init_config_flow(hass, side_effect=None):
    """Init a configuration flow."""
    flow = config_flow.FlowHandler()
    flow.hass = hass
    if side_effect:
        # Replace the auth-url helper so individual tests can force
        # timeouts/exceptions in the authorize-url generation step.
        flow._get_auth_url = Mock(side_effect=side_effect)
    return flow


@pytest.fixture
def supports_local_api():
    """Set TelldusLive supports_local_api."""
    return True


@pytest.fixture
def authorize():
    """Set TelldusLive authorize."""
    return True


@pytest.fixture
def mock_tellduslive(supports_local_api, authorize):
    """Mock tellduslive."""
    # The two fixture parameters above can be overridden per-test via
    # @pytest.mark.parametrize to simulate failing/limited backends.
    with MockDependency('tellduslive') as mock_tellduslive_:
        mock_tellduslive_.supports_local_api.return_value = supports_local_api
        mock_tellduslive_.Session().authorize.return_value = authorize
        mock_tellduslive_.Session().access_token = 'token'
        mock_tellduslive_.Session().access_token_secret = 'token_secret'
        mock_tellduslive_.Session().authorize_url = 'https://example.com'
        yield mock_tellduslive_


async def test_abort_if_already_setup(hass):
    """Test we abort if TelldusLive is already setup."""
    flow = init_config_flow(hass)

    with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
        result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'

    with patch.object(hass.config_entries, 'async_entries', return_value=[{}]):
        result = await flow.async_step_import(None)
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'


async def test_full_flow_implementation(hass, mock_tellduslive):
    """Test registering an implementation and finishing flow works."""
    flow = init_config_flow(hass)
    result = await flow.async_step_discovery(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'
    assert len(flow._hosts) == 2

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'

    result = await flow.async_step_user({'host': 'localhost'})
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert result['description_placeholders'] == {
        'auth_url': 'https://example.com',
        'app_name': APPLICATION_NAME,
    }

    result = await flow.async_step_auth('')
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'localhost'
    assert result['data']['host'] == 'localhost'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {'token': 'token', 'host': 'localhost'}


async def test_step_import(hass, mock_tellduslive):
    """Test that we trigger auth when configuring from import."""
    flow = init_config_flow(hass)

    result = await flow.async_step_import({
        CONF_HOST: DOMAIN,
        KEY_SCAN_INTERVAL: 0,
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'


async def test_step_import_add_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)

    result = await flow.async_step_import({
        CONF_HOST: 'localhost',
        KEY_SCAN_INTERVAL: 0,
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'


async def test_step_import_no_config_file(hass, mock_tellduslive):
    """Test that we trigger user with no config_file configuring from import."""
    flow = init_config_flow(hass)

    result = await flow.async_step_import({
        CONF_HOST: 'localhost',
        KEY_SCAN_INTERVAL: 0,
    })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'


async def test_step_import_load_json_matching_host(hass, mock_tellduslive):
    """Test that we add host and trigger user when configuring from import."""
    flow = init_config_flow(hass)

    with patch('homeassistant.components.tellduslive.config_flow.load_json',
               return_value={'tellduslive': {}}), \
            patch('os.path.isfile'):
        result = await flow.async_step_import({
            CONF_HOST: 'Cloud API',
            KEY_SCAN_INTERVAL: 0,
        })
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'user'


async def test_step_import_load_json(hass, mock_tellduslive):
    """Test that we create entry when configuring from import."""
    flow = init_config_flow(hass)

    with patch('homeassistant.components.tellduslive.config_flow.load_json',
               return_value={'localhost': {}}), \
            patch('os.path.isfile'):
        result = await flow.async_step_import({
            CONF_HOST: 'localhost',
            KEY_SCAN_INTERVAL: SCAN_INTERVAL,
        })
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'localhost'
    assert result['data']['host'] == 'localhost'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {}


@pytest.mark.parametrize('supports_local_api', [False])
async def test_step_disco_no_local_api(hass, mock_tellduslive):
    """Test that we trigger when configuring from discovery, not supporting local api."""
    flow = init_config_flow(hass)

    result = await flow.async_step_discovery(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert len(flow._hosts) == 1


async def test_step_auth(hass, mock_tellduslive):
    """Test that create cloud entity from auth."""
    flow = init_config_flow(hass)

    await flow.async_step_auth()
    result = await flow.async_step_auth(['localhost', 'tellstick'])
    assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result['title'] == 'Cloud API'
    assert result['data']['host'] == 'Cloud API'
    assert result['data']['scan_interval'] == 60
    assert result['data']['session'] == {
        'token': 'token',
        'token_secret': 'token_secret',
    }


@pytest.mark.parametrize('authorize', [False])
async def test_wrong_auth_flow_implementation(hass, mock_tellduslive):
    """Test wrong auth."""
    flow = init_config_flow(hass)

    await flow.async_step_auth()
    result = await flow.async_step_auth('')
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'
    assert result['errors']['base'] == 'auth_error'


async def test_not_pick_host_if_only_one(hass, mock_tellduslive):
    """Test not picking host if we have just one."""
    flow = init_config_flow(hass)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert result['step_id'] == 'auth'


async def test_abort_if_timeout_generating_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url timeout."""
    flow = init_config_flow(hass, side_effect=asyncio.TimeoutError)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_timeout'


async def test_abort_no_auth_url(hass, mock_tellduslive):
    """Test abort if generating authorize url returns none."""
    flow = init_config_flow(hass)
    flow._get_auth_url = Mock(return_value=False)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_fail'


async def test_abort_if_exception_generating_auth_url(hass, mock_tellduslive):
    """Test we abort if generating authorize url blows up."""
    flow = init_config_flow(hass, side_effect=ValueError)

    result = await flow.async_step_user()
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'authorize_url_fail'


async def test_discovery_already_configured(hass, mock_tellduslive):
    """Test abort if already configured fires from discovery."""
    MockConfigEntry(
        domain='tellduslive',
        data={'host': 'some-host'}
    ).add_to_hass(hass)
    flow = init_config_flow(hass)

    result = await flow.async_step_discovery(['some-host', ''])
    assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
    assert result['reason'] == 'already_setup'
nilq/baby-python
python
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.

from tempest.lib.common.utils import data_utils

from openstackclient.tests.functional.identity.v3 import common


class IdentityProviderTests(common.IdentityTests):
    # Introduce functional test case for command 'Identity Provider'
    # Each test shells out to the real `openstack` CLI via self.openstack()
    # and therefore needs a reachable Keystone with federation enabled.

    def test_idp_create(self):
        self._create_dummy_idp()

    def test_idp_delete(self):
        # add_clean_up=False: the test deletes the IdP itself.
        identity_provider = self._create_dummy_idp(add_clean_up=False)
        raw_output = self.openstack('identity provider delete %s'
                                    % identity_provider)
        # A successful delete prints nothing.
        self.assertEqual(0, len(raw_output))

    def test_idp_multi_delete(self):
        idp_1 = self._create_dummy_idp(add_clean_up=False)
        idp_2 = self._create_dummy_idp(add_clean_up=False)
        raw_output = self.openstack(
            'identity provider delete %s %s' % (idp_1, idp_2))
        self.assertEqual(0, len(raw_output))

    def test_idp_show(self):
        identity_provider = self._create_dummy_idp(add_clean_up=True)
        raw_output = self.openstack('identity provider show %s'
                                    % identity_provider)
        items = self.parse_show(raw_output)
        self.assert_show_fields(items, self.IDENTITY_PROVIDER_FIELDS)

    def test_idp_list(self):
        self._create_dummy_idp(add_clean_up=True)
        raw_output = self.openstack('identity provider list')
        items = self.parse_listing(raw_output)
        self.assert_table_structure(items,
                                    self.IDENTITY_PROVIDER_LIST_HEADERS)

    def test_idp_set(self):
        identity_provider = self._create_dummy_idp(add_clean_up=True)
        new_remoteid = data_utils.rand_name('newRemoteId')
        raw_output = self.openstack('identity provider set '
                                    '%(identity-provider)s '
                                    '--remote-id %(remote-id)s '
                                    % {'identity-provider': identity_provider,
                                       'remote-id': new_remoteid})
        self.assertEqual(0, len(raw_output))
        # Confirm the remote id was actually persisted.
        raw_output = self.openstack('identity provider show %s'
                                    % identity_provider)
        updated_value = self.parse_show_as_object(raw_output)
        self.assertIn(new_remoteid, updated_value['remote_ids'])
nilq/baby-python
python
"""Unit tests for the translator module (English <-> French)."""

import unittest

from translator import english_to_french, french_to_english


class TestE2F(unittest.TestCase):
    """Tests for english_to_french."""

    def test1(self):
        self.assertEqual(english_to_french(""), "API Exception")  # test null
        self.assertEqual(english_to_french("Hello"), "Bonjour")  # test positive
        self.assertNotEqual(english_to_french("Hello"), "Hello")  # test negative


class TestF2E(unittest.TestCase):
    """Tests for french_to_english."""

    def test1(self):
        self.assertEqual(french_to_english(""), "API Exception")  # test null
        self.assertEqual(french_to_english("Bonjour"), "Hello")  # test positive
        self.assertNotEqual(french_to_english("Bonjour"), "Bonjour")  # test negative


if __name__ == "__main__":
    # BUGFIX: the unguarded unittest.main() ran the suite (and called
    # sys.exit) whenever this module was merely imported by a test runner.
    unittest.main()
nilq/baby-python
python
import errno
import logging
import os
from typing import TYPE_CHECKING, Optional

from .errors import ObjectFormatError

if TYPE_CHECKING:
    from dvc.fs.base import FileSystem
    from dvc.hash_info import HashInfo
    from dvc.types import AnyPath

    from .db.base import ObjectDB

logger = logging.getLogger(__name__)


class HashFile:
    """A single file-like object identified by its content hash."""

    def __init__(
        self,
        fs_path: Optional["AnyPath"],
        fs: Optional["FileSystem"],
        hash_info: "HashInfo",
        name: Optional[str] = None,
    ):
        self.fs_path = fs_path
        self.fs = fs
        self.hash_info = hash_info
        self.name = name

    def __len__(self):
        # A HashFile always represents exactly one object.
        return 1

    def __str__(self):
        return f"object {self.hash_info}"

    def __bool__(self):
        # Truthy only when a hash value is actually present.
        return bool(self.hash_info)

    def __eq__(self, other):
        if not isinstance(other, HashFile):
            return False
        return (
            self.fs_path == other.fs_path
            and self.fs == other.fs
            and self.hash_info == other.hash_info
        )

    def __hash__(self):
        # Mirrors __eq__, but hashes the fs scheme instead of the fs object
        # itself (FileSystem instances may not be hashable).
        return hash(
            (
                self.hash_info,
                self.fs_path,
                self.fs.scheme if self.fs else None,
            )
        )

    def check(self, odb: "ObjectDB", check_hash: bool = True):
        """Verify that this object is intact.

        With ``check_hash=False`` only the existence of the backing file is
        checked; otherwise the file is re-hashed and compared to hash_info.

        Raises:
            FileNotFoundError: the backing file is missing
                (existence-only mode).
            ObjectFormatError: the re-computed hash does not match.
        """
        if not check_hash:
            assert self.fs
            if not self.fs.exists(self.fs_path):
                raise FileNotFoundError(
                    errno.ENOENT, os.strerror(errno.ENOENT), self.fs_path
                )
            else:
                return None
        self._check_hash(odb)

    def _check_hash(self, odb):
        # Re-hash the file with the same algorithm and compare only the part
        # of each value before the first '.' (suffixes are ignored).
        from .stage import get_file_hash

        _, actual = get_file_hash(
            self.fs_path, self.fs, self.hash_info.name, odb.state
        )

        logger.trace(
            "cache '%s' expected '%s' actual '%s'",
            self.fs_path,
            self.hash_info,
            actual,
        )

        assert actual.name == self.hash_info.name
        if actual.value.split(".")[0] != self.hash_info.value.split(".")[0]:
            raise ObjectFormatError(f"{self} is corrupted")
nilq/baby-python
python
"""Generators - Small ===================== Some small graphs """ import pytest from networkx.generators.tests.test_small import TestGeneratorsSmall from graphscope.framework.errors import UnimplementedError from graphscope.nx.utils.compat import with_graphscope_nx_context @pytest.mark.usefixtures("graphscope_session") @with_graphscope_nx_context(TestGeneratorsSmall) class TestGeneratorsSmall: def test_properties_named_small_graphs(self): pass
nilq/baby-python
python
"""Bogosort: repeatedly shuffle a list until it happens to be sorted.

Expected O(n * n!) runtime -- for demonstration purposes only.
"""
import random


def bogoSort(a):
    """Sort the list ``a`` in place by shuffling until it is sorted."""
    while not is_sorted(a):
        shuffle(a)


def is_sorted(a):
    """Return True if ``a`` is in non-decreasing order.

    Empty and single-element lists are considered sorted.
    """
    return all(a[i] <= a[i + 1] for i in range(len(a) - 1))


def shuffle(a):
    """Shuffle ``a`` in place, uniformly over all permutations.

    BUGFIX: the previous hand-rolled swap-with-random-index loop does not
    produce a uniform permutation (some orderings are more likely than
    others); random.shuffle implements an unbiased Fisher-Yates shuffle.
    """
    random.shuffle(a)


if __name__ == "__main__":
    # Guarded so importing this module does not block on stdin.
    lst = list(map(int, input('Enter a number list to be sorted: ').split()))
    bogoSort(lst)
    print(lst)
nilq/baby-python
python
from alembic import op
import sqlalchemy as sa

"""add ride resync date

Revision ID: 21518d40552c
Revises: d4be89cbab08
Create Date: 2020-02-01 08:53:33.632416

"""

# revision identifiers, used by Alembic.
revision = '21518d40552c'
down_revision = 'd4be89cbab08'


def upgrade():
    # Nullable timestamp marking when a ride should be re-fetched.
    op.add_column('rides', sa.Column('resync_date', sa.DateTime, nullable=True))
    # we do not know which rides have partial efforts fetched so schedule them all for resync over the next few days
    # NOTE(review): `interval floor(rand() * 72) hour` is MySQL-specific SQL --
    # confirm the target database before reusing this migration elsewhere.
    op.execute('update rides set efforts_fetched = false, resync_count = 1, resync_date = now() + interval floor(rand() * 72) hour')
    pass


def downgrade():
    # Only the column is removed; the efforts_fetched/resync_count values
    # written by upgrade() are intentionally left as-is.
    op.drop_column('rides', 'resync_date')
    pass
nilq/baby-python
python
from flask import Flask, render_template, request
from wtforms import Form, DecimalField, validators

app = Flask(__name__)


class EntryForm(Form):
    """Entry form with two bounded decimal inputs."""

    x_entry = DecimalField('x:', places=10,
                           validators=[validators.NumberRange(-1e10, 1e10)])
    y_entry = DecimalField('y:', places=10,
                           validators=[validators.NumberRange(-1e10, 1e10)])


@app.route('/')
def index():
    """Render an empty entry form."""
    return render_template('entry.html', form=EntryForm(request.form), z='')


@app.route('/results', methods=['POST'])
def results():
    """Validate the submitted form and render the sum of x and y."""
    form = EntryForm(request.form)
    if request.method == 'POST' and form.validate():
        total = float(request.form['x_entry']) + float(request.form['y_entry'])
    else:
        total = ''
    return render_template('entry.html', form=form, z=total)


if __name__ == '__main__':
    app.run(debug=True)
nilq/baby-python
python
"""Try out other sklearn score to measure the """ import os import sys import glob import numpy as np import argparse import tqdm import matplotlib.pyplot as plt from sklearn.neighbors import LocalOutlierFactor from pathlib import Path sys.path.append('../') from eval.metric import silhouette, hsic_gam_mat, inertia_ap, DaviesBouldin from eval.eval_utils import mean_std from sklearn_extra.cluster import KMedoids parser = argparse.ArgumentParser() parser.add_argument("--options", type=int, default=1) parser.add_argument("--perf_perc", type=int, default=50) parser.add_argument("--sample_num", type=int, default=20) parser.add_argument("--seed", type=int, default=1) parser.add_argument("--inertia_mean", action='store_true') parser.add_argument("--normalize", action='store_true') args = parser.parse_args() print(args) perf_perc = str(args.perf_perc) test_gt_path = 'scripts/shapenet13/pts_testgt_dm_part_sscore/viewer_test_points_trainnv01_testnv01_2000.npz' pred_path = glob.glob("scripts/shapenet13_pred/dm_part_sscore/*.npz") pred_path.sort() pred_path.remove('scripts/shapenet13_pred/dm_part_sscore/viewer_pred_points_trainnv02_testnv01_2000.npz') pred_path.remove('scripts/shapenet13_pred/dm_part_sscore/viewer_pred_points_trainnv04_testnv01_2000.npz') pred_path.remove('scripts/shapenet13_pred/dm_part_sscore/viewer_pred_points_trainnv05_testnv01_2000.npz') tes_gt = np.load(test_gt_path, allow_pickle=True) model_outlier_dic = {} for i in range(len(pred_path)): model_outlier_dic.update({i: []}) gt_outlier_test = [] ## LOF, fit pred and predict # for seed in ['1', '2', '3', '4', '5']: # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = pred_gt[seed].item()['dm'] # clf = LocalOutlierFactor(metric='precomputed') # clf.fit_predict(dm) # model_outlier_dic[i].append(-1 * np.mean(clf.negative_outlier_factor_)) # #print(len(model_outlier_dic[i])) # clf = LocalOutlierFactor(metric='precomputed') # clf.fit_predict(tes_gt[seed].item()['dm']) # 
gt_outlier_test.append(-1 * np.mean(clf.negative_outlier_factor_)) ## LOF, fit gt and predict # for seed in ['1', '2', '3', '4', '5']: # clf = LocalOutlierFactor(metric='precomputed', novelty=True) # clf.fit(tes_gt[seed].item()['dm']) # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = pred_gt[seed].item()['dm'] # model_outlier_dic[i].append(-1 * np.mean(clf.score_samples(dm))) # #gt_outlier_test.append(-1 * np.mean(clf.negative_outlier_factor_)) # for i in range(len(pred_path)): # print(pred_path[i]) # m_s = mean_std(model_outlier_dic[i]) # print(f"{m_s[0]:.6f}, {m_s[1]:.6f}") # if gt_outlier_test: # m_s = mean_std(gt_outlier_test) # print(f"{m_s[0]:.6f}, {m_s[1]:.6f}") # # k = 50 90 ## Kmeans + inertia criterion = 'KM_inertia_Mean' n_cluster_list = list(range(10, 91, 10)) #n_cluster_list = list(range(90, 171, 10)) n_cluster = 50 plt.figure(figsize=(10, 10)) for idx, n_cluster in enumerate(n_cluster_list): train_nviews = [1, 3, 6, 9, 12, 15, 18, 21, 23] pbar = tqdm.tqdm(total=len(pred_path) * 5) model_inertia_dic = {} for i in range(len(pred_path)): model_inertia_dic.update({i: []}) gt_inertia_test = [] for seed in ['1', '2', '3', '4', '5']: for i in range(len(pred_path)): pred_gt = np.load(pred_path[i], allow_pickle=True) dm = pred_gt[seed].item()['dm'] kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(dm) #model_inertia_dic[i].append(kmedoids.inertia_) model_inertia_dic[i].append(kmedoids.inertia_ / dm.shape[0]) #model_inertia_dic[i].append(silhouette(dm, kmedoids.labels_)) pbar.update(1) kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(tes_gt[str(seed)].item()['dm']) gt_inertia_test.append(kmedoids.inertia_ / tes_gt[str(seed)].item()['dm'].shape[0]) #gt_inertia_test.append(silhouette(tes_gt[str(seed)].item()['dm'], kmedoids.labels_)) val_mean_list = [] val_std_list = [] for i in 
range(len(pred_path)): m_s = mean_std(model_inertia_dic[i]) val_mean_list.append(m_s[0]) val_std_list.append(m_s[1]) if gt_inertia_test: m_s = mean_std(gt_inertia_test) #print(val_mean_list) #print(val_std_list) #print(m_s) plt.subplot(3, 3, idx // 3 * 3 + idx % 3 + 1) plt.plot(train_nviews, val_mean_list, "-o") plt.fill_between(train_nviews, [val_mean_list[i] - val_std_list[i] for i in range(len(val_mean_list))], [val_mean_list[i] + val_std_list[i] for i in range(len(val_mean_list))], facecolor='gray', alpha=0.2) plt.plot(train_nviews, len(train_nviews) * [m_s[0]]) plt.fill_between(train_nviews, len(train_nviews) * [m_s[0]-m_s[1]], len(train_nviews) * [m_s[0]+m_s[1]], facecolor='gray', alpha=0.2) plt.xticks(train_nviews) plt.legend([f"Pred {criterion} K={n_cluster}", f"GT {criterion} K={n_cluster}"]) if idx // 3 == 0: plt.title(f"Pred PointCloud {criterion} Value") if idx // 3 == 2: plt.xlabel("Num of views per shape in Train Set") if idx % 3 == 0: plt.ylabel(f"{criterion} value") plt.savefig(f'scripts/shapenet13_pred/{criterion}_{n_cluster_list[0]}_{n_cluster_list[-1]}.png') # criterion = 'KM_inertia_Mean' # n_cluster_list = list(range(10, 91, 10)) # #n_cluster_list = list(range(90, 171, 10)) # n_cluster = 50 # plt.figure(figsize=(10, 10)) # for idx, n_cluster in enumerate(n_cluster_list): # train_nviews = [1, 3, 6, 9, 12, 15, 18, 21, 23] # pbar = tqdm.tqdm(total=len(pred_path) * 5) # model_inertia_dic = {} # for i in range(len(pred_path)): # model_inertia_dic.update({i: []}) # gt_inertia_test = [] # for seed in ['1', '2', '3', '4', '5']: # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = pred_gt[seed].item()['dm'] # kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(dm) # #model_inertia_dic[i].append(kmedoids.inertia_) # model_inertia_dic[i].append(kmedoids.inertia_ / dm.shape[0]) # #model_inertia_dic[i].append(silhouette(dm, kmedoids.labels_)) # 
pbar.update(1) # kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(tes_gt[str(seed)].item()['dm']) # gt_inertia_test.append(kmedoids.inertia_ / tes_gt[str(seed)].item()['dm'].shape[0]) # #gt_inertia_test.append(silhouette(tes_gt[str(seed)].item()['dm'], kmedoids.labels_)) # val_mean_list = [] # val_std_list = [] # for i in range(len(pred_path)): # m_s = mean_std(model_inertia_dic[i]) # val_mean_list.append(m_s[0]) # val_std_list.append(m_s[1]) # if gt_inertia_test: # m_s = mean_std(gt_inertia_test) # #print(val_mean_list) # #print(val_std_list) # #print(m_s) # plt.subplot(3, 3, idx // 3 * 3 + idx % 3 + 1) # plt.plot(train_nviews, val_mean_list, "-o") # plt.fill_between(train_nviews, [val_mean_list[i] - val_std_list[i] for i in range(len(val_mean_list))], [val_mean_list[i] + val_std_list[i] for i in range(len(val_mean_list))], facecolor='gray', alpha=0.2) # plt.plot(train_nviews, len(train_nviews) * [m_s[0]]) # plt.fill_between(train_nviews, len(train_nviews) * [m_s[0]-m_s[1]], len(train_nviews) * [m_s[0]+m_s[1]], facecolor='gray', alpha=0.2) # plt.xticks(train_nviews) # plt.legend([f"Pred {criterion} K={n_cluster}", f"GT {criterion} K={n_cluster}"]) # if idx // 3 == 0: # plt.title(f"Pred PointCloud {criterion} Value") # if idx // 3 == 2: # plt.xlabel("Num of views per shape in Train Set") # if idx % 3 == 0: # plt.ylabel(f"{criterion} value") # plt.savefig(f'scripts/shapenet13_pred/{criterion}_{n_cluster_list[0]}_{n_cluster_list[-1]}.png') ## AP + inertia + Pred Nviews=1..23 # criterion = 'ap_inertia_normalize' # plt.figure(figsize=(10, 10)) # train_nviews = [1, 3, 6, 9, 12, 15, 18, 21, 23] # # pbar = tqdm.tqdm(total=len(pred_path) * 5) # model_inertia_dic = {} # for i in range(len(pred_path)): # model_inertia_dic.update({i: []}) # gt_inertia_test = [] # for seed in ['1', '2', '3', '4', '5']: # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = 
pred_gt[seed].item()['dm'] # inertia, matrix_part, part_preference = inertia_ap(dm, seed=1, pc=args.perf_perc, normalize=True) # model_inertia_dic[i].append(inertia) # pbar.update(1) # inertia, matrix_part, part_preference = inertia_ap(tes_gt[str(seed)].item()['dm'], seed=1, pc=args.perf_perc, normalize=True) # gt_inertia_test.append(inertia) # #gt_inertia_test.append(kmedoids.inertia_) # #gt_inertia_test.append(silhouette(tes_gt[str(seed)].item()['dm'], kmedoids.labels_)) # val_mean_list = [] # val_std_list = [] # for i in range(len(pred_path)): # m_s = mean_std(model_inertia_dic[i]) # val_mean_list.append(m_s[0]) # val_std_list.append(m_s[1]) # if gt_inertia_test: # m_s = mean_std(gt_inertia_test) # print(val_mean_list) # print(val_std_list) # print(m_s) # plt.figure(figsize=(10, 10)) # plt.plot(train_nviews, val_mean_list, "-o") # plt.fill_between(train_nviews, [val_mean_list[i] - val_std_list[i] for i in range(len(val_mean_list))], [val_mean_list[i] + val_std_list[i] for i in range(len(val_mean_list))], facecolor='gray', alpha=0.2) # plt.plot(train_nviews, len(train_nviews) * [m_s[0]]) # plt.fill_between(train_nviews, len(train_nviews) * [m_s[0]-m_s[1]], len(train_nviews) * [m_s[0]+m_s[1]], facecolor='gray', alpha=0.2) # plt.xticks(train_nviews) # plt.legend([f"Pred {criterion} AP", f"GT {criterion} AP"]) # plt.title(f"Pred PointCloud {criterion} Value AP Perf {args.perf_perc}") # plt.xlabel("Num of views per shape in Train Set") # plt.ylabel(f"{criterion} value") # plt.savefig(f'scripts/shapenet13_pred/{criterion}_perf{args.perf_perc}.png') #plt.show() # k = 50 90 ## Kmeans + DBI # criterion = 'DBI_mean' # #n_cluster_list = list(range(10, 91, 10)) # n_cluster_list = list(range(90, 171, 10)) # n_cluster = 50 # plt.figure(figsize=(10, 10)) # for idx, n_cluster in enumerate(n_cluster_list): # train_nviews = [1, 3, 6, 9, 12, 15, 18, 21, 23] # pbar = tqdm.tqdm(total=len(pred_path) * 5) # model_inertia_dic = {} # for i in range(len(pred_path)): # 
model_inertia_dic.update({i: []}) # gt_inertia_test = [] # for seed in ['1', '2', '3', '4', '5']: # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = pred_gt[seed].item()['dm'] # kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(dm) # #model_inertia_dic[i].append(kmedoids.inertia_) # #model_inertia_dic[i].append(silhouette(dm, kmedoids.labels_)) # model_inertia_dic[i].append(DaviesBouldin(dm, kmedoids.labels_)) # pbar.update(1) # kmedoids = KMedoids(n_clusters=n_cluster, random_state=int(seed), metric='precomputed', init='k-medoids++').fit(tes_gt[str(seed)].item()['dm']) # #gt_inertia_test.append(kmedoids.inertia_) # #gt_inertia_test.append(silhouette(tes_gt[str(seed)].item()['dm'], kmedoids.labels_)) # gt_inertia_test.append(DaviesBouldin(tes_gt[str(seed)].item()['dm'], kmedoids.labels_)) # val_mean_list = [] # val_std_list = [] # for i in range(len(pred_path)): # m_s = mean_std(model_inertia_dic[i]) # val_mean_list.append(m_s[0]) # val_std_list.append(m_s[1]) # if gt_inertia_test: # m_s = mean_std(gt_inertia_test) # #print(val_mean_list) # #print(val_std_list) # #print(m_s) # plt.subplot(3, 3, idx // 3 * 3 + idx % 3 + 1) # plt.plot(train_nviews, val_mean_list, "-o") # plt.fill_between(train_nviews, [val_mean_list[i] - val_std_list[i] for i in range(len(val_mean_list))], [val_mean_list[i] + val_std_list[i] for i in range(len(val_mean_list))], facecolor='gray', alpha=0.2) # plt.plot(train_nviews, len(train_nviews) * [m_s[0]]) # plt.fill_between(train_nviews, len(train_nviews) * [m_s[0]-m_s[1]], len(train_nviews) * [m_s[0]+m_s[1]], facecolor='gray', alpha=0.2) # plt.xticks(train_nviews) # plt.legend([f"Pred {criterion} K={n_cluster}", f"GT {criterion} K={n_cluster}"]) # if idx // 3 == 0: # plt.title(f"Pred PointCloud {criterion} Value") # if idx // 3 == 2: # plt.xlabel("Num of views per shape in Train Set") # if idx % 3 == 0: # plt.ylabel(f"{criterion} value") # 
plt.savefig(f'scripts/shapenet13_pred/{criterion}_{n_cluster_list[0]}_{n_cluster_list[-1]}.png') ## HSIC # model_hsic_dic = {} # for i in range(len(pred_path)): # model_hsic_dic.update({i: {'value':[], 'th': []} }) # gt_hsic_test = {'value':[], 'th': []} # pbar = tqdm.tqdm(total=len(pred_path) * 5) # for seed in ['1', '2', '3', '4', '5']: # for i in range(len(pred_path)): # pred_gt = np.load(pred_path[i], allow_pickle=True) # dm = pred_gt[seed].item()['dm'] # value, threshold = hsic_gam_mat(dm, dm, 0.1) # model_hsic_dic[i]['value'].append(value) # model_hsic_dic[i]['th'].append(threshold) # pbar.update(1) # dm = tes_gt[seed].item()['dm'] # value, threshold = hsic_gam_mat(dm, dm, 0.5) # gt_hsic_test['value'].append(value) # gt_hsic_test['th'].append(threshold) # val_mean_list = [] # val_std_list = [] # th_mean_list = [] # th_std_list = [] # for i in range(len(pred_path)): # print(pred_path[i]) # val_m_s = mean_std(model_hsic_dic[i]['value']) # th_m_s = mean_std(model_hsic_dic[i]['th']) # val_mean_list.append(val_m_s[0]) # val_std_list.append(val_m_s[1]) # th_mean_list.append(th_m_s[0]) # th_std_list.append(th_m_s[1]) # val_mean_list = [round(item, 6) for item in val_mean_list] # val_std_list = [round(item, 6) for item in val_std_list] # th_mean_list = [round(item, 6) for item in th_mean_list] # th_std_list = [round(item, 6) for item in th_std_list] # print(val_mean_list) # print(val_std_list) # print(th_mean_list) # print(th_std_list) # print(mean_std(gt_hsic_test['value'])) # print(mean_std(gt_hsic_test['th'])) #################################################################################################### ### inertia_mean=args.inertia_mean plt.figure(figsize=(12, 6)) dataset = 'table_car' #shapenet13 table_car file_path = f'scripts/{dataset}/oc_vc_sscore/dm_part_sscore' #file_path = f'scripts/{dataset}/pts_testgt_dm_part_sscore' plt_path = os.path.join(file_path, 'inertia_plots') Path(plt_path).mkdir(parents=True, exist_ok=True) obj_score_list = [] 
view_score_list = [] sample_num_list = [20, 50, 100, 200, 500, 1000, 2000] #, 100, 200, 500, pbar = tqdm.tqdm(total=len(sample_num_list) * 5) for sample_num in sample_num_list: args.sample_num = sample_num checkpoints = glob.glob(os.path.join(file_path, f'*_{args.sample_num}.npz')) checkpoints.sort() obj_stats = np.load(checkpoints[0], allow_pickle=True) view_stats = np.load(checkpoints[1], allow_pickle=True) obj_inertia_list = [] view_inertia_list = [] for seed in ['1', '2', '3', '4', '5']: obj_dm = obj_stats[seed].item()['dm'] view_dm = view_stats[seed].item()['dm'] obj_inertia, matrix_part, part_preference = inertia_ap(obj_dm, seed=int(seed), pc=args.perf_perc, normalize=args.normalize) view_inertia, matrix_part, part_preference = inertia_ap(view_dm, seed=int(seed), pc=args.perf_perc, normalize=args.normalize) if inertia_mean: obj_inertia_list.append(obj_inertia / obj_dm.shape[0]) view_inertia_list.append(view_inertia / view_dm.shape[0]) else: obj_inertia_list.append(obj_inertia) view_inertia_list.append(view_inertia) pbar.update(1) obj_score_list.append(mean_std(obj_inertia_list)) view_score_list.append(mean_std(view_inertia_list)) plt.plot(sample_num_list, [item[0] for item in obj_score_list], 'ro-', label='Object Center') plt.fill_between(sample_num_list, [item[0]-item[1] for item in obj_score_list], [item[0]+item[1] for item in obj_score_list], facecolor='red', alpha=0.2) plt.plot(sample_num_list, [item[0] for item in view_score_list], 'bo-', label='Viewer Center') plt.fill_between(sample_num_list, [item[0]-item[1] for item in view_score_list], [item[0]+item[1] for item in view_score_list], facecolor='blue', alpha=0.2) plt.legend() plt.xlabel("Number of Samples", fontsize=15) plt.xticks(sample_num_list) plt.ylabel("Inertia", fontsize=15) if inertia_mean: plt.suptitle(f"{dataset} 13 OC/VC GT Shape Inertia Mean AP perf {args.perf_perc}") plt.savefig(os.path.join(plt_path, f'inertia_mean_ap_perf{args.perf_perc}_norm{args.normalize}.png')) else: 
plt.suptitle(f"{dataset} 13 OC/VC GT Shape Inertia Sum AP perf {args.perf_perc}") plt.savefig(os.path.join(plt_path, f'inertia_sum_ap_perf{args.perf_perc}_norm{args.normalize}.png')) #plt.show()
nilq/baby-python
python
# Packaging script for the ``ml_example`` project.
from setuptools import find_packages, setup

# NOTE(review): requirements.txt is read at import/build time, so running
# setup.py from a directory without that file raises FileNotFoundError.
with open('requirements.txt') as f:
    required = f.read().splitlines()

setup(
    name="ml_example",
    packages=find_packages(),
    version="0.1.0",
    description="Example of ml project",
    author="Your name (or your organization/company/team)",
    # Installs the ``ml_example_train`` console command, which dispatches to
    # the click/argparse entry point in ml_example.train_pipeline.
    entry_points={
        "console_scripts": [
            "ml_example_train = ml_example.train_pipeline:train_pipeline_command"
        ]
    },
    install_requires=required,
    license="MIT",
)
nilq/baby-python
python
import shutil
from dataclasses import dataclass
from pathlib import Path

import requests

from mealie.core import root_logger
from mealie.schema.recipe import Recipe
from mealie.services.image import minify

logger = root_logger.get_logger()


@dataclass
class ImageOptions:
    """File names used for the stored recipe image variants."""

    ORIGINAL_IMAGE: str = "original.webp"
    MINIFIED_IMAGE: str = "min-original.webp"
    TINY_IMAGE: str = "tiny-original.webp"


IMG_OPTIONS = ImageOptions()


def write_image(recipe_slug: str, file_data: bytes, extension: str) -> Path:
    """Write image data for a recipe and minify it in place.

    ``file_data`` may be a ``Path`` to copy from, raw ``bytes``, or a
    readable file-like object (e.g. a streaming HTTP response body).

    Returns the path of the stored original image.
    """
    image_dir = Recipe(slug=recipe_slug).image_dir
    extension = extension.replace(".", "")
    image_path = image_dir.joinpath(f"original.{extension}")
    # Always start from a fresh file; append mode below is then equivalent
    # to a plain write.
    image_path.unlink(missing_ok=True)

    if isinstance(file_data, Path):
        shutil.copy2(file_data, image_path)
    elif isinstance(file_data, bytes):
        with open(image_path, "ab") as f:
            f.write(file_data)
    else:
        # Assume a readable stream and copy it chunk-wise.
        with open(image_path, "ab") as f:
            shutil.copyfileobj(file_data, f)

    # FIX: was a stray debug ``print(image_path)``; route through the logger.
    logger.debug(f"Wrote recipe image to {image_path}")

    minify.minify_image(image_path, force=True)

    return image_path


def scrape_image(image_url: str, slug: str) -> Path:
    """Download a recipe image from ``image_url`` and store it under ``slug``.

    Scrapers hand back the image reference as a plain string, a list of
    strings, or a schema.org-style dict with a ``url`` key; all three are
    normalized here.  Returns ``Path(slug)`` on success, ``None`` on failure.
    """
    logger.info(f"Image URL: {image_url}")

    # Normalize list form: take the first candidate URL.
    if isinstance(image_url, list):
        image_url = image_url[0]

    # Normalize dict form: unwrap the "url" entry if present (as before, a
    # dict without "url" falls through and fails on the .split below).
    if isinstance(image_url, dict):
        image_url = image_url.get("url", image_url)

    filename = slug + "." + image_url.split(".")[-1]
    filename = Recipe(slug=slug).image_dir.joinpath(filename)

    try:
        r = requests.get(image_url, stream=True)
    except Exception:
        logger.exception("Fatal Image Request Exception")
        return None

    if r.status_code == 200:
        r.raw.decode_content = True
        logger.info(f"File Name Suffix {filename.suffix}")
        write_image(slug, r.raw, filename.suffix)

        filename.unlink(missing_ok=True)

        return Path(slug)

    return None
nilq/baby-python
python
"""
Mesh Normalization

Renders a SMPL body with its DensePose UV mapping, then renders the UV
atlas itself with normalized vertex coordinates baked in as colors.
"""
import os
import sys
import cv2
import numpy as np
from scipy import io as io
import torch
import pickle
import trimesh
import argparse

from external.smplx.smplx import body_models
sys.path.insert(0, '../external/pyrender')
import pyrender


def main(opt):
    # Load the SMPL body model and the DensePose UV data (extended faces,
    # per-face part ids, per-vertex UV coordinates).
    model = body_models.create(model_path='../3d_data/models', model_type='smpl', gender='male', ext='pkl')
    smpl = pickle.load(open('../3d_data/densepose_uv.pkl', 'rb'))
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    # All_FaceIndices: DensePose part id (1..24) for every UV face.
    uv_faceid = io.loadmat('../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']
    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    # Identity pose used when placing the UV-atlas mesh in its scene.
    global_tr = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ])

    # set up the rendering objects
    focal_length = opt.focal_length * opt.image_height
    # mesh_camera = pyrender.IntrinsicsCamera(focal_length, focal_length, opt.image_width / 2, opt.image_height / 2,
    #                                         opt.znear, opt.zfar)
    mesh_camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    # Camera for the UV-atlas render, pulled back slightly on +z.
    camera_pose = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.05],
        [0.0, 0.0, 0.0, 1.0]
    ])
    # Body mesh pose: raised by global_y + 0.11 in world space.
    mesh_tr = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, opt.global_y + 0.11],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ])
    # Camera for the body render, camera_distance away on +z.
    mesh_camera_pose = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, opt.camera_distance],
        [0.0, 0.0, 0.0, 1.0]
    ])
    render = pyrender.OffscreenRenderer(opt.image_width, opt.image_height)

    # Evaluate the SMPL model in its default pose and expand the vertex set
    # to the DensePose "extended" vertex indexing.
    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()
    mesh_verts = np.array([vertices[i] for i in smpl['v_extended']])
    visual_check = trimesh.visual.TextureVisuals(uv=uv, image=texture)
    tri_mesh_scene = trimesh.Trimesh(vertices=mesh_verts, faces=faces, visual=visual_check)
    mesh_body = pyrender.Mesh.from_trimesh(tri_mesh_scene)
    mesh_scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5], bg_color=[-1.0, -1.0, -1.0])
    mesh_scene.add(mesh_body, pose=mesh_tr)
    mesh_scene.add(mesh_camera, pose=mesh_camera_pose)
    # UV_RENDERING writes (u, v, face-index?) per pixel; background stays -1
    # (the bg_color above). NOTE(review): channel semantics come from the
    # vendored pyrender fork -- confirm against ../external/pyrender.
    rendered_uv, depth = render.render(scene=mesh_scene, flags=pyrender.RenderFlags.UV_RENDERING)
    rendered_uv = rendered_uv.copy()
    mask = rendered_uv[:, :, 2] != -1.
    temp_2 = rendered_uv[:, :, 2]
    # Map the rendered face index to its DensePose part id.
    temp_2[mask] = np.take(uv_faceid, temp_2[mask].astype('int'))
    rendered_uv[:, :, 2] = temp_2
    cv2.imshow('UV', rendered_uv)

    # Normalize body vertices into roughly [0, 1] using the scene centroid
    # and the oriented bounding-box extents.
    bounds = tri_mesh_scene.bounding_box_oriented.extents
    mesh_verts -= mesh_scene.centroid
    mesh_verts /= bounds
    # mesh_verts *= 2
    mesh_verts = mesh_verts + 1/2

    # Keep only the faces of DensePose part 1 for the UV-atlas render.
    face_select = faces[uv_faceid[:, 0] == 1]
    # verts = np.concatenate((uv, np.ones(uv.shape[:2] + (1,))), axis=2)
    # uv[:, 2] = 1
    # Place the UV coordinates in clip-ish space [-1, 1].
    verts = (uv * 2) - 1
    visual = trimesh.visual.ColorVisuals(vertex_colors=uv)
    tri_mesh = trimesh.Trimesh(vertices=verts, faces=face_select, visual=visual)
    # tri_mesh
    mesh = pyrender.Mesh.from_trimesh(tri_mesh)
    # tri_mesh.show()

    scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5], bg_color=[-1.0, -1.0, -1.0])
    scene.add(mesh, pose=global_tr)
    scene.add(camera, pose=camera_pose)
    rendered_color_visual, depth = render.render(scene=scene, flags=pyrender.RenderFlags.SKIP_CULL_FACES)
    # pyrender.Viewer(scene, render_flags={'cull_faces': False})
    cv2.imshow('Part UV', rendered_color_visual)
    # cv2.waitKey(0)
    # Barycentric weights and triangle ids let us interpolate per-vertex
    # data manually for each pixel of the atlas.
    rendered_interp, _ = render.render(scene=scene, flags=pyrender.RenderFlags.BARYCENTRIC_COORDINATES | pyrender.RenderFlags.SKIP_CULL_FACES)
    tri_id, _ = render.render(scene=scene, flags=pyrender.RenderFlags.TRIANGLE_ID_RENDERING | pyrender.RenderFlags.SKIP_CULL_FACES)

    # Gather the normalized 3D coordinates of each selected face's vertices
    # and blend them with the barycentric weights -> per-pixel coordinates.
    vertex_stream = np.take(mesh_verts, face_select, axis=0)
    tri_id = tri_id[:, :, 0]
    rendered_interp = rendered_interp.reshape(rendered_interp.shape + (1,)).repeat([3], axis=-1)
    out_view = vertex_stream[tri_id.astype('int')] * rendered_interp
    out_view = out_view.sum(axis=-2)
    # rendered_uv[rendered_uv == -1] = 0
    # rendered_uv[:, :, 2] /= 255
    # Zero out background pixels (bg renders negative).
    out_view[rendered_color_visual < 0] = 0
    # cv2.imwrite('../saves/checks/mesh_normalized_uv.jpg', (rendered_uv * 255).astype('uint8'))
    cv2.imshow('Coords', out_view)
    cv2.imwrite('../saves/checks/mesh_uv_render.jpg', (out_view * 255).astype('uint8'))
    cv2.waitKey(0)


def parse_args(args):
    """Parse the command-line options for the renderer."""

    def str2bool(v):
        # Lenient boolean parser for argparse flags (currently unused here).
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser()
    parser.add_argument('--n_samples', type=int, default=32, help='# of samples of human poses')
    parser.add_argument('--n_views', type=int, default=32, help='# of global camera poses')
    parser.add_argument('--n_poses_on_gpu', type=int, default=32,
                        help='# latentD sized vectors processed simulateneously')
    parser.add_argument('--camera_distance', type=float, default=3,
                        help='distance from the camera in the camera space')
    parser.add_argument('--global_y', type=float, default=0,
                        help='move the model in the up/down in the world space')
    parser.add_argument('--focal_length', type=float, default=1, help='focal length')
    parser.add_argument('--image_width', type=int, default=64, help='image width')
    parser.add_argument('--image_height', type=int, default=64, help='image height')
    parser.add_argument('--znear', type=float, default=0, help='near plane')
    parser.add_argument('--zfar', type=float, default=10, help='far plane')
    parser.add_argument('--out_dir', type=str, required=True, help='directory to write results')
    return parser.parse_args(args)


if __name__ == '__main__':
    # opt = parse_args(sys.argv[1:])
    # Hard-coded arguments used during development instead of sys.argv.
    opt = parse_args([
        '--n_samples=10',
        '--camera_distance=2.8',
        '--global_y=0.15',
        '--focal_length=1.09375',
        '--image_width=340',
        '--image_height=340',
        '--znear=0.05',
        '--zfar=5.05',
        '--out_dir=./smplx-uvs'
    ])
    main(opt)
nilq/baby-python
python
import logging
import time
from datetime import datetime

import IOstation
import clim2bry
import configM2R
import decimateGrid
import model2roms

__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@niva.no'
__created__ = datetime(2009, 1, 30)
__modified__ = datetime(2021, 7, 27)
__version__ = "1.6"
__status__ = "Development"

"""
Main method for running model2roms
Start: python runM2R.py
"""


def run():
    """Run the full model2roms conversion pipeline.

    Steps (driven entirely by the Model2romsConfig flags): build grid
    objects, convert the source model fields to ROMS climatology, write
    boundary files, optionally decimate the grid, and optionally extract
    station time series.
    """
    logging.basicConfig(level=logging.INFO)
    logging.info("[M2R_run] Initialized logging")

    logging.info("[M2R_run] Started model2roms")
    confM2R = configM2R.Model2romsConfig()
    confM2R.create_grd_objects()

    if confM2R.create_atmos_forcing or confM2R.create_ocean_forcing:

        if confM2R.create_ocean_forcing:
            model2roms.convert_MODEL2ROMS(confM2R)

            clim2bry.writebry(confM2R)

    # NOTE(review): atmospheric forcing is currently disabled; the flag is
    # checked above but the implementation below is commented out.
    # if confM2R.createAtmosForcing:
    #    atmosForcing.createAtmosFileUV(confM2R)

    if confM2R.decimate_gridfile:
        # Hard-coded paths -- only useful on the original developer machine.
        decimateGrid.createGrid(confM2R.grdROMS, "/Users/trondkr/Projects/KINO/GRID/kino_1600m_18072015.nc",
                                "/Users/trondkr/Projects/KINO/GRID/kino_1600m_18072015v2.nc", 2)

    if confM2R.extract_stations:
        print("Running in station mode and extracting pre-defined station locations")
        IOstation.getStationData(confM2R)

    print('Finished ' + time.ctime(time.time()))


run()
nilq/baby-python
python
from gpt2.data.dataset import Dataset from gpt2.data.vocabulary import Vocab from gpt2.data.tokenization import Tokenizer from gpt2.data.corpus import TokenizedCorpus
nilq/baby-python
python
# Inference options for a pix2pixHD (temporal) test run, dumped as plain
# module-level assignments.
aspect_ratio = 1.0
batchSize = 1
checkpoints_dir = '../checkpoints/'
cluster_path = 'features_clustered_010.npy'
data_type = 32
dataroot = './data/1/test'
display_winsize = 512
engine = None
export_onnx = None
feat_num = 3
fineSize = 512
fine_size = 480
how_many = 50
input_nc = 3
instance_feat = False
isTrain = False
label_feat = False
label_nc = 36
loadSize = 512
load_features = False
max_dataset_size = 100000
model = 'pix2pixHD_Temporal'
nThreads = 1
n_blocks_global = 9
n_blocks_local = 3
n_clusters = 10
n_downsample_E = 4
n_downsample_global = 4
n_local_enhancers = 1
name = 'updated'
nef = 16
netG = 'global'
ngf = 64
niter_fix_global = 0
no_flip = True
no_instance = True
norm = 'instance'
ntest = 100000
# FIX: the original read ``onnx=None,`` -- the trailing comma made this the
# one-element tuple ``(None,)`` instead of None.
onnx = None
output_nc = 3
phase = 'test'
resize_or_crop = 'scale_width'
results_dir = './results/'
serial_batches = True
tf_log = False
use_dropout = False
verbose = False
which_epoch = 'latest'
gpu_ids = [0]
nilq/baby-python
python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# String literals representing events associated to data store operations
BEFORE_CREATE = 'before_create'
BEFORE_READ = 'before_read'
BEFORE_UPDATE = 'before_update'
BEFORE_DELETE = 'before_delete'

PRECOMMIT_CREATE = 'precommit_create'
PRECOMMIT_UPDATE = 'precommit_update'
PRECOMMIT_DELETE = 'precommit_delete'
PRECOMMIT_ADD_ASSOCIATION = 'precommit_add_association'
PRECOMMIT_DELETE_ASSOCIATIONS = 'precommit_delete_associations'

AFTER_CREATE = 'after_create'
AFTER_READ = 'after_read'
AFTER_UPDATE = 'after_update'
AFTER_DELETE = 'after_delete'

# String literals representing events associated to API operations
BEFORE_RESPONSE = 'before_response'
AFTER_REQUEST = 'after_request'

# String literals representing events associated to process operations
BEFORE_INIT = 'before_init'
BEFORE_SPAWN = 'before_spawn'  # sent per process
AFTER_SPAWN = 'after_spawn'    # sent per process
AFTER_INIT = 'after_init'      # sent per worker

# String literals representing events associated to error conditions
ABORT_CREATE = 'abort_create'
ABORT_READ = 'abort_read'
ABORT_UPDATE = 'abort_update'
ABORT_DELETE = 'abort_delete'

# Prefixes used to derive an abort/before/precommit event name from an
# operation name.
ABORT = 'abort_'
BEFORE = 'before_'
PRECOMMIT = 'precommit_'

OVS_RESTARTED = 'ovs_restarted'


class EventPayload(object):
    """Base event payload object.

    This class is intended to be the super class for all event payloads. As
    such, it defines common attributes many events are likely to use in their
    payload. Note that event attributes are passed by reference; no copying
    of states, metadata or request_body is performed and thus consumers should
    not modify payload references.

    For more information, see the callbacks dev-ref documentation for this
    project.
    """

    def __init__(self, context, metadata=None, request_body=None,
                 states=None, resource_id=None):
        # the event context
        self.context = context

        # NOTE(boden): longer term we should consider removing metadata
        # optional 'unstructured' (key,value) pairs for special needs
        self.metadata = metadata if metadata else {}

        # the request body associated to the resource
        self.request_body = request_body

        # an iterable of states for the resource from the newest to the oldest
        # for example db states or api request/response
        # the actual object type for states will vary depending on event caller
        self.states = states if states else []

        # a unique ID for the event resource; may be None if the resource
        # isn't created yet
        self.resource_id = resource_id

    @property
    def has_states(self):
        """Determines if this event payload has any states.

        :returns: True if this event payload has states, otherwise False.
        """
        return len(self.states) > 0

    @property
    def latest_state(self):
        """Returns the latest state for the event payload.

        :returns: The last state of this event payload if has_state else None.
        """
        return self.states[-1] if self.has_states else None


class DBEventPayload(EventPayload):
    """The payload for data store events payloads."""

    def __init__(self, context, metadata=None, request_body=None,
                 states=None, resource_id=None, desired_state=None):

        super(DBEventPayload, self).__init__(
            context, metadata=metadata, request_body=request_body,
            states=states, resource_id=resource_id)

        # the model object to be persisted in pre create/commit payloads
        self.desired_state = desired_state

    @property
    def is_persisted(self):
        """Determine if the resource for this event payload is persisted.

        :returns: True if this payload's resource is persisted, otherwise
            False.
        """
        return self.resource_id is not None and self.has_states

    @property
    def is_to_be_committed(self):
        """Determine if the event payload resource is to be committed.

        :returns: True if the desired state has been populated, else False.
        """
        # (docstring above had a stray fourth quote in the original)
        return self.desired_state is not None

    @property
    def latest_state(self):
        """Returns the latest state for the event payload resource.

        :returns: If this payload has a desired_state its returned, otherwise
            latest_state is returned.
        """
        return (self.desired_state or
                super(DBEventPayload, self).latest_state)


class APIEventPayload(EventPayload):
    """The payload for API events."""

    def __init__(self, context, method_name, action,
                 metadata=None, request_body=None, states=None,
                 resource_id=None, collection_name=None):

        super(APIEventPayload, self).__init__(
            context, metadata=metadata, request_body=request_body,
            states=states, resource_id=resource_id)

        self.method_name = method_name
        self.action = action
        self.collection_name = collection_name
nilq/baby-python
python
"""Test for kernel functionality."""

import functools

import jax
import jax.numpy as jnp
import pytest
import pytest_cases

from probfindiff.utils import autodiff, kernel, kernel_zoo

# Each ``case_*`` function below is a pytest-cases "case": it returns a
# batched Gram-matrix callable to be exercised by the parametrized tests.


def case_exponentiated_quadratic():
    # Hand-written RBF kernel, batched via kernel.batch_gram.
    k = lambda x, y: jnp.exp(-(x - y).dot(x - y))
    return kernel.batch_gram(k)[0]


def case_exponentiated_quadratic_builtin():
    # Same kernel, taken from the library's kernel zoo.
    return kernel.batch_gram(kernel_zoo.exponentiated_quadratic)[0]


def case_differentiate_0():
    # k itself (index 0 of (k, Lk, LLk)).
    k = lambda x, y: (x - y).dot(x - y)
    return kernel.differentiate(k, L=autodiff.derivative)[0]


def case_differentiate_1():
    # Lk: kernel differentiated once in the first argument.
    k = lambda x, y: (x - y).dot(x - y)
    return kernel.differentiate(k, L=autodiff.derivative)[1]


def case_differentiate_2():
    # LLk: kernel differentiated in both arguments.
    k = lambda x, y: (x - y).dot(x - y)
    return kernel.differentiate(k, L=autodiff.derivative)[2]


def case_polynomial_builtin():
    # Polynomial kernel with fixed coefficients p = (1, 1, 1).
    k = functools.partial(kernel_zoo.polynomial, p=jnp.ones((3,)))
    return kernel.batch_gram(k)[0]


@pytest_cases.parametrize_with_cases("k", cases=".")
def test_vectorize_gram_shapes(k):
    # Batched kernels take (n, d) x-batch and a transposed (d, m) y-batch
    # and must return the (n, m) Gram matrix.
    xs = jnp.arange(8.0).reshape((4, 2))
    ys = jnp.arange(12.0).reshape((6, 2))
    assert k(xs, ys.T).shape == (4, 6)


@pytest.mark.parametrize("L, d, diffop_shape", ([jax.jacfwd, 2, (2,)],))
def test_kernel_batch_shape(L, d, diffop_shape):
    # Differentiating with a vector-valued operator (jacfwd) must prepend the
    # operator's output shape once for Lk and twice for LLk.
    k = kernel_zoo.exponentiated_quadratic
    k_batch, lk_batch, llk_batch = kernel.differentiate(k, L=L)

    num_xs, num_ys = 4, 3
    xs = jnp.arange(1, 1 + d * num_xs, dtype=float).reshape((num_xs, d))
    ys = jnp.arange(1, 1 + d * num_ys, dtype=float).reshape((num_ys, d))

    k_shape = (num_xs, num_ys)
    assert k_batch(xs, ys.T).shape == k_shape
    assert lk_batch(xs, ys.T).shape == diffop_shape + k_shape
    assert llk_batch(xs, ys.T).shape == diffop_shape + diffop_shape + k_shape
nilq/baby-python
python
#!/usr/bin/env python3
"""Minimal CGI script that echoes parts of the CGI environment as HTML."""
import json
import os

# CGI response header; the blank line(s) terminate the header section.
print("Content-type:text/html\r\n\r\n")
print("<title>Testing CGI</title>")

# Lab code
# Q1: dump the whole environment, raw and as JSON.
print(os.environ)
json_object = json.dumps(dict(os.environ))
print(json_object)

# Q2: show the query string, if the server set one.
# FIX: the original called str.format() on a %-style template
# ("<b>%20s</b>: %s<br>"), which has no {} placeholders and therefore
# printed the template verbatim; substitute with the % operator instead.
if "QUERY_STRING" in os.environ:
    print("<b>%20s</b>: %s<br>" % ("QUERY_STRING", os.environ["QUERY_STRING"]))

# Q3: show the requesting user agent, if present.
if "HTTP_USER_AGENT" in os.environ:
    print("<b>%20s</b>: %s<br>" % ("HTTP_USER_AGENT", os.environ["HTTP_USER_AGENT"]))
nilq/baby-python
python
#!/usr/bin/env python
"""
This example shows how to create shipments. The variables populated below
represents the minimum required values. You will need to fill all of these,
or risk seeing a SchemaValidationError exception thrown.

Near the bottom of the module, you'll see some different ways to handle the
label data that is returned with the reply.

NOTE: this example targets Python 2 (it uses print statements).
"""
import logging
import binascii
from example_config import CONFIG_OBJ
from fedex.services.ship_service import FedexProcessShipmentRequest

# Set this to the INFO level to see the response from Fedex printed in stdout.
logging.basicConfig(level=logging.INFO)

# This is the object that will be handling our tracking request.
# We're using the FedexConfig object from example_config.py in this dir.
shipment = FedexProcessShipmentRequest(CONFIG_OBJ)

# This is very generalized, top-level information.
# REGULAR_PICKUP, REQUEST_COURIER, DROP_BOX, BUSINESS_SERVICE_CENTER or STATION
shipment.RequestedShipment.DropoffType = 'REGULAR_PICKUP'

# See page 355 in WS_ShipService.pdf for a full list. Here are the common ones:
# STANDARD_OVERNIGHT, PRIORITY_OVERNIGHT, FEDEX_GROUND, FEDEX_EXPRESS_SAVER
shipment.RequestedShipment.ServiceType = 'PRIORITY_OVERNIGHT'

# What kind of package this will be shipped in.
# FEDEX_BOX, FEDEX_PAK, FEDEX_TUBE, YOUR_PACKAGING
shipment.RequestedShipment.PackagingType = 'FEDEX_PAK'

# Shipper contact info.
shipment.RequestedShipment.Shipper.Contact.PersonName = 'Sender Name'
shipment.RequestedShipment.Shipper.Contact.CompanyName = 'Some Company'
shipment.RequestedShipment.Shipper.Contact.PhoneNumber = '9012638716'

# Shipper address.
shipment.RequestedShipment.Shipper.Address.StreetLines = ['Address Line 1']
shipment.RequestedShipment.Shipper.Address.City = 'Herndon'
shipment.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'VA'
shipment.RequestedShipment.Shipper.Address.PostalCode = '20171'
shipment.RequestedShipment.Shipper.Address.CountryCode = 'US'
shipment.RequestedShipment.Shipper.Address.Residential = True

# Recipient contact info.
shipment.RequestedShipment.Recipient.Contact.PersonName = 'Recipient Name'
shipment.RequestedShipment.Recipient.Contact.CompanyName = 'Recipient Company'
shipment.RequestedShipment.Recipient.Contact.PhoneNumber = '9012637906'

# Recipient address
shipment.RequestedShipment.Recipient.Address.StreetLines = ['Address Line 1']
shipment.RequestedShipment.Recipient.Address.City = 'Herndon'
shipment.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'VA'
shipment.RequestedShipment.Recipient.Address.PostalCode = '20171'
shipment.RequestedShipment.Recipient.Address.CountryCode = 'US'
# This is needed to ensure an accurate rate quote with the response.
shipment.RequestedShipment.Recipient.Address.Residential = True
shipment.RequestedShipment.EdtRequestType = 'NONE'
shipment.RequestedShipment.ShippingChargesPayment.Payor.ResponsibleParty.AccountNumber = CONFIG_OBJ.account_number

# Who pays for the shipment?
# RECIPIENT, SENDER or THIRD_PARTY
shipment.RequestedShipment.ShippingChargesPayment.PaymentType = 'SENDER'

# Specifies the label type to be returned.
# LABEL_DATA_ONLY or COMMON2D
shipment.RequestedShipment.LabelSpecification.LabelFormatType = 'COMMON2D'

# Specifies which format the label file will be sent to you in.
# DPL, EPL2, PDF, PNG, ZPLII
shipment.RequestedShipment.LabelSpecification.ImageType = 'PNG'

# To use doctab stocks, you must change ImageType above to one of the
# label printer formats (ZPLII, EPL2, DPL).
# See documentation for paper types, there quite a few.
shipment.RequestedShipment.LabelSpecification.LabelStockType = 'PAPER_4X6'

# This indicates if the top or bottom of the label comes out of the
# printer first.
# BOTTOM_EDGE_OF_TEXT_FIRST or TOP_EDGE_OF_TEXT_FIRST
shipment.RequestedShipment.LabelSpecification.LabelPrintingOrientation = 'BOTTOM_EDGE_OF_TEXT_FIRST'

package1_weight = shipment.create_wsdl_object_of_type('Weight')
# Weight, in pounds.
package1_weight.Value = 1.0
package1_weight.Units = "LB"

package1 = shipment.create_wsdl_object_of_type('RequestedPackageLineItem')
package1.PhysicalPackaging = 'BOX'
package1.Weight = package1_weight

# Un-comment this to see the other variables you may set on a package.
#print package1

# This adds the RequestedPackageLineItem WSDL object to the shipment. It
# increments the package count and total weight of the shipment for you.
shipment.add_package(package1)

# If you'd like to see some documentation on the ship service WSDL, un-comment
# this line. (Spammy).
#print shipment.client

# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
#print shipment.RequestedShipment

# If you want to make sure that all of your entered details are valid, you
# can call this and parse it just like you would via send_request(). If
# shipment.response.HighestSeverity == "SUCCESS", your shipment is valid.
#shipment.send_validation_request()

# Fires off the request, sets the 'response' attribute on the object.
shipment.send_request()

# This will show the reply to your shipment being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the Fedex reply.
print shipment.response

# Here is the overall end result of the query.
print "HighestSeverity:", shipment.response.HighestSeverity
# Getting the tracking number from the new shipment.
print "Tracking #:", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].TrackingIds[0].TrackingNumber
# Net shipping costs.
print "Net Shipping Cost (US$):", shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].PackageRating.PackageRateDetails[0].NetCharge.Amount

# Get the label image in ASCII format from the reply. Note the list indices
# we're using. You'll need to adjust or iterate through these if your shipment
# has multiple packages.
ascii_label_data = shipment.response.CompletedShipmentDetail.CompletedPackageDetails[0].Label.Parts[0].Image

# Convert the ASCII data to binary.
label_binary_data = binascii.a2b_base64(ascii_label_data)

"""
This is an example of how to dump a label to a PNG file.
"""
# This will be the file we write the label out to.
png_file = open('example_shipment_label.png', 'wb')
png_file.write(label_binary_data)
png_file.close()

"""
This is an example of how to print the label to a serial printer. This will not
work for all label printers, consult your printer's documentation for more
details on what formats it can accept.
"""
# Pipe the binary directly to the label printer. Works under Linux
# without requiring PySerial. This WILL NOT work on other platforms.
#label_printer = open("/dev/ttyS0", "w")
#label_printer.write(label_binary_data)
#label_printer.close()

"""
This is a potential cross-platform solution using pySerial. This has not been
tested in a long time and may or may not work. For Windows, Mac, and other
platforms, you may want to go this route.
"""
#import serial
#label_printer = serial.Serial(0)
#print "SELECTED SERIAL PORT: "+ label_printer.portstr
#label_printer.write(label_binary_data)
#label_printer.close()
nilq/baby-python
python
#! /usr/bin/python
"""radargrab

Get the images and backgrounds associated with a storm event and save
them to a directory or something

NOTE(review): this script targets Python 2 (urllib2, the HTMLParser module).
"""
import urllib2
import httplib
import time
from xml.dom import minidom
from HTMLParser import HTMLParser

# URL templates for NWS RIDGE radar imagery; %s is the radar site id.
_n0r = "http://radar.weather.gov/ridge/RadarImg/N0R/%s/"
_overlay = "http://radar.weather.gov/ridge/Overlays/"


class Urlgen(object):
    """Builds the image-directory URLs for a given radar site."""

    def __init__(self, site):
        self.n0r = _n0r % (site)


class my_parser(HTMLParser):
    """Collects hrefs of DAX imagery links from a directory listing page."""

    def ready(self):
        # Must be called before feed(); initializes the link accumulator.
        self.linkray = []

    def handle_starttag(self, tag, attrs):
        if tag == "a":
            for i in attrs:
                if i[0] == "href":
                    if i[1].startswith("DAX"):
                        self.linkray.append(i[1])

    def nextlink(self):
        """Yield the collected links one at a time."""
        for i in self.linkray:
            yield i


if __name__ == "__main__":
    a = Urlgen("DAX")
    # FIX: this request was commented out in the original, leaving `blah`
    # undefined and crashing the next line with a NameError.
    blah = urllib2.urlopen(a.n0r)
    g = blah.read()
    myht = my_parser()
    myht.ready()
    myht.feed(g)
    for i in myht.nextlink():
        time.sleep(1)  # be polite to the radar server between requests
        pic = urllib2.urlopen(a.n0r + i)
        # with open(i,'wb') as f:  CLOBBERS FILES. FIX THIS BEFORE USING
        #     f.write(pic.read())
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Demonstrates driving a Dyalog APL interpreter from Python via ctypes and
# the JSON-based call interface.  NOTE: loads the native APL shared library
# and starts an interpreter as a module-level side effect.

import json
import platform
from ctypes import cdll, c_wchar_p, create_unicode_buffer
from platformpaths import sopaths, ridelibpaths

# Pick the shared library matching this machine's architecture and OS.
APL = cdll.LoadLibrary(sopaths[platform.architecture()[0]][platform.system()])


def InitAPL(runtime, WSargs):
    """Initialise the APL interpreter with workspace arguments.

    runtime: 1 for a runtime interpreter, per the Dyalog embedding API.
    WSargs: list of "NAME=value" workspace configuration strings.
    """
    __C_APL_WSargs_Binding_Params__ = cUnicodeList(WSargs)
    APL.Initialise(runtime, len(WSargs), __C_APL_WSargs_Binding_Params__)


def cUnicodeList(pylist):
    """Convert a list of Python strings to a ctypes wchar_t* array."""
    cUL = (c_wchar_p * len(pylist))()
    cUL[:] = pylist
    return cUL


def CallJSON(function, parms):
    """Call an APL function with JSON-encoded parameters.

    Returns (result_string, error_code); result is truncated at 256 wchars.
    """
    result = create_unicode_buffer('', 256)
    err = APL.CallJSON(function, json.dumps(parms), result)
    return (result.value, err)


def GetEnv(var):
    """Read a Dyalog environment/configuration variable.

    Returns (value_string, error_code); value is truncated at 256 wchars.
    """
    result = create_unicode_buffer('', 256)
    err = APL.GetEnv(var, result, 256)
    return (result.value, err)


def main():
    """Exercise the JSON call interface with a series of demo calls."""
    print("Loaded lib {0}".format(APL))
    print(CallJSON("Load", "sign.dyalog"))
    print(CallJSON("GetSign", [1, 24]))  # Call a function loaded from sign.dyalog
    # Call APL using statements formatted in JSON
    APLCode = {
        "Left": [1, 0, 1, 1, 0],
        "Function": "/",
        "Right": "APPLE"
    }
    print(CallJSON("Exec", APLCode))
    APLCode = {
        "Function": "+\\",
        "Right": [1, 2, 3, 4, 5]
    }
    print(CallJSON("Exec", APLCode))
    # Set a variable myvar in the APL workspace and assign its value to a
    # python variable pyvar
    pyvar = json.loads((CallJSON("Exec", "myvar←⍳9"))[0])
    print(pyvar)
    # Alter the variable pyvar in python
    for i in range(len(pyvar)):
        pyvar[i] += i
    print(pyvar)
    # Process the APL variable myvar in APL and return the result
    print(json.loads(CallJSON("Exec", "+/myvar")[0]))
    # Process the contents of pyvar in APL and return the result
    APLCode = {
        "Function": "+/",
        "Right": pyvar
    }
    print(json.loads(CallJSON("Exec", APLCode)[0]))
    # Query available workspace
    APLCode = {
        "Function": "⎕WA"
    }
    print(CallJSON("Exec", APLCode))
    # Query MAXWS
    print(GetEnv("MAXWS"))
    # Query SESSION_FILE
    print(GetEnv("SESSION_FILE"))
    # Query RIDE_INIT
    print(GetEnv("RIDE_INIT"))
    # Query DYALOG_RIDELIB
    print(GetEnv("DYALOG_RIDELIB"))
    print("Done")


# Workspace configuration handed to the interpreter at start-up.
WSargs = [
    "MAXWS=512Mb",
    "SESSION_FILE=JSON_APL.dse",
    "RIDE_INIT=SERVE:*:4502",
    "DYALOG_RIDELIB=" + ridelibpaths[platform.architecture()[0]][platform.system()]
]

InitAPL(1, WSargs)
CallJSON("Exec", "3502⌶1")  # Start RIDE
#input("You can now use RIDE to access the active workspace. Press Enter to continue...")
main()
nilq/baby-python
python
# -*- coding: utf-8 -*-
# pylint: disable=unidiomatic-typecheck
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# Module authors:
#   Georg Brandl <g.brandl@fz-juelich.de>
#   Björn Pedersen <bjoern.pedersen@frm2.tum.de>
#
# *****************************************************************************

"""Tests for the ftp upload module."""

import os
import tempfile
from io import BytesIO, StringIO

import pytest

from nicos.utils import createThread, ftp

try:
    from pyftpdlib.servers import ThreadedFTPServer
    from pyftpdlib.handlers import FTPHandler
    from pyftpdlib.filesystems import AbstractedFS
    from pyftpdlib.authorizers import DummyAuthorizer
except ImportError:
    # pyftpdlib is optional; fall back to sentinels so the module still
    # imports and the test below is skipped.
    ThreadedFTPServer = object
    FTPHandler = object
    AbstractedFS = object
    DummyAuthorizer = object

session_setup = None


class NamedBytesIO(BytesIO):
    """BytesIO that remembers its name and final content after close()."""

    def __init__(self, name):
        self.name = name
        BytesIO.__init__(self)

    def close(self):
        # Snapshot the content before the buffer is discarded.
        self.finalcontent = self.getvalue()
        return BytesIO.close(self)


class NamedStringIO(StringIO):
    """StringIO that remembers its name and final content after close()."""

    def __init__(self, name):
        self.name = name
        StringIO.__init__(self)

    def close(self):
        self.finalcontent = self.getvalue()
        return StringIO.close(self)


class DataStorage:
    """Records what the fake FTP server observed, for the assertions below."""

    used_username = None
    ofilename = None
    omode = None
    iofile = None
    chdirpath = None
    mkdirpath = None


ds = DataStorage()


class FTPTestHandler(FTPHandler):
    """FTP handler that records the login name into the shared DataStorage."""

    ds = ds

    def on_login(self, username):
        self.ds.used_username = username
        return FTPHandler.on_login(self, username)


class MyTestFS(AbstractedFS):
    """Virtual filesystem: files live in memory, directories are fake."""

    def open(self, filename, mode):
        "Overwritten to use in memory files"
        self.cmd_channel.ds.ofilename = filename
        self.cmd_channel.ds.omode = mode
        if 'b' in mode:
            self.cmd_channel.ds.iofile = NamedBytesIO(filename)
        else:
            self.cmd_channel.ds.iofile = NamedStringIO(filename)
        return self.cmd_channel.ds.iofile

    def chdir(self, path):
        "Path changes are virtual"
        if path == self.cmd_channel.ds.mkdirpath or path == '/':
            self.cmd_channel.ds.chdirpath = path
            return '/'

    def mkdir(self, path):
        "Do not create dirs"
        self.cmd_channel.ds.mkdirpath = path


@pytest.fixture(scope='function')
def ftpserver():
    """Provide a ftp server with virtual files"""
    handler = FTPTestHandler
    handler.abstracted_fs = MyTestFS
    authorizer = DummyAuthorizer()
    home = os.curdir
    authorizer.add_user('user', '12345', home, perm='elrmwM')
    handler.authorizer = authorizer
    server = ThreadedFTPServer(('localhost', 12345), handler)
    createThread('FTP', server.serve_forever)
    yield handler
    server.close_all()


TEST_CONTENT = 'A test\n'


@pytest.fixture(scope='function')
def upload(session):
    """Provide a file to use as upload"""
    fd, t = tempfile.mkstemp(suffix='.txt')
    os.write(fd, TEST_CONTENT.encode())
    # FIX: close the descriptor from mkstemp -- the original leaked it for
    # the lifetime of the test run.
    os.close(fd)
    yield t
    os.unlink(t)


@pytest.mark.skipif(ThreadedFTPServer is object,
                    reason='pyftpdlib package not installed')
def test_ftp(session, ftpserver, upload):
    # Point the ftp helper at the local fake server, upload, and verify
    # everything the server recorded along the way.
    ftp.FTP_SERVER = 'localhost'
    ftp.FTP_PORT = 12345
    ftp.FTP_USER = 'user'
    ftp.FTP_P = '12345'
    ftp.ftpUpload(upload)
    assert ds.used_username == 'user'
    assert ds.ofilename
    assert ds.omode == 'wb'
    assert ds.iofile
    assert ds.iofile.finalcontent.decode() == TEST_CONTENT
    assert ds.mkdirpath
    assert ds.chdirpath
python
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe

from frappe.utils.nestedset import NestedSet, get_root_of
from erpnext.utilities.transaction_base import delete_events
from frappe.model.document import Document


class Department(NestedSet):
	"""HR Department tree node; names carry the company abbreviation."""

	nsm_parent_field = 'parent_department'

	def autoname(self):
		# The tree root keeps its plain name; every other node is named
		# "<department> - <company abbr>".
		root = get_root_of("Department")
		if root and self.department_name != root:
			self.name = get_abbreviated_name(self.department_name, self.company)
		else:
			self.name = self.department_name

	def validate(self):
		# Default the parent to the tree root so the nested set stays intact.
		if not self.parent_department:
			root = get_root_of("Department")
			if root:
				self.parent_department = root

	def before_rename(self, old, new, merge=False):
		# renaming consistency with abbreviation
		if not frappe.db.get_value('Company', self.company, 'abbr') in new:
			new = get_abbreviated_name(new, self.company)

		return new

	def on_update(self):
		NestedSet.on_update(self)

	def on_trash(self):
		super(Department, self).on_trash()
		delete_events(self.doctype, self.name)


def on_doctype_update():
	frappe.db.add_index("Department", ["lft", "rgt"])


def get_abbreviated_name(name, company):
	"""Return '<name> - <company abbr>'."""
	abbr = frappe.db.get_value('Company', company, 'abbr')
	new_name = '{0} - {1}'.format(name, abbr)
	return new_name


@frappe.whitelist()
def get_children(doctype, parent=None, company=None, is_root=False):
	"""Return the child nodes of `parent` for the department tree view.

	SECURITY FIX: `parent` and `company` arrive from the client on this
	whitelisted endpoint; pass them as query parameters instead of
	interpolating them into the SQL string.
	NOTE(review): `doctype` is still interpolated as the table name (frappe
	convention) -- confirm it is validated upstream.
	"""
	if company == parent:
		condition = "name=%s"
		values = [get_root_of("Department")]
	elif company:
		condition = "parent_department=%s and company=%s"
		values = [parent, company]
	else:
		condition = "parent_department=%s"
		values = [parent]

	return frappe.db.sql("""
		select
			name as value,
			is_group as expandable
		from `tab{doctype}`
		where
			{condition}
		order by name""".format(doctype=doctype, condition=condition),
		values, as_dict=1)


@frappe.whitelist()
def add_node():
	from frappe.desk.treeview import make_tree_args

	args = frappe.form_dict
	args = make_tree_args(**args)

	# Treat "company as parent" as a root-level insert.
	if args.parent_department == args.company:
		args.parent_department = None

	frappe.get_doc(args).insert()
nilq/baby-python
python
class PartitionScheme(basestring): """ mbr|gpt|unknown Possible values: <ul> <li> "mbr" - Master Boot Record Partition Table Scheme., <li> "gpt" - GUID Partition Table Scheme., <li> "unknown" - Partition Scheme other than MBR or GPT or an unformatted LUN. </ul> """ @staticmethod def get_api_name(): return "partition-scheme"
nilq/baby-python
python
# -*- coding: utf-8 -*- """字典树实现敏感词过滤""" import codecs class TrieNode(object): def __init__(self, value=None): self._end = False self._child = dict() self._value = value def add(self, ch): if not self._child.has_key(ch): node = TrieNode(ch) self._child[ch] = node return node else: return self._child.get(ch) def is_end(self): return self._end def set_end(self, end): self._end = end def get_child(self, ch): if self._child.has_key(ch): return self._child.get(ch) else: return None def get_value(self): return self._value class TrieCheck(object): def __init__(self): self._root = TrieNode('') def add_word(self, text): node = self._root for i in text: node = node.add(i) node.set_end(True) def get_bad_word(self, text, offset=0): if not isinstance(text, str) or offset >= len(text): raise Exception('%s is not a string' % str(str)) i = offset text = unicode(text[offset:], 'utf-8') for ch in text[offset:]: node = self._root index = i node = node.get_child(ch) while node is not None: if node.is_end(): yield (i, ''.join(text[i:index + 1])) if len(text) == index + 1: break index += 1 node = node.get_child(text[index]) i += 1 def replace_bad_word(self, text, offset=0, mark='*'): if not isinstance(text, str) or offset >= len(text): raise Exception('%s is not a string' % str(str)) i = offset text = unicode(text[offset:], 'utf-8') li = list(text) for ch in text[offset:]: node = self._root index = i node = node.get_child(ch) while node is not None: if node.is_end(): for m in xrange(i, index + 1): li[m] = mark break if len(text) == index + 1: break index += 1 node = node.get_child(text[index]) i += 1 return ''.join(li) def load(path, checker): with codecs.open(path, 'r', encoding='utf-8-sig') as f: for line in f.readlines(): line = line.strip() if line.startswith(u'#'): continue checker.add_word(line) def main(): check = TrieCheck() load('sensitive.txt', check) print list(check.get_bad_word('反对一切血腥和色情游戏。')) print check.replace_bad_word('反对一切血腥和色情游戏。') if __name__ == '__main__': main()
nilq/baby-python
python
import json import logging logger = logging.getLogger() logger.setLevel(logging.INFO) def hello(event, context): logger.info(f"AWS Lambda processing message from GitHub: {event}.") body = { "message": "Your function executed successfully!", "input": event } response = { "statusCode": 200, "body": json.dumps(body) } return response
nilq/baby-python
python
import json
import os
import time

import psycopg2

POSTGRES_HOST = "database"
POSTGRES_USER = os.environ["POSTGRES_USER"]
POSTGRES_PASSWORD = os.environ["POSTGRES_PASSWORD"]
DB = "datamonitor"
DATA_DIR = "data"


def load_data():
    """Wait for the database, then load all GeoJSON boundary files once."""
    wait_for_db()
    # Only load once — skip if the table is already populated.
    if is_data_there():
        print("We found table with data already in there, nothing to do")
        return
    for path in get_all_json_paths(DATA_DIR):
        geojson = load_json_file(path)
        insert_record(geojson["properties"]["name"],
                      geojson["properties"]["admin_level"],
                      json.dumps(geojson["geometry"]))
    print("----- All data loaded, whoop!")


def get_all_json_paths(base_dir):
    """Return absolute paths of all entries under base_dir containing 'GeoJson'."""
    paths = [os.path.abspath(os.path.join(base_dir, name))
             for name in os.listdir(base_dir)]
    return [p for p in paths if 'GeoJson' in p]


def load_json_file(path):
    """Parse one JSON file and return the resulting object."""
    with open(path) as json_file:
        return json.load(json_file)


def insert_record(name, level, geo):
    """Insert one administrative boundary row (geometry given as GeoJSON text).

    Bug fix: ``connection``/``cursor`` were referenced in ``except``/``finally``
    even when ``connect()`` itself failed, raising UnboundLocalError; they are
    now pre-initialised and guarded.
    """
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(user=POSTGRES_USER,
                                      password=POSTGRES_PASSWORD,
                                      host=POSTGRES_HOST,
                                      database=DB)
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO administrative_boundaries (NAME, LEVEL, GEOG)
        VALUES (%s,%s, ST_GeomFromGeoJSON(%s)) """
        cursor.execute(postgres_insert_query, (name, level, geo))
        connection.commit()
        print(cursor.rowcount, "Record inserted successfully into table")
    except (Exception, psycopg2.Error) as error:
        print("Failed to insert record into table", error)
    finally:
        if cursor:
            cursor.close()
        if connection:
            connection.close()
            print("PostgreSQL connection is closed")


def is_data_there():
    """Return True if administrative_boundaries already contains rows."""
    connection = None
    cursor = None
    try:
        connection = psycopg2.connect(user=POSTGRES_USER,
                                      password=POSTGRES_PASSWORD,
                                      host=POSTGRES_HOST,
                                      database=DB)
        cursor = connection.cursor()
        cursor.execute("""SELECT * FROM administrative_boundaries""")
        return cursor.rowcount > 0
    finally:
        # Guarded: connect() may have failed before these were assigned.
        if cursor:
            cursor.close()
        if connection:
            connection.close()


def is_db_ready():
    """Return True if a connection to the database can be opened."""
    try:
        conn = psycopg2.connect("host={} user={} password={}".format(
            POSTGRES_HOST, POSTGRES_USER, POSTGRES_PASSWORD))
        conn.close()
        return True
    except psycopg2.OperationalError as ex:
        print("Connection failed: {0}".format(ex))
        return False


def wait_for_db():
    """Block until the database accepts connections; give up after 40 tries.

    Bug fix: the original loop condition (``not is_db_ready() or attempts >
    max_attempts``) never terminated while the database stayed down, so the
    give-up exception was unreachable.
    """
    max_attempts = 40
    for _attempt in range(max_attempts):
        if is_db_ready():
            return
        print("db not ready, waiting..")
        time.sleep(10)
    raise Exception("db not ready giving up")


if __name__ == "__main__":
    load_data()
nilq/baby-python
python
"""Tests for the vixen UI layer: VixenUI, Vixen, the project editor/viewer,
the UI error handler, and small vixen_ui utilities."""

import logging
import os
import mock
import unittest

import vixen
from vixen.processor import PythonFunctionFactory
from vixen.project import Project, TagInfo
from vixen.vixen import VixenUI, Vixen, UIErrorHandler, is_valid_tag
from vixen.vixen_ui import get_html, get_html_file

from vixen.tests.test_project import TestProjectBase


def test_is_valid_tag():
    """Punctuation is accepted; spaces and a leading underscore are rejected."""
    assert is_valid_tag('hello_world') == (True, 'OK')
    assert is_valid_tag('for') == (True, 'OK')
    assert is_valid_tag('hello;world') == (True, 'OK')
    assert is_valid_tag('hello-world') == (True, 'OK')
    assert is_valid_tag('hello+world') == (True, 'OK')
    assert is_valid_tag('hello*world') == (True, 'OK')
    assert is_valid_tag('hello:world') == (True, 'OK')
    assert (is_valid_tag('hello world') ==
            (False, 'Names cannot contain spaces'))
    assert (is_valid_tag('_world') ==
            (False, 'Names cannot start with _'))


class MockRecord():
    """Minimal stand-in for a log record; only .name and .message are used."""

    def __init__(self, name, message):
        self.name = name
        self.message = message


class TestUIErrorHandler(unittest.TestCase):
    """UIErrorHandler should notify the user except for favicon.ico noise."""

    def setUp(self):
        self.mock_ui = mock.MagicMock()
        self.h = UIErrorHandler(self.mock_ui)

    def test_emit_catches_general_error(self):
        # Given
        record = MockRecord(name='name', message='favicon.ico')
        # When
        self.h.emit(record)
        # Then
        self.assertTrue(self.mock_ui.notify_user.call_count, 1)

    def test_emit_catches_access_error_non_favicon(self):
        # Given
        record = MockRecord(name='tornado.access', message='hello')
        # When
        self.h.emit(record)
        # Then
        self.assertTrue(self.mock_ui.notify_user.call_count, 1)

    def test_emit_skips_favicon_errors(self):
        # Given
        record = MockRecord(name='tornado.access',
                            message='hello I have favicon.ico')
        # When
        self.h.emit(record)
        # Then
        self.mock_ui.notify_user.assert_not_called()

        # Given
        record = MockRecord(name='tornado.application',
                            message='hello I have favicon.ico')
        # When
        self.h.emit(record)
        # Then
        self.mock_ui.notify_user.assert_not_called()


class TestVixenBase(TestProjectBase):
    """Base fixture: redirect the project directory to a temp dir via mocks."""

    def setUp(self):
        super(TestVixenBase, self).setUp()
        patch_proj = mock.patch(
            'vixen.project.get_project_dir',
            mock.Mock(return_value=self._temp)
        )
        patch_proj.start()
        self.addCleanup(patch_proj.stop)
        patcher1 = mock.patch(
            'vixen.vixen.get_project_dir',
            mock.Mock(return_value=self._temp)
        )
        patcher1.start()
        self.addCleanup(patcher1.stop)


class TestVixen(TestVixenBase):
    """Tests for loading/saving projects through the Vixen model object."""

    def test_load(self):
        # Given
        vixen = Vixen()
        # When
        vixen.load()
        # Then
        self.assertEqual(len(vixen.projects), 1)
        self.assertEqual(vixen.projects[0].name, '__hidden__')
        # When
        p = Project(
            name='test', path=self.root, description='desc',
            extensions=['.py', '.txt']
        )
        p.scan()
        p.save()
        vixen.add(p)
        # Then
        self.assertEqual(len(vixen.projects), 1)
        self.assertEqual(vixen.projects[0].name, 'test')
        # Given
        vixen.save()
        vixen = Vixen()
        vixen.load()
        # Then
        self.assertEqual(len(vixen.projects), 1)
        p = vixen.projects[0]
        self.assertEqual(p.name, 'test')
        self.assertEqual(p.number_of_files, 0)
        # When
        p.load()
        # Then
        self.assertEqual(p.number_of_files, 5)
        m = p.get('root.txt')
        self.assertEqual(m.relpath, 'root.txt')
        self.assertEqual(m.type, 'text')
        self.assertEqual(len(m.tags), 1)


class TestProjectEditor(TestVixenBase):
    """Tests for the project editor: tags, extensions, processors, apply."""

    def setUp(self):
        super(TestProjectEditor, self).setUp()
        ui = VixenUI()
        p = Project(
            name='test', path=self.root, description='desc',
            extensions=['.py', '.txt']
        )
        p.scan()
        ui.vixen.projects.append(p)
        self.ui = ui
        self.p = p

    def test_ui_edit(self):
        # Given
        ui, p = self.ui, self.p
        editor = ui.editor
        # When
        ui.edit(p)
        # Then
        self.assertEqual(editor.project, p)
        self.assertEqual(editor.name, p.name)
        self.assertEqual(editor.description, p.description)
        result = [x.__dict__ for x in editor.tags]
        expected = [x.__dict__ for x in p.tags]
        self.assertEqual(result, expected)
        self.assertEqual(editor.extensions, p.extensions)

    def test_add_remove_tag(self):
        # Given
        ui = self.ui
        editor = ui.editor
        # When
        ui.edit(self.p)
        nt = len(editor.tags)
        editor.add_tag('tag1, tag2')
        # Then
        result = [x.name for x in editor.tags[nt:]]
        self.assertEqual(result, ['tag1', 'tag2'])
        # When
        editor.remove_tag(nt)
        # Then: 'tag1' was removed; 'tag2' follows the default 'completed'.
        self.assertEqual(editor.tags[-1].name, 'tag2')
        self.assertEqual(editor.tags[-2].name, 'completed')

    def test_add_bad_tag_shows_error(self):
        # Given
        ui = self.ui
        editor = ui.editor
        # When
        ui.edit(self.p)
        nt = len(editor.tags)
        n_msg = ui.message[-1] if ui.message else 0
        editor.add_tag('hello world, _hello')
        # Then
        self.assertEqual(len(editor.tags), nt)
        msg = ui.message
        self.assertEqual(msg[1:], ('error', n_msg + 1))
        self.assertTrue('Error in the following tag names' in msg[0])
        self.assertTrue('"hello world":' in msg[0])
        self.assertTrue('"_hello":' in msg[0])
        self.assertTrue('spaces' in msg[0].lower())
        self.assertTrue('cannot start with _' in msg[0].lower())

    def test_move_tag(self):
        # Given
        ui = self.ui
        editor = ui.editor

        def _get_tags():
            return [x.name for x in editor.tags]

        ui.edit(self.p)
        editor.add_tag('tag1, tag2')
        assert _get_tags() == ['completed', 'tag1', 'tag2']
        # When: moving the first tag up is a no-op.
        editor.move_tag_up(0)
        # Then
        assert _get_tags() == ['completed', 'tag1', 'tag2']
        # When
        editor.move_tag_up(1)
        # Then
        assert _get_tags() == ['tag1', 'completed', 'tag2']
        # When
        editor.move_tag_up(2)
        # Then
        assert _get_tags() == ['tag1', 'tag2', 'completed']
        # When: moving the last tag down is a no-op.
        editor.move_tag_down(2)
        # Then
        assert _get_tags() == ['tag1', 'tag2', 'completed']
        # When
        editor.move_tag_down(1)
        # Then
        assert _get_tags() == ['tag1', 'completed', 'tag2']
        # When
        editor.move_tag_down(0)
        # Then
        assert _get_tags() == ['completed', 'tag1', 'tag2']

    def test_add_remove_extension(self):
        # Given
        ui = self.ui
        editor = ui.editor
        # When
        ui.edit(self.p)
        editor.add_extension('.c, .h')
        # Then
        self.assertEqual(
            sorted(editor.extensions), ['.c', '.h', '.py', '.txt']
        )
        # When
        editor.remove_extension(3)
        self.assertEqual(
            sorted(editor.extensions), ['.c', '.py', '.txt']
        )

    def test_find_extensions(self):
        # Given
        ui = self.ui
        editor = ui.editor
        # When
        ui.edit(self.p)
        editor.find_extensions()
        # Then
        self.assertSequenceEqual(
            sorted(editor.available_exts), ['.py', '.txt']
        )

    def test_apply(self):
        # Given
        ui = self.ui
        editor = ui.editor
        p = self.p
        # When
        ui.edit(p)
        editor.name = 'xxx'
        editor.description = 'xxx'
        editor.extensions = ['.txt']
        editor.add_tag('tag1')
        editor.apply()
        # Then
        self.assertEqual(p.name, 'xxx')
        self.assertEqual(p.description, 'xxx')
        self.assertEqual(p.extensions, ['.txt'])
        self.assertEqual(p.tags[-1].name, 'tag1')

    def test_check_processor(self):
        # Given
        ui = self.ui
        editor = ui.editor
        p = self.p
        # When
        ui.edit(p)
        editor.add_processor('python')
        # Then
        self.assertEqual(editor.processors[-1].name, 'PythonFunctionFactory')
        # When: run the processor's test job on one file and wait for it.
        proc = editor.processors[-1]
        from textwrap import dedent
        code = dedent("""
        def process(relpath, media, dest):
            media.tags['completed'] = True
        """)
        proc.code = code
        editor.check_processor(proc)
        editor.test_job[0].thread.join()
        # Then
        key = list(p.keys())[0]
        m = p.get(key)
        self.assertEqual(m.tags['completed'], True)
        # When
        editor.remove_processor(0)
        # Then
        self.assertEqual(len(editor.processors), 0)


class TestVixenUI(TestVixenBase):
    """Tests for the top-level VixenUI facade: messages, logging, projects."""

    def test_miscellaneous(self):
        # Given/When
        ui = VixenUI()
        # Then
        self.assertEqual(ui.version, vixen.__version__)
        fname = ui.docs
        self.assertTrue(
            os.path.basename(fname) in ['index.html', 'vixen.readthedocs.io']
        )
        # When
        ui.mode = 'view'
        ui.home()
        # Then
        self.assertEqual(ui.mode, 'edit')
        # When
        ctx = ui.get_context()
        # Then
        self.assertEqual(sorted(ctx.keys()),
                         ['editor', 'ui', 'viewer', 'vixen'])

    def test_messages(self):
        # Given.
        ui = VixenUI()
        # When
        ui.error('ERROR')
        # Then
        self.assertEqual(ui.message, ('ERROR', 'error', 0))
        # When
        ui.info('INFO')
        # Then
        self.assertEqual(ui.message, ('INFO', 'info', 1))
        # When
        ui.success('SUCCESS')
        # Then
        self.assertEqual(ui.message, ('SUCCESS', 'success', 2))

    @mock.patch('vixen.vixen.logger')
    def test_vixen_ui_log(self, logger):
        # Given
        ui = VixenUI()
        # When
        ui.log('msg', 'info')
        # Then
        logger.info.assert_called_with('msg')
        # When
        ui.log('err', 'error')
        # Then
        logger.error.assert_called_with('err')
        # When: unknown kinds are logged as an error plus the original message.
        ui.log('err', 'blah')
        # Then
        logger.error.assert_called_with('Unknown message kind: %s', 'blah')
        logger.info.assert_called_with('err')

    def test_logging_handler_is_setup_correctly(self):
        # Given
        ui = VixenUI()
        # When
        m = mock.MagicMock()
        with mock.patch('vixen.vixen.logging.getLogger', return_value=m) as p:
            ui.setup_logging_handler()
        # Then
        p.assert_called_once_with()
        self.assertEqual(m.addHandler.call_count, 1)
        args = m.addHandler.call_args[0]
        obj = args[0]
        self.assertTrue(isinstance(obj, UIErrorHandler))
        self.assertEqual(obj.level, logging.ERROR)
        self.assertEqual(obj.ui, ui)

    def test_add_remove_project_works(self):
        # Given
        ui = VixenUI()
        vixen = ui.vixen
        self.assertEqual(len(vixen.projects), 1)
        # When
        ui.add_project()
        # Then
        self.assertEqual(len(vixen.projects), 1)
        p = vixen.projects[-1]
        self.assertEqual(p.name, 'Project1')
        self.assertEqual(
            vixen.save_file, os.path.join(self._temp, 'projects.json')
        )
        # When
        ui.remove(p)
        # Then
        self.assertEqual(len(vixen.projects), 0)

    def test_copy_project_works(self):
        # Setup
        # Create a new project, scan it, save it and re-load it for the test.
        ui = VixenUI()
        vixen = ui.vixen
        ui.add_project()
        p = vixen.projects[-1]
        p.add_tags([TagInfo(name='sometag', type='text')])
        p.path = self.root
        p.scan()
        p.save()
        vixen.save()
        self.assertEqual(len(vixen.projects), 1)

        # Given
        ui = VixenUI()
        vixen = ui.vixen
        self.assertEqual(len(vixen.projects), 1)
        # When
        ui.copy_project(vixen.projects[0])
        # Then
        self.assertEqual(len(vixen.projects), 2)
        p = vixen.projects[-1]
        self.assertEqual(p.name, 'Project1 copy')
        self.assertEqual(len(p.tags), 2)
        self.assertEqual(p.tags[0].name, 'completed')
        self.assertEqual(p.tags[0].type, 'bool')
        self.assertEqual(p.tags[1].name, 'sometag')
        self.assertEqual(p.tags[1].type, 'text')

    def test_search_string_updates_search_completed(self):
        # Given
        ui = VixenUI()
        vixen = ui.vixen
        ui.add_project()
        p = vixen.projects[0]
        p.path = self.root
        p.scan()
        # When
        ui.view(p)
        self.assertEqual(ui.viewer.active_pager, ui.viewer.pager)
        ui.viewer.search = 'root.txt'
        # Then
        self.assertEqual(ui.viewer.search_completed, False)
        self.assertEqual(ui.viewer.active_pager, ui.viewer.search_pager)
        # When
        ui.viewer.do_search()
        # Then
        self.assertEqual(ui.viewer.search_completed, True)
        # When
        ui.viewer.search = 'xxx'
        # Then
        self.assertEqual(ui.viewer.search_completed, False)

    def test_process_uses_search_results(self):
        # Given
        ui = VixenUI()
        vixen = ui.vixen
        ui.add_project()
        p = vixen.projects[0]
        p.path = self.root
        p.scan()
        from textwrap import dedent
        code = dedent("""
        def process(relpath, media, dest):
            media.tags['completed'] = True
        """)
        p.processors = [PythonFunctionFactory(code=code, dest=self.root)]
        # When: with an active search, only matching media are processed.
        ui.view(p)
        ui.viewer.search = 'root.txt'
        ui.viewer.do_search()
        ui.process(p)
        # Then
        self.assertEqual(p.get('root.txt').tags['completed'], True)
        self.assertEqual(p.get('hello.py').tags['completed'], False)

        # When: without a search, every media file is processed.
        ui.viewer.clear_search()
        ui.process(p)
        # Then
        for m in p.keys():
            self.assertEqual(p.get(m).tags['completed'], True)

    def test_viewer_rescan(self):
        # Given
        ui = VixenUI()
        vixen = ui.vixen
        ui.add_project()
        p = vixen.projects[0]
        p.path = self.root
        p.scan()
        viewer = ui.viewer
        ui.view(p)
        # When
        viewer.rescan()
        # Then
        self.assertEqual(viewer.current_dir, p.root)


class TestProjectViewer(TestVixenBase):
    """Tests for the project viewer's pager behavior."""

    def setUp(self):
        super(TestProjectViewer, self).setUp()
        ui = VixenUI()
        p = Project(
            name='test', path=self.root, description='desc',
            extensions=['.py', '.txt']
        )
        p.scan()
        ui.vixen.projects.append(p)
        self.ui = ui
        self.p = p

    def test_rescan_handles_removed_files(self):
        # Given
        ui, p = self.ui, self.p
        viewer = ui.viewer
        ui.view(p)
        self.assertEqual(p.number_of_files, 5)
        self.assertEqual(len(viewer.pager.data), 4)
        os.remove(os.path.join(self.root, 'root.txt'))
        # When
        viewer.rescan()
        # Then
        self.assertEqual(p.number_of_files, 4)
        self.assertEqual(len(viewer.pager.data), 3)
        names = [x.name for x in viewer.pager.data]
        self.assertTrue('root.txt' not in names)


class TestVixenUtils(unittest.TestCase):
    """Tests for the vixen_ui helper functions."""

    def test_get_html_file(self):
        r = os.path.abspath(get_html_file())
        self.assertTrue(os.path.exists(r))
        self.assertTrue(os.path.isfile(r))

    def test_get_html(self):
        # Given/When
        data = get_html(get_html_file())
        # Then.
        self.assertEqual(data.count('$HTML_ROOT'), 0)
        self.assertEqual(data.count('$ROOT'), 0)
nilq/baby-python
python
import yaml import os from pathlib import Path from utils.dict_wrapper import DictWrapper class EvaluationConfiguration: ''' Represents the configuration parameters for running the evaluation process ''' def __init__(self, path): ''' Initializes the configuration with contents from the specified file :param path: path to the configuration file in json format ''' # Loads configuration file with open(path) as f: config = yaml.load(f, Loader=yaml.FullLoader) self.config = DictWrapper(config) def get_config(self): return self.config def check_data_config(self, data_config): if not os.path.isdir(data_config["data_root"]): raise Exception(f"Data directory {data_config['data_root']} does not exist") def check_config(self): ''' Raises an exception if the configuration is invalid and creates auxiliary fields :return: ''' self.check_data_config(self.config["reference_data"]) self.check_data_config(self.config["generated_data"]) self.config["logging"]["output_directory"] = os.path.join(self.config["logging"]["output_root"], self.config["logging"]["run_name"]) self.config["logging"]["output_images_directory"] = os.path.join(self.config["logging"]["output_directory"], "images") self.config["logging"]["evaluation_images_directory"] = os.path.join(self.config["logging"]["output_directory"], "evaluation_images") return True def create_directory_structure(self): ''' Creates directories as required by the configuration file :return: ''' Path(self.config["logging"]["output_directory"]).mkdir(parents=True, exist_ok=True) Path(self.config["logging"]["output_images_directory"]).mkdir(parents=True, exist_ok=True) Path(self.config["logging"]["evaluation_images_directory"]).mkdir(parents=True, exist_ok=True)
nilq/baby-python
python
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for the main module."""

import datetime
import logging
logging.basicConfig(format='%(levelname)-8s %(filename)s] %(message)s')

import os
import shutil
import sys
import time
import tempfile
import unittest
import urllib
import xml.sax

import testutil
testutil.fix_path()

from google.appengine import runtime
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.runtime import apiproxy_errors

import async_apiproxy
import dos
import feed_diff
import main
import urlfetch_test_stub

import mapreduce.control
import mapreduce.model

################################################################################
# For convenience

sha1_hash = main.sha1_hash
get_hash_key_name = main.get_hash_key_name

OTHER_STRING = '/~one:two/&='
FUNNY = '/CaSeSeNsItIvE'
# The three FUNNY_* constants are the same path as unicode escapes, raw
# UTF-8 bytes, and its percent-encoded IRI form respectively.
FUNNY_UNICODE = u'/blah/\u30d6\u30ed\u30b0\u8846'
FUNNY_UTF8 = '/blah/\xe3\x83\x96\xe3\x83\xad\xe3\x82\xb0\xe8\xa1\x86'
FUNNY_IRI = '/blah/%E3%83%96%E3%83%AD%E3%82%B0%E8%A1%86'

################################################################################

class UtilityFunctionTest(unittest.TestCase):
  """Tests for utility functions."""

  def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()

  def testSha1Hash(self):
    self.assertEquals('09f2c66851e75a7800748808ae7d855869b0c9d7',
                      main.sha1_hash('this is my test data'))

  def testGetHashKeyName(self):
    self.assertEquals('hash_54f6638eb67ad389b66bbc3fa65f7392b0c2d270',
                      get_hash_key_name('and now testing a key'))

  def testSha1Hmac(self):
    self.assertEquals('d95abcea4b2a8b0219da7cb04c261639a7bd8c94',
                      main.sha1_hmac('secrat', 'mydatahere'))

  def testIsValidUrl(self):
    self.assertTrue(main.is_valid_url(
        'https://example.com:443/path/to?handler=1&b=2'))
    self.assertTrue(main.is_valid_url('http://example.com:8080'))
    self.assertFalse(main.is_valid_url('httpm://example.com'))
    self.assertFalse(main.is_valid_url('http://example.com:9999'))
    self.assertFalse(main.is_valid_url('http://example.com/blah#bad'))

  def testNormalizeIri(self):
    uri_with_port = u'http://foo.com:9120/url/with/a/port'
    self.assertEquals(uri_with_port, main.normalize_iri(uri_with_port))

    uri_with_query = u'http://foo.com:9120/url?doh=this&port=1'
    self.assertEquals(uri_with_query, main.normalize_iri(uri_with_query))

    uri_with_funny = u'http://foo.com/~myuser/@url!with#nice;delimiter:chars'
    self.assertEquals(uri_with_funny, main.normalize_iri(uri_with_funny))

    not_unicode = 'http://foo.com:9120/url/with/a/port'
    self.assertEquals(not_unicode, main.normalize_iri(not_unicode))

    uri_with_port = u'http://foo.com:9120/url/with/a/port'
    self.assertEquals(uri_with_port, main.normalize_iri(uri_with_port))

    # Unicode label should normalize to its percent-encoded IRI form.
    good_iri = (
        'http://www.google.com/reader/public/atom/user'
        '/07256788297315478906/label/%E3%83%96%E3%83%AD%E3%82%B0%E8%A1%86')
    iri = (u'http://www.google.com/reader/public/atom/user'
           u'/07256788297315478906/label/\u30d6\u30ed\u30b0\u8846')
    self.assertEquals(good_iri, main.normalize_iri(iri))

################################################################################

class TestWorkQueueHandler(webapp.RequestHandler):
  """Trivial handler whose GET is guarded by @work_queue_only."""

  @main.work_queue_only
  def get(self):
    self.response.out.write('Pass')


class WorkQueueOnlyTest(testutil.HandlerTestBase):
  """Tests the @work_queue_only decorator."""

  handler_class = TestWorkQueueHandler

  def testNotLoggedIn(self):
    os.environ['SERVER_SOFTWARE'] = 'Production'
    self.handle('get')
    # Anonymous production requests are redirected (to login).
    self.assertEquals(302, self.response_code())

  def testCronHeader(self):
    os.environ['SERVER_SOFTWARE'] = 'Production'
    os.environ['HTTP_X_APPENGINE_CRON'] = 'True'
    try:
      self.handle('get')
      self.assertEquals('Pass', self.response_body())
    finally:
      del os.environ['HTTP_X_APPENGINE_CRON']

  def testDevelopmentEnvironment(self):
    os.environ['SERVER_SOFTWARE'] = 'Development/1.0'
    self.handle('get')
    self.assertEquals('Pass', self.response_body())

  def testAdminUser(self):
    os.environ['SERVER_SOFTWARE'] = 'Production'
    os.environ['USER_EMAIL'] = 'foo@example.com'
    os.environ['USER_IS_ADMIN'] = '1'
    try:
      self.handle('get')
      self.assertEquals('Pass', self.response_body())
    finally:
      del os.environ['USER_IS_ADMIN']

  def testNonAdminUser(self):
    os.environ['SERVER_SOFTWARE'] = 'Production'
    os.environ['USER_EMAIL'] = 'foo@example.com'
    os.environ['USER_IS_ADMIN'] = '0'
    try:
      self.handle('get')
      self.assertEquals(401, self.response_code())
    finally:
      del os.environ['USER_IS_ADMIN']

  def testTaskQueueHeader(self):
    os.environ['SERVER_SOFTWARE'] = 'Production'
    os.environ['HTTP_X_APPENGINE_TASKNAME'] = 'Foobar'
    try:
      self.handle('get')
      self.assertEquals('Pass', self.response_body())
    finally:
      del os.environ['HTTP_X_APPENGINE_TASKNAME']

################################################################################

KnownFeed = main.KnownFeed

class KnownFeedTest(unittest.TestCase):
  """Tests for the KnownFeed model class."""

  def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()
    self.topic = 'http://example.com/my-topic'
    self.topic2 = 'http://example.com/my-topic2'
    self.topic3 = 'http://example.com/my-topic3'

  def testCreateAndDelete(self):
    known_feed = KnownFeed.create(self.topic)
    self.assertEquals(self.topic, known_feed.topic)
    db.put(known_feed)

    found_feed = db.get(KnownFeed.create_key(self.topic))
    self.assertEquals(found_feed.key(), known_feed.key())
    self.assertEquals(found_feed.topic, known_feed.topic)

    db.delete(KnownFeed.create_key(self.topic))
    self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)

  def testCheckExistsMissing(self):
    self.assertEquals([], KnownFeed.check_exists([]))
    self.assertEquals([], KnownFeed.check_exists([self.topic]))
    self.assertEquals([], KnownFeed.check_exists(
        [self.topic, self.topic2, self.topic3]))
    self.assertEquals([], KnownFeed.check_exists(
        [self.topic, self.topic, self.topic, self.topic2, self.topic2]))

  def testCheckExists(self):
    KnownFeed.create(self.topic).put()
    KnownFeed.create(self.topic2).put()
    KnownFeed.create(self.topic3).put()
    self.assertEquals([self.topic], KnownFeed.check_exists([self.topic]))
    self.assertEquals([self.topic2], KnownFeed.check_exists([self.topic2]))
    self.assertEquals([self.topic3], KnownFeed.check_exists([self.topic3]))
    self.assertEquals(
        sorted([self.topic, self.topic2, self.topic3]),
        sorted(KnownFeed.check_exists([self.topic, self.topic2, self.topic3])))
    # Duplicate inputs must not produce duplicate results.
    self.assertEquals(
        sorted([self.topic, self.topic2]),
        sorted(KnownFeed.check_exists(
            [self.topic, self.topic, self.topic, self.topic2, self.topic2])))

  def testCheckExistsSubset(self):
    KnownFeed.create(self.topic).put()
    KnownFeed.create(self.topic3).put()
    self.assertEquals(
        sorted([self.topic, self.topic3]),
        sorted(KnownFeed.check_exists([self.topic, self.topic2, self.topic3])))
    self.assertEquals(
        sorted([self.topic, self.topic3]),
        sorted(KnownFeed.check_exists(
            [self.topic, self.topic, self.topic,
             self.topic2, self.topic2,
             self.topic3, self.topic3])))

  def testRecord(self):
    """Tests the method for recording a feed's identity."""
    KnownFeed.record(self.topic)
    task = testutil.get_tasks(main.MAPPINGS_QUEUE, index=0, expected_count=1)
    self.assertEquals(self.topic, task['params']['topic'])

################################################################################

KnownFeedIdentity = main.KnownFeedIdentity

class KnownFeedIdentityTest(unittest.TestCase):
  """Tests for the KnownFeedIdentity class."""

  def setUp(self):
    testutil.setup_for_testing()
    self.feed_id = 'my;feed;id'
    self.feed_id2 = 'my;feed;id;2'
    self.topic = 'http://example.com/foobar1'
    self.topic2 = 'http://example.com/meep2'
    self.topic3 = 'http://example.com/stuff3'
    self.topic4 = 'http://example.com/blah4'
    self.topic5 = 'http://example.com/woot5'
    self.topic6 = 'http://example.com/neehaw6'

  def testUpdate(self):
    """Tests the update method."""
    feed = KnownFeedIdentity.update(self.feed_id, self.topic)
    feed_key = KnownFeedIdentity.create_key(self.feed_id)
    self.assertEquals(feed_key, feed.key())
    self.assertEquals(self.feed_id, feed.feed_id)
    self.assertEquals([self.topic], feed.topics)

    feed = KnownFeedIdentity.update(self.feed_id, self.topic2)
    self.assertEquals(self.feed_id, feed.feed_id)
    self.assertEquals([self.topic, self.topic2], feed.topics)

  def testRemove(self):
    """Tests the remove method."""
    # Removing a mapping from an unknown ID does nothing.
    self.assertTrue(KnownFeedIdentity.remove(self.feed_id, self.topic) is None)

    KnownFeedIdentity.update(self.feed_id, self.topic)
    KnownFeedIdentity.update(self.feed_id, self.topic2)

    # Removing an unknown mapping for a known ID does nothing.
    self.assertTrue(KnownFeedIdentity.remove(self.feed_id, self.topic3) is None)

    # Removing from a known ID returns the updated copy.
    feed = KnownFeedIdentity.remove(self.feed_id, self.topic2)
    self.assertEquals([self.topic], feed.topics)

    # Removing a second time does nothing.
    self.assertTrue(KnownFeedIdentity.remove(self.feed_id, self.topic2) is None)
    feed = KnownFeedIdentity.get(KnownFeedIdentity.create_key(self.feed_id))
    self.assertEquals([self.topic], feed.topics)

    # Removing the last one will delete the mapping completely.
    self.assertTrue(KnownFeedIdentity.remove(self.feed_id, self.topic) is None)
    feed = KnownFeedIdentity.get(KnownFeedIdentity.create_key(self.feed_id))
    self.assertTrue(feed is None)

  def testDeriveAdditionalTopics(self):
    """Tests the derive_additional_topics method."""
    # topic, topic2 -> feed_id
    for topic in (self.topic, self.topic2):
      feed = KnownFeed.create(topic)
      feed.feed_id = self.feed_id
      feed.put()
    KnownFeedIdentity.update(self.feed_id, self.topic)
    KnownFeedIdentity.update(self.feed_id, self.topic2)

    # topic3, topic4 -> feed_id2
    for topic in (self.topic3, self.topic4):
      feed = KnownFeed.create(topic)
      feed.feed_id = self.feed_id2
      feed.put()
    KnownFeedIdentity.update(self.feed_id2, self.topic3)
    KnownFeedIdentity.update(self.feed_id2, self.topic4)

    # topic5 -> KnownFeed missing; should not be expanded at all
    # topic6 -> KnownFeed where feed_id = None; default to simple mapping
    KnownFeed.create(self.topic6).put()

    # Put missing topics first to provoke potential ordering errors in the
    # iteration order of the retrieval loop.
    result = KnownFeedIdentity.derive_additional_topics([
        self.topic5, self.topic6, self.topic,
        self.topic2, self.topic3, self.topic4])

    expected = {
        'http://example.com/foobar1':
            set(['http://example.com/foobar1', 'http://example.com/meep2']),
        'http://example.com/meep2':
            set(['http://example.com/foobar1', 'http://example.com/meep2']),
        'http://example.com/blah4':
            set(['http://example.com/blah4', 'http://example.com/stuff3']),
        'http://example.com/neehaw6':
            set(['http://example.com/neehaw6']),
        'http://example.com/stuff3':
            set(['http://example.com/blah4', 'http://example.com/stuff3'])
    }
    self.assertEquals(expected, result)

  def testDeriveAdditionalTopicsWhitespace(self):
    """Tests when the feed ID contains whitespace it is handled correctly.

    This test is only required because the 'feed_identifier' module did not
    properly strip whitespace in its initial version.
    """
    # topic -> KnownFeed whose stored feed_id has no surrounding whitespace
    feed = KnownFeed.create(self.topic)
    feed.feed_id = self.feed_id
    feed.put()
    KnownFeedIdentity.update(self.feed_id, self.topic)

    # topic2 -> KnownFeed whose stored feed_id is wrapped in whitespace;
    # should still map to the same stripped feed_id as topic.
    feed = KnownFeed.create(self.topic2)
    feed.feed_id = '\n %s \n \n' % self.feed_id
    feed.put()
    KnownFeedIdentity.update(self.feed_id, self.topic2)

    # topic3 -> KnownFeed where feed_id = all whitespace
    feed = KnownFeed.create(self.topic3)
    feed.feed_id = '\n \n \n'
    feed.put()

    result = KnownFeedIdentity.derive_additional_topics([
        self.topic, self.topic2, self.topic3])
    expected = {
        'http://example.com/foobar1':
            set(['http://example.com/foobar1', 'http://example.com/meep2']),
        'http://example.com/stuff3':
            set(['http://example.com/stuff3']),
    }
    self.assertEquals(expected, result)

  def testKnownFeedIdentityTooLarge(self):
    """Tests when the fan-out expansion of the KnownFeedIdentity is too big."""
    feed = KnownFeedIdentity.update(self.feed_id, self.topic)
    KnownFeedIdentity.update(
        self.feed_id,
        'http://super-extra-long-topic/' + ('a' * 10000000))
    # Doesn't explode and the update time stays the same.
new_feed = db.get(feed.key()) self.assertEquals(feed.last_update, new_feed.last_update) ################################################################################ Subscription = main.Subscription class SubscriptionTest(unittest.TestCase): """Tests for the Subscription model class.""" def setUp(self): """Sets up the test harness.""" testutil.setup_for_testing() self.callback = 'http://example.com/my-callback-url' self.callback2 = 'http://example.com/second-callback-url' self.callback3 = 'http://example.com/third-callback-url' self.topic = 'http://example.com/my-topic-url' self.topic2 = 'http://example.com/second-topic-url' self.token = 'token' self.secret = 'my secrat' self.callback_key_map = dict( (Subscription.create_key_name(cb, self.topic), cb) for cb in (self.callback, self.callback2, self.callback3)) def get_subscription(self): """Returns the subscription for the test callback and topic.""" return Subscription.get_by_key_name( Subscription.create_key_name(self.callback, self.topic)) def verify_tasks(self, next_state, verify_token, secret, **kwargs): """Verifies the required tasks have been submitted. Args: next_state: The next state the Subscription should have. verify_token: The token that should be used to confirm the subscription action. **kwargs: Passed to testutil.get_tasks(). 
""" task = testutil.get_tasks(main.SUBSCRIPTION_QUEUE, **kwargs) self.assertEquals(next_state, task['params']['next_state']) self.assertEquals(verify_token, task['params']['verify_token']) self.assertEquals(secret, task['params']['secret']) def testRequestInsert_defaults(self): now_datetime = datetime.datetime.now() now = lambda: now_datetime lease_seconds = 1234 self.assertTrue(Subscription.request_insert( self.callback, self.topic, self.token, self.secret, lease_seconds=lease_seconds, now=now)) self.verify_tasks(Subscription.STATE_VERIFIED, self.token, self.secret, expected_count=1, index=0) self.assertFalse(Subscription.request_insert( self.callback, self.topic, self.token, self.secret, lease_seconds=lease_seconds, now=now)) self.verify_tasks(Subscription.STATE_VERIFIED, self.token, self.secret, expected_count=2, index=1) sub = self.get_subscription() self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state) self.assertEquals(self.callback, sub.callback) self.assertEquals(sha1_hash(self.callback), sub.callback_hash) self.assertEquals(self.topic, sub.topic) self.assertEquals(sha1_hash(self.topic), sub.topic_hash) self.assertEquals(self.token, sub.verify_token) self.assertEquals(self.secret, sub.secret) self.assertEquals(0, sub.confirm_failures) self.assertEquals(now_datetime + datetime.timedelta(seconds=lease_seconds), sub.expiration_time) self.assertEquals(lease_seconds, sub.lease_seconds) def testInsert_defaults(self): now_datetime = datetime.datetime.now() now = lambda: now_datetime lease_seconds = 1234 self.assertTrue(Subscription.insert( self.callback, self.topic, self.token, self.secret, lease_seconds=lease_seconds, now=now)) self.assertFalse(Subscription.insert( self.callback, self.topic, self.token, self.secret, lease_seconds=lease_seconds, now=now)) testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=0) sub = self.get_subscription() self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state) 
self.assertEquals(self.callback, sub.callback) self.assertEquals(sha1_hash(self.callback), sub.callback_hash) self.assertEquals(self.topic, sub.topic) self.assertEquals(sha1_hash(self.topic), sub.topic_hash) self.assertEquals(self.token, sub.verify_token) self.assertEquals(self.secret, sub.secret) self.assertEquals(0, sub.confirm_failures) self.assertEquals(now_datetime + datetime.timedelta(seconds=lease_seconds), sub.expiration_time) self.assertEquals(lease_seconds, sub.lease_seconds) def testInsertOverride(self): """Tests that insert will override the existing Subscription fields.""" self.assertTrue(Subscription.request_insert( self.callback, self.topic, self.token, self.secret)) self.assertEquals(Subscription.STATE_NOT_VERIFIED, self.get_subscription().subscription_state) second_token = 'second token' second_secret = 'second secret' sub = self.get_subscription() sub.confirm_failures = 123 sub.put() self.assertFalse(Subscription.insert( self.callback, self.topic, second_token, second_secret)) sub = self.get_subscription() self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state) self.assertEquals(0, sub.confirm_failures) self.assertEquals(second_token, sub.verify_token) self.assertEquals(second_secret, sub.secret) self.verify_tasks(Subscription.STATE_VERIFIED, self.token, self.secret, expected_count=1, index=0) def testInsert_expiration(self): """Tests that the expiration time is updated on repeated insert() calls.""" self.assertTrue(Subscription.insert( self.callback, self.topic, self.token, self.secret)) sub = Subscription.all().get() expiration1 = sub.expiration_time time.sleep(0.5) self.assertFalse(Subscription.insert( self.callback, self.topic, self.token, self.secret)) sub = db.get(sub.key()) expiration2 = sub.expiration_time self.assertTrue(expiration2 > expiration1) def testRemove(self): self.assertFalse(Subscription.remove(self.callback, self.topic)) self.assertTrue(Subscription.request_insert( self.callback, self.topic, self.token, 
self.secret)) self.assertTrue(Subscription.remove(self.callback, self.topic)) self.assertFalse(Subscription.remove(self.callback, self.topic)) # Only task should be the initial insertion request. self.verify_tasks(Subscription.STATE_VERIFIED, self.token, self.secret, expected_count=1, index=0) def testRequestRemove(self): """Tests the request remove method.""" self.assertFalse(Subscription.request_remove( self.callback, self.topic, self.token)) # No tasks should be enqueued and this request should do nothing because # no subscription currently exists. testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=0) self.assertTrue(Subscription.request_insert( self.callback, self.topic, self.token, self.secret)) second_token = 'this is the second token' self.assertTrue(Subscription.request_remove( self.callback, self.topic, second_token)) sub = self.get_subscription() self.assertEquals(self.token, sub.verify_token) self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state) self.verify_tasks(Subscription.STATE_VERIFIED, self.token, self.secret, expected_count=2, index=0) self.verify_tasks(Subscription.STATE_TO_DELETE, second_token, '', expected_count=2, index=1) def testRequestInsertOverride(self): """Tests that requesting insertion does not override the verify_token.""" self.assertTrue(Subscription.insert( self.callback, self.topic, self.token, self.secret)) second_token = 'this is the second token' second_secret = 'another secret here' self.assertFalse(Subscription.request_insert( self.callback, self.topic, second_token, second_secret)) sub = self.get_subscription() self.assertEquals(self.token, sub.verify_token) self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state) self.verify_tasks(Subscription.STATE_VERIFIED, second_token, second_secret, expected_count=1, index=0) def testHasSubscribers_unverified(self): """Tests that unverified subscribers do not make the subscription active.""" 
    self.assertFalse(Subscription.has_subscribers(self.topic))
    self.assertTrue(Subscription.request_insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertFalse(Subscription.has_subscribers(self.topic))

  def testHasSubscribers_verified(self):
    """Tests has_subscribers with a verified subscription present/removed."""
    self.assertTrue(Subscription.insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.has_subscribers(self.topic))
    self.assertTrue(Subscription.remove(self.callback, self.topic))
    self.assertFalse(Subscription.has_subscribers(self.topic))

  def testGetSubscribers_unverified(self):
    """Tests that unverified subscribers will not be retrieved."""
    self.assertEquals([], Subscription.get_subscribers(self.topic, 10))
    self.assertTrue(Subscription.request_insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.request_insert(
        self.callback2, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.request_insert(
        self.callback3, self.topic, self.token, self.secret))
    self.assertEquals([], Subscription.get_subscribers(self.topic, 10))

  def testGetSubscribers_verified(self):
    """Tests that verified subscribers are all retrieved."""
    self.assertEquals([], Subscription.get_subscribers(self.topic, 10))
    self.assertTrue(Subscription.insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, self.token, self.secret))
    sub_list = Subscription.get_subscribers(self.topic, 10)
    found_keys = set(s.key().name() for s in sub_list)
    self.assertEquals(set(self.callback_key_map.keys()), found_keys)

  def testGetSubscribers_count(self):
    """Tests that the count argument limits the number of results."""
    self.assertTrue(Subscription.insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, self.token, self.secret))
    sub_list = Subscription.get_subscribers(self.topic, 1)
    self.assertEquals(1, len(sub_list))

  def testGetSubscribers_withOffset(self):
    """Tests the behavior of the starting_at_callback offset parameter."""
    # In the order the query will sort them.
    all_hashes = [
        u'87a74994e48399251782eb401e9a61bd1d55aeee',
        u'01518f29da9db10888a92e9f0211ac0c98ec7ecb',
        u'f745d00a9806a5cdd39f16cd9eff80e8f064cfee',
    ]
    all_keys = ['hash_' + h for h in all_hashes]
    all_callbacks = [self.callback_key_map[k] for k in all_keys]

    self.assertTrue(Subscription.insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, self.token, self.secret))

    def key_list(starting_at_callback):
      # Helper: key names returned when starting the scan at this callback.
      sub_list = Subscription.get_subscribers(
          self.topic, 10, starting_at_callback=starting_at_callback)
      return [s.key().name() for s in sub_list]

    self.assertEquals(all_keys, key_list(None))
    self.assertEquals(all_keys, key_list(all_callbacks[0]))
    self.assertEquals(all_keys[1:], key_list(all_callbacks[1]))
    self.assertEquals(all_keys[2:], key_list(all_callbacks[2]))

  def testGetSubscribers_multipleTopics(self):
    """Tests that separate topics do not overlap in subscriber queries."""
    self.assertEquals([], Subscription.get_subscribers(self.topic2, 10))
    self.assertTrue(Subscription.insert(
        self.callback, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, self.token, self.secret))
    self.assertEquals([], Subscription.get_subscribers(self.topic2, 10))

    self.assertTrue(Subscription.insert(
        self.callback2, self.topic2, self.token, self.secret))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic2, self.token, self.secret))
    sub_list = Subscription.get_subscribers(self.topic2, 10)
    found_keys = set(s.key().name() for s in sub_list)
    self.assertEquals(
        set(Subscription.create_key_name(cb, self.topic2)
            for cb in (self.callback2, self.callback3)),
        found_keys)
    self.assertEquals(3, len(Subscription.get_subscribers(self.topic, 10)))

  def testConfirmFailed(self):
    """Tests retry delay periods when a subscription confirmation fails."""
    start = datetime.datetime.utcnow()
    def now():
      return start

    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.request_insert(
        self.callback, self.topic, self.token, self.secret))
    # NOTE(review): duplicate assignment of sub_key below — harmless but
    # redundant; the value is identical to the one computed above.
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    sub = Subscription.get_by_key_name(sub_key)
    self.assertEquals(0, sub.confirm_failures)

    # Delays double each failure: 5, 10, 20, 40, 80 seconds.
    for i, delay in enumerate((5, 10, 20, 40, 80)):
      self.assertTrue(
          sub.confirm_failed(Subscription.STATE_VERIFIED, self.token, False,
                             max_failures=5, retry_period=5, now=now))
      self.assertEquals(sub.eta, start + datetime.timedelta(seconds=delay))
      self.assertEquals(i+1, sub.confirm_failures)

    # It will give up on the last try.
    self.assertFalse(
        sub.confirm_failed(Subscription.STATE_VERIFIED, self.token, False,
                           max_failures=5, retry_period=5))
    sub = Subscription.get_by_key_name(sub_key)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)

    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, index=0, expected_count=6)

  def testQueueSelected(self):
    """Tests that auto_reconfirm will put the task on the polling queue."""
    self.assertTrue(Subscription.request_insert(
        self.callback, self.topic, self.token, self.secret,
        auto_reconfirm=True))
    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=0)
    testutil.get_tasks(main.POLLING_QUEUE, expected_count=1)

    self.assertFalse(Subscription.request_insert(
        self.callback, self.topic, self.token, self.secret,
        auto_reconfirm=False))
    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=1)
    testutil.get_tasks(main.POLLING_QUEUE, expected_count=1)

  def testArchiveExists(self):
    """Tests the archive method when the subscription exists."""
    Subscription.insert(self.callback, self.topic, self.token, self.secret)
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    sub = Subscription.get_by_key_name(sub_key)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    Subscription.archive(self.callback, self.topic)
    sub = Subscription.get_by_key_name(sub_key)
    self.assertEquals(Subscription.STATE_TO_DELETE, sub.subscription_state)

  def testArchiveMissing(self):
    """Tests the archive method when the subscription does not exist."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    Subscription.archive(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

################################################################################

FeedToFetch = main.FeedToFetch


class FeedToFetchTest(unittest.TestCase):
  """Tests for the FeedToFetch model class."""

  def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()
    self.topic = 'http://example.com/topic-one'
    self.topic2 = 'http://example.com/topic-two'
    self.topic3 = 'http://example.com/topic-three'

  def testInsertAndGet(self):
    """Tests inserting and getting work."""
    all_topics = [self.topic, self.topic2, self.topic3]
    found_feeds = FeedToFetch.insert(all_topics)
    task = testutil.get_tasks(main.FEED_QUEUE, index=0, expected_count=1)
    # All feeds inserted in one batch share a single work_index, and the
    # enqueued task name encodes that index.
    self.assertTrue(task['name'].endswith('%d-0' % found_feeds[0].work_index))
    for topic, feed_to_fetch in zip(all_topics, found_feeds):
      self.assertEquals(topic, feed_to_fetch.topic)
      self.assertEquals([], feed_to_fetch.source_keys)
      self.assertEquals([], feed_to_fetch.source_values)
      self.assertEquals(found_feeds[0].work_index, feed_to_fetch.work_index)

  def testEmpty(self):
    """Tests when the list of urls is empty."""
    FeedToFetch.insert([])
    self.assertEquals([], testutil.get_tasks(main.FEED_QUEUE))

  def testDuplicates(self):
    """Tests duplicate urls."""
    all_topics = [self.topic, self.topic, self.topic2, self.topic2]
    found_feeds = FeedToFetch.insert(all_topics)
    found_topics = set(t.topic for t in found_feeds)
    self.assertEquals(set(all_topics), found_topics)
    task = testutil.get_tasks(main.FEED_QUEUE, index=0, expected_count=1)
    self.assertTrue(task['name'].endswith('%d-0' % found_feeds[0].work_index))

  def testDone(self):
    """Tests marking the feed as completed."""
    (feed,) = FeedToFetch.insert([self.topic])
    self.assertFalse(feed.done())
    self.assertTrue(FeedToFetch.get_by_topic(self.topic) is None)

  def testDoneAfterFailure(self):
    """Tests done() after a fetch_failed() writes the FeedToFetch to disk."""
    (feed,) = FeedToFetch.insert([self.topic])
    feed.fetch_failed()
    self.assertTrue(feed.done())
    self.assertTrue(FeedToFetch.get_by_topic(self.topic) is None)

  def testDoneConflict(self):
    """Tests when another entity was written over the top of this one."""
    (feed1,) = FeedToFetch.insert([self.topic])
    feed1.put()
    (feed2,) = FeedToFetch.insert([self.topic])
    feed2.put()
    # done() must detect the conflicting write and leave the entity alone.
    self.assertFalse(feed1.done())
    self.assertTrue(FeedToFetch.get_by_topic(self.topic) is not None)

  def testFetchFailed(self):
    """Tests when the fetch fails and should be retried."""
    start = datetime.datetime.utcnow()
    now = lambda: start

    (feed,) = FeedToFetch.insert([self.topic])
    etas = []
    # Retry delays double each failure: 5, 10, 20, 40, 80 seconds.
    for i, delay in enumerate((5, 10, 20, 40, 80)):
      feed = FeedToFetch.get_by_topic(self.topic) or feed
      feed.fetch_failed(max_failures=5, retry_period=5, now=now)
      expected_eta = start + datetime.timedelta(seconds=delay)
      self.assertEquals(expected_eta, feed.eta)
      etas.append(testutil.task_eta(feed.eta))
      self.assertEquals(i+1, feed.fetching_failures)
      self.assertEquals(False, feed.totally_failed)

    feed.fetch_failed(max_failures=5, retry_period=5, now=now)
    self.assertEquals(True, feed.totally_failed)

    tasks = testutil.get_tasks(main.FEED_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.FEED_RETRIES_QUEUE, expected_count=5))
    found_etas = [t['eta'] for t in tasks[1:]]  # First task is from insert()
    self.assertEquals(etas, found_etas)

  def testQueuePreserved(self):
    """Tests the request's polling queue is preserved for new FeedToFetch."""
    FeedToFetch.insert([self.topic])
    testutil.get_tasks(main.FEED_QUEUE, expected_count=1)

    # Simulate the task executing from the polling queue; inserts made while
    # handling it should land back on the polling queue.
    os.environ['HTTP_X_APPENGINE_QUEUENAME'] = main.POLLING_QUEUE
    try:
      (feed,) = FeedToFetch.insert([self.topic])
      testutil.get_tasks(main.FEED_QUEUE, expected_count=1)
      testutil.get_tasks(main.POLLING_QUEUE, expected_count=1)
    finally:
      del os.environ['HTTP_X_APPENGINE_QUEUENAME']

  def testSources(self):
    """Tests when sources are supplied."""
    source_dict = {'foo': 'bar', 'meepa': 'stuff'}
    all_topics = [self.topic, self.topic2, self.topic3]
    feed_list = FeedToFetch.insert(all_topics, source_dict=source_dict)
    for feed_to_fetch in feed_list:
      # source_dict is stored as two parallel lists on the entity.
      found_source_dict = dict(zip(feed_to_fetch.source_keys,
                                   feed_to_fetch.source_values))
      self.assertEquals(source_dict, found_source_dict)

################################################################################

FeedEntryRecord = main.FeedEntryRecord
EventToDeliver = main.EventToDeliver


class EventToDeliverTest(unittest.TestCase):
  """Tests for the EventToDeliver model class."""

  def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()
    self.topic = 'http://example.com/my-topic'
    # Order out of the datastore will be done by callback hash, not alphabetical
    self.callback = 'http://example.com/my-callback'
    self.callback2 = 'http://example.com/second-callback'
    self.callback3 = 'http://example.com/third-callback-123'
    self.callback4 = 'http://example.com/fourth-callback-1205'
    self.header_footer = '<feed>\n<stuff>blah</stuff>\n<xmldata/></feed>'
    self.token = 'verify token'
    self.secret = 'some secret'
    self.test_payloads = [
        '<entry>article1</entry>',
        '<entry>article2</entry>',
        '<entry>article3</entry>',
    ]

  def insert_subscriptions(self):
    """Inserts Subscription instances and an EventToDeliver for testing.

    Returns:
      Tuple (event, work_key, sub_list, sub_keys) where:
        event: The EventToDeliver that was inserted.
        work_key: Key for the 'event'
        sub_list: List of Subscription instances that were created in order
          of their callback hashes.
        sub_keys: Key instances corresponding to the entries in 'sub_list'.
    """
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    work_key = event.key()

    Subscription.insert(
        self.callback, self.topic, self.token, self.secret)
    Subscription.insert(
        self.callback2, self.topic, self.token, self.secret)
    Subscription.insert(
        self.callback3, self.topic, self.token, self.secret)
    Subscription.insert(
        self.callback4, self.topic, self.token, self.secret)
    # get_subscribers returns subscriptions in callback-hash order.
    sub_list = Subscription.get_subscribers(self.topic, 10)
    sub_keys = [s.key() for s in sub_list]
    self.assertEquals(4, len(sub_list))

    return (event, work_key, sub_list, sub_keys)

  def testCreateEventForTopic(self):
    """Tests that the payload of an event is properly formed."""
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    # Entries are spliced in just before the feed's closing tag.
    expected_data = \
u"""<?xml version="1.0" encoding="utf-8"?>
<feed>
<stuff>blah</stuff>
<xmldata/>
<entry>article1</entry>
<entry>article2</entry>
<entry>article3</entry>
</feed>"""
    self.assertEquals(expected_data, event.payload)
    self.assertEquals('application/atom+xml', event.content_type)

  def testCreateEventForTopic_Rss(self):
    """Tests that the RSS payload is properly formed."""
    self.test_payloads = [
        '<item>article1</item>',
        '<item>article2</item>',
        '<item>article3</item>',
    ]
    self.header_footer = (
        '<rss>\n<channel>\n<stuff>blah</stuff>\n<xmldata/></channel>\n</rss>')
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.RSS, 'application/rss+xml',
        self.header_footer, self.test_payloads)
    # For RSS the items are spliced in before the closing channel tag.
    expected_data = \
u"""<?xml version="1.0" encoding="utf-8"?>
<rss>
<channel>
<stuff>blah</stuff>
<xmldata/>
<item>article1</item>
<item>article2</item>
<item>article3</item>
</channel>
</rss>"""
    self.assertEquals(expected_data, event.payload)
    self.assertEquals('application/rss+xml', event.content_type)

  def testCreateEventForTopic_Abitrary(self):
    """Tests that an arbitrary payload is properly formed."""
    self.test_payloads = []
    self.header_footer = 'this is my data here'
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ARBITRARY, 'my crazy content type',
        self.header_footer, self.test_payloads)
    # Arbitrary content is passed through untouched.
    expected_data = 'this is my data here'
    self.assertEquals(expected_data, event.payload)
    self.assertEquals('my crazy content type', event.content_type)

  def testCreateEvent_badHeaderFooter(self):
    """Tests when the header/footer data in an event is invalid."""
    self.assertRaises(AssertionError, EventToDeliver.create_event_for_topic,
                      self.topic, main.ATOM, 'content type unused',
                      '<feed>has no end tag', self.test_payloads)

  def testNormal_noFailures(self):
    """Tests that event delivery with no failures will delete the event."""
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()
    more, subs = event.get_next_subscribers()
    event.update(more, [])
    event = EventToDeliver.get(work_key)
    self.assertTrue(event is None)

  def testUpdate_failWithNoSubscribersLeft(self):
    """Tests that failures are written correctly by EventToDeliver.update.

    This tests the common case of completing the failed callbacks list
    extending when there are new Subscriptions that have been found in the
    latest work queue query.
    """
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()

    # Assert that the callback offset is updated and any failed callbacks
    # are recorded.
    more, subs = event.get_next_subscribers(chunk_size=1)
    event.update(more, [sub_list[0]])
    event = EventToDeliver.get(event.key())
    self.assertEquals(EventToDeliver.NORMAL, event.delivery_mode)
    self.assertEquals([sub_list[0].key()], event.failed_callbacks)
    self.assertEquals(self.callback2, event.last_callback)

    more, subs = event.get_next_subscribers(chunk_size=3)
    event.update(more, sub_list[1:])
    event = EventToDeliver.get(event.key())
    self.assertTrue(event is not None)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)
    self.assertEquals('', event.last_callback)
    self.assertEquals([s.key() for s in sub_list], event.failed_callbacks)

    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=1))
    self.assertEquals([str(work_key)] * 2,
                      [t['params']['event_key'] for t in tasks])

  def testUpdate_actuallyNoMoreCallbacks(self):
    """Tests when the normal update delivery has no Subscriptions left.

    This tests the case where update is called with no Subscribers in the
    list of Subscriptions. This can happen if a Subscription is deleted
    between when an update happens and when the work queue is invoked again.
    """
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()
    more, subs = event.get_next_subscribers(chunk_size=3)
    event.update(more, subs)
    event = EventToDeliver.get(event.key())
    self.assertEquals(self.callback4, event.last_callback)
    self.assertEquals(EventToDeliver.NORMAL, event.delivery_mode)

    # This final call to update will transition to retry properly.
    Subscription.remove(self.callback4, self.topic)
    more, subs = event.get_next_subscribers(chunk_size=1)
    event.update(more, [])
    event = EventToDeliver.get(event.key())
    self.assertEquals([], subs)
    self.assertTrue(event is not None)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)

    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=1))
    self.assertEquals([str(work_key)] * 2,
                      [t['params']['event_key'] for t in tasks])

  def testGetNextSubscribers_retriesFinallySuccessful(self):
    """Tests retries until all subscribers are successful."""
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()

    # Simulate that callback 2 is successful and the rest fail.
    more, subs = event.get_next_subscribers(chunk_size=2)
    event.update(more, sub_list[:1])
    event = EventToDeliver.get(event.key())
    self.assertTrue(more)
    self.assertEquals(self.callback3, event.last_callback)
    self.assertEquals(EventToDeliver.NORMAL, event.delivery_mode)

    more, subs = event.get_next_subscribers(chunk_size=2)
    event.update(more, sub_list[2:])
    event = EventToDeliver.get(event.key())
    self.assertEquals('', event.last_callback)
    self.assertFalse(more)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)

    # Now getting the next subscribers will returned the failed ones.
    more, subs = event.get_next_subscribers(chunk_size=2)
    expected = sub_keys[:1] + sub_keys[2:3]
    self.assertEquals(expected, [s.key() for s in subs])
    event.update(more, subs)
    event = EventToDeliver.get(event.key())
    self.assertTrue(more)
    self.assertEquals(self.callback, event.last_callback)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)

    # This will get the last of the failed subscribers but *not* include the
    # sentinel value of event.last_callback, since that marks the end of this
    # attempt.
    more, subs = event.get_next_subscribers(chunk_size=2)
    expected = sub_keys[3:]
    self.assertEquals(expected, [s.key() for s in subs])
    event.update(more, subs)
    event = EventToDeliver.get(event.key())
    self.assertFalse(more)
    self.assertEquals('', event.last_callback)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)
    self.assertEquals(sub_keys[:1] + sub_keys[2:], event.failed_callbacks)

    # Now simulate all retries being successful one chunk at a time.
    more, subs = event.get_next_subscribers(chunk_size=2)
    expected = sub_keys[:1] + sub_keys[2:3]
    self.assertEquals(expected, [s.key() for s in subs])
    event.update(more, [])
    event = EventToDeliver.get(event.key())
    self.assertTrue(more)
    self.assertEquals(self.callback, event.last_callback)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)
    self.assertEquals(sub_keys[3:], event.failed_callbacks)

    more, subs = event.get_next_subscribers(chunk_size=2)
    expected = sub_keys[3:]
    self.assertEquals(expected, [s.key() for s in subs])
    event.update(more, [])
    self.assertFalse(more)

    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=4))
    self.assertEquals([str(work_key)] * 5,
                      [t['params']['event_key'] for t in tasks])

  def testGetNextSubscribers_failedFewerThanChunkSize(self):
    """Tests when there are fewer failed callbacks than the chunk size.

    Ensures that we step through retry attempts when there is only a single
    chunk to go through on each retry iteration.
    """
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()

    # Simulate that callback 2 is successful and the rest fail.
    more, subs = event.get_next_subscribers(chunk_size=2)
    event.update(more, sub_list[:1])
    event = EventToDeliver.get(event.key())
    self.assertTrue(more)
    self.assertEquals(self.callback3, event.last_callback)
    self.assertEquals(EventToDeliver.NORMAL, event.delivery_mode)

    more, subs = event.get_next_subscribers(chunk_size=2)
    event.update(more, sub_list[2:])
    event = EventToDeliver.get(event.key())
    self.assertEquals('', event.last_callback)
    self.assertFalse(more)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)
    self.assertEquals(1, event.retry_attempts)

    # Now attempt a retry with a chunk size equal to the number of callbacks.
    more, subs = event.get_next_subscribers(chunk_size=3)
    event.update(more, subs)
    event = EventToDeliver.get(event.key())
    self.assertFalse(more)
    self.assertEquals(EventToDeliver.RETRY, event.delivery_mode)
    self.assertEquals(2, event.retry_attempts)

    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=2))
    self.assertEquals([str(work_key)] * 3,
                      [t['params']['event_key'] for t in tasks])

  def testGetNextSubscribers_giveUp(self):
    """Tests retry delay amounts until we finally give up on event delivery.

    Verifies retry delay logic works properly.
    """
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()

    start = datetime.datetime.utcnow()
    now = lambda: start

    etas = []
    # Delays double each attempt up to max_failures=8.
    for i, delay in enumerate((5, 10, 20, 40, 80, 160, 320, 640)):
      more, subs = event.get_next_subscribers(chunk_size=4)
      event.update(more, subs, retry_period=5, now=now, max_failures=8)
      event = EventToDeliver.get(event.key())
      self.assertEquals(i+1, event.retry_attempts)
      expected_eta = start + datetime.timedelta(seconds=delay)
      self.assertEquals(expected_eta, event.last_modified)
      etas.append(testutil.task_eta(event.last_modified))
      self.assertFalse(event.totally_failed)

    more, subs = event.get_next_subscribers(chunk_size=4)
    event.update(more, subs)
    event = EventToDeliver.get(event.key())
    self.assertTrue(event.totally_failed)

    tasks = testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=8)
    found_etas = [t['eta'] for t in tasks]
    self.assertEquals(etas, found_etas)

  def testQueuePreserved(self):
    """Tests that enqueueing an EventToDeliver preserves the polling queue."""
    event, work_key, sub_list, sub_keys = self.insert_subscriptions()
    def txn():
      # Helper run inside a datastore transaction.
      event.enqueue()
    db.run_in_transaction(txn)
    testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)

    # Simulate execution from the polling queue; the enqueue should follow.
    os.environ['HTTP_X_APPENGINE_QUEUENAME'] = main.POLLING_QUEUE
    try:
      db.run_in_transaction(txn)
    finally:
      del os.environ['HTTP_X_APPENGINE_QUEUENAME']

    testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    testutil.get_tasks(main.POLLING_QUEUE, expected_count=1)

  def testMaxFailuresOverride(self):
    """Tests the max_failures override value."""
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    self.assertEquals(None, event.max_failures)

    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads, max_failures=1)
    self.assertEquals(1, event.max_failures)

    Subscription.insert(
        self.callback, self.topic, self.token, self.secret)
    subscription_list = list(Subscription.all())
    event.put()
    # With max_failures=1 the event fails totally after a single retry.
    event.update(False, subscription_list)
    event2 = db.get(event.key())
    self.assertFalse(event2.totally_failed)

    event2.update(False, [])
    event3 = db.get(event.key())
    self.assertTrue(event3.totally_failed)

################################################################################


class PublishHandlerTest(testutil.HandlerTestBase):
  """Tests for the PublishHandler web request handler."""

  handler_class = main.PublishHandler

  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    self.topic = 'http://example.com/first-url'
    self.topic2 = 'http://example.com/second-url'
    self.topic3 = 'http://example.com/third-url'

  def get_feeds_to_fetch(self):
    """Gets the enqueued FeedToFetch records."""
    return FeedToFetch.FORK_JOIN_QUEUE.pop(
        testutil.get_tasks(main.FEED_QUEUE, index=0, expected_count=1)['name'])

  def testDebugFormRenders(self):
    """Tests that the GET debug form renders HTML."""
    self.handle('get')
    self.assertTrue('<html>' in self.response_body())

  def testBadMode(self):
    """Tests that an unknown hub.mode is rejected with a 400."""
    self.handle('post',
                ('hub.mode', 'invalid'),
                ('hub.url', 'http://example.com'))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.mode' in self.response_body())

  def testNoUrls(self):
    """Tests that a publish with no hub.url parameters is a 400."""
    self.handle('post', ('hub.mode', 'publish'))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.url' in self.response_body())

  def testBadUrls(self):
    """Tests that URLs with fragments are rejected."""
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', 'http://example.com/bad_url#fragment'))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.url invalid' in self.response_body())

  def testInsertion(self):
    """Tests publishing known feeds enqueues FeedToFetch work."""
    db.put([KnownFeed.create(self.topic),
            KnownFeed.create(self.topic2),
            KnownFeed.create(self.topic3)])
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic),
                ('hub.url', self.topic2),
                ('hub.url', self.topic3))
    self.assertEquals(204, self.response_code())
    expected_topics = set([self.topic, self.topic2, self.topic3])
    feed_list = self.get_feeds_to_fetch()
    inserted_topics = set(f.topic for f in feed_list)
    self.assertEquals(expected_topics, inserted_topics)

  def testIgnoreUnknownFeed(self):
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic),
                ('hub.url', self.topic2),
                ('hub.url', self.topic3))
    self.assertEquals(204, self.response_code())
    # No KnownFeed entities exist, so nothing is enqueued.
    testutil.get_tasks(main.FEED_QUEUE, expected_count=0)

  def testDuplicateUrls(self):
    """Tests that duplicated hub.url values are collapsed before insert."""
    db.put([KnownFeed.create(self.topic),
            KnownFeed.create(self.topic2)])
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2),
                ('hub.url', self.topic2))
    self.assertEquals(204, self.response_code())
    expected_topics = set([self.topic, self.topic2])
    inserted_topics = set(f.topic for f in self.get_feeds_to_fetch())
    self.assertEquals(expected_topics, inserted_topics)

  def testInsertFailure(self):
    """Tests when a publish event fails insertion."""
    old_insert = FeedToFetch.insert
    try:
      for exception in (db.Error(), apiproxy_errors.Error(),
                        runtime.DeadlineExceededError()):
        # new_insert closes over the loop variable; it is reassigned and
        # invoked within each iteration, so each request raises the
        # exception for that iteration.
        @classmethod
        def new_insert(cls, *args):
          raise exception
        FeedToFetch.insert = new_insert
        self.handle('post',
                    ('hub.mode', 'PuBLisH'),
                    ('hub.url', 'http://example.com/first-url'),
                    ('hub.url', 'http://example.com/second-url'),
                    ('hub.url', 'http://example.com/third-url'))
        self.assertEquals(503, self.response_code())
    finally:
      FeedToFetch.insert = old_insert

  def testCaseSensitive(self):
    """Tests that cases for topics URLs are preserved."""
    self.topic += FUNNY
    self.topic2 += FUNNY
    self.topic3 += FUNNY
    db.put([KnownFeed.create(self.topic),
            KnownFeed.create(self.topic2),
            KnownFeed.create(self.topic3)])
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic),
                ('hub.url', self.topic2),
                ('hub.url', self.topic3))
    self.assertEquals(204, self.response_code())
    expected_topics = set([self.topic, self.topic2, self.topic3])
    inserted_topics = set(f.topic for f in self.get_feeds_to_fetch())
    self.assertEquals(expected_topics, inserted_topics)

  def testNormalization(self):
    """Tests that URLs are properly normalized."""
    self.topic += OTHER_STRING
    self.topic2 += OTHER_STRING
    self.topic3 += OTHER_STRING
    normalized = [
        main.normalize_iri(t)
        for t in [self.topic, self.topic2, self.topic3]]
    db.put([KnownFeed.create(t) for t in normalized])
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic),
                ('hub.url', self.topic2),
                ('hub.url', self.topic3))
    self.assertEquals(204, self.response_code())
    inserted_topics = set(f.topic for f in self.get_feeds_to_fetch())
    self.assertEquals(set(normalized), inserted_topics)

  def testIri(self):
    """Tests publishing with an IRI with international characters."""
    topic = main.normalize_iri(self.topic + FUNNY_UNICODE)
    topic2 = main.normalize_iri(self.topic2 + FUNNY_UNICODE)
    topic3 = main.normalize_iri(self.topic3 + FUNNY_UNICODE)
    normalized = [topic, topic2, topic3]
    db.put([KnownFeed.create(t) for t in normalized])
    self.handle('post',
                ('hub.mode', 'PuBLisH'),
                ('hub.url', self.topic + FUNNY_UTF8),
                ('hub.url', self.topic2 + FUNNY_UTF8),
                ('hub.url', self.topic3 + FUNNY_UTF8))
    self.assertEquals(204, self.response_code())
    inserted_topics = set(f.topic for f in self.get_feeds_to_fetch())
    self.assertEquals(set(normalized), inserted_topics)

  def testUnicode(self):
    """Tests publishing with a URL that has unicode characters."""
    topic = main.normalize_iri(self.topic + FUNNY_UNICODE)
    topic2 = main.normalize_iri(self.topic2 + FUNNY_UNICODE)
    topic3 = main.normalize_iri(self.topic3 + FUNNY_UNICODE)
    normalized = [topic, topic2, topic3]
    db.put([KnownFeed.create(t) for t in normalized])

    # Build the raw request body by hand so the UTF-8 bytes go through
    # unquoted, exercising the handler's decoding path.
    payload = (
        'hub.mode=publish'
        '&hub.url=' + urllib.quote(self.topic) + FUNNY_UTF8 +
        '&hub.url=' + urllib.quote(self.topic2) + FUNNY_UTF8 +
        '&hub.url=' + urllib.quote(self.topic3) + FUNNY_UTF8)
    self.handle_body('post', payload)
    self.assertEquals(204, self.response_code())
    inserted_topics = set(f.topic for f in self.get_feeds_to_fetch())
    self.assertEquals(set(normalized), inserted_topics)

  def testSources(self):
    """Tests that derived sources are properly set on FeedToFetch instances."""
    db.put([KnownFeed.create(self.topic),
            KnownFeed.create(self.topic2),
            KnownFeed.create(self.topic3)])
    source_dict = {'one': 'two', 'three': 'four'}
    topics = [self.topic, self.topic2, self.topic3]
    def derive_sources(handler, urls):
      # Hook override: verify the handler passes through its request params.
      self.assertEquals(set(topics), set(urls))
      self.assertEquals('testvalue', handler.request.get('the-real-thing'))
      return source_dict

    main.hooks.override_for_test(main.derive_sources, derive_sources)
    try:
      self.handle('post',
                  ('hub.mode', 'PuBLisH'),
                  ('hub.url', self.topic),
                  ('hub.url', self.topic2),
                  ('hub.url', self.topic3),
                  ('the-real-thing', 'testvalue'))
      self.assertEquals(204, self.response_code())
      for feed_to_fetch in self.get_feeds_to_fetch():
        found_source_dict = dict(zip(feed_to_fetch.source_keys,
                                     feed_to_fetch.source_values))
        self.assertEquals(source_dict, found_source_dict)
    finally:
      main.hooks.reset_for_test(main.derive_sources)


class PublishHandlerThroughHubUrlTest(PublishHandlerTest):
  """Re-runs all PublishHandler tests through the combined hub URL handler."""

  handler_class = main.HubHandler

################################################################################


class FindFeedUpdatesTest(unittest.TestCase):
  """Tests for the find_feed_updates function."""

  def setUp(self):
    """Sets up the test harness."""
    testutil.setup_for_testing()
    self.topic = 'http://example.com/my-topic-here'
    self.header_footer = '<feed>this is my test header footer</feed>'
    self.entries_map = {
        'id1': 'content1',
        'id2': 'content2',
        'id3': 'content3',
    }
    self.content = 'the expected response data'
    def my_filter(content, ignored_format):
      # Stub feed filter: assert the fetched content and return canned data.
      self.assertEquals(self.content, content)
      return self.header_footer, self.entries_map
    self.my_filter = my_filter

  def run_test(self):
    """Runs a test."""
    header_footer, entry_list, entry_payloads = main.find_feed_updates(
        self.topic, main.ATOM, self.content, filter_feed=self.my_filter)
    self.assertEquals(self.header_footer, header_footer)
    return entry_list, entry_payloads
@staticmethod def get_entry(entry_id, entry_list): """Finds the entry with the given ID in the list of entries.""" return [e for e in entry_list if e.id_hash == sha1_hash(entry_id)][0] def testAllNewContent(self): """Tests when al pulled feed content is new.""" entry_list, entry_payloads = self.run_test() entry_id_hash_set = set(f.id_hash for f in entry_list) self.assertEquals(set(sha1_hash(k) for k in self.entries_map.keys()), entry_id_hash_set) self.assertEquals(self.entries_map.values(), entry_payloads) def testSomeExistingEntries(self): """Tests when some entries are already known.""" FeedEntryRecord.create_entry_for_topic( self.topic, 'id1', sha1_hash('content1')).put() FeedEntryRecord.create_entry_for_topic( self.topic, 'id2', sha1_hash('content2')).put() entry_list, entry_payloads = self.run_test() entry_id_hash_set = set(f.id_hash for f in entry_list) self.assertEquals(set(sha1_hash(k) for k in ['id3']), entry_id_hash_set) self.assertEquals(['content3'], entry_payloads) def testPulledEntryNewer(self): """Tests when an entry is already known but has been updated recently.""" FeedEntryRecord.create_entry_for_topic( self.topic, 'id1', sha1_hash('content1')).put() FeedEntryRecord.create_entry_for_topic( self.topic, 'id2', sha1_hash('content2')).put() self.entries_map['id1'] = 'newcontent1' entry_list, entry_payloads = self.run_test() entry_id_hash_set = set(f.id_hash for f in entry_list) self.assertEquals(set(sha1_hash(k) for k in ['id1', 'id3']), entry_id_hash_set) # Verify the old entry would be overwritten. 
entry1 = self.get_entry('id1', entry_list) self.assertEquals(sha1_hash('newcontent1'), entry1.entry_content_hash) self.assertEquals(['content3', 'newcontent1'], entry_payloads) def testUnicodeContent(self): """Tests when the content contains unicode characters.""" self.entries_map['id2'] = u'\u2019 asdf' entry_list, entry_payloads = self.run_test() entry_id_hash_set = set(f.id_hash for f in entry_list) self.assertEquals(set(sha1_hash(k) for k in self.entries_map.keys()), entry_id_hash_set) def testMultipleParallelBatches(self): """Tests that retrieving FeedEntryRecords is done in multiple batches.""" old_get_feed_record = main.FeedEntryRecord.get_entries_for_topic calls = [0] @staticmethod def fake_get_record(*args, **kwargs): calls[0] += 1 return old_get_feed_record(*args, **kwargs) old_lookups = main.MAX_FEED_ENTRY_RECORD_LOOKUPS main.FeedEntryRecord.get_entries_for_topic = fake_get_record main.MAX_FEED_ENTRY_RECORD_LOOKUPS = 1 try: entry_list, entry_payloads = self.run_test() entry_id_hash_set = set(f.id_hash for f in entry_list) self.assertEquals(set(sha1_hash(k) for k in self.entries_map.keys()), entry_id_hash_set) self.assertEquals(self.entries_map.values(), entry_payloads) self.assertEquals(3, calls[0]) finally: main.MAX_FEED_ENTRY_RECORD_LOOKUPS = old_lookups main.FeedEntryRecord.get_entries_for_topic = old_get_feed_record ################################################################################ FeedRecord = main.FeedRecord KnownFeedStats = main.KnownFeedStats class PullFeedHandlerTest(testutil.HandlerTestBase): handler_class = main.PullFeedHandler def setUp(self): """Sets up the test harness.""" testutil.HandlerTestBase.setUp(self) self.topic = 'http://example.com/my-topic-here' self.header_footer = '<feed>this is my test header footer</feed>' self.all_ids = ['1', '2', '3'] self.entry_payloads = [ 'content%s' % entry_id for entry_id in self.all_ids ] self.entry_list = [ FeedEntryRecord.create_entry_for_topic( self.topic, entry_id, 'content%s' % 
entry_id) for entry_id in self.all_ids ] self.expected_response = 'the expected response data' self.etag = 'something unique' self.last_modified = 'some time' self.headers = { 'ETag': self.etag, 'Last-Modified': self.last_modified, 'Content-Type': 'application/atom+xml', } self.expected_exceptions = [] def my_find_updates(ignored_topic, ignored_format, content): self.assertEquals(self.expected_response, content) if self.expected_exceptions: raise self.expected_exceptions.pop(0) return self.header_footer, self.entry_list, self.entry_payloads self.old_find_feed_updates = main.find_feed_updates main.find_feed_updates = my_find_updates self.callback = 'http://example.com/my-subscriber' self.assertTrue(Subscription.insert( self.callback, self.topic, 'token', 'secret')) def tearDown(self): """Tears down the test harness.""" main.find_feed_updates = self.old_find_feed_updates urlfetch_test_stub.instance.verify_and_reset() def run_fetch_task(self, index=0): """Runs the currently enqueued fetch task.""" task = testutil.get_tasks(main.FEED_QUEUE, index=index) os.environ['HTTP_X_APPENGINE_TASKNAME'] = task['name'] try: self.handle('post') finally: del os.environ['HTTP_X_APPENGINE_TASKNAME'] def testNoWork(self): self.handle('post', ('topic', self.topic)) def testNewEntries_Atom(self): """Tests when new entries are found.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() # Verify that all feed entry records have been written along with the # EventToDeliver and FeedRecord. 
feed_entries = FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids) self.assertEquals( [sha1_hash(k) for k in self.all_ids], [e.id_hash for e in feed_entries]) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) self.assertTrue('content1\ncontent2\ncontent3' in work.payload) work.delete() record = FeedRecord.get_or_create(self.topic) self.assertEquals(self.header_footer, record.header_footer) self.assertEquals(self.etag, record.etag) self.assertEquals(self.last_modified, record.last_modified) self.assertEquals('application/atom+xml', record.content_type) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testRssFailBack(self): """Tests when parsing as Atom fails and it uses RSS instead.""" self.expected_exceptions.append(feed_diff.Error('whoops')) self.header_footer = '<rss><channel>this is my test</channel></rss>' self.headers['Content-Type'] = 'application/xml' FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() feed_entries = FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids) self.assertEquals( [sha1_hash(k) for k in self.all_ids], [e.id_hash for e in feed_entries]) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) self.assertTrue('content1\ncontent2\ncontent3' in work.payload) work.delete() record = FeedRecord.get_or_create(self.topic) self.assertEquals('application/xml', record.content_type) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testAtomFailBack(self): """Tests when parsing as RSS fails and it uses 
Atom instead.""" self.expected_exceptions.append(feed_diff.Error('whoops')) self.headers.clear() self.headers['Content-Type'] = 'application/rss+xml' info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() feed_entries = FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids) self.assertEquals( [sha1_hash(k) for k in self.all_ids], [e.id_hash for e in feed_entries]) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) self.assertTrue('content1\ncontent2\ncontent3' in work.payload) work.delete() record = FeedRecord.get_or_create(self.topic) self.assertEquals('application/rss+xml', record.content_type) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testArbitraryContent(self): """Tests when the feed cannot be parsed as Atom or RSS.""" self.entry_list = [] self.entry_payloads = [] self.header_footer = 'this is all of the content' self.expected_exceptions.append(feed_diff.Error('whoops')) self.expected_exceptions.append(feed_diff.Error('whoops')) FeedToFetch.insert([self.topic]) self.headers['content-type'] = 'My Crazy Content Type' urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertTrue(feed is None) self.assertEquals(0, len(list(FeedEntryRecord.all()))) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) self.assertEquals('this is all of the content', work.payload) work.delete() record = FeedRecord.get_or_create(self.topic) # header_footer not saved for 
arbitrary data self.assertEquals(None, record.header_footer) self.assertEquals(self.etag, record.etag) self.assertEquals(self.last_modified, record.last_modified) self.assertEquals('my crazy content type', record.content_type) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) testutil.get_tasks(main.FEED_RETRIES_QUEUE, expected_count=0) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testCacheHit(self): """Tests when the fetched feed matches the last cached version of it.""" info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() request_headers = { 'If-None-Match': self.etag, 'If-Modified-Since': self.last_modified, } FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 304, '', request_headers=request_headers, response_headers=self.headers) self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testStatsUserAgent(self): """Tests that the user agent string includes feed stats.""" info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() KnownFeedStats( key=KnownFeedStats.create_key(self.topic), subscriber_count=123).put() request_headers = { 'User-Agent': 'Public Hub (+http://pubsubhubbub.appspot.com; 123 subscribers)', } FeedToFetch.insert([self.topic]) self.entry_list = [] urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, request_headers=request_headers, response_headers=self.headers) self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) record = FeedRecord.get_or_create(self.topic) self.assertEquals(self.header_footer, record.header_footer) 
self.assertEquals(self.etag, record.etag) self.assertEquals(self.last_modified, record.last_modified) self.assertEquals('application/atom+xml', record.content_type) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testNoNewEntries(self): """Tests when there are no new entries.""" FeedToFetch.insert([self.topic]) self.entry_list = [] urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) record = FeedRecord.get_or_create(self.topic) self.assertEquals(self.header_footer, record.header_footer) self.assertEquals(self.etag, record.etag) self.assertEquals(self.last_modified, record.last_modified) self.assertEquals('application/atom+xml', record.content_type) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testPullError(self): """Tests when URLFetch raises an exception.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, urlfetch_error=True) self.run_fetch_task() feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertEquals(1, feed.fetching_failures) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testPullRetry(self): """Tests that the task enqueued after a failure will run properly.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, urlfetch_error=True) self.run_fetch_task() # Verify the failed feed was written to the Datastore. 
feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertEquals(1, feed.fetching_failures) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) testutil.get_tasks(main.FEED_RETRIES_QUEUE, expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, urlfetch_error=True) self.handle('post', *task['params'].items()) feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertEquals(2, feed.fetching_failures) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) testutil.get_tasks(main.FEED_RETRIES_QUEUE, expected_count=2) def testPullBadStatusCode(self): """Tests when the response status is bad.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 500, self.expected_response) self.run_fetch_task() feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertEquals(1, feed.fetching_failures) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testApiProxyError(self): """Tests when the APIProxy raises an error.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, apiproxy_error=True) self.run_fetch_task() feed = FeedToFetch.get_by_key_name(get_hash_key_name(self.topic)) self.assertEquals(1, feed.fetching_failures) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, 
expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testNoSubscribers(self): """Tests that when a feed has no subscribers we do not pull it.""" self.assertTrue(Subscription.remove(self.callback, self.topic)) db.put(KnownFeed.create(self.topic)) self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is not None) self.entry_list = [] FeedToFetch.insert([self.topic]) self.run_fetch_task() # Verify that *no* feed entry records have been written. self.assertEquals([], FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids)) # And there is no EventToDeliver or tasks. testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) tasks = testutil.get_tasks(main.FEED_QUEUE, expected_count=1) # And no scoring. self.assertEquals([(0, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testRedirects(self): """Tests when redirects are encountered.""" info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() FeedToFetch.insert([self.topic]) real_topic = 'http://example.com/real-topic-location' self.headers['Location'] = real_topic urlfetch_test_stub.instance.expect( 'get', self.topic, 302, '', response_headers=self.headers.copy()) del self.headers['Location'] urlfetch_test_stub.instance.expect( 'get', real_topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is not None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=1) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testTooManyRedirects(self): """Tests when too many redirects are encountered.""" info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() FeedToFetch.insert([self.topic]) last_topic = self.topic real_topic = 'http://example.com/real-topic-location' for i in 
xrange(main.MAX_REDIRECTS): next_topic = real_topic + str(i) self.headers['Location'] = next_topic urlfetch_test_stub.instance.expect( 'get', last_topic, 302, '', response_headers=self.headers.copy()) last_topic = next_topic self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testRedirectToBadUrl(self): """Tests when the redirect URL is bad.""" info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() FeedToFetch.insert([self.topic]) real_topic = '/not/a/valid-redirect-location' self.headers['Location'] = real_topic urlfetch_test_stub.instance.expect( 'get', self.topic, 302, '', response_headers=self.headers.copy()) self.run_fetch_task() self.assertTrue(EventToDeliver.all().get() is None) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testPutSplitting(self): """Tests that put() calls for feed records are split when too large.""" # Make the content way too big. 
content_template = ('content' * 100 + '%s') self.all_ids = [str(i) for i in xrange(1000)] self.entry_payloads = [ (content_template % entry_id) for entry_id in self.all_ids ] self.entry_list = [ FeedEntryRecord.create_entry_for_topic( self.topic, entry_id, 'content%s' % entry_id) for entry_id in self.all_ids ] FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) old_max_new = main.MAX_NEW_FEED_ENTRY_RECORDS main.MAX_NEW_FEED_ENTRY_RECORDS = len(self.all_ids) + 1 try: self.run_fetch_task() finally: main.MAX_NEW_FEED_ENTRY_RECORDS = old_max_new # Verify that all feed entry records have been written along with the # EventToDeliver and FeedRecord. feed_entries = list(FeedEntryRecord.all()) self.assertEquals( set(sha1_hash(k) for k in self.all_ids), set(e.id_hash for e in feed_entries)) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) self.assertTrue('\n'.join(self.entry_payloads) in work.payload) work.delete() record = FeedRecord.get_or_create(self.topic) self.assertEquals(self.header_footer, record.header_footer) self.assertEquals(self.etag, record.etag) self.assertEquals(self.last_modified, record.last_modified) self.assertEquals('application/atom+xml', record.content_type) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) testutil.get_tasks(main.FEED_RETRIES_QUEUE, expected_count=0) self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testPutSplittingFails(self): """Tests when splitting put() calls still doesn't help and we give up.""" # Make the content way too big. 
content_template = ('content' * 150 + '%s') self.all_ids = [str(i) for i in xrange(1000)] self.entry_payloads = [ (content_template % entry_id) for entry_id in self.all_ids ] self.entry_list = [ FeedEntryRecord.create_entry_for_topic( self.topic, entry_id, 'content%s' % entry_id) for entry_id in self.all_ids ] FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) old_splitting_attempts = main.PUT_SPLITTING_ATTEMPTS old_max_saves = main.MAX_FEED_RECORD_SAVES old_max_new = main.MAX_NEW_FEED_ENTRY_RECORDS main.PUT_SPLITTING_ATTEMPTS = 1 main.MAX_FEED_RECORD_SAVES = len(self.entry_list) + 1 main.MAX_NEW_FEED_ENTRY_RECORDS = main.MAX_FEED_RECORD_SAVES try: self.run_fetch_task() finally: main.PUT_SPLITTING_ATTEMPTS = old_splitting_attempts main.MAX_FEED_RECORD_SAVES = old_max_saves main.MAX_NEW_FEED_ENTRY_RECORDS = old_max_new # Verify that *NO* FeedEntryRecords or EventToDeliver has been written, # the FeedRecord wasn't updated, and no tasks were enqueued. self.assertEquals([], list(FeedEntryRecord.all())) self.assertEquals(None, EventToDeliver.all().get()) record = FeedRecord.all().get() self.assertEquals(None, record) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) # Put splitting failure does not count against the feed. 
self.assertEquals([(1, 0)], main.FETCH_SCORER.get_scores([self.topic])) def testFeedTooLarge(self): """Tests when the pulled feed's content size is too large.""" FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, '', response_headers=self.headers, urlfetch_size_error=True) self.run_fetch_task() self.assertEquals([], list(FeedEntryRecord.all())) self.assertEquals(None, EventToDeliver.all().get()) testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testTooManyNewEntries(self): """Tests when there are more new entries than we can handle at once.""" self.all_ids = [str(i) for i in xrange(1000)] self.entry_payloads = [ 'content%s' % entry_id for entry_id in self.all_ids ] self.entry_list = [ FeedEntryRecord.create_entry_for_topic( self.topic, entry_id, 'content%s' % entry_id) for entry_id in self.all_ids ] FeedToFetch.insert([self.topic]) urlfetch_test_stub.instance.expect( 'get', self.topic, 200, self.expected_response, response_headers=self.headers) self.run_fetch_task() # Verify that a subset of the entry records are present and the payload # only has the first N entries. 
feed_entries = FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids) expected_records = main.MAX_NEW_FEED_ENTRY_RECORDS self.assertEquals( [sha1_hash(k) for k in self.all_ids[:expected_records]], [e.id_hash for e in feed_entries]) work = EventToDeliver.all().get() event_key = work.key() self.assertEquals(self.topic, work.topic) expected_content = '\n'.join(self.entry_payloads[:expected_records]) self.assertTrue(expected_content in work.payload) self.assertFalse('content%d' % expected_records in work.payload) work.delete() record = FeedRecord.all().get() self.assertNotEquals(self.etag, record.etag) task = testutil.get_tasks(main.EVENT_QUEUE, index=0, expected_count=1) self.assertEquals(str(event_key), task['params']['event_key']) testutil.get_tasks(main.FEED_QUEUE, expected_count=1) task = testutil.get_tasks(main.FEED_RETRIES_QUEUE, index=0, expected_count=1) self.assertEquals(self.topic, task['params']['topic']) self.assertEquals([(0, 1)], main.FETCH_SCORER.get_scores([self.topic])) def testNotAllowed(self): """Tests when the URL fetch is blocked due to URL scoring.""" dos.DISABLE_FOR_TESTING = False try: main.FETCH_SCORER.blackhole([self.topic]) start_scores = main.FETCH_SCORER.get_scores([self.topic]) info = FeedRecord.get_or_create(self.topic) info.update(self.headers) info.put() FeedToFetch.insert([self.topic]) self.run_fetch_task() # Verify that *no* feed entry records have been written. self.assertEquals([], FeedEntryRecord.get_entries_for_topic( self.topic, self.all_ids)) # And there is no EventToDeliver or tasks. 
testutil.get_tasks(main.EVENT_QUEUE, expected_count=0) tasks = testutil.get_tasks(main.FEED_QUEUE, expected_count=1) self.assertEquals( start_scores, main.FETCH_SCORER.get_scores([self.topic])) finally: dos.DISABLE_FOR_TESTING = True class PullFeedHandlerTestWithParsing(testutil.HandlerTestBase): handler_class = main.PullFeedHandler def run_fetch_task(self, index=0): """Runs the currently enqueued fetch task.""" task = testutil.get_tasks(main.FEED_QUEUE, index=index) os.environ['HTTP_X_APPENGINE_TASKNAME'] = task['name'] try: self.handle('post') finally: del os.environ['HTTP_X_APPENGINE_TASKNAME'] def testPullBadContent(self): """Tests when the content doesn't parse correctly.""" topic = 'http://example.com/my-topic' callback = 'http://example.com/my-subscriber' self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret')) FeedToFetch.insert([topic]) urlfetch_test_stub.instance.expect( 'get', topic, 200, 'this does not parse') self.run_fetch_task() # No retry task should be written. feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic)) self.assertTrue(feed is None) def testPullBadFeed(self): """Tests when the content parses, but is not a good Atom document.""" data = ('<?xml version="1.0" encoding="utf-8"?>\n' '<meep><entry>wooh</entry></meep>') topic = 'http://example.com/my-topic' callback = 'http://example.com/my-subscriber' self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret')) FeedToFetch.insert([topic]) urlfetch_test_stub.instance.expect('get', topic, 200, data) self.run_fetch_task() # No retry task should be written. 
    # (tail of the preceding pull test, truncated by this chunk)
    # The FeedToFetch work item must be consumed — no retry entity remains.
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)

  def testPullBadEncoding(self):
    """Tests when the content has a bad character encoding."""
    data = ('<?xml version="1.0" encoding="x-windows-874"?>\n'
            '<feed><my header="data"/>'
            '<entry><id>1</id><updated>123</updated>wooh</entry></feed>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect('get', topic, 200, data)
    self.run_fetch_task()
    # No retry task should be written.
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)

  def testPullGoodAtom(self):
    """Tests when the Atom XML can parse just fine."""
    data = ('<?xml version="1.0" encoding="utf-8"?>\n<feed><my header="data"/>'
            '<entry><id>1</id><updated>123</updated>wooh</entry></feed>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect('get', topic, 200, data)
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    # A delivery event carrying the parsed feed must have been written.
    event = EventToDeliver.all().get()
    self.assertEquals(data.replace('\n', ''), event.payload.replace('\n', ''))
    self.assertEquals('application/atom+xml', event.content_type)
    self.assertEquals('atom', FeedRecord.all().get().format)

  def testPullWithUnicodeEtag(self):
    """Tests when the ETag header has a unicode value.

    The ETag value should be ignored because non-ascii ETag values are invalid.
    """
    data = ('<?xml version="1.0" encoding="utf-8"?>\n<feed><my header="data"/>'
            '<entry><id>1</id><updated>123</updated>wooh</entry></feed>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    # Non-ascii ETag supplied by the publisher; the hub must drop it instead
    # of echoing it back on the next conditional GET.
    urlfetch_test_stub.instance.expect('get', topic, 200, data,
        response_headers={
            'ETag': '\xe3\x83\x96\xe3\x83\xad\xe3\x82\xb0\xe8\xa1\x86',
            'Content-Type': 'application/atom+xml',
        })
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    self.assertEquals(data.replace('\n', ''), event.payload.replace('\n', ''))
    self.assertEquals('application/atom+xml', event.content_type)
    # The saved request headers contain no If-None-Match — the ETag was
    # discarded.
    self.assertEquals(
        {'Accept': '*/*',
         'Connection': 'cache-control',
         'Cache-Control': 'no-cache no-store max-age=1'},
        FeedRecord.all().get().get_request_headers(0))

  def testPullGoodRss(self):
    """Tests when the RSS XML can parse just fine."""
    data = ('<?xml version="1.0" encoding="utf-8"?>\n'
            '<rss version="2.0"><channel><my header="data"/>'
            '<item><guid>1</guid><updated>123</updated>wooh</item>'
            '</channel></rss>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect('get', topic, 200, data)
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    self.assertEquals(data.replace('\n', ''), event.payload.replace('\n', ''))
    self.assertEquals('application/rss+xml', event.content_type)
    self.assertEquals('rss', FeedRecord.all().get().format)

  def testPullGoodRdf(self):
    """Tests when the RDF (RSS 1.0) XML can parse just fine."""
    data = ('<?xml version="1.0" encoding="utf-8"?>\n'
            '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">'
            '<channel><my header="data"/></channel>'
            '<item><guid>1</guid><updated>123</updated>wooh</item>'
            '</rdf:RDF>')
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect('get', topic, 200, data)
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    self.assertEquals(data.replace('\n', ''), event.payload.replace('\n', ''))
    self.assertEquals('application/rdf+xml', event.content_type)
    # RDF feeds are treated as the 'rss' format internally.
    self.assertEquals('rss', FeedRecord.all().get().format)

  def testPullArbitrary(self):
    """Tests pulling content of an arbitrary type."""
    data = 'this is my random payload of data'
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect(
        'get', topic, 200, data,
        response_headers={'Content-Type': 'my crazy content type'})
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    # Unparseable content is passed through verbatim as 'arbitrary'.
    self.assertEquals(data, event.payload)
    self.assertEquals('my crazy content type', event.content_type)
    self.assertEquals('arbitrary', FeedRecord.all().get().format)

  def testPullBinaryContent(self):
    """Tests pulling binary content."""
    data = '\xff\x12 some binary data'
    topic = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))
    FeedToFetch.insert([topic])
    urlfetch_test_stub.instance.expect(
        'get', topic, 200, data,
        response_headers={'Content-Type': 'my crazy content type'})
    self.run_fetch_task()
    feed = FeedToFetch.get_by_key_name(get_hash_key_name(topic))
    self.assertTrue(feed is None)
    event = EventToDeliver.all().get()
    # Binary bytes survive the pipeline unmodified.
    self.assertEquals(data, event.payload)
    self.assertEquals('my crazy content type', event.content_type)
    self.assertEquals('arbitrary', FeedRecord.all().get().format)

  def testMultipleFetch(self):
    """Tests doing multiple fetches asynchronously in parallel.

    Exercises the fork-join queue part of the fetching pipeline.
    """
    data = ('<?xml version="1.0" encoding="utf-8"?>\n<feed><my header="data"/>'
            '<entry><id>1</id><updated>123</updated>wooh</entry></feed>')
    topic_base = 'http://example.com/my-topic'
    callback = 'http://example.com/my-subscriber'
    topic_list = [topic_base + '1', topic_base + '2', topic_base + '3']
    FeedToFetch.insert(topic_list)
    for topic in topic_list:
      urlfetch_test_stub.instance.expect('get', topic, 200, data)
      self.assertTrue(Subscription.insert(callback, topic, 'token', 'secret'))

    # Simulate running as the enqueued fork-join task by setting the
    # App Engine task-name header for the duration of the request.
    os.environ['HTTP_X_APPENGINE_TASKNAME'] = testutil.get_tasks(
        main.FEED_QUEUE, index=0, expected_count=1)['name']
    try:
      self.handle('post')
    finally:
      del os.environ['HTTP_X_APPENGINE_TASKNAME']

    # Feed to fetch removed.
    self.assertEquals([], list(FeedToFetch.all()))
    self.assertEquals([(3, 0), (3, 0), (3, 0)],  # 3 because of shared domain
                      main.FETCH_SCORER.get_scores(topic_list))

    # All events written and correct.
    all_events = list(EventToDeliver.all())
    all_topics = [e.topic for e in all_events]
    self.assertEquals(3, len(all_events))
    self.assertEquals(set(topic_list), set(all_topics))
    event_tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=3)
    self.assertEquals(set(str(e.key()) for e in all_events),
                      set(task['params']['event_key'] for task in event_tasks))

    # All feed records written.
    all_records = list(FeedEntryRecord.all())
    all_parents = set(db.Key.from_path(FeedRecord.kind(),
                                       FeedRecord.create_key_name(topic))
                      for topic in topic_list)
    found_parents = set(r.parent().key() for r in all_records)
    self.assertEquals(3, len(found_parents))
    self.assertEquals(found_parents, all_parents)

################################################################################

class PushEventHandlerTest(testutil.HandlerTestBase):
  """Tests for the PushEventHandler that delivers events to subscribers."""

  handler_class = main.PushEventHandler

  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    # Saved so tearDown can restore the module-level chunk size.
    self.chunk_size = main.EVENT_SUBSCRIBER_CHUNK_SIZE
    self.topic = 'http://example.com/hamster-topic'
    # Order of these URL fetches is determined by the ordering of the hashes
    # of the callback URLs, so we need random extra strings here to get
    # alphabetical hash order.
    self.callback1 = 'http://example1.com/hamster-callback1-12'
    self.callback2 = 'http://example2.com/hamster-callback2'
    self.callback3 = 'http://example3.com/hamster-callback3-123456'
    self.callback4 = 'http://example4.com/hamster-callback4-123'
    self.header_footer = '<feed>\n<stuff>blah</stuff>\n<xmldata/></feed>'
    self.test_payloads = [
        '<entry>article1</entry>',
        '<entry>article2</entry>',
        '<entry>article3</entry>',
    ]
    # The full Atom document the handler is expected to POST to callbacks:
    # header/footer with the entry payloads spliced in.
    self.expected_payload = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<feed>\n'
        '<stuff>blah</stuff>\n'
        '<xmldata/>\n'
        '<entry>article1</entry>\n'
        '<entry>article2</entry>\n'
        '<entry>article3</entry>\n'
        '</feed>'
    )

    self.header_footer_rss = '<rss><channel></channel></rss>'
    self.test_payloads_rss = [
        '<item>article1</item>',
        '<item>article2</item>',
        '<item>article3</item>',
    ]
    self.expected_payload_rss = (
        '<?xml version="1.0" encoding="utf-8"?>\n'
        '<rss><channel>\n'
        '<item>article1</item>\n'
        '<item>article2</item>\n'
        '<item>article3</item>\n'
        '</channel></rss>'
    )

    # A datastore key that points at no entity, for the no-work case.
    self.bad_key = db.Key.from_path(EventToDeliver.kind(), 'does_not_exist')

  def tearDown(self):
    """Resets any external modules modified for testing."""
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = self.chunk_size
    urlfetch_test_stub.instance.verify_and_reset()

  def testNoWork(self):
    """Tests handling an event key that does not exist; must not blow up."""
    self.handle('post', ('event_key', str(self.bad_key)))

  def testNoExtraSubscribers(self):
    """Tests when a single chunk of delivery is enough."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, 'token', 'secret'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
    # Any 2xx status counts as a successful delivery.
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 200, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 204, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 299, '', request_payload=self.expected_payload)
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    self.handle('post', ('event_key', str(event.key())))
    self.assertEquals([], list(EventToDeliver.all()))
    testutil.get_tasks(main.EVENT_QUEUE, expected_count=0)
    self.assertEquals(
        [(1, 0), (1, 0), (1, 0)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2, self.callback3]))

  def testHmacData(self):
    """Tests that the content is properly signed with an HMAC."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret3'))
    # Secret is empty on purpose here, so the verify_token will be used instead.
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'my-token', ''))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, 'token', 'secret-stuff'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
    # Each callback must receive an X-Hub-Signature computed from its own
    # subscription secret (or verify_token when the secret is empty).
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 204, '',
        request_payload=self.expected_payload,
        request_headers={
            'Content-Type': 'application/atom+xml',
            'X-Hub-Signature': 'sha1=3e9caf971b0833d15393022f5f01a47adf597af5'})
    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 200, '',
        request_payload=self.expected_payload,
        request_headers={
            'Content-Type': 'application/atom+xml',
            'X-Hub-Signature': 'sha1=4847815aae8578eff55d351bc84a159b9bd8846e'})
    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 204, '',
        request_payload=self.expected_payload,
        request_headers={
            'Content-Type': 'application/atom+xml',
            'X-Hub-Signature': 'sha1=8b0a9da7204afa8ae04fc9439755c556b1e38d99'})
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    self.handle('post', ('event_key', str(event.key())))
    self.assertEquals([], list(EventToDeliver.all()))
    testutil.get_tasks(main.EVENT_QUEUE, expected_count=0)

  def testRssContentType(self):
    """Tests that the content type of an RSS feed is properly supplied."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 204, '',
        request_payload=self.expected_payload_rss,
        request_headers={
            'Content-Type': 'application/rss+xml',
            'X-Hub-Signature': 'sha1=1607313b6195af74f29158421f0a31aa25d680da'})
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.RSS, 'application/rss+xml',
        self.header_footer_rss, self.test_payloads_rss)
    event.put()
    self.handle('post', ('event_key', str(event.key())))
    self.assertEquals([], list(EventToDeliver.all()))
    testutil.get_tasks(main.EVENT_QUEUE, expected_count=0)

  def testExtraSubscribers(self):
    """Tests when there are more subscribers to contact after delivery."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, 'token', 'secret'))
    # Chunk size 1 forces one continuation task per remaining subscriber.
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 1
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    event_key = str(event.key())

    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 204, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()

    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 200, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(1, 0), (1, 0), (0, 0)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2, self.callback3]))

    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 204, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()

    # The event is gone once every subscriber has been contacted.
    self.assertEquals([], list(EventToDeliver.all()))
    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=2)
    self.assertEquals([event_key] * 2,
                      [t['params']['event_key'] for t in tasks])
    self.assertEquals(
        [(1, 0), (1, 0), (1, 0)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2, self.callback3]))

  def testBrokenCallbacks(self):
    """Tests that when callbacks return errors and are saved for later."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, 'token', 'secret'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 2
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    event_key = str(event.key())

    # Non-2xx responses (redirects and errors alike) count as failures.
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 302, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 404, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(0, 1), (0, 1), (0, 0)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2, self.callback3]))

    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 500, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(0, 1), (0, 1), (0, 1)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2, self.callback3]))

    # All three callbacks are recorded on the event for a later retry pass.
    work = EventToDeliver.all().get()
    sub_list = Subscription.get(work.failed_callbacks)
    callback_list = [sub.callback for sub in sub_list]
    self.assertEquals([self.callback1, self.callback2, self.callback3],
                      callback_list)
    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=1))
    self.assertEquals([event_key] * 2,
                      [t['params']['event_key'] for t in tasks])

  def testDeadlineError(self):
    """Tests that callbacks in flight at deadline will be marked as failed."""
    try:
      # Replace the async proxy's wait() so the handler hits the runtime
      # deadline before any urlfetch completes.
      def deadline():
        raise runtime.DeadlineExceededError()
      main.async_proxy.wait = deadline

      self.assertTrue(Subscription.insert(
          self.callback1, self.topic, 'token', 'secret'))
      self.assertTrue(Subscription.insert(
          self.callback2, self.topic, 'token', 'secret'))
      self.assertTrue(Subscription.insert(
          self.callback3, self.topic, 'token', 'secret'))
      main.EVENT_SUBSCRIBER_CHUNK_SIZE = 2
      event = EventToDeliver.create_event_for_topic(
          self.topic, main.ATOM, 'application/atom+xml',
          self.header_footer, self.test_payloads)
      event.put()
      event_key = str(event.key())
      self.handle('post', ('event_key', event_key))

      # All events should be marked as failed even though no urlfetches
      # were made.
      work = EventToDeliver.all().get()
      sub_list = Subscription.get(work.failed_callbacks)
      callback_list = [sub.callback for sub in sub_list]
      self.assertEquals([self.callback1, self.callback2], callback_list)

      self.assertEquals(event_key, testutil.get_tasks(
          main.EVENT_QUEUE, index=0, expected_count=1)['params']['event_key'])

      # In this case no reporting should happen, since we do not have
      # any more time in the runtime to report stats.
      self.assertEquals(
          [(0, 0), (0, 0), (0, 0)],
          main.DELIVERY_SCORER.get_scores(
              [self.callback1, self.callback2, self.callback3]))
    finally:
      main.async_proxy = async_apiproxy.AsyncAPIProxy()

  def testRetryLogic(self):
    """Tests that failed urls will be retried after subsequent failures.

    This is an end-to-end test for push delivery failures and retries. We'll
    simulate multiple times through the failure list.
    """
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback3, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback4, self.topic, 'token', 'secret'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    event_key = str(event.key())

    # First pass through all URLs goes full speed for two chunks.
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 404, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 204, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 302, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(0, 1), (1, 0), (0, 1), (0, 0)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2,
             self.callback3, self.callback4]))

    urlfetch_test_stub.instance.expect(
        'post', self.callback4, 500, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(0, 1), (1, 0), (0, 1), (0, 1)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2,
             self.callback3, self.callback4]))

    # Now the retries.
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 404, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 302, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback4, 500, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(0, 2), (1, 0), (0, 2), (0, 2)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2,
             self.callback3, self.callback4]))

    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 204, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 302, '', request_payload=self.expected_payload)
    urlfetch_test_stub.instance.expect(
        'post', self.callback4, 200, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(1, 2), (1, 0), (0, 3), (1, 2)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2,
             self.callback3, self.callback4]))

    urlfetch_test_stub.instance.expect(
        'post', self.callback3, 204, '', request_payload=self.expected_payload)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()
    self.assertEquals(
        [(1, 2), (1, 0), (1, 3), (1, 2)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2,
             self.callback3, self.callback4]))

    # Once every callback has succeeded the event is deleted; one task per
    # delivery pass was enqueued along the way.
    self.assertEquals([], list(EventToDeliver.all()))
    tasks = testutil.get_tasks(main.EVENT_QUEUE, expected_count=1)
    tasks.extend(testutil.get_tasks(main.EVENT_RETRIES_QUEUE, expected_count=3))
    self.assertEquals([event_key] * 4,
                      [t['params']['event_key'] for t in tasks])

  def testUrlFetchFailure(self):
    """Tests the UrlFetch API raising exceptions while sending notifications."""
    self.assertTrue(Subscription.insert(
        self.callback1, self.topic, 'token', 'secret'))
    self.assertTrue(Subscription.insert(
        self.callback2, self.topic, 'token', 'secret'))
    main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
    event = EventToDeliver.create_event_for_topic(
        self.topic, main.ATOM, 'application/atom+xml',
        self.header_footer, self.test_payloads)
    event.put()
    event_key = str(event.key())

    # API-level exceptions (not HTTP errors) must also count as failures.
    urlfetch_test_stub.instance.expect(
        'post', self.callback1, 200, '',
        request_payload=self.expected_payload, urlfetch_error=True)
    urlfetch_test_stub.instance.expect(
        'post', self.callback2, 200, '',
        request_payload=self.expected_payload, apiproxy_error=True)
    self.handle('post', ('event_key', event_key))
    urlfetch_test_stub.instance.verify_and_reset()

    work = EventToDeliver.all().get()
    sub_list = Subscription.get(work.failed_callbacks)
    callback_list = [sub.callback for sub in sub_list]
    self.assertEquals([self.callback1, self.callback2], callback_list)
    self.assertEquals(event_key, testutil.get_tasks(
        main.EVENT_RETRIES_QUEUE, index=0, expected_count=1)
        ['params']['event_key'])
    self.assertEquals(
        [(0, 1), (0, 1)],
        main.DELIVERY_SCORER.get_scores(
            [self.callback1, self.callback2]))
def testNotAllowed(self):
    """Tests pushing events to a URL that's not allowed due to scoring."""
    # Temporarily enable the DoS/scoring machinery so the blackhole applies.
    dos.DISABLE_FOR_TESTING = False
    try:
      main.DELIVERY_SCORER.blackhole([self.callback2])
      start_scores = main.DELIVERY_SCORER.get_scores([self.callback2])

      self.assertTrue(Subscription.insert(
          self.callback1, self.topic, 'token', 'secret'))
      self.assertTrue(Subscription.insert(
          self.callback2, self.topic, 'token', 'secret'))
      self.assertTrue(Subscription.insert(
          self.callback3, self.topic, 'token', 'secret'))
      main.EVENT_SUBSCRIBER_CHUNK_SIZE = 3
      # Only callback1 and callback3 should be contacted; callback2 is
      # blackholed, so no urlfetch expectation is registered for it.
      urlfetch_test_stub.instance.expect(
          'post', self.callback1, 204, '',
          request_payload=self.expected_payload)
      urlfetch_test_stub.instance.expect(
          'post', self.callback3, 204, '',
          request_payload=self.expected_payload)
      event = EventToDeliver.create_event_for_topic(
          self.topic, main.ATOM, 'application/atom+xml',
          self.header_footer, self.test_payloads)
      event.put()
      self.handle('post', ('event_key', str(event.key())))
      self.assertEquals([], list(EventToDeliver.all()))
      testutil.get_tasks(main.EVENT_QUEUE, expected_count=0)
      # callback2's score is unchanged from its blackholed starting value.
      self.assertEquals(
          [(1, 0)] + start_scores + [(1, 0)],
          main.DELIVERY_SCORER.get_scores(
              [self.callback1, self.callback2, self.callback3]))
    finally:
      dos.DISABLE_FOR_TESTING = True

################################################################################

class SubscribeHandlerTest(testutil.HandlerTestBase):

  handler_class = main.SubscribeHandler

  def setUp(self):
    """Sets up the test harness."""
    testutil.HandlerTestBase.setUp(self)
    self.challenge = 'this_is_my_fake_challenge_string'
    # Pin the random challenge so the expected verification querystring below
    # is deterministic.
    self.old_get_challenge = main.get_random_challenge
    main.get_random_challenge = lambda: self.challenge
    self.callback = 'http://example.com/good-callback'
    self.topic = 'http://example.com/the-topic'
    self.verify_token = 'the_token'
    # '%%' is a literal percent; the template is later filled with
    # template % 'subscribe' or template % 'unsubscribe'.
    self.verify_callback_querystring_template = (
        self.callback +
        '?hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')

  def tearDown(self):
    """Tears down the test harness."""
    testutil.HandlerTestBase.tearDown(self)
    main.get_random_challenge = self.old_get_challenge

  def verify_record_task(self, topic):
    """Tests there is a valid KnownFeedIdentity task enqueued.

    Args:
      topic: The topic the task should be for.

    Raises:
      AssertionError if the task isn't there.
    """
    task = testutil.get_tasks(main.MAPPINGS_QUEUE, index=0, expected_count=1)
    self.assertEquals(topic, task['params']['topic'])

  def testDebugFormRenders(self):
    """Tests that the debug subscription form renders on GET."""
    self.handle('get')
    self.assertTrue('<html>' in self.response_body())

  def testValidation(self):
    """Tests form validation."""
    # Bad mode
    self.handle('post',
        ('hub.mode', 'bad'),
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.mode' in self.response_body())

    # Empty callback
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', ''),
        ('hub.topic', self.topic),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.callback' in self.response_body())

    # Bad callback URL
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', 'httpf://example.com'),
        ('hub.topic', self.topic),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.callback' in self.response_body())

    # Empty topic
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', self.callback),
        ('hub.topic', ''),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.topic' in self.response_body())

    # Bad topic URL
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', self.callback),
        ('hub.topic', 'httpf://example.com'),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.topic' in self.response_body())

    # Bad verify
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.verify', 'meep'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.verify' in self.response_body())

    # Bad lease_seconds
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.verify', 'async'),
        ('hub.verify_token', 'asdf'),
        ('hub.lease_seconds', 'stuff'))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.lease_seconds' in self.response_body())

    # Bad lease_seconds zero padding will break things
    self.handle('post',
        ('hub.mode', 'subscribe'),
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.verify', 'async'),
        ('hub.verify_token', 'asdf'),
        ('hub.lease_seconds', '000010'))
    self.assertEquals(400, self.response_code())
    self.assertTrue('hub.lease_seconds' in self.response_body())

  def testUnsubscribeMissingSubscription(self):
    """Tests that deleting a non-existent subscription does nothing."""
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.verify', 'sync'),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())

  def testSynchronous(self):
    """Tests synchronous subscribe and unsubscribe."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'unsubscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

  def testAsynchronous(self):
    """Tests sync and async subscriptions cause the correct state transitions.

    Also tests that synchronous subscribes and unsubscribes will overwrite
    asynchronous requests.
    """
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    # Async subscription.
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(202, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)

    # Sync subscription overwrites.
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

    # Async unsubscribe queues removal, but does not change former state.
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(202, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)

    # Synch unsubscribe overwrites.
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'unsubscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

  def testResubscribe(self):
    """Tests that subscribe requests will reset pending unsubscribes."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    # Async subscription.
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(202, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)

    # Async un-subscription does not change previous subscription state.
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify', 'async'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(202, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)

    # Synchronous subscription overwrites.
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

  def testMaxLeaseSeconds(self):
    """Tests when the max lease period is specified."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    # A huge requested lease is clamped to the hub's maximum (864000s).
    self.verify_callback_querystring_template = (
        self.callback +
        '?hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '&hub.mode=%s'
        '&hub.lease_seconds=864000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token),
        ('hub.lease_seconds', '1000000000000000000'))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

  def testDefaultLeaseSeconds(self):
    """Tests when the lease_seconds parameter is omitted."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    # An empty lease_seconds falls back to the default (432000s).
    self.verify_callback_querystring_template = (
        self.callback +
        '?hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token),
        ('hub.lease_seconds', ''))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

  def testInvalidChallenge(self):
    """Tests when the returned challenge is bad."""
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 200, 'bad')
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)
    self.assertEquals(409, self.response_code())

  def testSynchronousConfirmFailure(self):
    """Tests when synchronous confirmations fail."""
    # Subscribe
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 500, '')
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)
    self.assertEquals(409, self.response_code())

    # Unsubscribe
    Subscription.insert(self.callback, self.topic, self.verify_token, 'secret')
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'unsubscribe', 500, '')
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'unsubscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is not None)
    self.assertEquals(409, self.response_code())

  def testAfterSubscriptionError(self):
    """Tests when an exception occurs after subscription."""
    for exception in (runtime.DeadlineExceededError(), db.Error(),
                      apiproxy_errors.Error()):
      def new_confirm(*args):
        raise exception
      main.hooks.override_for_test(main.confirm_subscription, new_confirm)
      try:
        self.handle('post',
            ('hub.callback', self.callback),
            ('hub.topic', self.topic),
            ('hub.mode', 'subscribe'),
            ('hub.verify', 'sync'),
            ('hub.verify_token', self.verify_token))
        self.assertEquals(503, self.response_code())
      finally:
        main.hooks.reset_for_test(main.confirm_subscription)

  def testSubscriptionError(self):
    """Tests when errors occur during subscription."""
    # URLFetch errors are probably the subscriber's fault, so we'll serve these
    # as a conflict.
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe',
        None, '', urlfetch_error=True)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(409, self.response_code())

    # An apiproxy error or deadline error will fall through and serve a 503,
    # since that means there's something wrong with our service.
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe',
        None, '', apiproxy_error=True)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(503, self.response_code())

    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe',
        None, '', deadline_error=True)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(503, self.response_code())

  def testCaseSensitive(self):
    """Tests that the case of topics, callbacks, and tokens are preserved."""
    self.topic += FUNNY
    self.callback += FUNNY
    self.verify_token += FUNNY
    sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)

    self.verify_callback_querystring_template = (
        self.callback +
        '?hub.verify_token=the_token%%2FCaSeSeNsItIvE'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic%%2FCaSeSeNsItIvE'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic)

  def testSubscribeNormalization(self):
    """Tests that the topic and callback URLs are properly normalized."""
    self.topic += OTHER_STRING
    orig_callback = self.callback
    self.callback += OTHER_STRING
    sub_key = Subscription.create_key_name(
        main.normalize_iri(self.callback),
        main.normalize_iri(self.topic))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    self.verify_callback_querystring_template = (
        orig_callback +
        '/~one:two/&='
        '?hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '%%2F%%7Eone%%3Atwo%%2F%%26%%3D'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', self.callback),
        ('hub.topic', self.topic),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', self.verify_token))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(main.normalize_iri(self.topic))

  def testSubscribeIri(self):
    """Tests when the topic, callback, verify_token, and secrets are IRIs."""
    topic = self.topic + FUNNY_UNICODE
    topic_utf8 = self.topic + FUNNY_UTF8
    callback = self.callback + FUNNY_UNICODE
    callback_utf8 = self.callback + FUNNY_UTF8
    verify_token = self.verify_token + FUNNY_UNICODE
    verify_token_utf8 = self.verify_token + FUNNY_UTF8

    sub_key = Subscription.create_key_name(
        main.normalize_iri(callback),
        main.normalize_iri(topic))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    self.verify_callback_querystring_template = (
        self.callback +
        '/blah/%%E3%%83%%96%%E3%%83%%AD%%E3%%82%%B0%%E8%%A1%%86'
        '?hub.verify_token=the_token%%2F'
        'blah%%2F%%E3%%83%%96%%E3%%83%%AD%%E3%%82%%B0%%E8%%A1%%86'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic%%2F'
        'blah%%2F%%25E3%%2583%%2596%%25E3%%2583%%25AD'
        '%%25E3%%2582%%25B0%%25E8%%25A1%%2586'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
        ('hub.callback', callback_utf8),
        ('hub.topic', topic_utf8),
        ('hub.mode', 'subscribe'),
        ('hub.verify', 'sync'),
        ('hub.verify_token', verify_token_utf8))
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic + FUNNY_IRI)

  def testSubscribeUnicode(self):
    """Tests when UTF-8 encoded bytes show up in the requests.

    Technically this isn't well-formed or allowed by the HTTP/URI spec, but
    people do it anyways and we may as well allow it.
    """
    quoted_topic = urllib.quote(self.topic)
    topic = self.topic + FUNNY_UNICODE
    topic_utf8 = self.topic + FUNNY_UTF8
    quoted_callback = urllib.quote(self.callback)
    callback = self.callback + FUNNY_UNICODE
    callback_utf8 = self.callback + FUNNY_UTF8
    quoted_verify_token = urllib.quote(self.verify_token)
    verify_token = self.verify_token + FUNNY_UNICODE
    verify_token_utf8 = self.verify_token + FUNNY_UTF8

    sub_key = Subscription.create_key_name(
        main.normalize_iri(callback),
        main.normalize_iri(topic))
    self.assertTrue(Subscription.get_by_key_name(sub_key) is None)
    self.verify_callback_querystring_template = (
        self.callback +
        '/blah/%%E3%%83%%96%%E3%%83%%AD%%E3%%82%%B0%%E8%%A1%%86'
        '?hub.verify_token=the_token%%2F'
        'blah%%2F%%E3%%83%%96%%E3%%83%%AD%%E3%%82%%B0%%E8%%A1%%86'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic%%2F'
        'blah%%2F%%25E3%%2583%%2596%%25E3%%2583%%25AD'
        '%%25E3%%2582%%25B0%%25E8%%25A1%%2586'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)

    # Send the raw UTF-8 bytes directly in the request body.
    payload = (
        'hub.callback=' + quoted_callback + FUNNY_UTF8 +
        '&hub.topic=' + quoted_topic + FUNNY_UTF8 +
        '&hub.mode=subscribe'
        '&hub.verify=sync'
        '&hub.verify_token=' + quoted_verify_token + FUNNY_UTF8)
    self.handle_body('post', payload)
    self.assertEquals(204, self.response_code())
    sub = Subscription.get_by_key_name(sub_key)
    self.assertTrue(sub is not None)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_record_task(self.topic + FUNNY_IRI)


class SubscribeHandlerThroughHubUrlTest(SubscribeHandlerTest):
  # Re-runs the whole SubscribeHandlerTest suite through the combined hub URL.

  handler_class = main.HubHandler

################################################################################

class SubscriptionConfirmHandlerTest(testutil.HandlerTestBase):

  handler_class = main.SubscriptionConfirmHandler

  def setUp(self):
    """Sets up the test fixture."""
    testutil.HandlerTestBase.setUp(self)
    self.callback = 'http://example.com/good-callback'
    self.topic = 'http://example.com/the-topic'
    self.challenge = 'this_is_my_fake_challenge_string'
    # Pin the random challenge so the expected querystring is deterministic.
    self.old_get_challenge = main.get_random_challenge
    main.get_random_challenge = lambda: self.challenge
    self.sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.verify_token = 'the_token'
    self.secret = 'teh secrat'
    self.verify_callback_querystring_template = (
        self.callback +
        '?hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')

  def tearDown(self):
    """Verify that all URL fetches occurred."""
    testutil.HandlerTestBase.tearDown(self)
    main.get_random_challenge = self.old_get_challenge
    urlfetch_test_stub.instance.verify_and_reset()

  def verify_task(self, next_state):
    """Verifies that a subscription worker task is present.

    Args:
      next_state: The next state the task should cause the Subscription
        to have.
    """
    task = testutil.get_tasks(main.SUBSCRIPTION_QUEUE,
                              index=0, expected_count=1)
    params = task['params']
    self.assertEquals(self.sub_key, params['subscription_key_name'])
    self.assertEquals(next_state, params['next_state'])

  def verify_retry_task(self,
                        eta,
                        next_state,
                        verify_token=None,
                        secret=None,
                        auto_reconfirm=False):
    """Verifies that a subscription worker retry task is present.

    Args:
      eta: The ETA the retry task should have.
      next_state: The next state the task should cause the Subscription
        to have.
      verify_token: The verify token the retry task should have. Defaults
        to the current token.
      secret: The secret the retry task should have. Defaults to the
        current secret.
      auto_reconfirm: The confirmation type the retry task should have.
    """
    task = testutil.get_tasks(main.SUBSCRIPTION_QUEUE,
                              index=1, expected_count=2)
    params = task['params']
    self.assertEquals(testutil.task_eta(eta), task['eta'])
    self.assertEquals(self.sub_key, params['subscription_key_name'])
    self.assertEquals(next_state, params['next_state'])
    self.assertEquals(verify_token or self.verify_token,
                      params['verify_token'])
    self.assertEquals(secret or self.secret, params['secret'])
    self.assertEquals(str(auto_reconfirm), params['auto_reconfirm'])

  def verify_no_record_task(self):
    """Tests there is no KnownFeedIdentity task enqueued.

    Raises:
      AssertionError if the task is there.
    """
    task = testutil.get_tasks(main.MAPPINGS_QUEUE, expected_count=0)

  def testNoWork(self):
    """Tests when a task is enqueued for a Subscription that doesn't exist."""
    self.handle('post', ('subscription_key_name', 'unknown'),
                ('next_state', Subscription.STATE_VERIFIED))

  def testSubscribeSuccessful(self):
    """Tests when a subscription task is successful."""
    self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED))
    self.verify_task(Subscription.STATE_VERIFIED)
    self.verify_no_record_task()

    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.assertEquals(self.verify_token, sub.verify_token)
    self.assertEquals(self.secret, sub.secret)

  def testSubscribeSuccessfulQueryStringArgs(self):
    """Tests a subscription callback with querystring args."""
    self.callback += '?some=query&string=params&to=mess&it=up'
    self.sub_key = Subscription.create_key_name(self.callback, self.topic)
    self.assertTrue(db.get(KnownFeed.create_key(self.topic)) is None)
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    # The hub params are appended with '&' since the callback already has a
    # query string.
    self.verify_callback_querystring_template = (
        self.callback +
        '&hub.verify_token=the_token'
        '&hub.challenge=this_is_my_fake_challenge_string'
        '&hub.topic=http%%3A%%2F%%2Fexample.com%%2Fthe-topic'
        '&hub.mode=%s'
        '&hub.lease_seconds=432000')
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'subscribe', 200,
        self.challenge)
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED))
    self.verify_task(Subscription.STATE_VERIFIED)
    self.verify_no_record_task()

    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.assertEquals(self.verify_token, sub.verify_token)
    self.assertEquals(self.secret, sub.secret)

  def testSubscribeFailed(self):
    """Tests when a subscription task fails."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 500, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)
    self.assertEquals(1, sub.confirm_failures)
    self.assertEquals(self.verify_token, sub.verify_token)
    self.assertEquals(self.secret, sub.secret)
    self.verify_retry_task(sub.eta, Subscription.STATE_VERIFIED,
                           verify_token=self.verify_token, secret=self.secret)

  def testSubscribeConflict(self):
    """Tests when confirmation hits a conflict and archives the subscription."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 404, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_TO_DELETE, sub.subscription_state)
    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=1)

  def testSubscribeBadChallengeResponse(self):
    """Tests when the subscriber responds with a bad challenge."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 200, 'bad')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_NOT_VERIFIED, sub.subscription_state)
    self.assertEquals(1, sub.confirm_failures)
    self.verify_retry_task(sub.eta, Subscription.STATE_VERIFIED)

  def testUnsubscribeSuccessful(self):
    """Tests when an unsubscription request is successful."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    Subscription.request_remove(self.callback, self.topic, self.verify_token)
    urlfetch_test_stub.instance.expect(
        'get', self.verify_callback_querystring_template % 'unsubscribe', 200,
        self.challenge)
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('next_state', Subscription.STATE_TO_DELETE))
    self.verify_task(Subscription.STATE_TO_DELETE)
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)

  def testUnsubscribeFailed(self):
    """Tests when an unsubscription task fails."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    Subscription.request_remove(self.callback, self.topic, self.verify_token)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'unsubscribe', 500, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('next_state', Subscription.STATE_TO_DELETE),
                ('secret', self.secret))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(1, sub.confirm_failures)
    self.verify_retry_task(sub.eta, Subscription.STATE_TO_DELETE)

  def testUnsubscribeGivesUp(self):
    """Tests when an unsubscription task completely gives up."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    Subscription.request_remove(self.callback, self.topic, self.verify_token)
    sub = Subscription.get_by_key_name(self.sub_key)
    sub.confirm_failures = 100
    sub.put()
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'unsubscribe', 500, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('next_state', Subscription.STATE_TO_DELETE))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(100, sub.confirm_failures)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.verify_task(Subscription.STATE_TO_DELETE)

  def testSubscribeOverwrite(self):
    """Tests that subscriptions can be overwritten with new parameters."""
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    second_token = 'second_verify_token'
    second_secret = 'second secret'
    new_template = self.verify_callback_querystring_template.replace(
        self.verify_token, second_token)
    urlfetch_test_stub.instance.expect(
        'get', new_template % 'subscribe', 200, self.challenge)
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', second_token),
                ('secret', second_secret),
                ('next_state', Subscription.STATE_VERIFIED))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_VERIFIED, sub.subscription_state)
    self.assertEquals(second_token, sub.verify_token)
    self.assertEquals(second_secret, sub.secret)
    self.verify_no_record_task()

  def testConfirmError(self):
    """Tests when an exception is raised while confirming a subscription.

    This will just propagate up in the stack and cause the task to retry
    via the normal task queue retries.
    """
    called = [False]
    Subscription.request_insert(
        self.callback, self.topic, self.verify_token, self.secret)
    # All exceptions should just fall through.
    def new_confirm(*args, **kwargs):
      called[0] = True
      raise db.Error()
    try:
      main.hooks.override_for_test(main.confirm_subscription, new_confirm)
      try:
        self.handle('post', ('subscription_key_name', self.sub_key))
      except db.Error:
        pass
      else:
        self.fail()
    finally:
      main.hooks.reset_for_test(main.confirm_subscription)
    self.assertTrue(called[0])
    self.verify_task(Subscription.STATE_VERIFIED)

  def testRenewNack(self):
    """Tests when an auto-subscription-renewal returns a 404."""
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 404, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('secret', self.secret),
                ('next_state', Subscription.STATE_VERIFIED),
                ('auto_reconfirm', 'True'))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_TO_DELETE, sub.subscription_state)
    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=0)

  def testRenewErrorFailure(self):
    """Tests when an auto-subscription-renewal returns errors repeatedly.

    In this case, since it's auto-renewal, the subscription should be
    dropped.
    """
    self.assertTrue(Subscription.get_by_key_name(self.sub_key) is None)
    Subscription.insert(
        self.callback, self.topic, self.verify_token, self.secret)
    sub = Subscription.get_by_key_name(self.sub_key)
    sub.confirm_failures = 100
    sub.put()
    urlfetch_test_stub.instance.expect('get',
        self.verify_callback_querystring_template % 'subscribe', 500, '')
    self.handle('post',
                ('subscription_key_name', self.sub_key),
                ('verify_token', self.verify_token),
                ('next_state', Subscription.STATE_VERIFIED),
                ('auto_reconfirm', 'True'))
    sub = Subscription.get_by_key_name(self.sub_key)
    self.assertEquals(Subscription.STATE_TO_DELETE, sub.subscription_state)
    testutil.get_tasks(main.SUBSCRIPTION_QUEUE, expected_count=0)


class SubscriptionReconfirmHandlerTest(testutil.HandlerTestBase):
  """Tests for the periodic subscription reconfirming worker."""

  def testFullFlow(self):
    """Tests a full flow through the reconfirm worker."""
    self.now = time.time()
    self.called = False
    # Stand-in for the mapreduce start_map call; verifies the job spec.
    def start_map(*args, **kwargs):
      self.assertEquals({
          'name': 'Reconfirm expiring subscriptions',
          'reader_spec': 'mapreduce.input_readers.DatastoreInputReader',
          'queue_name': 'polling',
          'handler_spec': 'offline_jobs.SubscriptionReconfirmMapper.run',
          'shard_count': 4,
          'mapper_parameters': {
              'entity_kind': 'main.Subscription',
              'processing_rate': 100000,
              'threshold_timestamp':
                  int(self.now + main.SUBSCRIPTION_CHECK_BUFFER_SECONDS),
          },
          'mapreduce_parameters': {
              'done_callback': '/work/cleanup_mapper',
              'done_callback_queue': 'polling',
          },
      }, kwargs)
      self.called = True

    def create_handler():
      return main.SubscriptionReconfirmHandler(
          now=lambda: self.now,
          start_map=start_map)
    self.handler_class = create_handler

    os.environ['HTTP_X_APPENGINE_QUEUENAME'] = main.POLLING_QUEUE
    try:
      self.handle('get')
      task = testutil.get_tasks(main.POLLING_QUEUE, index=0, expected_count=1)
      self.handle('post')
    finally:
      del os.environ['HTTP_X_APPENGINE_QUEUENAME']

    self.assertTrue(self.called)


class SubscriptionCleanupHandlerTest(testutil.HandlerTestBase):
  """Tests fo
the SubscriptionCleanupHandler.""" handler_class = main.SubscriptionCleanupHandler def testEmpty(self): """Tests cleaning up empty subscriptions.""" self.handle('get') def testCleanup(self): """Tests cleaning up a few deleted subscriptions.""" callback = 'http://example.com/callback/%d' topic = 'http://example.com/mytopic' self.assertTrue(Subscription.insert(callback % 1, topic, '', '')) self.assertTrue(Subscription.insert(callback % 2, topic, '', '')) self.assertTrue(Subscription.insert(callback % 3, topic, '', '')) self.assertEquals(3 * [Subscription.STATE_VERIFIED], [s.subscription_state for s in Subscription.all()]) Subscription.archive(callback % 1, topic) self.handle('get') self.assertEquals(2 * [Subscription.STATE_VERIFIED], [s.subscription_state for s in Subscription.all()]) class CleanupMapperHandlerTest(testutil.HandlerTestBase): """Tests for the CleanupMapperHandler.""" handler_class = main.CleanupMapperHandler def testMissing(self): """Tests cleaning up a mapreduce that's not present.""" self.assertEquals([], list(mapreduce.model.MapreduceState.all())) os.environ['HTTP_MAPREDUCE_ID'] = '12345' try: self.handle('post') finally: del os.environ['HTTP_MAPREDUCE_ID'] self.assertEquals([], list(mapreduce.model.MapreduceState.all())) def testPresent(self): """Tests cleaning up a mapreduce that's present.""" mapreduce_id = mapreduce.control.start_map( name='Reconfirm expiring subscriptions', handler_spec='offline_jobs.SubscriptionReconfirmMapper.run', reader_spec='mapreduce.input_readers.DatastoreInputReader', mapper_parameters=dict( processing_rate=100000, entity_kind='main.Subscription')) self.assertEquals(1, len(list(mapreduce.model.MapreduceState.all()))) os.environ['HTTP_MAPREDUCE_ID'] = mapreduce_id try: self.handle('post') finally: del os.environ['HTTP_MAPREDUCE_ID'] self.assertEquals([], list(mapreduce.model.MapreduceState.all())) ################################################################################ PollingMarker = main.PollingMarker class 
TakePollingActionTest(unittest.TestCase): """Tests for the take_polling_action function.""" def setUp(self): """Sets up the test harness.""" testutil.setup_for_testing() def testFailure(self): """Tests when inserting a new feed to fetch raises an exception.""" called = [False] topics = ['one', 'two', 'three'] @classmethod def new_insert(cls, topic_list, memory_only=True): called[0] = True self.assertFalse(memory_only) self.assertEquals(topic_list, topics) raise db.Error('Mock DB error') old_insert = main.FeedToFetch.insert main.FeedToFetch.insert = new_insert try: main.take_polling_action(['one', 'two', 'three'], '') finally: main.FeedToFetch.insert = old_insert self.assertTrue(called[0]) class PollBootstrapHandlerTest(testutil.HandlerTestBase): handler_class = main.PollBootstrapHandler def setUp(self): """Sets up the test harness.""" testutil.HandlerTestBase.setUp(self) self.original_chunk_size = main.BOOSTRAP_FEED_CHUNK_SIZE main.BOOSTRAP_FEED_CHUNK_SIZE = 2 os.environ['HTTP_X_APPENGINE_QUEUENAME'] = main.POLLING_QUEUE def tearDown(self): """Tears down the test harness.""" testutil.HandlerTestBase.tearDown(self) main.BOOSTRAP_FEED_CHUNK_SIZE = self.original_chunk_size del os.environ['HTTP_X_APPENGINE_QUEUENAME'] def testFullFlow(self): """Tests a full flow through multiple chunks.""" topic = 'http://example.com/feed1' topic2 = 'http://example.com/feed2' topic3 = 'http://example.com/feed3-124' # alphabetical on the hash of this db.put([KnownFeed.create(topic), KnownFeed.create(topic2), KnownFeed.create(topic3)]) self.assertTrue(FeedToFetch.get_by_topic(topic) is None) self.assertTrue(FeedToFetch.get_by_topic(topic2) is None) self.assertTrue(FeedToFetch.get_by_topic(topic3) is None) # This will repeatedly insert the initial task to start the polling process. 
self.handle('get') self.handle('get') self.handle('get') task = testutil.get_tasks(main.POLLING_QUEUE, index=0, expected_count=1) sequence = task['params']['sequence'] self.assertEquals('bootstrap', task['params']['poll_type']) # Now run the post handler with the params from this first task. It will # enqueue another task that starts *after* the last one in the chunk. self.handle('post', *task['params'].items()) self.assertTrue(FeedToFetch.get_by_topic(topic) is not None) self.assertTrue(FeedToFetch.get_by_topic(topic2) is not None) self.assertTrue(FeedToFetch.get_by_topic(topic3) is None) # Running this handler again will overwrite the FeedToFetch instances, # but it will not duplicate the polling queue Task in the chain of # iterating through all KnownFeed entries or the fork-join queue task that # will do the actual fetching. self.handle('post', *task['params'].items()) task = testutil.get_tasks(main.POLLING_QUEUE, index=1, expected_count=2) self.assertEquals(sequence, task['params']['sequence']) self.assertEquals('bootstrap', task['params']['poll_type']) self.assertEquals(str(KnownFeed.create_key(topic2)), task['params']['current_key']) self.assertTrue(task['name'].startswith(sequence)) # Now running another post handler will handle the rest of the feeds. self.handle('post', *task['params'].items()) self.assertTrue(FeedToFetch.get_by_topic(topic) is not None) self.assertTrue(FeedToFetch.get_by_topic(topic2) is not None) self.assertTrue(FeedToFetch.get_by_topic(topic3) is not None) # Running this post handler again will do nothing because we de-dupe on # the continuation task to prevent doing any more work in the current cycle. 
self.handle('post', *task['params'].items()) task_list = testutil.get_tasks(main.POLLING_QUEUE, expected_count=3) # Deal with a stupid race condition task = task_list[2] if 'params' not in task: task = task_list[3] self.assertEquals(sequence, task['params']['sequence']) self.assertEquals('bootstrap', task['params']['poll_type']) self.assertEquals(str(KnownFeed.create_key(topic3)), task['params']['current_key']) self.assertTrue(task['name'].startswith(sequence)) # Starting the cycle again will do nothing. self.handle('get') testutil.get_tasks(main.POLLING_QUEUE, expected_count=3) # Resetting the next start time to before the present time will # cause the iteration to start again. the_mark = PollingMarker.get() the_mark.next_start = \ datetime.datetime.utcnow() - datetime.timedelta(seconds=120) db.put(the_mark) self.handle('get') task_list = testutil.get_tasks(main.POLLING_QUEUE, expected_count=4) task = task_list[3] self.assertNotEquals(sequence, task['params']['sequence']) def testRecord(self): """Tests when the parameter "poll_type=record" is specified.""" topic = 'http://example.com/feed1' topic2 = 'http://example.com/feed2' topic3 = 'http://example.com/feed3-124' # alphabetical on the hash of this db.put([KnownFeed.create(topic), KnownFeed.create(topic2), KnownFeed.create(topic3)]) self.assertTrue(FeedToFetch.get_by_topic(topic) is None) self.assertTrue(FeedToFetch.get_by_topic(topic2) is None) self.assertTrue(FeedToFetch.get_by_topic(topic3) is None) # This will insert the initial task to start the polling process. self.handle('get', ('poll_type', 'record')) task = testutil.get_tasks(main.POLLING_QUEUE, index=0, expected_count=1) sequence = task['params']['sequence'] self.assertEquals('record', task['params']['poll_type']) # Now run the post handler with the params from this first task. It will # enqueue another task that starts *after* the last one in the chunk. 
self.handle('post', *task['params'].items()) task = testutil.get_tasks(main.POLLING_QUEUE, index=1, expected_count=2) self.assertEquals('record', task['params']['poll_type']) # Now running another post handler will handle the rest of the feeds. self.handle('post', *task['params'].items()) # And there will be tasks in the MAPPINGS_QUEUE to update all of the # KnownFeeds that we have found. task = testutil.get_tasks(main.MAPPINGS_QUEUE, index=0, expected_count=3) self.assertEquals(topic, task['params']['topic']) task = testutil.get_tasks(main.MAPPINGS_QUEUE, index=1, expected_count=3) self.assertEquals(topic2, task['params']['topic']) task = testutil.get_tasks(main.MAPPINGS_QUEUE, index=2, expected_count=3) self.assertEquals(topic3, task['params']['topic']) ################################################################################ KnownFeedIdentity = main.KnownFeedIdentity class RecordFeedHandlerTest(testutil.HandlerTestBase): """Tests for the RecordFeedHandler flow.""" def setUp(self): """Sets up the test harness.""" self.now = [datetime.datetime.utcnow()] self.handler_class = lambda: main.RecordFeedHandler(now=lambda: self.now[0]) testutil.HandlerTestBase.setUp(self) self.old_identify = main.feed_identifier.identify self.expected_calls = [] self.expected_results = [] def new_identify(content, feed_type): self.assertEquals(self.expected_calls.pop(0), (content, feed_type)) result = self.expected_results.pop(0) if isinstance(result, Exception): raise result else: return result main.feed_identifier.identify = new_identify self.topic = 'http://www.example.com/meepa' self.feed_id = 'my_feed_id' self.content = 'my_atom_content' def tearDown(self): """Tears down the test harness.""" main.feed_identifier.identify = self.old_identify testutil.HandlerTestBase.tearDown(self) urlfetch_test_stub.instance.verify_and_reset() def verify_update(self): """Verifies the feed_id has been added for the topic.""" feed_id = 
KnownFeedIdentity.get(KnownFeedIdentity.create_key(self.feed_id)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertEquals([self.topic], feed_id.topics) self.assertEquals(feed.feed_id, self.feed_id) self.assertEquals(feed.feed_id, feed_id.feed_id) def testNewFeed(self): """Tests recording details for a known feed.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(self.feed_id) self.handle('post', ('topic', self.topic)) self.verify_update() def testNewFeedFetchFailure(self): """Tests when fetching a feed to record returns a non-200 response.""" urlfetch_test_stub.instance.expect('GET', self.topic, 404, '') self.handle('post', ('topic', self.topic)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertTrue(feed.feed_id is None) def testNewFeedFetchException(self): """Tests when fetching a feed to record returns an exception.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, '', urlfetch_error=True) self.handle('post', ('topic', self.topic)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertTrue(feed.feed_id is None) def testParseRetry(self): """Tests when parsing as Atom fails, but RSS is successful.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(xml.sax.SAXException('Mock error')) self.expected_calls.append((self.content, 'rss')) self.expected_results.append(self.feed_id) self.handle('post', ('topic', self.topic)) self.verify_update() def testParseFails(self): """Tests when parsing completely fails.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(xml.sax.SAXException('Mock error')) self.expected_calls.append((self.content, 'rss')) self.expected_results.append(xml.sax.SAXException('Mock error 
2')) self.handle('post', ('topic', self.topic)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertTrue(feed.feed_id is None) def testParseFindsNoIds(self): """Tests when no SAX exception is raised but no feed ID is found.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(None) self.expected_calls.append((self.content, 'rss')) self.expected_results.append(None) self.handle('post', ('topic', self.topic)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertTrue(feed.feed_id is None) def testParseFindsEmptyId(self): """Tests when no SAX exception is raised but the feed ID is empty.""" urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append('') self.handle('post', ('topic', self.topic)) feed = KnownFeed.get(KnownFeed.create_key(self.topic)) self.assertTrue(feed.feed_id is None) def testExistingFeedNeedsRefresh(self): """Tests recording details for an existing feed that needs a refresh.""" KnownFeed.create(self.topic).put() self.now[0] += datetime.timedelta( seconds=main.FEED_IDENTITY_UPDATE_PERIOD + 1) urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(self.feed_id) self.handle('post', ('topic', self.topic)) self.verify_update() def testExistingFeedNoRefresh(self): """Tests recording details when the feed does not need a refresh.""" feed = KnownFeed.create(self.topic) feed.feed_id = 'meep' feed.put() self.handle('post', ('topic', self.topic)) # Confirmed by no calls to urlfetch or feed_identifier. 
def testExistingFeedNoIdRefresh(self): """Tests that a KnownFeed with no ID will be refreshed.""" feed = KnownFeed.create(self.topic) urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(self.feed_id) self.handle('post', ('topic', self.topic)) self.verify_update() def testNewFeedRelation(self): """Tests when the feed ID relation changes for a topic.""" KnownFeedIdentity.update(self.feed_id, self.topic) feed = KnownFeed.create(self.topic) feed.feed_id = self.feed_id feed.put() self.now[0] += datetime.timedelta( seconds=main.FEED_IDENTITY_UPDATE_PERIOD + 1) new_feed_id = 'other_feed_id' urlfetch_test_stub.instance.expect('GET', self.topic, 200, self.content) self.expected_calls.append((self.content, 'atom')) self.expected_results.append(new_feed_id) self.handle('post', ('topic', self.topic)) feed_id = KnownFeedIdentity.get(KnownFeedIdentity.create_key(new_feed_id)) feed = KnownFeed.get(feed.key()) self.assertEquals([self.topic], feed_id.topics) self.assertEquals(feed.feed_id, new_feed_id) self.assertEquals(feed.feed_id, feed_id.feed_id) # Old KnownFeedIdentity should have been deleted. 
self.assertTrue(KnownFeedIdentity.get( KnownFeedIdentity.create_key(self.feed_id)) is None) class RecordFeedHandlerWithParsingTest(testutil.HandlerTestBase): """Tests for the RecordFeedHandler that excercise parsing.""" handler_class = main.RecordFeedHandler def testAtomParsing(self): """Tests parsing an Atom feed.""" topic = 'http://example.com/atom' feed_id = 'my-id' data = ('<?xml version="1.0" encoding="utf-8"?>' '<feed><id>my-id</id></feed>') urlfetch_test_stub.instance.expect('GET', topic, 200, data) self.handle('post', ('topic', topic)) known_id = KnownFeedIdentity.get(KnownFeedIdentity.create_key(feed_id)) feed = KnownFeed.get(KnownFeed.create_key(topic)) self.assertEquals([topic], known_id.topics) self.assertEquals(feed.feed_id, feed_id) self.assertEquals(feed.feed_id, known_id.feed_id) def testRssParsing(self): """Tests parsing an Atom feed.""" topic = 'http://example.com/rss' feed_id = 'http://example.com/blah' data = ('<?xml version="1.0" encoding="utf-8"?><rss><channel>' '<link>http://example.com/blah</link></channel></rss>') urlfetch_test_stub.instance.expect('GET', topic, 200, data) self.handle('post', ('topic', topic)) known_id = KnownFeedIdentity.get(KnownFeedIdentity.create_key(feed_id)) feed = KnownFeed.get(KnownFeed.create_key(topic)) self.assertEquals([topic], known_id.topics) self.assertEquals(feed.feed_id, feed_id) self.assertEquals(feed.feed_id, known_id.feed_id) ################################################################################ class HookManagerTest(unittest.TestCase): """Tests for the HookManager and Hook classes.""" def setUp(self): """Sets up the test harness.""" self.hooks_directory = tempfile.mkdtemp() if not os.path.exists(self.hooks_directory): os.makedirs(self.hooks_directory) self.valueA = object() self.valueB = object() self.valueC = object() self.funcA = lambda *a, **k: self.valueA self.funcB = lambda *a, **k: self.valueB self.funcC = lambda *a, **k: self.valueC self.globals_dict = { 'funcA': self.funcA, 'funcB': 
self.funcB, 'funcC': self.funcC, } self.manager = main.HookManager() self.manager.declare(self.funcA) self.manager.declare(self.funcB) self.manager.declare(self.funcC) def tearDown(self): """Tears down the test harness.""" shutil.rmtree(self.hooks_directory, True) def write_hook(self, filename, content): """Writes a test hook to the hooks directory. Args: filename: The relative filename the hook should have. content: The Python code that should go in the hook module. """ hook_file = open(os.path.join(self.hooks_directory, filename), 'w') try: hook_file.write('#!/usr/bin/env python\n') hook_file.write(content) finally: hook_file.close() def load_hooks(self): """Causes the hooks to load.""" self.manager.load(hooks_path=self.hooks_directory, globals_dict=self.globals_dict) def testNoHooksDir(self): """Tests when there is no hooks directory present at all.""" hooks_path = tempfile.mktemp() self.assertFalse(os.path.exists(hooks_path)) self.manager.load(hooks_path=hooks_path, globals_dict=self.globals_dict) for entry, hooks in self.manager._mapping.iteritems(): self.assertEquals(0, len(hooks)) def testNoHooks(self): """Tests loading a directory with no hooks modules.""" self.load_hooks() self.assertEquals(self.valueA, self.manager.execute(self.funcA)) self.assertEquals(self.valueB, self.manager.execute(self.funcB)) self.assertEquals(self.valueC, self.manager.execute(self.funcC)) def testOneGoodHook(self): """Tests a single good hook.""" self.write_hook('my_hook.py',""" class MyHook(Hook): def inspect(self, args, kwargs): return True def __call__(self, *args, **kwargs): return 'fancy string' register(funcA, MyHook()) """) self.load_hooks() self.assertEquals('fancy string', self.manager.execute(self.funcA)) def testDifferentHooksInOneModule(self): """Tests different hook methods in a single hook module.""" self.write_hook('my_hook.py',""" class MyHook(Hook): def __init__(self, value): self.value = value def inspect(self, args, kwargs): return True def __call__(self, *args, 
**kwargs): return self.value register(funcA, MyHook('fancy A')) register(funcB, MyHook('fancy B')) register(funcC, MyHook('fancy C')) """) self.load_hooks() self.assertEquals('fancy A', self.manager.execute(self.funcA)) self.assertEquals('fancy B', self.manager.execute(self.funcB)) self.assertEquals('fancy C', self.manager.execute(self.funcC)) def testBadHookModule(self): """Tests a hook module that's bad and throws exception on load.""" self.write_hook('my_hook.py',"""raise Exception('Doh')""") self.assertRaises( Exception, self.load_hooks) def testIncompleteHook(self): """Tests that an incomplete hook implementation will die on execute.""" self.write_hook('my_hook1.py',""" class MyHook(Hook): def inspect(self, args, kwargs): return True register(funcA, MyHook()) """) self.load_hooks() self.assertRaises( AssertionError, self.manager.execute, self.funcA) def testHookModuleOrdering(self): """Tests that hook modules are loaded and applied in order.""" self.write_hook('my_hook1.py',""" class MyHook(Hook): def inspect(self, args, kwargs): args[0].append(1) return False register(funcA, MyHook()) """) self.write_hook('my_hook2.py',""" class MyHook(Hook): def inspect(self, args, kwargs): args[0].append(2) return False register(funcA, MyHook()) """) self.write_hook('my_hook3.py',""" class MyHook(Hook): def inspect(self, args, kwargs): return True def __call__(self, *args, **kwargs): return 'peanuts' register(funcA, MyHook()) """) self.load_hooks() value_list = [5] self.assertEquals('peanuts', self.manager.execute(self.funcA, value_list)) self.assertEquals([5, 1, 2], value_list) def testHookBadRegistration(self): """Tests when registering a hook for an unknown callable.""" self.write_hook('my_hook1.py',""" class MyHook(Hook): def inspect(self, args, kwargs): return False register(lambda: None, MyHook()) """) self.assertRaises( main.InvalidHookError, self.load_hooks) def testMultipleRegistration(self): """Tests that the first hook is called when two are registered.""" 
self.write_hook('my_hook.py',""" class MyHook(Hook): def __init__(self, value): self.value = value def inspect(self, args, kwargs): args[0].append(self.value) return True def __call__(self, *args, **kwargs): return self.value register(funcA, MyHook('fancy first')) register(funcA, MyHook('fancy second')) """) self.load_hooks() value_list = ['hello'] self.assertEquals('fancy first', self.manager.execute(self.funcA, value_list)) self.assertEquals(['hello', 'fancy first', 'fancy second'], value_list) ################################################################################ if __name__ == '__main__': dos.DISABLE_FOR_TESTING = True unittest.main()
nilq/baby-python
python
import json
import boto3
import os
import urllib.parse

# AWS clients are created once at module load so they are reused across
# warm Lambda invocations.
s3 = boto3.client('s3')
# Amazon Textract client
textract = boto3.client('textract')


def getTextractData(bucketName, documentKey):
    """Run Textract text detection on an S3-hosted document image.

    Args:
        bucketName: name of the S3 bucket holding the image.
        documentKey: object key of the image inside that bucket.

    Returns:
        The detected text, one recognized line per '\\n'-terminated line.
    """
    # Ask Textract to read the document straight from S3.
    response = textract.detect_document_text(
        Document={
            'S3Object': {
                'Bucket': bucketName,
                'Name': documentKey
            }
        })

    # Each Block describes one item Textract recognized; keep only the
    # LINE blocks (whole lines of text) and join them with newlines.
    detected_lines = [
        block['Text'] + '\n'
        for block in response['Blocks']
        if block['BlockType'] == 'LINE'
    ]
    return ''.join(detected_lines)


def writeTextractToS3File(textractData, bucketName, createdS3Document):
    """Write the detected text back to S3 as a .txt sibling of the source.

    Args:
        textractData: the text to store.
        bucketName: destination bucket (same as the source bucket).
        createdS3Document: key of the source document; its extension is
            replaced with '.txt' to build the output key.
    """
    print('Loading writeTextractToS3File')
    # Same key as the uploaded document, with the extension swapped for .txt.
    output_key = os.path.splitext(createdS3Document)[0] + '.txt'
    s3.put_object(Body=textractData, Bucket=bucketName, Key=output_key)
    print('Generated ' + output_key)


def lambda_handler(event, context):
    """Entry point: fired by the S3 upload trigger.

    Extracts the bucket/key from the S3 event record, runs OCR on the
    object, and writes the result next to it as a .txt file.  Any failure
    is logged and re-raised so Lambda records the invocation as an error.
    """
    record = event['Records'][0]['s3']
    bucket = record['bucket']['name']
    # Keys arrive URL-encoded in S3 events; decode them before use.
    key = urllib.parse.unquote_plus(record['object']['key'],
                                    encoding='utf-8')
    try:
        detected_text = getTextractData(bucket, key)
        writeTextractToS3File(detected_text, bucket, key)
        return 'Concluído!'
    except Exception as e:
        print(e)
        print('Erro ao obter objeto {} do bucket {}.'.format(key, bucket))
        raise e
nilq/baby-python
python
# Number of audio frames handled per processing block.
# NOTE(review): presumably this must match the audio I/O buffer size used by
# the consumer of these constants -- confirm against the playback code.
BLOCK_SIZE = 1024

# Backing-track stems for the song "lamprey", one WAV file per instrument.
# NOTE(review): relative paths -- they assume a specific working directory.
BACKING_FNs = ['../../songs/lamprey/drums.wav',
               '../../songs/lamprey/bass.wav',
               '../../songs/lamprey/piano.wav',
               '../../songs/lamprey/violin.wav']
nilq/baby-python
python
#!/usr/bin/env python
"""find_profit is O(n) over a list, given a window, to find the maximum
profit possible given a single pair of trades taking place in that window"""

import unittest


def find_profit(prices, window):
    """Find the best single buy-then-sell profit within a sliding window.

    Given a certain window size and a list of prices, find the highest
    profit possible if exactly one share is bought then sold within that
    period.

    Args:
        prices: iterable of prices (only __iter__ is required of it).
        window: size of the sliding window constraining the trade.

    Returns:
        The maximum achievable profit, or 0 if no profitable trade exists.
    """
    # back_prices keeps a copy of the most recent prices so we never index
    # into `prices` directly (it only needs to support iteration).
    back_prices = []
    # pivot is the index of the lowest price in the current window.
    pivot = None
    # next_pivot is the index of the lowest price in the window after pivot;
    # pivot is moved here when it falls out of the window.
    next_pivot = None
    # Accumulated maximum profit seen so far.
    profit = 0
    for i, price in enumerate(prices):
        # Record the current price and trim history to the window length.
        back_prices.append(price)
        while len(back_prices) > window + 1:
            back_prices.pop(0)
        # back_prices[j - i - 1] is the price at absolute index j, because
        # back_prices[-1] is the current price (index i).
        # Test whether we've found a new lowest price in the window.
        if pivot is None or price < back_prices[pivot - i - 1]:
            pivot = i
            # Bump next_pivot past the new pivot.  BUG FIX: the original
            # called max(next_pivot, pivot + 1), which raises TypeError on
            # Python 3 when next_pivot is still None (Python 2 silently
            # ordered None below every int).  The explicit None guard
            # reproduces the Python 2 result on both versions.
            if next_pivot is None:
                next_pivot = pivot + 1
            else:
                next_pivot = max(next_pivot, pivot + 1)
        # Test whether we've found a lower price after the pivot.
        if next_pivot is None or (next_pivot <= i and
                                  price < back_prices[next_pivot - i - 1]):
            next_pivot = i
        # The pivot has fallen out of the window: advance it.
        if i - pivot == window:
            pivot = next_pivot
            next_pivot = pivot + 1
        # Update the profit accumulator with a sale at the current price.
        profit = max(profit, price - back_prices[pivot - i - 1])
    # Return the accumulated profit once done.
    return profit


# pylint: disable=R0904
class StockProfitTests(unittest.TestCase):
    """Unit tests for the find_profit function"""

    def test_increase(self):
        """Test an increasing window size with a simple list"""
        self.assertEqual(find_profit([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 2), 1.0)
        self.assertEqual(find_profit([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 3), 2.0)
        self.assertEqual(find_profit([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 4), 3.0)

    def test_window_sizes(self):
        """Test various difficult lists with window sizes larger than the
        best size possible"""
        self.assertEqual(find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5), 3.0)
        self.assertEqual(find_profit([7.0, 5.0, 6.0, 4.0, 5.0,
                                      3.0, 4.0, 2.0, 3.0, 1.0], 5), 1.0)
        self.assertEqual(find_profit([4.0, 3.0, 2.0, 4.0, 3.0,
                                      1.0, 1.1, 1.2, 1.3, 1.4], 5), 2.0)

    def test_shifting(self):
        """Test a growing window, where each increase makes for a different
        profit"""
        self.assertEqual(
            find_profit([2.0, 3.0, 1.0, 2.0, 4.0, 5.0, 7.0, 8.0], 2), 2.0)
        self.assertEqual(
            find_profit([2.0, 3.0, 1.0, 2.0, 4.0, 5.0, 7.0, 8.0], 3), 3.0)
        self.assertEqual(
            find_profit([2.0, 3.0, 1.0, 2.0, 4.0, 5.0, 7.0, 8.0], 4), 5.0)
        self.assertEqual(
            find_profit([2.0, 3.0, 1.0, 2.0, 4.0, 5.0, 7.0, 8.0], 5), 6.0)
        self.assertEqual(
            find_profit([2.0, 3.0, 1.0, 2.0, 4.0, 5.0, 7.0, 8.0], 6), 7.0)


if __name__ == "__main__":
    unittest.main()
nilq/baby-python
python
# Generated by Django 3.1.2 on 2020-10-27 22:37 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tailscout_app', '0005_auto_20201015_2047'), ] operations = [ migrations.AlterField( model_name='job', name='bacteria', field=models.CharField(choices=[('acinetobacter baumannii', 'Bacteria_1'), ('klebsiella pnuemoniae', 'Bacteria_2'), ('escherichia coli', 'Bacteria_3'), ('campylobacter jejuni', 'Bacteria_4')], max_length=256), ), ]
nilq/baby-python
python