index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
22,200
7bf95886e5e8b2c82d039f97e3d59e325b1ea163
def print_text(text): print text return text
22,201
cb4fab9267409a213aab4b4cbfd8a1fad09ae644
""" no more space """ class Solution: def merge_sorted_array(self, num1, m, num2, n): if len(num1) == 0: return num2 if len(num2) == 0: return num1 num1.extend([None] * n) while m != 0 and n != 0: if num1[m - 1] < num2[n - 1]: num1[m + n - 1] = num2[n - 1] n -= 1 else: num1[m + n - 1] = num1[m - 1] m -= 1 return num1 s = Solution() print(s.merge_sorted_array([1, 2, 3], 3, [1, 2, 3, 4], 4))
22,202
535319491a78b687345c0c3003a9c38be17a2508
import mysql.connector from mysql.connector import errorcode try: cnn=mysql.connector.connect( user='root', password='tinni123', host='localhost', database='mobileshop') print ("it works!!!!") except mysql.connector.Error as e: if e.errno== errorcode.ER_ACCESS_DENIED_ERROR: print("something is wrong") elif e.errno==errorcode.ER_BAD_DB_ERROR: print("no database") else: print(e) print("WELCOME TO PHONE CRAFT MOBILE SHOP!") mydb=cnn.cursor() #searching for mobiles def search(): print("Hello! Welcome to the shop!") print("these are the phones we have:") sqlite = """SELECT sno,mobile_name,price from mobile_shop order by sno asc""" mydb.execute(sqlite) records = mydb.fetchall() for r in records: print("Serial number=",r[0]) print("Mobile_name=", r[1]) print("Price= ", r[2]) print("\n") sno= input("PLEASE ENTER THE SERIAL NUMBER OF THE PHONE YOU ARE LOOKING FOR (THIS IS JUST FOR SHOWING YOU THE DETAILS OF YOUR FAVOURITE PHONES)") mydb.execute(f"SELECT * FROM mobile_shop WHERE sno={sno}") results=mydb.fetchall() for row in results: print("Serial number=",row[0]) print("\n") print("Mobile_name=", row[1]) print("Price= ", row[2]) print("\n") print("Operating_system= ", row[3]) print("\n") print("Memory_storage_sim=", row[4]) print("\n") print("Camera=", row[5]) print("\n") print("Battery= ", row[6]) print("\n") print("Processor=", row[7]) print("\n") print("Display_features=", row[8]) print("\n") print("Display_technology=", row[9]) print("\n") print("Warranty=", row[10]) print("\n") print("With_the_box=", row[11]) print("\n") print("Model_no=", row[12]) print("\n") cnn.commit() mydb=cnn.cursor() #shopping def shopping(): while True: a=input("do you want to look for more phones?Y/N OR y/n") if a=="Y" or a=="y": sno= input("PLEASE ENTER THE SERIAL NUMBER OF THE PHONE YOU ARE LOOKING FOR (THIS IS JUST FOR SHOWING YOU THE DETAILS OF YOUR FAVOURITE PHONES)") mydb.execute(f"SELECT * FROM mobile_shop WHERE sno={sno}") results=mydb.fetchall() for row in results: print("Serial 
number=",row[0]) print("\n") print("Mobile_name=", row[1]) print("Price= ", row[2]) print("\n") print("Operating_system= ", row[3]) print("\n") print("Memory_storage_sim=", row[4]) print("\n") print("Camera=", row[5]) print("\n") print("Battery= ", row[6]) print("\n") print("Processor=", row[7]) print("\n") print("Display_features=", row[8]) print("\n") print("Display_technology=", row[9]) print("\n") print("Warranty=", row[10]) print("\n") print("With_the_box=", row[11]) print("\n") print("Model_no=", row[12]) print("\n") cnn.commit() else: break def cart(): sql=("select mobile_name from mobile_shop") mydb.execute(sql) result=mydb.fetchall(); sql1=("select sno from mobile_shop") mydb.execute(sql1) re=mydb.fetchall(); sql2=("select price from mobile_shop") mydb.execute(sql2) res=mydb.fetchall(); mydb.execute(f"create table FINALBILL (serialno int(50), mobilename varchar(100), price varchar(100))") x=input("type the serial number of the phone you want to buy") x1=(f"SELECT sno,mobile_name,price FROM mobile_shop WHERE sno={x}") mydb.execute(x1) r1=mydb.fetchall(); y=list(r1) cnn.commit() cart1=[] list(cart1) for i in y: print(i) cart1+=i a=cart1[0] print(a) b=cart1[1] print(b) c=cart1[2] print(c) query="INSERT INTO finalbill (serialno,mobilename,price)\ VALUES ('%s', '%s', '%s')" % (a,str(b),str(c)) mydb.execute(query) cnn.commit() cart1.clear() print(cart1) #do you want to add more? 
def more(): sql=("select mobile_name from mobile_shop") mydb.execute(sql) result=mydb.fetchall(); sql1=("select sno from mobile_shop") mydb.execute(sql1) re=mydb.fetchall(); sql2=("select price from mobile_shop") mydb.execute(sql2) res=mydb.fetchall(); x=input("type the serial number of the phone you want to buy") x1=(f"SELECT sno,mobile_name,price FROM mobile_shop WHERE sno={x}") mydb.execute(x1) r1=mydb.fetchall(); y=list(r1) cnn.commit() cart1=[] list(cart1) for i in y: print(i) cart1+=i a=cart1[0] print(a) b=cart1[1] print(b) c=cart1[2] print(c) query="INSERT INTO finalbill (serialno,mobilename,price)\ VALUES ('%s', '%s', '%s')" % (a,str(b),str(c)) mydb.execute(query) cnn.commit() cart1.clear() print(cart1) #adding more def add(): for i in range(0,50): a=input("do you want to buy more phones?Y/N OR y/n") if a=="y" or a=="Y": more() else: print("thank you for shopping with us, we will shortly show you the bill now") break #deleting from cart def delete(): sql1=("select * from finalbill order by serialno asc") mydb.execute(sql1) result=mydb.fetchall(); for row in result: print("Serial number=",row[0]) print("Mobile_name=", row[1]) print("Price= ", row[2]) print("\n") deldel=input("do you want to remove any item from your cart?") while (True): if deldel=='y' or deldel=='Y': x=input("enter the serial number of the phone you want to delete from the cart") sql=(f"delete from finalbill where serialno={x}") mydb.execute(sql) cnn.commit() e=input("do you want to delete more from the cart?") if e=='y' or e=='Y': sqlr=(f"delete from finalbill where serialno={x}") mydb.execute(sqlr) cnn.commit() else: break else: break sql2=("select * from finalbill order by serialno asc") mydb.execute(sql2) result2=mydb.fetchall(); for row2 in result2: print("these are the phones you have bought:") print("Serial number=",row2[0]) print("Mobile_name=", row2[1]) print("Price= ", row2[2]) print("\n") z=("select sum(price) from finalbill") mydb.execute(z) r1=mydb.fetchone(); global w w=r1 
print(w) #billing part in files def billing(): f=open("mobile1.txt",'w') a=input("enter your name") b=input("enter your address") c=input("enter your mobile number") d=input("type mode of payment, cash on delivery or online payment?") g=str(w) f.write(a) f.write(b) f.write(c) f.write(g) f.close() f=open("mobile1.txt",'r') e=f.readline().split(',') print("Name:",a,"\n","address:",b,"\n","mobile number:",c,"\n",d,"\n","the total price of the items you have bought is",g) user=input("enter a username") password=input("enter a password") username=[user,password] print(username) search() shopping() cart() add() delete() billing() mydb.execute("drop table finalbill") print("thank you for shopping with us")
22,203
a8e2089eae32406f72d72442fcb50c3e8b3984cd
from datetime import datetime from enum import Enum class PolicyEffect(Enum): ALLOW = "allow" DENY = "deny" class Policy: def __init__(self, scope: str, ref: str = "*", access: PolicyEffect = PolicyEffect.ALLOW): self.ref = ref self.scope = scope self.effect = access self.created_at = datetime.utcnow() @classmethod def allow(cls, scope: str, ref: str = "*") -> "Policy": return Policy(scope, ref, PolicyEffect.ALLOW) @classmethod def deny(cls, scope: str, ref: str = "*") -> "Policy": return Policy(scope, ref, PolicyEffect.DENY)
22,204
d38268beebf3514afcbc34258d7a629e9c489588
import turtle #creating turtle pen pen= turtle.Turtle() window = turtle.Screen() #screen with canavas #set the fillcolor pen.fillcolor('yellow') #starting the filling code pen.begin_fill() #drawing the circle of radius r pen.up() pen.goto(0,-120) pen.circle(200) # ending the filling of the color pen.end_fill() #eye 1 pen.fillcolor('white') pen.begin_fill() pen.up() pen.goto(-70,130) pen.down() pen.pensize(2) pen.circle(40) pen.end_fill() # eyeball 1 pen.fillcolor('black') pen.begin_fill() pen.up() pen.goto(-70,140) pen.down() pen.circle(20) pen.end_fill() #eye 2 pen.fillcolor('white') pen.begin_fill() pen.up() pen.goto(70,130) pen.down() pen.pensize(2) pen.circle(40) pen.end_fill() # eyeball 2 pen.fillcolor('black') pen.begin_fill() pen.up() pen.goto(70,140) pen.down() pen.circle(20) pen.end_fill() #mouth pen.up() pen.goto(-100,20) pen.down() pen.pensize(4) pen.right(90) pen.circle(100,180) pen.left(90) pen.forward(200)
22,205
9899bce4f9d60a2ea7f3a82be3bd5c00271bea58
import pandas as pd from FlokAlgorithmLocal import FlokDataFrame, FlokAlgorithmLocal import json import sys, os import numpy as np import librosa # import cv2 from pandas import Series, DataFrame class Batch_AudioLogMel(FlokAlgorithmLocal): def array2str(self,arr): return " ".join([str(x) for x in arr]) def run(self, inputDataSets, params): audio_dict = inputDataSets.get(0) text_dict={} for audio_name, audio in audio_dict.items(): y = audio[0] sr = audio[1] melspec = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=int(params.get("vector_size"))) logmelspec = librosa.amplitude_to_db(melspec) # 转换到对数刻度 text_name=audio_name.split('.')[0]+"_LogMelSpec.txt" #将向量变为行 logmelspec = logmelspec.T ans_list=[self.array2str(vec) for vec in logmelspec] text_dict[text_name]="\n".join(ans_list) result = FlokDataFrame() result.addDF(text_dict) return result if __name__ == "__main__": all_info = json.loads(sys.argv[1]) # f = open("test.json", encoding = 'utf-8') # all_info = json.loads(f) # all_info = { # "input": ["in.mp3"], # "inputFormat": ["mp3"], # "inputLocation":["local_fs"], # "output": ["test_mel.csv"], # "outputFormat": ["csv"], # "outputLocation": ['local_fs'], # "parameters": {"n_mels": 128 # } # } params = all_info["parameters"] inputPaths = all_info["input"] inputTypes = all_info["inputFormat"] inputLocation = all_info["inputLocation"] outputPaths = all_info["output"] outputTypes = all_info["outputFormat"] outputLocation = all_info["outputLocation"] algorithm = Batch_AudioLogMel() audioList = algorithm.read(inputPaths, inputTypes, inputLocation, outputPaths, outputTypes) result = algorithm.run(audioList, params) algorithm.write(outputPaths, result, outputTypes, outputLocation)
22,206
8b784c8afa1d77dab491319bdd5d40eb48e7dc04
import pygame class Powerup(object): x, y = '', '' desc = "powerup" # kind of powerups: # speed of moving player ==> player_speed ==> S # number of bombs ==> bombs_no ==> N # range of bomb ==> bombs_range ==> R kind = '' # view will be changed from brick to powerup when on view should be not brick but powerup view = "brick" def __init__(self, pos, kind): self.x = pos[0] self.y = pos[1] self.rect = pygame.Rect(self.x, self.y, 50, 50) self.kind = kind def change_view(self, view): self.view = view def get_powerup(self): return self
22,207
bb294c8b0a61c52c7da59074b65c0aae493bb605
from pico2d import* x if __name__ == "__main__": test_ui()
22,208
abe44a9ad110c4fff723d1ee65948917f6c912d2
def convertDir(): direction = int(input("Enter either 1 for metric to imperial or 2 for imperial to metric: ")) value = float(input("Enter the ")) ##if direction == 1: dir def convert2inch(direction, value): if
22,209
b65ab832ccc323cc7cf1eb2eabf707724762d56b
import unittest from pyquilted.quilted.group import Group class TestGroup(unittest.TestCase): def setUp(self): self.group = Group() def test_group(self): self.assertTrue(hasattr(self.group, 'blocks')) self.assertTrue(hasattr(self.group, 'add_section')) self.assertTrue(hasattr(self.group, 'get_sections')) self.assertIsInstance(self.group.blocks, list) self.assertTrue(callable(self.group.add_section)) self.assertTrue(callable(self.group.get_sections)) def test_group_add_section(self): self.group.add_section({'one': 1}) self.assertEqual(len(self.group.blocks), 1) def test_group_get_section_single(self): self.group.add_section({'one': 1}) sblocks = self.group.get_sections() self.assertTrue('one' in sblocks) def test_group_get_section_more(self): self.group.add_section({'one': 1}) self.group.add_section({'two': 2}) mblocks = self.group.get_sections() self.assertTrue('group' in mblocks) if __name__ == '__main__': unittest.main()
22,210
48cca75988b8455093b6ca774bbeea0212871027
from django.shortcuts import render from events.models import Event def home(request): return render( request, 'pew/home.html', _home_context_dict() ) def _home_context_dict(): return { 'upcoming_events': Event.upcoming.all() }
22,211
500f351e704a5d378b65a2f7ad0207b68962075f
from . import apis from flask import request, session, Markup import json from config import RAINVERSE_CODE @apis.route('/login', methods=['POST']) def login(): if not request.form: return json.dumps({'status': False}, ensure_ascii=False) else: username = Markup(request.form['username']).striptags() if 0 < len(username) < 16: auth = 1 else: return json.dumps({'status': False}, ensure_ascii=False) if username == RAINVERSE_CODE: auth = 2 session['auth'] = str(auth) print(session) return json.dumps({'status': True, 'auth': auth, 'username': username}, ensure_ascii=False) @apis.route('/logout', methods=['GET']) def logout(): session.pop('auth', None) return json.dumps({'status': True}, ensure_ascii=False)
22,212
343cc0ff7584afd4dae893c97211107ddab8ca83
# -*- coding: utf-8 -*- from app import app from .admin import admin from .user import user from .pink import pink from .cf_pink import cf_pink from .zy_pink import zy_pink #这里分别给app注册了两个蓝图admin,user #参数url_prefix='/xxx'的意思是设置request.url中的url前缀, #即当request.url是以/admin或者/user的情况下才会通过注册的蓝图的视图方法处理请求并返回 app.register_blueprint(admin,url_prefix='/admin') app.register_blueprint(user, url_prefix='/user') app.register_blueprint(pink, url_prefix='/pink') app.register_blueprint(cf_pink, url_prefix='/cf_pink') app.register_blueprint(zy_pink, url_prefix='/zy_pink')
22,213
fde6e62c2c3569710105049b7374a924c819a974
# -*- coding: utf-8 -*- from setuptools import setup, find_packages import os version = '0.0.1' setup( name='bugsystem', version=version, description='Bug Tracking System', author='Nishta', author_email='anandh.nishta@gmail.com', packages=find_packages(), zip_safe=False, include_package_data=True, install_requires=("frappe",), )
22,214
2c69ce3d106c242a0fa8873bca147813b4adc010
import torch import pickle import datetime import torch.nn as nn import src.NeuralNetwork.parameters as parameters from torch.utils.data import DataLoader from src.NeuralNetwork.Dataset.dataset_utils import create_dataset, collate, generate_all_inputs_and_labels from src.NeuralNetwork.GCN.GCN_net import GCNNet from src.NeuralNetwork.Gated_GCN.gated_gcn_net import GatedGCNNet MODEL_PARAMETERS_PATH = parameters.MODEL_PARAMETERS_PATH INPUTS_LABELS_PATH = parameters.INPUTS_LABELS_PATH INPUTS_LABELS_PREFIX = parameters.INPUTS_LABELS_PREFIX ALNS_STATISTICS_FILE = parameters.ALNS_STATISTICS_FILE INPUTS_LABELS_NAME = parameters.INPUTS_LABELS_NAME HIDDEN_NODE_DIMENSIONS = parameters.HIDDEN_NODE_DIMENSIONS HIDDEN_EDGE_DIMENSIONS = parameters.HIDDEN_EDGE_DIMENSIONS HIDDEN_LINEAR_DIMENSIONS = parameters.HIDDEN_LINEAR_DIMENSIONS OUTPUT_SIZE = parameters.OUTPUT_SIZE DROPOUT_PROBABILITY = parameters.DROPOUT_PROBABILITY MAX_EPOCH = parameters.MAX_EPOCH BATCH_SIZE = parameters.BATCH_SIZE INITIAL_LEARNING_RATE = parameters.INITIAL_LEARNING_RATE MIN_LEARNING_RATE = parameters.MIN_LEARNING_RATE LEARNING_RATE_DECREASE_FACTOR = parameters.LEARNING_RATE_DECREASE_FACTOR PATIENCE = parameters.PATIENCE DISPLAY_EVERY_N_EPOCH = parameters.DISPLAY_EVERY_N_EPOCH NETWORK_GCN = parameters.NETWORK_GCN NETWORK_GATEDGCN = parameters.NETWORK_GATEDGCN def make_training_step(graph_convolutional_network, loss_function, softmax_function, optimizer, scheduler): def train_step(graph_batch, label_batch): logits = graph_convolutional_network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0) loss = loss_function(logits, label_batch) optimizer.zero_grad() loss.backward() optimizer.step() return loss.detach().item() return train_step def evaluate(network, loss_function, softmax_function, test_loader, test_set_size): """ Evaluate a neural network on a given test set. 
Parameters ---------- softmax_function loss_function test_loader : the test dataset network : the network to evaluate Returns ------- The proportion of right predictions """ running_loss = 0.0 confusion_matrix = { # Of shape [predicted value][real value] 0: {0: 0, 1: 0, 2: 0}, 1: {0: 0, 1: 0, 2: 0}, 2: {0: 0, 1: 0, 2: 0}, } batch_size = -1 network.eval() with torch.no_grad(): correct = 0 for graph_batch, label_batch in test_loader: if batch_size == -1: batch_size = label_batch.size(0) logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0) running_loss += loss_function(logits, label_batch).detach().item() predicted_classes = torch.argmax(logits, dim=1).detach() correct += (predicted_classes == label_batch).sum().item() for predicted_class, label in zip(predicted_classes, label_batch): confusion_matrix[predicted_class.item()][label.item()] += 1 if batch_size <= 0: print("Error : batch size is {}".format(batch_size)) exit(1) return correct / test_set_size, running_loss / len(test_loader), confusion_matrix def evaluate_random(test_loader, test_set_size): correct = 0 batch_size = -1 for _, label_batch in test_loader: if batch_size == -1: batch_size = label_batch.size(0) random_tensor = torch.randint(0, OUTPUT_SIZE, size=label_batch.size(), device=label_batch.device) correct += (random_tensor == label_batch).sum().item() return correct / test_set_size def evaluate_with_null_iteration(test_loader, test_set_size): correct = 0 batch_size = -1 for _, label_batch in test_loader: if batch_size == -1: batch_size = label_batch.size(0) ones_tensor = torch.ones(size=label_batch.size(), device=label_batch.device) correct += (ones_tensor == label_batch).sum().item() return correct / test_set_size def compute_classes_weights(train_loader, test_loader, device): training_set_size = 0 test_set_size = 0 number_of_elements_train_set = { 0: 0, 1: 0, 2: 0 } number_of_elements_test_set = { 0: 0, 1: 0, 2: 0 } for _, labels in train_loader: for label in 
labels: number_of_elements_train_set[label.item()] += 1 training_set_size += 1 for _, labels in test_loader: for label in labels: number_of_elements_test_set[label.item()] += 1 test_set_size += 1 print("{:^20}{:^7.2%}{:^7.2%}{:^7.2%}".format( 'Training set', round(number_of_elements_train_set[0] / training_set_size, 4), round(number_of_elements_train_set[1] / training_set_size, 4), round(number_of_elements_train_set[2] / training_set_size, 4), )) print("{:^20}{:^7.2%}{:^7.2%}{:^7.2%}".format( 'Test set', round(number_of_elements_test_set[0] / test_set_size, 4), round(number_of_elements_test_set[1] / test_set_size, 4), round(number_of_elements_test_set[2] / test_set_size, 4), )) print("Dataset size : {}".format(training_set_size + test_set_size)) print("Training set size : {}".format(training_set_size)) train_weights = [ 1 - number_of_elements_train_set[0] / training_set_size, 1 - number_of_elements_train_set[1] / training_set_size, 1 - number_of_elements_train_set[2] / training_set_size, ] test_weights = [ 1 - number_of_elements_test_set[0] / test_set_size, 1 - number_of_elements_test_set[1] / test_set_size, 1 - number_of_elements_test_set[2] / test_set_size, ] print("{:^20}{:^7.2}{:^7.2}{:^7.2}".format( 'Training weights', round(train_weights[0], 4), round(train_weights[1], 4), round(train_weights[2], 4), )) print("{:^20}{:^7.2}{:^7.2}{:^7.2}".format( 'Test weights', round(test_weights[0], 4), round(test_weights[1], 4), round(test_weights[2], 4), )) return torch.tensor(train_weights, device=device), torch.tensor(test_weights, device=device), \ training_set_size, test_set_size def display_confusion_matrix(confusion_matrix): print("Confusion matrix :") print("{:^20}|{:^7}|{:^7}|{:^7}".format('Predicted \\ Real', '0', '1', '2')) print("-" * 38) print("{1:^20}|{0[0]:^7}|{0[1]:^7}|{0[2]:^7}".format(confusion_matrix[0], '0 (delta > 0)')) print("-" * 38) print("{1:^20}|{0[0]:^7}|{0[1]:^7}|{0[2]:^7}".format(confusion_matrix[1], '1 (delta = 0)')) print("-" * 38) 
print("{1:^20}|{0[0]:^7}|{0[1]:^7}|{0[2]:^7}".format(confusion_matrix[2], '2 (delta < 0)')) def save_model_parameters(graph_convolutional_network, optimizer, softmax_function_name, initial_learning_rate, epoch, training_loss, test_loss, device): name_model_parameters_file = 'GCNparams_ep' + str(epoch) name_model_parameters_file += '_lr' + str(initial_learning_rate) name_model_parameters_file += '_dev' + device name_model_parameters_file += '_' + softmax_function_name name_model_parameters_file += '.pt' torch.save({'graph_convolutional_network_state': graph_convolutional_network.state_dict(), 'optimizer_state': optimizer.state_dict(), 'epoch': epoch, 'training_loss': training_loss, 'test_loss': test_loss}, MODEL_PARAMETERS_PATH + name_model_parameters_file) print("Successfully saved the model's parameters in {}".format(MODEL_PARAMETERS_PATH + name_model_parameters_file)) def main(recreate_dataset=False, batch_size=BATCH_SIZE, test_batch_size=BATCH_SIZE, weight_loss=False, network=NETWORK_GCN, hidden_node_dimensions=None, hidden_edge_dimensions=None, hidden_linear_dimensions=None, output_size=OUTPUT_SIZE, dropout_probability=DROPOUT_PROBABILITY, max_epoch=MAX_EPOCH, initial_learning_rate=INITIAL_LEARNING_RATE, min_learning_rate=MIN_LEARNING_RATE, learning_rate_decrease_factor=LEARNING_RATE_DECREASE_FACTOR, patience=PATIENCE, save_parameters_on_exit=True, load_parameters_from_file=None, **keywords_args): # Avoid mutable default arguments if hidden_edge_dimensions is None: hidden_edge_dimensions = HIDDEN_EDGE_DIMENSIONS if hidden_node_dimensions is None: hidden_node_dimensions = HIDDEN_NODE_DIMENSIONS if hidden_linear_dimensions is None: hidden_linear_dimensions = HIDDEN_LINEAR_DIMENSIONS """ Use GPU if available. 
""" if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' if recreate_dataset: print("Creating dataset from ALNS statistics :") if 'alns_statistics_file' not in keywords_args: alns_statistics_file = ALNS_STATISTICS_FILE else: alns_statistics_file = keywords_args['alns_statistics_file'] """ Create the train and test sets. """ inputs, labels = generate_all_inputs_and_labels(alns_statistics_file, device) print("Created dataset !") if 'pickle_dataset' in keywords_args and type(keywords_args['pickle_dataset']) is bool: if keywords_args['pickle_dataset']: inputs_labels_name = INPUTS_LABELS_PREFIX + alns_statistics_file # Cannot use torch.save on DGL graphs, see https://github.com/dmlc/dgl/issues/1524 # Using pickle.dump instead with open(INPUTS_LABELS_PATH + inputs_labels_name, 'wb') as dataset_file: pickle.dump({ 'inputs': inputs, 'labels': labels }, dataset_file) print("Successfully saved the data in {}".format(INPUTS_LABELS_PATH + inputs_labels_name)) else: if 'inputs_labels_name' not in keywords_args: inputs_labels_name = INPUTS_LABELS_NAME else: inputs_labels_name = keywords_args['inputs_labels_name'] print("Retrieving dataset {} ... ".format(inputs_labels_name), end='', flush=True) with open(INPUTS_LABELS_PATH + inputs_labels_name, 'rb') as dataset_file: dataset = pickle.load(dataset_file) inputs = dataset['inputs'] labels = dataset['labels'] print("Done !", flush=True) train_set, test_set = create_dataset(inputs, labels) train_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True, collate_fn=collate) test_loader = DataLoader(dataset=test_set, batch_size=test_batch_size, collate_fn=collate) number_of_node_features = len(train_loader.dataset[0][0].ndata['n_feat'][0]) number_of_edge_features = len(train_loader.dataset[0][0].edata['e_feat'][0]) """ Display the proportion of null iterations (iterations that do not change the cost value of the CVRP solution. 
""" train_weights, test_weights, training_set_size, test_set_size = compute_classes_weights(train_loader, test_loader, device) """ Create the gated graph convolutional network """ if network == NETWORK_GATEDGCN: net_params = { 'in_dim': number_of_node_features, 'in_dim_edge': number_of_edge_features, 'hidden_dim': hidden_node_dimensions[0], 'out_dim': hidden_node_dimensions[-1], 'n_classes': OUTPUT_SIZE, 'dropout': dropout_probability, 'L': len(HIDDEN_NODE_DIMENSIONS), 'readout': 'mean', 'graph_norm': False, 'batch_norm': False, 'residual': False, 'edge_feat': True, 'device': device } graph_convolutional_network = GatedGCNNet(net_params) else: graph_convolutional_network = GCNNet(input_node_features=number_of_node_features, hidden_node_dimension_list=hidden_node_dimensions, input_edge_features=number_of_edge_features, hidden_edge_dimension_list=hidden_edge_dimensions, hidden_linear_dimension_list=hidden_linear_dimensions, output_feature=output_size, dropout_probability=dropout_probability, device=device) graph_convolutional_network = graph_convolutional_network.to(device) print("Created GCNNet", flush=True) """ Define the optimizer, the learning rate scheduler and the loss function. We use the Adam optimizer and a MSE loss. 
""" optimizer = torch.optim.Adam(graph_convolutional_network.parameters(), lr=initial_learning_rate) scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=learning_rate_decrease_factor, min_lr=min_learning_rate, verbose=True) if weight_loss: loss_function_training = nn.CrossEntropyLoss(weight=train_weights) loss_function_testing = nn.CrossEntropyLoss(weight=test_weights) else: loss_function_training = nn.CrossEntropyLoss() loss_function_testing = nn.CrossEntropyLoss() softmax_function = nn.LogSoftmax(dim=1) train_step = make_training_step(graph_convolutional_network, loss_function_training, softmax_function, optimizer, scheduler) print("#" * 50) print("# Date : {0:%y}-{0:%m}-{0:%d}_{0:%H}-{0:%M}".format(datetime.datetime.now())) print("# Using {}".format(NETWORK_GATEDGCN)) print("# Hidden node dimensions : {}".format(hidden_node_dimensions)) print("# Hidden edge dimensions : {}".format(hidden_edge_dimensions)) print("# Hidden linear dimensions : {}".format(hidden_linear_dimensions)) print("# Dropout probability : {}".format(dropout_probability)) print("# Max epoch : {}".format(max_epoch)) print("# Initial learning rate : {}".format(initial_learning_rate)) print("# Device : {}".format(device)) print("# Training batch size : {}".format(batch_size)) print("# Testing batch size : {}".format(test_batch_size)) print("#" * 50) """ Resume training state """ initial_epoch = 0 training_loss = [] test_loss = [] if load_parameters_from_file is not None: try: training_state = torch.load(MODEL_PARAMETERS_PATH + load_parameters_from_file, map_location=torch.device(device)) graph_convolutional_network.load_state_dict(training_state['graph_convolutional_network_state']) graph_convolutional_network.train() optimizer.load_state_dict(training_state['optimizer_state']) initial_epoch = training_state['epoch'] training_loss = training_state['training_loss'] test_loss = training_state['test_loss'] print("Loaded parameters values from 
{}".format(MODEL_PARAMETERS_PATH + load_parameters_from_file)) print("Resuming at epoch {}".format(initial_epoch)) except (pickle.UnpicklingError, TypeError, RuntimeError, KeyError) as exception_value: print("Unable to load parameters from {}".format(MODEL_PARAMETERS_PATH + load_parameters_from_file)) print("Exception : {}".format(exception_value)) should_continue = '' while should_continue != 'y' and should_continue != 'n': should_continue = input("Continue anyway with random parameters ? (y/n) ") if should_continue == 'n': exit(1) print("\nStarting training {}\n".format(chr(8987))) """ Train the network. """ for epoch in range(initial_epoch, max_epoch + 1): try: running_loss = 0.0 if epoch % DISPLAY_EVERY_N_EPOCH == 1: accuracy, test_loss_element, confusion_matrix = evaluate(graph_convolutional_network, loss_function_testing, softmax_function, test_loader, test_set_size) test_loss.append(test_loss_element) random_accuracy = evaluate_random(test_loader, test_set_size) guessing_null_iteration_accuracy = evaluate_with_null_iteration(test_loader, test_set_size) print("Epoch {:d}, loss {:.6f}, test_loss {:.6f}, accuracy {:.4f}, random accuracy {:.4f}, " "always guessing null iterations {:.4f}" .format(epoch, training_loss[-1], test_loss[-1], accuracy, random_accuracy, guessing_null_iteration_accuracy)) display_confusion_matrix(confusion_matrix) for graph_batch, label_batch in train_loader: loss = train_step(graph_batch, label_batch) running_loss += loss epoch_loss = running_loss / len(train_loader) scheduler.step(epoch_loss) training_loss.append(epoch_loss) except KeyboardInterrupt: print("Received keyboard interrupt.") print("Computing confusion matrix on training data.") _, _, train_confusion_matrix = evaluate(graph_convolutional_network, loss_function_testing, softmax_function, train_loader, training_set_size) display_confusion_matrix(train_confusion_matrix) if save_parameters_on_exit: print("Saving parameters before quiting ...", flush=True) 
save_model_parameters(graph_convolutional_network, optimizer, str(softmax_function.__class__()).partition('(')[0], initial_learning_rate, epoch, training_loss, test_loss, device) exit(0) print("Computing confusion matrix on training data.") _, _, train_confusion_matrix = evaluate(graph_convolutional_network, loss_function_testing, softmax_function, train_loader, training_set_size) display_confusion_matrix(train_confusion_matrix) if save_parameters_on_exit: save_model_parameters(graph_convolutional_network, optimizer, str(softmax_function.__class__()).partition('(')[0], initial_learning_rate, max_epoch, training_loss, test_loss, device) if __name__ == '__main__': recreate = 1 if recreate: main(recreate_dataset=True, alns_statistics_file='stats_30it50in.pickle', pickle_dataset=True, save_parameters_on_exit=False, network=NETWORK_GATEDGCN, max_epoch=20) else: main(inputs_labels_name='inputs_labels_' 'stats_1' '000iter.pickle', network=NETWORK_GATEDGCN, save_parameters_on_exit=False, max_epoch=99)
22,215
e4393a552a066c918c4dff4d285085d378ac74ab
__author__ = 'Juxi Leitner, juxi@idsia.ch' from pybrain.rl.environments import EpisodicTask from docking import DockingEnvironment from math import fabs class DockingTask(EpisodicTask): __name__ = "Docking Task" logBuffer = None logging = False logfileName = None timeNeuron_Threshold = 0.99 """ The task of ... """ def __init__(self, env = None, maxtime = 25, timestep = 0.1, logging = False): """ :key env: (optional) an instance of a DockingEnvironment (or a subclass thereof) :key maxtime: (optional) maximum number per task (default: 25) """ if env == None: env = DockingEnvironment() EpisodicTask.__init__(self, env) self.maxTime = maxtime self.dt = timestep self.env.dt = self.dt self.t = 0 self.logging = logging self.logfileName = 'logging_' + str() + '.txt' # actions: u_l, u_r self.actor_limits = [(-0.1, +0.1), (-0.1, +0.1), (0.0, 1.0)] self.lastFitness = 0.0 self.bestFitness = 0.0 def reset(self): EpisodicTask.reset(self) self.t = 0 self.lastFitness = 0.0 self.bestFitness = 0.0 self.appendLog() # write first line! def performAction(self, action): EpisodicTask.performAction(self, action) self.t += self.dt self.appendLog() def isFinished(self): # TODO query self action!? time neuron that is if self.t + self.dt >= self.maxTime or self.env.action[2] >= self.timeNeuron_Threshold : # maximal timesteps # self.appendLog("docking::objfun_: return value: %f\n", average); self.writeLog() return True # stop neuron! 
return False def getTotalReward(self): """ Return the accumulated reward since the start of the episode """ return self.lastFitness # return self.bestFitness def getReward(self): # def evaluateFitness(self): """getReward, returns the fitness at the current time""" fitness = 0.0 distance = self.env.getDistance() speed = self.env.getSpeed() theta = self.env.getOrientation() ## implementation 101 timeBonus = (self.maxTime - self.t)/self.maxTime alpha = 1.0/((1+distance)*(1+fabs(theta))*(speed+1)); if distance < 0.5*self.env.init_distance : if(distance < self.env.vicinity_distance and abs(theta) < self.env.vicinity_orientation and speed < self.env.vicinity_speed ): fitness = 1 + timeBonus; else: fitness = alpha; else: fitness = 0 self.lastFitness = fitness if fitness > self.bestFitness : self.bestFitness = fitness return fitness def clearLog(self): """clear the current logbuffer""" self.logBuffer = None def appendLog(self): """append the current state to the logbuffer""" if self.logBuffer == None : self.logBuffer = "Some header\nhere\n\n" self.logBuffer += "\tx\ty\ttheta : ul\tur\tt-neur\n"; self.logBuffer += '%2.1f: %2.6f\t %2.6f\t %2.6f : ' % \ ( self.t, self.env.state[0], self.env.state[2], self.env.state[4] ) self.logBuffer += '%1.3f\t %1.3f \t%1.2f \t' % \ ( self.env.action[0], self.env.action[1], self.env.action[2] ) self.logBuffer += 'Dst/Theta/Speed: \t%f\t%f\t%f \tF: %.2f \n' % \ ( self.env.getDistance(), self.env.getOrientation(), self.env.getDistance(), self.getReward() ) def writeLog(self): """write the state of the current task into a logfile""" if self.logBuffer != None and self.logging : f = open(self.logfileName, 'w') self.logBuffer += "Final Fitness: %f\n" % self.getTotalReward() self.logBuffer += "\n" f.write(self.logBuffer) f.close() # def setMaxLength(self, n): # self.N = n
22,216
e4ce74adc7b74676864c7245da3db465597ee0e0
from ..api import get_many, ApiVersion, int_field, date_field
from datetime import date
from typing import Iterator, List, Optional


class KrajOkresTesty:
    """One row of the Czech MZCR 'kraj-okres-testy' open-data set.

    Daily incremental and cumulative counts of performed COVID-19 PCR
    tests per NUTS-3 region and LAU-1 district, both with and without the
    correction for repeated (control) tests, regardless of who paid for
    the test. The set is available from 1 August 2020 onwards, because
    earlier laboratory reports are not complete on the individual level.

    Attributes
    ----------
    datum: date
    kraj_nuts_kod: str
        Region identifier (NUTS 3 classification).
    okres_lau_kod: str
        District identifier (LAU 1 classification).
    prirustkovy_pocet_testu_okres: int
        Daily increment of all tests performed in the district.
    kumulativni_pocet_testu_okres: int
        Cumulative count of all tests performed in the district.
    prirustkovy_pocet_testu_kraj: int
        Daily increment of all tests performed in the region.
    kumulativni_pocet_testu_kraj: int
        Cumulative count of all tests performed in the region.
    prirustkovy_pocet_prvnich_testu_okres: int
        Daily increment of tests corrected for repeated (control) tests,
        in the district.
    kumulativni_pocet_prvnich_testu_okres: int
        Cumulative count of corrected tests in the district.
    prirustkovy_pocet_prvnich_testu_kraj: int
        Daily increment of corrected tests in the region.
    kumulativni_pocet_prvnich_testu_kraj: int
        Cumulative count of corrected tests in the region.
    """

    def __init__(self, line: List[str]):
        # Positional CSV columns: date, region, district, then the eight
        # counters in the same order as the attribute list above.
        self.datum: Optional[date] = date_field(line[0])
        self.kraj_nuts_kod: str = line[1]
        self.okres_lau_kod: str = line[2]
        (self.prirustkovy_pocet_testu_okres,
         self.kumulativni_pocet_testu_okres,
         self.prirustkovy_pocet_testu_kraj,
         self.kumulativni_pocet_testu_kraj,
         self.prirustkovy_pocet_prvnich_testu_okres,
         self.kumulativni_pocet_prvnich_testu_okres,
         self.prirustkovy_pocet_prvnich_testu_kraj,
         self.kumulativni_pocet_prvnich_testu_kraj) = (
            int_field(value) for value in line[3:11])

    @staticmethod
    def get(cache_dir: Optional[str]) -> Iterator['KrajOkresTesty']:
        """Yield every row of the 'kraj-okres-testy' data set (API v2)."""
        return get_many('kraj-okres-testy', KrajOkresTesty, ApiVersion.V2,
                        cache_dir)
22,217
6b6016e56aa8f6acbad50846cf8e668bf66bb25d
import numpy as np
from time import time
import itertools
import random


def sample_noreplace(arr, n, k):
    """Sample n k-ary subsets (no repeats within a subset) from arr.

    :param arr: the iterable sampled from (must have a length)
    :param n: number of subsets to sample
    :param k: arity of every subset
    :return: array of shape [n, k] with the sampled subsets
    """
    # code from https://www.iditect.com/how-to/58566613.html
    idx = np.random.randint(len(arr) - np.arange(k), size=[n, k])
    for i in range(k - 1, 0, -1):
        # shift later columns past earlier picks so indices stay distinct
        idx[:, i:] += idx[:, i:] >= idx[:, i - 1, None]
    return np.array(arr)[idx]


def partial_cost(data, indexing, combinations, cf, cf_prime):
    """Mean cost of the selected 3-ary subsets under cf / cf_prime.

    cf_prime is charged on triples whose three vertices share one index
    (same partition); cf is charged on triples whose indices are pairwise
    distinct. Triples with exactly two equal indices cost nothing.

    :param data: np.array[n,3] of 3d points per vertex
    :param indexing: np.array[n] mapping vertices to indices
    :param combinations: np.array[m,3] of vertex triples
    :param cf: cost function (data, triples) -> np.array[m] or None
    :param cf_prime: cost function (data, triples) -> np.array[m] or None
    :return: summed costs divided by the number of triples
    """
    # indices assigned to the u, v and w vertices of every triple
    part_us = indexing[combinations[:, 0]]
    part_vs = indexing[combinations[:, 1]]
    part_ws = indexing[combinations[:, 2]]
    result = 0
    if cf_prime is not None:
        # triples entirely inside one partition
        cf_prime_combs = combinations[(part_us == part_vs)
                                      & (part_us == part_ws)]
        if cf_prime_combs.shape[0] > 0:
            result += cf_prime(data, cf_prime_combs).sum()
    if cf is not None:
        # triples spanning three distinct partitions
        cf_combs = combinations[(part_us != part_vs)
                                & (part_us != part_ws)
                                & (part_vs != part_ws)]
        if cf_combs.shape[0] > 0:
            result += cf(data, cf_combs).sum()
    return result / combinations.shape[0]


def cost(data, indexing, cf, cf_prime, uvw_sample_count=None):
    """Cost of the whole dataset under an indexing.

    :param data: np.array[n,3] of 3d points per vertex
    :param indexing: np.array[n] mapping vertices to indices
    :param cf: cost function for triples in distinct partitions (or None)
    :param cf_prime: cost function for triples in one partition (or None)
    :param uvw_sample_count: if None, enumerate every unordered vertex
        triple; otherwise sample this many triples and return their mean
    :return: the (estimated) cost
    """
    if uvw_sample_count is None:
        us = np.arange(data.shape[0])
        # all ordered 3-tuples, reduced to u < v < w (unordered sets)
        points = np.array(np.meshgrid(us, us, us)).T.reshape(-1, 3)
        points = points[(points[:, 0] < points[:, 1])
                        & (points[:, 1] < points[:, 2])]
    else:
        points = sample_noreplace(np.arange(indexing.shape[0]),
                                  uvw_sample_count, 3)
    return partial_cost(data, indexing, points, cf, cf_prime)


def reduced_cost(data, indexing, cf, cf_prime, v, k, uw_sample_count=None):
    """Cost reduction obtained by moving vertex v to index k.

    Positive values mean the move improves (lowers) the cost.

    :param data: np.array[n,3] of 3d points per vertex
    :param indexing: np.array[n] mapping vertices to indices
    :param cf: cost function for triples in distinct partitions (or None)
    :param cf_prime: cost function for triples in one partition (or None)
    :param v: the vertex to move
    :param k: the target index
    :param uw_sample_count: if None, enumerate all triples containing v;
        otherwise sample this many partner pairs (u, w)
    :return: cost(old indexing) - cost(indexing with v -> k) over the
        triples containing v
    """
    cpy = indexing.copy()
    cpy[v] = k
    vs = np.arange(data.shape[0])
    if uw_sample_count is None:
        # every unordered triple {v, u, w} with u < w and u, w != v
        points = np.array(np.meshgrid(v, vs, vs)).T.reshape(-1, 3)
        points = points[(points[:, 0] != points[:, 1])
                        & (points[:, 0] != points[:, 2])
                        & (points[:, 1] < points[:, 2])]
    else:
        # BUG FIX: np.int was deprecated and removed in NumPy >= 1.24;
        # use the builtin int as the dtype.
        pointsv = np.ones((uw_sample_count, 1), dtype=int) * v
        allowed = np.arange(indexing.shape[0])[
            np.arange(indexing.shape[0]) != v]
        pointsuw = sample_noreplace(allowed, uw_sample_count, 2)
        points = np.concatenate((pointsv, pointsuw), axis=1)
    return (partial_cost(data, indexing, points, cf, cf_prime)
            - partial_cost(data, cpy, points, cf, cf_prime))


def compute_index_counts(indexing):
    """Return, per index, how many vertices are mapped to it.

    :param indexing: np.array[n] mapping vertices to indices
    :return: np.array[n] of vertex counts per index
    """
    counts = np.zeros(indexing.shape, dtype=int)
    for index in indexing:
        counts[index] += 1
    return counts


def compute_probability_weights(indexing, counts, image, binary_set_mappings):
    """Probability of each vertex being picked for a move operation.

    A vertex's weight is the size of its candidate target-index set, so
    sampling (vertex, then uniform target) is uniform over all moves.

    :param indexing: np.array[n] mapping vertices to indices
    :param counts: np.array[n] vertex counts per index
    :param image: list of non-empty indices
    :param binary_set_mappings: result of compute_binary_set_mappings
    :return: np.array[n] of probabilities (sums to 1)
    """
    S_w_cardinalities = np.zeros_like(indexing)
    # number of indices carrying at least two vertices
    countsgeq2 = sum(c >= 2 for c in counts)
    # vertices that are alone in their index, in vertex order
    countseq1 = [v for v in range(indexing.shape[0])
                 if counts[indexing[v]] == 1]
    K_cardinalities = np.zeros_like(indexing)
    for card, w in enumerate(countseq1[::-1]):
        K_cardinalities[w] = card
    for w, index in enumerate(indexing):
        if counts[index] >= 3:
            S_w_cardinalities[w] = len(image)
        elif counts[index] == 2:
            # moving the designated (lower) vertex of a 2-set may also
            # open a fresh empty index
            offset = 1 if w == binary_set_mappings[index] else 0
            S_w_cardinalities[w] = len(image) - 1 + offset
        elif counts[index] == 1:
            S_w_cardinalities[w] = countsgeq2 + K_cardinalities[w]
    return S_w_cardinalities / np.sum(S_w_cardinalities)


def find_empty(counts):
    """Return an index with no assigned vertices, or None.

    :param counts: np.array[n] vertex counts per index
    :return: first empty index, or None if all are occupied
    """
    for index, count in enumerate(counts):
        if count == 0:
            return index
    return None


def compute_binary_set_mappings(indexing, counts):
    """Map every 2-vertex index to the lower of its two vertices.

    :param indexing: np.array[n] mapping vertices to indices
    :param counts: np.array[n] vertex counts per index
    :return: array A with A[i] = lower vertex of index i if counts[i]==2,
        else -1
    """
    ret = np.zeros_like(indexing) - 1
    for vertex, index in enumerate(indexing):
        if counts[index] == 2:
            if ret[index] == -1:
                ret[index] = vertex
    return ret


def compute_unary_set_mappings(indexing, counts):
    """Map every 1-vertex index to its single vertex.

    :param indexing: np.array[n] mapping vertices to indices
    :param counts: np.array[n] vertex counts per index
    :return: array A with A[i] = the vertex of index i if counts[i]==1,
        else -1
    """
    ret = np.zeros_like(indexing) - 1
    for vertex, index in enumerate(indexing):
        if counts[index] == 1:
            ret[index] = vertex
    return ret


def neighbours(indexing, random_stream=None):
    """Enumerate move-neighbours of an indexing as (vertex, target) pairs.

    :param indexing: np.array[n] mapping vertices to indices
    :param random_stream: if an int, sample that many neighbours (vertex
        weighted by its candidate count, target uniform); if None, yield
        the complete neighbourhood
    :yield: (vertex, target index) pairs
    """
    counts = compute_index_counts(indexing)
    binary_sm = compute_binary_set_mappings(indexing, counts)
    unary_sm = compute_unary_set_mappings(indexing, counts)
    empty = find_empty(counts)
    image = [idx for idx, count in enumerate(counts) if count != 0]

    def candidates(vertex, index, image, binary_sm, unary_sm, counts, empty):
        """Yield the allowed target indices for one vertex."""
        for k in image:
            if k == index:
                continue
            if counts[index] > 1 or counts[k] > 1:
                yield k
            elif vertex < unary_sm[k]:
                # implicitly: counts[index]==1 and counts[k]==1; only the
                # lower-numbered vertex generates the merge move
                yield k
        if counts[index] > 2 or (counts[index] == 2
                                 and vertex == binary_sm[index]):
            # splitting off into a fresh empty index
            yield empty

    if random_stream is not None:
        # random move enumeration
        pweights = compute_probability_weights(indexing, counts, image,
                                               binary_sm)
        vertices = np.random.choice(indexing.shape[0], random_stream,
                                    p=pweights)
        for vertex in vertices:
            index = indexing[vertex]
            ks = list(candidates(vertex, index, image, binary_sm, unary_sm,
                                 counts, empty))
            # NOTE(review): raises IndexError if a sampled vertex has no
            # candidate target -- confirm this cannot happen by weighting.
            k = random.choice(ks)
            yield vertex, k
    else:
        # exhaustive move enumeration
        for vertex, index in enumerate(indexing):
            for k in candidates(vertex, index, image, binary_sm, unary_sm,
                                counts, empty):
                yield vertex, k


def best_move(data, indexing, cf, cf_prime, N=20, M=30):
    """Find the best (or a good) move in the (sampled) neighbourhood.

    :param data: np.array[n,3] of 3d points per vertex
    :param indexing: np.array[n] current indexing
    :param cf: cost function for triples in distinct partitions (or None)
    :param cf_prime: cost function for triples in one partition (or None)
    :param N: neighbours to sample (None = whole neighbourhood), default 20
    :param M: partner samples per reduced-cost estimate (None = exact),
        default 30
    :return: ((vertex, target), reduced cost, stats dict with timings)
    """
    stats = {}
    timer = time()
    ns = list(neighbours(indexing, random_stream=N))
    stats["n_neighbours"] = len(ns)
    stats["t_neighbours"] = 1000 * (time() - timer)
    dt_rcs = []
    bestpair, best_rcost = None, None
    for v, k in ns:
        timer = time()
        rc = reduced_cost(data, indexing, cf, cf_prime, v, k,
                          uw_sample_count=M)
        dt_rcs.append(1000 * (time() - timer))
        if bestpair is None or rc > best_rcost:
            bestpair = v, k
            best_rcost = rc
    stats["t_rcs_mean"] = np.mean(dt_rcs)
    stats["t_rcs_std"] = np.std(dt_rcs)
    stats["t_rcs_sum"] = np.sum(dt_rcs)
    stats["rc"] = best_rcost
    stats["partcount"] = np.unique(indexing).shape[0]
    return bestpair, best_rcost, stats


def greedy_search(data, indexing, cf, cf_prime, stop=1000, N=None, M=None):
    """Greedy local search over move operations.

    Iterates until the stopping criterion is met or (with N=M=None,
    i.e. exact evaluation) no improving neighbour exists.

    :param data: np.array[n,3] of 3d points per vertex
    :param indexing: np.array[n] initial indexing (mutated in place)
    :param cf: cost function for triples in distinct partitions (or None)
    :param cf_prime: cost function for triples in one partition (or None)
    :param stop: iteration budget (int) or predicate
        (iteration, indexing) -> bool returning True to stop, default 1000
    :param N: neighbours to sample per step (None = all), default None
    :param M: partner samples per reduced-cost estimate (None = exact),
        default None
    :yield: (indexing, moved vertex or None, target or None, stats)
    """
    if isinstance(stop, int):
        nstop = stop
        stop = lambda i, index: i >= nstop
    for i in itertools.count(0, 1):
        if stop(i, indexing):
            break
        (v, k), c, stats_bm = best_move(data, indexing, cf, cf_prime,
                                        N=N, M=M)
        if c > 0:
            indexing[v] = k
            yield indexing, v, k, stats_bm
        else:
            # no improving move found
            yield indexing, None, None, stats_bm
            if M is None and N is None:
                break
22,218
a485aca8793415f4ba91fe24bfd7e6055bcbc9b9
from e3.os.process import Run

# Clean project p.gpr (recursive, quiet) using the given configuration,
# then show the tool's output.
process = Run(['gpr2clean', '-p', '-q', 'p.gpr', '--config=p.cgpr'])
print(process.out)
22,219
8da0da2e392d277154e2d6e6e5e525e3c133a248
../../from_cpython/Lib/test/test___future__.py
22,220
ff525a60ff151559e312143ec4a9b672c12f1c15
# Echo server program
import socket
import sys
import threading

import message_handler

HOST = ''       # Symbolic name meaning all available interfaces
PORT = 8080     # Arbitrary non-privileged port

# Try every address family / socket type getaddrinfo offers (IPv4/IPv6)
# until one binds successfully.
s = None
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC,
                              socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
    af, socktype, proto, canonname, sa = res
    try:
        s = socket.socket(af, socktype, proto)
    except socket.error:
        s = None
        continue
    try:
        # allow quick restarts while the old socket is in TIME_WAIT
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(sa)
        s.listen(5)
    except socket.error:
        s.close()
        s = None
        continue
    break

if s is None:
    print('could not open socket')
    sys.exit(1)

print('Server is waiting requests...')
while True:
    try:
        conn, addr = s.accept()
        # BUG FIX: threading._start_new_thread is a private alias; use the
        # public Thread API. daemon=True so workers never block shutdown.
        threading.Thread(target=message_handler.handle, args=(conn,),
                         daemon=True).start()
    except (KeyboardInterrupt, OSError):
        # Ctrl-C or a failing listening socket stops the accept loop
        # (the original bare except hid every error class).
        print('Stopping server')
        break
22,221
9a1e0428e56eb1d8c66f5e8e1d6e4dd74cd0e021
# filename: exercise0502.py
# author:   Kaiching Chang
# date:     July, 2014

first = int(input("Please input the first number: "))
second = int(input("Please input the second number: "))

# print the five basic arithmetic results, one per line
for value in (first + second,
              first - second,
              first * second,
              first / second,
              first % second):
    print(value)
22,222
d28c86af2ca21a90ccf4c5ecb17c7bc248f116f3
# encoding=utf-8
import requests
import urlparse
import time
import re
import json
from xsspayload import xsspayload
from bs4 import BeautifulSoup
from sqlDB import SQLConnect


def xss(url):
    """Probe `url` for reflected XSS using the payloads in `xsspayload`.

    Returns a JSON string: {"status": "1", "url": ...} when a payload is
    reflected by the server, {"status": "0", "dec": ...} otherwise.
    """
    if '?' in url:
        domain = url.split('?')[0]
        query = url.split('?')[-1]
        params = {}
        for pair in query.split("&"):
            params[pair.split("=")[0]] = pair.split("=")[-1]
        templates = []
        for value in params.values():
            # BUG FIX: the original concatenated domain + query without
            # the '?' separator, producing an invalid URL.
            templates.append(domain + '?' + query.replace(value, "Payload"))
        for payload in xsspayload:
            for template in templates:
                target = template.replace('Payload', payload)
                try:
                    # fetch once and reuse the response (the original
                    # issued the identical request twice)
                    resp = requests.get(target)
                    # BUG FIX: str.find() returns -1 (truthy) on a miss,
                    # so `content.find(payload)` always passed; test
                    # substring containment instead. Also use 200 like
                    # the branch below (202 Accepted was inconsistent).
                    if payload in resp.content and resp.status_code == 200:
                        return json.dumps(dict(status="1", url=target))
                except Exception:
                    # network/parse errors: try the next payload
                    pass
        return json.dumps(dict(status="0", dec="can not find xss"))
    else:
        for payload in xsspayload:
            # BUG FIX: the original appended every payload onto the same
            # `url` variable, compounding payloads across iterations.
            target = url + '/' + payload
            try:
                resp = requests.get(target)
                if payload in resp.content and resp.status_code == 200:
                    SQLConnect.insertsx('xss', target)
                    return json.dumps(dict(status="1", url=target))
            except Exception:
                pass
        return json.dumps(dict(status="0", dec="can not find xss"))
22,223
f3e5f738df5dc49fee3a013760213370458c2559
__author__ = 'Che'

from django.contrib import admin
from screenwriter.models import Slug, Action, Dialogue, Character, Screenplay, ScreenplayElements, Parentheses, ScreenplayElementType


class SlugAdmin(admin.ModelAdmin):
    """Admin options for Slug: expose only the slug field."""
    fields = ['slug']
    list_display = ('slug',)


class ActionAdmin(admin.ModelAdmin):
    """Default admin for Action."""
    pass


class DialogueAdmin(admin.ModelAdmin):
    """Default admin for Dialogue."""
    pass


class CharacterAdmin(admin.ModelAdmin):
    """Default admin for Character."""
    pass


class ScreenplayAdmin(admin.ModelAdmin):
    """Default admin for Screenplay."""
    pass


class ScreenplayElementsAdmin(admin.ModelAdmin):
    """Default admin for ScreenplayElements."""
    pass


class ParenthesesAdmin(admin.ModelAdmin):
    """Default admin for Parentheses."""
    pass


class ScreenplayElementTypeAdmin(admin.ModelAdmin):
    """Default admin for ScreenplayElementType."""
    pass


# Register every model with its admin options in one place.
for _model, _options in (
        (Slug, SlugAdmin),
        (Action, ActionAdmin),
        (Dialogue, DialogueAdmin),
        (Character, CharacterAdmin),
        (Screenplay, ScreenplayAdmin),
        (ScreenplayElements, ScreenplayElementsAdmin),
        (Parentheses, ParenthesesAdmin),
        (ScreenplayElementType, ScreenplayElementTypeAdmin)):
    admin.site.register(_model, _options)
22,224
ec8c5afe740862fbf35a6b5b335439e9b045ba38
from flask import Flask, render_template, request, redirect, session, flash, url_for
from mysqlconnection import MySQLConnector
from flask.ext.bcrypt import Bcrypt
import re
import pprint
from itertools import groupby

app = Flask(__name__)
bcrypt = Bcrypt(app)
mysql = MySQLConnector(app, 'wall')
app.secret_key = 'secret'

emailRegex = re.compile(r'^[a-zA-Z0-9\.\+_-]+@[a-zA-Z0-9\._-]+\.[a-zA-Z]*$')
passwordRegex = re.compile(r'^(?=.*[a-z])(?=.*[A-Z])(?=.*\d).+$')

# NOTE(review): every query below interpolates user input with str.format,
# which is vulnerable to SQL injection. Switch to the connector's
# parameterized-query support once its API is confirmed.


def validate():
    """Validate the registration form; flash errors, stash fields in session.

    Returns True when all fields are valid, False otherwise (passwords are
    cleared from the session on failure).
    """
    errors = 0
    if request.form['first_name'] == '':
        flash('Name cannot be blank', 'first_nameError')
        errors += 1
    elif any(char.isdigit() for char in request.form['first_name']):
        flash('Name cannot have numbers', 'first_nameError')
        errors += 1
    else:
        session['first_name'] = str(request.form['first_name'])
    if request.form['last_name'] == '':
        flash('Name cannot be blank', 'last_nameError')
        errors += 1
    elif any(char.isdigit() for char in request.form['last_name']):
        flash('Name cannot have numbers', 'last_nameError')
        errors += 1
    else:
        session['last_name'] = str(request.form['last_name'])
    if str(request.form['email']) == '':
        flash('Email cannot be blank', 'emailError')
        errors += 1
    elif not emailRegex.match(request.form['email']):
        flash('Invalid email address', 'emailError')
        errors += 1
    else:
        session['email'] = str(request.form['email'])
    if str(request.form['password']) == '':
        flash('Password cannot be blank', 'passwordError')
        errors += 1
    elif len(request.form['password']) < 8:
        flash('Password must be greater than 8 characters', 'passwordError')
        errors += 1
    elif not passwordRegex.match(request.form['password']):
        flash('Password must contain at least one lowercase letter, one uppercase letter, and one digit', 'passwordError')
        errors += 1
    else:
        session['password'] = str(request.form['password'])
    if str(request.form['confirm_password']) == '':
        flash('Please confirm password', 'confirm_passwordError')
        errors += 1
    elif str(request.form['confirm_password']) != str(request.form['password']):
        flash('Passwords do not match', 'confirm_passwordError')
        errors += 1
    else:
        session['confirm_password'] = str(request.form['confirm_password'])
    if errors > 0:
        # never leave passwords lying around in the session on failure
        session['password'] = ''
        session['confirm_password'] = ''
        return False
    return True


def validateLogin():
    """Validate the login form; flash errors, stash fields in session."""
    errors = 0
    if str(request.form['email']) == '':
        flash('Email cannot be blank', 'loginEmailError')
        errors += 1
    elif not emailRegex.match(request.form['email']):
        flash('Invalid email address', 'loginEmailError')
        errors += 1
    else:
        session['email'] = str(request.form['email'])
    if request.form['password'] == '':
        flash('Password cannot be blank', 'loginPasswordError')
        errors += 1
    else:
        session['password'] = str(request.form['password'])
    if errors > 0:
        session['password'] = ''
        return False
    return True


def setUserId():
    """Look up the id of the user in session['email'], store it in session."""
    getUserId = "SELECT idusers FROM users WHERE email = '{}'".format(session['email'])
    id = mysql.query_db(getUserId)
    session['userid'] = id[0]['idusers']
    return True


def checkIfEmailExists():
    """Return True when session['email'] is already registered."""
    getEmails = "SELECT email FROM users WHERE email = '{}'".format(session['email'])
    emailList = mysql.query_db(getEmails)
    # BUG FIX: query_db returns an empty list (not None) when no row
    # matches, so the original None-comparison reported every email as
    # taken; test truthiness instead.
    return bool(emailList)


def getPostsWithUsers():
    """Return all posts joined with their authors, newest first."""
    getPosts = "SELECT posts.idposts, posts.post_text, posts.created_at, users.idusers, users.idusers, users.first_name, users.last_name FROM posts LEFT JOIN users ON posts.user_id = users.idusers ORDER BY posts.created_at DESC"
    posts = mysql.query_db(getPosts)
    return posts


def getComments():
    """Return all comments joined with their posts and authors."""
    getComments = "SELECT comments.post_id, comments.comment_text, comments.created_at, users.first_name, users.last_name FROM comments LEFT JOIN posts ON comments.post_id = posts.idposts LEFT JOIN users ON comments.user_id = users.idusers"
    comments = mysql.query_db(getComments)
    return comments


def getPostsWithComments():
    """Assemble posts with their comments into template-ready dicts."""
    comments = getComments()
    posts = getPostsWithUsers()
    commentList = {}
    postsWithCommentContainer = []
    # group comments by post id
    for comment in comments:
        info = {
            "post_id": comment['post_id'],
            "created_at": comment['created_at'].strftime("%B %d, %Y %-I:%M %p"),
            "first_name": comment['first_name'],
            "last_name": comment['last_name'],
            "comment_text": comment['comment_text']
        }
        if comment['post_id'] in commentList:
            commentList[comment['post_id']].append(info)
        else:
            commentList[comment['post_id']] = [info]
    # attach the grouped comments to each post
    for post in posts:
        postData = {
            "created_at": post['created_at'].strftime("%B %d, %Y %-I:%M %p"),
            "post_id": post['idposts'],
            "first_name": post['first_name'],
            "last_name": post['last_name'],
            "post_text": post['post_text'],
            "new_id": post['created_at'].strftime("%B%d%Y%-I%M%s")
        }
        if post['idposts'] in commentList:
            postData['comments'] = commentList[post['idposts']]
        postsWithCommentContainer.append(postData)
    return postsWithCommentContainer


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/create', methods=['POST'])
def create():
    """Register a new user after validation; reject duplicate emails."""
    if validate() == False:
        session['loggedin'] = False
        return redirect('/')
    # BUG FIX: combined with the broken None check above, the original
    # branch registered users even when the email was already taken;
    # insert only when the email is still free.
    if checkIfEmailExists():
        flash('Account with email already exists. Please use another email', 'emailError')
        return redirect('/')
    encryptedPassword = bcrypt.generate_password_hash(request.form['password'])
    query = "INSERT INTO users (first_name, last_name, email, password, created_at, updated_at) VALUES ('{}', '{}', '{}','{}', NOW(), NOW())".format(session['first_name'], session['last_name'], session['email'], encryptedPassword)
    mysql.run_mysql_query(query)
    session['password'] = ''
    session['confirm_password'] = ''
    session['loggedin'] = True
    return redirect('/dashboard')


@app.route('/validate', methods=['POST'])
def validateLoginInfo():
    """Log an existing user in after checking email and password."""
    if validateLogin() == False:
        session['loggedin'] = False
        return redirect('/login')
    userInfo = mysql.query_db("SELECT * FROM users WHERE email = '{}'".format(session['email']))
    if not userInfo:
        flash('No user with that email exists. Please create new user', 'loginEmailError')
        return redirect('/')
    if bcrypt.check_password_hash(userInfo[0]['password'], request.form['password']):
        session['loggedin'] = True
        session['userid'] = userInfo[0]['idusers']
        session['first_name'] = userInfo[0]['first_name']
        return redirect('/dashboard')
    # BUG FIX: the original fell through to redirect('/dashboard') even
    # when the password was wrong.
    flash('Incorrect password', 'loginPasswordError')
    session['loggedin'] = False
    return redirect('/')


@app.route('/dashboard')
def returnDashboard():
    # session.get avoids a KeyError on a fresh session
    if session.get('loggedin'):
        setUserId()
        postData = getPostsWithComments()
        return render_template('sucess.html', posts=postData)
    return redirect('/')


@app.route('/post', methods=['POST'])
def postData():
    """Store a new wall post for the logged-in user."""
    if session.get('loggedin'):
        postMessage = str(request.form['post'])
        # BUG FIX: the original inserted into a 'messages' table while
        # every read path selects from 'posts', so new posts never showed.
        query = "INSERT INTO posts (post_text, created_at, updated_at, user_id) VALUES ('{}', NOW(), NOW(), '{}')".format(postMessage, session['userid'])
        mysql.run_mysql_query(query)
        return redirect('/dashboard')
    return redirect('/')


@app.route('/post/<post_id>/comment', methods=['POST'])
def postComment(post_id):
    """Store a comment on the given post for the logged-in user."""
    if session.get('loggedin'):
        commentMessage = str(request.form['comment'])
        query = "INSERT INTO comments (post_id, user_id, comment_text, created_at, updated_at) VALUES ('{}', '{}', '{}', NOW(), NOW())".format(post_id, session['userid'], commentMessage)
        mysql.run_mysql_query(query)
        return redirect('/dashboard')
    return redirect('/')


@app.route('/logout')
def clear():
    """Clear the session and log the user out."""
    session['first_name'] = ''
    session['last_name'] = ''
    session['email'] = ''
    session['password'] = ''
    session['confirm_password'] = ''
    session['userid'] = ''
    session['loggedin'] = False
    return redirect('/')


app.run(debug=True)
22,225
4c1d1c3cadb902c2cd51e7952e646403aebcfbb2
"""
Image detection on a video stream with YOLO.

by Takashi MATSUSHITA
"""
import numpy as np
from PIL import Image

import yolo

# detector configuration: tiny-YOLO weights, anchors and COCO class names
config = dict(
    model_path='model_data/yolo-tiny.h5',
    anchors_path='model_data/tiny_yolo_anchors.txt',
    classes_path='model_data/coco_classes.txt',
    score=0.3,
    iou=0.45,
    model_image_size=(416, 416),
    gpu_num=1,
)

path = 'some_interesting_video.mp4'   # input video
output = 'test.mov'                   # annotated output file

o = yolo.YOLO(**config)
yolo.detect_video(o, path, output)
22,226
4966b7d001b22e2411759ae462f7f479daa790c5
'''
Integration between iBRAIN and brainy.
'''
import os
import re
from datetime import datetime
import logging
logger = logging.getLogger(__name__)

from brainy.flags import FlagManager
from brainy.config import get_config
from brainy.scheduler import (BrainyScheduler, SHORT_QUEUE, LONG_QUEUE,
                              NORM_QUEUE)


class BrainyModule(FlagManager):
    """One iBRAIN pipeline module: flags, job submission and result files."""

    def __init__(self, name, env):
        """
        :param name: module name; used as flag prefix and result-file prefix
        :param env: mapping with at least 'project_dir' and 'batch_dir'
        """
        self.name = name
        # Check for missing values?
        self.env = env
        self.__results = None  # lazily filled cache of result-file names
        # At this point the iBRAIN ROOT has to be configured: set externally
        # or guessed.
        brainy_config = get_config()
        logger.info('Initializing "%s" as scheduling engine' %
                    brainy_config['scheduling_engine'])
        self.scheduler = BrainyScheduler.build_scheduler(
            brainy_config['scheduling_engine'])

    def _get_flag_prefix(self):
        """Flag files live under the project dir, prefixed by module name."""
        return os.path.join(self.env['project_dir'], self.name)

    def get_results(self):
        """Return (and cache) the names of this module's result files."""
        if self.__results is None:
            # Find result files only once.
            # BUG FIX: raw string with escaped dot -- '\d' in a plain
            # string is an invalid escape (an error from Python 3.12) and
            # the bare '.' matched any character, not just the dot.
            results_regex = re.compile(r'%s_\d+\.results' % self.name)
            self.__results = [filename
                              for filename in os.listdir(self.env['batch_dir'])
                              if results_regex.search(filename)]
        return self.__results

    def submit_job(self, script, queue=SHORT_QUEUE):
        """Submit a script; the result file is stamped with submission time
        so reruns do not clash."""
        results_file = os.path.join(self.env['batch_dir'], '%s_%s.results') % \
            (self.name, datetime.now().strftime('%y%m%d%H%M%S'))
        self.scheduler.submit_job(script, queue, results_file)

    @property
    def results_count(self):
        """Number of result files produced by this module so far."""
        return len(self.get_results())

    def job_count(self, needle=None):
        """Count scheduler jobs matching needle (default: project dir name)."""
        if needle is None:
            needle = os.path.basename(self.env['project_dir'])
        return self.scheduler.count_working_jobs(needle)
22,227
a02a131580fc1c7ec38d17cb06f49f1755438553
# NOTE: this is Python 2 code (reload(sys)/setdefaultencoding, list-returning
# map usage); it depends on the pyfst `fst` bindings.
import sys
import fst
import random
import math
import numpy as np
from scipy.sparse import bsr_matrix

reload(sys)
sys.setdefaultencoding('utf8')


def ran_lab_prob(n_samps):
    # Build one random 138-way distribution and repeat it for every frame.
    # NOTE(review): every row is the *same* distribution — confirm intended.
    r = [random.random() for i in range(138)]
    s = sum(r)
    return [[i/s for i in r]]*n_samps


def genBigGraph(label_prob, symbols, seq_len, label='x'):
    """Build a lattice transducer: one state per time step, one arc per
    symbol, weighted with -log(prob of that symbol at that frame)."""
    # NOTE(review): commented example below passes the *function*
    # ran_lab_prob as label_prob, but this code indexes label_prob[j][i] —
    # the caller must pass the materialized table.
    t = fst.Transducer()
    sym = fst.SymbolTable()
    symbols = map(str, symbols)
    x = 0
    for j in range(seq_len):
        for i in range(len(symbols)):
            prob = label_prob[j][i]  # "%.4f" %
            # input label encodes the frame index, output is the symbol
            t.add_arc(0+x, 1+x, str(label+str(j)), symbols[i], -math.log(prob))
        x += 1
        t[j+1].final = -1
    return t


def gen_utt_graph(labels, symdict):
    """Build the utterance (supervision) transducer: for each label, a chain
    of its sub-symbols with self-loops, emitting 'label/(sym)' on entry."""
    t2 = fst.Transducer()
    sym = fst.SymbolTable()
    # 3x3 states for this example
    count = 0
    x = 0
    # print labels
    for l in labels:
        symbols = symdict[l]
        symbols = map(str, symbols)
        for i in range(len(symbols)):
            if i == 0:
                # first sub-symbol emits the label name
                t2.add_arc(0+x, 1+x, symbols[i], str(l+"/"+"("+symbols[i]+")"))
            else:
                # NOTE(review): sym.find(0) on an empty SymbolTable —
                # presumably resolves to epsilon; confirm with pyfst docs.
                t2.add_arc(0+x, 1+x, symbols[i],
                           str(sym.find(0)+"("+symbols[i]+")"))
            # self-loop so a symbol may span several frames
            t2.add_arc(1+x, 1+x, symbols[i],
                       str(sym.find(0)+"("+symbols[i]+")"))
            x += 1
    t2[x].final = True
    return t2


def gen_parents_dict(graph):
    """Map each state id to the list of its predecessor state ids."""
    parents = {}
    for state in graph.states:
        for arc in state.arcs:
            if arc.nextstate in parents:
                parents[arc.nextstate].append(state.stateid)
            else:
                parents[arc.nextstate] = [state.stateid]
    return parents


def make_prob_dict(graph, n_samps, n_labels):
    """Collect per-frame, per-output-symbol probabilities from the composed
    graph into a sparse (n_samps+1, n_labels) matrix.

    NOTE(review): the commented example below calls make_prob_dict(t3) with
    one argument, which no longer matches this signature.
    """
    # dictionary to store probabilities indexed by time and label
    y_t_s = np.zeros((n_samps + 1, n_labels))
    F = [0]  # frontier of states reachable at time t
    for t in range(n_samps + 1):
        # y_t_s[t] = {}
        for s in F:
            arcs = graph[s].arcs
            for a in arcs:
                # output labels look like 'prefix(<id>)' — extract <id>
                osym = graph.osyms.find(a.olabel)
                osym = osym[osym.find("(")+1:osym.find(")")]
                # arc weights are -log probabilities
                y_t_s[t][int(osym)] = np.exp(-1 * float(a.weight))
        # advance the frontier one time step
        F = map(lambda x: map(lambda y: y.nextstate, graph[x].arcs), F)
        F = set([s for ss in F for s in ss])
    y_t_s = bsr_matrix(y_t_s, dtype='float32')
    return y_t_s


def calc_alpha(n_samps, symbols, y_t_s):
    """Forward pass: alpha[t][sym] with left-to-right topology
    (stay on the same symbol or advance from the previous one)."""
    alpha = {}
    # symbols = map(str,symbols)
    for t in range(n_samps + 1):
        alpha[t] = {}
        for i in range(len(symbols)):
            # print alpha
            # print t,i,
            if t == 0:
                if i == 0:
                    alpha[t][symbols[i]] = float(y_t_s[t, symbols[i]])
                else:
                    alpha[t][symbols[i]] = 0.0
            else:
                if i == 0:
                    alpha[t][symbols[i]] = float(y_t_s[t, symbols[i]]) \
                        * alpha[t-1][symbols[i]]
                else:
                    alpha[t][symbols[i]] = float(y_t_s[t, symbols[i]]) \
                        * (alpha[t-1][symbols[i]] + alpha[t-1][symbols[i-1]])
            # print alpha[t][symbols[i]]
    return alpha


def calc_beta(n_samps, symbols, y_t_s):
    """Backward pass: beta[t][sym], mirror of calc_alpha.
    NOTE(review): range(n_samps, 0, -1) never fills beta[0] — confirm."""
    beta = {}
    # symbols = map(str,symbols)
    for t in range(n_samps, 0, -1):
        beta[t] = {}
        for i in range(len(symbols)):
            if t == n_samps:
                if i == len(symbols) - 1:
                    beta[t][symbols[i]] = float(y_t_s[t, symbols[i]])
                else:
                    beta[t][symbols[i]] = 0.0
            else:
                if i < len(symbols) - 1:
                    score = beta[t+1][symbols[i]] + beta[t+1][symbols[i+1]]
                else:
                    score = beta[t+1][symbols[i]]
                beta[t][symbols[i]] = float(y_t_s[t, symbols[i]]) * score
    return beta


def print_graph(t):
    """Dump every arc of the transducer in 'src -> dst / in:out / weight' form."""
    for state in t.states:
        for arc in state.arcs:
            print('{} -> {} / {}:{} / {}'.format(state.stateid,
                                                 arc.nextstate,
                                                 t.isyms.find(arc.ilabel),
                                                 t.osyms.find(arc.olabel),
                                                 arc.weight))


# Example usage (kept for reference; see NOTE(review) remarks above):
# symbols = ['G1','G2','G3','UH1','UH2','UH3','D1','D2','D3']
# t = genBigGraph(ran_lab_prob,symbols,11)
# labels = ['G','UH','D']
# symdict={'G': ['G1','G2','G3'],
#          'UH': ['UH1','UH2','UH3'],
#          'D': ['D1','D2','D3']}
# t2 = gen_utt_graph(labels, symdict)
# t3 = t>>t2
# parents = gen_parents_dict(t3)
# y_t_s = make_prob_dict(t3)
# print calc_alpha(10,symbols,y_t_s)
# print calc_beta(10,symbols,y_t_s)
22,228
2eebbc2596fdb4645897ce9dcaf460910ef677c2
from verification.domain.models.duns_verification import DUNSVerification, Comment
from verification.domain.models.jumio import JumioVerification
from verification.domain.models.verfication import Verification


class VerificationFactory:
    """Translates persistence-layer rows into their domain entities."""

    @staticmethod
    def verification_entity_from_db_list(verification_db_list):
        """Convert every DB row in the list to a Verification entity."""
        return [
            VerificationFactory.verification_entity_from_db(row)
            for row in verification_db_list
        ]

    @staticmethod
    def verification_entity_from_db(verification_db):
        """Build one Verification entity from a DB row."""
        row = verification_db
        return Verification(
            row.id,
            row.verification_type,
            row.entity_id,
            row.status,
            row.requestee,
            row.created_at,
            row.updated_at,
            reject_reason=row.reject_reason,
        )

    @staticmethod
    def jumio_verification_entity_from_db_list(verification_db_list):
        """Convert every DB row in the list to a JumioVerification entity."""
        return [
            VerificationFactory.jumio_verification_entity_from_db(row)
            for row in verification_db_list
        ]

    @staticmethod
    def jumio_verification_entity_from_db(verification_db):
        """Build one JumioVerification entity from a DB row."""
        row = verification_db
        return JumioVerification(
            verification_id=row.verification_id,
            username=row.username,
            user_reference_id=row.user_reference_id,
            verification_status=row.verification_status,
            transaction_status=row.transaction_status,
            created_at=row.created_at,
            redirect_url=row.redirect_url,
            jumio_reference_id=row.jumio_reference_id,
            transaction_date=row.transaction_date,
            callback_date=row.callback_date,
            reject_reason=row.reject_reason,
        )

    @staticmethod
    def duns_verification_entity_from_db(verification_db):
        """Build one DUNSVerification entity; its comments are decoded
        from the row's JSON column."""
        return DUNSVerification(
            verification_id=verification_db.verification_id,
            org_uuid=verification_db.org_uuid,
            status=verification_db.status,
            comments=VerificationFactory.comment_entity_list_from_json(
                verification_db.comments),
            created_at=verification_db.created_at,
            # kwarg name kept exactly as declared by DUNSVerification
            update_at=verification_db.updated_at,
        )

    @staticmethod
    def comment_entity_list_from_json(comments_json):
        """Materialize Comment entities from a JSON-decoded list of dicts."""
        entities = []
        for entry in comments_json:
            entities.append(Comment(comment=entry["comment"],
                                    created_by=entry["created_by"],
                                    created_at=entry["created_at"]))
        return entities
22,229
8729b662727c240137983ce7ad22d5df6087feef
import logging
from pprint import pformat
from traceback import format_stack

logger = logging.getLogger('toggl.utils.metas')

# Module-level marker distinguishing "no key supplied" from an explicit key.
sentinel = object()


class CachedFactoryMeta(type):
    """
    Meta class that implements pattern similar to Singleton, except there are
    more instances cached based on a input parameter. It utilizes Factory
    pattern and forbids direct instantiation of the class.

    To retrieve/create unique instance use `factory(key)` class method.

    It is possible to leave out 'key' parameter and then default value is
    returned. Related to this, it is possible to set a default object using
    `set_default(obj)` class method.
    """

    # Dict key under which the "default" (no-key) instance is cached.
    SENTINEL_KEY = '20800fa4-c75d-4c2c-9c99-fb35122e1a18'

    def __new__(mcs, name, bases, namespace):
        # NOTE(review): the cache is stored on the *metaclass* itself, so it
        # is shared by every class using this metaclass, and re-created each
        # time a new class is defined — confirm this is intended.
        mcs.cache = {}

        def new__init__(_):
            # Block direct construction; only factory() may build instances.
            raise ValueError('Cannot directly instantiate new object, you have to use \'factory\' method for that!')

        # Keep the real __init__ so factory() can still invoke it.
        old_init = namespace.get('__init__')
        namespace['__init__'] = new__init__

        def factory(cls_obj, key=sentinel, *args, **kwargs):
            # Key with None are not cached
            if key is None:
                obj = cls_obj.__new__(cls_obj, key, *args, **kwargs)
                old_init(obj, key, *args, **kwargs)
                return obj

            # Map the "no key" sentinel onto the fixed cache slot.
            # NOTE(review): `==` on the sentinel object() falls back to
            # identity here, but `is` would state the intent directly.
            cached_key = mcs.SENTINEL_KEY if key == sentinel else key

            # Is already cached ==> return it
            if cached_key in mcs.cache:
                return mcs.cache[cached_key]

            # Default value
            if key == sentinel:
                obj = cls_obj.__new__(cls_obj, *args, **kwargs)
                old_init(obj, *args, **kwargs)
            else:
                obj = cls_obj.__new__(cls_obj, key, *args, **kwargs)
                old_init(obj, key, *args, **kwargs)

            mcs.cache[cached_key] = obj

            return obj

        def set_default(_, obj):
            # Pre-seed the instance returned for key-less factory() calls.
            mcs.cache[mcs.SENTINEL_KEY] = obj

        namespace['set_default'] = classmethod(set_default)
        namespace['factory'] = classmethod(factory)
        return super().__new__(mcs, name, bases, namespace)


class ClassAttributeModificationWarning(type):
    """
    Meta class that logs warnings when class's attributes are overridden.
    """

    def __setattr__(cls, attr, value):
        # Warn (with a stack trace at debug level) on every class-attribute
        # assignment, then perform the assignment normally.
        logger.warning('You are modifying class attribute of \'{}\' class. '
                       'You better know what you are doing!'
                       .format(cls.__name__))
        logger.debug(pformat(format_stack()))
        super().__setattr__(attr, value)
22,230
b02526262151a6cb3e2fc9fd0665ec24766e0f7c
# encoding: UTF-8 """ 展示如何执行策略回测。 """ from __future__ import division import sys sys.path.append("/home/yf/Downloads/vnpy-1.9.1/vnpy/trader/app/ctaStrategy/strategy") from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, MINUTE_DB_NAME if __name__ == '__main__': from vnpy.trader.app.ctaStrategy.strategy.strategyKingKeltner import KkStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyAtrRsi import AtrRsiStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyBollChannel import BollChannelStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyDoubleMa import DoubleMaStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyDualThrust import DualThrustStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyMultiSignal import MultiSignalStrategy from vnpy.trader.app.ctaStrategy.strategy.strategyTurtleTrading import TurtleTradingStrategy # 创建回测引擎 engine = BacktestingEngine() # 设置引擎的回测模式为K线 engine.setBacktestingMode(engine.BAR_MODE) # 设置回测用的数据起始日期 engine.setStartDate('20140101') #engine.setStartDate('20130104') #engine.setStartDate('20110101') # 设置产品相关参数 engine.setSlippage(1) # 股指1跳 engine.setRate(0) # 万0.3 engine.setSize(1) # 股指合约大小 engine.setPriceTick(1) # 股指最小价格变动 # 设置使用的历史数据库 #engine.setDatabase(MINUTE_DB_NAME, 'IF99') #engine.setDatabase(MINUTE_DB_NAME, 'eosquarter.OKEX') engine.setDatabase('VnTrader_Daily_Db', 'CU888') # 在引擎中创建策略对象 d = {} engine.initStrategy(TurtleTradingStrategy, d) # 开始跑回测 engine.runBacktesting() # 显示回测结果 engine.showBacktestingResult()
22,231
c2ba2c6e33f3b80e4a3e7545beef53fe75e78435
from django.db import models

# NOTE: every model below uses managed = False — the tables already exist in
# a legacy database; Django will neither create nor migrate them.


class Mjesto(models.Model):
    """Town/locality, keyed by its postal code."""
    postanskibroj = models.CharField(primary_key=True, max_length=5)
    naziv = models.CharField(max_length=40)

    class Meta:
        managed = False
        db_table = 'mjesto'


class Predmet(models.Model):
    """Course, owned by the professor who teaches it."""
    id = models.IntegerField(primary_key=True)
    naziv = models.CharField(max_length=70)
    jmbagnositelja = models.ForeignKey('Profesor', models.CASCADE,
                                       db_column='jmbagnositelja')

    class Meta:
        managed = False
        db_table = 'predmet'


class Profesor(models.Model):
    """Professor, keyed by JMBAG; links to residence and birth localities."""
    jmbag = models.CharField(primary_key=True, max_length=10)
    prezime = models.CharField(max_length=25)
    ime = models.CharField(max_length=25)
    spol = models.CharField(max_length=1)
    datumrod = models.DateField(blank=True, null=True)
    # related_name disambiguates the two FKs into Mjesto
    postanskibrojprb = models.ForeignKey(Mjesto, models.CASCADE,
                                         db_column='postanskibrojprb',
                                         blank=True, null=True,
                                         related_name='profpbr')
    postanskibrojrod = models.ForeignKey(Mjesto, models.CASCADE,
                                         db_column='postanskibrojrod',
                                         blank=True, null=True)
    sluzbenimail = models.CharField(max_length=30)

    class Meta:
        managed = False
        db_table = 'profesor'


class Student(models.Model):
    """Student, keyed by JMBAG; links to residence and birth localities."""
    jmbag = models.CharField(primary_key=True, max_length=10)
    prezime = models.CharField(max_length=25)
    ime = models.CharField(max_length=25)
    spol = models.CharField(max_length=1)
    datumrod = models.DateField()
    postanskibrojprb = models.ForeignKey(Mjesto, models.CASCADE,
                                         db_column='postanskibrojprb',
                                         blank=True, null=True,
                                         related_name='studPbr')
    postanskibrojrod = models.ForeignKey(Mjesto, models.CASCADE,
                                         db_column='postanskibrojrod',
                                         blank=True, null=True)
    email = models.CharField(max_length=30, blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'student'


class Upisanipredmet(models.Model):
    """Enrollment (course x student) with an optional grade.

    NOTE(review): the real table has a composite PK; Django only supports a
    single-column PK, hence primary_key on idpredmeta plus unique_together.
    """
    idpredmeta = models.ForeignKey(Predmet, models.CASCADE,
                                   db_column='idpredmeta', primary_key=True)
    jmbagstudenta = models.ForeignKey(Student, models.CASCADE,
                                      db_column='jmbagstudenta')
    ocjena = models.SmallIntegerField(blank=True, null=True)

    class Meta:
        managed = False
        db_table = 'upisanipredmet'
        unique_together = (('idpredmeta', 'jmbagstudenta'),)
22,232
0143e98b6bd525bcb00280c04b1ddaf67c45b879
from django.db import models
from django.conf import settings
from django.contrib import admin

# Create your models here.


class BaseAPIUser(models.Model):
    """
    User object used by the authentication library.
    """
    username = models.CharField('Username', max_length=255, unique=True)
    # NOTE(review): `secret` appears to be stored in plaintext — confirm
    # whether it should be hashed before persisting.
    secret = models.CharField('Secret', max_length=255)
    active = models.BooleanField('Active', default=True)
    last_login = models.DateTimeField('Last auth', null=True, blank=True)

    def __unicode__(self):
        # Python 2-style display name (code base predates __str__ usage).
        return self.username

    class Meta:
        verbose_name = 'API User'
        abstract = True  # concrete apps subclass this (see SampleUser)


class SampleUser(BaseAPIUser):
    """Importable reincarnation of the BaseAPIUser"""
    pass


class APIUserAdmin(admin.ModelAdmin):
    """Sample admin for the API User"""
    date_hierarchy = 'last_login'
    list_display = ('username', 'last_login', 'active')
    list_filter = ('active', 'last_login',)
    search_fields = ('username',)
22,233
3d9c1239a0d04929b82d72c67567e1d27629565a
# Generated by Django 3.0.4 on 2020-04-19 03:52 import ckeditor.fields from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('projects', '0003_optional_description'), ] operations = [ migrations.AddField( model_name='project', name='include_in_resume', field=models.BooleanField(default=True, help_text='Determines whether or not the project will be included in the auto generated resume.'), ), migrations.AddField( model_name='project', name='resume_description', field=models.CharField(blank=True, help_text='If included, will be used instead of the description for the auto generated resume.', max_length=256, null=True), ), migrations.AlterField( model_name='project', name='description', field=ckeditor.fields.RichTextField(blank=True, help_text='Required if the project is tied to an education or an experience.', null=True), ), ]
22,234
a8fa0a03218972a6404b5c265fc8977df9405061
from __future__ import print_function
from datetime import timedelta

import pandas


class Event:
    """One churn or renewal event for a single user."""

    def __init__(self, user_id, time, type):
        self.user_id = user_id   # user identifier (this parser stores it as str)
        self.time = time         # event timestamp
        self.type = type         # either "churn" or "renewal"


class ChurnEventsParser:
    """Derives churn/renewal events from subscription rows in the CRM MySQL
    database, then uploads them to BigQuery."""

    def __init__(self, cur_date, crm_mysql_cursor):
        # churn threshold defines interval for user to renew subscription
        # if there is a pause between subscriptions A and B < churn_threshold_in_days
        # we count it as 'renewal', otherwise as 'churn' event
        self.churn_threshold_in_days = 2
        self.cur_date = cur_date
        self.crm_mysql_cursor = crm_mysql_cursor
        # computed events list
        self.events_list = []

    def load_data(self):
        """Fill self.events_list with churn/renewal events for subscriptions
        that ended exactly `churn_threshold_in_days` before cur_date."""
        # one can tell if user churned/renewed for subscriptions ending on the
        # 'subscriptions_churn_threshold_date' day
        subscriptions_churn_threshold_date = self.cur_date - timedelta(days=self.churn_threshold_in_days)
        threshold_day = subscriptions_churn_threshold_date.strftime("%Y-%m-%d")
        threshold_day_start = threshold_day + ' 00:00:00'
        threshold_day_end = threshold_day + ' 23:59:59'

        print("ChurnEventsParser - loading churn/renewal users data for date " + str(self.cur_date) +
              ", churn threshold " + str(self.churn_threshold_in_days) + " day(s)")

        # Look for subscriptions(s1) that ended on 'threshold_day' (having length > 5)
        # and join them with the same user|type future subscriptions (s2) (having length > 5)
        # that start within 'self.churn_threshold_in_days'
        sql = '''
            SELECT
                s1.user_id AS user_id,
                s1.end_time AS sub_end,
                s2.start_time AS next_sub_start
            FROM subscriptions s1
            JOIN subscription_types st
                ON s1.subscription_type_id = st.id AND st.length > 5
            LEFT JOIN
                (SELECT subscriptions.* FROM subscriptions
                 JOIN subscription_types
                   ON subscriptions.subscription_type_id = subscription_types.id
                  AND subscription_types.length > 5) s2
                ON s1.user_id = s2.user_id
               AND s1.end_time <= s2.start_time
               AND DATE_ADD(s1.end_time, INTERVAL {} DAY) >= s2.start_time
               AND DATE_ADD(s1.end_time, INTERVAL {} DAY) <= s2.end_time
            WHERE s1.end_time >= '{}' AND s1.end_time <= '{}'
        '''
        # All interpolated values are generated internally (int threshold,
        # formatted dates), not user input.
        sql = sql.format(self.churn_threshold_in_days, self.churn_threshold_in_days,
                         threshold_day_start, threshold_day_end)
        self.crm_mysql_cursor.execute(sql)

        events_list = []
        churn_events_count = 0
        renewal_events_count = 0
        for user_id, sub_end, next_sub_start in self.crm_mysql_cursor:
            # No follow-up subscription inside the threshold window => churn;
            # otherwise it is a renewal at the new subscription's start time.
            if next_sub_start is None:
                events_list.append(Event(str(user_id), sub_end, "churn"))
                churn_events_count += 1
            else:
                events_list.append(Event(str(user_id), next_sub_start, "renewal"))
                renewal_events_count += 1
        self.events_list = events_list

        print("Loaded " + str(churn_events_count) + " churn event(s) and " +
              str(renewal_events_count) + " renewal event(s)")

    def upload_to_bq(self, bq_uploader):
        """Push the computed events into the BigQuery 'events' table."""
        print("ChurnEventsParser - uploading data to BigQuery")
        # TODO delete data first?
        records = []
        for event in self.events_list:
            records.append({
                "user_id": event.user_id,
                "time": str(event.time),
                "type": event.type,
                "computed_for_date": str(self.cur_date),
            })
        df = pandas.DataFrame(
            records,
            columns=["user_id", "time", "type", "computed_for_date"]
        )
        bq_uploader.upload_to_table('events', data_source=df)
22,235
428c0deaa7d17ef990c25fc84d8ea1229e074426
# Generated by Django 3.2 on 2021-05-24 08:45 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('flashcards', '0006_auto_20210524_1744'), ] operations = [ migrations.AlterField( model_name='card', name='due_date', field=models.DateField(default=django.utils.timezone.now), ), ]
22,236
2ff3ba1622b4718fb3de8543161b35287cf947cb
import random
from tqdm import tqdm
import numpy as np

# keep-probability threshold used to down-sample negative ("0") train labels
prob = 0.5
batch_size = 128
test_size = 77092   # number of sentences in sent_test.txt
# padding rows needed so the test set divides evenly into batches
res = batch_size - test_size % batch_size


def process_text(path):
    """Read a tab-separated sentence file.

    Each line is '<id>\t<entity1>\t<entity2>\t<words>'.
    Returns (sentences, entity1 list, entity2 list).
    """
    X, e1_list, e2_list = [], [], []
    with open(path) as f:
        for line in tqdm(f.readlines()):
            _, e1, e2, words = line.strip().split("\t")
            X.append(words)
            e1_list.append(e1)
            e2_list.append(e2)
    return X, e1_list, e2_list


def process_label(path):
    """Read relation labels; keep only the first relation id of each line."""
    y = []
    with open(path) as f:
        for line in tqdm(f.readlines()):
            label = line.strip().split("\t")[1].split()[0]
            y.append(label)
    return y


print("process train file...")
X_train, e1_train, e2_train = process_text("../data/sent_train.txt")
y_train = process_label("../data/sent_relation_train.txt")

# calculating truncated length
X_train_length = [len(x.split()) for x in X_train]
print("95% length - [{}]".format(np.percentile(X_train_length, 95)))

print("process dev file...")
X_dev, e1_dev, e2_dev = process_text("../data/sent_dev.txt")
y_dev = process_label("../data/sent_relation_dev.txt")

print("process test file...")
X_test, e1_test, e2_test = process_text("../data/sent_test.txt")

# #########################################
# reproduce train file, dev file. test file

# train
print("generating train.txt...")
train = zip(e1_train, e2_train, y_train, X_train)
with open("./train.txt", "w") as f:
    for e1, e2, y, x in train:
        # positives are always kept; negatives ("0") are kept with
        # probability ~(1 - prob) to rebalance the classes
        if y != "0":
            f.write(e1 + "\t" + e2 + "\t" + y + "\t" + x + "\n")
        if y == "0" and random.random() > prob:
            f.write(e1 + "\t" + e2 + "\t" + y + "\t" + x + "\n")

# dev
print("generating dev.txt...")
dev = zip(e1_dev, e2_dev, y_dev, X_dev)
with open("./dev.txt", "w") as f:
    for e1, e2, y, x in dev:
        f.write(e1 + "\t" + e2 + "\t" + y + "\t" + x + "\n")

# test — padded by repeating the first `res` examples so the total row count
# is a multiple of batch_size
print("generating test.txt...")
test = zip(e1_test + e1_test[:res], e2_test + e2_test[:res], X_test + X_test[:res])
with open("./test.txt", "w") as f:
    for e1, e2, x in test:
        f.write(e1 + "\t" + e2 + "\t" + x + "\n")
# ###################################################

# ###################################################
# generating word2vec txt (corpus for embedding training)
with open("../emb_build/train_vec.txt", "w") as f:
    for x in X_train + X_dev + X_test:
        f.write(x + "\n")
# ###################################################
22,237
eec556abc4f7c473ac9f83ac558569af6acf061d
from django import forms

from .models import Prueba


class PruebaForm(forms.ModelForm):
    """ModelForm for the Prueba model."""

    class Meta:
        """Binds the form to Prueba and selects the editable fields."""
        model = Prueba
        fields = (
            'titulo',
            'subtitulo',
            'cantidad'
        )
        widgets = {
            'titulo': forms.TextInput(
                attrs={
                    # user-facing placeholder (Spanish UI) — do not translate
                    'placeholder': 'Ingrese titulo'
                }
            )
        }

    def clean_cantidad(self):
        # Django per-field validation hook: reject quantities below 10.
        cantidad = self.cleaned_data['cantidad']
        if cantidad < 10:
            raise forms.ValidationError('Ingrese una cantidad mayor a 10')
        return cantidad
22,238
aa657e39221e88c0fe2f9b49ced8f6b720287818
# -*- encoding: utf-8 -*-
"""
:copyright: 2017 H2O.ai, Inc.
:license:   Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np

from ..libs.lib_pca import parameters
from ..solvers.utils import _setter
from ..solvers.truncated_svd import TruncatedSVDH2O, TruncatedSVD, _as_fptr
from ..utils.extmath import svd_flip


class PCAH2O(TruncatedSVDH2O):
    """Principal Component Analysis (PCA)

    Dimensionality reduction using truncated Singular Value Decomposition
    for GPU.

    This implementation uses the ARPACK implementation of the truncated SVD.
    Contrary to SVD, this estimator does center the data before computing
    the singular value decomposition.

    :param: n_components Desired dimensionality of output data

    :param: whiten : bool, optional
        When True (False by default) the `components_` vectors are multiplied
        by the square root of (n_samples) and divided by the singular values
        to ensure uncorrelated outputs with unit component-wise variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometime
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
    """

    def __init__(self, n_components=2, whiten=False):
        super().__init__(n_components)
        self.whiten = whiten
        self.n_components_ = n_components
        self.mean_ = None            # per-feature mean, filled by fit
        self.noise_variance_ = None  # estimated noise variance, filled by fit

    # pylint: disable=unused-argument
    def fit(self, X, y=None):
        """Fit PCA on matrix X.

        :param: X {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        :param y Ignored, for ScikitLearn compatibility
        :returns self : object
        """
        self.fit_transform(X)
        return self

    # pylint: disable=unused-argument
    def fit_transform(self, X, y=None):
        """Fit PCA on matrix X and perform dimensionality reduction on X.

        :param: X {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        :param: y Ignored, for ScikitLearn compatibility
        :returns X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        # The GPU library expects Fortran-ordered float64 buffers; all the
        # np.empty arrays below are output buffers filled by lib.pca().
        X = np.asfortranarray(X, dtype=np.float64)
        Q = np.empty(
            (self.n_components, X.shape[1]), dtype=np.float64, order='F')
        U = np.empty(
            (X.shape[0], self.n_components), dtype=np.float64, order='F')
        w = np.empty(self.n_components, dtype=np.float64)  # singular values
        explained_variance = np.empty(self.n_components, dtype=np.float64)
        explained_variance_ratio = np.empty(self.n_components,
                                            dtype=np.float64)
        mean = np.empty(X.shape[1], dtype=np.float64)

        param = parameters()
        param.X_m = X.shape[0]
        param.X_n = X.shape[1]
        param.k = self.n_components
        param.whiten = self.whiten

        lib = self._load_lib()
        lib.pca(
            _as_fptr(X), _as_fptr(Q), _as_fptr(w), _as_fptr(U),
            _as_fptr(explained_variance), _as_fptr(explained_variance_ratio),
            _as_fptr(mean), param)

        self._w = w
        # sign convention fix so output matches scikit-learn
        self._U, self._Q = svd_flip(U, Q)  # TODO Port to cuda?
        self._X = X

        n = X.shape[0]
        # To match sci-kit #TODO Port to cuda?
        # NOTE(review): this assigns `explained_variance` (no trailing
        # underscore) but `explained_variance_` is read further below —
        # presumably the base class exposes a property; confirm.
        self.explained_variance = self.singular_values_**2 / (n - 1)
        self.explained_variance_ratio = explained_variance_ratio
        self.mean_ = mean

        # TODO noise_variance_ calculation
        # can be done inside lib.pca if a bottleneck
        n_samples, n_features = X.shape
        total_var = np.var(X, ddof=1, axis=0)
        if self.n_components_ < min(n_features, n_samples):
            # average variance left in the discarded components
            self.noise_variance_ = \
                (total_var.sum() - self.explained_variance_.sum())
            # NOTE(review): mixes self.n_components here with
            # self.n_components_ in the condition above — confirm both are
            # always equal.
            self.noise_variance_ /= \
                min(n_features, n_samples) - self.n_components
        else:
            self.noise_variance_ = 0.

        X_transformed = U * w
        return X_transformed

    # Util to load gpu lib
    def _load_lib(self):
        from ..libs.lib_pca import GPUlib

        gpu_lib = GPUlib().get()

        return gpu_lib


class PCA(TruncatedSVD):
    """
    PCA Wrapper

    Selects between h2o4gpu.decomposition.PCASklearn
    and h2o4gpu.solvers.pca.PCAH2O

    Documentation:
    import h2o4gpu.decomposition ;
    help(h2o4gpu.decomposition.PCASklearn)
    help(h2o4gpu.solvers.pca.PCA)

    :param: backend : Which backend to use.
        Options are 'auto', 'sklearn', 'h2o4gpu'.
        Default is 'auto'.
        Saves as attribute for actual backend used.
    """

    # pylint: disable=unused-argument
    def __init__(self,
                 n_components=2,
                 copy=True,
                 whiten=False,
                 svd_solver="arpack",
                 tol=0.,
                 iterated_power="auto",
                 random_state=None,
                 verbose=False,
                 backend='auto'):
        super().__init__(n_components, random_state, tol, verbose, backend)
        self.svd_solver = svd_solver
        self.whiten = whiten

        # Environment variable overrides the constructor argument.
        import os
        _backend = os.environ.get('H2O4GPU_BACKEND', None)
        if _backend is not None:
            backend = _backend

        # Fall back to Sklearn
        # Can remove if fully implement sklearn functionality
        self.do_sklearn = False
        if backend == 'auto':
            # Any non-default value among these forces the sklearn backend,
            # since the GPU implementation only supports the defaults.
            params_string = [
                'svd_solver', 'random_state', 'tol', 'iterated_power'
            ]
            params = [svd_solver, random_state, tol, iterated_power]
            params_default = ['arpack', None, 0., 'auto']

            i = 0
            for param in params:
                if param != params_default[i]:
                    self.do_sklearn = True
                    if verbose:
                        print("WARNING:"
                              " The sklearn parameter " + params_string[i] +
                              " has been changed from default to " +
                              str(param) + ". Will run Sklearn PCA.")
                    # NOTE(review): redundant second assignment, kept as-is.
                    self.do_sklearn = True
                i = i + 1
        elif backend == 'sklearn':
            self.do_sklearn = True
        elif backend == 'h2o4gpu':
            self.do_sklearn = False
        if self.do_sklearn:
            self.backend = 'sklearn'
        else:
            self.backend = 'h2o4gpu'

        # Both backends are constructed eagerly; self.model points at the
        # chosen one.
        from h2o4gpu.decomposition.pca import PCASklearn
        self.model_sklearn = PCASklearn(
            n_components=n_components,
            copy=copy,
            whiten=whiten,
            svd_solver=svd_solver,
            tol=tol,
            iterated_power=iterated_power,
            random_state=random_state)
        self.model_h2o4gpu = PCAH2O(n_components=n_components, whiten=whiten)

        if self.do_sklearn:
            self.model = self.model_sklearn
        else:
            self.model = self.model_h2o4gpu

    def set_attributes(self):
        """Copy fitted attributes from the active backend model onto this
        wrapper (ignoring missing ones via the _setter helper)."""
        s = _setter(oself=self, e1=NameError, e2=AttributeError)
        s('oself.components_ = oself.model.components_')
        s('oself.explained_variance_= oself.model.explained_variance_')
        s('oself.explained_variance_ratio_ = '
          'oself.model.explained_variance_ratio_')
        s('oself.singular_values_ = oself.model.singular_values_')
        s('oself.mean_ = oself.model.mean_')
        s('oself.n_components_ = oself.model.n_components_')
        s('oself.noise_variance_ = oself.model.noise_variance_')
22,239
48389fca01b43ff6c2933623bcdcbc637040b76a
"""Demonstrations of matplotlib mplot3d: surface + contour projection,
parametric 3D line, 3D scatter, and 3D bar charts."""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# --- surface plot with a contour projected onto a plane -------------------
fig = plt.figure()
ax = Axes3D(fig)
x = np.arange(-4, 4, 0.25)
y = np.arange(-4, 4, 0.25)
X, Y = np.meshgrid(x, y)
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)

ax.plot_surface(X, Y, Z,
                rstride=1,        # row stride
                cstride=1,        # column stride
                cmap="rainbow")   # colormap
plt.title("3D图")

# Project contour lines onto the z = -2 plane.
# FIX: the keyword is `zdir` (direction to project along); the original
# passed a nonexistent `zdim` argument.
ax.contour(X, Y, Z, zdir='z', offset=-2, cmap="rainbow")
ax.set_zlim(-2, 2)
#plt.show()

# --- empty 3D axes ---------------------------------------------------------
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
#plt.show()

# --- 3D parametric line ----------------------------------------------------
fig = plt.figure()
ax = fig.gca(projection="3d")
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z**2
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z)
#plt.show()

# --- 3D scatter plot --------------------------------------------------------
np.random.seed(1)

def randrange(n, vmin, vmax):
    """Return n uniform random floats in the half-open range [vmin, vmax)."""
    return (vmax - vmin) * np.random.rand(n) + vmin

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
n = 100
for c, m, zlow, zhigh in [("r", "o", -50, -25), ("b", "x", -30, -5)]:
    xs = randrange(n, 23, 32)
    ys = randrange(n, 0, 100)
    zs = randrange(n, zlow, zhigh)
    ax.scatter(xs, ys, zs, c=c, marker=m)

# specify the 3D map to face your direction (elevation, azimuth)
ax.view_init(30, 10)
plt.show()

# --- 3D bar chart ------------------------------------------------------------
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):
    xs = np.arange(20)
    ys = np.random.rand(20)
    cs = [c] * len(xs)
    # each series is one row of bars placed at depth y = z
    ax.bar(xs, ys, zs=z, zdir='y')
    #ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.7)
plt.show()
22,240
632b2a79d1e98659445b8e767ff5f9edc7e1a3c8
from tkinter import *
#import tkinter as tk
from tkinter import font as tkfont
from tkinter import messagebox

#window = Tk()
#window.geometry("600x600")
#window.title("Timer app")
#window.mainloop()


class TimerApp(Tk):
    """Root window that stacks the app's pages and raises one at a time."""

    def __init__(self, *args, **kwargs):
        Tk.__init__(self, *args, **kwargs)
        self.title_font = tkfont.Font(family='Helvetica', size=18,
                                      weight="bold", slant="italic")

        # the container is where we'll stack a bunch of frames
        # on top of each other, then the one we want visible
        # will be raised above the others
        container = Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)

        self.frames = {}
        for F in (AddInterval, EditInterval):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            # put all of the pages in the same location;
            # the one on the top of the stacking order
            # will be the one that is visible.
            frame.grid(row=0, column=0, sticky="nsew")

        self.show_frame("AddInterval")

    def show_frame(self, page_name):
        '''Show a frame for the given page name'''
        frame = self.frames[page_name]
        frame.tkraise()


class AddInterval(Frame):
    """Page for entering interval/break durations via Spinbox widgets.

    The spinboxes are collected in self.intervalSeconds /
    self.intervalMinutes / self.breakSeconds / self.breakMinutes so their
    values can be read back later.
    """

    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller
        self.breakSeconds = []
        self.breakMinutes = []
        self.intervalSeconds = []
        self.intervalMinutes = []

        beginingLabel: Label = Label(self, text="Please enter as many sequences and intervals you like")
        beginingLabel.pack()

        intervalLabel: Label = Label(self, text="Enter interval time", bg="white")
        intervalLabel.pack()

        intervalSecondsLabel: Label = Label(self, text="Enter interval seconds", bg="white")
        intervalSecondsLabel.pack()
        intervalSecondsSpinbox: Spinbox = Spinbox(self, to=59, state="readonly")
        self.intervalSeconds.append(intervalSecondsSpinbox)
        intervalSecondsSpinbox.pack()

        intervalMinutesLabel: Label = Label(self, text="Enter interval minutes", bg="white")
        intervalMinutesLabel.pack()
        intervalMinutesSpinbox: Spinbox = Spinbox(self, to=720, state="readonly")
        self.intervalMinutes.append(intervalMinutesSpinbox)
        intervalMinutesSpinbox.pack()

        addIntervalButton: Button = Button(self, text="Add New Interval", command=self.addInterval)
        addIntervalButton.pack()
        saveIntervalButton: Button = Button(self, text="Save New Interval", command=self.saveInterval)
        saveIntervalButton.pack()

    def addInterval(self):
        """Append one more break + interval entry group to the page."""
        breakLabel: Label = Label(self, text="Enter break time", bg="white")
        breakLabel.pack()

        breakSecondsLabel: Label = Label(self, text="Enter break seconds", bg="white")
        breakSecondsLabel.pack()
        breakSecondsSpinbox: Spinbox = Spinbox(self, to=59, state="readonly")
        self.breakSeconds.append(breakSecondsSpinbox)
        breakSecondsSpinbox.pack()

        breakMinutesLabel: Label = Label(self, text="Enter break minutes", bg="white")
        breakMinutesLabel.pack()
        breakMinutesSpinbox: Spinbox = Spinbox(self, to=720, state="readonly")
        self.breakMinutes.append(breakMinutesSpinbox)
        breakMinutesSpinbox.pack()

        intervalLabel: Label = Label(self, text="Enter interval time", bg="white")
        intervalLabel.pack()

        intervalSecondsLabel: Label = Label(self, text="Enter interval seconds", bg="white")
        intervalSecondsLabel.pack()
        intervalSecondsSpinbox: Spinbox = Spinbox(self, to=59, state="readonly")
        self.intervalSeconds.append(intervalSecondsSpinbox)
        intervalSecondsSpinbox.pack()

        intervalMinutesLabel: Label = Label(self, text="Enter interval minutes", bg="white")
        intervalMinutesLabel.pack()
        intervalMinutesSpinbox: Spinbox = Spinbox(self, to=720, state="readonly")
        self.intervalMinutes.append(intervalMinutesSpinbox)
        intervalMinutesSpinbox.pack()

    def saveInterval(self):
        """Show the currently entered interval seconds in a dialog."""
        # FIX: getIntervalSeconds is a bound method; the original called
        # self.getIntervalSeconds(self), passing `self` twice and raising
        # TypeError at runtime.
        messagebox.showinfo("Interval Seconds", self.getIntervalSeconds())
        #messagebox.showinfo("Title", getIntervals())
        #w = Label(separator, text=intervals, bg="white")
        #w.pack()

    def getIntervalSeconds(self):
        """Return the list of values of all interval-seconds spinboxes."""
        values = [intervalSecondsSpinbox.get()
                  for intervalSecondsSpinbox in self.intervalSeconds]
        return values


class EditInterval(Frame):
    """Placeholder page for editing intervals (not implemented yet)."""

    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller


if __name__ == "__main__":
    app = TimerApp()
    app.mainloop()
22,241
bb4a2f22bac2ad63549a780d2ccabb7e19fd0354
class GraphicFX:
    """Namespace for graphics-effect related constants."""

    class Filters:
        """Integer identifiers for the supported blur filters."""
        BLUR, GAUSSIAN_BLUR = 0, 1
22,242
d86422492be7e4ae8c31e629a0855842b2f52d78
from Datastructures import LinkedList


class TestLinkedList:
    """Unit tests for the LinkedList data structure."""

    def test_init(self):
        """A fresh list starts with a sentinel head carrying no value."""
        linked = LinkedList()
        assert linked.head.val is None
        assert linked.head.next is None

    def test_insert(self):
        """Insertion happens at the front, so traversal sees newest first."""
        linked = LinkedList()
        for value in (5, "a", -200):
            linked.insert(value)
        node = linked.head.next
        for expected in (-200, "a", 5):
            assert node.val == expected
            node = node.next

    def test_search(self):
        """search() returns the node holding the value, or None if absent."""
        linked = LinkedList()
        for value in (5, 25, "k", 98, "String"):
            linked.insert(value)
        found = linked.search("k")
        assert found.val == "k"
        assert linked.search("Not there") is None

    def test_delete_node(self):
        """A deleted value can no longer be found."""
        linked = LinkedList()
        for value in (25, 98, 105):
            linked.insert(value)
        linked.delete_node(25)
        assert linked.search(25) is None
22,243
44b40b5fe641d3e6b46842f191837bd751edbdff
""" Data Structures :: LRU Cache """ class CacheNode: def __init__(self, key, value, prev=None, next=None): """A node in a doubly-linked list-based LRU cache. :param key : Key by which to access nodes. :param value : Value accessed by key. :param prev [CacheNode] : Previous CacheNode in list, defaults to None :param next [CacheNode] : Next CacheNode in list, defaults to None """ self.key = key self.value = value self.prev = prev self.next = next def delete(self): """Rearranges the node's previous and next pointers accordingly, effectively deleting it.""" if self.prev: self.prev.next = self.next if self.next: self.next.prev = self.prev class LRUCache: def __init__(self, node=None, limit=10): """The LRUCache class keeps track of the max number of nodes it can hold, the current number of nodes it is holding, a doubly- linked list that holds the key-value entries in the correct order, as well as a storage dict that provides fast access to every node stored in the cache. :param node [CacheNode] : Optional initial CacheNode. :param limit [int] : Max number of elements in cache, default 10. """ self.head = node # Head of cache is most recent self.tail = node # Tail of cache is oldest self.limit = limit self.length = 1 if node is not None else 0 # self.storage = DoublyLinkedList() def __len__(self): """Returns number of elements currently in cache.""" return self.length def move_to_head(self, node): """Removes the input node from its current spot in the List and inserts it as the new head node of the List. :param node (ListNode) : Node to be moved to head. """ if node is self.head: return value = node.value self.delete(node) self.add_to_head(value) def get(self, key): """Retrieves the value associated with the given key. Moves the key-value pair to the end of the order such that the pair is considered most-recently used. Returns the value associated with the key or None if the key-value pair doesn't exist in the cache. 
""" node = self.head value = None exists = False while node: # Loop through nodes, looking for key if node.key == key: exists = True break if exists: if node is self.head: value = node.value else: self.delete(node) new_node = CacheNode(key, value) self.length += 1 return value def set(self, key, value): """Adds the given key-value pair to the cache. The newly-added pair is considered the most-recently used entry in the cache. If the cache is already at max capacity before this entry is added, then the oldest entry in the cache is removed to make room. In the case that the key already exists in the cache, the old value associated with the key is overwritten by the new value. """ # First, look for the key in the cache using `self.get()` # If not exists (returns None), add key-value to head # If exists, pop old key-value from list, add new value to head pass def delete(self, node): """Removes a node from the list and handles cases where the node was the head or the tail. :param node (ListNode) : Node to be removed from list. """ # TODO: Catch errors if empty or node not in list self.length -= 1 # Update length # If head and tail, both get set to None if self.head is self.tail: self.head = None self.tail = None elif node is self.head: # If head, set current head to next self.head = self.head.next node.delete() elif node is self.tail: # If tail, set current tail to prev self.tail = self.tail.prev node.delete() else: # If regular node, just delete node.delete()
22,244
f0909a166b3e01c27cf191ec22e0f6d5f3e271de
# -*- coding: utf-8 -*-
import numpy as np


def first_non_zero(A, p, k, n, curr):
    """Pivoting strategy: if the current diagonal entry is zero, swap in
    the first row below it with a non-zero entry in this column, and
    record the swap in p."""
    if A[curr][curr] == 0:
        for row in range(k, n):
            if A[row][curr] != 0:
                A[[curr, row]] = A[[row, curr]]
                p[curr] = row
                break


def greatest_non_zero(A, p, k, n, curr):
    """Pivoting strategy: swap the row holding the largest absolute value
    in the current column into the pivot position, recording it in p."""
    magnitudes = np.array([abs(A[row][curr]) for row in range(curr, n)])
    best = curr + np.argmax(magnitudes)
    A[[curr, best]] = A[[best, curr]]
    p[curr] = best


def zerlegung(A, swap_function):
    """In-place LU decomposition with the supplied pivoting strategy.

    On return, A holds U in its upper triangle and the multipliers of the
    unit-lower-triangular L below the diagonal. p records the row swap
    chosen at each elimination step.
    """
    n = len(A)
    p = []
    for k in range(1, n):
        curr = k - 1
        p.append(curr)
        swap_function(A, p, k, n, curr)
        for row in range(k, n):
            factor = A[row][curr] / A[curr][curr]
            for col in range(k, n):
                A[row][col] -= factor * A[curr][col]
            A[row][curr] = factor  # store the L multiplier in place
    return A, p


def permutation(p, x):
    """Apply the recorded row swaps to the vector x in place; returns x."""
    for index, target in enumerate(p):
        x[index], x[target] = x[target], x[index]
    return x


def vorwaerts(LU, b):
    """Forward substitution using the unit-diagonal L stored in LU."""
    y = []
    for row in range(len(LU)):
        y.append(b[row] - sum(LU[row, col] * y[col] for col in range(row)))
    return y


def rueckwaerts(LU, b):
    """Backward substitution using the U factor stored in LU."""
    size = len(LU)
    x = [0 for _ in range(size)]
    for row in range(size - 1, -1, -1):
        tail = sum(LU[row, col] * x[col] for col in range(row + 1, size))
        x[row] = (b[row] - tail) / LU[row, row]
    return x
22,245
73c8fd450457231cd253a80cd39ea9b1d722d11b
class CertificateSummary(object):
    """Lightweight summary view of a certificate record.

    Wraps a raw (camelCase-keyed) model dict and exposes each field as a
    snake_case read/write property. Keys missing from the model default
    to None.

    Fix: the original class defined the ``is_ca`` property and setter
    twice; the redundant second definition (which simply rebound the same
    accessors) has been removed.
    """

    def __init__(self, model):
        self.__id = model.get('id', None)
        self.__subscription_id = model.get('subscriptionId', None)
        self.__ca_id = model.get('caId', None)
        self.__key_id = model.get('keyId', None)
        self.__date_issued = model.get('dateIssued', None)
        self.__date_expires = model.get('dateExpires', None)
        self.__date_revoked = model.get('dateRevoked', None)
        self.__alias = model.get('alias', None)
        self.__subject_display_name = model.get('subjectDisplayName', None)
        self.__serial_number = model.get('serialNumber', None)
        self.__is_ca = model.get('isCA', None)
        self.__kind = model.get('kind', None)
        self.__format = model.get('format', None)

    @property
    def id(self):
        return self.__id

    @id.setter
    def id(self, value):
        self.__id = value

    @property
    def subscription_id(self):
        return self.__subscription_id

    @subscription_id.setter
    def subscription_id(self, value):
        self.__subscription_id = value

    @property
    def ca_id(self):
        return self.__ca_id

    @ca_id.setter
    def ca_id(self, value):
        self.__ca_id = value

    @property
    def key_id(self):
        return self.__key_id

    @key_id.setter
    def key_id(self, value):
        self.__key_id = value

    @property
    def date_issued(self):
        return self.__date_issued

    @date_issued.setter
    def date_issued(self, value):
        self.__date_issued = value

    @property
    def date_expires(self):
        return self.__date_expires

    @date_expires.setter
    def date_expires(self, value):
        self.__date_expires = value

    @property
    def date_revoked(self):
        return self.__date_revoked

    @date_revoked.setter
    def date_revoked(self, value):
        self.__date_revoked = value

    @property
    def alias(self):
        return self.__alias

    @alias.setter
    def alias(self, value):
        self.__alias = value

    @property
    def subject_display_name(self):
        return self.__subject_display_name

    @subject_display_name.setter
    def subject_display_name(self, value):
        self.__subject_display_name = value

    @property
    def serial_number(self):
        return self.__serial_number

    @serial_number.setter
    def serial_number(self, value):
        self.__serial_number = value

    @property
    def is_ca(self):
        return self.__is_ca

    @is_ca.setter
    def is_ca(self, value):
        self.__is_ca = value

    @property
    def kind(self):
        return self.__kind

    @kind.setter
    def kind(self, value):
        self.__kind = value

    @property
    def format(self):
        return self.__format

    @format.setter
    def format(self, value):
        self.__format = value


__all__ = ['CertificateSummary']
22,246
6d192bf8aea7c3005637941fd7d19248559c5155
def is_han(n):
    """Return True if the decimal digits of n form an arithmetic progression.

    Numbers with at most two digits always qualify. For longer numbers the
    common difference is taken from the first two digits and every digit
    must follow it.
    """
    if len(str(n)) <= 2:
        return True
    nums = list(map(int, str(n)))
    first = nums[0]
    delta = nums[1] - nums[0]
    return all(digit == first + delta * i for i, digit in enumerate(nums))


def main():
    """Read N from stdin and print how many such numbers lie in [1, N]."""
    N = int(input())
    count = sum(1 for i in range(1, N + 1) if is_han(i))
    print(count)


# Guard the interactive part so the module can be imported (and is_han
# tested) without blocking on stdin. Behavior when run as a script is
# unchanged.
if __name__ == "__main__":
    main()
22,247
148bb1169b536366e83f57f5d7773d8147f69f14
import csv
import numpy as np
import matplotlib.pyplot as plt


def parse_dump_times(path):
    """Parse a directory-listing-style file and return dump times in
    decimal hours.

    Looks for rows containing a filename with 'output' in it; the token
    immediately before the filename is assumed to be an HH:MM timestamp
    (TODO confirm against the actual listing format). Consecutive
    duplicate times are collapsed to one entry.
    """
    dump_time = []
    with open(path, 'r') as data_file:
        reader = csv.reader(data_file, delimiter=' ')
        for row in reader:
            file_match = [s for s in row if 'output' in s]
            if not file_match:
                continue
            time_ind = row.index(file_match[0]) - 1
            time_split = row[time_ind].split(':')
            # int(minutes)/60 is a fraction like 0.25; dropping the
            # leading "0" and appending to the hour yields e.g. "13.25".
            time_dec = float(time_split[0] + str(int(time_split[1]) / 60)[1:])
            # only record a time when it differs from the previous one
            if not dump_time or dump_time[-1] != time_dec:
                dump_time.append(time_dec)
    # NOTE: the original called data_file.close() after the `with` block,
    # which is redundant -- the context manager already closed the file.
    return dump_time


def compute_intervals(dump_time):
    """Return the gaps (hours) between consecutive dump times, adding 24h
    when a gap wraps past midnight."""
    dt = []
    for prev, curr in zip(dump_time, dump_time[1:]):
        if curr < prev:
            dt.append((24 + curr) - prev)  # wrapped past midnight
        else:
            dt.append(curr - prev)
    return dt


def main():
    """Plot the time between dumps and save it as dump_frequency.png."""
    dump_time = parse_dump_times("file_data.txt")
    dt = compute_intervals(dump_time)
    plt.clf()
    plt.plot(dt)
    plt.ylabel("time between dumps")
    plt.savefig("dump_frequency.png")


if __name__ == "__main__":
    main()
22,248
21bf1fad54569ccab990aa5a33c97b74fec745f8
class Stack:
    """A stack (LIFO) backed by a Python list.

    push: add the item to the top of the stack.
    pop: remove and return the item on top of the stack.
    its_empty: test whether the stack is empty.
    height: return the number of levels of the stack.
    top_stack: return the element on top without removing it.
    show_stack: print the contents from top to bottom.
    """

    def __init__(self):
        """Create an empty stack."""
        self.items = []

    def show_stack(self) -> None:
        """Print the stack contents from the top down; note when empty."""
        print("Show stack: ")
        printed_any = False
        for element in reversed(self.items):
            print(element)
            printed_any = True
        if not printed_any:
            print("The stack is empty!")
        print("\n")

    def push(self, item) -> None:
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self) -> object or None:
        """Remove and return the top item.

        Prints a message (and returns None) when the stack is empty.
        """
        if not self.items:
            print("Stack is empty! \n")
            return None
        return self.items.pop()

    def its_empty(self) -> bool:
        """Return True when the stack holds no items.

        :return: bool
        """
        return len(self.items) == 0

    def height(self) -> int:
        """Return the number of levels of the stack."""
        return len(self.items)

    def top_stack(self) -> object or None:
        """:return: the top element if the stack is non-empty, else None."""
        if not self.items:
            return None
        return self.items[-1]
22,249
3e5003f787f6626d9d1f29106bd9b0b7eb55ffa0
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import re

import scrapy
from bs4 import BeautifulSoup
from util import db, config, tool
import logging


class UrlItem(scrapy.Item):
    """A crawled URL record backed by the project's `db` helper.

    Fields mirror the URL table; `content`/`map_part` hold the fetched
    body and a per-instance cache of derived "parts" (text, soup, layout
    skeleton, ...) computed lazily by get_part(). Written for Python 2
    (uses `urlparse` and dict.iteritems below).
    """

    # attribute in DB
    id = scrapy.Field()
    url = scrapy.Field()
    is_target = scrapy.Field()
    content_hash = scrapy.Field()
    layout_hash = scrapy.Field()
    extractor = scrapy.Field()
    last_access_ts = scrapy.Field()
    last_extract_ts = scrapy.Field()
    title = scrapy.Field()
    content_type = scrapy.Field()

    # attribute from response/file
    content = scrapy.Field()  # str
    map_part = scrapy.Field()

    def save(self):
        # Persist the DB-backed fields; extractor is serialized to a string.
        db.general_update_url(self['id'], self['is_target'], self['content_hash'], self['layout_hash'],
                              tool.extractor2str(self['extractor']), self['title'], self['content_type'])

    @staticmethod
    def load_with_content(id=None, url=None, file_path=None, response=None):
        """Load a UrlItem from the DB and attach its body.

        The body comes either from a scrapy `response` or from a file
        under `file_path` named via filename(). Returns None when the
        URL record does not exist; raises when neither source is given.
        """
        r = UrlItem.load(id=id, url=url)
        if r is None:
            return None
        if response is not None:
            r['content'] = response.body
            r['content_type'] = tool.get_content_type_for_response(response)
        elif file_path is not None:
            r['content'] = open(file_path + "/" + r.filename()).read()
        else:
            raise Exception('must provide file_path or response')
        return r

    @staticmethod
    def load(id=None, url=None):
        """Load a UrlItem's DB fields by id or by url.

        Returns None when no matching row exists; raises when neither
        key is provided.
        """
        r = UrlItem()
        if id is not None:
            res = db.get_url_by_id(id)
        elif url is not None:
            res = db.get_url_by_url(url)
        else:
            raise Exception("must provide id or url")
        if res is None:
            return None
        r['id'] = res['id']
        r['url'] = res['url']
        r['is_target'] = res['is_target']
        r['content_hash'] = res['content_hash']
        r['layout_hash'] = res['layout_hash']
        r['extractor'] = tool.str2extractor(res['extractor'])
        r['last_access_ts'] = res['last_access_ts']
        r['last_extract_ts'] = res['last_extract_ts']
        r['title'] = res['title']
        r['content_type'] = res['content_type']
        return r

    def filename(self):
        # File name on disk: "<id>.<ext>", where the extension is the
        # MIME subtype (e.g. "text/html" -> "html").
        ext = self['content_type'].split('/')[1]
        filename = "%s.%s" % (self['id'], ext)
        return filename

    """
    ''' from tree
    """

    def get_part(self, part):
        """Return a derived view ("part") of the page, computing and
        caching it in self['map_part'] on first request.

        Supported parts: "text", "html", "tag", "title", "keyword",
        "description", "url", "soup" (a BeautifulSoup tree with relative
        URLs absolutized), and "layout" (a stripped-down HTML skeleton
        used for layout hashing).
        """
        if 'map_part' not in self.keys():
            self['map_part'] = dict()
        if part in self['map_part'].keys():
            return self['map_part'][part]
        else:
            if part == "text":
                # visible text only, one node per line
                self['map_part'][part] = self.get_part('soup').get_text("\n")
            elif part == "html":
                self['map_part'][part] = str(self.get_part('soup'))
            elif part == "tag":
                # just the tag markup, with all text content discarded
                self['map_part'][part] = " ".join(re.findall("<.*?>", self.get_part("html")))
            elif part == "title":
                self['map_part'][part] = " ".join([x.text for x in self.get_part('soup').findAll('title')])
            elif part == "keyword":
                # both capitalizations of the meta name are seen in the wild
                tags = self.get_part('soup').select('meta[name="Keywords"]') + self.get_part('soup').select('meta[name="keywords"]')
                self['map_part'][part] = " ".join([x['content'] for x in tags])
            elif part == "description":
                tags = self.get_part('soup').select('meta[name="Description"]') + self.get_part('soup').select(
                    'meta[name="description"]')
                self['map_part'][part] = " ".join([x['content'] for x in tags])
            elif part == "url":
                self['map_part'][part] = self['url']
            elif part == "soup":
                self['map_part'][part] = BeautifulSoup(self['content'],'lxml')
                import urlparse  # Python 2 module (urllib.parse in Py3)
                # Rewrite configured attributes (e.g. a/href, img/src) to
                # absolute URLs relative to this page's URL.
                for k, v in config.retriever_absolute_url_replace_pattern.iteritems():
                    tags = self.get_part('soup').findAll(k, {v: True})
                    for tag in tags:
                        tag[v] = urlparse.urljoin(self['url'], tag[v])
            elif part == "layout":
                soup = BeautifulSoup(self['content'],'lxml')
                # remove tags
                for tag in config.layout_tag_remove:
                    for t in soup.select(tag):
                        t.decompose()
                soup = BeautifulSoup(str(soup),'lxml')
                if soup.body is None:
                    result = ""
                else:
                    result = soup.body.prettify()
                # Comments
                r = re.compile(r"<!.*?>", re.S)
                result = r.sub("", result)
                # Content
                r = re.compile(r"(?<=>).*?(?=<)", re.S)
                result = r.sub("", result)
                # attributes (remove attributes)
                # Build one alternation: clear all attrs of some tags,
                # drop some attrs entirely, blank the values of others --
                # the lists come from util.config.
                r = "|".join(
                    ["(?<=<" + x + " ).*?(?=(/)?>)" for x in config.layout_tag_clear_attr] +
                    [" " + x + "=\".*?\"" for x in config.layout_attr_remove] +
                    ["(?<= " + x + "=\").*?(?=\")" for x in config.layout_attr_clear]
                )
                r = re.compile(r, re.S)
                result = r.sub("", result)
                # Re-parse so lxml normalizes what's left of the skeleton.
                soup = BeautifulSoup(result,'lxml')
                logging.debug(str(soup))
                self['map_part'][part] = str(soup)
        return self['map_part'][part]

    def get_short_title(self):
        """Return the page title, truncated to at most 128 characters."""
        title = self.get_part('title')
        if len(title) > 128:
            title = title[0:128]
        return title
22,250
ff24d7a74e32dd4aa8e8e5e2e7e7dda7e0ed55e7
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np

# Global font: 10pt sans-serif (Arial), and regular (non-italic) mathtext.
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial'], 'size': 10})
rc('mathtext', **{'default': 'regular'})

# (In a notebook, `%config InlineBackend.figure_format = 'retina'` raises
# the inline-figure DPI for high-DPI displays.)
plt.rcParams["axes.titlesize"] = 'medium'


def custom_lineplot(ax, x, y, error, xlims, ylims, color='red'):
    """Customized line plot with error bars."""
    ax.errorbar(x, y, yerr=error, color=color, ls='--', marker='o',
                capsize=5, capthick=1, ecolor='black')
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    return ax


def custom_scatterplot(ax, x, y, error, xlims, ylims, color='green', markerscale=100):
    """Customized scatter plot where marker size is proportional to error measure."""
    sizes = error * markerscale
    ax.scatter(x, y, color=color, marker='o', s=sizes, alpha=0.5)
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    return ax


def custom_barchart(ax, x, y, error, xlims, ylims, error_kw, color='lightblue', width=0.75):
    """Customized bar chart with positive error bars only."""
    asymmetric_error = [np.zeros(len(error)), error]  # zero lower bound
    ax.bar(x, y, color=color, width=width, yerr=asymmetric_error,
           error_kw=error_kw, align='center')
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    return ax


def custom_boxplot(ax, x, y, error, xlims, ylims, mediancolor='magenta'):
    """Customized boxplot with solid black lines for box, whiskers, caps, and outliers."""
    solid_black = {'color': 'black', 'linestyle': '-'}
    ax.boxplot(y, positions=x,
               medianprops={'color': mediancolor, 'linewidth': 2},
               boxprops=dict(solid_black),
               whiskerprops=dict(solid_black),
               capprops=dict(solid_black),
               flierprops={'color': 'black', 'marker': 'x'})
    ax.set_xlim(xlims)
    ax.set_ylim(ylims)
    return ax


def stylize_axes(ax, title, xlabel, ylabel, xticks=[], yticks=[], xticklabels=[], yticklabels=[]):
    """Customize axes spines, title, labels, ticks, and ticklabels.

    Tick positions/labels are only applied when a non-empty sequence is
    passed (len() checks, so array-like arguments also work).
    """
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.xaxis.set_tick_params(top=False, direction='out', width=1)
    ax.yaxis.set_tick_params(right=False, direction='out', width=1)
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    if len(xticks) > 0:
        ax.set_xticks(xticks)
    if len(yticks) > 0:
        ax.set_yticks(yticks)
    if len(xticklabels) > 0:
        ax.set_xticklabels(xticklabels)
    if len(yticklabels) > 0:
        ax.set_yticklabels(yticklabels)


def cm2inch(value):
    """Convert centimeters to inches."""
    return value / 2.54


def goldenRatio(width):
    """Return the height that puts width:height at the golden ratio (~1.618)."""
    return width / (.5 * np.sqrt(5) + .5)


# PNAS figure sizes (journal column widths), in inches.
oneColumnWidth = cm2inch(8.7)
oneandhalfColumnWidth = cm2inch(11.4)
twoColumnWidth = cm2inch(17.8)
maxheight = cm2inch(22.5)
# Text size minimum 6-8 points
22,251
fb3255332ff3b76c7a899435c95f9323994530b3
from enum import Enum


class StrictnessModesEnum(Enum):
    """Enumeration of the supported strictness modes."""

    STRICT = 1
    NONSTRICT = 2
    UNDEFINED = 3
22,252
26f13ed6f00585a42f526cb7395145db12f3c27c
import scipy.special
import numpy as np
import math

# Working alphabet: lowercase letters plus digits (the digits were added
# because the analysed texts contain numbers).
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'
    , 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't'
    , 'u', 'v', 'w', 'x', 'y', 'z'
    , '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']


def calc_single_col_dist(column):
    """Total-variation distance of a column's symbol distribution from uniform.

    :param column: iterable of symbols drawn from `alphabet` (must be non-empty).
    :return: (distance, letter_map) where letter_map maps each alphabet
             symbol to its relative frequency in the column.
    """
    letter_map = {letter: 0.0 for letter in alphabet}
    for letter in column:
        letter_map[letter] += 1.0
    for letter in alphabet:
        letter_map[letter] = letter_map[letter] / len(column)
    uniform_dist = 1.0 / len(alphabet)
    # TV distance = half the L1 distance between the distributions
    distance = sum(abs(val - uniform_dist) for val in letter_map.values()) / 2.0
    return distance, letter_map


def calc_freq_over_cols(lines, columns):
    """Average per-symbol frequency over the given column indices of `lines`."""
    letter_map = {letter: 0.0 for letter in alphabet}
    for col_index in columns:
        col = ''.join(line[col_index] for line in lines)
        _, col_map = calc_single_col_dist(col)
        for letter in alphabet:
            letter_map[letter] += col_map[letter]
    for letter in alphabet:
        letter_map[letter] /= len(columns)
    return letter_map


def calculate_total_var_dist(lines, line_len):
    """Per-column total-variation distances for the first `line_len` columns."""
    total_variation_dist = []
    for j in range(line_len):
        col = ''.join(line[j] for line in lines)
        dist, _ = calc_single_col_dist(col)
        total_variation_dist.append(dist)
    return total_variation_dist


def get_combination_count(n, k, element_distance=1):
    """Number of k-subsets of n positions whose elements are at least
    `element_distance` apart (standard stars-and-bars reduction)."""
    n -= (k - 1) * (element_distance - 1)
    return int(scipy.special.binom(n, k))


def get_next_combination(combination, vec_size, line_len, element_distance=1):
    """Advance `combination` (ascending positions in [0, line_len)) to the
    next combination with the required minimum spacing, in place.

    :return: the advanced combination, or None when exhausted.
    """
    increased_index = -1
    # Find the rightmost position that can still be incremented.
    for i in range(1, vec_size + 1):
        j = i - 1
        if combination[vec_size - i] != line_len - j * element_distance - 1:
            combination[vec_size - i] += 1
            increased_index = vec_size - i
            break
    if increased_index == -1:
        return None
    # Reset everything to the right to the minimal valid positions.
    for i in range(increased_index + 1, vec_size):
        combination[i] = combination[i - 1] + element_distance
    return combination


def calc_letter_correlation(letter, lines, answers, combination):
    """Pearson correlation between per-line counts of `letter` at the
    positions in `combination` and the 0/1 answer labels.

    Returns 0.0 when either series has zero variance.
    """
    letter_count = []
    for line in lines:
        count_in_line = 0.0
        for pos in combination:
            if line[pos] == letter:
                count_in_line += 1.0
        letter_count.append(count_in_line)
    # List, not a bare map object: np.cov needs a sequence (Python 3 fix).
    answers_float = [float(int(x)) for x in answers]
    covs = np.cov(letter_count, answers_float)
    stand_deviation_first = math.sqrt(covs[0][0])
    stand_deviation_second = math.sqrt(covs[1][1])
    if stand_deviation_first * stand_deviation_second != 0.0:
        return covs[0][1] / (stand_deviation_first * stand_deviation_second)
    else:
        return 0.0


def calc_correlations(lines, answers, combination):
    """Correlation of every alphabet symbol with the answers (see above)."""
    return [calc_letter_correlation(letter, lines, answers, combination)
            for letter in alphabet]


def calc_entropy_of_col(column, frequency_map):
    """Shannon entropy (bits) of the distinct symbols in `column`, using
    the probabilities in `frequency_map`."""
    column = np.squeeze(np.asarray(column))
    column = np.unique(column)
    # Materialize the terms: np.sum over a bare map object does not reduce
    # element-wise in Python 3 (py2->py3 fix).
    terms = [frequency_map[x] * math.log(frequency_map[x], 2) for x in column]
    return -np.sum(terms)
22,253
487dfa28d82f5bbf478f04444eedc32170fe67a0
def find(numbers, n):
    """Return the index of the first row in `numbers` whose first element
    equals n, or None if no row matches."""
    for i in range(0, len(numbers)):
        if numbers[i][0] == n:
            return i


def check(begin, end, res, numbers):
    """Follow edges [node, next_node, weight] from `begin` until `end`,
    XOR-ing the weights along the way onto `res`.

    Assumes every `begin` except `end` appears as the first element of
    some row (otherwise find() returns None and indexing fails).
    """
    if begin == end:
        return res
    i = find(numbers, begin)
    begin = numbers[i][1]
    return check(begin, end, res, numbers) ^ numbers[i][2]


def main():
    """Read the edge list and queries from stdin; print each query's XOR."""
    n = int(input())
    numbers = []
    for _ in range(0, n - 1):
        numbers.append(list(map(int, input().split(" "))))
    m = int(input())
    nums = []
    for _ in range(0, m):
        nums.append(list(map(int, input().split(" "))))
    for i in range(0, m):
        print(check(nums[i][0], nums[i][1], 0, numbers))


# Guard the interactive part so the module can be imported (and the pure
# functions tested) without blocking on stdin; script behavior unchanged.
if __name__ == "__main__":
    main()
22,254
58abcce60d397a07f8ca6289821076507f55d88c
#!/bin/python """ This script will create private repositories on Bitbucket for each student. The repo name will be same as the university ID of the student. Input : List of students ids, Bitbucket credentials usage: $python create_repos.py the above will consider the file path mentioned in dir_settings.py $python create_repos.py students-info.json """ import sys import json import requests from requests.auth import HTTPBasicAuth from bb_settings import * from dir_settings import * if len(sys.argv) == 2: students_info = json.loads(open(sys.argv[1], 'r').read()) else: students_info = json.loads(open(STUDENTS_INFO, 'r').read()) def create_repo(repo_name): auth = HTTPBasicAuth(BB_USERNAME, BB_PASSWORD) # the request URL format is https://api.bitbucket.org/2.0/repositories/BB_USERNAME/repo_name url = '%s%s/%s' % (REPO_API_URL, BB_USERNAME, repo_name) payload = { "scm": "git", "is_private": "true"} headers = {'content-type': 'application/json'} response = requests.post(url=url, data=payload, auth=auth) print repo_name, response.status_code #print response.text def main(): for student_id, student_email in students_info.iteritems(): create_repo(student_id) if __name__ == '__main__': main()
22,255
bea69f2579c315da61e935dedf8c5ef1adb590a1
import datetime import logging import os import json from google.appengine.api import taskqueue from google.appengine.ext import ndb from google.appengine.ext import webapp from google.appengine.ext.webapp import template from consts.event_type import EventType from controllers.api.api_status_controller import ApiStatusController from database.district_query import DistrictsInYearQuery from database.event_query import DistrictEventsQuery, EventQuery from database.match_query import EventMatchesQuery from database.team_query import DistrictTeamsQuery from helpers.award_manipulator import AwardManipulator from helpers.bluezone_helper import BlueZoneHelper from helpers.district_helper import DistrictHelper from helpers.district_manipulator import DistrictManipulator from helpers.event_helper import EventHelper from helpers.event_manipulator import EventManipulator from helpers.event_details_manipulator import EventDetailsManipulator from helpers.event_insights_helper import EventInsightsHelper from helpers.event_team_manipulator import EventTeamManipulator from helpers.event_team_status_helper import EventTeamStatusHelper from helpers.event_team_repairer import EventTeamRepairer from helpers.event_team_updater import EventTeamUpdater from helpers.firebase.firebase_pusher import FirebasePusher from helpers.insights_helper import InsightsHelper from helpers.match_helper import MatchHelper from helpers.match_time_prediction_helper import MatchTimePredictionHelper from helpers.matchstats_helper import MatchstatsHelper from helpers.notification_helper import NotificationHelper from helpers.outgoing_notification_helper import OutgoingNotificationHelper from helpers.playoff_advancement_helper import PlayoffAdvancementHelper from helpers.prediction_helper import PredictionHelper from helpers.insight_manipulator import InsightManipulator from helpers.suggestions.suggestion_fetcher import SuggestionFetcher from helpers.team_manipulator import TeamManipulator from 
helpers.match_manipulator import MatchManipulator from models.district import District from models.event import Event from models.event_details import EventDetails from models.event_team import EventTeam from models.match import Match from models.sitevar import Sitevar from models.suggestion import Suggestion from models.team import Team class EventShortNameCalcEnqueue(webapp.RequestHandler): """ Enqueues Event short_name computation for official events """ def get(self, year): event_keys = Event.query(Event.official == True, Event.year == int(year)).fetch(200, keys_only=True) events = ndb.get_multi(event_keys) for event in events: taskqueue.add( url='/tasks/math/do/event_short_name_calc_do/{}'.format(event.key.id()), method='GET') template_values = {'events': events} path = os.path.join(os.path.dirname(__file__), '../templates/math/event_short_name_calc_enqueue.html') self.response.out.write(template.render(path, template_values)) class EventShortNameCalcDo(webapp.RequestHandler): """ Computes Event short_name """ def get(self, event_key): event = Event.get_by_id(event_key) event.short_name = EventHelper.getShortName(event.name) EventManipulator.createOrUpdate(event) template_values = {'event': event} path = os.path.join(os.path.dirname(__file__), '../templates/math/event_short_name_calc_do.html') self.response.out.write(template.render(path, template_values)) class EventTeamRepairDo(webapp.RequestHandler): """ Repair broken EventTeams. """ def get(self): event_teams_keys = EventTeam.query(EventTeam.year == None).fetch(keys_only=True) event_teams = ndb.get_multi(event_teams_keys) event_teams = EventTeamRepairer.repair(event_teams) event_teams = EventTeamManipulator.createOrUpdate(event_teams) # sigh. 
-gregmarra if type(event_teams) == EventTeam: event_teams = [event_teams] template_values = { 'event_teams': event_teams, } path = os.path.join(os.path.dirname(__file__), '../templates/math/eventteam_repair_do.html') self.response.out.write(template.render(path, template_values)) class FinalMatchesRepairDo(webapp.RequestHandler): """ Repairs zero-indexed final matches """ def get(self, year): year_event_keys = Event.query(Event.year == int(year)).fetch(1000, keys_only=True) final_match_keys = [] for event_key in year_event_keys: final_match_keys.extend(Match.query(Match.event == event_key, Match.comp_level == 'f').fetch(100, keys_only=True)) match_keys_to_repair = [] for match_key in final_match_keys: key_name = match_key.id() if '_f0m' in key_name: match_keys_to_repair.append(match_key) deleted_keys = [] matches_to_repair = ndb.get_multi(match_keys_to_repair) for match in matches_to_repair: deleted_keys.append(match.key) event = ndb.get_multi([match.event])[0] match.set_number = 1 match.key = ndb.Key(Match, Match.render_key_name( event.key.id(), match.comp_level, match.set_number, match.match_number)) MatchManipulator.createOrUpdate(matches_to_repair) MatchManipulator.delete_keys(deleted_keys) template_values = {'deleted_keys': deleted_keys, 'new_matches': matches_to_repair} path = os.path.join(os.path.dirname(__file__), '../templates/math/final_matches_repair_do.html') self.response.out.write(template.render(path, template_values)) class YearInsightsEnqueue(webapp.RequestHandler): """ Enqueues Insights calculation of a given kind for a given year """ def get(self, kind, year): taskqueue.add( target='backend-tasks-b2', url='/backend-tasks-b2/math/do/insights/{}/{}'.format(kind, year), method='GET') template_values = { 'kind': kind, 'year': year } path = os.path.join(os.path.dirname(__file__), '../templates/math/year_insights_enqueue.html') self.response.out.write(template.render(path, template_values)) class YearInsightsDo(webapp.RequestHandler): """ Calculates 
insights of a given kind for a given year. Calculations of a given kind should reuse items fetched from the datastore. """ def get(self, kind, year): year = int(year) insights = None if kind == 'matches': insights = InsightsHelper.doMatchInsights(year) elif kind == 'awards': insights = InsightsHelper.doAwardInsights(year) elif kind == 'predictions': insights = InsightsHelper.doPredictionInsights(year) if insights != None: InsightManipulator.createOrUpdate(insights) template_values = { 'insights': insights, 'year': year, 'kind': kind, } path = os.path.join(os.path.dirname(__file__), '../templates/math/year_insights_do.html') self.response.out.write(template.render(path, template_values)) def post(self): self.get() class OverallInsightsEnqueue(webapp.RequestHandler): """ Enqueues Overall Insights calculation for a given kind. """ def get(self, kind): taskqueue.add( target='backend-tasks-b2', url='/backend-tasks-b2/math/do/overallinsights/{}'.format(kind), method='GET') template_values = { 'kind': kind, } path = os.path.join(os.path.dirname(__file__), '../templates/math/overall_insights_enqueue.html') self.response.out.write(template.render(path, template_values)) class OverallInsightsDo(webapp.RequestHandler): """ Calculates overall insights of a given kind. Calculations of a given kind should reuse items fetched from the datastore. 
""" def get(self, kind): insights = None if kind == 'matches': insights = InsightsHelper.doOverallMatchInsights() elif kind == 'awards': insights = InsightsHelper.doOverallAwardInsights() if insights != None: InsightManipulator.createOrUpdate(insights) template_values = { 'insights': insights, 'kind': kind, } path = os.path.join(os.path.dirname(__file__), '../templates/math/overall_insights_do.html') self.response.out.write(template.render(path, template_values)) def post(self): self.get() class UpcomingNotificationDo(webapp.RequestHandler): """ Sends out notifications for upcoming matches """ def get(self): live_events = EventHelper.getEventsWithinADay() NotificationHelper.send_upcoming_matches(live_events) class BlueZoneUpdateDo(webapp.RequestHandler): """ Update the current "best match" """ def get(self): live_events = EventHelper.getEventsWithinADay() try: BlueZoneHelper.update_bluezone(live_events) except Exception, e: logging.error("BlueZone update failed") logging.exception(e)
22,256
3ad43312dc7f9e4601b16e35845582078f72c57c
#!/usr/bin/env python import sys, os, telnetlib, pyodbc, re, logging #, math, commands, cPickle, datetime, shutil, _mssql, #sys.path.append("/mobileye/shared/scripts/QA_Bundle_scripts/lib/") from E_lib import * color = paintText() def status_lp(): #host, port ): global logFile global logPrefix if verbose: print color.blue("\t=== Start Status ===") telnet.read_until('>',10) telnet.write("fs\n") response = telnet.read_until('>',10).split('\n') lp_list = [] not_connected = [] for line in response: if "not-connected" in line: not_connected.append(line.split()[0]) elif "connected" in line: lp_list.append(line.split()[0]) logging.warning('non-connected LPs: %s ' % ', '.join(not_connected)) logging.info('connected LPs: %s' % ', '.join(lp_list)) #logStr = 'non-connected LPs:\n' + ', '.join(not_connected) + '\nconnected LPs:\n' + ', '.join(lp_list) #printStr = color.red('non-connected LPs:\n') + ', '.join(not_connected) + color.green('\nconnected LPs:\n') + ', '.join(lp_list) #logFile.writelines(logStr) #if verbose: print printStr return lp_list def info_lp(lp_list): if verbose: print color.blue("\t=== Start Info ===") telnet.read_until('>',10) for lp in lp_list : #print color.blue("LP: %s" %lp) telnet.write("clear\ninfo %s\n"% lp) logging.info("LP: %s" %lp) LPinfo = telnet.read_until('>',10) logging.info(LPinfo) if verbose: print color.blue("\t=== End Info ===") def topOfBook_ccy(ccy): telnet.read_until('>',10) telnet.write("clear\n") def buildTradeFromDB(LP_list, CCY_list, VWAP ): LP_list = "','".join(LP_list) CCY_list = "','".join(CCY_list) con = _mssql.connect(server='10.0.0.202',user='qa',password='1234567') if VWAP: BasicQueryString = "SELECT ORDERS FROM dbo.Vwap_Orders_view WHERE LP IN ('%s') AND CCYPAIR IN ('%s')" %(LP_list, CCY_list) else: BasicQueryString = "SELECT ORDERS FROM dbo.Orders_view WHERE LP IN ('%s') AND CCYPAIR IN ('%s')" %(LP_list, CCY_list) con.execute_query(BasicQueryString) return [row['ORDERS'] for row in con] def trade_sequential(orders): if 
verbose: print color.blue("\t=== Start Trading ===") telnet.read_until('>',3) for order in orders: #if verbose: print "running order: ", order logging.info("running order: %s" %order) telnet.write("clear\n") response = telnet.read_until('>',10) telnet.write("%s\n"% order) responseSTR = '' while 'COMPLETE' not in response: tmp = telnet.read_until('>',10) #.split('\n') responseSTR += tmp if tmp == '': break #print "+++ debug: \n", response, "--- debug: \n" response = responseSTR.split('\n') orderID = '' output = False for line in response: if ("status" in line) and ('*' not in line): #print "+++ debug: \n", line, "--- debug: \n" output = True orderID = line.split(',')[1] #print "order: " , orderID logging.info("order: %s" % orderID) orderStatus = line.split(',')[2] if '(' in orderStatus: orderStatus, orderRes = line.split(',')[2].split('(') #print "status: ",orderStatus #print "result: ",orderRes[:-1] logging.info("status: %s - result: %s" % (orderStatus,orderRes[:-1])) else: logging.info("status: %s" % orderStatus) #print "status: ",orderStatus elif 'time' in line : if not re.search(r'[0-9]{8}-[0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9]{3,6}',line): output = True logging.warning("time-stamp is wrong: %s" %line) #print color.red("time-stamp is wrong: %s" %line) if output: logging.debug(responseSTR) else: logging.info(responseSTR) if verbose: print color.blue("\t=== End Trading ===") #print telnet.read_until('>',10) #ccy ="eurusd" #telnet.write("afr %s \n" % ccy) ##telnet.interact() #print telnet.read_until('>',10) ##print telnet.read_until('>',10) #telnet.write(b"exit\n") #telnet.close() def getOptions(argv): # This function will Parse cmdline options import optparse usage = "\t%prog [options] \n\t%prog --help" options = optparse.OptionParser(usage, version='''Version: 0.0 16/09/2013: First revision! 
''') #mandatoryGroup = optparse.OptionGroup(options, color.red("Mandatory"), "this options must be defined") #optionalGroup = optparse.OptionGroup(options, "Optional") options.add_option("-l", "--lp", action="store", dest="lp", default=None, help="list of LPs ") options.add_option("-c", "--ccy", action="store", dest="ccy", default=None, help="list of CCY pairs") options.add_option("-H", "--host", action="store", dest="host", default="localhost", help="IP or name of the host to connect (default: localhost)") options.add_option("-p", "--port", action="store", dest="port", default="60000", help="Port number to connect (default: 60000)") options.add_option("-o", "--out", action="store", dest="output", default="./", help="location of the output log (default: local './')") options.add_option("-w", "--vwap",action="store_true",dest="vwap",default=False, help="use vwap orders.") options.add_option("-v", "--verbose",action="store_true",dest="verbose",default=False, help="Print full output.") #mandatoryGroup.add_option("-l", "--logs", action="store", dest="logs", default=None, help="path to logs directory") #optionalGroup.add_option("-v", "--verbose",action="store_true",dest="verbose",default=False, help="Print full output.") #options.add_option_group(mandatoryGroup) #options.add_option_group(optionalGroup) (op,args) = options.parse_args() if not op.lp: options.error(color.red('"-l, --lp" is not defined!')) else: op.lp = open(op.lp,'r').read().replace('\n',' ').split() if not op.ccy : print color.red('"-c, --ccy" is not defined - using default of 1 ccypar (EUR/USD)') op.ccy = ['eurusd'] else: op.ccy = open(op.ccy,'r').read().replace('\n',' ').split() return (op, args) def main(op): # This function will run all basic usage of Telnet_Basic_Test. 
print color.brown("\n\n\t*** Telnet - Basic Test ***\n") global logFile global logPrefix telnet.open(op.host, op.port) try: LP_LIST = status_lp() #op.host,op.port) info_lp(LP_LIST) #print "LPs:\n",op.lp #print"CCY:", op.ccy #buildTradeFile(LP_LIST,op.ccy) #orders = open("orders.list",'r').readlines() orders = buildTradeFromDB(LP_LIST,op.ccy, op.vwap) trade_sequential(orders) except KeyboardInterrupt : print "\nclosing telnet" telnet.close() #logFile.close() if(__name__ == "__main__"): try: op, args = getOptions(sys.argv) except Exception, e: print >> sys.stderr, e sys.exit(2) #logFile = None #logPrefix = '' logPrefix = os.path.basename(__file__).split('.')[0] logFileName = op.output + logPrefix + '.log' #logFile = open(logFileName,'w') # set up logging to file - see previous section for more details logging.basicConfig(level=logging.INFO,#level=logging.DEBUG format='%(asctime)s,%(levelname)-8s,%(message)s', #format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M', filename=logFileName, filemode='w') # define a Handler which writes WARNING messages or higher to the sys.stderr console = logging.StreamHandler() console.setLevel(logging.WARNING) # set a format which is simpler for console use formatter = logging.Formatter('%(levelname)-8s: %(message)s') # tell the handler to use this format console.setFormatter(formatter) # add the handler to the root logger logging.getLogger('').addHandler(console) # Now, define a couple of other loggers which might represent areas in your # application: #logger1 = logging.getLogger('myapp.area1') #logger2 = logging.getLogger('myapp.area2') # #logger1.debug('Quick zephyrs blow, vexing daft Jim.') #logger1.info('How quickly daft jumping zebras vex.') #logger2.warning('Jail zesty vixen who grabbed pay from quack.') #logger2.error('The five boxing wizards jump quickly.') verbose = op.verbose telnet = telnetlib.Telnet() #op.host, op.port) main(op) #def buildTradeFile(lp_list,ccy_list): # with 
open("orders.list",'w') as orders: # #orders = open("orders.list",'w') # TIF_list = ('ioc', 'fok', 'day') # OT_list = ('pq', 'limit', 'market') # VOLUMES = ('500000','1m', '3m', '5m', '10m') # for lp in lp_list: # for ccy in ccy_list: # for TIF in TIF_list: # for OT in OT_list: # for VOL in VOLUMES: # orders.write("buy %s %s %s %s %s\n" % (lp, ccy, VOL, TIF, OT)) # orders.write("sell %s %s %s %s %s\n" % (lp, ccy, VOL, TIF, OT)) # orders.write("buyv %s %s %s %s %s\n" % (lp, ccy, VOL, TIF, OT)) # orders.write("sellv %s %s %s %s %s\n" % (lp, ccy, VOL, TIF, OT)) # #orders.close()
22,257
acbf35149945a5b1748d7ec8b6d046f49887e260
#!/usr/bin/env python import sys w = sys.stdout.write print('Image injection test') injection = 'R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==" onload="alert(\'pwnd\')" /><img src="data:image/gif;base64,R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==' w('\x1bP;IMAGE|image/gif;%s' % injection) w('\x1bP') print('HTML script execution test') w('\x1bP;HTML|<img src="https://imgs.xkcd.com/comics/hack.png" onload="alert(\'pwnd\')" />') w('\x1bP')
22,258
b50ecf639f3c02d345004c5e3d80e70f0d401b2c
# -*- coding: utf-8 -*- """ <DefineSource> @Date : Fri Nov 14 13:20:38 2014 \n @Author : Erwan Ledoux \n\n </DefineSource> A Parenter completes the list of grand-parent nodes that a child node could have. It acts only at one level. """ #<DefineAugmentation> import ShareYourSystem as SYS BaseModuleStr="ShareYourSystem.Standards.Noders.Distinguisher" DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer" SYS.setSubModule(globals()) #</DefineAugmentation> #<ImportSpecificModules> import copy from ShareYourSystem.Standards.Itemizers import Pather from ShareYourSystem.Standards.Noders import Noder #</ImportSpecificModules> #<DefineClass> @DecorationClass( **{'ClassingSwitchMethodStrsList':['parent']} ) class ParenterClass(BaseClass): #Definition RepresentingKeyStrsList=[ 'ParentingTopPickVariablesList', 'ParentingWalkBool', 'ParentedDeriveParentersList', 'ParentedNodeCollectionStrsList', 'ParentedNodePathStr', 'ParentedCollectionPathStr', 'ParentedTotalPathStr', 'ParentedTopDeriveParenterVariable' ] def default_init(self, _ParentingTopPickVariablesList=None, _ParentingWalkBool=True, _ParentedDeriveParentersList=None, _ParentedNodeCollectionStrsList=None, _ParentedNodePathStr="", _ParentedCollectionPathStr="", _ParentedTotalPathStr="", _ParentedTopDeriveParenterVariable=None, **_KwargVariablesDict): #Call the parent init method BaseClass.__init__(self,**_KwargVariablesDict) def do_parent(self): #debug ''' self.debug(('self.',self,[ ])) ''' #Check of a parent pointer if self.NodePointDeriveNoder!=None: #debug ''' self.debug('We are going to node the parent pointer') ''' #Parent the parent maybe if self.ParentingWalkBool: #parent the parent self.NodePointDeriveNoder.parent( self.ParentingTopPickVariablesList, self.ParentingWalkBool ) #set self.ParentedDeriveParentersList=[self.NodePointDeriveNoder ]+self.NodePointDeriveNoder.ParentedDeriveParentersList self.ParentedNodeCollectionStrsList=[self.NodedCollectionStr 
]+self.NodePointDeriveNoder.ParentedNodeCollectionStrsList self.ParentedNodeCollectionStrsList.reverse() #definition ParentedNodePathStrsList=map( lambda __ParentedDeriveParenter: __ParentedDeriveParenter.NodeKeyStr, self.ParentedDeriveParentersList ) ParentedNodePathStrsList.reverse() #definition ParentedTotalPathTuplesList=map( lambda __ParentedDeriveParenter: ( Noder.NodingPrefixGetStr+__ParentedDeriveParenter.NodeCollectionStr+Noder.NodingSuffixGetStr, __ParentedDeriveParenter.NodeKeyStr ), self.ParentedDeriveParentersList ) ParentedTotalPathTuplesList.reverse() #Debug ''' self.debug('ParentedTotalPathTuplesList is '+str(ParentedTotalPathTuplesList)) ''' #set self.ParentedNodePathStr=Pather.PathPrefixStr.join( SYS.unzip(ParentedTotalPathTuplesList,[1]) ) self.ParentedCollectionPathStr=Pather.PathPrefixStr.join( SYS.unzip(ParentedTotalPathTuplesList,[0]) ) self.ParentedTotalPathStr=Pather.PathPrefixStr.join( map( lambda __ParentedTotalPathTuple: __ParentedTotalPathTuple[0]+__ParentedTotalPathTuple[1], ParentedTotalPathTuplesList ) ) #Check if len(self.ParentedDeriveParentersList)>0: self.ParentedTopDeriveParenterVariable=self.ParentedDeriveParentersList[-1] else: self.ParentedTopDeriveParenterVariable=self #Link self.update( zip( self.ParentingTopPickVariablesList, self.ParentedTopDeriveParenterVariable.pick( self.ParentingTopPickVariablesList ) ) ) else: self.ParentedTopDeriveParenterVariable=self #</DefineClass>
22,259
8da4c04c76680dcf0d89ee592af3ff59113d25d9
#!/usr/bin/python3 a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] def selection_function(user_d): less_then = [] for i in a: if i < int(user_d): less_then.append(i) else: continue print(less_then) userDigit = input("Print smaller numbers than: ") selection_function(userDigit)
22,260
eae53b9cf76fd298d8fbf42a1b2f4ccf02c8651c
import chainer from chainer import optimizers def set_optimizer(model, alpha, beta, weight_decay): optimizer = optimizers.Adam(alpha=alpha, beta1=beta) optimizer.setup(model) optimizer.add_hook(chainer.optimizer.WeightDecay(weight_decay)) return optimizer
22,261
aa2b7d04df6e375adaf68f1e6a2b2861d897f94b
from .models import Kanban from django.forms import ModelForm class KanbanForm(ModelForm): class Meta: model = Kanban fields = ["title"] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # bootstrap対応 for field in self.fields.values(): field.widget.attrs['class'] = 'form-control mr-sm-2' self.fields['title'].widget.attrs['placeholder'] = "Kanban Name"
22,262
f8c149080667fda65d39df1961d3263a82b58ce1
import sys, os, gc, json, pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from collections import defaultdict # Replace these paths with your own absolute paths data_path = '../../data/' # Evaluation Metrics def overlap_score(tracks, pred_tracks, test_size = 10): ''' Computes the overlap score for tracks and pred_tracks. returns #overlap''' assert len(tracks) == len(pred_tracks) == test_size return sum([a in tracks for a in pred_tracks]) def avg_overlap(true_dict, pred): ''' Returns the accuracy score given true_label and pred''' assert len(true_dict) == len(pred) avg_overlap = np.mean([overlap_score(a, b) for a,b in zip(true_dict.values(), pred)]) return avg_overlap def r_precision(prediction, val_set, test_size = 10): # prediction should be a list of predictions # val_set should be pandas Series of ground truths assert len(prediction) == len(val_set) == test_size val_set = pd.Series(val_set) score = np.sum(val_set.isin(prediction))/val_set.shape[0] return score def dcg(predictions, labels, test_size = 10): ''' Calculates the discounted cumulative gain for prediction and labels. Inputs: Prediction: list of predictions labels: list of actual labels test_size: size of each of the two sets''' assert len(predictions) == len(labels) == test_size zero_one_label = [predictions[i] in labels for i in range(len(predictions))] zero_one_label = [zero_one_label[i]/np.log2(i+2) for i in range(len(zero_one_label))] return np.sum(zero_one_label)
22,263
dc582d08575fce9fbb718f227111bbd4723c2899
import string from copy import deepcopy class Proof_node(object): def __init__(self,literal,bindings,facts): """proof node during backtracking """ self.literal = literal self.bindings = bindings self.facts = [] for fact in facts: lit_pred = self.literal.split('(')[0].strip() fact_pred = fact.split('(')[0].strip() lit_args = self.literal.split('(')[1][:-1].split(',') fact_args = fact.split('(')[1][:-1].split(',') n = len(lit_args) m = len(fact_args) if lit_pred == fact_pred and n == m: self.facts.append(fact) def __repr__(self): """call to print displays this info """ rep = "" rep += str(self.literal)+"\n" rep += str(self.bindings)+"\n" rep += str(self.facts)+"\n" return (rep) def substitute_with_bindings(self,bindings): """substitutes literal vars with given bindings """ n_chars = len(self.literal) term = ['' for i in range(n_chars)] for i in range(n_chars): if self.literal[i] in bindings: term[i] = bindings[self.literal[i]] else: term[i] = self.literal[i] return (''.join(term)) def substitute(self): """substitutes literal vars with bindings """ n_chars = len(self.literal) term = ['' for i in range(n_chars)] for i in range(n_chars): if self.literal[i] in self.bindings: term[i] = self.bindings[self.literal[i]] else: term[i] = self.literal[i] return (''.join(term)) def unify(self,term,fact,bindings): """unification of two terms """ n = len(term.split('(')[1][:-1].split(',')) term_args = term.split('(')[1][:-1].split(',') fact_args = fact.split('(')[1][:-1].split(',') for i in range(n): if (not Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])): if term_args[i] != fact_args[i]: return False elif (Prover.is_var(term_args[i])) and (not Prover.is_var(fact_args[i])): bindings[term_args[i]] = fact_args[i] elif (not Prover.is_var(term_args[i])) and (Prover.is_var(fact_args[i])): bindings[fact_args[i]] = term_args[i] return bindings def search(self): """searches facts after substituting with bindings """ term = self.substitute() ##print ("searching:",term) 
##print ("in facts",self.facts) ##input() bindings = deepcopy(self.bindings) found = False for fact in self.facts: found = self.unify(term,fact,bindings) if found: bound_vars = list(bindings.keys()) n_bound_vars = len(bound_vars) for i in range(n_bound_vars): for j in range(i+1,n_bound_vars): if bindings[bound_vars[i]] == bindings[bound_vars[j]]: return False self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS break return found class Prover(object): """contains functions for proving theories """ facts = [] rule = "" @staticmethod def is_var(argument): """checks is argument is a variable by checking if it starts with uppercase """ if argument[0] in list(string.ascii_uppercase): return True return False @staticmethod def prove_rule(example,exists=True): """checks if example satisfies rule and against the facts """ facts = Prover.facts rule = Prover.rule #if no rule body then trivially true if not rule.split(':-')[1]: return True #assume example is true proved = True #collect head variables and bind to example atoms bindings = {} head = rule.split(':-')[0].strip() head_args = head.split('(')[1][:-1].split(',') example_args = example.split('(')[1][:-1].split(',') n_args = len(head_args) for i in range(n_args): if Prover.is_var(head_args[i]): bindings[head_args[i]] = example_args[i] else: if head_args[i] != example_args[i]: bindings = {} break #if no binding for head, example is false if not bindings: proved = False #two different vars cant have same binding bound_vars = list(bindings.keys()) n_bound_vars = len(bound_vars) for i in range(n_bound_vars): for j in range(i+1,n_bound_vars): if bindings[bound_vars[i]] == bindings[bound_vars[j]]: return False #collect body literals body = rule.split(':-')[1].strip() body_literals = body.split(';') pointer = 0 stack = [Proof_node(body_literals[0],bindings,facts)] solutions = [] while stack: node = stack[-1] ##print (node) ##input() x = node.search() ##print (x) ##input() if x: if len(stack) != 
len(body_literals): new_node = Proof_node(body_literals[len(stack)],x,facts) stack.append(new_node) else: if exists: return True solutions.append(x) stack.pop() elif not x: stack.pop() if not solutions: return False return (solutions) #============TEST-CASE-1=====================# ''' facts = ['p(a,b,d,20)','p(a,b,c,20)','h(a,c,50)','h(a,d,50)'] example = 'q(a,b)' rule = 'q(A,B) :- p(A,B,C,20);h(A,C,50)' Prover.facts = facts Prover.rule = rule solutions = Prover.prove_rule(example) print (solutions) '''
22,264
1947fab8a10291b059a747516af3ab484eaadb7b
from django.apps import AppConfig class AdmincoreConfig(AppConfig): name = 'admincore'
22,265
64cea1671ebb2a20c2790f959acd18250327e58a
# import the necessary packages import sys, argparse import cv2 # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required=True, help="Path to the image") ap.add_argument("-s", "--save", required=True, help="Path to save image") args = vars(ap.parse_args()) print args["image"] # load the image and show some basic information on it image = cv2.imread(args["image"]) if not image.size: print "Image not found." sys.exit(0) print "width: %d pixels" % (image.shape[1]) print "height: %d pixels" % (image.shape[0]) print "channels: %d" % (image.shape[2]) # show the image and wait for a keypress try: while(1): cv2.imshow("Image", image) wait = cv2.waitKey(33) except KeyboardInterrupt: # save the image -- OpenCV handles converting filetypes # automatically cv2.imwrite(args["save"], image) print "Image saved: %s" % args["save"] sys.exit(0)
22,266
7c2c9c2c23154c051104f756ab340290cc155383
# -*- coding: utf-8 -*- import simplejson as json import lxml import logging from lxml import objectify logger = logging.getLogger(__name__) class objectJSONEncoder(json.JSONEncoder): """A specialized JSON encoder that can handle simple lxml objectify types """ def default(self, o): if isinstance(o, lxml.objectify.IntElement): return int(o) if isinstance(o, lxml.objectify.NumberElement) or \ isinstance(o, lxml.objectify.FloatElement): return float(o) if isinstance(o, lxml.objectify.ObjectifiedDataElement): return str(o) if hasattr(o, '__dict__'): return o.__dict__ return json.JSONEncoder.default(self, o) def decode_xml(xml_str): response = xml_str.replace('encoding="UTF-8"', '') try: obj = objectify.fromstring(response) encoded_obj = objectJSONEncoder().encode(obj) return {"type": "json", "data": json.loads(encoded_obj)} except Exception as e: logger.error(e.args) return {"type": "xml", "data": xml_str}
22,267
7713a2caa38cd7abff6fd7cd45210354728c45a9
from flask_wtf import FlaskForm from wtforms import SubmitField from wtforms.fields.core import StringField from wtforms.fields.simple import TextAreaField from wtforms.validators import Required class FeedbackForm(FlaskForm): subject = StringField('', validators=[Required()], render_kw={'placeholder':'Subject'}) feedback = TextAreaField('', validators=[Required()], render_kw={'placeholder':'Write us your feedback...'}) submit = SubmitField('Send') class SearchBar(FlaskForm): query = StringField('', validators=[Required()], render_kw={'placeholder':'Search for a meal...'}) submit = SubmitField('Search')
22,268
b187479d3b3585250e0b4d7f6f7dd958e3667c9b
import pygame class Player(ImgBase): def __init__(self, x, y, w, h, speed): super().__init__("plane.png", x, y, w, h, speed) self.isLeft = False self.isRight = False self.bullets = [] def keyDown(self, key): self.key = key if self.key == pygame.K_d: self.isRight = True if self.key == pygame.K_a: self.isLeft = True if self.key == pygame.K_SPACE: self.bullets.append(Bullet(self.x, self.y, 10, 5, 10)) def keyUp(self, key): self.key = key if self.key == pygame.K_d: self.isRight = False if self.key == pygame.K_a: self.isLeft = False def move(self): if self.isRight == True: self.x += self.speed if self.isLeft == True: self.x -= self.speed def render(self, gamepad): gamepad.blit(self.img , (self.x , self.y))
22,269
bce8c63d23b7dfee830e61d7ba38851a89b082fa
import sys # gcd # from bisect import bisect_left,bisect_right # from fractions import gcd # 切り上げ,切り捨て # from math import ceil, floor # リストの真のコピー(変更が伝播しない) # from copy import deepcopy # 累積和。list(accumulate(A))としてAの累積和 # from itertools import accumulate # l = ['a', 'b', 'b', 'c', 'b', 'a', 'c', 'c', 'b', 'c', 'b', 'a'] # S = Counter(l) # カウンタークラスが作られる。S=Counter({'b': 5, 'c': 4, 'a': 3}) # print(S.most_common(2)) # [('b', 5), ('c', 4)] # print(S.keys()) # dict_keys(['a', 'b', 'c']) # print(S.values()) # dict_values([3, 5, 4]) # print(S.items()) # dict_items([('a', 3), ('b', 5), ('c', 4)]) # from collections import Counter # import math # from functools import reduce # # input関係の定義 # fin = open('in_1.txt', 'r') # sys.stdin = fin input = sys.stdin.readline def ii(): return int(input()) def mi(): return map(int, input().rstrip().split()) def lmi(): return list(map(int, input().rstrip().split())) def li(): return list(input().rstrip()) # template def iterate_tokens(): for line in sys.stdin: for word in line.split(): yield word if __name__ == '__main__': tokens = iterate_tokens() N = int(next(tokens)) # type: int a = [int(next(tokens)) for _ in range(N)] # type: "List[int]" # write code anslist = [] MAX_A = max(a) MIN_A = min(a) if MIN_A >= 0: for i in range(1, N): anslist.append((i,i+1)) elif MAX_A < 0: for i in range(N, 1, -1): anslist.append((i, i - 1)) else: if abs(MAX_A) >= abs(MIN_A): MAX_INDEX = a.index(MAX_A) for i in range(1,N+1): anslist.append((MAX_INDEX+1, i)) for i in range(1, N): anslist.append((i, i + 1)) else: MIN_INDEX = a.index(MIN_A) for i in range(1, N + 1): anslist.append((MIN_INDEX+1, i)) for i in range(N, 1, -1): anslist.append((i, i - 1)) print(len(anslist)) for tup in anslist: print(tup[0],tup[1])
22,270
35befd09fb4f1321d35c3870d6f48fb31803e100
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from abupy import ABuSymbolPd

if __name__ == '__main__':
    # Two years of TSLA daily bars from abupy.
    kl_pd = ABuSymbolPd.make_kl_df('TSLA', n_folds=2)

    # 1. N-day breakout ("turtle") strategy: buy when the close exceeds the
    #    highest high of the last N1 days, sell when it drops below the
    #    lowest low of the last N2 days.
    N1 = 42
    N2 = 21

    # 2.1 pd.rolling_max computes the maximum over a rolling window.
    # NOTE(review): pd.rolling_max / rolling_min / expanding_max / expanding_min
    # are the legacy pandas API — confirm the pinned pandas version supports them.
    kl_pd['n1_high'] = pd.rolling_max(kl_pd['high'], window=N1)
    # 2.2 The first N1-1 entries are NaN; fill them with the expanding max of
    #     the close (the largest close seen so far, i.e. over the first k days,
    #     k in [0, N1]).
    expan_max = pd.expanding_max(kl_pd['close'])
    kl_pd['n1_high'].fillna(expan_max, inplace=True)
    # 2.3 Same treatment for the rolling minimum.
    kl_pd['n2_low'] = pd.rolling_min(kl_pd['low'], window=N2)
    expan_min = pd.expanding_min(kl_pd['close'])
    kl_pd['n2_low'].fillna(expan_min, inplace=True)
    print(kl_pd.head())

    # 3.1 Buy signal: today's close above *yesterday's* N1-high (shift(1),
    #     since today's close can never exceed today's own high).
    buy_signal = kl_pd[kl_pd.close > kl_pd.n1_high.shift(1)].index
    kl_pd.loc[buy_signal, 'signal'] = 1
    # 3.2 Sell signal symmetric on the N2-low.
    sell_signal = kl_pd[kl_pd.close < kl_pd.n2_low.shift(1)].index
    kl_pd.loc[sell_signal, 'signal'] = 0
    # 3.3 NaNs in 'signal' can be ignored here.
    kl_pd.signal.value_counts().plot(kind='pie')
    plt.show()

    # 4.1 Turn signals into a holding state.  Signals use today's close, so
    #     the actual buy/sell happens on the next day — hence the shift.
    kl_pd['keep'] = kl_pd['signal'].shift(1)
    # Carry the last known state forward over NaNs.
    kl_pd['keep'].fillna(method='ffill', inplace=True)
    # NOTE(review): the result of this fillna(0) is discarded (no inplace= or
    # reassignment) — likely a bug; leading NaNs before the first signal remain.
    kl_pd['keep'].fillna(0)
    print(kl_pd)

    # 5. Benchmark return = buy on day one and hold until the end.
    # 5.1 Daily benchmark return.
    kl_pd['benchmark_profit'] = kl_pd['close'] / kl_pd['close'].shift(1) - 1
    # 5.2 Daily strategy return (earned only while holding).
    kl_pd['trend_profit'] = kl_pd['keep'] * kl_pd['benchmark_profit']
    # 5.3 Cumulative benchmark and strategy returns.
    kl_pd['benchmark_profit_accum'] = kl_pd['benchmark_profit'].cumsum()
    kl_pd['trend_profit_accum'] = kl_pd['trend_profit'].cumsum()
    print(kl_pd.head(10))
    # 5.4 Plot both cumulative return curves.
    kl_pd[['benchmark_profit_accum', 'trend_profit_accum']].plot()
    plt.show()
22,271
bdea1307258d8ffad21b32fb2344d7c42c5b7c15
import json
from knack_api_library import get_knack_dataset, get_contact_object


class Knack:
    """Thin helper layer over the Knack API library.

    Every helper is a static method: the class carries no state and merely
    groups fetch/formatting utilities.
    """

    @staticmethod
    def get_in_json(ident):
        """Fetch the dataset for *ident* and decode its JSON payload."""
        response = get_knack_dataset(ident)
        return json.loads(response.read())

    @staticmethod
    def list_values(list_obj):
        """Collect each record's 'identifier' as a list of strings.

        Handles the relation being 'many' (several records) or a single
        record.  Returns the literal string "none" when the list is empty or
        the first record carries no identifier (treated as "no values at
        all").
        """
        if not (list_obj and list_obj[0]['identifier']):
            return "none"
        return [str(record['identifier']) for record in list_obj]

    @staticmethod
    def display(list_obj):
        """Join list values with a pipe separator for display."""
        return '|'.join(list_obj)

    @staticmethod
    def value_none(val):
        """Map falsy values to the literal string 'none'."""
        return val if val else 'none'
22,272
e01b1c22c930be1a24539aa536d591056d9ac628
from phrase import Phrase
import random


class Game:
    """Console phrase-guessing (hangman-style) game loop."""

    # Class-level defaults; all are re-initialized per instance/round in
    # __init__ and game_reset.
    missed = 0
    phrases = []
    active_phrase = None
    guesses = []
    proceed_answer = 'y'

    def __init__(self):
        # ' ' is pre-seeded so spaces always display as revealed.
        self.guesses = [" "]
        self.phrases = [Phrase('Bananas are yellow'),
                        Phrase('I Like to Run'),
                        Phrase('The Pandemic needs to End'),
                        Phrase('Tonight there is a full Moon'),
                        Phrase('I CANNOT wait to eat Donuts')]
        self.active_phrase = self.get_random_phrase()

    def get_random_phrase(self):
        """Pick and return one of the five phrases at random."""
        self.pointer = random.randint(0, 4)
        self.selected_phrase = self.phrases[self.pointer]
        return self.selected_phrase

    def welcome(self):
        """Print the welcome banner."""
        print("""
        \n**************************************
        \n****Welcome to Phrase Hunters !!******
        \n**************************************
        """)

    def start(self):
        """Run the outer replay loop; each inner loop is one round that ends
        on a win or after 5 missed letters."""
        while self.proceed_answer.lower() == 'y':
            self.welcome()
            while self.missed < 5 and self.active_phrase.check_complete(self.guesses) is False:
                print("\nNumber missed: {}".format(self.missed))
                self.active_phrase.display(self.guesses)
                user_guess = self.get_guess()
                self.guesses.append(user_guess)
                if not self.active_phrase.check_guess(user_guess):
                    self.missed += 1
            # Round ended: show the final state and the win/lose message.
            self.active_phrase.display(self.guesses)
            self.game_over()
            self.proceed_answer = input("Would you like to play again (y/n): ")
            if self.proceed_answer == 'y':
                self.game_reset()
            else:
                print("Thanks for playing ! come back soon !")

    def get_guess(self):
        """Prompt until a single, previously-unguessed, alphabetic letter is
        entered; invalid attempts never count as misses."""
        self.prompt = input("\nPlease enter a letter : ")
        while len(self.prompt) > 1 or self.prompt in self.guesses or self.prompt.isalpha() == False:
            if len(self.prompt) > 1:
                print("\nPlease enter only one letter at a time")
                print("This attempt will not count towards your missed letters count")
            elif self.prompt in self.guesses:
                print("\nYou already entered that letter, please try again")
                print("This repeated letter attempt will not count towards your missed letters count")
            elif self.prompt.isalpha() == False:
                print("Please do not enter characters other than letters from a to z")
                print("This attempt will not count towards your missed letters count")
            self.prompt = input("Please enter a letter : ")
        return self.prompt

    def game_over(self):
        """Announce the outcome of the round."""
        if self.missed == 5:
            print("\nYou reached the maximum number of missed letters")
            print("Your game is over !")
        else:
            print("\nCongratulations, you won!!")

    def game_reset(self):
        """Reset state for a fresh round with a new random phrase."""
        self.missed = 0
        self.guesses = [" "]
        self.active_phrase = self.get_random_phrase()
22,273
7c68cb20c6076a38be3d735ea083201311739e2e
# -*- coding: utf-8 -*- """The formatter mediator object.""" from __future__ import unicode_literals import os from plaso.formatters import winevt_rc from plaso.winnt import language_ids class FormatterMediator(object): """Class that implements the formatter mediator.""" DEFAULT_LANGUAGE_IDENTIFIER = 'en-US' # TODO: add smarter language ID to LCID resolving e.g. # 'en-US' falls back to 'en'. # LCID 0x0409 is en-US. DEFAULT_LCID = 0x0409 _WINEVT_RC_DATABASE = 'winevt-rc.db' def __init__(self, data_location=None): """Initializes a formatter mediator object. Args: data_location (str): path of the formatter data files. """ super(FormatterMediator, self).__init__() self._data_location = data_location self._language_identifier = self.DEFAULT_LANGUAGE_IDENTIFIER self._lcid = self.DEFAULT_LCID self._winevt_database_reader = None def _GetWinevtRcDatabaseReader(self): """Opens the Windows Event Log resource database reader. Returns: WinevtResourcesSqlite3DatabaseReader: Windows Event Log resource database reader or None. """ if not self._winevt_database_reader and self._data_location: database_path = os.path.join( self._data_location, self._WINEVT_RC_DATABASE) if not os.path.isfile(database_path): return None self._winevt_database_reader = ( winevt_rc.WinevtResourcesSqlite3DatabaseReader()) if not self._winevt_database_reader.Open(database_path): self._winevt_database_reader = None return self._winevt_database_reader @property def lcid(self): """int: preferred Language Code identifier (LCID).""" return self._lcid def GetWindowsEventMessage(self, log_source, message_identifier): """Retrieves the message string for a specific Windows Event Log source. Args: log_source (str): Event Log source, such as "Application Error". message_identifier (int): message identifier. Returns: str: message string or None if not available. 
""" database_reader = self._GetWinevtRcDatabaseReader() if not database_reader: return None if self._lcid != self.DEFAULT_LCID: message_string = database_reader.GetMessage( log_source, self.lcid, message_identifier) if message_string: return message_string return database_reader.GetMessage( log_source, self.DEFAULT_LCID, message_identifier) def SetPreferredLanguageIdentifier(self, language_identifier): """Sets the preferred language identifier. Args: language_identifier (str): language identifier string such as "en-US" for US English or "is-IS" for Icelandic. Raises: KeyError: if the language identifier is not defined. ValueError: if the language identifier is not a string type. """ if not isinstance(language_identifier, str): raise ValueError('Language identifier is not a string.') values = language_ids.LANGUAGE_IDENTIFIERS.get( language_identifier.lower(), None) if not values: raise KeyError('Language identifier: {0:s} is not defined.'.format( language_identifier)) self._language_identifier = language_identifier self._lcid = values[0]
22,274
191a67d02e5ccb993d4e73ff8ff5a3dae6991790
# coding=utf-8
"""
Copyright (C) 2012 DECOIT GmbH <asterisk4ucs@decoit.de>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.
"""

# NOTE(review): this is a Python 2 UDM (Univention Directory Manager) module:
# it relies on the builtin `reduce`, `cmp`, and `sorted(..., cmp=...)`.

import univention.admin.filter
import univention.admin.syntax
import univention.admin.handlers
import univention.admin.handlers.asterisk
import univention.admin.handlers.asterisk.contact
import univention.admin.handlers.asterisk.phoneGroup
import univention.admin.handlers.asterisk.waitingLoop
import univention.admin.handlers.asterisk.sipPhone
import univention.admin.handlers.asterisk.conferenceRoom
import univention.admin.handlers.asterisk.phoneType
import univention.admin.handlers.asterisk.mailbox
import univention.admin.handlers.asterisk.faxGroup
import univention.admin.handlers.asterisk.server
import univention.admin.handlers.asterisk.fax
import univention.admin.handlers.asterisk.phoneBook
import univention.admin.handlers.asterisk.music
import univention.admin.handlers.asterisk.agiscript

import operator

# UDM module metadata: a virtual container module that only supports search.
module = "asterisk/asterisk"
short_description = "Asterisk4UCS-Management"
long_description = ''
operations = ['search']

default_containers = [
    "cn=asterisk"
]

childs = 0
virtual = 1

# Maps a superordinate module name ("None" for top level) to the list of UDM
# handler modules that may appear beneath it in the wizard.
modulesWithSuperordinates = {
    "None": [
        univention.admin.handlers.asterisk.server,
        univention.admin.handlers.asterisk.phoneBook,
    ],
    "asterisk/server": [
        univention.admin.handlers.asterisk.phoneGroup,
        univention.admin.handlers.asterisk.waitingLoop,
        univention.admin.handlers.asterisk.sipPhone,
        univention.admin.handlers.asterisk.conferenceRoom,
        univention.admin.handlers.asterisk.phoneType,
        univention.admin.handlers.asterisk.mailbox,
        univention.admin.handlers.asterisk.faxGroup,
        univention.admin.handlers.asterisk.fax,
        univention.admin.handlers.asterisk.music,
        univention.admin.handlers.asterisk.agiscript,
    ],
    "asterisk/phoneBook": [
        univention.admin.handlers.asterisk.contact,
    ]
}

usewizard = 1
wizardmenustring = "Asterisk4UCS-Management"
wizarddescription = "Asterisk verwalten"
# Flatten every module list into the list of child module names.
childmodules = [x.module for x in reduce(operator.add, modulesWithSuperordinates.values())]


def superordinatecmp(x, y):
    # Sort "None" (top level) first; everything else in natural order.
    if x == "None":
        return -1
    elif y == "None":
        return 1
    return cmp(x, y)


wizardsuperordinates = sorted(modulesWithSuperordinates.keys(), cmp=superordinatecmp)
# Per superordinate, the list of module *names* (not module objects).
wizardtypesforsuper = {}
for key, value in modulesWithSuperordinates.items():
    wizardtypesforsuper[key] = [x.module for x in value]

wizardoperations = {"add": ["Add", "Add DNS object"], "find": ["Search", "Search DNS object(s)"]}

# Virtual module: no own options, layout, properties or LDAP mapping.
options = {}
layout = []
property_descriptions = {}
mapping = univention.admin.mapping.mapping()


class object(univention.admin.handlers.simpleLdap):
    """Minimal UDM object for the virtual asterisk container module."""
    module = module

    def __init__(self, co, lo, position, dn='', superordinate=None, arg=None):
        global mapping
        global property_descriptions
        self.co = co
        self.lo = lo
        self.dn = dn
        self.position = position
        self._exists = 0
        self.mapping = mapping
        self.descriptions = property_descriptions
        univention.admin.handlers.simpleLdap.__init__(self, co, lo, position, dn, superordinate)

    def exists(self):
        return self._exists


def lookup(co, lo, filter_s, base='', superordinate=None, scope='sub', unique=0,
           required=0, timeout=-1, sizelimit=0):
    """Aggregate lookup over every module registered for the given
    superordinate; the music module is deliberately skipped."""
    ret = []
    supi = "None"
    if superordinate:
        supi = superordinate.module
    for module in modulesWithSuperordinates[supi]:
        if module == univention.admin.handlers.asterisk.music:
            continue
        ret += module.lookup(co, lo, filter_s, base, superordinate, scope,
                             unique, required, timeout, sizelimit)
    return ret


def identify(dn, attr, canonical=0):
    # Virtual module: never identifies an LDAP object itself.
    pass
22,275
cf6c0f9491f76aed383c54614125724bcbc3ce50
import os
import time
from decimal import Decimal

import selenium.webdriver
from selenium.webdriver.common.action_chains import ActionChains as AC
from selenium.webdriver.support.wait import WebDriverWait as WW
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By

from common_utils import conf
from .base_page import BasePage
from case_locators.login_page import LoginPage as LP

__all__ = ['LoginPage']


class LoginPage(BasePage):
    """Page object for the login page."""

    def open_url(self):
        """Navigate the driver to the configured login URL."""
        base = conf.get('common', 'url')
        path = conf.get('common', 'login_path')
        self.full_url = base + path
        self.driver.get(self.full_url)

    def login(self, user_id, user_pw):
        """Fill in the credentials and submit the login form."""
        self.find_element(LP.login_url).click()
        self.find_element(LP.userid).send_keys(user_id)
        self.find_element(LP.password).send_keys(user_pw)
        self.find_element(LP.login_button).click()

    def login_ok(self):
        """Wait for a redirect away from the login URL; return the new URL."""
        self.ww.until(EC.url_changes(self.full_url))
        return self.driver.current_url

    def login_failed(self):
        """Return the text of the on-page login error message."""
        return self.find_element(LP.error_locator).text
22,276
56dae158377610bedf624d769b3f8d62d1333839
import torch
import torch.nn as nn


def prune(X, lower_limit, upper_limit):
    """Element-wise clamp of X into [lower_limit, upper_limit]."""
    return torch.max(torch.min(X, upper_limit), lower_limit)


def attack(binary, white_model, black_model, dataloader, epsilon_size, epochs,
           attack_name, rand=True):
    """Craft adversarial examples on `white_model` and measure how often they
    transfer to (fool) `black_model`.

    Args:
        binary: True -> signed-gradient (FGSM-style) steps;
            False -> L2-normalized gradient steps.
        white_model: surrogate model used to compute gradients.
        black_model: target model whose error rate is measured.
        dataloader: yields (data, label) batches (batch size 32 assumed by
            the clip-tensor shapes below).
        epsilon_size: perturbation budget in 1/255 pixel units.
        epochs: number of attack iterations per batch.
        attack_name: "cifar" or "mnist" (selects normalization constants).
        rand: add a small random start to the perturbation.

    Returns:
        Percentage of examples misclassified by `black_model`.
    """
    if attack_name == "cifar":
        mean, std = torch.Tensor([0.471, 0.448, 0.408]), torch.Tensor([0.234, 0.239, 0.242])
        # One-pixel step expressed in normalized space, scaled by the budget.
        epsilon = ((1 / 255) - mean) / std - ((0 / 255) - mean) / std
        epsilon = epsilon_size * epsilon.view(1, 3, 1, 1).repeat(32, 1, 32, 32).cuda()
        clip_min, clip_max = ((0 / 255) - mean) / std, ((255 / 255) - mean) / std
        clip_min = clip_min.view(1, 3, 1, 1).repeat(32, 1, 32, 32).cuda()
        clip_max = clip_max.view(1, 3, 1, 1).repeat(32, 1, 32, 32).cuda()
    elif attack_name == "mnist":
        mean, std = torch.Tensor([0.1307, ]), torch.Tensor([0.3081, ])
        epsilon = ((1 / 255) - mean) / std - ((0 / 255) - mean) / std
        epsilon = epsilon_size * epsilon.view(1, 1, 1, 1).repeat(1, 1, 28, 28).cuda()
        clip_min, clip_max = ((0 / 255) - mean) / std, ((255 / 255) - mean) / std
        clip_min = clip_min.view(1, 1, 1, 1).repeat(32, 1, 28, 28).cuda()
        clip_max = clip_max.view(1, 1, 1, 1).repeat(32, 1, 28, 28).cuda()
    else:
        # Previously an unknown name fell through to a NameError on `mean`.
        raise ValueError("unknown attack_name: %r" % (attack_name,))

    loss_func = nn.CrossEntropyLoss()
    attack_num, num = 0., 0.
    for data, label in dataloader:
        data, label = data.cuda(), label.cuda()
        adv_data = data.detach().clone().cuda()
        if rand:
            # Small random start to escape gradient-masking plateaus.
            adv_data = adv_data + torch.rand_like(adv_data, device="cuda") / 500
        adv_data.requires_grad = True
        for epoch in range(epochs):
            loss_func(white_model(adv_data), label).backward()
            with torch.no_grad():
                if binary == False:
                    # L2-normalized gradient step.
                    update_value = adv_data.grad / adv_data.grad.pow(2).sum(
                        dim=[1, 2, 3]).sqrt().view(-1, 1, 1, 1)
                elif binary == True:
                    # Signed-gradient step with fixed step size 0.7.
                    update_value = 0.7 * torch.sign(adv_data.grad)
                adv_data = adv_data + update_value
                # Keep the image valid and inside the epsilon ball around data.
                adv_data = prune(adv_data, clip_min, clip_max)
                adv_data = prune(adv_data, data - epsilon, data + epsilon)
            adv_data.requires_grad = True
        attack_num += (black_model(adv_data).max(dim=1)[1] != label).sum().item()
        num += label.size()[0]
    return attack_num / num * 100


def cifar(epsilon_size=8, epochs=10):
    """Driver: WideResNet surrogate vs. MobileNet target on CIFAR-10.

    Fix: the original called attack() with undefined globals `epsilon` and
    `attack_num` (NameError); they are now explicit parameters with
    conventional defaults (8/255 budget, 10 iterations) — TODO confirm these
    values against the original experiment setup.

    Returns:
        The transfer success rate (percent), which is also printed.
    """
    import torchvision
    import os
    mean, std = torch.Tensor([0.471, 0.448, 0.408]), torch.Tensor([0.234, 0.239, 0.242])
    transform = torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(), torchvision.transforms.Normalize(mean, std)])
    data = torchvision.datasets.CIFAR10(root="../data", train=False, download=False,
                                        transform=transform)
    loader = torch.utils.data.DataLoader(data, batch_size=32, shuffle=False, drop_last=True)

    import models.wideresnet as models
    white_model = models.WideResNet(num_classes=10).cuda()
    import models.mobilenet as BlackModel
    black_model = BlackModel.MobileNet().cuda()
    black_model.load_state_dict(torch.load("black_model/mobilenet.p")["net"])
    # black_model.eval()
    temp = torch.load(os.path.join('wideresnet_vs_mobilenet/result_1000', "model_best.pth.tar"))
    white_model.load_state_dict(temp['state_dict'])
    white_model.eval()

    # Fix: the result was previously assigned to an unused local `trans`.
    success_rate = attack(False, white_model, black_model, loader,
                          epsilon_size, epochs, "cifar", True)
    print(success_rate)
    return success_rate


if __name__ == '__main__':
    cifar()
22,277
73a1e2dd14bb97aed0f9052511f15cca52a8392d
# Queue-of-schoolchildren simulation: each second, every boy standing
# directly in front of a girl ("BG") swaps with her, so girls move forward.
# Fixes over the original: (1) all t seconds are simulated, not just one;
# (2) the string rebuild no longer drops the prefix before the swap position
# (`s[elem] + s[elem-1] + s[elem+1:]` lost `s[:elem-1]`); (3) the final
# arrangement is printed.

def advance(queue):
    """Return the queue after one second: every 'BG' pair becomes 'GB'.

    All swaps within a second happen simultaneously, so after a swap both
    positions are consumed and cannot swap again in the same second.
    """
    chars = list(queue)
    i = 0
    while i < len(chars) - 1:
        if chars[i] == 'B' and chars[i + 1] == 'G':
            chars[i], chars[i + 1] = chars[i + 1], chars[i]
            i += 2  # both positions already changed this second
        else:
            i += 1
    return ''.join(chars)


if __name__ == '__main__':
    n = int(input())  # number of students (length of s)
    t = int(input())  # seconds to simulate
    s = input()       # queue: 'B' = boy, 'G' = girl
    for _ in range(t):
        s = advance(s)
    print(s)
22,278
6c447b122f98c056bedfca521a60354f1db4dd0a
import sys


def format_args(arg1, arg2):
    """Return the confirmation message for the two CLI arguments."""
    # Fixed typo in the user-facing message: "enetered" -> "entered".
    return 'You entered {} {}'.format(arg1, arg2)


def main():
    """Echo the first two command-line arguments.

    The original crashed with IndexError when fewer than two arguments were
    supplied and ran at import time; it now validates argv and only runs as
    a script.
    """
    if len(sys.argv) < 3:
        print('Usage: {} <arg1> <arg2>'.format(sys.argv[0]))
        sys.exit(1)
    print(format_args(sys.argv[1], sys.argv[2]))


if __name__ == '__main__':
    main()
22,279
106baf1173e99c817dddd590b5aab1bbf29b9225
# -*- coding: utf-8 -*-
import os
import shutil
import sqlite3

import pandas as pd


def print_and_exec(cursor, query, params=None):
    """Echo *query* and execute it, optionally with bound *params*.

    The optional *params* argument is backward-compatible: existing callers
    that pass only (cursor, query) behave exactly as before.
    """
    print('>> "' + query + '"')
    if params is None:
        return cursor.execute(query)
    return cursor.execute(query, params)


def process_db_file(db_path):
    """Add and populate the `xray_validated` flag in one protocol database.

    Reads ../data/files_info_all_together.txt and, for every file whose path
    starts with this database's PTD prefix, marks the matching `protocol2`
    row as validated (1 - isDeleted), in batches of 10000.
    """
    print('\nProcessing "%s"' % db_path)
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()

        # Discover existing columns so the ALTER TABLE is idempotent.
        column_dtypes = {}
        result = print_and_exec(c, 'PRAGMA table_info(PROTOCOL2)')
        for row in result:
            column_dtypes[row[1].lower()] = row[2]

        validation_column_name = 'xray_validated'
        if validation_column_name not in column_dtypes:
            print('\nCreating X-ray validation field')
            query = 'ALTER TABLE protocol2 ADD COLUMN %s boolean' % validation_column_name
            print_and_exec(c, query)
            conn.commit()

        # PTD prefix ("PTD1"/"PTD2") taken from the database file name.
        _, ptd = os.path.split(db_path)
        ptd = ptd[:4]

        batch_size = 10000
        df = pd.read_csv('../data/files_info_all_together.txt')

        values = []
        batch_counter = 0
        for _, record in df.iterrows():
            path = record['partDir'] + '\\' + record['gdbDir'] + '\\' + record['originalFilename']
            if path.startswith(ptd):
                validated = 1 - record['isDeleted']
                values.append((int(validated), path))
                if len(values) == batch_size:
                    batch_counter += 1
                    update_validation_batch(c, values)
                    print('Batch %i / %i' % (batch_counter, df.shape[0] // batch_size))
                    values = []

        if values:
            update_validation_batch(c, values)
            print('Final batch')

        conn.commit()
    finally:
        # The original leaked the connection; always close it.
        conn.close()


def update_validation_batch(c, values):
    """Apply one batch of (validated, pngfilepath) pairs.

    Uses parameter binding via executemany instead of the previous
    string-built VALUES list, which broke (and was SQL-injectable) for paths
    containing quote characters.
    """
    c.executemany(
        'UPDATE protocol2 SET xray_validated = ? WHERE pngfilepath = ?',
        values)


def add_validation_flag():
    """Process both PTD protocol databases."""
    db_paths = ['../data/PTD1_BASA_CLD.GDB.sqlite',
                '../data/PTD2_BASA_CLD.GDB.sqlite']
    for db_path in db_paths:
        process_db_file(db_path)


if __name__ == '__main__':
    add_validation_flag()
22,280
479be7116cfe980c460536d9d8ead95f4c2182d4
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2016 Takashi Ando - http://blog.rinka-blossom.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from datetime import datetime
from sqlalchemy import create_engine, MetaData
from sqlalchemy.sql import func
from werkzeug.security import generate_password_hash


class INeccoDb(object):
    """ Interface to necco database. """
    pass


class SqliteDb(INeccoDb):
    """ To create instance of Sqlite DB.
        This class to use mainly for testing.

        arguments:
            path_to_db: Path to your Sqlite DB file.
    """
    def __init__(self, path_to_db, **kwargs):
        self.__db_meta = MetaData(
            bind=create_engine("sqlite:///" + path_to_db))
        # Reflect the schema and expose every table as an attribute
        # (e.g. self.User, self.Profile).
        self.__db_meta.reflect()

        for name, table in self.__db_meta.tables.items():
            setattr(self, name, table)


class MySqlDb(INeccoDb):
    """ To create an instance of MySQL DB as singleton.

        arguments:
            user: User name to login MySQL DB.
            pasword: Password for the above user.
            server: IP address or something of MySQL server.
            port: Port number.
            db_name: DB name.
    """
    __instance = None

    def __new__(cls, user, password, server, port, db_name):
        # Singleton: the engine/metadata are created once and the reflected
        # tables are attached to the *class*, so later constructions reuse them.
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)

            engine = create_engine(
                "mysql+pymysql://{USER}:{PASSWORD}@{SERVER}:{PORT}/{DB}?charset=utf8".format(
                    USER=user,
                    PASSWORD=password,
                    SERVER=server,
                    PORT=port,
                    DB=db_name),
                pool_recycle=14400)

            cls.__db_meta = MetaData(bind=engine)
            cls.__db_meta.reflect()

            for name, table in cls.__db_meta.tables.items():
                setattr(cls, name, table)

        return cls.__instance


class AbstractModel(object):
    """ Abstract base model to provide APIs to access database for necco.

        arguments:
            config: Configuration object of necco.config.Configuration.

        keyword arguments:
            db: Dabase instance with INeccoDb.
                This's mainly used for testing.
    """
    def __init__(self, config, db=None, **kwargs):
        # Fall back to the configured MySQL database unless a test double
        # is injected.
        self._db = db if db else MySqlDb(
            config.SQL_USER,
            config.SQL_PASSWORD,
            config.SQL_SERVER,
            config.SQL_PORT,
            config.SQL_DB)

    def get_all_column(self):
        raise NotImplementedError

    def yield_record(self, columns=None):
        raise NotImplementedError


class AccountModel(AbstractModel):
    """ Model to provide APIs to access user's account on necco DB.

        arguments:
            config: Configuration object of necco.config.Configuration.

        keyword arguments:
            db: Dabase instance with INeccoDb.
                This's mainly used for testing.
    """
    def __init__(self, config, **kwargs):
        super().__init__(config, db=kwargs.get("db"))

        # Ordered so that the column order is stable for display/join use.
        self.account_columns = OrderedDict()
        self.account_columns["lastName"] = self._db.Profile.c.lastName
        self.account_columns["firstName"] = self._db.Profile.c.firstName
        self.account_columns["lastKanaName"] = self._db.Profile.c.lastKanaName
        self.account_columns["firstKanaName"] = self._db.Profile.c.firstKanaName
        self.account_columns["nickName"] = self._db.Profile.c.nickName
        self.account_columns["email"] = self._db.User.c.email
        self.account_columns["prefecture"] = self._db.Prefecture.c.name_
        self.account_columns["address"] = self._db.Profile.c.address
        self.account_columns["streetAddress"] = self._db.Profile.c.streetAddress
        self.account_columns["phoneNumber"] = self._db.Profile.c.phoneNumber
        self.account_columns["faxNumber"] = self._db.Profile.c.faxNumber
        self.account_columns["profile"] = self._db.Profile.c.profile

    def get_all_column(self):
        return self.account_columns.keys()

    def get_hashed_password(self, user_id):
        """ Return the stored (hashed) password for *user_id*.

            Raises ValueError when no account matches.
        """
        proxy = self._db.User.select(self._db.User.c.id_ == user_id).execute()
        if not proxy.rowcount:
            raise ValueError("Account not found.")

        index = proxy.keys().index("password_")
        password = proxy.fetchone()[index]
        return password

    def get_id(self, email):
        """ Getter function returns user id against the specified email.
        """
        query = self._db.User.select(self._db.User.c.email == email)
        query = query.with_only_columns([self._db.User.c.id_, ])
        record = query.execute().fetchone()
        return record[0]

    def get_email(self, id_):
        """ Getter function returns the specified user's email.
        """
        query = self._db.User.select(self._db.User.c.id_ == id_)
        query = query.with_only_columns([self._db.User.c.email, ])
        record = query.execute().fetchone()
        return record[0]

    def get_all(self, id_):
        """ Getter function returns the specified user infomation.

            SELECT
                Profile.name_,
                Profile.kana,
                Profile.nickname,
                ...
            from Profile
                inner join User on Profile.userId = User.id_
                inner join Prefecture on Profile.prefectureId = Prefecture.id_;
        """
        joined_query = self._db.User.join(
            self._db.Profile,
            self._db.User.c.id_ == self._db.Profile.c.userId)
        joined_query = joined_query.join(
            self._db.Prefecture,
            self._db.Profile.c.prefectureId == self._db.Prefecture.c.id_)

        joined_query = joined_query.select(
            self._db.User.c.id_ == id_).with_only_columns(self.account_columns.values())

        record = joined_query.execute().fetchone()

        # Stringify every value so the result is directly JSON-serializable.
        return {str(key): str(value) for key, value in zip(self.account_columns.keys(), record)}

    def update_user_with(self, id_, **kwargs):
        # Only update the fields that were actually supplied.
        query = self._db.User.update().where(self._db.User.c.id_ == id_)

        if kwargs.get("email"):
            query = query.values(email=kwargs.get("email"))

        if kwargs.get("password_"):
            # Never store the raw password; hash it first.
            hashed_password = generate_password_hash(kwargs.get("password_"))
            query = query.values(password_=hashed_password)

        query.values(updatedAt=datetime.now()).execute()

    def update_profile_with(self, id_, **kwargs):
        query = self._db.Profile.update().where(self._db.Profile.c.userId == id_)

        # Copy over every supplied Profile column; skip falsy/missing values.
        params = {"updatedAt": datetime.now()}
        for column in self._db.Profile.c.keys():
            val = kwargs.get(column)
            if val:
                params[column] = val

        query.values(**params).execute()

    def update_account_with(self, id_, **kwargs):
        """ Update account information against the specified user id.
        """
        self.update_user_with(id_, **kwargs)
        self.update_profile_with(id_, **kwargs)
        # TODO:
        # self.update_prefecture_with(id_, kwargs)


class AbilityModel(AbstractModel):
    """ Model to provide APIs to access user's ability on necco DB.

        arguments:
            config: Configuration object of necco.config.Configuration.

        keyword arguments:
            db: Dabase instance with INeccoDb.
                This's mainly used for testing.
    """
    def __init__(self, config, **kwargs):
        super().__init__(config, db=kwargs.get("db"))

        self.ability_columns = {
            "lastName": self._db.Profile.c.lastName,
            "firstName": self._db.Profile.c.firstName,
            "lastKanaName": self._db.Profile.c.lastKanaName,
            "firstKanaName": self._db.Profile.c.firstKanaName,
            "genre": self._db.Ability.c.genre,
            "detail": self._db.Ability.c.detail,
        }

    def get_all_column(self):
        return [k for k in self.ability_columns.keys()]

    def yield_record(self, user_ids=[], columns=None):
        """ Generator function which returns ability records with the
            specified users and below query.

            SELECT
                Profile.lastName,
                Profile.firstName,
                ...
            FROM User
                INNER JOIN Profile ON Profile.userId = User.id_
                INNER JOIN UsersAbility ON User.id_ = UsersAbility.userId
                INNER JOIN Ability ON UsersAbility.abilityId = Ability.id_;
        """
        # OR together the requested user ids into a single filter clause.
        filter_ = None
        if user_ids:
            filter_ = self._db.User.c.id_ == user_ids[0]
            for user_id in user_ids[1:]:
                filter_ |= self._db.User.c.id_ == user_id

        # Restrict to known columns; default to all of them.
        if columns:
            columns = [col for col in columns if col in self.ability_columns.keys()]
        else:
            columns = [col for col in self.ability_columns.keys()]
        db_columns = [self.ability_columns.get(col) for col in columns]

        joined_query = self._db.User.join(self._db.Profile,
                                          self._db.User.c.id_ == self._db.Profile.c.userId)
        joined_query = joined_query.join(self._db.UsersAbility,
                                         self._db.User.c.id_ == self._db.UsersAbility.c.userId)
        joined_query = joined_query.join(self._db.Ability,
                                         self._db.UsersAbility.c.abilityId == self._db.Ability.c.id_)
        selected_query = joined_query.select()

        # `if filter_:` does not work here: SQLAlchemy clause elements do not
        # support truth-testing, so compare against None explicitly.
        if filter_ is not None:
            selected_query = selected_query.where(filter_)

        executed = selected_query.with_only_columns(db_columns).execute()
        for record in executed.fetchall():
            yield {columns[i]: r for i, r in enumerate(record)}


class RequestModel(AbstractModel):
    """ Model to provide APIs to access user's requests on necco DB.

        arguments:
            config: Configuration object of necco.config.Configuration.

        keyword arguments:
            db: Dabase instance with INeccoDb.
                This's mainly used for testing.
    """
    def __init__(self, config, **kwargs):
        super().__init__(config, db=kwargs.get("db"))

        self.request_columns = {
            "lastName": self._db.Profile.c.lastName,
            "firstName": self._db.Profile.c.firstName,
            "lastKanaName": self._db.Profile.c.lastKanaName,
            "firstKanaName": self._db.Profile.c.firstKanaName,
            "genre": self._db.Request.c.genre,
            "detail": self._db.Request.c.detail,
        }

    def _get_request_count(self):
        """ Get the number of requests.

            SELECT COUNT(*)
            FROM Request
                INNER JOIN UsersRequest ON UsersRequest.requestId = Request.id_;
        """
        joined_query = self._db.Request.join(
            self._db.UsersRequest,
            self._db.UsersRequest.c.requestId == self._db.Request.c.id_)
        joined_query = joined_query.select().with_only_columns([func.count()])
        res = joined_query.execute()
        # The result proxy yields a single one-element row: [(count,)].
        return [_ for _ in res][0][0]

    def get_all_column(self):
        return [k for k in self.request_columns.keys()]

    def yield_record(self, user_ids=[], columns=None):
        """ Generator function which returns request records with below query.

            SELECT
                Profile.lastName,
                Profile.firstName,
                Profile.lastKanaName,
                Profile.firstKanaName,
                Request.detail
            FROM User
                INNER JOIN Profile ON User.id = Profile.userId
                INNER JOIN UsersRequest ON User.id_ = UsersRequest.userId
                INNER JOIN Request ON UsersRequest.requestId = Request.id_;
        """
        # OR together the requested user ids into a single filter clause.
        filter_ = None
        if user_ids:
            filter_ = self._db.User.c.id_ == user_ids[0]
            for user_id in user_ids[1:]:
                filter_ |= self._db.User.c.id_ == user_id

        # Restrict to known columns; default to all of them.
        if columns:
            columns = [col for col in columns if col in self.request_columns.keys()]
        else:
            columns = [col for col in self.request_columns.keys()]
        db_columns = [self.request_columns.get(col) for col in columns]

        joined_query = self._db.User.join(self._db.Profile,
                                          self._db.User.c.id_ == self._db.Profile.c.userId)
        joined_query = joined_query.join(self._db.UsersRequest,
                                         self._db.User.c.id_ == self._db.UsersRequest.c.userId)
        joined_query = joined_query.join(self._db.Request,
                                         self._db.UsersRequest.c.requestId == self._db.Request.c.id_)
        selected_query = joined_query.select()

        # Clause elements are not truth-testable; compare to None explicitly.
        if filter_ is not None:
            selected_query = selected_query.where(filter_)

        executed = selected_query.with_only_columns(db_columns).execute()
        for record in executed.fetchall():
            yield {columns[i]: r for i, r in enumerate(record)}


class PrefectureModel(AbstractModel):
    def __init__(self, config, **kwargs):
        super().__init__(config, db=kwargs.get("db"))

    def get_all_column(self):
        return [
            self._db.Prefecture.c.id_,
            self._db.Prefecture.c.name_
        ]

    def yield_record(self, columns=None):
        """ Generator function which returns prefectures with below query.

            SELECT Prefecture.id_, Prefecture.name_ FROM Prefecture;
        """
        query = self._db.Prefecture.select().with_only_columns(self.get_all_column())
        for record in query.execute():
            yield record
22,281
2a9668fabced843f5884ee6ce998000325a02c9b
from mcpi.minecraft import Minecraft cherinehsu = Minecraft.create() x,y,z = cherinehsu.player.getTilePos() try: answer = input('請問你右邊要放什麼方塊:') cherinehsu.setBlocs(x+1,y,z,answer) except: print('只能輸入數字!!!!!!')
22,282
021ec174c63083f8bb68ca42e2909b95d48ac93a
from pylab import * import matplotlib.pyplot as plt import matplotlib.lines as lines import heapq import math import copy from copy import deepcopy class Node: def __init__(self,name,x,y): self.name = name self.x = x self.y = y def setName(self,name): self.name = name def setPos(self,x,y): self.x = x self.y = y def setIdx(self,idx): self.idx = idx def getName(self): return self.name def getPos(self): return (self.x,self.y) def getIdx(self): return self.idx def getDistance(self,Node): selisihx = self.x - Node.x selisihy = self.y - Node.y return math.sqrt((selisihx)**2 + (selisihy)**2) def printNode(self): print ("Name : ", self.name) print ("x : ", self.x) print ("y : ", self.y) class Map: def __init__(self,n): self.n = n self.nodes = [] self.matrix = [] def setNode(self,Node,idx): self.nodes[idx] = Node def setMatrix(self,row,col,bobot): self.matrix[row][col] = bobot def getNode(self,idx): return self.nodes[idx] def getMatrix(self,row,col): return self.matrix[row][col] def printMatrix(self): print(self.matrix) def getNodeIdx(self,name): found = False i = 0 while ((not found) and (i < self.n)): if (self.nodes[i].getName() == name): found = True return i else: i = i+1 class State: def __init__(self, costtotal, idx, costf): self.costtotal = costtotal self.idx = idx self.costf = costf self.path = [] def __lt__(self, other): return self.costtotal < other.costtotal def setIdx(self, idx): self.idx = idx def setCosttotal(self, costtotal): self.costtotal = costtotal def setCostf(self, costf): self.costf = costf def addPath(self, path): self.path.append(path) def getIdx(self): return self.idx def getCosttotal(self): return self.costtotal def getCostf(self): return self.costf def getPath(self): return self.path def isVisited(self, idx): return idx in self.path def printPath(self, listOfNode): print("Rute Yang Dilalui \t: ", end='') for i in range(0, len(self.path)-1): print(listOfNode[self.path[i]].getName(), ' -> ', end=''), 
print(listOfNode[self.path[len(self.path)-1]].getName()) def main(): file_name = input('Masukkan nama file \t\t\t: ') files = open(file_name, 'r') file_line_list = files.readlines() n = int(file_line_list[0]) M = Map(n) for i in range(n): idx = i+1 line_list = file_line_list[idx].split() M.nodes.append(Node(line_list[0], float(line_list[1]), float(line_list[2]))) for i in range(0,n): idx = i+n+1 line_list = file_line_list[idx].split() listtemp = [] for j in range(n): listtemp.append(float(line_list[j])) M.matrix.append(listtemp) startname = input('Masukkan nama tempat awal \t\t: ') startNode = M.getNodeIdx(startname) goalname = input('Masukkan nama tempat tujuan akhir \t: ') goalNode = M.getNodeIdx(goalname) startState = State(M.getNode(startNode).getDistance(M.getNode(goalNode)),startNode,0) startState.addPath(startNode) listOfState = [] #(costtotal, id, costf) heapq.heappush(listOfState, startState) iterates = True while iterates: currentState = heapq.heappop(listOfState) idx = currentState.getIdx() costfb = currentState.getCostf() if (idx == goalNode): iterates = False else: for i in range (0,M.n): if not (M.matrix[idx][i]==-1 or currentState.isVisited(i)): costf = M.getMatrix(int(idx),i) + costfb costtotal = costf + M.getNode(i).getDistance(M.getNode(goalNode)) nextState = State(costtotal,i,costf) nextState.path = copy.deepcopy(currentState.path) nextState.addPath(i) heapq.heappush(listOfState, nextState) print("Jarak Total \t\t:",currentState.getCostf()) currentState.printPath(M.nodes) # Plot Jawaban All = [] Sequence = [] xPoints = [] yPoints = [] xSequence = [] ySequence = [] xA = [0.1,0.1] yA = [0.1,0.1] for i in range (len(M.nodes)): xPoints.append(M.nodes[i].x) yPoints.append(M.nodes[i].y) for i in range (len(currentState.path)): xSequence.append(M.nodes[currentState.path[i]].x) ySequence.append(M.nodes[currentState.path[i]].y) Sequence.append((M.nodes[currentState.path[i]].x,M.nodes[currentState.path[i]].y)) fig = plt.figure() ax = fig.add_subplot(111) 
for i in range (len(M.nodes)): scatter(xPoints[i],yPoints[i], s=100 ,marker='o', c='grey') ax.text(xPoints[i]-100,yPoints[i], M.nodes[i].getName()) scatter(xSequence,ySequence, s=100 ,marker='o', color="red") left,right = ax.get_xlim() low,high = ax.get_ylim() arrow( left, 0, right -left, 0, length_includes_head = True, head_width = 0.15 ) arrow( 0, low, 0, high-low, length_includes_head = True, head_width = 0.15 ) for i in range (n): for j in range (n): if (M.matrix[i][j]!=-1): xA[0]=M.nodes[i].x xA[1]=M.nodes[j].x yA[0]=M.nodes[i].y yA[1]=M.nodes[j].y ax.add_line(Line2D(xA, yA, linewidth=1, color='blue')) middleX = (xA[0]+xA[1])/2 middleY = (yA[0]+yA[1])/2 ax.text(middleX,middleY, M.matrix[i][j]) # for i in range (0, len(All)-1, 2): # xA[0]=All[i][0] # xA[1]=All[i+1][0] # yA[0]=All[i][1] # yA[1]=All[i+1][1] # ax.add_line(Line2D(xA, yA, linewidth=1, color='blue')) # middleX = (xA[0]+xA[1])/2 # middleY = (yA[0]+yA[1])/2 # ax.text(middleX,middleY, M.matrix[i%M.n][i//M.n]) for i in range (0, len(Sequence)-1): xA[0]=Sequence[i][0] xA[1]=Sequence[i+1][0] yA[0]=Sequence[i][1] yA[1]=Sequence[i+1][1] ax.add_line(Line2D(xA, yA, linewidth=2, color='green')) grid() show() if __name__ == '__main__': main()
22,283
342a4b2a3d052ee8249f5ca80de51b404f52dd45
from __future__ import absolute_import from celery import Celery from celery.task import periodic_task, task from datetime import datetime, timedelta from random import random, triangular, randint, gauss from django.core.mail import send_mail from email.utils import parsedate_tz, parsedate_to_datetime import sys import pytz import re import imaplib import email import string import json import requests import parsedatetime as pdt # for parsing of datetime shit for NLP from .settings import EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, use_gmail from ent.models import AlternateText, PossibleText, Collection, Timing, Tag, ActualText, Carrier, UserSetting, Outgoing, Incoming from ent.views import time_window_check, date_check_fun from .celery import app from celery.task.control import inspect # app = Celery() # @app.on_after_configure.connect # def setup_periodic_tasks(sender, **kwargs): # # Calls test('hello') every 10 seconds. # sender.add_periodic_task(10.0, schedule_texts, name='add every 10') # sender.add_periodic_task(10.0, send_texts, name='add every 10') # sender.add_periodic_task(10.0, check_email_for_new, name='add every 10') # sender.add_periodic_task(10.0, process_new_mail, name='add every 10') # task_seconds_between = 6 task_seconds_between = 15 task_seconds_between_moon = 10 rate_limit_moon = "6/m" rate_limit_all_else = "2/m" # 10800 - 3hr app.conf.beat_schedule = { 'schedule': { 'task': 'schedule_texts', # 'schedule': crontab(hour=0, minute=1), 'schedule': timedelta(seconds=task_seconds_between), 'args': () }, 'send': { 'task': 'send_texts', # 'schedule': crontab(hour=0, minute=1), 'schedule': timedelta(seconds=task_seconds_between), 'args': () }, 'check': { 'task': 'check_email_for_new', # 'schedule': crontab(hour=0, minute=1), 'schedule': timedelta(seconds=task_seconds_between), 'args': () }, 'process': { 'task': 'process_new_mail', # 'schedule': crontab(hour=0, minute=1), 'schedule': timedelta(seconds=task_seconds_between), 'args': () }, 'sun': { 'task': 
'schedule_sun_texts', 'schedule': timedelta(seconds=task_seconds_between_moon), 'args': () }, 'moon': { 'task': 'schedule_moon_texts', 'schedule': timedelta(seconds=task_seconds_between_moon), 'args': () }, } # @app.on_after_configure.connect # def setup_periodic_tasks(sender, **kwargs): # sender.add_periodic_task(20.0, schedule_texts, name='add every 10') # sender.add_periodic_task(20.0, send_texts, name='add every 10') # sender.add_periodic_task(20.0, check_email_for_new, name='add every 10') # sender.add_periodic_task(20.0, process_new_mail, name='add every 10') ############################################# ######## PERODIC TASK TO SCHEDULE NOW TEXTS ############################################# def schedule_specific_text(text,working_settings,user_timezone, time_window,day): date_today = datetime.now(pytz.utc).astimezone(user_timezone) scheduled_date = user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) scheduled_date = scheduled_date.astimezone(pytz.UTC) for i in range(text.timing.repeat_in_window): seconds_to_add = randint(0,round(time_window.total_seconds())) atext = ActualText(user=text.user,text=text) if datetime.now(pytz.utc) > user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)).astimezone(pytz.UTC): atext.time_to_send = scheduled_date + timedelta(0,(86400+seconds_to_add)) elif datetime.now(pytz.utc) > scheduled_date and datetime.now(pytz.utc) < user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)).astimezone(pytz.UTC): time_window = datetime.now(pytz.utc) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)).astimezone(pytz.UTC) seconds_to_add = randint(0,int(time_window.total_seconds())) atext.time_to_send = scheduled_date + timedelta(0,(seconds_to_add))+ timedelta(day,0) else: atext.time_to_send = scheduled_date + timedelta(0,(seconds_to_add))+ timedelta(day,0) atext.save() text.date_scheduled = datetime.now(pytz.utc) text.save() def 
get_sun_time(sundata,desired): for i in range(0,len(sundata)): if sundata[i]['phen'] == desired: return sundata[i]['time'] @task(name='schedule_sun_texts',rate_limit=rate_limit_moon) def schedule_sun_texts(): print("SUUUUUUUUNNNNNNN") date_now = str(datetime.now(pytz.utc).strftime('%-m/%-d/%Y')) distinct_users = PossibleText.objects.all().filter(tmp_save=False).filter(active=True).filter(text_type="sun").values('user').distinct() for user in distinct_users: working_settings = UserSetting.objects.all().get(user=user['user']) user_timezone = pytz.timezone(working_settings.timezone) user_location = working_settings.city_state() print("date_now", date_now) print("user_location", user_location) data = requests.get(str('http://api.usno.navy.mil/rstt/oneday?date='+ date_now +'&loc=' + user_location)) dataj = data.json() print("DATAJ", dataj) if dataj != "NOT FOUND": print("DATA FOUND") #Get the sun data working_texts = PossibleText.objects.all().filter(user=working_settings.user).filter(tmp_save=False).filter(active=True).filter(text_type="sun") for text in working_texts: print("text",text.text) if ActualText.objects.all().filter(user=text.user).filter(text=text).filter(time_sent__isnull=True).filter(time_to_send__gte=pytz.utc.localize(datetime.now())).count() < 1: if 'Sun Rise' in text.text: text_to_send = 'The sun is rising right now!' time_out = get_sun_time(dataj['sundata'],'R') elif 'Sun Set' in text.text: text_to_send = 'The sun is setting right now!' time_out = get_sun_time(dataj['sundata'],'S') elif 'Solar Noon' in text.text: text_to_send = 'The sun is at the highest point in the sky today right now!' time_out = get_sun_time(dataj['sundata'],'U') # time_out 8:34 p.m. 
DT time_out_time = time_out.split(' ')[0] time_out_ampm = time_out.split(' ')[1] if time_out_ampm == "a.m.": time_out_ampm = "AM" else: time_out_ampm = "PM" date_out = str(str(datetime.now(pytz.utc).date()) + ' ' + time_out_time + ' ' + time_out_ampm) time_to_send = datetime.strptime(date_out, "%Y-%m-%d %I:%M %p") time_to_send = user_timezone.localize(time_to_send) time_to_send = time_to_send.astimezone(pytz.UTC) print("time_to_send",time_to_send) print("datetime.now(pytz.utc)",datetime.now(pytz.utc)) if datetime.now(pytz.utc) < time_to_send: atext = ActualText(user=text.user,text=text,time_to_send=time_to_send,text_sent=text_to_send) atext.save() @task(name='schedule_moon_texts',rate_limit=rate_limit_moon) def schedule_moon_texts(): print("MOOOOOOOOON") import requests date_now = str(datetime.now(pytz.utc).strftime('%-m/%-d/%Y')) # print("BEFORE GET") # print("BEFORE AFTER") try: data = requests.get(str('http://api.usno.navy.mil/moon/phase?date='+date_now+'&nump=4')) dataj = data.json() # print("dataj", dataj) except: dataj="NOT FOUND" if dataj != "NOT FOUND": next_phase = dataj['phasedata'][0] moon_dt = datetime.strptime(str(dataj['phasedata'][0]['date'])+' '+dataj['phasedata'][0]['time'], '%Y %b %d %H:%M') moon_dt_utc = pytz.utc.localize(moon_dt) working_texts = PossibleText.objects.all().filter(tmp_save=False).filter(active=True).filter(text_type="moon") for text in working_texts: if str(next_phase['phase']) in str(text.text): working_settings = UserSetting.objects.all().get(user=text.user) user_timezone = pytz.timezone(working_settings.timezone) moon_dt_user = moon_dt_utc.astimezone(user_timezone) # #See if there is a text scheduled in the future for this phase. if not, then schedule it. 
if ActualText.objects.all().filter(user=text.user).filter(text=text).filter(time_sent__isnull=True).filter(time_to_send__gte=pytz.utc.localize(datetime.now())).count() < 1: date_today = datetime.now(pytz.utc).astimezone(user_timezone) time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) moon_dt_user = moon_dt_user - timedelta(1,0) scheduled_date = user_timezone.localize(datetime.combine(moon_dt_user.date(), text.timing.hour_start)) scheduled_date = scheduled_date.astimezone(pytz.UTC) time_to_send = scheduled_date + timedelta(0,randint(0,round(time_window.total_seconds()))) text_to_send = "The " + dataj['phasedata'][0]['phase'] + " will happen at " + str(moon_dt_user.strftime('%-I:%M %p')) + " on " + str(moon_dt_user.strftime(' %B %d, %Y')) + "!" atext = ActualText(user=text.user,text=text,time_to_send=time_to_send,text_sent=text_to_send) atext.save() else: print(dataj) # @periodic_task(run_every=timedelta(seconds=10)) # @periodic_task(run_every=timedelta(seconds=task_seconds_between)) # @app.task @task(name='schedule_texts',rate_limit=rate_limit_all_else) # @task() def schedule_texts(): print("TASK 1 - STARTING schedule_texts") # INSPECT # i = app.control.inspect() # print("REGISTERED", i.registered()) # print("ACTIVE", i.active()) # print("SCHEDULED", i.scheduled()) # print("RESERVED", i.reserved()) #Specific Timings working_texts = PossibleText.objects.all().filter(text_type='standard').filter(tmp_save=False).filter(active=True).filter(timing__fuzzy=False).filter(timing__date_start__lte=pytz.utc.localize(datetime.now())) for text in working_texts: if text.timing.dow_check() == 1: if ActualText.objects.all().filter(text=text).filter(time_sent__isnull=True).count()<1: # The following conditional is to only schedule texts once a day working_settings = UserSetting.objects.all().get(user=text.user) user_timezone = pytz.timezone(working_settings.timezone) 
# print("TEXT:", text.text) # print("TEXT:", text.timing.repeat_in_window) #If the text is not newly scheduled if text.date_scheduled is not None: #Idea is to schedule it once a day for the next day if pytz.utc.localize(datetime.now()) > text.date_scheduled + timedelta(1,0): date_today = datetime.now(pytz.utc).astimezone(user_timezone) time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) schedule_specific_text(text,working_settings,user_timezone,time_window,1) else: #This is for newly scheduled texts. Text it today # print("NOT SCHEULDED") #Schedule it for today date_today = datetime.now(pytz.utc).astimezone(user_timezone) starting_time = user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) starting_time = starting_time.astimezone(pytz.UTC) ending_time = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) ending_time = ending_time.astimezone(pytz.UTC) # print("STARTING TIME:", starting_time) # print("date_today", date_today) # print("ENDING TIME:", ending_time) ###### SCHEDULE IT FOR TODAY - this is just like an extra thing. 
if not starting_time == ending_time: # print("THERE IS A RANGE") # Schedule them for today if starting_time < date_today < ending_time: # print("DATE NOW BETWEEN RANGE") time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - date_today schedule_specific_text(text,working_settings,user_timezone,time_window,0) else: # print("DATE BEFORE RANGE") time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) schedule_specific_text(text,working_settings,user_timezone,time_window,0) else: if date_today < starting_time: time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) schedule_specific_text(text,working_settings,user_timezone,time_window,0) #Schedule it for tomorrow! time_window = user_timezone.localize(datetime.combine(date_today, text.timing.hour_end)) - user_timezone.localize(datetime.combine(date_today, text.timing.hour_start)) schedule_specific_text(text,working_settings,user_timezone,time_window,1) #Fuzzy Timings working_texts = PossibleText.objects.all().filter(text_type='standard').filter(tmp_save=False).filter(active=True).filter(timing__fuzzy=True).filter(timing__date_start__lte=pytz.utc.localize(datetime.now())) for text in working_texts: if ActualText.objects.all().filter(text=text).filter(time_sent__isnull=True).count()<1: # print("SCHEDULING NEW FUZZY TEXT") # Get the timing info # Hack to have discrete values in slider noise_tmp = 100 if text.timing.iti_noise == 1: noise_tmp = 100 if text.timing.iti_noise == 2: noise_tmp = 400 if text.timing.iti_noise == 3: noise_tmp = 600 if text.timing.iti_noise == 4: noise_tmp = 800 ITI_noise_tmp = noise_tmp/100 ITI_mean = text.timing.iti max_minutes = ITI_mean + (ITI_mean*ITI_noise_tmp) min_minutes = ITI_mean - (ITI_mean*ITI_noise_tmp) # Add seconds seconds_to_add 
= 60 * int(triangular(min_minutes, max_minutes, ITI_mean)) #Get user timezone working_settings = UserSetting.objects.all().get(user=text.user) user_timezone = pytz.timezone(working_settings.timezone) #Get today's date and end in UTC date_today = datetime.now(pytz.utc).astimezone(user_timezone) possible_date = date_today + timedelta(0,seconds_to_add) possible_date = possible_date.astimezone(pytz.UTC) possible_date = time_window_check(text,possible_date) date_check = date_check_fun(text,possible_date) possible_date = possible_date.astimezone(pytz.UTC) # possible_date = pytz.utc.localize(possible_date) if date_check == 1: atext = ActualText(user=text.user,text=text,time_to_send=possible_date) atext.save() ############################################# ######## Send the texts ############################################# def send_text(text): # Save all outgoing print("SENDING TEXT") tmp_user = UserSetting.objects.all().get(user=text.user) addressee = tmp_user.sms_address #Check for alternate texts and save if text.text.alt_text.all().count()>0: alt_text = text.text.alt_text.all().order_by('?')[0] message_to_send = alt_text.alt_text text.alt_text = alt_text text.save() else: message_to_send = text.text #check for moon texts if text.text.text_type == 'moon' or text.text.text_type == 'sun' : message_to_send = text.text_sent past_ten_minutes = datetime.now(pytz.utc) - timedelta(minutes=1) out_check = Outgoing.objects.all().filter(text=text).filter(date_sent__gte=past_ten_minutes).count() actual_check = ActualText.objects.all().filter(user=text.user).filter(text=text.id).filter(time_sent__gte=past_ten_minutes).count() # print("ACUTAL CHECK", actual_check) # print("OUTCHECK:", out_check) if out_check < 1 and actual_check < 1: if tmp_user.send_email_check == True: send_mail('',message_to_send, str(EMAIL_HOST_USER), [text.user.email], fail_silently=False) text.time_sent = datetime.now(pytz.utc) text.save() outgoing_tmp = Outgoing(text=text,date_sent=datetime.now(pytz.utc)) 
outgoing_tmp.save() # print("Sent 1 email") if tmp_user.send_text_check == True: send_mail('',str(message_to_send), str(EMAIL_HOST_USER), [addressee], fail_silently=False) text.time_sent = datetime.now(pytz.utc) text.save() outgoing_tmp = Outgoing(text=text,date_sent=datetime.now(pytz.utc)) outgoing_tmp.save() print("Sent 1 text") # @periodic_task(run_every=timedelta(seconds=10)) # @app.task # @periodic_task(run_every=timedelta(seconds=task_seconds_between)) # @app.task @task(name="send_texts",rate_limit=rate_limit_all_else) # @task() def send_texts(): print("TASK 2 - STARTING send_texts ") today_date = datetime.now(pytz.utc) #filter out the ones that haven't been sent out yet AND the ones that are suppose to be sent out now user_texts = ActualText.objects.filter(time_to_send__lte=datetime.now(pytz.utc)).filter(time_sent=None) for text in user_texts: # YOU WILL HAVE TO ADD SOME CHECKS INTO THIS working_settings = UserSetting.objects.all().get(user=text.user) if working_settings.text_request_stop == False: # user specific text (i.e. they texted "stop") # This is to remove any old texts that are backed up. td = datetime.now(pytz.utc) - text.time_to_send.astimezone(pytz.UTC) if td.seconds/60 > 5: text.delete() else: send_text(text) # time specific checks (i.e. 
sent more than 5 in the last 10 minutes, etc) ############################################# ######## GET THE REPLIES ############################################# def get_first_text_part(msg): maintype = msg.get_content_maintype() if maintype == 'multipart': for part in msg.get_payload(): if part.get_content_maintype() == 'text': return part.get_payload() elif maintype == 'text': return msg.get_payload() # @periodic_task(run_every=timedelta(seconds=10)) # @app.task # @periodic_task(run_every=timedelta(seconds=task_seconds_between)) # @app.task @task(name="check_email_for_new",rate_limit=rate_limit_all_else) # @task() def check_email_for_new(): #Set up the email print("TASK 3 - RECIEVE MAIL") if use_gmail == 1: mail = imaplib.IMAP4_SSL('imap.gmail.com') else: mail = imaplib.IMAP4_SSL('imappro.zoho.com',993) #### GMAIL # mail = imaplib.IMAP4_SSL('imap.gmail.com') # mail.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD) # mail.list() #### ZOHO mail.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD) mail.list() # Out: list of "folders" aka labels in gmail. mail.select("inbox") # connect to inbox. #Get the IDs of the mail in the boxes result, data = mail.search(None, "ALL") ids = data[0] # data is a list. 
id_list = ids.split() #Go through each new email and figure out what it corresponds to for id in id_list: # print("DOWNLOADING MESSAGES") print(id) result, data = mail.fetch(id, "(RFC822)") # fetch the email body (RFC822) for the given ID if data is not None: print("data is not none") raw_email = data[0][1] # here's the body, which is raw text of the whole email email_message = email.message_from_bytes(raw_email) #converts to email object #Get the email info email_user = email_message['From'].split('@',1)[0] email_date = parsedate_to_datetime(email_message.get('date')) email_content = get_first_text_part(email_message) Incoming(email_user=email_user,email_date=email_date,email_message=email_message,email_content=email_content).save() # print("NEW INCOMING SAVED") mail.store(id, '+FLAGS', '\\Deleted') # flage this for deletion mail.expunge() #delete them # print("BOX CLEANED") # @periodic_task(run_every=timedelta(seconds=10)) # @app.task # @task(name="process_new_mail") # @periodic_task(run_every=timedelta(seconds=task_seconds_between)) # @app.task @task(name="process_new_mail",rate_limit=rate_limit_all_else) # @task() def process_new_mail(): print("TASK 4 - PROCESS MAIL") Toprocess = Incoming.objects.all().filter(processed=0) for tp in Toprocess: #need conditional # print("tp.email_user", tp.email_user) if UserSetting.objects.all().filter(phone=tp.email_user).exists(): if UserSetting.objects.all().filter(phone=tp.email_user).count() == 1: working_user = UserSetting.objects.all().get(phone=tp.email_user) else: working_user = UserSetting.objects.all().filter(phone=tp.email_user).first() #check to see if the user wants to stop ############################################################ ############### CHECK FOR RESPONSE AND DETERMINE WHAT IT IS ############################################################ # print("tp.email_content ", tp.email_content ) if tp.email_content is not None: #this is new working_text = 
ActualText.objects.all().filter(user=working_user.user).exclude(time_sent__isnull=True) working_text = working_text.filter(time_sent__lte=tp.email_date) new_text_conditional = 0 # print("TEXT CONTENT: ", tp.email_content.lower()) if len(str(tp.email_content.lower())) < 6: if 'stop' in tp.email_content.lower(): working_user.text_request_stop = True working_user.save() if len(str(tp.email_content.lower())) < 6: if 'start' in tp.email_content.lower(): working_user.text_request_stop = False working_user.save() if len(str(tp.email_content.lower())) > 3: if 'new:' in tp.email_content.lower()[:4]: default_timing = Timing.objects.all().filter(user=working_user.user).get(default_timing=True) if PossibleText.objects.all().filter(user=working_user.user).filter(text=tp.email_content[4:]).count() < 1: new_text = PossibleText(user=working_user.user,tmp_save=False,timing=default_timing,text=tp.email_content[4:],date_created=pytz.utc.localize(datetime.now())) new_text.save() new_text_conditional = 1 if working_text.count() > 0 and new_text_conditional == 0: working_text = working_text.latest('time_sent') if working_text.response is None or working_text.response == "": # print("first consolidate conditional") working_text.time_response = tp.email_date working_text.response = tp.email_content working_text.save() tp.processed = 1 tp.save()
22,284
c4e0d1d6ec26ee1f86b900b474dd5444448764e2
import asyncio from flask import Flask, request from routes import home, products, offers, page_not_found from utils import url_for_page from config import config app = Flask(__name__) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) # add globals so they can be used in all templates app.jinja_env.globals["config"] = config app.jinja_env.globals["url_for_page"] = url_for_page @app.route("/") @app.route("/home/") def render_home(): page = request.args.get("page", 0, type=int) return home(page) @app.route("/<category>") def render_products(category): category_id = request.args.get("id", type=int) page = request.args.get("page", 0, type=int) return products(loop, category_id, page) @app.route("/<category>/<product>") def render_offers(category, product): product_id = request.args.get("id", type=int) return offers(loop, product_id) @app.errorhandler(404) def render_page_not_found(error): return page_not_found()
22,285
1f881c33e53c9ada0c0c919ebc38e27eb4be1f8b
NICE_WEATHER = [ 800, # Clear Sky 801, # Few Clouds 802, # Scattered Clouds ] NON_PRECIPITATION = [ 800, # Clear Sky 801, # Few Clouds 802, # Scattered Clouds 803, # Broken Clouds 804, # Overcast Clouds ]
22,286
eeac1e3ca6ed2078b91b835d2e86cb3b4156c2ed
import requests import pandas as pd import json import datetime import pprint pp = pprint.PrettyPrinter(indent=4) streak_api_key = '****' pipeline_url = '****' call_headers = {'content-type' : 'application/json'} upswing_pipeline_key = ''****'' upswing_pipeline_response = requests.get(url=(pipeline_url + '/' + upswing_pipeline_key + '/' + 'boxes'), auth=(streak_api_key, ''), headers=call_headers ) pipeline_fields_response = requests.get(url=(pipeline_url + '/' + upswing_pipeline_key + '/' + 'fields'), auth=(streak_api_key, ''), headers=call_headers ) pipeline_stage_response = requests.get(url=(pipeline_url + '/' + upswing_pipeline_key + '/' + 'stages'), auth=(streak_api_key, ''), headers=call_headers ) streak_schools = json.loads(upswing_pipeline_response.text) pipeline_fields = json.loads(pipeline_fields_response.text) pipeline_stages = json.loads(pipeline_stage_response.text) #Streak stuffs all custom columns into a "fields" object. Extract the key_id and name and store it into a new var for later use pipeline_keys = {key['key'] : key['name'] for key in pipeline_fields} pipeline_key_names = {pipeline_keys[key] : key for key in pipeline_keys} #We only care about these objects on each box (i.e. school) wanted_default_school_fields = ['****','****','****','****','****','****'] #there are a lot of unnecessary custom columns in streak. 
The wanted_fields list filters for the ones we care about wanted_custom_field_names = ['****','****'] wanted_custom_field_keys = [pipeline_key_names[name] for name in wanted_custom_field_names] custom_field_value_map = {} for field in pipeline_fields: if 'tagSettings' in list(field.keys()): custom_field_value_map[field['name']] = {tag['key'] : tag['tag'] for tag in field['tagSettings']['tags']} elif 'dropdownSettings' in list(field.keys()): custom_field_value_map[field['name']] = {item['key'] : item['name'] for item in field['dropdownSettings']['items']} custom_field_value_map['stage_names'] = {key : value['name'] for key, value in pipeline_stages.items()} filtered_schools = [] for school in streak_schools: school_filtered = {} for school_field in wanted_default_school_fields: #for each school, for each default field on the school, if the object is one of the desired default fields, append to our new filtered variable if school_field == 'assignedToSharingEntries' and len(school[school_field]) > 0: #the assignedToSharingEntries variable designates the HEROS, this variable can be empty or an array #with a bunch of additional details about the hero. 
This is why we check the length of the array to parse accordingly school_filtered[school_field] = school[school_field][0]['fullName'] elif school_field == 'assignedToSharingEntries' and len(school[school_field]) == 0: school_filtered[school_field] = '' else: try: if school_field in ['****','****']: school_filtered[school_field] = str(datetime.datetime.fromtimestamp(school[school_field]/1000)) else: school_filtered[school_field] = school[school_field] except: continue filtered_schools.append(school_filtered) for school in filtered_schools: school['stage'] = custom_field_value_map['stage_names'][school['stageKey']] del school['stageKey'] school_keys = list(school['fields'].keys()) school_custom_field_keys = list(school['fields'].keys()) #we turn this into a list because if we did for "field_key in school['fields'].keys()" python #would throw an error because it would update before the loop is done for custom_field_key in wanted_custom_field_keys: try: #Use names instead of keys for easier readability custom_field_name = pipeline_keys[custom_field_key] #Check if it is a wanted key. If it is wanted, create new record for the key where we use #the name of the column/key rather than the integer lookup value in streak. 
if isinstance(school['fields'][custom_field_key], list): if len(school['fields'][custom_field_key]) > 0: school[custom_field_name] = [custom_field_value_map[custom_field_name][item] for item in school['fields'][custom_field_key]] else: school[custom_field_name] = '' else: school[custom_field_name] = (custom_field_value_map[custom_field_name][school['fields'][custom_field_key]] if custom_field_name in list(custom_field_value_map.keys()) else school['fields'][custom_field_key]) if 'date' in custom_field_name.lower() or 'contact' in custom_field_name.lower(): school.update({custom_field_name : str(datetime.datetime.fromtimestamp(school[custom_field_name]/1000))}) #Delete the old record of the key del school['fields'][custom_field_key] except: school[custom_field_name] = '' try: del school['fields'] except: continue # pp.pprint(filtered_schools) all_schools_df = pd.DataFrame(filtered_schools) all_schools_df.fillna(value='', inplace=True) all_schools_df = all_schools_df[['****']] all_schools_df.rename( columns={'****'}, inplace=True ) ########################################################################################## #Initialize variables for Upswing API Call upswing_api_secret = '****' url = '****' headers = { 'Content-Type': 'application/json', '****': upswing_api_secret, 'Accept': 'application/json' } method = '****' ########################################################################################## #Push data to the DB errors = {} for columns, values in all_schools_df.iterrows(): params = list(values) payload = { 'method': method, 'params': params, 'jsonrpc': '2.0', 'id': 0, } response = requests.post(url, data=json.dumps(payload), headers=headers) print(response.text) if 'error' in response.text: errors[params[0]] = params else: continue pp.pprint(errors)
22,287
e4e59435f2df853c45e6442f0a69f1649a7069eb
print("4.3.4+5")
import turtle
import math

bob = turtle.Turtle()


def circle(t, r, angle):
    """Draw an arc of ``angle`` degrees of a circle of radius ``r`` using turtle ``t``.

    The arc is approximated by ``angle`` straight segments, one per degree of arc.
    """
    # Length of a one-degree slice of the circumference.
    step = math.pi * r * 2 / 360
    for _ in range(angle):
        t.fd(step)
        t.lt(1)  # turn one degree per segment (the original wrote 360/360)


# BUG FIX: the original wrapped this call in print(), which printed the
# function's None return value; the drawing is the only intended output.
circle(bob, 40, 360)
22,288
31b2a515048a0c95241d854a5e7ecb55990222b1
# This file is responsible for checking secure hashes
# against configured user/password situations.
import os
import string
import secrets
import hmac
import datetime
import LogActivity
import pickle
import hashlib
#from PricesPaidGUI.ppGuiConfig import RelativePathToHashesFile,TokenTimeout
from configs.commonconfigs import RelativePathToHashesFile, TokenTimeout

hashes = {}

# username -> [failed-login count, timestamp of most recent failure]
GLOBAL_BAD_LOGIN = {}
LIMIT_NUMBER_BAD_LOGINS = 5
# Seconds a locked-out user must wait before retrying.
# We'll make them wait one hour if they have 5 bad logins.
#LIMIT_TIME_TO_RETRY = 60*60
# NOTE(review): currently 1 second, presumably for testing -- confirm before deploy.
LIMIT_TIME_TO_RETRY = 1


def loadHashes():
    """Load the username -> password-hash map from disk (empty dict if absent)."""
    hshs_file = RelativePathToHashesFile
    if os.path.exists(hshs_file):
        # NOTE(review): pickle.load is only safe if the hashes file cannot be
        # written by untrusted parties -- confirm file permissions.
        # (Also fixes a file-handle leak: the original never closed the file.)
        with open(hshs_file, "rb") as f:
            return pickle.load(f)
    return {}


def record_bad_login(username):
    """Record one failed login attempt for *username* at the current time.

    The first failure initialises the counter at 0, so lockout triggers after
    LIMIT_NUMBER_BAD_LOGINS + 1 failures -- TODO confirm that is intended.
    """
    if username not in GLOBAL_BAD_LOGIN:
        GLOBAL_BAD_LOGIN[username] = [0, datetime.datetime.now()]
    else:
        entry = GLOBAL_BAD_LOGIN[username]
        entry[0] += 1
        entry[1] = datetime.datetime.now()


def does_authenticate(username, password, p3apisalt, remote_addr):
    """Return True iff *username*/*password* (salted with *p3apisalt*) match the stored hash.

    Enforces a temporary lockout after repeated failures and logs every failure.
    """
    hashes = loadHashes()
    if username in GLOBAL_BAD_LOGIN:
        elapsed = datetime.datetime.now() - GLOBAL_BAD_LOGIN[username][1]
        if elapsed >= datetime.timedelta(seconds=LIMIT_TIME_TO_RETRY):
            # The retry window has passed, so we give them a pass....
            GLOBAL_BAD_LOGIN.pop(username, None)
    if username in GLOBAL_BAD_LOGIN:
        if GLOBAL_BAD_LOGIN[username][0] >= LIMIT_NUMBER_BAD_LOGINS:
            # Probably should have a separate log message for this..
            LogActivity.logTooManyLoginAttempts(username, remote_addr)
            return False
    if username not in hashes:
        LogActivity.logBadCredentials(username, remote_addr)
        record_bad_login(username)
        return False
    # BUG FIX: hashlib requires bytes on Python 3; the original hashed the raw
    # str concatenation, which raises TypeError.  Compare with
    # hmac.compare_digest to avoid leaking information through timing.
    digest = hashlib.sha256((password + p3apisalt).encode("utf-8")).hexdigest()
    if hmac.compare_digest(hashes[username], digest):
        return True
    LogActivity.logBadCredentials(username, remote_addr)
    record_bad_login(username)
    return False


# session_id -> [anti-CSRF token, timestamp of last refresh]
GLOBAL_SESSION_DICT = {}


def create_session_id():
    """Create a new session with a fresh anti-CSRF token; return the session id."""
    session_id = get_rand_string(13)
    acsrf = get_rand_string(13)
    GLOBAL_SESSION_DICT[session_id] = [acsrf, datetime.datetime.now()]
    return session_id


def update_acsrf_nonce_form(session_id, remote_addr=None):
    """Rotate the anti-CSRF token for *session_id* and refresh its timestamp.

    BUG FIX: the original called update_new_acsrf without the required
    remote_addr argument, raising TypeError; remote_addr now defaults to None.
    """
    return update_new_acsrf(session_id, get_rand_string(13), remote_addr)


def update_acsrf(session_id, remote_addr):
    """Refresh the timestamp of *session_id*, keeping its current token."""
    return update_new_acsrf(session_id, GLOBAL_SESSION_DICT[session_id][0], remote_addr)


def update_new_acsrf(session_id, acsrf, remote_addr=None):
    """Store *acsrf* for *session_id* with a fresh timestamp; log and return the id."""
    GLOBAL_SESSION_DICT[session_id] = [acsrf, datetime.datetime.now()]
    LogActivity.logDebugInfo(
        "SETTING ACSRF session, acsrf " + session_id + "." + GLOBAL_SESSION_DICT[session_id][0],
        remote_addr)
    return session_id


CHARS = string.ascii_letters + string.digits


def get_rand_string(length):
    """Return a cryptographically strong random alphanumeric string of *length* chars."""
    # secrets, not random: these strings become session ids and CSRF tokens.
    return ''.join(secrets.choice(CHARS) for _ in range(length))


def is_valid_acsrf_old(session_id):
    """Deprecated: validate only that *session_id* exists and has not timed out."""
    if session_id in GLOBAL_SESSION_DICT:
        age = datetime.datetime.now() - GLOBAL_SESSION_DICT[session_id][1]
        if age < datetime.timedelta(seconds=TokenTimeout):
            return True
        LogActivity.logTimeout(session_id)
        return False
    LogActivity.logMissingSession(session_id)
    return False


def is_valid_acsrf(session_id, acsrf, remote_addr):
    """Validate that *session_id* exists, is not timed out, and *acsrf* matches."""
    if session_id not in GLOBAL_SESSION_DICT:
        LogActivity.logMissingSession(session_id, remote_addr)
        return False
    stored_acsrf, timestamp = GLOBAL_SESSION_DICT[session_id]
    age = datetime.datetime.now() - timestamp
    if age >= datetime.timedelta(seconds=TokenTimeout):
        LogActivity.logTimeout(session_id, remote_addr)
        return False
    # Constant-time comparison of the submitted token against the stored one.
    if not hmac.compare_digest(acsrf, stored_acsrf):
        LogActivity.logDebugInfo(
            "ACSRF Mismatch provided vs. stored :" + acsrf + "," + stored_acsrf,
            remote_addr)
        return False
    return True


def get_acsrf(session_id):
    """Return the anti-CSRF token stored for *session_id* (KeyError if absent)."""
    return GLOBAL_SESSION_DICT[session_id][0]


def del_session(session_id, remote_addr):
    """Remove *session_id* from the session table and log the outcome."""
    GLOBAL_SESSION_DICT.pop(session_id, None)
    if session_id in GLOBAL_SESSION_DICT:
        LogActivity.logMissingSession(str(session_id) + "failed to remove", remote_addr)
    else:
        LogActivity.logMissingSession(str(session_id) + "removed", remote_addr)
22,289
57f760b112b7c17f121103f06ed4b7df2476cc85
from xlrd import open_workbook
import os


class read_excel():
    """Read test-case rows from the shared Excel workbook under Config/."""

    # <parent of cwd>\Config\test.xlsx -- resolved at import time.
    excel_path = os.path.dirname(os.getcwd()) + r'\Config\test.xlsx'

    def readExcel(self, sheet_name):
        """Return every row of *sheet_name* whose first cell is not the
        'case_name' header marker."""
        book = open_workbook(self.excel_path)
        sheet = book.sheet_by_name(sheet_name)
        rows = []
        for idx in range(sheet.nrows):
            row = sheet.row_values(idx)
            if row[0] != 'case_name':
                rows.append(row)
        return rows


if __name__ == '__main__':
    excel = read_excel().readExcel('test')
    print(excel)
22,290
08b74ac46ae85a608d2f06b56acc2dbe45065b6a
import time atmpin=[1111,2222,3333,4444,5555] #default pins print('Hello... \nwelcome to the ATM') time.sleep(3)#time dealy print("\n\nPleas chose your language\n") print("kannada ---> 1") print("English ---> 2") print("Hindi -----> 3") lang=int(input('Enter here :')) #this code display the kannada text if lang==1: pin=int(input("ನಿಮ್ಮ ಪಿನ್ ನಮೂದಿಸಿ : ")) if len(str(pin))!=4: print("**********ದಯವಿಟ್ಟು ಮಾನ್ಯವಾದ ಪಿನ್ ನಮೂದಿಸಿ**********") elif pin in atmpin: print("\nನಿಮ್ಮ ಉದ್ದೇಶವನ್ನು ಆರಿಸಿ : \n") print("Withdraw -------> 1") print("ಪಿನ್ ಬದಲಾವಣೆ -----> 2") print("ಮಿನಿಸ್ಟೇಟ್ಮೆಂಟ್ --------> 3\n") secstep=int(input("ಇಲ್ಲಿ ನಮೂದಿಸಿ : ")) if secstep==1: for i in range(0,5): amount=int(input("ಮೊತ್ತವನ್ನು ನಮೂದಿಸಿ : ")) if amount%100!=0: print("\n**********ದಯವಿಟ್ಟು 100 ರ ಗುಣಾಕಾರವನ್ನು ನಮೂದಿಸಿ**********\n") continue elif amount>15000: print("\n**********ಗರಿಷ್ಠ ಮಿತಿ 15000 ಮಾತ್ರ**********\n") else: print("Loading.......") time.sleep(4) print("\nದಯವಿಟ್ಟು ನಿಮ್ಮ ಹಣವನ್ನು ಸಂಗ್ರಹಿಸಿ...\n") time.sleep(2) break elif secstep==2: pinchange=int(input("ನಿಮ್ಮ ಹಳೆಯ ಪಿನ್ ಅನ್ನು ನಮೂದಿಸಿ : ")) pinstr=str(pinchange) if len(pinstr)==4: if pinchange in atmpin: newpin=int(input("4 ಅಂಕಿಯ ಹೊಸ ಪಿನ್ ನಮೂದಿಸಿ : ")) new=atmpin.index(pinchange) atmpin[new]=newpin print("\n----ಪಿನ್ ಬದಲಾವಣೆ ಯಶಸ್ವಿಯಾಗಿ ಪೂರ್ಣಗೊಂಡಿದೆ----\n") else: print("\n**********ದಯವಿಟ್ಟು ಸರಿಯಾದ ಪಿನ್ ನಮೂದಿಸಿ**********\n") else: print("\n**********ದಯವಿಟ್ಟು ಮಾನ್ಯವಾದ ಪಿನ್ ನಮೂದಿಸಿ**********\n") elif secstep==3: print("\nಕ್ಷಮಿಸಿ, ನಿಮ್ಮ ಸಚಿವಾಲಯವನ್ನು ಮುದ್ರಿಸಲು ಯಂತ್ರದಲ್ಲಿ ಹೆಚ್ಚಿನ ಪತ್ರಿಕೆಗಳಿಲ್ಲ...\n") else: print("\n**********ದಯವಿಟ್ಟು ಎಟಿಎಂ ಕಾರ್ಡ್ ಅನ್ನು ಮಾಲೀಕರಿಗೆ ಹಿಂತಿರುಗಿಸಿ**********\n") #this code display the English text if lang==2: pin=int(input("enter your pin : ")) if len(str(pin))!=4: print("\n**********please enter a valid pin**********\n") elif pin in atmpin: print("choose your purpose :\n") print("Withdraw -----> 1") print("Pin Change -----> 2") print("Ministatement -----> 3\n") secstep=int(input("enter here : ")) if secstep==1: for i in 
range(0,5): amount=int(input("Enter the amount : ")) if amount%100!=0: print("\n\n**********Please enter the multipule of 100's**********\n\n") continue elif amount>15000: print("\n\n**********maximum limit is 15000 only**********\n\n") else: print("Loading.....") time.sleep(4) print("\nPlease collect your cash...") time.sleep(2) break elif secstep==2: pinchange=int(input("Enter your old pin : ")) pinstr=str(pinchange) if len(pinstr)==4: if pinchange in atmpin: newpin=int(input("Enter 4 digit new pin : ")) new=atmpin.index(pinchange) atmpin[new]=newpin print("\n-----Pin change successfully completed-----\n") else: print("\n**********Please enter correct pin**********\n") else: print("\nplease enter a valid pin\n") elif secstep==3: print("\nSorry, no more papers in machine to print your ministatement\n") else: print("\n**********Please return back the ATM card to owner**********\n") #this code display the hindi text if lang==3: pin=int(input("अपना पिन दर्ज करो : ")) if len(str(pin))!=4: print("कृपया एक मान्य पिन दर्ज करें") elif pin in atmpin: print("cअपने उद्देश्य को पूरा करें") print("Withdraw -----> 1") print("पिन बदलें -----> 2") print("Ministatement -----> 3") secstep=int(input("यहाँ से प्रवेश करें : ")) if secstep==1: for i in range(0,5): amount=int(input("राशि दर्ज करें : ")) if amount%100!=0: print("कृपया 100 के गुणा में प्रवेश करें") continue elif amount>15000: print("अधिकतम सीमा 15000 ही है") else: print("Loading.....") time.sleep(4) print("कृपया अपना कैश जमा करें") time.sleep(2) break elif secstep==2: pinchange=int(input("अपना पुराना पिन डालें : ")) pinstr=str(pinchange) if len(pinstr)==4: if pinchange in atmpin: newpin=int(input("4 अंक नया पिन दर्ज करें : ")) new=atmpin.index(pinchange) atmpin[new]=newpin print("पिन परिवर्तन सफलतापूर्वक पूरा हुआ") else: print("कृपया सही पिन दर्ज करें") else: print("कृपया एक मान्य पिन दर्ज करें") elif secstep==3: print("क्षमा करें, आपके मंत्रालयों को मुद्रित करने के लिए मशीन में कोई और कागजात नहीं है") else: print("कृपया 
स्वामी को एटीएम कार्ड वापस करें ") time.sleep(1) print("Thanks for Transaction...")
22,291
f4f838f6fb30a94476d51fbcbc569c2fb16147b5
from __future__ import print_function import json from cloud_processor import CloudProcessor LANGUAGES = 'de,es,fr,it,pt' CONFIG_FILENAME = 'diagnostics-service-config.xml' RESOURCE_DESTINATION = './kc_diagnostics/resources/' CONFIG_FILENAME_PREFIX = 'diagnostics-service-config' class Diagnostics(): def __init__(self): self.languages = LANGUAGES.split(',') self.processor = CloudProcessor(CONFIG_FILENAME, RESOURCE_DESTINATION, self.languages) self.config_data = self.processor.read_config_file(CONFIG_FILENAME) self.translations = self.load_translations() def load_translations(self): translations = {} for language in self.languages: config_name = '{}-{}.xml'.format( CONFIG_FILENAME_PREFIX, language ) translations[language] = self.processor.read_config_file(config_name) return translations def generate_alerts(self, data): if isinstance(data, str): data = json.loads(data) processed_data = self.processor.process_data(self.config_data, data, self.translations) return json.dumps( processed_data ) if __name__ == '__main__': diag = Diagnostics() diag.generate_alerts({})
22,292
22326ebecb0f24a8b936176ad906924e8f77e9d7
from collections import defaultdict from heapq import * import random import torch def dijkstra(edges, f, t): g = defaultdict(list) for l,r,c in edges: g[l].append((c,r)) q, seen, mins = [(0,f,())], set(), {f: 0} while q: (cost,v1,path) = heappop(q) if v1 not in seen: seen.add(v1) path = (v1, path) if v1 == t: return (cost, path) for c, v2 in g.get(v1, ()): if v2 in seen: continue prev = mins.get(v2, None) next = cost + c if prev is None or next < prev: mins[v2] = next heappush(q, (next, v2, path)) return -1, None def raster(h,w,sl): return h*sl + w def deraster(x, sl): w = x%sl h = (x - w)//sl return h, w def flatten(nest_path, path): if len(nest_path) == 0: return path else: path.append(nest_path[0]) return flatten(nest_path[1], path) def build_all_waypoints(args): train_waypoints = [] test_waypoints = [] for s in range(args.num_waypoints): train_waypoints.append([]) test_waypoints.append([]) for i in range(args.sidelength): for j in range(args.sidelength): if torch.rand(1).item() < args.pct_unseen_components: test_waypoints[s].append((i,j)) else: train_waypoints[s].append((i,j)) return train_waypoints, test_waypoints class GW(): def __init__(self, sidelength=10, num_blobs=6, blob_size=2, gen_len = 15): self.sidelength = sidelength self.num_blobs = num_blobs self.blob_size = blob_size self.gen_len = gen_len def hash_waypoints(self, waypoints): return raster(self.waypoints[-1][0], self.waypoints[-1][1], self.sidelength) + self.sidelength**2 def register_task(self, waypoints): self.waypoints = waypoints self.waypoints_idx = [raster(w[0], w[1], self.sidelength) + self.sidelength**2 for w in waypoints] self.waypoints_dict = {} for i in range(len(self.waypoints)): self.waypoints_dict[self.waypoints[i]] = i def check_reachable(self, h, w, blob_pos): if h < 0 or w < 0: return False if h>= self.sidelength or w >= self.sidelength: return False for i in range(len(blob_pos)//2): bh = blob_pos[2*i] bw = blob_pos[2*i + 1] if h >= bh and h < bh+self.blob_size and w >= bw and w 
< bw+self.blob_size: return False return True def generate_world_and_path(self): ok = False count = 0 while not ok: count += 1 blob_pos = torch.randint(0, self.sidelength, (2*self.num_blobs,)).tolist() if all([self.check_reachable(h,w, blob_pos) for (h,w) in self.waypoints]): ok = True if count > 1000: raise Exception('world is too small? cant find place for blobs') self.blob_pos = blob_pos edges = [] #(h,w)--> h*self.sidelength + w for h in range(self.sidelength): for w in range(self.sidelength): for u in [h + 1, h, h - 1]: for v in [w + 1, w, w - 1]: if self.check_reachable(u, v, blob_pos): source = raster(h, w, self.sidelength) target = raster(u, v, self.sidelength) edges.append((source, target, 1)) path = [] for i in range(len(self.waypoints) - 1): start = self.waypoints[i] end = self.waypoints[i + 1] start = raster(start[0], start[1], self.sidelength) end = raster(end[0], end[1], self.sidelength) cost, local_path = dijkstra(edges, start, end) if local_path: fpath = flatten(local_path,[])[::-1] else: fpath = [] for p in fpath[:-1]: path.append(p ) path.append(end) self.edges = edges self.path = path def generate(self): self.generate_world_and_path() source = [] for i in range(0,len(self.blob_pos), 2): source.append(raster(self.blob_pos[i], self.blob_pos[i+1], self.sidelength)) if self.path is None: return (source, [self.waypoints_idx[0]]*self.gen_len) else: target = [self.waypoints_idx[-1]]*self.gen_len for i in range(min(len(self.path), self.gen_len)): target[i] = self.path[i] + self.sidelength**2 return (source, target) def display_ascii(self): m = '' for h in range(self.sidelength): # for w in range(self.sidelength): # m = m + ' ' # m = m + '\n' for w in range(self.sidelength): wid = self.waypoints_dict.get((h,w)) if wid is not None: m = m + ' ' + str(wid) + ' ' elif self.check_reachable(h, w, self.blob_pos): m = m + ' . 
' else: m = m + ' B ' m = m + '\n' for w in range(self.sidelength): m = m + ' ' m = m + '\n' return m if __name__ == "__main__": from tqdm import tqdm import pickle import argparse parser = argparse.ArgumentParser() parser.add_argument("--just_waypoints_list", action="store_true") parser.add_argument("--waypoints_list_path", default='') parser.add_argument("--sidelength", type=int, default=10) parser.add_argument("--num_blobs", type=int, default=8) parser.add_argument("--num_train_tasks", type=int, default=10000) parser.add_argument("--num_test_tasks", type=int, default=100) parser.add_argument("--num_waypoints", type=int, default=3) parser.add_argument("--pct_unseen_components", type=float, default=.1) parser.add_argument("--examples_per_task", type=int, default=1500) parser.add_argument("--save_path", default='/checkpoint/aszlam/laja/gridworld_tasks_comp.pkl') args = parser.parse_args() generator = GW(sidelength=args.sidelength, num_blobs=args.num_blobs) out = [] if args.just_waypoints_list: train_waypoints, test_waypoints = build_all_waypoints(args) f = open(args.waypoints_list_path, 'wb') pickle.dump([train_waypoints, test_waypoints], f) f.close() quit() else: if args.waypoints_list_path == '': train_waypoints, test_waypoints = build_all_waypoints(args) else: f = open(args.waypoints_list_path, 'rb') l = pickle.load(f) train_waypoints, test_waypoints = l f.close() for i in tqdm(range(args.num_train_tasks)): waypoints = [] for s in range(args.num_waypoints): waypoints.append(random.choice(train_waypoints[s])) generator.register_task(waypoints) task_out = [] for j in range(args.examples_per_task): x = generator.generate() task_out.append(x) out.append((task_out, generator.waypoints_idx, None)) for i in tqdm(range(args.num_test_tasks)): waypoints = [] new_task_position = random.choice([0,1,2]) for s in range(args.num_waypoints): if s == new_task_position: waypoints.append(random.choice(test_waypoints[s])) else: waypoints.append(random.choice(train_waypoints[s])) 
generator.register_task(waypoints) task_out = [] for j in range(args.examples_per_task): x = generator.generate() task_out.append(x) out.append((task_out, generator.waypoints_idx, new_task_position)) sp = args.save_path f = open(sp, 'wb') pickle.dump(out, f) f.close()
22,293
eafeb279a425108049c8953017924ccfc711e78e
from django.db import models
import datetime as dt


class Category(models.Model):
    """A label an Image can be filed under."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name


class Location(models.Model):
    """Where an Image was taken: a city plus its country."""
    city = models.CharField(max_length=30)
    country = models.TextField()

    def save_(self):
        # Thin wrapper around Model.save(); kept for existing callers.
        self.save()


# Create your models here.
class Image(models.Model):
    """An uploaded gallery image with its category and location."""
    image = models.ImageField(upload_to='gallery/')
    image_name = models.CharField(max_length=30)
    image_description = models.CharField(max_length=100)
    pub_date = models.DateTimeField(auto_now_add=True)
    category = models.ForeignKey('Category', on_delete=models.CASCADE,)
    location = models.ForeignKey('Location', on_delete=models.CASCADE,)

    class Meta:
        ordering = ['image_name']

    def __str__(self):
        return self.image_name

    @classmethod
    def get_image(cls, id):
        """Return the Image with primary key *id*, falling back to Image 1.

        BUG FIX: the original caught a bare ``DoesNotExist`` name, which is
        undefined in this module scope, so a missing id raised NameError
        instead of returning the fallback image.
        """
        try:
            return cls.objects.get(id=id)
        except cls.DoesNotExist:
            return cls.objects.get(id=1)

    @classmethod
    def search_by_category(cls, search_term):
        """Return images whose category name contains *search_term*."""
        return cls.objects.filter(category__name__icontains=search_term)

    @classmethod
    def search_by_location(cls, search_term):
        """Return the first Location whose country contains *search_term*.

        BUG FIX: the original filtered Location on ``location__country``, but
        Location has no ``location`` field, so the query always raised
        FieldError.  NOTE(review): this may have been meant to return Images
        filtered by ``location__country`` (mirroring search_by_category) --
        confirm intent with the callers.
        """
        return Location.objects.filter(country__icontains=search_term).first()
22,294
cc783a5eec0b4506458beee5c0dec100033c541a
import pygame
from pygame.locals import DOUBLEBUF as DB
import cv2


class DisplayImage(object):
    """Show RGB frames either in a pygame window or an OpenCV window."""

    def __init__(self, W, H):
        """Open a double-buffered pygame window of W x H pixels."""
        pygame.init()
        self.screen = pygame.display.set_mode((W, H), DB)
        self.surface = pygame.Surface(self.screen.get_size()).convert()

    def showImage(self, img):
        """Blit an image array to the pygame window and flip the display."""
        # pygame surfaces index as (width, height), so swap the first two
        # axes; the channel order [0, 1, 2] is kept as-is for RGB input.
        frame = img.swapaxes(0, 1)[:, :, [0, 1, 2]]
        pygame.surfarray.blit_array(self.surface, frame)
        self.screen.blit(self.surface, (0, 0))
        pygame.display.flip()

    def show_frame_opencv(self, img):
        """Display the frame through an OpenCV window instead of pygame."""
        cv2.imshow('image', img)
        cv2.waitKey(1)
22,295
94561094c652f1f2a119ee97adb022284fb064c5
# Werkzeug
from werkzeug.test import Client
# Pytest
import pytest
# Retic
from retic import App as app, Router
# Utils
from retic.services.core.general import get_body_request

# Route paths registered on the test router.
PATHS = [
    ("/withoutress")
]

# Controllers attached to every registered path.
CONTROLLERS = [
    lambda req, res: print("REST api Python example 🐍")
]


@pytest.fixture
def app_client():
    """Return a client for an app with no routes registered."""
    app.clear()
    return Client(app.application)


@pytest.fixture
def app_routes():
    """Return a client for an app with each PATH (and "/") registered as a GET route."""
    app.clear()
    router = Router()
    for path in PATHS:
        # define a new path using the response from a path definition
        router.get(path, *CONTROLLERS).get("/", *CONTROLLERS)
    app.use(router)
    return Client(app.application)


@pytest.mark.lib_api
@pytest.mark.parametrize("path", PATHS)
def test_response_without_method(app_client, path):
    """A GET on an app without routes must respond 404."""
    app_iter, status, headers = app_client.get(path)
    assert status.upper() == '404 NOT FOUND', \
        "A status 404 is necesary, but a status {} was got from the request".format(status)


@pytest.mark.lib_api
@pytest.mark.parametrize("path", PATHS)
def test_response_without_method_routes(app_routes, path):
    """A GET on a registered route must respond 200 with the documented body."""
    app_iter, status, headers = app_routes.get(path)
    assert status.upper() == '200 OK', \
        "A status 200 is necesary, but a status {} was got from the request".format(status)
    assert get_body_request(app_iter) == '200 OK', \
        "The default from the api when this one doesn't have routes is different to documentation"
22,296
c307dc1126d2e02f7570fe4ced0d740e7ee2f936
from django.shortcuts import render, HttpResponse
import json


# Create your views here.
def index(request):
    """Plain-text landing endpoint confirming the app is wired up."""
    return HttpResponse('Hi django :)')


def about(request):
    """Return the author's profile as JSON."""
    me = {
        'name': 'jj',
        'role': 'student',
        'email': 'jay@gmail.com',
    }
    # FIX: declare the JSON content type so clients parse the body correctly
    # (the original served it with the default text/html header).
    return HttpResponse(json.dumps(me), content_type='application/json')


def portfolio(request):
    """Render the portfolio HTML template."""
    return render(request, 'portfolio.html')


# NOTE(review): shadows the builtin help(); kept because the name is
# presumably referenced from urls.py -- confirm before renaming.
def help(request):
    """Render the help HTML template."""
    return render(request, 'help.html')
22,297
5860a80c67c2379f92a8e98c7c19ad8487be07c9
# Generated by Django 3.0.8 on 2020-07-11 06:23 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('products', '0002_auto_20200707_1509'), ] operations = [ migrations.AlterModelTable( name='product', table='tbl_products', ), ]
22,298
97e5e235637a901d8bcf030b831978d7b58059f7
''' Written by London Lowmanstone Based on code I wrote in high school. ''' import random import useful_functions as useful def test_against(players, start_game, game_parameters=None, rounds=50, games_per_round=1000, randomize_first_player=True, is_initial_state=True, comment=2, pct_increment=2): '''Returns a tuple of lists in the form (highs, lows, ranges, averages) with stats for player 0's performance against player 1 Each list is in the form [wins, losses, ties], and the names are pretty self-explanatory. For example, highs is a list of the most amount of wins, losses, and ties it achieved across any round. players: a tuple in the form (player 0, player 1) where both are Player objects start_game: an instance of a Game (or Game subclass) that the players should start at for each game they play rounds: the number of rounds the players will play against each other games_per_round: how many games are played per round randomize_first_player: whether or not to randomize who goes first; if False, uses the active player in the start game is_initial_state: whether or not the start_game is in its initial state (if False, randomizing the first player may take longer) comment: how verbose the print statements are; higher integers give more print statements pct_increment: how much the percentage should increase by when it prints out ''' # default to values for each list that will be overridden lows = [games_per_round + 1] * 3 highs = [-1] * 3 overall = [0] * 3 for i in range(rounds): results = [0] * 3 # [wins, losses, ties] for the player for game_num in range(games_per_round): if comment > 3: print("Testing game {}/{}".format((game_num + 1), games_per_round)) # set up the game from the start_game if randomize_first_player: # randomize who goes first if is_initial_state: # no need to swap the players; no one has moved yet game = start_game.get_copy() if random.choice([True, False]): game.active_player = 0 else: game.active_player = 1 else: # we need to swap the players if 
a different one is going to go first if random.choice([True, False]): game = start_game.get_copy() else: game = start_game.get_swapped_copy() else: # not randomizing the first player, so we can just copy the starting game game = start_game.get_copy() while game.who_won() is None: players[game.active_player].make_move(game) if comment > 5: print(game) # update the results; this works because player 0 is always the player we care about results[game.who_won()] += 1 if comment > 3: if comment > 4: print(game) print("Results so far in round:\n{}".format(results)) if comment > 2: print("Results of one round:\n{}".format(results)) if pct_increment > 0 and comment > 1: useful.print_percent(i, rounds, increment_amt=pct_increment, round_amt=i) # update the lists for j, testVal in enumerate(results): lows[j] = min(lows[j], testVal) highs[j] = max(highs[j], testVal) overall[j] += testVal # compute the other stats lists ranges = [highs[i] - lows[i] for i in range(3)] avgs = [overall[i] / rounds for i in range(3)] if comment > 0: print("Highs: {}".format(highs)) print("Lows: {}".format(lows)) print("Ranges: {}".format(ranges)) print("Averages: {}".format(avgs)) return (highs, lows, ranges, avgs) if __name__ == "__main__": '''Play and test different players. Currently this is set up for you to play against the perfect tic-tac-toe player ''' ''' These are some notes about how good different players were against each other: # significant difference: 74 - 15, no ties. 
# test_against((BasicMonteCarloPlayer(30, 1), BasicMonteCarloPlayer(30)), ConnectFour, 100, 100, comment=4) # no significant difference: 29 - 28 - 2 # test_against((BasicMonteCarloPlayer(40, 1), BasicMonteCarloPlayer(30, 1)), ConnectFour, 1, 100, comment=4) # significant: 71 - 6 - 2 (Mainly lost when it had a perfect trap set up that was blocked by an opponent, then it gave up easy wins; should be fixed with minimax) # test_against((BasicMonteCarloPlayer(5, 3), BasicMonteCarloPlayer(30)), ConnectFour, 1, 100, comment=6) ''' from tic_tac_toe import TicTacToe from connect_four import ConnectFour from otrio import Otrio from pentago import Pentago from players import RandomPlayer, HumanPlayer from advised_monte_carlo_player import AdvisedMonteCarloPlayer from basic_monte_carlo_player import BasicMonteCarloPlayer from solve_player import SolvePlayer from sizeable_connect_x import SizeableConnectX from onitama import Onitama, PawnCountingEvaluator from onitama import move_to_string as onitama_move_to_string # game # start_game = SizeableConnectX(4, 4, 4) # start_game = Otrio() # start_game = Onitama() start_game = Pentago() # player 1 # p1 = HumanPlayer() # p1 = AdvisedMonteCarloPlayer(2, 2, 2) # p1 = BasicMonteCarloPlayer() # p1 = RandomPlayer() # Connect4 plays with AdvisedMonteCarloPlayer(7, 2, 4) # the 4 layers of lookahead is too far, and 7 is too much position evaluation # p1 = AdvisedMonteCarloPlayer(7, 2, 4) # visible progress, but monte carlo simulations are still too slow # p1 = AdvisedMonteCarloPlayer(4, 7, 2) # just barely playable p1 = AdvisedMonteCarloPlayer(8, 20, 1) # player 2 # p2 = BasicMonteCarloPlayer(5, 2) # p2 = RandomPlayer() p2 = HumanPlayer() ''' # Create a player that solves the game s = SolvePlayer() print("Solving...") s.make_move(start_game.get_copy()) print("Solved!") p2 = s ''' # Test how good player 1 is against player 2 test_against((p1, p2), start_game, rounds=1, games_per_round=1, comment=6, randomize_first_player=True)
22,299
707a867d81e17227fb9385b07c3f8c94be1697c8
#!/usr/bin/env python
# encoding=utf8
from views.base import BaseView
from core.logic import Group


class MineView(BaseView):
    """Page listing the groups created by the current user."""

    def get(self):
        current = self.get_current_user()
        owned_groups = Group.filter(created_user_id=current.id)
        # Serialize each group via its info() mapping for the template.
        group_maps = [group.info() for group in owned_groups]
        self.render("group/mine.html", groups=group_maps)