text stringlengths 38 1.54M |
|---|
program_filename = NIH-diffractometer_PP.ab
ip_address = 'nih-instrumentation.cars.aps.anl.gov:2000' |
import functools
import json
import torch
import tqdm
import cargan
###############################################################################
# Objective evaluation
###############################################################################
def pitch(name, datasets, checkpoint, num=256, gpu=None):
    """Perform objective pitch evaluation of a vocoder checkpoint.

    For each dataset, vocodes up to ``num`` test items, extracts pitch and
    periodicity from both the reference and the vocoded audio, and writes
    aggregate plus per-file metrics to
    ``<EVAL_DIR>/objective/<dataset>/<name>.json``.

    Args:
        name: Tag used for the output JSON filename.
        datasets: Iterable of dataset names to evaluate.
        checkpoint: Model checkpoint passed to ``cargan.from_features``.
        num: Maximum number of test items per dataset.
        gpu: GPU index, or None for CPU.
    """
    # Evaluate on each dataset
    for dataset in datasets:
        # Setup output directory
        directory = cargan.EVAL_DIR / 'objective' / dataset
        directory.mkdir(exist_ok=True, parents=True)

        # Running metrics over the whole dataset plus a per-file metric
        batch_metrics = cargan.evaluate.objective.metrics.Pitch()
        metrics = cargan.evaluate.objective.metrics.Pitch()

        # Pitch and periodicity extraction
        pitch_fn = functools.partial(
            cargan.preprocess.pitch.from_audio,
            gpu=gpu)
        device = torch.device('cpu' if gpu is None else f'cuda:{gpu}')

        # Setup data loader for THIS dataset.
        # Fixed: previously called loaders(datasets) with the full list, so
        # every dataset was evaluated on the same combined loader.
        loader = cargan.data.loaders(dataset)[2]
        iterator = tqdm.tqdm(
            loader,
            total=num,
            dynamic_ncols=True,
            desc='Evaluating')
        file_results = {}
        for i, (features, audio, _, _) in enumerate(iterator):
            # Stop after num samples
            if i >= num:
                break

            # Get true pitch
            audio = audio.to(device)
            true_pitch, true_periodicity = pitch_fn(audio.squeeze(0))

            # Vocode
            vocoded = cargan.from_features(features, checkpoint, gpu)

            # Estimate pitch of the vocoded audio
            pred_pitch, pred_periodicity = pitch_fn(vocoded)

            # Get metrics for this file (reset so it reflects one file only)
            metrics.reset()
            metrics.update(
                true_pitch,
                true_periodicity,
                pred_pitch,
                pred_periodicity)
            file_results[i] = metrics()

            # Update running metrics
            batch_metrics.update(
                true_pitch,
                true_periodicity,
                pred_pitch,
                pred_periodicity)

        # Write aggregate and per-file results
        results = batch_metrics()
        results['file_results'] = file_results
        with open(directory / f'{name}.json', 'w') as file:
            json.dump(results, file, ensure_ascii=False, indent=4)

        # Print to stdout
        print(results)
###############################################################################
# Synthetic cumsum experiment evaluation
###############################################################################
def cumsum(name, checkpoint, num, gpu=None):
    """Evaluate the synthetic cumsum experiment from a checkpoint.

    Runs inference at several truncation lengths, accumulates an L1 metric
    per length, saves tensors per trial for later plotting, and writes all
    results to ``<EVAL_DIR>/objective/cumsum/<name>/<name>.json``.

    Args:
        name: Tag used for the output directory and JSON filename.
        checkpoint: Model checkpoint passed to ``cargan.from_features``.
        num: Number of trials per length.
        gpu: GPU index, or None for CPU.
    """
    # Setup output directories
    directory = cargan.EVAL_DIR / 'objective' / 'cumsum' / name
    directory.mkdir(exist_ok=True, parents=True)

    # Target device (hoisted out of the loops; it never changes)
    device = torch.device('cpu' if gpu is None else f'cuda:{gpu}')

    # Evaluate at various lengths
    results = {}
    for length in [1024, 2048, 4096, 8192, 'full']:
        # Setup L1 metric
        l1 = cargan.evaluate.objective.metrics.L1()

        # Setup data loader
        loader = cargan.data.loaders('cumsum')[2]
        iterator = tqdm.tqdm(
            loader,
            dynamic_ncols=True,
            desc='Cumsum evaluation',
            total=num)
        for i, (cumsum_input, cumsum_output, _, _) in enumerate(iterator):
            # Stop once we've generated enough.
            # Fixed off-by-one: `i > num` produced num + 1 trials.
            if i >= num:
                break

            # Get directory to save results for this trial
            trial_directory = directory / str(i) / str(length)
            trial_directory.mkdir(exist_ok=True, parents=True)

            # Maybe truncate to the requested length
            if length != 'full':
                cumsum_input = cumsum_input[:, :, :length]
                cumsum_output = cumsum_output[:, :, :length]

            # Infer
            cumsum_pred = cargan.from_features(cumsum_input, checkpoint, gpu)

            # Place target on device
            cumsum_output = cumsum_output.to(device)

            # Update metric
            l1.update(cumsum_output, cumsum_pred)

            # Save all tensors for later plotting
            with cargan.data.chdir(trial_directory):
                torch.save(cumsum_input.cpu(), 'cumsum_input.pt')
                torch.save(cumsum_output.cpu(), 'cumsum_output.pt')
                torch.save(cumsum_pred.cpu(), 'cumsum_pred.pt')

        # Save result for this length
        results[str(length)] = l1()

    # Write results
    with open(directory / f'{name}.json', 'w') as file:
        json.dump(results, file, ensure_ascii=False, indent=4)

    # Print to stdout
    print(results)
|
from setuptools import setup, find_packages

setup(
    name='wonder_tool',
    version='1.0',
    packages=find_packages(),
    install_requires=[],
    entry_points={
        # console_scripts must map to a LIST of "name = module:func" strings
        'console_scripts': [
            'wonder = wonder_tool.main:wonder_main',
        ],
    },
    zip_safe=False,
    classifiers=[
        # Fixed typo: 'Enviroment' is not a valid trove classifier
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python',
        # Fixed: version classifiers use the 'Python :: X.Y' form
        'Programming Language :: Python :: 3.6',
    ],
)
|
# -*- coding: utf-8 -*-
"""cpo-pipeline.tree.parsers.result_parsers
This module provides functions for parsing result files generated by tools
during the Tree phase of the cpo-pipeline.
"""
import csv
def parse_workflow_results(path_to_result):
    """Parse a tab-delimited workflow result file into a list of records.

    Args:
        path_to_result (str): Path to the result file.

    Returns:
        list of dict: Parsed results. For example:
        [
            {
                "id": "BC11-Cfr001",
                "expected_species": "Citrobacter freundii",
                "mlst_species": "Citrobacter freundii",
                "sequence_type": "22",
                "mlst_scheme": "cfreundii",
                "carbapenem_resistance_genes": [
                    "NDM-1",
                    "Haemophilus influenzae PBP3 conferring resistance to beta-lactam antibiotics",
                    "marA"
                ],
                "other_amr_genes": [
                    "mdtB",
                    "TEM-1",
                    "msrE"
                ],
                "plasmids": [
                    {
                        "id": "683",
                        "num_contigs": "112",
                        "length_bp": "1702592",
                        "rep_type": "IncL/M,rep_cluster_1254",
                        "mobility": "Conjugative",
                        "nearest_reference": "JX988621"
                    },
                    ...
                ],
            }
        ]
    """
    workflow_results = []
    with open(path_to_result) as result_file:
        reader = csv.DictReader(result_file, delimiter='\t')
        for row in reader:
            workflow_result = {}
            workflow_result['id'] = row['id']
            workflow_result['expected_species'] = row['expected_species']
            # Fixed: previously copied expected_species into mlst_species
            workflow_result['mlst_species'] = row['mlst_species']
            workflow_result['sequence_type'] = row['sequence_type']
            workflow_result['mlst_scheme'] = row['mlst_scheme']
            workflow_result['carbapenem_resistance_genes'] = row['carbapenem_resistance_genes'].split(';')
            workflow_result['other_amr_genes'] = row['other_amr_genes'].split(';')
            # Plasmid columns are parallel ';'-separated lists; merge them
            # into one dict per plasmid, keyed by position.
            plasmids = [{'id': plasmid_id} for plasmid_id in row['plasmid_id'].split(';')]
            for index, value in enumerate(row['plasmid_num_contigs'].split(';')):
                plasmids[index]['num_contigs'] = value
            for index, value in enumerate(row['plasmid_length_bp'].split(';')):
                plasmids[index]['length_bp'] = value
            for index, value in enumerate(row['plasmid_rep_type'].split(';')):
                plasmids[index]['rep_type'] = value
            for index, value in enumerate(row['plasmid_mobility'].split(';')):
                plasmids[index]['mobility'] = value
            for index, value in enumerate(row['plasmid_nearest_reference'].split(';')):
                plasmids[index]['nearest_reference'] = value
            workflow_result['plasmids'] = plasmids
            # Fixed: rows were built but never appended, so the function
            # always returned an empty list (the name was also misspelled).
            workflow_results.append(workflow_result)
    return workflow_results
|
import os
import argparse
from utils import create_dataset, create_train_dir
from network import MobileNetv2_DeepLabv3
from config import Params
from utils import print_config
def LOG(x):
    """Print *x* to stdout wrapped in ANSI escape codes (red text).

    Replaces the original ``LOG = lambda x: ...`` assignment, which PEP 8
    (E731) discourages; behavior is identical (prints and returns None).
    """
    print('\033[0;31;2m' + x + '\033[0m')
def main():
    """Parse CLI arguments, build params/datasets/network, then train and test."""
    # Argument parsing
    parser = argparse.ArgumentParser(description='MobileNet_v2_DeepLab_v3 Pytorch Implementation')
    # TODO: maybe make it work with multiple datasets?
    # parser.add_argument('--dataset', default='cityscapes', choices=['cityscapes', 'other'],
    #                     help='Dataset used in training MobileNet v2+DeepLab v3')
    parser.add_argument('--root', default='./data/cityscapes', help='Path to your dataset')
    # Fixed: numeric arguments now carry argparse types; previously --lr was
    # assigned to params.base_lr as a raw string.
    parser.add_argument('--epoch', default=None, type=int, help='Total number of training epoch')
    parser.add_argument('--lr', default=None, type=float, help='Base learning rate')
    parser.add_argument('--pretrain', default=None, help='Path to a pre-trained backbone model')
    parser.add_argument('--resume_from', default=None, help='Path to a checkpoint to resume model')
    parser.add_argument('--logdir', default=None, help='Directory to save logs for Tensorboard')
    parser.add_argument('--batch_size', default=128, type=int, help='Batch size for training')
    args = parser.parse_args()

    params = Params()

    # Override defaults in params with any explicitly-given arguments
    if not os.path.exists(args.root):
        if params.dataset_root is None:
            raise ValueError('ERROR: Root %s doesn\'t exist!' % args.root)
    else:
        params.dataset_root = args.root
    if args.epoch is not None:
        params.num_epoch = args.epoch
    if args.lr is not None:
        params.base_lr = args.lr
    if args.pretrain is not None:
        params.pre_trained_from = args.pretrain
    if args.resume_from is not None:
        params.resume_from = args.resume_from
    if args.logdir is not None:
        params.logdir = args.logdir
    params.summary_dir, params.ckpt_dir = create_train_dir(params.logdir)
    params.train_batch = args.batch_size

    LOG('Network parameters:')
    print_config(params)

    # Create dataset and transformation
    LOG('Creating Dataset and Transformation......')
    datasets = create_dataset(params)
    LOG('Creation Succeed.\n')

    # Create model
    LOG('Initializing MobileNet and DeepLab......')
    net = MobileNetv2_DeepLabv3(params, datasets)
    LOG('Model Built.\n')

    # Train, then evaluate
    net.Train()
    net.Test()


if __name__ == '__main__':
    main()
|
from django.shortcuts import render, redirect
# Create your views here.
from pets.forms import CreatePetForm
from pets.models import Pet, Like
def pets_index(request):
    """Render the pet list page with every Pet in the database."""
    return render(
        request,
        "pets/pet_list.html",
        {'pets': Pet.objects.all()},
    )
def see_details(request, pk):
    """Render the detail page for the pet with primary key *pk*."""
    found_pet = Pet.objects.get(pk=pk)
    return render(request, "pets/pet_detail.html", {'pet': found_pet})
def like(request, pk):
    """Record a Like for the pet with primary key *pk*, then show its details."""
    liked_pet = Pet.objects.get(pk=pk)
    new_like = Like(test=str(pk))
    new_like.pet = liked_pet
    new_like.save()
    return redirect('details', pk)
def create_pet(request):
    """GET: render an empty creation form. POST: validate and create a Pet.

    On a valid POST, saves the new Pet and redirects to the index. On an
    invalid POST, re-renders the form so validation errors are shown.
    """
    if request.method == "GET":
        context = {
            "form": CreatePetForm(),
        }
        return render(request, 'pets/pet_create.html', context)

    form = CreatePetForm(request.POST)
    if form.is_valid():
        pet = Pet(type=form.cleaned_data['type'],
                  name=form.cleaned_data['name'],
                  age=form.cleaned_data['age'],
                  description=form.cleaned_data['description'],
                  image_url=form.cleaned_data['image_url'])
        pet.save()
        return redirect('pets index')
    # Fixed: the original returned None for an invalid form, which Django
    # treats as a server error. Re-render with the bound form instead.
    return render(request, 'pets/pet_create.html', {"form": form})
def edit_pet(request):
    # TODO: not implemented yet -- wired into URLs but a no-op.
    pass


def delete_pet(request):
    # TODO: not implemented yet -- wired into URLs but a no-op.
    pass
|
# Chapter 04
import requests
from bs4 import BeautifulSoup
import re

# Demonstrate a guarded HTTP request.
try:
    print('before request')
    r = requests.get('http://google.com')
    print(r)
except requests.exceptions.RequestException as exc:
    # Fixed: was a bare `except:` that swallowed every exception (including
    # KeyboardInterrupt) and printed an uninformative 'test' message.
    print('request failed:', exc)

# get bitcoin price from livecoin using API
r = requests.get('https://api.livecoin.net/exchange/ticker?currencyPair=BTC/USD')
price = r.json()
print('highest price of bitcoin: %.2f' % price['high'])

# pip install beautifulsoup4
r = requests.get('https://bama.ir/car/all-brands/all-models/all-trims?price=20-50')
soup = BeautifulSoup(r.text, 'html.parser')

# find first h2
val = soup.find('h2')
# this is equal to the following two lines
all_cars = soup.find_all('h2')
val = all_cars[0]
print(val.attrs['class'])
# Collapse runs of whitespace before printing
print(re.sub(r'\s+', ' ', val.text).strip())
for car in all_cars:
    print(re.sub(r'\s+', ' ', car.text).strip())
|
# https://leetcode.com/problems/replace-elements-with-greatest
# -element-on-right-side/
# Given an array arr, replace every element in that array with the
# greatest element among the elements to its right, and replace
# the last element with -1.
# After doing so, return the array.
from typing import List
class Solution:
    def replaceElements(self, arr: List[int]) -> List[int]:
        """Replace each element with the greatest element to its right.

        The last element becomes -1. The list is modified in place and
        returned. Single right-to-left pass keeping a running maximum —
        O(n), whereas the original recomputed ``max(arr[idx+1:])`` on each
        tie, which is O(n^2) worst case, and crashed on an empty list.
        """
        running_max = -1
        for i in range(len(arr) - 1, -1, -1):
            # Write the max of everything to the right, then fold arr[i]
            # into the running maximum for positions further left.
            arr[i], running_max = running_max, max(running_max, arr[i])
        return arr
####################
### TESTING ONLY ###
####################
# arr = [17,18,5,4,6,1]
# output: [18,6,6,6,1,-1]
print(Solution().replaceElements([1, 1, 1]))
|
from config10 import *
from tensorboardX import SummaryWriter
from utils10 import get_tensors, delta_E1994, tensor_Lab2RGB
from Unet10 import InputNet, UnetD, UnetDL
from VGG import VGG

# TensorBoard logger; scalar tags are namespaced under `version` below.
writer = SummaryWriter()

# Training hyper-parameters.
BATCH_SIZE = 16
epoch = 120000+1 #720000*6/BATCH_SIZE
ALPHA = 1e-7
BATE = 10  # NOTE(review): unused below; possibly a typo for BETA -- confirm
LAMBDA = 2  # weight of the LSGAN adversarial terms
ITERATION = 4  # refinement iterations passed to the generator forward pass
LR = 1e-5
version = 'new_test1'  # tag for saved checkpoints and TensorBoard scalars
torch.backends.cudnn.benchmark=True

# Resume generator and discriminator from existing checkpoints.
# `device`, `torch`, `nn`, `optim` come from config10's star import.
#net = InputNet().to(device)
#netD = UnetDL().to(device)
#fnet = FeatureNet().to(device)
net = torch.load('v10_3_2.net').to(device)
netD=torch.load('v10_3_2_D.net').to(device)

L1_loss = nn.L1Loss().to(device)
L2_loss = nn.MSELoss().to(device)
#BCE_loss = nn.BCEWithLogitsLoss().to(device)
init_lr = 1e-3  # NOTE(review): unused; both optimizers receive LR instead
optimizer = optim.Adam(net.parameters(), lr = LR, betas=(0.9,0.999))
optimizerD = optim.Adam(netD.parameters(), lr = LR, betas=(0.9,0.999))

# LSGAN targets -- presumably the discriminator emits a
# (BATCH_SIZE, 1, 16, 16) patch map; confirm against UnetD/UnetDL.
width = 16
real_target = torch.ones([BATCH_SIZE, 1, width, width]).to(device)
fake_target = torch.zeros(real_target.size()).to(device)
def loss_func_d(i, outputs, label_batch):
    """LSGAN discriminator loss at step *i*; logs the scalar and returns the tensor."""
    pred_real = netD(label_batch)
    pred_fake = netD(outputs.detach())
    real_term = L2_loss(pred_real, real_target)
    fake_term = L2_loss(pred_fake, fake_target)
    d_loss = 0.5*LAMBDA*(real_term + fake_term)
    scalar = d_loss.item()
    writer.add_scalar(version+'/d_loss', scalar, i)
    print('d_loss {:.4f}'.format(scalar))
    return d_loss
def loss_func_g(i, outputs, label_batch):
    """Generator loss at step *i*: color + L-channel + ab-channel + adversarial.

    Each component scalar is logged to TensorBoard; the summed tensor is
    returned for backprop.
    """
    components = {
        'color_loss': 0.1*delta_E1994(label_batch, outputs),
        'content_loss': L1_loss(outputs[:,0], label_batch[:,0]),
        'ab_loss': L1_loss(outputs[:,1:3], label_batch[:,1:3]),
        'g_loss': LAMBDA* 0.5*L2_loss( netD(outputs), real_target),
    }
    pieces = []
    total_loss = 0
    for loss_name, loss_val in components.items():
        scalar = loss_val.item()
        pieces.append(loss_name + ' {:.4f}, '.format(scalar))
        writer.add_scalar(version+'/'+loss_name, scalar, i)
        total_loss += loss_val
    print(''.join(pieces), end='')
    return total_loss
gap = 10  # iterations between timing printouts

if __name__ == '__main__':
    start_time = time.time()
    # `epoch` counts optimizer steps (one random batch each), not dataset
    # passes. file_list / random / time come from config10's star import.
    for i in range(1, epoch+1):
        filenames = random.sample(file_list, BATCH_SIZE)
        inputs_batch, hint_batch, label_batch = get_tensors(filenames)
        outputs = net(inputs_batch, hint_batch, ITERATION)
        # rgb_label = tensor_Lab2RGB(label_batch)
        # rgb_outputs = tensor_Lab2RGB(outputs)
        # if i==60000:
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] = 0.5*LR
        #     for param_group in optimizerD.param_groups:
        #         param_group['lr'] = 0.5*LR

        # Train the generator on every step.
        loss_g = loss_func_g(i,outputs, label_batch)#, rgb_outputs)
        optimizer.zero_grad()
        loss_g.backward()
        optimizer.step()
        #optimizerL.step()
        # loss_d = loss_func_d(i,outputs, label_batch)#, rgb_outputs, rgb_label)
        # optimizerD.zero_grad()
        # loss_d.backward()
        # optimizerD.step()

        # Train the discriminator only when i % 10 is in {5..9}
        # (half of the steps); otherwise print a placeholder.
        if i%10>4:
            loss_d = loss_func_d(i,outputs, label_batch)#, rgb_outputs, rgb_label)
            optimizerD.zero_grad()
            loss_d.backward()
            optimizerD.step()
        else:
            print('n')  # NOTE(review): placeholder output -- intent unclear
        #if i % batch_scale == 0:
        #l_1+=(loss_g.item()+loss_d.item())

        # Periodic timing report.
        if i % gap == 0:
            end_time = time.time()
            #print('epoch {}, loss {:.6f}, time {:.3f}s'.format(i,l_1/gap,end_time-start_time))
            print('epoch {}, time {:.4f}s'.format(i,end_time-start_time))
            start_time = time.time()
            #l_1=0

        # Periodic checkpointing of both networks.
        if i % 1000 == 0:
            torch.save(net,version+'.net')
            torch.save(netD, version+'_D.net')
    writer.close()
|
class EmptyFileError(Exception):
    """Raised when an input file is unexpectedly empty."""
    pass


class UnrecognisedFieldError(Exception):
    """Raised when a field name is not recognised.

    Fixed: previously inherited BaseException, so generic
    ``except Exception`` handlers would not catch it (BaseException is
    reserved for interpreter-level exits like KeyboardInterrupt).
    """
    pass


class EarlyReconciliationError(Exception):
    """Raised when reconciliation is attempted too early (was BaseException)."""
    pass


class UpstreamServiceUnavailable(Exception):
    """Raised when an upstream service cannot be reached (was BaseException)."""
    pass
|
from piston.resource import Resource as PistonResource
from piston.utils import rc
import json
class Resource(PistonResource):
    """Piston resource that appends form errors to BAD_REQUEST responses."""

    def form_validation_response(self, e):
        """Return a 400 response whose body includes the form's field errors.

        Args:
            e: Piston validation exception carrying the failed form.
        """
        resp = rc.BAD_REQUEST
        # Idiom fix: use str(...) instead of calling .__str__() directly.
        resp.write(' ' + str(dict(e.form.errors.items())))
        return resp
|
from ApplicationDate import application_date
from sqlwrapper import gensql, dbget, dbput
import json
import datetime
def HOTEL_FD_POST_UPDATE_RoomAssign(request):
    """Assign a room to a reservation and sync block pickup / room status.

    Expects a JSON body containing at least Res_id, Res_unique_id and
    Res_room. Splits the payload into update values (e) and key filters (a),
    increments the business-block grid counter matching the room type, then
    updates guest status and room-list state depending on the arrival date.
    Returns a JSON status string.

    WARNING(review): every query below builds SQL by concatenating
    request-supplied strings -- SQL injection risk. Should use parameterized
    queries if the sqlwrapper helpers support them.
    """
    d = request.json
    res_id = d.get("Res_id")
    room = d.get("Res_room")
    unique_id = d.get("Res_unique_id")
    a,e = {},{}
    # e: non-empty payload fields to update; a: the key fields only.
    e = { k : v for k,v in d.items() if v != '' if k not in ('Res_id','Res_unique_id')}
    print(a)
    a = { k : v for k,v in d.items() if k != '' if k in ('Res_id','Res_unique_id')}
    print(e)
    # application_date() returns a pair; index 1 is today's date string
    # (presumably 'YYYY-MM-DD' -- it is parsed with that format below).
    app_datetime = application_date()
    Today_date = app_datetime[1]
    #Today_date = str(Today_date)
    arrival = dbget("select res_arrival,res_room_type,res_block_code from reservation.res_reservation where res_id = '"+res_id+"' and res_unique_id = '"+unique_id+"' ")
    arrival = json.loads(arrival)
    print(arrival)
    arrival_date = arrival[0]['res_arrival']
    # For reservations attached to a business block (other than 'PM'),
    # increment the grid counter column for the reservation's room type,
    # then bump the picked-up room-night count for the block.
    if arrival[0]['res_block_code'] is not None and arrival[0]['res_block_code'] != 'PM':
        if arrival[0]['res_room_type']== 'Kngn':
            sql = dbput("update business_block.current_grid set kngn = kngn +'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Kngs':
            sql = dbput("update business_block.current_grid set kngs = kngs+'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Ksbn':
            sql = dbput("update business_block.current_grid set Ksbn = Ksbn+'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] == 'Ksbs':
            print("workingits fine")
            sql = dbput("update business_block.current_grid set ksbs = ksbs +'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Sjsn' :
            sql = dbput("update business_block.current_grid set sjsn = sjsn+'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Sdbn':
            sql = dbput("update business_block.current_grid set sdbn = sdbn +'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Sjss':
            sql = dbput("update business_block.current_grid set sjss = sjss +'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        elif arrival[0]['res_room_type'] =='Comp':
            sql = dbput("update business_block.current_grid set comp = comp +'1' where block_id='"+str(arrival[0]['res_block_code'])+"' and grid_type =3")
            print(sql)
        pickup = dbput("update business_block.room_revenue set room_nights_picked = room_nights_picked + '1' where block_id='"+str(arrival[0]['res_block_code'])+"'")
        print(pickup)
    else:
        pass
    # Compare arrival date against today's application date.
    arrival = datetime.datetime.strptime(arrival[0]['res_arrival'],'%Y-%m-%d').date()
    totday_date = datetime.datetime.strptime(Today_date,'%Y-%m-%d').date()
    yesterday= totday_date - datetime.timedelta(days=1)
    print(yesterday)
    if totday_date == arrival:
        # Guest arrives today: mark arrival and reserve the room(s).
        e['res_guest_status'] = "arrival"
        sql_value = gensql('update','reservation.res_reservation',e,a)
        room = e.get("Res_room")
        print(room)
        res_status = "reserved"
        sqlvalue = dbput("update room_management.rm_room_list set rm_reservation_status = '"+res_status+"' where rm_room in ("+room+")")
        return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Updated Successfully','ReturnCode':'RUS'}, sort_keys=True, indent=4))
    elif yesterday == totday_date:
        # NOTE(review): this branch is unreachable -- (today - 1 day) can
        # never equal today. It likely meant to flag an arrival tomorrow,
        # e.g. `totday_date == arrival - datetime.timedelta(days=1)`; confirm.
        e['res_guest_status'] = "due in"
        sql_value = gensql('update','reservation.res_reservation',e,a)
        room = e.get("Res_room")
        res_status = "reserved"
        sqlvalue = dbput("update room_management.rm_room_list set rm_reservation_status = '"+res_status+"' where rm_room in ("+room+")")
        print(sql_value)
        return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Updated Successfully','ReturnCode':'RUS'}, sort_keys=True, indent=4))
    else:
        return(json.dumps({'Status': 'Success', 'StatusCode': '200','Return': 'Record Assign Not Available','ReturnCode':'RANA'}, sort_keys=True, indent=4))
    #if Today_date == arrival_date:
    #    e['res_guest_status'] = "arrival"
|
import numpy as np #using numpy for arrays and optimized matrix multiplication
np.random.seed(1) #seeding so that repeated results are the same and we can observe
#changes from editing.
from sklearn.model_selection import train_test_split #used to randomly split the
#dataset into 2 parts so that we can train and test on different datapoints
#Sigmoid Function
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x), applied element-wise."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
#Derivative of Sigmoid Function
def derivatives_sigmoid(x):
    """Derivative of the sigmoid expressed via its output x: x * (1 - x)."""
    complement = 1 - x
    return x * complement
def input_table(x):
    """Return the full truth table of *x* variables as a (2**x, x) int array.

    Row i is the x-bit binary representation of i, most-significant bit
    first. Uses format()/zfill instead of the original
    ``bin(i).lstrip("0b")`` hack, which relied on lstrip stripping the
    *characters* '0' and 'b' rather than a prefix. Assumes x >= 1.
    """
    rows = [
        [int(bit) for bit in format(i, 'b').zfill(x)]
        for i in range(2 ** x)
    ]
    return np.array(rows)
X = input_table(8) #primary input: full truth table of 8 variables (256 rows)
print (X)

y = [] #outputs for corresponding values in table
# Expected outputs are stored in abc.txt as a digit string, e.g.
# "010101101..." -- one '0'/'1' character per truth-table row.
with open('abc.txt', 'r') as f:
    s = f.read()
    x = list(s)
    y = x
y = [[int(i)] for i in y]
y = np.array(y)

# Randomly split into train/test so we evaluate on unseen rows.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

#Variable initialization
epoch = 5001 #Setting training iterations
lr = 0.1 #Setting learning rate
inputlayer_neurons = X.shape[1] #no of inputs (8 here, one per variable)
hiddenlayer_neurons = 3 #number of hidden layer neurons
output_neurons = 1 #number of neurons at output layer

#weight and bias initialization (uniform random in [0, 1))
wh = np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
bh = np.random.uniform(size=(1,hiddenlayer_neurons))
#hidden layer weights and biases
wout = np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
bout = np.random.uniform(size=(1,output_neurons))
#output layer weights and biases

# Training loop: full-batch gradient descent with manual backprop.
for i in range(epoch):
    #Forward Propagation
    hidden_layer_input1 = np.dot(X_train,wh)
    hidden_layer_input = hidden_layer_input1 + bh
    hiddenlayer_activations = sigmoid(hidden_layer_input)
    output_layer_input1 = np.dot(hiddenlayer_activations,wout)
    output_layer_input = output_layer_input1+ bout
    output = sigmoid(output_layer_input)
    #Backpropagation (gradient of squared error through both layers)
    E = y_train-output
    slope_output_layer = derivatives_sigmoid(output)
    slope_hidden_layer = derivatives_sigmoid(hiddenlayer_activations)
    d_output = E * slope_output_layer
    Error_at_hidden_layer = d_output.dot(wout.T)
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
    wout += hiddenlayer_activations.T.dot(d_output) *lr
    bout += np.sum(d_output, axis=0,keepdims=True) *lr
    wh += X_train.T.dot(d_hiddenlayer) *lr
    bh += np.sum(d_hiddenlayer, axis=0,keepdims=True) *lr
    # Every 50 iterations report how many training rows are classified
    # correctly after rounding the sigmoid output to 0/1.
    if i%50 == 0:
        accuracy = 0
        for x in range(len(output)):
            if int(round(output[x][0])) == y_train[x][0]:
                accuracy += 1
        print (i, "Accuracy: ", accuracy)

#test: single forward pass over the held-out split
hidden_layer_input1 = np.dot(X_test,wh)
hidden_layer_input = hidden_layer_input1 + bh
hiddenlayer_activations = sigmoid(hidden_layer_input)
output_layer_input1 = np.dot(hiddenlayer_activations,wout)
output_layer_input = output_layer_input1+ bout
output = sigmoid(output_layer_input)
accuracy = 0
for x in range(len(output)):
    if int(round(output[x][0])) == y_test[x][0]:
        accuracy += 1
print ("Test Accuracy: ", accuracy)
import os
import sys

# Append paths so that dependencies would work: this script lives two
# directories below the repo root and needs the root plus the vendored
# third_party/first_party trees on sys.path.
_FINDIT_DIR = os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir)
_THIRD_PARTY_DIR = os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'third_party')
_FIRST_PARTY_DIR = os.path.join(
    os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'first_party')
sys.path.insert(0, _FINDIT_DIR)
sys.path.insert(0, _THIRD_PARTY_DIR)
sys.path.insert(0, _FIRST_PARTY_DIR)

# Activate script as findit prod (side effect: enables the remote API so
# the datastore queries below hit production).
from local_libs import remote_api
remote_api.EnableFinditRemoteApi()
# Add imports below.
import datetime
import textwrap
from libs import analysis_status
from model.flake.analysis.master_flake_analysis import MasterFlakeAnalysis
from services import bigquery_helper
class FlakeAnalyzerReport(object):
  """Encapsulates an in-depth flake analyzer report for one week.

  Runs the BigQuery aggregate query twice (the requested week and the week
  before it), computes error rates from datastore analyses for the same two
  windows, and renders everything - optionally with week-over-week deltas -
  via __repr__.
  """

  # Aggregate counts over analyses completed between {end_days_back} and
  # {start_days_back} days ago; both placeholders are filled via str.format.
  _METRICS_QUERY = textwrap.dedent("""
      select
        count(*) as number_of_analyses,
        sum(
          case when analysis_info.culprit.revision is not null
            then 1
            else 0
          end
        ) as number_of_culprits,
        avg(
          regression_range_confidence
        ) as average_regression_range_confidence,
        avg(
          case when analysis_info.culprit.revision is not null
            then analysis_info.culprit.confidence
            else null
          end
        ) as average_culprit_confidence,
        sum(
          case when array_to_string(analysis_info.actions, ' ') != ''
            then 1
            else 0
          end
        ) as number_of_autoactions_taken,
        sum(
          case when array_to_string(analysis_info.actions, ' ') \
              like '%BUG_CREATED%'
            then 1
            else 0
          end
        ) as number_of_bugs_filed,
        sum(
          case when array_to_string(analysis_info.actions, ' ') \
              like '%BUG_COMMENTED%'
            then 1
            else 0
          end
        ) as number_of_bugs_commented,
        sum(
          case when array_to_string(analysis_info.actions, ' ') \
              like '%CL_COMMENTED%'
            then 1
            else 0
          end
        ) as number_of_cls_commented
      from
        `findit-for-me.events.test`
      where
        # Analyses completed in the past day.
        analysis_info.timestamp.completed >=
          timestamp_sub(current_timestamp(), interval {end_days_back} day)
        and analysis_info.timestamp.completed <
          timestamp_sub(current_timestamp(), interval {start_days_back} day)
        # Flake analyses.
        and flake = true
      """)

  # Report template including week-over-week deltas ("_dx" fields).
  _TEMPLATE_STRING = textwrap.dedent("""
      Flake Analyzer Stats for this past week {week_start} though {week_end}:
      Total analyses: {number_of_analyses} ({number_of_analyses_dx})
      Regression range error rate: {rr_error_rate}% ({rr_error_rate_dx})
      Total culprits: {number_of_culprits} ({number_of_culprits_dx})
      Culprit analysis error rate: {ca_error_rate}% ({ca_error_rate_dx})
      Total auto-actions taken: {number_of_autoactions_taken} \
      ({number_of_autoactions_taken_dx})
      Total number of bugs filed: {number_of_bugs_filed} \
      ({number_of_bugs_filed_dx})
      Total number of bugs commented: {number_of_bugs_commented} \
      ({number_of_bugs_commented_dx})
      Total number of CLs commented: {number_of_cls_commented} \
      ({number_of_cls_commented_dx})
      """)

  # Report template without deltas (used when display_change is False).
  _TEMPLATE_STRING_NO_CHANGE = textwrap.dedent("""
      Flake Analyzer Stats for the week {week_start} though {week_end}:
      Total analyses: {number_of_analyses}
      Regression range error rate: {rr_error_rate}%
      Total culprits: {number_of_culprits}
      Culprit analysis error rate: {ca_error_rate}%
      Total auto-actions taken: {number_of_autoactions_taken}
      Total number of bugs filed: {number_of_bugs_filed}
      Total number of bugs commented: {number_of_bugs_commented}
      Total number of CLs commented: {number_of_cls_commented}
      """)

  def __init__(self, start_days_back, display_change=True):
    """Collect data for the week ending `start_days_back` days ago.

    Args:
      start_days_back: Offset in days from now to the end of the week.
      display_change: Whether __repr__ includes week-over-week deltas.
    """
    self._display_change = display_change
    self._week_start = (
        datetime.datetime.now() - datetime.timedelta(days=start_days_back) -
        datetime.timedelta(days=7))
    # Ordered aggregate data from newest --> oldest.
    self._query_results = [
        bigquery_helper.ExecuteQuery(
            'findit-for-me',
            FlakeAnalyzerReport._METRICS_QUERY.format(
                start_days_back=start_days_back,
                end_days_back=start_days_back + 7))[1][0],
        bigquery_helper.ExecuteQuery(
            'findit-for-me',
            FlakeAnalyzerReport._METRICS_QUERY.format(
                start_days_back=start_days_back + 7,
                end_days_back=start_days_back + 14))[1][0],
    ]
    # Ordered error rates data from newest --> oldest.
    self._error_rates = [
        self._GetErrorRates(start_days_back, start_days_back + 7),
        self._GetErrorRates(start_days_back + 7, start_days_back + 14),
    ]

  def _GetAllAnalyses(self, start_days_back, end_days_back, PAGE_SIZE=500):
    """Get all the analyses within the day range.

    Note: `end` is the OLDER timestamp (further back in days), so the
    datastore filter is request_time in [end, start).
    """
    start = datetime.datetime.now() - datetime.timedelta(days=start_days_back)
    end = datetime.datetime.now() - datetime.timedelta(days=end_days_back)
    all_analyses = []
    cursor = None
    more = True
    # Page through results using the datastore cursor until exhausted.
    while more:
      query = MasterFlakeAnalysis.query(
          MasterFlakeAnalysis.request_time >= end,
          MasterFlakeAnalysis.request_time < start)
      analyses, cursor, more = query.fetch_page(PAGE_SIZE, start_cursor=cursor)
      all_analyses.extend(analyses)
    return all_analyses

  def _GetErrorRates(self, start_days_back, end_days_back):
    """Get the error rate within the day range.

    Returns:
      Dict with integer-percentage error rates for 'regression_analysis'
      and 'culprit_analysis'; 0 when no analyses ran.
    """
    flake_analyses = self._GetAllAnalyses(start_days_back, end_days_back)
    total_regression_analyses = 0
    regression_analysis_errors = 0
    total_culprit_analyses = 0
    culprit_analysis_errors = 0
    for analysis in flake_analyses:
      # Skipped analyses are excluded from both denominators.
      if analysis.status != analysis_status.SKIPPED:
        total_regression_analyses += 1
        if analysis.status == analysis_status.ERROR:
          regression_analysis_errors += 1
      if analysis.try_job_status != analysis_status.SKIPPED:
        total_culprit_analyses += 1
        if analysis.try_job_status == analysis_status.ERROR:
          culprit_analysis_errors += 1
    ra_error_rate = (
        float(regression_analysis_errors) / float(total_regression_analyses)
        if total_regression_analyses > 0 else 0)
    ca_error_rate = (
        float(culprit_analysis_errors) / float(total_culprit_analyses)
        if total_culprit_analyses > 0 else 0)
    return {
        'regression_analysis': int(ra_error_rate * 100),
        'culprit_analysis': int(ca_error_rate * 100)
    }

  def _CalculateXOverY(self, x, y):
    """Calculate the growth rate from old value y to new value x.

    NOTE(review): the percentage divides by x (the NEW value) rather than
    y (the old one), and x == 0 is reported as a 100% decrease even when
    y == 0 -- confirm this is the intended definition.
    """
    change = int(100 * float(x - y) / float(x)) if x != 0 else -100
    if change >= 0:
      return '{}% increase'.format(change)
    else:
      change = abs(change)
      return '{}% decrease'.format(change)

  def __repr__(self):
    """Return the string representation of this report."""
    # With deltas: compare the newest window (index 0) against the
    # previous one (index 1) for every metric.
    if self._display_change:
      return FlakeAnalyzerReport._TEMPLATE_STRING.format(
          week_start=self._week_start.strftime('%x'),
          week_end=(
              self._week_start + datetime.timedelta(days=7)).strftime('%x'),
          number_of_analyses=self._query_results[0]['number_of_analyses'],
          number_of_analyses_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_analyses'],
              self._query_results[1]['number_of_analyses']),
          rr_error_rate=self._error_rates[0]['regression_analysis'],
          rr_error_rate_dx=self._CalculateXOverY(
              self._error_rates[0]['regression_analysis'],
              self._error_rates[1]['regression_analysis']),
          number_of_culprits=self._query_results[0]['number_of_culprits'],
          number_of_culprits_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_culprits'],
              self._query_results[1]['number_of_culprits']),
          ca_error_rate=self._error_rates[0]['culprit_analysis'],
          ca_error_rate_dx=self._CalculateXOverY(
              self._error_rates[0]['culprit_analysis'],
              self._error_rates[1]['culprit_analysis']),
          number_of_autoactions_taken=self._query_results[0][
              'number_of_autoactions_taken'],
          number_of_autoactions_taken_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_autoactions_taken'],
              self._query_results[1]['number_of_autoactions_taken']),
          number_of_bugs_filed=self._query_results[0]['number_of_bugs_filed'],
          number_of_bugs_filed_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_bugs_filed'],
              self._query_results[1]['number_of_bugs_filed']),
          number_of_bugs_commented=self._query_results[0][
              'number_of_bugs_commented'],
          number_of_bugs_commented_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_bugs_commented'],
              self._query_results[1]['number_of_bugs_commented']),
          number_of_cls_commented=self._query_results[0][
              'number_of_cls_commented'],
          number_of_cls_commented_dx=self._CalculateXOverY(
              self._query_results[0]['number_of_cls_commented'],
              self._query_results[1]['number_of_cls_commented']),
      )
    return FlakeAnalyzerReport._TEMPLATE_STRING_NO_CHANGE.format(
        week_start=self._week_start.strftime('%x'),
        week_end=(self._week_start + datetime.timedelta(days=7)).strftime('%x'),
        number_of_analyses=self._query_results[0]['number_of_analyses'],
        rr_error_rate=self._error_rates[0]['regression_analysis'],
        number_of_culprits=self._query_results[0]['number_of_culprits'],
        ca_error_rate=self._error_rates[0]['culprit_analysis'],
        number_of_autoactions_taken=self._query_results[0][
            'number_of_autoactions_taken'],
        number_of_bugs_filed=self._query_results[0]['number_of_bugs_filed'],
        number_of_bugs_commented=self._query_results[0][
            'number_of_bugs_commented'],
        number_of_cls_commented=self._query_results[0][
            'number_of_cls_commented'],
    )
if __name__ == '__main__':
# Last 6 weeks of data.
print FlakeAnalyzerReport(0)
print '--------------------------------------------------------------------'
print FlakeAnalyzerReport(7)
print '--------------------------------------------------------------------'
print FlakeAnalyzerReport(14)
print '--------------------------------------------------------------------'
print FlakeAnalyzerReport(21)
print '--------------------------------------------------------------------'
print FlakeAnalyzerReport(28)
print '--------------------------------------------------------------------'
print FlakeAnalyzerReport(35, display_change=False)
|
from flask_cors import CORS
from flask import (
Response,
stream_with_context,
session,
request,
redirect,
url_for,
jsonify,
)
from flask_login import (
LoginManager,
login_user,
UserMixin,
login_required,
logout_user,
current_user,
)
from .app import (
app,
socketio,
user_store,
use_predefined_user,
use_embed_mode,
use_proxy,
redirect_url,
trans_tbl,
domains_dict,
origins,
verify_ssl,
)
from .websocket import exchangeCodeData, RDSNamespace
import json
import requests
import uuid
import os
import jwt
# Allow cross-origin requests from the configured origins, with cookies.
CORS(app, origins=origins, supports_credentials=True)

# Flask-Login session handling; unauthenticated users are sent to "index".
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "index"

# Register the Socket.IO namespace handling RDS websocket traffic at "/".
socketio.on_namespace(RDSNamespace("/"))
def proxy(host, path):
    """Fetch ``host + path`` upstream and stream the body back to the client."""
    upstream = requests.get(f"{host}{path}", stream=True, timeout=1)
    body = stream_with_context(upstream.iter_content(chunk_size=1024))
    return Response(body, content_type=upstream.headers["content-type"])
class User(UserMixin):
    """Flask-Login user stored in the module-level ``user_store`` dict.

    Carries the session id plus the EFSS identity: cloud id (``userId``),
    the EFSS server name, and optionally an OAuth bearer token used to
    resolve the identity remotely.
    """
    def to_dict(self):
        """Serialize to a plain dict so the user can live in ``user_store``."""
        return {
            "id": self.id,
            "websocketId": self.websocketId,
            "userId": self.userId,
            "token": self.token,
            "servername": self.servername,
        }
    @classmethod
    def from_dict(cls, obj):
        """Inverse of :meth:`to_dict`."""
        return cls(**obj)
    def __init__(
        self, servername=None, id=None, userId=None, websocketId=None, token=None
    ):
        super().__init__()
        if id is None:
            raise ValueError("id needs to be set-")
        self.id = id
        self.websocketId = websocketId
        self.userId = userId
        self.token = token
        self.servername = servername
        # Predefined-user embed mode skips the remote identity lookup entirely.
        if use_embed_mode and use_predefined_user:
            return
        # Token-only login: ask each known EFSS domain's RDS "informations"
        # endpoint until one accepts the bearer token.
        if userId is None and token is not None:
            headers = {"Authorization": f"Bearer {token}"}
            for key, domain in domains_dict.items():
                # If the EFSS (OwnCloud / NextCloud) is running locally within the k8s environment,
                # (probably under minikube and without a public IP)
                # we need to access the informations endpoint of the integration app through an internal URL.
                url = domain.get("INTERNAL_ADDRESS", domain["ADDRESS"]) or os.getenv(
                    "OWNCLOUD_URL", "https://localhost/index.php"
                )
                req = requests.get(
                    f"{url}/index.php/apps/rds/api/1.0/informations",
                    headers=headers,
                    verify=verify_ssl,
                )
                if req.status_code == 200:
                    # The endpoint answers with a JWT; verifying it against the
                    # domain's public key proves the token belongs there.
                    text = req.json()["jwt"]
                    data = jwt.decode(
                        text, domains_dict.get_publickey(key), algorithms=["RS256"]
                    )
                    app.logger.debug(data)
                    self.userId = data["cloudID"]
                    self.servername = key
                    return
            # No domain recognized the token.
            raise ValueError
@app.route("/informations")
def informations():
data = {}
if redirect_url is not None:
data["redirectUrl"] = redirect_url
return json.dumps(data)
@app.route("/faq")
def questions():
from .questions import questions
from string import Template
return json.dumps(
{
lang: {
category: {
quest: Template(answer).substitute(**(session["oauth"]))
for quest, answer in quests.items()
}
for category, quests in categories.items()
}
for lang, categories in questions.items()
}
)
@app.route("/login", methods=["GET", "POST"])
def login():
if request.method == "GET":
# This causes serious security issues:
# return ("", 200) if (current_user.is_authenticated) else ("", 401)
return ("", 401)
try:
reqData = request.get_json()
except Exception as e:
app.logger.error(e, exc_info=True)
reqData = request.form
app.logger.debug("reqdata: {}".format(reqData))
data = reqData.get("informations", "")
unverified = jwt.decode(data, options={"verify_signature": False})
_, _, servername = unverified["cloudID"].rpartition("@")
servername = servername.translate(trans_tbl)
publickey = domains_dict.get_publickey(servername)
user = None
try:
decoded = jwt.decode(data, publickey, algorithms=["RS256"])
user = User(
id=str(uuid.uuid4()), userId=decoded["cloudID"], servername=servername
)
session["informations"] = decoded
session["servername"] = servername
session["oauth"] = domains_dict[servername]
# check if everything is given for later usage
keys = ["email", "UID", "cloudID"]
values_from_keys = [decoded.get(key) for key in keys]
if None in values_from_keys:
error = {
"error": "Missing key: email or UID or cloudID is missing in given informations.",
"errorCode": "MissingKey",
"key": keys[values_from_keys.index(None)],
}
return jsonify(error), 401
except Exception as e:
app.logger.error(e, exc_info=True)
if user is not None:
user_store[user.get_id()] = user.to_dict()
login_user(user)
app.logger.info("logged? {}".format(current_user.is_authenticated))
return "", 201
error = {
"error": "Given informations weren`t valid or some keys were missing.",
"errorCode": "UserInformationsNotValid",
}
app.logger.error("error occured: {}".format(error))
return jsonify(error), 401
@login_manager.user_loader
def load_user(user_id):
    """Rebuild a User from the session store.

    Returning None (rather than raising) is the Flask-Login contract for
    stale or unknown session ids; the original called
    ``User.from_dict(None)`` and crashed with a TypeError in that case.
    """
    stored = user_store.get(user_id)
    return User.from_dict(stored) if stored is not None else None
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("index"))
@app.route("/", defaults={"path": "index.html"})
@app.route("/<path:path>")
def index(path):
# only for testing condition
if use_embed_mode and use_predefined_user:
app.logger.debug("skip authentication")
servername = next(iter(domains_dict.values()))["ADDRESS"]
user = User(
id=str(uuid.uuid4()),
userId=os.getenv("DEV_FLASK_USERID"),
servername=servername,
)
session["servername"] = servername
session["oauth"] = {
"SUPPORT_EMAIL": os.getenv("SUPPORT_EMAIL"),
"MANUAL_URL": os.getenv("MANUAL_URL"),
}
session["informations"] = {
"UID" : "1234",
"email" : "user@user.com",
"name" : "someUser" ,
"cloudID" : "cloud@cloud@localhost:8000"
}
user_store[user.get_id()] = user.to_dict()
login_user(user)
if "access_token" in request.args:
user = User(id=str(uuid.uuid4()), token=request.args["access_token"])
user_store[user.get_id()] = user.to_dict()
login_user(user)
return redirect("/")
if current_user.is_authenticated:
if "code" in request.args and "state" in request.args:
if exchangeCodeData(request.args):
return app.send_static_file("exchangeCode.html")
return app.send_static_file("exchangeCode_error.html")
if use_embed_mode or current_user.is_authenticated:
if use_proxy:
return proxy(os.getenv("DEV_WEBPACK_DEV_SERVER_HOST"), request.path)
if use_embed_mode:
return app.send_static_file(path)
return redirect(redirect_url)
|
from copy import deepcopy
import pytest
import time
from threading import Thread
from mock import MagicMock
from switchboard.engine import SwitchboardEngine, EngineError, _Client
from switchboard.module import SwitchboardModule
class TimeElapsed:
    """Context manager measuring wall-clock time spent in its body.

    Usage::

        with TimeElapsed() as t:
            ...
        print(t.elapsed)
    """
    def __enter__(self):
        self.start_time = time.time()
        # BUG FIX: return self so ``with TimeElapsed() as t`` binds the
        # instance — the original returned None, so ``t.elapsed`` crashed.
        return self
    def __exit__(self, type, value, traceback):
        # Recorded on exit; only valid after the with-block finishes.
        self.elapsed = time.time() - self.start_time
class EngineTest(SwitchboardEngine):
    """SwitchboardEngine stub with canned config and mocked modules for tests."""
    def __init__(self):
        # NOTE(review): passes itself as both constructor arguments —
        # confirm against SwitchboardEngine.__init__'s actual signature.
        super(EngineTest, self).__init__(self, self)
        self.loop_count = 0                            # iterations observed by tests
        self.configs = { 'poll_period': 0.05 }         # 50 ms loop period
        self.running = True                            # modules enabled by default
        self.modules = { 'mod1': MagicMock(), 'mod2': MagicMock() }
def test_terminate():
    """The engine loop must stop both on ``terminate`` and on KeyboardInterrupt."""
    def loop():
        eng.loop_count += 1
        time.sleep(0.05)
        if eng.loop_count > 6: raise KeyboardInterrupt
    eng = EngineTest()
    eng.switchboard_loop = loop
    # Make sure that setting terminate stops the engine
    eng.start()
    time.sleep(0.12)
    eng.terminate = True
    eng._swb_thread.join()
    # ~0.12 s at 0.05 s/iteration -> exactly 3 iterations expected
    assert eng.loop_count == 3
    # Same with KeyboardInterrupt
    eng.terminate = False
    eng.start()
    time.sleep(0.22)
    eng._swb_thread.join()
    # 4 more iterations (total 7) before loop() raises KeyboardInterrupt
    assert eng.loop_count == 7
class TestSwitchboardLoop:
    """Tests for SwitchboardEngine.switchboard_loop.

    BUG FIX: this was declared as ``def TestSwitchboardLoop():`` with the
    tests nested inside, so pytest never collected or ran any of them; the
    fixture was also named ``engine`` while every test requested ``eng``.
    """

    @pytest.fixture
    def eng(self):
        """Engine with the device-I/O and snapshot methods mocked out."""
        eng = EngineTest()
        eng._update_devices_values = MagicMock()
        eng.take_snapshot = MagicMock()
        return eng

    def test_loop_no_delay(self, eng):
        ''' Run a standard loop with no delay '''
        with TimeElapsed() as t:
            eng.switchboard_loop()
        assert t.elapsed < 0.01
        eng._update_devices_values.assert_called_once()
        eng.modules['mod1'].assert_called_once()
        eng.modules['mod2'].assert_called_once()
        eng.take_snapshot.assert_called_once()

    def test_full_delay(self, eng):
        ''' Run a loop with delay and modules disabled '''
        eng.prev_cycle_time = time.time()
        eng.running = False
        with TimeElapsed() as t:
            eng.switchboard_loop()
        # One full poll_period (0.05 s) should elapse; modules stay untouched.
        assert t.elapsed > 0.05 and t.elapsed < 0.06
        eng._update_devices_values.assert_called_once()
        eng.modules['mod1'].assert_not_called()
        eng.modules['mod2'].assert_not_called()
        eng.take_snapshot.assert_called_once()

    def test_lock(self, eng):
        ''' Test the locking logic '''
        def block():
            with eng.lock:
                time.sleep(0.05)
                # While the lock is held, the loop must not have progressed.
                eng._update_devices_values.assert_not_called()
        Thread(target=block).start()
        with TimeElapsed() as t:
            eng.switchboard_loop()
        # The loop must have waited for the lock (~0.05 s) before running.
        assert t.elapsed > 0.05 and t.elapsed < 0.06
        eng._update_devices_values.assert_called_once()
def test_add_and_update_client():
    ''' Ensure various checks are performed when adding or updating clients '''
    eng = EngineTest()
    eng._upsert_client = MagicMock()
    # Adding a client whose alias already exists must fail.
    eng.clients = { 'client1': None }
    with pytest.raises(EngineError):
        eng.add_client('http://abc', 'client1')
    # Adding a client whose URL is already registered must fail.
    eng.clients = { 'client1': _Client('http://abc', None, None, None) }
    with pytest.raises(EngineError):
        eng.add_client('http://abc', 'client2')
    # Updating an unknown client must fail.
    with pytest.raises(EngineError):
        eng.update_client('client3')
    # None of the rejected operations may reach the upsert step.
    eng._upsert_client.assert_not_called()
def test_upsert_client():
    # TODO: cover SwitchboardEngine._upsert_client (insert and update paths).
    pass
def test_get_modules_using_client():
    """Only modules whose inputs or outputs belong to the client are returned."""
    eng = EngineTest()
    @SwitchboardModule(['other_in'], ['out'])
    def uses_out(inp, out): pass
    @SwitchboardModule(['in'], ['other_out'])
    def uses_in(inp, out): pass
    @SwitchboardModule(['other_in'], ['other_out'])
    def uses_nothing(inp, out): pass
    # client1 owns the signals 'in' and 'out'.
    eng.clients = { 'client1': _Client(None, None, { 'in': None, 'out': None }, None) }
    eng.modules = { 'uses_out': uses_out,
                    'uses_in': uses_in,
                    'uses_nothing': uses_nothing }
    modules_using_client = eng.get_modules_using_client('client1')
    assert modules_using_client == set(['uses_in', 'uses_out'])
|
# EJERCICIO 31
# Primality test by trial division.
h = int(input("Ingrese un número natural: "))
# Start at 2: dividing by 1 never rules out primality.
i = 2
cont = 0
# Use < (not <=): every number divides itself, so checking h itself is useless.
while i < h:
    if h % i == 0:
        cont += 1
        break  # one divisor is enough to decide — no need to keep scanning
    i += 1
# BUG FIX: 0 and 1 are not prime by definition; the original loop never ran
# for h < 3 and reported them as prime.
if cont > 0 or h < 2:
    print("El número no es primo")
else:
    print("El número es primo")
# Generated by Django 2.2.2 on 2019-07-14 05:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the SchemeStats table (auto-generated by Django 2.2.2).

    NOTE(review): declared ``initial = True`` while depending on
    ``todo.0017`` — presumably the first migration of *this* app, which
    only references the ``todo`` app's Scheme model.
    """
    initial = True
    dependencies = [
        ('todo', '0017_auto_20190711_0912'),
    ]
    operations = [
        migrations.CreateModel(
            name='SchemeStats',
            fields=[
                # One stats row per Scheme; the FK doubles as the primary key.
                ('scheme', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='todo.Scheme')),
                ('dump', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('calc_date', models.DateField()),
                # Absolute and CAGR returns over standard horizons.
                ('one_year_abs_ret', models.FloatField()),
                ('three_year_abs_ret', models.FloatField()),
                ('three_year_cagr_ret', models.FloatField()),
                ('five_year_abs_ret', models.FloatField()),
                ('five_year_cagr_ret', models.FloatField()),
                ('ten_year_abs_ret', models.FloatField()),
                ('ten_year_cagr_ret', models.FloatField()),
                ('since_begin_abs_ret', models.FloatField()),
                ('since_begin_cagr_ret', models.FloatField()),
            ],
        ),
    ]
|
from django.contrib import admin
from items.models.asset_custom_fields import LongTextAssetField, ShortTextAssetField, FloatAssetField, IntAssetField, \
AssetField
from items.models.asset_models import Asset
from items.models.item_models import Item, Tag
from items.models.custom_field_models import Field, IntField, FloatField, ShortTextField, LongTextField
# Register every item/field/asset model with the default admin site,
# in the same order as before.
for _model in (
    Item,
    Tag,
    Field,
    IntField,
    FloatField,
    ShortTextField,
    LongTextField,
    Asset,
    AssetField,
    IntAssetField,
    FloatAssetField,
    ShortTextAssetField,
    LongTextAssetField,
):
    admin.site.register(_model)
#!/usr/bin/env python
import csv
from pymarc import MARCReader
from os import listdir
from re import search
# change this line to match your folder structure
SRC_DIR = '/home/Zwounds/workshop'

# Get a list of all .mrc files in the source directory.
# BUG FIX: the old pattern '.mrc' was unanchored and '.' matched any
# character, so e.g. 'xmrc.bak' also matched.
file_list = [x for x in listdir(SRC_DIR) if search(r'\.mrc$', x)]

# newline='' prevents the csv module from emitting blank rows on Windows.
csv_out = csv.writer(open('marc_records.csv', 'w', newline=''),
                     delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

for item in file_list:
    # MARC is a binary format: open in 'rb'. The original used the removed
    # Python 2 ``file()`` builtin in text mode and never closed it on error;
    # ``with`` closes the handle deterministically.
    with open(SRC_DIR + '/' + item, 'rb') as fd:
        reader = MARCReader(fd)
        for record in reader:
            title = author = date = subject = oclc = publisher = ''
            # title (245 $a, with $b subtitle appended when present)
            if record['245'] is not None:
                title = record['245']['a']
                if record['245']['b'] is not None:
                    title = title + " " + record['245']['b']
            # determine author: personal (100), corporate (110), then added entries
            if record['100'] is not None:
                author = record['100']['a']
            elif record['110'] is not None:
                author = record['110']['a']
            elif record['700'] is not None:
                author = record['700']['a']
            elif record['710'] is not None:
                author = record['710']['a']
            # date (260 $c)
            if record['260'] is not None:
                date = record['260']['c']
            # subject (first 650 $a)
            if record['650'] is not None:
                subject = record['650']['a']
            # oclc number (035 $a, stripped of its OCoLC prefix)
            if record['035'] is not None:
                if len(record.get_fields('035')[0].get_subfields('a')) > 0:
                    oclc = record['035']['a'].replace('(OCoLC)', '')
            # publisher (260 $b)
            if record['260'] is not None:
                publisher = record['260']['b']
            csv_out.writerow([title, author, date, subject, oclc, publisher])
import tempfile
from urllib.parse import urlparse, parse_qs
import requests
from bs4 import BeautifulSoup
from top_app.models import App, Video, ScreenShot
def scrape_all():
    """Scrape the Play Store top-selling-free chart and sync App rows.

    Apps found in the chart are created/updated and flagged ``is_top=True``;
    apps previously flagged but no longer charting are unflagged.
    """
    res = requests.get('https://play.google.com/store/apps/collection/topselling_free')
    soup = BeautifulSoup(res.text, 'html.parser')
    # NOTE(review): 'ImZGtf mpg5gc' (and the classes below) are obfuscated
    # Play Store CSS classes — they break whenever Google reships the page.
    a = soup.find_all('div', attrs={'class': 'ImZGtf mpg5gc'})
    # Apps currently marked top; entries re-found in the chart are removed,
    # so whatever remains afterwards has fallen out of the chart.
    top_apps = set(App.objects.filter(is_top=True))
    for k in a:
        parsed = urlparse(k.find('a').get('href'))
        if parsed:
            # The package name lives in the ?id= query parameter.
            package_name = parse_qs(parsed.query)['id'][0]
            try:
                app = App.objects.get(package_name=package_name)
            except App.DoesNotExist:
                app = App.objects.create(package_name=package_name)
            app.get_remote_image(k.find('img').get('data-src'))
            app.developer = k.find('a', attrs={'class': 'mnKHRc'}).string
            app.name = k.find('div', attrs={'class': "WsMG1c nnK0zc"}).string
            print(k.find('img').get('data-src'),
                  k.find('div', attrs={'class': "WsMG1c nnK0zc"}).string,
                  k.find('div', attrs={'class': 'KoLSrc'}).string
                  )
            app.is_top = True
            app.save()
            if app in top_apps:
                top_apps.remove(app)
    # Unflag apps that no longer chart.
    for app in top_apps:
        app.is_top = False
        app.save()
def scrape_one(package_name):
    """Scrape one app's detail page for screenshots and the trailer video.

    Returns the (pre-existing) App row; raises App.DoesNotExist if the
    package has not been scraped by scrape_all() first.
    """
    app = App.objects.get(package_name=package_name)
    res = requests.get(f'https://play.google.com/store/apps/details?id={package_name}')
    soup = BeautifulSoup(res.text, 'html.parser')
    tags = soup.find_all('button', attrs={'class': 'Q4vdJd'})
    for tag in tags:
        screen_shot = None
        # Images may be lazy-loaded (data-src) or inline (src).
        if tag.find('img').get('data-src'):
            screen_shot = tag.find('img').get('data-src')
        elif tag.find('img').get('src'):
            screen_shot = tag.find('img').get('src')
        if screen_shot:
            # Create-if-missing, keyed on (app, url).
            try:
                ScreenShot.objects.get(app=app, url=screen_shot)
            except ScreenShot.DoesNotExist:
                ScreenShot.objects.create(app=app, url=screen_shot)
    # video
    video_tag = soup.find('div', attrs={'class': "MSLVtf Q4vdJd"})
    if video_tag:
        if video_tag.find('img').get('src') and video_tag.find('button').get('data-trailer-url'):
            # Create-if-missing, keyed on (app, url, thumbnail).
            try:
                Video.objects.get(app=app,
                                  url=video_tag.find('button').get('data-trailer-url'),
                                  thumbnail=video_tag.find('img').get('src')
                                  )
            except Video.DoesNotExist:
                Video.objects.create(app=app,
                                     url=video_tag.find('button').get('data-trailer-url'),
                                     thumbnail=video_tag.find('img').get('src')
                                     )
    return app
|
#!/usr/bin/python
import argparse
import time
import struct
import socket
import select
import sys
from .opts import PingOptions
class PingUtil(object):
    """Minimal raw-socket ICMP echo ("ping") utility.

    Opening SOCK_RAW sockets requires elevated privileges (root on most
    systems).
    """
    def __init__(self):
        pass
    def __chesksum(self, data):
        """RFC 1071 internet checksum over *data* (bytes), little-endian pairing."""
        n = len(data)
        m = n % 2
        sum = 0
        for i in range(0, n - m, 2):
            sum += (data[i]) + ((data[i+1]) << 8)
        if m:
            sum += (data[-1])
        # Fold carries back into the low 16 bits, then take the complement.
        sum = (sum >> 16) + (sum & 0xffff)
        sum += (sum >> 16)
        answer = ~sum & 0xffff
        # Swap bytes back into network order.
        answer = answer >> 8 | (answer << 8 & 0xff00)
        return answer
    def __raw_socket(self, dst_addr, imcp_packet):
        """Open a raw ICMP socket, send one echo request, and return (send_time, socket, addr).

        The caller owns the returned socket and must close it.
        """
        rawsocket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.getprotobyname("icmp"))
        send_request_ping_time = time.time()
        # send data to the socket (the port number is ignored for raw ICMP)
        rawsocket.sendto(imcp_packet, (dst_addr, 80))
        return send_request_ping_time, rawsocket, dst_addr
    def __request_ping(self, data_type, data_code, data_checksum, data_ID, data_Sequence, payload_body):
        '''
        Build an ICMP echo request: pack once with checksum 0, compute the
        checksum over the whole packet, then repack with the real checksum.
        '''
        icmp_packet = struct.pack('>BBHHH{}s'.format(self.packet_size), data_type, data_code,
                                  data_checksum, data_ID, data_Sequence, payload_body)
        icmp_chesksum = self.__chesksum(icmp_packet)
        icmp_packet = struct.pack('>BBHHH{}s'.format(self.packet_size), data_type, data_code,
                                  icmp_chesksum, data_ID, data_Sequence, payload_body)
        return icmp_packet
    def __reply_ping(self, send_request_ping_time, rawsocket, data_Sequence, timeout = 2):
        '''
        Wait for the matching echo reply; return the round-trip time in
        seconds, or -1 on timeout.
        '''
        while True:
            started_select = time.time()
            what_ready = select.select([rawsocket], [], [], timeout)
            wait_for_time = (time.time() - started_select)
            if what_ready[0] == []: # Timeout
                return -1
            time_received = time.time()
            buf_size = 2048 if self.packet_size < 2048 else int(self.packet_size * 1.5)
            received_packet, addr = rawsocket.recvfrom(buf_size)
            # Skip the 20-byte IPv4 header; the next 8 bytes are the ICMP header.
            icmpHeader = received_packet[20:28]
            type, code, checksum, packet_id, sequence = struct.unpack(
                ">BBHHH", icmpHeader
            )
            # Type 0 == echo reply. Ignore unrelated ICMP traffic and keep
            # waiting with whatever timeout budget remains.
            if type == 0 and sequence == data_Sequence:
                return time_received - send_request_ping_time
            timeout = timeout - wait_for_time
            if timeout <= 0:
                return -1
    def ping(self, opt):
        """Ping ``opt.host`` ``opt.ping_times`` times, printing each result."""
        host = opt.host
        self.packet_size = opt.packet_size
        ping_times = opt.ping_times
        data_type = 8       # ICMP Echo Request
        data_code = 0       # must be zero
        data_checksum = 0   # "...with value 0 substituted for this field..."
        data_ID = 0         # Identifier
        data_Sequence = 1   # Sequence number
        payload_body = b'abcdefghijklmnopqrstuvwabcdefghi0123456789' #data
        dst_addr = socket.gethostbyname(host)
        print("now Ping {0} [{1}] with {2} bytes of data:".format(host, dst_addr, self.packet_size))
        for i in range(0, ping_times):
            icmp_packet = self.__request_ping(data_type, data_code, data_checksum,
                                              data_ID, data_Sequence + i, payload_body)
            send_request_ping_time, rawsocket, addr = self.__raw_socket(dst_addr, icmp_packet)
            try:
                times = self.__reply_ping(send_request_ping_time, rawsocket, data_Sequence + i)
            finally:
                # BUG FIX: the original leaked one raw socket per echo request.
                rawsocket.close()
            if times > 0:
                print("{0}/{1}: reply from {2} bytes = {3} time ={4}ms" \
                      .format(i+1, ping_times, addr, self.packet_size, int(times * 1000)))
                time.sleep(0.7)
            else:
                print("request time out")
        print()
if __name__ == "__main__":
opt = PingOptions().parse()
ping_util = PingUtil()
ping_util.ping(opt)
|
# -*- coding: latin-1 -*-
# Medic Calculator
#
# Ref: http://www-users.med.cornell.edu/~spon/picu/calc/index.htm
# Ref: http://www.medcalc.com/
# Ref: http://www.medal.org/
#
#ensymble_python2.5-0.27.py py2sis --appname=medcalc --version=0.4.1 -l EN -t H:\S60\devices\S60_3rd_FP2_SDK_v1.1\epoc32\winscw\c\python\disclaimer.txt --icon=H:\S60\devices\S60_3rd_FP2_SDK_v1.1\epoc32\winscw\c\python\logo2.svg --extrasdir=python --caption="Medic Calc" --shortcaption="Med Calc" --vendor="NF.org" H:\S60\devices\S60_3rd_FP2_SDK_v1.1\epoc32\winscw\c\python medcalc
#ensymble.py mergesis medcalc.sis PythonForS60_1_4_5_3rdEd.SIS medcalc_standalone_v1_0_0.sis
#H:/S60/devices/S60_3rd_FP2_SDK_v1.1/epoc32/winscw/c/python/
#-l EN -t H:\S60\devices\S60_3rd_FP2_SDK_v1.1\epoc32\winscw\c\python\disclaimer.appuifwtxt --icon=H:\S60\devices\S60_3rd_FP2_SDK_v1.1\epoc32\winscw\c\python\logo2.svg --extrasdir=python --caption="Medic Calc" --shortcaption=MedCalc --vendor="NF.org"
def to_unicode():
    # NOTE(review): despite the name, this returns an *encoder* factory —
    # a lambda mapping a string to its UTF-8 encoded bytes — rather than
    # decoding anything to unicode. Confirm caller expectations before
    # renaming or changing it.
    return lambda x:x.encode('utf-8')
import sys
from e32 import in_emulator
if in_emulator():
sys.path.append('c:/data/python/')
# install only in memory phone
sys.path.append(u"c:\\data\\python\\medcalc\\")
import os
import e32, graphics
from audio import say
from geralclass import *
from geral import *
from neuro import *
from uti import *
from rx import *
# Level 1 Menu
class MenuGeral (MenuFilho):
    """'Geral' (general) submenu: BSA, BMI and BEE calculators."""
    def __init__(self):
        self.Children = [u"BSA",u"BMI",u"BEE"]
        #self.Children = [u"BSA",u"BMI",u"BEE",u"Risco Cirúrgico"]
        self.Title = u"Geral"
        # Calculator screens, index-aligned with Children.
        self.MenuKid = [BSA(),BMI(),BEE()]
        #self.MenuKid = [BSA(),BMI(),BEE(),AnestesiaRisk()]
class MenuNeuro (MenuFilho):
    """'Neuro' submenu: neurological scales and scores."""
    def __init__(self):
        self.Children = [u"Glasgow CS",u'Teste Mental Abreviado',u'Zung Depressão',u'NINDS 3-Item',u'Hachinski Indice Isquemico',u'CHADS2 - AVC/AFib']
        self.Title = u"Neuro"
        # Score screens, index-aligned with Children.
        self.MenuKid = [GCS(),AbbreviatedMentalTest(),Zung(),NINDS3(),Hachinski(),CHADS2()]
class MenuUTI (MenuFilho):
    """'UTI' (ICU) submenu: respiratory/metabolic calculators."""
    def __init__(self):
        self.Children = [u"Gradiente Arterial Alveolar",u"Bicarbonato e base excesso ",u"Indíce de Ventilação", u"Osmolaridade Sérica",u"Quantidade Oxigênio",u"Saturação Oxigênio"]
        self.Title = u"UTI"
        # Calculator screens, index-aligned with Children.
        self.MenuKid = [AaGrad(),Bicarb(),VentIndex(),OsmSerica(),OxygenContent(),SatO2()]
class MenuRX (MenuFilho):
    """'RX' submenu: chest X-ray reference image viewers."""
    def __init__(self):
        self.Children = [u"Raio-X Torax PA",u"Raio-X Torax Lat",u"Raio-X Torax PA (F)",u"Raio-X Pneumonia",u"Outro Raio-X Pneumonia",u"Raio-X Antrax",u"Raio-X Marfan",u"Raio-X Câncer"]
        self.Title = u"RX"
        # Viewer screens, index-aligned with Children.
        self.MenuKid = [RxTorax(),RxToraxLat(),RxToraxFem(),RxToraxPneumonia(),RxToraxPneumonia2(),RxToraxAntrax(),RxMarfan(),RxCancer()]
class MRI (MenuFilho):
    """Placeholder 'MRI' submenu — no entries yet (not wired into MenuStruct)."""
    def __init__(self):
        self.Children = []
        self.Title = u"MRI"
        self.MenuKid = []
class MenuStruct:
    """Top-level menu: owns the root listbox UI and dispatches into submenus."""
    def __init__(self):
        self.script_lock = e32.Ao_lock()   # blocks run() until exit is requested
        self.Parent = None
        self.Children = [u"Geral",u"Neuro",u"UTI",u"RX"]
        # Submenus, index-aligned with Children.
        self.MenuKid = [MenuGeral(),MenuNeuro(),MenuUTI(),MenuRX()]
    def run(self):
        """Display the root listbox and block until the user exits."""
        from key_codes import EKeyLeftArrow
        self.lb = appuifw.Listbox(self.Children, self.lbox_observe)
        # Left arrow behaves like selecting the first entry.
        self.lb.bind(EKeyLeftArrow, lambda: self.lbox_observe(0))
        old_title = appuifw.app.title
        self.refresh()
        self.script_lock.wait()
        # Restore the application chrome on exit.
        appuifw.app.title = old_title
        appuifw.app.body = None
        self.lb = None
    def refresh(self):
        """(Re)install the title, menu, exit handler and listbox body."""
        appuifw.app.title = u"Medical"
        appuifw.app.menu = []
        appuifw.app.exit_key_handler = self.exit_key_handler
        appuifw.app.body = self.lb
    def do_exit(self):
        self.exit_key_handler()
    def exit_key_handler(self):
        """Release run() and terminate the script."""
        appuifw.app.exit_key_handler = None
        self.script_lock.signal()
        sys.exit()
    def lbox_observe(self, ind = None):
        """Open the submenu for the selected (or explicitly given) index."""
        if not ind == None:
            index = ind
        else:
            index = self.lb.current()
        focused_item = 0
        self.MenuKid[index].run(self)
        appuifw.app.screen='normal'
    def back(self):
        # The root menu has no parent to go back to.
        pass
def splash ():
possible_locations = ["E:\\python\\logo.png", "C:\\data\\python\\medcalc\\logo.png", "logo.png"]
possible_locations.append(os.path.join(sys.path[0], "logo.png"))
appuifw.app.screen='full' #(a full screen)
for location in possible_locations:
if os.path.exists(location):
try:
img1 = graphics.Image.open(location)
except:
print "Error"
def handle_redraw(rect): canvas.blit(img1)
canvas=appuifw.Canvas(event_callback=None, redraw_callback=handle_redraw)
canvas.blit(img1)
appuifw.app.body=canvas
e32.ao_sleep(3)
appuifw.app.screen='normal' #(a full screen)
# Entry point: show the splash screen then run the main menu.
# Any uncaught error is rendered in a text view so it can be read on the
# phone (Python 2 / PyS60 syntax).
try:
    splash()
    MenuStruct().run()
except Exception, e:
    import appuifw
    import traceback
    import sys
    e1,e2,e3 = sys.exc_info()
    # U+2029 is the paragraph separator understood by the Symbian text control.
    err_msg = unicode(repr(e)) + u"\u2029"*2
    err_msg += u"Call stack:\u2029" + unicode(traceback.format_exception(e1,e2,e3))
    lock = e32.Ao_lock()
    appuifw.app.body = appuifw.Text(err_msg)
    appuifw.app.menu = [(u"Exit", lambda: lock.signal())]
    appuifw.app.title = u"Error log"
    lock.wait()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import json
from detectron2.config import get_cfg
# from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from ffmpeg_main import pre_process
from np_to_json import convert_json
# constants
WINDOW_NAME = "COCO detections"
basepath = f'/app'
def setup_cfg(args):
    """Build a frozen detectron2 config from the parsed CLI arguments."""
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.MODEL.WEIGHTS = f'{basepath}/model_final_cafdb1.pkl'
    # One confidence threshold shared by every builtin model head.
    confidence_threshold = 0.5
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = confidence_threshold
    cfg.freeze()
    return cfg
def get_parser():
    """Create the command-line parser for the demo (config file path only)."""
    parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
    parser.add_argument(
        "--config-file",
        metavar="FILE",
        help="path to config file",
        default='/app/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml',
    )
    return parser
def load_model():
    """Parse known CLI args, configure logging, and build the visualization model."""
    # parse_known_args: ignore unrelated flags the host process may pass.
    args, unknown = get_parser().parse_known_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup_cfg(args)
    model = VisualizationDemo(cfg)
    return model
def visual_od(video_id=None, model = None):
    """Run the detectron2 demo over ``{basepath}/{video_id}.mp4``.

    Writes an annotated VP8 ``.webm`` next to the input and dumps the
    per-frame predictions to ``{basepath}/{video_id}.json``.
    """
    video = cv2.VideoCapture(f'{basepath}/{video_id}.mp4')
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frames_per_second = video.get(cv2.CAP_PROP_FPS)
    num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    output_fname = f'{basepath}/{video_id}out.webm'
    # assert not os.path.isfile(output_fname), output_fname
    output_file = cv2.VideoWriter(
        filename=output_fname,
        # some installation of opencv may not support x264 (due to its license),
        # you can try other format (e.g. MPEG)
        fourcc=cv2.VideoWriter_fourcc('V','P','8','0'),
        # fourcc = -1,
        # fourcc = 0x00000021,
        fps=float(frames_per_second),
        frameSize=(width, height),
        isColor=True,
    )
    frames = []
    # run_on_video yields (annotated frame, raw predictions) pairs.
    for vis_frame, frame in tqdm.tqdm(model.run_on_video(video), total=num_frames):
        output_file.write(vis_frame)
        frames.append(frame)
    video.release()
    output_file.release()
    cv2.destroyAllWindows()
    # Convert the accumulated predictions into the JSON sidecar format.
    data = convert_json(video_id=video_id, basepath=basepath, width=width, height=height, \
        frames_per_second= frames_per_second, num_frames=num_frames, all_preds=frames)
    with open(f'{basepath}/{video_id}.json', 'w') as f:
        json.dump(data,f)
    # print(out)
# visual_od(video_id='15341_', model=load_model())
# pre_process(video_id='15341') |
name = " Troy "
print(name.rstrip() + "\n") # Removes Space From Right End
print(name.lstrip() + "\n") # Removes Space From Left End
print(name.strip() + "\n") # Removes Space From Both Ends
|
import os
import numpy as np
import pandas as pd
import SimpleITK as sitk
import six
import sys
from radiomics import imageoperations, featureextractor
def radiomic_feature_extraction(casename,image_path,roi_path,save_dir,param_file='/home/kwl16/Projects/kwlqtim/mets_Params.yml'):
    """Extract pyradiomics features for every usable tumor label in an ROI.

    Parameters
    ----------
    casename : str
        Case identifier; names the output CSV.
    image_path, roi_path : str
        Paths to the image and labeled ROI volumes (any format SimpleITK reads).
    save_dir : str
        Directory for the ``<casename>-radiomics.csv`` output file.
    param_file : str
        pyradiomics parameter YAML controlling feature extraction.
    """
    extractor = featureextractor.RadiomicsFeatureExtractor(param_file)
    # load image and roi
    image = sitk.ReadImage(image_path)
    roi = sitk.ReadImage(roi_path)
    # Labels present in the ROI, skipping 0 (background).
    roi_arr = sitk.GetArrayFromImage(roi)
    met_labels = np.unique(roi_arr)[1:]
    radiomics_table = None
    for label in met_labels:
        # Compute the voxel coordinates once per label (the original
        # recomputed np.where several times).
        label_voxels = np.where(roi_arr == label)
        if len(label_voxels[0]) <= 10:  # remove tumors smaller than 10 voxels
            continue
        # Drop ROIs that are flat along any axis (1-d or 2-d regions).
        if any(len(np.unique(axis_idx)) == 1 for axis_idx in label_voxels):
            continue
        # Binary mask holding only this label.
        single_label_roi_arr = np.zeros(roi_arr.shape)
        single_label_roi_arr[label_voxels] = 1
        single_label_roi = sitk.GetImageFromArray(single_label_roi_arr)
        single_label_roi.CopyInformation(image)
        # extract features
        result = extractor.execute(image, single_label_roi)
        # Lazily create the table on the first successful label, then append
        # one row per label.
        if radiomics_table is None:
            radiomics_table = pd.DataFrame(index=list(result.keys()), columns=[label]).T
        radiomics_table.loc[label] = list(result.values())
    # save feature table (skip when no label qualified; the original crashed
    # with AttributeError on None in that case)
    if radiomics_table is not None:
        radiomics_table.to_csv(os.path.join(save_dir, casename + '-radiomics.csv'))
'''
Write a Python program to split a given dictionary of lists into list of dictionaries.
Input :
{'Science': [88, 89, 62, 95], 'Language': [77, 78, 84, 80]}
Output :
[{'Science': 88, 'Language': 77}, {'Science': 89, 'Language': 78}, {'Science': 62, 'Language': 84}, {'Science': 95, 'Language': 80}]
'''
import ast

# ast.literal_eval only accepts Python literals — unlike eval(), which would
# execute arbitrary code typed at the prompt.
input_dict = ast.literal_eval(input())
keys = list(input_dict.keys())
values = list(input_dict.values())
# Pair the i-th element of every value list into one dict per index.
# BUG FIX: the original comprehension iterated items() * range(len-1) and
# produced len(keys)*(len(list)-1) dictionaries (6 for the sample input
# instead of the expected 4); it also only ever used the first and last key.
result_dict = [dict(zip(keys, row)) for row in zip(*values)]
print(str(result_dict))
# by Shervin Hasanzadeh
|
from consts.notification_type import NotificationType
from helpers.model_to_dict import ModelToDict
from notifications.base_notification import BaseNotification
class MatchVideoNotification(BaseNotification):
    """Push notification announcing that a video was added to a match."""

    def __init__(self, match):
        self.match = match
        self.event = match.event.get()

    @property
    def _type(self):
        return NotificationType.MATCH_VIDEO

    def _build_dict(self):
        """Assemble the payload sent to subscribed clients."""
        return {
            'notification_type': NotificationType.type_names[self._type],
            'message_data': {
                'event_name': self.event.name,
                'match_key': self.match.key.id(),
                'match': ModelToDict.matchConverter(self.match),
            },
        }
class EventMatchVideoNotification(BaseNotification):
    """Rate-limited push notification for new match videos at an event."""

    def __init__(self, match):
        self.match = match
        self.event = match.event.get()

    @property
    def _timeout(self):
        # One notification per event per 10-minute window.
        return 'EventMatchVideoNotification:{}'.format(self.event.key.id()), 60*10

    @property
    def _type(self):
        return NotificationType.EVENT_MATCH_VIDEO

    def _build_dict(self):
        """Assemble the payload sent to subscribed clients."""
        payload = {}
        payload['notification_type'] = NotificationType.type_names[self._type]
        payload['message_data'] = {
            'event_key': self.event.key.id(),
            'event_name': self.event.name,
        }
        return payload
|
#!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
import tempfile
import time
import zipfile
PIPELINES = [
"prepare_gene_models",
"prepare_datasets",
"prepare_downloads",
"combine_datasets",
]
def main():
    """Run one data pipeline either locally or on a Dataproc cluster.

    CLI arguments not recognized here are forwarded to the pipeline itself.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("pipeline", choices=PIPELINES, help="Pipeline to run")
    parser.add_argument(
        "--environment",
        choices=("local", "dataproc"),
        default="local",
        help="Environment in which to run the pipeline (defaults to %(default)s",
    )
    parser.add_argument("--dry-run", action="store_true", help="Print pipeline command without running it")
    args, other_args = parser.parse_known_args()
    # Set working directory so that config.py finds pipeline_config.ini
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    from data_pipeline.config import pipeline_config  # pylint: disable=import-outside-toplevel
    start_time = time.time()
    if args.environment == "local":
        command = ["python3", "-m", f"data_pipeline.pipelines.{args.pipeline}"]
        if other_args:
            command.extend(other_args)
        print(" ".join(command[:2]) + " \\\n    " + " \\\n    ".join(command[2:]))
        if not args.dry_run:
            sys.path.insert(1, os.getcwd())
            try:
                subprocess.check_call(
                    command,
                    env={
                        **os.environ,
                        "PYSPARK_SUBMIT_ARGS": "--driver-memory 4g pyspark-shell",
                    },
                )
                elapsed_time = time.time() - start_time
                print(f"Done in {int(elapsed_time // 60)}m{int(elapsed_time % 60)}s")
            except subprocess.CalledProcessError:
                print(f"Error running data_pipeline/pipelines/{args.pipeline}.py")
                sys.exit(1)
    elif args.environment == "dataproc":
        # Zip contents of data_pipeline directory for upload to Dataproc cluster
        with tempfile.NamedTemporaryFile(prefix="pyfiles_", suffix=".zip") as tmp_file:
            with zipfile.ZipFile(tmp_file.name, "w", zipfile.ZIP_DEFLATED) as zip_file:
                for root, _, files in os.walk("data_pipeline"):
                    for name in files:
                        if name.endswith(".py"):
                            zip_file.write(
                                os.path.join(root, name),
                                os.path.relpath(os.path.join(root, name)),
                            )
            # `hailctl dataproc submit` does not support project/region/zone arguments,
            # so use `gcloud dataproc jobs submit` instead.
            command = [
                "gcloud",
                "dataproc",
                "jobs",
                "submit",
                "pyspark",
            ]
            for option in ["project", "region"]:
                value = pipeline_config.get("dataproc", option, fallback=None)
                if value:
                    command.append(f"--{option}={value}")
            command.extend(
                [
                    "--cluster=exome-results",
                    f"--py-files={tmp_file.name}",
                    "--files=pipeline_config.ini",
                    f"data_pipeline/pipelines/{args.pipeline}.py",
                ]
            )
            if other_args:
                command.append("--")
                command.extend(other_args)
            print(" ".join(command[:5]) + " \\\n    " + " \\\n    ".join(command[5:]))
            if not args.dry_run:
                subprocess.check_call(command)
                elapsed_time = time.time() - start_time
                # BUG FIX: use int() as in the local branch — the original
                # printed floats here, e.g. "Done in 1.0m23.456s".
                print(f"Done in {int(elapsed_time // 60)}m{int(elapsed_time % 60)}s")
|
from utils import linear_lr_decay
import torch
import torch.nn as nn
import numpy as np
#PPO Agent Class
class PPO:
    """Clipped-PPO trainer paired with an adversarial discriminator.

    Holds one Adam optimizer per network (actor, critic, discriminator),
    runs minibatch PPO updates with value clipping and an entropy bonus,
    then updates the discriminator with a BCE loss plus a KL penalty
    whose weight `beta` is adapted after every train() call.
    """
    #-----------------------
    # Constructor
    #-----------------------
    def __init__(
        self,
        policy_net,
        value_net,
        dis_net,
        a_dim,
        beta,
        lr=1e-4,
        max_grad_norm=0.5,
        ent_weight=0.01,
        clip_val=0.2,
        sample_n_epoch=4,
        sample_mb_size=64,
        mb_size=1024,
        device="cuda:0",
        conti=False
    ):
        # One optimizer per network, all sharing the same learning rate.
        self.opt_actor = torch.optim.Adam(policy_net.parameters(), lr)
        self.opt_critic = torch.optim.Adam(value_net.parameters(), lr)
        self.opt_dis = torch.optim.Adam(dis_net.parameters(), lr)
        self.a_dim = a_dim                    # action dimension (used for one-hot in discrete mode)
        self.beta = beta                      # KL penalty weight, adapted in update_beta()
        self.lr = lr
        self.max_grad_norm = max_grad_norm    # gradient-clipping threshold
        self.ent_weight = ent_weight          # entropy bonus coefficient
        self.clip_val = clip_val              # PPO clipping epsilon (also used for value clipping)
        self.sample_n_epoch = sample_n_epoch  # passes over the batch per train() call
        self.sample_mb_size = sample_mb_size  # minibatch size
        self.sample_n_mb = mb_size // sample_mb_size  # minibatches per pass
        self.rand_idx = np.arange(mb_size)    # shuffled in place to draw minibatches
        self.criterion = nn.BCELoss()
        # NOTE(review): torch.autograd.Variable has been a no-op wrapper since
        # PyTorch 0.4; plain torch.ones/torch.zeros would behave identically.
        self.ones_label = torch.autograd.Variable(torch.ones((sample_mb_size, 1))).to(device)
        self.zeros_label = torch.autograd.Variable(torch.zeros((sample_mb_size, 1))).to(device)
        self.device = device
        self.conti = conti                    # True => continuous action space
    #-----------------------
    # Train PPO
    #-----------------------
    def train(
        self,
        policy_net,
        value_net,
        dis_net,
        mb_obs,
        mb_actions,
        mb_old_values,
        mb_advs,
        mb_returns,
        mb_old_a_logps,
        sa_real
    ):
        """Run one full PPO + discriminator update on a collected batch.

        All mb_* arguments are numpy arrays of one rollout batch; sa_real is a
        numpy array of expert (state, action) rows sampled for the discriminator.
        Returns scalar diagnostics:
        (pg_loss, v_loss, entropy, dis_loss, mean D(real), mean D(fake), avg_kl).
        """
        # Move the whole batch to the training device once, up front.
        mb_obs = torch.from_numpy(mb_obs).to(self.device)
        mb_actions = torch.from_numpy(mb_actions).to(self.device)
        mb_old_values = torch.from_numpy(mb_old_values).to(self.device)
        mb_advs = torch.from_numpy(mb_advs).to(self.device)
        mb_returns = torch.from_numpy(mb_returns).to(self.device)
        mb_old_a_logps = torch.from_numpy(mb_old_a_logps).to(self.device)
        #Train PPO
        for i in range(self.sample_n_epoch):
            # Fresh minibatch order every epoch.
            np.random.shuffle(self.rand_idx)
            for j in range(self.sample_n_mb):
                sample_idx = self.rand_idx[j*self.sample_mb_size : (j+1)*self.sample_mb_size]
                sample_obs = mb_obs[sample_idx]
                sample_actions = mb_actions[sample_idx]
                sample_old_values = mb_old_values[sample_idx]
                sample_advs = mb_advs[sample_idx]
                sample_returns = mb_returns[sample_idx]
                sample_old_a_logps = mb_old_a_logps[sample_idx]
                sample_a_logps, sample_ents = policy_net.evaluate(sample_obs, sample_actions)
                sample_values = value_net(sample_obs)
                ent = sample_ents.mean()
                #PPO loss
                # Clipped value loss: bound how far the new value estimate
                # may move from the old one, take the worse of the two.
                v_pred_clip = sample_old_values + torch.clamp(sample_values - sample_old_values, -self.clip_val, self.clip_val)
                v_loss1 = (sample_returns - sample_values).pow(2)
                v_loss2 = (sample_returns - v_pred_clip).pow(2)
                v_loss = torch.max(v_loss1, v_loss2).mean()
                # Clipped surrogate policy loss with entropy bonus.
                ratio = (sample_a_logps - sample_old_a_logps).exp()
                pg_loss1 = -sample_advs * ratio
                pg_loss2 = -sample_advs * torch.clamp(ratio, 1.0-self.clip_val, 1.0+self.clip_val)
                pg_loss = torch.max(pg_loss1, pg_loss2).mean() - self.ent_weight*ent
                #Train actor
                self.opt_actor.zero_grad()
                pg_loss.backward()
                nn.utils.clip_grad_norm_(policy_net.parameters(), self.max_grad_norm)
                self.opt_actor.step()
                #Train critic
                self.opt_critic.zero_grad()
                v_loss.backward()
                nn.utils.clip_grad_norm_(value_net.parameters(), self.max_grad_norm)
                self.opt_critic.step()
        #Train Discriminator
        np.random.shuffle(self.rand_idx)
        for i in range(self.sample_n_mb):
            sample_idx = self.rand_idx[i*self.sample_mb_size : (i+1)*self.sample_mb_size]
            sample_obs = mb_obs[sample_idx]
            sample_actions = mb_actions[sample_idx]
            #Continuous: concat (s, a)
            if self.conti:
                mb_sa_fake = torch.cat([sample_obs, sample_actions], 1)
            #Discrete: concat (s, a_onehot)
            else:
                sample_actions_onehot = np.zeros([self.sample_mb_size, self.a_dim], dtype=np.float32)
                for j in range(self.sample_mb_size):
                    sample_actions_onehot[j, sample_actions[j]] = 1
                mb_sa_fake = torch.cat([sample_obs, torch.from_numpy(sample_actions_onehot).to(self.device)], 1)
            # Random minibatch of expert (s, a) rows.
            mb_sa_real = sa_real[np.random.randint(0, sa_real.shape[0], self.sample_mb_size), :]
            #Adversarial loss
            # Discriminator also returns a latent mean/log-std used for the
            # KL (information bottleneck) penalty weighted by self.beta.
            dis_real, z_mean_real, z_logstd_real = dis_net(torch.from_numpy(mb_sa_real).to(self.device))
            dis_fake, z_mean_fake, z_logstd_fake = dis_net(mb_sa_fake)
            kl_real = self.kl_loss(z_mean_real, z_logstd_real).mean()
            kl_fake = self.kl_loss(z_mean_fake, z_logstd_fake).mean()
            avg_kl = 0.5 * (kl_real + kl_fake)
            dis_loss = self.criterion(dis_real, self.ones_label) + self.criterion(dis_fake, self.zeros_label) + self.beta*avg_kl
            self.opt_dis.zero_grad()
            dis_loss.backward()
            self.opt_dis.step()
            # Adapt beta toward the target KL after each discriminator step.
            self.update_beta(avg_kl)
        return pg_loss.item(), v_loss.item(), ent.item(), dis_loss.item(), dis_real.mean().item(), dis_fake.mean().item(), avg_kl.item()
    #-----------------------
    # Learning rate decay
    #-----------------------
    def lr_decay(self, it, n_it):
        # Linearly anneal actor/critic LR from self.lr to 0 over n_it iterations.
        linear_lr_decay(self.opt_actor, it, n_it, self.lr)
        linear_lr_decay(self.opt_critic, it, n_it, self.lr)
    #-----------------------
    # Compute KL loss
    #-----------------------
    def kl_loss(self, mean, logstd):
        """KL divergence of N(mean, exp(logstd)^2) from a standard normal,
        summed over the last dimension (constant terms folded in)."""
        std = torch.exp(logstd)
        return torch.sum(-logstd + 0.5*(std**2 + mean**2), dim=-1) - 0.5*mean.shape[1]
    #-----------------------
    # Update beta
    #-----------------------
    def update_beta(self, avg_kl, target_kl=0.1, beta_step=1e-5):
        """Nudge the KL penalty weight: grow beta when avg_kl exceeds
        target_kl, shrink it otherwise.
        NOTE(review): after the first call self.beta becomes a tensor
        (avg_kl is a tensor) — confirm this is intended downstream."""
        with torch.no_grad():
            self.beta = self.beta - beta_step * (target_kl - avg_kl)
from django import forms
#from django.contrib.localflavor.br.forms import BRZipCodeField
#from django.contrib.localflavor.br.forms import BRPhoneNumberField
#from django.contrib.localflavor.br.forms import BRCNPJField
#from django.contrib.localflavor.br.forms import BRCPFField
#from django.contrib.localflavor.br.forms import BRStateChoiceField
#from models import Cliente
#from django.localflavor.br.forms import BRCPFField
#class ClienteForm(forms.Form):
# telefone = BRPhoneNumberField(label=_('Telefone'))
# cpf = BRCPFField(label = 'CPF', required = True)
# cnpj = BRCNPJField(label='CNPJ', required=False)
# class Meta:
# model = Cliente
|
'''
@Author: Sankar
@Date: 2021-04-09 09:06:25
@Last Modified by: Sankar
@Last Modified time: 2021-04-09 09:11:09
@Title : Dictionary_Python-5
'''
'''
Write a Python script to generate and print a dictionary that contains a
number (between 1 and n) in the form (x, x*x).
Sample Dictionary ( n = 5) :
Expected Output : {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
'''
# Build and print {x: x*x} for x in 1..n (expected for n=5:
# {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}).
n = 5
# Use a dict comprehension and avoid shadowing the builtin `dict`.
squares = {i: i * i for i in range(1, n + 1)}
print("Dictionary: {}".format(squares))
"""BSD 2-Clause License
Copyright (c) 2019, Allied Vision Technologies GmbH
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
THE SOFTWARE IS PRELIMINARY AND STILL IN TESTING AND VERIFICATION PHASE AND
IS PROVIDED ON AN “AS IS” AND “AS AVAILABLE” BASIS AND IS BELIEVED TO CONTAIN DEFECTS.
A PRIMARY PURPOSE OF THIS EARLY ACCESS IS TO OBTAIN FEEDBACK ON PERFORMANCE AND
THE IDENTIFICATION OF DEFECT SOFTWARE, HARDWARE AND DOCUMENTATION.
"""
import unittest
import docopt
# Inject 'assertNotRaise' to default test module. Tests are derived from this class.
def _assertNoRaise(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except BaseException as e:
self.fail('Function raised: {}'.format(e))
# Inject shared test camera id into the base TestCase
def _get_test_camera_id(self) -> str:
return unittest.TestCase.test_cam_id
def _set_test_camera_id(test_cam_id) -> None:
    # Stash the id on TestCase itself so every test class can read it.
    # (Annotation fixed: this function returns nothing, not str.)
    unittest.TestCase.test_cam_id = test_cam_id
# Monkey-patch the helpers onto unittest.TestCase so every derived test
# class gains assertNoRaise and the shared camera-id accessors.
unittest.TestCase.assertNoRaise = _assertNoRaise
unittest.TestCase.set_test_camera_id = _set_test_camera_id
unittest.TestCase.get_test_camera_id = _get_test_camera_id
def main():
    """Parse CLI options via docopt, build the test suites, and run the
    selected suite(s) with either a console or JUnit-XML runner."""
    # NOTE: docopt parses this string literally — do not reformat it.
    CLI = """VimbaPython test runner.
    Usage:
        runner.py -h
        runner.py -s basic -o console
        runner.py -s basic -o junit_xml REPORT_DIR
        runner.py -s (real_cam | all) -c CAMERA_ID -o console
        runner.py -s (real_cam | all) -c CAMERA_ID -o junit_xml REPORT_DIR
    Arguments:
        CAMERA_ID    Camera Id from Camera that shall be used during testing
        REPORT_DIR   Directory used for junit_export.
    Options:
        -h   Show this screen.
        -s   Testsuite to execute. real_cam and all require a camera to
             run tests against, therefore -c is mandatory.
        -c   Camera Id used while testing.
        -o   Test output: Either console or junit_xml.
    """
    args = docopt.docopt(CLI)
    loader = unittest.TestLoader()
    # Share the camera id (or None) with every TestCase via the injected helper.
    if args['CAMERA_ID']:
        unittest.TestCase.set_test_camera_id(args['CAMERA_ID'])
    else:
        unittest.TestCase.set_test_camera_id(None)
    # Select TestRunner
    if args['console']:
        runner = unittest.TextTestRunner(verbosity=1)
    elif args['junit_xml']:
        # Imported lazily so console-only runs don't need xmlrunner installed.
        import xmlrunner
        runner = xmlrunner.XMLTestRunner(output=args['REPORT_DIR'])
    # Import tests cases
    import tests.c_binding_test
    import tests.util_runtime_type_check_test
    import tests.util_tracer_test
    import tests.vimba_test
    import real_cam_tests.vimba_test
    import real_cam_tests.feature_test
    import real_cam_tests.camera_test
    import real_cam_tests.frame_test
    # Assign test cases to test suites
    BASIC_TEST_MODS = [
        tests.c_binding_test,
        tests.util_runtime_type_check_test,
        tests.util_tracer_test,
        tests.vimba_test
    ]
    REAL_CAM_TEST_MODS = [
        real_cam_tests.vimba_test,
        real_cam_tests.feature_test,
        real_cam_tests.camera_test,
        real_cam_tests.frame_test
    ]
    # Prepare TestSuites
    suite_basic = unittest.TestSuite()
    for mod in BASIC_TEST_MODS:
        suite_basic.addTests(loader.loadTestsFromModule(mod))
    suite_real_cam = unittest.TestSuite()
    for mod in REAL_CAM_TEST_MODS:
        suite_real_cam.addTests(loader.loadTestsFromModule(mod))
    # Execute TestSuites
    if args['basic']:
        runner.run(suite_basic)
    elif args['real_cam']:
        runner.run(suite_real_cam)
    elif args['all']:
        runner.run(suite_basic)
        runner.run(suite_real_cam)
if __name__ == '__main__':
main()
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
req_url = "https://www.baidu.com"
chrome_options = Options()
# Run Chrome in headless (no-UI) mode
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(options=chrome_options)
# Issue the request
browser.get(req_url)
# Print the page source
print(browser.page_source)
# Close the browser
browser.close()
|
from django.db import models
from django.utils.text import slugify
from wagtail.core.models import Page, Locale
from wagtail.snippets.models import get_snippet_models
from wagtail.images.models import AbstractImage
from wagtail.documents.models import AbstractDocument
from wagtail_localize.models import TranslatableObject
class Resource(models.Model):
    """
    An object that is pushed to the git repo.
    """
    # NOTE: the field name `object` shadows the builtin; renaming it would
    # change the DB schema and the `git_resource` reverse accessor.
    object = models.OneToOneField(TranslatableObject, on_delete=models.CASCADE, related_name='git_resource')
    # We need to save the path so that it doesn't change when pages are moved.
    path = models.TextField(unique=True)
    class Meta:
        ordering = ['path']
    @classmethod
    def get_for_object(cls, object):
        # Return the Resource for `object`, creating (and path-assigning)
        # it on first use.
        try:
            return cls.objects.get(object=object)
        except cls.DoesNotExist:
            # Raises exception if doesn't exist (let it crash!)
            instance = object.get_instance(Locale.get_default())
            return cls.objects.create(
                object=object,
                # TODO: How to deal with duplicate paths?
                path=cls.get_path(instance),
            )
    @classmethod
    def get_path(cls, instance):
        # Build the repo path for `instance`, keyed by its concrete type.
        if isinstance(instance, Page):
            # Page paths have the format: `pages/URL_PATH`
            # Note: Page.url_path always starts with a '/'
            return 'pages' + instance.url_path.rstrip('/')
        else:
            model_name = instance._meta.app_label + '.' + instance.__class__.__name__
            if isinstance(instance, tuple(get_snippet_models())):
                # Snippet paths have the format `snippets/app_label.ModelName/ID-title-slugified`
                base_path = 'snippets/' + model_name
            elif isinstance(instance, AbstractImage):
                # Image paths have the format `images/ID-title-slugified`
                base_path = 'images'
            elif isinstance(instance, AbstractDocument):
                # Document paths have the format `documents/ID-title-slugified`
                base_path = 'documents'
            else:
                # All other models paths have the format `other/app_label.ModelName/ID-title-slugified`
                base_path = 'other/' + model_name
            return base_path + '/' + str(instance.pk) + '-' + slugify(str(instance))
class SyncLog(models.Model):
    """
    Logs whenever we push or pull.
    """
    ACTION_PUSH = 1
    ACTION_PULL = 2
    ACTION_CHOICES = [(ACTION_PUSH, "Push"), (ACTION_PULL, "Pull")]
    action = models.PositiveIntegerField(choices=ACTION_CHOICES)
    # Set automatically when the log row is created.
    time = models.DateTimeField(auto_now_add=True)
    commit_id = models.CharField(max_length=40, blank=True)
    def add_translation(self, translation):
        # Record one translated resource as part of this push/pull.
        SyncLogResource.objects.create(
            log=self,
            resource=Resource.get_for_object(translation.source.object),
            locale_id=translation.target_locale_id,
            source_id=translation.source_id,
        )
    class Meta:
        ordering = ['time']
class SyncLogResourceQuerySet(models.QuerySet):
    """Queryset helpers for deduplicating log entries."""
    def unique_resources(self):
        # Distinct Resources referenced by the rows in this queryset.
        return Resource.objects.filter(
            object_id__in=self.values_list("resource__object_id", flat=True)
        )
    def unique_locales(self):
        # Distinct Locales referenced by the rows in this queryset.
        return Locale.objects.filter(id__in=self.values_list("locale_id", flat=True))
class SyncLogResource(models.Model):
    """
    Logs each resource that was transferred in a push/pull
    """
    log = models.ForeignKey(
        SyncLog, on_delete=models.CASCADE, related_name="resources"
    )
    resource = models.ForeignKey(
        Resource, on_delete=models.CASCADE, related_name="logs"
    )
    # Null if pushing this resource, otherwise set to the locale being pulled
    locale = models.ForeignKey(
        "wagtailcore.Locale",
        null=True,
        on_delete=models.CASCADE,
        related_name="+",
    )
    # The source that was active at the time this resource was pushed/pulled
    source = models.ForeignKey(
        "wagtail_localize.TranslationSource",
        null=True,
        on_delete=models.SET_NULL,
        related_name="+",
    )
    objects = SyncLogResourceQuerySet.as_manager()
    class Meta:
        ordering = ['log__time', 'resource__path']
|
"""
Regex linked-list node type definitions
"""
class RegexNode:
    """
    Base node type

    Default behaviour for derivative-style matching: deriving by any
    character yields the dead state, the node does not accept at
    end-of-input, and it "can match more" exactly when it does not
    accept at end-of-input.
    """
    def derive(self, _):
        # Any input character leads to the dead state by default.
        return NeverMatches()
    def matchEnd(self):
        # Base nodes never accept at end-of-input.
        return False
    def canMatchMore(self):
        # Kept as the logical complement of matchEnd() for subclasses.
        return not self.matchEnd()
    def __repr__(self):
        return "RegexNode"
class NeverMatches(RegexNode):
    """The dead state: inherits the base behaviour (derives to itself
    conceptually, never accepts)."""
    def __repr__(self):
        return "NeverMatches"
class EmptyString(RegexNode):
    """Accepting state: matches the end of input, consumes nothing."""
    def matchEnd(self):
        return True
    def __repr__(self):
        return "EmptyString"
class CharacterNode(RegexNode):
    """Matches exactly one literal character, then continues to `next`."""
    def __init__(self, char, next_node):
        self.char = char
        self.next = next_node
    def derive(self, char):
        # Any character other than ours kills the match.
        if char != self.char:
            return NeverMatches()
        return self.next
    def __repr__(self):
        return "CharNode({})".format(self.char)
class AlternationNode(RegexNode):
    """Matches if any of several alternative branches matches."""
    def __init__(self, alternatives):
        self.alternatives = alternatives
    def derive(self, char):
        # Derive every branch and recombine; the factory drops dead branches.
        return AlternationFactory(
            [alt.derive(char) for alt in self.alternatives]
        )
    def matchEnd(self):
        # Accept end-of-input if any branch does. any() short-circuits
        # instead of building a full list as the original did.
        return any(alt.matchEnd() for alt in self.alternatives)
    def canMatchMore(self):
        return any(alt.canMatchMore() for alt in self.alternatives)
    def __repr__(self):
        return "Alternode({})".format(self.alternatives)
def AlternationFactory(alternatives):
    """Collapse a list of alternatives: drop dead branches, unwrap a
    lone survivor, otherwise build an AlternationNode."""
    live = [node for node in alternatives if not isinstance(node, NeverMatches)]
    if not live:
        return NeverMatches()
    if len(live) == 1:
        return live[0]
    return AlternationNode(live)
class AnyCharacterNode(RegexNode):
    """Wildcard: consumes any single character, then continues to `next`."""
    def __init__(self, next_node):
        self.next = next_node
    def derive(self, _):
        # Every character is accepted, so simply advance.
        return self.next
    def __repr__(self):
        return "AnyNode"
class RepetitionNode(RegexNode):
    """Kleene-star style node: may loop via `head` or move on via `next`."""
    def __init__(self, next_node):
        # head starts as the dead state; presumably the regex compiler
        # rewires it to the loop body after construction — TODO confirm.
        self.head = NeverMatches()
        self.next = next_node
    def derive(self, char):
        # Either stay in the loop (head) or leave it (next).
        return AlternationFactory([self.head.derive(char), self.next.derive(char)])
    def matchEnd(self):
        # Zero repetitions are allowed, so acceptance is delegated to next.
        return self.next.matchEnd()
    def canMatchMore(self):
        # A repetition can always consume more input.
        return True
    def __repr__(self):
        return "RepNode(head: {}, next: {})".format(self.head, self.next)
|
#! /usr/bin/python
# coding=utf-8
"""
1.进入公司详情
2.进入股东信息
3.查看股东信息所有公司
4.匹配人详情并打开新的tab
5.查验他的所有公司是否一致
"""
import time
from selenium import webdriver
from tools.color_out import UseStyle
# Python 2 selenium script: logs into tianyancha, walks each company's
# shareholder table, opens every shareholder in a new tab and counts
# their companies (comments translated to English; output stays Chinese).
driver = webdriver.Chrome()
# driver.maximize_window()
driver.set_window_size(1920, 1080)
# driver.implicitly_wait(6)
login_url = 'http://sss.tianyancha.com/login'
user_phone = '13811567526'
user_pwd = 'ls123456'
# user_edit_element = ".//input[@class='_input input_nor contactphone']"
user_edit_element = ".//*[@id='web-content']/div/div/div/div[2]/div/div[2]/div[2]/div[2]/div[2]/input"
# pwd_edit_element = "//div/input[@class='_input input_nor contactword']"
pwd_edit_element = ".//*[@id='web-content']/div/div/div/div[2]/div/div[2]/div[2]/div[2]/div[3]/input"
# login_button_element = ".//div[@class='c-white b-c9 pt8 f18 text-center login_btn']"
login_button_element = ".//*[@id='web-content']/div/div/div/div[2]/div/div[2]/div[2]/div[2]/div[5]"
# Log in with the hard-coded credentials.
driver.get(login_url)
time.sleep(2)
driver.find_element_by_xpath(user_edit_element).clear()
# driver.find_element_by_xpath(user_edit_element).click()
driver.find_element_by_xpath(user_edit_element).send_keys(user_phone)
# driver.find_element_by_xpath(pwd_edit_element).clear()
driver.find_element_by_xpath(pwd_edit_element).send_keys(user_pwd)
driver.find_element_by_xpath(login_button_element).click()
time.sleep(2)
# One company/graph id per line in this fixture file.
group_id_file = open("/Users/lishuang/Work/PythonProject/Selenium_Python/shareholder_number_check/data_test/graph.txt")
while 1:
    lines = group_id_file.readlines(100000)
    if not lines:
        break
    for line in lines:
        # print line
        print UseStyle('sss.tianyancha.com/company/%s?nav=nav-main-holderCount' % line, fore='cyan')
        # print 'sss.tianyancha.com/company/%s?nav=nav-main-holderCount' % line
        # url = raw_input("请输入测试地址:")
        url = 'sss.tianyancha.com/company/%s?nav=nav-main-holderCount' % line
        driver.get("http://" + url.rstrip())
        time.sleep(2)
        # Grab the shareholder table
        shareholderTable = driver.find_element_by_xpath(".//*[@id='_container_holder']/div/table")
        # Total row count of the table, header row included
        shareholder_table_rows = shareholderTable.find_elements_by_tag_name('tr')
        # Subtract the header row
        print "股东信息表格-行数:", len(shareholder_table_rows) - 1
        time.sleep(2)
        for x in range(len(shareholder_table_rows)):
            if x >= 1:
                shareholder_number_element = ".//*[@id='_container_holder']/div/table/tbody/tr[" + str(
                    x) + "]/td[1]/div/a"
                shareholder_value_element = ".//*[@id='_container_holder']/div/table/tbody/tr[" + str(x) + "]/td[1]/a"
                shareholderNumber = driver.find_element_by_xpath(shareholder_number_element).text
                shareholderValue = driver.find_element_by_xpath(shareholder_value_element).text
                # print '\n股东:%s,%s' % (shareholderValue.encode("utf-8"), shareholderNumber.encode("utf-8"))
                print UseStyle('\n股东:%s,%s' % (shareholderValue.encode("utf-8"), shareholderNumber.encode("utf-8")), fore='red')
                handle_1 = driver.current_window_handle
                time.sleep(3)
                # Clicking the count opens the shareholder in a new tab.
                driver.find_element_by_xpath(shareholder_number_element).click()
                handles = driver.window_handles
                for handle in handles:
                    # if handle != handles[0]:
                    if handle != handle_1:
                        time.sleep(1)
                        print 'switch to ', handle
                        driver.switch_to.window(handle)
                        # driver.title returns the current page's title text
                        print 'title:%s ' % driver.title
                        time.sleep(5)
                        if driver.title.encode("utf-8").__contains__(shareholderValue.encode("utf-8")):
                            # Skip non-person shareholders (partnerships/companies).
                            if '有限合伙' in shareholderValue.encode("utf-8"):
                                print '不是老板,是有限合伙'
                            elif '公司' in shareholderValue.encode("utf-8"):
                                print '不是老板,是公司'
                            elif '股' in shareholderValue.encode("utf-8"):
                                print '不是老板,是股'
                            else:
                                driver.find_element_by_xpath(
                                    ".//*[@id='web-content']/div/div/div[2]/div/div[1]/div[2]/div[2]/div[1]/div/span[2]").click()
                                time.sleep(1)
                                # Grab the person's "all companies" table
                                human_table = driver.find_element_by_xpath(
                                    "//div[@class='p20 human-table collapse humanTab humanTab2 in']/table")
                                # Total row count of the table, header row included
                                human_table_rows = human_table.find_elements_by_tag_name('tr')
                                # print "他的所有公司表格-行数:", len(human_table_rows) - 1
                                print UseStyle("他的所有公司表格-行数:%s" % str(len(human_table_rows) - 1), fore='blue')
                                # Print each company's details
                                # for y in range(len(human_table_rows)):
                                #     if y >= 1:
                                #         print str(y)
                                #         company_value_element = ".//tr[" + str(y) + "]/td[1]/a[@class='c9 hover_underline point']"
                                #         company_value = driver.find_element_by_xpath(company_value_element).text
                                #         print '公司:%s' % company_value.encode("utf-8")
                        driver.close()
                        # driver.switch_to.window(handles[0])
                        driver.switch_to.window(handle_1)
                        time.sleep(1)
driver.quit()
|
# def get_box_area(width, length, height):
# box_area = width * length * height
# print(box_area)
#
# get_box_area(4, 4, 2)
# get_box_area(width=1, length=1, height=2)
def get_box_area(width, length, height):
    """Return the box volume width*length*height, or 0 if any dimension
    is negative."""
    if min(width, length, height) < 0:
        return 0
    return width * length * height

box1 = get_box_area(4, -4, 2)
box2 = get_box_area(width=1, length=1, height=2)
print(box1, box2)
import numpy as np
def np2flatstr( X, fmt='% .6f' ):
    """Format every element of array X with `fmt`, joined by single spaces."""
    return ' '.join(fmt % value for value in X.flatten())
class GMMPrior(object):
    """Normal-Wishart prior for a Gaussian mixture component.

    Parameters
    ----------
    degFree : Wishart degrees of freedom (nu).
    invW    : inverse scale matrix, shape (D, D); D is taken from it.
    muPrec  : precision (beta) of the Gaussian prior on the mean.
    muMean  : prior mean; a scalar (broadcast to D) or a length-D vector.
    """
    def __init__(self, degFree, invW, muPrec, muMean=0.0):
        self.beta = muPrec
        self.invW = invW
        self.dF = degFree
        self.D = invW.shape[0]
        muMean = np.asarray(muMean)
        if muMean.size == self.D:
            self.m = muMean
        elif muMean.size == 1:
            # Broadcast a scalar prior mean to all D dimensions.
            self.m = np.tile(muMean, (self.D))
        else:
            # Previously this case silently left self.m unset, producing a
            # confusing AttributeError later; fail fast instead.
            raise ValueError(
                'muMean must be a scalar or a length-%d vector' % self.D)
    def __str__(self):
        # NOTE(review): np2flatstr calls .flatten(); scalar beta/dF would
        # fail here — confirm callers pass arrays if they rely on __str__.
        return '%s %s %s %s' % (np2flatstr(self.beta), np2flatstr(self.m), np2flatstr(self.dF), np2flatstr(self.invW))
    def getMean(self):
        """Return (mean, covariance) of the prior's expected Gaussian.

        The inverse-Wishart mean invW / (dF - D - 1) requires dF > D + 1.
        """
        mu = self.m
        cov = self.invW / (self.dF - self.D - 1)
        return mu, cov
    def getMAP(self):
        """Return the MAP (mode) estimate (mean, covariance)."""
        assert self.dF > self.D + 1
        muMAP = self.m
        covMAP = self.invW / (self.dF + self.D + 1)
        return muMAP, covMAP
    def getPosteriorParams(self, N, mean, covar):
        """Return a new GMMPrior updated with N observations having the
        given sample mean and covariance (standard Normal-Wishart update)."""
        beta = self.beta + N
        m = (self.beta * self.m + N * mean) / beta
        mdiff = mean - self.m
        invW = self.invW + N * covar \
               + (self.beta * N) / (self.beta + N) * np.outer(mdiff, mdiff)
        return GMMPrior(self.dF + N, invW, beta, m)
|
def getMonth(month, year=None):
    """Return the Hebrew month name for `month` (1-13).

    `year` is only consulted for month 12, where leap years use "Adar I"
    and regular years "Adar". BUG FIX: the original read `year` as an
    undefined global there, which always raised NameError; it is now an
    explicit, optional parameter (backward-compatible default).
    Out-of-range months return None, matching the original if/elif chain.
    """
    names = (
        "Nisan", "Iyyar", "Sivan", "Tammuz", "Av", "Elul",
        "Tishri", "Heshvan", "Kislev", "Teveth", "Shevat",
    )
    if 1 <= month <= 11:
        return names[month - 1]
    if month == 12:
        # Month 12 is "Adar I" in a leap year, plain "Adar" otherwise.
        if date_utils.calendar_util.hebrew_leap(year):
            return "Adar I"
        return "Adar"
    if month == 13:
        return "Adar II"
def getWeekday(julian):
    """Map a (possibly fractional) Julian day number to a weekday
    index in 0-6; the +2 offset aligns day 0 with the calendar."""
    return (int(julian) + 2) % 7
def getLastDayOfGregorianMonth(month, year):
    """Return the number of days in the given Gregorian month/year,
    delegating the February leap-year check to date_utils."""
    if month == 2 and date_utils.calendar_util.leap_gregorian(year):
        return 29
    days_per_month = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return days_per_month[month - 1]
|
# First test of cloning a repository directly from VSCode (output in Portuguese).
print("Esse arquivo é o primeiro teste para clonar um repositório diretamente do VSCode")
print("Depois de muitas tentativas pelo gitBash")
# Python program to convert a real value
# to IEEE 754 Floating Point Representation.
# Function to convert a
# fraction to binary form.
def binaryOfFraction(fraction):
    """Return the binary expansion of a fractional part as a bit string.

    Repeatedly doubles `fraction`, emitting the integer bit each round,
    until it reaches zero (floats are dyadic, so the loop terminates).
    """
    bits = []
    while fraction:
        fraction *= 2
        if fraction >= 1:
            bits.append('1')
            fraction -= 1
        else:
            bits.append('0')
    return ''.join(bits)
# Function to get sign bit,
# exp bits and mantissa bits,
# from given real no.
def floatingPoint(real_no):
    """Split a real number into IEEE-754 single-precision fields.

    Returns (sign_bit, exponent_bits, mantissa_bits) as (int, str, str).
    NOTE(review): requires abs(real_no) >= 1 — for values in (-1, 1),
    bin(int(real_no)) contains no '1', so int_str.index('1') below
    raises ValueError. Confirm callers never pass such values.
    """
    # Setting Sign bit
    # default to zero.
    sign_bit = 0
    # Sign bit will set to
    # 1 for negative no.
    if(real_no < 0):
        sign_bit = 1
    # converting given no. to
    # absolute value as we have
    # already set the sign bit.
    real_no = abs(real_no)
    # Converting Integer Part
    # of Real no to Binary
    int_str = bin(int(real_no))[2 : ]
    # Function call to convert
    # Fraction part of real no
    # to Binary.
    fraction_str = binaryOfFraction(real_no - int(real_no))
    # Getting the index where
    # Bit was high for the first
    # Time in binary repres
    # of Integer part of real no.
    ind = int_str.index('1')
    # The Exponent is the no.
    # By which we have right
    # Shifted the decimal and
    # it is given below.
    # Also converting it to bias
    # exp by adding 127.
    exp_str = bin((len(int_str) - ind - 1) + 127)[2 : ]
    # getting mantissa string
    # By adding int_str and fraction_str.
    # the zeroes in MSB of int_str
    # have no significance so they
    # are ignored by slicing.
    mant_str = int_str[ind + 1 : ] + fraction_str
    # Adding Zeroes in LSB of
    # mantissa string so as to make
    # it's length of 23 bits.
    mant_str = mant_str + ('0' * (23 - len(mant_str)))
    # Returning the sign, Exp
    # and Mantissa Bit strings.
    return sign_bit, exp_str, mant_str
# Driver Code
if __name__ == "__main__":
    # Function call to get
    # Sign, Exponent and
    # Mantissa Bit Strings.
    sign_bit, exp_str, mant_str = floatingPoint(-2.250000)
    # Final Floating point Representation,
    # rendered as sign|exponent|mantissa.
    ieee_32 = str(sign_bit) + '|' + exp_str + '|' + mant_str
    # Printing the ieee 32 representation.
    print("IEEE 754 representation of -2.250000 is :")
    print(ieee_32)
|
import pygame.ftfont
import time
class Course:
    """Shows a 7-frame tutorial (images/course_1.png .. course_7.png)
    on screen, then re-activates the game via `stats.game_active`."""
    def __init__(self,screen,stats):
        self.screen=screen
        self.stats=stats
        self.screen_rect=screen.get_rect()
        self.rect=pygame.Rect(550,350,200,100)
        # show_flag: True while the tutorial is being displayed.
        self.show_flag=False
        # show_time: index of the next course image (1..7).
        self.show_time=1
    def show_course(self):
        # Blit the current frame and advance; after frame 7, reset and
        # hand control back to the game.
        if self.show_time<=7 and self.show_flag==True:
            self.course_image = pygame.image.load('images/course_' + str(self.show_time) + '.png')
            self.screen.blit(self.course_image, (321, 483))
            # NOTE(review): sleeping in the render path blocks the event
            # loop for 0.25s per frame — confirm this is intentional.
            time.sleep(0.25)
            self.show_time+=1
        if self.show_time==8:
            self.stats.game_active=True
            self.show_time=1
            self.show_flag=False
|
from rest_framework import serializers
from admission.serializers import UserSerializer
from .models import Payments,Studentpayments,Accountant
from student.models import Student
class AccountantSerializer(serializers.Serializer):
    """Serializes an Accountant as a nested user plus their ESP id."""
    user= UserSerializer()
    esp_id= serializers.SlugField()
class PaymentsSerializer(serializers.Serializer):
    """Serializes one payment transaction record."""
    payment_type=serializers.IntegerField()
    paid_method=serializers.IntegerField()
    paid_by=UserSerializer()
    paid_for=serializers.IntegerField()
    paid_to=serializers.PrimaryKeyRelatedField(queryset=Accountant.objects.all())
    paid_amount=serializers.IntegerField()
    date_of_transaction=serializers.DateField()
    short_description=serializers.CharField()
    # Optional — presumably only relevant for cheque payments; confirm.
    cheque_no=serializers.CharField(required=False)
class StudentpaymentsSerializer(serializers.Serializer):
    """Links a Student to one of their Payments (both by primary key)."""
    student=serializers.PrimaryKeyRelatedField(queryset=Student.objects.all())
    payment=serializers.PrimaryKeyRelatedField(queryset=Payments.objects.all())
class FeesDueSerializer(serializers.Serializer):
    """Serializes an outstanding fee entry for a student."""
    student=serializers.PrimaryKeyRelatedField(queryset=Student.objects.all())
    fee_type=serializers.IntegerField()
    ac_start_date=serializers.DateField()
    rate=serializers.IntegerField()
    date=serializers.DateField()
from itertools import islice
from ..providers import shutterstock, local
from .. import celery_app
# Image providers with per-provider weights applied to each result's relevance.
providers = [
    {'module': shutterstock, 'weight': 0.9},
    {'module': local, 'weight': 1.0}
]
@celery_app.task
def search(concept):
    """Fan a concept query out to every provider and return the top-10
    (weight, image) pairs, weighted by provider weight, the concept's
    relevance, and position within each provider's result list."""
    weight = float(concept['relevance'])
    images = []
    for provider in providers:
        # One sub-list per provider: (provider_weight * relevance, image).
        images.append([(provider['weight'] * weight, i)
                       for i in provider['module'].fetch(concept)])
    # weight by positions
    # Earlier results in each provider list keep more of their weight
    # (factor 1 - n/len for position n).
    images = [(w * (1-n/len(l)), i) for l in images for n,(w,i) in enumerate(l)]
    # sort by weight
    images = reversed(sorted(images, key=lambda i:i[0]))
    # keep top 10 images
    return list(islice(images, 10))
|
# -*- coding: utf-8 -*-
import datetime
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.db.models.aggregates import Avg
class Sensor(models.Model):
    """A user-owned sensor/actuator (temperature, heating, luminosity,
    lamp, window). Verbose names and help texts are in French (UI text)."""
    name = models.CharField("nom du capteur", max_length=200)
    type = models.CharField("type du capteur", max_length=20, choices=((u'temp', u'Température'),
                                                                       (u'heating', u'Chauffage'),
                                                                       (u'lux', u'Luminosité'),
                                                                       (u'lamp', u'Lampe'),
                                                                       (u'window', u'Fenêtre')), default='temp')
    description = models.CharField("description", max_length=200, blank=True,
                                   help_text="Cette description sera utilisée dans le reste de l'interface de GreenHub pour " \
                                             "vous permettre d'identifier précisément ce capteur par rapport à un autre.")
    hardware_id = models.CharField("identifiant matériel", max_length=20, blank=True,
                                   help_text="Cet identifiant est indiqué sur une étiquette au dos du capteur, ou sur l'emballage " \
                                             "qui le contient ; il est formé de huit caractères hexadécimaux.")
    user = models.ForeignKey(User)
    def last_state(self):
        # Most recent State for this sensor, or None if it has none.
        try:
            return self.state_set.order_by("captured_at").reverse()[0]
        except IndexError:
            return None
    def last_hour_state(self):
        # Average value captured over the last hour (None if no readings).
        return self.state_set.filter(captured_at__gte = datetime.datetime.now() - datetime.timedelta(hours=1)).aggregate(Avg('value'))['value__avg']
    @property
    def last_day_states(self):
        # All readings captured within the last 24 hours.
        return self.state_set.filter(captured_at__gte = datetime.datetime.now() - datetime.timedelta(days=1)).all()
    def __unicode__(self):
        return u"%s - %s" % (self.name, self.description)
class State(models.Model):
    """A single timestamped reading (value) for one sensor."""
    # Default is the callable datetime.now, evaluated per row.
    captured_at = models.DateTimeField('Date de capture', default=datetime.datetime.now)
    value = models.FloatField('Valeur')
    sensor = models.ForeignKey(Sensor)
    def __unicode__(self):
        return u"%s - %s (%s)" % (self.captured_at, self.value, self.sensor.name)
class Score(models.Model):
    """A computed score value for a user at a point in time."""
    calculated_at = models.DateTimeField('Date', default=datetime.datetime.now)
    value = models.FloatField('Score')
    user = models.ForeignKey(User)
    def __unicode__(self):
        """
        Return a textual representation of the score.
        """
        return "%s" % self.value
    def __repr__(self):
        """
        Return a debug representation of the score.
        """
        return "<Score: user=%s value=%s calculated_at=%s>" % (self.user, self.value, self.calculated_at)
class Message(models.Model):
    """A coded message emitted for a user, with optional payload data."""
    user = models.ForeignKey(User)
    code = models.CharField('Code', max_length=30)
    data = models.CharField('Données', max_length=1000, blank=True)
    # Set automatically when the row is created.
    emitted_at = models.DateTimeField('Date de création', auto_now_add=True)
    def __unicode__(self):
        return u"%s : %s" % (self.user, self.code)
|
# Generated by Django 3.1 on 2020-10-30 21:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoints Profile.user at user_account.UserAccount and deletes the
    old UserAccount model from this app (follows core.0004)."""
    dependencies = [
        ('core', '0004_remove_useraccount_user_ptr'),
        ('user_account', '0001_initial'),
        ('admin', '0003_logentry_add_action_flag_choices'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profiles', to='user_account.useraccount'),
        ),
        migrations.DeleteModel(
            name='UserAccount',
        ),
    ]
|
from django.db import models
from django.utils.timezone import now
# Create your models here.
class Upload(models.Model):
    """A user-uploaded file with a caption and the date it was added."""
    caption = models.CharField(default="File", max_length=200)
    # BUG FIX: pass the callable `now`, not `now()` — a called default is
    # evaluated once at import time, freezing the same date into every row.
    date_added = models.DateField(default=now)
    file = models.FileField(upload_to="uploads/%d-%m-%y", default=None)
    def __str__(self):
        return self.caption
|
def solution(A):
    """Return the minimal |sum(left) - sum(right)| over all splits of A
    into a non-empty prefix and the remaining suffix."""
    total = sum(A)
    left = A[0]
    # For a prefix summing to `left`, the gap is |left - (total-left)|.
    best = abs(2 * left - total)
    for value in A[1:-1]:
        left += value
        gap = abs(2 * left - total)
        if gap < best:
            best = gap
    return best

print(solution([3,1,2,4,3]))
from django.db import models
from uuid import uuid4
# id = Default calls a function to randomly generate a unique identifier.
#auto_now_add only sets on create, while auto_now will set on both create and update.
class Note(models.Model):
    """A note with a UUID primary key and automatic created/modified stamps."""
    # uuid4 (the callable) generates a fresh random id per row
    id = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    title = models.CharField(max_length=200)
    content = models.TextField(blank=True)
    # auto_now_add: set once on create; auto_now: refreshed on every save
    created_at = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
#You might notice that the new fields aren’t showing up in the admin interface. This is because when you use auto_now, the field gets set to read-only, and such fields aren’t shown in the panel.
#To get the read-only fields to show up in the interface add this into notes/admin.py:
# class NoteAdmin(admin.ModelAdmin):
# readonly_fields=('created_at', 'last_modified')
# admin.site.register(Note, NoteAdmin) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from logger.logger import app_logger
from functools import wraps
def recordLog(func):
    """Decorator that logs the wrapped function's name and positional args
    via app_logger before delegating to it.
    """
    @wraps(func)  # preserve func's __name__/__doc__ on the wrapper
    def wapper(*args, **kwargs):
        # BUG FIX: original logged `wraps.func_name` - functools.wraps has no
        # such attribute, so every decorated call raised AttributeError.
        # Log the decorated function's own name instead.
        app_logger.info("[%s started][param:%s]" % (func.__name__, args))
        return func(*args, **kwargs)
    return wapper
|
#! /usr/bin/env python
# -*- coding: utf8 -*-
__author__ = "Anita Annamalé"
__version__ = "1.0"
__copyright__ = "copyleft"
__date__ = "2016/05"
#-------------------------- MODULES IMPORTATION -------------------------------#
import sys
import os
#-------------------------- FUNCTIONS DEFINITION ------------------------------#
def parse_cltr(filename):
    """Parse a CD-HIT-est .clstr file.

    Args:
        filename [STR] = cluster information file name
    Returns:
        annot [DICT] = gene name -> cluster name
        clstr_vs_ref [DICT] = cluster name -> representative gene name
    """
    annot = {}
    clstr_vs_ref = {}
    current = None
    with open(filename, "rt") as infile:
        for line in infile:
            if line.startswith('>Cluster '):
                # ">Cluster 12" -> "Cluster12" (leading '>' stripped)
                current = line.rstrip().replace(" ", "")[1:]
            else:
                fields = line.split(" ")
                gene = fields[1]
                # drop the trailing "..." from the gene token
                annot[gene[:-3]] = current
                # "*" marks the cluster's representative sequence
                if fields[2] == '*\n':
                    clstr_vs_ref[current] = gene[1:-3]
    return annot, clstr_vs_ref
def create_multifasta(fasta_file, annot, clstr_vs_ref, outdir):
    """Create one multifasta file per cluster in `outdir`.

    Each output file is named after the cluster's representative gene and
    contains every gene (header AND sequence lines) belonging to the cluster.

    Args:
        fasta_file [STR] = fasta file of the unclustered database
        annot [DICT] = gene name -> cluster name
        clstr_vs_ref [DICT] = cluster name -> representative gene name
        outdir [STR] = output directory name
    No return.
    """
    fasta_data = {}
    ref_name = None
    with open(fasta_file, "rt") as multifasta:
        for line in multifasta:
            if line.startswith('>'):
                name = line.rstrip()                 # gene name
                clstr_name = annot[name]             # its cluster
                ref_name = clstr_vs_ref[clstr_name]  # cluster representative
            # BUG FIX: the append was inside the header branch, so sequence
            # lines were silently dropped and output files held only headers.
            # Every line (header or sequence) now follows the current gene's
            # representative bucket.
            fasta_data.setdefault(ref_name, []).append(line)
    # write one multifasta file per representative
    for element in fasta_data:
        with open('{0}/{1}.fa'.format(outdir, element), "wt") as output:
            output.write("".join(fasta_data[element]))
#--------------------------------- MAIN ---------------------------------------#
if __name__ == '__main__' :
    # command-line parsing (Python 2 script: note the print statement below)
    if not len(sys.argv) == 4:
        msg = "usage: python cluster2multifasta.py <file.clstr> <file.fasta> \
<fasta_dir>"
        print msg
        exit(1)
    # resolve the three arguments to absolute paths
    clstr_name = os.path.abspath(sys.argv[1])
    fasta_name = os.path.abspath(sys.argv[2])
    dir_name = os.path.abspath(sys.argv[3])
    # parse the cluster information file from CD-HIT
    annotate_gene, get_ref = parse_cltr(clstr_name)
    # create the output directory (fails if it already exists)
    os.mkdir(dir_name)
    # write one multifasta per cluster into it
    create_multifasta(fasta_name, annotate_gene, get_ref, dir_name)
|
import os
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
#import StorageServer
import sys
import xbmc
import urllib2
import time
try:
import json
except:
import simplejson as json
from addon import *
from zipfile import ZipFile
import sqlite3
class MormonChannel(Plugin):
LANGUAGES = {'0':1,'1':2}
def __init__(self,plugin):
self.home = plugin.home
self.mdb = xbmc.translatePath(os.path.join(self.home, 'resources','mdb.sqlite'))
self.config_url = "http://broadcast3.lds.org/crowdsource/Mobile/MormonChannel/production/v1/1/"
self.mdb_url = None
self.icon = plugin.mcicon
self.fanart = plugin.mcfanart
self.__settings__ = plugin.__settings__
self.language_id = MormonChannel.LANGUAGES[self.__settings__.getSetting('mc_language')]
self.conn = sqlite3.connect(self.mdb)
self.c = self.conn.cursor()
# This is to map a xbmc type to the different types of media in the sqlite database according to the ID from the item_type table
#self.mtypes = ['video','music','video','music','video','video','pictures','video','video']
#since you can't have Plot with music, we'll call everything video
self.mtypes = ['video','video','video','video','video','video','pictures','video','viceo']
def get_db(self):
js = json.loads(make_request(self.config_url + 'config.json'))
version = str(js['catalog_version'])
self.mdb_url = self.config_url + version + '.sqlite.zip'
mdbzip = make_request(self.mdb_url)
tmppath = os.path.join(xbmc.translatePath("special://temp"),"mdb.zip")
with open(tmppath,'wb') as f:
f.write(mdbzip)
z = ZipFile(tmppath,'r')
mdb = z.open('mdb.sqlite','r')
with open(self.mdb, 'wb') as f:
f.write(mdb.read())
mdb.close()
for i in range(0,5):
try:
os.remove(tmppath)
break
except:
time.sleep(.5)
else:
print "ERROR: Coudn't remove temp mdb. %s" % sys.exc_info()[1]
self.conn = sqlite3.connect(self.mdb)
self.c = self.conn.cursor()
def get_main_menu(self):
self.get_db()
self.add_dir(self.icon,{'Title':'Featured'},{'name':"Featured",'feature':True,'mode':14},self.fanart)
for root_row in self.c.execute("SELECT collection_id FROM root WHERE language_id = %d ORDER BY sort ASC" % self.language_id).fetchall():
collection_id = root_row[0]
for col_row in self.c.execute("SELECT title,item FROM collection WHERE gid = %s AND language_id = %d" % (collection_id,self.language_id)).fetchall():
title = col_row[0]
self.add_dir(self.icon,{'Title':title.encode('utf8')},{'name':title.encode('utf8'),'collection_id':collection_id,'has_item':col_row[1],'mode':14},self.fanart)
def get_media_type(self,collection_id):
mtype = "video"
for row in self.c.execute("SELECT item_id FROM item_collection_map WHERE collection_id = %s ORDER BY sort ASC" % collection_id):
for item_row in self.c.execute("SELECT type FROM item WHERE gid = %s" % row[0]):
mtype = self.mtypes[item_row[0]]
return mtype
def get_subcollections(self,params):
for row in self.c.execute("SELECT child_collection FROM collection_map WHERE parent_collection = %s ORDER BY sort ASC" % params['collection_id']).fetchall():
collection_id = row[0]
for col_row in self.c.execute("SELECT title,subtitle,description,image_id,item FROM collection WHERE gid = %s AND language_id = %d" % (collection_id,self.language_id)).fetchall():
title = col_row[0].encode('utf8')
subtitle = col_row[1].encode('utf8')
description = col_row[2].encode('utf8')
image_id = col_row[3]
has_item = col_row[4]
(thumb_url,fanart_url) = self.get_images(image_id)
mtype = self.get_media_type(collection_id)
self.add_dir(thumb_url,{'Title':col_row[0],'Plot':description},{'name':title,'collection_id':collection_id,'has_item':has_item,'mode':14},fanart_url,mtype)
def get_images(self,image_id):
img_list = []
for img_row in self.c.execute("SELECT width,url FROM image WHERE image_id = %s" % image_id).fetchall():
img_list.append((img_row[0],img_row[1]))
img_list.sort(key=lambda tup: tup[0])
try: thumb_url = [url for width,url in img_list if width < 400][-1]
except: thumb_url = None
#try: fanart_url = [url for width,url in img_list if width > 1000][-1]
try: fanart_url = [url for width,url in img_list][-1]
except: fanart_url = None
return (thumb_url,fanart_url)
def get_radio_meta(self,meta_url):
xml = make_request(meta_url)
info = {}
try: info['Artist'] = [xml.split('<artist>')[1].split('</artist>')[0].encode('utf8')]
except: info['Artist'] = [""]
try: info['Title'] = xml.split('<title>')[1].split('</title>')[0].encode('utf8')
except: info['Title'] = ""
try:
info['Plot'] = xml.split('<comment>')[1].split('</comment>')[0].encode('utf8')
info['Album'] = info['Plot']
info['TVShowTitle'] == info['Plot']
except: pass
try: info['Duration'] = xml.split('<length>')[1].split('</length>')[0].encode('utf8')
except: pass
return info
def get_items(self,item_id,collection):
for item_row in self.c.execute("SELECT type,title,subtitle,description,author,url,brightcove_id,\
share_url,alternate_url,live_stream_meta_url,image_id,duration,downloadable \
FROM item WHERE gid = %s AND language_id = %d" % (item_id,self.language_id)).fetchall():
mtype = item_row[0]
title = item_row[1].encode('utf8')
subtitle = item_row[2].encode('utf8')
description = item_row[3].encode('utf8')
author = item_row[4].encode('utf8')
# Determine which url is active
urls = [item_row[5],item_row[6],item_row[7],item_row[8]]
url = None
for u in urls:
if u == None or u == '': continue
#print "%s -- %s" % (title,u.encode('utf8'))
try:
# for some reason the URLs that also have a duration of NULL, do not exist
if not item_row[11]:
res = urllib2.urlopen(u)
res.close()
url = u
break
except:
print "Couldn't open url %s. Trying another" % u
if url and "www.youtube.com" in url:
url = self.get_youtube_link(url) # get youtube plugin URL
meta_url = item_row[9]
image_id = item_row[10]
duration = str(float(float(item_row[11])/60.0)) if item_row[11] else 0
downloadable = item_row[12]
(thumb_url,fanart_url) = self.get_images(image_id)
#if mtype == 1: # Audio
# self.add_link(thumb_url,{'Title':title,'Album':collection,'Artist':[author],'Duration':duration},{'name':title,'url':url,'mode':5},fanart_url,self.mtypes[mtype])
if mtype == 2 or mtype == 1: # Video
self.add_link(thumb_url,{'Title':title,'Plot':description,'TVShowTitle':collection,'Artist':[author],'Duration':duration},{'name':title,'url':url,'mode':5},fanart_url)
if mtype == 3 or mtype == 4: # Audio Stream and Video Stream
info = self.get_radio_meta(meta_url)
self.add_link(thumb_url,{},{'name':title + ' - ' + info['Title'] + ' - ' + info['Artist'][0],'url':url,'mode':5},fanart_url,self.mtypes[mtype])
if mtype == 6: # Image
# Take this opportunity to remove old images from the temp folder
tempdir = xbmc.translatePath('special://temp')
for filename in os.listdir(tempdir):
if filename[:7] == "tmpimg-":
try:
shutil.rmtree(os.path.join(tempdir,filename))
except:
print "Couldn't delete folder %s from the temp folder" % os.path.join(tempdir,filename)
if not url and fanart_url:
url = fanart_url
self.add_link(thumb_url,{'Title':title,'Caption':description,'Category':collection,'Author':author},{'name':title,'url':url,'mode':16},fanart_url,self.mtypes[mtype])
def get_items_from_collection(self,params):
for row in self.c.execute("SELECT item_id FROM item_collection_map WHERE collection_id = %s ORDER BY sort ASC" % params['collection_id']).fetchall():
item_id = row[0]
collection = None
for col_row in self.c.execute("SELECT title FROM collection WHERE gid = %s" % params['collection_id']):
collection = col_row[0]
self.get_items(item_id,collection)
def get_featured(self,params):
for row in self.c.execute("SELECT collection_id,item_id FROM feature WHERE language_id = %d ORDER BY sort ASC" % self.language_id).fetchall():
item_id = row[1]
collection = None
for col_row in self.c.execute("SELECT title FROM collection WHERE gid = %s" % row[0]):
collection = col_row[0]
self.get_items(item_id,collection)
def broker(self,params):
#print params
try: collection_id = params['collection_id']
except: collection_id = None
try: has_item = int(params['has_item'])
except: has_item = None
try: feature = params['feature']
except: feature = None
if collection_id == None and has_item == None and feature == None:
self.get_main_menu()
elif feature:
self.get_featured(params)
elif not has_item:
self.get_subcollections(params)
else:
self.get_items_from_collection(params)
|
import pytest
from retention import models,utils
# Shared fixture: one model instance reused by every test below.
testobj = models.ShiftedBetaGeom()

# Inputs load_training_data must reject: non-numeric, negative, all-zero, empty.
data_junk_vals = [
    ['one','two',3,4],[8,4,3,-1], [0,0,0,0],[]
]

@pytest.mark.parametrize("a",data_junk_vals)
def test_data_loading_bad_data(a):
    # junk input must raise ValueError, not load silently
    with pytest.raises(ValueError):
        testobj.load_training_data(a)
# (t, a, b) combinations spanning small, fractional and extreme parameters.
param_values = [
    (1,40,20),
    (20,40,20),
    (3,2,3),
    (3,0.1293,3),
    (4,3,0.2038),
    (0,0,0),
    (4,102938504309348,0),
    (3,3,1029833)
]

@pytest.mark.parametrize("t,a,b",param_values)
def test_get_churn_prob_t(t,a,b):
    p = testobj.get_churn_prob_t(t,a,b)
    assert (p >= 0.0) & (p<=1.0)  # a probability can't be outside [0, 1]
    # out-of-range t values must be rejected with an AssertionError
    with pytest.raises(AssertionError):
        testobj.get_churn_prob_t(-1,10,12)
        testobj.get_churn_prob_t(1000,2,3)
# Monotonically non-increasing, positive retention curves: valid training data.
good_training_data = [
    [100,98,94,93,91,90],
    [100,98,95,92,91],
    [1,1,1,1,1],
    [100,50,50,50,50]
]
# Invalid curves: too-steep drop, increasing counts, negatives, all zeros.
bad_training_data = [
    [1000,494,201,100],
    [100,101,98,94,93,91,90],
    [-100,-98,-97],
    [0,0,0,0,0,0]
]

@pytest.mark.parametrize("x",good_training_data)
def test_predict_happy(x):
    # valid data must train and predict without raising
    testobj.train(x)
    testobj.predict()

@pytest.mark.parametrize("x",bad_training_data)
def test_predict_fail(x):
    # invalid data must be rejected during train or predict
    with pytest.raises((ValueError,AssertionError)):
        testobj.train(x)
        testobj.predict(x)
|
#!/usr/bin/env python3
import csv
import json
import math
from scipy import stats
# Correct answers to the five quiz questions (shared by pre- and post-test).
ANSWER_KEY = [
    "The student sleeps like a Person",
    "tweety = Bird()",
    "robot.turnLeft()\nrobot.moveForward()\nrobot.moveForward()",
    "awooo!",
    "(none of these cause an error)",
]
# Likert responses mapped onto a symmetric -2..+2 numeric scale.
OPINION_MAP = {
    "Strongly Agree": 2,
    "Agree": 1,
    "Neutral": 0,
    "Disagree": -1,
    "Strongly Disagree": -2
}
if __name__ == "__main__":
pretest_answers = {}
posttest_answers = {}
with open("pretest.tsv") as pretest, open("posttest.tsv") as posttest:
pretest = csv.reader(pretest, delimiter="\t")
posttest = csv.reader(posttest, delimiter="\t")
# Discard the headers
next(pretest)
next(posttest)
for row in pretest:
user_id = row[0]
answers = json.loads(row[1])
answers[2] = answers[2]["code"] if isinstance(answers[2], dict) else answers[2]
pretest_answers[user_id] = answers
for row in posttest:
user_id = row[0]
answers = json.loads(row[1])
answers[2] = answers[2]["code"] if isinstance(answers[2], dict) else answers[2]
posttest_answers[user_id] = answers
pre_score_list = []
post_score_list = []
enjoyed = []
knew_oop_before = []
knew_oop_better = []
wrong_to_right = [0, 0, 0, 0, 0]
right_to_wrong = [0, 0, 0, 0, 0]
wrong_to_wrong = [0, 0, 0, 0, 0]
right_to_right = [0, 0, 0, 0, 0]
pretest_right = [0, 0, 0, 0, 0]
posttest_right = [0, 0, 0, 0, 0]
for user_id, post_answers in posttest_answers.items():
pre_answers = pretest_answers.get(user_id)
if not pre_answers:
print "\nNo pretest for " + user_id
continue
pre_score = sum(1 for response, truth in zip(pre_answers, ANSWER_KEY) if response == truth)
post_score = sum(1 for response, truth in zip(post_answers, ANSWER_KEY) if response == truth)
pre_score_list.append(pre_score)
post_score_list.append(post_score)
enjoyed.append(OPINION_MAP[post_answers[5]])
knew_oop_before.append(OPINION_MAP[post_answers[6]])
knew_oop_better.append(OPINION_MAP[post_answers[7]])
for i in range(len(pre_answers)):
ans = ANSWER_KEY[i]
pre = pre_answers[i]
post = post_answers[i]
if pre != ans and post != ans:
wrong_to_wrong[i] += 1
elif pre != ans and post == ans:
wrong_to_right[i] += 1
elif pre == ans and post != ans:
right_to_wrong[i] += 1
elif pre == ans and post == ans:
right_to_right[i] += 1
if pre_answers[i] == ANSWER_KEY[i]:
pretest_right[i] += 1
if post_answers[i] == ANSWER_KEY[i]:
posttest_right[i] += 1
print ""
print("User ID:", user_id)
print("Pretest score:", pre_score)
print("Posttest score:", post_score)
t_value, p_value = stats.ttest_rel(post_score_list, pre_score_list)
_, _, pre_mean, pre_sample_variance, _, _ = stats.describe(pre_score_list)
_, _, post_mean, post_sample_variance, _, _ = stats.describe(post_score_list)
print "\n==============================\n"
print "n = " + str(len(post_score_list)) + "\n"
print "Pretest sample mean: " + str(pre_mean)
print "Pretest sample SD: " + str(math.sqrt(pre_sample_variance))
print "Posttest sample mean: " + str(post_mean)
print "Posttest sample SD: " + str(math.sqrt(post_sample_variance))
print ""
print "'I enjoyed this game' average: " + str(stats.describe(enjoyed).mean)
print "'I knew OOP before playing' average: " + str(stats.describe(knew_oop_before).mean)
print "'I knew OOP better after playing' average: " + str(stats.describe(knew_oop_better).mean)
print ""
print "Paired t-test results"
print "t-value: " + str(t_value)
print "p-value: " + str(p_value)
# print ""
# print str(pretest_right) + "\tCorrect on pretest"
# print str(posttest_right) + "\tCorrect on posttest\n"
# print pre_score_list
# print post_score_list
print ""
print str(wrong_to_wrong) + "\t\tWrong, Wrong"
print str(wrong_to_right) + "\t\tWrong, Right"
print str(right_to_wrong) + "\t\tRight, Wrong"
print str(right_to_right) + "\tRight, Right"
|
from numpy import *
# Vector with the month names of the year (Portuguese).
vet_mes = array(['janeiro', 'fevereiro', 'marco', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro'])
# NOTE(review): the slicing assumes the input is typed as "ddmmyyyy" with no
# separators (e.g. "25121999") - confirm; [4:9] reads at most 4 digits for an
# 8-character input.
data = input("digite dia mes ano: ")
dia = int(data[:2])
ano = int(data[4:9])
i = int(data[2:4]) -1   # month index, 1-based input -> 0-based array
mes = vet_mes[i]
print(dia,"de",mes,"de",ano)
from random import randrange, choice
from uuid import uuid4
from argparse import ArgumentParser
import sys
import numpy as np
def main(fname, size, dist, lam):
    """Write a random parity-game graph to `fname`.

    Args:
        fname: output file path.
        size:  node count, as a string.
        dist:  'unif' | 'poisson' | 'heavy' - out-degree distribution.
        lam:   Poisson lambda, as a string (used only when dist == 'poisson').
    """
    with open(fname, 'w') as fout:
        # Each graph has the same amount of nodes
        size = int(size) - 1
        # Title line of the test file
        fout.write("parity " + str(size+1) + ";\n")
        # Emit one line per node
        for i in range(size+1):
            # node name
            fout.write(str(i) + ' ')
            # priority (color)
            priority = randrange(1, 100)
            fout.write(str(priority) + ' ')
            # owning player (0 or 1)
            owner = randrange(0, 2)
            fout.write(str(owner) + ' ')
            # number of outgoing edges of this node
            if dist == "unif":
                number_of_edges = randrange(1, size+1)
            elif dist == "poisson":
                number_of_edges = np.random.poisson(int(lam)) + 1
                if number_of_edges > size:
                    number_of_edges = size
            elif dist == "heavy":
                # BUG FIX: was a hard-coded 10, which crashed (choice from an
                # exhausted list) on graphs with fewer than 11 nodes; clamp to
                # the number of available targets.  Behavior is unchanged for
                # size >= 10.
                number_of_edges = min(10, size)
            else:
                # BUG FIX: an unknown dist previously fell through and raised
                # UnboundLocalError below; fail with a clear message instead.
                raise ValueError("dist must be 'unif', 'poisson' or 'heavy'")
            # successors: sampled without replacement, no self-loops allowed
            sequence = list(range(size+1))
            sequence.remove(i)
            for j in range(number_of_edges):
                t = choice(sequence)
                fout.write(str(t))
                sequence.remove(t)
                if j == number_of_edges - 1:
                    fout.write(" ")
                else:
                    fout.write(",")
            # unique identifier for the node
            uuid = uuid4()
            fout.write("\"" + str(uuid) + "\";\n")
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("--fname", help="name of the file to be created", nargs=1, required=True)
    parser.add_argument("--size", help="size of each graph. Default = 30", nargs=1, required=True)
    parser.add_argument("--dist", help="unif/poisson/heavy -> probability distribution of the degree of each node", nargs=1, required=True)
    parser.add_argument("--lam", help="value of lambda for poisson distribuitions", nargs=1, required=True)
    try:
        args = parser.parse_args()
    except:
        # NOTE(review): argparse already prints usage and raises SystemExit on
        # bad args; this bare except also swallows --help's SystemExit.
        parser.print_help(sys.stderr)
        exit(1)
    # nargs=1 wraps each value in a one-element list
    main(args.fname[0], args.size[0], args.dist[0], args.lam[0])
|
import numpy as np
class Grid:
    """5x5 puzzle board walked by a cursor at (row, col).

    grid : per-cell running value (None until visited, object dtype).
    hor  : 5x4 matrix of horizontal edges between neighbours; 1 = usable, 0 = crossed.
    ver  : 4x5 matrix of vertical edges; same convention.

    A move carries the current cell's value into the target cell, transformed:
    left -2, right +2, down *2, up /2 (see the four move methods).
    NOTE(review): row/col are created by init(), not __init__(); every method
    that touches them assumes init() already ran.
    """
    def __init__(self):
        # 5x5 of None = all cells unvisited; edge matrices start fully usable
        self.grid = np.array([[None,None,None,None,None],[None,None,None,None,None],[None,None,None,None,None],[None,None,None,None,None],[None,None,None,None,None]])
        self.hor = np.ones(5*4).reshape(5,4)
        self.ver = np.ones(4*5).reshape(4,5)

    def printGrid(self):
        """Pretty-print the value grid, one space-padded cell per column."""
        spacer = 5
        # print(self.hor)
        # print(self.ver)
        for r in self.grid:
            for e in r:
                print(str(e)+" "*(spacer-len(str(e))) +"\t",end="")
            print()

    def check(self,dr,dc):
        """Return True when moving by (dr, dc) stays on the board and the edge
        being crossed has not been used yet."""
        if(self.row + dr > 4):
            return False
        if(self.row + dr < 0):
            return False
        if(self.col + dc > 4):
            return False
        if(self.col + dc < 0):
            return False
        # print(dr,dc)
        if(dr == 0):
            # horizontal move: a leftward step crosses hor[row][col-1]
            K = 0
            if(dc == -1):
                K = -1
            return self.hor[self.row][self.col + K] != 0
        if(dc == 0):
            # vertical move: an upward step crosses ver[row-1][col]
            K = 0
            if(dr == -1):
                K = -1
            # print(self.row + K,self.col)
            return self.ver[self.row + K][self.col] != 0
        return True

    def ended_or_true(self):
        """Return None once the cursor reached the goal cell (4,4) - a signal
        for callers to stop - and True otherwise."""
        if self.row == 4 and self.col == 4 :
            # if int(self.grid[self.row][self.col]) <= 0:
            #     self.printGrid()
            #     print("------------------------------------")
            #     import sys
            #     sys.exit(0)
            return None
        return True

    def left(self):
        """Move left, carrying value - 2; marks the crossed edge as used."""
        if not self.check(0,-1):
            return False
        self.hor[self.row][self.col-1] = 0
        v = self.grid[self.row][self.col]
        self.col -= 1
        self.grid[self.row][self.col] = v - 2
        return self.ended_or_true()

    def right(self):
        """Move right, carrying value + 2."""
        if not self.check(0,1):
            return False
        self.hor[self.row][self.col] = 0
        v = self.grid[self.row][self.col]
        self.col += 1
        self.grid[self.row][self.col] = v + 2
        return self.ended_or_true()

    def down(self):
        """Move down, carrying value * 2."""
        if not self.check(1,0):
            return False
        self.ver[self.row][self.col] = 0
        v = self.grid[self.row][self.col]
        self.row += 1
        self.grid[self.row][self.col] = v * 2
        return self.ended_or_true()

    def up(self):
        """Move up, carrying value / 2."""
        if not self.check(-1,0):
            return False
        self.ver[self.row-1][self.col] = 0
        v = self.grid[self.row][self.col]
        self.row -= 1
        self.grid[self.row][self.col] = v / 2
        return self.ended_or_true()

    def init(self):
        """Place the cursor at (0,0) with value 0 and take two rights; returns
        self for chaining (Grid().init())."""
        self.row = 0
        self.col = 0
        self.grid[self.row][self.col] = 0
        self.right()
        self.right()
        return self

    def copy(self):
        """Deep-ish copy: fresh Grid with copied arrays and cursor position."""
        f = Grid()
        f.grid = np.copy(self.grid)
        f.hor = np.copy(self.hor)
        f.ver = np.copy(self.ver)
        f.row = self.row
        f.col = self.col
        return f

    def get_options(self):
        """Return the list of currently legal move names."""
        d = []
        if(self.check(0,1)):
            d.append("right")
        if(self.check(0,-1)):
            d.append("left")
        if(self.check(-1,0)):
            d.append("up")
        if(self.check(1,0)):
            d.append("down")
        return d

    def exc(self, name):
        """Execute a move by name ('right'/'left'/'up'/'down') and return its result."""
        d = {
            "right":self.right,
            "left":self.left,
            "up":self.up,
            "down":self.down
        }
        return d[name]()
def iterate(g,c=0):
    """Depth-first enumeration of every legal walk from g's cursor.

    Mutates module globals: min_val (smallest value reached in the goal cell),
    max_c (deepest recursion seen) and count (number of finished walks).
    c is the current recursion depth.
    """
    global min_val, max_c, count
    options = g.get_options()
    # print(options)
    if len(options) == 0:
        # dead end: no usable edge remains
        # g.printGrid()
        max_c = max(max_c,c)
        count += 1
        return
    for fn_name in options:
        k = g.copy()
        if k is None:
            # NOTE(review): copy() never returns None; this branch looks dead.
            print()
            continue
        ret_val = k.exc(fn_name)
        # k.printGrid()
        # print(fn_name," ==> ",ret_val)
        if ret_val is None:
            # reached the goal cell (4,4): record the result, stop this branch
            min_val = min(min_val, k.grid[4][4])
            if k.grid[4][4] <= 0:
                k.printGrid()
                print("------------------------------------",c)
            max_c = max(max_c,c)
            count += 1
            continue
        iterate(k,c+1)
# Search state shared with iterate(): deepest walk, walk count, best goal value.
max_c = 0
count = 0
min_val = 100000
g = Grid().init()
# manual exploration kept for reference:
# print("down\t",g.down())
# print("right\t",g.right())
# print("up\t\t",g.up())
# # print("left\t",g.left())
# # print("left\t",g.left())
# print("down\t",g.down())
# print("right\t",g.right())
# print("left\t",g.left())
# print("down\t",g.down())
# print("right\t",g.right())
# print("left\t",g.left())
# print(g.hor)
# print(g.ver)
# g.printGrid()
# print("====================================")
iterate(g)
print(min_val)
print(max_c)
print(count)
# board sketch:
# _ _ _ _
# |_|_|_|_|
# |_|_|_|_|
# |_|_|_|_|
# |_|_|_|_|
|
from enum import Enum
from selenium.webdriver.common.by import By
from pages.base_page import BasePage
class SignUpConstants(Enum):
    """Selenium locators for the sign-up page."""
    SIGN_UP_BTN = (By.ID, "signUpButton")
class SignUpPage(BasePage):
    """Page object for the sign-up screen."""
    def __init__(self, context):
        BasePage.__init__(self, context.driver)

    def is_initialize(self):
        # the page counts as loaded once the sign-up button exists (5s wait)
        return self.is_exist(*SignUpConstants.SIGN_UP_BTN.value, seconds=5)
|
import webapp2
import os
import jinja2
from src.urls import pages
jinja_enviroment = jinja2.Environment(autoescape=True, loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
class AppPage(webapp2.RequestHandler):
    """Catch-all GET handler rendering any path registered in src.urls.pages."""
    def get(self):
        # normalize to a trailing slash so '/x' and '/x/' hit the same entry
        request_path = self.request.path if self.request.path.endswith('/') else self.request.path + '/'
        if request_path in pages:
            template = jinja_enviroment.get_template(pages[request_path]['template'])
            self.response.write(template.render(pages[request_path]['data']))
        # NOTE(review): unknown paths fall through and return an empty 200 -
        # confirm a 404 isn't expected here.
# WSGI entry point: every URL is routed to AppPage.
app = webapp2.WSGIApplication([
    ('/.*', AppPage)
], debug=True)
|
from datetime import datetime, timedelta
from mongoengine.queryset import DoesNotExist, MultipleObjectsReturned
from scrapy.dupefilters import RFPDupeFilter
from immobilier.mongodb.models import RawPage
from immobilier.apps.scrapy_crawl.settings import RECRAWLING_DELAY
RECRAWLING_TIME_DELTA = timedelta(days=RECRAWLING_DELAY)
class MongoStorageFilter(RFPDupeFilter):
    """Scrapy dupe filter backed by the RawPage Mongo collection.

    Extends the in-run fingerprint dedup with a persistent check against
    previously stored pages, using RECRAWLING_DELAY days as the window.
    """
    def request_seen(self, request):
        # fast path: Scrapy's own per-run fingerprint dedup
        rfp_seen = super(MongoStorageFilter, self).request_seen(request)
        if rfp_seen:
            return rfp_seen
        else:
            try:
                # look for a stored page fetched BEFORE the recrawl window
                RawPage.objects.filter(
                    fetch_at__lte=datetime.utcnow() - RECRAWLING_TIME_DELTA).get(url=request.url)
            except DoesNotExist:
                # no sufficiently old record -> let the crawl proceed
                return False
            except MultipleObjectsReturned:
                return True
            # NOTE(review): as written, a URL whose copy is OLDER than the
            # window is treated as seen (skipped) while a recently fetched one
            # is recrawled - verify this is not inverted relative to the
            # intended "recrawl after RECRAWLING_DELAY days" behavior.
            return True
|
#!/usr/bin/env
import paho.mqtt.client as mqtt
def on_connect(client, userdata, flags, rc):
    """Subscribe to the image topics once the local broker connection is up."""
    for topic in ("image", "imagedata"):
        client.subscribe(topic)
    print("Local Connected with result code " + str(rc))
def on_connect_cloud(client, userdata, flags, rc):
    """Log the cloud connection result, then subscribe to the image topics."""
    print("Cloud connected with result code " + str(rc))
    for topic in ("image", "imagedata"):
        client.subscribe(topic)
def on_disconnect(client, userdata, rc):
    """Report disconnects that were not requested (rc 0 is a clean disconnect)."""
    if rc:
        print("Unexpected disconnection.")
def on_message(client, userdata, msg):
    """Count and log every message arriving from the local broker."""
    # BUG FIX: without `global j`, `j=j+1` raises UnboundLocalError on the
    # first message because the assignment makes j function-local.
    global j
    j = j + 1
    print("local "+msg.topic+" "+str(msg.payload))
    #cloud.publish("imagedata", payload="/tmp/face-" + str(j) + ".jpg", qos=2, retain=False)
def on_message_cloud(client, userdata, msg):
    """Log every message arriving from the cloud broker."""
    line = "cloud" + msg.topic + " " + str(msg.payload)
    print(line)
# message counter used by on_message
j=2
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.on_disconnect = on_disconnect
client.connect("mosquitto", 1883, 60)
# BUG FIX: was a Python 2 print statement; the rest of this file uses
# print() calls, and the statement form is a syntax error under Python 3.
print("connced??")
#cloud = mqtt.Client()
#cloud.on_connect = on_connect_cloud
#cloud.on_message = on_message_cloud
#cloud.on_disconnect = on_disconnect
#cloud.connect("52.117.25.20", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
|
# Generated by Django 3.1.4 on 2021-04-12 07:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the blood-donation app.
    # NOTE(review): the bloodGroup choices reuse keys '3' and '4' for both
    # B+/B- and O+/O- - duplicate keys make O+/O- unselectable.  Fixing it
    # requires a follow-up migration matching the model, not a hand edit here.

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='BloodValidator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request_id', models.CharField(max_length=5)),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
                ('organisername', models.CharField(max_length=50)),
                ('location', models.CharField(max_length=50)),
                ('event_date', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Register',
            fields=[
                ('register_id', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='auth.user')),
                ('name', models.CharField(max_length=50)),
                ('gender', models.CharField(choices=[('1', 'Male'), ('2', 'Female'), ('3', 'Others')], max_length=2)),
                ('address', models.CharField(max_length=50)),
                ('number', models.PositiveIntegerField(unique=True)),
                ('bloodGroup', models.CharField(choices=[('1', 'A+'), ('2', 'A-'), ('3', 'B+'), ('4', 'B-'), ('5', 'AB+'), ('6', 'AB-'), ('3', 'O+'), ('4', 'O-')], max_length=2)),
                ('image', models.ImageField(blank=True, null=True, upload_to='')),
                ('is_verified', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Blood',
            fields=[
                ('request_id', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='WebApp.bloodvalidator')),
                ('patientName', models.CharField(max_length=50)),
                ('bloodGroup', models.CharField(choices=[('1', 'A+'), ('2', 'A-'), ('3', 'B+'), ('4', 'B-'), ('5', 'AB+'), ('6', 'AB-'), ('3', 'O+'), ('4', 'O-')], max_length=2)),
                ('gender', models.CharField(choices=[('1', 'Male'), ('2', 'Female'), ('3', 'Others')], max_length=2)),
                ('number', models.PositiveIntegerField(unique=True)),
                ('case', models.CharField(choices=[('1', 'Accident'), ('2', 'Delivery'), ('3', 'Anemia'), ('4', 'Dialysis'), ('5', 'Operation'), ('6', 'Urgent'), ('7', 'Others')], max_length=20)),
                ('hospital', models.CharField(choices=[('1', 'BPKIHS'), ('2', 'Vijaypur Hospital')], max_length=20)),
                ('requiredDate', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Donation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.BooleanField(default=False)),
                ('donor_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='WebApp.register')),
                ('request_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='WebApp.blood')),
            ],
        ),
    ]
|
#!/bin/python
from datetime import datetime
import sys
import time
import subprocess
import imp
# ======================================================================
# ======================== PROGRAM CONSTANTS ===========================
# ======================================================================
# max number of jobs running at once
max_running_jobs = 1
# list with running jobs (subprocess.Popen handles)
running_jobs = []
# stack with jobs in queue (one shell command per entry)
queue_jobs = []
all_jobs = 0       # total number of jobs loaded
done_jobs = 0      # jobs finished so far
GLOBALTIME = 0     # wall-clock start of the whole run
TIME = 0           # start of the current ETA estimation window
doneOnTime = 0     # jobs finished inside the current window
numErrors = 0      # errors observed so far
output = ''        # last rendered status text
# status file
statusFile = '.status'
statusFILE = ''
# error file
errorFile = '.errors'
errorFILE = ''
# ======================================================================
# ======================== PROGRAM FUNCTIONS ===========================
# ======================================================================
def SetErrorFile():
    """Open the error log ('.errors') for writing; handle kept in a global."""
    global errorFILE,errorFile
    errorFILE = open(errorFile,'w')

def LoadJobs(filename):
    """Read the job list (one shell command per line) into queue_jobs."""
    global queue_jobs
    # [:-1] drops the trailing empty entry produced by the final newline
    queue_jobs = open(filename,'r').read().split('\n')[:-1]

def InitWorkers():
    """Start the initial pool: min(max_running_jobs, all_jobs) subprocesses."""
    global running_jobs,queue_jobs
    for i in range(all_jobs if max_running_jobs > all_jobs else max_running_jobs):
        running_jobs.append(subprocess.Popen(queue_jobs.pop(), stderr=errorFILE, shell = True))

def SetGlobalTime():
    """Record the wall-clock start of the whole run."""
    global GLOBALTIME
    GLOBALTIME = time.time()

def ResetTime():
    """Restart the window used for the remaining-time estimate."""
    global TIME,doneOnTime
    TIME = time.time()
    doneOnTime = 0

# ANSI escapes: move the cursor up one line / clear the current line
BACK_TO_PREVLINE = "\033[F"
CLEAR_LINE = "\033[K"
thirdLine = 0  # whether the extra error line is already on screen
def ConvertToDHMS(time):
    """Format a duration in seconds as 'D days, H hours, M minutes and S seconds'."""
    remaining = time
    parts = []
    # successively peel off days, hours, minutes, whole seconds
    for unit in (86400, 3600, 60, 1):
        count, remaining = divmod(remaining, unit)
        parts.append(count)
    return '%d days, %d hours, %d minutes and %d seconds' % tuple(parts)
def JobsDone():
    """One-line progress summary: done/total plus live worker count."""
    return 'Jobs done: '+str(done_jobs)+' / '+str(all_jobs)+' (workers: '+str(len(running_jobs))+')\n'

def TotalComputationTime():
    """Total elapsed time since SetGlobalTime(), as a D/H/M/S sentence."""
    GLTIME = time.time() - GLOBALTIME
    sys.stdout.write("\033[K")  # clear the current terminal line first
    return 'Total computation time: '+ConvertToDHMS(GLTIME)

def EstimatingTime():
    """Placeholder shown before any job has finished."""
    return 'Estimating time ...'

def EstimatedTime():
    """ETA: the current window's average pace extrapolated over remaining jobs."""
    deltaTIME = (time.time() - TIME) / doneOnTime
    deltaTIME = deltaTIME*(all_jobs-done_jobs)
    return 'Estimated time: '+datetime.fromtimestamp(time.time()+deltaTIME).strftime("%A, %B %d, %Y %H:%M:%S")

def OccuredErrors():
    """Error-count line pointing at the error log file."""
    return '\nOccured errors: '+str(numErrors)+' (see log: '+errorFile+')'
def SaveStatus():
    """Mirror the on-screen status text into the '.status' file."""
    global statusFILE
    statusFILE = open(statusFile,'w')
    statusFILE.write(output)
    statusFILE.close()

def ClearLines():
    """Rewind the cursor over the previously printed status block using ANSI
    escapes (Python 2 print statements); one extra line when errors shown."""
    global thirdLine
    if numErrors>0:
        if thirdLine==0:
            print
        thirdLine += 1
        print BACK_TO_PREVLINE,CLEAR_LINE,
    print BACK_TO_PREVLINE,CLEAR_LINE,'\r',
def PrintDone():
    """Redraw the status block (progress, ETA or total time, error count),
    persist it via SaveStatus() and flush stdout."""
    global thirdLine,output
    ClearLines()
    output = ''
    output += JobsDone()
    if done_jobs ==0: output+= EstimatingTime()
    if done_jobs == all_jobs: output+= TotalComputationTime()
    if doneOnTime != 0 and done_jobs != all_jobs: output+= EstimatedTime()
    if numErrors > 0: output+= OccuredErrors()
    print output,
    SaveStatus()
    sys.stdout.flush()

def PopJobFromQueue(i):
    """Replace finished worker slot i with the next queued job.
    NOTE(review): done_jobs/doneOnTime are incremented when the NEXT job
    starts, not when the previous one is reaped - confirm this is intended."""
    global done_jobs,doneOnTime,running_jobs,queue_jobs
    running_jobs[i] = subprocess.Popen(queue_jobs.pop(), stderr=errorFILE, shell = True)
    done_jobs += 1
    doneOnTime += 1
    PrintDone()

def PopWorkerFromJobs(proc):
    """Retire a worker once the queue is empty; also shrinks the pool size."""
    global max_running_jobs,running_jobs
    ResetTime()
    running_jobs.remove(proc)
    max_running_jobs -= 1

def AppendWorkersToJobs(howMany):
    """Grow the pool by howMany workers, each taking a job off the queue."""
    global running_jobs,queue_jobs
    for j in range(howMany):
        running_jobs.append(subprocess.Popen(queue_jobs.pop(), stderr=errorFILE, shell = True))
config = ''
def importSettings(settingsFileName):
    # Load the hidden settings file into the module-level `config`.
    # NOTE(review): the `settingsFileName` argument is ignored and the path
    # '.config.py' is hard-coded — confirm this is intentional; otherwise
    # load_source should be given the parameter.
    global config
    config = imp.load_source('config', '.config.py')
def reloadLib():
    # Re-import the settings module to pick up on-disk changes.
    # imp.load_source re-executes the file, which works where reload() did not.
    global config
    config = imp.load_source('config', '.config.py')
    # TODO
    # reload doesnt work, dont know why
    #reload(config)
|
import glob
import pybel
import RASPA2
from pymongo import MongoClient
from datetime import datetime
# get cif
cif_list = glob.glob('*.cif')
# Parse every CIF file in the working directory (Python 2 print syntax).
for cif_file in cif_list:
    print cif_file # python2
    # print(cif_file) # python3
    #
    # Use pybel to parse, fill, and charge cif structure
    # .next() takes only the first structure from the file (Python 2 iterator API).
    mol = pybel.readfile("cif", cif_file).next()
    # Expand the asymmetric unit to the full crystallographic unit cell.
    mol.unitcell.FillUnitCell(mol.OBMol)
    print mol;
    #mol.calccharges("eqeq")
#
# Mongo setting
#client_1 = MongoClient()
#client_2 = MongoClient('localhost', 27017)
#client_3 = MongoClient('mongodb://localhost:27017/')
#
class Mongo_cif_DB(object):
    """Thin wrapper around a local MongoDB connection for storing parsed CIF data."""
    def __init__(self):
        # Default MongoClient() connects to mongodb://localhost:27017/.
        # (Fixed attribute typo: was `self.clint`.)
        self.client = MongoClient()
        self.db = self.client['mof']
    def add_one(self):
        """Insert one document built from the module-level cif_file/mol globals."""
        post = {
            'title': cif_file,
            'mol': mol,
            'created_at': datetime.now()
        }
        return self.db.mof.insert_one(post)
def main():
    """Insert the current structure into MongoDB and report the insert result."""
    db = Mongo_cif_DB()
    result = db.add_one()
    print(result)
if __name__ == '__main__':
    main()
|
import card
import random
class player:
    """A participant in one game: identity, hand, and the pool of cards they draw from."""
    def __init__(self, playerId, gameId, playerName, playerHand, playerPos, cardPool, handSize):
        self.playerId = playerId      # integer, unique identifier for each player
        self.gameId = gameId          # integer, identifies the game this player is part of
        self.playerName = playerName  # string, the name the player has selected
        # Cards currently in hand. Only populated for the player on the local
        # machine; empty for all other players.
        # (Fixed: the original called playerHand(card), which raises TypeError
        # because playerHand is a list, not a callable.)
        self.playerHand = playerHand
        self.playerPos = playerPos    # integer, player's position in the rotation
        # Cards the user can draw from, possibly temporary (same fix as above).
        self.cardPool = cardPool
        self.handSize = handSize      # maximum number of cards the player can have in hand
    def fillHand(self):
        """Draw random cards from the pool until the hand is full or the pool is empty.

        (Fixed: the original referenced self.cardpool — AttributeError — and
        recursed without a guard for an exhausted pool.)
        """
        while len(self.playerHand) < self.handSize and self.cardPool:
            idx = random.randrange(0, len(self.cardPool))
            self.playerHand.append(self.cardPool.pop(idx))
    def fetchCards(self):
        """Fetch the pool of cards this player will use (not yet implemented)."""
        todo = True
from __future__ import print_function
import sys
# Python 2/3 compatibility: on Python 2, make input() behave like raw_input()
# (return the raw line instead of evaluating it); on Python 3 this is a no-op.
try:
    input = raw_input
except NameError:
    pass
# Code Jam "Counting Sheep": count multiples of N until every decimal digit
# 0-9 has been seen; N == 0 never produces new digits (INSOMNIA).
if __name__ == '__main__':
    num_cases = input()  # consume the case-count line; cases are read below
    for case_idx, line in enumerate(iter(sys.stdin.readline, ''), 1):
        n = int(line)
        if n == 0:
            print("Case #{}: INSOMNIA".format(case_idx))
            continue
        seen = set()
        multiple = 0
        while len(seen) < 10:
            multiple += n
            seen.update(str(multiple))
        print("Case #{}: {}".format(case_idx, multiple))
# infoHeaders = {
# 'Host': 'output.nsfc.gov.cn',
# 'Connection': 'keep-alive',
# 'Cache-Control': 'max-age=0',
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9'
# }
# from fake_useragent import UserAgent
# ua = UserAgent(verify_ssl=False)
# infoHeaders['User-Agent']=ua.random
# print(infoHeaders)
sq_code = ['A','B']
# Funding-category (资助类别) codes.
# Fixed: '630''631' was an accidental implicit string concatenation that
# produced a single element '630631' — a comma was missing.
zz_lei = ['218', '220', '222', '339', '429', '432', '649', '579', '630', '631', '632', '2699']  # 资助类别
#
# for sq in sq_code:
#     for zz in zz_lei:
#         for year in range(1981,2019):
#             print(sq,zz,year)
print(len(zz_lei[0]))
import datetime
import requests
import pprint
import pandas as pd
from settings import USER_ID, TOKEN
USER_ID = USER_ID  # spotify username (re-bound from settings; assignment is a no-op)
TOKEN = TOKEN  # OAuth bearer token from settings (see the console link below)
# https://developer.spotify.com/console/get-recently-played/
def check_if_valid_data(df: pd.DataFrame) -> bool:
    """Validate the extracted songs dataframe.

    Returns False for an empty frame, True when all checks pass, and raises
    Exception when the primary key has duplicates or any value is null.
    """
    # Check if dataframe is empty
    if df.empty:
        print("No songs downloaded. Finishing execution")
        return False
    # Primary-key check: 'played_at' must be unique (it is the natural key).
    # (df['played_at'] is already a Series; the pd.Series() wrap was redundant.)
    if not df['played_at'].is_unique:
        raise Exception("Primary Key Check is violated")
    # Check for nulls
    if df.isnull().values.any():
        raise Exception("Null values found")
    return True
if __name__ == '__main__':
    # The Spotify Web API expects a bearer token in the request headers.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Bearer {token}".format(token=TOKEN)
    }
    # Only fetch plays from the last 24 hours (API takes a unix ms timestamp).
    yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
    yesterday_unix_timestamp = int(yesterday.timestamp()) * 1000
    r = requests.get(
        "https://api.spotify.com/v1/me/player/recently-played?after={time}".format(time=yesterday_unix_timestamp),
        headers=headers)
    data = r.json()
    # pprint.pprint(data)
    items = data["items"]
    song_names = [song["track"]["name"] for song in items]
    artist_names = [song["track"]["artists"][0]["name"] for song in items]
    played_at_list = [song["played_at"] for song in items]
    timestamps = [played_at[0:10] for played_at in played_at_list]
    song_dict = {
        "song_name": song_names,
        "artist_name": artist_names,
        "played_at": played_at_list,
        "timestamp": timestamps
    }
    song_df = pd.DataFrame(song_dict, columns=["song_name", "artist_name", "played_at", "timestamp"])
    print(song_df)
    if check_if_valid_data(song_df):
        print("Data valid, proceed to Load stage")
|
# -*- coding: utf-8 -*-
# x y 111000
# k
# 110111
x, y = map(int, input().split())
k = int(input())
# Within the first y positions the answer advances past x; beyond y, each
# extra step walks back one (y + (x - (k - y)) simplifies to x + 2*y - k).
print(k + x if k <= y else x + 2 * y - k)
#
# Binary operator classes
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import pybamm
import numbers
class BinaryOperator(pybamm.Symbol):
    """A node in the expression tree representing a binary operator (e.g. `+`, `*`)

    Derived classes will specify the particular operator

    **Extends**: :class:`Symbol`

    Parameters
    ----------
    name : str
        name of the node
    left : :class:`Symbol` or :class:`Number`
        lhs child node (converted to :class:`Scalar` if Number)
    right : :class:`Symbol` or :class:`Number`
        rhs child node (converted to :class:`Scalar` if Number)
    """

    def __init__(self, name, left, right):
        # Validate with an explicit raise. The original used
        # `assert cond, TypeError(...)`, which (a) raised AssertionError
        # instead of the TypeError it constructed and (b) is stripped
        # entirely under `python -O`.
        if not (
            isinstance(left, (pybamm.Symbol, numbers.Number))
            and isinstance(right, (pybamm.Symbol, numbers.Number))
        ):
            raise TypeError(
                """left and right must both be Symbols or Numbers
                but they are {} and {}""".format(
                    type(left), type(right)
                )
            )
        # Promote plain numbers to Scalar nodes so children are always Symbols.
        if isinstance(left, numbers.Number):
            left = pybamm.Scalar(left)
        if isinstance(right, numbers.Number):
            right = pybamm.Scalar(right)
        domain = self.get_children_domains(left.domain, right.domain)
        super().__init__(name, children=[left, right], domain=domain)

    def __str__(self):
        """ See :meth:`pybamm.Symbol.__str__()`. """
        return "{!s} {} {!s}".format(self.children[0], self.name, self.children[1])

    def get_children_domains(self, ldomain, rdomain):
        """Combine child domains: equal or one empty is fine; otherwise DomainError."""
        if ldomain == rdomain:
            return ldomain
        elif ldomain == []:
            return rdomain
        elif rdomain == []:
            return ldomain
        else:
            raise pybamm.DomainError("""children must have same (or empty) domains""")
class Power(BinaryOperator):
    """A node in the expression tree representing a `**` power operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("**", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        base, exponent = self.children
        return base.evaluate(t, y) ** exponent.evaluate(t, y)
class Addition(BinaryOperator):
    """A node in the expression tree representing an addition operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("+", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        lhs, rhs = self.children
        return lhs.evaluate(t, y) + rhs.evaluate(t, y)
class Subtraction(BinaryOperator):
    """A node in the expression tree representing a subtraction operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("-", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        lhs, rhs = self.children
        return lhs.evaluate(t, y) - rhs.evaluate(t, y)
class Multiplication(BinaryOperator):
    """A node in the expression tree representing a multiplication operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("*", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        lhs, rhs = self.children
        return lhs.evaluate(t, y) * rhs.evaluate(t, y)
class MatrixMultiplication(BinaryOperator):
    """A node in the expression tree representing a matrix multiplication operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("*", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        lhs, rhs = self.children
        return lhs.evaluate(t, y) @ rhs.evaluate(t, y)
class Division(BinaryOperator):
    """A node in the expression tree representing a division operator

    **Extends:** :class:`BinaryOperator`
    """

    def __init__(self, left, right):
        """ See :meth:`pybamm.BinaryOperator.__init__()`. """
        super().__init__("/", left, right)

    def evaluate(self, t=None, y=None):
        """ See :meth:`pybamm.Symbol.evaluate()`. """
        lhs, rhs = self.children
        return lhs.evaluate(t, y) / rhs.evaluate(t, y)
|
import os, shutil
def movep(src, dst, overlay = True):
    """Move a file or a directory tree `src` into the directory `dst`.

    overlay: True / False — when True, files already present at the
    destination are overwritten; when False they are kept and the source
    copy is skipped.
    """
    if not os.path.isdir(dst): raise TypeError("dst must be a directory.")
    # Case 1: moving a single file.
    if os.path.isfile(src):
        # NOTE: despite the name, dst_dir here is the destination *file* path.
        dst_dir = os.path.join(dst, os.path.basename(src))
        if os.path.exists(dst_dir):
            if not overlay: return
            os.remove(dst_dir)
        # NOTE(review): os.rename fails across filesystems — confirm src and
        # dst always live on the same device, otherwise shutil.move is needed.
        os.rename(src, dst_dir)
        return
    # Case 2: moving a directory tree, one os.walk entry at a time.
    for folder in os.walk(src):
        # Destination path = dst + separator + src's basename + the sub-path
        # of the current folder relative to src.
        dst_dir = dst + os.sep + os.path.basename(src) + folder[0].split(src, 1)[-1]
        # If this path already exists at the destination, move files one by
        # one, deleting any that would be overwritten (when overlay is True).
        if os.path.exists(dst_dir):
            for exs_file in folder[-1]:
                abs_path = os.path.join(dst_dir, exs_file)
                if os.path.exists(abs_path):
                    if not overlay: continue
                    os.remove(abs_path)
                os.rename(os.path.join(folder[0], exs_file), os.path.join(dst_dir, exs_file))
        elif not os.path.exists(dst_dir): shutil.move(folder[0], dst_dir)
    # Finally, delete the (now mostly moved-out) source tree.
    if os.path.exists(src) and overlay: shutil.rmtree(src)
import tests.generate_fake_dataset as gen
import tests.initialize_db as initdb
import yaml
import testing.postgresql
import psycopg2
import psycopg2.extras
from mock import patch
from pgdedupe.utils import load_config, filename_friendly_hash, create_model_definition
from pgdedupe.run import process_options, preprocess, create_blocking, cluster, train
def test_reproducibility():
    """Test that two dedupers trained with the same config and data
    come up with the same results"""
    # Throwaway Postgres instance; its DSN is written to db.yaml because the
    # helper scripts read connection info from disk.
    psql = testing.postgresql.Postgresql()
    with open('db.yaml', 'w') as f:
        yaml.dump(psql.dsn(), f)
    # Generate a synthetic population of 100 people and load it into the DB.
    pop = gen.create_population(100)
    gen.create_csv(pop, 'pop.csv')
    initdb.init('db.yaml', 'pop.csv')
    dbconfig = load_config('db.yaml')
    # Deduper configuration: fixed seed and a pre-labelled training file make
    # the run deterministic (no interactive labelling).
    base_config = {
        'schema': 'dedupe',
        'table': 'dedupe.entries',
        'key': 'entry_id',
        'fields': [
            {'field': 'ssn', 'type': 'String', 'has_missing': True},
            {'field': 'first_name', 'type': 'String'},
            {'field': 'last_name', 'type': 'String'},
            {'field': 'dob', 'type': 'String'},
            {'field': 'race', 'type': 'Categorical', 'categories': ['pacisland', 'amindian', 'asian', 'other', 'black', 'white']},
            {'field': 'ethnicity', 'type': 'Categorical', 'categories': ['hispanic', 'nonhispanic']},
            {'field': 'sex', 'type': 'Categorical', 'categories': ['M', 'F']}
        ],
        'interactions': [
            ['last_name', 'dob'],
            ['ssn', 'dob']
        ],
        'filter_condition': 'last_name is not null AND (ssn is not null OR (first_name is not null AND dob is not null))',
        'recall': 0.99,
        'prompt_for_labels': False,
        'seed': 0,
        'training_file': 'tests/dedup_postgres_training.json'
    }
    config = process_options(base_config)
    con = psycopg2.connect(cursor_factory=psycopg2.extras.RealDictCursor, **dbconfig)
    preprocess(con, config)
    # train two versions of the deduper with the same configuration
    # (PYTHONHASHSEED is pinned so hashing inside dedupe is reproducible;
    # a fresh connection is opened for the second run)
    with patch.dict('os.environ', {'PYTHONHASHSEED': '123'}):
        old_deduper = train(con, config)
    con = psycopg2.connect(cursor_factory=psycopg2.extras.RealDictCursor, **dbconfig)
    with patch.dict('os.environ', {'PYTHONHASHSEED': '123'}):
        new_deduper = train(con, config)
    # ensure that the two models come up with the same hash
    model_hash = filename_friendly_hash(create_model_definition(config, old_deduper))
    new_model_hash = filename_friendly_hash(create_model_definition(config, new_deduper))
    assert new_model_hash == model_hash
    # run clustering on each of the dedupers
    create_blocking(old_deduper, con, config)
    old_dupes = cluster(old_deduper, con, config)
    create_blocking(new_deduper, con, config)
    new_dupes = cluster(new_deduper, con, config)
    # each deduper should come up with the same list of clusters
    # (scores are deliberately excluded from the comparison)
    assert [records for records, scores in old_dupes] == [records for records, scores in new_dupes]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Benchmark the scoring performance on various CNNs
"""
from common import find_mxnet
from common.util import get_gpus
import mxnet as mx
import mxnet.gluon.model_zoo.vision as models
from importlib import import_module
import logging
import argparse
import time
import numpy as np
logging.basicConfig(level=logging.DEBUG)
# Command-line interface: choose one network (or 'all') and an optional batch size.
parser = argparse.ArgumentParser(description='SymbolAPI-based CNN inference performance benchmark')
parser.add_argument('--network', type=str, default='all',
                    choices=['all', 'alexnet', 'vgg-16', 'resnetv1-50', 'resnet-50',
                             'resnet-152', 'inception-bn', 'inception-v3',
                             'inception-v4', 'inception-resnet-v2', 'mobilenet',
                             'densenet121', 'squeezenet1.1'])
parser.add_argument('--batch-size', type=int, default=0,
                    help='Batch size to use for benchmarking. Example: 32, 64, 128.'
                         'By default, runs benchmark for batch sizes - 1, 32, 64, 128, 256')
opt = parser.parse_args()  # parsed at import time; consumed by the __main__ block below
def get_symbol(network, batch_size, dtype):
    """Build and return (symbol, data_shapes) for the requested network name."""
    # Inception v3/v4 expect 299x299 inputs; every other model uses 224x224.
    if network in ['inception-v3', 'inception-v4']:
        image_shape = (3, 299, 299)
    else:
        image_shape = (3, 224, 224)
    num_layers = 0
    # Names like 'resnet-50' / 'vgg-16' encode the depth after the dash, but
    # 'inception-resnet-v2' must be left untouched despite containing 'resnet'.
    if 'resnet' in network and network != 'inception-resnet-v2':
        num_layers = int(network.split('-')[1])
        network = network.split('-')[0]
    if 'vgg' in network:
        num_layers = int(network.split('-')[1])
        network = 'vgg'
    if network in ['densenet121', 'squeezenet1.1']:
        # Gluon model-zoo models: hybridize and convert to a symbolic graph.
        net = models.get_model(network)
        net.hybridize()
        data = mx.sym.var('data')
        out = net(data)
        sym = mx.sym.SoftmaxOutput(out, name='softmax')
    else:
        module = import_module('symbols.' + network)
        sym = module.get_symbol(num_classes=1000,
                                image_shape=','.join(str(i) for i in image_shape),
                                num_layers=num_layers,
                                dtype=dtype)
    return (sym, [('data', (batch_size,) + image_shape)])
def score(network, dev, batch_size, num_batches, dtype):
    """Benchmark forward-pass inference throughput; returns images per second."""
    sym, data_shape = get_symbol(network, batch_size, dtype)
    mod = mx.mod.Module(symbol=sym, context=dev)
    mod.bind(for_training=False, inputs_need_grad=False, data_shapes=data_shape)
    mod.init_params(initializer=mx.init.Xavier(magnitude=2.))
    # Random input data; the label list is empty (inference only).
    data = [mx.random.uniform(-1.0, 1.0, shape=shape, ctx=dev) for _, shape in mod.data_shapes]
    batch = mx.io.DataBatch(data, [])
    dry_run = 5  # warm-up iterations excluded from timing
    tic = None
    for i in range(dry_run + num_batches):
        if i == dry_run:
            tic = time.time()  # start the clock once warm-up is done
        mod.forward(batch, is_train=False)
        for output in mod.get_outputs():
            output.wait_to_read()  # force async execution to finish
    return num_batches * batch_size / (time.time() - tic)
if __name__ == '__main__':
    if opt.network == 'all':
        networks = ['alexnet', 'vgg-16', 'resnetv1-50', 'resnet-50',
                    'resnet-152', 'inception-bn', 'inception-v3',
                    'inception-v4', 'inception-resnet-v2',
                    'mobilenet', 'densenet121', 'squeezenet1.1']
        logging.info('It may take some time to run all models, '
                     'set --network to run a specific one')
    else:
        networks = [opt.network]
    # Benchmark on the first GPU when one is available, and always on CPU.
    devs = [mx.gpu(0)] if len(get_gpus()) > 0 else []
    # Enable USE_MKLDNN for better CPU performance
    devs.append(mx.cpu())
    if opt.batch_size == 0:
        batch_sizes = [1, 32, 64, 128, 256]
        logging.info('run batchsize [1, 32, 64, 128, 256] by default, '
                     'set --batch-size to run a specific one')
    else:
        batch_sizes = [opt.batch_size]
    # Sweep every network x device x batch size x dtype combination.
    for net in networks:
        logging.info('network: %s', net)
        if net in ['densenet121', 'squeezenet1.1']:
            logging.info('network: %s is converted from gluon modelzoo', net)
            logging.info('you can run benchmark/python/gluon/benchmark_gluon.py for more models')
        for d in devs:
            logging.info('device: %s', d)
            logged_fp16_warning = False
            for b in batch_sizes:
                for dtype in ['float32', 'float16']:
                    if d == mx.cpu() and dtype == 'float16':
                        #float16 is not supported on CPU
                        continue
                    elif net in ['inception-bn', 'alexnet'] and dtype == 'float16':
                        # Skip scoring these models in fp16; warn once per device.
                        if not logged_fp16_warning:
                            logging.info('Model definition for {} does not support float16'.format(net))
                            logged_fp16_warning = True
                    else:
                        speed = score(network=net, dev=d, batch_size=b, num_batches=10, dtype=dtype)
                        logging.info('batch size %2d, dtype %s, images/sec: %f', b, dtype, speed)
|
from django.urls import path, re_path

from . import views
app_name = 'mainsite'
urlpatterns = [
    path('', views.landing, name='landing'),
    path('events', views.events, name='events'),
    path('reserve', views.reserve, name='reserve'),
    path('references', views.references, name='references'),
    path('tickets/<int:id>', views.tickets, name='tickets'),
    # Fixed: this pattern is a regex, so it must go through re_path();
    # plain path() treats '^pay/(?P<charge>\w+?)/$' as a literal URL and
    # the route would never match.
    re_path(r'^pay/(?P<charge>\w+?)/$', views.pay.as_view(), name='pay'),
    path('charge/', views.charge, name='charge'),
]
from tkinter import *
import sqlite3
import json
# top = Tk()
# top.title("Data Acquisition Tool")
# top.geometry('400x500')
# top.configure(background="light blue")
# but0 = Button(top,text='Start',width=5,height=3)
# but0.pack()
# top.mainloop()
# fred = Button(self, fg="red", bg="blue")
# fred["fg"] = "red"
# fred["bg"] = "blue"
# fred.config(fg="red", bg="blue")
# class Application(Frame):
# def __init__(self, master=None):
# super().__init__(master)
# self.master = master
# self.pack()
# self.create_widgets()
# def create_widgets(self):
# self.hi_there = Button(self)
# self.hi_there["text"] = "Hello World\n(click me)"
# self.hi_there["command"] = self.say_hi
# self.hi_there.pack(side="top")
# self.quit = Button(self, text="QUIT", fg="red",
# command=self.master.destroy)
# self.quit.pack(side="bottom")
# def say_hi(self):
# print("hi there, everyone!")
# root = Tk()
# app = Application(master=root)
# app.mainloop()
# lst = ['a', 'b', 'c', 'd']
# root = Tk()
# t = Text(root)
# for x in lst:
# t.insert(END, x + '\n')
# t.pack()
# root.mainloop()
# def show_entry_fields():
# print("First Name: %s\nLast Name: %s" % (e1.get(), e2.get()))
# master = Tk()
# master.geometry('400x500')
# Label(master, text="First Name").grid(row=0)
# Label(master, text="Last Name").grid(row=1)
# e1 = Entry(master)
# e2 = Entry(master)
# e1.grid(row=0, column=1)
# e2.grid(row=1, column=1)
# Button(master, text='Quit', command=master.quit).grid(row=3, column=0, sticky=W, pady=4)
# Button(master, text='Show', command=show_entry_fields).grid(row=3, column=1, sticky=W, pady=4)
# master.mainloop()
# def evaluate(event):
# res.configure(text = "Ergebnis: " + str(eval(entry.get())))
# w = Tk()
# Label(w, text="Your Expression:").pack()
# entry = Entry(w)
# entry.bind("<Return>", evaluate)
# entry.pack()
# res = Label(w)
# res.pack()
# w.mainloop()
# def evaluate(event):
# res.configure(text = "Ergebnis: "+entry.get())
# w = Tk()
# entry = Entry(w)
# entry.bind("<Return>", evaluate)
# entry.pack(side="bottom")
# res = Label(w)
# res.pack(side="top")
# w.mainloop()
# def evaluate():
# res.configure(text = "Text: "+entry.get())
# master = Tk()
# Button(master,text="Click",command=evaluate).grid(row=0, column=0)
# entry = Entry(master)
# entry.grid(row=0, column=1)
# res = Label(master)
# res.grid(row=2, column=0)
# master.mainloop()
columns = []
entries = []

# Load every jsonResult<N>.txt file and flatten the records into `entries`,
# collecting the union of keys into `columns`.
# Fixed: files are now closed via context managers (handles were leaked),
# and the bare `except:` is narrowed to the KeyError raised for a missing key.
with open("countFiles.txt", "r") as f:
    fileCount = f.read()
index = 0
while index != int(fileCount):
    with open("jsonResult" + str(index) + ".txt", "r") as f:
        dict_all = json.loads(f.read())
    # Union of keys across this file's records, preserving first-seen order.
    for record in dict_all:
        for key in record:
            if key not in columns:
                columns.append(key)
    # One row per record, aligned to `columns`; '~' marks a missing value.
    for record in dict_all:
        row = []
        for col in columns:
            try:
                row.append(record[col])
            except KeyError:
                row.append("~")
        entries.append(row)
    index = index + 1
# ---- GUI: per-column threshold entry grid ----
displayText = ""
master = Tk()
master.geometry('800x800')
master.title("Data Acquisition Tool")
# master.geometry('800x500')
# Sample column names kept for reference; real labels come from `columns`.
lst = ['package_size_code', 'fda_ther_equiv_code', 'fda_application_number', 'clotting_factor_indicator','year','fda_product_name','sdfsdf','sdfsdf','dsfd']
ents = []
# Lay the threshold entries out in rows of four.
i = 0  # column index within the current row
j = 1  # vertical offset multiplier for labels
k = 1  # vertical offset multiplier for entry boxes
for e in columns:
    if i != 0 and i % 4 == 0:
        # Start a new row of four.
        j = j + 4
        k = k + 1.35
        i = 0
    # print("hello")
    lab = Label(master, text = e + " %")
    lab.place(x = (i * 200), y = 10 * j)
    # NOTE(review): `e` is rebound here from the column name to the Entry
    # widget — confusing but harmless, since the name is re-assigned each
    # iteration before reuse.
    e = Entry(master,width=5)
    e.insert(0,"0")
    e.place(x = (i * 200), y = 30 * k)
    ents.append(e)
    i = i + 1
    # Button(master,text=e, height=1,width=1,background="light blue",font=("Courier", 15)).grid(row=i, column=0,padx=10, pady=10)
def started():
    """Insert the parsed entries as a new SQLite table and diff it against the previous one.

    Reads the module-level `columns`/`entries`/`ents`, writes a human-readable
    report into the `textResult` widget. Table names are generated internally
    (table0, table1, ...), one per run.
    """
    # showText = []
    #Connection to SQLite Database
    conn = sqlite3.connect('testing3.db',isolation_level=None)
    c = conn.cursor()
    c.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tables = c.fetchall()
    #Create the CREATE table query. For now, all datatypes are text. Clarify
    tableName = "table" + str(len(tables))
    createQuery = 'CREATE TABLE ' + tableName + "("
    for col in columns:
        createQuery = createQuery + col + " text" + ","
    createQuery = createQuery + "PRIMARY KEY (year, quarter, ndc))"
    #calculate number of questions marks. Needed as synthax of insert is c.executemany("INSERT INTO table VALUES (?,?,?,?,?...)", entries)
    #Create INSERT query
    questionList = []
    for x in range(len(columns)):
        questionList.append('?')
    insertQuery = ",".join(questionList)
    insertQuery = "INSERT INTO " + tableName + " VALUES (" + insertQuery + ")"
    # First run: nothing to compare against, just create and fill the table.
    if (len(tables) == 0):
        print("There are no tables")
        # c.execute(createQuery)
        # c.executemany(insertQuery, entries)
        # conn.commit()
        c.execute("begin")
        c.execute(createQuery)
        c.executemany(insertQuery, entries)
        c.execute("commit")
        conn.close()
        # exit()
        textResult.delete('0.0',END)
        displayText = "There are no tables"
        textResult.insert(INSERT, displayText)
        return
    #comparing columns
    latestTable = "table" + str(len(tables) - 1)
    cursor = c.execute("SELECT * FROM "+latestTable)
    latestColumns = list(map(lambda x: x[0], cursor.description))
    #print(latestColumns)
    #print(columns)
    # Column-count change: store the new table and stop (no value diff possible).
    if len(columns) != len(latestColumns):
        print("The number of columns have changed.")
        textResult.delete('0.0',END)
        displayText = "The number of columns have changed\n"
        textResult.insert(INSERT, displayText)
        # c.execute(createQuery)
        # c.executemany(insertQuery, entries)
        # conn.commit()
        c.execute("begin")
        c.execute(createQuery)
        c.executemany(insertQuery, entries)
        c.execute("commit")
        conn.close()
        # exit()
        return
    # Column-name change: same handling as above.
    for i in range(len(latestColumns)):
        if columns[i] != latestColumns[i]:
            print("The columns have changed.")
            displayText = "The columns have changed\n"
            # c.execute(createQuery)
            # c.executemany(insertQuery, entries)
            # conn.commit()
            c.execute("begin")
            c.execute(createQuery)
            c.executemany(insertQuery, entries)
            c.execute("commit")
            conn.close()
            # exit()
            return
    displayText = "No Change in number/values of columns\n---------------------------------\n"
    #I have to create a new table in order to do value comparison
    newTable = tableName
    # c.execute(createQuery)
    # c.executemany(insertQuery, entries)
    c.execute("begin")
    c.execute(createQuery)
    c.executemany(insertQuery, entries)
    c.execute("commit")
    #Outer joins to check if rows have been added/removed. Gets count of rows added/removed
    rowsRemovedCount = []
    leftJoinStatement = "SELECT COUNT(*) FROM "+latestTable+" LEFT OUTER JOIN "+newTable+" ON "+latestTable+".ndc = "+newTable+".ndc WHERE "+newTable+".year ISNULL"
    c.execute(leftJoinStatement)
    for row in c:
        #print(row)
        rowsRemovedCount.append(row)
    rowsAddedCount = []
    revLeftJoinStatement = "SELECT COUNT(*) FROM "+newTable+" LEFT OUTER JOIN "+latestTable+" ON "+latestTable+".ndc = "+newTable+".ndc WHERE "+latestTable+".year ISNULL"
    c.execute(revLeftJoinStatement)
    for row in c:
        #print(row)
        rowsAddedCount.append(row)
    #Gets number of rows from the latest table in the database
    latestTableRowsCount = []
    c.execute("SELECT COUNT(*) FROM "+latestTable)
    for row in c:
        #print(row)
        latestTableRowsCount.append(row)
    #Gets number of rows from the new table just inserted into database
    newTableRowsCount = []
    c.execute("SELECT COUNT(*) FROM "+newTable)
    for row in c:
        #print(row)
        newTableRowsCount.append(row)
    #Gets count of rows which has same NDC between latest and new table. (Compare with matching NDC)
    matchingNDCCount = []
    c.execute("SELECT COUNT(*) FROM "+latestTable+", "+newTable+" WHERE " +latestTable+".ndc = "+newTable+".ndc")
    for row in c:
        #print(row)
        matchingNDCCount.append(row)
    checkEmpty = 0
    print(str(rowsRemovedCount[0][0])+ " rows has been removed from the old table which had "+str(latestTableRowsCount[0][0]) + " rows")
    print(str(rowsAddedCount[0][0])+ " rows has been added to the new table which now has "+str(newTableRowsCount[0][0]) + " rows")
    displayText = displayText + str(rowsRemovedCount[0][0])+ " rows has been removed from the old table which had "+str(latestTableRowsCount[0][0]) + " rows\n"
    displayText = displayText + str(rowsAddedCount[0][0])+ " rows has been added to the new table which now has "+str(newTableRowsCount[0][0]) + " rows\n"
    if int(newTableRowsCount[0][0]) == 0:
        print("Shrinkage of 100%")
        print("Growth of 0%")
        checkEmpty = 1
    if int(latestTableRowsCount[0][0]) == 0:
        print("Shrinkage of 0%")
        print("Growth of 100%")
        checkEmpty = 1
    # NOTE(review): when checkEmpty is 1, `shrinkage`/`growth` are never
    # assigned, yet the cleanup check near the end references them — confirm
    # that path cannot be reached with an empty table, else it raises NameError.
    if checkEmpty == 0:
        shrinkage = str(int(rowsRemovedCount[0][0]) / int(matchingNDCCount[0][0]) * 100)
        print("Shrinkage of "+shrinkage+"%")
        displayText = displayText + "Shrinkage of "+shrinkage+"%\n"
        growth = str(int(rowsAddedCount[0][0]) / int(matchingNDCCount[0][0]) * 100)
        print("Growth of "+growth+"%")
        displayText = displayText + "Growth of "+growth+"%\n"
    #Count number of values (including null) in a column (Maybe change this to just count rows in any one column. Same thing)
    SelectColCount1 = "SELECT COUNT(coalesce("+newTable+"." + latestColumns[0] + ",\"~\")) FROM " +newTable
    ColCount1 = []
    c.execute(SelectColCount1)
    for row in c:
        ColCount1.append(row)
    #compare values of every element in each column between two tables where the NDC matches
    hasChanged = 0
    i = 0
    for col in latestColumns:
        SelectColDifference = "SELECT COUNT(coalesce("+latestTable+"." + col + ",\"~\")) FROM "+latestTable+", "+newTable+" WHERE " +latestTable+".ndc = "+newTable+".ndc AND "+ "(SELECT coalesce("+latestTable+"." + col + ",\"~\")) <> " + "(SELECT coalesce("+newTable+"." + col + ",\"~\"))"
        #print(SelectColDifference)
        c.execute(SelectColDifference)
        for row in c:
            #print(row)
            #print(row[0])
            #print(ColCount1[0][0])
            change = str(int(row[0]) / int(ColCount1[0][0]) * 100)
            print("Change of "+change+"% in "+ col)
            displayText = displayText + "Change of "+change+"% in "+ col + "\n"
            if change != '0.0':
                hasChanged = 1
            # Compare against the user-entered percentage threshold for this column.
            perc = ents[i].get()
            if (int(perc) < int(float(change))):
                displayText = displayText + "HUGE CHANGE!\n"
        i = i + 1
    i = 0
    #delete newtable if there are no changes.
    if (hasChanged == 0 and shrinkage == '0.0' and growth == '0.0'):
        c.execute("DROP TABLE " + newTable)
        conn.commit()
        conn.close()
        print("No changes. The newtable is deleted")
        displayText = displayText + "No changes. The newtable is deleted"
        textResult.delete('0.0',END)
        textResult.insert(INSERT, displayText)
        return
    conn.commit()
    conn.close()
    textResult.delete('0.0',END)
    textResult.insert(INSERT, displayText)
    return
def reset():
    """Restore the results box and every threshold entry to their defaults."""
    textResult.delete('0.0', END)
    textResult.insert(INSERT, "Shows the percentage change/rows removed results.")
    for entry in ents:
        entry.delete(0, END)
        entry.insert(0, "0")
# ---- GUI: action buttons and results area ----
start = Button(text="Start", command=started)
start.place(x = 630, y = 360)
# Fixed: the button was assigned to the name `reset`, shadowing the reset()
# function it is wired to; give the widget its own name.
reset_button = Button(text="Reset", command=reset)
reset_button.place(x = 680, y = 360)
textResult = Text(master)
textResult.insert(INSERT, "Shows the percentage change/rows removed results.")
textResult.place(x = 75, y = 400)
vscroll = Scrollbar(master, orient=VERTICAL, command=textResult.yview)
vscroll.place(in_=textResult, relx=1.0, relheight=1.0, bordermode="outside")
master.mainloop()
# master = Tk()
# group = LabelFrame(master, text="Group", padx=5, pady=5)
# group.pack(padx=10, pady=10)
# w = Entry(group)
# w.pack()
# mainloop()
|
#encoding=utf-8
from django.conf import settings as SETTINGS
def settings(context):
    """Template context processor exposing the DreamDesktop domain settings."""
    ctx = {
        'DREAMDESKTOP_MSG_DOMAIN': SETTINGS.DREAMDESKTOP_MSG_DOMAIN,
        'DREAMDESKTOP_DOMAIN': SETTINGS.DREAMDESKTOP_DOMAIN,
        'DREAMWIDGETURL': SETTINGS.DREAMDESKTOP_DREAMWIDGET_URL,
    }
    # Optional setting: fall back to '' when it is not configured.
    ctx['DREAMDESKTOP_USERDB_DOMAIN'] = getattr(SETTINGS, 'DREAMDESKTOP_USERDB_DOMAIN', '')
    return ctx
|
import scraper_functions
from inspect import getmembers, isfunction
class EmptyNewsSources(Exception):
    """Raised when the news-sources list is empty."""
    pass
class EmptyScraperSource(Exception):
    """Raised when a news-source entry has no 'source' URL."""
    pass
class EmptyScraperFunction(Exception):
    """Raised when a news-source entry has no 'function_name'."""
    pass
class ScraperFunctionNotImplemented(Exception):
    """Raised when the named function does not exist in scraper_functions."""
    pass
def validate_news_sources(news_sources):
    """Validate scraper configuration entries.

    Each entry must provide a 'source' URL and a 'function_name' that is
    implemented in the scraper_functions module; a specific exception is
    raised for the first violation found.
    """
    # Map of every function implemented in scraper_functions.
    module_functions = {fn_name: fn_ref for fn_name,
                        fn_ref in getmembers(scraper_functions, isfunction)}
    # Idiomatic emptiness check (was: `not len(news_sources)`).
    if not news_sources:
        raise EmptyNewsSources("News sources can not be empty")
    for news_source in news_sources:
        source = news_source.get("source")
        if not source:
            raise EmptyScraperSource(
                f"Scraper source url not found for {news_source.get('function_name')}")
        function_name = news_source.get("function_name")
        if not function_name:
            raise EmptyScraperFunction(
                f"Scraper function not found for {news_source.get('source')}")
        if function_name not in module_functions:
            raise ScraperFunctionNotImplemented(
                f"Please check if function with name {function_name} exists in scraper_functions module")
|
import time
import random
# List of all the enemies in the game to randomize.
enemies = ["Dragon", "Troll", "Pirate", "Vampire", "Gorgon", "Ghost"]
# True only on the very first pass through the game loop below.
firstTime = True
# Set when the player chooses to restart after a game ends.
restart = False
# Whether the player currently has the Sword of Ogoroth.
sword = False
def house(name, weap):
    """Scene: the player knocks on the house door and meets the enemy.

    name -- the (randomized) enemy occupying the house
    weap -- True when the player carries the Sword of Ogoroth
    """
    print("You approach the door of the house.")
    time.sleep(1.5)
    print("You are about to knock when the door opens and"
          " out steps a " + name + ".")
    time.sleep(1.5)
    print("Eep! This is the " + name + "\'s house!")
    time.sleep(1.5)
    print("The " + name + " attacks you!")
    time.sleep(1.5)
    if not weap:
        print("You feel a bit under-prepared for this, "
              "what with only having a tiny dagger.")
        time.sleep(1.5)
    print("Would you like to (1) fight or (2) run away?")
    res = input("Please enter 1 or 2\n")
    while True:
        if res == "1":
            if weap:
                print("As the " + name + " moves to attack, "
                      "you unsheath your new sword.")
                time.sleep(1.5)
                print("The Sword of Ogoroth shines brightly in your hand"
                      " as you brace yourself for the attack.")
                time.sleep(1.5)
                print("But the " + name + " takes one look at "
                      "your shiny new toy and runs away!")
                time.sleep(1.5)
                print("You have rid the town of the " + name +
                      ". You are victorious!")
                time.sleep(1.5)
            else:
                print("You do your best...")
                time.sleep(1.5)
                # Fixed: this line hard-coded "the wicked fairie" even though
                # the enemy is randomized; use the actual enemy name.
                print("but your dagger is no match for the " + name + ".")
                time.sleep(1.5)
                print("You have been defeated!")
                time.sleep(1.5)
            break
        elif res == "2":
            field()
            break
        else:
            res = input("Please enter 1 or 2\n")
def cave(weapo):
    """Visit the cave.

    First visit (weapo is False): the player finds the Sword of Ogoroth.
    Later visits: the cave is empty.  Either way the player walks back to
    the field and chooses the house or the cave again.
    """
    print("You peer cautiously into the cave.")
    time.sleep(1.5)
    if not weapo:
        print("It turns out to be only a very small cave.")
        time.sleep(1.5)
        print("Your eye catches a glint of metal behind a rock.")
        time.sleep(1.5)
        print("You have found the magical Sword of Ogoroth!")
        time.sleep(1.5)
        print("You discard your silly old dagger and take the sword with you.")
        # NOTE(review): only the local is updated; the global `sword` flag is
        # set separately in the main loop after cave() returns — confirm intent.
        weapo = True
        time.sleep(1.5)
    else:
        print("You've been here before, and gotten all the good stuff."
              " It's just an empty cave now.")
        time.sleep(1.5)
    # Common epilogue — previously duplicated verbatim in both branches.
    print("You walk back out to the field.")
    time.sleep(1.5)
    print("\nEnter 1 to knock on the door of the house.")
    time.sleep(1.5)
    print("Enter 2 to peer into the cave.")
    time.sleep(1.5)
    print("What would you like to do?")
    n = input("Please enter 1 or 2\n")
    while True:
        if n == "1":
            house(enemy, weapo)
            break
        elif n == "2":
            cave(weapo)
            break
        else:
            n = input("Please enter 1 or 2\n")
def field():
    """Return to the field after fleeing and re-offer the two choices."""
    print("You run back into the field. Luckily, "
          "you don't seem to have been followed.")
    time.sleep(1.5)
    print("")
    print("Enter 1 to knock on the door of the house.")
    time.sleep(1.5)
    print("Enter 2 to peer into the cave.")
    time.sleep(1.5)
    print("What would you like to do?")
    # Keep prompting until the player enters one of the two valid choices.
    num = input("Please enter 1 or 2\n")
    while num not in ("1", "2"):
        num = input("Please enter 1 or 2\n")
    if num == "1":
        house(enemy, sword)
    else:
        cave(sword)
def play_again():
    """Ask whether to restart the game.

    Returns True to restart, False to quit.  Re-prompts until the player
    enters "y" or "n".  (The unreachable trailing `return False` after the
    infinite loop has been removed — the loop only exits via return.)
    """
    response = input("Would you like to play again? (y/n)\n")
    while True:
        if response == "y":
            print("Excellent! Restarting the game\n")
            time.sleep(2)
            return True
        elif response == "n":
            print("Thanks for playing! See you next time.")
            time.sleep(2)
            return False
        else:
            response = input("Would you like to play again? (y/n)\n")
# Main game loop: runs once, then again whenever play_again() says restart.
while firstTime or restart:
    # Fresh state for each playthrough.
    enemy = random.choice(enemies)
    sword = False
    firstTime = False
    restart = False
    # NOTE(review): `weapon` is assigned but never read anywhere — confirm
    # it is dead and can be removed.
    weapon = False
    print("You find yourself standing in an open field,"
          " filled with grass and yellow wildflowers.")
    time.sleep(1.5)
    print("Rumor has it that a " + enemy +
          " is somewhere around here, and"
          " has been terrifying the nearby village.")
    time.sleep(1.5)
    print("In front of you is a house.")
    time.sleep(1.5)
    print("To your right is a dark cave.")
    time.sleep(1.5)
    print("In your hand you hold your trusty (but not very effective) dagger.")
    print("")
    time.sleep(1.5)
    print("Enter 1 to knock on the door of the house.")
    time.sleep(1.5)
    print("Enter 2 to peer into the cave.")
    time.sleep(1.5)
    print("What would you like to do?")
    num = input("Please enter 1 or 2\n")
    # while loop is used to get specific input from the user (1 or 2 only)
    while True:
        if num == "1":
            house(enemy, sword)
            break
        elif num == "2":
            cave(sword)
            # NOTE(review): set only after cave() returns, so it takes effect
            # for paths re-entered from the cave's own prompt — confirm intent.
            sword = True
            break
        else:
            num = input("Please enter 1 or 2\n")
    restart = play_again()
|
from tornado import httpclient
from logging import getLogger, INFO
logger = getLogger(__package__)
if __name__ == '__main__':
    # Fetch one URL synchronously and log the body.
    http_client = httpclient.HTTPClient()
    try:
        response = http_client.fetch("https://anonymous-boilerplate.firebaseapp.com")
        logger.info(response.body)
    except httpclient.HTTPError as e:
        # HTTPError is raised for non-200 responses; the response
        # can be found in e.response.
        # Consistency fix: errors now go through the module logger like the
        # success path, instead of bare print().
        logger.error("Error: %s", e)
    except Exception as e:
        # Other errors are possible, such as IOError.
        logger.error("Error: %s", e)
    finally:
        # Bug fix: close() previously ran outside any cleanup block and was
        # skipped if an unexpected exception propagated.
        http_client.close()
"""The Oscan alphabet. Sources:
- `<https://www.unicode.org/charts/PDF/U10300.pdf>`
- Buck, C. A Grammar of Oscan and Umbrian.
"""
__author__ = ["Caio Geraldes <caio.geraldes@usp.br>"]
# Vowel signs, as Old Italic block code points (U+10300–U+1032F).
VOWELS = [
    "\U00010300", # 𐌀 OSCAN LETTER A
    "\U00010304", # 𐌄 OSCAN LETTER E
    "\U00010309", # 𐌉 OSCAN LETTER I
    "\U00010316", # 𐌖 OSCAN LETTER U
    "\U0001031D", # 𐌝 OSCAN LETTER II
    "\U0001031E", # 𐌞 OSCAN LETTER UU
]
# Consonant signs from the same Unicode block.
CONSONANTS = [
    "\U00010301", # 𐌁 OSCAN LETTER B
    "\U00010302", # 𐌂 OSCAN LETTER K/G
    "\U00010303", # 𐌃 OSCAN LETTER D
    "\U00010305", # 𐌅 OSCAN LETTER V
    "\U00010307", # 𐌇 OSCAN LETTER H
    "\U0001030A", # 𐌊 OSCAN LETTER K
    "\U0001030B", # 𐌋 OSCAN LETTER L
    "\U0001030C", # 𐌌 OSCAN LETTER M
    "\U0001030D", # 𐌍 OSCAN LETTER N
    "\U00010310", # 𐌐 OSCAN LETTER P
    "\U00010314", # 𐌔 OSCAN LETTER S
    "\U00010315", # 𐌕 OSCAN LETTER T
    "\U0001031A", # 𐌚 OSCAN LETTER F
]
|
"""
/***************************************************************************
Name : Property Browser
Description : Class that provides functions for overlaying property
boundaries in either a Google Maps Satellite view or
OpenStreetMaps.
Date : 8/July/2013
copyright : (C) 2013 by John Gitau
email : gkahiu@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
from stdm.settings import getProxy
from stdm.utils import PLUGIN_DIR
from stdm.data import STDMDb
from geoalchemy2 import WKBElement
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from PyQt4.QtNetwork import *
#Layer type enumeration
GMAP_SATELLITE = 2010
OSM = 2011
class OLStyle(object):
    '''
    Wrapper around the OpenLayers vector-style options used when rendering
    the property overlay.  Serialized to a JSON object via toJson() and
    handed to OpenLayers.
    '''
    def __init__(self):
        '''
        Populate the styling dictionary with the default look.
        '''
        self.style = {
            "fillColor": "#00FF00",
            "fillOpacity": 0.4,
            "strokeColor": "#FE2E64",
            "strokeOpacity": 1,
            "strokeWidth": 1,
            "label": "${id}",
            "labelOutlineColor": "#FFFFFF",
            "labelOutlineWidth": 3,
        }

    def setFillColor(self, color):
        '''
        Set the fill color; accepts a color string or a QColor.
        '''
        if isinstance(color, QColor):
            color = str(color.name())
        self.style["fillColor"] = color

    def setFillOpacity(self, opacity):
        '''
        Set the fill opacity, in the range 0-1.
        '''
        self.style["fillOpacity"] = opacity

    def setStrokeColor(self, color):
        '''
        Set the outline color; accepts a color string or a QColor.
        '''
        if isinstance(color, QColor):
            color = str(color.name())
        self.style["strokeColor"] = color

    def setStrokeOpacity(self, opacity):
        '''
        Set the outline opacity, in the range 0-1.
        '''
        self.style["strokeOpacity"] = opacity

    def setStrokeWidth(self, width):
        '''
        Set the outline width.
        '''
        self.style["strokeWidth"] = width

    def setLabelField(self, labelField):
        '''
        Label the property with the value of the attribute named
        `labelField`.
        '''
        self.style["label"] = "${%s}"%(labelField,)

    def toJson(self):
        '''
        Return the style as a JSON string.
        '''
        import json
        return json.dumps(self.style)
class PropertyBrowser(QObject):
    '''
    Overlays property bounds in either on Google Maps Satellite view or
    OpenStreetMaps on the QWebView control specified.
    Emits the old-style signals loadStarted(), loadProgress(int),
    loadFinished(bool), loadError(QString) and zoomChanged(int).
    '''
    def __init__(self,webview,parent = None,style = OLStyle()):
        '''
        Initialize against an existing QWebView.
        NOTE(review): OLStyle() is a mutable default argument — it is created
        once at import time and shared by every instance that does not pass
        an explicit style.  Confirm this sharing is acceptable.
        '''
        QObject.__init__(self,parent)
        # Name of the HTML page (under the plugin's 'html' dir) hosting OpenLayers.
        self.baseHTML = "property_overlay.html"
        self.webview = webview
        self.dbSession = STDMDb.instance().session
        self.olPage = OLWebPage(self)
        #Property Style
        self._style = style
        #Connect slots (old-style PyQt4 signal/slot syntax)
        QObject.connect(self.olPage, SIGNAL("loadFinished(bool)"),self.onFinishLoading)
        QObject.connect(self.olPage, SIGNAL("loadProgress(int)"),self.onLoadingProgress)
        QObject.connect(self.olPage, SIGNAL("loadStarted()"),self.onStartLoading)
    def url(self):
        '''
        Returns both the normal and URL paths of the base HTML file to load.
        The HTML file has to be located in the 'html' folder of the plugin.
        '''
        absNormPath = PLUGIN_DIR + "/html/" + self.baseHTML
        browserPath = "file:///" + absNormPath
        return absNormPath, browserPath
    def load(self):
        '''
        Loads the property page into the QWebView.
        This method is only called once on initializing the view. Subsequent object calls
        are made to the addOverlay method.
        Emits loadError(QString) if the HTML file is missing.
        '''
        if not QFile.exists(self.url()[0]):
            errmsg = QApplication.translate("PropertyBrowser", "The source HTML file could not be found.")
            self.emit(SIGNAL("loadError(QString)"), errmsg)
            return
        self.olPage.mainFrame().load(QUrl(self.url()[1]))
        self.webview.setPage(self.olPage)
    def onStartLoading(self):
        '''
        Propagate the page loading events
        '''
        self.emit(SIGNAL("loadStarted()"))
    def onFinishLoading(self,status):
        '''
        Propagate event/signal
        '''
        self.emit(SIGNAL("loadFinished(bool)"),status)
    def onLoadingProgress(self,prog):
        '''
        Propagate event
        '''
        self.emit(SIGNAL("loadProgress(int)"),prog)
    def onZoomLevelChanged(self,level):
        '''
        Signal raised when the zoom level of the map changes.
        This signal is only raised in certain circumstances.
        (Enumerate the levels)
        '''
        self.emit(SIGNAL("zoomChanged(int)"),level)
    def setBaseLayer(self,layertype):
        '''
        Set the base layer to either Google Maps or OpenStreetMaps
        (GMAP_SATELLITE or OSM constants).
        '''
        changeBaseJS = "setBaseLayer(%s)"%(layertype)
        # evaluateJavaScript returns a QVariant; .toInt() yields (int, ok).
        zoomLevel,ok = self._setJS(changeBaseJS).toInt()
        #Raise map zoom changed event
        self.onZoomLevelChanged(zoomLevel)
    def setCenter(self,x,y,zoom=12):
        '''
        Set the center of the map with an optional zoom level
        '''
        setCenterJS = "setCenter(%s,%s,%s)"%(x,y,zoom)
        self._setJS(setCenterJS)
    def zoomTo(self,level):
        '''
        Zoom to a specific level
        '''
        zoomJS = "zoom(%s)"%(level)
        # NOTE(review): zoomLevel is unused here and no zoomChanged is
        # emitted, unlike the other zoom methods — confirm intent.
        zoomLevel,ok = self._setJS(zoomJS).toInt()
    def zoomToPropertyExtents(self):
        '''
        Zoom to the boundary extents of the last loaded
        property.
        '''
        zoomToExtentsJS = "zoomToPropertyExtent()"
        zoomLevel,ok = self._setJS(zoomToExtentsJS).toInt()
        #Raise map zoom changed event
        self.onZoomLevelChanged(zoomLevel)
    def addOverlay(self,property,labelfield = ""):
        '''
        Overlay a polygon onto the baselayer and set the label to use for the
        polygon.
        The feature will be transported in GeoJSON format since it is the most
        efficient format for AJAX loading.
        '''
        if property is None:return
        #Set the name of the field to use for labeling
        self._style.setLabelField(labelfield)
        #Update the style of the property on each overlay operation
        self._updateLayerStyle()
        #Set label object
        labelJSObject = "null"
        if hasattr(property,labelfield):
            propVal = getattr(property,labelfield)
            labelJSObject = "{'%s':'%s'}"%(labelfield,str(propVal))
        #Reproject to web mercator (900913 is the legacy Web Mercator SRID)
        prop_wkb = self.dbSession.scalar(property.geom.ST_Transform(900913))
        web_geom = WKBElement(prop_wkb)
        prop_geo_json = self.dbSession.scalar(web_geom.ST_AsGeoJSON())
        overlayJS = "drawProperty('%s',%s);"%(prop_geo_json,labelJSObject)
        zoomLevel,ok = self._setJS(overlayJS).toInt()
        #Raise map zoom changed event
        self.onZoomLevelChanged(zoomLevel)
    def removeOverlay(self):
        '''
        Removes all property overlays.
        NOTE(review): this executes an empty JavaScript string, so it appears
        to be a stub — confirm the intended removal call.
        '''
        overlayRemJS = ""
        self._setJS(overlayRemJS)
    def setStyle(self,style):
        '''
        Set the style of the property
        '''
        self._style = style
    def _updateLayerStyle(self):
        '''
        Updates the style of the property vector layer
        in the map.
        '''
        olStyle = self._style.toJson()
        updateStyleJS = "setPropertyStyle(%s)"%(olStyle,)
        self._setJS(updateStyleJS)
    def _setJS(self,javascript):
        '''
        Execute `javascript` in the page's main frame and return the
        resulting QVariant.
        '''
        frame = self.olPage.mainFrame()
        return frame.evaluateJavaScript(javascript)
class OLWebPage(QWebPage):
    '''
    Custom web page implementation: applies the QGIS proxy settings to the
    page's network access manager if a proxy has been specified.
    '''
    def __init__(self,parent=None):
        QWebPage.__init__(self,parent)
        self._manager = None
        #Set proxy in webpage
        proxy = getProxy()
        if proxy is not None:
            self._manager = QNetworkAccessManager()
            self._manager.setProxy(proxy)
            self.setNetworkAccessManager(self._manager)
    def javaScriptConsoleMessage(self, message, lineNumber, sourceID):
        '''
        Log JavaScript console messages to file.
        This is meant for debugging purposes only.
        '''
        logEntry = "%s[%d]: %s" % (sourceID, lineNumber, message)
        qDebug(logEntry)
        # Bug fix: the log file was opened with mode 'w' (truncating it on
        # every single console message) and the handle was never closed.
        # Append instead, and let the context manager close/flush.
        # NOTE(review): hard-coded Windows path — should come from config.
        logFile = "D:/Logs.txt"
        with open(logFile, 'a') as f:
            f.write(logEntry)
            if not logEntry.endswith("\n"):
                f.write("\n")
|
from __future__ import unicode_literals
from django.db import models
from posts.models import Post
from comments.models import Comment
# Create your models here.
class Rule(models.Model):
    """Links a Post to the Comments marking its solution and response."""
    post = models.ForeignKey(Post)
    is_first = models.BooleanField(default=False)
    is_last = models.BooleanField(default=False)
    # NOTE(review): default=False is not a meaningful ForeignKey default
    # (expects a pk or None); left unchanged to avoid a migration — confirm.
    solution = models.ForeignKey(Comment,related_name='solution', default=False)
    response = models.ForeignKey(Comment,related_name='response', default=False)
    def __unicode__(self):
        # Bug fix: the model has no `name` field, so `return self.name`
        # raised AttributeError; derive a label from a field that exists.
        return u"Rule for post %s" % self.post_id
    def __str__(self):
        return "Rule for post %s" % self.post_id
|
import serial
import smbus
SERIAL_DEV = '/dev/ttyS0' #/dev/ttyAMA0
I2C_ADDRESS = 0x60
def open_serial(baudrate):
    """Open SERIAL_DEV at `baudrate` and return the port object.

    Bug fix: the result was assigned to a local named `serial`, shadowing
    the `serial` module and raising UnboundLocalError before the call.
    """
    # NOTE(review): the original passed 2 as the third positional argument,
    # which is `bytesize` (invalid — pyserial requires 5..8).  A 2-second
    # timeout was almost certainly intended; passed by keyword here — confirm.
    return serial.Serial(SERIAL_DEV, baudrate, timeout=2)
#def close_serial(serial):
# wiringpi.serialClose(serial)
def write_serial(serial, msg):
    """Write `msg` to the open serial port object.

    NOTE(review): the parameter name shadows the `serial` module import.
    """
    serial.write(msg)
def read_serial(serial):
    """Read and return up to 9 bytes from the serial port.

    NOTE(review): 9 looks like a fixed device frame size — confirm.
    """
    return serial.read(9)
def open_i2c():
    """Open and return SMBus number 1."""
    return smbus.SMBus(1)
def write_i2c(bus, msg):
    """Write byte `msg` to register 0x00 of the device at I2C_ADDRESS."""
    bus.write_byte_data(I2C_ADDRESS, 0x00, msg)
def read_i2c(bus):
    """Read and return one byte from register 0x00 of the device at I2C_ADDRESS."""
    return bus.read_byte_data(I2C_ADDRESS, 0x00)
if __name__ == "__main__":
serial = open_serial(9600)
i2c = open_i2c()
while True:
|
from pathlib import Path
from pytest_mock import MockerFixture
from clutchless.command.link import LinkCommand, LinkFailure, ListLinkCommand
from clutchless.domain.torrent import MetainfoFile
from clutchless.external.metainfo import TorrentData
from clutchless.service.torrent import LinkService, FindService
def test_link_success(mocker: MockerFixture):
    """A torrent whose data is found is linked and reported in `success`."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    find_service = mocker.Mock(spec=FindService)
    find_service.find.return_value = {TorrentData(metainfo_file, location)}
    command = LinkCommand(link_service, find_service)
    output = command.run()
    assert output.success == [TorrentData(metainfo_file, location)]
def test_link_no_matching_data(mocker: MockerFixture):
    """A torrent with no located data ends up in `no_matching_data`."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless"})
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    find_service = mocker.Mock(spec=FindService)
    find_service.find.return_value = {TorrentData(metainfo_file)}
    command = LinkCommand(link_service, find_service)
    output = command.run()
    assert output.success == []
    assert output.no_matching_data == {metainfo_file}
def test_link_failure(mocker: MockerFixture):
    """A RuntimeError from change_location is reported as a LinkFailure."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    link_service.change_location.side_effect = RuntimeError("something")
    find_service = mocker.Mock(spec=FindService)
    torrent_data = TorrentData(metainfo_file, location)
    find_service.find.return_value = {torrent_data}
    command = LinkCommand(link_service, find_service)
    output = command.run()
    assert output.success == []
    assert output.no_matching_data == set()
    assert output.fail == [LinkFailure(torrent_data, "something")]
def test_link_run_failure_output(mocker: MockerFixture, capsys):
    """display() lists both missing-data and failed-link torrents."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    link_service.change_location.side_effect = RuntimeError("something")
    find_service = mocker.Mock(spec=FindService)
    torrent_data = TorrentData(metainfo_file, location)
    missing_torrent_data = TorrentData(metainfo_file)
    find_service.find.return_value = {torrent_data, missing_torrent_data}
    command = LinkCommand(link_service, find_service)
    output = command.run()
    output.display()
    result = capsys.readouterr().out
    assert (
        result
        == "\n".join(
            [
                "Couldn't find the data for the following torrents:",
                "some_name",
                "Failed to link the following torrents:",
                "some_name because: something",
            ]
        )
        + "\n"
    )
def test_link_dry_run_failure_output(mocker: MockerFixture, capsys):
    """dry_run_display() lists found torrents and those with missing data."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    link_service.change_location.side_effect = RuntimeError("something")
    find_service = mocker.Mock(spec=FindService)
    torrent_data = TorrentData(metainfo_file, location)
    missing_torrent_data = TorrentData(metainfo_file)
    find_service.find.return_value = {torrent_data, missing_torrent_data}
    command = LinkCommand(link_service, find_service)
    output = command.dry_run()
    output.dry_run_display()
    result = capsys.readouterr().out
    assert (
        result
        == "\n".join(
            [
                "Found the following torrents:",
                "some_name at .",
                "Couldn't find data for the following torrents:",
                "some_name",
            ]
        )
        + "\n"
    )
def test_link_run_success_output(mocker: MockerFixture, capsys):
    """display() reports a successfully linked torrent with its location."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    find_service = mocker.Mock(spec=FindService)
    find_service.find.return_value = {TorrentData(metainfo_file, location)}
    command = LinkCommand(link_service, find_service)
    output = command.run()
    output.display()
    result = capsys.readouterr().out
    assert (
        result == "\n".join(["Linked the following torrents:", "some_name at ."]) + "\n"
    )
def test_link_dry_run_success_output(mocker: MockerFixture, capsys):
    """dry_run_display() reports a found torrent without linking it."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    location = Path()
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    find_service = mocker.Mock(spec=FindService)
    find_service.find.return_value = {TorrentData(metainfo_file, location)}
    command = LinkCommand(link_service, find_service)
    output = command.dry_run()
    output.dry_run_display()
    result = capsys.readouterr().out
    assert (
        result == "\n".join(["Found the following torrents:", "some_name at ."]) + "\n"
    )
def test_link_list_run(mocker: MockerFixture):
    """ListLinkCommand.run() returns the set of incomplete metainfo files."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    command = ListLinkCommand(link_service)
    output = command.run()
    assert output.files == {metainfo_file}
def test_link_list_output(mocker: MockerFixture, capsys):
    """ListLinkCommand output names each missing-data torrent."""
    metainfo_file = MetainfoFile({"info_hash": "meaningless", "name": "some_name"})
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {metainfo_file: 1}
    command = ListLinkCommand(link_service)
    output = command.run()
    output.display()
    result = capsys.readouterr().out
    assert (
        result
        == "\n".join(["Found following missing data torrents:", "some_name"]) + "\n"
    )
def test_link_list_empty_output(mocker: MockerFixture, capsys):
    """ListLinkCommand output says so when there are no missing-data torrents."""
    link_service = mocker.Mock(spec=LinkService)
    link_service.get_incomplete_id_by_metainfo_file.return_value = {}
    command = ListLinkCommand(link_service)
    output = command.run()
    output.display()
    result = capsys.readouterr().out
    assert result == "No missing data torrents found.\n"
|
import pandas as pd
import datetime
import smtplib
GMAIL_ID = "____" # Enter your email
GMAIL_PASSWORD = "___" # Enter your email password then run the Program
def sendEmail(to,sub,msg):
    """Send a plain-text email via Gmail's SMTP server.

    to: recipient address; sub: subject line; msg: body text.
    """
    s= smtplib.SMTP("smtp.gmail.com", 587)
    try:
        s.starttls()
        s.login(GMAIL_ID,GMAIL_PASSWORD)
        # Bug fix: the header read "Subject :..." — the space before the
        # colon makes it an invalid RFC 5322 header name.
        s.sendmail(GMAIL_ID,to, f"Subject: {sub}\n\n{msg}")
        # Moved after the send: previously this printed before any attempt,
        # even when login or sending failed.
        print ("Successfully Send Email !!!!!!!!!!")
    finally:
        # Always release the SMTP connection, even on failure.
        s.quit()
if __name__ == "__main__":
sendEmail(GMAIL_ID, "subject","test message")
df = pd.read_excel("Data.xlsx")
today = datetime.datetime.now().strftime("%d-%m")
yearnow = datetime.datetime.now().strftime("%Y")
writels = []
for index,item in df.iterrows():
bday = item["Brithday"].strftime("%d-%m")
if (today == bday) and yearnow not in str(item["years"]):
sendEmail(item["email"],"Happy Brithday",item["Message"])
writels.append(index)
# print(writels)
for i in writels:
yr = df.loc[i,"years"]
df.loc[i,"years"] = str(yr) + "," + str(yearnow)
# print(df.loc[i,"years"])
df.to_excel("Data.xlsx", index = False)
|
{
"targets": [
{
"target_name": "protobuf",
"type": "static_library",
"include_dirs": [
"2.6.1/protobuf-2.6.1/src",
"2.6.1/protobuf-2.6.1" # for config.h
],
"sources": [
"2.6.1/protobuf-2.6.1/src/google/protobuf/*.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/io/*.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/stubs/*.cc"
],
"sources!": [
"2.6.1/protobuf-2.6.1/src/google/protobuf/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/io/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/stubs/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/test_util.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/test_util_lite.cc"
],
"direct_dependent_settings": {
"include_dirs": [
"2.6.1/protobuf-2.6.1/src"
]
}
},
# this is the compiler for *.proto files
{
"target_name": "protoc",
"type": "executable",
"include_dirs": [
"2.6.1/protobuf-2.6.1" # for config.h
],
"sources": [
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/*.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/cpp/*.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/python/*.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/java/*.cc"
],
"sources!": [
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/mock_code_generator.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/test_plugin.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/cpp/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/python/*unittest.cc",
"2.6.1/protobuf-2.6.1/src/google/protobuf/compiler/java/*unittest.cc"
],
"dependencies": [
"protobuf"
]
},
# this is the library that builds the addressbook.pb.* which is
# being generated by protoc. This library is used by both example apps.
# Instead we could have each example app compile addressbook.pb.cc.
{
"target_name": "protobuf-example-addressbook",
"type": "static_library",
"include_dirs": [
# I wish we wouldn't need this funky root include dir, but
# see my comments on the protoc action below.
"./"
],
"direct_dependent_settings": {
"include_dirs": [ "./" ]
},
"sources": [
# this file could also be generated manually via
# >cd examples
# >protoc *.proto --cpp_out=.
"2.6.1/protobuf-2.6.1/examples/addressbook.pb.cc"
],
"actions": [
{
# See https://code.google.com/p/gyp/wiki/GypLanguageSpecification
# for actions: they are typically used for running code
# generation tools that provide input for subsequent
# compilation steps. This action here runs the just built
# protoc to generate *.pb.cc+h from *.proto file.
# See also http://src.chromium.org/chrome/trunk/src/build/protoc.gypi
# for how to create a rule for protoc.
"action_name": "protoc",
"inputs": [
# technically the protoc sources are (indirect) inputs
# too, I don't bother listing these here: inter-target
# deps should take care of this (except when the compiler
# sources themselves are modified).
"2.6.1/protobuf-2.6.1/examples/addressbook.proto"
],
"outputs": [
"2.6.1/protobuf-2.6.1/examples/addressbook.pb.cc"
],
            # the cwd for the action is the *.gyp's parent dir.
# to print the cmd line prefix the action list with 'echo'.
# WARNING: a big drawback of this action here is that the
# generated *.pb.cc will contain an #include directive with
# relative path 2.6.1/protobuf-2.6.1/examples/*.pb.h.
# Means whichever target compiles this *.cc needs the root
# dir in its -I list. Potential fixes:
# a) invoke a python wrapper that changes the cwd before
# calling protoc
# b) add a --basedir option or some such to protoc
# c) change gyp to allow specifying a cwd on gyp actions
"action": [ "<(PRODUCT_DIR)/protoc", "<@(_inputs)", "--cpp_out=." ],
"message": "invoking protoc"
}
],
"dependencies": [
"protobuf",
# to ensure that protoc was built before we build the example:
"protoc"
]
},
{
"target_name": "protobuf-example-add_person",
"type": "executable",
"test": {
"args": ["my_addressbook.pb"],
# this example app is interactive, so feed canned text
# into stdin
"stdin": "123\nkjell\n\n4125556666\n"
},
"include_dirs": [
"2.6.1/protobuf-2.6.1/examples"
],
"sources": [
"2.6.1/protobuf-2.6.1/examples/add_person.cc"
],
"dependencies": [ "protobuf", "protobuf-example-addressbook" ]
},
{
"target_name": "protobuf-example-list_people",
"type": "executable",
# To run this manually:
# >out/release/protobuf-example-add_person my_addressbook.pb
# >out/release/protobuf-example-list_people my_addressbook.pb
# So the list_people example reads the file written by add_person.
"test": {
"args": [ "my_addressbook.pb" ]
},
"include_dirs": [
"2.6.1/protobuf-2.6.1/examples"
],
"sources": [
"2.6.1/protobuf-2.6.1/examples/list_people.cc"
],
"dependencies": [
"protobuf",
"protobuf-example-addressbook",
# this dep only ensures that my_addressbook.pb was written
# by the add_person example
"protobuf-example-add_person"
]
}
# To build these tests we need a more convenient rule to turn
# *.proto into *.pb.?
# {
# "target_name": "protobuf-test",
# "type": "executable",
# "test": {},
# "include_dirs": [
# #"2.6.1/protobuf-2.6.1" # for config.h
# ],
# "sources": [
# "2.6.1/protobuf-2.6.1/src/google/protobuf/*unittest.cc",
# "2.6.1/protobuf-2.6.1/src/google/protobuf/io/*unittest.cc",
# "2.6.1/protobuf-2.6.1/src/google/protobuf/stubs/*unittest.cc",
# "2.6.1/protobuf-2.6.1/src/google/protobuf/test_util.cc"
# ],
# "dependencies": [
# "protobuf",
# "../googletest/googletest.gyp:*"
# ]
# }
]
}
|
import subprocess
import json
import time
# define users and test fixture constants: CLI key names, coin
# denominations, and ethbridge claim types.
VALIDATOR = "user1"
USER = "user2"
ROWAN = "rwn"
PEGGYETH = "ceth"
PEGGYROWAN = "erwn"
ETH = "eth"
SLEEPTIME = 5  # seconds to wait for a transaction to be processed
AMOUNT = 10
CLAIMLOCK = "lock"
CLAIMBURN = "burn"
def print_error_message(error_message):
    """Print `error_message` framed by banner lines for visibility."""
    banner = "#################################"
    print(banner)
    print("!!!!Error: ", error_message)
    print(banner)
def get_shell_output(command_line):
    """Run `command_line` in a shell and return its stdout (bytes),
    stripped of trailing whitespace.

    NOTE(review): shell=True with interpolated strings is injection-prone;
    kept because every caller builds commands from local constants.
    """
    sub = subprocess.Popen(command_line, shell=True, stdout=subprocess.PIPE)
    # Bug fix: communicate() waits for the process to exit and drains the
    # pipe, instead of reading .stdout directly with no wait/reap.
    stdout, _ = sub.communicate()
    return stdout.rstrip()
def get_user_account(user):
    """Return the address of CLI key `user` (sifnodecli keys show -a)."""
    command_line = "sifnodecli keys show " + user + " -a"
    return get_shell_output(command_line).decode("utf-8")
def get_operator_account(user):
    """Return the validator-operator (bech val) address of CLI key `user`."""
    command_line = "sifnodecli keys show " + user + " -a --bech val"
    return get_shell_output(command_line).decode("utf-8")
def get_account_nonce(user):
    """Return the current sequence number (nonce) of `user`'s account."""
    command_line = "sifnodecli q auth account " + get_user_account(user)
    output = get_shell_output(command_line).decode("utf-8")
    json_str = json.loads(output)
    return json_str["value"]["sequence"]
def get_balance(user, denom):
    """Return `user`'s balance of coin `denom`, or 0 if the coin is absent.

    NOTE(review): returns the JSON "amount" (a string) when found but the
    int 0 when absent; callers wrap the result in int(), so both work.
    """
    command_line = "sifnodecli q auth account " + get_user_account(user)
    output = get_shell_output(command_line).decode("utf-8")
    json_str = json.loads(output)
    coins = json_str["value"]["coins"]
    for coin in coins:
        if coin["denom"] == denom:
            return coin["amount"]
    return 0
def create_claim(user, validator, amount, denom, claim_type):
    """Submit an ethbridge create-claim (lock or burn) tx for `user`,
    signed by `validator`, and return the raw CLI output."""
    command_line = """sifnodecli tx ethbridge create-claim 0x30753E4A8aad7F8597332E813735Def5dD395028 {} {} \
    0x11111111262b236c9ac9a9a8c8e4276b5cf6b2c9 {} {} {} {} \
    --token-contract-address=0x0000000000000000000000000000000000000000 \
    --ethereum-chain-id=3 --from={} \
    --yes""".format(get_account_nonce(validator), denom, get_user_account(user),
                    get_operator_account(validator), amount, claim_type, validator)
    # print(command_line)
    return get_shell_output(command_line)
def burn_peggy_coin(user, validator, amount):
    """Burn `amount` of the pegged eth coin (ceth) from `user`'s account
    and return the raw CLI output."""
    command_line = """sifnodecli tx ethbridge burn {} \
    0x11111111262b236c9ac9a9a8c8e4276b5cf6b2c9 {} {} \
    --ethereum-chain-id=3 --from={} \
    --yes""".format(get_user_account(user),
                    amount, PEGGYETH, user)
    # print(command_line)
    return get_shell_output(command_line)
def lock_rowan(user, amount):
    """Lock `amount` rwn from `user`'s account for transfer to ethereum
    and return the raw CLI output."""
    command_line = """sifnodecli tx ethbridge lock {} \
    0x11111111262b236c9ac9a9a8c8e4276b5cf6b2c9 {} rwn \
    --ethereum-chain-id=3 --from={} --yes
    """.format(get_user_account(user), amount, user)
    # print(command_line)
    return get_shell_output(command_line)
def test_case_1():
    """Lock eth on ethereum; expect `USER`'s ceth balance to rise by AMOUNT."""
    print(
        "########## Test Case One Start: lock eth in ethereum then mint ceth in sifchain"
    )
    balance_before_tx = int(get_balance(USER, PEGGYETH))
    print("Before lock transaction {}'s balance of {} is {}".format(
        USER, PEGGYETH, balance_before_tx))
    print("Send lock claim to Sifchain...")
    create_claim(USER, VALIDATOR, AMOUNT, ETH, CLAIMLOCK)
    time.sleep(SLEEPTIME)
    balance_after_tx = int(get_balance(USER, PEGGYETH))
    print("After lock transaction {}'s balance of {} is {}".format(
        USER, PEGGYETH, balance_after_tx))
    if balance_after_tx != balance_before_tx + AMOUNT:
        print_error_message("balance is wrong after send eth lock claim")
    print("########## Test Case One Over ##########")
def test_case_2():
    """Burn ceth on sifchain; expect `USER`'s ceth balance to drop by AMOUNT."""
    print(
        "########## Test Case Two Start: burn ceth in sifchain then eth back to ethereum"
    )
    balance_before_tx = int(get_balance(USER, PEGGYETH))
    print("Before burn transaction {}'s balance of {} is {}".format(
        USER, PEGGYETH, balance_before_tx))
    if balance_before_tx < AMOUNT:
        print_error_message("No enough ceth to burn")
        return
    print("Send burn claim to Sifchain...")
    burn_peggy_coin(USER, VALIDATOR, AMOUNT)
    time.sleep(SLEEPTIME)
    balance_after_tx = int(get_balance(USER, PEGGYETH))
    print("After burn transaction {}'s balance of {} is {}".format(
        USER, PEGGYETH, balance_after_tx))
    if balance_after_tx != balance_before_tx - AMOUNT:
        # Bug fix: the message said "eth lock claim" in this burn scenario.
        print_error_message("balance is wrong after send ceth burn claim")
    print("########## Test Case Two Over ##########")
def test_case_3():
    """Lock rowan on sifchain; expect `USER`'s rwn balance to drop by AMOUNT."""
    print(
        "########## Test Case Three Start: lock rowan in sifchain transfer to ethereum"
    )
    balance_before_tx = int(get_balance(USER, ROWAN))
    print("Before lock transaction {}'s balance of {} is {}".format(
        USER, ROWAN, balance_before_tx))
    if balance_before_tx < AMOUNT:
        print_error_message("No enough rowan to lock")
        # Bug fix: missing `return` — the lock was attempted (and the
        # balance check run) even with insufficient funds, unlike the
        # identical guard in test_case_2.
        return
    print("Send lock claim to Sifchain...")
    lock_rowan(USER, AMOUNT)
    time.sleep(SLEEPTIME)
    balance_after_tx = int(get_balance(USER, ROWAN))
    print("After lock transaction {}'s balance of {} is {}".format(
        USER, ROWAN, balance_after_tx))
    if balance_after_tx != balance_before_tx - AMOUNT:
        # Bug fix: the message said "eth lock claim" in this rowan scenario.
        print_error_message("balance is wrong after send rowan lock claim")
    print("########## Test Case Three Over ##########")
def test_case_4():
    """Burn erwn on ethereum; expect `USER`'s rwn balance to rise by AMOUNT."""
    print(
        "########## Test Case Four Start: burn erwn in ethereum then transfer rwn back to sifchain"
    )
    balance_before_tx = int(get_balance(USER, ROWAN))
    print("Before lock transaction {}'s balance of {} is {}".format(
        USER, ROWAN, balance_before_tx))
    print("Send burn claim to Sifchain...")
    create_claim(USER, VALIDATOR, AMOUNT, ROWAN, CLAIMBURN)
    time.sleep(SLEEPTIME)
    balance_after_tx = int(get_balance(USER, ROWAN))
    print("After lock transaction {}'s balance of {} is {}".format(
        USER, ROWAN, balance_after_tx))
    if balance_after_tx != balance_before_tx + AMOUNT:
        # Bug fix: the message said "eth lock claim" in this burn scenario.
        print_error_message("balance is wrong after send rowan burn claim")
    print("########## Test Case Four Over ##########")
# Run the four cross-chain scenarios in order (later cases rely on the
# balances produced by earlier ones).
test_case_1()
test_case_2()
test_case_3()
test_case_4()
|
# People records keyed by name and home.
people = [
    {"name":"harry", "home":"slyhterine"},
    {"name":"chanaka", "home":"sw19"},
    {"name":"saneli", "home":"seeduwa"}
]

def f(person):
    """Sort key: a person's home value."""
    return person["home"]

# Order the list alphabetically by home, in place, then show the result.
people.sort(key=f)
print(people)
from collections import deque
from functools import reduce
# Sample inputs for the bridge-crossing simulation below.
bridge_length=2
weight=10
truck_weights= [7,4,5,6]
# Fix note: a dictionary cannot hold duplicate keys, so `ing` was changed
# from a dict to a list.
def solution(bridge_length, weight, truck_weights):
    """Simulate trucks crossing a bridge and return the total seconds taken.

    bridge_length: seconds a truck spends on the bridge.
    weight: maximum combined weight allowed on the bridge at once.
    truck_weights: waiting trucks in crossing order (consumed by this call).
    """
    crossing = []   # [truck_weight, seconds_on_bridge] pairs, oldest first
    finished = []   # weights of trucks that completed the crossing
    elapsed = 0
    total_trucks = len(truck_weights)
    # Tick one second at a time until every truck has crossed.
    while total_trucks != len(finished):
        elapsed += 1
        # Every truck on the bridge advances one second.
        for truck in crossing:
            truck[1] += 1
        # The lead truck leaves once it has spent bridge_length seconds on
        # the bridge (at most one truck exits per tick).
        for truck in crossing:
            if truck[1] == bridge_length:
                finished.append(crossing.pop(0)[0])
                break
        # Admit the next waiting truck if the bridge can bear its weight.
        if len(truck_weights) != 0:
            load = sum(t[0] for t in crossing)
            if load + truck_weights[0] <= weight:
                crossing.append([truck_weights.pop(0), 0])
    return elapsed
print(solution(bridge_length, weight, truck_weights)) |
def drop(x, y):
    """Return a list of x's elements with every y-th element (1-based) removed."""
    return [item for pos, item in enumerate(x, start=1) if pos % y != 0]
|
import random
import base64
import hashlib
class cryptor():
    """
    Toy symmetric stream cipher keyed by two shared counters.

    Each byte is shifted by the first byte of ``md5(str(key + counter))``,
    where the key/counter pair depends on direction: client->server uses
    (x, toServerSend), server->client uses (y, toClientSend). Both peers
    must process messages in the same order for the counters to stay in sync.

    BUG FIX: the original was Python 2 only — ``hashlib.update`` was fed a
    str, bytes were iterated with ``ord()``, and ``b64encode`` received a
    str. Ported to Python 3 preserving the wire format.

    NOTE(review): arithmetic is mod 255 (not 256), so only characters with
    code point < 255 round-trip — and md5 is not a secure keystream; this
    is obfuscation, not encryption. Confirm before relying on it.
    """

    def __init__(self, x, y, role):
        self.toServer = x        # key base for the client->server direction
        self.toServerSend = 0    # bytes processed client->server
        self.toClient = y        # key base for the server->client direction
        self.toClientSend = 0    # bytes processed server->client
        self.role = role         # "client" or anything else (treated as server)

    def _keystream_byte(self, base, counter):
        """Return the keystream byte for position ``counter`` of key ``base``."""
        m = hashlib.md5()
        m.update(("%d" % (base + counter)).encode("ascii"))
        return m.digest()[0]

    def decrypt(self, crypted):
        """Decode *crypted* (base64 str or bytes) back to the plaintext str."""
        data = base64.b64decode(crypted)
        chars = []
        if self.role == "client":
            # Client receives on the server->client channel.
            for b in data:
                k = self._keystream_byte(self.toClient, self.toClientSend)
                chars.append(chr((b - k) % 255))
                self.toClientSend += 1
        else:
            for b in data:
                k = self._keystream_byte(self.toServer, self.toServerSend)
                chars.append(chr((b - k) % 255))
                self.toServerSend += 1
        return "".join(chars)

    def encrypt(self, message):
        """Encrypt *message* (str) and return it base64-encoded as a str."""
        out = bytearray()
        if self.role == "client":
            # Client sends on the client->server channel.
            for ch in message:
                k = self._keystream_byte(self.toServer, self.toServerSend)
                out.append((ord(ch) + k) % 255)
                self.toServerSend += 1
        else:
            for ch in message:
                k = self._keystream_byte(self.toClient, self.toClientSend)
                out.append((ord(ch) + k) % 255)
                self.toClientSend += 1
        return base64.b64encode(bytes(out)).decode("ascii")
"""
Connector adapters.
To register connectors implemented in this module, it is imported in
gaphor.adapter package.
"""
import logging
from zope import component
from zope.interface import implementer
from gaphor import UML
from gaphor.core import inject
from gaphor.diagram import items
from gaphor.diagram.interfaces import IConnect
logger = logging.getLogger(__name__)
@implementer(IConnect)
class AbstractConnect(object):
    """
    Connection adapter for Gaphor diagram items.
    Line item ``line`` connects with a handle to a connectable item ``element``.
    Attributes:
    - line: connecting item
    - element: connectable item
    The following methods are required to make this work:
    - `allow()`: is the connection allowed at all (during mouse movement for example).
    - `connect()`: Establish a connection between element and line. Also takes care of
    disconnects, if required (e.g. 1:1 relationships)
    - `disconnect()`: Break connection, called when dropping a handle on a
    point where it can not connect.
    - `reconnect()`: Connect to another item (only used if present)
    By convention the adapters are registered by (element, line) -- in that order.
    """
    # Injected service; subclasses use it to create UML model elements.
    element_factory = inject("element_factory")
    def __init__(self, element, line):
        self.element = element
        self.line = line
        # Both items must live on the same canvas; cache it for lookups.
        self.canvas = self.element.canvas
        assert self.canvas == self.element.canvas == self.line.canvas
    def get_connection(self, handle):
        """
        Get connection information
        """
        return self.canvas.get_connection(handle)
    def get_connected(self, handle):
        """
        Get item connected to a handle.
        Returns None when the handle is not connected.
        """
        cinfo = self.canvas.get_connection(handle)
        if cinfo is not None:
            return cinfo.connected
    def get_connected_port(self, handle):
        """
        Get port of item connected to connecting item via specified handle.
        Returns None when the handle is not connected.
        """
        cinfo = self.canvas.get_connection(handle)
        if cinfo is not None:
            return cinfo.port
    def allow(self, handle, port):
        """
        Determine if items can be connected.
        The method contains a hack for folded interfaces, see
        `gaphor.diagram.classes.interface` module documentation for
        connection to folded interface rules.
        Returns `True` by default.
        """
        iface = self.element
        if isinstance(iface, items.InterfaceItem) and iface.folded:
            canvas = self.canvas
            # A folded interface accepts at most one connection, and only
            # from a dependency or implementation line.
            count = any(canvas.get_connections(connected=iface))
            return not count and isinstance(
                self.line, (items.DependencyItem, items.ImplementationItem)
            )
        return True
    def connect(self, handle, port):
        """
        Connect to an element. Note that at this point the line may
        be connected to some other, or the same element.
        Also the connection at UML level still exists.
        Returns `True` if a connection is established.
        """
        return True
    def disconnect(self, handle):
        """Disconnect UML model level connections."""
        pass
@component.adapter(items.ElementItem, items.CommentLineItem)
class CommentLineElementConnect(AbstractConnect):
    """Connect a comment line to any element item."""
    def allow(self, handle, port):
        """
        In addition to the normal check, both line ends may not be connected
        to the same element. Same goes for subjects.
        One of the ends should be connected to a UML.Comment element.
        """
        opposite = self.line.opposite(handle)
        connected_to = self.get_connected(opposite)
        element = self.element
        # Both ends on the very same item is never allowed.
        if connected_to is element:
            return None
        # Same goes for subjects:
        # NOTE(review): this guard only fires when both subjects are falsy
        # (both None) — confirm this is the intended duplicate-subject check.
        if (
            connected_to
            and (not (connected_to.subject or element.subject))
            and connected_to.subject is element.subject
        ):
            return None
        # One end should be connected to a CommentItem:
        cls = items.CommentItem
        # XOR: exactly one of the two ends may be a comment item.
        glue_ok = isinstance(connected_to, cls) ^ isinstance(self.element, cls)
        if connected_to and not glue_ok:
            return None
        # Do not allow links between the comment and the element
        # when the annotation already exists in either direction.
        if (
            connected_to
            and element
            and (
                (
                    isinstance(connected_to.subject, UML.Comment)
                    and self.element.subject in connected_to.subject.annotatedElement
                )
                or (
                    isinstance(self.element.subject, UML.Comment)
                    and connected_to.subject in self.element.subject.annotatedElement
                )
            )
        ):
            return None
        return super(CommentLineElementConnect, self).allow(handle, port)
    def connect(self, handle, port):
        # Once both ends are connected, link comment.annotatedElement to the
        # other end's subject (whichever side is the comment).
        if super(CommentLineElementConnect, self).connect(handle, port):
            opposite = self.line.opposite(handle)
            connected_to = self.get_connected(opposite)
            if connected_to:
                if isinstance(connected_to.subject, UML.Comment):
                    connected_to.subject.annotatedElement = self.element.subject
                else:
                    self.element.subject.annotatedElement = connected_to.subject
    def disconnect(self, handle):
        # Remove the annotation from whichever side is the comment.
        opposite = self.line.opposite(handle)
        oct = self.get_connected(opposite)
        hct = self.get_connected(handle)
        if hct and oct:
            logger.debug("Disconnecting %s and %s" % (hct, oct))
            try:
                if hct.subject and isinstance(oct.subject, UML.Comment):
                    del oct.subject.annotatedElement[hct.subject]
                elif hct.subject and oct.subject:
                    del hct.subject.annotatedElement[oct.subject]
            except ValueError:
                # Annotation already gone at the model level; nothing to do.
                logger.debug(
                    "Invoked CommentLineElementConnect.disconnect() for nonexistent relationship"
                )
        super(CommentLineElementConnect, self).disconnect(handle)
component.provideAdapter(CommentLineElementConnect)
class CommentLineLineConnect(AbstractConnect):
    """Connect a comment line to any diagram line."""
    def allow(self, handle, port):
        """
        In addition to the normal check, both line ends may not be connected
        to the same element. Same goes for subjects.
        One of the ends should be connected to a UML.Comment element.
        """
        opposite = self.line.opposite(handle)
        element = self.element
        connected_to = self.get_connected(opposite)
        # do not connect to the same item nor connect to other comment line
        if (
            connected_to is element
            or not element.subject
            or isinstance(element, items.CommentLineItem)
        ):
            return None
        # Same goes for subjects:
        # NOTE(review): this guard only fires when both subjects are falsy
        # (both None) — confirm this is the intended duplicate-subject check.
        if (
            connected_to
            and (not (connected_to.subject or element.subject))
            and connected_to.subject is element.subject
        ):
            return None
        # One end should be connected to a CommentItem:
        cls = items.CommentItem
        # XOR: exactly one of the two ends may be a comment item.
        glue_ok = isinstance(connected_to, cls) ^ isinstance(self.element, cls)
        if connected_to and not glue_ok:
            return None
        return super(CommentLineLineConnect, self).allow(handle, port)
    def connect(self, handle, port):
        # Once both ends are connected, link the annotation from whichever
        # side is the comment to the other side's subject.
        if super(CommentLineLineConnect, self).connect(handle, port):
            opposite = self.line.opposite(handle)
            c = self.get_connected(opposite)
            if c and self.element.subject:
                if isinstance(c.subject, UML.Comment):
                    c.subject.annotatedElement = self.element.subject
                else:
                    self.element.subject.annotatedElement = c.subject
    def disconnect(self, handle):
        # Remove the annotation link between the two connected ends.
        c1 = self.get_connected(handle)
        opposite = self.line.opposite(handle)
        c2 = self.get_connected(opposite)
        if c1 and c2:
            if (
                isinstance(c1.subject, UML.Comment)
                and c2.subject in c1.subject.annotatedElement
            ):
                del c1.subject.annotatedElement[c2.subject]
            elif c2.subject and c1.subject in c2.subject.annotatedElement:
                del c2.subject.annotatedElement[c1.subject]
        super(CommentLineLineConnect, self).disconnect(handle)
component.provideAdapter(
    CommentLineLineConnect, adapts=(items.DiagramLine, items.CommentLineItem)
)
class InverseCommentLineLineConnect(CommentLineLineConnect):
    """
    In case a line is disconnected that contains a comment-line,
    the comment line unlinking should happen in a correct way.
    """
    def __init__(self, line, element):
        # Swap the argument order so this adapter, registered as
        # (CommentLineItem, DiagramLine), reuses the (element, line) logic.
        super().__init__(element, line)
component.provideAdapter(
    InverseCommentLineLineConnect, adapts=(items.CommentLineItem, items.DiagramLine)
)
class UnaryRelationshipConnect(AbstractConnect):
    """
    Base class for relationship connections, such as associations,
    dependencies and implementations.
    Unary relationships are allowed to connect both ends to the same element
    This class introduces a new method: relationship() which is used to
    find an existing relationship in the model that does not yet exist
    on the canvas.
    """
    element_factory = inject("element_factory")
    def relationship(self, required_type, head, tail):
        """
        Find an existing relationship in the model that meets the
        required type and is connected to the same model element the head
        and tail of the line are connected to.
        type - the type of relationship we're looking for
        head - tuple (association name on line, association name on element)
        tail - tuple (association name on line, association name on element)
        Returns the matching model element, or None when there is none.
        """
        line = self.line
        head_subject = self.get_connected(line.head).subject
        tail_subject = self.get_connected(line.tail).subject
        # First check if the right subject is already connected:
        if (
            line.subject
            and getattr(line.subject, head.name) is head_subject
            and getattr(line.subject, tail.name) is tail_subject
        ):
            return line.subject
        # Try to find a relationship, that is already created, but not
        # yet displayed in the diagram.
        for gen in getattr(tail_subject, tail.opposite):
            if not isinstance(gen, required_type):
                continue
            gen_head = getattr(gen, head.name)
            # The head end may be a collection or a single element.
            try:
                if not head_subject in gen_head:
                    continue
            except TypeError:
                if not gen_head is head_subject:
                    continue
            # Check for this entry on line.canvas
            for item in gen.presentation:
                # Allow line to be returned. Avoids strange
                # behaviour during loading
                if item.canvas is line.canvas and item is not line:
                    break
            else:
                # No other presentation of this relationship exists on this
                # canvas, so it can be reused for the line.
                return gen
        return None
    def relationship_or_new(self, type, head, tail):
        """
        Like relation(), but create a new instance if none was found.
        """
        relation = self.relationship(type, head, tail)
        if not relation:
            line = self.line
            relation = self.element_factory.create(type)
            setattr(relation, head.name, self.get_connected(line.head).subject)
            setattr(relation, tail.name, self.get_connected(line.tail).subject)
        return relation
    def reconnect_relationship(self, handle, head, tail):
        """
        Reconnect relationship for given handle.
        :Parameters:
         handle
            Handle at which reconnection happens.
         head
            Relationship head attribute name.
         tail
            Relationship tail attribute name.
        """
        line = self.line
        c1 = self.get_connected(line.head)
        c2 = self.get_connected(line.tail)
        # Only the end at `handle` is rewired on the model element.
        if line.head is handle:
            setattr(line.subject, head.name, c1.subject)
        elif line.tail is handle:
            setattr(line.subject, tail.name, c2.subject)
        else:
            raise ValueError("Incorrect handle passed to adapter")
    def connect_connected_items(self, connections=None):
        """
        Cause items connected to ``line`` to reconnect, allowing them to
        establish or destroy relationships at model level.
        """
        line = self.line
        canvas = self.canvas
        solver = canvas.solver
        # First make sure coordinates match
        solver.solve()
        for cinfo in connections or canvas.get_connections(connected=line):
            if line is cinfo.connected:
                continue
            adapter = component.queryMultiAdapter((line, cinfo.connected), IConnect)
            assert adapter, "No element to connect {} and {}".format(
                line, cinfo.connected
            )
            adapter.connect(cinfo.handle, cinfo.port)
    def disconnect_connected_items(self):
        """
        Cause items connected to @line to be disconnected.
        This is necessary if the subject of the @line is to be removed.
        Returns a list of (item, handle) pairs that were connected (this
        list can be used to connect items again with connect_connected_items()).
        """
        line = self.line
        canvas = self.canvas
        solver = canvas.solver
        # First make sure coordinates match
        solver.solve()
        connections = list(canvas.get_connections(connected=line))
        for cinfo in connections:
            adapter = component.queryMultiAdapter(
                (cinfo.item, cinfo.connected), IConnect
            )
            assert adapter
            adapter.disconnect(cinfo.handle)
        return connections
    def connect_subject(self, handle):
        """
        Establish the relationship at model level.
        """
        raise NotImplementedError("Implement connect_subject() in a subclass")
    def disconnect_subject(self, handle):
        """
        Disconnect the diagram item from its model element. If there are
        no more presentations(diagram items) connected to the model element,
        unlink() it too.
        """
        line = self.line
        old = line.subject
        del line.subject
        if old and len(old.presentation) == 0:
            old.unlink()
    def connect(self, handle, port):
        """
        Connect the items to each other. The model level relationship
        is established by connect_subject().
        """
        if super(UnaryRelationshipConnect, self).connect(handle, port):
            opposite = self.line.opposite(handle)
            oct = self.get_connected(opposite)
            # Only create the model relationship once both ends connect.
            if oct:
                self.connect_subject(handle)
                line = self.line
                if line.subject:
                    self.connect_connected_items()
            return True
    def disconnect(self, handle):
        """
        Disconnect model element.
        """
        line = self.line
        opposite = line.opposite(handle)
        oct = self.get_connected(opposite)
        hct = self.get_connected(handle)
        if hct and oct:
            # Both sides of line are connected => disconnect
            connections = self.disconnect_connected_items()
            self.disconnect_subject(handle)
        super(UnaryRelationshipConnect, self).disconnect(handle)
class RelationshipConnect(UnaryRelationshipConnect):
    """
    Base adapter for binary relationships: in addition to the unary
    relationship behaviour, both ends may not be connected to the same
    element (or to items sharing the same subject).
    """
    def allow(self, handle, port):
        """
        In addition to the normal check, both relationship ends may not be
        connected to the same element. Same goes for subjects.
        """
        opposite = self.line.opposite(handle)
        line = self.line
        element = self.element
        connected_to = self.get_connected(opposite)
        # Element can not be a parent of itself.
        if connected_to is element:
            return None
        # Same goes for subjects:
        # NOTE(review): this guard only fires when both subjects are falsy
        # (both None) — confirm this is the intended duplicate-subject check.
        if (
            connected_to
            and (not (connected_to.subject or element.subject))
            and connected_to.subject is element.subject
        ):
            return None
        return super(RelationshipConnect, self).allow(handle, port)
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QTimer
import sys
import time
import random
class Ui_MainWindow(object):
    """
    Sorting-visualizer window: ten vertical progress bars act as the array,
    with buttons to randomize the values and run a (bubble) sort.

    NOTE(review): the "Insertion Sort" and "Merge Sort" buttons are created
    but never connected to a handler — confirm whether that is intended.
    """

    # Shared look for every bar: transparent background, green chunks.
    _BAR_STYLE = ("QProgressBar {\n"
"    background-color : rgba(0, 0, 0, 0);\n"
"    border : 1px;\n"
"}\n"
"\n"
"QProgressBar::chunk {\n"
"    \n"
"    \n"
"    background-color: rgb(85, 255, 0);\n"
"    border: 1px solid black;\n"
"}")

    def _bar_names(self):
        """Return the Designer-style attribute names of the ten bars, in order."""
        return ["progressBar"] + ["progressBar_%d" % i for i in range(2, 11)]

    def _bars(self):
        """Return the ten progress-bar widgets, left to right."""
        return [getattr(self, name) for name in self._bar_names()]

    def setupUi(self, MainWindow):
        """Build the widget tree on *MainWindow* and wire up the buttons."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(486, 505)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Ten vertical bars, 30 px apart, seeded with 100, 90, ..., 10.
        for idx, name in enumerate(self._bar_names()):
            bar = QtWidgets.QProgressBar(self.centralwidget)
            bar.setGeometry(QtCore.QRect(30 + 30 * idx, 25, 16, 441))
            bar.setStyleSheet(self._BAR_STYLE)
            bar.setProperty("value", 100 - 10 * idx)
            bar.setAlignment(QtCore.Qt.AlignBottom | QtCore.Qt.AlignJustify)
            bar.setTextVisible(False)
            bar.setOrientation(QtCore.Qt.Vertical)
            bar.setTextDirection(QtWidgets.QProgressBar.BottomToTop)
            bar.setObjectName(name)
            setattr(self, name, bar)
        # Decorative frame lines around the bar area.
        for name, rect, shape in (
            ("line", QtCore.QRect(20, 5, 311, 16), QtWidgets.QFrame.HLine),
            ("line_2", QtCore.QRect(20, 475, 311, 16), QtWidgets.QFrame.HLine),
            ("line_3", QtCore.QRect(320, 10, 20, 471), QtWidgets.QFrame.VLine),
            ("line_4", QtCore.QRect(10, 10, 20, 471), QtWidgets.QFrame.VLine),
        ):
            frame = QtWidgets.QFrame(self.centralwidget)
            frame.setGeometry(rect)
            frame.setFrameShape(shape)
            frame.setFrameShadow(QtWidgets.QFrame.Sunken)
            frame.setObjectName(name)
            setattr(self, name, frame)
        # Action buttons down the right-hand side, 100 px apart.
        for idx, name in enumerate(
            ("pushButton", "pushButton_2", "pushButton_3", "pushButton_4")
        ):
            button = QtWidgets.QPushButton(self.centralwidget)
            button.setGeometry(QtCore.QRect(350, 40 + 100 * idx, 121, 61))
            button.setObjectName(name)
            setattr(self, name, button)
        # Status caption + value label.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(390, 430, 47, 13))
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(390, 450, 47, 13))
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 486, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.pushButton_2.clicked.connect(self.bubbleSort)
        self.pushButton.clicked.connect(self.generate)

    def generate(self):
        """Fill every bar with a fresh random value in [0, 100]."""
        # BUG FIX: randint's upper bound is inclusive, so (0, 101) could
        # produce 101 — beyond the bar's 0-100 range; use (0, 100).
        for bar in self._bars():
            bar.setValue(random.randint(0, 100))

    def bubbleSort(self):
        """Bubble-sort the bar values in place, animating each swap."""
        self.label_2.setText("Sorting...")
        bars = self._bars()
        values = [bar.value() for bar in bars]
        n = len(values)
        for i in range(n - 1):
            for j in range(0, n - i - 1):
                if values[j] > values[j + 1]:
                    time.sleep(0.1)
                    values[j], values[j + 1] = values[j + 1], values[j]
                    bars[j].setValue(values[j])
                    bars[j + 1].setValue(values[j + 1])
                    # BUG FIX: time.sleep() blocks the Qt event loop, so the
                    # bars never repainted mid-sort; let Qt process pending
                    # paint events after each swap so the animation shows.
                    QtWidgets.QApplication.processEvents()
        self.label_2.setText("Idle")

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        for bar in self._bars():
            bar.setFormat(_translate("MainWindow", "%p"))
        self.pushButton.setText(_translate("MainWindow", "Generate"))
        self.pushButton_2.setText(_translate("MainWindow", "Bubble Sort"))
        self.pushButton_3.setText(_translate("MainWindow", "Insertion Sort"))
        self.pushButton_4.setText(_translate("MainWindow", "Merge Sort"))
        self.label.setText(_translate("MainWindow", "Status:"))
        self.label_2.setText(_translate("MainWindow", "Idle"))
if __name__ == "__main__":
    # Standard Qt bootstrap: build the window, show it, run the event loop.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
from rest_framework import serializers
from api.models import RandomUid
class RandomUidSerializer(serializers.ModelSerializer):
    """Serialize a RandomUid record, exposing only its uuid and creation time."""
    class Meta:
        model = RandomUid
        fields = ["uuid", "created_at"]
|
import requests
import json
import csv
import pandas as pd
import re
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from collections import OrderedDict
from bs4 import BeautifulSoup
import sys
def get_soup(url):
    """Fetch *url*, retrying transient connection failures, and return the parsed BeautifulSoup document."""
    session = requests.Session()
    # Up to 3 retries on connection errors, with exponential backoff.
    retry_policy = Retry(connect=3, backoff_factor=0.5)
    for scheme in ('http://', 'https://'):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    response = session.get(url)
    return BeautifulSoup(response.content, 'html.parser')
def write_list_to_csv(headers, rows, file_name):
    """Write *headers* followed by *rows* to *file_name* as tab-separated values."""
    # BUG FIX: the csv module requires newline='' on the file handle —
    # without it every row gains an extra blank line on Windows. The
    # original 'w+' mode also granted needless read access.
    with open(file_name, 'w', encoding='UTF-8', newline='') as output_file:
        wr = csv.writer(output_file, delimiter='\t')
        wr.writerow(headers)
        wr.writerows(rows)
def read_RA_artist_names_from_file():
    """
    Return all RA artist names: the keys of the 'top' profiles file followed
    by the keys of the 'others' profiles file, preserving file order.
    """
    artist_names = []
    # BUG FIX: json.load(open(...)) leaked the file handles; use `with`
    # so they are closed deterministically.
    with open('data/RA/top_profiles_info.json', 'r', encoding="UTF-8") as f:
        top_profiles = json.load(f, object_pairs_hook=OrderedDict)
    with open('data/RA/others_profiles_info.json', 'r', encoding="UTF-8") as f:
        others_profiles = json.load(f, object_pairs_hook=OrderedDict)
    artist_names.extend(top_profiles.keys())
    artist_names.extend(others_profiles.keys())
    return artist_names
def save_MB_artist_urls_to_file(artist_names, file_name):
    """
    Search partyflock for each artist name and record the first matching
    profile URL (or None when the search yields nothing) to a TSV file.
    """
    #E.g. Url : https://musicbrainz.org/search?query=Jon+Hopkins&type=artist&method=indexed
    search_url = "https://partyflock.nl/search?enc=%E2%98%A0&TERMS={0}&ELEMENT=artist"
    rows = []
    for name in artist_names:
        lookup = search_url.format(name.replace(" ", "+"))
        soup = get_soup(lookup)
        print(lookup)
        results = soup.find('div', {'class': 'search'})
        if results is None:
            # No search hits for this artist.
            rows.append([name, None])
        else:
            # Take the first linked result as the artist's page.
            rows.append([name, results.find("a").get('href')])
    write_list_to_csv(['artist_name', 'artist_page_url'], rows, file_name)
def get_MB_gender(soup):
    """Return the artist's gender text from the profile page, or '' when absent."""
    cell = soup.find("td", itemprop="gender")
    return cell.getText() if cell is not None else ''
def get_MB_born(soup):
    """
    Return the linked text of the page's nationality field, or '' when absent.
    NOTE(review): despite the name, this reads itemprop="nationality".
    """
    cell = soup.find("span", itemprop="nationality")
    return cell.find("a").getText() if cell is not None else ''
def get_MB_pos(soup):
    """Return the artist's job-title text from the profile page, or '' when absent."""
    cell = soup.find("td", itemprop="jobTitle")
    return cell.getText() if cell is not None else ''
def get_external_links(soup, baseURL):
    """
    Collect social/streaming profile links from the page's 'presencerow'.
    Returns a dict mapping platform name -> absolute URL (baseURL + href);
    platforms without a link are omitted. Empty dict when the row is absent.
    """
    external_links = soup.find('tr', {'class': 'presencerow'})
    if external_links is None:
        return {}
    links = {}
    # One lookup per platform replaces seven copy-pasted branches.
    for platform in ('soundcloud', 'spotify', 'facebook', 'twitter',
                     'itunes', 'instagram', 'youtube'):
        anchor = external_links.find('a', title=re.compile(platform))
        if anchor is not None:
            links[platform] = baseURL + anchor.get('href')
    return links
def get_genres(soup):
    """Return the text of the cell following the 'Genres' label, or '' when absent."""
    cell = soup.find("td", text="Genres")
    return cell.nextSibling.text if cell is not None else ''
def get_webSite(soup):
    """Return the href of the link following the 'Site' label, or '' when absent."""
    cell = soup.find("td", text="Site")
    if cell is None:
        return ''
    return cell.nextSibling.find('a')['href']
def get_bookingWebsite(soup):
    """Return the href of the link following the 'Boekingen' (bookings) label, or '' when absent."""
    cell = soup.find("td", text="Boekingen")
    if cell is None:
        return ''
    return cell.nextSibling.find('a')['href']
def save_MB_artist_info_to_file(file_name):
    """
    Read the artist-URL TSV in *file_name*, scrape each artist's partyflock
    profile page, and rewrite the file with the scraped columns added.
    """
    df = pd.read_csv(file_name, sep='\t', encoding="UTF-8")
    for col in ('gender', 'born', 'position', 'genres', 'site', 'booking',
                'external_links'):
        df[col] = ''
    i = 1
    # url = 'https://partyflock.nl{0}'
    baseURL = 'https://partyflock.nl'
    url = baseURL + '{0}'
    for index, row in df.iterrows():
        artist_url = row['artist_page_url']
        if not pd.isnull(artist_url):
            soup = get_soup(url.format(artist_url))
            # BUG FIX: iterrows() yields row *copies*, so the original's
            # assignments into `row` never reached the DataFrame and all
            # scraped data was silently lost on save. Write through df.at.
            df.at[index, 'gender'] = get_MB_gender(soup)
            df.at[index, 'born'] = get_MB_born(soup)
            df.at[index, 'position'] = get_MB_pos(soup)
            df.at[index, 'external_links'] = get_external_links(soup, baseURL)
            df.at[index, 'genres'] = get_genres(soup)
            df.at[index, 'site'] = get_webSite(soup)
            df.at[index, 'booking'] = get_bookingWebsite(soup)
            print(i, df.at[index, 'artist_name'], df.at[index, 'gender'],
                  df.at[index, 'born'], df.at[index, 'position'],
                  df.at[index, 'genres'], df.at[index, 'site'],
                  df.at[index, 'booking'], df.at[index, 'external_links'])
            i = i + 1
    # NOTE(review): header=False drops the column names on rewrite, so a
    # second pass over this file would misparse it — confirm it is intended.
    df.to_csv(file_name, sep='\t', index=False, header=False, encoding='UTF-8')
if __name__ == '__main__':
    # Scrape partyflock profile URLs and details for all RA artists,
    # processing 500 names per batch into a separate TSV file (129 batches).
    file_address = 'data/PF/'
    file_name = 'PF_artist_page_urls'
    file_type = '.tsv'
    artist_names = read_RA_artist_names_from_file()
    for i in range(0,129):
        file_batch_name = file_address+file_name+str(i)+file_type
        span = 500
        start = i*span
        end = (i+1)*span
        artist_names_batch = artist_names[start:end]
        # First pass writes name -> profile URL; second pass enriches the
        # same file with scraped profile details.
        save_MB_artist_urls_to_file(artist_names_batch,file_batch_name)
        save_MB_artist_info_to_file(file_batch_name)
        print ("batch number "+str(i+1)+" done")
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'precios_ui.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for a two-column (currency/price) table window.

    NOTE(review): this class is generated from 'precios_ui.ui' — hand
    edits are lost when the .ui file is recompiled (see file header), so
    the code below is intentionally left byte-identical.
    """
    def setupUi(self, MainWindow):
        # Build the widget hierarchy on the given QMainWindow.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(300, 270)
        MainWindow.setMinimumSize(QtCore.QSize(300, 220))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Two-column table filling most of the central widget.
        self.qt_tabla_precios = QtWidgets.QTableWidget(self.centralwidget)
        self.qt_tabla_precios.setGeometry(QtCore.QRect(10, 10, 280, 250))
        self.qt_tabla_precios.setMinimumSize(QtCore.QSize(280, 200))
        self.qt_tabla_precios.setMaximumSize(QtCore.QSize(16777215, 16777215))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        self.qt_tabla_precios.setFont(font)
        self.qt_tabla_precios.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
        self.qt_tabla_precios.setObjectName("qt_tabla_precios")
        self.qt_tabla_precios.setColumnCount(2)
        self.qt_tabla_precios.setRowCount(0)
        # Header item for column 0 (bold 12pt Arial); text set in retranslateUi.
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        item.setFont(font)
        self.qt_tabla_precios.setHorizontalHeaderItem(0, item)
        # Header item for column 1, styled identically.
        item = QtWidgets.QTableWidgetItem()
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setPointSize(12)
        font.setBold(True)
        font.setWeight(75)
        item.setFont(font)
        self.qt_tabla_precios.setHorizontalHeaderItem(1, item)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Set all user-visible strings (kept separate so Qt tooling can
        # re-run translation without rebuilding the widget tree).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        item = self.qt_tabla_precios.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "MONEDA"))
        item = self.qt_tabla_precios.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "PRECIO"))
if __name__ == "__main__":
    import sys

    # Standalone preview: show the generated window until the user closes it.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
|
# Create your views here.
from django.shortcuts import render_to_response
from django.contrib.formtools.wizard.views import SessionWizardView
from django.template import RequestContext
from defs import definitions
def start(request):
    """Render the questionnaire landing page with full request context."""
    context = RequestContext(request)
    return render_to_response('base.html', context_instance=context)
class QuestionnaireWizard(SessionWizardView):
    """Session-backed wizard that walks the user through the questionnaire."""

    template_name = 'form_template.html'

    def done(self, form_list, **kwargs):
        # Invoked after the final step: render every step's cleaned data.
        collected = [form.cleaned_data for form in form_list]
        return render_to_response('done.html', {'form_data': collected})

    def get_form_kwargs(self, step):
        # Each step's form receives its matching entry from `definitions`
        # (step arrives as a string index).
        return {'definition': definitions[int(step)]}
|
import time
from django.core.management import BaseCommand
from tqdm import tqdm
from ... import api
from ...models import TwitterHashtag, TwitterPost, TwitterUser
class Command(BaseCommand):
    """Sync tweets for every tracked hashtag into the local database.

    For each TwitterHashtag, fetches the matching post overviews from the
    API, creates any missing TwitterUser/TwitterPost rows, links the post
    to the hashtag, and reports creation counts plus elapsed time.
    """

    # TODO: Enable authentication if required
    # def add_arguments(self, parser):
    #     parser.add_argument('email', type=str)

    def handle(self, *args, **options):
        t0 = time.perf_counter()
        # TODO: Enable authentication if required
        # email = options['email']
        # user_id, user_key = api.twitter_get_user_credentials(email)
        count_twitter_post_created = 0
        count_twitter_user_created = 0
        for twitter_hashtag in TwitterHashtag.objects.all():
            post_data_overviews = api.twitter_get_posts_by_hashtag(twitter_hashtag.value)
            for post_data_overview in tqdm(post_data_overviews):
                try:
                    twitter_post = TwitterPost.objects.get(id=post_data_overview['tweetId'])
                except TwitterPost.DoesNotExist:
                    # FIX: get_or_create replaces the racy
                    # get/except-DoesNotExist/create pair and tells us
                    # whether a new user row was actually made.
                    twitter_user, user_created = TwitterUser.objects.get_or_create(
                        id=post_data_overview['userid'],
                        defaults={
                            'name': post_data_overview['name'],
                            'screen_name': post_data_overview['screenName'],
                        },
                    )
                    if user_created:
                        count_twitter_user_created += 1
                    # Full metadata is only fetched for posts we don't have yet.
                    post_data = api.twitter_get_post_metadata(post_data_overview['tweetId'])
                    twitter_post = TwitterPost.objects.create(
                        id=post_data['id'],
                        user=twitter_user,
                        like_count=post_data['likeCount'],
                        retweet_count=post_data['retweetCount'],
                        reply_count=post_data['replyCount'],
                    )
                    count_twitter_post_created += 1
                # FIX: m2m add() is idempotent, so the previous
                # `not in twitter_post.hashtags.all()` check (an extra query
                # plus an O(n) Python membership scan per post) was redundant.
                twitter_post.hashtags.add(twitter_hashtag)
        elapsed = time.perf_counter() - t0
        self.stdout.write('Created {} new twitter users'.format(count_twitter_user_created))
        self.stdout.write('Created {} new twitter posts'.format(count_twitter_post_created))
        self.stdout.write('Done! (took {:.2f} seconds)'.format(elapsed))
|
import pandas as pd
import numpy as np
# Weighted blend of two Porto Seguro submission files into one ensemble CSV.
left = pd.read_csv('C:/PORTO/m29-PORTO-sub-0.282.csv')
right = pd.read_csv('C:/PORTO/m32-stacked_1.csv')

model_number = 33
left_weight = 0.5
right_weight = 0.5

ensemble = left['id'].to_frame()
ensemble['target'] = left['target'] * left_weight + right['target'] * right_weight
ensemble.to_csv('C:/PORTO/m{}-PORTO-ensemble.csv'.format(model_number), index=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.