index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,300 | 6fc8bc5d635cf488eb969a753bb0c73d17d1ea61 | from setuptools import find_packages, setup
setup(
    # Distribution metadata for the "petshop" Flask application.
    name='petshop',
    version='1.0',
    # Discover every importable package under the source tree.
    packages=find_packages(),
    # Also ship non-Python files declared in MANIFEST.in.
    include_package_data=True,
    # Runtime dependencies (Flask web app backed by MySQL via SQLAlchemy,
    # with Faker for test data and APScheduler for background jobs).
    install_requires=[
        'flask',
        'sqlalchemy',
        'Flask-SQLAlchemy',
        'mysqlclient',
        'faker',
        'APScheduler',
        'requests',
    ]
)
|
995,301 | cb6b97b6e74c27374439f0db04a5b36bf929b1a2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'damonhao'
from tornado import ioloop, gen
from net import TcpClient, NetAddress
from common import RpcBase
from stub import stub_factory
class RpcClient(RpcBase):
    """Client side of the RPC framework.

    Owns a TcpClient transport and hands out protobuf service stubs bound
    to the established connection's RPC channel.  (Python 2 codebase.)
    """

    def __init__(self, io_loop, netAddress):
        super(RpcClient, self).__init__()
        # Underlying TCP transport; _on_connection (presumably provided by
        # RpcBase) is invoked on connection state changes.
        self._client = TcpClient(io_loop, netAddress)
        self._client.set_connection_callback(self._on_connection)
        self._services = {}  # service name: service

    def connect(self):
        """Start the (asynchronous) TCP connect."""
        self._client.connect()

    @property
    def _inner_mgr(self):
        # RpcBase reads the connection manager through this property; on the
        # client side that is the TcpClient itself.
        return self._client

    def create_stub(self, service_stub_class):
        """Build a stub of ``service_stub_class`` over the live connection.

        Requires the connection to be established (its context holds the
        RPC channel); asserts otherwise.
        """
        channel = self._client.connection.context
        assert channel, "there is not valid channel"
        return stub_factory(service_stub_class, channel)
@gen.coroutine
def test_stub(rpcClient):
    """Smoke test: build a Greeter stub and issue a single SayHello RPC.

    Runs as a Tornado coroutine on the IO loop; Python 2 syntax
    (print statement).  Assumes rpcClient is already connected when this
    fires — it is scheduled 1 s after connect() by the caller.
    """
    from services import helloworld_pb2
    stub = rpcClient.create_stub(helloworld_pb2.GreeterServer_Stub)
    request = helloworld_pb2.HelloRequest()
    request.name = "Client"
    response = yield stub.SayHello(request)
    # response = yield stub.SayHelloWithCoroutine(request)
    print "receive: ", response.message
def test_one_loop():
    """Drive the RPC client by stepping the IO loop manually.

    Uses the project's own IOLoop (not tornado's) with one_loop() instead
    of start(); never returns.
    """
    netAddress = NetAddress('127.0.0.1', 8002)
    from net.io_loop import IOLoop
    io_loop = IOLoop()
    io_loop.prepare()
    client = RpcClient(io_loop, netAddress)
    from services.test_service import GreeterClientImp
    client.register_service(GreeterClientImp())
    # Fire the test RPC one second after startup, once the connection
    # should exist.
    io_loop.call_later(1, test_stub, client)
    client.connect()
    while True:
        # one_loop(2): run one iteration of the loop — presumably with a
        # 2-second timeout; confirm against net.io_loop.
        io_loop.one_loop(2)
if __name__ == '__main__':
    # Alternative entry point kept for reference: the same setup driven by
    # io_loop.start() instead of manual stepping.
    # netAddress = NetAddress('127.0.0.1', 8002)
    # # io_loop = ioloop.IOLoop.instance()
    # from net.io_loop import IOLoop
    # io_loop = IOLoop()
    # client = RpcClient(io_loop, netAddress)
    # from services.test_service import GreeterClientImp
    # client.register_service(GreeterClientImp())
    # io_loop.call_later(1, test_stub, client)
    # client.connect()
    # io_loop.start()
    test_one_loop()
|
995,302 | ea67ee0a313bcce444ee227e7cf3e129ba3eb1c8 | import pickle
import os
import argparse
# ---- command-line arguments ------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--dev', action='store_true', help='Use dev')
parser.add_argument('--train', action='store_true', help='Use train')
parser.add_argument('--n_sample', type=int, default=-1, help='N_h')
parser.add_argument('--syn', action='store_true', help='Use syn')
parser.add_argument('--air', action='store_true', help='Use air')
args = parser.parse_args()

# ---- dataset selection -----------------------------------------------------
if args.syn:
    data_path = './results/synthesized/'
    data_path2 = './data/synthesized/'
elif args.air:
    data_path = './results/airdialogue/'
    data_path2 = './data/airdialogue/'
else:
    # Fix: a bare `raise` with no active exception raises
    # "RuntimeError: No active exception to re-raise"; exit cleanly with the
    # intended message instead (typo 'Pleae' also corrected).
    raise SystemExit('Please use --syn or --air !')

# ---- split-specific input/output paths -------------------------------------
# Output files stay open for the lifetime of the script (module-level
# handles, closed implicitly at interpreter exit).
if args.dev:
    # data + kb
    kb_file = data_path2 + 'tokenized/dev/dev.eval.kb'
    data_file = data_path2 + 'tokenized/dev/dev.eval.data'
    # eval step sql
    query_file = data_path + 'dev_sql/dev_predict_query'
    query2_file = data_path + 'dev_sql/dev_simple'
    true_query_file = data_path + 'dev_sql/dev_gt_query'
    gate_file = data_path + 'dev_sql/dev_gate'
    # output files
    if not os.path.exists(data_path + 'dev_sql/simulate_DB/'):
        os.makedirs(data_path + 'dev_sql/simulate_DB/')
    small_fp = open(data_path + 'dev_sql/simulate_DB/small_db.kb', 'w')
    r_fp = open(data_path + 'dev_sql/simulate_DB/record', 'w')
    rf_fp = open(data_path + 'dev_sql/simulate_DB/filtered_kb', 'w')
elif args.train:
    kb_file = data_path2 + 'tokenized/train/train.kb'
    data_file = data_path2 + 'tokenized/train/train.data'
    query_file = data_path + 'train_sql/train_predict_query'
    query2_file = data_path + 'train_sql/train_simple'
    true_query_file = data_path + 'train_sql/train_gt_query'
    gate_file = data_path + 'train_sql/train_gate'
    if not os.path.exists(data_path + 'train_sql/simulate_DB/'):
        os.makedirs(data_path + 'train_sql/simulate_DB/')
    small_fp = open(data_path + 'train_sql/simulate_DB/small_db.kb', 'w')
    r_fp = open(data_path + 'train_sql/simulate_DB/record', 'w')
    rf_fp = open(data_path + 'train_sql/simulate_DB/filtered_kb', 'w')
else:
    # Same bare-`raise` fix as above.
    raise SystemExit('Please use --dev or --train !')
def tokenize_dialogue(path):
    """Parse a '|'-separated dialogue file.

    Each line carries four fields: intent | action | dialogue | boundaries.
    The first three fields are kept as lists of word strings; the boundary
    field is converted to ints (the trailing newline survives int()).

    Returns:
        (sents, sents_len): per line, the four tokenized fields, and the
        token count of the dialogue field (index 2).
    """
    sents = []
    sents_len = []
    with open(path, 'r') as fh:
        for raw in fh:
            fields = raw.split("|")
            # intent, action, dialogue stay as strings
            sent = [fields[k].split(" ") for k in range(3)]
            # boundary markers become ints
            sent.append([int(tok) for tok in fields[3].split(" ")])
            sents.append(sent)
            sents_len.append(len(sent[2]))
    return sents, sents_len
def tokenize_kb(path):
    """Parse a flight knowledge-base file.

    Line layout: '<reservation> <13 tokens per flight> ...', e.g.
    '<res_no_res> <a1_IAD> <a2_ATL> ... <fl_1000> <a1_IAD> ...'.
    Each flight is 13 angle-bracket tokens; a trailing incomplete group
    is dropped.

    Returns:
        (kb_sents, reservation): per line, the list of 13-token flight
        entries, and the leading reservation token.
    """
    kb_sents = []
    reservation = []
    with open(path, 'r') as fh:
        for raw in fh:
            tokens = raw.split()
            reservation.append(tokens[0])
            # slice the remainder into complete 13-token flight entries
            kb_sents.append([tokens[k:k + 13]
                             for k in range(1, len(tokens) - 12, 13)])
    return kb_sents, reservation
def tokenize_query(path):
    """Read predicted SQL queries.

    Each line starts with a fixed 38-character 'SELECT ...' prefix and ends
    with a newline; the remainder is split on the literal 'AND' into a list
    of raw condition strings (surrounding spaces kept).
    """
    with open(path, 'r') as fh:
        return [raw[38:-1].split('AND') for raw in fh]
def tokenize_true_query(path):
    """Read ground-truth SQL queries.

    Character 0 of each line is the truth gate (0/1); characters 42..-1
    (past the 'SELECT ...' prefix, minus the newline) hold the conditions,
    split on the literal 'AND'.

    Returns:
        (query, truth_gate): parallel lists of condition lists and int gates.
    """
    query = []
    truth_gate = []
    with open(path, 'r') as fh:
        for raw in fh:
            truth_gate.append(int(raw[0]))
            query.append(raw[42:-1].split('AND'))
    return query, truth_gate
def tokenize_query2(path):
    """Read the 'simple' query format.

    Tokens are whitespace-separated slot values converted to int, except
    the literal marker '0.0', which is kept as a string.
    """
    query = []
    with open(path, 'r') as fh:
        for raw in fh:
            query.append([tok if tok == '0.0' else int(tok)
                          for tok in raw.split()])
    return query
def tokenize_gate(path):
    """Read gate predictions: keep the last whitespace token of each line.

    Downstream code compares each entry to the strings '0.0'/'1.0', so one
    value per line is expected.  Fixes the original implementation, which
    built an unused `words` list and appended the loop variable *after* the
    loop — crashing with NameError on a leading blank line and silently
    re-appending a stale token on later blank lines.
    """
    gate = []
    with open(path, 'r') as f:
        for line in f:
            tokens = line.split()
            if tokens:  # skip blank lines instead of reusing a stale token
                gate.append(tokens[-1])
    return gate
def translate_query_to_simple(query):
    """Project a list of 'name = value' condition strings onto a fixed
    12-slot vector.

    Slots follow the canonical field order below; -1 means "unconstrained".
    Conditions are consumed in order: slot i is filled from the current
    condition only when that field's name appears in it, taking the last
    whitespace token as the integer value.
    """
    field_names = ['departure_airport', 'return_airport', 'departure_month',
                   'return_month', 'departure_day', 'return_day',
                   'departure_time_num', 'return_time_num', 'class',
                   'price', 'num_connections', 'airline_preference']
    simple_query = [-1] * 12
    cursor = 0
    for slot, name in enumerate(field_names):
        if cursor == len(query):
            break  # all conditions consumed
        if name in query[cursor]:
            simple_query[slot] = int(query[cursor].split()[-1])
            cursor += 1
    return simple_query
def ACC_ex(true_flight, each_flight, each_flight_truth, ACC_ex_correct):
    """Exact-match accuracy bump.

    Adds 1 when the predicted flight list equals the ground-truth list
    (including the both-empty case).  `true_flight` is unused but kept for
    signature compatibility with the call site.
    """
    if each_flight == each_flight_truth:
        return ACC_ex_correct + 1
    return ACC_ex_correct
def ACC_lf(truth_gate, gate, true_query, query2, ACC_lf_correct, ACC_lf_total):
    """Per-slot logical-form accuracy bookkeeping.

    truth_gate is the ground-truth gate (int 0/1); gate is the predicted
    gate as a string ('0.0'/'1.0').  true_query/query2 are 12-slot vectors
    with -1 meaning "unconstrained".  ACC_lf_correct / ACC_lf_total are
    12-slot counters, mutated IN PLACE and also returned.
    """
    if truth_gate == 1 and gate == '0.0':
        # Truth expected a query but the predicted gate stayed closed:
        # every truth-constrained slot counts toward the total (a miss).
        for i in range(12):
            if true_query[i] != -1:
                ACC_lf_total[i] += 1
    if truth_gate == 1 and gate == '1.0':
        # Both gates fired; classify each slot.
        # 4 combination : truth(y, y, y, n) predict(w, r, n, y)
        #   w = wrong value, r = right value, n = not predicted, y = spurious
        for i in range(12):
            if true_query[i] != -1:  # y
                if (query2[i] != true_query[i]) and query2[i] != -1:  # w
                    ACC_lf_total[i] += 1
                if query2[i] == true_query[i]:  # r
                    ACC_lf_correct[i] += 1
                    ACC_lf_total[i] += 1
                if query2[i] == -1:  # n
                    ACC_lf_total[i] += 1
            if true_query[i] == -1:  # n
                if query2[i] != -1:  # y — spurious prediction
                    ACC_lf_total[i] += 1
    # if truth_gate == 0 and gate[i] == '0.0':
    if truth_gate == 0 and gate == '1.0':
        # Spurious gate firing: every predicted slot counts as a miss.
        for i in range(12):
            if query2[i] != -1:
                ACC_lf_total[i] += 1
    return ACC_lf_correct, ACC_lf_total
def ACC_lf2(truth_gate, gate, true_query, query2, ACC_lf_correct, ACC_lf_total):
    """Prefix-match logical-form accuracy.

    Counts a sample in ACC_lf_total whenever the predicted gate fires
    ('1.0') while the truth gate is 0 or 1, or the truth gate is 1 while
    the predicted gate is closed ('0.0').  When both fire, credits every
    prefix length i in 1..12 whose first i slots match exactly.
    ACC_lf_correct is mutated in place; ACC_lf_total is returned updated.
    """
    predicted_fire = (gate == '1.0')
    if truth_gate == 1 and gate == '0.0':
        ACC_lf_total += 1
    if truth_gate == 1 and predicted_fire:
        for prefix in range(1, 13):
            if true_query[:prefix] == query2[:prefix]:
                ACC_lf_correct[prefix - 1] += 1
        ACC_lf_total += 1
    # if truth_gate == 0 and gate[i] == '0.0':
    if truth_gate == 0 and predicted_fire:
        ACC_lf_total += 1
    return ACC_lf_correct, ACC_lf_total
def simulate_DB(kb, true_query, truth_gate, query, query2, gate, condiction_num, sort_indices, sort_sent):
    """Replay predicted and ground-truth queries against each sample's
    flight KB and accumulate accuracy statistics.

    For every sample i: filter kb[i] once with the predicted simple query
    (query2[i]) and once with the ground-truth query, compare the surviving
    flights against the true flight taken from the dialogue action, and
    build a 17-entry padded "small DB" per sample.

    Slot order (matching translate_query_to_simple): departure_airport,
    return_airport, departure_month, return_month, departure_day,
    return_day, departure_time_num, return_time_num, class, price,
    num_connections, airline_preference.  Only the first `condiction_num`
    slots are enforced.

    Returns (samll_flight, small_db, total, keep, error, error_truth,
    record).  Prints running diagnostics throughout.
    """
    # Example KB entry layout:
    # <a1_MCO> <a2_LGA> <m1_Feb> <m2_Feb> <d1_9> <d2_10> <tn1_2> <tn2_7> <cl_business> <pr_400> <cn_1> <al_Southwest> <fl_1000>
    airport_list = ['DEN', 'LAX', 'MSP', 'DFW', 'SEA', 'ATL', 'IAH', 'DTW', 'ORD', 'IAD', 'CLT', 'EWR', 'LGA', 'JFK', 'HOU', 'SFO', 'AUS', 'OAK', 'LAS', 'PHL', 'BOS', 'MCO', 'DCA', 'PHX']
    month_list = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'June', 'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec']
    time_list = ['morning', 'afternoon', 'evening']
    air_class_list = ['economy', 'business']
    max_price_list = ['200', '500', '1000', '5000']
    airline_preference_list = ['normal-cost']
    # --- accumulators -------------------------------------------------------
    ACC_ex_correct = 0
    ACC_ex_total = 0
    max_kb = 0
    record = []
    total = 0
    keep = 0
    error, error_truth = 0, 0
    ACC_lf_correct = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ACC_lf_total = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ACC_lf_correct2 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ACC_lf_total2 = 0
    ACC_f = [0, 0, 0, 0, 0, 0, 0]
    ACC_f_truth = [0, 0, 0, 0, 0, 0, 0]
    ACC_s = [0, 0, 0, 0, 0, 0, 0]
    # NOTE(review): hard-coded per-category sample counts — presumably for
    # the full airdialogue dev set; confirm when reusing on other data.
    ACC_total = [18460, 7599, 58, 132, 4487, 563, 4357]
    kb_len = [0 for _ in range(30)]
    filtered_kb = [0 for _ in range(30)]
    small_db = []
    samll_flight = []
    for i in range(len(kb)):
        true_query_i = translate_query_to_simple(true_query[i])
        ACC_lf_correct, ACC_lf_total = ACC_lf(truth_gate[i], gate[i], true_query_i, query2[i], ACC_lf_correct, ACC_lf_total)
        ACC_lf_correct2, ACC_lf_total2 = ACC_lf2(truth_gate[i], gate[i], true_query_i, query2[i], ACC_lf_correct2, ACC_lf_total2)
        if gate[i] == '0.0':
            # Predicted gate closed: keep the first 17 KB entries unfiltered.
            small_db.append([sort_indices[i], kb[i][0:17]])
            record.append([sort_indices[i], 0])
            if truth_gate[i] == 0 and gate[i] == '0.0':
                ACC_f[4] += 1; ACC_f[5] += 1; ACC_f[6] += 1
            continue
        else:
            total += 30
        if truth_gate[i] == 1:
            ACC_ex_total += 1
        each_kb = []
        each_flight = []
        each_price = []
        each_flight_truth = []
        each_price_truth = []
        if i == 0:
            # Verbose dump of the first sample only.
            print('*'*100)
            print('query2 : ', query2[i], 'gate : ', gate[i], 'truth gate : ', truth_gate[i])
            print('air : ', airport_list[query2[i][0]], ' ', airport_list[query2[i][1]] ,'month : ', month_list[query2[i][2]], ' ', month_list[query2[i][3]])
            print('query : ', query[i])
            print('truth query : ', true_query[i])
            print('truth query : ', true_query_i)
            print('Our query : ', query2[i])
        # --- filter KB with OUR predicted query -----------------------------
        for entry in range(len(kb[i])):
            if i == 0:
                print('kb[i][entry] : ', kb[i][entry])
            correct = 1
            for c in range(len(kb[i][entry])-1):  # without flight number
                if query2[i][c] == -1:
                    continue
                if c >= condiction_num:
                    break
                # strip '<xx_' prefix and '>' suffix from the KB token
                token = kb[i][entry][c].split('_', 1)[1].split('>', 1)[0]
                # print('query2 : ', query2[i][c], 'c : ', c)
                if c == 0:
                    airport_index = airport_list.index(token)
                    if airport_index != query2[i][c]:
                        correct = 0
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('airport_index : ', airport_index)
                        # raise
                        break
                elif c == 1:
                    airport_index = airport_list.index(token)
                    if airport_index != query2[i][c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('airport_index : ', airport_index)
                        # raise
                        correct = 0
                        break
                elif c == 2:
                    month_index = month_list.index(token)
                    if month_index != query2[i][c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('month_index : ', month_index)
                        # raise
                        correct = 0
                        break
                elif c == 3:
                    month_index = month_list.index(token)
                    if month_index != query2[i][c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('month_index : ', month_index)
                        # raise
                        correct = 0
                        break
                elif c == 4:
                    # days are 1-based in the KB, 0-based in the query
                    if int(token)-1 != query2[i][c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('day_index : ', int(token)-1)
                        # raise
                        correct = 0
                        break
                elif c == 5:
                    if int(token)-1 != query2[i][c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('day_index : ', int(token)-1)
                        # raise
                        correct = 0
                        break
                elif c == 6:
                    # departure time bucket: hour-of-day membership test
                    d_time = time_list[query2[i][c]]
                    if d_time == 'morning' and int(token) not in [3, 4, 5, 6, 7, 8, 9, 10, 11]:
                        correct = 0
                    if d_time == 'afternoon' and int(token) not in [12, 13, 14, 15, 16, 17, 18, 19]:
                        correct = 0
                    if d_time == 'evening' and int(token) not in [20, 21, 22, 23, 0, 1, 2]:
                        correct = 0
                elif c == 7:
                    r_time = time_list[query2[i][c]]
                    if r_time == 'morning' and int(token) not in [3, 4, 5, 6, 7, 8, 9, 10, 11]:
                        correct = 0
                    if r_time == 'afternoon' and int(token) not in [12, 13, 14, 15, 16, 17, 18, 19]:
                        correct = 0
                    if r_time == 'evening' and int(token) not in [20, 21, 22, 23, 0, 1, 2]:
                        correct = 0
                elif c == 8:
                    class_index = air_class_list.index(token)
                    if class_index != query2[i][c]:
                        correct = 0
                elif c == 9:
                    # price is an upper bound, not an exact match
                    if int(max_price_list[query2[i][c]]) < int(token):
                        correct = 0
                elif c == 10:
                    # NOTE(review): uses true_query_i, not query2[i][c] —
                    # ground truth appears to leak into the predicted filter
                    # for num_connections; confirm intent.
                    if true_query_i[c] < int(token):
                        correct = 0
                elif c == 11:
                    # NOTE(review): same leak for airline_preference.
                    if true_query_i[c] == 1 and token not in ['UA', 'AA', 'Delta', 'Hawaiian']:
                        correct = 0
            if correct == 1:
                each_price.append(int(kb[i][entry][9].split('_', 1)[1].split('>', 1)[0]))
                each_flight.append(kb[i][entry][12].split('_', 1)[1].split('>', 1)[0])
                each_kb.append(kb[i][entry])
                keep += 1
        # --- filter KB with the GROUND-TRUTH query (same logic) -------------
        for entry in range(len(kb[i])):
            correct = 1
            for c in range(len(kb[i][entry])-1):
                if true_query_i[c] == -1:
                    continue
                if c >= condiction_num:
                    break
                token = kb[i][entry][c].split('_', 1)[1].split('>', 1)[0]
                # print('query2 : ', query2[i][c], 'c : ', c)
                if c == 0:
                    airport_index = airport_list.index(token)
                    if airport_index != true_query_i[c]:
                        correct = 0
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('airport_index : ', airport_index)
                        # raise
                        break
                elif c == 1:
                    airport_index = airport_list.index(token)
                    if airport_index != true_query_i[c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('airport_index : ', airport_index)
                        # raise
                        correct = 0
                        break
                elif c == 2:
                    month_index = month_list.index(token)
                    if month_index != true_query_i[c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('month_index : ', month_index)
                        # raise
                        correct = 0
                        break
                elif c == 3:
                    month_index = month_list.index(token)
                    if month_index != true_query_i[c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('month_index : ', month_index)
                        # raise
                        correct = 0
                        break
                elif c == 4:
                    if int(token)-1 != true_query_i[c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('day_index : ', int(token)-1)
                        # raise
                        correct = 0
                        break
                elif c == 5:
                    if int(token)-1 != true_query_i[c]:
                        # print('kb[i][entry] : ', kb[i][entry])
                        # print('query2[i][c] : ', query2[i][c])
                        # print('day_index : ', int(token)-1)
                        # raise
                        correct = 0
                        break
                elif c == 6:
                    d_time = time_list[true_query_i[c]]
                    if d_time == 'morning' and int(token) not in [3, 4, 5, 6, 7, 8, 9, 10, 11]:
                        correct = 0
                    if d_time == 'afternoon' and int(token) not in [12, 13, 14, 15, 16, 17, 18, 19]:
                        correct = 0
                    if d_time == 'evening' and int(token) not in [20, 21, 22, 23, 0, 1, 2]:
                        correct = 0
                elif c == 7:
                    r_time = time_list[true_query_i[c]]
                    if r_time == 'morning' and int(token) not in [3, 4, 5, 6, 7, 8, 9, 10, 11]:
                        correct = 0
                    if r_time == 'afternoon' and int(token) not in [12, 13, 14, 15, 16, 17, 18, 19]:
                        correct = 0
                    if r_time == 'evening' and int(token) not in [20, 21, 22, 23, 0, 1, 2]:
                        correct = 0
                elif c == 8:
                    class_index = air_class_list.index(token)
                    if class_index != true_query_i[c]:
                        correct = 0
                elif c == 9:
                    if int(max_price_list[true_query_i[c]]) < int(token):
                        correct = 0
                elif c == 10:
                    if true_query_i[c] < int(token):
                        correct = 0
                elif c == 11:
                    if true_query_i[c] == 1 and token not in ['UA', 'AA', 'Delta', 'Hawaiian']:
                        correct = 0
            if correct == 1:
                each_price_truth.append(int(kb[i][entry][9].split('_', 1)[1].split('>', 1)[0]))
                each_flight_truth.append(kb[i][entry][12].split('_', 1)[1].split('>', 1)[0])
        # --- true flight from the dialogue action (field 1 of the sample) ---
        action = sort_sent[i][1]
        if len(action) != 4:
            # Malformed action (missing name); abort loudly.
            print('No name ! ', sort_indices[i])
            true_flight = action[0].split('_', 1)[1].split('>', 1)[0]
            raise
        else:
            true_flight = action[2].split('_', 1)[1].split('>', 1)[0]
        if i == 0:
            print('true_flight : ', true_flight)
        ACC_ex_correct = ACC_ex(true_flight, each_flight, each_flight_truth, ACC_ex_correct)
        # sort both flight lists by ascending price
        index_price = sorted(range(len(each_price)), key=lambda k: each_price[k])
        each_flight = [each_flight[p] for p in index_price ]
        index_price_truth = sorted(range(len(each_price_truth)), key=lambda k: each_price_truth[k])
        each_flight_truth = [each_flight_truth[p] for p in index_price_truth ]
        # empty : change-no_flight, book--no_flight, cancel-no_reservation, change-no_reservation, cancel-cancel
        # ACC_f: flight-level accuracy using OUR filtered list; ACC_f_truth:
        # same using the ground-truth filtered list.
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight == 'empty' and len(each_flight) == 0:
            # book--no_flight, change-no_flight
            ACC_f[1] += 1 # book--no_flight
            ACC_f[3] += 1 # change-no_flight
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight != 'empty' and len(each_flight) == 1 and (true_flight in each_flight):
            # book--no_flight, change-no_flight
            ACC_f[0] += 1 # book--book
            ACC_f[2] += 1 # change-book
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight != 'empty' and len(each_flight) > 1 and (int(true_flight) == int(each_flight[0])):
            # book--no_flight, change-no_flight
            ACC_f[0] += 1 # book--book
            ACC_f[2] += 1 # change-book
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight == 'empty' and len(each_flight_truth) == 0:
            # book--no_flight, change-no_flight
            ACC_f_truth[1] += 1 # book--no_flight
            ACC_f_truth[3] += 1 # change-no_flight
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight != 'empty' and len(each_flight_truth) == 1 and (true_flight in each_flight_truth):
            # book--no_flight, change-no_flight
            ACC_f_truth[0] += 1 # book--book
            ACC_f_truth[2] += 1 # change-book
        if truth_gate[i] == 1 and gate[i] != '0.0' and true_flight != 'empty' and len(each_flight_truth) > 1 and (int(true_flight) == int(each_flight_truth[0])):
            # book--no_flight, change-no_flight
            ACC_f_truth[0] += 1 # book--book
            ACC_f_truth[2] += 1 # change-book
        # --- record the outcome per sample ----------------------------------
        if true_flight == 'empty' and len(each_flight) == 0:
            # print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight, 'True Empty')
            record.append([sort_indices[i], each_flight, true_flight])
        elif true_flight == 'empty' and len(each_flight) != 0:
            # print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight)
            record.append([sort_indices[i], each_flight, true_flight])
        elif true_flight in each_flight:
            # print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight, 'True Flight')
            record.append([sort_indices[i], each_flight, true_flight])
        elif true_flight != 'empty' and true_flight not in each_flight_truth:
            # Even the ground-truth query failed to retrieve the true flight:
            # dump full diagnostics for this sample.
            # print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight, 'Error Flight')
            # print('T : ', true_query[i])
            # print('P : ', query[i])
            record.append([sort_indices[i], each_flight, true_flight, true_query[i], query[i]])
            print('*'*100)
            print('Sample : ', i)
            print('each_flight_truth : ', each_flight_truth)
            print('true_flight : ', true_flight)
            print('query2 : ', query2[i], 'gate : ', gate[i], 'truth gate : ', truth_gate[i])
            print('token : ', airport_list[query2[i][0]], ' ', airport_list[query2[i][1]] , month_list[query2[i][2]], ' ', month_list[query2[i][3]])
            print('query : ', query[i])
            print('truth query : ', true_query[i])
            print('truth query : ', true_query_i)
            print('Our query : ', query2[i])
            for k in range(30):
                print('kb[i][entry] : ', kb[i][k])
            print('sents : ', sort_sent[i])
            print('*'*100)
            error_truth += 1
        if true_flight != 'empty' and true_flight not in each_flight:
            # Our predicted query missed the true flight.
            # print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight, 'Error Flight')
            # print('T : ', true_query[i])
            # print('P : ', query[i])
            record.append([sort_indices[i], each_flight, true_flight, true_query[i], query[i]])
            error += 1
        kb_len[len(each_kb)] += 1
        if len(each_kb) > max_kb:
            max_kb = len(each_kb)
        # pad the filtered KB to exactly 17 entries with the last raw entry
        while len(each_kb) < 17:
            each_kb.append(kb[i][-1])
        # print('each_kb : ', each_kb)
        # raise
        # if sort_indices[i] == 25544:
        #     print('sample : ', sort_indices[i], ' flight : ', each_flight, 'flight : ', true_flight, 'Error Flight')
        #     print(each_kb)
        #     print(len(each_kb))
        small_db.append([sort_indices[i], each_kb])
        samll_flight.append(each_flight)
    # --- summary ------------------------------------------------------------
    print('Max kb : ', max_kb)
    print('Acc ex : ', 100.*ACC_ex_correct/ACC_ex_total, ACC_ex_correct, ACC_ex_total)
    condiction_name = ['departure_airport', 'return_airport', 'departure_month', 'return_month', 'departure_day', 'return_day', 'departure_time_num', 'return_time_num', 'class', \
        'price', 'num_connections', 'airline_preference']
    for i in range(12):
        print(condiction_name[i], ' : ', ACC_lf_correct[i], ' / ', ACC_lf_total[i], ' -> ', 100.*ACC_lf_correct[i]/ACC_lf_total[i])
        print(condiction_name[i], ' : ', ACC_lf_correct2[i], ' / ', ACC_lf_total2, ' -> ', 100.*ACC_lf_correct2[i]/ACC_lf_total2)
    print('ACC_Flight : ', ACC_f)
    print('ACC_Flight_truth : ', ACC_f_truth)
    print('kb_len : ', kb_len)
    return samll_flight, small_db, total, keep, error, error_truth, record
# ---- load and (optionally) truncate all inputs -----------------------------
sents, sents_len = tokenize_dialogue(data_file)
kb_sents, reservations = tokenize_kb(kb_file)
if args.n_sample != -1:
    sents = sents[:args.n_sample]
    sents_len = sents_len[:args.n_sample]
    kb_sents = kb_sents[:args.n_sample]
    reservations = reservations[:args.n_sample]
print('Size of kb : ', len(kb_sents))
print('Size of each kb : ', len(kb_sents[0]), kb_sents[0][0])
query = tokenize_query(query_file)
print('Size of query : ', len(query))
print('Size of each query : ', len(query[0]), query[0])
true_query, truth_gate = tokenize_true_query(true_query_file)
print('Size of true_query : ', len(true_query))
print('Size of each true_query : ', len(true_query[0]), true_query[0])
query2 = tokenize_query2(query2_file)
print('Size of query2 : ', len(query2))
print('Size of each query2 : ', len(query2[0]), query2[0])
gate = tokenize_gate(gate_file)
print('Size of gate : ', len(gate))
print('Size of each gate : ', gate[0])
# ---- align query files (written in length-sorted order) with the raw data --
# sort_indices maps rank -> original index (by descending dialogue length);
# sort_indices_reverse is its inverse permutation.
sort_indices = sorted(range(len(sents_len)), key=lambda k: sents_len[k], reverse=True)
sort_indices_reverse = sorted(range(len(sort_indices)), key=lambda k: sort_indices[k])
sort_true_query, sort_truth_gate, sort_query, sort_query2, sort_gate = [], [], [], [], []
for i in range(len(true_query)):
    sort_true_query.append(true_query[sort_indices_reverse[i]])
    sort_truth_gate.append(truth_gate[sort_indices_reverse[i]])
    sort_query.append(query[sort_indices_reverse[i]])
    sort_query2.append(query2[sort_indices_reverse[i]])
    sort_gate.append(gate[sort_indices_reverse[i]])
sort_kb = []
sort_sent = []
sort_reservations = []
for i in range(len(kb_sents)):
    sort_kb.append(kb_sents[sort_indices[i]])
    sort_sent.append(sents[sort_indices[i]])
    sort_reservations.append(reservations[sort_indices[i]])
print('*'*100)
# print('sort_indices : ', sort_indices[0:2])
# print('kb_sents : ', sort_kb[0:2])
# print('sents : ', sort_sent[0:2])
print('indices : ', sort_indices_reverse[0:2])
print('kb_sents : ', kb_sents[0:2])
print('sents : ', sents[0:2])
# NOTE(review): queries are un-sorted with sort_indices_reverse while kb/sents
# are passed in original order with identity indices — verify the two
# permutations really cancel out for your data layout.
# samll_flight, small_db, total, keep, error, record = simulate_DB(sort_kb, true_query, truth_gate, query, query2, gate, 6, sort_indices, sort_sent)
samll_flight, small_db, total, keep, error, error_truth, record = simulate_DB(kb_sents, sort_true_query, sort_truth_gate, sort_query, sort_query2, sort_gate, 12, list(range(len(kb_sents))), sents)
print('keep : ', keep)
print('total : ', total)
print('error : ', error, ' / ', len(kb_sents))
print('error_truth : ', error_truth, ' / ', len(kb_sents))
print('record : ', len(record))
# ---- write per-sample records and the padded small DB ----------------------
record.sort(key=lambda x: x[0])
for i in range(len(record)):
    r_fp.write(str(record[i]) + '\n')
small_db.sort(key=lambda x: x[0])
for i in range(len(small_db)):
    words = str(reservations[i]) + ' '
    for entry in range(len(small_db[i][1])):
        for word in small_db[i][1][entry]:
            words += str(word) + ' '
    small_fp.write(words)
    small_fp.write('\n')
print('End')
995,303 | 3c59d5428cdbecc4cc102df75b2cb68fce27256a | #coding:utf-8
from common import Hash
import time,requests
from locust import HttpLocust,TaskSet,task
from locust.contrib.fasthttp import FastHttpLocust
from common import login_lanting
#Load test for the LanTing app live-streaming API
#Define the simulated user behaviour
class User(TaskSet):
    """Locust task set exercising the live check-in and detail endpoints."""

    # Request headers shared by every task (captured from the iOS client).
    header = {
        'User-Agent': 'LanTingDoctor/2.0.2 (iPad; iOS 10.1.1; Scale/2.00)',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-Hans-CN;q=1',
        'Content-Type': 'application/json',
        'requestApp': '3',
        'requestclient': '2',
        'versionForApp': '2.0',
        'Authorization': 'Basic YXBpTGFudGluZ0BtZWRsYW5kZXIuY29tOkFwaVRobWxkTWxkQDIwMTM=',
        'Connection': 'keep-alive'
    }
    # NOTE(review): these run once at class-definition time, so the login
    # token and timestamps are shared by ALL simulated users.
    s = requests.session()
    t = login_lanting.auto_login_by_UID()
    # Payload for entering a live room.
    into = {
        'token': t,
        'nonce': Hash.get_digit(),
        'timestamp': str(int(time.time())),
        'live_code': 'L2018112248566'
    }
    # Sign the payload.
    into['sign'] = Hash.get_sign(into)
    # Payload for the live-detail request.
    de = {
        'token': t,
        'nonce': Hash.get_digit(),
        'timestamp': str(int(time.time())),
        'live_code': 'L2018121173179'
    }
    # Sign the payload.
    de['sign'] = Hash.get_sign(de)

    @task(1)
    def chekin(self):
        # catch_response=True captures the response so we can mark the
        # request's outcome ourselves via success()/failure().
        with self.client.post('/v1/live/checkIn', headers=self.header, json=self.into, catch_response=True) as response:
            if response.status_code == 200:
                response.success()
            else:
                response.failure('not 200!')

    @task(1)
    def detail(self):
        # Same success/failure bookkeeping for the detail endpoint.
        with self.client.post('/v1/live/detail', headers=self.header, json=self.de, catch_response=True) as response:
            if response.status_code == 200:
                response.success()
            else:
                response.failure('not 200!')
class Websiteuser(HttpLocust):  # or HttpLocust
    """Locust user running the User task set, waiting 3-6 s between tasks."""
    task_set = User
    #host = 'http://api-live.sunnycare.cc'
    max_wait = 6000  # milliseconds
    min_wait = 3000  # milliseconds
if __name__ == '__main__':
    # Shell out through os.system so the load test can be launched by
    # running this file directly (e.g. from PyCharm).
    import os
    os.system('locust -f locustfile7.py --host=http://api-live.sunnycare.cc')
|
995,304 | 1121e1bc3feb2cd7f9adb4c602f3297688a48199 | # -*- coding: utf-8 -*-
# @Time: 2020/12/09 14:46
# @Author: 李运辰
# @Software: PyCharm
# 导入requests包
import requests
from lxml import etree
# Target listing page: 51job "crawler developer" jobs, page 1.
url = "https://jobs.51job.com/pachongkaifa/p1/"
# Request headers copied from a browser session (includes the cookie the
# site expects).
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Cookie": "guid=7e8a970a750a4e74ce237e74ba72856b; partner=blog_csdn_net",
    "Host": "jobs.51job.com",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36"
}
# Fetch with headers; the site serves GBK-encoded HTML.
res = requests.get(url=url, headers=headers)
res.encoding = 'gbk'
s = res.text
selector = etree.HTML(s)
# Walk each job card and extract title / company / location / salary / date.
for item in selector.xpath('/html/body/div[4]/div[2]/div[1]/div/div'):
    title = item.xpath('.//p/span[@class="title"]/a/text()')
    name = item.xpath('.//p/a/@title')
    location_name = item.xpath('.//p/span[@class="location name"]/text()')
    sary = item.xpath('.//p/span[@class="location"]/text()')
    time = item.xpath('.//p/span[@class="time"]/text()')
    # Only divs with a title are real job cards; the XPath also matches
    # wrapper divs.
    if len(title) > 0:
        print(title)
        print(name)
        print(location_name)
        print(sary)
        print(time)
        print("-----------")
|
995,305 | e865e497a3c8a80c83f57a3ff7de5ced197ad7b6 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import os
from six.moves.urllib import parse
import sys
from neutronclient.common import exceptions as n_exceptions
from kuryr.lib import exceptions
from kuryr_libnetwork import config
from kuryr_libnetwork import controllers
from kuryr_libnetwork.server import start
from kuryr_libnetwork.tests.unit import base
@ddt.ddt
class ConfigurationTest(base.TestKuryrBase):
    def test_defaults(self):
        """Shipped config defaults: pybasedir, bindir, kuryr_uri, the v4/v6
        default subnet pools and the port driver."""
        # pybasedir defaults to the repo root, three levels above this module.
        basepath = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                '../../..'))
        self.assertEqual(basepath,
                         config.CONF.pybasedir)
        self.assertEqual('/usr/libexec/kuryr',
                         config.CONF.bindir)
        self.assertEqual('http://127.0.0.1:23750',
                         config.CONF.kuryr_uri)
        self.assertEqual('kuryr',
                         config.CONF.neutron.default_subnetpool_v4)
        self.assertEqual('kuryr6',
                         config.CONF.neutron.default_subnetpool_v6)
        self.assertEqual('kuryr_libnetwork.port_driver.drivers.veth',
                         config.CONF.port_driver)
@mock.patch.object(sys, 'argv', return_value='[]')
@mock.patch('kuryr_libnetwork.controllers.check_for_neutron_tag_support')
@mock.patch('kuryr_libnetwork.controllers.check_for_neutron_ext_support')
@mock.patch('kuryr_libnetwork.controllers.neutron_client')
@mock.patch('kuryr_libnetwork.app.run')
def test_start(self, mock_run, mock_neutron_client,
mock_check_neutron_ext_support,
mock_check_for_neutron_tag_support,
mock_sys_argv):
start()
kuryr_uri = parse.urlparse(config.CONF.kuryr_uri)
mock_neutron_client.assert_called_once()
mock_check_neutron_ext_support.assert_called_once()
mock_check_for_neutron_tag_support.assert_any_call(
controllers.TAG_NEUTRON_EXTENSION)
mock_check_for_neutron_tag_support.assert_any_call(
controllers.TAG_EXT_NEUTRON_EXTENSION)
mock_run.assert_called_once_with(kuryr_uri.hostname, 23750,
ssl_context=None)
def test_check_for_neutron_ext_support_with_ex(self):
with mock.patch.object(controllers.app.neutron,
'show_extension') as mock_extension:
ext_alias = "subnet_allocation"
err = n_exceptions.NotFound.status_code
ext_not_found_ex = n_exceptions.NeutronClientException(
status_code=err,
message="")
mock_extension.side_effect = ext_not_found_ex
ex = exceptions.MandatoryApiMissing
self.assertRaises(ex, controllers.check_for_neutron_ext_support)
mock_extension.assert_called_once_with(ext_alias)
@mock.patch('kuryr_libnetwork.controllers.app.neutron.show_extension')
@ddt.data('tag', 'tag-ext')
def test_check_for_neutron_tag_support_with_ex(self,
ext_name,
mock_extension):
err = n_exceptions.NotFound.status_code
ext_not_found_ex = n_exceptions.NeutronClientException(
status_code=err,
message="")
mock_extension.side_effect = ext_not_found_ex
controllers.check_for_neutron_tag_support(ext_name)
mock_extension.assert_called_once_with(ext_name)
@mock.patch('kuryr_libnetwork.controllers.app.neutron.show_extension')
@ddt.data('fake_ext')
def test_check_for_neutron_tag_support_wrong_ext_name_with_ex(
self,
ext_name,
mock_extension):
err = n_exceptions.NotFound.status_code
ext_not_found_ex = n_exceptions.NeutronClientException(
status_code=err,
message="")
mock_extension.side_effect = ext_not_found_ex
controllers.check_for_neutron_tag_support(ext_name)
mock_extension.assert_called_once_with(ext_name)
|
995,306 | 18750c38493a619fb28ea52ef6b1ce2cfb9a502b | from django.contrib import admin
from django.urls import path
from .views import home_page, about_us
# Named routes so templates and views can use reverse() / {% url %} instead
# of hard-coding paths. URL strings themselves are unchanged.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', home_page, name='home'),
    path('about-us', about_us, name='about-us'),
]
|
995,307 | b47ac6d82321e05e9f6dc6c1e1b81454690d1030 | import glob
import os
import sys
#input_dir = sys.argv[1]
def convert_folder(input_dir):
    """Convert every .bam file in *input_dir* to a .bed file via bamToBed.

    Output goes to a sibling ``beds/`` directory two levels above the bam
    folder (e.g. ``fbf1/bams/x.bam`` -> ``fbf1/beds/x.bed``).
    """
    for filename in glob.glob(os.path.join(input_dir, '*.bam')):
        print(filename)  # parenthesized: works on both Python 2 and 3
        # os.path.join replaces manual '/' concatenation; the old code
        # produced an absolute '/beds/...' path when the grandparent
        # directory name was empty.
        out_dir = os.path.join(
            os.path.dirname(os.path.dirname(input_dir)), 'beds')
        out_base = os.path.basename(filename).partition('.bam')[0] + '.bed'
        out_name = os.path.join(out_dir, out_base)
        # NOTE: shell redirection requires a shell command string; filenames
        # come from glob, so they are locally controlled, not untrusted.
        cmd = 'bamToBed -i %s > %s' % (filename, out_name)
        print(cmd)
        os.system(cmd)
# Convert both replicate folders; paths are relative to the working directory.
convert_folder('fbf1/bams/')
convert_folder('fbf2/bams/')
|
995,308 | 6e5b49ea30cf41c3aa537a1d658800e0fdd64a43 | # Django
from django import forms
# Local
from .models import Device
class DeviceForm(forms.ModelForm):
    """ModelForm for Device exposing every field except the timestamp."""

    class Meta:
        model = Device
        # timestamp is excluded from user input (set elsewhere).
        exclude = ['timestamp']
|
995,309 | 4549a54739576e5909e667a111ec2302a037f97f | import pdb
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import numpy as np
import seaborn as sns
import Statistics as stats
import base64
from pandas_profiling import ProfileReport
import os
from streamlit_pandas_profiling import st_profile_report
def run(st,data):
    """Render the exploratory-analysis page for *data*.

    ``st`` is the streamlit handle passed in by the caller (it shadows the
    module-level import) and ``data`` is assumed to be a pandas DataFrame --
    TODO confirm with the caller. Everything here draws streamlit widgets;
    nothing meaningful is returned.
    """
    # Menu expander: analysis mode plus optional per-column row filters.
    expander = st.beta_expander("Menu",expanded=True)
    with expander:
        ana_choice = st.radio("Analysis",["Data","Visualization","Statistics","Data Profiling"])
        filters = st.checkbox('Add Filters')
        if filters:
            st.info("Select column and values from below")
            filtered_cols = st.multiselect("Select columns to filter",data.columns.tolist())
            filtered_sets = []
            if len(filtered_cols)>0:
                # Lay the per-column value pickers out three to a row.
                iterations = len(filtered_cols) // 3
                difference = len(filtered_cols) % 3
                jack = 0  # row counter for the full three-wide rows
                while jack < iterations:
                    cols_filtered = []
                    try:
                        cols_filtered = cols_filtered + st.beta_columns(3)
                    except:
                        # Best-effort: older streamlit may lack beta_columns.
                        pass
                    counter = 0
                    for i in range(jack*3, 3*jack+3):
                        filtered_sets.append(cols_filtered[counter].multiselect(filtered_cols[i], data[filtered_cols[i]].unique().tolist()))
                        counter+=1
                    jack+=1
                if difference == 0:
                    pass
                else:
                    # Remaining one or two pickers on a final, shorter row.
                    cols_filtered = []
                    cols_filtered = cols_filtered + st.beta_columns(difference)
                    counter = 0
                    for i in range(iterations*3, iterations*3+difference):
                        filtered_sets.append(cols_filtered[counter].multiselect(filtered_cols[i], data[filtered_cols[i]].unique().tolist()))
                        counter += 1
                # Now filtering the data: keep only rows whose value is in
                # the user's selection, one column at a time.
                tracker = 0
                for filter_value in filtered_sets:
                    if len(filter_value)>0:
                        data = data[data[filtered_cols[tracker]].isin(filter_value)]
                    tracker+=1
    if ana_choice == 'Data':
        data_options = st.selectbox("",["View Records","Data Correlation","Pivot"])
        if data_options == "View Records":
            c1,c2 = st.beta_columns(2)
            top_bottom_options = c1.radio("Records",["Top","Bottom"])
            num_rec = c2.number_input("No. of Records:", min_value=0, max_value=100, step=1, value=10)
            if top_bottom_options == 'Top':
                st.dataframe(data.head(num_rec))
            else:
                st.dataframe(data.tail(num_rec))
        elif data_options == "Data Correlation":
            select_columns = st.multiselect("Select Columns",data.columns.tolist())
            corr_view = st.radio("Correlation View",["Table","Chart"])
            if corr_view == 'Table':
                # Empty selection means "correlate every column".
                if len(select_columns)==0:
                    st.dataframe(data.corr())
                else:
                    st.dataframe(data[select_columns].corr())
            else:
                if len(select_columns) == 0:
                    st.write(sns.heatmap(data.corr(), annot=True))
                    st.pyplot()
                else:
                    st.write(sns.heatmap(data[select_columns].corr(), annot=True))
                    st.pyplot()
        elif data_options == 'Pivot':
            dimensions = st.multiselect("Select X axis columns",data.columns.tolist())
            measures = st.multiselect("Select Y axis columns", data.columns.tolist())
            numeric_cols = st.multiselect("Aggregation columns", data.columns.tolist())
            aggregation_operations = st.selectbox("Aggregation Operation",['sum','average','median','count'])
            button = st.button("Execute!!!")
            if button:
                if len(numeric_cols) > 0 :
                    # Map the UI label to the numpy aggregation function.
                    if aggregation_operations == 'sum':
                        operation = np.sum
                    elif aggregation_operations == 'average':
                        operation = np.mean
                    elif aggregation_operations == 'median':
                        operation = np.median
                    elif aggregation_operations == 'count':
                        operation = np.count_nonzero
                    pivot_table = pd.pivot_table(data,values=numeric_cols,index=measures,columns=dimensions,aggfunc=operation)
                    st.dataframe(pivot_table)
    elif ana_choice == "Visualization":
        chart_options = st.selectbox('Charts',['Bar','Line','Heatmap','Distplot','Customized'])
        if chart_options == 'Bar':
            x_col = st.selectbox('X',data.columns.tolist())
            y_col = st.selectbox('Y', data.columns.tolist())
            hue_color = st.checkbox("Add color column")
            direction = st.radio('chart direction',['vertical','horizontal'])
            # hue_col only exists when the checkbox is ticked (guarded below).
            if hue_color:
                hue_col = st.selectbox('hue', data.columns.tolist())
            button = st.button("Execute!!!")
            if button:
                if direction == 'vertical':
                    chart_direction = 'v'
                else:
                    chart_direction = 'h'
                if hue_color:
                    if hue_col:
                        st.write(sns.barplot(x=x_col, y=y_col, hue=hue_col, data=data,orient=chart_direction))
                        st.pyplot()
                    else:
                        st.write(sns.barplot(x=x_col, y=y_col, data=data,orient=chart_direction))
                        st.pyplot()
                else:
                    st.write(sns.barplot(x=x_col, y=y_col, data=data, orient=chart_direction))
                    st.pyplot()
        elif chart_options == 'Line':
            x_col = st.selectbox('X', data.columns.tolist())
            y_col = st.selectbox('Y', data.columns.tolist())
            hue_color = st.checkbox("Add color column")
            if hue_color:
                hue_col = st.selectbox('hue', data.columns.tolist())
            button = st.button("Execute!!!")
            if button:
                if hue_color:
                    if hue_col:
                        st.write(sns.lineplot(x=x_col, y=y_col, hue=hue_col, data=data))
                        st.pyplot()
                    else:
                        st.write(sns.lineplot(x=x_col, y=y_col, data=data))
                        st.pyplot()
                else:
                    st.write(sns.lineplot(x=x_col, y=y_col, data=data))
                    st.pyplot()
        elif chart_options == 'Heatmap':
            select_columns = st.multiselect("Select Columns", data.columns.tolist())
            button = st.button("Execute!!!")
            if button:
                if len(select_columns) == 0:
                    st.write(sns.heatmap(data, annot=True))
                    st.pyplot()
                else:
                    st.write(sns.heatmap(data[select_columns], annot=True))
                    st.pyplot()
        elif chart_options == 'Distplot':
            x_col = st.selectbox('X', data.columns.tolist())
            col = st.selectbox('column', data.columns.tolist())
            row = st.selectbox('row', data.columns.tolist())
            button = st.button("Execute!!!")
            if button:
                st.write(sns.displot(
                    data, x=x_col, col=col, row=row,
                    binwidth=3, height=3, facet_kws=dict(margin_titles=True),
                ))
                st.pyplot()
        elif chart_options == 'Customized':
            code_area = st.text_area("""Enter your chart script, Return result to value.
            e.g.
            a = 3
            b = 4
            value = a + b!!!, Don't enter data parameter !!!""")
            button = st.button("Execute!!!")
            if button:
                loc = {}
                # SECURITY: exec() of user-typed text runs arbitrary code with
                # this process's privileges. It is a deliberate power-user
                # feature here, but must not be exposed on an untrusted
                # deployment.
                exec(code_area, {'data':data}, loc)
                return_workaround = loc['value']
                st.write(return_workaround)
                st.pyplot()
    elif ana_choice == 'Statistics':
        test_selection = st.selectbox('Category',
                                      ['Value Count', 'Normality Test', 'Correlation Test', 'Stationary Test',
                                       'Parametric Test',
                                       'Non Parametric Test'])
        # NOTE(review): only 'Value Count' and 'Normality Test' are handled
        # below; the other categories render nothing.
        statistics = stats.Statistics(data)
        if test_selection == 'Value Count':
            select_columns = st.selectbox("Select Columns",data.columns.tolist())
            mode = st.radio('Value Counts',['Table','Chart'])
            if mode == 'Table':
                value_counts = statistics.__get__stats__(select_columns)
                st.dataframe(value_counts)
            else:
                value_counts = statistics.__get__stats__(select_columns)
                # Show only the 20 most frequent values as a bar chart.
                st.write(value_counts[:20].plot(kind='barh'))
                st.pyplot()
        elif test_selection == 'Normality Test':
            st.write("""
            Tests whether a data sample has a Gaussian distribution. \n
            H0: the sample has a Gaussian distribution. \n
            H1: the sample does not have a Gaussian distribution""")
            select_test = st.selectbox('Tests', ['ShapiroWilk', 'DAgostino', 'AndersonDarling'])
            col = st.selectbox('Select Column', data.columns.tolist())
            text_option = st.checkbox('Text')
            chart_option = st.checkbox('Chart')
            if text_option:
                t,p = statistics.normality_tests(data[col], test_type=select_test)
                st.write('#### ' + t + " (" + str(p) + ")")
            if chart_option:
                st.write(sns.kdeplot(x=col,data=data))
                st.pyplot()
    elif ana_choice == 'Data Profiling':
        st.markdown("""
        ##### The Data Profiling is done automatically using Pandas Profiling tool.\n \n \n \n
        """)
        limited_records = st.checkbox("Execute on Limited Records!!!")
        select_columns = st.multiselect("Select Columns", data.columns.tolist())
        if len(select_columns) == 0:
            cols = data.columns.tolist()
        else:
            cols = select_columns
        if limited_records:
            num_rec = st.number_input("No. of Records:", min_value=0, max_value=1000000, step=1, value=100)
        else:
            num_rec = len(data)
        execute_profiling = st.button('Execute!!!')
        if execute_profiling:
            st.title(f"Pandas Profiling on {num_rec} records")
            # .loc[:num_rec] is label-based and inclusive -- presumably the
            # index is the default RangeIndex; confirm for other indexes.
            report = ProfileReport(data[cols].loc[:num_rec,:], explorative=True)
            st.write(data)
            st_profile_report(report)
|
995,310 | a547e8d781241ffc6be9a142be5b43bba2d03daf | from django.apps import AppConfig
class PixelMappingConfig(AppConfig):
    """Django application configuration for the pixel_mapping app."""
    name = 'pixel_mapping'
|
995,311 | 1ddca0197a9c39726e834c6692088ee4df7e7ffe | #Import Libraries
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
from keras.utils import np_utils
from keras.models import model_from_json
import numpy as np
import os
import theano
from PIL import Image
from numpy import *
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
# input image dimensions
img_rows, img_cols = 200, 200
# number of channels (grayscale)
img_channels = 1

path1 = 'InputData'  # path of folder of images
path2 = 'ResizedInputData'  # path of folder to save images

listing = os.listdir(path1)
num_samples=size(listing)
print(num_samples)

# Resize images and convert to grayscale
for file in listing:
    im = Image.open(path1 + '/' + file)
    img = im.resize((img_rows,img_cols))
    gray = img.convert('L')
    gray.save(path2 +'/' + file, "JPEG")

imlist = os.listdir(path2)

im1 = array(Image.open('ResizedInputData' + '/'+ imlist[0])) # open one image to get size
m,n = im1.shape[0:2] # get the size of the images
imnbr = len(imlist) # get the number of images

# create matrix to store all flattened images (one row per image, float32)
immatrix = array([array(Image.open('ResizedInputData'+ '/' + im2)).flatten()
                  for im2 in imlist],'f')

# Hard-coded labels: images 0-9 are class 0, 10-19 class 1, the rest class 2.
label=np.ones((num_samples,),dtype = int)
label[0:10]=0
label[10:20]=1
label[20:]=2

# Shuffle data so classifier doesn't recognize index patterns
data,Label = shuffle(immatrix,label, random_state=2)
train_data = [data,Label]

# batch_size to train
batch_size = 32
# number of output classes
nb_classes = 3
# number of epochs to train
nb_epoch = 20
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

(X, y) = (train_data[0],train_data[1])

# Split X and y into train and test sets (80/20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=4)

# Reshape to (samples, channels, rows, cols) -- Theano channels-first layout.
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# Scale pixel values to [0, 1].
X_train /= 255
X_test /= 255

print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot)
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Initialize model: two conv layers, max-pooling, dropout, dense softmax head.
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
convout1 = Activation('relu')
model.add(convout1)
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
convout2 = Activation('relu')
model.add(convout2)
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

# Train now
print(np.shape(X_train))
# NOTE(review): the model is fit twice in a row (first with validation data,
# then without), so it trains for 2 * nb_epoch in total -- confirm the second
# fit is intentional and not a leftover.
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                 verbose=1, validation_data=(X_test,Y_test))
hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                 verbose=1)

# NOTE(review): with metrics=['accuracy'], evaluate() returns [loss, accuracy],
# not a single scalar "score".
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)

print(model.predict_classes(X_test[1:5]))
print(Y_test[1:5])

# Confusion matrix inputs: predicted class = argmax over softmax outputs.
from sklearn.metrics import classification_report,confusion_matrix
Y_pred = model.predict(X_test)
print(Y_pred)
y_pred = np.argmax(Y_pred, axis=1)
print(y_pred)

p=model.predict_proba(X_test) # to predict probability

# Save weights
fname = "weights-Test-CNN.h5"
model.save_weights(fname,overwrite=True)
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)

# Save model: re-read the architecture JSON just written above.
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
995,312 | 6b223814d0b5f6635d439cd4cce2397e4b53eca5 | # -*- coding: utf_8 -*-
# fibonacci.py: muestra los numeros de fibonacci hasta n
# Print the Fibonacci numbers F(0)..F(n) for a user-supplied n.
fib1 = 0   # F(k)
fib2 = 1   # F(k+1)
temp = 0
n = int(raw_input("Ingrese un numero natural: "))
print ("Serie de Fibonacci hasta " + str(n) + ": ")
if n >= 0:
    print (fib1)
if n >= 1:
    print(fib2)
    # Generate and print F(2)..F(n) by shifting the window forward.
    for x in range(2, n+1):
        temp = fib1 + fib2
        print(temp)
        fib1, fib2 = fib2, temp
|
995,313 | f081d3b60efb9cf4e8ca8380b7d199ec43835121 | import os
import sys
import math
divisors = []
def isInt(x):
    """Return True when int(x) succeeds, False on a ValueError."""
    try:
        int(x)
    except ValueError:
        return False
    return True
def isPrime(x, f=2):
    """Trial-division primality test starting at candidate factor *f*.

    Returns a bool. Side effects (preserved from the original): when a
    factor is found it is printed and recorded once in the module-level
    ``divisors`` list.

    Fix: the original returned the raw remainder ``x % f`` (a truthy int)
    for primes instead of ``True``; callers relying on truthiness still
    behave identically, but the return type is now consistently bool.
    """
    if x == 2 or x == 3:
        return True
    elif x < 2:
        return False
    while f <= math.ceil(math.sqrt(x)):
        if x % f == 0:
            print("\n", x, " is divisible by ", f, "\n")
            # Record each distinct factor once (replaces the old
            # try/.index()/except membership idiom).
            if f not in divisors:
                divisors.append(f)
            return False
        f += 1
    return True
def arePrimes(numbers):
    """Print how many of *numbers* are prime; return the non-prime ones."""
    # Test every number in order (isPrime's side effects are preserved).
    notPrimes = [n for n in numbers if not isPrime(n)]
    primes = len(numbers) - len(notPrimes)
    print(primes, "Primes found:\n")
    return notPrimes
def main():
    """Interactively find primes in an inclusive user-supplied range.

    Prints the primes, the count, and the factors collected in the
    module-level ``divisors`` list; optionally re-checks those factors.
    Always returns True.
    """
    print('Program to calculate prime numbers in a given range (includes the range values itself).\n')
    limit_low = int(input("Enter lower limit of range: "))
    limit_high = int(input("Enter upper limit of range: "))
    primeCount = 0
    primes = []
    lowVal = limit_low  # remember the original lower bound for the report
    while limit_low <= limit_high:
        if isPrime(limit_low):
            primeCount += 1
            primes.append(limit_low)
        limit_low += 1
    print("Primes: \n")
    print(primes)
    print("\n",primeCount, " primes found in range [",lowVal,",",limit_high,"]\n")
    print("Divisors in range: \n")
    print(divisors)
    reply = input("\nCheck if all divisors are prime ? (yes/no): ")
    if reply == 'yes':
        print("\nChecking if all divisors are prime...\n")
        nonPrimeDivisors = arePrimes(divisors)
        print("Divisors that are not primes:\n")
        print(nonPrimeDivisors)
    # Fix: guard against an empty divisors list (e.g. an all-prime range),
    # which previously raised IndexError on divisors[-1].
    if divisors:
        print("\nMax Divisor:",divisors[-1],"\n")
    return True
# Interactive entry point: loops until the user answers 0 to the first prompt.
while int(input("\nRun prime finding program ?\n1. Yes, 0. No\n")):
    ch = int(input("1. Detect if a number is prime\n2. Find prime numbers within a given range\n"))
    if ch == 1:
        x = input("Enter test number: ")
        # Re-prompt on invalid input; after a second consecutive failure the
        # whole interpreter is re-executed with the original argv.
        while isInt(x) == False:
            print("\nInvalid integer. Please enter again...\n")
            x = input("Enter test number: ")
            if isInt(x) == False:
                print("Repeated invalid data entered. Re-running program...")
                os.execl(sys.executable, sys.executable, *sys.argv)
        x = int(x)
        if isPrime(x):
            print("The number is prime\n")
        else:
            print("The number is not prime\n")
    elif ch == 2:
        main()
    else:
        print("Invalid choice input")
        exit()
|
995,314 | f91b6f7fa55d479b6b0c9da50cde9750d7aa861b | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.views import generic
from django.contrib.auth.decorators import login_required
from django.forms.formsets import formset_factory
from django.forms import modelformset_factory
from functools import partial, wraps
from django.utils.functional import curry
from django.contrib.auth.forms import User
from users.models import Message
from friendship.models import Friend, Follow
from .forms import AddItemForm, AddSplitItemForm, AddReceiptForm, AddStoreForm, AddProductForm, ShareItemForm
from .models import Store, Product, ReceiptProduct, Receipt, ShareItem, ShareNotification
def get_common_context(user, receipt_id=None):
    """Build the context dict shared by most purchase_log views.

    Always includes notification counters, the user's receipts, and a
    per-receipt total map. When *receipt_id* is given, also adds that
    receipt's items, totals and tax, raising Http404 when *user* is
    neither the owner nor a tagged purchaser.
    """
    # Retrieves commonly re-used data for certain views.
    common_context = {}
    receipt_list = []  # Always in common_context
    total_dict = {}  # Always in common_context
    # Nav-bar counters: unread friend requests, messages, share notifications.
    num_of_new_friend_requests = len(Friend.objects.unread_requests(user=user))
    new_messages = [message for message in Message.objects.filter(to_user=user) if message.read is False]
    new_share_notifications = [
        notification for notification in ShareNotification.objects.filter(
            to_user=user,
            read=False
        )
    ]
    num_of_new_messages = len(new_messages)
    num_of_new_share_notifications = len(new_share_notifications)
    common_context['num_of_new_friend_requests'] = num_of_new_friend_requests
    common_context['num_of_new_messages'] = num_of_new_messages
    common_context['num_of_new_share_notifications'] = num_of_new_share_notifications
    common_context['total_new_notifications'] = num_of_new_share_notifications + num_of_new_messages + num_of_new_friend_requests
    description = ''
    # Per-receipt grand total: sum of all item prices plus receipt.tax
    # applied to the taxed items' subtotal.
    for receipt in Receipt.objects.all():
        if receipt.owner == user:
            receipt_list.append(receipt)
            temp_list = [item.price for item in receipt.receiptproduct_set.all()]
            taxed_items = [item.price for item in receipt.receiptproduct_set.all() if item.tax]
            total_dict[receipt.id] = format(((sum(taxed_items)*receipt.tax)+(sum(temp_list))), '.2f')
    common_context["total_dict"] = total_dict
    common_context["receipt_list"] = receipt_list
    if receipt_id:
        current_receipt = get_object_or_404(Receipt, pk=receipt_id)
        # Creates a set of users tagged in a receipt
        list_of_purchasers = [item.shareitem_set.all() for item in current_receipt.receiptproduct_set.filter(split=True)]
        # NOTE(review): only the FIRST split item's share set is consulted
        # here, and list_of_purchasers[0] raises IndexError when a non-owner
        # views a receipt that has no split items -- confirm both behaviors.
        if current_receipt.owner != user and user not in set([share_item.purchasers for share_item in list_of_purchasers[0]]):
            raise Http404
        items = current_receipt.receiptproduct_set.all()
        # NOTE(review): this loop leaves `description` set from the LAST
        # item only; every earlier item's description is overwritten.
        for item in items:
            if item.description == 'None':
                description = ''
            else:
                description = item.description
        total = sum([item.price for item in items])
        taxed_items = [item.price for item in items if item.tax]
        tax = (sum(taxed_items)*current_receipt.tax)
        total_and_tax = (total + tax)
        common_context['total'] = ("%.2f" % total)
        common_context['tax'] = ("%.2f" % tax)
        common_context['current_receipt'] = current_receipt
        common_context['items'] = items
        common_context['total_and_tax'] = ("%.2f" % total_and_tax)
        common_context['description'] = description
    return common_context
def index(request):
    """Render the landing page; anonymous visitors get an empty context."""
    ctx = get_common_context(request.user) if request.user.is_authenticated else {}
    return render(request, 'purchase_log/index.html', ctx)
@login_required
def receipts(request):
    """List the current user's receipts with the shared nav context."""
    # TODO: Add section where a user can view the receipts that he\she is tagged in
    context = get_common_context(request.user)
    return render(request, 'purchase_log/receipts.html', context)
@login_required
def receipt_details(request, receipt_id):
    """Show one receipt's items and totals.

    Access control lives in get_common_context(), which raises Http404 for
    users who neither own the receipt nor are tagged on a split item.
    """
    # get_common_context already returns everything the template needs;
    # dict() replaces the previous key-by-key copy loop.
    context = dict(get_common_context(request.user, receipt_id))
    return render(request, 'purchase_log/receipt_details.html', context)
@login_required
def add_receipt_product(request, receipt_id):
    """Add a line item to a receipt (owner only).

    Dispatches to one of two local helpers depending on whether the receipt
    is marked split. On POST each helper performs the save; the redirect is
    issued by this outer function regardless of form validity -- NOTE(review):
    that means validation errors on POST are silently discarded.
    """
    def solo_receipt(request, receipt, post=False):
        # if request == 'POST' pass True to post variable
        if post:
            form = AddItemForm(user=request.user, data=request.POST)
            share_item_form = ShareItemForm(user=request.user)
            if form.is_valid():
                new_product = form.save(commit=False)
                new_product.owner = request.user
                new_product.purchaser = request.user
                new_product.receipt = receipt
                new_product.save()
                # Create ShareItem for easy addition to Financial Overview
                share_item = share_item_form.save(commit=False)
                share_item.receipt_product = new_product
                share_item.purchasers = request.user
                share_item.save()
                return HttpResponseRedirect(reverse('purchase_log:receipt_details', args=[receipt_id]))
        else:
            # No data submitted; create a blank form.
            form = AddItemForm(user=request.user, initial={'receipt': receipt})
            common_context = get_common_context(request.user, receipt.id)
            context = {'current_receipt': receipt, 'form': form}
            for key, value in common_context.items():
                context[key] = value
            return context

    def split_receipt(request, receipt, post=False):
        # Create Share Item formset; wraps/partial pre-bind the current user
        # so every form the factory instantiates receives user=request.user.
        share_item_formset = formset_factory(wraps(ShareItemForm)(partial(ShareItemForm, user=request.user)), extra=3)
        # if request == 'POST' pass True to post variable
        if post:
            form = AddSplitItemForm(user=request.user, data=request.POST)
            formset = share_item_formset(request.POST)
            share_item_form = ShareItemForm(user=request.user)
            if all([form.is_valid(), formset.is_valid()]):
                new_product = form.save(commit=False)
                new_product.owner = request.user
                new_product.receipt = receipt
                new_product.save()
                # Create ShareItem for easy addition to Financial Overview
                share_item = share_item_form.save(commit=False)
                share_item.receipt_product = new_product
                share_item.purchasers = new_product.purchaser
                share_item.save()
                # Process Formset: one ShareItem per tagged purchaser.
                for inline_form in formset:
                    if inline_form.cleaned_data:
                        share_item = inline_form.save(commit=False)
                        share_item.receipt_product = ReceiptProduct.objects.get(id=new_product.id)
                        share_item.save()
                        # Create ShareNotification if no notification for this user/receipt already exists.
                        share_notification_list = [
                            notification for notification in ShareNotification.objects.filter(
                                to_user=share_item.purchasers,
                                receipt=receipt,
                            )
                        ]
                        if not share_notification_list:
                            notification = ShareNotification(
                                from_user=request.user,
                                to_user=share_item.purchasers,
                                receipt=receipt
                            )
                            notification.save()
        else:
            # No data submitted; create a blank form.
            form = AddSplitItemForm(user=request.user, initial={
                'purchaser': request.user,
                'split': True,
                'receipt': receipt
            })
            formset = share_item_formset()
            common_context = get_common_context(request.user, receipt.id)
            context = {'current_receipt': receipt, 'form': form, 'formset': formset}
            for key, value in common_context.items():
                context[key] = value
            return context

    current_receipt = get_object_or_404(Receipt, pk=receipt_id)
    if current_receipt.owner != request.user:
        raise Http404
    if current_receipt.split:
        if request.method == 'POST':
            split_receipt(request, current_receipt, post=True)
            return HttpResponseRedirect(reverse('purchase_log:receipt_details', args=[receipt_id]))
        else:
            context = split_receipt(request, current_receipt)
    elif not current_receipt.split:
        if request.method == 'POST':
            solo_receipt(request, current_receipt, post=True)
            return HttpResponseRedirect(reverse('purchase_log:receipt_details', args=[receipt_id]))
        else:
            context = solo_receipt(request, current_receipt)
    return render(request, 'purchase_log/add_receipt_product_form.html', context)
@login_required
def product_details(request, product_id):
    """Show one product and every recorded purchase of it (owner only)."""
    current_product = get_object_or_404(Product, pk=product_id)
    if current_product.owner != request.user:
        raise Http404
    # list() replaces the redundant identity list-comprehension.
    purchase_list = list(ReceiptProduct.objects.all().filter(product=current_product))
    context = {'purchase_list': purchase_list, 'current_product': current_product}
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user))
    return render(request, 'purchase_log/product_details.html', context)
@login_required
def add_receipt(request):
    """Create a receipt: GET renders a blank form, a valid POST saves the
    receipt for the current user and redirects to its detail page."""
    if request.method != 'POST':
        # No data submitted; create a blank form.
        form = AddReceiptForm(user=request.user, initial={'tax': 0.00})
    else:
        # POST data submitted; process data.
        form = AddReceiptForm(user=request.user, data=request.POST, files=request.FILES or None)
        if form.is_valid():
            new_receipt = form.save(commit=False)
            new_receipt.owner = request.user
            new_receipt.save()
            # Fix: redirect to the receipt we just saved. The previous code
            # used Receipt.objects.last().pk, which races with concurrent
            # inserts and could point at another user's receipt.
            return HttpResponseRedirect(reverse('purchase_log:receipt_details', args=[new_receipt.pk]))
    context = {'form': form}
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user))
    return render(request, 'purchase_log/add_receipt_form.html', context)
@login_required
def add_store(request):
    """Create a Store owned by the current user, then return to add_receipt."""
    if request.method != 'POST':
        # Fix: instantiate the form. The previous code assigned the class
        # itself (form = AddStoreForm), handing the template an unrendered
        # class instead of a blank form instance.
        form = AddStoreForm()
    else:
        # POST data submitted; process data.
        form = AddStoreForm(data=request.POST)
        if form.is_valid():
            new_store = form.save(commit=False)
            new_store.owner = request.user
            new_store.save()
            return HttpResponseRedirect(reverse('purchase_log:add_receipt'))
    context = {'form': form}
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user))
    return render(request, 'purchase_log/add_store_form.html', context)
@login_required
def add_product_type(request, receipt_id):
    """Create a Product type, then return to adding items on the receipt."""
    current_receipt = Receipt.objects.get(id=receipt_id)
    if request.method != 'POST':
        # Fix: instantiate the form. The previous code assigned the class
        # itself (form = AddProductForm) instead of a blank form instance.
        form = AddProductForm()
    else:
        # POST data submitted; process data.
        form = AddProductForm(data=request.POST)
        if form.is_valid():
            new_product = form.save(commit=False)
            new_product.owner = request.user
            new_product.save()
            return HttpResponseRedirect(reverse('purchase_log:add_product', args=[receipt_id]))
    context = {'current_receipt': current_receipt, 'form': form}
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user, receipt_id))
    return render(request, 'purchase_log/add_product_type_form.html', context)
@login_required
def delete_receipt_product(request, receipt_id, pk):
    """Remove one line item, then return to its receipt's detail page."""
    product_entry = get_object_or_404(ReceiptProduct, pk=pk)
    # Only the owner may delete; everyone is redirected back either way.
    if product_entry.owner == request.user:
        product_entry.delete()
    return HttpResponseRedirect(
        reverse('purchase_log:receipt_details', args=[receipt_id]))
@login_required
def delete_receipt(request, receipt_id):
    """Delete a whole receipt, then return to the receipt list."""
    target = get_object_or_404(Receipt, pk=receipt_id)
    # Only the owner may delete; everyone is redirected back either way.
    if target.owner == request.user:
        target.delete()
    return HttpResponseRedirect(reverse('purchase_log:receipts'))
def edit_receipt_product(request, receipt_product_id):
    """Edit an existing entry.

    NOTE(review): unlike the sibling views, this one is not decorated with
    @login_required -- confirm that is intentional (the owner check below
    would raise on AnonymousUser comparisons rather than redirect to login).
    """
    item = ReceiptProduct.objects.get(id=receipt_product_id)
    receipt = item.receipt
    if item.owner != request.user:
        raise Http404
    if request.method != 'POST':
        # Initial request; pre-fill form with the current entry.
        form = AddItemForm(user=request.user, instance=item)
    else:
        # POST data submitted; process data.
        form = AddItemForm(user=request.user, instance=item, data=request.POST, files=request.FILES or None)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('purchase_log:receipt_details',
                                                args=[receipt.id]))
    # Invalid POST or GET: re-render the form with the shared context.
    common_context = get_common_context(request.user, ReceiptProduct.objects.get(id=receipt_product_id).receipt.id)
    context = {'item': item, 'receipt': receipt, 'form': form}
    for key, value in common_context.items():
        context[key] = value
    return render(request, 'purchase_log/edit_receipt_product.html', context)
def edit_split_receipt_product(request, receipt_product_id):
    """Edit an existing split entry together with its ShareItem formset."""
    item = ReceiptProduct.objects.get(id=receipt_product_id)
    receipt = item.receipt
    share_item = ShareItem.objects.filter(receipt_product=item)
    # NOTE(review): form= is given a form INSTANCE here, then immediately
    # overwritten on the next line -- the first argument is likely vestigial.
    share_item_formset = modelformset_factory(ShareItem, form=ShareItemForm(user=request.user))
    # Re-bind the formset's form class so every ShareItemForm it constructs
    # receives the current user; staticmethod stops Python from binding the
    # curried callable as a method of the formset class.
    share_item_formset.form = staticmethod(curry(ShareItemForm, user=request.user))
    if item.owner != request.user:
        raise Http404
    if request.method != 'POST':
        # Initial request; pre-fill form with the current entry.
        form = AddSplitItemForm(user=request.user, instance=item)
        formset = share_item_formset(queryset=share_item)
    else:
        # POST data submitted; process data.
        form = AddSplitItemForm(user=request.user, instance=item, data=request.POST)
        formset = share_item_formset(request.POST)
        if all([form.is_valid(), formset.is_valid()]):
            form.save()
            # Re-attach every edited ShareItem to this line item.
            for inline_form in formset:
                if inline_form.cleaned_data:
                    share_item = inline_form.save(commit=False)
                    share_item.receipt_product = ReceiptProduct.objects.get(id=item.id)
                    share_item.save()
            return HttpResponseRedirect(reverse('purchase_log:receipt_details',
                                                args=[receipt.id]))
    common_context = get_common_context(request.user, ReceiptProduct.objects.get(id=receipt_product_id).receipt.id)
    context = {'item': item, 'receipt': receipt, 'form': form, 'formset': formset}
    for key, value in common_context.items():
        context[key] = value
    return render(request, 'purchase_log/edit_split_receipt_product.html', context)
def edit_receipt(request, receipt_id):
    """Edit an existing receipt (owner only)."""
    receipt = Receipt.objects.get(id=receipt_id)
    if receipt.owner != request.user:
        raise Http404
    if request.method != 'POST':
        # Initial request; pre-fill form with the current entry.
        form = AddReceiptForm(user=request.user, instance=receipt, files=request.FILES or None)
    else:
        # POST data submitted; process data.
        form = AddReceiptForm(user=request.user, instance=receipt, files=request.FILES or None, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('purchase_log:receipt_details', args=[receipt_id]))
    context = {'receipt': receipt, 'form': form}
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user))
    return render(request, 'purchase_log/edit_receipt.html', context)
def receipt_notifications(request, user_id):
    """List every share notification addressed to the requesting user.

    The user_id in the URL must match the logged-in user, otherwise 404.
    """
    user = User.objects.get(pk=user_id)
    if user != request.user:
        raise Http404
    # list() replaces the redundant identity list-comprehension.
    notification_list = list(ShareNotification.objects.filter(to_user=request.user))
    context = {
        'notification_list': notification_list
    }
    # Merge the shared nav/receipt context in one call instead of a copy loop.
    context.update(get_common_context(request.user))
    return render(request, 'purchase_log/receipt_notification.html', context)
def share_notification_details(request, share_notification_id):
    """Show one share notification; mark it read when its addressee opens it."""
    current_notification = ShareNotification.objects.get(pk=share_notification_id)
    if current_notification.to_user == request.user:
        # Only the addressee opening the notification marks it as read.
        notification = ShareNotification.objects.read_notification(share_notification_id)
    else:
        # Reuse the row we already fetched instead of querying it a second time.
        notification = current_notification
    # (The original also built an unused `list_products` queryset; removed.)
    context = {
        'notification': notification,
        'user': request.user,
    }
    context.update(get_common_context(request.user, current_notification.receipt.id))
    return render(request, 'purchase_log/share_notification_details.html', context)
995,315 | 4f333d686e9bcef7977515fe0de9efeb3b7cd64e | '''
try / except
When the first conversion fails - it
just drops into the except: clause
and the program continues
When the second conversion
succeeds - it just skips the except:
clause and the program continues
astr = 'Hello Bob'
try:
istr = int(astr)
except:
istr = -1
print 'First', istr
astr = '123'
try:
istr = int(astr)
except:
istr = -1
print 'Second', istr
'''
#example
#First -1
#Second 123
#----------------------------------------------------------
'''
#sample try/except
rawstr = raw_input('Enter a number:')
try:
ival = int(rawstr)
except:
ival = -1
if ival > 0 :
print 'Nice work'
else:
print 'Not a number'
'''
# Python 2 exercise: read positive integers until an invalid entry,
# then report the maximum and minimum of the values collected so far.
# NOTE(review): `largest`/`smallest` are never used (assignment leftovers),
# and max()/min() raise ValueError if the very first input is invalid.
largest = None
smallest = None
number = []      # all accepted (positive) numbers
estado = True    # loop flag; cleared on the first invalid entry
while estado:
    num = raw_input("Enter a number: ")
    try:
        num1 = int(num)
    except:
        # Non-numeric input is mapped to -1 so it fails the > 0 check below.
        num1 = -1
    if num1 > 0:
        number.append(num1)
    else:
        # First non-positive / non-numeric entry ends the loop and prints stats.
        print 'Invalid input'
        print 'Maximum',max(number)
        print 'Minimum',min(number)
        estado = False
|
995,316 | 8e419fbd8899c7e942cc19df92d5eb3634354efc | from clase import Auto
# Demo script: exercise the Auto class with two instances.
mustang = Auto('MUSTANG','ROJO','5.5')
mustang.arranca()  # start the engine
mustang.frena()    # brake
mustang.set_color("AZUL")  # repaint the car
mustang.get_color()
jetta = Auto('JETTA','BLANCO','2.5')
jetta.arranca()
jetta.frena()
jetta.get_color()
|
995,317 | ae6b50cd09e7ad54872419233035df1b51ab5599 | """le but est simple il faut faire une liste qui contier le nombre de fruit et déclaré une variable avec le nombre de fruit à retirer """
# Number of fruits removed from every basket.
fruit_a_retirer = 7
# Fruit count per basket.
liste = [15,3,18,21]
# Keep only baskets holding more fruit than we remove, minus the removed amount.
affichage = [count - fruit_a_retirer for count in liste if count > fruit_a_retirer]
print(affichage)
|
995,318 | e0c1a5aee18b97ca93740d710ff479528b3f5403 | """
█▀▄▀█ █▀▀ ▀█▀ █▀▀ █▀█ █▀ ▀█▀ ▄▀█ ▀█▀
█░▀░█ ██▄ ░█░ ██▄ █▄█ ▄█ ░█░ █▀█ ░█░
The code is licensed under the MIT license.
"""
import os
from .mutations import create, update, delete, apply
from .checks import find_duplicate
from .generators import generate_uid
from .utils import create_station_dict, merge_dicts, get_distance
__appname__ = "stations"
__version__ = "0.0.4"

# Absolute path of the local weather-stations directory
# (~/Meteostat/weather-stations/stations).
stations_path: str = os.path.join(
    os.path.expanduser("~"),
    "Meteostat",
    "weather-stations",
    "stations",
)
|
995,319 | ba572dfb72c4ab6a3dc7fff8aba1734ec4ff6ecb | from meow_letters.storage.meowdb import MeowDatabase
# One-off setup script: create the `highscores` table in the meow database.
database = MeowDatabase()
# Schema: auto-incrementing id, player name, and their best score.
database.db.execute("""CREATE TABLE highscores (id integer primary key autoincrement,
username text, highscore integer)""")
database.db.close()
|
995,320 | de8b39792532c5442085ce9bedbdc820055882cf | from utils import *
def reverse_list(node:ListNode):
    """Reverse a singly linked list in place and return the new head.

    Walks the list once, repeatedly detaching the node after `curr` and
    pushing it onto the front. Returns None for an empty list (the
    original raised AttributeError on None input).
    """
    if node is None:
        return None
    head = curr = node
    while curr.next:
        moved = curr.next
        curr.next = moved.next  # unlink `moved` from its position
        moved.next = head       # push it onto the front
        head = moved
    return head
if __name__ == "__main__":
    # Smoke test: build a 5-node list, print it before and after reversal.
    n = create_nodes([1, 3, 5, 7, 9])
    print("Initial")
    print_nodes(n)
    print("Final")
    print_nodes(reverse_list(n))
|
995,321 | 18a2b9dec94d0ec0ba8728c09066d85159d55f64 |
class dispatcher:
    """Wrap a callable so it can be stored and executed later."""
    name = 'dispatcher'
    def __init__(self, function_to_exec):
        # __init__ must return None: the original `return self.function`
        # made every construction raise TypeError.
        self.function = function_to_exec
    def get_name(self):
        """Return the class-level dispatcher name."""
        return self.name
def function_one(a, b):
    """Return the sum (or concatenation) of *a* and *b*."""
    total = a + b
    return total
def function_two():
    """Return the identifying string of this demo function."""
    return 'function two'
995,322 | 3af9f8a3713bd0981e688da88c71f53fdfb74fac | '''
select * from member
/* => 주석처리
from 이후에는 내가 생성한 db 테이블 이름 F5를 눌러서 실행하면
테이블에서 생성한 데이터를 보여줌
*/
--데이터 베이스 구축하기
--데이터 정의어(DDL) : 데이터베이스 만들기
create database Test02;
/*
create database <database명>
위의 쿼리문은 데이터 정의어(DDL) 중의 하나인 create문을 이용하는 쿼리입니다.
위의 쿼리문을 실행시키기 위해서 해당 쿼리문을 블록처리하고 F5를 눌러 실행시킵니다.
그리고 좌측의 개체탐색기 > 데이터베이스를 확인하면 Test02 라는 데이터베이스가 새로 생긴것을 확인할 수 있습니다.
이제 우리가 방금 생성한 Test02 라는 데이터베이스 내에 새로운 테이블을 생성하고 데이터를 추가해야 합니다.
하지만 우리가 처음 시작할 때 master 로 설정하고 시작한 것을 기억하시나요?
이 상태에서 테이블을 생성하거나 데이터를 입력하려고 하면 우리가 원하는대로, Test02 라는 데이터베이스에 데이터가 기록되지 않고 시스템 데이터베이스에 기록되게 됩니다.
따라서 우리가 앞으로 Test02에서 작업하겠다고 컴퓨터에게 알려주어야 합니다.
이를 위해서 아래와 같은 쿼리를 입력합니다.
use Test02;
위의 쿼리문을 실행하면 아래와 같이 master로 선택되어 있었던 것이 Test02로 바뀜
'''
'''
create table member(
id int constraint pk_code primary key,
name char(10),
email char(10)
);
/*
쿼리를 실행시킬 때는 실행시키고자 하는 부분만 블록으로 감싸 F5를 눌러야한다.
그렇지 않고 F5를 누르게되면 해당 쿼리창의 시작부터 끝까지 모든 쿼리가 다시 실행되므로 에러가 발생할 수 있다.
id 칼럼은 contraint pk_code primary key 라고 붙어있는데, 여기서 constraint는 해당 칼럼에 특정 제약조건을 주겠다라는 의미이고 그 제약조건의 내용이 뒤에 따라서 붙습니다
여기서 pk_code primary key 라는 제약조건이 붙었는데, 이는 pk_code 라는 이름의 primary key로 설정하겠다라는 의미입니다.
즉, member 테이블에서의 primary key, 기본키는 id컬럼이며 해당 기본키의 이름은 pk_code이다
*/
-- 데이터 조작어(DML) : INSERT, SELECT
insert into member values(10, '홍범우', 'hong@eamil');
/*
위의 쿼리는, member 라는 테이블에 데이터를 insert 할 것이다라는 의미
입력되는 데이터의 내용은 values(~~~) 내부에 입력
그리고 입력한 데이터가 잘 저장되었나 확인하기 위해 아래 쿼리를 입력
select * from member; 이게 확인하기 위한 쿼리
* : *는 모든 칼럼을 의미 배경이되는 테이블은 from ~~
*/
select * from member
'''
|
995,323 | c4690558ec24603b6480566c288e5e2b86473362 | import Tkinter
from PIL import ImageTk, Image
def _exit(event):
    """Tkinter event handler: log and destroy the main window.

    `event` is the Tkinter event object (unused); bound to Button-1 below.
    """
    print("Exit")
    main_window.destroy()
# Build a borderless 400x300 window that closes on a left mouse click.
main_window=Tkinter.Tk()
print("yuiop")
# NOTE(review): w/h (screen size) are computed but never used.
w = main_window.winfo_screenwidth()
h = main_window.winfo_screenheight()
main_window.overrideredirect(1)  # remove window decorations
main_window.geometry("%dx%d+0+0" % (400, 300))
#main_window.bind("<Escape>", _exit)
#main_window.bind("<space>", _exit)
main_window.bind("<Button-1>", _exit)
main_window.mainloop()
|
995,324 | a78be66ed07d6512e422dc44f429ac82809526a4 | import os
from os import path
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import nltk
import string
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
# remove whitespace from text
def remove_whitespace(text):
    """Collapse every run of whitespace in *text* into a single space."""
    words = text.split()
    return " ".join(words)
# remove stopwords function
def remove_stopwords(text):
    """Tokenize *text* and return its tokens minus Indonesian stopwords.

    (The original initialised unused `filtered_text`/`word` strings and
    then shadowed them; that dead code is removed.)
    """
    stop_words = set(stopwords.words("indonesian"))
    word_tokens = word_tokenize(text)
    return [word for word in word_tokens if word not in stop_words]
# get data directory (using getcwd() is needed to support running example in generated IPython notebook)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
# Read the whole text.
text = open(path.join(d, 'PidatoPresiden.txt')).read()
# Remove punctuation and convert to lowercase.
text = text.translate(str.maketrans('', '', string.punctuation)).lower()
# Remove stopwords / unneeded words (experiment, left disabled).
# removed = remove_stopwords(text)
# print(removed)
# Stopword list: Indonesian defaults plus speech-specific extra words.
tokens = word_tokenize(text)
stop_words = set(stopwords.words('indonesian'))
new_stopwords = ["hormati", "wakil", "presiden", "negara", "republik", "indonesia", "'", "salam", "om","shanti","namo", "buddhaya", "alaikum", "warahmatullahi","wabarakatuh", "prof", "dr","kh", "jusuf", "kalla", "ma", "ruf", "amin"]
new_stopwords_list = stop_words.union(new_stopwords)
listStopword = set(new_stopwords_list)
# Stemming via Sastrawi (left disabled inside the loop below).
factory = StemmerFactory()
stemmer = factory.create_stemmer()
removed = []
katakata = ""  # cleaned text rebuilt token by token
for t in tokens:
    if t not in listStopword:
        # katadasar = stemmer.stem(t)
        # removed.append(katadasar)
        katakata+=" "+t
# print(removed)
print("Cleaning Result: ")
print(katakata)
# Re-tokenize the cleaned text and count token frequencies.
tokens = nltk.tokenize.word_tokenize(katakata)
kemunculan = nltk.FreqDist(tokens)
print("Tokenize: ")
print(kemunculan.most_common())
# Generate a word cloud image
wordcloud = WordCloud().generate(katakata)
# Display the generated image:
# the matplotlib way:
# plt.imshow(wordcloud, interpolation='bilinear')
# plt.axis("off")
# lower max_font_size
wordcloud = WordCloud(max_font_size=40, background_color="white").generate(katakata)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
995,325 | 249bf9afb9a8d464883a4f7d5e9ed63c47e2bbe3 | from django.db import models
# Create your models here.
class News(models.Model):
    """A stored news item (presumably scraped — confirm against caller)."""
    link = models.CharField(max_length=200)     # source URL
    article = models.CharField(max_length=200)  # headline / title
    body = models.TextField()                   # full article text
    class Meta:
        # Avoid Django's default plural "newss" in the admin.
        verbose_name_plural = "news"
    def __str__(self):
        return self.article
995,326 | 063ae50696f672c6630a9315a3892d50927fac09 | #!/usr/bin/python3
# coding=UTF-8
from __future__ import unicode_literals
from django.db import models
from django import forms
from django.forms import ModelForm
import django.core.management as manage
#sources
# form
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Save data from manual set value
class testcaseModel(models.Model):
    """One manually-configured test case (simulation/channel/config settings)."""
    testcase_simulation = models.BooleanField()        # run in simulation mode?
    testcase_channel = models.IntegerField()           # channel number — semantics TBD, confirm with caller
    testcase_configFile = models.TextField(null=True)  # config file reference (nullable)
    testcase_nameCarac = models.TextField(null=True)   # characterisation name; used as display name below
    testcase_climChamber = models.BooleanField()       # climate chamber involved?
    def __str__(self):
        return self.testcase_nameCarac
|
995,327 | 2572ef6785a4acbad77cb74dc0e4636b1bdfcd24 | """
Digital root is the recursive sum of all the digits in a number.
Given n, take the sum of the digits of n. If that value has more than one
digit, continue reducing in this way until a single-digit number is produced.
This is only applicable to the natural numbers.
Examples
16 --> 1 + 6 = 7
942 --> 9 + 4 + 2 = 15 --> 1 + 5 = 6
132189 --> 1 + 3 + 2 + 1 + 8 + 9 = 24 --> 2 + 4 = 6
493193 --> 4 + 9 + 3 + 1 + 9 + 3 = 29 --> 2 + 9 = 11 --> 1 + 1 = 2
"""
#Difficulty: 6 kyu
#Name: Sum of Digits / Digital Root
#Link: https://www.codewars.com/kata/541c8630095125aba6000c00/train/python
def digital_root(n):
    """Return the digital root of a natural number *n*.

    Repeatedly sums the digits until a single digit remains, e.g.
    942 -> 15 -> 6.
    """
    total = 0
    while n:
        total += n % 10  # accumulate the least-significant digit
        n //= 10         # and drop it
    if total < 10:
        return total
    # More than one digit left in the sum: reduce it recursively.
    return digital_root(total)
|
995,328 | 273c9a1c698c7b681ed276da68580ff44e3da402 | # predict.py
import argparse
import sys
import os
import glob
import numpy as np
import urllib
import cv2
from keras.models import load_model as load_keras_model
from keras.preprocessing.image import img_to_array, load_img
from flask import Flask, jsonify
app = Flask(__name__)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
model_filename = 'cntk-model.h5'
class_to_name = [
"agave blue",
"aztec gold sunburst",
"aztec gold sparkle",
"black",
"black sunburst",
"blue sparkle",
"burgundy mist",
"candy apple red",
"candy green",
"cherry burst",
"cherry sunburst",
"coral pink",
"daphne blue",
"desert sand",
"fiesta red",
"lake placid blue",
"ocean turquoise",
"olympic white",
"sage green metallic",
"sea foam green",
"sea foam green sparkle",
"vintage white",
"vintage blonde",
"amber",
"antigua",
"antique burst"
]
def load_model():
    """Load the trained Keras model from `model_filename`.

    Prints a message and exits the process when the file is missing.
    """
    if os.path.exists(model_filename):
        return load_keras_model(model_filename)
    else:
        print("File {} not found!".format(model_filename))
        exit()
def load_image(filename):
    """Read an image, resize it to 256x256, and return a length-1 batch array."""
    img_arr = img_to_array(load_img(filename, False, target_size=(256,256)))
    # Wrap in a list so the model receives a batch dimension.
    return np.asarray([img_arr])
@app.route('/predict')
def predict():
    """Flask endpoint: classify the pre-loaded image and return JSON.

    NOTE(review): relies on module globals `keras_model` and `image`
    assigned in the __main__ block — the same image for every request.
    """
    result = np.argmax(keras_model.predict(image))
    return jsonify({'prediction': class_to_name[result]})
if __name__ == '__main__':
    # Usage: predict.py <image-file>; loads the model and image once,
    # then serves /predict on all interfaces.
    filename = sys.argv[1]
    keras_model = load_model()
    image = load_image(filename)
    app.run(host='0.0.0.0', port=5000)
|
995,329 | 32b0ebc7f61f5f21a7203fd70f255b505f94c216 | from sqlalchemy import Column, Integer, DateTime, Numeric, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
class Txn(Base):
    """One observed transaction snapshot."""
    __tablename__ = 'txn'
    hash_id = Column(String(64), nullable=False, primary_key=True)  # hex transaction hash
    queried = Column(DateTime(timezone=False), nullable=False, index=True)   # when we polled the source
    received = Column(DateTime(timezone=False), nullable=False, index=True)  # when the tx was first seen
    fee = Column(Numeric(precision=24, scale=12), nullable=False)
    size = Column(Integer, nullable=False)     # presumably bytes — confirm against the data source
    inputs = Column(Integer, nullable=False)
    outputs = Column(Integer, nullable=False)
    ring = Column(Integer, nullable=False)     # ring size — suggests a Monero-style chain; confirm
    version = Column(Integer, nullable=False)
class TxnStat(Base):
    """Aggregate statistics over all transactions seen at one query time."""
    __tablename__ = 'txnstat'
    queried = Column(DateTime(timezone=False), primary_key=True)
    txns = Column(Integer, nullable=False)  # number of transactions in this snapshot
    sumfee = Column(Numeric(precision=24, scale=12), nullable=False)
    sumsize = Column(Integer, nullable=False)
    avgsize = Column(Integer, nullable=False)
    avgfee = Column(Numeric(precision=24, scale=12), nullable=True)      # nullable: undefined when txns == 0
    avgfeeperkb = Column(Numeric(precision=24, scale=12), nullable=True)
    maxage = Column(Numeric(precision=24, scale=12), nullable=False)
def create_tables(url):
    """Create every table registered on Base.metadata at the database *url*."""
    Base.metadata.create_all(create_engine(url))
def mksession(url):
    """Open and return a new SQLAlchemy session bound to the database *url*."""
    engine = create_engine(url)
    Base.metadata.bind = engine
    session_factory = sessionmaker(bind=engine)
    return session_factory()
|
995,330 | f6bcdbd27800d94a5ab04671889ef8d5002dd70e | # coding=utf-8
"""
author: wlc
function: 微博检索业务逻辑层
"""
# 引入外部库
# 引入内部库
from src.dao.weiboDao import *
from src.entity.retrieveResult import *
class WeiboOperator:
    """Business-logic layer for Weibo retrieval."""
    def __init__ (self):
        # Intent handled by this operator
        self.intent = '微博检索'
        # Sub-intents of this operator
        self.subintent = {
            0: '微博热搜检索',
            1: '关键字检索'
        }
    def get_realtimehot (self) -> RetrieveResult:
        """
        Fetch the Weibo real-time trending ("hot search") list.
        :return: a populated RetrieveResult
        """
        # Build the retrieval-result container for this intent/sub-intent
        data = RetrieveResult(intent=self.intent, subintent=self.subintent[0])
        # Run the retrieval through the DAO layer
        data.set_data(WeiboDao.get_realtimehot_result())
        return data
|
995,331 | cd95ecdc946a70a40d6d01416fbb986a722cb2e9 | # -*- coding: utf-8 -*-
import sys
import codecs
import nltk
def tokenizza(frasi):
    """Tokenize every input sentence and return one flat list of tokens."""
    tokens = []
    for frase in frasi:
        # list.extend is linear per sentence; the original
        # `tokens = tokens + tok` re-copied the whole list every
        # iteration (quadratic overall).
        tokens.extend(nltk.word_tokenize(frase))
    return tokens
def calcoloNTokens(file, tokens): # takes the file name and its tokens
    print "Il file", file, "è lungo", len(tokens), "tokens" # prints the file length in tokens
def calcoloAvgTokens(frasi, tokens):
    """Return the mean number of tokens per sentence, rounded to 3 decimals."""
    rapporto = (len(tokens) * 1.0) / (len(frasi) * 1.0)
    return round(rapporto, 3)
def stampaAvgTokens(file, avgTok): # prints the average sentence length measured in tokens
    print "Il file", file, "ha frasi di lunghezza media di", avgTok, "tokens"
def crescitaVocabolario(file1, file2, tokens1, tokens2, x):
    """Compare how the two texts' vocabularies grow as the corpus grows by *x* tokens."""
    print "COMPARO LA GRANDEZZA DEL VOCABOLARIO DEI DUE TESTI ALL'AUMENTARE DEL CORPUS DI 1000 TOKENS:"
    interv = x # interval within which the vocabulary size is computed
    while interv < len(tokens1): # while the interval stays shorter than the text
        vocabolarioX1 = set(tokens1[0:interv]) # vocabularies of file 1 and 2 over tokens 0..interval
        vocabolarioX2 = set(tokens2[0:interv])
        print "Il file", file1, "ha", len(vocabolarioX1), "type su", interv, "tokens \------------/ il file", file2, "ha", len(vocabolarioX2), "type su", interv, "tokens" # vocabulary size of both files within the interval
        interv = interv + x # grow the interval
        if (interv >= len(tokens1)): # once the interval exceeds the text, cap it at the full text length
            print "Il file", file1, "ha", len(set(tokens1)), "type su", len(tokens1), "tokens \------------/ il file", file2, "ha", len(set(tokens2)), "type su", len(tokens2), "tokens"
def crescitaRicchezzaLessicale(file1, file2, tokens1, tokens2, x):
    """Compare the two texts' type/token ratio as the corpus grows by *x* tokens."""
    print "COMPARO LA RICCHEZZA LESSICALE DEI DUE TESTI ALL'AUMENTARE DEL CORPUS DI 1000 TOKENS:"
    interv = x # interval within which lexical richness is computed
    while interv < len(tokens1): # while the interval stays shorter than the text
        vocabolarioX1 = set(tokens1[0:interv]) # vocabularies of file 1 and 2 over tokens 0..interval
        vocabolarioX2 = set(tokens2[0:interv])
        testoX1 = tokens1[0:interv] # the slice of each text from 0 to interval
        testoX2 = tokens2[0:interv]
        print "Il file", file1, "ha una Token Type Ratio di", round(len(vocabolarioX1)*1.0/len(testoX1)*1.0, 3), "su", interv, "tokens \------------/ il file", file2, "ha una Token Type Ratio di", round(len(vocabolarioX2)*1.0/len(testoX2)*1.0, 3), "su", interv, "tokens" # lexical richness of both files within the interval
        interv = interv + x # grow the interval
        if (interv >= len(tokens1)): # once the interval exceeds the text, cap it at the full text length
            print "Il file", file1, "ha una Token Type Ratio di", round(len(set(tokens1))*1.0/len(tokens1)*1.0, 3), "su", len(tokens1), "tokens \------------/ il file", file2, "ha una Token Type Ratio di", round(len(set(tokens2))*1.0/len(tokens2)*1.0, 3), "su", len(tokens2), "tokens"
def annotazioneLinguistica(tokens): # Part-Of-Speech tagger for the input tokens
    tokensPOS = nltk.pos_tag(tokens)
    return tokensPOS # returns the (token, tag) pairs
def individuaSostantivi(tokensPos):
    """Return the number of noun tokens (Penn Treebank NN* tags) in *tokensPos*."""
    cond = ("NN", "NNS", "NNP", "NNPS")  # noun tags
    # Count directly instead of building a throwaway list of matching tags.
    return sum(1 for tok in tokensPos if tok[1] in cond)
def individuaVerbi(tokensPos):
    """Return the number of verb tokens (VB/VBD/VBG/VBN/VBZ tags) in *tokensPos*."""
    cond = ("VB", "VBD", "VBG", "VBN", "VBZ")  # verb tags
    # Count directly instead of building a throwaway list of matching tags.
    return sum(1 for tok in tokensPos if tok[1] in cond)
def individuaSVAJ(tokensPos):
    """Return the count of nouns + verbs + adverbs + adjectives in *tokensPos*."""
    cond = ("NN", "NNS", "NNP", "NNPS", "VB", "VBD", "VBG", "VBN", "VBZ",
            "RB", "WRB", "JJ", "JJR", "JJS")  # content-word tags
    # Count directly instead of building a throwaway list of matching tags.
    return sum(1 for tok in tokensPos if tok[1] in cond)
def individuaAllButP(tokensPos):
    """Return the number of tokens that are not '.' or ',' punctuation."""
    cond = (".", ",")  # punctuation tags to exclude
    # Count directly instead of building a throwaway list of matching tags.
    return sum(1 for tok in tokensPos if tok[1] not in cond)
def densitaLessicale(tokens): # computes and returns lexical density = (nouns+verbs+adverbs+adjectives) / (all tokens except '.' and ',')
    # NOTE(review): the token list is POS-tagged twice; tagging once and reusing would halve the work.
    return round( (individuaSVAJ(annotazioneLinguistica(tokens))*1.0) / (individuaAllButP(annotazioneLinguistica(tokens))*1.0), 3)
def main(file1, file2):
    """Read two UTF-8 text files and print a side-by-side linguistic comparison."""
    fileInput1 = codecs.open(file1, "r", "utf-8")
    fileInput2 = codecs.open(file2, "r", "utf-8")
    raw1 = fileInput1.read()
    raw2 = fileInput2.read()
    sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    frasi1 = sent_tokenizer.tokenize(raw1)
    frasi2 = sent_tokenizer.tokenize(raw2)
    tokens1 = tokenizza(frasi1)
    tokens2 = tokenizza(frasi2)
    avgTok1 = calcoloAvgTokens(frasi1, tokens1)
    avgTok2 = calcoloAvgTokens(frasi2, tokens2)
    vocabolario1 = set(tokens1) # vocabulary of file 1
    vocabolario2 = set(tokens2) # vocabulary of file 2
    print "CALCOLO IL NUMERO DEI TOKENS:"
    print
    calcoloNTokens(file1, tokens1) # call the helper for both files
    calcoloNTokens(file2, tokens2)
    print
    print "CONFRONTO I DUE TESTI SULLA BASE DEL NUMERO DI TOKENS:"
    print
    if len(tokens1) > len(tokens2): # compare which of the two files is longer and print the result
        print "Il file", file1, "è più lungo del file", file2
    elif len(tokens1) < len(tokens2):
        print "Il file", file2, "è più lungo del file", file1
    else:
        print "I due file sono della stessa lunghezza"
    print
    print "///////////////////////////////////////////////////////////////////////////////////////////////////"
    print
    print "CALCOLO LA LUNGHEZZA MEDIA DELLE FRASI IN TOKENS:"
    print
    stampaAvgTokens(file1, avgTok1) # call the helper for both files
    stampaAvgTokens(file2, avgTok2)
    print
    print "CONFRONTO I DUE TESTI SULLA BASE DELLA LUNGHEZZA MEDIA DELLE FRASI IN TOKENS:"
    print
    if avgTok1 > avgTok2: # compare the files on average sentence length in tokens
        print "Le frasi del file", file1, "hanno una lunghezza media maggiore di quelle del file", file2
    elif avgTok1 < avgTok2:
        print "Le frasi del file", file2, "hanno una lunghezza media maggiore di quelle del file", file1
    else:
        print "Le frasi dei due file hanno la stessa lunghezza media"
    print
    print "////////////////////////////////////////////////////////////////////////////////////////////////////"
    print
    print "CALCOLO IL VOCABOLARIO DEI DUE TESTI:"
    print # print the vocabulary size of both files
    print "Il file", file1, "ha un vocabolario di", len(vocabolario1), "tokens"
    print "Il file", file2, "ha un vocabolario di", len(vocabolario2), "tokens"
    print
    crescitaVocabolario(file1, file2, tokens1, tokens2, 1000)
    print
    print "////////////////////////////////////////////////////////////////////////////////////////////////////"
    print
    print "CALCOLO LA RICCHEZZA LESSICALE, COME TYPE TOKEN RATIO, DEI DUE TESTI:"
    print # lexical richness as type/token ratio for both files
    print "Il file", file1, "ha una Type Token Ratio di", round((len(vocabolario1)*1.0) / (len(tokens1)*1.0), 3)
    print "Il file", file2, "ha una Type Token Ratio di", round((len(vocabolario2)*1.0) / (len(tokens2)*1.0), 3)
    print
    crescitaRicchezzaLessicale(file1, file2, tokens1, tokens2, 1000) # compare lexical-richness growth for both files
    print
    print "//////////////////////////////////////////////////////////////////////////////////////////////////////"
    print
    print "CALCOLO IL RAPPORTO TRA SOSTANTIVI E VERBI:"
    print # compute and print the noun/verb ratio of both files, side by side
    print "Il file", file1, "ha un rapporto sostantivi/verbi di", round((individuaSostantivi(annotazioneLinguistica(tokens1))*1.0) / (individuaVerbi(annotazioneLinguistica(tokens1))*1.0), 3), "\------------/ il file", file2, "ha un rapporto sostantivi/verbi di", round((individuaSostantivi(annotazioneLinguistica(tokens2))*1.0) / (individuaVerbi(annotazioneLinguistica(tokens2))*1.0), 3)
    print
    print "//////////////////////////////////////////////////////////////////////////////////////////////////////"
    print
    print "COMPARO LA DENSITÀ LESSICALE DEI DUE TESTI, CALCOLATA COME (|Sostantivi|+|Verbi|+|Avverbi|+|Aggettivi|)/(TOT-( |.|+|,| ) ) :"
    print # print the lexical density of the two files
    print "Il file", file1, "ha una densità lessicale di", densitaLessicale(tokens1), "\------------/ il file", file2, "ha una densità lessicale di", densitaLessicale(tokens2)
main(sys.argv[1], sys.argv[2])
|
995,332 | 3208f27bec0c4738ed10b5e265ccb7d223301977 | """ from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
'''
# A symbol's position in the `symbols` list below is its integer id,
# so the ordering here must stay stable across training and inference.
_pad = '_'
_punctuation = ';:,.!?¡¿—…"«»“” '
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZÜÖÄabcdefghijklmnopqrstuvwxyzüöäß'
#_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
#_letters_ipa = "iyɨʉɯuɪʏʊeøɘəɵɤoɛœɜɞʌɔæɐaɶɑɒᵻʘɓǀɗǃʄǂɠǁʛpbtdʈɖcɟkɡqɢʔɴŋɲɳnɱmʙrʀⱱɾɽɸβfvθðszʃʒʂʐçʝxɣχʁħʕhɦɬɮʋɹɻjɰlɭʎʟˈˌːˑʍwɥʜʢʡɕʑɺɧʲɚ˞ɫ̩̃"
_letters_ipa = "ɰqɲɸˈʲǀʊxʡmlɹˑhœʍɪɽæɤʀɡiǁɬ˞ħefʢɕǂøɻɢɵɠʂ͡yʔːbɛjʜɚɘʌdɨrɭɗɒɦχɔɾuʕɫɖð̯ɜɧɯpɳɐʐvaɥɑʘɞʉɓzɣãⱱʋtʎnβɺ̩ʃkʑᵻʁǃɮɱəõɟoʛˌʈŋ̃csʙɴθʟʄʝwçɶʏʒ"
#_letters_ipa = "ãçõøŋœɐɔəɛɡɪʁʃʊʏʒʔː̯̃͡χ"
# Export all symbols:
symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
# Special symbol ids
SPACE_ID = symbols.index(" ")
|
995,333 | bb4312c05896fa7f035d0d61b4ab70fffbb6e972 | import os
import sprinter.lib as lib
def execute_commmon_functionality(formula_instance):
    """Apply the target directives shared by all formulas (env/rc/gui/command).

    NOTE(review): the function name keeps its historical 'commmon' typo;
    renaming would break external callers.
    """
    install_directory = formula_instance.directory.install_directory(
        formula_instance.feature_name
    )
    # Run any `command` from the install directory only if it already exists.
    cwd = install_directory if os.path.exists(install_directory) else None
    if formula_instance.target.has("env"):
        formula_instance.directory.add_to_env(formula_instance.target.get("env"))
    if formula_instance.target.has("rc"):
        formula_instance.directory.add_to_rc(formula_instance.target.get("rc"))
    if formula_instance.target.has("gui"):
        formula_instance.directory.add_to_gui(formula_instance.target.get("gui"))
    if formula_instance.target.has("command"):
        lib.call(formula_instance.target.get("command"), shell=True, cwd=cwd)
|
995,334 | 0010eebb0ea6af9802f9fb8712c7a4f3442e8eed | #interactive program to calculate compound interest
import math
def amount_CI(p , r , t , n):
    """Return the compound amount A = p * (1 + r/(100*n)) ** (n*t).

    p: principal
    r: annual interest rate in percent
    t: time in years
    n: number of compounding periods per year

    Fix: the original computed (1 + r/100*n), which multiplies the
    per-period rate by n instead of dividing by it, inflating the result.
    """
    a = p * math.pow(1.0 + r / (100.0 * n), n * t)
    return a
#Inputs taken and result
# Interactive inputs, cast to float for the arithmetic below.
principal = float(input("Enter the principal:"))
rate = float(input("Enter the rate of interest:"))
time = float(input("Enter the time period for which the interest needs to be calculated:"))
number=float(input("Enter the number of times interest i compounded in a year:"))
amount = amount_CI(principal , rate , time , number) # total compound amount
CI = amount - principal  # interest earned = amount minus principal
print( "Compound interest :",CI )
print("Amount :",amount)
|
995,335 | 6e6830b29f5b8d5aa82897abe1d3c726fbbf2807 | import logging
import os
import sys
import traceback
from cliff import app
from cliff import commandmanager
from keystoneclient import session
from keystoneclient.auth import cli
from keystoneclient.auth.identity import v3
LOG = logging.getLogger(__name__)
def env(*vars, **kwargs):
    """Return the first non-empty environment variable among *vars*.

    Falls back to the `default` keyword argument (empty string if absent).
    """
    for var_name in vars:
        found = os.environ.get(var_name)
        if found:
            return found
    return kwargs.get('default', '')
class Servizor(app.App):
CONSOLE_MESSAGE_FORMAT = '%(levelname)s: %(message)s'
def __init__(self):
super(Servizor, self).__init__(
description='Servizor creates services and endpoints',
version='0.1',
command_manager=commandmanager.CommandManager('servizor.cmd'),
)
self.log = LOG
def initialize_app(self, argv):
self.log.debug('initialize_app')
def prepare_to_run_command(self, cmd):
self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.log.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.log.debug('got an error: %s', err)
def build_option_parser(self, description, version):
parser = super(Servizor, self).build_option_parser(
description, version)
parser.add_argument('--os-username',
default=env('OS_USERNAME'),
help='Name used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_USERNAME].')
parser.add_argument('--os-user-id',
default=env('OS_USER_ID'),
help='User ID used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_USER_ID].')
parser.add_argument('--os-user-domain-id',
default=env('OS_USER_DOMAIN_ID'),
help='Defaults to env[OS_USER_DOMAIN_ID].')
parser.add_argument('--os-user-domain-name',
default=env('OS_USER_DOMAIN_NAME'),
help='Defaults to env[OS_USER_DOMAIN_NAME].')
parser.add_argument('--os-password',
default=env('OS_PASSWORD'),
help='Password used for authentication with the '
'OpenStack Identity service. '
'Defaults to env[OS_PASSWORD].')
parser.add_argument('--os-tenant-name',
default=env('OS_TENANT_NAME'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_NAME].')
parser.add_argument('--os-tenant-id',
default=env('OS_TENANT_ID'),
help='Tenant to request authorization on. '
'Defaults to env[OS_TENANT_ID].')
parser.add_argument('--os-project-name',
default=env('OS_PROJECT_NAME'),
help='Project to request authorization on. '
'Defaults to env[OS_PROJECT_NAME].')
parser.add_argument('--os-domain-name',
default=env('OS_DOMAIN_NAME'),
help='Project to request authorization on. '
'Defaults to env[OS_DOMAIN_NAME].')
parser.add_argument('--os-domain-id',
default=env('OS_DOMAIN_ID'),
help='Defaults to env[OS_DOMAIN_ID].')
parser.add_argument('--os-project-id',
default=env('OS_PROJECT_ID'),
help='Project to request authorization on. '
'Defaults to env[OS_PROJECT_ID].')
parser.add_argument('--os-project-domain-id',
default=env('OS_PROJECT_DOMAIN_ID'),
help='Defaults to env[OS_PROJECT_DOMAIN_ID].')
parser.add_argument('--os-project-domain-name',
default=env('OS_PROJECT_DOMAIN_NAME'),
help='Defaults to env[OS_PROJECT_DOMAIN_NAME].')
parser.add_argument('--os-auth-url',
default=env('OS_AUTH_URL'),
help='Specify the Identity endpoint to use for '
'authentication. '
'Defaults to env[OS_AUTH_URL].')
parser.add_argument('--os-region-name',
default=env('OS_REGION_NAME'),
help='Specify the region to use. '
'Defaults to env[OS_REGION_NAME].')
parser.add_argument('--os-token',
default=env('OS_SERVICE_TOKEN'),
help='Specify an existing token to use instead of '
'retrieving one via authentication (e.g. '
'with username & password). '
'Defaults to env[OS_SERVICE_TOKEN].')
parser.add_argument('--os-endpoint-type',
default=env('OS_ENDPOINT_TYPE'),
help='Defaults to env[OS_ENDPOINT_TYPE].')
parser.add_argument('--os-service-type',
default=env('OS_DNS_SERVICE_TYPE', default='dns'),
help=("Defaults to env[OS_DNS_SERVICE_TYPE], or "
"'dns'"))
parser.add_argument('--insecure', action='store_true',
help="Explicitly allow 'insecure' SSL requests")
return parser
def configure_logging(self):
"""Configure logging for the app
Cliff sets some defaults we don't want so re-work it a bit
"""
if self.options.debug:
# --debug forces verbose_level 3
# Set this here so cliff.app.configure_logging() can work
self.options.verbose_level = 3
super(Servizor, self).configure_logging()
root_logger = logging.getLogger('')
# Requests logs some stuff at INFO that we don't want
# unless we have DEBUG
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.ERROR)
# Other modules we don't want DEBUG output for so
# don't reset them below
iso8601_log = logging.getLogger("iso8601")
iso8601_log.setLevel(logging.ERROR)
# Set logging to the requested level
self.dump_stack_trace = False
if self.options.verbose_level == 0:
# --quiet
root_logger.setLevel(logging.ERROR)
elif self.options.verbose_level == 1:
# This is the default case, no --debug, --verbose or --quiet
root_logger.setLevel(logging.WARNING)
elif self.options.verbose_level == 2:
# One --verbose
root_logger.setLevel(logging.INFO)
elif self.options.verbose_level >= 3:
# Two or more --verbose
root_logger.setLevel(logging.DEBUG)
requests_log.setLevel(logging.DEBUG)
if self.options.debug:
# --debug forces traceback
self.dump_stack_trace = True
def run(self, argv):
    """Run the application with *argv*.

    Delegates to cliff's App.run(); any exception that escapes is logged
    (with a full traceback when --debug was given) and mapped to exit
    status 1.
    """
    try:
        return super(Servizor, self).run(argv)
    except Exception as e:
        if not logging.getLogger('').handlers:
            logging.basicConfig()
        if self.dump_stack_trace:
            # Bug fix: traceback.format_exc() takes no exception argument;
            # it formats the exception currently being handled. Passing `e`
            # was being (mis)interpreted as the `limit` parameter.
            self.log.error(traceback.format_exc())
        else:
            self.log.error('Exception raised: ' + str(e))
        return 1
def main(argv=sys.argv[1:]):
    """Entry point: build the Servizor app and run it with the given args."""
    return Servizor().run(argv)
|
995,336 | 008553e22a98eac846a8d23965a7253291a91e3e | # AUTHOR : Karthik Shetty
# DATE : 15 / 03 / 2017
#=======================================================================
# question 1
# 12/(4+1) = 2
# integer division
#=======================================================================
#question 2
# 26**100
# output: 3142930641582938830174357788501626427282669988762475256374173175398995908420104023465432599069702289330964075081611719197835869803511992549376L
#=======================================================================
# question 3
if (0):
l = ['Monty','Python'] * 20 #20 times it will append the strings
str1 = raw_input ("enter the string")
res = str1 * 3 # 3 times the string will append
#
#=======================================================================
# question 4
if (0):
text4 = "abcabc"
print len(set(text4))
# op: 3
# ->set returns the set of charcaters used in the strings as a list
# -> len calculates length of that set list
# -> set is class, len is a builtin_function
# -> the above combination of len with set returns the number of letters used in the string(non-repeated)
#======================================================================
# question 5
if(0):
my_string = "I am writing exam"
print my_string + my_string #I am writing examI am writing exam
print my_string * 3 #I am writing examI am writing examI am writing exam
#solution:
print my_string+' '+my_string
print (my_string + ' ') * 3
#=======================================================================
# question 6
if(0):
my_sent = ['My', 'Answers']
a = ' '.join(my_sent)
print a
b = a.split(' ')
# -> join will join the string with the mentioned space while joining
# -> split will split the string
#=======================================================================
# question 7
if(0):
phrase1 = "hai"
phrase2 = "hello"
print len(phrase1+phrase2)
print len(phrase1)+len(phrase2)
#->len(phrase1+phrase2) : calculate the length after joining the string
#->len(phrase1)+len(phrase2): calculates the length individually and adds
#=======================================================================
# question 8
if(0):
text1 = "welcomE"
text2 = "globaledge"
sorted(set([text1.lower() for e in text1]))
# ['welcome']
sorted([text1.lower() for e in set(text1)])
# ['welcome', 'welcome', 'welcome', 'welcome', 'welcome', 'welcome', 'welcome']
#---------------------------------------------------------------------
# question 9
if(0):
str1 = "case checking"
str1.isupper() # False: checks whether upper
not str1.islower() #False: checks whether lower and checks for not of it
#---------------------------------------------------------------------
# question 10
if (0):
text2 = "good morning"
x = text2.split() #method1
x[-2:] #method2
#======================================================================
#question 12
if(0):
Sent = ['she', 'sells', 'sea', 'shells', 'by', 'the', 'sea', 'shore']
for words in Sent:
if words.startswith ('sh'):
print words #she shells shore
for words in Sent:
if len (words) > 4:
print words #she shells shore
#======================================================================
# question 11
if(0):
import re
text = "size prize pt path zebra"
text1 = text.split()
Speclist = []
for eachword in text1:
if eachword.endswith ('ize'):
Speclist.append (eachword)
print Speclist # op: ['size', 'prize']
for eachword in text1:
if 'z' in eachword:
Speclist.append (eachword)
print Speclist
print text1.findall
# op: ['size', 'prize', 'size', 'prize', 'zebra']
re.findall('[a-zA-Z]*pt+[a-zA-Z]',text)
# ['pt']
'''
C. Only change the sentence if 'pt' in eachword , the other same
D. if eachword.istitle ():
'''
#======================================================================
# question 13
if(0):
text1 = "sample line "
sum([len(w)for w in text1])
average = sum([len(w)for w in text]) / len(text1)
print average
#=======================================================================
# question 14
if(0):
def vocab_size(text):
count = 0
text = text.split()
# return len(text.split())
for word in text:
count += len(word)
return count
text = raw_input("enter the text:")
print vocab_size(text)
#=======================================================================
# question 15
if(0):
def fun_percent(word,text):
a = len(text.split())
print a
b = text.count(word)
print b
return ((float(b)/float(a)) * 100)
text = raw_input ("enter the text")
word = raw_input ("enter the word")
print fun_percent(word,text)
#======================================================================
#question 16
# question 16
if(1):
    text1 = "hiiii everyone"
    text2 = "hiiii everyone gud evening"
    # set(<str>) is the set of distinct characters; `<` is a proper-subset test
    x = set(text1) < set(text2)
    print x  # Python 2 print statement
'''checks whaether text1 is subset of text2 or not anhd returns boolean'''
#=======================================================================
# question 17
if(0):
s = 'colorless'
a = s[0:4]
b = s[4:]
print a + 'u' + b
#======================================================================
# question 18
if(0):
word = raw_input("enter the word")
n = word.index('-')
word = word[0:n] + word[(n+1):]
print word
#======================================================================
# question 20
if(0):
import urllib
import sys
from bs4 import BeautifulSoup
f1 = urllib.urlopen(sys.argv[1])
html = f1.read()
soup = BeautifulSoup(html,"lxml")
for script in soup(["script","style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = '\n'.join(chunk for chunk in chunks if chunk)
print text.encode("utf-8")
#======================================================================
#question 21
if(0):
words = ["good","morning","evening","night"]
print words.sort()
print sorted(words)
#======================================================================
#question 22
if(0):
# from test import msg
import test
print test.msg
#=======================================================================
#question 23
if(0):
import webbrowser
url = raw_input("enter the url:")
webbrowser.open(url)
#=======================================================================
|
995,337 | 40560f875a86eaeef63c70b172b3a44e804c4d24 | import typing
from ..._auxiliary_lib import HereditaryStratigraphicArtifact
from ...juxtaposition import calc_rank_of_first_retained_disparity_between
from ._calc_rank_of_earliest_detectable_mrca_between import (
calc_rank_of_earliest_detectable_mrca_between,
)
def does_have_any_common_ancestor(
    first: HereditaryStratigraphicArtifact,
    second: HereditaryStratigraphicArtifact,
    confidence_level: float = 0.95,
) -> typing.Optional[bool]:
    """Decide whether first and second show evidence of common ancestry.

    Returns None when too few common ranks are retained to detect an MRCA
    at the requested confidence; otherwise True when common ancestry is
    evidenced and False when it is not.

    Note that stratum retention policies are strictly required to
    permanently retain the most ancient stratum.

    Parameters
    ----------
    confidence_level : float, optional
        The probability that we will correctly conclude no common ancestor
        is shared with second if, indeed, no common ancestor is actually
        shared. Default 0.95.

    See Also
    --------
    does_definitively_have_no_common_ancestor :
        Can we definitively conclude that first and second share no common
        ancestor?
    """
    earliest_detectable = calc_rank_of_earliest_detectable_mrca_between(
        first,
        second,
        confidence_level=confidence_level,
    )
    if earliest_detectable is None:
        # Not enough overlapping retained strata to ever resolve an MRCA.
        return None

    disparity_rank = calc_rank_of_first_retained_disparity_between(
        first,
        second,
        confidence_level=confidence_level,
    )
    # No retained disparity anywhere means the records agree over every
    # observable rank -- evidence of shared ancestry.
    if disparity_rank is None:
        return True
    return disparity_rank > 0
|
995,338 | 4cf66651d5ddc889fbee3a5f7b3a2641d239bdd3 | # import requests
import pandas
from bs4 import BeautifulSoup
# Parse the saved listing page and flatten each property row into a CSV record.
with open(r"C:\Users\333051\Documents\Udemy\Python Mega Course\app7-webscraping\website\RockSprings.html") as file:
    r = file.read()

soup = BeautifulSoup(r, "html.parser")

ALL = soup.find_all("div", {"class":"propertyRow"})

l = []
for item in ALL:
    d = {}
    d["Price"] = item.find("h4", {"class":"propPrice"}).text.replace("\n", "").replace(" ", "")
    # NOTE(review): "Addresss" is misspelled but kept so the CSV header stays stable.
    d["Addresss"] = item.find_all("span", {"class":"propAddressCollapse"})[0].text
    d["Locality"] = item.find_all("span", {"class":"propAddressCollapse"})[1].text
    # item.find(...) returns None when a tag is absent, making the chained
    # .find("b") raise AttributeError -- catch exactly that, not a bare except.
    try:
        d["Beds"] = item.find("span", {"class":"infoBed"}).find("b").text
    except AttributeError:
        d["Beds"] = None
    try:
        d["Full Baths"] = item.find("span", {"class":"infoValueFullBath"}).find("b").text
    except AttributeError:
        d["Full Baths"] = None
    try:
        d["Half Baths"] = item.find("span", {"class":"infoValueHalfBath"}).find("b").text
    except AttributeError:
        d["Half Baths"] = None
    try:
        d["Sq Ftg"] = item.find("span", {"class":"infoSqFt"}).find("b").text
    except AttributeError:
        d["Sq Ftg"] = None
    try:
        for column_group in item.find_all("div", {"class":"columnGroup"}):
            # Bug fix: the original passed the set {"class","featureGroup"}
            # instead of an attrs dict, so featureGroup spans never matched
            # and "Lot Size" was never extracted.
            for feature_group, feature_name in zip(column_group.find_all("span", {"class":"featureGroup"}), column_group.find_all("span", {"class":"featureName"})):
                if "Lot Size" in feature_group.text:
                    d["Lot Size"] = feature_name.text
    except AttributeError:
        d["Lot Size"] = None
    l.append(d)

df = pandas.DataFrame(l)
df.to_csv("Century.csv")
|
995,339 | 7a32e37dd9fd101b6355361077b7b041d0d1a756 | import config
from selenium import webdriver
from selenium.webdriver.common import action_chains
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options as ChromeOptions
import time
class BaseFixture:
    """Common scaffolding for selenium-based fixtures.

    Creates a webdriver for the requested browser, applies a 30s implicit
    wait, and exposes an ActionChains helper. Subclasses set ``self.target``.
    """

    def __init__(self, browser):
        """Create a webdriver; *browser* is 'chrome', 'firefox', 'ie' or 'opera'."""
        if browser == "chrome":
            chrome_options = ChromeOptions()
            self.driver = webdriver.Chrome(executable_path=config.chromedriver_path, options=chrome_options)
        elif browser == "firefox":
            self.driver = webdriver.Firefox(executable_path=config.geckodriver_path)
        elif browser == "ie":
            self.driver = webdriver.Ie(executable_path=config.iedriver_path)
        elif browser == "opera":
            self.driver = webdriver.Opera(executable_path=config.operadriver_path)
        else:
            # Previously an unknown browser name left self.driver unset and the
            # next statement died with a confusing AttributeError; fail fast.
            raise ValueError("unsupported browser: %r" % (browser,))
        self.driver.implicitly_wait(30)
        self.actions = action_chains.ActionChains(self.driver)

    def open_page(self):
        """Navigate to self.target (defined by subclasses)."""
        self.driver.get(self.target)

    def destroy(self):
        """Close the browser and end the webdriver session."""
        self.driver.quit()
995,340 | 297f17308e73d14bc3f831194ae3d120ae5ba566 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 08:00:59 2020
@author: shrutikshirsagar
"""
from __future__ import print_function
import os
import numpy as np
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Masking, LSTM, TimeDistributed, Bidirectional
from keras.optimizers import RMSprop
from calc_scores import calc_scores
import pandas as pd
from numpy.random import seed
from tensorflow import set_random_seed
# Helper functions
def get_num_lines(filename, skip_header=False):
    """Count the lines in *filename*; one less when skip_header is set."""
    count = -1 if skip_header else 0
    with open(filename, 'r', encoding='ISO-8859-1') as handle:
        for _ in handle:
            count += 1
    return count
def get_num_columns(filename, delim=';', skip_header=False):
    """Return the number of feature columns in a csv file.

    The first two columns (instance name, timestamp) are not counted.
    """
    with open(filename, 'r', encoding='ISO-8859-1') as handle:
        if skip_header:
            next(handle)
        line = next(handle)
    first = line.find(delim) + 1
    second = line[first:].find(delim) + 1 + first
    values = np.fromstring(line[second:], dtype=float, sep=delim)
    return len(values)
def read_csv(filename, delim=';', skip_header=False):
    """Read a csv file (delimiter *delim*) into a float array.

    The first two columns (instance name, timestamp) are discarded; the
    header line is skipped when skip_header=True.
    """
    rows = get_num_lines(filename, skip_header)
    cols = get_num_columns(filename, delim, skip_header)
    data = np.empty((rows, cols), float)
    with open(filename, 'r', encoding='ISO-8859-1') as handle:
        if skip_header:
            next(handle)
        for row, line in enumerate(handle):
            first = line.find(delim) + 1
            second = line[first:].find(delim) + 1 + first
            data[row, :] = np.fromstring(line[second:], dtype=float, sep=delim)
    return data
def load_features(path_features='../Prosody/', partition='Train_DE', num_inst=34, max_seq_len=1768):
    """Load per-instance csv feature files into one (num_inst, max_seq_len,
    num_features) array; long sequences are cropped, short ones zero-padded."""
    skip_header = False # AVEC 2018 XBOW feature files
    num_features = get_num_columns(path_features + '/' + partition + '_01.csv', delim=';', skip_header=skip_header) # check first
    features = np.empty((num_inst, max_seq_len, num_features))
    for n in range(0, num_inst):
        # Files are named <partition>_01.csv .. <partition>_NN.csv
        F = read_csv(path_features + '/' + partition + '_' + str(n+1).zfill(2) + '.csv', delim=';', skip_header=skip_header)
        print(F)
        print(path_features)
        if F.shape[0]>max_seq_len:
            F = F[:max_seq_len,:] # cropping
        features[n,:,:] = np.concatenate((F, np.zeros((max_seq_len - F.shape[0], num_features)))) # zero padding
    return features
def load_labels(path_labels='../labels/', partition='Train_DE', num_inst=34, max_seq_len=1768, targets=[0,1,2]):
    """Load gold-standard annotations for one partition.

    Returns, per target dimension, both the original-length label sequences
    (list of 2-D arrays) and a zero-padded (num_inst, max_seq_len, 1) array.
    NOTE(review): `targets=[0,1,2]` is a shared mutable default; it is only
    read here -- keep it that way.
    """
    # targets=[0,1,2]: 0: arousal, 1: valence, 2: liking/likability
    skip_header = False # AVEC 2018 XBOW labels files
    num_labels = len(targets)
    labels_original = []
    labels_padded = []
    for t in targets:
        labels_original_t = []
        labels_padded_t = np.empty((num_inst, max_seq_len, 1))
        for n in range(0, num_inst):
            yn = read_csv(path_labels + partition + '_' + str(n+1).zfill(2) + '.csv', skip_header=skip_header)
            yn = yn[:,t].reshape((yn.shape[0], 1)) # select only target dimension and reshape to 2D array
            # original length
            labels_original_t.append(yn)
            # padded to maximum length
            if yn.shape[0] > max_seq_len:
                yn = yn[:max_seq_len]
            #print(yn.shape)
            labels_padded_t[n,:,:] = np.concatenate((yn, np.zeros((max_seq_len - yn.shape[0], 1)))) # zero padding
            #print(labels_padded_t.shape)
        labels_original.append(labels_original_t)
        labels_padded.append(labels_padded_t)
    return labels_original, labels_padded
def load_CES_data(path, use_audio=True, use_visual=False, use_linguistic=False, targets=[0,1,2]):
    """Assemble AVEC2018-CES train/devel tensors.

    Enabled modalities are concatenated along the feature axis; returns the
    feature tensors, padded labels, and the original-length devel labels.
    """
    num_train_DE = 34 # number of recordings
    num_devel_DE = 14
    max_seq_len = 1768 # maximum number of labels
    # Initialise numpy arrays
    train_DE_x = np.empty((num_train_DE, max_seq_len, 0))
    devel_DE_x = np.empty((num_devel_DE, max_seq_len, 0))
    if use_audio:
        # Audio features are read from the caller-supplied folder `path`.
        train_DE_x = np.concatenate( (train_DE_x, load_features(path_features= path, partition='Train_DE', num_inst=num_train_DE, max_seq_len=max_seq_len) ), axis=2)
        devel_DE_x = np.concatenate( (devel_DE_x, load_features(path_features= path, partition='Devel_DE', num_inst=num_devel_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_DE_x = np.concatenate( (test_DE_x, load_features(path_features='../xbow_prosody77/', partition='Test_DE', num_inst=num_test_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_HU_x = np.concatenate( (test_HU_x, load_features(path_features='../xbow_prosody77/', partition='Test_HU', num_inst=num_test_HU, max_seq_len=max_seq_len) ), axis=2)
    if use_visual:
        # Visual/linguistic paths are hard-coded relative to the working dir.
        train_DE_x = np.concatenate( (train_DE_x, load_features(path_features='../Visual_features_500_xbow/', partition='Train_DE', num_inst=num_train_DE, max_seq_len=max_seq_len) ), axis=2)
        devel_DE_x = np.concatenate( (devel_DE_x, load_features(path_features='../Visual_features_500_xbow/', partition='Devel_DE', num_inst=num_devel_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_DE_x = np.concatenate( (test_DE_x, load_features(path_features='../xvod_prosody77/', partition='Test_DE', num_inst=num_test_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_HU_x = np.concatenate( (test_HU_x, load_features(path_features='../xvod_prosody77/', partition='Test_HU', num_inst=num_test_HU, max_seq_len=max_seq_len) ), axis=2)
    if use_linguistic:
        train_DE_x = np.concatenate( (train_DE_x, load_features(path_features='../text_features_xbow_6s/', partition='Train_DE', num_inst=num_train_DE, max_seq_len=max_seq_len) ), axis=2)
        devel_DE_x = np.concatenate( (devel_DE_x, load_features(path_features='../text_features_xbow_6s/', partition='Devel_DE', num_inst=num_devel_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_DE_x = np.concatenate( (test_DE_x, load_features(path_features='../linguistic_features_xbow/', partition='Test_DE', num_inst=num_test_DE, max_seq_len=max_seq_len) ), axis=2)
        #test_HU_x = np.concatenate( (test_HU_x, load_features(path_features='../linguistic_features_xbow/', partition='Test_HU', num_inst=num_test_HU, max_seq_len=max_seq_len) ), axis=2)
    _ , train_DE_y = load_labels(path_labels='../labels/', partition='Train_DE', num_inst=num_train_DE, max_seq_len=max_seq_len, targets=targets)
    devel_DE_labels_original, devel_DE_y = load_labels(path_labels='../labels/', partition='Devel_DE', num_inst=num_devel_DE, max_seq_len=max_seq_len, targets=targets)
    return train_DE_x, train_DE_y, devel_DE_x, devel_DE_y, devel_DE_labels_original
def emotion_model(max_seq_len, num_features, learning_rate, num_units_1, num_units_2, bidirectional, dropout, num_targets):
    """Build and compile a 2-layer (B)LSTM with one linear time-distributed
    Dense(1) head per target, trained with the CCC-based loss."""
    # Input layer
    inputs = Input(shape=(max_seq_len,num_features))
    # Masking zero input - shorter sequences
    net = Masking()(inputs)
    # 1st layer
    if bidirectional:
        net = Bidirectional(LSTM( num_units_1, return_sequences=True, dropout=dropout, recurrent_dropout=dropout))(net)
    else:
        net = LSTM(num_units_1, return_sequences=True, dropout=dropout, recurrent_dropout=dropout)(net)
    # 2nd layer
    if bidirectional:
        net = Bidirectional(LSTM( num_units_2, return_sequences=True, dropout=dropout, recurrent_dropout=dropout ))(net)
    else:
        net = LSTM(num_units_2, return_sequences=True, dropout=dropout, recurrent_dropout=dropout)(net)
    # Output layer
    outputs = []
    out1 = TimeDistributed(Dense(1))(net) # linear activation
    outputs.append(out1)
    if num_targets>=2:
        out2 = TimeDistributed(Dense(1))(net) # linear activation
        outputs.append(out2)
    if num_targets==3:
        out3 = TimeDistributed(Dense(1))(net) # linear activation
        outputs.append(out3)
    # Create and compile model
    rmsprop = RMSprop(lr=learning_rate)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=rmsprop, loss=ccc_loss) # CCC-based loss function
    return model
def main():
    """Sweep every feature folder under *pathin*: train/evaluate the LSTM and
    collect the best devel CCC per folder into output_df7.csv.
    NOTE(review): loop nesting reconstructed from context -- verify against
    the original indentation."""
    pathin = "/Users/shrutikshirsagar/Documents/LSTM_experiments/data"
    output_fin = np.empty((0,4))
    for dirname in os.listdir(pathin):
        new_path = os.path.join(pathin, dirname)
        #print('new_path', new_path )
        folder_audio_features = new_path
        #print('folder_audio', folder_audio_features)
        path_output = 'predictions/' # To store the predictions on the test partitions
        # Modalities
        use_audio = True
        use_visual = False
        use_linguistic = False
        path = folder_audio_features
        print('path',path)
        # Neural net parameters
        batch_size = 34 # Full-batch: 34 sequences
        learning_rate = 0.001 # default is 0.001
        num_iter = 3 # Number of Iterations
        num_units_1 = 64 # Number of LSTM units in LSTM layer 2
        num_units_2 = 32 # Number of LSTM units in LSTM layer 2
        bidirectional = False # True/False
        dropout = 0.1 # Dropout
        # Targets
        targets = [0,1,2] # List of targets: 0=arousal, 1=valence, 2=liking
        shift_sec = 2.0 # Shift of annotations for training (in seconds)
        ##
        target_names = {0: 'arousal', 1: 'valence', 2: 'liking'}
        inst_per_sec = 10 # 100ms hop size
        # Set seeds to make results reproducible
        # (Note: Results might be different from those reported by the Organisers as seeds also training depends on hardware!)
        seed(1)
        set_random_seed(2)
        num_targets = len(targets) # same for all Y
        shift = int(np.round(shift_sec*inst_per_sec))
        # Load AVEC2018-CES data
        print('Loading data ...')
        train_x, train_y, devel_x, devel_y, devel_labels_original = load_CES_data(path, use_audio, use_visual, use_linguistic, targets)
        num_train = train_x.shape[0]
        num_devel = devel_x.shape[0]
        max_seq_len = train_x.shape[1] # same for all partitions
        num_features = train_x.shape[2]
        print(' ... done')
        # Shift labels to compensate annotation delay
        print('Shifting labels to the front for ' + str(shift_sec) + ' seconds ...')
        for t in range(0, num_targets):
            train_y[t] = shift_labels_to_front(train_y[t], shift)
            devel_y[t] = shift_labels_to_front(devel_y[t], shift)
        print(' ... done')
        # Create model
        model = emotion_model(max_seq_len, num_features, learning_rate, num_units_1, num_units_2, bidirectional, dropout, num_targets)
        # print(model.summary())
        # Train and evaluate model
        ccc_devel_best = np.zeros(num_targets)
        print('ccc', ccc_devel_best)
        for iteration in range(num_iter):
            print('Iteration: ' + str(iteration))
            model.fit(train_x, train_y, batch_size=batch_size, epochs=1) # Evaluate after each epoch
            # Evaluate on development partition
            ccc_iter = evaluate_devel(model, devel_x, devel_labels_original, shift, targets)
            # Print results
            print('CCC Devel (', end='')
            for t in range(0, num_targets):
                print(target_names[targets[t]] + ',', end='')
            print('): ' + str(np.round(ccc_iter*1000)/1000))
            # Get predictions on test (and shift back) if CCC on Devel improved
            for t in range(0, num_targets):
                if ccc_iter[t] > ccc_devel_best[t]:
                    ccc_devel_best[t] = ccc_iter[t]
        print('CCC Devel best (', end='')
        for t in range(0, num_targets):
            print(target_names[targets[t]] + ',', end='')
        print('): ' + str(np.round(ccc_devel_best*1000)/1000))
        folder_name = folder_audio_features
        out_vec=np.hstack((folder_name, (np.round(ccc_devel_best*1000)/1000)))
        print  # NOTE(review): bare `print` is a no-op reference; probably meant print()
        output_fin=np.vstack((output_fin,out_vec))
        # Re-written each folder so partial results survive a crash.
        df=pd.DataFrame(output_fin)
        df.to_csv('output_df7.csv', index=None)
def evaluate_devel(model, devel_x, label_devel, shift, targets):
    """Predict on the devel partition and return one CCC score per target."""
    num_targets = len(targets)
    CCC_devel = np.zeros(num_targets)
    # Get predictions
    pred_devel = model.predict(devel_x)
    # In case of a single target, model.predict() does not return a list, which is required
    if num_targets==1:
        pred_devel = [pred_devel]
    for t in range(0,num_targets):
        # Shift predictions back in time (delay)
        pred_devel[t] = shift_labels_to_back(pred_devel[t], shift)
        CCC_devel[t] = evaluate_partition(pred_devel[t], label_devel[t])
    return CCC_devel
def evaluate_partition(pred, gold):
    """Concatenate all sequences (predictions cropped to each gold length)
    and return the single global CCC over the concatenation."""
    # pred: np.array (num_seq, max_seq_len, 1)
    # gold: list (num_seq) - np.arrays (len_original, 1)
    pred_all = np.array([])
    gold_all = np.array([])
    for n in range(0, len(gold)):
        # cropping to length of original sequence
        len_original = len(gold[n])
        pred_n = pred[n,:len_original,0]
        # global concatenation - evaluation
        pred_all = np.append(pred_all, pred_n.flatten())
        gold_all = np.append(gold_all, gold[n].flatten())
    ccc, _, _ = calc_scores(gold_all,pred_all)
    return ccc
def shift_labels_to_front(labels, shift=0):
    """Advance *labels* along the time axis by *shift* steps, zero-padding the tail.

    labels: array of shape (batch, time, dims).
    """
    batch, _, dims = labels.shape
    tail = np.zeros((batch, shift, dims))
    return np.concatenate((labels[:, shift:, :], tail), axis=1)
def shift_labels_to_back(labels, shift=0):
    """Delay *labels* along the time axis by *shift* steps, zero-padding the front.

    labels: array of shape (batch, time, dims).
    """
    batch, length, dims = labels.shape
    lead = np.zeros((batch, shift, dims))
    return np.concatenate((lead, labels[:, :length - shift, :]), axis=1)
def ccc_loss(gold, pred): # Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
    """Return 1 - CCC(gold, pred), computed per sequence over the time axis."""
    # input (num_batches, seq_len, 1)
    gold = K.squeeze(gold, axis=-1)
    pred = K.squeeze(pred, axis=-1)
    gold_mean = K.mean(gold, axis=-1, keepdims=True)
    pred_mean = K.mean(pred, axis=-1, keepdims=True)
    covariance = (gold-gold_mean)*(pred-pred_mean)
    gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
    pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
    # NOTE(review): K.common.epsilon() only exists in older Keras releases;
    # on current versions this is K.epsilon() -- confirm the pinned version.
    ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
    ccc_loss = K.constant(1.) - ccc
    return ccc_loss
# Script entry point: run the full training/evaluation sweep.
if __name__ == '__main__':
    main()
    #plot baseline and prediction
    #plt.plot(iteration, ccc_Devel)
|
995,341 | 07f8fcb27290b4f6d759f99931bae15d1e4148f7 | import unittest
import requests
from pathlib import Path
import json
import mysql.connector
from common import (
HTTP_API_ROOT,
CONFIG_PATH,
run_environment
)
from http_test_helpers import (
wait_predictor_learn,
check_predictor_exists,
check_predictor_not_exists,
check_ds_not_exists,
check_ds_exists,
check_ds_analyzable
)
# +++ define test data
TEST_DATASET = 'us_health_insurance'
# Column(s) the predictor is trained to predict, with their python types.
TO_PREDICT = {
    # 'charges': float,
    'smoker': str
}
# Example "when" condition sent to the /predict endpoint.
CONDITION = {
    'age': 20,
    'sex': 'female'
}
# ---

TEST_DATA_TABLE = TEST_DATASET
TEST_PREDICTOR_NAME = f'{TEST_DATASET}_predictor'

TEST_INTEGRATION = 'test_integration'
TEST_DS = 'test_ds'
TEST_DS_CSV = 'test_ds_csv'
TEST_PREDICTOR = 'test_predictor'

# Populated from the config file in UserFlowTest_1.setUpClass.
config = {}
def query(q, as_dict=False, fetch=False):
    """Execute *q* against the configured default_mariadb database.

    Returns the fetched rows when fetch=True, otherwise True.
    The connection is always closed, even if execution raises.
    """
    con = mysql.connector.connect(
        host=config['integrations']['default_mariadb']['host'],
        port=config['integrations']['default_mariadb']['port'],
        user=config['integrations']['default_mariadb']['user'],
        passwd=config['integrations']['default_mariadb']['password']
    )
    try:
        cur = con.cursor(dictionary=as_dict)
        cur.execute(q)
        res = True
        if fetch:
            res = cur.fetchall()
        con.commit()
    finally:
        # Bug fix: previously the connection leaked whenever
        # execute()/fetchall() raised.
        con.close()
    return res
def fetch(q, as_dict=True):
    """Run *q* and return its rows (dict-shaped rows by default)."""
    return query(q, as_dict=as_dict, fetch=True)
class UserFlowTest_1(unittest.TestCase):
    """End-to-end user flow against a running MindsDB instance:
    create integration -> create datasource -> train and query a
    predictor -> delete it. Tests are order-dependent (test_1.., test_3..)."""

    def get_tables_in(self, schema):
        # Flatten `show tables from <schema>` into plain table names.
        test_tables = fetch(f'show tables from {schema}', as_dict=False)
        return [x[0] for x in test_tables]

    @classmethod
    def setUpClass(cls):
        # Boot the mysql+http APIs once for the whole class, then load the
        # on-disk config into the module-level `config` dict.
        run_environment(
            apis=['mysql', 'http']
        )

        config.update(
            json.loads(
                Path(CONFIG_PATH).read_text()
            )
        )

    def test_1_create_integration_via_http(self):
        '''
        check integration is not exists
        create integration
        check new integration values
        '''
        res = requests.get(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}')
        assert res.status_code == 404

        test_integration_data = {}
        test_integration_data.update(config['integrations']['default_mariadb'])
        test_integration_data['publish'] = True
        test_integration_data['database_name'] = TEST_INTEGRATION
        res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
        assert res.status_code == 200

        res = requests.get(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}')
        assert res.status_code == 200
        test_integration = res.json()
        # The API must never echo the password back.
        assert test_integration['password'] is None
        for key in ['user', 'port', 'host', 'publish']:
            assert test_integration[key] == test_integration_data[key]

    def test_3_create_ds_from_sql_by_http(self):
        '''
        check is no DS with this name
        create DS
        analyse it
        '''
        check_ds_not_exists(TEST_DS)

        data = {
            "integration_id": TEST_INTEGRATION,
            "name": TEST_DS,
            "query": f"select * from test_data.{TEST_DATASET} limit 100;"
        }
        res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
        assert res.status_code == 200

        check_ds_exists(TEST_DS)
        check_ds_analyzable(TEST_DS)

    def test_4_create_and_query_predictors(self):
        '''
        check predictor not exists
        learn predictor
        query
        '''
        def test_predictor(predictior_name, datasource_name):
            check_predictor_not_exists(predictior_name)

            data = {
                'to_predict': list(TO_PREDICT.keys()),
                'data_source_name': datasource_name
            }
            res = requests.put(f'{HTTP_API_ROOT}/predictors/{predictior_name}', json=data)
            assert res.status_code == 200

            # wait for https://github.com/mindsdb/mindsdb/issues/1459
            import time
            time.sleep(5)

            check_predictor_exists(predictior_name)
            import time
            time.sleep(10)

            wait_predictor_learn(predictior_name)

            res = requests.post(
                f'{HTTP_API_ROOT}/predictors/{predictior_name}/predict',
                json={'when': CONDITION}
            )
            assert res.status_code == 200
            res = res.json()
            assert len(res) == 1
            res = res[0]
            for field in TO_PREDICT:
                assert field in res
                assert res[field]['predicted_value'] is not None
                assert res[field]['confidence'] > 0

        test_predictor(TEST_PREDICTOR, TEST_DS)

    def test_5_delete(self):
        res = requests.delete(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}')
        assert res.status_code == 200
        check_predictor_not_exists(TEST_PREDICTOR)

        # for ds_name in [TEST_DS_CSV, TEST_DS]:
        #     res = requests.delete(f'{HTTP_API_ROOT}/datasources/{ds_name}')
        #     assert res.status_code == 200
        #     check_ds_not_exists(ds_name)
if __name__ == "__main__":
    try:
        unittest.main(failfast=True)
        # NOTE(review): unittest.main() normally exits via SystemExit, which
        # is not an Exception subclass -- so neither print below typically
        # runs; verify whether exit=False was intended.
        print('Tests passed!')
    except Exception as e:
        print(f'Tests Failed!\n{e}')
|
995,342 | dd3441d0a4c9259b83e4afa2b462b861fb51aead | import asyncio
import functools
import time
from contextlib import contextmanager
"""
from stackoverflow: https://stackoverflow.com/q/44169998/532963
"""
def duration(func):
    """Timing decorator that accepts either a coroutine function or a
    plain callable and prints how long each call took.

    The returned wrapper is itself synchronous; when *func* is a coroutine
    function it returns an (un-awaited) timed coroutine, matching the
    original behaviour.
    """
    @contextmanager
    def timed():
        began = time.time()
        yield
        elapsed = time.time() - began
        print("{} took {:.2} seconds".format(func.__name__, elapsed))

    def call_sync(f, *args, **kwargs):
        with timed():
            return f(*args, **kwargs)

    async def call_async(f, *args, **kwargs):
        with timed():
            return await f(*args, **kwargs)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if asyncio.iscoroutinefunction(func):
            return call_async(func, *args, **kwargs)
        return call_sync(func, *args, **kwargs)

    return wrapper
class SyncAsyncDecoratorFactory:
    """Abstract base for decorators that wrap both plain functions and
    coroutine functions; subclasses override wrapper() with the logic to
    run around each call."""

    @contextmanager
    def wrapper(self, func, *args, **kwargs):
        # Subclasses put their before/after logic around the yield.
        raise NotImplementedError

    def __call__(self, func):
        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def decorated(*args, **kwargs):
                with self.wrapper(func, *args, **kwargs):
                    return await func(*args, **kwargs)
        else:
            @functools.wraps(func)
            def decorated(*args, **kwargs):
                with self.wrapper(func, *args, **kwargs):
                    return func(*args, **kwargs)
        return decorated
class duration3(SyncAsyncDecoratorFactory):
    """ decorator using class inheritance """
    @contextmanager
    def wrapper(self, func, *args, **kwargs):
        # Everything after the yield runs once the wrapped call returns,
        # so the elapsed time covers the whole invocation.
        start_ts = time.time()
        yield
        dur = time.time() - start_ts
        print(f"{func.__name__} took {dur:.2} seconds")
@duration
def main(sleep_time=0.5):
    """Demo: a plain function timed by the duration decorator."""
    print("normal function sleeps for:", sleep_time)
    time.sleep(sleep_time)
    print("normal waited")
    return
@duration
async def main_async(sleep_time=0.75):
    """Demo: a coroutine timed by the same duration decorator."""
    print("coroutine sleeps for:", sleep_time)
    await asyncio.sleep(sleep_time)
    print("coroutine waited")
    return
if __name__ == "__main__":
    main()
    # NOTE(review): asyncio.get_event_loop() for this pattern is deprecated
    # since Python 3.10; asyncio.run(main_async()) is the modern equivalent.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main_async())
    print("finished")
|
995,343 | c8a2896998d479c8d509e2ed74b634e7721ce406 | from gflags import *
if not FLAGS.has_key('fake_storage'):
DEFINE_boolean('fake_storage', False, 'Should we make real storage volumes to attach?') |
995,344 | 8e6511df08777546068029501515502ea5dee2b5 | from django.apps import apps
from django.conf import settings
# Load source app from various apps
# Caveat: Either loaded model classes have no app_label
# or app_label must be named after app name. Conflicting
# app_label creates error.
def get_model (*args):
    """Resolve Django model class(es) by name.

    Accepts either (model_name) or (app_name, model_name); model_name may
    also be a list/tuple of names, in which case a list of classes is
    returned. With a single argument the app is taken from
    settings.MODEL_APP when configured, else "nextify".
    """
    # Select app name
    if hasattr(settings, "MODEL_APP") \
    and settings.MODEL_APP in settings.INSTALLED_APPS \
    and len(args) < 2:
        app_name = settings.MODEL_APP
    elif len(args) == 2:
        app_name = args[0]
    else:
        app_name = "nextify"  # fallback app label
    if app_name.startswith("apps."):
        # apps.get_model() wants the app label, not the dotted package path
        app_name = app_name.replace("apps.", "")
    model_name = args[0] if len(args) < 2 else args[1]
    if isinstance(model_name, (list, tuple)):
        classes = []
        for i in model_name:
            classes.append(apps.get_model(
                app_name, i
            ))
        return classes
    else:
        return apps.get_model(
            app_name, model_name
        )
|
995,345 | 8f920049e555c4bac206929aa7fb3085d2317f5a | #!/usr/bin/env python
from ConfigMaster import ConfigMaster
class Params(ConfigMaster):
    """Default configuration values, parsed by ConfigMaster at runtime."""

    # NOTE(review): the string below is executable config text that
    # ConfigMaster evaluates -- keep its contents intact.
    defaultParams = """
#!/usr/bin/env python
#####################################
## GENERAL CONFIGURATION
#####################################
## debug ##
# Flag to output debugging information
debug = False
# Forecast hour
forecastHour = 3
# Email Address
emailAddy = "prestop@ucar.edu"
"""
|
def pythagoras(base, height, hypotenus):
    """Solve the side marked with the literal "x" via the Pythagorean theorem.

    Arguments are strings (as read from input()); the two known sides must
    be parseable as integers. Returns a human-readable result string.
    """
    if base == "x":
        h = int(height)
        hyp = int(hypotenus)
        return "base = " + str((hyp ** 2 - h ** 2) ** 0.5)
    if height == "x":
        b = int(base)
        hyp = int(hypotenus)
        return "height = " + str((hyp ** 2 - b ** 2) ** 0.5)
    if hypotenus == "x":
        b = int(base)
        h = int(height)
        return "Hypotenus = " + str((b ** 2 + h ** 2) ** 0.5)
    return "You know the answer!"
# Simple CLI driver: the unknown side is entered as the literal "x".
print("enter any two side of right angle triangle")
print("enter x for unknown side")
base=(input("Enter base : "))
height=(input("Enter height : "))
hypotenus=(input("Enter hypotenus : "))
print(pythagoras(base,height,hypotenus))
|
995,347 | 79e5461eed350b0e0953ca96e0b50c1e8cabc335 | # Generated by Django 3.0.8 on 2020-07-27 14:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Point the user thumbnail field's default URL at the CDN-hosted image."""

    dependencies = [
        ('cmsauth', '0003_user_thumbnail'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='thumbnail',
            field=models.URLField(default='http://qe0l8lesp.bkt.clouddn.com/1595858988624.jfif'),
        ),
    ]
|
995,348 | ab4bdb862cdba70175105c1a0337859cfd3588bf | import pydash
# NOTE(review): initialize/ArrayLV/LV are not part of the public pydash
# API -- this presumably targets a local fork of the library; confirm.
pydash.initialize(0, "")
array = pydash.ArrayLV(3)
array[0] = pydash.LV(1, "first")
# NOTE(review): bare expression; the value is discarded outside a REPL.
array[0]
|
995,349 | 724eeda2d920f7d841f4523c5d0531c066d41d09 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 13 21:06:23 2019
Need to reinstall module, at the moment I need to be in npy2pd module, same for basic plots
@author: ibladmin
"""
from npy2pd import *
from basic_plots import *
from glm import *

# Input folder with raw npy files
psy_raw = load_data('/mnt/s0/Data/Subjects_personal_project/rewblocks10070/')
psy_df = unpack(psy_raw)

# Plot psychometric data for the two reward-probability block types.
# (np presumably comes from one of the star imports above -- confirm.)
blocks = np.array([1, 0.7])
plot_psych_block (psy_df , 'rewprobabilityLeft', blocks)
bias_per_session(psy_df, 'rewprobabilityLeft', blocks)

# Plot glm
# include bias blocks only
psy_df = psy_df.loc[(psy_df['rewprobabilityLeft'] == 1) | (psy_df['rewprobabilityLeft'] == 0.7)]
# flip psy_df choice so that right is 1 (aesthetic change)
psy_df['choice'] = psy_df['choice']*-1
result, r2 = glm_logit(psy_df, sex_diff = False)
plot_glm(psy_df, result, r2)
|
995,350 | 23d04dd7342ac37bf5db0aac7ce8ac941cc0c790 | '''server 3 should recieve json data from server 2, and give CSV to server 1'''
from bottle import run, get, post, request
import time
import requests
# Start the bottle development server; reloader=True restarts on source changes.
run(host="127.0.0.1" , port=3333 , debug=True , reloader=True )
class Config(object):
    """Base Flask configuration shared by all environments."""
    DEBUG = False
    TESTING = False
    # Disable the SQLAlchemy event system to save memory.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
    """Production settings: DEBUG/TESTING stay off (inherited)."""
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@localhost:3306/flask-mysql"
class DevelopmentConfig(Config):
    """Development settings: debug on, separate local database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@localhost:3306/author-manager"
    SQLALCHEMY_ECHO = False
class TestingConfig(Config):
    """Test-suite settings.  NOTE(review): shares the production DB URI --
    confirm that tests are meant to run against flask-mysql."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:root@localhost:3306/flask-mysql"
    SQLALCHEMY_ECHO = False
995,352 | 624a98ff6996cfc51456f6a02fb269fdeedbdd06 | ##################################
# fichier mot-de-passe-du-village-validation.py
# nom de l'exercice : Mot de passe du village
# url : http://www.france-ioi.org/algo/task.php?idChapter=646&idTask=0&sTab=task&iOrder=1
# type : validation
#
# Nom du chapitre :
#
# Compétence développée :
#
# auteur :
##################################
# chargement des modules
# mettre votre code ici
# Read the village pass code from stdin and greet or reject the visitor.
codex = int(input())
message = "Bon festin !" if codex == 64741 else "Allez-vous en !"
print(message)
|
995,353 | 69295b21b8f1a7813a850f1984be5f2adae7bdf8 | import redis
import json
import sys
import mysql.connector
# Load DB credentials and open the MySQL + Redis connections.
with open('config.json', 'r') as f:
    config = json.load(f)

mydb = mysql.connector.connect(**config["mysql"])
cursor = mydb.cursor()

red = redis.Redis(host='localhost', port=6379, db=0, decode_responses=True)
pub = red.pubsub()
# Channels this worker reacts to.  NOTE(review): "getInventoryData" is
# subscribed but never handled in the listen loop below -- confirm intent.
pub.subscribe('newLogin')
pub.subscribe('loadCharacterData')
pub.subscribe('getInventoryData')
pub.subscribe('loadAllItems')

# Reusable row templates; each is reset after being written to Redis.
userData = {
    "ID" : 0,
    "familyName" : 0,
    "authToken" : 0
}

characterData = {
    "ID" : 0,
    "ownerID" : 0,
    "name" : 0,
    "level" : 0,
    "exp" : 0,
    "position" : 0
}

inventoryData = {
    "ID" : 0,
    "slots" : 0,
    "money" : 0,
    "weight" : 0,
    "maxWeight" : 0,
    "data" : 0
}
# Template for item rows.  BUG FIX: the original referenced itemsData
# before any assignment, so the first "loadAllItems" message raised
# NameError (silently swallowed by the broad except below).
itemsData = {
    "ID": 0,
    "Name": 0,
    "Stats": 0,
    "Weight": 0
}

# Event loop: react to pub/sub messages and mirror MySQL rows into Redis.
# NOTE(review): the bare redis.Redis(db=N) calls below construct throwaway
# clients and do NOT switch the database of `red`; every hmset still goes
# to db 0.  Left untouched because downstream readers may depend on db 0.
# NOTE(review): the f-string SQL below is injectable if message fields are
# attacker-controlled; prefer cursor.execute(sql, params).
for new_message in pub.listen():
    try:
        print(new_message)
        channel = new_message['channel']
        message = json.loads(new_message['data'])

        if channel == "newLogin":
            redis.Redis(db=0)
            if not red.exists(message["ID"]):
                userData["ID"] = message["ID"]
                userData["familyName"] = message["familyName"]
                userData["authToken"] = message["authToken"]
                red.hmset(str(userData["ID"]), userData)
                userData = {
                    "ID": 0,
                    "familyName": 0,
                    "authToken": 0
                }

        if channel == "loadCharacterData":
            redis.Redis(db=1)
            if not red.exists(message["ID"]):
                cursor.execute(f"SELECT ID, ownerID, Name, Level, Exp, Position FROM characters WHERE ID = '{message['charID']}'")
                result = cursor.fetchone()
                characterData["ID"] = result[0]
                characterData["ownerID"] = result[1]
                characterData["name"] = result[2]
                characterData["level"] = result[3]
                characterData["exp"] = result[4]
                characterData["position"] = result[5]
                red.hmset(str(characterData["ID"]), characterData)
                characterData = {
                    "ID": 0,
                    "ownerID": 0,
                    "name": 0,
                    "level": 0,
                    "exp": 0,
                    "position": 0
                }
            redis.Redis(db=2)
            if not red.exists(message["charID"]):
                cursor.execute(f"SELECT characterID, slots, money, weight, maxWeight, data FROM inventory WHERE characterID = '{message['charID']}'")
                # BUG FIX: fetch this query's row; the original reused the
                # stale character row from the query above.
                result = cursor.fetchone()
                # BUG FIX: map every selected column to its field (the
                # original stored `money` under "data" and dropped the rest).
                inventoryData["ID"] = result[0]
                inventoryData["slots"] = result[1]
                inventoryData["money"] = result[2]
                inventoryData["weight"] = result[3]
                inventoryData["maxWeight"] = result[4]
                inventoryData["data"] = result[5]
                red.hmset(str(inventoryData["ID"]), inventoryData)
                inventoryData = {
                    "ID": 0,
                    "slots": 0,
                    "money": 0,
                    "weight": 0,
                    "maxWeight": 0,
                    "data": 0
                }
            red.publish("loadCharacter", json.dumps(message))

        if channel == "loadAllItems":
            redis.Redis(db=9)
            cursor.execute(f"SELECT * FROM items;")
            result = cursor.fetchall()
            for item in result:
                itemsData["ID"] = item[0]
                itemsData["Name"] = item[1]
                itemsData["Stats"] = item[2]
                itemsData["Weight"] = item[3]
                red.hmset(str(itemsData["ID"]), itemsData)
                itemsData = {
                    "ID": 0,
                    "Name": 0,
                    "Stats": 0,
                    "Weight": 0
                }
    except Exception as e:
        # Best-effort worker: log and keep listening.
        print(e, "error")
|
995,354 | abef2bdca7baf32eb872c72e9ab6918b4917ed49 | from heapq import heapify, heappop
import random
from sortedcontainers import SortedDict
# 1. Heap deletion/traversal is done with a while loop.
pq = [random.randrange(1, 100) for _ in range(10)]
heapify(pq)
# Pop every element smaller than 10 off the heap.
while pq and pq[0] < 10:
    heappop(pq)

###########################################################
# 2. Deleting keys while iterating a dict.
mp = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 0: 0, -1: -1}
# list(mp) copies the keys first -- never mutate a dict while iterating it.
for key in list(mp):
    if key <= 0:
        print(mp.pop(key))

###########################################################
# 3. Remove all keys <= 0 from a SortedDict (smallest key first).
sd = SortedDict({2: 2, 1: 1, 3: 3, 4: 4, 5: 5, 0: 0, -1: -1})
while sd and sd.peekitem(0)[0] <= 0:
    print(sd.popitem(0))
|
995,355 | d9216055ccc2a16ab80893accef4bcef7a556a30 | ../hyperparameters.py |
995,356 | af1e0ee224b19a7524c979ac9ce16c456565356f | # ------------------------------------------
# Name: task_functions
# Purpose: Functions creating/managing tasks/displaying.
#
# Author: Robin Siebler
# Created: 5/6/15
# ------------------------------------------
__author__ = 'Robin Siebler'
__date__ = '5/6/15'
import arrow
import platform
import util
from tasklist import Task, TaskList
from collections import OrderedDict
from colorama import init, Fore, Back, Style
if platform.system() == 'Windows':
init()
# TODO: Colors that work on the Mac don't work very well on Windows and vice versa
# TODO: Add an ini file so the user can specify the colors to use. Point to the colorma
# TODO: page for instructions
class Functions:
def __init__(self):
"""Initialize the task list."""
self.tasklist = TaskList()
self.legend = '\nLegend: Not Due ' + Fore.CYAN + Style.BRIGHT + 'Upcoming ' + Fore.BLUE + \
Style.BRIGHT + 'Due ' + Fore.RED + Style.BRIGHT + 'Overdue ' + Fore.WHITE + Style.BRIGHT + \
Back.WHITE + 'Completed' + Fore.RESET + Style.NORMAL + Back.RESET
def show_tasks(self, tasks=None, date_format=None):
"""Display the tasks (in ID order)
:param tasks: tasks object
"""
if not tasks:
tasks = self.tasklist.tasks
if len(tasks) > 0:
template = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'
print template.format('\nID', 'Description', ' Pri', 'Due', 'Created', 'Tags')
print template.format('---', '--------------------', '---', '--------------------', '---------------',
'--------------------')
for task in tasks:
if task.priority == 'L':
priority = Fore.YELLOW + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL
elif task.priority == 'M':
priority = Fore.BLUE + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL
elif task.priority == 'H':
priority = Fore.RED + Style.BRIGHT + task.priority.center(3) + Fore.RESET + Style.NORMAL
else:
priority = ''
if task.due_date is None:
due_date = ''
else:
if date_format:
due_date = task.due_date.rsplit(' ', 1)[0].ljust(20)
else:
due_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)
if not task.completed:
today = arrow.now()
diff = arrow.get(task.due_date, task.due_date_format) - today
if diff.days >= 1 and diff.seconds > 0:
due_date = Fore.CYAN + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL
elif diff.days >= 0:
due_date = Fore.BLUE + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL
elif diff.days <= 0:
due_date = Fore.RED + Style.BRIGHT + due_date + Fore.RESET + Style.NORMAL
if date_format:
age = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone
else:
age = (arrow.get(task.creation_date, 'MM/DD/YYYY h:mm:ss A ZZ').humanize()).ljust(15)
if task.note:
desc = task.task + ' *'
else:
desc = task.task
if task.completed:
if task.priority:
priority = task.priority
else:
priority = ''
task_id = Fore.WHITE + Style.BRIGHT + Back.WHITE + str(task.id).center(3)
tags = str(task.tags) + Fore.RESET + Style.NORMAL + Back.RESET
print template.format(task_id, desc, priority, due_date, age, tags)
else:
print template.format(task.id, desc, priority, due_date, age, task.tags)
print self.legend
else:
print('\nThere are no tasks to display!\n')
def show_tasks_by_priority(self, tasks=None, date_format=None):
"""Display the tasks (in Priority order)
:param tasks: tasks object
"""
low_dict_o = OrderedDict()
med_dict_o = OrderedDict()
high_dict_o = OrderedDict()
no_dict_o = OrderedDict()
completed_dict_o = OrderedDict()
low_dict = {}
med_dict = {}
high_dict = {}
no_dict = {}
completed_dict = {}
temp_dict = {}
if not tasks:
tasks = self.tasklist.tasks
if len(tasks) > 0:
for task in tasks:
if task.due_date is None:
due_date = ''
else:
if date_format:
due_date = task.due_date.rsplit(' ', 1)[0].ljust(20)
else:
due_date = (arrow.get(task.due_date, task.due_date_format).humanize()).ljust(20)
age = (str(task.creation_date).split()[0]).ljust(15) # drop the time zone
if task.note:
desc = task.task + ' *'
else:
desc = task.task
if task.completed:
completed_dict[task.id] = task.priority, due_date, age, desc, task.tags
elif task.priority == 'L':
low_dict[task.id] = [task.priority, due_date, age, desc, task.tags]
elif task.priority == 'M':
med_dict[task.id] = [task.priority, due_date, age, desc, task.tags]
elif task.priority == 'H':
high_dict[task.id] = [task.priority, due_date, age, desc, task.tags]
else:
no_dict[task.id] = [task.priority, due_date, age, desc, task.tags]
else:
print('\nThere are no tasks to display!\n')
return
for key, value in sorted(no_dict.items(), key=lambda e: e[1][1]):
if value[1] is not '':
no_dict_o[key] = value
else:
temp_dict[key] = value
for key in temp_dict:
no_dict_o[key] = temp_dict[key]
temp_dict.clear()
for key, value in sorted(low_dict.items(), key=lambda e: e[1][1]):
if value[1] is not '':
low_dict_o[key] = value
else:
temp_dict[key] = value
for key, value in temp_dict.items():
low_dict_o[key] = value
temp_dict.clear()
for key, value in sorted(med_dict.items(), key=lambda e: e[1][1]):
if value[1] is not '':
med_dict_o[key] = value
else:
temp_dict[key] = value
for key, value in temp_dict.items():
med_dict_o[key] = value
temp_dict.clear()
for key, value in sorted(high_dict.items(), key=lambda e: e[1][1]):
if value[1] is not '':
high_dict_o[key] = value
else:
temp_dict[key] = value
for key, value in sorted(temp_dict.items(), key=lambda e: e[1][1]):
high_dict_o[key] = value
temp_dict.clear()
for key, value in sorted(completed_dict.items(), key=lambda e: e[1][1]):
if value[1] is not '':
completed_dict_o[key] = value
else:
temp_dict[key] = value
for key, value in temp_dict.items():
completed_dict_o[key] = value
temp_dict.clear()
del low_dict
del med_dict
del high_dict
del no_dict
del completed_dict
today = arrow.now()
# TODO: Figure out why the key is a tuple instead of a list
for dict in [low_dict_o, med_dict_o, high_dict_o, no_dict_o]:
for key, value in dict.items():
dict[key] = list(dict[key]) # hack - how is this key a tuple!?!
if value[0] == 'L':
dict[key][0] = Fore.YELLOW + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL
elif value[0] == 'M':
dict[key][0] = Fore.BLUE + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL
elif value[0] == 'H':
dict[key][0] = Fore.RED + Style.BRIGHT + value[0].center(3) + Fore.RESET + Style.NORMAL
else:
dict[key][0] = ''
task = self.tasklist.find_task(key)
if task.due_date:
diff = arrow.get(task.due_date, task.due_date_format) - today
if diff.days >= 1 and diff.seconds > 0:
dict[key][1] = Fore.CYAN + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL
elif diff.days >= 0:
dict[key][1] = Fore.BLUE + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL
elif diff.days <= 0:
dict[key][1] = Fore.RED + Style.BRIGHT + value[1] + Fore.RESET + Style.NORMAL
template = '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}'
print template.format('\nPri', 'Description', 'ID', 'Due', 'Created', 'Tags')
print template.format('---', '--------------------', '---', '--------------------', '---------------',
'--------------------')
if len(high_dict_o) > 0:
for key in high_dict_o:
print template.format(high_dict_o[key][0], high_dict_o[key][3], key, high_dict_o[key][1],
high_dict_o[key][2], high_dict_o[key][4])
if len(med_dict_o) > 0:
for key in med_dict_o:
print template.format(med_dict_o[key][0], med_dict_o[key][3], key, med_dict_o[key][1],
med_dict_o[key][2], med_dict_o[key][4])
if len(low_dict_o) > 0:
for key in low_dict_o:
print template.format(low_dict_o[key][0], low_dict_o[key][3], key, low_dict_o[key][1],
low_dict_o[key][2], low_dict_o[key][4])
if len(no_dict_o) > 0:
for key in no_dict_o:
print template.format(no_dict_o[key][0], no_dict_o[key][3], key, no_dict_o[key][1],
no_dict_o[key][2], no_dict_o[key][4])
completed_template = Fore.WHITE + Style.BRIGHT + Back.WHITE + '{0:^3} {1:20} {2:^3} {3:20} {4:15} {5:20}' + \
Fore.RESET + Style.NORMAL + Back.RESET
if len(completed_dict_o) > 0:
for key in completed_dict_o:
if completed_dict_o[key][0]:
priority = completed_dict_o[key][0]
else:
priority = ''
print completed_template.format(priority, completed_dict_o[key][3], key, completed_dict_o[key][1],
completed_dict_o[key][2], completed_dict_o[key][4])
print self.legend
def show_task(self, task_id):
"""Display the specified task, including its notes, if any.
:param str task_id: the task_id of the task.
"""
task_id = self._validate_task_id(task_id)
if task_id:
task = self.tasklist.find_task(task_id)
if task:
if task.priority == 'L':
priority = Fore.YELLOW + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL
elif task.priority == 'M':
priority = Fore.BLUE + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL
elif task.priority == 'H':
priority = Fore.RED + Style.BRIGHT + ' ' + task.priority + ' ' + Fore.RESET + Style.NORMAL
else:
priority = ''
template = '{0:^3} {1:^3} {2:20} {3:40}'
print template.format('\nID', ' Pri', 'Description', 'Note')
print template.format('---', '---', '--------------------',
'----------------------------------------')
print template.format(task.id, priority, task.task, task.note)
def search_tasks(self, search_string):
"""Search the task list for a task whose contents contains the user provided search string.
:param str search_string: the string to search for.
"""
tasks = self.tasklist.search(search_string.lower())
if tasks:
self.show_tasks(tasks)
else:
print('\nThere were no tasks containing "{}".\n'.format(search_string))
def add_task(self, task, priority=None, due_date=None, tags=None, note=None):
"""Add a new task."""
self.tasklist.add_task(task, priority, due_date, tags, note)
def delete_task(self, task_id):
"""Delete a task."""
task_id = self._validate_task_id(task_id)
if task_id:
self.tasklist.delete_task(task_id)
self.tasklist.renumber_tasks()
print('Task ' + task_id + ' was deleted.')
def modify_task(self, task_id, task_=None, completed=False, priority=None, due_date=None, note=None, tags=None, time=None):
"""Modify a task."""
task_id = self._validate_task_id(task_id)
if task_id:
task = self.tasklist.find_task(task_id)
if task:
print 'Modifying task ' + str(task_id) + ': ' + task.task
if task_:
task.task = task_
elif priority:
task.priority = priority
elif due_date:
if isinstance(due_date, list):
task.due_date = due_date[0]
task.due_date_format = due_date[1]
else:
task.due_date = due_date
elif note:
task.note = note
elif tags:
task.tags = tags
elif time:
time_str = time.split(' ')[0]
time_hour, time_minute = time_str.split(':')
if 'PM' in time:
time_hour = int(time_hour) + 12
due_date = arrow.get(task.due_date, task.due_date_format)
due_date = due_date.replace(hour=time_hour, minute=int(time_minute))
task.due_date = due_date.format(task.due_date_format)
elif completed:
task.completed = True
print 'Modified task ' + str(task_id)
def load_tasks(self, task_file):
"""Load the task file and retrieve the tasks."""
self.tasklist.tasks = util.load(task_file)
Task.last_id = len(self.tasklist.tasks)
def save_tasks(self, task_file):
"""Save the task file."""
util.save(self.tasklist.tasks, task_file)
def _validate_task_id(self, task_id):
"""Validate a task id.
:return: None if an invalid ID was provided, otherwise a string containing the valid task id.
"""
if task_id.isdigit() and int(task_id) <= len(self.tasklist.tasks):
return task_id
else:
print('{} is not an existing task!'.format(task_id))
return None
|
995,357 | 144db73eb16b40d8070c56aa748bd844accdf006 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from mpi.Application import Application
class ShockApp(Application):
    """Pyre/MPI application coupling a rigid solid solver with a fluid
    pulse solver through an ELC exchanger.

    Only the wiring is visible here; solver and coupler semantics live in
    the `rigid`, `pulse` and `elc` packages.
    """

    class Inventory(Application.Inventory):
        # Declarative configuration: each property/facility can be set from
        # the command line or pyre configuration files.

        import pyre.inventory

        # number of time steps to march
        steps = pyre.inventory.int("steps", default=10)

        # geometry
        modeller = pyre.inventory.facility("modeller", default="cube")

        # surface mesher
        import acis
        surfaceMesher = pyre.inventory.facility("surfaceMesher", factory=acis.surfaceMesher)

        # machine management
        layout = pyre.inventory.facility("layout", default="coupled")

        # simulation control
        import pyre.simulations
        controller = pyre.inventory.facility("controller", factory=pyre.simulations.controller)

        # solvers
        import rigid
        solid = pyre.inventory.facility('solid', family='solver', factory=rigid.solver)

        import pulse
        fluid = pyre.inventory.facility('fluid', family='solver', factory=pulse.solver)

        import elc
        coupler = pyre.inventory.facility('coupler', factory=elc.mpiExchanger)

    def main(self, *args, **kwds):
        """Lay out the machine, wire the coupler, launch and march."""
        # configure the parallel machine
        self.layout.layout(self)

        # print some information
        self.reportConfiguration()

        # initialize the coupler
        self.coupler.initialize(self) # uses the world communicator for the exchange by default

        # launch the application
        self.controller.solver = self.layout.solver
        self.controller.launch(self)

        # compute the specified number of steps
        self.controller.march(steps=self.inventory.steps)
        return

    def reportConfiguration(self):
        """Per-rank diagnostics hooks (journal channels are all disabled)."""
        if self.layout.rank == 0:
            import journal
            # journal.debug("elc.memory").activate()
            # journal.debug("elc.exchange").activate()
            # journal.debug("pulse.generators").activate()
        elif self.layout.rank == 1:
            import journal
            # journal.debug("elc.memory").activate()
            # journal.debug("elc.exchange").activate()
            # journal.debug("pulse.generators").activate()
            # journal.debug("rigid.monitoring").activate()
            # journal.debug("rigid.timeloop").activate()
            # journal.debug("pulse.monitoring").activate()
            # journal.debug("pulse.timeloop").activate()
        # NOTE(review): source indentation was lost; this dump may belong
        # inside the rank-1 branch above -- confirm against the original file.
        self.fluid.dump()
        return

    def __init__(self):
        # Register under the application name 'shock'; facilities are bound
        # in _configure() below.
        Application.__init__(self, 'shock')
        self.modeller = None
        self.surfaceMesher = None
        self.layout = None
        self.controller = None
        self.fluid = None
        self.solid = None
        self.coupler = None
        return

    def _defaults(self):
        Application._defaults(self)
        # Two MPI nodes: one for the solid solver, one for the fluid.
        self.inventory.launcher.inventory.nodes = 2
        return

    def _configure(self):
        # Bind the configured inventory facilities to plain attributes.
        Application._configure(self)
        self.modeller = self.inventory.modeller
        self.surfaceMesher = self.inventory.surfaceMesher
        self.layout = self.inventory.layout
        self.controller = self.inventory.controller
        self.fluid = self.inventory.fluid
        self.solid = self.inventory.solid
        self.coupler = self.inventory.coupler
        return

    def _init(self):
        Application._init(self)
        return
# main
# Script entry point: instantiate and run the coupled application.
if __name__ == '__main__':
    app = ShockApp()
    app.run()

# version
__id__ = "$Id: shock.py,v 1.1.1.1 2005/03/08 16:14:00 aivazis Exp $"
# End of file
|
995,358 | 0abe478e6018680a6414debc3168ef4480c731d2 | """ Open Loop Controller for Spot Micro. Takes GUI params or uses default
"""
import numpy as np
from random import shuffle
import copy
# Ensuring totally random seed every step!
# Ensuring totally random seed every step!
np.random.seed()

# Gait state identifiers used by BezierStepper's state machine.
FB = 0
LAT = 1
ROT = 2
COMBI = 3

# Mode flags: forward-only vs. full randomized state machine.
FWD = 0
ALL = 1
class BezierStepper():
    """Open-loop gait parameter scheduler for Spot Micro.

    Slowly randomizes the Bezier gait parameters (step length/velocity,
    lateral fraction, yaw rate) through a shuffled state machine so a
    policy can be trained on top of a varying open-loop gait.

    Fix over the original: integer mode/state comparisons now use ``==``
    instead of ``is`` (identity on small ints is a CPython detail).
    """
    def __init__(self,
                 pos=np.array([0.0, 0.0, 0.0]),
                 orn=np.array([0.0, 0.0, 0.0]),
                 StepLength=0.04,
                 LateralFraction=0.0,
                 YawRate=0.0,
                 StepVelocity=0.001,
                 ClearanceHeight=0.045,
                 PenetrationDepth=0.003,
                 episode_length=5000,
                 dt=0.01,
                 num_shuffles=2,
                 mode=FWD):
        self.pos = pos
        self.orn = orn
        self.desired_StepLength = StepLength
        self.StepLength = StepLength
        self.StepLength_LIMITS = [-0.05, 0.05]
        self.LateralFraction = LateralFraction
        self.LateralFraction_LIMITS = [-np.pi / 2.0, np.pi / 2.0]
        self.YawRate = YawRate
        self.YawRate_LIMITS = [-1.0, 1.0]
        self.StepVelocity = StepVelocity
        self.StepVelocity_LIMITS = [0.1, 1.5]
        self.ClearanceHeight = ClearanceHeight
        # NOTE(review): the default ClearanceHeight (0.045) exceeds this
        # upper limit, so return_bezier_params clips it to 0.04 -- confirm
        # whether the default or the limit is wrong.
        self.ClearanceHeight_LIMITS = [0.0, 0.04]
        self.PenetrationDepth = PenetrationDepth
        self.PenetrationDepth_LIMITS = [0.0, 0.02]
        self.mode = mode
        self.dt = dt

        # Keep track of state machine
        self.time = 0
        # Decide how long to stay in each phase based on maxtime
        self.max_time = episode_length
        """ States
                1: FWD/BWD
                2: Lat
                3: Rot
                4: Combined
        """
        self.order = [FB, LAT, ROT, COMBI]
        # Shuffles list in place so the order of states is unpredictable
        # NOTE: increment num_shuffles by episode num (cap at 10
        # and reset or someting) for some forced randomness
        for _ in range(num_shuffles):
            shuffle(self.order)

        # Forward/Backward always needs to be first!
        self.reshuffle()

        # Current State
        self.current_state = self.order[0]

        # Divide by number of states (see RL_SM())
        self.time_per_episode = int(self.max_time / len(self.order))

    def ramp_up(self):
        """Gradually grow StepLength toward the desired value."""
        if self.StepLength < self.desired_StepLength:
            self.StepLength += self.desired_StepLength * self.dt

    def reshuffle(self):
        """Reset the clock and force FWD/BWD to be the first state."""
        self.time = 0
        # Make sure FWD/BWD is always first state
        FB_index = self.order.index(FB)
        if FB_index != 0:
            what_was_in_zero = self.order[0]
            self.order[0] = FB
            self.order[FB_index] = what_was_in_zero

    def which_state(self):
        """Advance current_state based on elapsed time.

        NOTE(review): self.time is never incremented inside this class;
        it is presumably advanced by the caller -- confirm.
        """
        # Ensuring totally random seed every step!
        np.random.seed()
        if self.time > self.max_time:
            # Combined
            self.current_state = COMBI
            self.time = 0
        else:
            index = int(self.time / self.time_per_episode)
            if index > len(self.order) - 1:
                index = len(self.order) - 1
            self.current_state = self.order[index]

    def StateMachine(self):
        """
        State Machined used for training robust RL on top of OL gait.

        STATES:
            Forward/Backward: All Default Values.
                              Can have slow changes to
                              StepLength(+-) and Velocity
            Lateral: As above (fwd or bwd random) with added random
                     slow changing LateralFraction param
            Rotating: As above except with YawRate
            Combined: ALL changeable values may change!
                      StepLength
                      StepVelocity
                      LateralFraction
                      YawRate

        NOTE: the RL is solely responsible for modulating Clearance Height
              and Penetration Depth
        """
        # BUG FIX: == instead of `is` for the int mode flag.
        if self.mode == ALL:
            self.which_state()
            if self.current_state == FB:
                # print("FORWARD/BACKWARD")
                self.FB()
            elif self.current_state == LAT:
                # print("LATERAL")
                self.LAT()
            elif self.current_state == ROT:
                # print("ROTATION")
                self.ROT()
            elif self.current_state == COMBI:
                # print("COMBINED")
                self.COMBI()
        return self.return_bezier_params()

    def return_bezier_params(self):
        """Clip every parameter to its limits and return copies."""
        # First, Clip Everything
        self.StepLength = np.clip(self.StepLength, self.StepLength_LIMITS[0],
                                  self.StepLength_LIMITS[1])
        self.StepVelocity = np.clip(self.StepVelocity,
                                    self.StepVelocity_LIMITS[0],
                                    self.StepVelocity_LIMITS[1])
        self.LateralFraction = np.clip(self.LateralFraction,
                                       self.LateralFraction_LIMITS[0],
                                       self.LateralFraction_LIMITS[1])
        self.YawRate = np.clip(self.YawRate, self.YawRate_LIMITS[0],
                               self.YawRate_LIMITS[1])
        self.ClearanceHeight = np.clip(self.ClearanceHeight,
                                       self.ClearanceHeight_LIMITS[0],
                                       self.ClearanceHeight_LIMITS[1])
        self.PenetrationDepth = np.clip(self.PenetrationDepth,
                                        self.PenetrationDepth_LIMITS[0],
                                        self.PenetrationDepth_LIMITS[1])
        # Then, return
        # FIRST COPY TO AVOID OVERWRITING
        pos = copy.deepcopy(self.pos)
        orn = copy.deepcopy(self.orn)
        StepLength = copy.deepcopy(self.StepLength)
        LateralFraction = copy.deepcopy(self.LateralFraction)
        YawRate = copy.deepcopy(self.YawRate)
        StepVelocity = copy.deepcopy(self.StepVelocity)
        ClearanceHeight = copy.deepcopy(self.ClearanceHeight)
        PenetrationDepth = copy.deepcopy(self.PenetrationDepth)

        return pos, orn, StepLength, LateralFraction,\
            YawRate, StepVelocity,\
            ClearanceHeight, PenetrationDepth

    def FB(self):
        """
        Here, we can modulate StepLength and StepVelocity
        """
        # The maximum update amount for these element
        StepLength_DELTA = self.dt * (self.StepLength_LIMITS[1] -
                                      self.StepLength_LIMITS[0]) / (6.0)
        StepVelocity_DELTA = self.dt * (self.StepVelocity_LIMITS[1] -
                                        self.StepVelocity_LIMITS[0]) / (2.0)

        # Add either positive or negative or zero delta for each
        # NOTE: 'High' is open bracket ) so the max is 1
        # Bias the random walk back toward the center near the limits.
        if self.StepLength < -self.StepLength_LIMITS[0] / 2.0:
            StepLength_DIRECTION = np.random.randint(-1, 3, 1)[0]
        elif self.StepLength > self.StepLength_LIMITS[1] / 2.0:
            StepLength_DIRECTION = np.random.randint(-2, 2, 1)[0]
        else:
            StepLength_DIRECTION = np.random.randint(-1, 2, 1)[0]
        StepVelocity_DIRECTION = np.random.randint(-1, 2, 1)[0]

        # Now, modify modifiable params AND CLIP
        self.StepLength += StepLength_DIRECTION * StepLength_DELTA
        self.StepLength = np.clip(self.StepLength, self.StepLength_LIMITS[0],
                                  self.StepLength_LIMITS[1])
        self.StepVelocity += StepVelocity_DIRECTION * StepVelocity_DELTA
        self.StepVelocity = np.clip(self.StepVelocity,
                                    self.StepVelocity_LIMITS[0],
                                    self.StepVelocity_LIMITS[1])

    def LAT(self):
        """
        Here, we can modulate StepLength and LateralFraction
        """
        # The maximum update amount for these element
        LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -
                                           self.LateralFraction_LIMITS[0]) / (
                                               2.0)

        # Add either positive or negative or zero delta for each
        # NOTE: 'High' is open bracket ) so the max is 1
        LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]

        # Now, modify modifiable params AND CLIP
        self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA
        self.LateralFraction = np.clip(self.LateralFraction,
                                       self.LateralFraction_LIMITS[0],
                                       self.LateralFraction_LIMITS[1])

    def ROT(self):
        """
        Here, we can modulate StepLength and YawRate
        """
        # The maximum update amount for these element
        # no dt since YawRate is already mult by dt
        YawRate_DELTA = (self.YawRate_LIMITS[1] -
                         self.YawRate_LIMITS[0]) / (2.0)

        # Add either positive or negative or zero delta for each
        # NOTE: 'High' is open bracket ) so the max is 1
        YawRate_DIRECTION = np.random.randint(-1, 2, 1)[0]

        # Now, modify modifiable params AND CLIP
        self.YawRate += YawRate_DIRECTION * YawRate_DELTA
        self.YawRate = np.clip(self.YawRate, self.YawRate_LIMITS[0],
                               self.YawRate_LIMITS[1])

    def COMBI(self):
        """
        Here, we can modify all the parameters
        """
        self.FB()
        self.LAT()
        self.ROT()
|
995,359 | 665088b035058b0f09e705b6d4af9e9192d4468d | import os
import pandas as pd
import shutil
# Dataset locations (TRAC 2018 aggression shared task, English portion).
rootDir = "../TRAC2018/"

# input
devInfile = rootDir + "english/agr_en_dev.csv"
testInfile1 = rootDir + "trac-gold-set/agr_en_fb_gold.csv"
testInfile2 = rootDir + "trac-gold-set/agr_en_tw_gold.csv"
trainInfile = rootDir + "english/agr_en_train.csv"

# output
vuaDir = "VUA_format/"
trainDataFile = rootDir + vuaDir + "trainData.csv"
testDataFileI = rootDir + vuaDir + "testData-fb.csv"
testDataFileII = rootDir + vuaDir + "testData-tw.csv"
devDataFile = rootDir + vuaDir + "devData.csv"

# Column names shared by every generated file.
f1 = 'Id'
f2 = 'Text'
f3 = 'Label'
def readfile(f):
    """Return all lines of file *f* (newlines preserved).

    BUG FIX: use a context manager so the handle is closed even on error;
    the original opened the file and never closed it.
    """
    with open(f, 'r') as handle:
        return handle.readlines()
def makeDataFile(inFile, DfObj, outputFile):
    """Convert one raw TRAC CSV into the tab-separated VUA format.

    Reads *inFile* (headerless: id, text, label), strips tabs/newlines
    from the text column, appends the rows to *DfObj* and writes the
    result to *outputFile*.

    BUG FIX: DataFrame.append was removed in pandas 2.0 (and was O(n^2));
    rows are now collected in a list and concatenated once.
    """
    df1 = pd.read_csv(inFile, skiprows=0, header=None)  # doctest: +SKIP
    print(
        "\ndataset:{}\tnr of rows:{}\tnr of columns:{}".format(inFile.replace(rootDir, ""), df1.shape[0], df1.shape[1]))
    rows = []
    for i, row in df1.iterrows():
        # Tabs/newlines would break the tab-separated output format.
        cleantweet = df1.loc[i][1].replace("\t", "").replace("\n", "")
        rows.append({f1: df1.loc[i][0], f2: cleantweet, f3: df1.loc[i][2]})
    DfObj = pd.concat([DfObj, pd.DataFrame(rows, columns=[f1, f2, f3])],
                      ignore_index=True)
    print(DfObj.shape)
    print("{} processed lines from {}\t rows/columns`:{} written to {}".format(i + 1, inFile.replace(rootDir, ""),
                                                                               DfObj.shape,
                                                                               outputFile.replace(rootDir, "")))
    DfObj.to_csv(outputFile, index=False, header=True, sep='\t')
def main():
    """Recreate the VUA_format output directory and convert all four CSVs."""
    mydir = rootDir + vuaDir
    # Start from a clean output directory.
    if os.path.exists(mydir):
        shutil.rmtree(mydir)
    os.mkdir(mydir)
    # Empty frames with the shared Id/Text/Label schema.
    dfTrain = pd.DataFrame(columns=[f1, f2, f3])
    dfTest = pd.DataFrame(columns=[f1, f2, f3])
    dfDev = pd.DataFrame(columns=[f1, f2, f3])
    makeDataFile(devInfile, dfDev, devDataFile)
    makeDataFile(trainInfile, dfTrain, trainDataFile)
    makeDataFile(testInfile1, dfTest, testDataFileI)
    makeDataFile(testInfile2, dfTest, testDataFileII)


if __name__ == "__main__":
    main()
|
995,360 | 74a9ba727c2a44af0b59fea1a5bc03cd41cefe83 | def fib(n):
numList = []
curr = 1
prev = 0
while len(numList)<n:
numList.append(curr)
curr, prev = curr + prev, curr
print(*numList, sep='\n')
# Read the desired count from stdin and print that many Fibonacci numbers.
n = int(input('Enter number of digits\n'))
fib(n)
|
def logger(func):
    """Decorator that prints each call as ``name((args), {kwargs})`` before
    delegating to *func* and returning its result.

    BUG FIX: the wrapper now carries ``functools.wraps`` so the decorated
    function keeps its ``__name__``/``__doc__`` metadata.
    """
    import functools  # local import keeps this snippet self-contained

    @functools.wraps(func)
    def inner(*args, **kwargs):
        print(func.__name__ + "(%s, %s)" % (args, kwargs))
        return func(*args, **kwargs)
    return inner
|
995,362 | 9d3a54107875808f9bd589baaf2efc242570f302 | '''
Created on 8.5.2017
TODO - packages not loaded but applicable
- geojson
- shapely
- seaborn as sns
- shapely.wkt, wkt = http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html
@author: Markus.Walden
'''
#Array
from datetime import datetime
import shapefile
import geopandas as gp
from geopandas import datasets
import pandas as pd
from shapely.geometry import Point
#SQL
import sqlalchemy as sqla
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry
from sqlalchemy.orm import sessionmaker
#computer
import sys
from geographic import engineString
#map
import matplotlib.pyplot as plt
plt.style.use('bmh')
Base = declarative_base()
class GeographicNE(Base):
    '''
    Natural Earth country record mapped to the GeographicNE table:
    one row per country, keyed by ISO-3 code, with a polygon geometry.
    '''
    __tablename__ = 'GeographicNE'
    index = sqla.Column(sqla.Integer)
    continent = sqla.Column(sqla.NVARCHAR(50))
    # presumably GDP estimate in millions of USD (Natural Earth field name) -- TODO confirm
    gdp_md_est = sqla.Column(sqla.Float)
    iso_a3 = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
    name = sqla.Column(sqla.NVARCHAR(50))
    pop_est = sqla.Column(sqla.Float)
    geometry = sqla.Column(Geometry("POLYGON"))

    def __init__(self, params):
        '''
        Constructor.  NOTE(review): `params` is accepted but never used.
        '''

    def __repr__(self):
        # NOTE(review): the intended format (see commented template below)
        # was never implemented; this currently returns an empty string.
        #"(id='%s', Date='%s', Type='%s', Value='%s')" % (self.id, self.Date, self.Type, self.Value)
        return ""
class Cities(Base):
    """City record: name (primary key) plus point geometry."""
    __tablename__ = 'cities'
    name = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
    geometry = sqla.Column(Geometry("POINT"))
class Lake(Base):
    """Lake record: name (primary key), depth and polygon geometry.

    BUG FIX: ``created`` now passes the ``datetime.now`` callable instead
    of ``datetime.now()`` -- the original evaluated the timestamp once at
    import time, stamping every new row with the module-load time.
    """
    __tablename__ = 'lakes'
    # id = sqla.Column(sqla.Integer)
    name = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
    depth = sqla.Column(sqla.Integer, default=0)
    created = sqla.Column(sqla.DateTime, default=datetime.now)
    geom = sqla.Column(Geometry("POLYGON"))
def main():
    '''
    Driver for the geographic-data experiments. Each flag below enables one
    self-contained scenario (only `combineDataForCities` is on by default).

    shapefileTest()
    ---------------
    - test to print shapefile content
    - divided to two files dbf and shp
    - uses dictionaries as resultsets to contain data related to location and the location as polycon
    Using datasets geopandas for country and city statistics OR Using the gadm28 dataset
    - http://stackoverflow.com/questions/31997859/bulk-insert-a-pandas-dataframe-using-sqlalchemy
    crs (coordinate system )
    http://stackoverflow.com/questions/3845006/database-of-countries-and-their-cities
    '''
    # Scenario toggles — edited by hand to pick what runs.
    naturalEarthToCSV = False
    esriShapefileToGeopandas = False
    loadShapefileData = False
    combineDataForCities = True
    if naturalEarthToCSV:
        # Dump the naturalearth countries/cities datasets to ./data/*.csv.
        gp_world, gp_cities = generateWorldToDB(loadCSV = True)
        print ('Countries: ', gp_world)
        print ('Cities: ', gp_cities)
    if esriShapefileToGeopandas:
        '''
        'OBJECTID', 'geometry', 'UID', 'ID_0', 'ISO', 'NAME_0',
        'REGION', 'VARREGION', 'Shape_Leng', 'Shape_Area'
        'ID_1', 'NAME_1',
        'ID_2', 'NAME_2',
        'ID_3', 'NAME_3',
        'ID_4', 'NAME_4',
        'ID_5', 'NAME_5',
        '''
        # Split the gadm28 shapefile into an attributes CSV and a geometry CSV.
        shp = gp.GeoDataFrame.from_file('./gadm28/gadm28.shp')
        shp_1 = shp[['OBJECTID', 'geometry']]
        shp = shp[['OBJECTID', 'UID', 'ID_0', 'ISO', 'NAME_0', 'REGION',
                   'VARREGION', 'Shape_Leng', 'Shape_Area', 'ID_1', 'NAME_1','ID_2', 'NAME_2',
                   'ID_3', 'NAME_3', 'ID_4', 'NAME_4', 'ID_5', 'NAME_5']]
        #save X,Y into csv file
        shp.to_csv("./data/allData.csv",header=True,index=False,sep="\t")
        shp_1.to_csv("./data/allData_geom.csv",header=True,index=False,sep="\t")
        print (shp)
    if loadShapefileData:
        shapefileTest(i = 0, i_max = 50)
    if combineDataForCities:
        '''
        cities: Country,City,AccentCity,Region,Population,Latitude,Longitude
        - Country, City, Population,Latitude,Longitude - link to add iso3
        countrycodes: euname,modified,linked_country,iso3,iso2,grc,isonum,country,imperitive
        - country, iso3, iso2
        - define datasets
        - merge with country
        - add geometry
        - store to csv
        '''
        df_cities = pd.read_csv("./data/worldcitiespop.csv", sep = ',', encoding = "ISO-8859-1", header = 0,
                                names=['Country','City','AccentCity','Region','Population','Latitude','Longitude'])
        df_cities = df_cities[['Country','City','Region','Population','Latitude','Longitude']]
        # The 'Country' column actually holds ISO-2 codes: rename and upper-case it.
        df_cities.columns = ['iso2', 'City','Region','Population','Latitude','Longitude']
        df_cities['iso2'] = df_cities['iso2'].str.upper()
        # Keep only cities above 50k inhabitants.
        df_cities = df_cities[df_cities['Population'] > 50000]
        df_countryCodes = pd.read_csv("./data/countryISO2, 3.csv", sep = ',', header = 0,
                                      names=['euname','modified','linked_country','iso3','iso2','grc','isonum','country','imperitive'])
        df_countryCodes = df_countryCodes[['country', 'iso3', 'iso2']]
        # Join cities to their ISO-3 code / country name on the ISO-2 key.
        df_main = pd.merge(df_cities, df_countryCodes, on='iso2', how='inner')
        # Build WGS84 point geometry from the lat/long columns.
        geometry = [Point(xy) for xy in zip(df_main.Longitude, df_main.Latitude)]
        crs = {'init': 'epsg:4326'}
        df_geo = gp.GeoDataFrame(df_main, crs=crs, geometry=geometry)
        print (df_geo)
        df_geo.to_csv("./data/allDataCities.csv",header=True,index=False,sep=",")
def generateWorldToDB(loadCSV = False, getAsPandasDataFrame = False):
    '''
    Load the geopandas "naturalearth" country/city datasets and either dump
    them to CSV, reload them as pandas DataFrames, or insert them into the
    SQL database configured by `engineString`.

    - The main issue is with handling geographic data: the available python
      libraries have no support for MSSQL, so storing the data as csv may be
      the best bet.
    - The geometry type in SQL is image data with convert methods to
      coordinates or geometric shapes like polygon.

    returns: datasets for countries, cities
    '''
    world = gp.read_file(datasets.get_path('naturalearth_lowres'))
    cities = gp.read_file(datasets.get_path('naturalearth_cities'))
    if loadCSV:
        world.to_csv('./data/countries.csv', sep='\t')
        cities.to_csv('./data/cities.csv', sep='\t')
        return world, cities
    if getAsPandasDataFrame:
        df_countries = pd.read_csv('./data/countries.csv', sep='\t',
                                   index_col='iso_a3', names=['iso_a3', 'name', 'continent', 'gdp_md_est', 'geometry', 'pop_est'])
        df_cities = pd.read_csv('./data/cities.csv',
                                index_col='name', names=['name', 'geometry'])
        return df_countries, df_cities
    else:
        dbData = world.to_dict(orient='records')
        dbData_1 = cities.to_dict(orient='records')
        print("original dataframe - countries: ", world)
        print("original dataframe - cities: ", cities)
        tableNameA = 'GeographicNE'
        print(GeographicNE.__table__)
        # Process for SQL: reflect the existing target table.
        sql = sqla.create_engine(engineString)
        conn = sql.connect()
        metadata = sqla.schema.MetaData(bind=sql, reflect=True)
        table = sqla.Table(tableNameA, metadata, autoload=True)
        print(table)
        # Open the session
        Session = sessionmaker(bind=sql)
        session = Session()
        try:
            conn.execute(table.insert(), dbData)
            # NOTE(review): the bulk insert above and to_sql below both append
            # the same rows — confirm whether the duplication is intentional.
            world.to_sql(tableNameA, sql, if_exists='append')
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
            print('Exception type:', sys.exc_info()[0])
            print('Exception value:', sys.exc_info()[1])
        finally:
            # Commit + close always ran after the except in the original;
            # `finally` makes that explicit.
            session.commit()
            session.close()
        return dbData, dbData_1
def shapefileTest(i = 0, i_max = 50):
    '''
    Load the gadm28 shapefile and print up to `i_max` feature records.

    - files: gadm28.shp, gadm28.dbf
    - each record pairs a geometry (polygon + coordinate marking, exposed via
      __geo_interface__) with a dict of its attribute fields

    i     -- starting counter value
    i_max -- stop once the counter reaches this value
             (BUG FIX: the loop previously hard-coded `50` and ignored i_max)
    returns: the shapefile.Reader instance
    '''
    myshp = open("./gadm28/gadm28.shp", "rb")
    mydbf = open("./gadm28/gadm28.dbf", "rb")
    r = shapefile.Reader(shp=myshp, dbf=mydbf)
    fields = [field[0] for field in r.fields[1:]]
    print('fields: ', fields)
    for feature in r.shapeRecords():
        try:
            geom = feature.shape.__geo_interface__
            atr = dict(zip(fields, feature.record))
            print("geo_interface: ", geom)
            print('feature record: ', atr)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt still aborts.
            print('Exception type:', sys.exc_info()[0])
            print('Exception value:', sys.exc_info()[1])
        i = i + 1
        if i >= i_max:
            break
    return r
def testSQLAlchemyORM():
    '''
    Dummy example: commit one Lake row to exercise the native geometry type.
    - does NOT work with MSSQL; the current implementation covers
      postgreSQL with postGIS.
    '''
    print(Lake.__table__)
    lake = Lake(name='Majeur', geom='POLYGON((0 0,1 0,1 1,0 1,0 0))')
    sql = sqla.create_engine(engineString)
    conn = sql.connect()
    Session = sessionmaker(bind=sql)
    session = Session()
    try:
        session.add(lake)
        session.commit()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        print('Exception type:', sys.exc_info()[0])
        print('Exception value:', sys.exc_info()[1])
    finally:
        # Always release the session, even when the commit raised.
        session.close()
if __name__ == "__main__":
main() |
def magic_date(c, m, y):
    """Return True when the day times the month number equals the last two
    digits of the year (an Italian "magic date").

    c -- day of the month
    m -- Italian month name, case-insensitive
    y -- four-digit year

    Raises ValueError when `m` is not a recognised Italian month name (the
    original if/elif chain left `d` unbound and crashed with
    UnboundLocalError instead).
    """
    months = {
        'gennaio': 1, 'febbraio': 2, 'marzo': 3, 'aprile': 4,
        'maggio': 5, 'giugno': 6, 'luglio': 7, 'agosto': 8,
        'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12,
    }
    try:
        d = months[m.lower()]
    except KeyError:
        raise ValueError('unknown month name: %r' % (m,))
    # Last two digits of the year, via the same slicing as the original
    # (so behaviour on unusual year values is unchanged).
    return d * c == int(str(y)[2:4])
def main():
    """Prompt for a date (year, Italian month name, day) and report whether
    it is a "magic" one via magic_date()."""
    y = int(input('Inserire un anno:'))
    m = str(input('Inserire un mese:'))
    d = int(input('Inserire un giorno:'))
    # BUG FIX: the success message below was missing its closing quote,
    # which made the whole module a SyntaxError.
    if magic_date(d, m, y):
        print('Cavolo sì, è un appuntamento magico')
    else:
        print('Nessun uomo, riprova')
main()
|
995,364 | 680087defc309eb72979e9b10b98b6937f021744 | # -------------------------------------------------------- 1 ----------------------------------------------------------
import time
import itertools
class TrafficLight:
    """Console traffic light: prints each phase name in colour, forever."""
    # Each entry: [phase name, [duration seconds, ANSI colour code]].
    __color = [["red", [7, 31]], ["yellow", [2, 33]], ["green", [7, 32]], ["yellow", [2, 33]]]
    def running(self):
        """Cycle through the phases endlessly, overwriting the same line."""
        for light in itertools.cycle(self.__color):
            print(f"\r\033[{light[1][1]}m\033[1m{light[0]}\033[0m", end="")
            time.sleep(light[1][0])
trafficlight_1 = TrafficLight()
trafficlight_1.running()
# ------------------------------------------- вариант решения ---------------------------------------------------------
from time import sleep
class TrafficLight:
    """Endlessly announces the red -> yellow -> green -> yellow cycle."""

    __color = "Черный"

    def running(self):
        """Print each light phase forever, pausing for its duration."""
        phases = (
            ("Trafficlight is red now", 7),
            ("Trafficlight is yellow now", 2),
            ("Trafficlight is green now", 7),
            ("Trafficlight is yellow now", 2),
        )
        while True:
            for message, duration in phases:
                print(message)
                sleep(duration)
trafficlight = TrafficLight()
trafficlight.running()
# ------------------------------------------- вариант решения ---------------------------------------------------------
import time
import itertools
class TrafficLight:
    """Console traffic light that validates the supplied colour list first."""
    # Each entry: [phase name, [duration seconds, ANSI colour code]].
    __color = [["red", [7, 32]], ["yellow", [2, 33]], ["green", [7, 32]], ["yellow", [2, 33]]]
    def __init__(self, light_list):
        # Candidate colour names supplied by the caller; only validated,
        # not used for rendering.
        self.light_list = light_list
    def running(self):
        """Cycle the phases forever if light_list contains at least three of
        the basic colours (duplicates count); otherwise print an error."""
        if len([i for i in self.light_list if i in ["red", "yellow", "green"]]) >= 3:
            for light in itertools.cycle(self.__color):
                print(f"\r\033[{light[1][1]}m\033[1m{light[0]}\033[0m", end="")
                time.sleep(light[1][0])
        else:
            print("Your color list is incorrect.")
trafficlight_1 = TrafficLight(["lilac", "green", "lime", "white", "black", "yellow"])
trafficlight_1.running()
# ------------------------------------------- вариант решения ---------------------------------------------------------
from time import sleep
class TrafficLight:
    """Console traffic light: renders three lamps and cycles red→yellow→green."""
    # Index of the currently lit lamp: 0=red, 1=yellow, 2=green.
    __color = 0
    def running(self):
        """Redraw the three-lamp light forever, advancing after each delay."""
        # [red, yellow, green]
        lights = [
            {
                'name': 'красный',
                'color': '\x1b[41m',
                'delay': 7
            },
            {
                'name': 'жёлтый',
                'color': '\x1b[43m',
                'delay': 2
            },
            {
                'name': 'зелёный',
                'color': '\x1b[42m',
                'delay': 5
            }
        ]
        print('\nИмитация работы светофора:\n')
        while True:
            # Build the output string (the three-lamp traffic light).
            s = ''
            for i in range(3):
                if i == self.__color:
                    s += f'({lights[self.__color]["color"]} \x1b[0m)'
                else:
                    s += '( )'
            print(f'\r{s}', end='')
            # Pause for this colour's duration.
            sleep(lights[self.__color]["delay"])
            # Advance to the next colour.
            self.__color = (self.__color + 1) % 3
lights = TrafficLight()
lights.running()
# ------------------------------------------- вариант решения ---------------------------------------------------------
import pygame
class TrafficLight:
    """Pygame traffic light: draws three lamps and cycles them on a timer."""
    WHITE = (255, 255, 255)
    GRAY = (125, 125, 125)
    GREEN = (0, 255, 64)
    YELLOW = (225, 225, 0)
    RED = (255, 0, 0)
    # Phase order: red, yellow, green, yellow (indexed by `k` below).
    clr = [RED, YELLOW, GREEN, YELLOW]
    def __init__(self):
        # Seconds each phase stays lit, matching `clr` by index.
        # NOTE(review): red gets 2 s and yellow 7 s here — possibly swapped; confirm.
        self.act_time = [2, 7, 2, 7]
    def switch_on(self):
        """Open the window and animate the light until the window is closed."""
        FPS = 60
        WIN_WIDTH = 120
        WIN_HEIGHT = 400
        pygame.init()
        clock = pygame.time.Clock()
        sc = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
        # Radius and coordinates of the lamp circles.
        r = WIN_WIDTH // 4
        x = WIN_WIDTH // 2
        y = WIN_HEIGHT // 2  # centred vertically
        k = 0
        while 1:
            sc.fill(self.GRAY)
            # Draw the three unlit (white) lamps.
            pygame.draw.circle(sc, self.WHITE, (x, y - 100), r)
            pygame.draw.circle(sc, self.WHITE, (x, y), r)
            pygame.draw.circle(sc, self.WHITE, (x, y + 100), r)
            for i in pygame.event.get():
                if i.type == pygame.QUIT: exit()
            # Overdraw the active lamp in its phase colour.
            if k == 0:
                pygame.draw.circle(sc, self.clr[k], (x, y - 100), r)
            elif k == 1 or k == 3:
                pygame.draw.circle(sc, self.clr[k], (x, y), r)
            elif k == 2:
                pygame.draw.circle(sc, self.clr[k], (x, y + 100), r)
            pygame.display.update()
            pygame.time.wait(self.act_time[k] * 1000)
            if k >= 3:
                k = 0
            else:
                k += 1
            clock.tick(FPS)
###########
a = TrafficLight()
a.switch_on()
# -------------------------------------------------------- 2 ----------------------------------------------------------
class Road:
    """An asphalt road; reports the asphalt mass needed to cover it
    (25 kg per square metre per cm of thickness, 5 cm layer)."""

    def __init__(self, length, width):
        self._length = length
        self._width = width

    def get_full_profit(self):
        """Return a human-readable mass calculation string (tonnes)."""
        mass_tons = self._length * self._width * 25 * 5 / 1000
        return f"{self._length} м * {self._width} м * 25 кг * 5 см = {mass_tons} т"
road_1 = Road(5000, 20)
print(road_1.get_full_profit())
# ------------------------------------------- вариант решения ---------------------------------------------------------
class Road:
    """An asphalt road; prints the asphalt mass needed to pave it."""

    def __init__(self, _lenght, _width):
        # NOTE(review): "lenght" is a typo for "length", but the names are
        # kept to preserve the public interface for existing callers.
        self._lenght = _lenght
        self._width = _width

    def calc(self):
        """Print the asphalt mass in tonnes (25 kg/m^2 per cm, 5 cm layer)."""
        mass = self._lenght * self._width * 25 * 5 / 1000
        print(f"Масса асфальта - {mass} тонн")
def main():
    """Keep prompting for road dimensions until two valid integers are given,
    then print the asphalt mass via Road.calc()."""
    while True:
        try:
            # NOTE(review): the first prompt says "width" but is passed as the
            # length argument (and vice versa) — confirm the intended order.
            road_1 = Road(int(input("Enter width of road in m: ")), int(input("Enter lenght of road in m: ")))
            road_1.calc()
            break
        except ValueError:
            print("Only integer!")
# -------------------------------------------------------- 3 ----------------------------------------------------------
class Worker:
    """Employee record: identity, position, and the two income components."""
    def __init__(self, name, surname, position, wage, bonus):
        self.name = name
        self.surname = surname
        self.position = position
        # Income kept as separate parts; subclasses sum them for the total.
        self._income = {"profit": wage, "bonus": bonus}
class Position(Worker):
    """Worker subclass exposing a formatted full name and the total income."""

    def get_full_name(self):
        """Return "name surname"."""
        return " ".join((self.name, self.surname))

    def get_full_profit(self):
        """Return wage + bonus as a string."""
        return str(sum(self._income.values()))
meneger = Position("Dorian", "Grey", "СEO", 500000, 125000)
print(meneger.get_full_name())
print(meneger.position)
print(meneger.get_full_profit())
# -------------------------------------------------------- 4 ----------------------------------------------------------
class Car:
    """A generic car that can start, stop, turn and report its speed."""

    def __init__(self, name, color, speed, is_police=False):
        self.name = name
        self.color = color
        self.speed = speed
        self.is_police = is_police
        print('Новая машина: {} (цвет {}) машина полицейская - {}'.format(
            self.name, self.color, self.is_police))

    def go(self):
        """Announce that the car started moving."""
        print('{}: Машина поехала.'.format(self.name))

    def stop(self):
        """Announce that the car stopped."""
        print('{}: Машина остановилась.'.format(self.name))

    def turn(self, direction):
        """Announce a turn; 0 means left, anything else means right."""
        side = "налево" if direction == 0 else "направо"
        print('{}: Машина повернула {}.'.format(self.name, side))

    def show_speed(self):
        """Return a string describing the current speed."""
        return '{}: Скорость автомобиля: {}.'.format(self.name, self.speed)
class TownCar(Car):
    """City car: flags speeds above 60 as speeding."""

    def show_speed(self):
        """Return the speed string, with a speeding warning above 60."""
        if self.speed > 60:
            return f'{self.name}: Скорость автомобиля: {self.speed}. Превышение скорости!'
        return f"{self.name}: Скорость автомобиля {self.speed}"
class WorkCar(Car):
    """Work (cargo) car: flags speeds above 40 as speeding."""

    def show_speed(self):
        """Return the speed string, with a speeding warning above 40."""
        if self.speed > 40:
            return f'{self.name}: Скорость автомобиля: {self.speed}. Превышение скорости!'
        return f"{self.name}: Скорость автомобиля {self.speed}"
class SportCar(Car):
    """Sports car: inherits Car behaviour unchanged (no speed-limit check)."""
class PoliceCar(Car):
    """Police car: a Car whose is_police flag defaults to True."""
    def __init__(self, name, color, speed, is_police=True):
        super().__init__(name, color, speed, is_police)
police_car = PoliceCar('"Полицайка"', 'белый', 80)
police_car.go()
print(police_car.show_speed())
police_car.turn(0)
police_car.stop()
print()
work_car = WorkCar('"Грузовичок"', 'хаки', 40)
work_car.go()
work_car.turn(1)
print(work_car.show_speed())
work_car.turn(0)
work_car.stop()
print()
sport_car = SportCar('"Спортивка"', 'красный', 120)
sport_car.go()
sport_car.turn(0)
print(sport_car.show_speed())
sport_car.stop()
print()
town_car = TownCar('"Малютка"', 'жёлтый', 50)
town_car.go()
town_car.turn(1)
town_car.turn(0)
print(town_car.show_speed())
town_car.stop()
print(f'\nМашина {town_car.name} (цвет {town_car.color})')
print(f'Машина {police_car.name} (цвет {police_car.color})')
# -------------------------------------------------------- 5 ----------------------------------------------------------
class Stationery:
    """Generic drawing tool identified by its title."""

    def __init__(self, title="Something that can draw"):
        self.title = title

    def draw(self):
        """Announce that drawing starts with this tool."""
        print("Just start drawing! {}))".format(self.title))
class Pen(Stationery):
    """Stationery item that announces itself as a pen."""

    def draw(self):
        print("Start drawing with {} pen!".format(self.title))
class Pencil(Stationery):
    """Stationery item that announces itself as a pencil."""

    def draw(self):
        print("Start drawing with {} pencil!".format(self.title))
class Marker(Stationery):
    """Stationery item that announces itself as a marker."""

    def draw(self):
        print("Start drawing with {} marker!".format(self.title))
stat = Stationery()
stat.draw()
pen = Pen("Parker")
pen.draw()
pencil = Pencil("Faber-Castell")
pencil.draw()
marker = Marker("COPIC")
marker.draw()
|
995,365 | 96a3040b82e7ee0f0ca09b660ffdc18122a88855 | #I have created File:-Raoshanks
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the home page."""
    return render(request, 'index.html')
def ex1(request):
    """Return a hard-coded navigation bar as a raw HTML response."""
    s = '''<h2>NavigationBar<br></h2>
<a href=https://www.youtube.com>youtube</a><br>
<a href='https://www.facebook.com'>facebokk</a><br>
<a href="https://www.flipkart.com">flipkart</a><br>
'''
    return HttpResponse(s)
def analyze(request):
    """Apply the selected text transformations and render the result.

    Checkbox values arrive as 'on'/'off' in the POST data. Transformations are
    chained in order — punctuation removal, uppercasing, newline removal,
    extra-space removal — each feeding its output into the next.
    """
    dtext = request.POST.get('text', 'default')
    removepunc = request.POST.get('removepunc', 'off')
    fullcaps = request.POST.get('fullcaps', 'off')
    newlineremover = request.POST.get('newlineremover', 'off')
    extraspaceremover = request.POST.get('extraspaceremover', 'off')
    # BUG FIX: the "no operation selected" check used to test `fullcaps` twice
    # and omit `removepunc` (so a punctuation-only request got the error page).
    # It now checks all four and runs first, guaranteeing `param` is bound below.
    if removepunc != 'on' and fullcaps != 'on' and newlineremover != 'on' and extraspaceremover != 'on':
        return HttpResponse("<h1>Please select operation and try again</h1>")
    if removepunc == 'on':
        puncations = '''![]()-{};:'"\,<>'''
        analyzed = "".join(char for char in dtext if char not in puncations)
        param = {'purpose': "remove punc", 'analyzed_text': analyzed}
        dtext = analyzed
    if fullcaps == 'on':
        analyzed = dtext.upper()
        param = {'purpose': 'fullcapsform', 'analyzed_text': analyzed}
        dtext = analyzed
    if newlineremover == 'on':
        analyzed = "".join(char for char in dtext if char != "\n" and char != "\r")
        param = {'purpose': 'reomve new line', 'analyzed_text': analyzed}
        dtext = analyzed
    if extraspaceremover == 'on':
        # BUG FIX: guard the index+1 lookahead so the last character no longer
        # raises IndexError; runs of spaces still collapse to their last space.
        analyzed = ""
        for index, char in enumerate(dtext):
            if not (char == " " and index + 1 < len(dtext) and dtext[index + 1] == " "):
                analyzed = analyzed + char
        param = {'purpose': 'extraspaceromover', 'analyzed_text': analyzed}
    return render(request, 'analyze.html', param)
|
995,366 | b3a6cf6523c2eb1f7a91a327f69fd7c3f588c2bf | import numpy as np
import cv2
import math
def cropImage(base,filename,filetype):
    """Locate the sudoku grid in <base>/<filename>.<filetype>, deskew it with a
    perspective warp, and write the square crop to <filename>-cropped.<filetype>.

    Pipeline: threshold -> flood-fill to isolate the largest blob (the grid
    border) -> Hough lines -> merge near-duplicate lines -> pick the four
    outermost edges -> intersect them -> warp to a square.
    """
    openfile=base+"/"+filename+"."+filetype
    # Load an color image in grayscale
    og_image = cv2.imread(openfile,0)
    #cv2.imshow('Original sudoku',og_image)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    blank_image = np.zeros(shape=og_image.shape, dtype=np.uint8)
    #cv2.imshow('Blank Image',blank_image)
    #cv2.waitKey(0)
    # Smooth, then binarise with a local (adaptive) threshold and invert so
    # the grid lines become white on black.
    blank_image = cv2.GaussianBlur(og_image, (11,11), 0 )
    #cv2.imshow('Gaussian Blur',og_image)
    #cv2.waitKey(0)
    blank_image = cv2.adaptiveThreshold(blank_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 2)
    #cv2.imshow('Adaptive Threshold',blank_image)
    #cv2.waitKey(0)
    blank_image = cv2.bitwise_not(blank_image)
    #cv2.imshow('Inverted Image',blank_image)
    #cv2.waitKey(0)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    blank_image = cv2.dilate(blank_image, kernel,iterations=1)
    #cv2.imshow('Dilated Image',blank_image)
    #cv2.waitKey(0)
    # Flood-fill every white region with grey (64), remembering the seed of the
    # largest region — assumed to be the outer sudoku border.
    count=0
    maxarea=-1
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]>=128):
                area = cv2.floodFill(blank_image,None, (x,y), 64)
                if(area[0]>maxarea):
                    maxPt = (x,y)
                    maxarea = area[0]
    # Repaint the largest blob white and erase every other grey blob.
    cv2.floodFill(blank_image,None, maxPt, 255)
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]==64 and x!=maxPt[0] and y!=maxPt[1]):
                area = cv2.floodFill(blank_image, None,(x,y), 0)
    blank_image = cv2.erode(blank_image, kernel,iterations=1)
    #cv2.imshow('Lines',blank_image)
    #cv2.waitKey(0)
    """
    minx=1000
    miny=1000
    maxx=-1
    maxy=-1
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]==255 and x<minx):
                minx = x
            if(blank_image[y,x]==255 and y<miny):
                miny = y
            if(blank_image[y,x]==255 and x>maxx):
                maxx = x
            if(blank_image[y,x]==255 and y>maxy):
                maxy = y
    print(minx,miny,maxx,maxy)
    for y in range(blank_image.shape[0]):
        for x in range(blank_image.shape[1]):
            if(blank_image[y,x]==0 and x>minx and x< maxx and y>miny and y<maxy):
                print(x,y)
                cv2.floodFill(blank_image,None, (x,y), 255)
                cv2.imshow('Lines1',blank_image)
                cv2.waitKey(0)
    cv2.imshow('Lines1',blank_image)
    cv2.waitKey(0)
    """
    edges = cv2.Canny(blank_image,50,150,apertureSize = 3)
    # Lower the Hough vote threshold until at least one line is detected.
    thresh = 101
    while(1):
        lines = cv2.HoughLines(blank_image,1,np.pi/180,thresh)
        if(lines is None):
            thresh -= 1
        else:
            break
    # Merge near-duplicate lines: similar (rho, theta) pairs whose endpoints
    # are close get averaged; the absorbed line is marked (0, -100) as deleted.
    for current in lines:
        for rho,theta in current:
            if(rho==0 and theta==-100):
                continue
            a = np.cos(theta)
            b = np.sin(theta)
            # Endpoints of the line clipped to the image: near-horizontal lines
            # are parameterised by x, near-vertical ones by y.
            if(theta>np.pi*45/180 and theta<np.pi*135/180):
                x1=0
                y1=rho/b
                x2=blank_image.shape[1]
                y2=-x2*(a/b)+rho/b
            else:
                y1=0
                x1=rho/a
                y2=blank_image.shape[0]
                x2=-y2*(b/a)+rho/a
            for pos in lines:
                if((current==pos).all()):
                    continue
                for rho1,theta1 in pos:
                    if(rho1==0 and theta1==-100):
                        continue
                    if(abs(rho-rho1)<20 and abs(theta-theta1)<np.pi*10/180):
                        a1 = np.cos(theta1)
                        b1 = np.sin(theta1)
                        if(theta1>np.pi*45/180 and theta1<np.pi*135/180):
                            x11=0
                            y11=rho1/b1
                            x21=blank_image.shape[1]
                            y21=-x21*(a1/b1)+rho1/b1
                        else:
                            y11=0
                            x11=rho1/a1
                            y21=blank_image.shape[0]
                            x21=-y21*(b1/a1)+rho1/a1
                        # Endpoints within 64 px of each other -> same physical line.
                        if(((x11-x1)*(x11-x1)+(y11-y1)*(y11-y1))<64*64 and ((x21-x2)*(x21-x2)+(y21-y2)*(y21-y2))<64*64):
                            current[0][0] = (current[0][0]+pos[0][0])/2
                            current[0][1] = (current[0][1]+pos[0][1])/2
                            pos[0][0]=0
                            pos[0][1]=-100
    # Draw the surviving Hough lines onto the working image (debug aid).
    for someline in lines:
        for rho,theta in someline:
            a = np.cos(theta)
            b = np.sin(theta)
            if(theta!=0):
                m = -1*(a/b)
                c = rho/b
                blank_image=cv2.line(blank_image,(0,int(c)),(blank_image.shape[1],int(m*blank_image.shape[1]+c)),255,1)
            else:
                blank_image=cv2.line(blank_image,(rho,0),(rho,blank_image.shape[0]),255,1)
    #cv2.imshow('Hough Lines',blank_image)
    #cv2.waitKey(0)
    # Select the four extreme edges: topmost/bottommost near-horizontal lines
    # and leftmost/rightmost near-vertical lines.
    topEdge = (1000,1000)
    topYIntercept=100000
    topXIntercept=0
    bottomEdge = (-1000,-1000)
    bottomYIntercept=0
    bottomXIntercept=0
    leftEdge = (1000,1000)
    leftXIntercept=100000
    leftYIntercept=0
    rightEdge = (-1000,-1000)
    rightXIntercept=0
    rightYIntercept=0
    for current in lines:
        for rho,theta in current:
            if(rho==0 and theta==-100):
                continue
            a = np.cos(theta)
            b = np.sin(theta)
            xIntercept = rho/a
            yIntercept = rho/(a*b)
            if(theta>np.pi*80/180 and theta<np.pi*100/180):
                # Near-horizontal: rank by rho to pick top/bottom.
                if(rho<topEdge[0]):
                    topEdge=(rho,theta)
                if(rho>bottomEdge[0]):
                    bottomEdge=(rho,theta)
            elif(theta<np.pi*10/180 or theta>np.pi*170/180):
                # Near-vertical: rank by x-intercept to pick left/right.
                if(xIntercept>rightXIntercept):
                    rightEdge=(rho,theta)
                    rightXIntercept=xIntercept
                if(xIntercept<=leftXIntercept):
                    leftEdge=(rho,theta)
                    leftXIntercept=xIntercept
    # Draw the chosen border lines onto the original image (debug aid).
    flines=[topEdge,bottomEdge,rightEdge,leftEdge]
    for someline in flines:
        rho=someline[0]
        theta=someline[1]
        a = np.cos(theta)
        b = np.sin(theta)
        if(theta!=0):
            m = -1*(a/b)
            c = rho/b
            og_image=cv2.line(og_image,(0,int(c)),(blank_image.shape[1],int(m*blank_image.shape[1]+c)),0,1)
        else:
            og_image=cv2.line(og_image,(rho,0),(rho,blank_image.shape[0]),0,1)
    #cv2.imshow('Final Lines',og_image)
    #cv2.waitKey(0)
    # Convert each edge from (rho, theta) to two points on the image boundary.
    left1, left2, right1, right2, bottom1, bottom2, top1, top2=[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0],[0,0]
    height=og_image.shape[0]
    width=og_image.shape[1]
    leftcos=np.cos(leftEdge[1])
    leftsin=np.sin(leftEdge[1])
    lefttan=(leftsin/leftcos)
    rightcos=np.cos(rightEdge[1])
    rightsin=np.sin(rightEdge[1])
    righttan=(rightsin/rightcos)
    if(leftEdge[1]!=0):
        left1[0]=0
        left1[1]=leftEdge[0]/leftsin
        left2[0]=width
        left2[1]=-left2[0]/lefttan + left1[1]
    else:
        # theta == 0 means a perfectly vertical line.
        left1[1]=0
        left1[0]=leftEdge[0]/leftcos
        left2[1]=height
        left2[0]=left1[0] - height*lefttan
    if(rightEdge[1]!=0):
        right1[0]=0
        right1[1]=rightEdge[0]/rightsin
        right2[0]=width
        right2[1]=-right2[0]/righttan + right1[1]
    else:
        right1[1]=0
        right1[0]=rightEdge[0]/rightcos
        right2[1]=height
        right2[0]=right1[0] - height*righttan
    bottomcos=np.cos(bottomEdge[1])
    bottomsin=np.sin(bottomEdge[1])
    bottomtan=(bottomsin/bottomcos)
    topcos=np.cos(topEdge[1])
    topsin=np.sin(topEdge[1])
    toptan=(topsin/topcos)
    bottom1[0]=0
    bottom1[1]=bottomEdge[0]/bottomsin
    bottom2[0]=width
    bottom2[1]=-bottom2[0]/bottomtan + bottom1[1]
    top1[0]=0
    top1[1]=topEdge[0]/topsin
    top2[0]=width
    top2[1]=-top2[0]/toptan + top1[1]
    #Next, we find the intersection of these four lines
    # Each line as Ax + By = C from its two points.
    leftA = left2[1]-left1[1]
    leftB = left1[0]-left2[0]
    leftC = leftA*left1[0] + leftB*left1[1]
    rightA = right2[1]-right1[1]
    rightB = right1[0]-right2[0]
    rightC = rightA*right1[0] + rightB*right1[1]
    topA = top2[1]-top1[1]
    topB = top1[0]-top2[0]
    topC = topA*top1[0] + topB*top1[1]
    bottomA = bottom2[1]-bottom1[1]
    bottomB = bottom1[0]-bottom2[0]
    bottomC = bottomA*bottom1[0] + bottomB*bottom1[1]
    #Intersection of left and top
    detTopLeft = leftA*topB - leftB*topA
    ptTopLeft = ((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft)
    #Intersection of top and right
    detTopRight = rightA*topB - rightB*topA
    ptTopRight = ((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight)
    #Intersection of right and bottom
    detBottomRight = rightA*bottomB - rightB*bottomA
    ptBottomRight = ((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight)
    #Intersection of bottom and left
    detBottomLeft = leftA*bottomB-leftB*bottomA
    ptBottomLeft = ((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft)
    # The output square's side length is the longest side of the quadrilateral.
    maxLength = (ptBottomLeft[0]-ptBottomRight[0]) * (ptBottomLeft[0]-ptBottomRight[0]) + (ptBottomLeft[1]-ptBottomRight[1]) * (ptBottomLeft[1]-ptBottomRight[1])
    temp = (ptTopRight[0]-ptBottomRight[0])*(ptTopRight[0]-ptBottomRight[0]) + (ptTopRight[1]-ptBottomRight[1])*(ptTopRight[1]-ptBottomRight[1])
    if(temp>maxLength):
        maxLength = temp
    temp = (ptTopRight[0]-ptTopLeft[0])*(ptTopRight[0]-ptTopLeft[0]) + (ptTopRight[1]-ptTopLeft[1])*(ptTopRight[1]-ptTopLeft[1])
    if(temp>maxLength):
        maxLength = temp
    temp = (ptBottomLeft[0]-ptTopLeft[0])*(ptBottomLeft[0]-ptTopLeft[0]) + (ptBottomLeft[1]-ptTopLeft[1])*(ptBottomLeft[1]-ptTopLeft[1])
    if(temp>maxLength):
        maxLength = temp
    maxLength = int(math.sqrt(maxLength))
    # Warp the quadrilateral corners onto a maxLength x maxLength square.
    src=(ptTopLeft,ptTopRight,ptBottomRight,ptBottomLeft)
    src=np.array(src,np.float32)
    dst=((0,0),(maxLength-1,0),(maxLength-1,maxLength-1),(0,maxLength-1))
    dst=np.array(dst,np.float32)
    #print(src,dst)
    undistort = np.zeros(shape=(maxLength,maxLength), dtype=np.uint8)
    undistort=cv2.warpPerspective(og_image, cv2.getPerspectiveTransform(src, dst), dsize=(maxLength,maxLength))
    #cv2.imshow('Final Image',undistort)
    #cv2.waitKey(0)
    closefile=base+"/"+filename+"-cropped."+filetype
    cv2.imwrite(closefile,undistort)
#cropImage("sudoku-giant","jpeg") |
995,367 | d42c0d5645a4a4b9c0f372cd6248b4fb5ae239c6 | from django.shortcuts import render
from django.http import HttpResponse
import random
import string
from .models import URL
# Create your views here.
def index(request):
    """Render the URL-submission form."""
    return render(request, 'url_shorten/index.html')
def shortened(request):
    """Create and persist a short code for the submitted URL, then show both.

    BUG FIXES: the template previously received the URL model instance as
    'long_url' instead of the submitted URL string, and a non-POST request
    returned None (a 500 in Django) — it now falls back to the index page.
    """
    if request.method == 'POST':
        long_url = request.POST['url']
        short_url = shortify()
        url = URL(url_long=long_url, url_short=short_url)
        url.save()
        return render(request, 'url_shorten/short.html', {'short_url': short_url, 'long_url': long_url})
    return render(request, 'url_shorten/index.html')
def shortify(size=8):
    """Return a random slug of `size` characters drawn from [a-z0-9]."""
    alphabet = string.ascii_lowercase + string.digits
    picked = []
    for _ in range(size):
        picked.append(random.choice(alphabet))
    return ''.join(picked)
|
# BOJ 6249: report dollar exchange-rate updates.
# n: number of updates, l: initial reference rate, u: record high so far.
n, l, u = map(int,input().split())
temp = 0  # previous update value (0 = none seen yet)
for i in range(n):
    update = int(input())
    if update > u:
        # New all-time high: BBTV reports the record; it becomes the new bound.
        print("BBTV: Dollar reached {} Oshloobs, A record!".format(update))
        u = update
    elif temp != 0:
        if temp > update:
            # Fell below the previous update: NTV reports the drop size.
            print("NTV: Dollar dropped by {} Oshloobs".format(temp-update))
    else:
        if l > update:
            # First update, below the initial reference rate.
            print("NTV: Dollar dropped by {} Oshloobs".format(l-update))
    temp = update
# https://www.acmicpc.net/problem/6249
995,369 | 7c5962497257c801ced74862abc11b7722607e95 | #! /usr/bin/env python
PKG = "eddiebot_node"
import roslib;roslib.load_manifest(PKG)
from dynamic_reconfigure.parameter_generator import *
# Declare the dynamic_reconfigure parameters for the EddieBot node.
# gen.add signature: (name, type, level, description, default, min, max).
gen = ParameterGenerator()
# NOTE(review): for update_rate the min (60) exceeds the max (30.0) — confirm
# the intended argument order/bounds.
gen.add("update_rate", double_t, 0, "Polling rate for the parallax eddie.", 30.0, 60, 30.0 )
# Enumerated drive modes; the string value selects the input message type.
drive_mode = gen.enum([ gen.const("twist", str_t, "twist", "Takes a geometry_msgs/Twist message and is navigation compatible."),
                        gen.const("eddie", str_t, "eddie", "Takes a eddiebot_node/Eddie message and is eddiesim compatible."),
                        gen.const("drive", str_t, "drive", "Takes a eddiebot_node/Drive message which drives the EddieBot as described in the Parallax Eddie manual.")],"")
gen.add("drive_mode", str_t, 0, "The possible drive modes (twist, eddie, drive).", "twist", edit_method = drive_mode)
gen.add("cmd_vel_timeout", double_t, 0, "How long to wait before timing out on a velocity command..", 0.5, 0.0, 0.5)
gen.add("stop_motors_on_bump", bool_t, 0, "Stops motors when the bumps sensor is hit.", True)
gen.add("has_gyro", bool_t, 0, "Enables or disables the gyro.", True)
gen.add("gyro_scale_correction", double_t, 0, "Scaling factor for correct gyro operation.", 1.35, 0.0, 6.0)
# Supported gyro chips with their measurement ranges in deg/s.
gyro_enum = gen.enum([ gen.const("ADXRS613", double_t, 150.0, "ADXRS613 150deg/s"),
                       gen.const("ADXRS652", double_t, 250.0, "ADXRS652 250deg/s"),
                       gen.const("ADXRS642", double_t, 300.0, "ADXRS642 300deg/s") ],
                     "Gyro Options")
gen.add("gyro_measurement_range", double_t, 0, "Measurement range supported by gyro.", 150.0, 0.0, 300.0, edit_method=gyro_enum)
gen.add("odom_angular_scale_correction", double_t, 0, "A correction applied to the computation of the rotation in the odometry.", 1.0, 0.0, 3.0)
gen.add("odom_linear_scale_correction", double_t, 0, "A correction applied to the computation of the translation in odometry.", 1.0, 0.0, 3.0)
# NOTE(review): a None default for a double parameter looks suspect — confirm.
gen.add("min_abs_yaw_vel", double_t, 0, "Minimum angular velocity of the EddieBot.", None, 0.0, 3.0)
gen.add("max_abs_yaw_vel", double_t, 0, "Maximum angular velocity of the EddieBot.", None, 0.0, 3.0)
exit( gen.generate(PKG, "EddieBot", "EddieBot"))
|
995,370 | 3e7e9fa1251df819f6373bd910c67b32884ab3f2 | """ Test Configuration """
import pytest
from disco_dan import settings
def test_configured():
    """Smoke test: the test suite itself is wired up and runs."""
    assert True
def test_settings_load():
    """The settings module imports successfully and is truthy."""
    assert settings
|
995,371 | a6ee56d8b240adc021cfecce4ca7dccf0da6411e | from rest_framework.generics import (
ListAPIView,
RetrieveUpdateDestroyAPIView,
CreateAPIView,
DestroyAPIView,
RetrieveAPIView,
)
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from rest_framework.permissions import (
AllowAny,
IsAdminUser,
IsAuthenticated,
IsAuthenticatedOrReadOnly,
)
from django_filters.rest_framework import DjangoFilterBackend
from django.contrib.contenttypes.models import ContentType
from posts.api.permissions import IsOwner
from .serializers import CommentListSerializers, CommentDetailSerializers, create_comment_serializer
from comments.models import Comments
from posts.models import Post
class CommentListAPIView(ListAPIView):
    """Read-only list of all comments with search, filter and ordering support."""
    queryset = Comments.objects.all()
    serializer_class = CommentListSerializers
    filter_backends = [SearchFilter, DjangoFilterBackend, OrderingFilter]
    # Fields clients may order by via ?ordering=...
    ordering_fields = ['user__username', 'content',
                       'timestamp', 'content_object__title']
    filterset_fields = ['user__username', 'content']
    search_fields = ['user__username', 'content']
class CommentDetailAPIView(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Comment; writes restricted to its owner."""
    queryset = Comments.objects.all()
    serializer_class = CommentDetailSerializers
    permission_classes = [IsAuthenticatedOrReadOnly, IsOwner]
class CommentCreateAPIView(CreateAPIView):
    """Create a comment; the target object and optional parent come from
    query-string parameters (?type=, ?id=, ?parent_id=)."""
    queryset = Comments.objects.all()
    # serializer_class = CommentListSerializers
    permission_classes = [IsAuthenticated]
    # def perform_create(self, serializer):
    #     # Set request.user as the owner of every created comment.
    #     serializer.save(user=self.request.user)
    def get_serializer_class(self):
        # Build a serializer bound to the commented object identified by the
        # query params; parent_id (when given) nests the comment as a reply.
        # post = Post.objects.filter(published=True)
        model_type = self.request.GET.get("type")
        object_id = self.request.GET.get("id")
        # content = self.request.POST["content"]
        parent_id = self.request.GET.get("parent_id", None)
        # model_type = model_type.model_class()
        # if model_type == 'post':
        #     post = Post.objects.get(id=object_id)
        #     model = ContentType.objects.get_for_model(post.__class__)
        return create_comment_serializer(instance=model_type, object_id=object_id, parent_id=parent_id, user=self.request.user)
|
995,372 | 567b3ddda979a80e460084a115f90784f4b0d6ec | import os
class Initialize:
    """Application bootstrap helpers: version info and data-directory layout."""

    # Directories (relative to the CWD) required by the application.
    _DATA_DIRS = ("data", "data/left", "data/keys", "data/lists")

    def __init__(self):
        pass

    @staticmethod
    def get_version_number():
        """Return the application version string."""
        return "0.1.0"

    @staticmethod
    def create_dirs():
        """Create the data directory tree if it does not exist.

        Uses os.makedirs(exist_ok=True), which is race-free compared with the
        previous isdir-then-mkdir pattern and creates parents as needed.
        """
        for path in Initialize._DATA_DIRS:
            os.makedirs(path, exist_ok=True)
|
995,373 | 26a6bacd78313d1dfb45206c3ce4db7bcbeeb2c9 | # Generated by Django 3.0.8 on 2020-07-25 21:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add integer `status` and short text `status_text` columns to Grade."""

    dependencies = [
        ('grade_predictions', '0008_auto_20200725_2135'),
    ]

    operations = [
        # Both fields carry defaults so existing rows migrate without prompts.
        migrations.AddField(
            model_name='grade',
            name='status',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='grade',
            name='status_text',
            field=models.CharField(default='', max_length=50),
        ),
    ]
|
995,374 | 77569d035cbcf12c47183172b007c8fd25764d5b | import math
import numpy as np
#from icecream import ic
class BrapiVariants():
    """Builds a BrAPI ``/variants`` response from a genotype backend (``gd``).

    Construction parses the request, counts matching variants, computes
    pagination, materialises per-variant records and records status messages;
    ``get_response_object()`` wraps it all in the BrAPI envelope.
    Assumes ``gd`` exposes a zarr-like ``callset`` mapping and
    ``get_posidx_by_genome_coordinate`` — see the genotype-data module.
    """
    def __init__(self, gd, request):
        self.gd = gd
        self.request = request
        self.status_messages = []
        self.data_matrices = []
        self._parse_request(request)
        self._count_variants()
        self._setup_pagination()
        self._add_data()
        self.status_messages.append({
            'message': 'Request accepted, response successful',
            'messageType': 'INFO'
        })
    def _parse_request(self, request):
        """Extract variantDbId (split into chrom/pos), page and pageSize args."""
        self.has_variant_db_id = False
        input = {}
        variant_db_id = request.args.get('variantDbId', default = '', type = str)
        if variant_db_id:
            try:
                # Expected format: "<chromosome>:<position>".
                variant_db_id_splitted = variant_db_id.split(':')
                input['chrom'] = variant_db_id_splitted[0]
                input['pos'] = int(variant_db_id_splitted[1])
                self.has_variant_db_id = True
            except:
                # NOTE(review): bare except also swallows programming errors;
                # (IndexError, ValueError) would cover the malformed-id case.
                self.status_messages.append({
                    'message': 'variantDbId seems to be malformatted. It should have the format `chromosome:position`. Example: `1:56242`',
                    'messageType': 'ERROR'
                })
        input['page'] = request.args.get('page', default = 0, type = int)
        input['pageSize'] = request.args.get('pageSize', default = 1000, type = int)
        self.input = input
    def _count_variants(self):
        """Set self.count_variants: 1 for an id lookup, else the backend total."""
        self.slice_variant_calls = False
        if self.has_variant_db_id:
            self.count_variants = 1
        else:
            self.count_variants = self.gd.count_variants
    def _setup_pagination(self):
        """Clamp the requested page into range and build the pagination dict."""
        total_pages = math.ceil(self.count_variants / self.input['pageSize'])
        # NOTE(review): if count_variants is 0, total_pages is 0 and page
        # becomes -1 here, which negatively slices in _add_data — confirm
        # the backend can never report zero variants.
        if self.input['page'] >= total_pages:
            self.input['page'] = total_pages - 1
            self.status_messages.append({
                'message': 'Given parameter `page` was bigger than corresponding `totalPages` would allow. `currentPage` was set to the biggest possible value of '+str(self.input['page']),
                'messageType': 'WARNING'
            })
        self.pagination = {
            'currentPage': self.input['page'],
            'pageSize': self.input['pageSize'],
            'totalCount': self.count_variants,
            'totalPages': total_pages
        }
    def _add_data(self):
        """Read REF/ALT/CHROM/POS for the selection and build BrAPI variant dicts."""
        reference_bases = None
        alternate_bases = None
        if self.has_variant_db_id:
            # Single-variant lookup by genome coordinate.
            coord, lookup_type_start = self.gd.get_posidx_by_genome_coordinate(self.input['chrom'], self.input['pos'])
            reference_bases = self.gd.callset['variants/REF'].get_basic_selection(coord)
            reference_bases = [reference_bases]
            alternate_bases = self.gd.callset['variants/ALT'].get_basic_selection(coord).tolist()
            chrom = self.gd.callset['variants/CHROM'].get_basic_selection(coord).tolist()
            pos = self.gd.callset['variants/POS'].get_basic_selection(coord).tolist()
        else:
            # Page-sized slice of the whole callset.
            coord_start = self.input['page'] * self.input['pageSize']
            coord_end = coord_start + self.input['pageSize']
            reference_bases = self.gd.callset['variants/REF'].get_basic_selection(slice(coord_start, coord_end)).tolist()
            alternate_bases = self.gd.callset['variants/ALT'].get_basic_selection(slice(coord_start, coord_end)).tolist()
            chrom = self.gd.callset['variants/CHROM'].get_basic_selection(slice(coord_start, coord_end)).tolist()
            pos = self.gd.callset['variants/POS'].get_basic_selection(slice(coord_start, coord_end)).tolist()
        data = []
        for i, ref_base in enumerate(reference_bases):
            data.append({
                'additionalInfo': {},
                'referenceBases': ref_base,
                # Drop the '' padding entries the ALT matrix carries.
                'alternateBases': [x for x in alternate_bases[i] if x != ''],
                'ciend': [],
                'cipos': [],
                'created': None,
                'updated': None,
                'start': pos[i],
                'end': pos[i],
                'svlen': None,
                'externalReferences': [{}],
                'filtersApplied': False,
                'filtersFailed': [],
                'filtersPassed': False,
                'referenceDbId': None,
                'referenceName': '',
                'referenceSetDbId': None,
                'referenceSetName': '',
                'variantDbId': str(chrom[i])+':'+str(pos[i]),
                'variantNames': [],
                'variantSetDbId': [],
                'variantType': 'SNV'
            })
        self.data = data
    def get_response_object(self):
        """Return the complete BrAPI response envelope (metadata + result)."""
        brapi_response = {
            "@context": [
                "https://brapi.org/jsonld/context/metadata.jsonld"
            ],
            "metadata": {
                "datafiles": [],
                "pagination": self.pagination,
                "status": self.status_messages
            },
            "result": {
                "data": self.data
            }
        }
        return brapi_response
995,375 | 7e688e11e2d2fafe566a18a2e05a38700795c867 | import flask
from flask import request
import requests
from flask_cors import CORS, cross_origin
app = flask.Flask(__name__)
cors=CORS(app)
app.config["CORS_HEADERS"] = 'Content-Type'
@app.route('/', methods=['GET'])
@cross_origin()
def home():
    """Proxy a store-locator search to the VTInfo finder API, returning its raw body.

    Forwards lat/long/zip/custID/miles plus optional storeType/brand/pkgtype
    query parameters. If the upstream reports its daily usage limit was hit,
    retries once under the 'Brandfinder' category (which presumably has a
    separate quota — TODO confirm) with a matching referer.
    """
    # request.args.get never raises for a missing key and the defaults cover
    # absence, so the original's try/except wrappers were unnecessary.
    post_data = {
        'lat': request.args.get('lat'),
        'long': request.args.get('long'),
        'zip': request.args.get('zip'),
        'custID': request.args.get('custID'),
        'miles': request.args.get('miles'),
        'storeType': request.args.get('storeType', default='', type=str),
        'brand': request.args.get('brand', default='', type=str),
        'pkgtype': request.args.get('pkgtype', default='', type=str),
    }
    url = 'https://finder.vtinfo.com/finder/web/v2/iframe/search'
    res = requests.post(url, data=post_data,
                        headers={'referer': 'https://finder.vtinfo.com/finder/web/v2/iframe?custID=HOF&theme=bs-journal'})
    if res.text.find("You are over your daily allowed usage limit") > -1:
        post_data['category1'] = 'Brandfinder'
        res = requests.post(url, data=post_data,
                            headers={'referer': 'https://finder.vtinfo.com/finder/web/v2/iframe?custID=HOF&category1=Brandfinder'})
    return res.text
if __name__ == "__main__":
    # Listen on all interfaces so the proxy is reachable from other hosts.
    app.run(host='0.0.0.0', port=5000)
def searchFile(Path, a):
    """Print and return the first line of the file at *Path* containing *a*.

    Returns the matching line (including its trailing newline) or None when
    nothing matches; in that case the original not-found notice is printed,
    so existing CLI behaviour is preserved while callers gain a usable
    return value.
    """
    with open(Path, 'r') as file:
        for line in file:
            if a in line:
                print(line)
                return line
    print("Searching..")
    print("<Name Not Found>")
    return None
def splitNames(Path):
    """Split whitespace-separated name words from the file at *Path* into
    alternating first/last name lists, print both, and return them.

    Words alternate globally in reading order (1st word -> first names,
    2nd -> last names, 3rd -> first names, ...), matching the original
    character-scanner. Two defects are fixed: the original appended an empty
    string to `last` whenever the file ended with a newline, and produced
    bogus empty entries for runs of consecutive spaces. Returning
    ``(first, last)`` is backward-compatible (the original returned None).
    """
    first = []
    last = []
    idx = 0  # global word index; parity decides which list a word joins
    with open(Path, 'r') as file:
        for line in file:
            for word in line.split():
                (first if idx % 2 == 0 else last).append(word)
                idx += 1
    print(first)
    print(last)
    return first, last
if __name__ == "__main__":
    # Demo run guarded so importing this module has no side effects.
    # Path is from the original author's machine; adjust before running.
    a = r"C:\Users\quinton.baudoin\Desktop\classNames.txt"
    splitNames(a)
|
995,377 | ac38483240b952cae6baf075381e29c7aa11d464 | from tkinter import *
import tkinter as tk
import platform
from config import *
from OSUtilities import isFile
class CourseChoice:
    """Scrollable Tk window listing courses read from data/courseslist.txt.

    Each file line is "<title words...> <link>"; clicking an entry stores its
    link and closes the window. Use getCourseURL() to run the UI and obtain
    the chosen link.
    """
    def __init__(self):
        self.root = tk.Tk()
        self.root.geometry("800x600")
        self.canvas = tk.Canvas(self.root)
        self.configureCanvasToBeScrollable()
        self.frame = tk.Frame(self.canvas)
        self.canvas.pack(side="left", fill="both", expand=True)
        self.canvas.create_window((4, 4), window=self.frame, anchor="nw")
        self.counter = 0      # grid row for the next checkbox
        self.toReturn = None  # link chosen by the user (set in callback)
        if (isFile("data/courseslist.txt")):
            with open("data/courseslist.txt", "r") as f:
                while True:
                    line = f.readline()
                    if not line:
                        break
                    # Strip the trailing newline; last token is the link,
                    # everything before it is concatenated into the title.
                    if (line[len(line)-1] == '\n'):
                        line = line[:-1]
                    line = line.split(" ")
                    title = ""
                    for i in range(len(line)-1):
                        title += line[i]
                    link = line[len(line)-1]
                    self.addElement(title, link)
        else:
            print("ERROR: couldn't find the data/courseslist.txt file!")
        self.updateGraphics()
    def callback(self, link):
        """Record the chosen link and close the window (ends mainloop)."""
        self.toReturn = link
        self.root.destroy()
    def addElement(self, title, link):
        """Add a checkbox row for one course, coloured by a department
        keyword found in the link."""
        var = IntVar()
        fg = "black"
        if "chimica" in link:
            fg = "darkorange"
        elif "dii" in link:
            fg = "brown"
        elif "dei" in link:
            fg = "blue"
        elif "math" in link:
            fg = "red"
        elem = Checkbutton(self.frame, text = title, font = "bold",\
            variable = var, onvalue=1, offvalue=0, height=2, \
            activebackground = "#FFFFFF", padx = 300, fg = fg,\
            compound = "left", command=lambda:self.callback(link))
        self.renderWidget(elem)
    def bindLinuxMouseScroll(self, elem):
        """Bind X11 wheel buttons (4/5) to vertical canvas scrolling."""
        elem.bind("<Button-4>", lambda event : self.canvas.yview('scroll', -1, 'units'))
        elem.bind("<Button-5>", lambda event : self.canvas.yview('scroll', 1, 'units'))
    def bindWindowsMouseScroll(self, elem):
        # NOTE(review): on_mousewheel is not defined in this module;
        # presumably it arrives via the `config` star-import — confirm.
        elem.bind_all("<MouseWheel>", on_mousewheel)
    def bindMouse(self, elem):
        """Attach the platform-appropriate wheel binding.

        NOTE(review): the `elem` argument is ignored — the binding always
        targets self.canvas.
        """
        if (platform.system()=="Linux"):
            self.bindLinuxMouseScroll(self.canvas)
        elif platform.system()=="Windows":
            self.bindWindowsMouseScroll(self.canvas)
    def configureCanvasToBeScrollable(self):
        """Create the vertical scrollbar and link it to the canvas."""
        self.bindMouse(self.canvas)
        vsb = tk.Scrollbar(self.root, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=vsb.set)
        vsb.pack(side="right", fill="y")
    def onFrameConfigure(self, canvas):
        """Grow the scrollregion whenever the inner frame is resized."""
        self.canvas.configure(scrollregion=self.canvas.bbox("all"))
    def renderWidget(self, elem):
        """Place a widget on the next grid row and wire up wheel scrolling."""
        elem.grid(row=self.counter, sticky=W)
        self.bindMouse(elem)
        self.counter += 1
    def updateGraphics(self):
        """Reset the row counter and (re)install resize/scroll bindings."""
        self.counter = 0
        self.frame.bind("<Configure>", lambda event, canvas=self.canvas: self.onFrameConfigure(self.canvas))
        self.bindMouse(self.frame)
    def getCourseURL(self):
        """Run the UI; block until a course is picked, then return its link
        (None if the window is closed without choosing)."""
        self.root.mainloop()
        return self.toReturn
995,378 | b6ad123b6a6e30050d60f93f20edfeb1887dff20 | import getopt
import requests
import random
import sys
import time
def _buildName():
    """Return a random ``(first, last)`` name tuple.

    The two parts are drawn independently from pools of US-president
    first/last names padded with a few extras, so combinations look
    plausible without repeating real people.
    """
    fnames = [
        'Abraham',
        'Andrew',
        'Barack',
        'Benjamin',
        'Calvin',
        'Chester',
        'Dwight',
        'Franklin',
        'George',
        'Gerald',
        'Grover',
        'Harry',
        'Herbert',
        'James',
        'John',
        'Lyndon',
        'Martin',
        'Millard',
        'Richard',
        'Ronald',
        'Rutherford',
        'Theodore',
        'Thomas',
        'Ulysses',
        'Warren',
        'William',
        'Woodrow',
        'Zachary',
        'Ben',
        'Chanice',
        'Apu',
        'Diego',
        'Saka',
        'Sasha',
        'Steven',
        'Thomas',
        'Freddy'
    ]
    lnames = [
        'Adams',
        'Arthur',
        'Buchanan',
        'Bush',
        'Carter',
        'Cleveland',
        'Clinton',
        'Coolidge',
        'Eisenhower',
        'Fillmore',
        'Ford',
        'Garfield',
        'Grant',
        'Harding',
        'Harrison',
        'Hayes',
        'Hoover',
        'Jackson',
        'Jefferson',
        'Johnson',
        'Kennedy',
        'Knox',
        'Lincoln',
        'Madison',
        'McKinley',
        'Monroe',
        'Nixon',
        'Obama',
        'Pierce',
        'Reagan',
        'Roosevelt',
        'Taft',
        'Taylor',
        'Truman',
        'Tyler',
        'VanBuren',
        'Washington',
        'Wilson',
        'Mercury',
        'Jones',
        'Doofenschmirtz',
        'Jabutie',
        'Dobbs',
        'Villalobos',
        'Nguyen'
    ]
    return (random.choice(fnames), random.choice(lnames))
def _buildZipCode():
zipDigits = []
for i in range(0, 5):
zipDigits.append(str(random.randint(0, 9)))
return ''.join(zipDigits)
def _buildFakeEmailAddress(fname=None, lname=None):
    """Return a lower-cased fake email address.

    When a first and last name are supplied, ~75% of addresses use a
    name-derived local part (via _generateEmailLocalFromName); otherwise a
    canned protest local part is chosen. The domain is always picked at
    random from the pool below. Prints the generated address.
    """
    domains = [
        'gmail.com',
        'yahoo.com',
        'hotmail.com',
        'trump.com',
        'trumpinternaltionalrealty.com',
        'donaldjtrump.com',
        'trumporg.com',
        'trumpuniversity.com',
        'tmgmt.com',
        'juno.com',
        '10minutemail.com',
        'eelmail.com',
        'einrot.com',
        'fleckens.hu',
        'getairmail.com',
        'grr.la',
        'guerrillamail.biz',
        'gustr.com',
        'harakirimail.com',
        'hulapla.de',
        'hushmail.com',
        'imgof.com',
        'imgv.de',
        'mailinator.com',
        'reconmail.com',
        'rhyta.com',
        's0ny.net',
        'sharklasers.com',
        'sogetthis.com',
        'soodonims.com',
        'stonerfans.com',
        'streetwisemail.com',
        'superrito.com',
        'suremail.info',
        'tafmail.com',
        'teewars.org',
        'teleworm.us',
        'thehighlands.co.uk',
        'tradermail.info',
        'trbvm.com',
        'value-mycar.co.uk',
        'yopmail.com',
        'zippymail.info',
        'zxcvbnm.co.uk',
        'whitehouse.gov',
        'state.gov',
        'fcc.gov',
        'dot.gov',
        'irs.gov',
        'epa.gov',
        'gop.com',
        'army.mil',
        'navy.mil',
        'af.mil',
        'congress.gov',
        'senate.gov',
        'outlook.com',
        'kgb.net',
        'kkk.org',
        'aol.com',
        'live.com',
        'verizon.com',
        'earthlink.net',
        'comcast.net',
        'infowars.com',
        'naturalnews.com',
        'mindspring.com',
        'russianhookers.net',
        'bosley.com',
        'hairclub.com',
        'sweet-escort.ru',
        'exxon.com',
        'ethics.house.gov',
        'ethics.senate.gov',
        'brietbart.com',
        'foxnews.com'
    ]
    localParts = [
        'dumptrump',
        'banbannon',
        'h8fascism',
        'freemelania',
        'fred.douglass',
        'greenbowling',
        'kellyannesclearheels',
        'nicer4spicer',
        'mexicalijoe',
        'mu.slim',
        'giantmeteor2016',
        'freescience',
        'putinitinyou',
        'ivankadanke',
        'SN.atch.grabber',
        'drumpfhouse',
        're.fugee',
        'australasia',
        'youcrane',
        'gldnshwrs',
        'factsschmacts',
        'smoochinmnuchin',
        'mike.penceive',
        'gorsucks',
        'de.voss',
        'donaldtrumpmakesmewannasmokecrack',
        'formsarenotpetitions',
        'refuse',
        'resist',
        'nonserviam',
        'vlad',
        'bvdobbs',
        'usck',
        'lies',
        'damnlies',
        'whathappenedtodrainingtheswamp',
        'yourwallisstupidandsoareyou'
    ]
    if fname and lname:
        # randint(0, 3) is truthy 3 out of 4 times, so ~75% of addresses get
        # a name-derived local part; widen/narrow the range to tune the
        # realistic-vs-spammy ratio.
        if random.randint(0,3):
            local = _generateEmailLocalFromName(fname, lname)
        else:
            local = random.choice(localParts)
    else:
        local = random.choice(localParts)
    address = str.join('', [local, '@', random.choice(domains)]).lower()
    print("Generated " + address)
    return address
def _generateEmailLocalFromName(fname, lname):
r = random.randint(1, 5)
if r == 1:
local = str.join('', [fname[0], lname])
elif r == 2:
local = str.join('_', [fname, lname])
elif r == 3:
local = str.join('', [lname, fname[0], str(random.randint(1,100))])
elif r == 4:
local = str.join('', [fname[0], lname[0], str(random.randint(1,100))])
else:
local = str.join('.', [fname, lname])
return local
def frontPageForm():
    """Payload builder for the front-page email-signup form (email + ZIP only)."""
    targetURL = 'https://forms.whitehouse.gov/webform/email-signup?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov'
    dataPayload = {
        "submitted[email_address]": _buildFakeEmailAddress(),
        "submitted[zip_code]": _buildZipCode(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_111",
        "form_build_id": "form-43X7sWhYGJ1EdVKeroNYk0M2Wnv7I-Bp4qrOtulPg6A"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def gorsuchForm():
    """Payload builder for the SCOTUS-nominee support form (name, email, ZIP)."""
    targetURL = 'https://forms.whitehouse.gov/webform/scotus-form?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fsupport-nominee-gorsuch'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[e_mail_address]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_106",
        "form_build_id": "form-sZV-iGQZ-ZjG8D9H_5SGIZfBSBEsGfiLx-mjVrXt20E"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def israelForm():
    """Payload builder for the stands-with-israel form (name, email, ZIP)."""
    targetURL = 'https://forms.whitehouse.gov/webform/trump-stands-with-israel?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Ftrump-stands-with-israel'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[email]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_176",
        "form_build_id": "form-zsH6jltzBOSHwMFO5DyO0Ki9DSVWIjxVSxLydxlsSd0"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def womenForm():
    """Payload builder for the empowering-female-leaders form (name, email, ZIP)."""
    targetURL = 'https://forms.whitehouse.gov/webform/empowering-female-leaders?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fsupport-empowering-female-leaders'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[email]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_166",
        "form_build_id": "form-NqsMkzEZaaDlyQrTXOKzDh1K-R60re0e1pglFMgWIR4"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def workForm():
    """Payload builder for the back-to-work get-involved form (name, email, ZIP)."""
    targetURL = 'https://forms.whitehouse.gov/webform/get-involved?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fsupport-american-back-to-work'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[email_address]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_141",
        "form_build_id": "form-6kxJzAO-R2p9ejec8AywNsveIW9AnRlHbM1v19Gp2Ug"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def jointAddressForm():
    """Payload builder for the 2017 joint-address signup form.

    This form takes the full name in a single first_name field.
    """
    targetURL = 'https://forms.whitehouse.gov/webform/joint-address-congress-2017-signup?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fjoint-address'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[first_name]": fname + ' ' + lname,
        "submitted[email_address]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        "form_id": "webform_client_form_196",
        # NOTE(review): form_build_id duplicates workForm's value — presumably
        # copy-pasted; confirm against the live form before relying on it.
        "form_build_id": "form-6kxJzAO-R2p9ejec8AywNsveIW9AnRlHbM1v19Gp2Ug"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def _buildMAGIdea():
return "Remove Donald Drumpf from office."
def _buildState():
return "District of Columbia"
def _buildCountry():
return "United States"
def _buildComment():
return "No matter how much money or power you have, Donald, she still won't love you."
def issueSurveyForm():
    """Payload builder for the joint-address issues survey (name, email,
    location and two canned free-text answers)."""
    targetURL = 'https://forms.whitehouse.gov/webform/joint-address-issues-survey?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fjoint-address-issues-survey'
    fname, lname = _buildName()
    dataPayload = {
        # NOTE(review): "mae_america" is kept verbatim — presumably it mirrors
        # the (typo'd) field id on the live form; confirm before "fixing" it.
        "submitted[3_what_are_your_ideas_to_mae_america_great_again]": _buildMAGIdea(),
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[email_address]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        "submitted[country]": _buildCountry(),
        "submitted[state]": _buildState(),
        "submitted[4_additional_comments]": _buildComment(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_206",
        "form_build_id": "form-qzHYQwgCHonDYAAgpQMnllOIuqyMGDPKddwUtJjp4LI"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def jsocExitForm():
    """Payload builder for the 2017 joint-address exit survey (name, email,
    location and two inline free-text answers)."""
    targetURL = 'https://forms.whitehouse.gov/webform/2017-joint-address-exit-survey?initialWidth=544&childId=forall-iframe-embed-1&parentUrl=https%3A%2F%2Fwww.whitehouse.gov%2Fjoint-address-exit-survey'
    fname, lname = _buildName()
    dataPayload = {
        "submitted[4_what_new_policies_would_you_like_to_see_put_in_place]": "I'd like to see you impeached and maybe deported.",
        "submitted[5_additional_comments]": "You are just terrible; the absolute worst.",
        "submitted[first_name]": fname,
        "submitted[last_name]": lname,
        "submitted[email_address]": _buildFakeEmailAddress(fname, lname),
        "submitted[zip_code]": _buildZipCode(),
        "submitted[country]": _buildCountry(),
        "submitted[state]": _buildState(),
        # Hidden Drupal webform identifiers scraped from the live page.
        "form_id": "webform_client_form_236",
        "form_build_id": "form-lrFKhiQ7Knpl-zz3iCUdj5dZqBb6FMpxiU2i98yzy6c"
    }
    return {'targetURL': targetURL, 'dataPayload': dataPayload}
def _buildUserAgent():
agents = [
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36"
]
return random.choice(agents)
def sendProtestSubmission(form):
    """POST the payload built by *form* (a zero-arg builder) and return the
    HTTP status code, or None when the request fails.

    The original crashed on any request failure: it concatenated
    str + Exception (TypeError) and then returned the unbound variable `r`
    (NameError). Failure now logs the error and returns None.
    """
    data = form()
    try:
        r = requests.post(data['targetURL'],
                          headers={'User-agent': _buildUserAgent()},
                          data=data['dataPayload'])
    except Exception as err:
        print("Woops. Something went sideways. " + str(err))
        return None
    return r.status_code
def fireTehLazers(iterations=5):
    """Submit *iterations* randomly chosen form payloads, sleeping a random
    1-420 seconds between posts to look less bot-like.

    Progress (index, form name, status code, next delay) is printed after
    each submission.
    """
    # Hoisted out of the loop: the candidate list never changes per iteration.
    forms = [
        frontPageForm,
        gorsuchForm,
        workForm,
        israelForm,
        womenForm,
        issueSurveyForm,
        jointAddressForm,
        jsocExitForm
    ]
    for i in range(iterations):
        form = random.choice(forms)
        result = sendProtestSubmission(form)
        sleepTime = random.randint(1, 420)
        print(str(i + 1) + " of " + str(iterations) + ": " + form.__name__ + ", result code: " + str(
            result) + ". Next post in " + str(sleepTime) + " seconds.")
        time.sleep(sleepTime)
def main(args):
    """Parse -i/--iterations from *args* and launch the submission loop.

    Exits with status 2 on unknown options, missing usage, or a non-integer
    count; the original crashed with an uncaught ValueError on input such
    as `-i abc`.
    """
    usage = 'whSignupFormProtestSubmitter.py -i <number_of_submissions>'
    try:
        opts, args = getopt.getopt(args, "hi:", ["iterations=", "help"])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-i", "--iterations"):
            try:
                iterations = int(arg)
            except ValueError:
                print(usage)
                sys.exit(2)
            fireTehLazers(iterations)
        else:
            # Covers -h/--help as well, matching the original's fallthrough.
            print(usage)
            sys.exit(2)
if __name__ == '__main__':
    # Forward CLI args (minus the program name) to the option parser.
    main(sys.argv[1:])
|
995,379 | 6c73e15a32cb431d04290f703ae854fe27550a5e | from pymongo import MongoClient
import feedparser
import string
import html # Convert HTML accents to text
import unidecode # Remove accents
import re
# Connect to the local MongoDB and use the `news` collection of the `aula` db.
client = MongoClient('localhost', 27017)
db = client.aula
news = db.news
# 1. Parse the RSS feed at the EM news RSS URL.
em_rss = feedparser.parse('http://www.em.com.br/rss/noticia/gerais/rss.xml')
# 2. Iterate over all entries and extract every summary (list comprehension).
# NOTE: the loop variable shadows the `news` collection only inside the
# comprehension (Python 3 comprehension scoping), so the collection is safe.
summary_list = [news.summary for news in em_rss.entries]
# 3. Report how many summaries were extracted.
print('Número de sumários extraídos: {total}'.format(total=len(summary_list)))
# 4. Clean each summary and insert it into the database.
# str.maketrans('', '', string.punctuation) builds a deletion table; the
# original passed string.punctuation directly to translate(), which in
# Python 3 treats the string as an ordinal->character mapping and corrupts
# low-codepoint characters instead of stripping punctuation.
_strip_punct = str.maketrans('', '', string.punctuation)
for description in summary_list:
    cleaned_description = html.unescape(description)  # decode HTML entities
    cleaned_description = unidecode.unidecode(cleaned_description).translate(_strip_punct)  # drop accents and punctuation
    cleaned_description = re.sub(r"[^a-zA-Z]+", " ", cleaned_description)  # keep letters only
    news.insert_one({'description': cleaned_description})
# Check that the documents were inserted into the database.
# NOTE(review): Collection.count() is deprecated in modern PyMongo — prefer
# count_documents({}) when upgrading the driver.
print(news.count())
print(news.find_one())
######## PYTHONIC WAY
words = []
# Fetch every stored document.
description_list = news.find()
# Split every description into individual words.
for word in description_list:
    words.extend(word["description"].split())
print(words)
print('Número de ocorrências: {num}'.format(num=len(words)))
######## MONGO WAY
from bson.code import Code
# Map: emit (word, 1) for every whitespace-separated token of a description.
# NOTE: `map` and `reduce` shadow the Python builtins from here on.
map = Code("""
function () {
this.description.trim().split(/\s+/).forEach((z) => { emit(z, 1) });
};
""")
# Reduce: sum the emitted counts per word.
reduce = Code("""
function(key, values) {
return Array.sum(values)
}
""")
# Results land in the `wordcount` collection.
result = news.map_reduce(map, reduce, "wordcount")
###### DISPLAY OF RESULTS
# Python way: Counter counts every word in one O(n) pass; the original dict
# loop called words.count(word) once per word, which is O(n^2). The mapping
# (first-occurrence key order, total counts) is identical.
from collections import Counter

word_freq = dict(Counter(words))
print(word_freq)
# MongoDB way: read the map-reduce output collection back.
for entry in db.wordcount.find():
    print(entry)
import csv  # kept from the original; used by the commented-out examples below

# Renamed from `list`: the original shadowed the builtin of the same name.
names = ["james"]
big_list = names * 100
print(big_list)
# court_list = ["Barack James Obama", "Roger Ramjet"]
# search_terms = "Barack"
# matches = []
# for name_string in court_list:
# for word in search_terms.split(" "):
# if word not in name_string:
# break
# else:
# matches.append(name_string) # triggers when the for loop doesn't break
# print(matches)
#very close to being the thing! only downside is that a spelling error in fore or surname invalidates the search for that name.
#i'm sure that is easily fixable
#or just make sure no spelling errors in input - could be a good options to keep output list small
# import tkinter
# from tkinter.constants import *
# tk = tkinter.Tk()
# frame = tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
# frame.pack(fill=BOTH,expand=1)
# label = tkinter.Label(frame, text="Hello, World")
# label.pack(fill=X, expand=1)
# button = tkinter.Button(frame,text="Exit",command=tk.destroy)
# button.pack(side=BOTTOM)
# tk.mainloop()
# from tkinter.filedialog import askopenfilename
# filename = askopenfilename()
# print(filename)
# # client_list = []
# # with open("./CourtList.csv",'rt') as f:
# # data = csv.reader(f)
# # for row in data:
# # client_list.append(row)
# # print(client_list)
#
# new_file = open("./Clients.csv", "w", newline='')
# writer = csv.writer(new_file)
# writer.writerow(["James"])
# writer.writerow(["Adam"])
# writer.writerow(["frank"]) |
def reverse(arr):
    """Reverse *arr* in place by swapping elements at two converging indices."""
    print("Input Array : {} ".format(arr))
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        swap(arr, lo, hi)
        lo += 1
        hi -= 1


def swap(arr, ptr1, ptr2):
    """Exchange the elements of *arr* at indices *ptr1* and *ptr2* in place."""
    arr[ptr1], arr[ptr2] = arr[ptr2], arr[ptr1]
if __name__ == "__main__":
    # Demo run, guarded so importing this module has no side effects.
    arr = [1, 2, 3, 4, 5, 6]
    reverse(arr)
    print("Output Array : {} ".format(arr))
|
995,382 | c4d763f55e9833c52ea09ac0238398780f08edc1 | from django import template
from django import forms
from django.http import HttpResponseRedirect
import datetime
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from pirate_core import HttpRedirectException, namespace_get, FormMixin
from pirate_social.models import Subscription
from pirate_signals.models import aso_rep_event, notification_send
from customtags.decorators import block_decorator
register = template.Library()
block = block_decorator(register)
get_namespace = namespace_get('pp_subscription')
@block
def pp_get_subscribees_for_user(context, nodelist, *args, **kwargs):
    """Template block tag exposing the subscriptions of `user=`.

    Kwargs:
        user (required) -- the subscriber whose subscriptions are listed.
        start, end      -- optional slice bounds; default slice is [0:8].

    Sets in the pp_subscription namespace:
        subscribees -- Subscription queryset slice [start:end]
        count       -- total number of subscriptions for the user
    """
    context.push()
    namespace = get_namespace(context)
    user = kwargs.pop('user', None)
    start = kwargs.get('start', None)
    end = kwargs.get('end', None)
    if start is None and end is None:
        start = 0
        end = 8
    else:
        try:
            start = int(start)
            end = int(end)
        except (TypeError, ValueError):
            # Narrowed from a bare except so programming errors still surface.
            raise ValueError('start and end values must be ints')
    if user is None:
        # Fixed copy-pasted message that named pp_subscription_form.
        raise ValueError("pp_get_subscribees_for_user tag requires that a User object "
                         "be passed to it assigned to the 'user=' argument")
    subs = Subscription.objects.filter(subscriber=user)
    count = subs.count()
    namespace['subscribees'] = subs[start:end]
    namespace['count'] = count
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_get_subscribers_for_user(context, nodelist, *args, **kwargs):
    """Template block tag exposing the subscribers of `user=`.

    Kwargs:
        user (required) -- the user whose subscribers are listed.
        start, end      -- optional slice bounds; default slice is [0:8].

    Sets in the pp_subscription namespace:
        subscribers -- Subscription queryset slice [start:end]
        count       -- total number of subscribers for the user
    """
    context.push()
    namespace = get_namespace(context)
    user = kwargs.pop('user', None)
    start = kwargs.get('start', None)
    end = kwargs.get('end', None)
    if start is None and end is None:
        start = 0
        end = 8
    else:
        try:
            start = int(start)
            end = int(end)
        except (TypeError, ValueError):
            # Narrowed from a bare except so programming errors still surface.
            raise ValueError('start and end values must be ints')
    if user is None:
        # Fixed copy-pasted message that named pp_subscription_form.
        raise ValueError("pp_get_subscribers_for_user tag requires that a User object "
                         "be passed to it assigned to the 'user=' argument")
    subs = Subscription.objects.filter(subscribee=user)
    count = subs.count()
    namespace['subscribers'] = subs[start:end]
    namespace['count'] = count
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_subscriber_count(context, nodelist, *args, **kwargs):
    """Template block tag exposing the number of users subscribed TO `user=`
    as {{ pp_subscription.count }}."""
    context.push()
    namespace = get_namespace(context)
    user = kwargs.pop('user', None)
    if user is None:
        # NOTE(review): message names pp_subscription_form but this tag is
        # pp_subscriber_count — copy-pasted from the sibling tag.
        raise ValueError("pp_subscription_form tag requires that a User object be passed "
                         "to it assigned to the 'user=' argument")
    subs = Subscription.objects.all()
    subs = subs.filter(subscribee=user)
    count = subs.count()
    namespace['count'] = count
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_subscribee_count(context, nodelist, *args, **kwargs):
    """Template block tag exposing the number of users `user=` subscribes to
    as {{ pp_subscription.count }}."""
    context.push()
    namespace = get_namespace(context)
    user = kwargs.pop('user', None)
    if user is None:
        # NOTE(review): message names pp_subscription_form but this tag is
        # pp_subscribee_count — copy-pasted from the sibling tag.
        raise ValueError("pp_subscription_form tag requires that a User object be passed "
                         "to it assigned to the 'user=' argument")
    subs = Subscription.objects.all()
    subs = subs.filter(subscriber=user)
    count = subs.count()
    namespace['count'] = count
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_has_subscription(context, nodelist, *args, **kwargs):
    """Template block tag setting `has_subscription` (in the pp_subscription
    namespace) to True when `subscriber=` is subscribed to `subscribee=`."""
    context.push()
    namespace = get_namespace(context)
    POST = kwargs.get('POST', None)  # kept for call compatibility; unused here
    subscriber = kwargs.pop('subscriber', None)
    subscribee = kwargs.pop('subscribee', None)
    if subscriber is None:
        raise ValueError("pp_has_subscription tag requires that a object be passed "
                         "to it assigned to the 'subscriber=' argument")
    if subscribee is None:
        raise ValueError("pp_has_subscription tag requires that a object be passed "
                         "to it assigned to the 'subscribee=' argument")
    # exists() replaces get() inside a bare try/except: the original reported
    # False when MORE than one matching row existed (get() raises
    # MultipleObjectsReturned) and silently swallowed genuine errors.
    namespace['has_subscription'] = Subscription.objects.filter(
        subscriber=subscriber, subscribee=subscribee).exists()
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_end_subscription_form(context, nodelist, *args, **kwargs):
    """Template block tag rendering/handling the UNSUBSCRIBE form.

    On a valid POST of pp_subscription_form, deletes the Subscription from
    `subscriber=` to `subscribee=` and redirects (via HttpRedirectException)
    to the subscribee's profile. Otherwise exposes an unbound form as
    {{ pp_subscription.form }} unless subscriber == subscribee.
    """
    context.push()
    namespace = get_namespace(context)
    # this tag only works if a valid pair is assigned to the 'object=' argument
    POST = kwargs.get('POST', None)
    subscriber = kwargs.pop('subscriber', None)
    subscribee = kwargs.pop('subscribee', None)
    if subscriber is None:
        raise ValueError("pp_subscription_form tag requires that a object be passed "
                         "to it assigned to the 'subscriber=' argument")
    if subscribee is None:
        raise ValueError("pp_subscription_form tag requires that a object be passed "
                         "to it assigned to the 'subscribee=' argument")
    if POST and POST.get("form_id") == "pp_subscription_form":
        form = SubscriptionForm(POST)
        if form.is_valid():
            # NOTE(review): get() raises DoesNotExist (uncaught) if the
            # subscription was already removed — confirm callers prevent this.
            sub = Subscription.objects.get(subscriber=subscriber,subscribee=subscribee)
            sub.delete()
            c_type = ContentType.objects.get_for_model(subscribee)
            raise HttpRedirectException(HttpResponseRedirect("/user_profile.html?_t=" + str(c_type.pk) + "&_o=" + str(subscribee.pk)))
    else: form = SubscriptionForm()
    if subscriber != subscribee: namespace['form'] = form
    output = nodelist.render(context)
    context.pop()
    return output
@block
def pp_subscription_form(context, nodelist, *args, **kwargs):
    """Template block tag rendering/handling the SUBSCRIBE form.

    On a valid POST of pp_subscription_form, creates a Subscription from
    `subscriber=` to `subscribee=` and redirects (via HttpRedirectException)
    to the subscribee's profile. Otherwise exposes an unbound form as
    {{ pp_subscription.form }} unless subscriber == subscribee.
    """
    context.push()
    namespace = get_namespace(context)
    # this tag only works if a valid pair is assigned to the 'object=' argument
    POST = kwargs.get('POST', None)
    subscriber = kwargs.pop('subscriber', None)
    subscribee = kwargs.pop('subscribee', None)
    if subscriber is None:
        raise ValueError("pp_subscription_form tag requires that a object be passed "
                         "to it assigned to the 'subscriber=' argument")
    if subscribee is None:
        raise ValueError("pp_subscription_form tag requires that a object be passed "
                         "to it assigned to the 'subscribee=' argument")
    if POST and POST.get("form_id") == "pp_subscription_form":
        form = SubscriptionForm(POST)
        if form.is_valid():
            sub = Subscription(subscriber=subscriber,subscribee=subscribee,created_dt=datetime.datetime.now())
            sub.save()
            c_type = ContentType.objects.get_for_model(subscribee)
            raise HttpRedirectException(HttpResponseRedirect("/user_profile.html?_t=" + str(c_type.pk) + "&_o=" + str(subscribee.pk)))
    else: form = SubscriptionForm()
    if subscriber != subscribee: namespace['form'] = form
    output = nodelist.render(context)
    context.pop()
    return output
class SubscriptionForm(forms.ModelForm):
    """This form is used to create a subscription object between two users."""
    class Meta:
        model = Subscription
        # Both endpoints are supplied by the handling tags, not the user.
        exclude = ('subscriber','subscribee')
    # Hidden marker so the handling tags can recognise their own POSTs.
    form_id = forms.CharField(widget=forms.HiddenInput(), initial="pp_subscription_form")
|
995,383 | 5199b59f90f1dc4f8c18270baff6e66dc91cc408 | from __future__ import print_function
import sys
import os
import numpy as np
from tqdm import trange
import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
from models_v1 import *
import cv2
class Trainer(object):
    def __init__(self, config, img_loader, sketch_loader, img_loader_test, sketch_loader_test):
        """Wire up data loaders, learning-rate schedules, the mode-selected
        generator/discriminator pair, and a supervised TF session.

        `config.mode` must be one of: photo_to_sketch_generator,
        photo_to_sketch_GAN, sketch_to_photo_GAN, photo_to_sketch_GAN_UNET.
        """
        self.config = config
        self.img_loader = img_loader
        self.sketch_loader = sketch_loader
        self.img_loader_test = img_loader_test
        self.sketch_loader_test = sketch_loader_test
        self.mode = config.mode
        self.batch_size = config.batch_size
        self.batch_size_eval = config.batch_size_eval
        self.step = tf.Variable(0, name='step', trainable=False)
        self.start_step = 0
        self.log_step = config.log_step
        self.epoch_step = config.epoch_step
        self.max_step = config.max_step
        self.save_step = config.save_step
        self.wd_ratio = config.wd_ratio
        self.g_lr = tf.Variable(config.g_lr, name='g_lr')
        self.d_lr = tf.Variable(config.d_lr, name='d_lr')
        # Exponential learning rate decay
        # Factors are chosen so the lr reaches *_min_lr on the final epoch.
        self.epoch_num = config.max_step / config.epoch_step
        g_decay_factor = (config.g_min_lr / config.g_lr)**(1./(self.epoch_num-1.))
        self.g_lr_update = tf.assign(self.g_lr, self.g_lr*g_decay_factor, name='g_lr_update')
        d_decay_factor = (config.d_min_lr / config.d_lr)**(1./(self.epoch_num-1.))
        self.d_lr_update = tf.assign(self.d_lr, self.d_lr*d_decay_factor, name='d_lr_update')
        self.model_dir = config.model_dir
        self.load_path = config.load_path
        # Pick the generator (and, for GAN modes, the discriminator) by mode.
        if self.mode == 'photo_to_sketch_generator':
            self.generator = photo_to_sketch_generator
        elif self.mode == 'photo_to_sketch_GAN':
            self.generator = photo_to_sketch_generator
            self.discriminator = discriminator
        elif self.mode == 'sketch_to_photo_GAN':
            self.generator = sketch_to_photo_generator
            self.discriminator = discriminator
        elif self.mode == 'photo_to_sketch_GAN_UNET':
            self.generator = photo_to_sketch_generator_UNET
            self.discriminator = discriminator
        else:
            # NOTE(review): execution continues after this message and
            # build_model() will fail later; consider raising ValueError.
            print('Wrong mode selected. Select one of 4 available choices')
        self.build_model()
        self.build_gen_eval_model()
        self.saver = tf.train.Saver()
        self.summary_writer = tf.summary.FileWriter(self.model_dir)
        # Supervisor handles checkpointing (every 60 s) and session recovery.
        sv = tf.train.Supervisor(logdir=self.model_dir,
                                 is_chief=True,
                                 saver=self.saver,
                                 summary_op=None,
                                 summary_writer=self.summary_writer,
                                 save_model_secs=60,
                                 global_step=self.step,
                                 ready_for_local_init_op=None)
        # allow_growth avoids grabbing all GPU memory up front.
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)
        self.sess = sv.prepare_or_wait_for_session(config=sess_config)
def build_model(self):
    """Build the training graph for the mode selected in the config.

    All four modes share one pattern: a generator producing G_x from input x,
    an L1 reconstruction loss against target y, L2 weight decay collected over
    every trainable variable, and (GAN modes) a conditional discriminator that
    scores generated and ground-truth (output, input) pairs in one doubled batch.

    NOTE(review): the GAN losses apply tf.log directly to the discriminator
    outputs with no epsilon (NaN risk when D saturates), and minimizing
    D_loss = log(D_y) + log(1 - D_G_x) is sign-inverted relative to the
    standard GAN objective -- confirm this is intentional.
    """
    if self.mode == 'photo_to_sketch_generator':
        # Use only L1 loss or both L1 and discriminator loss
        self.x = self.img_loader
        x = self.x
        self.y = self.sketch_loader
        y = self.y
        self.G_x, self.G_var = self.generator(x, self.batch_size,
                                              is_train = True, reuse = False)
        self.G_loss = tf.reduce_mean(tf.abs(self.G_x-y)) # L1 loss
        # self.D_loss = tf.zeros(self.G_loss.shape)
        gen_optimizer = tf.train.AdamOptimizer(self.g_lr, beta1 = 0.5, beta2=0.999)
        wd_optimizer = tf.train.GradientDescentOptimizer(self.g_lr)
        # L2 weight decay over *all* trainable variables, applied via a separate
        # SGD optimizer ('losses' is a global collection -- grows if reused).
        for var in tf.trainable_variables():
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd_ratio)
            tf.add_to_collection('losses', weight_decay)
        wd_loss = tf.add_n(tf.get_collection('losses'))
        self.G_optim = gen_optimizer.minimize(self.G_loss, var_list=self.G_var)
        self.wd_optim = wd_optimizer.minimize(wd_loss)
        self.summary_op = tf.summary.merge([
            tf.summary.scalar("g_lr", self.g_lr),
            tf.summary.scalar("d_lr", self.d_lr),
            tf.summary.image("gen_sketch", self.G_x),
            tf.summary.image('train_image',self.x),
            tf.summary.image('train_sketch',self.y),
            tf.summary.scalar("G_loss", self.G_loss)
            # tf.summary.scalar('D_loss', self.D_loss)
        ])
    elif self.mode == 'photo_to_sketch_GAN':
        self.x = self.img_loader
        x = self.x
        self.y = self.sketch_loader
        y = self.y
        self.G_x, self.G_var = self.generator(x, self.batch_size,
                                              is_train = True, reuse = False)
        G_x = self.G_x
        D_G_x_in = tf.concat([G_x,x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([y,x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        D_in = tf.concat([D_G_x_in, D_y_in], axis=0) # Batching ground truth and generator output as input for discriminator
        D_out, self.D_var = self.discriminator(D_in, self.batch_size*2,
                                               is_train=True, reuse=False)
        # First half of the doubled batch scores the fakes, second half the reals.
        self.D_G_x = D_out[0:self.batch_size]
        self.D_y = D_out[self.batch_size:]
        D_loss_real = tf.reduce_mean(tf.log(self.D_y))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x))
        self.D_loss = D_loss_fake + D_loss_real
        # Generator loss: weighted L1 reconstruction minus the discriminator loss.
        self.G_loss = tf.reduce_mean(tf.abs(self.G_x-y))*0.5 - self.D_loss # L1 loss
        gen_optimizer = tf.train.AdamOptimizer(self.g_lr, beta1 = 0.5, beta2=0.999)
        disc_optimizer = tf.train.AdamOptimizer(self.d_lr, beta1 = 0.5, beta2=0.999)
        wd_optimizer = tf.train.GradientDescentOptimizer(self.g_lr)
        for var in tf.trainable_variables():
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd_ratio)
            tf.add_to_collection('losses', weight_decay)
        wd_loss = tf.add_n(tf.get_collection('losses'))
        self.G_optim = gen_optimizer.minimize(self.G_loss, var_list=self.G_var)
        self.D_optim = disc_optimizer.minimize(self.D_loss, var_list=self.D_var)
        self.wd_optim = wd_optimizer.minimize(wd_loss)
        self.summary_op = tf.summary.merge([
            tf.summary.scalar("g_lr", self.g_lr),
            tf.summary.scalar("d_lr", self.d_lr),
            tf.summary.image("gen_sketch", self.G_x),
            tf.summary.image('train_image',self.x),
            tf.summary.image('train_sketch',self.y),
            tf.summary.scalar("G_loss", self.G_loss),
            tf.summary.scalar('D_loss', self.D_loss),
            tf.summary.image('D_G_x', self.D_G_x),
            tf.summary.image('D_y', self.D_y),
        ])
    elif self.mode == 'sketch_to_photo_GAN':
        # Direction is reversed: sketches are the input, photos the target.
        self.x = self.sketch_loader
        x = self.x
        self.y = self.img_loader
        y = self.y
        self.G_x, self.G_var = self.generator(x, self.batch_size,
                                              is_train = True, reuse = False)
        D_G_x_in = tf.concat([self.G_x,x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([y,x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        D_in = tf.concat([D_G_x_in, D_y_in], axis=0) # Batching ground truth and generator output as input for discriminator
        D_out, self.D_var = self.discriminator(D_in, self.batch_size*2,
                                               is_train=True, reuse=False)
        self.D_G_x = D_out[0:self.batch_size]
        self.D_y = D_out[self.batch_size:]
        D_loss_real = tf.reduce_mean(tf.log(self.D_y))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x))
        self.D_loss = D_loss_fake + D_loss_real
        # Lower L1 weight (0.1) than the photo->sketch modes (0.5).
        self.G_loss = tf.reduce_mean(tf.abs(self.G_x-y))*0.1 - self.D_loss
        gen_optimizer = tf.train.AdamOptimizer(self.g_lr, beta1 = 0.5, beta2=0.999)
        disc_optimizer = tf.train.AdamOptimizer(self.d_lr, beta1 = 0.5, beta2=0.999)
        wd_optimizer = tf.train.GradientDescentOptimizer(self.g_lr)
        for var in tf.trainable_variables():
            print(var)
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd_ratio)
            tf.add_to_collection('losses', weight_decay)
        wd_loss = tf.add_n(tf.get_collection('losses'))
        self.G_optim = gen_optimizer.minimize(self.G_loss, var_list=self.G_var)
        self.D_optim = disc_optimizer.minimize(self.D_loss, var_list=self.D_var)
        self.wd_optim = wd_optimizer.minimize(wd_loss)
        self.summary_op = tf.summary.merge([
            tf.summary.scalar("g_lr", self.g_lr),
            tf.summary.scalar("d_lr", self.d_lr),
            tf.summary.image("gen_sketch", self.G_x),
            tf.summary.image('train_image',self.x),
            tf.summary.image('train_sketch',self.y),
            tf.summary.scalar("G_loss", self.G_loss),
            tf.summary.scalar('D_loss', self.D_loss),
            tf.summary.image('D_G_x', self.D_G_x),
            tf.summary.image('D_y', self.D_y),
        ])
    elif self.mode == 'photo_to_sketch_GAN_UNET':
        # Identical wiring to photo_to_sketch_GAN; only the generator
        # architecture (U-Net, chosen in __init__) differs.
        self.x = self.img_loader
        x = self.x
        self.y = self.sketch_loader
        y = self.y
        self.G_x, self.G_var = self.generator(x, self.batch_size,
                                              is_train = True, reuse = False)
        D_G_x_in = tf.concat([self.G_x,x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([y,x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        D_in = tf.concat([D_G_x_in, D_y_in], axis=0) # Batching ground truth and generator output as input for discriminator
        D_out, self.D_var = self.discriminator(D_in, self.batch_size*2,
                                               is_train=True, reuse=False)
        self.D_G_x = D_out[0:self.batch_size]
        self.D_y = D_out[self.batch_size:]
        D_loss_real = tf.reduce_mean(tf.log(self.D_y))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x))
        self.D_loss = D_loss_fake + D_loss_real
        self.G_loss = tf.reduce_mean(tf.abs(self.G_x-y))*0.5 - self.D_loss
        gen_optimizer = tf.train.AdamOptimizer(self.g_lr, beta1 = 0.5, beta2=0.999)
        disc_optimizer = tf.train.AdamOptimizer(self.d_lr, beta1 = 0.5, beta2=0.999)
        wd_optimizer = tf.train.GradientDescentOptimizer(self.g_lr)
        for var in tf.trainable_variables():
            print(var)
            weight_decay = tf.multiply(tf.nn.l2_loss(var), self.wd_ratio)
            tf.add_to_collection('losses', weight_decay)
        wd_loss = tf.add_n(tf.get_collection('losses'))
        self.G_optim = gen_optimizer.minimize(self.G_loss, var_list=self.G_var)
        self.D_optim = disc_optimizer.minimize(self.D_loss, var_list=self.D_var)
        self.wd_optim = wd_optimizer.minimize(wd_loss)
        self.summary_op = tf.summary.merge([
            tf.summary.scalar("g_lr", self.g_lr),
            tf.summary.scalar("d_lr", self.d_lr),
            tf.summary.image("gen_sketch", self.G_x),
            tf.summary.image('train_image',self.x),
            tf.summary.image('train_sketch',self.y),
            tf.summary.scalar("G_loss", self.G_loss),
            tf.summary.scalar('D_loss', self.D_loss),
            tf.summary.image('D_G_x', self.D_G_x),
            tf.summary.image('D_y', self.D_y),
        ])
    else:
        # No graph is built for an unknown mode -- later attribute accesses will fail.
        print('Wrong mode selected. Choose from available 4 choices.')
def build_gen_eval_model(self):
    """Build the evaluation graph on the test loaders.

    Mirrors build_model but with is_train=False and reuse=True so the test
    graph shares the training variables.  GAN modes score the fake and real
    pairs with two separate (shared-weight) discriminator calls instead of
    one doubled batch, and additionally track a plain L1 loss.

    NOTE(review): nothing is built for an unrecognized mode (silently a no-op).
    """
    if self.mode == 'photo_to_sketch_generator':
        self.test_x = self.img_loader_test
        # self.test_x = tf.placeholder(shape=[self.batch_size_eval,256,256,3], dtype=tf.float32)
        test_x = self.test_x
        self.test_y = self.sketch_loader_test
        test_y = self.test_y
        self.G_x_test, G_var = self.generator(test_x, self.batch_size_eval,
                                              is_train = False, reuse = True)
        self.G_loss_test = tf.reduce_mean(tf.abs(self.G_x_test-test_y)) # L1 loss
        self.summary_op_test = tf.summary.merge([
            tf.summary.image("gen_test_sketch", self.G_x_test),
            tf.summary.image('test_image',self.test_x),
            tf.summary.image('test_sketch',self.test_y),
            tf.summary.scalar("G_loss", self.G_loss_test)
        ])
    elif self.mode == 'photo_to_sketch_GAN':
        self.test_x = self.img_loader_test
        test_x = self.test_x
        self.test_y = self.sketch_loader_test
        test_y = self.test_y
        self.G_x_test, G_var = self.generator(test_x, self.batch_size_eval,
                                              is_train = False, reuse = True)
        G_x_test = self.G_x_test
        D_G_x_in = tf.concat([G_x_test,test_x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([test_y,test_x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        self.D_G_x_test, D_Var = self.discriminator(D_G_x_in, self.batch_size_eval,
                                                    is_train = False, reuse = True)
        self.D_y_test, D_Var = self.discriminator(D_y_in, self.batch_size_eval,
                                                  is_train = False, reuse = True)
        D_loss_real = tf.reduce_mean(tf.log(self.D_y_test))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x_test))
        self.D_loss_test = D_loss_fake + D_loss_real
        self.G_loss_test = tf.reduce_mean(tf.abs(self.G_x_test-test_y))*0.5 - self.D_loss_test # L1 loss
        self.G_loss_test_L1 = tf.reduce_mean(tf.abs(self.G_x_test-test_y)) # L1 loss
        self.summary_op_test = tf.summary.merge([
            tf.summary.image("gen_test_sketch", self.G_x_test),
            tf.summary.image('test_image',self.test_x),
            tf.summary.image('test_sketch',self.test_y),
            tf.summary.scalar("G_loss", self.G_loss_test),
            tf.summary.scalar("G_loss_L1", self.G_loss_test_L1),
            tf.summary.image("D_G_x_test", self.D_G_x_test),
            tf.summary.image("D_y_test", self.D_y_test),
            tf.summary.scalar("D_loss_test", self.D_loss_test)
        ])
    elif self.mode == 'sketch_to_photo_GAN':
        # Reversed direction: sketches in, photos out.
        self.test_x = self.sketch_loader_test
        test_x = self.test_x
        self.test_y = self.img_loader_test
        test_y = self.test_y
        self.G_x_test, G_var = self.generator(test_x, self.batch_size_eval,
                                              is_train = False, reuse = True)
        G_x_test = self.G_x_test
        D_G_x_in = tf.concat([G_x_test,test_x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([test_y,test_x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        self.D_G_x_test, D_Var = self.discriminator(D_G_x_in, self.batch_size_eval,
                                                    is_train = False, reuse = True)
        self.D_y_test, D_Var = self.discriminator(D_y_in, self.batch_size_eval,
                                                  is_train = False, reuse = True)
        D_loss_real = tf.reduce_mean(tf.log(self.D_y_test))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x_test))
        self.D_loss_test = D_loss_fake + D_loss_real
        # NOTE(review): eval uses weight 0.5 here while training uses 0.1 for
        # this mode -- the two losses are not directly comparable; confirm.
        self.G_loss_test = tf.reduce_mean(tf.abs(self.G_x_test-test_y))*0.5 - self.D_loss_test # L1 loss
        self.G_loss_test_L1 = tf.reduce_mean(tf.abs(self.G_x_test-test_y)) # L1 loss
        self.summary_op_test = tf.summary.merge([
            tf.summary.image("gen_test_image", self.G_x_test),
            tf.summary.image('test_sketch',self.test_x),
            tf.summary.image('test_image',self.test_y),
            tf.summary.scalar("G_loss", self.G_loss_test),
            tf.summary.scalar("G_loss_L1", self.G_loss_test_L1),
            tf.summary.image("D_G_x_test", self.D_G_x_test),
            tf.summary.image("D_y_test", self.D_y_test),
            tf.summary.scalar("D_loss_test", self.D_loss_test)
        ])
    elif self.mode == 'photo_to_sketch_GAN_UNET':
        # Same wiring as photo_to_sketch_GAN; only the generator differs.
        self.test_x = self.img_loader_test
        test_x = self.test_x
        self.test_y = self.sketch_loader_test
        test_y = self.test_y
        self.G_x_test, G_var = self.generator(test_x, self.batch_size_eval,
                                              is_train = False, reuse = True)
        G_x_test = self.G_x_test
        D_G_x_in = tf.concat([G_x_test,test_x], axis=3) # Concatenates image and sketch along channel axis for generated image
        D_y_in = tf.concat([test_y,test_x], axis=3) # Concatenates image and sketch along channel axis for ground truth image
        self.D_G_x_test, D_Var = self.discriminator(D_G_x_in, self.batch_size_eval,
                                                    is_train = False, reuse = True)
        self.D_y_test, D_Var = self.discriminator(D_y_in, self.batch_size_eval,
                                                  is_train = False, reuse = True)
        D_loss_real = tf.reduce_mean(tf.log(self.D_y_test))
        D_loss_fake = tf.reduce_mean(tf.log(tf.constant([1],dtype=tf.float32) - self.D_G_x_test))
        self.D_loss_test = D_loss_fake + D_loss_real
        self.G_loss_test = tf.reduce_mean(tf.abs(self.G_x_test-test_y))*0.5 - self.D_loss_test # L1 loss
        self.G_loss_test_L1 = tf.reduce_mean(tf.abs(self.G_x_test-test_y)) # L1 loss
        self.summary_op_test = tf.summary.merge([
            tf.summary.image("gen_test_sketch", self.G_x_test),
            tf.summary.image('test_image',self.test_x),
            tf.summary.image('test_sketch',self.test_y),
            tf.summary.scalar("G_loss", self.G_loss_test),
            tf.summary.scalar("G_loss_L1", self.G_loss_test_L1),
            tf.summary.image("D_G_x_test", self.D_G_x_test),
            tf.summary.image("D_y_test", self.D_y_test),
            tf.summary.scalar("D_loss_test", self.D_loss_test)
        ])
def train(self):
    """Main optimization loop from self.start_step up to self.max_step.

    Generator-only mode runs just the generator step; every other mode runs
    one generator step followed by discriminator step(s).  Periodically:
    logs summaries/losses (every log_step), checkpoints and evaluates on 100
    test batches (every save_step), and applies the exponential learning-rate
    decay ops (every epoch_step).
    """
    for step in trange(self.start_step, self.max_step):
        if self.config.mode == 'photo_to_sketch_generator':
            # Ops/tensors fetched on every generator step.
            fetch_dict_gen = {
                'gen_optim': self.G_optim,
                'x': self.x,
                'y': self.y,
                'G_loss': self.G_loss,
                'G_x': self.G_x}
            # fetch_dict_disc = {
            #     'disc_optim': self.D_optim,
            #     # 'wd_optim': self.wd_optim,
            #     'D_loss': self.D_loss,
            #     # 'D_x': self.D_x,
            #     # 'G_loss': self.G_loss,
            #     # 'D_G_z':self.D_G_z,
            #     # 'G_z': self.G_z
            # }
            if step % self.log_step == self.log_step - 1:
                # Fetch summaries/LR only on logging steps.
                fetch_dict_gen.update({
                    'g_lr': self.g_lr,
                    # 'd_lr': self.d_lr,
                    'summary': self.summary_op })
            result = self.sess.run(fetch_dict_gen)
            G_loss = result['G_loss']
            G_x = result['G_x']
            # print("\n[{}/{}] Gen_Loss: {:.6f} " . \
            #     format(step, self.max_step, G_loss))
            # D_x = result['D_x']
            # D_G_z = result['D_G_z']
            # G_z = result['G_z']
            if step % self.log_step == self.log_step - 1:
                self.summary_writer.add_summary(result['summary'], step)
                self.summary_writer.flush()
                #pdb.set_trace()
                g_lr = result['g_lr']
                print("\n[{}/{}] Gen_Loss: {:.6f} " . \
                    format(step, self.max_step, G_loss))
                sys.stdout.flush()
            if step % self.save_step == self.save_step - 1:
                self.saver.save(self.sess, self.model_dir + '/model')
                # Average the L1 test loss over 100 test batches.
                G_loss_test = 0
                for i in range(100):
                    fetch_dict_gen = {
                        'x': self.test_x,
                        'y': self.test_y,
                        'G_loss': self.G_loss_test,
                        'G_x': self.G_x_test,
                        'summary_test': self.summary_op_test}
                    result_test = self.sess.run(fetch_dict_gen)
                    G_loss_test += result_test['G_loss']
                G_loss_test /= 100
                print ('\ntest_loss = %.4f'%(G_loss_test))
                # Only the last batch's summary is written.
                self.summary_writer.add_summary(result_test['summary_test'], step)
                self.summary_writer.flush()
            if step % self.epoch_step == self.epoch_step - 1:
                # Apply the exponential LR decay once per epoch.
                self.sess.run([self.g_lr_update])
                self.sess.run([self.d_lr_update])
        # elif self.config.mode == 'photo_to_sketch_GAN':
        else:
            fetch_dict_gen = {
                'gen_optim': self.G_optim,
                'x': self.x,
                'y': self.y,
                'G_loss': self.G_loss,
                'G_x': self.G_x}
            fetch_dict_disc = {
                'disc_optim': self.D_optim,
                # 'wd_optim': self.wd_optim,
                'D_loss': self.D_loss,
                'D_y': self.D_y,
                'G_loss': self.G_loss,
                'D_G_x':self.D_G_x,
                'G_x': self.G_x
            }
            if step % self.log_step == self.log_step - 1:
                fetch_dict_disc.update({
                    'g_lr': self.g_lr,
                    'd_lr': self.d_lr,
                    'summary': self.summary_op })
            result = self.sess.run(fetch_dict_gen)
            G_loss = result['G_loss']
            x = result['x']
            y = result['y']
            G_x = result['G_x']
            # NOTE(review): the discriminator step runs twice per generator
            # step here -- looks like an accidentally duplicated line rather
            # than a deliberate 2:1 update schedule; confirm intent.
            result = self.sess.run(fetch_dict_disc)
            result = self.sess.run(fetch_dict_disc)
            D_y = result['D_y']
            D_G_x = result['D_G_x']
            G_x = result['G_x']
            D_loss = result['D_loss']
            if step % self.log_step == self.log_step - 1:
                self.summary_writer.add_summary(result['summary'], step)
                self.summary_writer.flush()
                #pdb.set_trace()
                g_lr = result['g_lr']
                print("\n[{}/{}] Gen_Loss: {:.6f} Disc_Loss: {:.6f} " . \
                    format(step, self.max_step, G_loss, D_loss))
                sys.stdout.flush()
            if step % self.save_step == self.save_step - 1:
                self.saver.save(self.sess, self.model_dir + '/model')
                # Average the plain-L1 test loss over 100 test batches.
                G_loss_test = 0
                for i in range(100):
                    fetch_dict_gen = {
                        'x': self.test_x,
                        'y': self.test_y,
                        'G_loss_L1': self.G_loss_test_L1,
                        'G_x': self.G_x_test,
                        'D_g_x': self.D_G_x_test,
                        'summary_test': self.summary_op_test}
                    result_test = self.sess.run(fetch_dict_gen)
                    G_loss_test += result_test['G_loss_L1']
                G_loss_test /= 100
                print ('\nG_test_loss_L1 = %.4f'%(G_loss_test))
                self.summary_writer.add_summary(result_test['summary_test'], step)
                self.summary_writer.flush()
            if step % self.epoch_step == self.epoch_step - 1:
                self.sess.run([self.g_lr_update])
                self.sess.run([self.d_lr_update])
def test(self):
    """Evaluate a restored checkpoint on the test loaders.

    Restores the saved model, runs the evaluation graph batch by batch,
    writes per-batch summaries, and accumulates the average generator loss.

    Bug fixes vs. the original:
      * the generator branch referenced the undefined names `result_test`
        and `step` (guaranteed NameError at runtime) -- now uses `result`
        and the loop index `i`;
      * both branches divided the accumulated loss by a hard-coded 100
        regardless of how many batches were actually run -- now divides by
        the real batch count;
      * removed a leftover `pdb.set_trace()` that halted every iteration.

    NOTE(review): the checkpoint name 'model.ckpt-0' is hard-coded --
    confirm it matches what the Supervisor/saver actually writes.
    """
    self.saver.restore(self.sess, self.model_dir + '/model.ckpt-0')
    # Batch counts preserved from the original loops.
    num_batches = 1 if self.mode == 'photo_to_sketch_generator' else 1000
    G_loss = 0
    for i in range(num_batches):
        fetch_dict_gen = {
            'x': self.test_x,
            'y': self.test_y,
            'G_loss': self.G_loss_test,
            'G_x': self.G_x_test,
            'summary_test': self.summary_op_test}
        result = self.sess.run(fetch_dict_gen)
        # Index summaries by batch so each evaluation batch is visible.
        self.summary_writer.add_summary(result['summary_test'], i)
        self.summary_writer.flush()
        G_loss += result['G_loss']
    # Average over the batches that were actually evaluated.
    G_loss /= num_batches
|
995,384 | 5d01086ed7e42f066f74ae12cb2f95e7503b57a7 | from typing import List
##Vectors
# A vector is simply a list of floats.
Vector = List[float]

height_weight_age = [70,  # inches,
                     170, # pounds,
                     40 ] # years

grades = [95, # exam1
          80, # exam2
          75, # exam3
          62 ] # exam4

# Vectors add componentwise, meaning if two vectors are the same length,
# their sum is the sum of their components:
# v[0] + w[0], v[1] + w[1], and so on.
# This functionality can be implemented using a list comprehension to zip the vectors together.
def add(v: Vector, w:Vector) -> Vector:
    """Return the componentwise sum of two equal-length vectors."""
    assert len(v) == len(w), "Vectors must have the same length"
    total = []
    for left, right in zip(v, w):
        total.append(left + right)
    return total

assert add([1, 2, 3], [4, 5, 6]) == [5, 7, 9]
#Same reasoning for subtracting two vectors
def subtract(v: Vector, w: Vector) -> Vector:
    """Return the componentwise difference of two equal-length vectors."""
    assert len(v) == len(w), "Vectors must have the same length"
    difference = []
    for left, right in zip(v, w):
        difference.append(left - right)
    return difference

assert subtract([5, 7, 9], [4, 5, 6]) == [1, 2, 3]
#Sometimes we will want to componentwise sum a list of vectors -- first element is the sum of all first elements, second the sum of all second, and so on
def vector_sum(vectors: List[Vector]) -> Vector:
    """Componentwise sum of a non-empty list of equally sized vectors."""
    # Check that vectors is not empty
    assert vectors, "no vectors provided!"
    # Every vector must have the same number of components.
    num_elements = len(vectors[0])
    assert all(len(v) == num_elements for v in vectors), "different sizes!"
    # Accumulate one output component at a time.
    totals = []
    for position in range(num_elements):
        component_total = 0
        for vector in vectors:
            component_total += vector[position]
        totals.append(component_total)
    return totals
#We will need to multiply a vector by a scalar, which multiplies each element in a vector by the scalar
def scalar_multiply(c: float, v: Vector) -> Vector:
    """Return v with every component scaled by c."""
    scaled = []
    for component in v:
        scaled.append(c * component)
    return scaled

assert scalar_multiply(2, [1, 2, 3]) == [2, 4, 6]
#Now we can compute the componentwise mean of a list of same-sized vectors
def vector_mean(vectors: List[Vector]) -> Vector:
    """Computes the element-wise average of a list of same-sized vectors."""
    n = len(vectors)
    # Mean = (1/n) * componentwise sum; raises ZeroDivisionError on [].
    return scalar_multiply(1/n, vector_sum(vectors))

assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3, 4]
#A useful tool in linear algebra is the dot product
#If we have vectors v and w, the dot product is the length of the vector if we projected v onto w (see page 58)
def dot(v: Vector, w: Vector) -> float:
    """Return the dot product v_1 * w_1 + ... + v_n * w_n."""
    assert len(v) == len(w), "vectors must be same length"
    total = 0
    for a, b in zip(v, w):
        total += a * b
    return total
#Now we can easily compute a vector's sum of squares
def sum_of_squares(v: Vector) -> float:
    """Returns v_1 * v_1 + v_2 * v_2 + ... + v_n * v_n (v dotted with itself)."""
    return dot(v, v)

assert sum_of_squares([1, 2, 3]) == 14
#Which can now be used to find its magnitude (length)
import math
def magnitude(v: Vector) -> float:
    """Returns the magnitude (Euclidean length) of v."""
    return math.sqrt(sum_of_squares(v)) #math.sqrt() is a square root function

assert magnitude([3, 4]) == 5
#We have what we need to find the distance between two vectors, which is defined as
#sqrt((v1 - w1)**2 + (v_2 - w_2) ** 2 + ... + (v_n - w_n) ** 2)
def squared_distance(v: Vector, w: Vector) -> float:
    """Computes (v_1 - w_1) ** 2 + ... + (v_n - w_n) ** 2."""
    # Squared Euclidean distance = sum of squares of the difference vector.
    return sum_of_squares(subtract(v, w))
def distance1(v: Vector, w: Vector) -> float:
    """Computes the Euclidean distance between v and w."""
    return math.sqrt(squared_distance(v, w))
def distance2(v: Vector, w: Vector) -> float: # type: ignore
    # Equivalent to distance1: the length of the difference vector.
    return magnitude(subtract(v, w))
#Distance 1 and Distance 2 are equivalent
#For vectors in production, more likely better to use NumPy instead
##Matrices
# A matrix is a two-dimensional collection of numbers.
# We represent one as a list of lists, each inner list having the same size
# and representing a row of the matrix.
# If A is a matrix, A[i][j] is the element in the i-th row and j-th column.
# By convention, capital letters denote matrices.
# Another type alias
Matrix = List[List[float]]

A = [[1, 2, 3], # A has 2 rows and 3 columns
     [4, 5, 6]]

B = [[1, 2], # B has 3 rows and 2 columns
     [3, 4],
     [5, 6]]

# Note: in conventional math notation rows and columns are 1-indexed,
# but since this is Python we zero-index both.
# Given the list-of-lists representation, a matrix's shape is its number of
# rows and columns: len(A) rows and len(A[0]) columns.
from typing import Tuple
def shape(A: Matrix) -> Tuple[int, int]:
    """Returns (# of rows of A, # of columns of A)."""
    if not A:
        # Empty matrix: zero rows, zero columns.
        return 0, 0
    return len(A), len(A[0])

assert shape([[1, 2, 3], [4, 5, 6]]) == (2, 3)
#a matrix with n rows and k columns will be referred to as an n x k matrix
#each row of an n x k matrix has a length of k
#each column of an n x k matrix has a length of n
def get_row(A: Matrix, i: int) -> Vector:
    """Returns the i-th row of A (as a Vector)."""
    return A[i] # A[i] is already the ith row
def get_column(A: Matrix, j: int) -> Vector:
    """Returns the j-th column of A (as a Vector)."""
    return [A_i[j]         # jth element of row A_i
            for A_i in A]  # for each row A_i
#We will end up wanting to create a matrix given its shape and a function for generating its elements using list comprehension
from typing import Callable
def make_matrix(num_rows: int,
                num_cols: int,
                entry_fn: Callable[[int, int], float]) -> Matrix:
    """
    Returns a num_rows x num_cols matrix
    whose (i,j)-th entry is entry_fn(i, j)
    """
    rows = []
    for i in range(num_rows):
        # Build row i by evaluating the entry function at every column.
        rows.append([entry_fn(i, j) for j in range(num_cols)])
    return rows
def identity_matrix(n: int) -> Matrix:
    """Returns the n x n identity matrix (1s on the diagonal, 0s elsewhere)."""
    return make_matrix(n, n, lambda i, j: 1 if i == j else 0)

assert identity_matrix(5) == [[1, 0, 0, 0, 0],
                              [0, 1, 0, 0, 0],
                              [0, 0, 1, 0, 0],
                              [0, 0, 0, 1, 0],
                              [0, 0, 0, 0, 1]]
# Each row is one person: (height, weight, age).
data = [[70, 170, 40],
        [65, 120, 26],
        [77, 250, 19],
        # ....
       ]

# Friendship edges as (user_i, user_j) pairs.
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
               (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]

# The same graph as a symmetric adjacency matrix:
# friend_matrix[i][j] == 1 iff users i and j are friends.
#            user 0  1  2  3  4  5  6  7  8  9
#
friend_matrix = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],  # user 0
                 [1, 0, 1, 1, 0, 0, 0, 0, 0, 0],  # user 1
                 [1, 1, 0, 1, 0, 0, 0, 0, 0, 0],  # user 2
                 [0, 1, 1, 0, 1, 0, 0, 0, 0, 0],  # user 3
                 [0, 0, 0, 1, 0, 1, 0, 0, 0, 0],  # user 4
                 [0, 0, 0, 0, 1, 0, 1, 1, 0, 0],  # user 5
                 [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],  # user 6
                 [0, 0, 0, 0, 0, 1, 0, 0, 1, 0],  # user 7
                 [0, 0, 0, 0, 0, 0, 1, 1, 0, 1],  # user 8
                 [0, 0, 0, 0, 0, 0, 0, 0, 1, 0]]  # user 9

assert friend_matrix[0][2] == 1, "0 and 2 are friends"
assert friend_matrix[0][8] == 0, "0 and 8 are not friends"

# With the matrix form, finding someone's friends only needs one row scan.
friends_of_five = [i
                   for i, is_friend in enumerate(friend_matrix[5])
                   if is_friend]
|
995,385 | f0d3ccb04443db57a11c6d703758b898e1ae295c | from flask import url_for
class TestExtTable:
    """Smoke tests for the external-tables admin API endpoints."""

    def test_download_tables(self, client, token):
        # Any of these codes is an acceptable outcome for this endpoint.
        response = client.get(
            url_for("admin_api.download_tables", source_id="99YYYYY"),
            headers={"token": token}
        )
        assert response.json["meta"]["code"] in (200, 400, 503)

    def test_get_download_tables_status(self, client, token):
        response = client.get(
            url_for("admin_api.get_download_tables_status", source_id="99YYYYY"),
            headers={"token": token}
        )
        assert response.json["meta"]["code"] in (200, 404)
995,386 | f76162b550213cd69e0d604c39ae6a8f440b4ae6 | import sys
sys.path.append("/files/rostam")
from rostam.start import main
from mock import patch
def begin():
    """Run rostam's main() under a fixed, synthetic command line."""
    cli = ["start.py", "-l", "/data", "-o", "/data/rostam.log"]
    # Substitute argv only for the duration of the call.
    with patch.object(sys, 'argv', cli):
        main()

if __name__ == "__main__":
    begin()
|
995,387 | ada499b4d7b8482377e241907a00288e6238780d | from django.apps import AppConfig
class OmniConfig(AppConfig):
    """Django application configuration for the `omni` app."""
    name = 'omni'
|
995,388 | 5f14acc576236c3869fc058e611d80a4d84b4969 | import json
from flask import Flask
from flask_cors import *
from game import devices_data
from game import host_data
app = Flask(__name__)
CORS(app, supports_credentials=True)  # enable cross-origin requests

# GET: return the shared devices snapshot (imported from game) as JSON.
@app.route('/devices', methods=['GET'])
def get_devices():
    return json.dumps(devices_data)
# Other handlers can be defined the same way.
# GET: return the shared host snapshot (imported from game) as JSON.
@app.route('/host', methods=['GET'])
def get_host():
    return json.dumps(host_data)
# Other handlers can be defined the same way.
# Run the development server, listening on port 3000.
if __name__ == '__main__':
    app.run(host='localhost', port=3000)
|
995,389 | ad639d69f4f091d595b7885b85e97b46e5fb8d8d | import numpy as np
from testFunction import returnChosenFunction
class Vector2D:
    """A mutable 2-D point with two construction modes."""
    # Class-level defaults; left in place when whichConstructor is neither 0 nor 1.
    x = None
    y = None

    def __init__(self,x = 0, y = 0, vector = 0, whichConstructor = 0):
        # whichConstructor == 0: build from coordinates; == 1: copy `vector`.
        if whichConstructor == 0:
            self.x, self.y = x, y
        elif whichConstructor == 1:
            self.x, self.y = vector.x, vector.y
class Vector3D:
    """A 2-D point plus its fitness value, with two construction modes."""
    # Class-level defaults; left in place for unrecognized constructor modes.
    x = None
    y = None
    Fitness = None

    def __init__(self,x = 0, y = 0, fitness = 0, vector = 0, whichConstructor = 0):
        # Mode 0: build from coordinates; mode 1: copy coordinates from `vector`.
        # The fitness argument is stored in both modes.
        if whichConstructor == 0:
            self.x, self.y = x, y
        elif whichConstructor == 1:
            self.x, self.y = vector.x, vector.y
        if whichConstructor in (0, 1):
            self.Fitness = fitness
class DE3D:
    """Differential-evolution search over a 2-D square, maximizing fitness."""
    # Class-level declarations; each instance overwrites them in __init__.
    hive = None            # population of Vector3D (position + fitness)
    range = None           # half-width of the search square; NOTE(review): name shadows the builtin in class scope
    maxIterations = None
    nrVectors = None
    F = None               # differential weight
    C = None               # crossover probability
    chosenFunction = None  # id passed through to returnChosenFunction

    def __init__(self, range1, nrVectors, maxIterations, chosenFunction):
        self.chosenFunction = chosenFunction
        self.maxIterations = maxIterations
        self.range = range1
        self.nrVectors = nrVectors
        self.hive = []
        self.C = 0.5
        self.F = 0.8
        # Seed the population uniformly inside [-range, range]^2.
        for i in range(0,nrVectors,1):
            x = np.random.uniform(low= -self.range, high = self.range)
            y = np.random.uniform(low= -self.range, high = self.range)
            z = returnChosenFunction(x, y,self.chosenFunction)
            self.hive.append(Vector3D(x = x, y = y, fitness = z, whichConstructor=0))

    def doOneIteration(self,iterations):
        # Advance one generation unless the iteration budget is exhausted.
        if (iterations < self.maxIterations):
            self.moveVectors()

    def mutation(self):
        """Build a mutant: best + F * (r1 - r2), resampled until it lands
        inside the search square.

        NOTE(review): r1/r2 are fresh uniform samples rather than hive
        members -- unusual for classic DE; confirm this is intentional.
        """
        x = None
        y = None
        Xr = Vector2D(x=0, y=0, whichConstructor=0)
        Xs = Vector2D(x=0, y=0, whichConstructor=0)
        result = Vector2D(x=0, y=0, whichConstructor=0)
        # Locate the current best (highest-fitness) member of the hive.
        strong_fitness = max(the_best.Fitness for the_best in self.hive)
        index = 0
        for index, item in enumerate(self.hive):
            if item.Fitness == strong_fitness:
                break
        else:
            index = -1
        Xbest = Vector2D(x=self.hive[index].x, y= self.hive[index].y , whichConstructor =0)
        while True:
            x = np.random.uniform(low= -self.range, high = self.range)
            y = np.random.uniform(low= -self.range, high = self.range)
            Xr = Vector2D(x = x, y = y, whichConstructor = 0)
            x = np.random.uniform(low= -self.range, high = self.range)
            y = np.random.uniform(low= -self.range, high = self.range)
            Xs = Vector2D(x = x, y = y, whichConstructor = 0)
            temp = Vector2D(x = Xbest.x, y = Xbest.y, whichConstructor =0)
            temp.x = Xbest.x + self.F * (Xr.x - Xs.x)
            temp.y = Xbest.y + self.F * (Xr.y - Xs.y)
            result = Vector2D(vector = Vector2D(x = temp.x, y = temp.y, whichConstructor =0), whichConstructor=1)
            # Reject candidates that fall outside the search square.
            if(result.x < self.range and result.x > -self.range and result.y < self.range and result.y > -self.range):
                break
        return result

    def recombination(self,V, x_i, y_i):
        """Binomial crossover: take each coordinate from the mutant V with
        probability C, otherwise keep the parent's coordinate."""
        result = Vector2D(x=0, y=0, whichConstructor=0)
        p = np.random.random()
        if (p <= self.C):
            result.x = V.x
        else:
            result.x = x_i
        p = np.random.random()
        if (p <= self.C):
            result.y = V.y
        else:
            result.y = y_i
        return result

    def moveVectors(self):
        # Greedy selection: replace a member only when the trial strictly
        # improves its fitness.
        for i in range(0,len(self.hive),1):
            U = self.recombination(self.mutation(), self.hive[i].x, self.hive[i].y)
            U_fitness = returnChosenFunction(U.x, U.y,self.chosenFunction)
            if (U_fitness > self.hive[i].Fitness):
                self.hive[i] = Vector3D(vector = U, fitness = U_fitness, whichConstructor=1)
995,390 | aebe2abcbb3e25cda50add0860057150da631949 | import sys
# Catching a single exception type.
try:
    s = input('please enter two numbers separated by comma: ')
    num1 = int(s.split(',')[0].strip())
    num2 = int(s.split(',')[1].strip())
    ...
except ValueError as err:
    print('Value Error: {}'.format(err))

# Catching multiple exception types.
# Style 1: one handler for a tuple of types.
try:
    s = input('please enter two numbers separated by comma: ')
    num1 = int(s.split(',')[0].strip())
    num2 = int(s.split(',')[1].strip())
    ...
except (ValueError, IndexError) as err:
    print('Error: {}'.format(err))

print('continue')

# Style 2: one handler per type.
try:
    s = input('please enter two numbers separated by comma: ')
    num1 = int(s.split(',')[0].strip())
    num2 = int(s.split(',')[1].strip())
    ...
except ValueError as err:
    print('Value Error: {}'.format(err))
except IndexError as err:
    print('Index Error: {}'.format(err))

# Finally, add a catch-all handler at the end; two ways to write it.
# Variant 1: `except Exception` keeps the exception object available.
try:
    s = input('please enter two numbers separated by comma: ')
    num1 = int(s.split(',')[0].strip())
    num2 = int(s.split(',')[1].strip())
    ...
except ValueError as err:
    print('Value Error: {}'.format(err))
except IndexError as err:
    print('Index Error: {}'.format(err))
except Exception as err:
    print('Other error: {}'.format(err))

# Variant 2: a bare `except` catches everything (including SystemExit etc.).
try:
    s = input('please enter two numbers separated by comma: ')
    num1 = int(s.split(',')[0].strip())
    num2 = int(s.split(',')[1].strip())
    ...
except ValueError as err:
    print('Value Error: {}'.format(err))
except IndexError as err:
    print('Index Error: {}'.format(err))
except:
    print('Other error')

print('continue')

# Exception handling with a finally clause (cleanup always runs).
try:
    f = open('file.txt', 'r')
    # some data processing
except OSError as err:
    print('OS error: {}'.format(err))
except:
    print('Unexpected error:', sys.exc_info()[0])
finally:
    # f may not exist if open() itself failed, so guard the close.
    try:
        f.close()
    except:
        print(sys.exc_info()[0])

# Defining a custom exception.
class MyInputError(Exception):
    """Raised when there are errors in the input."""

    def __init__(self, value):
        # Keep the offending value for the error message.
        self.value = value

    def __str__(self):
        # repr() so that string values show their quotes.
        return "{} is invalid input".format(repr(self.value))
# Throwing an exception explicitly with `raise`.
try:
    raise MyInputError(1) # raise the MyInputError exception
except MyInputError as err:
    print('error: {}'.format(err))

# Re-raising inside the handler propagates the exception to the caller.
try:
    raise MyInputError(1) # raise the MyInputError exception
except MyInputError as err:
    raise err
995,391 | a201860c3bad2bedc078673213bf0265cc9fb974 | from django.core import urlresolvers
from django import shortcuts
from django.views.generic import list_detail, create_update
from django.contrib.auth import decorators
from college import models
@decorators.login_required
def delete_student(request, pk):
    """Delete a Student, then redirect to the detail page of its group."""
    student = shortcuts.get_object_or_404(models.Student, pk = pk)
    return create_update.delete_object(request, model = models.Student, object_id = pk, post_delete_redirect = urlresolvers.reverse('retrieve-group', args=[str(student.group.pk)]))
@decorators.login_required
def update_student(request, pk):
    """Show/handle the generic edit form for one Student."""
    return create_update.update_object(request, model = models.Student, object_id = pk)
@decorators.login_required
def create_student(request):
    """Show/handle the generic creation form for a new Student."""
    return create_update.create_object(request, model = models.Student)
@decorators.login_required
def delete_group(request, pk):
    """Delete a Group, then redirect to the group list."""
    return create_update.delete_object(request, model = models.Group, object_id = pk, post_delete_redirect = urlresolvers.reverse('list-groups'))
@decorators.login_required
def update_group(request, pk):
    """Show/handle the generic edit form for one Group."""
    return create_update.update_object(request, model = models.Group, object_id = pk)
@decorators.login_required
def create_group(request):
    """Show/handle the generic creation form for a new Group."""
    return create_update.create_object(request, model = models.Group)
def list_groups(request):
    """Public list view of all groups (no login required)."""
    return list_detail.object_list(request, queryset = models.Group.objects.all())
def retrieve_group(request, pk):
    """Public detail page for one group."""
    return list_detail.object_detail(request, queryset = models.Group.objects.all(), object_id = pk)
|
995,392 | 3a66ac5b206ed9f84c6fd0c4c6411c865d9eb163 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-03-09
# @Author : Chloé Artaud (chloe.artaud@univ-lr.fr), Nicolas Sidère (nicolas.sidere@univ-lr.fr)
# @Link : http://findit.univ-lr.fr/
# @Version : $Id$
import os
import logging
import argparse
import csv
import collections
import sys
from argparse import ArgumentTypeError as err
from bs4 import BeautifulSoup
import numpy as np
# ==============================================================================
logger = logging.getLogger(__name__)
# ==============================================================================
def jaccard(set1, set2):
    """Jaccard index of two collections: |intersection| / |union|.

    The intersection keeps the elements of ``set1`` that also occur in
    ``set2``; the union size is |set1| + |set2| - |intersection|.
    Returns 0.0 when both inputs are empty (the index is undefined there;
    the original code raised ZeroDivisionError).
    """
    intersection = [i for i in set1 if i in set2]
    union_size = len(set1) + len(set2) - len(intersection)
    if union_size == 0:
        # Both collections empty: report no similarity instead of crashing.
        return 0.0
    return len(intersection) / union_size
def evaltask2text(GTfile, candidatefile):
    """
    Evaluate task 2 (text forgeries localization) results.

    Input: path to the ground-truth XML file and to the candidate XML file.
    Output: three Jaccard indices of increasing precision:
    line only, line + column, line + column + length of forged token.
    """
    # `with` guarantees the files are closed even if parsing fails
    # (the original code leaked both file handles).
    with open(GTfile, encoding="utf-8", mode="r") as fGT:
        soupGT = BeautifulSoup(fGT.read(), 'xml')
    with open(candidatefile, encoding="utf-8", mode="r") as fcandidat:
        soupcand = BeautifulSoup(fcandidat.read(), 'xml')
    listfraudlineGT = soupGT.find_all("fraud")
    listfraudlineCand = soupcand.find_all("fraud")
    logger.debug(listfraudlineGT)
    # Precision 1: line numbers only.
    listnblineCand = [fraud["line"] for fraud in listfraudlineCand]
    listnblineGT = [fraud["line"] for fraud in listfraudlineGT]
    jacclineresult = jaccard(set(listnblineCand), set(listnblineGT))
    logger.debug(jacclineresult)
    # Precision 2: line + column.
    listnbpositionCand = [(fraud["line"], fraud["col"]) for fraud in listfraudlineCand]
    listnbpositionGT = [(fraud["line"], fraud["col"]) for fraud in listfraudlineGT]
    jaccpositionresult = jaccard(set(listnbpositionCand), set(listnbpositionGT))
    logger.debug(jaccpositionresult)
    # Precision 3: line + column + length of the forged token.
    listnbtokenCand = [(fraud["line"], fraud["col"], len(fraud["forged_value"])) for fraud in listfraudlineCand]
    listnbtokenGT = [(fraud["line"], fraud["col"], len(fraud["forged_value"])) for fraud in listfraudlineGT]
    jacctokenresult = jaccard(set(listnbtokenCand), set(listnbtokenGT))
    logger.debug(jacctokenresult)
    return jacclineresult, jaccpositionresult, jacctokenresult
def main():
    """Command-line entry point: compare GT and experiment XML files and dump a CSV report."""
    parser = argparse.ArgumentParser(
        description="Evaluate the spotting of modified informations in a set of document OCR outputs.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-d', '--debug',
                        action="store_true",
                        help="Activate debug output.")
    parser.add_argument('-pg', '--pathGT',
                        type=str,
                        required=True,
                        help="path to Groundtruth files")
    parser.add_argument('-pe', '--pathExp',
                        type=str,
                        required=True,
                        help="path to Experimentation files")
    parser.add_argument('-o', '--output_file',
                        type=str,
                        required=True,
                        help="path to Output File")
    args = parser.parse_args()  # stray ';' removed

    # Logging: single stream handler, INFO by default, DEBUG on demand.
    formatter = logging.Formatter("%(name)-12s %(levelname)-7s: %(message)s")
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
    #----------------------------------------------------------------
    logger.info("Starting up...")
    #----------------------------------------------------------------
    if not os.path.isdir(args.pathGT):
        logger.info("output_directory argument is not a valid path")
        sys.exit(1)
    logger.info(args.pathGT)

    # One OrderedDict per GT file so the CSV columns keep a stable order.
    # Experiment files are expected under pathExp with the same file names.
    list_results = []
    for filename in os.listdir(os.path.join(args.pathGT)):
        dict_results = collections.OrderedDict()
        logger.info(filename)
        dict_results['filename'] = str(filename)
        dict_results['jacclineresult'], dict_results['jaccpositionresult'], dict_results['jacctokenresult'] = \
            evaltask2text(os.path.join(args.pathGT, filename), os.path.join(args.pathExp, filename))
        list_results.append(dict_results)

    with open(args.output_file, 'w', newline='', encoding='utf-8') as csv_file:
        csvwriter = csv.writer(csv_file)
        # Header row comes from the first result's keys (all rows share them);
        # replaces the original `bool_header == True` flag dance.
        if list_results:
            csvwriter.writerow(list_results[0].keys())
        for it_result in list_results:
            csvwriter.writerow(it_result.values())
    #----------------------------------------------------------------
    logger.info("Exiting...")
    #----------------------------------------------------------------
if __name__ == "__main__":
main() |
995,393 | db2e7cc93e78f7964c94952b57bb0c3e6ba83dd1 | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
    """Post edit form with a free-text category field on top of the model fields."""
    # NOTE(review): label contains a typo ("serparated") -- it is user-visible
    # text, so fixing it is a deliberate UI change, not done here.
    category_csv = forms.CharField(required=True, label='Categories (comma serparated)')
    class Meta:
        model = Post
        fields =('title', 'content', 'category_csv') |
995,394 | a61fda6f471f35e36a0f582fcb881df8485b2230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: ZhangXiaocheng
# @File: restful.py
# @Time: 2019/4/18 20:48
from flask import views
from app.decorators import set_response, exception_handler, token_validator
class RESTfulView(views.MethodView):
    """Base API view: every handler method is wrapped by the generic exception handler."""
    decorators = [exception_handler]
    @classmethod
    def init_response(cls, code: int=200, msg: str='ok', data=None):
        # Delegate to the shared response builder so all views emit one format.
        return set_response(code, msg, data)
class LoginRequiredView(RESTfulView):
    """RESTfulView variant that additionally requires a valid auth token.

    Decorator order matters: token_validator runs first, then exception_handler.
    """
    decorators = [token_validator, exception_handler]
|
995,395 | f2ef6c4843727597cf8c4b38981a6b94e04522a3 | # @Time : 2019/7/1 10:18
# @Author : young
from flask import Blueprint, current_app, make_response
from flask_wtf import csrf
html = Blueprint("html", __name__)
@html.route("/<re('.*'):html_file_name>")
def get_html(html_file_name):
    """Serve a static HTML file and attach a CSRF token cookie.

    An empty path falls back to index.html; all files are served from the
    static "html/" subdirectory.
    """
    if not html_file_name:
        html_file_name = "index.html"
    # After the fallback above the name is never empty, so the original
    # `if html_file_name != ""` check was dead code -- prefix unconditionally.
    html_file_name = "html/" + html_file_name
    csrf_token = csrf.generate_csrf()
    resp = make_response(current_app.send_static_file(html_file_name))
    # Expose the token so the front end can echo it back on form POSTs.
    resp.set_cookie("csrf_token", csrf_token)
    return resp
|
995,396 | 0193210bd67238dee2478516c20df17d374d4dc8 |
# themodelbot: posts every image in the ./models folder to Twitter.
import tweepy as tp
import time
import os
# credentials to login to twitter api
# SECURITY NOTE(review): API credentials are hard-coded in plain text and
# committed to the repository -- they should be revoked and loaded from
# environment variables or a secrets store instead.
consumer_key = '3oItC280vFgNLtHa9FLCUSrn6'
consumer_secret = 'J1GwmtT3JbNi3SSkRpqlZHhTaJFwuHOEDI0uaTvlNz0fAmbFTw'
access_token = '1009266220984659969-XekO15oXO6wURY4DVgAm6PNtFDqIUO'
access_secret = 'Nu2NIHSDYfRrVghQiA07Kz0SpecPxJyyN1xf3KRFeSx2w'
# login to twitter account api
auth = tp.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tp.API(auth)
os.chdir('models')
# iterates over pictures in models folder, posting one every 3 seconds
# (simple rate limiting; tweet text is the media only)
for model_image in os.listdir('.'):
    api.update_with_media(model_image)
    time.sleep(3)
|
995,397 | cab65111492fa6bdfd74c003ff0fff45e51865ce | import fbchat
import requests
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from collections import defaultdict
from math import ceil
from io import BytesIO
from PIL import ImageTk, Image
from os import path
class AutoHideScrollbar(Scrollbar):
    '''
    Scrollbar that automatically hides when needed.
    Taken from effbot.org

    Only works with the grid geometry manager; pack/place are disabled
    because auto-hiding relies on grid_remove.
    '''
    def set(self, lo, hi):
        # lo/hi are the fractional view bounds Tk reports; when they span
        # [0, 1] the whole content is visible, so hide the bar.
        if float(lo) <= 0.0 and float(hi) >= 1.0:
            # grid_remove is currently missing from Tkinter!
            self.tk.call("grid", "remove", self)
        else:
            self.grid()
        Scrollbar.set(self, lo, hi)
    def pack(self, **kw):
        # Forbidden: auto-hiding only works under grid.
        raise TclError("cannot use pack with this widget")
    def place(self, **kw):
        # Forbidden: auto-hiding only works under grid.
        raise TclError("cannot use place with this widget")
def ProcessNames(*args):
    '''
    Processes each name in given csv, searches for matching name in friends list.
    Performs different actions depending on how many matching names are found.

    Reads the module-level widgets (message_entry, path_field, email,
    password) and tears down `root` when done.  *args absorbs the Tk event
    object when used as a callback.
    '''
    # Ask user if they still want to send message without {positions} key
    if '{positions}' not in message_entry.get('1.0', 'end-1c'):
        check = '{positions} not found in message body text. Still send message?'
        # Return if user does not want to send message
        if not messagebox.askyesno(title='Send message?', message=check):
            return
    # Ensure a csv has been selected
    if not path_field.get('1.0', 'end-1c'):
        messagebox.showerror(title='Error', message='No CSV selected.')
        return
    # Attempt login
    email_str = email.get()
    password_str = password.get()
    try:
        client = fbchat.Client(email_str, password_str, debug=False)
    except:
        # NOTE(review): bare except treats any failure (network, library
        # error) as bad credentials -- confirm fbchat's exception types.
        messagebox.showerror('Error', message='Incorrect username or password. Try again.')
        return
    # Intending to send position notifications
    if '{positions}' in message_entry.get('1.0', 'end-1c'):
        people = CompileNominations()
    # Sending positionless message
    else:
        people = CompileNames()
    # --------------------------- Begin progress window setup ---------------------------
    progress_window = Toplevel()
    progress_window.title('Progress')
    progress = StringVar()
    progress.set('Processed 0/{}'.format(len(people.keys())))
    ttk.Label(progress_window, textvar=progress).grid(row=0, column=0)
    for child in progress_window.winfo_children():
        child.grid_configure(padx=2, pady=2)
    # --------------------------- End progress window setup------------------------------
    not_found = []
    for counter, person in enumerate(people.keys()):
        # Update progress window
        progress.set('Processed {}/{}'.format(counter, len(people.keys())))
        progress_window.update()
        possible_friends = client.getUsers(person)
        # Filter out people not on friends list
        possible_friends = list(filter(lambda x: client.getUserInfo(x.uid)['is_friend'], possible_friends))
        # If no matches found on friends list
        if not possible_friends:
            print('{} not found on friends list\n'.format(person))
            not_found.append(person)
            continue
        # If multiple matches found on friends list
        elif len(possible_friends) > 1:
            DisambiguateFriends(possible_friends, person, people, client)
        # If only 1 match found on friends list
        else:
            friend_uid = possible_friends[0].uid
            SendMessage(person, friend_uid, people, client)
    progress_window.destroy()
    # If some users were not found on friends list, display warning box with their names and positions
    if not_found:
        DisplayNotFound(not_found, people)
    messagebox.showinfo(title='Done!', message='All messages sent. Program will now exit.')
    root.destroy()
def DisplayNotFound(not_found, people):
    '''
    Displays a modal window listing all names not found on the friends list,
    each with its associated positions, so the user can message them by hand.

    not_found: list of names that had no friends-list match.
    people: {name: positions} mapping built by CompileNominations/CompileNames.
    '''
    # -------------------------- Begin picture selection window setup ----------------------------
    not_found_window = Toplevel(mainframe)
    not_found_window.geometry(CalcWindowDimensions(not_found, people))
    not_found_window.title('User{} not found'.format('s' if len(not_found) > 1 else ''))
    # Canvas and frame both required for scrollbar
    canvas = Canvas(not_found_window, borderwidth=0)
    frame = Frame(canvas)
    vscrollbar = AutoHideScrollbar(not_found_window, orient='vertical', command=canvas.yview)
    vscrollbar.grid(row=0, column=1, sticky=(N, S))
    hscrollbar = AutoHideScrollbar(not_found_window, orient='horizontal', command=canvas.xview)
    hscrollbar.grid(row=1, column=0, sticky=(E, W))
    canvas.grid(row=0, column=0, sticky=(N, S, E, W))
    canvas.configure(yscrollcommand=vscrollbar.set, xscrollcommand=hscrollbar.set)
    # Allow the frame to expand
    not_found_window.grid_rowconfigure(0, weight=1)
    not_found_window.grid_columnconfigure(0, weight=1)
    frame.rowconfigure(1, weight=1)
    frame.columnconfigure(1, weight=1)
    canvas.create_window((0, 0), window=frame, anchor='nw')
    # Keep the scrollregion in sync with the frame's actual size.
    frame.bind('<Configure>', lambda event, canvas=canvas: OnFrameConfigure(canvas))
    # -------------------------- End picture selection window setup ----------------------------
    # Display list of names not found and associated positions
    header = 'The following not found. Please send their messages manually.\n\n'
    names_and_positions = '\n\n'.join(['{} - {}'.format(person, ', '.join(people[person])) for person in not_found])
    Label(frame, text=header, font='Helvetica 14 bold').grid(row=0, column=0, sticky=(E, W))
    Label(frame, text=names_and_positions, anchor='w', justify='left').grid(row=1, column=0, sticky=(E, W))
    ttk.Button(frame, text='Ok', command=not_found_window.destroy).grid(row=2, column=0, sticky=(S))
    # Block until the user dismisses the window.
    mainframe.wait_window(not_found_window)
def CalcWindowDimensions(not_found, people):
    '''
    Calculates max window dimensions required, constrained by screen size.
    Returns a Tk geometry string "WxH+X+Y" centering the window on screen.
    '''
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    # Values determined by experimentation
    pixels_per_char = 9
    pixels_per_col = 31
    # header text copied from method above. Not the cleanest implementation, but passing as a parameter seemed like too much
    header = 'The following not found. Please send their messages manually.\n\n'
    # If not sending position messages, positions will be 0-length list
    names_and_positions_lengths = ([len(', '.join(positions) + name) for name, positions in people.items() if name in not_found])
    # Include header length in calculation
    names_and_positions_lengths.append(len(header))
    # Width is driven by the single longest line to be displayed.
    text_width = pixels_per_char * max(names_and_positions_lengths)
    # 2 lines per entry, 1 line of boilerplate, 1 button
    text_height = pixels_per_col * (2 * len(not_found) + 2)
    width = min(screen_width, text_width)
    height = min(screen_height, text_height)
    # Center window on screen
    x_coord = (screen_width - width) //2
    y_coord = (screen_height - height) // 2
    return '{}x{}+{}+{}'.format(width, height, x_coord, y_coord)
def OnFrameConfigure(canvas):
    '''
    Re-syncs the canvas scrollregion with its content's bounding box.
    Bound to the embedded frame's <Configure> (resize) event.
    '''
    canvas.configure(scrollregion=canvas.bbox("all"))
def DisambiguateFriends(possibilities, person, people, client):
    '''
    Displays a grid of profile pictures for every friends-list match of
    `person`. The user clicks the intended recipient's picture, which sends
    the message to that user and closes the window.
    '''
    window = Toplevel(mainframe)
    window.title('Multiple results found')
    l = ttk.Label(window, text='Multiple "{}" found. Select the profile picture of the intended user.'.format(person))
    # Pictures are laid out in (at most) 2 rows of ceil(n/2) columns.
    num_columns = ceil(len(possibilities)/2)
    l.grid(row=0, column=0, columnspan=num_columns)
    poss_index = 0
    # Create max of 2 rows of pictures
    for r in range(1, 3):
        for c in range(num_columns):
            uid = possibilities[poss_index].uid
            # Get and rescale thumbnail of profile picture. Would like to figure out how to get full sized picture
            content = requests.get(client.getUserInfo(uid)['thumbSrc']).content
            resized_image = RescaleImage(Image.open(BytesIO(content)))
            img = ImageTk.PhotoImage(resized_image)
            # uid=uid default binds the *current* uid (late-binding closure fix).
            b = ttk.Button(window, image=img, command=lambda uid=uid: SendAndClose(person, uid, people, client, window))
            b.grid(column=c, row=r, sticky=(N, S, E, W))
            # Save image reference to prevent garbage collection!
            b.image = img
            poss_index += 1
    for child in window.winfo_children():
        child.grid_configure(padx=2, pady=2)
    # Wait for window to close before continuing
    mainframe.wait_window(window)
def SendAndClose(name, uid, people, client, window):
    '''
    Button callback for DisambiguateFriends: destroys the picker window,
    then sends the message to the chosen uid.
    '''
    window.destroy()
    SendMessage(name, uid, people, client)
def SendMessage(name, uid, people, client):
    '''
    Builds the personalized message for `name` (substituting a bulleted
    position list for the {positions} placeholder) addressed to user `uid`.

    NOTE(review): the actual client.send call is commented out, so this
    currently only prints the message -- effectively a dry run.
    '''
    message = message_entry.get('1.0', 'end-1c').replace('{positions}', '- ' + '\n- '.join(people[name]))
    print('{}: {}\n'.format(name, message))
    #client.send(uid, message)
def RescaleImage(img):
    '''
    Returns the given PIL image scaled up by a fixed factor (2x).
    '''
    scale_value = 2.0
    width, height = [int(scale_value * dim) for dim in img.size]
    # Image.ANTIALIAS was an alias of LANCZOS since Pillow 2.7 and was
    # removed in Pillow 10 -- use the proper name for the same filter.
    return img.resize((width, height), Image.LANCZOS)
def CompileNominations():
    '''
    Opens people csv, creates dictionary {person: set of positions}
    (NOTE(review): docstring originally said "list", but a set is used to
    de-duplicate positions).
    Expected CSV columns: <ignored>, name, position.
    Return: {person: positions} dictionary
    '''
    d = defaultdict(set)
    with open(path_field.get('1.0', 'end-1c')) as f:
        # Eliminate headers
        f.readline()
        for line in f:
            # Use [:3] to safegaurd against additional fields
            _, person, position = list(map(str.strip, line.split(',')))[:3]
            # Use title to register the same name regardless of capitalization
            d[person.title()].add(position)
    WriteToFile(d)
    return d
def CompileNames():
    '''
    Opens people csv, creates dictionary {person: []} for compatibility
    with the position-based code paths (the empty list joins to "").
    Expected CSV columns: <ignored>, name.
    Return {person: []} dictionary
    '''
    d = {}
    with open(path_field.get('1.0', 'end-1c')) as f:
        # Eliminate headers
        f.readline()
        for line in f:
            # Use [:2] to safegaurd against additional fields
            _, person = list(map(str.strip, line.split(',')))[:2]
            # Use title to register the same name regardless of capitalization
            # Use empty list for compatibility with .joins used later
            d[person.title()] = []
    WriteToFile(d)
    return d
def WriteToFile(d):
    '''
    Dumps each name and its associated positions to ~/Desktop/log.txt,
    one "name: pos1, pos2" line per entry.
    Human readability prioritized over computer readability.
    '''
    log_path = path.join(path.expanduser('~'), path.join('Desktop', 'log.txt'))
    with open(log_path, 'w') as log_file:
        log_file.writelines(
            '{}: {}\n'.format(name, ', '.join(positions))
            for name, positions in d.items()
        )
def SetPath(*args):
    '''
    Displays a file-open dialog filtered to CSVs and writes the chosen
    path into the (normally read-only) path_field widget.
    *args absorbs the Tk event object when used as a callback.
    '''
    extensions = [('CSV', '*.csv'), ('All files', '*')]
    dlg = filedialog.Open(mainframe, filetypes=extensions)
    result = dlg.show()
    # result is '' when the user cancels the dialog.
    if result:
        # Make field editable only long enough to replace the text
        path_field['state'] = 'normal'
        path_field.delete('1.0', END)
        path_field.insert('1.0', result)
        path_field['state'] = 'disabled'
# ----------------------- GUI setup ------------------------
root = Tk()
root.title('Messenger')
mainframe = ttk.Frame(root, padding='3 3 12 12')
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
# BUG FIX: `ttk.Button(...).grid(...)` returns None (grid() has no return
# value), so `file_selector` used to be bound to None.  Create the widget
# and place it in two steps.
file_selector = ttk.Button(mainframe, text='Select people/nominations CSV', command=SetPath)
file_selector.grid(column=1, row=1, sticky=(W, E))
path_field = Text(mainframe, width=30, height=1)
path_field.grid(column=1, row=2, sticky=(W, E))
# Make field non-editable
path_field['state'] = 'disabled'
email = StringVar()
email_entry = ttk.Entry(mainframe, width=20, textvariable=email)
email_entry.grid(column=1, row=3, sticky=(W, E))
password = StringVar()
password_entry = ttk.Entry(mainframe, width=20, show='*', textvariable=password)
password_entry.grid(column=1, row=4, sticky=(W, E))
ttk.Label(mainframe, text='Path').grid(column=2, row=2, sticky=(W, E))
ttk.Label(mainframe, text='Email').grid(column=2, row=3, sticky=(W, E))
ttk.Label(mainframe, text='Password').grid(column=2, row=4, sticky=(W, E))
# Message body dimensions (width, height) in text units.
bp_dims = (30, 10)
message_entry = Text(mainframe, width=bp_dims[0], height=bp_dims[1], wrap='word')
message_entry.grid(column=3, row=1, sticky=(W, E, N, S), columnspan=1, rowspan=3)
ttk.Label(mainframe, text='Message body').grid(column=3, row=4)
ttk.Button(mainframe, text='Send message', command=ProcessNames).grid(column=3, row=5, sticky=(W, E))
for child in mainframe.winfo_children():
    child.grid_configure(padx=5, pady=5)
email_entry.focus()
# -------------------- End GUI setup --------------------------
root.mainloop()
|
995,398 | 2e9042aedbae76b97eb1893a57c5ce2d937683f4 | import lib.messages as messages
def turn(state):
    """
    Advances the game by one month and evaluates the end conditions.

    Returns the (mutated) state and a (victory, defeat) flag pair.
    NOTE(review): `victory` is never set to True here -- presumably the
    winning condition lives elsewhere; confirm with callers.
    """
    victory = defeat = False
    state.months_since_founding += 1
    messages.turn_prompt(state.months_since_founding)
    # Under the current rules the game is lost after month 5.
    if state.months_since_founding > 5:
        defeat = True
    return state, (victory, defeat)
|
995,399 | 789c748122946f90f670fe9a4addb9baf2619b5c | import numpy as np
def numero_a_letras(n):
    """Return the Spanish (feminine form) name of a non-negative integer below 10**6.

    Raises ValueError for numbers >= 10**6 and for negative numbers
    (Python's floor division/modulo would otherwise silently produce
    garbage such as 'noventanueve' for -1).
    """
    if n < 0:
        raise ValueError("Numero negativo")
    especiales = {0: 'cero', 10: 'diez', 11: 'once', 12: 'doce', 13: 'trece', 14: 'catorce', 15: 'quince', 20: 'veinte', 100: 'cien', 1000: 'mil'}
    if n in especiales:
        return especiales[n]
    if n < 100:
        cifras = ['', 'una', 'dos', 'tres', 'cuatro', 'cinco', 'seis', 'siete', 'ocho', 'nueve']
        decenas = ['', 'dieci', 'veinti', 'treinta', 'cuarenta', 'cincuenta', 'sesenta', 'setenta', 'ochenta', 'noventa']
        if n % 10 == 0:
            return decenas[n // 10]
        if n < 30:
            # 16-29 are written as one word: "dieciseis", "veintidos", ...
            return f"{decenas[n // 10]}{cifras[n % 10]}"
        return f"{decenas[n // 10]} y {cifras[n % 10]}"
    elif n < 1000:
        centenas = ['', 'ciento', 'doscientas', 'trescientas', 'cuatrocientas', 'quinientas', 'seiscientas', 'setecientas', 'ochocientas', 'novecientas']
        if n % 100 == 0:
            return centenas[n // 100]
        return f"{centenas[n // 100]} {numero_a_letras(n % 100)}"
    elif n < 10**6:
        if n < 2000:
            # 1001-1999 read as bare "mil ...", never "una mil ...".
            return f"mil {numero_a_letras(n % 1000)}"
        if n % 1000 == 0:
            return f"{numero_a_letras(n // 1000)} mil"
        return f"{numero_a_letras(n // 1000)} mil {numero_a_letras(n % 1000)}"
    else:
        raise ValueError("Numero demasiado grande")
def estandarizar_mensaje(mensaje):
    """Lower-cases the message and strips the accents from the five vowels."""
    sin_tildes = str.maketrans('áéíóú', 'aeiou')
    return mensaje.lower().translate(sin_tildes)
def conteos_mensaje(mensaje, letras="abcdefghijklmnñopqrstuvwxyz"):
    """Maps each letter of `letras` to its number of occurrences in the
    standardized (lower-cased, accent-free) message."""
    normalizado = estandarizar_mensaje(mensaje)
    return {letra: normalizado.count(letra) for letra in letras}
def conteos_mensaje2(mensaje,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Same counts as conteos_mensaje, but as a np array ordered like `letras`
    (relies on dict insertion order)."""
    return np.array(list(conteos_mensaje(mensaje,letras).values()))
def conteo_numero(numero,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Letter counts contributed by writing `numero` in words, plus the
    variable ending of the "vez"/"veces" word that follows it in the
    signature: the singular adds a 'z'; the plural adds 'c', 'e' and 's'
    (the shared "ve" prefix is part of the fixed template, as noted in the
    original docstring)."""
    dicc=conteos_mensaje(numero_a_letras(numero),letras)
    if numero==1:
        # singular: "vez" -> extra 'z'
        if 'z' in letras:
            dicc['z']+=1
    else:
        # plural: "veces" -> extra 'c', 'e', 's'
        if 'c' in letras:
            dicc['c']+=1
        if 'e' in letras:
            dicc['e']+=1
        if 's' in letras:
            dicc['s']+=1
    return dicc
def conteo_numero2(numero,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Same counts as conteo_numero, but as a np array ordered like `letras`."""
    return np.array(list(conteo_numero(numero,letras).values()))
def firmar_mensaje_fija(mensaje, letras="abcdefghijklmnñopqrstuvwxyz"):
    """Builds the fixed part of the signature postscript.

    The variable parts (number words and the 'z'/'ces' ending of vez/veces)
    are left out: every clause is just "ve la letra X".  Every clause after
    the first is prefixed with "y " -- this mirrors firmar_mensaje exactly,
    where the prefix check also runs inside the loop.
    """
    postdata = []
    for letra in letras:
        clause = f"ve la letra {letra}"
        # All clauses but the first carry the "y " connector.
        postdata.append(clause if not postdata else "y " + clause)
    return mensaje + " En este mensaje aparece " + ", ".join(postdata)
def firmar_mensaje(mensaje, conteo):
    """Appends the self-describing postscript to the message.

    `conteo` maps letters to claimed counts; letters are emitted in sorted
    order.  Every clause after the first is prefixed with "y " (the prefix
    check runs inside the loop), matching firmar_mensaje_fija.
    """
    postdata = []
    for x in sorted(conteo.keys()):
        # Singular/plural of "vez" depends on the count.
        vez = "veces" if conteo[x] != 1 else "vez"
        postdata.append(f"{numero_a_letras(conteo[x])} {vez} la letra {x}")
        if len(postdata) > 1:
            postdata[-1] = "y " + postdata[-1]
    return mensaje + " En este mensaje aparece " + ", ".join(postdata)
def conteo_mensaje_f_nf(kappa,conteo,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Letter counts of the signed message computed as the fixed part `kappa`
    plus the counts contributed by spelling out each claimed number.

    kappa: per-letter counts of the fixed text (message + template), aligned with `letras`.
    conteo: claimed count for each letter, aligned with `letras`.
    """
    conteo_firmado=np.array(kappa)  # np.array copies, so kappa is not mutated
    for i in range(len(letras)):
        conteo_firmado+=conteo_numero2(conteo[i],letras)
    return conteo_firmado
def es_conteo_valido(mensaje, conteo):
    """True iff signing `mensaje` with `conteo` produces exactly the counts
    claimed in `conteo` -- i.e. the count is a fixed point of the signing
    process and the signature is self-consistent."""
    firmado = firmar_mensaje(mensaje, conteo)
    conteo_firmado = conteos_mensaje(firmado, conteo.keys())
    return conteo == conteo_firmado
def calcular_error(mensaje,conteo):
    """Per-letter difference (signed-message counts minus claimed counts).

    A zero vector means `conteo` is a fixed point of the signing process.
    """
    firmado=firmar_mensaje(mensaje,conteo)
    conteo_firmado=conteos_mensaje(firmado,conteo.keys())
    return dicc_to_array(conteo_firmado)-dicc_to_array(conteo)
def calcular_error2(mensaje,conteo,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Array-based variant of calcular_error: `conteo` is a sequence aligned with `letras`."""
    return calcular_error(mensaje,array_to_dicc(conteo,letras))
def calcular_error_kappa(kappa,conteo,letras="abcdefghijklmnñopqrstuvwxyz"):
    """Error computed from the precomputed fixed counts `kappa` instead of re-signing the full message."""
    return conteo_mensaje_f_nf(kappa,conteo,letras)-np.array(conteo)
def norm(x):
    """Euclidean norm of the np array x."""
    return np.sqrt(norm2(x))
def norm2(x):
    """Sum of squared entries of x (helper for norm)."""
    # Element-wise product of x with itself, then accumulate.
    return sum(np.multiply(x, x))
def dicc_to_array(conteo):
    """Returns the dict's values (in insertion order) as a numpy array."""
    return np.array(tuple(conteo.values()))
def vec_to_mat(vec, i=0):
    """Reshapes a np vector into matrix form: a column (i=0, effectively the
    transpose of a row) or a row (any other i), so it can be used in matrix
    arithmetic."""
    forma = (-1, 1) if i == 0 else (1, -1)
    return vec.reshape(forma)
def array_to_dicc(conteo, letras="abcdefghijklmnñopqrstuvwxyz"):
    """Inverse of dicc_to_array: pairs each letter with the count at its index.

    Raises IndexError when `conteo` has fewer entries than `letras` (same
    as the original loop); extra entries in `conteo` are ignored.
    """
    # Dict comprehension replaces the manual enumerate-and-assign loop.
    return {letra: conteo[i] for i, letra in enumerate(letras)}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.