text stringlengths 38 1.54M |
|---|
from Tkinter import *
from multicast import multicast
import os
class GUI():
    """Tkinter front-end for the multicast file-sharing tool (Python 2).

    Builds five stacked panes: a join button, the user/file pickers,
    a send button, the list of downloadable files and a download button.
    Callbacks for sending/downloading are injected via setSendFunction /
    setDownloadFunction.
    """

    def __init__(self, MCAST_GROUP, MCAST_PORT, BASE_PATH):
        self.MCAST_GROUP = MCAST_GROUP
        self.MCAST_PORT = MCAST_PORT
        self.BASE_PATH = BASE_PATH
        # Root pane stacks the five rows vertically.
        root = PanedWindow(orient=VERTICAL)
        root.pack(fill=BOTH, expand=1)
        rows = [PanedWindow(root) for _ in range(5)]
        for row in rows:
            root.add(row)
        joinRow, pickerRow, sendRow, downloadRow, downloadBtnRow = rows
        self._joinBtn(joinRow)
        self.users, self.files = self._createTopScrolls(pickerRow)
        self._sendBtn(sendRow)
        self.downloadables = self._createBotScroll(downloadRow)
        self._downloadBtn(downloadBtnRow)
        # Populate the local-file list from disk.
        self._listFiles()

    def setSendFunction(self, func):
        # Called as func(user, filename) for every selected (user, file) pair.
        self.sendFile = func

    def setDownloadFunction(self, func):
        # Called as func(filename) for every selected downloadable.
        self.downloadFile = func

    def _joinBtn(self, parent):
        button = Button(parent, text="Find devices", command=self._joinMCast)
        button.pack()
        parent.add(button)

    def _joinMCast(self):
        # Re-announce ourselves on the multicast group.
        multicast.rejoin(self.MCAST_GROUP, self.MCAST_PORT)

    def _createTopScrolls(self, parent):
        """Build the two side-by-side listboxes (users, local files)."""
        leftPane = PanedWindow(parent)
        rightPane = PanedWindow(parent)
        parent.add(leftPane)
        parent.add(rightPane)
        userScroll = Scrollbar(leftPane)
        userScroll.pack(side=RIGHT, fill=Y)
        fileScroll = Scrollbar(rightPane)
        fileScroll.pack(side=RIGHT, fill=Y)
        userList = Listbox(leftPane, selectmode='multiple',
                           exportselection=0, yscrollcommand=userScroll.set)
        fileList = Listbox(rightPane, selectmode='multiple',
                           exportselection=0, yscrollcommand=fileScroll.set)
        userList.pack(side=LEFT, fill=BOTH)
        userScroll.config(command=userList.yview)
        fileList.pack(side=LEFT, fill=BOTH)
        fileScroll.config(command=fileList.yview)
        return (userList, fileList)

    def _sendBtn(self, parent):
        button = Button(parent, text="Send", command=self._send)
        button.pack()
        parent.add(button)

    def _createBotScroll(self, parent):
        """Build the scrollable list of files offered by other peers."""
        scroll = Scrollbar(parent)
        scroll.pack(side=RIGHT, fill=Y)
        listbox = Listbox(parent, selectmode='multiple',
                          exportselection=0, yscrollcommand=scroll.set)
        scroll.config(command=listbox.yview)
        listbox.pack(fill=BOTH)
        return listbox

    def _downloadBtn(self, parent):
        button = Button(parent, text="Download", command=self._download)
        button.pack()
        parent.add(button)

    def _send(self):
        # Send every selected file to every selected user.
        for userIdx in self.users.curselection():
            for fileIdx in self.files.curselection():
                self.sendFile(self.users.get(userIdx), self.files.get(fileIdx))

    def _download(self):
        for idx in self.downloadables.curselection():
            self.downloadFile(self.downloadables.get(idx))

    def _listFiles(self):
        # Offer everything found under BASE_PATH/Files.
        for name in os.listdir(self.BASE_PATH + "/Files"):
            self.newFile(name)

    def newDownloadable(self, newDwn):
        self.downloadables.insert(END, str(newDwn))

    def newUser(self, newUsr):
        self.users.insert(END, str(newUsr))

    def newFile(self, newF):
        self.files.insert(END, str(newF))

    def start(self):
        # Blocks until the window is closed.
        mainloop()
# affine cipher
# Lab 2, Max Grbic, March 1, 2019

# Alphabet used by the cipher: A-Z, 0-9, then punctuation -- 42 symbols.
letter = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
digits = [str(d) for d in range(10)]
symbol = [",", ".", " ", ";", ":", "!"]
alphabet = letter + digits + symbol


def affine_decrypt(ciphertext, key1, key2, table=None):
    """Decrypt an affine cipher: p = key1 * (c - key2) mod len(table).

    ciphertext -- message made only of characters from the alphabet
    key1 -- multiplicative key (modular inverse of the encryption multiplier)
    key2 -- additive key
    table -- alphabet to use (defaults to the module-level 42-char alphabet)
    Returns the decrypted string.
    """
    if table is None:
        table = alphabet
    n = len(table)
    return "".join(table[(table.index(ch) - key2) * key1 % n]
                   for ch in ciphertext)


def _prompt_and_decrypt(key1, key2):
    """Read a message from the user, decrypt it and print the result.

    Input is upper-cased automatically; an invalid character still raises
    ValueError from list.index, as in the original lab code.
    """
    user_text = input("Enter a message to decrypt (A-Z): ").upper()
    print("-" * 25)
    for ch in affine_decrypt(user_text, key1, key2):
        print(ch, end='')


if __name__ == "__main__":
    print("1.Lab2: This will decrypt using an affine cipher(key1=17, key2=8).")
    print("Encrypted message 1: TB242520C48GVV4S,0247J,04TB245,JTBZ")
    print("-" * 25)
    _prompt_and_decrypt(17, 8)
    print()
    print("-" * 25)
    print("-" * 25)
    print("2.Lab2: This will decrypt using an affine cipher(key1=41, key2=30).")
    print("Encrypted message 2: X0N30NL8Q98G4N1T0G8I4M83QNR8WR8D .8WR8IQNLXWRYLQR,8WR1W4R498X08I4M84PPQWRL0182XW0Z8QZ8LX08R0I82NGPLQ4R4TGLW28KRWL,8SW 9")
    print("-" * 25)
    _prompt_and_decrypt(41, 30)
    print()
    print("-" * 25)
    print("-" * 25)
    print("3.Lab2: This will decrypt using an affine cipher(key1=25, key2=39).")
    print("Encrypted message 3:!QR7LVTR7TQ7T1R3LVGX2T R73!TQ3TRN;7R8L QRLX2RLOR2ETRNL 5RLQR3 DG2L;Q;0D:!QJR2ETRJT V;QRTQ!JV;RV;3E!QTW")
    print("-" * 25)
    _prompt_and_decrypt(25, 39)
|
"""
This module contains all the methods in selenium
"""
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
class WebDriverFw(webdriver.Firefox, webdriver.Chrome, webdriver.Ie, webdriver.Safari, Keys):
    """Thin wrapper around the selenium webdriver.

    Responsibilities:
      * pick and initialise the concrete driver for the requested browser
      * helpers that validate web content (title checks, render waits)
      * helpers that interact with the site (clicks, search fields)
    """

    def __init__(self, browser_type):
        browser = browser_type.lower()
        # This Proof Of Concept only fully configures chrome for now.
        if browser == 'chrome':
            webdriver.Chrome.__init__(self)
            self.maximize_window()
            self.implicitly_wait(10)
        elif browser == 'ie':
            webdriver.Ie.__init__(self)       # todo
        elif browser == 'firefox':
            webdriver.Firefox.__init__(self)  # todo
        elif browser == 'safari':
            webdriver.Safari.__init__(self)   # todo
        else:
            assert False, "argument should be one of: chrome, ie, firefox, safari"

    def validate_title(self, pattern, case_sensitive=True):
        """Check that *pattern* occurs in the page title; print PASS/FAIL.

        :param pattern: substring expected in the title
        :param case_sensitive: compare case-sensitively (default) or not
        :return: None
        """
        web_title = self.title if case_sensitive else self.title.lower()
        if not case_sensitive:
            pattern = pattern.lower()
        try:
            assert pattern in web_title, str(pattern) + " In Web title not found"
        except AssertionError as other_err:
            print("FAIL " + str(other_err))
        else:
            print("PASS patter found: " + str(pattern))
            print("\tTitle: " + str(self.title))

    def click_button_by_class_name(self, button_name):
        """Click the element with the given class name (used to close popups).

        :param button_name: class name of the element to click
        :return: None
        """
        sleep(5)
        self.find_element_by_class_name(button_name).click()

    def get_all_search_result(self, element_name, pattern):
        """Type *pattern* into the named search field, count dropdown options.

        The field is cleared again before returning.
        :param element_name: html element name of the search field
        :param pattern: text typed into the field
        :return: number of <li role='option'> entries offered
        """
        search_box = self.find_element_by_name(element_name)
        search_box.send_keys(pattern)
        sleep(3)
        dropdown = self.find_element_by_xpath("//ul[@class='dropdown-menu']")
        options = dropdown.find_elements_by_xpath("//li[@role='option']")
        search_box.send_keys(Keys.CONTROL + 'a')
        search_box.send_keys(Keys.DELETE)
        return len(options)

    def open_website(self, url):
        """Open *url* and wait for the homepage headline to render.

        :param url: address to open
        :return: None
        """
        self.get(url)
        self.wait_for_site_render('homepage-headline')

    def wait_for_site_render(self, class_name, wait=5):
        """Block until an element with *class_name* is present, or exit.

        :param class_name: class that signals the page finished rendering
        :param wait: timeout in seconds
        :return: None
        """
        url = self.current_url
        try:
            WebDriverWait(self, wait).until(ec.presence_of_element_located(
                (By.CLASS_NAME, class_name)))
            print('PASS ' + url + ' open')
        except TimeoutException:
            print('FAIL ' + url + ' took too long')
            exit()
|
from flask_login import LoginManager
from .database.models import User
# Single LoginManager instance shared by the application factory.
login_manager = LoginManager()


@login_manager.user_loader
def load_user(user_id):
    """Return the User matching flask-login's stored session id, or None.

    Renamed the parameter from ``id`` so it no longer shadows the builtin;
    flask-login invokes this callback positionally, so the rename is safe.
    """
    return User.query.filter(User.id == user_id).first()
|
from influxdb import InfluxDBClient
from datetime import datetime
from datetime import timedelta
import random
# Connection and simulation window: one config entry per day as
# [start timestamp, max gap between events in minutes, <unused>].
client = InfluxDBClient(host='127.0.0.1', port=8086, database='caml_events')
config = [
    ['2020-09-08 06:00:00', 10, 50],
    ['2020-09-09 06:00:00', 10, 50]
]
space_id = 216


def _write_simulated_events(measurement, make_fields):
    """Generate and write random events for every configured day.

    measurement -- Influx measurement name
    make_fields -- callable returning the "fields" dict for one event
    Events are spread over a 20-hour window starting at the configured
    timestamp, with random gaps of at most row[1] minutes between events.
    """
    for row in config:
        start_date = datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S")
        date = start_date
        points = []
        while date < start_date + timedelta(hours=20):
            date = date + timedelta(minutes=random.randint(1, row[1]))
            points.append({
                "measurement": measurement,
                "tags": {"space_id": space_id},
                # NOTE(review): timestamps only carry second resolution even
                # though time_precision='ms' -- confirm this is intended.
                "time": date.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "fields": make_fields(),
            })
        client.write_points(points, time_precision='ms')


def _density_fields():
    """Fields for one density_limit_violation event."""
    return {
        "max_density_limit": 10,
        "current_density": random.randint(11, 60)
    }


def _distance_fields():
    """Fields for one social_distance_violation event."""
    person1_id = random.randint(1, 10)
    person2_id = person1_id + random.randint(1, 10)
    return {
        "violation_seconds": random.randint(10, 200),
        "person1_id": person1_id,
        "person2_id": person2_id
    }


_write_simulated_events("density_limit_violation", _density_fields)
_write_simulated_events("social_distance_violation", _distance_fields)
print("Done")
'''Tendo como dado de entrada a altura (h) de uma pessoa, construa um algoritmo que calcule seu
peso ideal, utilizando as seguintes fórmulas:
Para homens: (72.7*h) - 58
Para mulheres: (62.1*h) - 44.7''' |
import boto3
import json
import os
# Credentials come from the environment so they never live in source control.
aws_access_key_id = os.environ['ACCESS_KEY']
aws_secret_access_key = os.environ['SECRET_KEY']
region_name = 'us-west-2'

rk = boto3.client('rekognition', region_name=region_name,
                  aws_access_key_id=aws_access_key_id,
                  aws_secret_access_key=aws_secret_access_key)
s3 = boto3.resource('s3', region_name=region_name,
                    aws_access_key_id=aws_access_key_id,
                    aws_secret_access_key=aws_secret_access_key)

bucket = "name_of_your_bucket"
photo1 = 'source_photo.jpg'
photo2 = 'target_photo.jpg'

# Send it to Rekognition: compare the two faces stored in S3.
results = rk.compare_faces(
    SourceImage={"S3Object": {"Bucket": bucket, "Name": photo1}},
    TargetImage={"S3Object": {"Bucket": bucket, "Name": photo2}},
    SimilarityThreshold=50)

# Fixed: Python 2 `print` statement was a SyntaxError under Python 3.
print(json.dumps(results['FaceMatches'], indent=4))
|
import sys
from magma import *
from mantle import *
from loam.shields.megawing import MegaWing
# Configure the MegaWing shield: 4 switches as inputs, 2 LEDs as outputs.
megawing = MegaWing()
megawing.Switch.on(4)
megawing.LED.on(2)
main = megawing.main()
# Split the four switches into two 2-bit operands.
A = main.SWITCH[0:2]
B = main.SWITCH[2:4]
C = main.LED
# 2-bit adder: the LEDs show (A + B) mod 4.
add = Add(2)
add(A,B)
wire(add.O, C)
# Compile for the target named on the command line.
compile(sys.argv[1], main)
|
import json
import socket # 导入 socket 模块
import numpy as np
# from encrypt_paillier import create_key_pair
import pickle
s = socket.socket()              # server socket
host = socket.gethostname()      # local host name
port = 12346
s.bind((host, port))
flag = 0                         # clients that sent the current command
s.listen(5)                      # wait for client connections
C = []                           # connections waiting for a reply


def _aggregate_gradients(to_list, verbose):
    """Sum the two pickled client gradients and store the result.

    Reads mid_need/grad1.pkl and grad2.pkl, adds them element-wise and
    writes the sum to mid_need/grad3.pkl.
    to_list -- convert each summed array to a plain list before pickling
    verbose -- also print the aggregated gradients
    """
    with open("mid_need/grad1.pkl", 'rb') as f1:
        a = pickle.load(f1)
    with open("mid_need/grad2.pkl", 'rb') as f2:
        b = pickle.load(f2)
    print(len(a[0]))
    print(len(b[0]))
    c = []
    for item in range(len(a)):
        tmp = np.add(a[item], b[item])
        c.append(tmp.tolist() if to_list else tmp)
    print(len(c[0]))
    if verbose:
        print(c)
    with open('mid_need/grad3.pkl', 'wb') as output:
        pickle.dump(c, output)
    print('load_finish')


def _reply_and_reset(payload):
    """Send *payload* to every waiting client, close the connections and
    reset the per-round state."""
    global flag, C
    for conn in C:
        conn.send(payload)
        conn.close()
    print('send_finish')
    flag = 0
    C = []


while True:
    c, addr = s.accept()
    print(addr)
    C.append(c)
    res = c.recv(1024)
    print(res)
    if len(res) == 1:
        # Command: aggregate gradients, keep numpy arrays in the pickle.
        flag += 1
        if flag == 2:
            _aggregate_gradients(to_list=False, verbose=False)
            _reply_and_reset('finish'.encode('utf-8'))
    if len(res) == 3:
        # Command: aggregate gradients, convert to plain lists and print.
        flag += 1
        if flag == 2:
            _aggregate_gradients(to_list=True, verbose=True)
            _reply_and_reset('finish'.encode('utf-8'))
    if len(res) == 2:
        # Command: generate a Paillier key pair for both clients.
        print('%')
        flag += 1
        if flag == 2:
            # BUG(review): create_key_pair is undefined -- its import
            # (`from encrypt_paillier import create_key_pair`) is commented
            # out at the top of the file, so this branch raises NameError
            # until it is restored.
            public_key, private_key = create_key_pair()
            key_pair = pickle.dumps([public_key, private_key])
            print(key_pair)
            print('create_key_pair_finish')
            _reply_and_reset(key_pair)
    if res.decode('utf-8') == 'close':
        s.close()
        break
print('Successful Closed.')
def winner(sa, sb, sc):
    """Play the three-player card game and return the winner ('A'/'B'/'C').

    Each deck is a sequence of 'a'/'b'/'c' characters.  Player 'a' starts;
    whoever is named on the played card takes the next turn; the first
    player whose deck is empty at the start of their turn wins.

    Fixed: the original looped exactly len(sa+sb+sc) times, so when the
    empty-deck check fell on the turn after the last card was played the
    loop ended without ever printing a winner.
    """
    decks = {'a': list(sa), 'b': list(sb), 'c': list(sc)}
    turn = 'a'
    while decks[turn]:
        turn = decks[turn].pop(0)
    return turn.upper()


if __name__ == '__main__':
    print(winner(input(), input(), input()))
|
import os
import pickle
import time
import pickle
import logger
def save_cur_iter_dynamics_model(params, saver, sess, itr):
    """Checkpoint the dynamics model for iteration *itr* when saving is enabled."""
    if not params.get("save_variables"):
        return
    ckpt = os.path.join(params['exp_dir'], "model-iter{}.ckpt".format(itr))
    save_path = saver.save(sess, ckpt)
    logger.log("Model saved in path {}".format(save_path))
def confirm_restoring_dynamics_model(params):
    """Truthy only when both the restore flag and a restore path are set."""
    wants_restore = params.get("restore_dynamics_variables", False)
    return wants_restore and params.get("restore_path", False)
def restore_model(params, saver, sess):
    """Restore the dynamics model checkpoint written at params['restart_iter']."""
    ckpt = os.path.join(params["restore_path"],
                        "model-iter{}.ckpt".format(params['restart_iter']))
    saver.restore(sess, ckpt)
    logger.log("Model restored from {}".format(ckpt))
def save_cur_iter_behavior_policy(params, saver, sess, itr):
    """Checkpoint the behavior policy for iteration *itr* when saving is enabled."""
    if not params.get("save_variables"):
        return
    ckpt = os.path.join(params['exp_dir'],
                        "behavior_policy-iter{}.ckpt".format(itr))
    save_path = saver.save(sess, ckpt)
    logger.log("Model saved in path {}".format(save_path))
def confirm_restoring_behavior_policy(params):
    """Truthy only when the BC restore flag and a restore path are both set."""
    wants_restore = params.get("restore_bc_variables", False)
    return wants_restore and params.get("restore_path", False)
def restore_behavior_policy(params, saver, sess):
    """Restore the behavior-policy checkpoint written at params['restart_iter']."""
    ckpt = os.path.join(params['restore_path'],
                        "behavior_policy-iter{}.ckpt".format(params['restart_iter']))
    saver.restore(sess, ckpt)
    logger.log("Behavior policy restored from {}".format(ckpt))
def save_cur_iter_policy(params, saver, sess, itr):
    """Checkpoint the policy for iteration *itr* when saving is enabled."""
    if not params.get("save_variables"):
        return
    ckpt = os.path.join(params['exp_dir'], "policy-iter{}.ckpt".format(itr))
    save_path = saver.save(sess, ckpt)
    logger.log("Model saved in path {}".format(save_path))
def confirm_restoring_policy(params):
    """Truthy only when the policy restore flag and a restore path are both set."""
    wants_restore = params.get("restore_policy_variables", False)
    return wants_restore and params.get("restore_path", False)
def restore_policy(params, saver, sess):
    """Restore the policy checkpoint written at params['restart_iter']."""
    ckpt = os.path.join(params['restore_path'],
                        "policy-iter{}.ckpt".format(params['restart_iter']))
    saver.restore(sess, ckpt)
    logger.log("Policy restored from {}".format(ckpt))
def restore_policy_for_video(restore_path, saver, sess):
    """Restore a policy checkpoint from an explicit path (video rollouts)."""
    saver.restore(sess, restore_path)
    logger.log("Policy restored from {}".format(restore_path))
def save_cur_iter_offline_data(params, train, val, bc_train, itr):
    """Pickle the offline datasets for iteration *itr* when saving is enabled.

    Writes train/val/behavior-policy collections into params['exp_dir'].
    """
    if not params.get("save_variables"):
        return
    collections = [("train_collection_{}.pickle", train),
                   ("val_collection_{}.pickle", val),
                   ("behavior_policy_train_collection_{}.pickle", bc_train)]
    for template, data in collections:
        path = os.path.join(params['exp_dir'], template.format(itr))
        with open(path, 'wb') as f:
            pickle.dump(data, f)
def restore_offline_data(params):
    """Load the pickled train/val/behavior-policy datasets for restart_iter.

    Returns the tuple (train, val, bc_train).
    """
    def _load(template):
        # Helper: unpickle one collection from the restore directory.
        path = os.path.join(params['restore_path'],
                            template.format(params['restart_iter']))
        with open(path, 'rb') as f:
            return pickle.load(f)

    return (_load('train_collection_{}.pickle'),
            _load('val_collection_{}.pickle'),
            _load('behavior_policy_train_collection_{}.pickle'))
def confirm_restoring_offline_data(params):
    """Truthy only when the offline-data restore flag and a path are both set."""
    wants_restore = params.get("restore_offline_data", False)
    return wants_restore and params.get("restore_path", False)
def confirm_restoring_value(params):
    """Truthy only when the value restore flag and a restore path are both set."""
    wants_restore = params.get("restore_value", False)
    return wants_restore and params.get("restore_path", False)
|
#coding="utf-8"
import requests,re
from urllib.parse import urlparse
def get_url(url):
    """GET *url* with a desktop User-Agent, force utf-8 decoding.

    TLS verification is disabled (verify=False), matching the site's
    certificate situation.  Returns the requests Response object.
    """
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3610.2 Safari/537.36"}
    response = requests.get(url, headers=headers, verify=False)
    response.encoding = 'utf-8'
    return response
def download_txt(novel_name, txt):
    """Append *txt* to <novel_name>.txt in the working directory.

    Fixed: opens the file as utf-8 explicitly.  The scraped pages are
    decoded as utf-8, and relying on the platform default encoding raised
    UnicodeEncodeError for CJK text on Windows (default cp936/locale).
    """
    with open(novel_name + '.txt', 'a+', encoding='utf-8') as f:
        f.write(txt)
if __name__ == '__main__':
    # Interactive loop: search a novel, pick one, pick a start chapter and
    # download everything from there to the end into <title>.txt.
    while 1:
        # --- search for a novel by keyword --------------------------------
        url = 'https://www.biqudu.com/searchbook.php?keyword='
        domain = urlparse(url).scheme + '://' + urlparse(url).netloc
        keyword = input('请输入作者或者书名:')
        result = get_url(url + keyword)
        novel_url_name = re.findall('<\/span><a href="(.*?)">\r\n\s+(.*?)\r\n\s+<\/a><\/dt>', result.text, re.S)
        if len(novel_url_name) < 1:
            ag = input('查找不到你输入的小说名称,是否重新输入(y/n):')
            if ag.lower() == 'y':
                continue
            else:
                break
        else:
            if len(novel_url_name) > 1:
                # Fixed: was novel_url_name.index(i) inside the loop --
                # O(n^2) and wrong when two results share the same entry.
                for idx, item in enumerate(novel_url_name, 1):
                    print('%d: %s' % (idx, item[1]))
                ch_novel = int(input('请选择你将要下载的小说:')) - 1
            else:
                ch_novel = 0
            # --- list chapters and pick where to start --------------------
            url = domain + novel_url_name[ch_novel][0]
            result = get_url(url)
            # Skip everything before the book's own chapter index.
            chapter_start_flag = re.search('</a></dd>\s+<dt>《' + novel_url_name[ch_novel][1] + '》.*?</dt>', result.text).span()[0]
            chapter_pat = re.compile(r'<dd> <a href="(.*?)">(.*?)</a></dd>', re.S)
            chapter_url_name = chapter_pat.findall(result.text, chapter_start_flag,)
            for idx, item in enumerate(chapter_url_name, 1):
                print('%d: %s ' % (idx, item[1]), end='')
            ch_start_chapter = int(input('\n请选择开始下载的章节序号:')) - 1
            chapter_text = novel_url_name[ch_novel][1] + '\n\n\n'
            download_txt(novel_url_name[ch_novel][1], chapter_text)
            # --- download chapter by chapter ------------------------------
            for chapter_index in range(ch_start_chapter, len(chapter_url_name)):
                url = domain + chapter_url_name[chapter_index][0]
                result = get_url(url)
                chapter_source = re.findall('<script>readx\(\);<\/script>(.*?)<script>chaptererror\(\);<\/script>', result.text, re.S)
                # Prepend the chapter title and turn <br/> into newlines.
                chapter_text = chapter_url_name[chapter_index][1] + '\n\n ' + chapter_source[0].replace('<br/>', '\n').lstrip() + '\n\n\n'
                download_txt(novel_url_name[ch_novel][1], chapter_text)
                print(chapter_url_name[chapter_index][1] + ' 已经下载完毕!')
            ag = input('是否继续下载小说(y/n):')
            if ag.lower() == 'y':
                continue
            else:
                break
import boto3
import time
import sys
# FIFO queue whose backlog drives the custom CloudWatch metric.
QUEUE_URL = 'https://sqs.us-east-2.amazonaws.com/334146420596/bhalwan-test-queue1.fifo'


def main():
    """Poll the queue size every 5 seconds and publish it as a metric."""
    while True:
        try:
            message_count = get_pending_message_count()
            publish_metric(message_count)
            time.sleep(5)
        except Exception:
            # Fixed: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt, making the poller impossible to stop.
            print("Unexpected error:", sys.exc_info()[0])
def get_pending_message_count() -> int:
    """Return the approximate number of messages currently on the queue."""
    sqs = boto3.client('sqs', 'us-east-2')
    response = sqs.get_queue_attributes(
        QueueUrl=QUEUE_URL, AttributeNames=['ApproximateNumberOfMessages'])
    return int(response['Attributes']['ApproximateNumberOfMessages'])
def publish_metric(message_count: int):
    """Publish *message_count* as the AverageQueueSize CloudWatch metric."""
    cloudwatch = boto3.client('cloudwatch', 'us-east-2')
    print("Num Messages " + str(message_count))
    cloudwatch.put_metric_data(
        Namespace='QueuePollerNamespace',
        MetricData=[{'MetricName': 'AverageQueueSize',
                     'Unit': 'None',
                     'Value': message_count}])
# Script entry point: run the poller loop forever.
if __name__ == '__main__':
    main()
|
from __future__ import division
from wonder.layout.generic import Generic
from wonder.utils import color_utils
import time
import math
class RaverPlaid:
    """Animated "raver plaid": three out-of-phase colour cosine waves
    overlaid with slowly drifting diagonal black stripes."""

    layout = None
    start_time = 0
    # How many sine wave cycles are squeezed into our n_pixels;
    # 24 happens to create nice diagonal stripes on the wall layout.
    freq_r = 24
    freq_g = 24
    freq_b = 24
    # How many seconds each colour wave takes to shift through a full cycle.
    speed_r = 7
    speed_g = -13
    speed_b = 19
    brightness = 256
    max_brightness = 256

    def __init__(self, host=None):
        self.layout = Generic(host)
        self.start_time = time.time()

    def _wave(self, t, pct, speed, freq):
        # One colour channel: cosine remapped from [-1, 1] to [0, brightness].
        phase = (t / speed + pct * freq) * math.pi * 2
        return color_utils.remap(math.cos(phase), -1, 1, 0, self.brightness)

    def iterate(self):
        """Render one animation frame across every pixel of the layout."""
        self.layout.clear_pixels()
        t = (time.time() - self.start_time) * 5
        for pixel in range(self.layout.NUM_PIXELS):
            pct = pixel / self.layout.NUM_PIXELS
            # Diagonal black stripes that drift over time.
            jitter = (pct * 77) % 37
            stripes = color_utils.cos(jitter, offset=t * 0.05, period=1,
                                      minn=-1.5, maxx=1.5)
            drift = color_utils.cos(t, offset=0.9, period=60,
                                    minn=-0.5, maxx=3)
            mask = color_utils.clamp(stripes + drift, 0, 1)
            # Three colour waves, out of sync with each other.
            r = mask * self._wave(t, pct, self.speed_r, self.freq_r)
            g = mask * self._wave(t, pct, self.speed_g, self.freq_g)
            b = mask * self._wave(t, pct, self.speed_b, self.freq_b)
            self.layout.add_pixel((r, g, b))
        self.layout.write_pixels()

    def set_brightness(self, pct):
        """Set brightness as a fraction (0..1) of max_brightness."""
        self.brightness = color_utils.clamp(pct * self.max_brightness, 0,
                                            self.max_brightness)
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
from ConfigParser import RawConfigParser, DEFAULTSECT
from obspy.core import UTCDateTime
import os
import sys
class DBConfigParser(RawConfigParser):
"""
Config parser for the database viewer. Inherits from
ConfigParser.RawConfigParser and adds comments.
It overwrites the write method so that each sections and the items in each
sections are sorted alphabetically and it also adds support to give each
item a commentary to ease documentation of the configuration file.
"""
def __init__(self, env):
"""
Init method that will also read and write the configuration file. No
further interaction is necessary.
"""
self.env = env
# XXX: I do not know how to use super here because RawConfigParser does
# not inherit from 'object'.
RawConfigParser.__init__(self)
# Dictionary to contain the optional comments.
self.comments = {}
self.config_file = self.env.config_file
# Check if the config file exists. Otherwise give a message, create the
# file and exits.
if not os.path.exists(self.config_file):
exists = False
else:
exists = True
self.getConfiguration()
if not exists:
msg = "No config file exists. A default one has been created at %s. Please edit it and restart the application." \
% self.config_file
print msg
sys.exit()
def write(self, fp):
"""
Write an .ini-format representation of the configuration state.
Override the write method to allow for comments in the configuration
file.
"""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
# Sort the sections.
sections = self._sections.keys()
sections.sort()
for section in sections:
fp.write("[%s]\n" % section)
# Enable sorting after keys.
keys = self._sections[section].keys()
keys.sort()
for key in keys:
if key != "__name__":
value = self._sections[section][key]
# Write the comment if there is one.
if self.comments.has_key(key):
lines = \
self.formatStringToCertainLength(self.comments[key],
77)
for line in lines:
fp.write("# %s\n" % line)
fp.write("%s = %s\n" %
(key, str(value).replace('\n', '\n\t')))
fp.write("\n")
def getConfiguration(self):
"""
Reads the config file and adds all values to self.env for application
wide access.
Will create the config file if it does not exists and add any missing
entries automatically.
"""
self.read(self.config_file)
# Check if sections exist and add them if necessary.
sections = ['Files and Directories', 'General Settings', 'Server',
'Picker', 'Appearance']
for section in sections:
if not self.has_section(section):
self.add_section(section)
# Read and set all values. Use a try/except construction for everything
# to write them if they are not set.
self.getOrSetDefault('Server', 'Seishub Server', 'seishub_server',
'http://teide.geophysik.uni-muenchen.de:8080',
comment='The address of the SeisHub server.')
self.getOrSetDefault('Server', 'Password', 'seishub_password',
'dbviewer',
comment='The password for the SeisHub server.')
self.getOrSetDefault('Server', 'User', 'seishub_user',
'dbviewer',
comment='The user for the SeisHub server.')
self.getOrSetDefault('Server', 'Timeout', 'seishub_timeout',
10, value_type='int',
comment='The timeout in seconds for the SeisHub server.')
self.getOrSetDefault('Files and Directories', 'Cache Directory', 'cache_dir',
os.path.join(self.env.home_dir, 'cache'),
comment='All cached files and databases will be stored in this directory.')
# Default set it for a span of one week ending one week ago.
self.getOrSetDefault('General Settings', 'Default Starttime', 'starttime',
'$today - 14$',
comment='The starttime when the database viewer is opened. Possible values are any string that obspy.core.UTCDateTime can parse or $today$ which represents the current day. It is also possible to add or remove whole days from today, e.g. $today - 7$ would be today one week ago.')
self.getOrSetDefault('General Settings', 'Default Endtime', 'endtime',
'$today - 7$',
comment='The endtime when the database viewer is opened. All options applicable to Default Starttime are also valid here.')
# Parse the start- and endtime. The endtime will always be at the end
# of the day.
self.env.starttime = self.parseTimes(self.env.starttime)
self.env.endtime = self.parseTimes(self.env.endtime) + 86399
# Debug mode.
self.getOrSetDefault('General Settings', 'Debug', 'debug', False,
value_type='boolean',
comment='Debugging messages True/False')
# Force software rendering.
comment = 'OpenGL is not supported on all machines and the program might crash right after the initial splash screen. Set this to True to enforce software rendering which is slower but works in any case.'
self.getOrSetDefault('General Settings', 'Force Software Rendering',
'software_rendering', False, value_type='boolean',
comment=comment)
# Details.
self.getOrSetDefault('Appearance', 'Detail', 'detail',
250, value_type='int',
comment='The number of vertical bars drawn for each plot')
# Double click time.
self.getOrSetDefault('General Settings', 'Double click time',
'double_click_time', 0.2, value_type='float',
comment='Maximum time in seconds between to clicks to be registered as a double click.')
# Buffer in days.
self.getOrSetDefault('General Settings',
'Buffer', 'buffer', 3,
value_type='int',
comment='Buffer in days before and after the shown plots when requesting from Seishub')
# Log scale of the plots.
# self.getOrSetDefault('Appearance', 'Log Scale', 'log_scale', False,
# value_type='boolean',
# comment='Plots have a log scale True/False')
# To help determine the maximum zoom level.
self.getOrSetDefault('General Settings', 'Preview delta',
'preview_delta', 30.0, value_type='float',
comment='The sample spacing in seconds of the preview files that are received from Seishub. This is dynamically adjusted once the first file has been requested but it is needed to set a maximum zoom level before any file has been requested.')
# Add picker settings.
self.getOrSetDefault('Picker',
'System call command',
'picker_command', 'obspyck.py -t $starttime$ -d $duration$ -i $channels$',
comment='System call command for the picker, e.g. obspyck.py -t $starttime$ -d $duration$ -i $channels$ (everything enclosed in $ symbols will be replaced with the corresponding variable. Available are starttime, endtime, duration (in seconds), channels)')
self.getOrSetDefault('Picker', 'Time format', 'picker_strftime',
'%Y-%m-%dT%H:%M:%S',
comment='Format for start- and endtime in strftime notation.')
self.getOrSetDefault('Picker', 'Channels enclosure',
'channel_enclosure', "''",
comment='The channels will be enclosed in these two symbols. More then two symbols are not allowed.')
self.getOrSetDefault('Picker', 'Channels seperator', 'channel_seperator',
',', comment='The single channels will be seperated by this.')
# Set event download range.
self.getOrSetDefault('General Settings', 'Events Starttime',
'event_starttime', '$today - 100$',
comment='Requesting events from SeisHub takes time. Therefore events will not be dynamically loaded during runtime (manual loading during runtime is possible) but they rather will be preloaded during the startup of the application. This defines the beginning of the time frame for the preloaded events. The same options as in Default Starttime are valid here.')
self.getOrSetDefault('General Settings', 'Events Endtime',
'event_endtime', '$today$',
comment='This defines the end of the time frame for the preloaded events. The same options as in Default Starttime are valid here.')
# Plot height.
self.getOrSetDefault('Appearance', 'Plot Height', 'plot_height', 50,
value_type='int',
comment='Height of a single plot in pixel.')
# Parse the start- and endtime for the event time frame. The endtime
# will always be at the end of the day.
self.env.event_starttime = self.parseTimes(self.env.event_starttime)
self.env.event_endtime = self.parseTimes(self.env.event_endtime) + 86399
# Ensure that at least the time span that is plotted is in the event
# time span.
if self.env.starttime < self.env.event_starttime:
self.env.event_starttime = self.env.starttime
if self.env.endtime > self.env.event_endtime:
self.env.event_endtime = self.env.endtime
# Writing the configuration file.
with open(self.config_file, 'wb') as configfile:
self.write(configfile)
def getOrSetDefault(self, section, name, env_name, default_value,
value_type=None, comment=None):
"""
Use a try except construction to get the value of name and write it to
self.env.
section = The section in the config file.
name = The name of the item in the config file.
env_name = The corresponding name in the environment
default_value = The default value in the file if the config file is
faulty or could not be found.
value_type = Type of the value.
comment = Optional commen of the configuration option.
"""
# Always set the comment to ensure it will always be written.
if comment:
self.comments[self.optionxform(name)] = comment
# Determine the type.
if value_type == 'int':
funct = self.getint
elif value_type == 'float':
funct = self.getfloat
elif value_type == 'boolean':
funct = self.getboolean
else:
funct = self.get
# Actually set the attribute.
try:
setattr(self.env, env_name, funct(section, name))
except:
setattr(self.env, env_name, default_value)
self.set(section, name, default_value)
def formatStringToCertainLength(self, string, length):
    """
    Wrap *string* into lines of at most *length* characters each.

    Leading and trailing whitespace is removed, words are never split.
    Returns a list of strings.

    BUG FIX: the original accumulated every word as ``' ' + item`` starting
    from an empty string, so each wrapped line carried a spurious leading
    space (and could exceed *length* by one because the space was counted).
    XXX: Words longer than *length* are kept on their own line and may
    still exceed the limit.
    """
    string = string.strip()
    # Nothing to wrap.
    if len(string) <= length:
        return [string]
    lines = []
    current = ''
    for word in string.split(' '):
        if not current:
            # First word of a fresh line -- no separating space needed.
            current = word
        elif len(current) + 1 + len(word) <= length:
            current += ' ' + word
        else:
            lines.append(current)
            current = word
    lines.append(current)
    return lines
def parseTimes(self, time):
    """
    Parses the start- and endtimes. Possible values are any string that
    obspy.core.UTCDateTime can parse or $today$ which represents the
    current day. Whole days can be added or removed from today, e.g.
    $today - 7$ represents today one week ago.

    Returns a UTCDateTime instance; raises Error for a malformed
    $...$ expression.
    """
    if '$' in time:
        # Strip whitespace, then drop the leading and trailing '$'.
        time = time.strip()[1:-1]
        items = time.split(' ')
        # Midnight at the start of the current day.
        today = UTCDateTime()
        today = UTCDateTime(today.year, today.month, today.day)
        # Plain "$today$" -- no offset expression.
        if len(items) == 1:
            return today
        # "$today +/- N$": shift by whole days (86400 seconds per day).
        if len(items) == 3:
            if items[1] == '-':
                today = today - (int(items[2]) * 86400)
            elif items[1] == '+':
                today = today + (int(items[2]) * 86400)
            else:
                # Operator is neither '+' nor '-'.
                msg = 'Error in the times of the configuration file.'
                raise Error(msg)
            return today
        # Any other token count is malformed.
        msg = 'Error in the times of the configuration file.'
        raise Error(msg)
    # No '$' markers: let UTCDateTime parse the string directly.
    return UTCDateTime(time)
|
import tkinter as tk
import tkinter.ttk as ttk
from tkinter.font import Font
from tkinter.messagebox import *
import pandas as pd
import numpy as np
import re
from string import punctuation
import datetime
# python scripts
import discussion_table
import clerk_management_page
import main
# TODO: 191
class clerk_post_discussion(tk.Frame):
    """
    Tkinter page that lets a clerk view and post discussion comments on
    computer items.  All persistent state lives in Excel files under
    csv_files/ (privileged_users, items, discussions, suspend_users,
    taboo_list).
    """

    def __init__(self, name, username, master=None): #
        tk.Frame.__init__(self, master)
        self.type_user = 'clerk'
        # User-info account
        self.name = name
        self.username = username
        # Look up this clerk's stored credentials and id.
        df_privileged_users = pd.read_excel("csv_files/privileged_users.xlsx")
        df_info_user = df_privileged_users[ (df_privileged_users['Type_user'] == self.type_user) & (df_privileged_users["Username"] == self.username)]
        if len(df_info_user) != 0 :
            self.password = df_info_user['Password'].iloc[-1]
            self.id = df_info_user['ID'].iloc[-1]
        self.master.title("Clerk Post Discussion Page")
        self.master.geometry("563x725")
        self.master.configure( background = "light blue" )
        self.create_widgets()

    def create_widgets(self):
        """Build and place all widgets of the page on the toplevel window."""
        self.top = self.winfo_toplevel()
        self.style = tk.ttk.Style()
        # Titles
        self.style.configure("LabelTitle.TLabel", relief=tk.SUNKEN, anchor="center", font=("Helvetica", 17), background = "#49A")
        self.LabelTitle = tk.ttk.Label(self.top, text="Clerk Post Discussion", style="LabelTitle.TLabel")
        self.LabelTitle.place(relx=0.22, rely=0.015, relwidth=0.62, relheight=0.066)
        # Row 0 view computer types
        self.style.configure("Label1.TLabel",anchor="w", font=("Helvetica",15), background = "light blue")
        self.LabelComputerTypes = tk.ttk.Label(self.top, text="Computer Types:", style='Label1.TLabel')
        self.LabelComputerTypes.place(relx=0.16, rely=0.12, relwidth=0.4, relheight=0.051)
        self.Combo1List1 = ["Laptop", "Desktop", "Workstation", "Mainframe", "Server", "Computer Part"]
        self.Combo1 = tk.ttk.Combobox(self.top, state="readonly",values=self.Combo1List1, font=("Helvetica",11))
        # Refresh the name list whenever a different type is picked.
        self.Combo1.bind("<<ComboboxSelected>>", self.get_combo1)
        self.Combo1.place(relx=0.49, rely=0.12, relwidth=0.4, relheight=0.049)
        self.Combo1.set(self.Combo1List1[0])
        # Row 1 view computer names (initially populated with laptops,
        # matching the default selection of Combo1).
        self.style.configure("Label1.TLabel",anchor="w", font=("Helvetica",15), background = "light blue")
        self.LabelComputerNames = tk.ttk.Label(self.top, text="Computer Names:", style='Label1.TLabel')
        self.LabelComputerNames.place(relx=0.16, rely=0.22, relwidth=0.4, relheight=0.051)
        df_items = pd.read_excel("csv_files/items.xlsx")
        df_laptop = df_items[df_items['Type'] == 'Laptop']
        list_laptop = list(df_laptop['Name'])
        self.Combo2List1 = list_laptop
        self.Combo2 = tk.ttk.Combobox(self.top, state="readonly",values=self.Combo2List1, font=("Helvetica",11))
        self.Combo2.place(relx=0.49, rely=0.22, relwidth=0.4, relheight=0.049)
        self.Combo2.set(self.Combo2List1[0])
        # Row 2: view all comments for the selected item.
        self.LabelComment = tk.ttk.Label(self.top, text="Comments of this Item:", style='Label1.TLabel')
        self.LabelComment.place(relx=0.16, rely=0.32, relwidth=0.6, relheight=0.051)
        self.style.configure("CommandView.TButton", font=("Helvetica",14))
        self.CommandView = tk.ttk.Button(self.top, text="View", command=lambda: self.command_view_comment("All"), style="CommandView.TButton")
        self.CommandView.place(relx=0.7, rely=0.31, relwidth=0.18, relheight=0.053)
        # Row 3: view only comments posted by this clerk.
        self.style.configure("Label1.TLabel",anchor="w", font=("Helvetica",15), background = "light blue")
        self.LabelDiscussion = tk.ttk.Label(self.top, text="Discussion I Posted:", style='Label1.TLabel')
        self.LabelDiscussion.place(relx=0.16, rely=0.42, relwidth=0.6, relheight=0.051)
        self.style.configure("CommandView.TButton", font=("Helvetica",14))
        self.CommandView1 = tk.ttk.Button(self.top, text="View", command=lambda: self.command_view_comment("Me"), style="CommandView.TButton")
        self.CommandView1.place(relx=0.7, rely=0.41, relwidth=0.18, relheight=0.053)
        # Row 4: headline entry.
        self.Label2 = tk.ttk.Label(self.top, text="Add a Headline", style='Label1.TLabel')
        self.Label2.place(relx=0.16, rely=0.51, relwidth=0.33, relheight=0.054)
        self.Text1Var = tk.StringVar()
        self.Text1 = tk.ttk.Entry(self.top, textvariable=self.Text1Var, font=("Helvetica",11))
        self.Text1.place(relx=0.49, rely=0.510, relwidth=0.4, relheight=0.052)
        # Row 3 Write a comment
        self.Label3 = tk.ttk.Label(self.top, text="Write a Comment:", style='Label1.TLabel')
        self.Label3.place(relx=0.16, rely=0.60, relwidth=0.33, relheight=0.054)
        self.Text3 = tk.Text(self.top, font=("Helvetica",11), wrap=tk.WORD)
        self.Text3.place(relx=0.49, rely=0.613, relwidth=0.4, relheight=0.2)
        # Confirm Button
        self.style.configure("CommandConfirm.TButton", font=("Helvetica",14))
        self.CommandConfirm = tk.ttk.Button(self.top, text="Confirm", command=self.command_confirm, style="CommandConfirm.TButton")
        self.CommandConfirm.place(relx=0.25, rely=0.88, relwidth=0.19, relheight=0.07)
        # Cancel Button
        self.CommandCancel = tk.ttk.Button(self.top, text="Cancel", command=self.command_cancel, style="CommandConfirm.TButton")
        self.CommandCancel.place(relx=0.57, rely=0.88, relwidth=0.19, relheight=0.07)

    def get_combo1(self, event):
        """
        Reload the item-name combobox with the names of the newly selected
        computer type.
        #["Laptop", "Desktop", "Workstation", "Mainframe", "Server", "Computer Part"]
        """
        df_items = pd.read_excel("csv_files/items.xlsx")
        # NOTE(review): some branches filter on lowercase type strings
        # ('workstation', 'mainframe', 'server') while others use the
        # capitalized form -- presumably this mirrors the spreadsheet's
        # inconsistent Type values; verify against items.xlsx.
        if self.Combo1.get() == 'Laptop':
            df_computer = df_items[df_items['Type'] == 'Laptop']
            list_computer = list(df_computer['Name'])
        elif self.Combo1.get() == 'Desktop':
            df_computer = df_items[df_items['Type'] == 'Desktop']
            list_computer = list(df_computer['Name'])
        elif self.Combo1.get() == 'Workstation':
            df_computer = df_items[df_items['Type'] == 'workstation']
            list_computer = list(df_computer['Name'])
        elif self.Combo1.get() == 'Mainframe':
            df_computer = df_items[df_items['Type'] == 'mainframe']
            list_computer = list(df_computer['Name'])
        elif self.Combo1.get() == 'Server':
            df_computer = df_items[df_items['Type'] == 'server']
            list_computer = list(df_computer['Name'])
        else:
            df_computer = df_items[df_items['Type'] == 'Computer Part']
            list_computer = list(df_computer['Name'])
        # Replace the name combobox with a fresh one holding the new list.
        self.Combo2List1 = list_computer
        self.Combo2 = tk.ttk.Combobox(self.top, state="readonly",values=self.Combo2List1, font=("Helvetica",11))
        self.Combo2.place(relx=0.49, rely=0.22, relwidth=0.4, relheight=0.049)
        self.Combo2.set(self.Combo2List1[0])

    def command_view_comment(self, discussion_type):
        """
        Open the discussion table for the selected item.

        discussion_type: "All" shows every non-violated comment on the
        item, "Me" only those posted by this clerk.
        # 3 discussion_type: "All" "Me" "Guest" "Me All"
        """
        item_name = self.Combo2.get()
        coming_from_page = 'clerk_post_discussion'
        if discussion_type == "All":
            df = pd.read_excel( "csv_files/discussions.xlsx" )
            # Comments flagged as violated are never shown.
            df_no_violated = df[df['Status'] == "Non-Violated"]
            df_item = df_no_violated[df_no_violated['Computer Name'] == item_name]
            if len(df_item) == 0:
                tk.messagebox.showinfo("Info", "No comment of this item posted")
            else:
                self.top.destroy()
                discussion_table.discussion_table( coming_from = None,
                                                   coming_from_discuss = coming_from_page,
                                                   item_name = item_name,
                                                   customer_name = self.name, customer_Id = self.id,
                                                   customer_username = self.username,
                                                   discussion_type = discussion_type, df = df_item)
        elif discussion_type == "Me":
            df = pd.read_excel( "csv_files/discussions.xlsx" )
            df_no_violated = df[df['Status'] == "Non-Violated"]
            df_me = df_no_violated[df_no_violated['Username'] == self.username]
            df_me_item = df_me[df_me['Computer Name'] == item_name]
            if len(df_me_item) == 0:
                tk.messagebox.showinfo("Info", "You haven't posted any comment on this item")
            else:
                self.top.destroy()
                discussion_table.discussion_table( coming_from = None,
                                                   coming_from_discuss = coming_from_page,
                                                   item_name = item_name, customer_name = self.name,
                                                   customer_Id = self.id, customer_username = self.username,
                                                   discussion_type = discussion_type, df = df_me_item )

    def command_confirm(self):
        """
        Validate headline and comment, censor taboo words, append the new
        discussion row to discussions.xlsx, and warn/suspend the clerk if
        taboo words were used.
        """
        # Get computer name
        item_name = self.Combo2.get()
        rating = str('empty')
        # Headline: censor taboo words and remember whether any were found.
        self.Headline, self.flag_taboo_headline = self.replace_bad_words(self.Text1.get())
        if self.Headline.strip() != "":
            self.flag_headline_valid = True
        else:
            self.flag_headline_valid = False
            tk.messagebox.showerror("Error", "Headline cannot be empty")
        self.TextComment, self.flag_taboo_content = self.replace_bad_words(self.Text3.get("1.0", "end"))
        self.now = datetime.datetime.now()
        self.DataTime = self.now.strftime("%y-%m-%d %H:%M")
        Status = "Non-Violated"
        if self.TextComment.strip() != "":
            self.flag_comment_valid = True
        else:
            self.flag_comment_valid = False
            tk.messagebox.showerror("Error", "Comment cannot be empty")
        if self.flag_headline_valid and self.flag_comment_valid:
            df = pd.read_excel("csv_files/discussions.xlsx")
            if len(df) == 0:
                # First row ever: start ids at 0.
                Id = 0
                tempo = pd.DataFrame([[Id, self.type_user, self.username.lower(), self.name, item_name, rating, self.Headline, self.TextComment, self.DataTime, Status]],
                                     columns=['ID', 'Type User', 'Username', 'Name', 'Computer Name', 'Vote', 'Headline','Comment', 'Timestamp', 'Status'])
                df = df.append(tempo)
            else:
                # Next id = last id + 1.
                Id = int(df['ID'].iloc[-1])
                Id = Id+1
                tempo = pd.DataFrame([[Id, self.type_user, self.username.lower(), self.name, item_name, rating, self.Headline, self.TextComment, self.DataTime, Status]],
                                     columns=['ID', 'Type User', 'Username', 'Name', 'Computer Name', 'Vote', 'Headline', 'Comment', 'Timestamp', 'Status'])
                df = df.append(tempo)
            # Check if suspended
            df_suspend = pd.read_excel( "csv_files/suspend_users.xlsx" )
            df_suspend_user = df_suspend[df_suspend['Type_user'] == self.type_user]
            if self.username.lower() in list(df_suspend_user['Username']):
                tk.messagebox.showerror("Error", "You can't write any comment because you are suspended")
            else:
                tk.messagebox.showinfo("Success","New comment posted")
                # refresh the text entered
                self.Text1Var = tk.StringVar()
                self.Text1 = tk.ttk.Entry(self.top, textvariable=self.Text1Var, font=("Helvetica",11))
                self.Text1.place(relx=0.49, rely=0.510, relwidth=0.4, relheight=0.06)
                self.Text3 = tk.Text(self.top, font=("Helvetica",11), wrap=tk.WORD)
                self.Text3.place(relx=0.49, rely=0.613, relwidth=0.4, relheight=0.2)
            # Taboo words anywhere earn the clerk a warning (and possibly
            # an automatic suspension inside update_warning).
            if self.flag_taboo_headline or self.flag_taboo_content:
                df_privileged_users = pd.read_excel("csv_files/privileged_users.xlsx")
                tk.messagebox.showwarning("Warning","You just got one warning because the comment you just posted contains taboo word(s)")
                self.update_warning(df_privileged_users, 'clerk')
            # NOTE(review): the file is written even on the suspended
            # branch above, so a suspended clerk's comment still gets
            # persisted -- confirm whether that is intended.
            df.to_excel("csv_files/discussions.xlsx", index=False)

    def command_cancel(self):
        """Close this page and return to the clerk management page."""
        self.top.destroy()
        clerk_management_page.clerk_management_page(self.name, self.username)

    def replace_bad_words(self, my_str):
        """
        Replace every taboo word in my_str with asterisks of equal length.

        Returns (censored_string, flag) where flag is True if at least one
        taboo word was found.  Words are split on whitespace and on most
        punctuation characters (a small set like #$!.'"?%:& is kept as
        part of words).
        """
        flag_taboo_word = False
        df_taboo = pd.read_excel("csv_files/taboo_list.xlsx")
        string = str(my_str)
        # Build the set of punctuation characters used as separators.
        # NOTE(review): iterating over re.escape(punctuation) also yields
        # the inserted backslashes -- presumably harmless since the result
        # is re.escape()d again below, but verify.
        punctuation_ = ""
        for letter in re.escape( punctuation):
            if letter not in "#$!.'\"?%:&":
                punctuation_ += letter
        r = re.compile(r'[\s{}]+'.format(re.escape(punctuation_)))
        my_list = r.split(string)
        # Re-join with single spaces, then censor in the joined string.
        my_string = " "
        my_string = my_string.join(my_list)
        for word in my_list:
            if word.lower() in list(df_taboo['Taboo Words']):
                #Change it to *****
                number_of_star = len(word)
                my_string = my_string.replace(word, "*"*number_of_star )
                flag_taboo_word = True
        return my_string, flag_taboo_word

    def update_warning(self, df, type_user):
        """
        Increment this user's warning count in privileged_users.xlsx; at 3
        warnings the user is added to suspend_users.xlsx, marked suspended,
        and sent back to the home page.
        """
        df = pd.read_excel("csv_files/privileged_users.xlsx")
        df2 = df[df['Type_user'] == type_user]
        df_privileged_active = df2[df2['Status'] == "active"]
        if self.username.lower() in list(df_privileged_active['Username']):
            flag_username_exist = True
            df_user_row = df[df['Username'] == self.username]
            df_row_list = df_user_row.to_numpy().tolist()
            # Positional columns of the privileged_users sheet.
            name = df_row_list[0][1]
            password = df_row_list[0][3]
            current_warning = int(df_user_row['Warnings'].iloc[-1])
            chance_login = 1
            deny_notify = 0
        else:
            flag_username_exist = False
        if flag_username_exist:
            # Update the warning (+1)
            if current_warning < 3:
                current_warning = current_warning + 1
                df['Warnings'] = np.where((df['Username'] == self.username) & (df['Type_user'] == type_user), current_warning, df.Warnings)
                df.to_excel("csv_files/privileged_users.xlsx", index=False)
            # Auto suspend if Warning == 3
            if current_warning >= 3:
                df_suspend = pd.read_excel("csv_files/suspend_users.xlsx")
                df_suspend_type = df_suspend[df_suspend['Type_user'] == type_user]
                suspend_reason = "3 standing warnings"
                if len(df_suspend) == 0:
                    Id = 0
                    tempo = pd.DataFrame([[str(Id), type_user, self.username.lower(), password, name, current_warning, suspend_reason, chance_login, deny_notify]],
                                         columns=['ID', 'Type_user', 'Username', 'Password', 'Name','Current_warnings', 'Suspend_reason', 'Chance_login', 'Customer_deny_notify'])
                    df_suspend = df_suspend.append(tempo)
                else:
                    # Only append if not already in the suspension list.
                    if not self.username.lower() in list(df_suspend_type['Username']):
                        Id = int(df_suspend['ID'].iloc[-1])
                        Id = Id+1
                        tempo = pd.DataFrame([[str(Id), type_user, self.username.lower(), password, name, current_warning, suspend_reason, chance_login, deny_notify]],
                                             columns=['ID', 'Type_user', 'Username', 'Password', 'Name', 'Current_warnings','Suspend_reason', 'Chance_login', 'Customer_deny_notify'])
                        df_suspend = df_suspend.append(tempo)
                # update suspend_user file and customers file
                df_suspend.to_excel("csv_files/suspend_users.xlsx", index=False)
                df_privileged = pd.read_excel("csv_files/privileged_users.xlsx")
                df_privileged['Status'] = np.where((df_privileged['Username'] == self.username) & (df_privileged['Type_user'] == type_user), 'suspended', df_privileged.Status)
                #df_privileged.loc[df_privileged['Username'] == username, 'Status'] = 'suspended'
                df_privileged.to_excel("csv_files/privileged_users.xlsx", index=False)
                # Kick the now-suspended user back to the home page.
                self.top.destroy()
                main.HomePage()
                tk.messagebox.showerror("Error", "Sorry, you are suspended")
# Test Only
#---------------------Main----------
if __name__ == "__main__":
    top = tk.Tk()
    # BUG FIX: the original called clerk_post_discussion(top), which binds
    # the Tk root to the `name` parameter and omits the required `username`
    # argument entirely (TypeError).  Pass dummy credentials and hand the
    # root window to `master`.
    clerk_post_discussion("Test Clerk", "testclerk", master=top).mainloop()
|
from typing import Tuple
import numpy as np
import matplotlib.pyplot as plt
def estimate_params(X: np.ndarray, Y: np.ndarray, Td: float) -> Tuple[float, float, float, np.ndarray]:
    """
    Least-squares estimate of the R-L circuit parameters from sampled data.

    X  -- regressor matrix whose columns are [voltage[k], current[k]]
          (see the __main__ section below).
    Y  -- regressand vector current[k+1].
    Td -- sampling period in seconds.

    Returns (R, L, Te, k) where k = [k0, k1] are the coefficients of the
    discrete model i[k+1] = k0*u[k] + k1*i[k], R the resistance, Te the
    electrical time constant and L the inductance.
    """
    # np.linalg.lstsq solves the least-squares problem in a numerically
    # stable way; the original explicit inv(X.T @ X) @ X.T @ Y normal
    # equations square the condition number and raise LinAlgError for a
    # rank-deficient X.
    k, *_ = np.linalg.lstsq(X, Y, rcond=None)
    R = (1 - k[1]) / k[0]
    Te = -Td / np.log(k[1])
    L = Te * R
    return (R, L, Te, k)
if __name__ == '__main__':
    ## Given
    T = 0.1  # Period
    Td = 1e-3  # sampling period
    img_dir = '../img/'
    data_path = '../data/testLab1Var6.csv'
    columns = ['time', 'current', 'voltage']
    # Load the measurement log as a structured array with named columns.
    data = np.genfromtxt(data_path, names=columns, delimiter=',')
    ## Display all data
    fig, (cur_ax, vol_ax) = plt.subplots(2, 1, sharex=True)
    cur_ax.plot(data['time'], data['current'])
    cur_ax.set(xlabel='Время t, с',
               ylabel='Сила тока, А',
               title='Сила тока и напряжение от времени')
    cur_ax.grid()
    vol_ax.plot(data['time'], data['voltage'])
    vol_ax.set(xlabel='Время t, с',
               ylabel='Напряжение, В')
    vol_ax.grid()
    fig.savefig(img_dir + 'current-voltage-all.png')
    ## Display only N periods
    N = 2
    t_final = N * T
    # Restrict all three channels to the first N periods.
    time = data['time'][data['time'] < t_final]
    current = data['current'][data['time'] < t_final]
    voltage = data['voltage'][data['time'] < t_final]
    fig, (cur_ax, vol_ax) = plt.subplots(2, 1, sharex=True)
    cur_ax.plot(time, current)
    cur_ax.set(xlabel='Время t, с',
               ylabel='Сила тока, А',
               title='Сила тока и напряжение от времени')
    cur_ax.grid()
    vol_ax.plot(time, voltage)
    vol_ax.set(xlabel='Время t, с',
               ylabel='Напряжение, В')
    vol_ax.grid()
    fig.savefig(img_dir + f'current-voltage-{N}T.png')
    ## Estimate parameters
    # Regressors are [u[k], i[k]]; the regressand is i[k+1].
    X = np.c_[data['voltage'][:-1], data['current'][:-1]]
    Y = data['current'][1:]
    R, L, Te, k = estimate_params(X, Y, Td)
    print(f'Estimated R = {R} Ohm\n'
          f'Estimated L = {L} Hn')
    ## Compare model and given data
    # One-step-ahead prediction of the current, truncated to the plot span.
    current_est = (X @ k)[:time.size]
    fig, ax = plt.subplots()
    ax.plot(time, current, label='Исходные данные')
    ax.plot(time, current_est, label='Аппроксимированные данные')
    ax.set(xlabel='Время, с',
           ylabel='Сила тока, А',
           title='Сравнение модели и данных')
    ax.grid()
    ax.legend()
    fig.savefig(img_dir + 'current_estimation_comparison.png')
    ## Mean and std of parameters
    # Re-estimate on each individual period to gauge the spread.
    R_est = np.array([])
    L_est = np.array([])
    n = 1000
    for i in range(n):
        indices = (i * T <= data['time']) & (data['time'] <= (i + 1) * T)
        curr = data['current'][indices]
        volt = data['voltage'][indices]
        X = np.c_[volt[:-1], curr[:-1]]
        Y = curr[1:]
        R, L, Te, k = estimate_params(X, Y, Td)
        R_est = np.append(R_est, R)
        L_est = np.append(L_est, L)
    # Non-finite estimates (degenerate windows) are excluded from the stats.
    print(
        f'Mean R: {np.mean(R_est[np.isfinite(R_est)])}, Ohm\n'
        f'Standard deviation R: {np.std(R_est[np.isfinite(R_est)])}\n'
        f'Mean L: {np.mean(L_est[np.isfinite(L_est)])}, Hn\n'
        f'Standard deviation L: {np.std(L_est[np.isfinite(L_est)])}\n'
    )
|
import logging
import os
import time
from typing import NamedTuple
import boto3
from botocore.exceptions import ClientError
log = logging.getLogger()
log.setLevel(os.getenv("LOG_LEVEL", logging.INFO))
class DNSRegistrator(object):
    """
    Registers and deregisters weighted Route53 A records for an ECS task.

    The hosted zone and record name are read from docker labels on the
    task definition's containers:
      - DNSHostedZoneId: id of the Route53 hosted zone (required)
      - DNSName: record name to register (required)
      - DNSRegisterPublicIp: "true" to register the public ip (default "true")
    The task id serves as the record's SetIdentifier, so every task owns
    its own weighted record.
    """

    def __init__(self, task_arn: str, cluster_arn: str, task_definition_arn):
        self.task_arn = task_arn
        # Task id is the last path component of the ARN.
        self.task_id = task_arn.split("/")[-1]
        self.cluster_arn = cluster_arn
        self.task_definition_arn = task_definition_arn
        self.task = {}
        self.task_definition = {}
        self.network_interfaces = []
        self.dns_entries = []
        self.dns_entry: DNSEntry = None
        self.ecs = boto3.client("ecs")
        self.ec2 = boto3.client("ec2")
        self.route53 = boto3.client("route53")

    def get_network_interfaces(self):
        """
        Resolve the task's attached elastic network interfaces into EC2
        descriptions in self.network_interfaces.  Interfaces that no
        longer exist are ignored with a log message.
        """
        attachments = [
            a
            for a in self.task.get("attachments")
            if a["type"] == "ElasticNetworkInterface" and a["status"] == "ATTACHED"
        ]
        self.network_interfaces = []
        # BUG FIX: the original re-assigned `attachment = attachments[0]`
        # inside this loop, so only the first attachment was ever used.
        for attachment in attachments:
            eni_id = next(
                (
                    d["value"]
                    for d in attachment["details"]
                    if d["name"] == "networkInterfaceId"
                ),
                "none",
            )
            try:
                response = self.ec2.describe_network_interfaces(
                    NetworkInterfaceIds=[eni_id]
                )
                self.network_interfaces.append(response["NetworkInterfaces"][0])
            except ClientError as e:
                if e.response["Error"]["Code"] != "InvalidNetworkInterfaceID.NotFound":
                    raise
                log.info("ignoring network interface %s", eni_id)

    def get_ip_addresses(self, public_ip):
        """
        Return the ip addresses of the resolved network interfaces.

        public_ip -- True selects the public (association) address,
                     False the private address.
        """
        result = []
        for network_interface in self.network_interfaces:
            if public_ip:
                result.append(network_interface.get("Association").get("PublicIp"))
            else:
                result.append(network_interface.get("PrivateIpAddress"))
        return result

    def get_task_definition(self):
        """
        Load the task definition into self.task_definition and extract its
        DNS labels (see get_dns_entries); resets to {} on lookup failure.
        """
        try:
            response = self.ecs.describe_task_definition(
                taskDefinition=self.task_definition_arn
            )
            self.task_definition = response["taskDefinition"]
            self.get_dns_entries()
        except ClientError as e:
            log.error(
                'no task definition found with id "%s, %s', self.task_definition_arn, e
            )
            self.task_definition = {}

    def get_task(self):
        """Load the task description into self.task; {} on failure."""
        try:
            response = self.ecs.describe_tasks(
                cluster=self.cluster_arn, tasks=[self.task_arn]
            )
            self.task = response["tasks"][0]
        except ClientError as e:
            log.error(
                'no task found with id "%s" on cluster "%s", %s',
                self.task_arn,
                self.cluster_arn,
                e,
            )
            self.task = {}

    def get_dns_entries(self):
        """
        Collect DNSEntry records from the container docker labels into
        self.dns_entries and select the first one as self.dns_entry.
        """
        self.dns_entries = []
        for c in self.task_definition.get("containerDefinitions", {}):
            # ROBUSTNESS: containers without any labels return None here;
            # treat that the same as an empty label dict.
            labels = c.get("dockerLabels") or {}
            hosted_zone_id = labels.get("DNSHostedZoneId")
            dns_name = labels.get("DNSName")
            public_ip = "true" == labels.get("DNSRegisterPublicIp", "true")
            if not hosted_zone_id:
                continue
            if not dns_name:
                log.error(
                    'task "%s" tagged with "DNSHostedZonedId", but no label "DNSName"',
                    self.task_arn,
                )
                continue
            # Route53 record names are stored fully qualified.
            self.dns_entries.append(
                DNSEntry(hosted_zone_id, '{}.'.format(dns_name.rstrip('.')), public_ip)
            )
        self.dns_entry = self.dns_entries[0] if self.dns_entries else None

    def register_dns_entry(self, ip_address):
        """UPSERT a weighted A record for this task pointing at ip_address."""
        log.info('registering "%s" for task "%s"', self.dns_entry.name, self.task_arn)
        response = self.route53.change_resource_record_sets(
            HostedZoneId=self.dns_entry.hosted_zone_id,
            ChangeBatch={
                # BUG FIX: the original literal contained a stray unmatched
                # "{by" fragment ("registration of {by ecs-dns-registrator").
                "Comment": "registration by ecs-dns-registrator",
                "Changes": [
                    {
                        "Action": "UPSERT",
                        "ResourceRecordSet": {
                            "Name": self.dns_entry.name,
                            "Type": "A",
                            "SetIdentifier": self.task_id,
                            "Weight": 100,
                            "TTL": 30,
                            "ResourceRecords": [{"Value": ip_address}],
                        },
                    }
                ],
            },
        )
        wait_for_route53_change_completion(self.route53, response)

    def get_resource_record_set(self):
        """
        Return the A record owned by this task (matched by name and
        SetIdentifier), or None if it does not exist.
        """
        for page in self.route53.get_paginator('list_resource_record_sets').paginate(
            HostedZoneId=self.dns_entry.hosted_zone_id,
            StartRecordType='A',
            StartRecordName=self.dns_entry.name
        ):
            for rr_set in page['ResourceRecordSets']:
                if rr_set['Name'] == self.dns_entry.name and rr_set.get('SetIdentifier') == self.task_id:
                    return rr_set
        return None

    def deregister_dns_entry(self):
        """Delete this task's A record, if present."""
        log.info('deregistering "%s" for task "%s"', self.dns_entry.name, self.task_id)
        rr_set = self.get_resource_record_set()
        if rr_set:
            response = self.route53.change_resource_record_sets(
                HostedZoneId=self.dns_entry.hosted_zone_id,
                ChangeBatch={
                    "Comment": "deregistration by ecs-dns-registrator",
                    "Changes": [
                        {
                            "Action": "DELETE",
                            # FIX: reuse the record fetched above instead of
                            # issuing a second, redundant Route53 query.
                            "ResourceRecordSet": rr_set
                        }
                    ],
                },
            )
            wait_for_route53_change_completion(self.route53, response)
            log.info('DNS record name "%s" for set identifier "%s" deregistered', self.dns_entry.name, self.task_id)

    def handle(self, desired_state, last_state):
        """
        React to a task state change: register the record once the task is
        RUNNING, delete it when the task is STOPPED.
        """
        self.get_task_definition()
        if not self.dns_entry:
            # skip task definitions without proper labels.
            return
        self.get_task()
        if not self.task:
            log.error('task "%s" was not found', self.task_arn)
            return
        if desired_state == "RUNNING" and last_state == "RUNNING":
            self.get_network_interfaces()
            ip_addresses = self.get_ip_addresses(self.dns_entry.register_public_ip)
            if ip_addresses:
                self.register_dns_entry(ip_addresses[0])
            else:
                log.error('no ip address was found to register task %s', self.task_arn)
        elif desired_state == "STOPPED":
            self.deregister_dns_entry()
def wait_for_route53_change_completion(route53: object, change: dict):
    """Poll Route53 every 5 seconds until the given change is INSYNC."""
    change_id = change['ChangeInfo']['Id'].rsplit('/', 1)[-1]
    while True:
        if change['ChangeInfo']['Status'] == 'INSYNC':
            return
        log.info('waiting for change %s to complete', change_id)
        time.sleep(5)
        change = route53.get_change(Id=change_id)
# Immutable description of one DNS record to manage: the hosted zone it
# lives in, its fully-qualified name, and whether the public ip is used.
DNSEntry = NamedTuple(
    "DNSEntry",
    [
        ("hosted_zone_id", str),
        ("name", str),
        ("register_public_ip", bool),
    ],
)
def handler(event, context):
    """
    Lambda entry point for ECS task state change events.

    Extracts the task identifiers from the event detail and delegates to
    DNSRegistrator.handle, which registers or deregisters the task's DNS
    record depending on the state transition.
    """
    if event.get("detail-type") != "ECS Task State Change":
        # BUG FIX: the original used a bare "%" placeholder, which is not a
        # valid logging format specifier and caused a formatting error
        # instead of logging the offending detail-type.
        log.error("unsupported event, %s", event.get("detail-type"))
        return
    task_arn = event["detail"]["taskArn"]
    task_definition_arn = event["detail"]["taskDefinitionArn"]
    cluster_arn = event["detail"]["clusterArn"]
    desired_state = event["detail"]["desiredStatus"]
    last_state = event["detail"]["lastStatus"]
    registrator = DNSRegistrator(task_arn, cluster_arn, task_definition_arn)
    registrator.handle(desired_state, last_state)
|
from django.urls import path
from . import views
from django.contrib.auth.views import LoginView,LogoutView
# URL routes for the shop app: auth pages, product/order/store creation,
# and per-product management actions keyed by product name or id.
urlpatterns = [
    path('',views.indexView,name="home"),
    path('dashboard/',views.dashboardView,name="dashboard"),
    # Built-in auth views; logout redirects back to the login page.
    path('login/',LoginView.as_view(),name="login_url"),
    path('register/',views.registerView,name="register_url"),
    path('logout/',LogoutView.as_view(next_page='login_url'),name="logout"),
    path('create/', views.create_product, name='create_product'),
    path('create/orders', views.create_order, name='create_order'),
    path('create/products', views.create_products, name='create_products'),
    path('create/stores', views.create_stores, name='create_stores'),
    # Product-specific actions; the product is addressed by its name
    # (delete/update/add use a path parameter).
    path('delete/<str:product_name>', views.delete_product, name='delete_product'),
    path('search/', views.search_product, name='search_product'),
    path('update/<str:product_name>', views.update_product, name='update_product'),
    path('add_products/<str:product_id>', views.add_product, name='add_product')
]
from django.conf.urls import url
from django.urls import path
from django.views.generic import TemplateView
# BUG FIX: Django expects `urlpatterns` to be a *list* of URL patterns.
# The original assigned the result of path('', url(...)), which passes a
# pattern object where a view callable is required and is not a valid
# URLconf.  Serve index.html at the site root.
urlpatterns = [
    url('^$', TemplateView.as_view(template_name='index.html')),
]
#
# @lc app=leetcode id=421 lang=python3
#
# [421] Maximum XOR of Two Numbers in an Array
#
# @lc code=start
class Trie:
    """Binary trie over 32-bit integers for maximum-XOR queries."""

    def __init__(self):
        self.root = {}

    def insert(self, num):
        """Store num's bits in the trie, most significant bit first."""
        node = self.root
        for shift in reversed(range(32)):
            bit = (num >> shift) & 1
            node = node.setdefault(bit, {})

    def query(self, num):
        """Return the maximum XOR of num with any previously inserted number."""
        node = self.root
        best = 0
        for shift in reversed(range(32)):
            bit = (num >> shift) & 1
            # Greedily follow the opposite bit whenever possible -- that
            # sets the current result bit to 1.
            opposite = 1 - bit
            if opposite in node:
                best = (best << 1) | 1
                node = node[opposite]
            else:
                best = best << 1
                node = node[bit]
        return best
class Solution:
    def findMaximumXOR(self, nums: "List[int]") -> int:
        """
        Return the maximum XOR obtainable from any pair of numbers in nums.

        Builds a bitwise trie of all numbers, then greedily queries the
        best XOR partner for each one.

        BUG FIX: the annotation is quoted because `List` is never imported
        in this file -- an unquoted `List[int]` raises NameError the moment
        the class body is executed.
        """
        trie = Trie()
        for num in nums:
            trie.insert(num)
        res = 0
        for num in nums:
            res = max(res, trie.query(num))
        return res
# @lc code=end
|
import pytest
from test_checkout import Checkout
@pytest.fixture()
def checkout():
    """Provide a fresh Checkout instance for each test."""
    return Checkout()
def test_CanAddItemPrice(checkout):
    # Smoke test: registering a price for an item must not raise.
    checkout.addItemPrice("a",1)
def test_CanAddItem(checkout):
    # Smoke test: scanning an item must not raise.
    checkout.addItem("b")
def test_CanCalculateTotal(checkout):
    # An empty checkout totals zero.
    assert checkout.calculateTotal()==0
def test_GetCorrectTotalWithMultipleItem(checkout):
    # Two priced items (1 + 2) scanned once each total 3.
    checkout.addItemPrice("a",1)
    checkout.addItemPrice("b",2)
    checkout.addItem("a")
    checkout.addItem("b")
    assert checkout.calculateTotal()==3
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add filing_number to Filing and redefine filing_id as the primary key."""

    dependencies = [
        ('efilings', '0007_auto_20150825_0120'),
    ]

    operations = [
        # New nullable integer column holding just the numeric part of the
        # FEC filing number.
        migrations.AddField(
            model_name='filing',
            name='filing_number',
            field=models.IntegerField(help_text=b'The integer part of the filing number assigned to this electronic filing by the FEC', null=True),
        ),
        # filing_id becomes the unique alphanumeric primary key.
        migrations.AlterField(
            model_name='filing',
            name='filing_id',
            field=models.CharField(help_text=b'The alphanumeric filing number assigned to this electronic filing by the FEC', max_length=15, unique=True, serialize=False, primary_key=True),
        ),
    ]
|
from math import*
# Read the three user inputs (prompt strings kept exactly as in the
# original script).
amount = float(input(" eank "))
first_int = int(input(" fhrfv "))
second_int = int(input(" iuguyl "))

# Combine the integers: |sqrt(5*d1) + pi**(d2/3)|, truncate to an int,
# and print the (truncated) difference from the first input.
combined = abs(sqrt(5 * first_int) + pi ** (second_int / 3))
truncated = int(combined)
print(int(amount - truncated))
#!/usr/bin/env python
import os
import sys
from setuptools import setup
# Minimal packaging metadata for the giantbls package; most fields are
# still placeholders.
setup(name='giantbls',
      version='0.0.0',
      description="",
      author='Samuel Grunblatt, Nicholas Saunders',
      license='',
      # Map the import package to the giantbls/ source directory.
      package_dir={'giantbls': 'giantbls'},
      )
|
# Show a table of n, n squared and 2**n for n = 1..20.
# To keep the table aligned, every field is right-justified (the ">" flag
# in format) and the column widths match the largest values.
print("{:>2s} {:>3s} {:>7s}".format("n", "n²", "2**n"))
# range(A, B) runs from A (inclusive) to B (exclusive).
# Since we want the numbers 1 through 20 (both inclusive),
# the interval must be 1 to 21 (exclusive) so that 20 is included.
for n in range(1,21):
    print("{:2d} {:3d} {:7d}".format(n, n**2, 2**n))
# Modify the program to show the squares of 1..20. (Use the range function.)
# Also, add a column to show 2**n. Adjust the formatting.
|
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        # Children start empty; callers attach them after construction.
        self.left = None
        self.right = None
        self.val = x
class Solution(object):
    def increasingBST(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode

        Rebuild the tree as a right-skewed chain whose nodes appear in
        in-order (ascending) sequence.
        """
        def inorder(node):
            # Generator yielding node values left-root-right.
            if node is None:
                return
            yield from inorder(node.left)
            yield node.val
            yield from inorder(node.right)

        dummy = TreeNode(None)
        tail = dummy
        for value in inorder(root):
            tail.right = TreeNode(value)
            tail = tail.right
        return dummy.right
"""
#
# Tree 1
# 10
# / \
# 5 15
# / \ / \
# 2 8 12 20
# / \
# 6 9
"""
t1 = TreeNode(10)
t1.left = TreeNode(5)
t1.right = TreeNode(15)
t1.left.left = TreeNode(2)
t1.left.right = TreeNode(8)
t1.left.right.left = TreeNode(6)
t1.left.right.right = TreeNode(9)
t1.right.left = TreeNode(12)
t1.right.right = TreeNode(20)
solution = Solution()
res = solution.increasingBST(t1)
print (res) |
from collections import Counter
from datetime import datetime, timedelta
import configparser
import numpy as np
import os
import pandas as pd
import smtplib
import re
import lib as ipr_lib
output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..//input_output//'))
def get_grades(personnel, behavioral):
    """
    Compute one person's behavioral grade and collected remarks.

    personnel  -- single-person group (from a groupby over 'Fullname');
                  gains 'grade' (mean rating as a percentage of 5) and
                  'remarks' (newline-joined, emoji-stripped) columns.
    behavioral -- evaluation sheet with per-role rating/remarks columns.
    Returns the augmented personnel frame.
    """
    # Pattern matching emoji and other decorative symbols so they can be
    # stripped from free-text remarks.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U0001f97a"
                               u"\u2B06"
                               u"\ufe0f"
                               u"\u2B50"
                               u"\u0c07-\u0c7F"  #Telugu
                               u"\u0300-\u036F"  #Combining Marks
                               u"\u2000-\u206F"  #General Punctuation
                               u"\u25A0-\u25FF"  # Geometric Shapes
                               "]+", flags=re.UNICODE)
    name = personnel.Nickname.values[0]
    lst_grade = []
    remarks = []
    # For every evaluator role column, pick up the matching rating and
    # remarks columns addressed to this person.
    for col_name in ['monitoring partner', 'previous mt', 'previous ct', 'backup monitoring personnel', 'backup monitoring personnel.1', 'monitoring partner (2)']:
        for col_type in ['rating', 'remarks']:
            # Select columns of the right type that mention this role,
            # disambiguating the '.1' and '(2)' duplicate-column variants
            # by requiring the '1'/'2' marker to match the role name.
            col_filt = behavioral.columns.str.contains(col_type)
            col_filt = col_filt & behavioral.columns.str.contains(col_name.replace(r'(2)', '').replace('.1', '').strip())
            col_filt = col_filt & ((behavioral.columns.str.contains('1')) == ('1' in col_name))
            col_filt = col_filt & ((behavioral.columns.str.contains('2')) == ('2' in col_name))
            if col_type == 'rating':
                # Keep only numeric ratings (drop NaNs).
                temp_grade = np.array(list(behavioral.loc[behavioral[col_name] == name, col_filt].values.flatten()))
                lst_grade += list(temp_grade[~np.isnan(temp_grade)])
            if col_type == 'remarks':
                remarks += list(behavioral.loc[behavioral[col_name] == name, behavioral.columns[col_filt]].values.flatten())
    # Grade = mean rating rescaled from the 1-5 scale to a percentage.
    personnel.loc[:, 'grade'] = np.round(100*np.mean(lst_grade)/5, 2)
    # list for remarks without emojis
    filteredRemarks = []
    for i in remarks:
        filteredRemarks.append(emoji_pattern.sub(r'',str(i)))
    # Join only remarks whose original value was non-null.
    personnel.loc[:, 'remarks'] = '\n'.join(np.array(filteredRemarks)[pd.notnull(remarks)])
    return personnel
def get_credentials():
    """
    Read every option from input_output/config.txt into a flat dict.

    Options from later sections overwrite identically named ones from
    earlier sections.
    """
    parser = configparser.ConfigParser(inline_comment_prefixes=';')
    parser.read(output_path + '/config.txt')
    return {
        option: parser.get(section, option)
        for section in parser.sections()
        for option in parser.options(section)
    }
def main(start, end):
    """Pull the roster and evaluation sheets, grade everyone, e-mail remarks.

    start/end bound the accepted 'date of shift' window. Returns the graded
    personnel DataFrame.
    """
    # Google-sheet key for the personnel roster.
    key = "1UylXLwDv1W1ukT4YNoUGgHCHF-W8e3F8-pIg1E024ho"
    personnel = ipr_lib.get_sheet(key, "personnel")
    # Keep only currently-active people.
    personnel = personnel.loc[personnel.current == 1, ['Nickname', 'Fullname']].dropna()
    email = ipr_lib.get_sheet(key, "email")
    email = email.loc[:, ['Nickname', 'emails']].dropna()
    personnel = pd.merge(personnel, email, on='Nickname').set_index('Fullname')
    per_personnel = personnel.groupby('Fullname', as_index=False)
    # Google-sheet key for the behavioral evaluations.
    key = "1wZhFAvBDMF03fFxlnXoJJ1sH4iOSlN8a2DmOMYW_IxM"
    behavioral = ipr_lib.get_sheet(key, "behavioral")
    behavioral.columns = behavioral.columns.str.lower()
    # Replace full names with nicknames in every evaluator column.
    behavioral.loc[:, ['name', 'monitoring partner', 'previous mt', 'previous ct', 'backup monitoring personnel', 'backup monitoring personnel.1', 'monitoring partner (2)']] = behavioral.loc[:, ['name', 'monitoring partner', 'previous mt', 'previous ct', 'backup monitoring personnel', 'backup monitoring personnel.1', 'monitoring partner (2)']].replace(personnel.to_dict()['Nickname'])
    # Map the textual rating labels onto their numeric 1-5 values.
    behavioral.loc[:, behavioral.columns[behavioral.columns.str.contains('rating for')]] = behavioral.loc[:, behavioral.columns[behavioral.columns.str.contains('rating for')]].replace({'5 - Outstanding': 5, '4 - Very satisfactory': 4, '4 - Very Satisfactory': 4, '4': 4, '3 - Satisfactory': 3, '3 - Average': 3, '2 - Unsatisfactory': 2, '2': 2, '1 - Poor': 1})
    # Restrict to the grading window and de-duplicate per person per shift.
    behavioral = behavioral.loc[(pd.to_datetime(behavioral['date of shift']) >= start) & (pd.to_datetime(behavioral['date of shift']) <= end), :]
    behavioral = behavioral.drop_duplicates(['date of shift', 'name'], keep='last')
    personnel_grade = per_personnel.apply(get_grades, behavioral=behavioral).reset_index(drop=True)
    credentials = get_credentials()
    # NOTE(review): 'imap.gmail.com' with smtplib looks wrong — SMTP submission
    # is normally smtp.gmail.com:587; confirm this actually connects.
    conn = smtplib.SMTP('imap.gmail.com', 587)
    conn.ehlo()
    conn.starttls()
    conn.login(credentials['mail'], credentials['password'])
    for i in range(len(personnel_grade)):
        # print(personnel_grade.remarks[i])
        print(personnel_grade.emails[i])
        # conn.sendmail(credentials['mail'], personnel_grade.emails[i], 'Subject: Monitoring Behavioral \n\n Your IOMP monitoring behavioral grade from {start} to {end} is {grade}. Here are the remarks of your evaluators: \n\n{remarks}'.format(start=start, end=end, grade=personnel_grade.grade[i], remarks=personnel_grade.remarks[i]))
        conn.sendmail(credentials['mail'], personnel_grade.emails[i], 'Subject: Monitoring Behavioral \n\nGood day! Here are the remarks from your evaluators: \n\n{remarks}.\n\nYour final grade will be released along with your monitoring IPR grade.'.format(start=start, end=end, grade=personnel_grade.grade[i], remarks=personnel_grade.remarks[i]))
    conn.quit()
    return personnel_grade
if __name__ == '__main__':
    ts_now = datetime.now()
    # Truncate 'now' to the first day of the current month at midnight.
    end = ts_now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # Grade the preceding half-year window: runs in Jan-Jun start the window
    # on Dec 1 of the previous year, runs in Jul-Dec start it on Jun 1.
    if end.month <= 6:
        start = end.replace(year=end.year-1, month=12)
    else:
        start = end.replace(month=6)
    df = main(start, end)
from config import exp_ranges
from utilities import create_pairs_list, load_data, resample_ohlc
from trading.strategies import strat_dict # contains all strategies with metadata
from functions import optimise_backtest, optimise_bt_multi, calc_stats_many
import time
'''
Cycles through all pairs and all timescales, each time producing a dictionary of results for a range of param settings
Saves these dictionaries as dataframes in csv files for further analysis
Prints best result (according to sqn score) for each pair in each timescale
'''
if __name__ == '__main__':
    strat = 'hma_dvb' # 'hma','hma_dvb'
    printout=True
    strategy = strat_dict.get(strat)
    name = strategy.get('name')
    strat_func = strategy.get('func')
    start = time.perf_counter()
    pairs = create_pairs_list('BTC')
    # NOTE(review): the hard-coded list below overrides the
    # create_pairs_list() result on the previous line — confirm intentional.
    pairs = ['BTCUSDT', 'ETHBTC', 'BNBBTC', 'ETHUSDT', 'BNBUSDT', 'XMRBTC']
    for pair in pairs:
        print(f'Testing {pair} on {time.ctime()[:3]} {time.ctime()[9]} at {time.ctime()[11:-8]}')
        price, vol = load_data(pair)
        # Source candles are 1-minute bars, so rows / 1440 = days of data.
        days = len(vol) / 1440
        for timescale in exp_ranges.keys():
            ts_start = time.perf_counter()
            print(f'Testing {timescale} at {time.ctime()[11:-8]}')
            # Buy-and-hold (HODL) benchmark: open of first bar to close of last.
            first_price = price.iloc[0, 0]
            last_price = price.iloc[-1, 3]
            hodl_profit = profit = (100 * (last_price - first_price) / first_price)
            # Keep only the parameter ranges this strategy actually uses.
            all_param_ranges = exp_ranges.get(timescale)[0]
            param_ranges = {k:v for (k, v) in all_param_ranges.items() if k in strat_dict.get(strat)['params']}
            params = tuple(param_ranges.values()) # *args for optimise_backtest
            # Encode the swept ranges into the result-file name.
            param_str_list = [f'{k}_{v[0]}-{v[1]}-{v[2]}' for (k, v) in param_ranges.items()]
            param_str = '_'.join(param_str_list)
            if timescale != '1min':
                r_price, r_vol = resample_ohlc(price, vol, timescale)
            else:
                r_price, r_vol = price, vol
            if len(r_vol) > 0:
                # print(ind_cache)
                backtest = optimise_bt_multi(r_price, strat_func, *params)
                # print(ind_cache.get('p1').keys())
                # ind_cache['p1'] = {}
                # ind_cache['p2'] = {}
                # ind_cache['p3'] = {}
                results = calc_stats_many(backtest, days, pair, timescale, name, param_str, hodl_profit)
                if printout:
                    print(f'Tests recorded: {len(results.index)}')
                # Only report a best setting when it clears an SQN score of 2.
                if len(results.index) > 0 and results["sqn"].max() > 2:
                    best = results['sqn'].argmax()
                    if printout:
                        print(f'Best SQN: {results["sqn"].max()}, Best settings: {results.iloc[best, 0]}')
            ts_end = time.perf_counter()
            print(f'{pair} {timescale} took {round((ts_end-ts_start)/60)}m {round((ts_end-ts_start)%60)}s')
            print('-' * 80)
        mid = time.perf_counter()
        seconds = round(mid - start)
        print(f'{pair} took: {seconds // 60} minutes, {seconds % 60} seconds')
        print('-' * 40)
    end = time.perf_counter()
    seconds = round(end - start)
    print(f'Time taken: {seconds // 60} minutes, {seconds % 60} seconds')
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 16:49:25 2019
@author: xuan
"""
import sys
import numpy as np
import scipy.io as sio
from scipy import stats
# adding os.path to do filetesting - CP, 2019-06-19
import os.path
from os import path
if sys.version[0] == '2':
import cPickle as pickle
else:
import _pickle as pickle
def list_to_nparray(X):
    """Vertically stack a list of 2-D arrays into one array.

    X: non-empty list of arrays that all share the same column count.
    Returns a single 2-D ndarray with the rows of every element, in order.
    """
    # One C-level vstack call instead of the original O(n^2) loop that
    # re-copied the accumulated result on every iteration.
    return np.vstack(X)
#kernel_SD can be either 50 or 100
#kernel_type can be either "gaussian" or "half_gaussian"
#bin_width counts in s
#spike_counts is just "spike_counts" variable in "basic_use_of_xds_Python.py
def smooth_binned_spikes(spike_counts, bin_width, kernel_type, kernel_SD, sqrt = 0):
smoothed = []
binned_spikes = spike_counts.T.tolist()
if sqrt == 1:
for (i, each) in enumerate(binned_spikes):
binned_spikes[i] = np.sqrt(each)
bin_size = bin_width
kernel_hl = np.ceil( 3 * kernel_SD / bin_size )
normalDistribution = stats.norm(0, kernel_SD)
x = np.arange(-kernel_hl*bin_size, (kernel_hl+1)*bin_size, bin_size)
kernel = normalDistribution.pdf(x)
if kernel_type == 'gaussian':
pass
elif kernel_type == 'half_gaussian':
for i in range(0, int(kernel_hl)):
kernel[i] = 0
n_sample = np.size(binned_spikes[0])
nm = np.convolve(kernel, np.ones((n_sample))).T[int(kernel_hl):n_sample + int(kernel_hl)]
for each in binned_spikes:
temp1 = np.convolve(kernel,each)
temp2 = temp1[int(kernel_hl):n_sample + int(kernel_hl)]/nm
smoothed.append(temp2)
#print('The input spike counts have been smoothed.')
return np.asarray(smoothed).T
class lab_data:
    """Loader/accessor for an 'xds' dataset exported from MATLAB (.mat).

    Unpacks binned neural spike counts plus optional EMG / force /
    kinematics streams and per-trial metadata, and provides per-trial
    slicing and re-binning utilities.
    """
    def __init__(self, base_path, file_name):
        """Load base_path + file_name and print a summary; raises if missing."""
        # File stem without the '.mat' extension; reused by save_to_pickle().
        self.file_name = file_name[:-4]
        file_name = ''.join([base_path, file_name])
        print( file_name )
        if not path.exists( file_name ):
            # NOTE(review): 'Can''t' is two adjacent string literals and
            # concatenates to "Cant find file:" — probably meant an escape.
            raise Exception( 'Can''t find file:' + file_name )
        self.parse_file(file_name)
        self.print_file_info()
    def parse_file(self, file_name):
        """Unpack the MATLAB 'xds' struct into flat Python attributes.

        The repeated [0][0] indexing unwraps scipy.io.loadmat's nested
        object-array representation of MATLAB structs/cells.
        """
        readin = sio.loadmat(file_name)
        xds = readin['xds']
        self.time_frame = xds['time_frame'][0][0]
        self.matlab_meta = xds['meta'][0][0]
        self.__meta = dict()
        self.__meta['monkey_name'] = self.matlab_meta[0][0]['monkey'][0]
        self.__meta['task_name'] = self.matlab_meta[0][0]['task'][0]
        self.__meta['duration'] = self.matlab_meta[0][0]['duration'][0]
        self.__meta['collect_date'] = self.matlab_meta[0][0]['dateTime'][0]
        self.__meta['raw_file_name'] = self.matlab_meta[0][0]['rawFileName'][0]
        self.__meta['array'] = self.matlab_meta[0][0]['array'][0]
        # Flags marking which optional data streams are present in the file.
        self.has_EMG = xds['has_EMG'][0][0][0]
        self.has_kin = xds['has_kin'][0][0][0]
        self.has_force = xds['has_force'][0][0][0]
        self.bin_width = xds['bin_width'][0][0][0]
        self.sorted = xds['sorted'][0][0][0]
        self.spike_counts = xds['spike_counts'][0][0]
        self.spikes = xds['spikes'][0][0][0].tolist()
        self.unit_names = []
        for each in xds['unit_names'][0][0][0].tolist():
            self.unit_names.append(each[0])
        if self.has_EMG == 1:
            self.EMG = xds['EMG'][0][0]
            self.EMG_names = []
            for each in xds['EMG_names'][0][0][0].tolist():
                self.EMG_names.append(each[0])
        if self.has_force == 1:
            self.force = xds['force'][0][0]
        if self.has_kin == 1:
            self.kin_p = xds['kin_p'][0][0]
            self.kin_v = xds['kin_v'][0][0]
            self.kin_a = xds['kin_a'][0][0]
        # Per-trial metadata arrays (one entry per trial).
        self.trial_target_corners = xds['trial_target_corners'][0][0]
        self.trial_target_dir = xds['trial_target_dir'][0][0]
        self.trial_result = xds['trial_result'][0][0]
        self.trial_start_time = xds['trial_start_time'][0][0]
        self.trial_end_time = xds['trial_end_time'][0][0]
        self.trial_gocue_time = xds['trial_gocue_time'][0][0]
        self.trial_info_table_header = []
        for each in xds['trial_info_table_header'][0][0].tolist():
            self.trial_info_table_header.append(each[0][0])
        self.trial_info_table = xds['trial_info_table'][0][0].tolist()
        # Channel counts per stream (0 when the stream is absent).
        self.n_neural = np.size(self.spike_counts, 1)
        if self.has_EMG == 1:
            self.n_EMG = np.size(self.EMG, 1)
        else:
            self.n_EMG = 0
        if self.has_force == 1:
            self.n_force = np.size(self.force, 1)
        else:
            self.n_force = 0
    def get_meta(self):
        """Return the metadata dict (the internal dict itself, not a copy)."""
        a = dict()
        a = self.__meta
        return a
    def print_file_info(self):
        """Print a human-readable summary of the loaded dataset."""
        print('Monkey: %s' % (self.__meta['monkey_name']))
        print('Task: %s' % (self.__meta['task_name']))
        print('Collected on %s ' % (self.__meta['collect_date']))
        print('Raw file name is %s' % (self.__meta['raw_file_name']))
        print('The array is in %s' % (self.__meta['array']))
        print('There are %d neural channels' % (self.n_neural))
        print('Sorted? %d' % (self.sorted))
        print('There are %d EMG channels' % (self.n_EMG))
        print('Current bin width is %.4f seconds' % (self.bin_width))
        if self.has_EMG == 1:
            print('The name of each EMG channel:')
            for i in range(len(self.EMG_names)):
                print(self.EMG_names[i])
        print('The dataset lasts %.4f seconds' % (self.__meta['duration']))
        print('There are %d trials' % (len(self.trial_result)))
        print('In %d trials the monkey got reward' % (len(np.where(self.trial_result == 'R')[0])))
    def print_trial_info_table_header(self):
        """Print every column name of the trial info table."""
        for each in self.trial_info_table_header:
            print(each)
    def get_one_colum_in_trial_info_table(self, colum_name):
        """Return one named column of the trial info table as a flat list."""
        # Index of the requested column in the header.
        n = np.where(np.asarray(self.trial_info_table_header) == colum_name)[0][0]
        a = []
        for each in self.trial_info_table:
            a.append(each[n][0][0])
        return a
    def save_to_pickle(self, path, file_name = 0):
        """Pickle this object to path + file_name (defaults to the .mat stem)."""
        if file_name == 0:
            f = ''.join((path, self.file_name))
        else:
            f = ''.join((path, file_name))
        with open (f, 'wb') as fp:
            pickle.dump(self, fp)
        print('Save to %s successfully' %(f))
    def get_trials_idx(self, my_type, trial_start, time_ahead):
        """ my_type: 'R', 'A', 'F' """
        """ 'R' for reward """
        """ 'A' for aborted """
        """ 'F' for failed """
        # trial_start: 'start_time' or 'gocue_time' — which event anchors the
        # window; time_ahead: seconds included before that anchor.
        # Returns a list of time_frame index arrays, one per matching trial;
        # trials with NaN anchor or end times are skipped.
        trials_idx = []
        if trial_start == 'start_time':
            my_T = self.trial_start_time
        elif trial_start == 'gocue_time':
            my_T = self.trial_gocue_time
        temp = np.where(self.trial_result == my_type)[0]
        if len(temp) != 0:
            for n in temp:
                if np.isnan(self.trial_end_time[n]) == False:
                    if np.isnan(my_T[n]) == False:
                        ind = np.where((self.time_frame > my_T[n] - time_ahead) & (self.time_frame < self.trial_end_time[n]))[0]
                        trials_idx.append(ind)
        return trials_idx
    def get_trials_data_spike_counts(self, my_type = 'R', trial_start = 'start_time', time_ahead = 0):
        """Return a list of per-trial spike-count arrays (samples x channels)."""
        trial_spike_counts = []
        ind = self.get_trials_idx(my_type, trial_start, time_ahead)
        for n in ind:
            a = self.spike_counts[n, :]
            trial_spike_counts.append(a)
        return trial_spike_counts
    def get_trials_data_time_frame(self, my_type = 'R', trial_start = 'start_time', time_ahead = 0):
        """Return a list of per-trial time stamp arrays."""
        trial_time_frame = []
        ind = self.get_trials_idx(my_type, trial_start, time_ahead)
        for n in ind:
            a = self.time_frame[n, :]
            trial_time_frame.append(a)
        return trial_time_frame
    def get_trials_data_EMG(self, my_type = 'R', trial_start = 'start_time', time_ahead = 0):
        """Return per-trial EMG arrays, or 0 when the file has no EMG."""
        if self.has_EMG == 0:
            print('There is no EMG in this file')
            return 0
        else:
            trial_EMG = []
            ind = self.get_trials_idx(my_type, trial_start, time_ahead)
            for n in ind:
                a = self.EMG[n, :]
                trial_EMG.append(a)
            return trial_EMG
    def get_trials_data_force(self, my_type = 'R', trial_start = 'start_time', time_ahead = 0):
        """Return per-trial force arrays, or 0 when the file has no force."""
        if self.has_force == 0:
            print('There is no force in this file')
            return 0
        else:
            trial_force = []
            ind = self.get_trials_idx(my_type, trial_start, time_ahead)
            for n in ind:
                a = self.force[n, :]
                trial_force.append(a)
            return trial_force
    def get_trials_data_kin(self, my_type = 'R', trial_start = 'start_time', time_ahead = 0):
        """Return (positions, velocities, accelerations) lists per trial, or 0."""
        if self.has_kin == 0:
            print('There is no kinematics in this file')
            return 0
        else:
            trial_kin_p = []
            trial_kin_v = []
            trial_kin_a = []
            ind = self.get_trials_idx(my_type, trial_start, time_ahead)
            for n in ind:
                a = self.kin_p[n, :]
                trial_kin_p.append(a)
                b = self.kin_v[n, :]
                trial_kin_v.append(b)
                c = self.kin_a[n, :]
                trial_kin_a.append(c)
            return trial_kin_p, trial_kin_v, trial_kin_a
    def get_trials_summary(self, my_type = 'R', trial_start = 'gocue_time'):
        """Collect start/end/gocue times and target info for matching trials.

        Trials whose anchor or end time is NaN are skipped. Returns a dict of
        parallel arrays keyed by 'trial_start_time', 'trial_end_time',
        'gocue_time', 'tgt_corners' and 'tgt_dir'.
        """
        if trial_start == 'start_time':
            my_T = self.trial_start_time
        elif trial_start == 'gocue_time':
            my_T = self.trial_gocue_time
        trials_summary = dict()
        trials_summary['trial_type'] = my_type
        temp = np.where(self.trial_result == my_type)[0]
        if len(temp) != 0:
            a = [[], [], [], [], []]
            for i in range(len(temp)):
                if np.isnan(self.trial_end_time[temp[i]]) == True:
                    continue
                if np.isnan(my_T[temp[i]]) == True:
                    continue
                a[0].append(self.trial_start_time[temp[i]])
                a[1].append(self.trial_end_time[temp[i]])
                a[2].append(self.trial_gocue_time[temp[i]])
                a[3].append(self.trial_target_corners[temp[i]])
                a[4].append(self.trial_target_dir[temp[i]])
            trials_summary['trial_start_time'] = np.asarray(a[0])
            trials_summary['trial_end_time'] = np.asarray(a[1])
            trials_summary['gocue_time'] = np.asarray(a[2])
            trials_summary['tgt_corners'] = np.asarray(a[3])
            trials_summary['tgt_dir'] = np.asarray(a[4])
        return trials_summary
    def update_bin_data(self, bin_size, update = 1):
        """Re-bin spikes (and resample the other streams) to bin_size seconds.

        When update == 1 the object's attributes are replaced in place;
        extra samples in the slower streams are truncated to the spike length.
        """
        t_spike, spike_counts = self.bin_spikes(bin_size)
        if self.has_EMG == 1:
            t_EMG, EMG = self.resample_EMG(bin_size)
            if len(t_EMG) > len(t_spike):
                EMG = EMG[:len(t_spike), :]
        if self.has_force == 1:
            t_force, force = self.resample_force(bin_size)
            if len(t_force) > len(t_spike):
                force = force[:len(t_spike), :]
        if self.has_kin == 1:
            t_kin, kin_p, kin_v, kin_a = self.resample_kin(bin_size)
            if len(t_kin) > len(t_spike):
                kin_p = kin_p[:len(t_spike), :]
                kin_v = kin_v[:len(t_spike), :]
                kin_a = kin_a[:len(t_spike), :]
        if update == 1:
            self.time_frame = t_spike
            self.bin_width = bin_size
            self.spike_counts = spike_counts
            if self.has_EMG == 1:
                self.EMG = EMG
            if self.has_force == 1:
                self.force = force
            if self.has_kin == 1:
                self.kin_p = kin_p
                self.kin_v = kin_v
                self.kin_a = kin_a
    def bin_spikes(self, bin_size):
        """Histogram raw spike times into bin_size bins.

        Returns (bin_edges[1:] as a column vector, counts of shape
        (n_bins, n_units)).
        """
        print('The new bin width is %.4f s' % (bin_size))
        spike_counts = []
        bins = np.arange(self.time_frame[0], self.time_frame[-1], bin_size)
        bins = bins.reshape((len(bins),))
        for each in self.spikes:
            bb=each.reshape((len(each),))
            out, _ = np.histogram(bb, bins)
            spike_counts.append(out)
        bins = bins.reshape((len(bins),1))
        return bins[1:], np.asarray(spike_counts).T
    def resample_EMG(self, bin_size):
        """Downsample EMG to bin_size by picking every n-th sample.

        Returns (times, samples), or 0 when unavailable / bin_size is finer
        than the current bin width.
        """
        if self.has_EMG == 0:
            print('There is no EMG in this file.')
            return 0
        else:
            if bin_size < self.bin_width:
                print('Cannot bin EMG using this bin size')
                return 0
            else:
                down_sampled = []
                t = []
                # Decimation factor relative to the current bin width.
                n = bin_size/self.bin_width
                length = int(np.floor(np.size(self.EMG, 0)/n))
                for i in range(1, length):
                    down_sampled.append(self.EMG[int(np.floor(i*n)),:])
                    t.append(self.time_frame[int(np.floor(i*n))])
                down_sampled = np.asarray(down_sampled)
                t = np.asarray(t)
                return t, down_sampled
    def resample_force(self, bin_size):
        """Downsample force to bin_size; same contract as resample_EMG."""
        if self.has_force == 0:
            print('There is no force in this file.')
            return 0
        else:
            if bin_size < self.bin_width:
                print('Cannot bin force using this bin size')
                return 0
            else:
                down_sampled = []
                t = []
                n = bin_size/self.bin_width
                length = int(np.floor(np.size(self.force, 0)/n))
                for i in range(1, length):
                    down_sampled.append(self.force[int(np.floor(i*n)),:])
                    t.append(self.time_frame[int(np.floor(i*n))])
                down_sampled = np.asarray(down_sampled)
                t = np.asarray(t)
                return t, down_sampled
    def resample_kin(self, bin_size):
        """Downsample position/velocity/acceleration to bin_size.

        Returns (times, pos, vel, acc), or 0 when unavailable / bin_size is
        finer than the current bin width.
        """
        if self.has_kin == 0:
            print('There is no kinematics in this file.')
            return 0
        else:
            if bin_size < self.bin_width:
                print('Cannot bin kinematics using this bin size')
                return 0
            else:
                down_sampledp = []
                down_sampledv = []
                down_sampleda = []
                t = []
                n = bin_size/self.bin_width
                length = int(np.floor(np.size(self.kin_p, 0)/n))
                for i in range(1, length):
                    down_sampledp.append(self.kin_p[int(np.floor(i*n)),:])
                    down_sampledv.append(self.kin_v[int(np.floor(i*n)),:])
                    down_sampleda.append(self.kin_a[int(np.floor(i*n)),:])
                    t.append(self.time_frame[int(np.floor(i*n))])
                down_sampledp = np.asarray(down_sampledp)
                down_sampledv = np.asarray(down_sampledv)
                down_sampleda = np.asarray(down_sampleda)
                t = np.asarray(t)
                return t, down_sampledp, down_sampledv, down_sampleda
|
#!/usr/bin/env python3
'''Plot all items from the dataset and highlight the skyline.'''
import argparse
import numpy as np
import matplotlib.pyplot as plt
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument(
    'dataset',
    type=str,
    help='name of the file with the dataset',
)
ap.add_argument(
    'skyline',
    type=str,
    help='name of the file with the skyline indices',
)
args = ap.parse_args()

# The dataset file is a flat sequence of float64 (x, y) pairs.
dataset = np.fromfile(args.dataset, dtype=np.float64)
# Fix: use integer floor division — `len(dataset) / 2` is a float on
# Python 3 and makes reshape raise TypeError.
dataset = dataset.reshape((len(dataset) // 2, 2))
skyline = np.loadtxt(args.skyline, dtype=int, delimiter=',')

# Print a table of the skyline points before plotting.
if len(skyline):
    format_string = '{!s:>15} {!s:>15} {!s:>20} {!s:>20}'
    ruler_len = 15 + 1 + 15 + 1 + 20 + 1 + 20
    print('=' * ruler_len)
    print(format_string.format('#', 'item_index', 'x', 'y'))
    print('-' * ruler_len)
    for index, item_index in enumerate(skyline, 1):
        print(format_string.format(index, item_index, dataset[item_index,0], dataset[item_index,1]))
    print('=' * ruler_len)

# All items as blue dots; skyline items highlighted with red crosses.
if len(dataset):
    plt.scatter(dataset[:,0], dataset[:,1], color='blue', marker='.')
    if len(skyline):
        plt.scatter(dataset[skyline,0], dataset[skyline,1], color='red', marker='x')
plt.show()
|
from django.conf.urls import include, url
from django.utils.translation import gettext_lazy as _
from . import views
# Cart URLs; the add/remove paths are wrapped in gettext_lazy so the URL
# text itself can be translated per locale.
urlpatterns = [
    url(r'^$', views.cart_detail, name='detail'),
    url(_(r'^add/(?P<product_id>\d+)/$'), views.cart_add, name='add'),
    url(_(r'^remove/(?P<product_id>\d+)/$'), views.cart_remove , name='remove'),
]
|
import logging
from decimal import Decimal, ROUND_HALF_UP
# Module-wide logging: INFO level on the root logger.
logging.basicConfig(
    level=logging.INFO,
)
logger = logging.getLogger('')
class FrozenDict(dict):
    """An implementation of an immutable dictionary.

    Every mutating dict operation raises NotImplementedError; hashing is
    supported so instances can be used as dict keys or set members.
    """
    def __delitem__(self, key):
        raise NotImplementedError("'__delitem__' not supported on frozendict")
    def __setitem__(self, key, val):
        raise NotImplementedError("'__setitem__' not supported on frozendict")
    def clear(self):
        raise NotImplementedError("'clear' not supported on frozendict")
    def pop(self, key, default=None):
        raise NotImplementedError("'pop' not supported on frozendict")
    def popitem(self):
        raise NotImplementedError("'popitem' not supported on frozendict")
    def setdefault(self, key, default=None):
        raise NotImplementedError("'setdefault' not supported on frozendict")
    def update(self, *args, **kwargs):
        raise NotImplementedError("'update' not supported on frozendict")
    def __hash__(self):
        # Fix: dict.iteritems() is Python-2-only and raises AttributeError on
        # Python 3; items() behaves identically here on both versions.
        return hash(
            frozenset((key, hash(val)) for key, val in self.items()))
def dict_num_sum(value):
    """Recursively total the numeric leaves of a nested dict.

    :param value: nested dict, number, numeric string, or any falsy value
    :return: float total; non-numeric leaves count as 0.0 (and are logged)
    """
    # Falsy inputs ('' / None / {} / 0) all total to 0.0.
    if not value:
        return 0.0
    if isinstance(value, dict):
        return sum(dict_num_sum(child) for child in value.values())
    try:
        return float(value)
    except Exception as e:
        logger.exception(e)
        return 0.0
def decimal_round(number, precision):
    '''
    Round half-up to `precision` decimal places via Decimal.

    @param number: value to round (float/int/str/Decimal; '' and None -> 0)
    @param precision: number of decimal places to keep (not negative for
        numeric input — negative precision returns the number unchanged)
    @return: Decimal result; Decimal avoids the binary-float representation
        errors that make plain round() misbehave on values like 2.675
    '''
    # Normalise '' / None and other falsy non-zero inputs to 0.
    if not number and number != 0:
        return decimal_round(0, precision)
    if isinstance(number, (float, int)):
        if precision < 0:
            return number
        # repr() yields the shortest exact decimal text of the float.
        number = repr(number)
    try:
        quant = 1 if precision == 0 else '0.' + '0' * (precision - 1) + '1'
        return Decimal(number).quantize(Decimal(quant), rounding=ROUND_HALF_UP)
    except Exception as e:
        logger.exception(e)
        return decimal_round(0, precision)
def get_formative_money(money, precision=2):
    """Round half-up to `precision` decimals and add thousands separators.

    Examples: 0 -> '0.00', 100 -> '100.00', 1234567.891 -> '1,234,567.89'.
    """
    rounded = decimal_round(money, precision)
    return format(rounded, ',')
def delete_zero(value, code='', dict1={}):
    '''
    Recursively remove zero-valued entries (and dicts emptied by the
    pruning) from a nested dict, in place.
    (Original author note, translated: "Zhang Haiyang's code, unmodified".)
    :param value: nested dict to prune, or a leaf value
    :param code: key of `value` inside its parent dict
    :param dict1: parent dict holding `value` under `code`
    :return: the leaf value for non-dict input; None for dict input
    '''
    # NOTE: the mutable default for dict1 is never written to at top level
    # (dict1.get(code) is None there), so it is kept for interface stability.
    if isinstance(value, dict):
        # Fix: iterate over a snapshot — the original popped from `value`
        # while iterating value.items(), which raises RuntimeError on Py3.
        for code1, value1 in list(value.items()):
            value1 = delete_zero(value1, code1, value)
            try:
                value1 = float(value1)
                if value1 == 0:
                    value.pop(code1)
            except Exception:
                # Non-numeric leaves (and None from dict recursion) are kept.
                pass
        # If pruning emptied this dict, remove it from its parent too.
        if dict1.get(code) == {}:
            dict1.pop(code)
    else:
        return value
|
#!/usr/bin/env python
# coding: utf-8
# # Capstone Project
#
# ### This Project is part of the final assignment for the *IBM Data Science Professional Certification*.
#
# This is also a separate course by itself, titled **'Applied Data Science Capstone'**
# In[1]:
import pandas as pd
import numpy as np
# Placeholder cell required by the capstone course setup.
print('Hello Capstone Project Course!')
# In[ ]:
|
from utils import stats
import argparse
def _str2bool(value):
    """Lenient CLI boolean parser: 'true'/'1'/'yes'/'y' (any case) -> True.

    Fixes the argparse `type=bool` trap: bool('False') is True because any
    non-empty string is truthy, so '--rouge False' used to enable ROUGE.
    """
    return str(value).strip().lower() in ('1', 'true', 'yes', 'y')

parser = argparse.ArgumentParser()
parser.add_argument(
    "--dataset_script_path",
    help="Path to the dataset script",
    type=str,
    default="./en_wiki_multi_news.py",
)
parser.add_argument(
    "--dataset_cache_path",
    help="Path to the cache folder",
    type=str,
    default="dataset/.en-wiki-multi-news-cache",
)
parser.add_argument(
    "--rouge",
    help="True if compute the ROUGE scores. Take a long time",
    type=_str2bool,
    default=False,
)
args = parser.parse_args()
stats(args.dataset_script_path, args.dataset_cache_path, args.rouge)
|
import requests
from bs4 import BeautifulSoup as BS
import hashlib
import os
import gzip
import pickle
import sys
import re
import random
# Seed listing page: a Yahoo Auctions category search (women's S-size
# long-sleeve sports tops, 5000-5999 yen, 20 items per page).
url = 'https://auctions.yahoo.co.jp/category/list/S%E3%82%B5%E3%82%A4%E3%82%BA-%E9%95%B7%E8%A2%96T%E3%82%B7%E3%83%A3%E3%83%84-%E3%83%88%E3%83%83%E3%83%97%E3%82%B9-%E5%A5%B3%E6%80%A7%E7%94%A8-%E3%82%B9%E3%83%9D%E3%83%BC%E3%83%84%E3%82%A6%E3%82%A8%E3%82%A2-%E3%82%B9%E3%83%9D%E3%83%BC%E3%83%84-%E3%83%AC%E3%82%B8%E3%83%A3%E3%83%BC/2084263740/?p=S%E3%82%B5%E3%82%A4%E3%82%BA&auccat=2084263740&aucminprice=5000&aucmaxprice=5999&exflg=1&b=1&n=20&s1=featured&slider=undefined'
# Japanese-locale desktop Chrome headers sent with every request.
user_agent = {"Accept-Language": "ja-JP,ja;q=0.5", 'User-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}
#proxys = [ {'http':f'http://{es[0]}:{es[1]}'} for es in [ line.strip().split() for line in open('proxys') ] ]
def pmap(arg):
    """Fetch a batch of Yahoo Auctions listing URLs, archive the HTML, and
    return the set of auction links discovered on those pages.

    arg: (worker_key, urls) tuple; worker_key == -1 disables the on-disk
    cache check (used for the initial seed fetch).
    """
    key, urls = arg
    hrefs = set()
    for index, url in enumerate(urls):
        # Only category listing pages are crawled.
        if 'category/list' not in url:
            continue
        try:
            # Cache key: sha256 of the URL; skip pages already on disk.
            url_hash = hashlib.sha256(bytes(url, 'utf8')).hexdigest()
            if key != -1 and ( os.path.exists(f'htmls/{url_hash}') or os.path.exists(f'htmls_noise/{url_hash}')):
                #print('already', url)
                continue
            print(key, index, len(urls), url)
            try:
                r = requests.get(url, headers=user_agent, timeout=5)
                r.encoding = 'utf8'
                print(r.text)
            except Exception as ex:
                print(ex)
                continue
            # Archive gzip-compressed HTML. The else branch is currently
            # unreachable because of the 'category/list' filter above.
            if 'category/list' in url:
                open(f'htmls/{url_hash}','wb').write( gzip.compress(bytes(r.text,'utf8')) )
            else:
                open(f'htmls_noise/{url_hash}','wb').write( gzip.compress(bytes(r.text,'utf8')) )
            soup = BS(r.text, 'html.parser')
            # Collect every absolute auctions.yahoo.co.jp link on the page.
            for a in soup.find_all('a', {'href':True}):
                href = a.get('href')
                if 'https://auctions.yahoo.co.jp/' not in href: continue
                #href = re.sub(r'\?.*?$', '', href)
                hrefs.add(a.get('href'))
        except Exception as ex:
            print(ex)
            continue
    return hrefs
# Seed the crawl: resume from the pickled URL set, or fetch the start page.
if '--resume' in sys.argv:
    urls = pickle.load(open('urls.pkl', 'rb') )
else:
    urls = pmap((-1, [url]))
print(urls)
# Number of worker processes; URLs are dealt round-robin across workers.
DIST = 1
args = { key:[] for key in range(DIST) }
[ args[index%DIST].append(url) for index, url in enumerate(urls) ]
args = [ (key,urls) for key, urls in args.items() ]
#[ pmap(arg) for arg in args ]
from concurrent.futures import ProcessPoolExecutor as PPE
# Crawl forever: each round fetches every known URL in parallel, merges the
# newly discovered links, checkpoints them to urls.pkl, and re-partitions
# the work for the next round.
while True:
    with PPE(max_workers=DIST) as exe:
        urls = set()
        for _hrefs in exe.map(pmap, args):
            urls |= _hrefs
        pickle.dump( urls, open('urls.pkl', 'wb') )
        args = { key:[] for key in range(DIST) }
        [ args[index%DIST].append(url) for index, url in enumerate(urls) ]
        args = [ (key,urls) for key, urls in args.items() ]
|
'''
Views for app.
'''
from flask import Blueprint, redirect, render_template, url_for
from app.forms import ConfirmForm, SignupForm
from app.functions import add_user, confirm_user
from app.models import User
# Blueprint hosting the public signup/confirmation pages.
main = Blueprint('main', __name__, template_folder='app/templates')
@main.route('/', methods=['GET', 'POST'])
def index():
    """Signup page: register a carrier + phone number, then go to confirm."""
    #TODO: Add chosen to carrier list
    form = SignupForm()
    registered = False
    if form.validate_on_submit():
        carrier = form.carrier.data
        phone_number = int(form.phone_number.data)
        print(phone_number)
        # add_user returns truthy on a fresh registration; otherwise fall
        # through and re-render the signup page.
        if add_user(carrier, phone_number):
            return redirect(url_for('main.confirm',
                                    carrier=carrier,
                                    number=phone_number))
    # GET or failed submit: flag numbers that are already registered.
    try:
        phone_number = int(form.phone_number.data)
    except:
        # Empty/non-numeric input — treat as an unknown number.
        phone_number = None
    if phone_number and User.query.get(phone_number):
        registered = True
    return render_template('index.html', form=form, registered=registered)
@main.route('/confirm/<carrier>/<number>', methods=['GET', 'POST'])
def confirm(carrier, number):
    """Confirmation page: mark the given number confirmed on submit."""
    #TODO: Implement resend feature
    form = ConfirmForm()
    if form.validate_on_submit():
        phone_number = int(form.phone_number.data)
        confirm_user(phone_number)
        return redirect(url_for('main.done'))
    #TODO: Handle malformed inputs
    # Pre-fill the form from the URL parameters for display.
    form.phone_number.data = number
    form.carrier.data = carrier
    return render_template('confirm.html',
                           form=form,
                           carrier=carrier,
                           number=number)
@main.route('/done')
def done():
    """Static 'all done' page shown after a successful confirmation."""
    return render_template('done.html')
@main.route('/scores/new')
def new_scores():
    """Placeholder endpoint for submitting new scores (not yet implemented)."""
    #TODO: Implement auth first
    # NOTE(review): the statements that previously followed this return were
    # unreachable dead code and referenced undefined names (get_json, json,
    # send_scores); removed until the endpoint is actually implemented.
    return 'Unimplemented'
|
# -*- coding: utf-8 -*-
'''
Short Problem Definition:
Given a table A of N integers from 0 to N-1 calculate the smallest such index
P, such that {A[0],…,A[N-1]} = {A[0],…,A[P]}.
Link
PrefixSet
Complexity:
expected worst-case time complexity is O(N)
expected worst-case space complexity is O(N)
Execution:
Based on the number of elements that can occur in N, you either mark the occurrences
in a boolean array, or put them in a set. The last occurrence of an element that was
not seen before is the result.
'''
def solution(A):
    """Return the smallest index P with set(A[:P+1]) == set(A).

    O(N) time / O(N) space: track seen values; the answer is the index of
    the last first-time occurrence.
    """
    seen = set()
    smallest_prefix_idx = 0
    # enumerate() replaces the original xrange() loop, which raises
    # NameError on Python 3 (and avoids repeated indexing either way).
    for idx, val in enumerate(A):
        if val not in seen:
            seen.add(val)
            smallest_prefix_idx = idx
    return smallest_prefix_idx
|
"""
All actions use up one stack/queue space
"""
# Keyboard keys bound to piece actions; each action consumes one
# stack/queue slot.
action_bindings = [
    'q', # Use normal attack
    'w', # Use strong attack
    'e', # Use special attack, only available after 1/3 of all allied pieces dead and on certain units
    'a', # Move, click to determine coordinates
    's', # Drop flag
    'd' # Special action for king, only available after 2/3 of all allied pieces dead (not including dropped pieces)
]
"""
A piece class
Basis for all combat pieces on the board
Piece consists of:
Position: (xcoord, ycoord)
Level: 0-2
Stats: A dictionary of all stats for the piece
Sprite: Location of sprite in folder
Actions: Function pointers that define a piece's unique abilities
"""
class Piece(dict):
    """Base class for all combat pieces on the board.

    Holds position ([x, y]), level (0-2), a stats dict, the sprite path,
    and a dict of clan-specific action callables keyed per action_bindings.
    """
    def __init__(self, xcoord, ycoord, level, stats, sprite):
        self.position = [xcoord, ycoord]
        self.level = level
        self.stats = stats
        self.sprite = sprite
        # Clan-specific abilities are registered here after construction;
        # keys follow the action_bindings list.
        self.actions = {}
"""
Object created by flag drop
Flag consists of:
Position: (xcoord, ycoord)
Stats: A dictionary of all stats for the piece
Sprite: Location of sprite in folder
"""
class Flag(object):
    """Stationary object created when a piece drops its flag.

    Carries a position ([x, y]), a stats dict, and a sprite path.
    """
    def __init__(self, xcoord, ycoord, stats, sprite):
        self.position = [xcoord, ycoord]
        self.stats = stats
        self.sprite = sprite
|
from num2words import num2words
import matplotlib.pyplot as plt
import unicodedata
def wordstoscore(word):
    """Score a word as the sum of alphabet positions of its letters.

    ord(ch) % 32 maps both 'a'/'A' to 1, 'b'/'B' to 2, and so on.
    """
    return sum(ord(ch) % 32 for ch in word)
def num2score(num, language):
    """Spell `num` in the given language and return its alphanumeric score.

    Strips ' and', spaces, and hyphens, then folds accented characters to
    plain ASCII before scoring.
    """
    spelled = num2words(num, lang=language).replace(' and', '').replace(' ', '').replace('-', '')
    ascii_only = unicodedata.normalize('NFKD', spelled).encode('ASCII', 'ignore').decode()
    return wordstoscore(ascii_only)
# Score the numbers 0..largest-1 in every supported language.
largest = 2000
available_languages = ['en','cz', 'de', 'dk', 'es', 'fi','fr', 'id', 'it', 'pl','pt', 'nl']
numbers = list(range(largest))
import pandas as pd
scores = pd.DataFrame(columns=available_languages)
language_max = {}
for language in available_languages:
    scores[language] = [num2score(n, language) for n in numbers]
    for n in numbers:
        # Entry is overwritten on every hit, so this records the LARGEST n
        # (below `largest`) whose spelled-out score still exceeds n.
        if n < num2score(n, language):
            language_max[language] = n
# for i in range(0, 999999):
#     if i < wordstoscore(num2words(i)):
#         print(i)
plt.style.use('fivethirtyeight')
fig, ax = plt.subplots()
ax.plot(scores, marker='o', linestyle='none')
ax.set_title("Numbers vs. their alphanumeric scores")
ax.legend(available_languages)
plt.show()
fig2, ax2 = plt.subplots()
ax2.bar(*zip(*language_max.items()))
ax2.set_title("Maximum by language")
plt.show()
|
from django.apps import AppConfig
class TradeConfig(AppConfig):
    """Django app configuration for the 'trade' app."""
    name = 'trade'
    verbose_name = "实战信息"
    # NOTE(review): app_label normally equals the app's module name
    # ('trade'); overriding it with a non-ASCII label affects migration and
    # model lookups — confirm this is intentional.
    app_label= "实战信息"
|
from django.db import models
from django.contrib import admin
class Register(models.Model):
    """User registration record (verbose names are in Chinese).

    NOTE(review): passwords are stored as plain-text CharFields — hash them
    (e.g. via django.contrib.auth) before production use.
    """
    username = models.CharField(u"用户名", max_length = 200)
    passwd = models.CharField(u"密码", max_length = 200)
    repasswd = models.CharField(u"确认密码", max_length = 200)
    email = models.EmailField(u"邮箱地址",max_length = 200)
    def __unicode__(self):
        # Python-2 style display name (shown in the admin).
        return self.username
class Build_Steps(models.Model):
    """One build step: a script plus where/how to run it.

    NOTE(review): build_info_id is a free-text CharField rather than a
    ForeignKey to Build_Info — confirm this is intentional.
    """
    build_info_id = models.CharField(max_length = 5000, blank = True, null = True)
    script_content = models.CharField(max_length = 5000, blank = True, null = True)
    slave_script_file = models.CharField(max_length = 5000, blank = True, null = True)
    work_dir = models.CharField(max_length = 1000, blank = True, null = True)
    description = models.CharField(max_length = 1000, blank = True, null = True)
class Build_Info(models.Model):
    """Buildbot-style build configuration: master/slave endpoints, trigger
    settings (schedule, git branch, monitored path), and generated config
    paths. All fields are free-text and optional.
    """
    masterip = models.CharField(max_length = 100, blank = True, null = True)
    slaveip = models.CharField(max_length = 100, blank = True, null = True)
    slave_platform = models.CharField(max_length = 100, blank = True, null = True)
    slavename = models.CharField(max_length = 100, blank = True, null = True)
    buildername = models.CharField(max_length = 100, blank = True, null = True)
    start_method = models.CharField(max_length = 100, blank = True, null = True)
    username = models.CharField(max_length = 100, blank = True, null = True)
    hour = models.CharField(max_length = 100, blank = True, null = True)
    minute = models.CharField(max_length = 100, blank = True, null = True)
    git_project_path = models.CharField(max_length = 100, blank = True, null = True)
    branches = models.CharField(max_length = 100, blank = True, null = True)
    monitor_file_path = models.CharField(max_length = 100, blank = True, null = True)
    send_mail = models.CharField(max_length = 100, blank = True, null = True)
    flag = models.CharField(max_length = 100, blank = True, null = True)
    new_master = models.CharField(max_length = 1000, blank = True, null = True)
    new_factory = models.CharField(max_length = 1000, blank = True, null = True)
    scripts_path = models.CharField(max_length = 1000, blank = True, null = True)
    def __unicode__(self):
        # Python-2 style display name (shown in the admin).
        return self.slavename
    class Meta:
        # Newest records first.
        ordering = ["-id"]
|
from collections import deque
def bfs(start, end):
    """Fewest moves from `start` to `end` using steps +1, -1, and *2.

    Uses the module-level `location` list both as the distance table and as
    the visited marker (MAX means unvisited).
    """
    frontier = deque([start])
    location[start] = 0
    while frontier:
        cur = frontier.popleft()
        if cur == end:
            return location[cur]
        step = location[cur] + 1
        # Same expansion order as before: +1, -1, *2.
        for nxt in (cur + 1, cur - 1, cur * 2):
            if 0 <= nxt < MAX and step < location[nxt]:
                location[nxt] = step
                frontier.append(nxt)
# Positions range over [0, MAX); MAX also serves as the "unvisited" distance.
MAX = 1000001
# Read start and target positions from stdin.
N, K = map(int, input().split())
location = [MAX] * MAX
print(bfs(N, K))
from django.conf.urls import patterns, include, url
from django.contrib import admin
# URL routing table.  NOTE(review): the `patterns('')` API was removed in
# Django 1.10, so this file presumably targets Django <= 1.9 — verify
# before upgrading.
urlpatterns = patterns('',
    # Examples:
    #url(r'^$', 'monitor.views.index', name='index'),
    #url(r'^/index[/]?$', 'monitor.views.index', name='index'),
    url(r'^monitor/', include('monitor.urls')),
    #url(r'^admin/', include(admin.site.urls)),
)
|
# Interactive coin tally: ask how many of each Korean coin denomination
# the user holds and print the total value in won.
print('동전 합산 해드리겠습니다... 음수는 넣지마세요.\n')
count_500 = int(input('오백원짜리 몇개? '))
count_100 = int(input('백원짜리 몇개? '))
count_50 = int(input('오십원짜리 몇개? '))
count_10 = int(input('십원짜리 몇개? '))
total = count_500 * 500 + count_100 * 100 + count_50 * 50 + count_10 * 10
print('\n 당신이 갖고 있는 동전은 총', total, '원 입니다.')
import pytest
from selenium import webdriver
from PageObjects.LoginPage import LoginPage
from utilities.readProperties import ReadConfig
from utilities.customLogger import LogGen
#NamingConvention: Test_ID_nameoftestcase
class Test_001_Login():
    """Login smoke tests (naming convention: Test_ID_nameoftestcase)."""
    # Class-level fixtures, read once from the project config file.
    baseURL = ReadConfig.getApplicationURL()
    username = ReadConfig.getUseremail()
    password = ReadConfig.getpassword()
    logger = LogGen.loggen()
    @pytest.mark.regression
    def test_homePageTitle(self,setup):
        """Open the base URL and verify the login page title.

        On mismatch a screenshot is saved before failing the test.
        """
        self.logger.info("****Testcase_001_Login*****")
        self.logger.info("****Validating Home Page Title*****")
        self.driver = setup
        # Launching application
        self.driver.get(self.baseURL)
        #Returns title of webbrowser
        actual_title = self.driver.title
        #Assertion
        if actual_title=="Your store. Login":
            assert True
            self.driver.close()
            self.logger.info("****Validating Home Page Title test passed*****")
        else:
            # Method to save screenshots - self.driver.save_screenshot
            # '.' represents current project directory
            self.driver.save_screenshot(".\\Screenshots\\"+"test_homePageTitle.png")
            self.driver.close()
            self.logger.error("****Validating Home Page Title Failed*****") # Instead of info
            assert False
    @pytest.mark.sanity
    @pytest.mark.regression
    def test_login(self,setup):
        """Log in via the page object, capture the resulting title,
        log back out, and assert the dashboard title was reached.
        """
        self.logger.info("****Testcase_Login*****")
        self.driver = setup
        #Launching application
        self.driver.get(self.baseURL)
        #creating an object:
        self.lp = LoginPage(self.driver)
        self.lp.setUserName(self.username)
        self.lp.setPassword(self.password)
        self.lp.clickLogin()
        actual_title = self.driver.title # returns title of webbrowser
        # Logout BEFORE asserting so the session is always cleaned up.
        self.lp.clickLogout()
        # Returns title of webbrowser
        print(actual_title)
        if actual_title =="Dashboard / nopCommerce administration":
            assert True
            self.logger.info("****Login test is passed*****")
            self.driver.close()
        else:
            # Method to save screenshots - self.driver.save_screenshot
            # '.' represents current project directory
            self.driver.save_screenshot(".\\Screenshots\\"+"test_login.png")
            self.driver.close()
            self.logger.error("****Login test is failed*****") # Instead of info
            assert False
#!/bin/python3
import logging
import time
import gevent
from gevent import queue, monkey
from client_socket import ClientSocket
# Patch the stdlib (sockets, time.sleep, ...) so blocking calls cooperate
# with gevent's greenlets.
monkey.patch_all()
# Module logger: DEBUG level, appended to the client's local log file.
logger = logging.getLogger(__name__)
fh = logging.FileHandler('client/.data/client.log')
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
class SendingQueue():
    '''Queue of file segments drained by gevent greenlets that push each
    segment to the server through a ClientSocket.'''
    def __init__(self, server_address):
        '''Create SendingQueue object.

        Args:
            server_address: address handed to ClientSocket (opaque here).
        '''
        self.threads = []
        # gevent provides a synchronized queue (locking is handled)
        self.queue = queue.Queue()
        self.socket_mgr = ClientSocket(server_address)
    def add(self, segments):
        '''Enqueue a list of segments onto the sending queue.'''
        for sgmt in segments:
            self.queue.put(sgmt)
    def send(self):
        ''' Iterate continuously looking to send entries in queue
        '''
        logger.debug("Started sending thread")
        while True:
            if self.queue.qsize():
                self.socket_mgr.send(self.queue.get())
                # monkey-patched sleep yields to other greenlets
                time.sleep(1) # send is non-blocking, don't over send
            else:
                time.sleep(3)
    def start_sending(self, filename, port_list, segments, num_threads):
        ''' Start a separate thread to begin sending
        from the send queue. Should be started before
        breaking up files. As segments are added to
        queue, it will send, until stop_sending is called.
        '''
        self.add(segments)
        self.socket_mgr.connect(filename, port_list)
        # One greenlet per requested "thread", all draining the same queue.
        for i in range(num_threads):
            self.threads.append(gevent.spawn(self.send))
        return
    def complete_sending(self):
        ''' Join all threads created during this send process.
        This should be done between searching for new files
        '''
        # Wait till rest of send queue is empty
        # ASSUME no more is being added at this point
        logger.debug("Waiting for all segments to send before completing send")
        while self.queue.qsize():
            time.sleep(3)
        # Wait till sending finished
        while self.socket_mgr.num_currently_sending:
            logger.debug("Waiting for (%d) segments to finish sending",
                         self.socket_mgr.num_currently_sending)
            time.sleep(3)
        # Kill any threads created
        gevent.killall(self.threads, timeout=5)
        self.threads = []
        self.socket_mgr.disconnect()
|
import csv
def execute(data, savepath):
    """Convert a label-first CSV file to LibSVM format.

    Each input row ``label,f1,f2,...`` becomes the output line
    ``label 1:f1 2:f2 ...`` (features numbered from 1).

    Args:
        data: path of the input CSV file.
        savepath: path of the LibSVM output file (written as UTF-8 bytes).
    """
    # `with` closes both files even on error — the original leaked the
    # reader's file handle (`csv.reader(open(data))` was never closed).
    with open(data, newline='') as src, open(savepath, 'wb') as out:
        for row in csv.reader(src):
            label, features = row[0], row[1:]
            fields = [label] + ['%d:%s' % (i + 1, feat)
                                for i, feat in enumerate(features)]
            out.write((' '.join(fields) + '\n').encode('UTF-8'))
# Convert the MNIST train/test CSV dumps expected in the working directory.
execute('mnist_train.csv', 'mnist_train.libsvm')
execute('mnist_test.csv', 'mnist_test.libsvm')
from season import Season
from PredictionStats import PredictionStats
# --- Configuration -------------------------------------------------------
year_to_predict = 2017
min_mins_played = 10.0   # filter: minimum minutes per game
min_games_played = 20    # filter: minimum games in the season
statsToPredict = PredictionStats.statsToPredictLoop.value
statsToUse = PredictionStats.statsToLoop.value
# Load the season file and keep only players passing the filters.
season_predict = Season()
players = season_predict.calcSeason('season/' + str(year_to_predict) + 'Stats.txt', year_to_predict, min_mins_played, min_games_played)
# --- Load regression formulae --------------------------------------------
# File layout is assumed to be one "slope intercept r_value" line per
# (statPredict, statUse) pair, in loop order — verify against the writer.
formulae = []
formulae_rvals = []
formulae_f = open('output/formulae.txt', 'r')
for statPredict in statsToPredict:
    formulae_overall = []
    formulae_rv = 0.0   # sum of r-values, used as normalization weight
    for statUse in statsToUse:
        slope, intercept, r_val = map(float, formulae_f.readline().split(' '))
        formulae_overall.append((slope, intercept, r_val))
        formulae_rv += r_val
    formulae.append(formulae_overall)
    formulae_rvals.append(formulae_rv)
formulae_f.close()
# --- Predict: r-value-weighted average of all linear formulas ------------
for player in players:
    predictionVal = player.name
    player_season = player.getSeason(year_to_predict)
    player_stat_pred = []
    for formulas in range(len(formulae)):
        stat = 0.0
        player_stat = player_season.getStatsByLoopVar( formulas )
        for formula in range(len(formulae[formulas])):
            slope, intercept, r_val = formulae[formulas][formula]
            stat += (float(slope) * float(player_stat) + float(intercept)) * float(r_val) / float(formulae_rvals[formulas])
        #player_stat_pred.append(stat / len(formulae[formulas]))
        player_stat_pred.append(stat)
    # Print name, the first six predicted stats, and the last one.
    for i in range(0, 6):
        predictionVal += ' ' + str(player_stat_pred[i])
    predictionVal += ' ' + str(player_stat_pred[-1])
    print(predictionVal)
|
# Generated by Django 3.0.3 on 2020-04-10 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: updates the choice set of Exam.branch.
    # Generated field definitions should not be hand-edited.
    dependencies = [
        ('basic_app', '0008_auto_20200409_2247'),
    ]
    operations = [
        migrations.AlterField(
            model_name='exam',
            name='branch',
            field=models.CharField(choices=[('CS', 'CS'), ('ME', 'ME'), ('PIE', 'PIE'), ('EE', 'EE'), ('CE', 'CE'), ('ECE', 'ECE'), ('IT', 'IT')], max_length=200),
        ),
    ]
|
def partition(lst, begin, end):
    """Lomuto partition of lst[begin:end + 1] around the pivot lst[end].

    Rearranges the slice in place so elements smaller than the pivot come
    first, then the pivot; returns the pivot's final index.
    """
    i = begin - 1
    # BUG FIX: `xrange` is Python 2 only (NameError on py3); `range`
    # behaves identically here and works on both versions.
    for j in range(begin, end):
        if lst[j] < lst[end]:
            i += 1
            lst[i], lst[j] = lst[j], lst[i]
    # Swap the pivot into position i+1.  When lst[i + 1] is not greater
    # than the pivot it must be equal to it, so skipping the swap is safe.
    if lst[end] < lst[i + 1]:
        lst[i + 1], lst[end] = lst[end], lst[i + 1]
    return i + 1
def _qsort(lst, begin, end):
    """Recursively quicksort lst[begin:end + 1] in place."""
    if end <= begin:
        return  # zero or one element — already sorted
    pivot_idx = partition(lst, begin, end)
    _qsort(lst, begin, pivot_idx - 1)
    _qsort(lst, pivot_idx + 1, end)
def qsort(lst):
    """Sort `lst` in place with quicksort; returns None."""
    _qsort(lst, 0, len(lst) - 1)
|
"""
Main smartglass client
Common script that handles several subcommands
See `Commands`
"""
import os
import sys
import logging
import argparse
import functools
import asyncio
import aioconsole
import aiohttp
from typing import List
from logging.handlers import RotatingFileHandler
from xbox.webapi.authentication.models import OAuth2TokenResponse
from xbox.scripts import TOKENS_FILE, CONSOLES_FILE, LOG_FMT, \
LOG_LEVEL_DEBUG_INCL_PACKETS, VerboseFormatter, ExitCodes
from xbox.handlers import tui, gamepad_input, text_input, fallout4_relay
from xbox.auxiliary.manager import TitleManager
from xbox.webapi.scripts import CLIENT_ID, CLIENT_SECRET, REDIRECT_URI
from xbox.webapi.authentication.manager import AuthenticationManager
from xbox.webapi.common.exceptions import AuthenticationException
from xbox.sg import manager
from xbox.sg.console import Console
from xbox.sg.enum import ConnectionState
LOGGER = logging.getLogger(__name__)
REPL_DEFAULT_SERVER_PORT = 5558
class Commands(object):
    """
    Available commands for CLI
    """
    # Each constant doubles as the argparse subcommand name and as the
    # pre-seeded command used by the per-command entrypoint wrappers.
    Discover = 'discover'
    PowerOn = 'poweron'
    PowerOff = 'poweroff'
    REPL = 'repl'
    REPLServer = 'replserver'
    FalloutRelay = 'forelay'
    GamepadInput = 'gamepadinput'
    TextInput = 'textinput'
    TUI = 'tui'
def parse_arguments(args: List[str] = None):
    """
    Parse arguments with argparse.ArgumentParser

    Args:
        args: List of arguments from cmdline (None = use sys.argv)

    Returns: Parsed arguments

    Raises:
        Exception: On generic failure
    """
    parser = argparse.ArgumentParser(description='Xbox SmartGlass client')
    # The bare triple-quoted strings below are no-op section markers kept
    # from the original layout.  The small parsers defined first are
    # argparse "parents" shared by several subcommands.
    """Common arguments for logging"""
    logging_args = argparse.ArgumentParser(add_help=False)
    logging_args.add_argument(
        '--logfile',
        help="Path for logfile")
    logging_args.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='Set logging level\n'
             '( -v: INFO,\n'
             ' -vv: DEBUG,\n'
             '-vvv: DEBUG_INCL_PACKETS)')
    """Common arguments for authenticated console connection"""
    xbl_token_args = argparse.ArgumentParser(add_help=False)
    xbl_token_args.add_argument(
        '--tokens', '-t', type=str, default=TOKENS_FILE,
        help='Tokenfile to load')
    # OAuth2 client parameters fall back to environment variables, then
    # to the library defaults.
    xbl_token_args.add_argument(
        "--client-id",
        "-cid",
        default=os.environ.get("CLIENT_ID", CLIENT_ID),
        help="OAuth2 Client ID",
    )
    xbl_token_args.add_argument(
        "--client-secret",
        "-cs",
        default=os.environ.get("CLIENT_SECRET", CLIENT_SECRET),
        help="OAuth2 Client Secret",
    )
    xbl_token_args.add_argument(
        "--redirect-uri",
        "-ru",
        default=os.environ.get("REDIRECT_URI", REDIRECT_URI),
        help="OAuth2 Redirect URI",
    )
    xbl_token_args.add_argument(
        '--refresh', '-r', action='store_true',
        help="Refresh xbox live tokens in provided token file")
    """Common argument for console connection"""
    connection_arg = argparse.ArgumentParser(add_help=False)
    connection_arg.add_argument(
        '--address', '-a', type=str, default=None,
        help="IP address of console")
    connection_arg.add_argument(
        '--liveid', '-l',
        help='LiveID to poweron')
    """Common argument for interactively choosing console to handle"""
    interactive_arg = argparse.ArgumentParser(add_help=False)
    interactive_arg.add_argument(
        '--interactive', '-i', action='store_true',
        help="Interactively choose console to connect to")
    """
    Define commands
    """
    subparsers = parser.add_subparsers(help='Available commands')
    # NOTE: Setting dest and required here for py3.6 compat
    subparsers.dest = 'command'
    subparsers.required = True
    """Discover"""
    subparsers.add_parser(Commands.Discover,
                          help='Discover console',
                          parents=[logging_args,
                                   connection_arg])
    """Power on"""
    subparsers.add_parser(
        Commands.PowerOn,
        help='Power on console',
        parents=[logging_args, connection_arg])
    """Power off"""
    poweroff_cmd = subparsers.add_parser(
        Commands.PowerOff,
        help='Power off console',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    poweroff_cmd.add_argument(
        '--all', action='store_true',
        help="Power off all consoles")
    """Local REPL"""
    subparsers.add_parser(
        Commands.REPL,
        help='Local REPL (interactive console)',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    """REPL server"""
    repl_server_cmd = subparsers.add_parser(
        Commands.REPLServer,
        help='REPL server (interactive console)',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    repl_server_cmd.add_argument(
        '--bind', '-b', default='127.0.0.1',
        help='Interface address to bind the server')
    repl_server_cmd.add_argument(
        '--port', '-p', type=int, default=REPL_DEFAULT_SERVER_PORT,
        help=f'Port to bind to, default: {REPL_DEFAULT_SERVER_PORT}')
    """Fallout relay"""
    subparsers.add_parser(
        Commands.FalloutRelay,
        help='Fallout 4 Pip boy relay',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    """Controller input"""
    subparsers.add_parser(
        Commands.GamepadInput,
        help='Send controller input to dashboard / apps',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    """Text input"""
    subparsers.add_parser(
        Commands.TextInput,
        help='Client to use Text input functionality',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    # TUI takes no --interactive parent: console choice happens in the UI.
    tui_cmd = subparsers.add_parser(
        Commands.TUI,
        help='TUI client - fancy :)',
        parents=[logging_args, xbl_token_args,
                 connection_arg])
    tui_cmd.add_argument(
        '--consoles', '-c', default=CONSOLES_FILE,
        help="Previously discovered consoles (json)")
    return parser.parse_args(args)
def handle_logging_setup(args: argparse.Namespace) -> None:
    """
    Determine log level, logfile and special DEBUG_INCL_PACKETS
    via cmdline arguments.

    Args:
        args: ArgumentParser `Namespace`

    Returns:
        None
    """
    levels = [logging.WARNING, logging.INFO, logging.DEBUG, LOG_LEVEL_DEBUG_INCL_PACKETS]
    # Output level capped to number of levels
    log_level = levels[min(len(levels) - 1, args.verbose)]
    logging.basicConfig(level=log_level, format=LOG_FMT)
    logging.root.info('Set Loglevel: {0}'
                      .format(logging.getLevelName(log_level)))
    if log_level == LOG_LEVEL_DEBUG_INCL_PACKETS:
        # Packet-level logging needs the custom VerboseFormatter, so the
        # handler basicConfig just installed is torn down and replaced.
        logging.root.info('Removing previous logging StreamHandlers')
        while len(logging.root.handlers):
            del logging.root.handlers[0]
        logging.root.info('Using DEBUG_INCL_PACKETS logging')
        debugext_handler = logging.StreamHandler()
        debugext_handler.setFormatter(VerboseFormatter(LOG_FMT))
        logging.root.addHandler(debugext_handler)
    if args.logfile:
        # Additionally mirror all output to a rotating logfile.
        logging.root.info('Set Logfile path: {0}'.format(args.logfile))
        file_handler = RotatingFileHandler(args.logfile, backupCount=2)
        file_handler.setLevel(log_level)
        file_handler.setFormatter(logging.Formatter(LOG_FMT))
        logging.root.addHandler(file_handler)
async def do_authentication(args: argparse.Namespace) -> AuthenticationManager:
    """
    Shortcut for doing xbox live authentication (uses xbox-webapi-python lib).

    Args:
        args: Parsed arguments

    Returns: An authenticated instance of AuthenticationManager

    Raises:
        AuthenticationException: If authentication failed
    """
    async with aiohttp.ClientSession() as session:
        auth_mgr = AuthenticationManager(
            session, args.client_id, args.client_secret, args.redirect_uri
        )
        # Refresh tokens if we have them
        if os.path.exists(args.tokens):
            with open(args.tokens, mode="r") as f:
                tokens = f.read()
            auth_mgr.oauth = OAuth2TokenResponse.parse_raw(tokens)
            await auth_mgr.refresh_tokens()
        # Request new ones if they are not valid
        if not (auth_mgr.xsts_token and auth_mgr.xsts_token.is_valid()):
            # Interactive OAuth2 flow: user opens the URL and pastes the
            # authorization code back into the terminal.
            auth_url = auth_mgr.generate_authorization_url()
            print(f'Authorize with following URL: {auth_url}\n')
            code = input('Enter received authorization code: ')
            await auth_mgr.request_tokens(code)
        # Persist the (possibly refreshed) tokens for the next invocation.
        with open(args.tokens, mode="w") as f:
            f.write(auth_mgr.oauth.json())
        return auth_mgr
def choose_console_interactively(console_list: List[Console]):
    """
    Choose a console to use via user-input.

    Args:
        console_list (list): List of consoles to choose from

    Returns:
        None if choice was aborted, a desired console object otherwise
    """
    entry_count = len(console_list)
    LOGGER.debug('Offering console choices: {0}'.format(entry_count))
    print('Discovered consoles:')
    for idx, console in enumerate(console_list):
        print(' {0}: {1} {2} {3}'
              .format(idx, console.name, console.liveid, console.address))
    print('Enter \'x\' to abort')
    choices = [str(i) for i in range(entry_count)]
    # BUG FIX: the prompt advertises 'x' but previously only 'e' aborted,
    # so users could not abort as instructed.  Accept 'x' as documented
    # and keep 'e' for backwards compatibility.
    choices.extend(('x', 'e'))
    response = ''
    while response not in choices:
        response = input('Make your choice: ')
    if response in ('x', 'e'):
        return None
    return console_list[int(response)]
async def cli_discover_consoles(args: argparse.Namespace) -> List[Console]:
    """
    Discover consoles on the network.

    Exits the process with DiscoveryError when nothing responds.  The
    result is optionally narrowed by --liveid and/or --address.
    """
    LOGGER.info(f'Sending discovery packets to IP: {args.address}')
    discovered = await Console.discover(addr=args.address, timeout=1)
    if not len(discovered):
        LOGGER.error('No consoles discovered')
        sys.exit(ExitCodes.DiscoveryError)
    LOGGER.info('Discovered consoles ({0}): {1}'
                .format(len(discovered), ', '.join([str(c) for c in discovered])))
    if args.liveid:
        LOGGER.info('Filtering discovered consoles for LIVEID: {0}'
                    .format(args.liveid))
        discovered = [c for c in discovered if c.liveid == args.liveid]
    if args.address:
        LOGGER.info('Filtering discovered consoles for IP address: {0}'
                    .format(args.address))
        discovered = [c for c in discovered if c.address == args.address]
    return discovered
async def main_async(loop: asyncio.AbstractEventLoop, command: Commands = None) -> ExitCodes:
    """
    Async Main entrypoint: parse args, authenticate when needed, discover
    and choose a console, then dispatch the chosen subcommand.

    Args:
        loop: running event loop
        command (Commands): optional pre-seeded subcommand (used by the
            per-command entrypoint wrappers)

    Returns:
        ExitCodes value (may also terminate via sys.exit)
    """
    auth_manager: AuthenticationManager = None
    if command:
        # Take passed command and append actual cmdline
        cmdline_arguments = sys.argv[1:]
        cmdline_arguments.insert(0, command)
    else:
        cmdline_arguments = None
    args = parse_arguments(cmdline_arguments)
    handle_logging_setup(args)
    LOGGER.debug('Parsed arguments: {0}'.format(args))
    command = args.command
    LOGGER.debug('Chosen command: {0}'.format(command))
    # Early validation of mutually exclusive argument combinations.
    if 'interactive' in args and args.interactive and \
            (args.address or args.liveid):
        LOGGER.error('Flag \'--interactive\' is incompatible with'
                     ' providing an IP address (--address) or LiveID (--liveid) explicitly')
        sys.exit(ExitCodes.ArgParsingError)
    elif args.liveid and args.address:
        LOGGER.warning('You passed --address AND --liveid: Will only use that specific'
                       'combination!')
    elif command == Commands.PowerOff and args.all and (args.liveid or args.address):
        LOGGER.error('Poweroff with --all flag + explicitly provided LiveID / IP address makes no sense')
        sys.exit(ExitCodes.ArgParsingError)
    elif command == Commands.PowerOff and args.interactive and args.all:
        LOGGER.error('Combining args --all and --interactive not supported')
        sys.exit(ExitCodes.ArgParsingError)
    print('Xbox SmartGlass main client started')
    if 'tokens' in args:
        """
        Do Xbox live authentication
        """
        # Only subcommands that include xbl_token_args have `tokens`.
        LOGGER.debug('Command {0} supports authenticated connection'.format(command))
        print('Authenticating...')
        try:
            auth_manager = await do_authentication(args)
        except AuthenticationException:
            LOGGER.exception('Authentication failed!')
            LOGGER.error("Please re-run xbox-authenticate to get a fresh set")
            sys.exit(ExitCodes.AuthenticationError)
        print('Authentication done')
    if command == Commands.TUI:
        """
        Text user interface (powered by urwid)
        """
        # Removing stream handlers to not pollute TUI
        for h in [sh for sh in logging.root.handlers
                  if isinstance(sh, logging.StreamHandler)]:
            LOGGER.debug('Removing StreamHandler {0} from root logger'.format(h))
            logging.root.removeHandler(h)
        await tui.run_tui(loop, args.consoles, auth_manager)
        return ExitCodes.OK
    elif command == Commands.PowerOn:
        """
        Powering up console
        """
        if not args.liveid:
            LOGGER.error('No LiveID (--liveid) provided for power on!')
            sys.exit(ExitCodes.ArgParsingError)
        LOGGER.info('Sending poweron packet for LiveId: {0} to {1}'
                    .format(args.liveid,
                            'IP: ' + args.address if args.address else 'MULTICAST'))
        await Console.power_on(args.liveid, args.address, tries=10)
        sys.exit(0)
    """
    Discovery
    """
    discovered = await cli_discover_consoles(args)
    if command == Commands.Discover:
        """
        Simply print discovered consoles
        """
        print("Discovered %d consoles: " % len(discovered))
        for console in discovered:
            print(" %s" % console)
        sys.exit(ExitCodes.OK)
    elif command == Commands.PowerOff and args.all:
        """
        Early call for poweroff --all
        """
        """Powering off all discovered consoles"""
        for c in discovered:
            print('Powering off console {0}'.format(c))
            await c.power_off()
        sys.exit(ExitCodes.OK)
    """
    Choosing/filtering a console from the discovered ones
    """
    console = None
    if args.interactive:
        LOGGER.debug('Starting interactive console choice')
        console = choose_console_interactively(discovered)
    elif len(discovered) == 1:
        LOGGER.debug('Choosing sole console, no user interaction required')
        console = discovered[0]
    elif len(discovered) > 1:
        LOGGER.error(
            'More than one console was discovered and no exact'
            ' connection parameters were provided')
    if not console:
        LOGGER.error('Choosing a console failed!')
        sys.exit(ExitCodes.ConsoleChoice)
    LOGGER.info('Choosen target console: {0}'.format(console))
    # Wire up status callbacks (log-only; timeout additionally exits).
    LOGGER.debug('Setting console callbacks')
    console.on_device_status += \
        lambda x: LOGGER.info('Device status: {0}'.format(x))
    console.on_connection_state += \
        lambda x: LOGGER.info('Connection state: {0}'.format(x))
    console.on_pairing_state += \
        lambda x: LOGGER.info('Pairing state: {0}'.format(x))
    console.on_console_status += \
        lambda x: LOGGER.info('Console status: {0}'.format(x))
    console.on_timeout += \
        lambda x: LOGGER.error('Timeout occured!') or sys.exit(1)
    # Anonymous connection unless authentication succeeded above.
    userhash = ''
    xsts_token = ''
    if auth_manager:
        userhash = auth_manager.xsts_token.userhash
        xsts_token = auth_manager.xsts_token.token
        LOGGER.debug('Authentication info:')
        LOGGER.debug('Userhash: {0}'.format(userhash))
        LOGGER.debug('XToken: {0}'.format(xsts_token))
    else:
        LOGGER.info('Running in anonymous mode')
    LOGGER.info('Attempting connection...')
    state = await console.connect(userhash, xsts_token)
    if state != ConnectionState.Connected:
        LOGGER.error('Connection failed! Console: {0}'.format(console))
        sys.exit(1)
    # FIXME: Waiting explicitly
    LOGGER.info('Connected to console: {0}'.format(console))
    LOGGER.debug('Waiting a second before proceeding...')
    await console.wait(1)
    if command == Commands.PowerOff:
        """
        Power off (single console)
        """
        print('Powering off console {0}'.format(console))
        await console.power_off()
        sys.exit(ExitCodes.OK)
    elif command == Commands.REPL or command == Commands.REPLServer:
        banner = 'You are connected to the console @ {0}\n'\
            .format(console.address)
        banner += 'Type in \'console\' to acccess the object\n'
        banner += 'Type in \'exit()\' to quit the application'
        scope_vars = {'console': console}
        if command == Commands.REPL:
            LOGGER.info('Starting up local REPL console')
            # NOTE(review): `console` is rebound here — the Console object
            # stays reachable only through `scope_vars` from this point on.
            console = aioconsole.AsynchronousConsole(locals=scope_vars, loop=loop)
            await console.interact(banner)
        else:
            startinfo = 'Starting up REPL server @ {0}:{1}'.format(args.bind, args.port)
            print(startinfo)
            LOGGER.info(startinfo)
            server = await aioconsole.start_interactive_server(
                host=args.bind, port=args.port, loop=loop)
            await server
    elif command == Commands.FalloutRelay:
        """
        Fallout 4 relay
        """
        print('Starting Fallout 4 relay service...')
        console.add_manager(TitleManager)
        console.title.on_connection_info += fallout4_relay.on_connection_info
        await console.start_title_channel(
            title_id=fallout4_relay.FALLOUT_TITLE_ID
        )
        print('Fallout 4 relay started')
    elif command == Commands.GamepadInput:
        """
        Gamepad input
        """
        print('Starting gamepad input handler...')
        console.add_manager(manager.InputManager)
        await gamepad_input.input_loop(console)
    elif command == Commands.TextInput:
        """
        Text input
        """
        print('Starting text input handler...')
        console.add_manager(manager.TextManager)
        console.text.on_systemtext_configuration += text_input.on_text_config
        console.text.on_systemtext_input += functools.partial(text_input.on_text_input, console)
        console.text.on_systemtext_done += text_input.on_text_done
    # Idle loop: keeps the connection-driven handlers alive until Ctrl-C.
    while True:
        try:
            await asyncio.sleep(1)
        except KeyboardInterrupt:
            print('Quitting text input handler')
            return ExitCodes.OK
def main(command: Commands = None):
    """Blocking entrypoint: drive main_async on the default event loop.

    KeyboardInterrupt (Ctrl-C) is swallowed so the CLI exits quietly.
    """
    LOGGER.debug('Entering main_async')
    try:
        event_loop = asyncio.get_event_loop()
        event_loop.run_until_complete(main_async(event_loop, command))
    except KeyboardInterrupt:
        pass
# Console-script entrypoints: each one just runs `main` pre-seeded with
# the matching subcommand.
def main_discover():
    """Entrypoint for discover script"""
    main(Commands.Discover)
def main_poweron():
    """Entrypoint for poweron script"""
    main(Commands.PowerOn)
def main_poweroff():
    """Entrypoint for poweroff script"""
    main(Commands.PowerOff)
def main_repl():
    """Entrypoint for REPL script"""
    main(Commands.REPL)
def main_replserver():
    """Entrypoint for REPL server script"""
    main(Commands.REPLServer)
def main_falloutrelay():
    """Entrypoint for Fallout 4 relay script"""
    main(Commands.FalloutRelay)
def main_textinput():
    """Entrypoint for Text input script"""
    main(Commands.TextInput)
def main_gamepadinput():
    """Entrypoint for Gamepad input script"""
    main(Commands.GamepadInput)
def main_tui():
    """Entrypoint for TUI script"""
    main(Commands.TUI)
# Direct execution (python main.py) in addition to the console scripts.
if __name__ == '__main__':
    main()
|
import unittest
from .update_profiles import HdfsReader
from .update_profiles import ArgoApiClient
class TestClass(unittest.TestCase):
    """Table-driven unit tests for HdfsReader path generation and
    ArgoApiClient URL construction."""
    def test_hdfs_reader(self):
        """gen_profile_path maps (tenant, report, profile_type[, date])
        to the expected HDFS sync path."""
        hdfs_host = "foo"
        hdfs_port = "9000"
        hdfs_sync = "/user/foo/argo/tenants/{{tenant}}/sync"
        hdfs = HdfsReader(hdfs_host, hdfs_port, hdfs_sync)
        test_cases = [
            {"tenant": "TA", "report": "Critical", "profile_type": "operations",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops.json"},
            {"tenant": "TA", "report": "Super-Critical", "profile_type": "operations",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops.json"},
            {"tenant": "TA", "report": "Critical", "profile_type": "reports",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"},
            {"tenant": "TA", "report": "Critical", "profile_type": "aggregations",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ap.json"},
            {"tenant": "TA", "report": "Crit", "profile_type": "reports",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Crit_cfg.json"},
            {"tenant": "TA", "report": "Super-Critical", "profile_type": "aggregations",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ap.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "aggregations",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "reports",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "metrics",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics.json"}
        ]
        for test_case in test_cases:
            actual = hdfs.gen_profile_path(
                test_case["tenant"], test_case["report"], test_case["profile_type"])
            expected = test_case["expected"]
            self.assertEqual(expected, actual)
        # Test with dates
        # NOTE: per these expectations, "reports" paths ignore the date
        # while other profile types get a _YYYY-MM-DD suffix.
        test_cases_dates = [
            {"tenant": "TA", "report": "Critical", "profile_type": "operations", "date": "2019-12-11",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ops_2019-12-11.json"},
            {"tenant": "TA", "report": "Super-Critical", "profile_type": "operations", "date": "2019-10-04",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ops_2019-10-04.json"},
            {"tenant": "TA", "report": "Critical", "profile_type": "reports", "date": "2019-05-11",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_cfg.json"},
            {"tenant": "TA", "report": "Critical", "profile_type": "aggregations", "date": "2019-06-06",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Critical_ap_2019-06-06.json"},
            {"tenant": "TA", "report": "Crit", "profile_type": "reports", "date": "2019-07-04",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Crit_cfg.json"},
            {"tenant": "TA", "report": "Super-Critical", "profile_type": "aggregations", "date": "2019-03-04",
             "expected": "/user/foo/argo/tenants/TA/sync/TA_Super-Critical_ap_2019-03-04.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "aggregations", "date": "2019-01-04",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_ap_2019-01-04.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "reports", "date": "2019-01-05",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_cfg.json"},
            {"tenant": "TB", "report": "Critical", "profile_type": "metrics", "date": "2019-02-24",
             "expected": "/user/foo/argo/tenants/TB/sync/TB_Critical_metrics_2019-02-24.json"}
        ]
        for test_case_date in test_cases_dates:
            actual = hdfs.gen_profile_path(
                test_case_date["tenant"], test_case_date["report"], test_case_date["profile_type"], test_case_date["date"])
            expected = test_case_date["expected"]
            self.assertEqual(expected, actual)
    def test_api(self):
        """get_url maps (resource, optional item uuid) to the expected
        API v2 endpoint URL."""
        cfg = {
            "api_host": "foo.host",
            "tenant_keys": {"TA": "key1", "TB": "key2"}
        }
        argo_api = ArgoApiClient(cfg["api_host"], cfg["tenant_keys"])
        test_cases = [
            {"resource": "reports", "item_uuid": None,
             "expected": "https://foo.host/api/v2/reports"},
            {"resource": "reports", "item_uuid": "12",
             "expected": "https://foo.host/api/v2/reports/12"},
            {"resource": "operations", "item_uuid": None,
             "expected": "https://foo.host/api/v2/operations_profiles"},
            {"resource": "operations", "item_uuid": "12",
             "expected": "https://foo.host/api/v2/operations_profiles/12"},
            {"resource": "aggregations", "item_uuid": None,
             "expected": "https://foo.host/api/v2/aggregation_profiles"},
            {"resource": "aggregations", "item_uuid": "12",
             "expected": "https://foo.host/api/v2/aggregation_profiles/12"},
            {"resource": "tenants", "item_uuid": None,
             "expected": "https://foo.host/api/v2/admin/tenants"},
            {"resource": "tenants", "item_uuid": "12",
             "expected": "https://foo.host/api/v2/admin/tenants/12"},
            {"resource": "metrics", "item_uuid": None,
             "expected": "https://foo.host/api/v2/metric_profiles"},
            {"resource": "metrics", "item_uuid": "12",
             "expected": "https://foo.host/api/v2/metric_profiles/12"}
        ]
        for test_case in test_cases:
            actual = argo_api.get_url(
                test_case["resource"], test_case["item_uuid"])
            expected = test_case["expected"]
            self.assertEqual(expected, actual)
|
# coding: utf-8
# Nuevo videojuego
import webapp2
import time
from webapp2_extras import jinja2
from model.videojuego import Videojuego
class NuevoVideojuegoHandler(webapp2.RequestHandler):
    """Handler for creating a new videogame ("nuevo videojuego")."""
    def get(self):
        # Render the (empty) creation form.
        valores_plantilla = {
        }
        jinja = jinja2.get_jinja2(app=self.app)
        self.response.write(
            jinja.render_template("nuevo_videojuego.html",
                                  **valores_plantilla))
    def post(self):
        # Read the submitted form fields; a new game always starts with
        # score 0.
        titulo = self.request.get("edTitulo", "")
        puntuacion = 0
        descripcion = self.request.get("edDescripcion", "")
        if (not(titulo) or not(descripcion)):
            # Missing data: back to the form.  NOTE(review): the path has
            # no leading slash, so it resolves relative to the current
            # URL — verify this is intended.
            return self.redirect("videojuegos/nuevo")
        else:
            videojuego = Videojuego(titulo=titulo, puntuacion=puntuacion,
                                    descripcion=descripcion)
            videojuego.put()
            # Give the eventually-consistent datastore index a moment to
            # catch up before the listing page reloads.
            time.sleep(1)
            return self.redirect("/")
# WSGI application: routes the "new videogame" form handler.
app = webapp2.WSGIApplication([
    ('/videojuegos/nuevo', NuevoVideojuegoHandler)
], debug=True)
# 2. Lists
squres=[1,2,3,4,5]
print(squres[0])  # indexing starts at zero
newSqures=squres+[2,78,3,6]
print(newSqures)
# Unlike strings, list items can be reassigned
a=3**5
squres[2]=a
print(squres)
# append() adds an element at the end of the list
cu=["p","y","t","h","o","n"]
cube1=["p""y""t""h""o""n"]  # adjacent string literals with no commas are concatenated into a single string
print(cu)
print(cube1)
cu.append("myLove")
print(cu)
cu.clear()
cu1=["this","is","a","good","day"]
print(cu1)
cu1.copy()  # returns a shallow copy (discarded here — the result is not stored)
print(cu1)
cube=[9,9,75,6,4]
print(cube)
cube.pop()  # removes the last element, the inverse of append
print(cube)
print(cube[2:4])  # slicing uses a colon
print(len(cube))  # length of the list
# Another way to combine lists: nesting
a=['a','b','c']
n=[1,2,3]
x=[a,n]
print(x)
print(x[0])
print(x[0][1])  # element at index 1 inside the element at index 0
import unittest
import os
from kanbanflow_cli.kanban_board import KanbanBoard
import setup_tests
class TestKanbanBoardAPICalls(unittest.TestCase):
    """Integration tests for KanbanBoard's API-backed properties."""
    def setUp(self):
        # Export the API token (read from config.ini) before each test.
        setup_tests.set_kbflow_api_environ_var(config_ini_path="config.ini")
        self.board = KanbanBoard()
    def test_board_json_is_dict(self):
        self.assertIsInstance(self.board.board_json, dict)
    def test_board_users_json_is_list(self):
        self.assertIsInstance(self.board.board_users_json, list)
    def test_board_users_json_more_than_zero(self):
        self.assertGreater(len(self.board.board_users_json), 0)
    def test_board_column_ids_is_list(self):
        self.assertIsInstance(self.board.column_dict_list, list)
    def test_board_column_ids_list_more_than_one(self):
        """
        There is at least one column
        """
        self.assertGreater(len(self.board.column_dict_list), 0)
    def test_board_column_ids_first_element_is_dict(self):
        """
        First element in list of column is a dictionary
        """
        self.assertIsInstance(self.board.column_dict_list[0], dict)
    def test_board_colors_is_list(self):
        """
        Colors is returned as a list
        """
        colors = self.board.color_dict_list
        self.assertIsInstance(colors, list)
    def test_board_colors_list_more_than_one(self):
        """
        There is at least one task color
        """
        self.assertGreater(len(self.board.color_dict_list), 0)
    def test_board_colors_first_element_is_dict(self):
        """
        First element in list of colors is a dictionary
        """
        self.assertIsInstance(self.board.color_dict_list[0], dict)
    def tearDown(self):
        # Remove the env var so state doesn't leak between test runs.
        del os.environ["KBFLOW_API"]
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/python
#!coding=utf-8
import string
#[end-y_end, end-start, high-low, $2/$3, end_level, 缺口, 包线]
class kline():
    def cal_kline(self, list):
        """Convert daily OHLC rows into per-day feature vectors.

        Each input row is [?, start, end, low, high]; the first row seeds
        the "previous day" values.  Each output row is:
        [end-vs-prev-end %, candle body %, day range %, |body|/range,
         close position in range, gap flag (1/-1/0), engulfing flag (1/-1/0)]
        """
        # Seed "yesterday" from the first row (it is also processed below,
        # yielding an all-neutral first feature vector).
        prev_end, prev_low, prev_high = list[0][2], list[0][3], list[0][4]
        features = []
        for day in list:
            start, end = day[1], day[2]
            low, high = day[3], day[4]
            range_ratio = (high - low) / prev_end
            day_ratio = (end - prev_end) / prev_end
            k_ratio = (end - start) / prev_end
            row = [day_ratio * 100, k_ratio * 100, range_ratio * 100]
            if range_ratio == 0:
                # Flat day: avoid dividing by a zero range.
                row += [1.0, 1.0]
            else:
                row += [abs(k_ratio) / range_ratio, (end - low) / (high - low)]
            # Gap flag: whole range above (+1) or below (-1) yesterday's range.
            if low > prev_high:
                row.append(1)
            elif high < prev_low:
                row.append(-1)
            else:
                row.append(0)
            # Engulfing flag: candle body spans yesterday's full range.
            if k_ratio > 0:
                row.append(1 if (start < prev_low and end > prev_high) else 0)
            elif k_ratio < 0:
                row.append(-1 if (start > prev_high and end < prev_low) else 0)
            else:
                row.append(0)
            features.append(row)
            prev_end, prev_low, prev_high = end, low, high
        return features
|
from datetime import datetime
from factory import django, Faker, fuzzy
from pytz import UTC
from user.models import User
from .models import Lesson
class LessonFactory(django.DjangoModelFactory):
    """Factory producing Lesson instances with fuzzed/fake field values."""
    class Meta:
        model = Lesson
    # Random timezone-aware datetime within calendar year 2019 (UTC).
    date_and_time = fuzzy.FuzzyDateTime(start_dt=datetime(2019, 1, 1, tzinfo=UTC),
                                        end_dt=datetime(2019, 12, 12, tzinfo=UTC))
    # NOTE(review): the queryset below is built when this class is defined;
    # if the module is imported before the DB is migrated/populated this may
    # fail or pick from a stale user set — confirm against usage.
    teacher = fuzzy.FuzzyChoice(choices=User.objects.filter(is_teacher=True))
    title = Faker('sentence')
    description = Faker('text')
|
# Generated by Django 2.0.6 on 2018-12-16 07:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the Interface app.

    Creates Camera, Vehicle, VehicleViolation and Violation, then wires the
    VehicleViolation -> Violation foreign key in a final AddField (the FK is
    declared after Violation's CreateModel in the operation order).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Camera',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('location', models.CharField(max_length=100)),
                ('serial_number', models.CharField(max_length=100)),
                ('is_active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Vehicle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(blank=True, max_length=15, null=True)),
                ('mobile', models.PositiveIntegerField(default='8554951545')),
                ('address', models.CharField(default='test', max_length=250)),
                ('type', models.CharField(max_length=100)),
                ('image_path', models.CharField(max_length=500)),
                ('rc_path', models.CharField(max_length=500)),
                ('is_done', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='VehicleViolation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField()),
                ('has_paid', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('camera', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Interface.Camera')),
                ('vehicle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Interface.Vehicle')),
            ],
        ),
        migrations.CreateModel(
            name='Violation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=200)),
                ('fine_amount', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='vehicleviolation',
            name='violation',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Interface.Violation'),
        ),
    ]
|
# Emit 150 C char-array definitions, each initialized with a random
# string of 24-30 characters drawn from the hex-ish character pool.
import random

pool = "0123456789abcdefABCDEF"
l = 20  # overwritten with a fresh random length on every iteration
for x in range(150):
    l = random.randint(24, 30)
    # Same randint calls in the same order as the original loop, so the
    # RNG consumption (and seeded output) is identical.
    chars = [pool[random.randint(0, len(pool) - 1)] for _ in range(l)]
    tmp = "".join(chars)
    print('char flag%d[50] = "%s";' % (x, tmp))
|
class Node:
    """Singly linked list node storing an arbitrary value.

    Note: add_after() stores str(value), so chains built through it hold
    strings regardless of the inserted value's original type.
    """
    def __init__(self, data, next_data = None):
        self.__data = data
        self.__next = next_data
    def __str__(self):
        return self.__data
    def get_data(self):
        return self.__data
    def get_next(self):
        """Return the next node, or None at the tail."""
        return self.__next
    def set_data(self,data):
        self.__data = data
    def set_next(self,next_data):
        self.__next = next_data
    def add_after(self, value):
        """Insert a new node holding str(value) right after this one."""
        tmp = self.__next
        a = Node(str(value))
        self.__next = a
        a.set_next(tmp)
    def remove_after(self):
        """Unlink the node after this one; no-op when this is the tail.

        Bug fix: the original unconditionally called
        self.get_next().get_next() and raised AttributeError on a tail node.
        """
        if self.__next is not None:
            self.__next = self.__next.get_next()
    def __contains__(self, value):
        """True if this node or any node after it holds `value`."""
        if self.__next == None and self.__data != value:
            return False
        else:
            if self.__data == value :
                return True
            tmp = self.__next
            while tmp != None:
                if tmp.get_data() == value:
                    return True
                else:
                    tmp = tmp.get_next()
            return False
    def get_sum(self): # assumes every node holds a numeric (integer) value
        """Sum the data of this node and all nodes after it."""
        num = self.__data
        tmp = self.__next
        while tmp != None:
            num +=tmp.get_data()
            tmp = tmp.get_next()
        return num
def print_node_chain(node_of_chain):
    """Print the data of every node from node_of_chain to the tail."""
    current = node_of_chain
    while current is not None:
        print(current.get_data())
        current = current.get_next()
    return None
def create_node_chain(values): # assumes the `values` list is non-empty
    """Build a linked chain of Node objects preserving list order; return its head."""
    # Start from the tail and prepend each earlier value.
    chain = Node(values[-1])
    for value in reversed(values[:-1]):
        node = Node(value)
        node.set_next(chain)
        chain = node
    return chain
# Demo: build a five-node chain from the fruit names and print them in order.
values = create_node_chain(['apple', 'banana', 'cherry', 'date', 'elderberry'])
print_node_chain(values)
|
from CNN.AbstractLayer import Layer
import numpy as np
from multiprocessing import Pool
from os import cpu_count
from math import ceil
class ConvolutionLayer(Layer):
    """Convolution layer: applies a bank of 3-D filters to an input tensor,
    spreading the work over a process pool (one task per filter)."""
    def __init__(self, filters, learningRate, stride=1, isLearning=True, allowedThreads=None):
        """filters: array of shape (nFilters, fh, fw, depth);
        stride: sliding step of the convolution;
        allowedThreads: pool size (defaults to the machine's CPU count)."""
        super(ConvolutionLayer, self).__init__(isLearning)
        self._learningRate = learningRate
        self._filters = filters
        self._stride = stride
        self._allowedThreads = allowedThreads or cpu_count()
    def parallelize(self, func, param):
        """Run func over the param tuples in a process pool; return the results."""
        pool = Pool(self._allowedThreads)
        res = pool.starmap_async(func, param, chunksize=ceil(len(param)/self._allowedThreads))
        pool.close()
        pool.join()
        return res.get()
    @staticmethod
    def convolveFilter(tensor, filters, fIndex, stride):
        """Convolve `tensor` with filter `fIndex`; return (fIndex, map of shape (h, w, 1)).

        NOTE(review): the output dims use (in - f) // stride without the
        conventional +1, and the loops step by `stride` over OUTPUT
        coordinates while indexing the INPUT at the same i/j — this is only
        consistent for stride == 1; confirm before using a larger stride.
        """
        shape = list(tensor.shape[:-1])
        shape = [(shape[i] - filters.shape[i+1]) // (stride)
                 for i in range(len(shape))] + [1]
        featureMap = np.zeros(tuple(shape))
        for i in range(0, featureMap.shape[0], stride): # line i
            for j in range(0, featureMap.shape[1], stride): # column j
                # we compute the result of the dot product between:
                # (1) the current receptive field, and
                # (2) the current filter (3 dimensional dot product)
                featureMap[i][j][0] \
                    = np.tensordot(tensor[i:i+filters.shape[1], j:j+filters.shape[2], :], filters[fIndex], axes=((0, 1, 2), (0, 1, 2)))# / filters[0].size
        return (fIndex, featureMap)
    def convolve(self, tensor):
        """
        Convolution layer.
        It takes a tensor input or the feature map produced by the previous
        layer and applies its convolution according to its own filters.
        stride : the "sliding step" of the convolution, usually 1
        filters : an array of filters (3 dimensional filters)
        """
        # init the resulting feature map
        shape = list(tensor.shape[:-1])
        shape = [(shape[i] - self._filters.shape[i+1]) // (self._stride)
                 for i in range(len(shape))] + [self._filters.shape[0]]
        featureMap = np.zeros(tuple(shape))
        res = self.parallelize(ConvolutionLayer.convolveFilter, [(tensor, self._filters, i, self._stride) for i in range(self._filters.shape[0])])
        for f, partFMap in res:
            # Bug fix: the original wrote `featureMap[:][:][f] = partFMap[:][:][0]`.
            # Chained `[:]` just return views of the whole array, so that
            # assigned ROW f (featureMap[f]) instead of CHANNEL f. Select the
            # channel axis explicitly.
            featureMap[:, :, f] = partFMap[:, :, 0]
        return featureMap
    @staticmethod
    def learnConv(loss, receivedInput, filters, stride, learningRate):
        """
        Function computing the loss of the previous layer and the updated filters.
        The received loss is computed in the next layer and sent here through backprop.
        """
        # contains the loss of the previous layer
        previousLayerLoss = np.zeros(receivedInput.shape)
        # will be used to compute the updated filters
        filtersCorrection = np.zeros(filters.shape)
        for i in range(filters.shape[0]): # for each filter
            for j in range(0, filters.shape[1], stride): # for i along the height
                for k in range(0, filters.shape[2], stride): # for j along the width
                    # computing dL/dinput and dL/dW
                    previousLayerLoss[j:j+filters.shape[1],
                                      k:k+filters.shape[2], :] \
                        += loss[j, k, i] * filters[i]
                    filtersCorrection[i] += loss[j, k, i] \
                        * receivedInput[j:j+filters.shape[1],
                                        k:k+filters.shape[2],
                                        :]
        # returns the previous layer's loss and the updated filters
        return previousLayerLoss, filters - learningRate * filtersCorrection
    def compute(self, tensor):
        """
        Wraps the computation static method
        """
        self.saveData(tensor)
        return self.convolve(tensor)
    def learn(self, loss):
        """
        Wraps the learning static method and update the filters
        """
        if self.isLearning():
            res, self._filters = ConvolutionLayer.learnConv(loss,
                                                            self.getSavedData(),
                                                            self._filters,
                                                            self._stride,
                                                            self._learningRate)
            return res
|
import Tkinter as tk
import time
top = tk.Tk()
def addText():
    """Step through the ticker list, updating the label with progress text."""
    tickerSymbols = ["VTIAX","PTTRX","PRFDX","DBLTX","TGBAX","FCNTX","CNSAX","ANZAX","FISCX","FACVX","PACIX","VCVSX","DEEAX","ACCBX","CLDAX"]
    # enumerate() replaces the original list.index() lookup inside the loop,
    # which was O(n) per element and returns the FIRST match (wrong if the
    # list ever contains duplicates). The unused L.cget() read was dropped.
    for i, symbol in enumerate(tickerSymbols):
        newPercentage = str(i) + "%" + " complete"
        L.configure(text=newPercentage)
        # Force the label to repaint while we stay inside this handler.
        top.update_idletasks()
# Build the single-window UI: a button that triggers addText and a label
# that addText updates with progress text.
B = tk.Button(top, text ="Change text", command = addText)
L = tk.Label(top,text='0%')
B.pack()
L.pack()
top.geometry("900x300")
top.mainloop()
|
"""Test cassandra storage."""
import logging
import unittest
from cassandra import cluster as cassandra_driver
from cassandra.protocol import ConfigurationException
from ccmlib import common
from ccmlib.cluster import Cluster as CCMCluster
from ccmlib.cluster_factory import ClusterFactory as CCMClusterFactory
from cfm_core.ingredient import Ingredient
from cfm_core.pantry import Pantry
from cfm_service.storage.cassandra_storage import CassandraCfmStorage
class CassandraStorageTest(unittest.TestCase):
    """Integration test for CassandraCfmStorage against a local ccm cluster."""
    CLUSTER_NAME = "cfm-ccm-cluster"
    CLUSTER_PATH = "/Users/zvo/.ccm"
    CLUSTER_VERSION = "3.11.8"
    CLUSTER_KWARGS = {"version": CLUSTER_VERSION}
    CLUSTER_NODE_COUNT = 1
    def __init__(self, *args, **kwargs):
        """__init__."""
        super().__init__(*args, **kwargs)
    def setUp(self):
        """Start the ccm cluster and (re)create the cfm keyspace/table."""
        self._start_cluster()
        self._init_cluster()
    def tearDown(self):
        """Stop and remove the ccm cluster."""
        self._stop_cluster()
    def _start_cluster(self):
        """Reuse an existing ccm cluster if present, otherwise create one."""
        try:
            cluster = CCMClusterFactory.load(self.CLUSTER_PATH, self.CLUSTER_NAME)
            logging.debug(
                "Found existing ccm {} cluster; clearing".format(self.CLUSTER_NAME)
            )
            cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
            self.CCM_CLUSTER = cluster
        except Exception:
            # Loading failed: build a fresh cluster from scratch.
            logging.debug(
                "Creating new ccm cluster {} with {}",
                self.CLUSTER_NAME,
                self.CLUSTER_KWARGS,
            )
            cluster = CCMCluster(
                self.CLUSTER_PATH, self.CLUSTER_NAME, **self.CLUSTER_KWARGS
            )
            cluster.set_configuration_options({"start_native_transport": True})
            common.switch_cluster(self.CLUSTER_PATH, self.CLUSTER_NAME)
            cluster.populate(self.CLUSTER_NODE_COUNT, ipformat=None)
            cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
            self.CCM_CLUSTER = cluster
    def _init_cluster(self):
        """Drop any stale cfm keyspace, then create the keyspace and table."""
        session = cassandra_driver.Cluster(contact_points=["localhost"]).connect()
        try:
            session.execute("DROP KEYSPACE cfm;")
        except ConfigurationException:
            logging.debug("keyspace was not there")
        session.execute(
            """
            CREATE KEYSPACE cfm WITH replication = {'class': 'NetworkTopologyStrategy', 'datacenter1': '1'}
            """
        )
        session.execute(
            """
            CREATE TABLE IF NOT EXISTS cfm.pantry (
                pantry_id text primary key,
                blob text
            );
            """
        )
    def _stop_cluster(self):
        self.CCM_CLUSTER.stop()
        self.CCM_CLUSTER.remove()
    def test_store_pantry(self):
        """Storing a pantry returns the 'OK' acknowledgement."""
        cs = CassandraCfmStorage({"contact_points": ["localhost"], "log_level": "INFO"})
        response = cs.store_pantry(
            user_id="-1", pantry_id="-1", pantry=Pantry([Ingredient("egg", 1, "piece")])
        )
        # Fixed: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual("OK", response)
|
import cfscrape
import requests
import re
import random
import chardet
from time import sleep
import subprocess
def icurl(url):
    """Fetch `url` via curl, presenting Cloudflare clearance cookies.

    Returns (response_bytes_or_None, cookie_string). The cookie string is
    obtained from cfscrape against the site root and handed to curl together
    with a randomly chosen User-Agent.
    """
    user_agent = ichoieUa()
    startUrl = "https://www.yifile.com"
    referer = startUrl
    # cfscrape solves the Cloudflare challenge; the second return value is
    # the user agent it used (ignored here).
    cookie_arg, a = cfscrape.get_cookie_string(startUrl)
    #print(cookie_arg, a)
    # NOTE(review): shell=True with values interpolated into the command
    # string — a crafted URL/cookie could inject shell commands. Prefer
    # subprocess.check_output([...], shell=False) with an argument list.
    cmd = "curl --referer '{referer}' --cookie '{cookie_arg}' -A '{user_agent}' '{url}'"
    loginContent = None
    try:
        loginContent = subprocess.check_output(cmd.format(referer = referer, cookie_arg=cookie_arg, user_agent=user_agent, url=url), shell=True)
    except subprocess.CalledProcessError as e:
        # curl exited non-zero; treat as "no content".
        loginContent = None
    print(loginContent, cookie_arg)
    return loginContent, cookie_arg
def ichoieUa():
    """Return a random browser User-Agent string.

    Bug fix: the original list was missing commas between its last entries,
    so implicit string concatenation fused three user agents into one
    garbled string that would occasionally be returned.
    """
    user_agent_list = [
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.67 Safari/537.36",
        "Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/4E423F",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1623.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2226.0 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    ]
    return random.choice(user_agent_list)
def ichoieCp():
    """Return a random company mail domain for building fake addresses."""
    domains = (
        "google.com",
        "163.com",
        "yaeh.cn",
        "126.com",
    )
    return random.choice(domains)
if __name__ == "__main__":
    # Driver: iterate candidate account numbers and issue one request per id.
    # NOTE(review): this is ~2M unthrottled requests against the remote
    # service — confirm this is intentional before running.
    for num in range(10,2000000):
        cp = ichoieCp()
        # account/company form the cmail query parameter; txt echoes the id.
        url = "https://www.yifile.com/ajax_yifile.php?uid=0&cmail={account}@{company}.com&txt={txt}&curl=https://www.yifile.com/index.php"
        #print(url.format(account = num, company = cp, txt=num))
        icurl(url.format(account = num, company = cp, txt=num));
|
"""Mecabによるチャットの解析"""
import sys
import codecs
from collections import defaultdict
import MeCab
import youtube_db
def main(argvs, argc):
    """Analyze chat messages with MeCab and print word frequencies.

    argvs: sys.argv-style list; elements after the program name are video ids.
    Returns 1 on bad usage, otherwise None after printing "word<TAB>count"
    lines for every word seen at least 10 times, most frequent first.
    """
    if argc < 2:
        print("Usage #python %s video_id1 video_id2 ..." % argvs[0])
        return 1
    # The original loop shadowed the builtin `id`; a slice copy is enough.
    video_list = list(argvs[1:])
    db = youtube_db.YoutubeDb()
    db.connect('youtube.sqlite')
    records = youtube_db.LiveChatMessage.select().where(youtube_db.LiveChatMessage.video_id << video_list)
    mecab = MeCab.Tagger('')
    # Parts of speech to keep (nouns, adjectives, verbs, adverbs, ...).
    pos = ['名詞', '形容詞', '形容動詞', '感動詞', '動詞', '副詞']
    # Stop words and URL fragments to ignore.
    exclude = [
        'する',
        'いる',
        'http',
        'https',
        'co',
        'jp',
        'com'
    ]
    wordcount = defaultdict(int)
    for rec in records:
        node = mecab.parseToNode(rec.message)
        while node:
            fs = node.feature.split(",")
            if fs[0] in pos:
                # Prefer the base form (field 6); fall back to the surface form.
                word = (fs[6] != '*' and fs[6] or node.surface).strip()
                # Skip pure digits, single characters, and stop words.
                if not word.isdigit() and len(word) != 1 and word not in exclude:
                    wordcount[word] += 1
            node = node.next
    # Print words with 10+ occurrences, most frequent first.
    for k, v in sorted(wordcount.items(), key=lambda x: x[1], reverse=True):
        if v < 10:
            break
        print("%s\t%d" % (k, v))
# CLI entry point: pass one or more YouTube video ids as arguments.
if __name__ == '__main__':
    argvs = sys.argv
    argc = len(argvs)
    sys.exit(main(argvs, argc))
|
class Solution:
    # @param {string} s
    # @param {string} p
    # @return {boolean}
    def isMatch(self, s, p):
        """Wildcard matching: '?' matches one char, '*' matches any sequence.

        Greedy two-pointer scan: on a mismatch we backtrack to the most
        recent '*' and let it consume one more source character. O(1) extra
        space. Fixed: compare to None with `is`, not `==`.
        """
        if p == s:
            return True
        elif p is None or p == "":
            return False
        ls = len(s)
        lp = len(p)
        last_s = 0    # position in s to resume from after a '*' retry
        last_p = -1   # index of the most recent '*' in p (-1: none seen yet)
        i = j = 0
        while i < ls:
            if j < lp and (p[j] == "?" or s[i] == p[j]):
                i += 1
                j += 1
            elif j < lp and p[j] == "*":
                last_s = i
                last_p = j
                j += 1
            elif last_p >= 0:
                # Mismatch: let the last '*' swallow one more character.
                last_s += 1
                i = last_s
                j = last_p
            else:
                return False
        if i < ls:
            return False
        # Trailing '*'s in the pattern can match the empty string.
        while j < lp and p[j] == "*":
            j += 1
        return j == lp
|
# coding: utf-8
# In[3]:
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split, cross_val_score
import string
import numpy as np
import pandas as pd
import math
import os
import csv
from random import sample
# In[ ]:
#helper functions
#function to clean text
def cleaner(text):
    """Lower-case each string, strip ASCII punctuation, and drop stop words.

    Returns a new list with one cleaned string per input string.
    """
    # One translation table (punctuation -> delete) shared by the batch.
    strip_punct = str.maketrans({ch: None for ch in """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""})
    stop_words = set(["the"])
    cleaned = []
    for entry in text:
        words = entry.lower().translate(strip_punct).split()
        cleaned.append(' '.join(w for w in words if w not in stop_words))
    return cleaned
def text_collecter(Subsampled_data_frame,playlist_info,track_info_dict):
    """Collect cleaned text for playlists, prediction tracks, and training tracks.

    Returns a 7-element list: [playlist names, prediction artist/album/track
    names, training artist/album/track names (each playlist's tracks joined
    into one document)].
    """
    pids = Subsampled_data_frame["PID"]
    targets = Subsampled_data_frame["TO_PREDICT"]
    track_lists = Subsampled_data_frame["PLAYLISTS"]

    def target_field(field):
        # One cleaned entry per prediction track.
        return cleaner([track_info_dict[tid][field] for tid in targets])

    def joined_field(field):
        # All tracks of a playlist squished into one string —
        # as they are squished never do ngrams>1.
        return cleaner([" ".join(track_info_dict[tid][field] for tid in tracks) for tracks in track_lists])

    playlist_names = cleaner([playlist_info["name"][pid] for pid in pids])
    return [playlist_names,
            target_field("artist_name"),
            target_field("album_name"),
            target_field("track_name"),
            joined_field("artist_name"),
            joined_field("album_name"),
            joined_field("track_name")]
def Find(x,y):
    """Return (as a list, unordered) the words present in both strings."""
    shared = set(x.split()) & set(y.split())
    return list(shared)
def Find_shared_Words(a,b):
    """Pairwise shared words between two equal-length string lists, space-joined."""
    return [' '.join(shared) for shared in map(Find, a, b)]
# In[ ]:
#make big bag of words
#returns large sparse array and the tv objects that are needed for predictions
def BOW(Subsampled_data_frame,playlist_info,playlists,track_info_dict,Training,vectorizers,challenge):
    """Build the big bag-of-words sparse feature matrix.

    Training=True fits the vectorizers before transforming; otherwise the
    pre-fitted `vectorizers` (list of 10) are applied as-is. `challenge`
    selects which text groups are used and how they are stacked.
    Returns (sparse_merge, vectorizers) when Training, else sparse_merge.

    NOTE(review): `playlists` is accepted but never used here — confirm it
    can be dropped at the call sites. Also, `sparse_merge` is only assigned
    for challenge values in {1..10} covered below; any other value raises
    NameError at the return — verify callers never pass anything else.
    """
    #hard code parameters for now
    MAX_FEATURES_PLAYLIST_NAMES=1000
    MAX_FEATURES_PREDICTION_ARTIST_NAMES=1000
    MAX_FEATURES_PREDICTION_ALBUM_NAMES=1000
    MAX_FEATURES_PREDICTION_TRACK_NAMES=1000
    MAX_FEATURES_TRAIN_ARTIST_NAMES=1000
    MAX_FEATURES_TRAIN_ALBUM_NAMES=1000
    MAX_FEATURES_TRAIN_TRACK_NAMES=1000
    MIN_COUNTS=10
    MAX_FEATURES_SHARED_TRACK=1000
    MAX_FEATURES_SHARED_ALBUM=1000
    MAX_FEATURES_SHARED_ARTIST=1000
    MIN_COUNTS_SHARED=1
    #collect clean text not shared
    [playlist_names,prediction_artist_names,prediction_album_names,prediction_track_names,train_artist_names,train_album_names,train_track_names]=text_collecter(Subsampled_data_frame,playlist_info,track_info_dict)
    #collect clean text shared
    #only looking at shared between playlist name and predictions for now
    if challenge not in [2,5]:
        shared_track_names = Find_shared_Words(playlist_names,prediction_artist_names)
        shared_album_names = Find_shared_Words(playlist_names,prediction_album_names)
        shared_artist_names = Find_shared_Words(playlist_names,prediction_track_names)
    # Unpack the ten vectorizers (tv1..tv10) in their fixed positional order.
    tv1 = vectorizers[0]
    tv2 = vectorizers[1]
    tv3 = vectorizers[2]
    tv4 = vectorizers[3]
    tv5 = vectorizers[4]
    tv6 = vectorizers[5]
    tv7 = vectorizers[6]
    tv8 = vectorizers[7]
    tv9 = vectorizers[8]
    tv10 = vectorizers[9]
    if Training:
        #transform
        if challenge not in [2,5]:
            playlist_names_tf_idf=tv1.fit_transform(playlist_names)
            print("done1")
            prediction_artist_names_tf_idf=tv2.fit_transform(prediction_artist_names)
            print("done2")
            prediction_album_names_tf_idf=tv3.fit_transform(prediction_album_names)
            print("done3")
            prediction_track_names_tf_idf=tv4.fit_transform(prediction_track_names)
            print("done4")
        if challenge not in [1,7,9]:
            # Fit on at most the first 1M documents (memory cap), then
            # transform the full corpus with the fitted vocabulary.
            tv5.fit_transform(train_artist_names[:min(1000000,len(train_artist_names))])
            print("done5")
            train_artist_names_tf_idf=tv5.transform(train_artist_names)
            print("done5")
            tv6.fit_transform(train_album_names[:min(1000000,len(train_artist_names))])
            print("done6")
            train_album_names_tf_idf=tv6.transform(train_album_names)
            print("done6")
            tv7.fit_transform(train_track_names[:min(1000000,len(train_artist_names))])
            print("done7")
            train_track_names_tf_idf=tv7.transform(train_track_names)
        if challenge not in [2,5]:
            shared_track_names_tf_idf=tv8.fit_transform(shared_track_names)
            print("done8")
            shared_album_names_tf_idf=tv9.fit_transform(shared_album_names)
            print("done9")
            shared_artist_names_tf_idf=tv10.fit_transform(shared_artist_names)
            print("done10")
        vectorizers = [tv1,tv2,tv3,tv4,tv5,tv6,tv7,tv8,tv9,tv10]
    else:
        #transform
        if challenge not in [2,5]:
            playlist_names_tf_idf=tv1.transform(playlist_names)
            prediction_artist_names_tf_idf=tv2.transform(prediction_artist_names)
            prediction_album_names_tf_idf=tv3.transform(prediction_album_names)
            prediction_track_names_tf_idf=tv4.transform(prediction_track_names)
        if challenge not in [1,7,9]:
            train_artist_names_tf_idf=tv5.transform(train_artist_names)
            train_album_names_tf_idf=tv6.transform(train_album_names)
            train_track_names_tf_idf=tv7.transform(train_track_names)
        if challenge not in [2,5]:
            shared_track_names_tf_idf=tv8.transform(shared_track_names)
            shared_album_names_tf_idf=tv9.transform(shared_album_names)
            shared_artist_names_tf_idf=tv10.transform(shared_artist_names)
    #merge into a mega spase array
    if challenge in [3,4,6,8,10]:
        sparse_merge = hstack((playlist_names_tf_idf,prediction_artist_names_tf_idf,prediction_album_names_tf_idf,prediction_track_names_tf_idf,train_artist_names_tf_idf,train_album_names_tf_idf,train_track_names_tf_idf,shared_track_names_tf_idf,shared_album_names_tf_idf,shared_artist_names_tf_idf)).tocsr()
    if challenge in [1,7,9]:
        sparse_merge = hstack((playlist_names_tf_idf,prediction_artist_names_tf_idf,prediction_album_names_tf_idf,prediction_track_names_tf_idf,shared_track_names_tf_idf,shared_album_names_tf_idf,shared_artist_names_tf_idf)).tocsr()
    if challenge in [2,5]:
        sparse_merge = hstack((prediction_artist_names_tf_idf,prediction_album_names_tf_idf,prediction_track_names_tf_idf,train_artist_names_tf_idf,train_album_names_tf_idf,train_track_names_tf_idf)).tocsr()
    if Training:
        return(sparse_merge,vectorizers)
    else:
        return(sparse_merge)
|
## Tomato (3-D ripening problem)
## Given an M x N x H grid, find the number of days until every tomato
## ripens; print -1 if that is impossible.
## Multi-source BFS from every initially ripe tomato.
## Compare the total tomato count with the count that ended up ripe.
############### input ###############
import heapq
import sys
from collections import deque, defaultdict
r = sys.stdin.readline
m, n, h = map(int, r().split())
tomato_list = [[list(map(int, r().split())) for _ in range(n)]for _ in range(h)]
q = deque([])
## total number of tomatoes (cells), minus empty cells below
tomato_num = n*m*h
## number of ripe tomatoes
done_count = 0
## days elapsed (starts at -1 so the first BFS level counts as day 0)
day = -1
for i in range(len(tomato_list)):
    for j in range(len(tomato_list[i])):
        for k in range(len(tomato_list[i][j])):
            if tomato_list[i][j][k] == 1:
                # Ripe tomato: BFS source.
                q.append([i,j,k])
                done_count += 1
            elif tomato_list[i][j][k] == -1:
                # Empty cell: does not count toward the total.
                tomato_num -= 1
#print(tomato_num)
#print(done_count)
##################################
# Six axis-aligned neighbours in 3-D.
dx = [1, -1, 0, 0, 0, 0]
dy = [0, 0, 1, -1, 0, 0]
dz = [0, 0, 0, 0, 1, -1]
while q:
    q_size = len(q)
    # Process one full BFS level (= one day) at a time.
    for _ in range(q_size):
        now_z, now_x, now_y = q.popleft()
        for i in range(6):
            next_x = now_x + dx[i]
            next_y = now_y + dy[i]
            next_z = now_z + dz[i]
            if next_x <0 or next_x >=n or next_y<0 or next_y>=m or next_z <0 or next_z >= h:
                continue
            # Non-zero means ripe (1) or empty (-1): skip either way.
            if tomato_list[next_z][next_x][next_y]:
                continue
            q.append([next_z, next_x, next_y])
            tomato_list[next_z][next_x][next_y] = 1
            done_count += 1
    day += 1
if done_count < tomato_num:
    print(-1)
else:
    print(day)
#print(f"total tomatoes : {tomato_num}, ripe tomatoes : {done_count}")
#print(f"days elapsed : {day}")
|
import pymysql
import Config
# Open a connection using the credentials bundled in the local Config module.
conn = pymysql.connect(**Config.sql_conn_dict)
cur=conn.cursor()
sql = 'select * from student'
cur.execute(sql)
#cur.fetchone()
# Each fetchone() returns the next row, or None once the result set is
# exhausted — so trailing prints may show None if there are fewer than 6 rows.
print(cur.fetchone())
print(cur.fetchone())
print(cur.fetchone())
print(cur.fetchone())
print(cur.fetchone())
print(cur.fetchone())
cur.close()
conn.close()
|
from django.db import models
# Create your models here.
class Level(models.Model):
    """A skill level (rank) with the experience needed to reach it."""
    short_name = models.CharField(max_length=10, unique=True)
    name = models.CharField(max_length=30, unique=True)
    describe = models.TextField()
    need_exp = models.IntegerField()
    def __str__(self):
        return self.short_name
class MethodFamily(models.Model):
    """A named grouping of practice methods."""
    name = models.CharField(max_length=50, unique=True)
    def __str__(self):
        return self.name
class Method(models.Model):
    """A practice method worth `exp` points, belonging to one family."""
    name = models.TextField(unique=True)
    exp = models.IntegerField()
    method_family = models.ForeignKey(MethodFamily, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class SkillFamily(models.Model):
    """A named grouping of skills."""
    name = models.CharField(max_length=50, unique=True)
    def __str__(self):
        return self.name
class User(models.Model):
    """An application user subscribed to zero or more skill families."""
    name = models.CharField(max_length=50, unique=True)
    skill_family = models.ManyToManyField(SkillFamily)
    def __str__(self):
        return self.name
class Skill(models.Model):
    """A tiered skill within one family, trainable via several method families."""
    name = models.CharField(max_length=30, unique=True)
    tier = models.IntegerField()
    skill_family = models.ForeignKey(SkillFamily, on_delete=models.CASCADE)
    method_family = models.ManyToManyField(MethodFamily)
    def __str__(self):
        return self.name
class UserSkill(models.Model):
    """Per-user progress (experience and current level) on one skill."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
    skill_exp = models.IntegerField(default=0)
    skill_level = models.ForeignKey(Level, on_delete=models.CASCADE)
    def __str__(self):
        return "user:{},skill:{}".format(self.user, self.skill)
class UserMethod(models.Model):
    """How many times a user has applied a method while training a skill."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    method = models.ForeignKey(Method, on_delete=models.CASCADE)
    skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
    method_times = models.IntegerField(default=0)
    def __str__(self):
        return "user:{},skill:{},method:{}".format(self.user, self.skill,
                                                   self.method)
|
import sys
import json
from collections import defaultdict
from httplib import HTTPConnection
import logging
from os import makedirs
from os.path import isdir, join
import re
from urllib2 import HTTPError, urlopen, Request
from urlparse import urlsplit
import config
import egginst
from enstaller import Enstaller
from enstaller.history import History
from plat import custom_plat
from utils import open_with_auth, get_installed_info, comparable_version, \
cname_fn
from verlib import IrrationalVersionError
from indexed_repo.chain import Chain, Req
from indexed_repo import dist_naming
from indexed_repo.requirement import add_Reqs_to_spec
logger = logging.getLogger(__name__)
class Resources(object):
def __init__(self, index_root=None, urls=[], verbose=False, prefix=None,
platform=None):
self.plat = platform or custom_plat
self.prefix = prefix
self.verbose = verbose
self.index = []
self.history = History(prefix)
self.enst = Enstaller(Chain(verbose=verbose), [prefix or sys.prefix])
self.product_index_path = 'products'
self.authenticate = True
for url in urls:
self.add_product(url)
if index_root:
self.load_index(index_root)
# Cache attributes
self._installed_cnames = None
self._status = None
self._installed = None
def clear_cache(self):
self._installed_cnames = None
self._status = None
self._installed = None
def _http_auth(self):
username, password = config.get_auth()
if username and password and self.authenticate:
return (username + ':' + password).encode('base64')
else:
return None
def _read_json_from_url(self, url):
logger.debug('Reading JSON from URL: %s' % url)
req = Request(url)
auth = self._http_auth()
if auth:
req.add_header('Authorization', auth)
return json.load(urlopen(req))
def load_index(self, url):
url = url.rstrip('/')
index_url = '%s/%s' % (url, self.product_index_path)
try:
index = self._read_json_from_url(index_url)
except HTTPError as e:
logger.exception('Error getting products file %s' % index_url)
return
for product in index:
product_url = '%s/products/%s' % (url, product['product'])
try:
product['base_url'] = url
product['url'] = product_url.rstrip('/')
self.add_product(product)
except HTTPError:
logger.exception('Error getting index file %s' % product_url)
def _read_product_index(self, product_url):
""" Get the product index.
Try the platform-independent one first, then try the
platform-specific one if that one doesn't exist. Does both
HTTP requests simultaneously.
"""
independent = urlsplit('%s/index.json' % (product_url))
specific = urlsplit('%s/index-%s.json' % (product_url, self.plat))
logger.debug('Trying for JSON from URLs: %s, %s' %
(independent.geturl(), specific.geturl()))
conn1 = HTTPConnection(independent.netloc)
conn2 = HTTPConnection(specific.netloc)
auth = self._http_auth()
if auth:
headers = {'Authorization': auth}
else:
headers = {}
conn1.request('GET', independent.path, headers=headers)
conn2.request('GET', specific.path, headers=headers)
try:
res = conn1.getresponse()
if res.status == 200:
data = res.read()
return independent, json.loads(data)
res = conn2.getresponse()
if res.status == 200:
data = res.read()
return specific, json.loads(data)
else:
raise HTTPError(specific, res.code, res.reason, res.msg)
except ValueError:
logger.exception('Error parsing index for %s' % product_url)
logger.error('Invalid index file: """%s"""' % data)
return None, None
except HTTPError:
logger.exception('Error reading index for %s' % product_url)
return None, None
finally:
conn1.close()
conn2.close()
def add_product(self, index):
if self.verbose:
print "Adding product:", index['url']
index_url, product_index = self._read_product_index(index['url'])
if product_index is None:
return
index['index_url'] = index_url
index.update(product_index)
if 'platform' in index and index['platform'] != self.plat:
raise Exception('index file for platform %s, but running %s' %
(index['platform'], self.plat))
if 'eggs' in index:
self._add_egg_repos(index['url'], index)
self.index.append(index)
return index
    def _add_egg_repos(self, url, index):
        """Register the index's egg repositories and every egg spec in them.

        Extends the Enstaller chain's repo list and fills chain.index /
        chain.groups with one spec dict per egg distribution.
        """
        if 'egg_repos' in index:
            repos = [url + '/' + path + '/' for path in index['egg_repos']]
        else:
            # No explicit repo list: the product URL itself is the repo.
            repos = [url]
        self.enst.chain.repos.extend(repos)
        for cname, project in index['eggs'].iteritems():
            for distname, data in project['files'].iteritems():
                name, version, build = dist_naming.split_eggname(distname)
                spec = dict(metadata_version='1.1',
                            name=name, version=version, build=build,
                            python=data.get('python', '2.7'),
                            packages=data.get('depends', []))
                # Fills in spec['cname'] and requirement objects.
                add_Reqs_to_spec(spec)
                assert spec['cname'] == cname, distname
                # data['repo'] indexes into `repos`; defaults to the first.
                dist = repos[data.get('repo', 0)] + distname
                self.enst.chain.index[dist] = spec
                self.enst.chain.groups[cname].append(dist)
def get_installed_cnames(self):
if not self._installed_cnames:
self._installed_cnames = self.enst.get_installed_cnames()
return self._installed_cnames
    def get_status(self):
        """Return (and cache) a dict mapping cname to a defaultdict(str) of
        package status information.

        Keys added per package: installed metadata (from get_installed_info),
        'a-egg'/'a-ver' for the best available egg, and 'status' which is one
        of 'up-to-date', 'updateable', 'installed', 'installable' or '' (the
        defaultdict default).
        """
        if not self._status:
            # the result is a dict mapping cname to ...
            res = {}
            # 1) seed with metadata of everything currently installed
            for cname in self.get_installed_cnames():
                d = defaultdict(str)
                info = self.enst.get_installed_info(cname)[0][1]
                if info is None:
                    continue
                d.update(info)
                res[cname] = d
            # 2) overlay the best available egg from the repo chain
            for cname in self.enst.chain.groups.iterkeys():
                dist = self.enst.chain.get_dist(Req(cname))
                if dist is None:
                    continue
                repo, fn = dist_naming.split_dist(dist)
                n, v, b = dist_naming.split_eggname(fn)
                if cname not in res:
                    d = defaultdict(str)
                    # 'name' is absent in a fresh defaultdict, so this is cname
                    d['name'] = d.get('name', cname)
                    res[cname] = d
                res[cname]['a-egg'] = fn
                res[cname]['a-ver'] = '%s-%d' % (v, b)

            def vb_egg(fn):
                # comparable (version, build) key; None when unparsable
                try:
                    n, v, b = dist_naming.split_eggname(fn)
                    return comparable_version(v), b
                except IrrationalVersionError:
                    return None
                except AssertionError:
                    return None

            # 3) derive the status flag from installed vs available
            for d in res.itervalues():
                if d['egg_name']:                    # installed
                    if d['a-egg']:
                        if vb_egg(d['egg_name']) >= vb_egg(d['a-egg']):
                            d['status'] = 'up-to-date'
                        else:
                            d['status'] = 'updateable'
                    else:
                        d['status'] = 'installed'
                else:                                # not installed
                    if d['a-egg']:
                        d['status'] = 'installable'
            self._status = res
        return self._status
def get_installed(self):
if not self._installed:
self._installed = set([pkg['egg_name']
for pkg in self.get_status().values()
if pkg['status'] != 'installable'])
return self._installed
def search(self, text):
""" Search for eggs with name or description containing the given text.
Returns a list of canonical names for the matching eggs.
"""
regex = re.compile(re.escape(text), re.IGNORECASE)
results = []
for product in self.index:
for cname, metadata in product.get('eggs', {}).iteritems():
name = metadata.get('name', '')
description = metadata.get('description', '')
if regex.search(name) or regex.search(description):
results.append(cname)
return results
def _req_list(self, reqs):
""" Take a single req or a list of reqs and return a list of
Req instances
"""
if not isinstance(reqs, list):
reqs = [reqs]
# Convert cnames to Req instances
for i, req in enumerate(reqs):
if not isinstance(req, Req):
reqs[i] = Req(req)
return reqs
def install(self, reqs):
reqs = self._req_list(reqs)
with self.history:
installed_count = 0
for req in reqs:
installed_count += self.enst.install(req)
# Clear the cache, since the status of several packages could now be
# invalid
self.clear_cache()
return installed_count
def uninstall(self, reqs):
reqs = self._req_list(reqs)
with self.history:
for req in reqs:
self.enst.remove(req)
self.clear_cache()
return 1
if __name__ == '__main__':
    # Ad-hoc smoke test: hits the Enthought EPD repository over HTTPS.
    # NOTE(review): credentials are embedded in the URL below - move them out
    # of source control.
    #url = 'file://' + expanduser('~/buildware/scripts')
    url = 'https://EPDUser:Epd789@www.enthought.com/repo/epd/'
    r = Resources([url], verbose=1)
    req = Req('epd')
    print r.enst.chain.get_dist(req)
    r.enst.chain.print_repos()
    for v in r.get_status().itervalues():
        print '%(name)-20s %(version)16s %(a-ver)16s %(status)12s' % v
|
"""
This simulates the mlab-ns lookup request, whose code lives here:
https://code.google.com/p/m-lab/source/browse/server/mlabns/handlers/lookup.py?repo=ns
The difference in this module is that we don't support features which
ooni-support does not use and we augment features which ooni-support
would rely on if mlab-ns were to add those features.
Also, this is a twisted web server rather than appengine.
"""
import logging
from twisted.web import resource
from twisted.web.server import NOT_DONE_YET
class LookupSimulatorResource (resource.Resource):
    """Twisted resource simulating the mlab-ns /lookup endpoint.

    Only ``policy=all`` with ``format=json`` is supported; anything else
    yields a JSON error response.
    """

    def __init__(self, db):
        """db is a dict mapping { fqdn -> other_stuff }; inserts come from mlabsim.update."""
        resource.Resource.__init__(self)
        self._db = db
        self._log = logging.getLogger(type(self).__name__)

    def render_GET(self, request):
        # Validate query parameters, then reply with every db entry as JSON.
        self._log.debug('Request args: %r', request.args)
        try:
            policy = self._unpack_arg(request, 'policy')
            format = self._unpack_arg(request, 'format', 'json')
            if policy != 'all':
                raise BadParameter("Only 'policy=all' parameter supported.")
            if format != 'json':
                raise BadParameter("Only 'format=json' parameter supported.")
        except BadParameter, e:
            # NOTE(review): sendJsonErrorMessage / sendJsonResponse are not
            # standard twisted Request methods - presumably added elsewhere in
            # mlabsim; confirm.
            request.sendJsonErrorMessage(e.args[0])
        else:
            request.sendJsonResponse(self._db.values())
        return NOT_DONE_YET

    def _unpack_arg(self, request, key, default=None):
        """Return the single value of query arg *key*.

        `default` covers only the missing-key case (None means mandatory);
        multiple values are always rejected.
        """
        try:
            # list-unpacking raises ValueError when more than one value given
            [value] = request.args[key]
        except KeyError:
            if default is None:
                raise BadParameter('Missing %r parameter.' % (key,))
            else:
                return default
        except ValueError:
            raise BadParameter('Multiple %r parameters unsupported.' % (key,))
        else:
            return value
class BadParameter (Exception):
    """Raised when a lookup request carries a missing or unsupported parameter."""
|
import grpc
import time
from ray import serve
from ray.serve.drivers import DefaultgRPCDriver, DAGDriver
import asyncio
import aiohttp
from ray.serve.generated import serve_pb2, serve_pb2_grpc
import numpy as np
import click
import starlette
from typing import Optional
from serve_test_utils import save_test_results
@serve.deployment(
    num_replicas=1,
)
class D:
    """Trivial Serve deployment: returns the "a" field of the adapted request."""
    def __call__(self, input):
        # `input` is the adapted payload (a dict parsed by json_resolver on
        # the HTTP path; presumably the gRPC driver delivers a mapping with
        # the same "a" key - confirm against DefaultgRPCDriver).
        return input["a"]
async def measure_grpc_throughput_tps(data_size=1, duration_secs=10):
    """Issue gRPC Predict calls back-to-back for `duration_secs` seconds and
    return the number of completed requests per one-second window."""
    per_second_counts = []
    payload = bytes("123" * data_size, "utf-8")
    async with grpc.aio.insecure_channel("localhost:9000") as channel:
        stub = serve_pb2_grpc.PredictAPIsServiceStub(channel)
        for _ in range(duration_secs):
            window_start = time.time()
            completed = 0
            while time.time() - window_start < 1:
                await stub.Predict(serve_pb2.PredictRequest(input={"a": payload}))
                completed += 1
            per_second_counts.append(completed)
    return per_second_counts
async def measure_http_throughput_tps(data_size=1, duration_secs=10):
    """Issue HTTP GET requests back-to-back for `duration_secs` seconds and
    return the number of completed requests per one-second window."""
    per_second_counts = []
    payload = "123" * data_size
    async with aiohttp.ClientSession() as session:
        for _ in range(duration_secs):
            window_start = time.time()
            completed = 0
            while time.time() - window_start < 1:
                # inline fetch: issue one request and drain the body
                async with session.get("http://localhost:8000/", json={"a": payload}) as response:
                    await response.text()
                completed += 1
            per_second_counts.append(completed)
    return per_second_counts
async def trial(measure_func, data_size=1, num_clients=1):
    """Run `num_clients` concurrent copies of `measure_func(data_size)` and
    return (mean, std) over all per-second throughput samples, each rounded
    to two decimals."""
    per_client_stats = await asyncio.gather(
        *(measure_func(data_size) for _ in range(num_clients))
    )
    samples = [tps for client_stats in per_client_stats for tps in client_stats]
    return round(np.mean(samples), 2), round(np.std(samples), 2)
async def json_resolver(request: starlette.requests.Request):
    """HTTP adapter for DAGDriver: parse the request body as JSON."""
    return await request.json()
@click.command()
@click.option("--http-test", is_flag=True, type=bool, default=False)
@click.option(
    "--data-size",
    # NOTE(review): the default is the string "1"; click coerces it via
    # type=int, so this works but an int literal would be cleaner.
    default="1",
    type=int,
)
def main(http_test: Optional[bool], data_size: Optional[int]):
    """Benchmark Serve throughput over HTTP (--http-test) or gRPC (default)
    and save the mean TPS to a JSON file under /tmp."""
    test_name = "gRPC"
    if http_test:
        test_name = "http"
        # HTTP path: DAGDriver with a JSON adapter in front of D
        serve.run(DAGDriver.bind(D.bind(), http_adapter=json_resolver))
        throughput_mean_tps, throughput_std_tps = asyncio.run(
            trial(measure_http_throughput_tps, data_size=data_size)
        )
    else:
        # gRPC path: default gRPC driver
        serve.run(DefaultgRPCDriver.bind(D.bind()))
        throughput_mean_tps, throughput_std_tps = asyncio.run(
            trial(measure_grpc_throughput_tps, data_size=data_size)
        )
    print(throughput_mean_tps, throughput_std_tps)
    save_test_results(
        {test_name: throughput_mean_tps},
        default_output_file=f"/tmp/serve_protocol_{test_name}_benchmark.json",
    )


if __name__ == "__main__":
    main()
|
#!/usr/local/opt/python3/bin/python3
import cv2
import numpy as np
# Draw a red pattern of 10x10 squares on a 300x300 black canvas (two offset
# grids), then a green filled circle in the centre, and display the result.
canvas = np.zeros((300, 300, 3), dtype='uint8')
red = (0, 0, 255)    # BGR
green = (0, 255, 0)  # BGR
for row_offset, col_offset in ((0, 10), (10, 0)):
    for row in range(row_offset, 300, 20):
        for col in range(col_offset, 300, 20):
            cv2.rectangle(canvas, (col, row), (col + 10, row + 10), red, -1)
center = (canvas.shape[1] // 2, canvas.shape[0] // 2)
cv2.circle(canvas, center, canvas.shape[1] // 5, green, -1)
cv2.imshow("canvas", canvas)
cv2.waitKey(0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 13:34:38 2018
@author: ivabruge
GeniePath: Graph Neural Networks with Adaptive Receptive Paths
Paper: https://arxiv.org/abs/1802.00910
this model uses an LSTM on the node reductions of the message-passing step
we store the network states at the graph node, since the LSTM variables are not transmitted
"""
from dgl.graph import DGLGraph
import torch
import torch.nn as nn
import torch.nn.functional as F
import argparse
from dataset import load_data, preprocess_features
class NodeReduceModule(nn.Module):
    """Multi-head GAT-style attention reduce step.

    For each head: project self and neighbor representations, score every
    message with a one-layer attention, and return the attention-weighted
    sum (one tensor per head).
    """
    def __init__(self, input_dim, num_hidden, num_heads=3, input_dropout=None,
                 attention_dropout=None, act=lambda x: F.softmax(F.leaky_relu(x), dim=0)):
        super(NodeReduceModule, self).__init__()
        self.num_heads = num_heads
        self.input_dropout = input_dropout          # dropout on projected inputs (None = off)
        self.attention_dropout = attention_dropout  # dropout on attention weights (None = off)
        # attention normalizer; softmax over dim 0, i.e. across the messages
        self.act = act
        # one projection and one attention scorer per head
        self.fc = nn.ModuleList(
            [nn.Linear(input_dim, num_hidden, bias=False)
             for _ in range(num_heads)])
        self.attention = nn.ModuleList(
            [nn.Linear(num_hidden * 2, 1, bias=False) for _ in range(num_heads)])

    def forward(self, msgs):
        """msgs: iterable of (src_repr, dst_repr) pairs for one node.
        Returns a list with one reduced (1, num_hidden) tensor per head."""
        src, dst = zip(*msgs)
        hu = torch.cat(src, dim=0)  # neighbor repr
        hv = torch.cat(dst, dim=0)  # self repr, one row per message
        msgs_repr = []
        # iterate for each head
        for i in range(self.num_heads):
            # calc W*hself and W*hneigh
            hvv = self.fc[i](hv)
            huu = self.fc[i](hu)
            # calculate W*hself||W*hneigh
            h = torch.cat((hvv, huu), dim=1)
            a = self.act(self.attention[i](h))
            # NOTE(review): F.dropout defaults to training=True, so these
            # dropouts stay active even in eval mode - confirm intended.
            if self.attention_dropout is not None:
                a = F.dropout(a, self.attention_dropout)
            if self.input_dropout is not None:
                hvv = F.dropout(hvv, self.input_dropout)
            # NOTE(review): the weighted sum uses the projected *self* reprs
            # (hvv), not the projected neighbors (huu) - verify against the
            # GeniePath paper.
            h = torch.sum(a * hvv, 0, keepdim=True)
            msgs_repr.append(h)
        return msgs_repr
class NodeUpdateModule(nn.Module):
    """Per-head residual + activation, multi-head aggregation, and an
    optional one-step LSTM whose hidden/cell states are carried on the node
    under 'h_i' / 'c' (GeniePath's adaptive-depth component).
    """
    def __init__(self, residual, fc, act, aggregator, lstm_size=0):
        """
        residual   -- add fc[i](node['h']) to each head's message
        fc         -- per-head linear layers (shared with NodeReduceModule)
        act        -- activation applied per head (None to skip)
        aggregator -- callable merging the list of head tensors into one
        lstm_size  -- hidden size of the depth LSTM; 0 disables it
        """
        super(NodeUpdateModule, self).__init__()
        self.residual = residual
        self.fc = fc
        self.act = act
        self.aggregator = aggregator
        if lstm_size:
            self.lstm = nn.LSTM(input_size=lstm_size, hidden_size=lstm_size, num_layers=1)
        else:
            self.lstm = None

    def forward(self, node, msgs_repr):
        """Return the node's new state dict {'h', 'c', 'h_i'}."""
        # apply residual connection and activation for each head
        for i in range(len(msgs_repr)):
            if self.residual:
                msgs_repr[i] = msgs_repr[i] + self.fc[i](node['h'])
            if self.act is not None:
                msgs_repr[i] = self.act(msgs_repr[i])
        # aggregate multi-head results
        h = self.aggregator(msgs_repr)
        if self.lstm is None:
            return {'h': h, 'c': None, 'h_i': None}
        # Recover the LSTM state stored on the node (zeros on the first layer).
        # (The original pre-assigned c0 = zeros and immediately overwrote it.)
        c0 = torch.zeros(h.shape) if node['c'] is None else node['c']
        h0 = torch.zeros(h.shape) if node['h_i'] is None else node['h_i']
        # add a leading dim to form a length-1 sequence: (seq=1, batch, size)
        h, (h_i, c) = self.lstm(h.unsqueeze(0), (h0.unsqueeze(0), c0.unsqueeze(0)))
        # remove the sequence dim again
        h = torch.squeeze(h, 0)
        # BUG FIX: the original squeezed `h` a second time here (collapsing
        # the batch dimension when batch == 1) instead of squeezing `h_i`.
        h_i = torch.squeeze(h_i, 0)
        c = torch.squeeze(c, 0)
        return {'h': h, 'c': c, 'h_i': h_i}
class GeniePath(nn.Module):
    """GeniePath (arXiv:1802.00910): GAT-style message passing whose node
    updates run through an LSTM, yielding an adaptive receptive path."""
    def __init__(self, num_layers, in_dim, num_hidden, num_classes, num_heads,
                 activation, input_dropout, attention_dropout, use_residual=False):
        super(GeniePath, self).__init__()
        self.input_dropout = input_dropout
        self.reduce_layers = nn.ModuleList()
        self.update_layers = nn.ModuleList()
        # hidden layers
        for i in range(num_layers):
            if i == 0:
                last_dim = in_dim
                residual = False      # no residual on the raw-feature layer
            else:
                last_dim = num_hidden * num_heads  # because of concat heads
                residual = use_residual
            self.reduce_layers.append(
                NodeReduceModule(last_dim, num_hidden, num_heads, input_dropout,
                                 attention_dropout))
            self.update_layers.append(
                NodeUpdateModule(residual, self.reduce_layers[-1].fc, activation,
                                 lambda x: torch.cat(x, 1), num_hidden * num_heads))
        # projection layer: single head, summed, no activation, no LSTM
        self.reduce_layers.append(
            NodeReduceModule(num_hidden * num_heads, num_classes, 1, input_dropout,
                             attention_dropout))
        self.update_layers.append(
            NodeUpdateModule(False, self.reduce_layers[-1].fc, None, sum))

    def forward(self, g):
        """Run all layers over DGLGraph *g* (node features under 'h');
        returns the per-node logits stacked along dim 0."""
        g.register_message_func(lambda src, dst, edge: (src['h'], dst['h']))
        for reduce_func, update_func in zip(self.reduce_layers, self.update_layers):
            # apply dropout
            if self.input_dropout is not None:
                # TODO (lingfan): use batched dropout once we have better api
                # for global manipulation
                for n in g.nodes():
                    g.node[n]['h'] = F.dropout(g.node[n]['h'], p=self.input_dropout)
                    g.node[n]['c'] = None
                    g.node[n]['h_i'] = None
            g.register_reduce_func(reduce_func)
            g.register_update_func(update_func)
            g.update_all()
        logits = [g.node[n]['h'] for n in g.nodes()]
        logits = torch.cat(logits, dim=0)
        return logits

    # Train on graph g with features and one-hot labels. Accepts a loss class
    # and an optimizer class which implements optimizer.step().
    # NOTE(review): this overrides nn.Module.train(mode=True) with an
    # incompatible signature; callers cannot use the usual model.train().
    def train(self, g, features, labels, epochs, loss_f=torch.nn.NLLLoss, loss_params={}, optimizer=torch.optim.Adam, optimizer_parameters=None, lr=0.001, ignore=[0], quiet=False):
        labels = torch.LongTensor(labels)
        # convert one-hot rows to class indices
        _, labels = torch.max(labels, dim=1)
        if optimizer_parameters is None:
            optimizer_parameters = self.parameters()
        # instantiate optimizer on the given params
        optimizer_f = optimizer(optimizer_parameters, lr)
        # BUG FIX: iterate over the `epochs` argument; the original used the
        # module-level `args.epochs`, which only exists when run as a script.
        for epoch in range(epochs):
            # reset grad
            optimizer_f.zero_grad()
            # reset node features from the (sparse) feature matrix
            for n in g.nodes():
                g.node[n]['h'] = torch.FloatTensor(features[n].toarray())
            # forward
            logits = self.forward(g)
            # instantiate loss with the passed parameters (e.g. class weights)
            loss = loss_f(**loss_params)
            # BUG FIX: filter into locals instead of reassigning `labels` -
            # the original shrank `labels` each epoch, so from the second
            # epoch on the label rows no longer lined up with the full-graph
            # logits.
            keep = [i for i, a in enumerate(labels) if a not in ignore]
            out = loss(logits[keep, :], labels[keep])
            if not quiet:
                print("epoch {} loss: {}".format(epoch, out))
            out.backward()
            optimizer_f.step()
def main(args):
    """Load the dataset, build a GeniePath model, and train it."""
    # load and preprocess dataset
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(args.dataset)
    features = preprocess_features(features)
    # initialize graph
    g = DGLGraph(adj)
    # create model (dropout rates come straight from the CLI flags)
    model = GeniePath(args.num_layers,
                      features.shape[1],
                      args.num_hidden,
                      y_train.shape[1],
                      args.num_heads,
                      F.elu,
                      args.idrop,
                      args.adrop,
                      args.residual)
    model.train(g, features, y_train, epochs=args.epochs)
if __name__ == '__main__':
    # Command-line entry point; defaults mirror the usual GAT configuration.
    parser = argparse.ArgumentParser(description='GAT')
    parser.add_argument("--dataset", type=str, required=True,
                        help="dataset name")
    parser.add_argument("--epochs", type=int, default=10,
                        help="training epoch")
    parser.add_argument("--num-heads", type=int, default=3,
                        help="number of attentional heads to use")
    parser.add_argument("--num-layers", type=int, default=1,
                        help="number of hidden layers")
    parser.add_argument("--num-hidden", type=int, default=8,
                        help="size of hidden units")
    parser.add_argument("--residual", action="store_true",
                        help="use residual connection")
    parser.add_argument("--lr", type=float, default=0.001,
                        help="learning rate")
    parser.add_argument("--idrop", type=float, default=0.2,
                        help="Input dropout")
    parser.add_argument("--adrop", type=float, default=0.2,
                        help="attention dropout")
    args = parser.parse_args()
    print(args)
    main(args)
## Import all the required libraries
import random
'''
Step 1 - DNA Sequencing
DNA sequencing is the process of determining the sequence of nucleotide bases (As, Ts, Cs, and Gs) in a piece of DNA.
'''
def dnaSequencing():
    """Build a random encoding table mapping each of the 256 byte characters
    (chr(0)..chr(255)) to a unique 4-base DNA sequence over A/T/C/G.

    Returns a dict of the form { character : 4-letter sequence }.
    """
    nucleotide_bases = ['A', 'T', 'C', 'G']
    # All 4^4 = 256 possible sequences of length 4.
    sequences = [b1 + b2 + b3 + b4
                 for b1 in nucleotide_bases
                 for b2 in nucleotide_bases
                 for b3 in nucleotide_bases
                 for b4 in nucleotide_bases]
    # Shuffle the 256 characters and pair them 1:1 with the sequences.
    # (The original drew each character by rejection sampling over a
    # visited array, which degrades badly as the table fills; a single
    # shuffle produces a uniform random bijection in O(n).)
    characters = [chr(i) for i in range(256)]
    random.shuffle(characters)
    return dict(zip(characters, sequences))
'''
Step 2 - DNA Encoding.
Encode the text using encoding table. Replace all the characters with corresponding sequence.
'''
def dnaEncoding(encoding_table, text):
    """Encode *text* by replacing every character with its DNA sequence
    from *encoding_table*."""
    return "".join(encoding_table[character] for character in text)
'''
Step 3 - Transcription.
In Biochemistry, the process of transcription begins when an enzyme called RNA polymerase (RNA pol)
attaches itself to the template DNA strand and begins to catalyse the production of the complementary RNA,
called the mRNA.
'''
def transcription(encoded_text):
    """Transcribe a DNA string into its complementary mRNA string
    (A->U, T->A, C->G, G->C)."""
    original_complementary_nucleotide = {'A': 'U', 'T': 'A', 'C': 'G', 'G': 'C'}
    return "".join(original_complementary_nucleotide[base] for base in encoded_text)
# Demo: build a random table, DNA-encode the word, then transcribe to mRNA.
encoding_table = dnaSequencing()
encoded_text = dnaEncoding(encoding_table, 'eucaleptus')
mRNA = transcription(encoded_text)
from flask import session
from myforum import app
# NOTE(review): the session secret is hard-coded in source; load it from an
# environment variable or config for anything beyond local development.
app.secret_key = 'cxblmawefl345lrgf435f43ac541fanlm2356y0xvn1234'
# pagination size used by the forum views
app.PER_PAGE = 3
# NOTE(review): debug=True must not be used in production (enables the
# interactive debugger).
app.run(host='127.0.0.1', debug=True)
|
import sys
# Redirect stdin so input() reads from the bundled test-case file.
sys.stdin = open("D3_3032_input.txt", "r")
def gcd(a, b):
    """Extended Euclid: return (x, y) such that a*x + b*y == gcd(a, b).

    NOTE: despite its name this returns the Bezout coefficient pair, not
    the gcd itself (the problem prints the coefficients).
    """
    x, y = 0, 1
    u, v = 1, 0
    while a != 0:
        quotient, remainder = divmod(b, a)
        # rotate the coefficient pairs exactly as the classic iteration does
        x, u = u, x - u * quotient
        y, v = v, y - v * quotient
        b, a = a, remainder
    return x, y
# One test case per line: two integers A B; print "#<n> " followed by the
# Bezout coefficients of (A, B).
T = int(input())
for test_case in range(T):
    A, B = map(int, input().split())
    print("#{} ".format(test_case + 1), end="")
    print(*gcd(A, B))
import json
import os
import socket
import struct
import requests
class RdpServer:
    """Minimal TCP file-drop server used by the RDP honeypot.

    Accepts connections, reads a struct-packed header ('128sI': filename +
    file size), streams the file body into ./file, then reports it via
    checkFile().
    """
    def __init__(self, backlog=5, addr=('0.0.0.0', 7788)):
        # Default socket() gives AF_INET / SOCK_STREAM, i.e. IPv4 + TCP.
        self.serverSocket = socket.socket()
        # Bind the listening address and port.
        self.serverSocket.bind(addr)
        # Start listening with the given backlog.
        self.serverSocket.listen(backlog)

    def link_one_client(self):
        """Accept clients forever, receiving one file per connection."""
        # All received files land in ./file (process-wide chdir).
        os.chdir("file")
        while True:
            # Block until a client connects.
            clientSocket, addr = self.serverSocket.accept()
            # Log the peer address.
            print("建立连接\n目标主机地址为:{0}".format(addr))
            # Fixed-size header: 128-byte filename + unsigned int size.
            fhead = clientSocket.recv(struct.calcsize('128sI'))
            # print(fhead)
            if not fhead:
                continue
            filename, filesize = struct.unpack('128sI', fhead)
            print(filename.decode().strip('\00'))
            # NOTE(review): the filename comes straight from the client - a
            # name like "../x" would escape the "file" directory; sanitize it.
            with open(filename.decode().strip('\00'), 'wb') as fl:
                ressize = filesize  # bytes still expected
                while True:
                    if ressize > 1024:
                        filedata = clientSocket.recv(1024)
                    else:
                        filedata = clientSocket.recv(ressize)
                    fl.write(filedata)
                    ressize -= len(filedata)
                    # NOTE(review): if the peer disconnects early, recv()
                    # returns b'' and this loop never terminates.
                    if ressize <= 0:
                        print("传输完成")
                        checkFile()
                        break
def checkFile():
    """Report the most recent log entry to the collector endpoint.

    Entries for still-open connections ("Connecting...") are skipped.  When
    the attacker logged in successfully, the capture file whose name contains
    the connection timestamp is attached to the report.
    """
    logs = readLog()
    latest = logs[0]
    if latest['DisconnectTime'] == "Connecting...":
        # connection still open - nothing to report yet
        return
    url = 'http://10.21.196.121/rdpHoneyPot/'
    data = {'ip': latest['ip'], 'port': latest['port'],
            'ConnectTime': latest['ConnectTime'],
            'DisconnectTime': latest['DisconnectTime']}
    if latest['Success2Login'] == 'Yes':
        fileTime = getFileName(latest["ConnectTime"])
        fileName = ""
        for candidate in os.listdir():
            # BUG FIX: the original tested `if candidate.find(fileTime):`,
            # which is truthy for -1 (not found) and falsy for a match at
            # index 0 - i.e. the condition was effectively inverted.
            if fileTime in candidate:
                fileName = candidate
                break
        data['fileName'] = fileName
    requests.post(url, data)
def readLog():
    """Parse file/log.txt (a stream of concatenated python-dict reprs) and
    return a one-element list containing the newest entry."""
    with open('file/log.txt', 'r+', encoding="utf8") as f:
        mydict = f.read()
        # Inject commas between `}{` fragments (the first injected comma is
        # removed again), wrap in brackets, and swap quote styles so the
        # whole log parses as a JSON array.
        # NOTE(review): fragile - breaks if any logged value itself contains
        # '{', a comma before the first brace, or a single quote.
        mydict = mydict.replace('{', ',{').replace(',', '', 1)
        mydict = '[' + mydict + ']'
        mydict = mydict.replace("'", '"')
        mydict = json.loads(mydict)
        return mydict[-1:]
def getFileName(ConnectTime):
    """Convert a log timestamp ('YYYY-MM-DD hh:mm:ss.ffffff') into the
    capture-file form 'YYYYMMDD_hh-mm-ss'."""
    # one-pass character mapping: drop '-', space -> '_', ':' -> '-'
    table = str.maketrans({'-': None, ' ': '_', ':': '-'})
    return ConnectTime.translate(table).partition('.')[0]
if __name__ == '__main__':
    # Normal operation runs the server; currently wired to re-run only the
    # reporting step.
    # server = RdpServer()
    # print("Start")
    # server.link_one_client()
    checkFile()
|
"""
127 Word Ladder
Given two words (beginWord and endWord), and a dictionary's word list, find the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time.
Each transformed word must exist in the word list. Note that beginWord is not a transformed word.
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
You may assume no duplicates in the word list.
You may assume beginWord and endWord are non-empty and are not the same.
UPDATE (2017/1/20):
The wordList parameter had been changed to a list of strings (instead of a set of strings). Please reload the code definition to get the latest changes.
"""
# LUP Solution
# Build a map with the given words
# {'jk_': ['jkl'], 'de_': ['def'], 'gh_': ['ghi'], 'j_l': ['jkl'], '_ef': ['def'], '_kl': ['jkl'], 'a_c': ['abc'], '_hi': ['ghi'], 'g_i': ['ghi'], 'd_f': ['def'], '_bc': ['abc'], 'ab_': ['abc']}
# Now do BFS with words using queue. (word, count)
from collections import defaultdict, deque
class Solution(object):
    def ladderLength(self, beginWord, endWord, wordList):
        """
        :type beginWord: str
        :type endWord: str
        :type wordList: List[str]
        :rtype: int

        BFS over the wildcard map built by get_words_map; each queue entry is
        (word, length of the chain ending at word).  Returns 0 when no
        transformation sequence exists.
        """
        words_map = self.get_words_map(wordList)
        return self.bfs_words(beginWord, endWord, words_map)

    def bfs_words(self, begin, end, words_map):
        """Breadth-first search from `begin`; returns the shortest chain
        length to `end`, or 0 when unreachable."""
        visited, queue = set(), deque([(begin, 1)])
        while queue:
            word, count = queue.popleft()
            if word not in visited:
                visited.add(word)
                if word == end:
                    return count
                for i in range(len(word)):
                    temp = word[:i] + "_" + word[i + 1:]
                    # FIX: dict.has_key() was removed in Python 3; .get also
                    # avoids growing the defaultdict with missing patterns.
                    for tword in words_map.get(temp, []):
                        queue.append((tword, count + 1))
        return 0

    def get_words_map(self, wordList):
        """Map each one-wildcard pattern (e.g. 'h_t') to the words matching it."""
        mapr = defaultdict(list)
        for word in wordList:
            for i in range(len(word)):
                tword = word[:i] + "_" + word[i + 1:]
                mapr[tword].append(word)
        return mapr
import argparse
from torch.utils.data import DataLoader
from torch import multiprocessing
import torch.nn.functional as F
from torchvision import transforms
from bdcn import BDCN
from datasets import PoseLoadDataset, TensorProducer, NYUDataset
from utils import *
from PIL import Image
from net import Cycle
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Trainer unity server setup.')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size of images to read into memory')
parser.add_argument('--epochs', type=int, default=1,
help='Number of epochs to train')
parser.add_argument('--workers', type=int, default=4,
help='Number of workers for data loader')
parser.add_argument('--save', action='store_true',
help='Save positions and images to disk (default: no)')
parser.add_argument('--save-preds', action='store_true',
help='Save predictions to disk (default: no)')
parser.add_argument('--device', type=str, default='cpu',
help='Run on device (default: cpu)')
parser.add_argument('--show', action='store_true',
help='Show incoming batches (default: no)')
parser.add_argument('--compress', action='store_true',
help='Compressed communication between unity and python (default: no)')
parser.add_argument('--filename', type=str,
help='Filename for saved model (will be appended to models directory)')
parser.add_argument('-r', '--resume', type=str, default=None,
help='whether resume from some, default is None')
parser.add_argument('--dataset', action='store_true',
help='Train or test on stored dataset')
parser.add_argument('--lr', dest='base_lr', type=float, default=1e-8,
help='the base learning rate of model')
parser.add_argument('-m', '--momentum', type=float, default=0.9,
help='the momentum')
parser.add_argument('--weight-decay', type=float, default=0.0002,
help='the weight_decay of net')
parser.add_argument('--gamma', type=float, default=0.1,
help='the decay of learning rate, default 0.1')
parser.add_argument('--max_iter', type=int, default=40000,
help='max iters to train network, default is 40000')
parser.add_argument('--iter_size', type=int, default=10,
help='iter size equal to the batch size, default 10')
parser.add_argument('--side_weight', type=float, default=0.5,
help='the loss weight of sideout, default 0.5')
parser.add_argument('--fuse_weight', type=float, default=1.1,
help='the loss weight of fuse, default 1.1')
parser.add_argument('--average_loss', type=int, default=50,
help='smoothed loss, default is 50')
parser.add_argument('-s', '--snapshots', type=int, default=1000,
help='how many iters to store the params, default is 1000')
parser.add_argument('--step_size', type=int, default=10000,
help='the number of iters to decrease the learning rate, default is 10000')
parser.add_argument('--val_step_size', type=int, default=100,
help='every n steps doing a validation')
parser.add_argument('--display', type=int, default=20,
help='how many iters display one time, default is 20')
parser.add_argument('--param_dir', type=str, default='models',
help='the directory to store the params')
args = parser.parse_args()
np.random.seed(42)
device = torch.device(args.device)
params = {'batch_size': args.batch_size,
'num_workers': args.workers,
'pin_memory': True}
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
path = "./models/70_net_G_A.pth"
t = Cycle(3, 3).cuda()
t.load_state_dict(torch.load(path))
t.eval()
def gan(data):
o = t(data)
for i, d in enumerate(o):
o[i] = normalize((o[i] * 128 + 128).clamp(0, 255))
return o
#dataset = NYUDataset('data/unity_test', augment=False)
dataset = NYUDataset('data/nyudv2/test', augment=False)
#dataset = NYUDataset('data/bsds500', augment=False)
loader = DataLoader(dataset, **params, shuffle=False)
print("Test samples: {}".format(len(loader)*args.batch_size))
model = BDCN(pretrained=True).to(device)
model_state = torch.load(args.filename)
model.load_state_dict(model_state)
detransform = UnNormalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
print("Testing...")
for nr, (inputs, outputs) in enumerate(loader):
inputs = inputs.to(device)
#inputs = gan(inputs)
scales = [0.5, 0.75, 1.0]#[0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5]#[0.4, 0.425, 0.45, 0.475, 0.5, 0.525, 0.55, 0.575, 0.6, 0.65, 0.7, 0.75]#[0.25, 0.375, 0.4375, 0.5, 0.5625, 0.625, 0.75]#[0.5, 1.0, 1.5]#[0.25, 0.375, 0.4375, 0.5]
preds = []
for s in scales: # multiscale testing
preds += [nn.functional.interpolate(nn.functional.sigmoid(
model(nn.functional.interpolate(inputs, scale_factor=s, mode='bilinear'))[-1]),
size=(inputs.shape[2], inputs.shape[3]), mode='bilinear').data.cpu()]
#pred = model(nn.functional.interpolate(input, scale_factor=s, mode='bilinear'))
#for p in pred:
# preds += [nn.functional.interpolate(nn.functional.sigmoid(p),
# size=(inputs.shape[2], inputs.shape[3]), mode='bilinear').data.cpu()]
preds = torch.mean(torch.stack(preds), dim=0)
for i in range(len(inputs)):
image = detransform(inputs[i]).data.permute(1, 2, 0).cpu().numpy().astype(np.uint8)
pred_lines = preds[i, 0].data.cpu().numpy()
gt_lines = outputs[0][i].data.numpy()[0]
if args.save_preds:
name = dataset.inputs[nr * len(inputs) + i].split('.')[0]
im = Image.fromarray(image)
im.save("results/rgb/{}.png".format(name))
im = Image.fromarray((gt_lines * 255).astype(np.uint8))
im.save("results/gt/{}.png".format(name))
im = Image.fromarray(np.floor(pred_lines * 255).astype(np.uint8))
im.save("results/pred/{}.png".format(name))
|
from django.urls import path
from .views import *
# Movie app routes: listing, detail by slug, and review submission.
urlpatterns = [
    path("", MoviesView.as_view()),
    path("<slug:slug>/", MovieDetailView.as_view(), name='movie_detail'),
    path("review/<int:pk>/", AddReview.as_view(), name="add_review"),
]
import pickle
import sys
import numpy as np
# sys.path.append()
import codecs
from new_test_7_3 import code_method4
import functools
def cmp(x, y):
    """Old-style comparator ordering pairs by their first element in
    descending order (-1 / 0 / 1), for use with functools.cmp_to_key."""
    if x[0] == y[0]:
        return 0
    return -1 if x[0] > y[0] else 1
def get_pr_points(sorted_score, relevant_label):
    """Compute precision/recall curve points.

    :param sorted_score: list of (score, label) pairs
    :param relevant_label: the label counted as relevant
    :return: list of (recall, precision) points, one per distinct score
             (descending) whose group contains at least one relevant item
    """
    numPair = len(sorted_score)
    assert numPair > 0
    numRelevant = sum(1 for s in sorted_score if s[1] == relevant_label)
    # Group items by score once.  (The original rebuilt each group and each
    # score-prefix slice with full scans, which was quadratic; running
    # cumulative totals over the descending groups yield identical points.
    # A stray unused list comprehension was also dropped.)
    groups = {}
    for item in sorted_score:
        groups.setdefault(item[0], []).append(item)
    curvePoints = []
    seen_total = 0
    seen_relevant = 0
    for score in sorted(groups, reverse=True):
        items = groups[score]
        relevant_in_group = sum(1 for item in items if item[1] == relevant_label)
        seen_total += len(items)
        seen_relevant += relevant_in_group
        if relevant_in_group > 0:
            curvePoints.append((seen_relevant / float(numRelevant),
                                seen_relevant / float(seen_total)))
    return curvePoints
def save_points(points, saved_path):
    """Write (recall, precision) pairs to *saved_path*, one space-separated
    pair per line, UTF-8 encoded.

    FIX: the original opened the file without ever closing it, leaking the
    handle and risking unflushed data; a with-block closes it deterministically.
    """
    with codecs.open(saved_path, 'w', 'utf-8') as fout:
        for recall, precision in points:
            fout.write('{} {}\n'.format(recall, precision))
def filter_word(phrase, stopkey):
    """Return the words of *phrase* that are not stop words."""
    return [word for word in phrase if word not in stopkey]
def get_cause_effect(file_list, stopkey, now_id):
    """Extract the (cause, effect) word lists from raw line *now_id*.

    Each raw line looks like 'cause words|||effect words'; stop words are
    removed from both sides.
    """
    parts = file_list[now_id].replace('\n', '').split('|||')
    cause = [word for word in parts[0].split(' ') if word not in stopkey]
    effect = [word for word in parts[1].split(' ') if word not in stopkey]
    return [cause, effect]
def calculate_sentence_pair_likelihood_ratio(sent_X, sent_Y, COPA_word_pair_class_likelihood_ratios, choosed_pair,
                                             method="pairwise"):
    """Multiply the likelihood ratios of every cross-sentence word pair
    'x_y'; pairs absent from the table contribute a factor of 1.
    (`choosed_pair` and `method` are accepted for interface compatibility
    but unused.)"""
    product = 1.
    for word_x in sent_X:
        for word_y in sent_Y:
            ratio = COPA_word_pair_class_likelihood_ratios.get(word_x + "_" + word_y)
            if ratio is not None:
                product *= ratio
    return product
def calculate_result(data_list_lines1, data_list_lines2, COPA_word_pair_class_likelihood_ratios,
                     class_priors, choosed_pair, doc_name):
    """Score paired candidate lines, report two-alternative accuracy, dump a
    PR curve to 'PRCurve<doc_name>_ourmethods.txt', and return the mean
    average precision over the ranked posteriors.

    data_list_lines1/2 are parallel lists of (sent_X, sent_Y) pairs; for
    each index the alternative with the larger class posterior wins.
    """
    class_prior_ratio = class_priors[0] / class_priors[1]
    count = 0
    len_pattern = len(data_list_lines1)
    true_pattern = len(data_list_lines1)
    result_list = []
    for i in range(len_pattern):
        sent_X, sent_Y = data_list_lines1[i]
        class_likelihood_ratio_1 = calculate_sentence_pair_likelihood_ratio(
            sent_X, sent_Y, COPA_word_pair_class_likelihood_ratios, choosed_pair)
        sent_X_2, sent_Y_2 = data_list_lines2[i]
        class_likelihood_ratio_2 = calculate_sentence_pair_likelihood_ratio(
            sent_X_2, sent_Y_2, COPA_word_pair_class_likelihood_ratios, choosed_pair)
        # class_posterior_1 = 1 / (1 + class_likelihood_ratio_1 * class_prior_ratio)
        # class_posterior_2 = 1 / (1 + class_likelihood_ratio_2 * class_prior_ratio)
        # A likelihood ratio of exactly 1 means "no evidence at all"; such
        # alternatives are pushed to a tiny posterior instead of 1/(1+r).
        if class_likelihood_ratio_1 == 1:
            class_posterior_1 = 1e-10
        else:
            class_posterior_1 = 1 / (1 + class_likelihood_ratio_1 * class_prior_ratio)
        if class_likelihood_ratio_2 == 1:
            class_posterior_2 = 1e-10
        else:
            class_posterior_2 = 1 / (1 + class_likelihood_ratio_2 * class_prior_ratio)
        ratio = class_posterior_1 / class_posterior_2
        if ratio > 1:
            count = count + 1
        # ties (both alternatives equally scored) are removed from the base
        if ratio == 1:
            true_pattern -= 1
        # rank list for the PR curve: alternative 1 is the relevant class
        result_list.append([class_posterior_1, 1])
        result_list.append([class_posterior_2, 0])
    print(count, true_pattern, len_pattern)
    print(count/true_pattern)
    print('二选一准确率', count / true_pattern)
    # sort by posterior, descending (cmp orders on the first element)
    total_congju = sorted(result_list, key=functools.cmp_to_key(cmp))
    pr_save_path = 'PRCurve' + doc_name + '_ourmethods.txt'
    max_points = get_pr_points(total_congju, relevant_label=1)
    save_points(max_points, pr_save_path)
    flag_id = 0
    # for total_congju_sig in total_congju:
    #     print(flag_id, total_congju_sig)
    #     flag_id += 1
    # Mean average precision: average of precision at each relevant rank.
    count_map = 0
    num_true = 0
    num_total = 0
    for i in total_congju:
        num_total += 1
        if i[1] == 1:
            num_true += 1
            count_map += float(num_true / num_total)
    print(num_true, num_total)
    acc = float(count_map) / num_true
    print(acc)
    # print(count, true_pattern, len_pattern)
    # acc = float(count) / true_pattern
    # print("accuracy:", acc)
    return acc
def cal_all_word_pair_likelihood_ratios(COPA_word_pairs, dict_word2id, P_word_lemma_num, COPA_advcl_word_pair_counter,
                                        COPA_conj_word_pair_counter, COPA_inter_word_pair_counter, evidence_priors,
                                        evidence_counts, class_1_evidence_probs, class_0_evidence_probs):
    """Compute the class-0/class-1 likelihood ratio for every COPA word pair.

    For each pair "wordA_wordB", four smoothed evidence likelihoods are
    computed (advcl, conj, inter-sentence, and an "other" background term
    built from the two lemma frequencies), then combined with the per-class
    evidence probabilities into numerator (class 0) / denominator (class 1).

    Args:
        COPA_word_pairs: iterable of "wordA_wordB" strings.
        dict_word2id: unused; kept for call-site compatibility.
        P_word_lemma_num: word -> lemma frequency mapping.
        COPA_advcl_word_pair_counter / COPA_conj_word_pair_counter /
        COPA_inter_word_pair_counter: pair -> evidence count mappings.
        evidence_priors: unused; kept for call-site compatibility.
        evidence_counts: normalizers for the four evidence types, in order
            [advcl, conj, inter, other].
        class_1_evidence_probs / class_0_evidence_probs: P(evidence | class),
            length 4, same ordering as evidence_counts.

    Returns:
        dict mapping each word pair to its likelihood ratio
        (class-0 mass / class-1 mass).
    """
    COPA_word_pair_class_likelihood_ratios = {}
    for wp in COPA_word_pairs:
        parts = wp.split("_")
        word_a = parts[0]
        word_b = parts[1]
        # Additive smoothing constants (1e-4 / 2e-4 / 1e5) match the tuned values
        # used elsewhere in this module.
        advcl_likelihood = (COPA_advcl_word_pair_counter[wp] + 1e-4) / evidence_counts[0]
        conj_likelihood = (COPA_conj_word_pair_counter[wp] + 1e-4) / evidence_counts[1]
        inter_likelihood = (COPA_inter_word_pair_counter[wp] + 2e-4) / evidence_counts[2]
        other_likelihood = (P_word_lemma_num[word_a] * P_word_lemma_num[word_b] + 1e5) / evidence_counts[3]
        evidence_likelihoods = (advcl_likelihood, conj_likelihood, inter_likelihood, other_likelihood)
        numerator = sum(p * lik for p, lik in zip(class_0_evidence_probs, evidence_likelihoods))
        denominator = sum(p * lik for p, lik in zip(class_1_evidence_probs, evidence_likelihoods))
        COPA_word_pair_class_likelihood_ratios[wp] = numerator / denominator
    return COPA_word_pair_class_likelihood_ratios
def grid_tuning_five(islemma, doc_name, data_list_line1, data_list_line2, COPA_word_pairs, grid):
    """Run one grid point of the five-parameter tuning for corpus `doc_name`.

    grid = [advcl_p1, conj_p1, inter_p1, other_p1, prior_prob_other]: the four
    P(class=1 | evidence-type) values plus the prior mass of the "other"
    evidence type. Loads the corpus-specific counters from disk, derives the
    class priors and per-class evidence probabilities, computes likelihood
    ratios for all COPA word pairs, and prints the resulting accuracy.

    NOTE(review): `islemma` is unused, and the computed accuracy is printed but
    not returned.
    """
    print(doc_name)
    print("grid_tuning_five")
    print('获得基础数据')
    # dict_word2id: a dict mapping word to id.
    # word_id_counter: a list recording the counts of words (ids)
    # COPA_advcl_word_pair_counter:
    # COPA_conj_word_pair_counter:
    # COPA_inter_word_pair_counter:
    # '_bok' is stored on disk under the longer '_book_corpus' directory name.
    if doc_name == '_bok':
        doc_name_temp = '_book_corpus'
        with open('../database_original/temp' + doc_name_temp + '/str_id_' + 'word_count_delete.file', 'rb') as f:
            dict_word2id = pickle.load(f)
        print(len(dict_word2id))
        word_id_counter = np.load('../database_original/temp' + doc_name_temp + '/P_word_id.npy')
    else:
        with open('../database_original/temp' + doc_name + '/str_id_' + 'word_count_delete.file', 'rb') as f:
            dict_word2id = pickle.load(f)
        print(len(dict_word2id))
        word_id_counter = np.load('../database_original/temp' + doc_name + '/P_word_id.npy')
    with open("data/P_word_lemma_num.file", "rb") as f:
        P_word_lemma_num = pickle.load(f)
    with open('data/temp' + doc_name + '/concept_advcl_pair_pos' + '.file', 'rb') as f:
        COPA_advcl_word_pair_counter = pickle.load(f)
    with open('data/temp' + doc_name + '/concept_conj_pair_pos' + '.file', 'rb') as f:
        COPA_conj_word_pair_counter = pickle.load(f)
    with open('data/temp' + doc_name + '/concept_inter_pair_pos' + '.file', 'rb') as f:
        COPA_inter_word_pair_counter = pickle.load(f)
    choosed_pair = []
    print("读取数据完毕")
    prior_prob_other = grid[4]
    # Compute the prior proportions and totals of the four evidence types.
    evidence_priors, evidence_counts = code_method4.cal_evidence_priors(COPA_word_pairs, dict_word2id, word_id_counter,
                                                                        COPA_advcl_word_pair_counter, COPA_conj_word_pair_counter,
                                                                        COPA_inter_word_pair_counter, prior_prob_other)
    advcl_class_1_prob = grid[0]
    conj_class_1_prob = grid[1]
    inter_class_1_prob = grid[2]
    other_class_1_prob = grid[3]
    other_class_0_prob = 1 - other_class_1_prob
    advcl_class_0_prob = 1 - advcl_class_1_prob
    conj_class_0_prob = 1 - conj_class_1_prob
    inter_class_0_prob = 1 - inter_class_1_prob
    evidence_class_0_probs = [advcl_class_0_prob, conj_class_0_prob, inter_class_0_prob,
                              other_class_0_prob]
    evidence_class_1_probs = [advcl_class_1_prob, conj_class_1_prob, inter_class_1_prob,
                              other_class_1_prob]
    class_1_prob = 0.
    class_0_prob = 0.
    class_1_evidence_probs, class_0_evidence_probs = [], []
    # Marginalize over evidence types to get the class priors ...
    for i in range(4):
        class_0_prob += evidence_class_0_probs[i] * evidence_priors[i]
        class_1_prob += evidence_class_1_probs[i] * evidence_priors[i]
    # ... then apply Bayes' rule to get P(evidence | class).
    for i in range(4):
        class_0_evidence_probs.append(
            evidence_class_0_probs[i] * evidence_priors[i] / class_0_prob)
        class_1_evidence_probs.append(
            evidence_class_1_probs[i] * evidence_priors[i] / class_1_prob)
    class_priors = [class_0_prob, class_1_prob]
    # print("The evidence class 0 probs:", evidence_class_0_probs)
    # print("The evidence class 1 probs:", evidence_class_1_probs)
    # print("The class priors:", class_priors)
    # print("The evidence probs:", evidence_priors)
    # Compute the evidence-type likelihood ratio for every COPA word pair.
    COPA_word_pair_class_likelihood_ratios = \
        cal_all_word_pair_likelihood_ratios(
            COPA_word_pairs, dict_word2id, P_word_lemma_num, COPA_advcl_word_pair_counter,
            COPA_conj_word_pair_counter, COPA_inter_word_pair_counter, evidence_priors,
            evidence_counts, class_1_evidence_probs, class_0_evidence_probs)
    # print(COPA_word_pair_class_likelihood_ratios)
    print('COPA_word_pair_class_likelihood_ratios:', len(COPA_word_pair_class_likelihood_ratios))
    print([advcl_class_1_prob, conj_class_1_prob, inter_class_1_prob,
           other_class_1_prob, prior_prob_other])
    # Finally score the concrete COPA items with these ratios.
    acc = calculate_result(
        data_list_line1, data_list_line2, COPA_word_pair_class_likelihood_ratios,
        class_priors, choosed_pair, doc_name)
    print("accuracy:", acc)
def filter_nparray(doc_name):
    """Keep only cause-effect pairs attested in all three evidence counters.

    Loads the candidate pair set plus the advcl/conj/inter counters for corpus
    `doc_name`, retains pairs with a non-zero count in every counter, persists
    the selection to 'data/cause_effect_choose_pair.file' and returns it.
    """
    def _load(path):
        with open(path, 'rb') as fp:
            return pickle.load(fp)

    candidate_pairs = _load("data/cause_effect_pair.file")
    prefix = 'data/temp' + doc_name + '/concept_'
    advcl_counts = _load(prefix + 'advcl_pair_pos.file')
    conj_counts = _load(prefix + 'conj_pair_pos.file')
    inter_counts = _load(prefix + 'inter_pair_pos.file')
    # A pair survives only when every evidence type saw it at least once.
    chosen_pairs = {
        lemma_pair for lemma_pair in candidate_pairs
        if advcl_counts[lemma_pair] != 0
        and conj_counts[lemma_pair] != 0
        and inter_counts[lemma_pair] != 0
    }
    print(len(candidate_pairs), len(chosen_pairs))
    with open("data/cause_effect_choose_pair.file", "wb") as fp:
        pickle.dump(chosen_pairs, fp)
    return chosen_pairs
# 399 410 ---- coverage about 0.97 (translated from the original Chinese note)
if __name__ == '__main__':
    # Pre-tokenized COPA sentence pairs: line1 = correct alternative pairs,
    # line2 = wrong alternative pairs -- presumably; confirm with the data prep.
    data_list_line1 = np.load("data/data_list_line1" + ".npy", allow_pickle=True)
    data_list_line2 = np.load("data/data_list_line2" + ".npy", allow_pickle=True)
    # with open("data/cause_effect_pair" + ".file", "rb") as fi:
    #     COPA_word_pairs = pickle.load(fi)
    # print(len(COPA_word_pairs))
    # print(len(data_list_line2), len(data_list_line1))
    # Best grid points found per corpus (names encode the achieved accuracy).
    icw_grids_666 = [[0.15, 0.02, 0.1, 0.0001, 0.98], [0.3, 0.04, 0.2, 0.0001, 0.98]]
    bok_grids_642 = [[0.1, 0.04, 0.4, 0.0001, 0.9], [0.1, 0.04, 0.45, 0.0001, 0.9]]
    # 0.648
    gut_grids_606 = [[0.65, 0.06, 0.05, 0.0001, 0.95], [0.9, 0.08, 0.15, 0.0001, 0.85],
                     [0.95, 0.08, 0.15, 0.0001, 0.85]]
    # grids[i] pairs with choose[i]; index 0 is a placeholder.
    grids = [[], [0.15, 0.02, 0.1, 0.0001, 0.98], [0.65, 0.06, 0.05, 0.0001, 0.95], [0.1, 0.04, 0.4, 0.0001, 0.9]]
    choose = ['', '_icw', '_gut', '_bok']
    for i in range(1, 4):
        cause_effect_choose_pair = filter_nparray(choose[i])
        print('cause_effect_choose_pair', len(cause_effect_choose_pair))
        grid_tuning_five('', choose[i], data_list_line1, data_list_line2, cause_effect_choose_pair, grids[i])
        code_method4.print_time()
# 0.716
|
# coding:utf-8
# -*- WiMi (micro-payment) channel handlers -*-
import json
import words
import urllib
import logging
import tornado.web
import tornado.gen
from model.order_model import ThirdPayOrders
from model.table_base import Wanted
from tornado.httpclient import AsyncHTTPClient
class WmPrepayHandler(tornado.web.RequestHandler):
    """Create a WiMi prepay order.

    Forwards the client's payment request to the WiMi gateway, persists a
    ThirdPayOrders row on success and returns the gateway's appid/token to
    the caller. Python 2 code (urllib.urlencode).
    """
    # Partner id assigned by the WiMi gateway; echoed back in its response.
    CP = 'ol100013'
    # WiMi prepay endpoint.
    URL = "http://121.199.6.130:10086/apppay/apppay2"

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        # Required request arguments; a missing one raises tornado's 400.
        price = int(self.get_argument('fee'))
        pkg = self.get_argument('pkg')
        md5 = self.get_argument('md5')
        imei = self.get_argument('imei')
        ext = self.get_argument('oid')
        uid = self.get_argument('uid')
        channel_id = self.get_argument('cid')
        product = self.get_argument('pid', 'Unknown')
        params = {
            'fee': price,
            'extData': ext,  # our order id, round-tripped through the gateway
            'partnerid': self.CP,
            'pkgName': pkg,
            'md5': md5,
            'imei': imei
        }
        resp = yield AsyncHTTPClient().fetch('{}?{}'.format(self.URL, urllib.urlencode(params)))
        # Transport-level failure.
        if resp.error:
            logging.warning('query to remote server error:{}'.format(resp.body))
            self.set_status(601, 'remote server error:{}'.format(resp.body))
            return
        obj = json.loads(resp.body)
        # Gateway-level failure: 'code' must be the string "200".
        if 'code' not in obj or obj['code'] != "200":
            logging.warning('query to remote server error:{}'.format(resp.body))
            self.set_status(601, 'remote server error:{}'.format(obj['msg']))
            return
        # Sanity check that the response is really addressed to us.
        if self.CP != obj['partnerid']:
            self.set_status(602, 'partnerid error.')
            return
        appid = obj['data']['appid']
        token_id = obj['data']['token_id']
        # Persist the order so the callback/status handlers can find it.
        order = ThirdPayOrders()
        order.pkg = pkg
        order.order_id = ext
        order.uid = uid
        order.trans_id = obj['orderid']
        order.tag = words.WiMiTag
        order.price = price
        order.product = product
        order.channel = channel_id
        order.ext = '{{"appid":"{}", "token":"{}", "pkg":"{}", "imei":"{}"}}'.format(appid, token_id, pkg, imei)
        order.save()
        self.write('{{"appid":"{}", "token":"{}"}}'.format(appid, token_id))
class WmCallbackHandler(tornado.web.RequestHandler):
    """Server-to-server payment notification from the WiMi gateway.

    Looks up the order created by WmPrepayHandler by our order id and marks
    it successful; replies 'ok'/'fail' as the gateway expects.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        trans_id = self.get_argument('orderid')
        fee = self.get_argument('fee')
        order_id = self.get_argument('extData')  # our order id
        openid = self.get_argument('openid')
        appid = self.get_argument('appid')
        # trans_id/fee/openid/appid are only used for this log line; the
        # callback amount is NOT verified against the stored price.
        # NOTE(review): confirm whether fee verification is intentional to skip.
        debug_str = '{}&{}&{}&{}&{}'.format(trans_id, fee, order_id, openid, appid)
        db_order = ThirdPayOrders()
        db_order.order_id = order_id
        db_order.channel = Wanted  # Wanted marks fields to be filled from the DB
        res = yield tornado.gen.Task(db_order.update_from_db)
        if not res:
            logging.warning('no order with info[{}] in db.'.format(debug_str))
            self.write('fail')
            return
        else:
            db_order.stat = words.WiMiSuccessStat
            db_order.visible = self.settings['server_conf'].may_deduct(db_order.channel)
            db_order.success = True
            db_order.update_to_db()
            self.write('ok')
class WmOrderStatusHandler(tornado.web.RequestHandler):
    """Client poll for the status of one WiMi order.

    Responds {"code":200, "price":N} when the order exists, belongs to the
    WiMi channel and completed successfully; {"code":201, "price":0} otherwise.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        oid = self.get_argument('oid')
        order = ThirdPayOrders()
        order.order_id = oid
        # Wanted marks the fields the DB lookup should populate.
        order.price = Wanted
        order.stat = Wanted
        order.success = Wanted
        order.tag = Wanted
        res = yield tornado.gen.Task(order.update_from_db)
        logging.info(order)
        if not res or order.tag != words.WiMiTag or not order.success:
            self.write('{"code":201, "price": 0}')
        else:
            self.write('{{"code":200, "price": {}}}'.format(order.price))
class WmQueryHandler(tornado.web.RequestHandler):
    """List a user's successful WiMi orders for a given channel.

    Returns {"code":200, "orders":[{"price":..,"oid":..}, ...]} or a bare
    '[]' when the lookup fails.
    """
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        uid = self.get_argument('uid')
        channel = self.get_argument('channel')
        # WARNING(review): the WHERE clause is built by string formatting from
        # request arguments -- SQL injection risk; should use parameterized
        # queries if the model layer supports them.
        success, db_orders = \
            yield tornado.gen.Task(ThirdPayOrders.get_some, 'uid="{}" and channel="{}"'.format(uid, channel))
        if not success:
            self.write('[]')
            return
        res = []
        for item in db_orders:
            # Keep only completed orders from the WiMi channel.
            if item.tag != words.WiMiTag:
                continue
            if item.stat != words.WiMiSuccessStat:
                continue
            res.append({'price': item.price, 'oid': item.order_id})
        self.write(json.dumps({"code": 200, "orders": res}))
|
# coding: utf-8
import xlrd
import xlwt
import os
import logging
from customize import getCellStyle, getWidthByColumn, getColOutlineLevel
from constants import requiredColumnsSorted
from constants import headRowIdx, firstDataRowIdx, fbpSheetName, pathSep
from dataHandler import DataHandler
from FBPLoader import FBPLoader
#File names and sheet name
inputFileName = "LTE eNB Feature Build Plan.xlsm"  # upstream FBP workbook (input)
raBacklogFileName = "OM RA Backlog.xls"  # generated backlog workbook (output)
workingFolder = os.getcwd()  # all paths resolve against the launch directory
fbpFileAbsPath = workingFolder + pathSep + inputFileName
raBacklogPath = workingFolder + pathSep + raBacklogFileName
class RABacklogGenerator(object):
    """Generates the OM RA backlog workbook from the upstream FBP workbook.

    Merges rows filtered from the FBP sheet with any previously generated
    backlog sheet (to preserve manual edits/order), then writes a formatted
    xlwt workbook. Python 2 code (xlrd/xlwt, `except Exception, e`).
    """
    def __init__(self, fbpLoader, raBacklogPath, handler):
        # fbpLoader: FBPLoader over the upstream FBP workbook.
        # raBacklogPath: absolute path of the backlog workbook to (re)write.
        # handler: forwarded to DataHandler -- presumably a logging handler;
        #   TODO confirm against DataHandler's signature.
        self._fbp = fbpLoader
        self._fbpSheet = self._fbp.getBacklogSheetObj()
        self._requiredFBPColumnsSorted = self._fbp._requiredColumnsSorted
        # Copy so that loadRAData() can overwrite it without touching the FBP's list.
        self._requiredColumnsSorted = [v for v in self._requiredFBPColumnsSorted]
        self._fbpIndexMap = fbpLoader.getIndexes()
        self._raDataIn = []
        import sys  # NOTE(review): unused import, candidate for removal
        self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.DEBUG)
        self._raSheetName = u'backlog'
        self._raBacklogPath = raBacklogPath
        self.loadRABacklog()
        self._dataHandler = DataHandler(self._raDataIn, srcHdr = self._requiredColumnsSorted, \
                                        dstHdr = self._requiredFBPColumnsSorted, handler = handler)
    def loadRABacklog(self):
        """Load the existing backlog sheet if valid, then create the output sheet."""
        try:
            self._raSheetIn = FBPLoader(self._raBacklogPath, self._raSheetName, headerIdx = 0, retainAllReqHdrs = True)
            self.loadRAData()
        except Exception, e:
            # A missing/invalid previous backlog is expected on first run;
            # start from an empty input instead of failing.
            self._logger.warning("Available input sheet not valid, will be cleared:%s\n"%e.message)
            self._raSheetIn = []
        #Result sheet
        book = xlwt.Workbook(raBacklogPath, style_compression = 2)
        sheet = book.add_sheet(self._raSheetName)
        self._raSheet = sheet
    def loadRAData(self):
        '''Load RA data by existing order, the header row is also saved here'''
        raSheet = self._raSheetIn.getBacklogSheetObj()
        # Skip row 0 (header); keep the remaining rows in sheet order.
        self._raDataIn = [raSheet.row_values(rowId) for rowId in range(1, raSheet.nrows)]
        self._requiredColumnsSorted = self._raSheetIn.getAllHeaders()
    def generate(self):
        ''' Generate OM RA backlog per filtering/merge/purge/sort '''
        # Columns consulted by the row filter, positional in filterCriteria.
        columnsForRAChecking = ['Requirement Area',
                                'i_FT', 'Feature Team',
                                'i_TDD CPRI H',
                                'Site_BTSOM', 'OM LTE_Site', 'OMRefa_Site',
                                ]
        mayHaveImpacts = lambda x: x == 'x' or x == 'u' #TM impacts or our imacts
        # Keep a row when any of the team/impact markers below match.
        filterCriteria = lambda cols : (cols[0] == "TDD-AifSiteS") \
            or ( mayHaveImpacts(cols[1]) and ((cols[2] == 'HZ03') or (cols[2] == 'HZ04')) )\
            or ( mayHaveImpacts(cols[3]) ) \
            or (cols[4] == 'Hzu') or (cols[5] == 'Hzu') or (cols[6] == 'Hzu')
        rowIds = self._fbp.filterRowsByColPred(columnsForRAChecking, filterCriteria)
        self._logger.info("Totally %d records filtered from upstream FBP file", len(rowIds))
        getCellValue = lambda rowId, rowHdr: self._fbpSheet.cell(rowId, self._fbpIndexMap[rowHdr]).value
        from copy import copy
        # Same filter extended with the feature id column, used to decide
        # whether a feature from the old backlog still exists in the FBP.
        columnsForFeatureCheckings = copy(columnsForRAChecking)
        columnsForFeatureCheckings.append('Feature or Subfeature')
        isFidValidInFBP = lambda fid: len(self._fbp.filterRowsByColPred(columnsForFeatureCheckings,
                                        lambda cols: filterCriteria(cols[:-1]) and cols[-1] == fid )) > 0
        self._dataHandler.collectAndMergeData(rowIds, getCellValue, isFidValidInFBP)
        self._dataHandler.purgeDoneFeatures()
        self._dataHandler.sortData()
        self._raData = self._dataHandler.getData()
        self.formatAndSaveData()
    def formatAndSaveData(self):
        ''' Write data into new workbook '''
        sheet = self._raSheet
        book = self._raSheet.get_parent()
        header = self._raData[0]
        #NOTE: row styles will be overwritten when write is called!
        [sheet.write(rowId, colId, rowData[colId], getCellStyle(rowId, colId, rowData, header)) \
            for rowId, rowData in zip(range(0, len(self._raData)), self._raData)\
            for colId in range(0, len(rowData))]
        #Format width
        [sheet.col(colId).set_width(getWidthByColumn(colId, self._raData, header)) \
            for colId in range(0, len(header))]
        #Set outline
        for colId in range(0, len(header)):
            sheet.col(colId).level = getColOutlineLevel(colId, header)
        #Set freeze
        sheet.panes_frozen = True
        sheet.horz_split_pos = 1 #Always show header
        sheet.vert_split_pos = header.index('TDD Release')
        book.save(self._raBacklogPath)
def test1(fbp, handler=None):
    """Ad-hoc smoke test: generate the backlog from an already-loaded FBP.

    Bug fix: RABacklogGenerator requires a third `handler` argument, so the
    old two-argument call raised TypeError. `handler` defaults to None to
    keep existing call sites working.
    """
    #fbp = FBPLoader(fbpFileAbsPath, fbpSheetName)
    RABacklogGenerator(fbp, raBacklogPath, handler).generate()
def generate():
    """End-to-end entry point: set up logging, load the FBP, write the backlog."""
    handler = initLogger('rabp.log')
    logging.getLogger().info("\n~~~~~~~~~~~~Start~~~~~~~~~~~\n")
    fbp = FBPLoader(fbpFileAbsPath, fbpSheetName)
    logging.getLogger().info("~~~~~~~~~~~~FBP Loaded now~~~~~~~~~~~")
    RABacklogGenerator(fbp, raBacklogPath, handler).generate()
    logging.getLogger().info("\n~~~~~~~~~~~~End~~~~~~~~~~~\n")
def initLogger(fname):
    """Attach a DEBUG rotating file handler to the root logger.

    Rotates at 2 MiB, keeping 10 backups. Returns the handler so callers can
    pass it on (e.g. to RABacklogGenerator).
    """
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler(filename=fname, maxBytes=2 * 1024 * 1024, backupCount=10)
    file_handler.setFormatter(
        logging.Formatter(fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
    file_handler.setLevel(logging.DEBUG)
    root_logger = logging.getLogger()
    root_logger.addHandler(file_handler)
    root_logger.setLevel(logging.DEBUG)
    return file_handler
def testLogger():
    """Manual check that initLogger wires up file logging for child loggers.

    Fix: the last line carried a stray trailing '|' artifact that made the
    statement a syntax error.
    """
    handler = initLogger('test.log')
    logger = logging.getLogger("test")
    logger.setLevel(logging.DEBUG)
    logger.info("test log")
    logging.getLogger("test.a").debug("debug info")
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack
import pyaudio
import time
# Give the audio device a moment to settle before opening the stream.
time.sleep(1)
np.set_printoptions(suppress=True) # don't use scientific notation
CHUNK = 1024 # number of data points to read at a time
# NOTE(review): 1000 Hz is unusually low for an audio device; confirm the
# hardware actually supports this rate.
RATE = 1000 # time resolution of the recording device (Hz)
WIDTH = 2  # sample width in bytes (16-bit signed)
p=pyaudio.PyAudio() # start the PyAudio class
stream=p.open(format=p.get_format_from_width(WIDTH),channels=1,rate=RATE,input=True,
              frames_per_buffer=CHUNK) #uses default input
# ax = getNewPlot()
fig, ax = plt.subplots()
count = 82  # frequency (Hz) of the first synthesized sine component below
while True:
    plt.ion()  # interactive mode so the figure refreshes inside the loop
    # NOTE(review): np.fromstring is deprecated; np.frombuffer is the modern form.
    data = np.fromstring(stream.read(CHUNK),dtype=np.int16)
    data = data * np.hanning(len(data)) # smooth the FFT by windowing data
    # NOTE(review): `data` (the windowed microphone samples) is never used below;
    # the plot shows the FFT of a synthesized test signal instead.
    # fft = abs(np.fft.fft(data).real)
    # fft = fft[:int(len(fft)/2)] # keep only first half
    # freq = np.fft.fftfreq(CHUNK,1.0/RATE)
    # freq = freq[:int(len(freq)/2)] # keep only first half
    # E5 = 00000
    # limitFFT = max([np.max(fft)*0.2, 3.0* E5])
    # freqsPeak = freq[np.where(fft>=limitFFT)]
    # fftsPeak = fft[np.where(fft>=limitFFT)]
    # N = 800
    # sample spacing
    # T = 1.0 / 800.0
    # Synthesized test signal: three sines at `count`, 60 and 30 Hz.
    x = np.linspace(0.0, CHUNK/RATE, CHUNK)
    y = np.sin(count * 2.0*np.pi*x) + np.sin(60 * 2.0*np.pi*x)+ np.sin(30 * 2.0*np.pi*x)
    yf = scipy.fftpack.fft(y)
    xf = np.linspace(0.0, 1.0/(2.0*(1/RATE)), int(CHUNK/2))
    # Plot the single-sided amplitude spectrum.
    ax.plot(xf, 2.0/CHUNK * np.abs(yf[:CHUNK//2]))
    plt.pause(1)
    ax.clear()
# NOTE(review): unreachable -- the while-True loop above never breaks, so the
# stream is never cleanly closed.
stream.stop_stream()
stream.close()
p.terminate()
|
import pickle
import numpy as np
import datetime
from .utils import get_data
import torch
class NetCoach(object):
    """
    Trains a player using MCTS self-play plus a neural network.

    Each iteration: play `episodes` self-play games via the MCTS, push the
    collected boards into a bounded history buffer, retrain the network on
    the flattened buffer, reset the search tree and checkpoint both the
    model and the training data under ./temp/.
    """
    def __init__(self, mcts, net, train_kw = None, max_moves=2000,
                 buffer=20, episodes=20, iterations = 100, train_time=0,
                 prop_thresh=30, verbose=1, resume=False):
        # mcts: search object (must expose reset()); net: model wrapper
        #   (train / save_model / load_model). train_kw: optional kwargs
        #   forwarded to net.train(). buffer: how many past iterations of
        #   data to keep. train_time: wall-clock budget in seconds; 0 means
        #   "run `iterations` fixed iterations instead".
        # prop_thresh is forwarded to utils.get_data -- presumably a move
        #   threshold for sampling behavior; TODO confirm in utils.get_data.
        self.mcts = mcts
        self.net = net
        self.nnet = True
        self.train_kw = train_kw
        self.max_moves = max_moves
        self.data_history = []
        self.buffer = buffer
        self.episodes = episodes
        self.iterations = iterations
        self.train_time = train_time
        self.calculation_time = datetime.timedelta(seconds=train_time)
        self.prop_thresh = prop_thresh
        self.verbose = verbose
        if resume:
            # Restore both the model weights and the replay data saved by
            # a previous run of iteration().
            self.net.load_model()
            with open('./temp/data_hist.pkl', 'rb') as fp:
                self.data_history = pickle.load(fp)
    def _pprint(self, msg):
        # Print only when verbose is truthy.
        if self.verbose:
            print(msg)
    def episode(self):
        """Executes an episode, defined as playing out a full game."""
        data, moves = get_data(self.mcts, nnet=self.nnet, max_moves=self.max_moves, return_moves=True, prop_thresh=self.prop_thresh)
        return data, moves
    def iteration(self):
        """Executes one iteration of the training loop."""
        train_data = []
        ep_times = []  # per-game wall-clock seconds, used for the ETA estimate
        self._pprint('SELF PLAY FOR %d GAMES' %(self.episodes))
        for episode in range(self.episodes):
            ep_start = datetime.datetime.utcnow()
            ep_data, moves = self.episode()
            train_data.extend(ep_data)
            time_delta = datetime.datetime.utcnow() - ep_start
            ep_times.append(time_delta.total_seconds())
            ave_time = np.mean(ep_times)
            eta = ave_time * (self.episodes - episode - 1)
            self._pprint(
                'Game %d/%d finished, took %.2f seconds (%d moves). ETA: %.2f seconds'
                %(episode+1, self.episodes, ep_times[-1], moves, eta)
            )
        self._pprint('')
        self.data_history.append(train_data)
        # Bounded replay buffer: keep at most `buffer` iterations of data.
        if len(self.data_history) > self.buffer:
            self._pprint("Memory capacity exceeded buffer, deleting oldest training data.\n")
            self.data_history.pop(0)
        # Train on the flattened history, not just this iteration's games.
        train_data = []
        for data in self.data_history:
            train_data.extend(data)
        self._pprint("TRAINING ON %d BOARDS FROM MEMORY" %(len(train_data)))
        if self.train_kw is None:
            self.net.train(train_data)
        else:
            self.net.train(train_data, **self.train_kw)
        # reset the mcts after training
        self.mcts.reset()
        # save the model
        self.net.save_model()
        # save training data
        with open('./temp/data_hist.pkl', 'wb') as fp:
            pickle.dump(self.data_history, fp)
        self._pprint('')
    def train(self):
        """Trains the network over all iterations, or training time if not zero"""
        if self.train_time <= 0:
            # Fixed-iteration mode: run exactly `iterations` iterations.
            begin = datetime.datetime.utcnow()
            time = []  # per-iteration durations for the ETA estimate
            for i in range(self.iterations):
                i_begin = datetime.datetime.utcnow()
                self.iteration()
                i_end = datetime.datetime.utcnow()
                delta = i_end - i_begin
                delta = delta.total_seconds()
                time.append(delta)
                eta = np.mean(time)*(self.iterations - i)
                self._pprint("ITERATION %d done in %.2f seconds, ETA %.2f seconds\n" %(i+1, delta, eta))
            end = datetime.datetime.utcnow()
            delta = end - begin
            delta = delta.total_seconds()
            self._pprint("Done training, took %.2f seconds" %(delta))
        else:
            # Time-budget mode: keep iterating until `train_time` seconds elapse.
            time = []
            begin = datetime.datetime.utcnow()
            i = 0
            while datetime.datetime.utcnow() - begin < self.calculation_time:
                i_begin = datetime.datetime.utcnow()
                self.iteration()
                i_end = datetime.datetime.utcnow()
                delta = i_end - i_begin
                delta = delta.total_seconds()
                time.append(delta)
                ave_time = np.mean(time)
                # Rough projection of how many iterations fit in the budget.
                total = np.round(self.calculation_time.total_seconds()/ave_time)
                i += 1
                self._pprint("Iteration %d done in %.2f seconds, ESTIMATED ITERATIONS: %d\n" %(i, delta, total))
            end = datetime.datetime.utcnow()
            delta = end - begin
            delta = delta.total_seconds()
            self._pprint("Done training, took %.2f seconds" %(delta))
|
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
def output_fusion_matrix(predict, ground_truth):
    """Print the confusion matrix of `predict` against `ground_truth`.

    Fix: labels are sorted so both the printed label list and the matrix
    row/column order are deterministic -- a plain set has arbitrary iteration
    order, which made the matrix layout vary between runs.
    """
    print("混淆矩阵如下: ")
    labels = sorted(set(ground_truth))
    cf_matrix = confusion_matrix(y_pred=predict, y_true=ground_truth, labels=labels)
    print("labels = ", labels)
    print(cf_matrix)
def output_classification_report(preict, ground_truth):
    """Print sklearn's per-class precision/recall/F1 report for the predictions."""
    print("分类结果报告: ")
    print(classification_report(y_true=ground_truth, y_pred=preict))
def cal_acc(predict, ground_truth):
    """Compute classification accuracy.

    Args:
        predict: predicted class label for each sample.
        ground_truth: reference label for each sample.

    Returns:
        Fraction of positions where the two labels agree, as a float.
        Returns 0.0 for empty input (the original raised ZeroDivisionError).

    Raises:
        AssertionError: if the two sequences differ in length.
    """
    assert len(predict) == len(ground_truth), "预测标签与基准值的长度不一致"
    if not predict:
        return 0.0
    matches = sum(p == g for p, g in zip(predict, ground_truth))
    return matches / len(predict)
|
import ray
import time
import asyncio
from ray import serve
@serve.deployment(num_replicas=2)
def model_one(input_data):
    """Dummy model replica: simulate 4s of inference work, then return 1."""
    print("Model 1 predict")
    time.sleep(4)
    return 1
@serve.deployment(num_replicas=2)
def model_two(input_data):
    """Dummy model replica: simulate 4s of inference work, then return 2."""
    print("Model 2 predict")
    time.sleep(4)
    return 2
@serve.deployment(max_concurrent_queries=10, route_prefix="/composed")
class EnsembleModel:
    """Fan-out deployment: calls both models in parallel and gathers results."""
    def __init__(self):
        # Handles let replicas call the other deployments over Ray.
        self.model_one = model_one.get_handle()
        self.model_two = model_two.get_handle()

    async def __call__(self, input_data):
        """Return [model_one_result, model_two_result] for `input_data`."""
        print("Call models concurrently, wait for both to finish")
        # .remote() returns immediately; both models run at the same time.
        tasks = [self.model_one.remote(input_data), self.model_two.remote(input_data)]
        print("collect models predictions (non-blocking)")
        predictions = await asyncio.gather(*tasks)
        return predictions
def send_concurrent_model_requests(num_single_model_replicas=2):
    """Fire `num_single_model_replicas` concurrent ensemble requests and print results."""
    ensemble_model = EnsembleModel.get_handle()
    # Submit all requests before blocking so they run concurrently.
    all_data = [
        ensemble_model.remote(input_data)
        for input_data in range(num_single_model_replicas)
    ]
    all_predictions = ray.get(all_data)
    print(all_predictions)
if __name__ == "__main__":
    # start local cluster and ray serve processes
    # Start ray with 8 processes.
    if ray.is_initialized():
        ray.shutdown()
    ray.init(num_cpus=8)
    serve.start()
    # deploy all actors / models
    model_one.deploy()
    model_two.deploy()
    EnsembleModel.deploy()
    # Send 2 concurrent requests to the Ensemble Model for predictions.
    # This runs 4 seconds in total calling 2 times the ensemble model
    # concurrently,
    # which calls 2 models in parallel for each call. In total 4 models run
    # parallel.
    st = time.time()
    send_concurrent_model_requests()
    print("duration", time.time() - st)

    # Output
    # [[1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]
    # duration 4.015406847000122
|
# import dependencies
import os
import csv
# Input CSV path, relative to the launch directory.
csvpath = os.path.join('Resources','budget_data.csv')
# the functions takes csvreader as a parameter and extracts three lists:
# date, profit/losses, and the change.
# Change list will allow finding the max and min change val and date indexes
def py_analysis(csvreader):
    """Summarize a budget CSV into a financial-analysis report.

    Args:
        csvreader: iterable of [date, profit_loss] rows; the header row must
            already have been consumed. Assumes at least two data rows.

    Returns:
        The formatted report text (totals, average change, greatest
        increase/decrease months).

    Bug fix: the original seeded the change list with `first_value - 0`, so a
    large first month could wrongly win "greatest increase". Deltas now start
    from the second month only.
    """
    month_year = []
    p_l = []
    change = []  # change[i] is the delta INTO month_year[i + 1]
    for row in csvreader:
        value = int(row[1])
        # Only record a delta once a previous month exists.
        if p_l:
            change.append(value - p_l[-1])
        month_year.append(row[0])
        p_l.append(value)
    # total profit is the sum of all P&L values
    profit_total = sum(p_l)
    month_total = len(month_year)
    # Telescoping shortcut: the mean of the deltas is (last - first) / (n - 1).
    average_change = round(( p_l[-1] - p_l[0] )/(month_total - 1 ), 2)
    # greatest single-month increase and decrease
    greatest_inc = max(change)
    greatest_dec = min(change)
    # +1 offset maps a delta index back to the month it leads into
    inc_month = month_year[change.index(greatest_inc) + 1]
    dec_month = month_year[change.index(greatest_dec) + 1]
    alltext = f'''Financial Analysis
----------------------------
Total Months: {month_total}
Total: ${profit_total}
Average  Change: ${average_change}
Greatest Increase in Profits: {inc_month} (${greatest_inc})
Greatest Decrease in Profits: {dec_month} (${greatest_dec})'''
    return alltext
# Read the budget CSV and run the analysis.
with open(csvpath) as csvfile:
    # comma-delimited reader over the file content
    csvreader = csv.reader(csvfile, delimiter=',')
    # skip the header row
    next(csvreader)
    # run the function for the budget data analysis and keep the report text
    py_analysis_output = py_analysis(csvreader)
# print the report to the terminal
print(py_analysis_output)
# path for the output text file
data_output = os.path.join('analysis', 'py_analysis.txt')
# Write the report. Fix: the with-statement closes the file itself, so the
# original's explicit text.close() inside the block was redundant (and the
# line carried a stray trailing '|' artifact).
with open(data_output, 'w', newline="") as text:
    text.write(py_analysis_output)
import re
def xmlmetricf(file):
    """Return True when the file is a well-formed 'sprawozdanie' report.

    Args:
        file: iterable of UTF-8 encoded byte lines (e.g. a binary file object).

    The document must match the full schema: the XML prolog, the exact DOCTYPE,
    a <sprawozdanie> root with 3-letter przedmiot / 1-letter temat attributes
    (Polish alphabet), a capitalized multi-word <imie_nazwisko>, a 6-digit
    <nr_indeksu>, a numeric <liczba_pkt> and at least one <zadanie> element.
    """
    # Join once -- repeated `+=` concatenation is quadratic.
    sline = "".join(line.decode("utf-8") for line in file)
    mo = re.compile(r'<\?xml version="1\.0" encoding="UTF-8"\?>(\r\n)*<!DOCTYPE\ssprawozdanie\sPUBLIC\s"sprawozdanie"\s"http:\/\/mhanckow\.vm\.wmi\.amu\.edu\.pl:20002/zajecia/file-storage/view/sprawozdanie\.dtd">(\r\n)*<sprawozdanie\sprzedmiot="[AaĄąBbCcĆćDdEeĘęFfGgHhIiJjKkLlŁłMmNnŃńOoÓóPpRrSsŚśTtUuWwYyZzŹźŻż]{3}" temat="[AaĄąBbCcĆćDdEeĘęFfGgHhIiJjKkLlŁłMmNnŃńOoÓóPpRrSsŚśTtUuWwYyZzŹźŻż]{1}">(\r\n)*<imie_nazwisko>[AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ][aąbcćdeęfghijklłmnńoóprsśtuwyzźż]+(\s[AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ][aąbcćdeęfghijklłmnńoóprsśtuwyzźż]+)+</imie_nazwisko>(\r\n)*<nr_indeksu>[0-9]{6}</nr_indeksu>(\r\n)*<liczba_pkt>([0-9]+|([0-9]+\.[0-9]+))</liczba_pkt>(\r\n)*(<zadanie\snr="-?[0-9]+[AaĄąBbCcĆćDdEeĘęFfGgHhIiJjKkLlŁłMmNnŃńOoÓóPpRrSsŚśTtUuWwYyZzŹźŻż]*"\spkt="([0-9]+|([0-9]+\.[0-9]+))+"></zadanie>(\r\n)*)+</sprawozdanie>')
    # Only existence matters, so search (first match) beats findall (all matches).
    return mo.search(sline) is not None
import requests
import json
import uuid
import tempfile
from bs4 import BeautifulSoup
# django
from django.contrib.sites.models import Site
from django.db.models import F
from django.conf import settings
from django.utils.translation import activate
from django.core import files
# mockups
from base.mockups import Mockup
from base.data import regions_data
# models
from ministries.models import Ministry
from ministries.models import PublicService
from regions.models import Commune
from regions.models import Region
from aldryn_newsblog.models import Article
from users.models import User
from aldryn_people.models import Person
from aldryn_newsblog.cms_appconfig import NewsBlogConfig
from djangocms_text_ckeditor.models import Text
from filer.models.imagemodels import Image
from djangocms_picture.models import Picture
def create_articles(quantity=20, language='en'):
    """Publish `quantity` dummy articles in `language` under the first blog config."""
    activate(language)
    blog_config = NewsBlogConfig.objects.first()
    mockup = Mockup()
    for _ in range(quantity):
        mockup.create_article(is_published=True, app_config=blog_config)
def get_current_government_structure():
    """Return (creating it if needed) the government structure flagged as current."""
    result = Mockup().get_or_create_government_structure(current_government=True)
    return result[0]
def create_presidency():
    """Attach a presidency to the current government structure."""
    mockup = Mockup()
    structure = get_current_government_structure()
    mockup.create_presidency(government_structure=structure)
def create_sociocultural_department():
    """Attach a sociocultural department to the current government structure."""
    mockup = Mockup()
    structure = get_current_government_structure()
    mockup.create_sociocultural_department(government_structure=structure)
def create_ministry(datetime=None, quantity=10):
    """Create `quantity` ministries, each with its own dummy minister.

    `datetime` is unused; it is kept so existing call sites stay valid.
    """
    mockup = Mockup()
    structure = get_current_government_structure()
    for _ in range(quantity):
        servant = mockup.create_public_servant(government_structure=structure)
        mockup.create_ministry(minister=servant, government_structure=structure)
def create_cms_pages():
    """Ensure the 'Noticias' CMS page exists on the first site."""
    mockup = Mockup()
    first_site = Site.objects.first()
    mockup.get_or_create_page(
        reverse_id=u'Noticias',
        template=u'base.jade',
        site_id=first_site.id,
    )
def load_regions(datetime=None, quantity=10):
    """Upsert every region and its communes from the static `regions_data` fixture.

    `datetime` and `quantity` are unused; kept for call-site compatibility.
    """
    government_structure = get_current_government_structure()
    for region_data in regions_data:
        name = region_data['name']
        region = Region.objects.get_or_create(
            government_structure=government_structure,
            name=name,
        )[0]
        # Backfill missing translated names with the fixture name.
        if not region.name_es:
            region.name_es = name
        if not region.name_en:
            region.name_en = name
        region.save()
        for commune_data in region_data['communes']:
            Commune.objects.get_or_create(
                name=commune_data['name'],
                region=region,
            )
def load_data_from_digital_gob_api(ministry_with_minister=False):
    """Populate Ministry and PublicService rows from the digital.gob.cl API.

    Fetches up to 1000 institutions, keeps those whose name starts with
    'ministerio', upserts them as ministries of the current government
    structure, and upserts their dependent public services.

    Args:
        ministry_with_minister: when True, a dummy minister is created and
            attached to each ministry's defaults.
    """
    # get or create current government structure
    m = Mockup()
    government_structure = get_current_government_structure()

    # Get ministries from public json
    headers = {
        'User-Agent': 'Mozilla/5.0',
    }
    url = 'https://apis.digital.gob.cl/misc/instituciones/_search?size=1000'
    ministries = requests.get(url, headers=headers)
    ministries = ministries.json()['hits']['hits']

    # Drop previously half-created rows before re-importing.
    PublicService.objects.filter(name=None).delete()

    for ministry in ministries:
        source = ministry['_source']
        name = source['nombre']

        # keep only institutions whose name starts with "ministerio"
        if name.lower().startswith('ministerio'):
            description = source.get('mision', '')
            defaults = {'description': description}

            if ministry_with_minister:
                # create a dummy minister for this ministry
                minister = m.create_public_servant(
                    government_structure=government_structure,
                )
                defaults['minister'] = minister

            # get or create ministry by government structure and name
            ministry_obj = Ministry.objects.get_or_create(
                government_structure=government_structure,
                name=name,
                defaults=defaults,
            )[0]
            # Backfill missing translated names with the API name.
            if not ministry_obj.name_es:
                ministry_obj.name_es = name
            if not ministry_obj.name_en:
                ministry_obj.name_en = name
            ministry_obj.save()

            # Upsert each dependent public service.
            # Bug fix: .get() previously had no default, so an institution
            # without 'servicios_dependientes' raised TypeError when
            # iterating None.
            for service in source.get('servicios_dependientes', []):
                name = service.get('nombre')
                url = service.get('url', None)

                if not url:
                    continue

                public_service = PublicService.objects.get_or_create(
                    name=name.strip(),
                    ministry=ministry_obj,
                    defaults={
                        'name_es': name.strip(),
                        'url': url,
                    }
                )[0]
                if not public_service.name_en:
                    public_service.name_en = name
                public_service.save()
def load_base_data():
    """Load regions and ministry/public-service data, then backfill translations."""
    load_regions()
    load_data_from_digital_gob_api()
    PublicService.objects.filter(name_es=None).update(name_es=F('name'))
    # Bug fix: this line previously updated name_es again (copy-paste error),
    # leaving every NULL name_en untouched.
    PublicService.objects.filter(name_en=None).update(name_en=F('name'))
def create_text_plugin(content, target_placeholder, language, position):
    '''
    Attach a CMS TextPlugin holding `content` to `target_placeholder`
    at the given `position`.
    '''
    plugin = Text(body=content)
    plugin.position = position
    # Tree bookkeeping fields are left unset; the CMS fills them on save.
    plugin.tree_id = None
    plugin.lft = None
    plugin.rght = None
    plugin.level = None
    plugin.language = language
    plugin.plugin_type = 'TextPlugin'
    plugin.placeholder = target_placeholder
    plugin.save()
def create_picture_plugin(image, target_placeholder, language, position):
    '''
    Create a django CMS PicturePlugin from an ``<img>`` HTML fragment,
    downloading the referenced file from the gob.cl S3 bucket.
    '''
    # Pull the src attribute out of the HTML fragment and rebuild the
    # S3 download URL from its last three path components.
    soup = BeautifulSoup(image, 'html.parser')
    src_parts = soup.img.get('src').split('/')[-3:]
    s3_url = 'https://s3-us-west-2.amazonaws.com/gob.cl/'
    download_url = s3_url + 'gobcl-uploads/' + '/'.join(src_parts)

    downloaded = download_file_from_url(download_url)
    if not downloaded:
        # Nothing to attach when the download failed.
        return

    file_name = src_parts[-1]
    if len(file_name) > 150:
        # Over-long names are replaced by a uuid, keeping the extension.
        print('entro al if de 150 content')
        extension = file_name.split('.')[-1]
        file_name = '{}.{}'.format(
            str(uuid.uuid4()),
            extension
        )

    # Create Image element (django CMS)
    cms_image = Image.objects.create()
    cms_image.name = file_name
    cms_image.file.save(
        file_name,
        downloaded,
        save=True
    )
    cms_image.save()

    # Create the Picture plugin pointing at the stored image; tree
    # fields are cleared so the CMS recomputes plugin nesting.
    plugin = Picture.objects.create()
    plugin.picture = cms_image
    plugin.position = position
    plugin.tree_id = None
    plugin.lft = None
    plugin.rght = None
    plugin.level = None
    plugin.language = language
    plugin.plugin_type = 'PicturePlugin'
    plugin.placeholder = target_placeholder
    plugin.save()
def create_content(content_list, target_placeholder, language):
    '''
    Walk *content_list* and create a picture or text plugin per fragment.

    The first two entries and the last one are skipped (boilerplate),
    as are bare newline fragments.
    '''
    position = 0
    for fragment in content_list[2:-1]:
        if fragment in ('\n', '\r\n'):
            continue
        if fragment.startswith('<p><img'):
            create_picture_plugin(
                fragment,
                target_placeholder,
                language,
                position
            )
        else:
            create_text_plugin(
                fragment,
                target_placeholder,
                language,
                position,
            )
        position += 1
def download_file_from_url(url):
    """Stream *url* into a temporary file and return it as a Django File.

    Returns ``None`` on any network error or non-OK status code.
    """
    try:
        # Stream the image from the url. A timeout prevents the whole
        # import from hanging forever on a dead host.
        response = requests.get(url, stream=True, timeout=30)
    except requests.exceptions.RequestException:
        return

    if response.status_code != requests.codes.ok:
        return

    # Create a temporary file (deleted when the File object is GC'd)
    lf = tempfile.NamedTemporaryFile()

    # Read the streamed image in sections
    for block in response.iter_content(1024 * 8):
        # If no more file then stop
        if not block:
            break
        # Write image block to temporary file
        lf.write(block)

    # Ensure buffered bytes hit the file before Django reads it back.
    lf.flush()
    return files.File(lf)
def create_news_from_json(json_name: str='gobcl-posts.json'):
    '''
    Read *json_name* (a JSON dump of posts from the old gob.cl site,
    located in BASE_DIR) and create an aldryn-newsblog Article for each
    post that has not already been imported.
    '''
    # open gobcl-posts.json
    with open(settings.BASE_DIR + '/' + json_name) as news:
        json_news = json.loads(news.read())

    # get basic data required by model Article (aldryn newsblog)
    app_config = NewsBlogConfig.objects.first()
    owner = User.objects.first()
    author = Person.objects.get_or_create()[0]

    # NOTE(review): `news` below shadows the file handle above; harmless
    # because the file is already closed, but confusing to read.
    for news in json_news:
        # Get principal data from json (several fields arrive as
        # single-element lists, hence the [0] indexing).
        title = news.get('titulo', '')[0]
        image_url = news.get('thumb_img', '')
        publishing_date = news.get('fecha', '')[0]
        lead = news.get('bajada', '')
        if lead:
            lead = lead[0]
        content = news.get('contenido', '')
        language = news.get('lang', 'es')

        # Activate the matching translation language before creating
        # the (translated) article.
        if language == 'es-CL' or language == 'es':
            activate('es')
            language = 'es'
        elif language == 'en-US':
            activate('en')
            language = 'en'

        # Skip posts whose title already exists as a translated article.
        article = Article.objects.translated(
            title=title
        )
        if article.exists():
            continue

        data = {
            'app_config': app_config,
            'title': title,
            'lead_in': lead,
            'publishing_date': publishing_date,
            'owner': owner,
            'author': author,
            'is_published': True,
        }
        if image_url:
            '''
            if exists image_url get image from
            gobcl-uploads folder and create add image to Article
            '''
            # Rebuild the S3 URL from the last three path components.
            data_image = image_url[0].split('/')[-3:]
            s3_url = 'https://s3-us-west-2.amazonaws.com/gob.cl/'
            img_url = s3_url + 'gobcl-uploads/' + '/'.join(data_image)
            img = download_file_from_url(img_url)
            if not img:
                print('No image')
            if img:
                img_name = data_image[-1]
                # Over-long filenames are replaced by a uuid (keeping
                # the original extension) to fit the DB column.
                if len(img_name) > 150:
                    print('entro al if de 150 json')
                    img_name_split = img_name.split('.')
                    img_name = '{}.{}'.format(
                        str(uuid.uuid4()),
                        img_name_split[-1]
                    )
                print('*' * 10)
                print('titulo noticia ' + title)
                print('titulo imagen ' + img_name)
                print('*' * 10)
                image = Image.objects.create()
                image.name = img_name
                image.file.save(
                    img_name,
                    img,
                    save=True
                )
                image.save()
                data['featured_image'] = image
        article = Article.objects.create(**data)

        # Populate the article body with text/picture plugins.
        if content:
            create_content(
                content,
                article.content,
                language
            )
|
import contextily as cx
import geopandas as gpd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import colorbar
from seaborn.axisgrid import FacetGrid, Grid
from aves.visualization.colors import colormap_from_palette
from aves.visualization.maps.utils import geographical_scale, north_arrow
class GeoFacetGrid(FacetGrid):
    """A seaborn :class:`FacetGrid` specialized for drawing maps.

    Every facet shares the total bounds of ``context`` (a GeoDataFrame
    keyword argument, defaulting to the plotted data itself), and an
    internal ``zorder`` counter keeps successive layers stacked in the
    order they were added.
    """

    def __init__(self, geodataframe: gpd.GeoDataFrame, *args, **kwargs):
        geocontext = kwargs.pop("context", None)
        # Bug fix: these options must be *popped*, not just read with
        # kwargs.get — FacetGrid.__init__ raises TypeError on keyword
        # arguments it does not recognize.
        remove_axes = kwargs.pop("remove_axes", True)
        equal_aspect = kwargs.pop("equal_aspect", True)

        if geocontext is None:
            geocontext = geodataframe

        self.geocontext = geocontext
        self.bounds = geocontext.total_bounds
        # Width / height ratio of the geographic bounding box.
        self.aspect = (self.bounds[2] - self.bounds[0]) / (
            self.bounds[3] - self.bounds[1]
        )

        kwargs["aspect"] = self.aspect
        kwargs["xlim"] = (self.bounds[0], self.bounds[2])
        kwargs["ylim"] = (self.bounds[1], self.bounds[3])

        super().__init__(geodataframe, *args, **kwargs)

        for ax in self.axes.flatten():
            if remove_axes:
                ax.set_axis_off()
            if equal_aspect:
                ax.set_aspect("equal")

        self.zorder = 0

    def add_layer(self, func_or_data, *args, **kwargs):
        """Draw one layer on every facet.

        *func_or_data* is either a GeoDataFrame (plotted directly on all
        axes) or a callable dispatched through :meth:`map_dataframe` as
        ``func(ax, data, *args, zorder=..., **kwargs)``.
        """
        if isinstance(func_or_data, gpd.GeoDataFrame):
            # a direct geography
            for ax in self.axes.flatten():
                func_or_data.plot(*args, ax=ax, zorder=self.zorder, **kwargs)
        else:
            plot = lambda *a, **kw: func_or_data(
                plt.gca(), kw.pop("data"), *a, zorder=self.zorder, **kw
            )

            self.map_dataframe(plot, *args, **kwargs)

        self.zorder += 1

    def add_basemap(
        self, file_path, interpolation="hanning", reset_extent=False, **kwargs
    ):
        """Add a contextily basemap (from *file_path*) to every facet."""
        for ax in self.axes.flatten():
            cx.add_basemap(
                ax,
                crs=self.geocontext.crs.to_string(),
                source=file_path,
                interpolation=interpolation,
                zorder=self.zorder,
                reset_extent=reset_extent,
                **kwargs
            )

            if not reset_extent:
                # Restore the shared bounds the basemap may have altered.
                ax.set_xlim((self.bounds[0], self.bounds[2]))
                ax.set_ylim((self.bounds[1], self.bounds[3]))

        self.zorder += 1

    def add_map_elements(
        self,
        all_axes=False,
        scale=True,
        arrow=True,
        scale_args={},
        arrow_args={},
    ):
        """Decorate facets with a north arrow and/or geographical scale.

        With ``all_axes=False`` (default) only the first facet is
        decorated.
        """
        for ax in self.axes.flatten():
            if arrow:
                north_arrow(ax, **arrow_args)
            if scale:
                geographical_scale(ax, **scale_args)
            if not all_axes:
                break

    def add_global_colorbar(self, palette, k, title=None, title_args={}, **kwargs):
        """Attach one figure-level colorbar with *k* colors of *palette*.

        Returns ``(cax, colorbar)``. Raises ValueError for orientations
        other than ``"horizontal"`` or ``"vertical"``.
        """
        orientation = kwargs.get("orientation", "horizontal")
        if orientation == "horizontal":
            cax = self.fig.add_axes([0.25, -0.012, 0.5, 0.01])
        elif orientation == "vertical":
            cax = self.fig.add_axes([1.01, 0.25, 0.01, 0.5])
        else:
            raise ValueError("unsupported colorbar orientation")

        if title:
            cax.set_title(title, **title_args)

        cax.set_axis_off()

        cb = colorbar.ColorbarBase(
            cax, cmap=colormap_from_palette(palette, n_colors=k), **kwargs
        )

        return cax, cb

    def set_title(self, title, **kwargs):
        """Set the figure's suptitle."""
        self.fig.suptitle(title, **kwargs)
class GeoAttributeGrid(Grid):
    """Small-multiples grid drawing one map per (non-geometry) column.

    *vars* selects the columns to plot (default: every column except
    ``geometry``); *col_wrap* caps the number of subplot columns.
    """

    def __init__(
        self,
        geodataframe: gpd.GeoDataFrame,
        *,
        context: gpd.GeoDataFrame = None,
        vars=None,
        height=2.5,
        layout_pad=0.5,
        col_wrap=4,
        despine=True,
        remove_axes=True,
        set_limits=True,
        equal_aspect=True
    ):
        super().__init__()

        if vars is not None:
            vars = list(vars)
        else:
            vars = list(geodataframe.drop("geometry", axis=1).columns)

        if not vars:
            raise ValueError("No variables found for grid.")

        self.vars = vars

        # Bug fix: `geocontext` was previously assigned only when context
        # was None, so passing an explicit context raised NameError below.
        geocontext = geodataframe if context is None else context

        self.geocontext = geocontext
        self.bounds = geocontext.total_bounds
        # Width / height ratio of the geographic bounding box.
        self.aspect = (self.bounds[2] - self.bounds[0]) / (
            self.bounds[3] - self.bounds[1]
        )

        n_variables = len(vars)
        n_columns = min(col_wrap, len(vars))
        # Ceiling division: enough rows to hold every variable.
        n_rows = n_variables // n_columns
        if n_rows * n_columns < n_variables:
            n_rows += 1

        with mpl.rc_context({"figure.autolayout": False}):
            fig, axes = plt.subplots(
                n_rows,
                n_columns,
                figsize=(n_columns * height * self.aspect, n_rows * height),
                sharex=True,
                sharey=True,
                squeeze=False,
            )

        flattened = axes.flatten()

        if set_limits:
            for ax in flattened:
                ax.set_xlim((self.bounds[0], self.bounds[2]))
                ax.set_ylim((self.bounds[1], self.bounds[3]))

        if remove_axes:
            for ax in flattened:
                ax.set_axis_off()
        else:
            # Deactivate only the trailing, unneeded axes.
            # Bug fix: len(axes) counts subplot *rows*; iterate over the
            # flattened array so every spare axis is hidden.
            for i in range(n_variables, len(flattened)):
                flattened[i].set_axis_off()

        if equal_aspect:
            for ax in flattened:
                ax.set_aspect("equal")

        self.fig = fig
        self.axes = axes
        self.data = geodataframe

        # Label the axes
        self._add_axis_labels()

        self._legend_data = {}

        # Make the plot look nice
        self._tight_layout_rect = [0.01, 0.01, 0.99, 0.99]
        self._tight_layout_pad = layout_pad
        self._despine = despine
        if despine:
            sns.despine(fig=fig)
        self.tight_layout(pad=layout_pad)

    def _add_axis_labels(self):
        """Title each subplot with the column it displays."""
        for ax, label in zip(self.axes.flatten(), self.vars):
            ax.set_title(label)

    def add_layer(self, func_or_data, *args, **kwargs):
        """Draw one layer on each variable's subplot.

        *func_or_data* is either a GeoDataFrame plotted as-is, or a
        callable invoked as ``func(ax, data, column, *args, **kwargs)``.
        """
        if isinstance(func_or_data, gpd.GeoDataFrame):
            # a direct geography
            for ax, col in zip(self.axes.flatten(), self.vars):
                func_or_data.plot(*args, ax=ax, **kwargs)
        else:
            for ax, col in zip(self.axes.flatten(), self.vars):
                func_or_data(ax, self.data, col, *args, **kwargs)
def figure_from_geodataframe(
    geodf,
    height=5,
    bbox=None,
    remove_axes=True,
    set_limits=True,
    basemap=None,
    basemap_interpolation="hanning",
):
    """Create a single figure/axis pair sized to *geodf*'s bounding box.

    The figure width is ``height`` scaled by the box's width/height
    ratio; an optional contextily basemap is drawn at zorder 0.
    Returns ``(fig, ax)``.
    """
    box = geodf.total_bounds if bbox is None else bbox

    width_over_height = (box[2] - box[0]) / (box[3] - box[1])
    fig, ax = plt.subplots(figsize=(height * width_over_height, height))

    if set_limits:
        ax.set_xlim([box[0], box[2]])
        ax.set_ylim([box[1], box[3]])

    if remove_axes:
        ax.set_axis_off()

    if basemap is not None:
        cx.add_basemap(
            ax,
            crs=geodf.crs.to_string(),
            source=basemap,
            interpolation=basemap_interpolation,
            zorder=0,
        )

    return fig, ax
def small_multiples_from_geodataframe(
    geodf,
    n_variables,
    height=5,
    col_wrap=5,
    bbox=None,
    sharex=True,
    sharey=True,
    remove_axes=True,
    set_limits=True,
    flatten_axes=True,
    equal_aspect=True,
    basemap=None,
    basemap_interpolation="hanning",
):
    """Create a figure with *n_variables* map subplots sized to *geodf*.

    Falls back to :func:`figure_from_geodataframe` for a single
    variable. Returns ``(fig, axes)``; *axes* is the flattened array
    when ``flatten_axes`` is true, else the 2-D subplot array.
    """
    if n_variables <= 1:
        return figure_from_geodataframe(
            geodf,
            height=height,
            bbox=bbox,
            remove_axes=remove_axes,
            set_limits=set_limits,
            basemap=basemap,
            basemap_interpolation=basemap_interpolation,
        )

    if bbox is None:
        bbox = geodf.total_bounds

    # Width / height ratio of the geographic bounding box.
    aspect = (bbox[2] - bbox[0]) / (bbox[3] - bbox[1])

    n_columns = min(col_wrap, n_variables)
    # Ceiling division: enough rows to hold every variable.
    n_rows = n_variables // n_columns
    if n_rows * n_columns < n_variables:
        n_rows += 1

    fig, axes = plt.subplots(
        n_rows,
        n_columns,
        figsize=(n_columns * height * aspect, n_rows * height),
        sharex=sharex,
        sharey=sharey,
        squeeze=False,
    )

    flattened = axes.flatten()

    if set_limits:
        for ax in flattened:
            ax.set_xlim([bbox[0], bbox[2]])
            ax.set_ylim([bbox[1], bbox[3]])

    if remove_axes:
        for ax in flattened:
            ax.set_axis_off()
    else:
        # Deactivate only the trailing, unneeded axes.
        # Bug fix: len(axes) counts subplot *rows*; iterate over the
        # flattened array so every spare axis is hidden.
        for i in range(n_variables, len(flattened)):
            flattened[i].set_axis_off()

    if equal_aspect:
        for ax in flattened:
            ax.set_aspect("equal")

    if basemap is not None:
        for ax in flattened:
            cx.add_basemap(
                ax,
                crs=geodf.crs.to_string(),
                source=basemap,
                interpolation=basemap_interpolation,
                zorder=0,
            )

    if flatten_axes:
        return fig, flattened
    return fig, axes
|
import fresh_tomatoes
import media
import urllib
import json
import connect
# List that will be handed to fresh_tomatoes at the end
movies_list = []

# Walk the JSON results and shape each movie record for the HTML page
for item in connect.movie_query["items"]:
    # Movie title
    title = item["title"]
    # Poster image URL
    poster = "https://image.tmdb.org/t/p/w500{}".format(item["poster_path"])
    # To get the trailer we hit the API again, using the movie id
    trailer_url = "http://api.themoviedb.org/3/movie/{}/videos?api_key={}"
    # With the id, build the second request that returns the trailer
    trailer_api = trailer_url.format(str(item["id"]), connect.api_key)
    # Open the JSON response
    trailer = urllib.urlopen(trailer_api)
    # Read the JSON
    trailer = json.loads(trailer.read())
    # Extract the trailer's YouTube key from the JSON
    movie_trailer = "https://youtu.be/{}".format(trailer["results"][0]["key"])
    # Movie overview/summary
    overview = item["overview"]
    # Wrap all collected data in a media.Movie instance
    item = media.Movie(title,
                       overview,
                       poster,
                       movie_trailer)
    # Append it to the list
    movies_list.append(item)

# Finally, hand the list to fresh_tomatoes to build the HTML page
fresh_tomatoes.open_movies_page(movies_list)
|
# Read the three exam scores (Chemistry, Mathematics, Physics).
nota1 = float(input('Nota da Prova de Química: '))
nota2 = float(input('Nota da Prova de Matemática: '))
nota3 = float(input('Nota da Prova de Física: '))
# Apply the weights: chemistry and maths weigh 1, physics weighs 2.
nota1 = nota1 * 1
nota2 = nota2 * 1
nota3 = nota3 * 2
# Weighted mean: weighted sum divided by the total weight (1 + 1 + 2).
# Bug fix: the old expression `nota1 + nota2 + nota3/1+1+2` divided only
# nota3 (by 1) and then added 3, due to operator precedence.
mp = (nota1 + nota2 + nota3) / (1 + 1 + 2)
if mp >= 60:
    print('Aprovado')
else:
    print('Reprovado')
print(f'A sua média foi {mp}.')
|
class Human:
    """Base class holding a person's common attributes."""

    def __init__(self, name, age, height):
        self.name = name
        self.age = age
        self.height = height

    def eat(self):
        pass

    def drink(self):
        pass

    def breath(self):
        pass

    def __str__(self):
        return f'Human: {self.age} | {self.name} | {self.height}'
class Superhero(Human):
    """A human with a single named superpower."""

    def __init__(self, superpower, name, age, height):
        super().__init__(name, age, height)
        self.superpower = superpower

    def do_super_stuff(self):
        print('I`m doing a super stuff')
class Man(Superhero):
    """A male superhero."""

    def __init__(self, name, age, height, superpower):
        # Bug fix: Superhero.__init__ expects (superpower, name, age,
        # height); the previous call passed (name, age, height,
        # superpower) positionally, scrambling every attribute.
        super().__init__(superpower, name, age, height)
        self.gender = 'MALE'

    def __str__(self):
        return f'Human: {self.age} | {self.name} | {self.height} | {self.gender}'
class Woman(Human):
    """A female human."""

    def __init__(self, name, age, height):
        super().__init__(name, age, height)
        self.gender = 'FEMALE'

    def __str__(self):
        return f'Human: {self.age} | {self.name} | {self.height} | {self.gender}'
# Quick smoke test of the class hierarchy.
man = Man('Andrii', 23, 180, 'lift things')
wom = Woman('Olenka', 23, 170)
print(man)
man.do_super_stuff()
# Show Man's method resolution order.
print(Man.__mro__)
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from util import *
# Default request headers: a desktop Chrome user agent so the shop
# serves the regular (non-bot) pages.
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}

# Module-level list of image URLs already yielded, used for dedup.
inserted_image = []
class ImvelyMallsSpider(scrapy.Spider):
    """Crawler for naning9.com yielding one record per unseen product image.

    NOTE(review): the class name says "Imvely" but the spider actually
    targets Naning9 — presumably copied from a sibling spider.
    """

    name = 'Naning9'
    domain = ''

    def start_requests(self):
        """Kick off the crawl from the shop's landing page."""
        self.domain = 'http://www.naning9.com'
        yield scrapy.Request(url='http://www.naning9.com/', callback=self.main_parse, errback=self.errback_httpbin,
                             headers=header)

    def errback_httpbin(self, failure):
        """Log request failures, distinguishing HTTP, DNS and timeout errors."""
        self.logger.error(repr(failure))
        if failure.check(HttpError):
            self.logger.error('HttpError on %s', failure.value.response.url)
        elif failure.check(DNSLookupError):
            self.logger.error('DNSLookupError on %s', failure.request.url)
        elif failure.check(TimeoutError, TCPTimedOutError):
            self.logger.error('TimeoutError on %s', failure.request.url)

    def main_parse(self, response):
        """Collect the /shop/ category links and follow them, last first."""
        category_urls = [
            make_url(self.domain, href)
            for href in response.css('div.alc_content_inn li a::attr(href)').extract()
            if href.startswith('/shop/')
        ]
        for category_url in reversed(category_urls):
            yield scrapy.Request(url=category_url, callback=self.sub_parse, errback=self.errback_httpbin,
                                 headers=header)

    def sub_parse(self, response):
        """Follow every product thumbnail to its Korean-language detail page."""
        for href in response.css('div.goods_conts div.thumb a::attr(href)').extract():
            yield scrapy.Request(url=self.domain + href + '&lang=kr', callback=self.detail_parse,
                                 errback=self.errback_httpbin, headers=header)

    def detail_parse(self, response):
        """Extract the product fields and yield one record per new image."""
        category = response.css('meta[property="product:category"]::attr(content)').extract()
        product_name = response.css('meta[property="og:title"]::attr(content)').extract_first()
        detail_images = response.css('div.det_cust_wrap img::attr(src)').extract()
        amount = response.css('meta[property="product:sale_price:amount"]::attr(content)').extract_first()
        currency = response.css('meta[property="product:sale_price:currency"]::attr(content)').extract_first()
        page_url = response.url
        index_no = response.css('input[id="index_no"]::attr(value)').extract_first()
        main_image = response.css('meta[property="og:image"]::attr(content)').extract_first()

        def record(img_src, is_main):
            # One output item; 'main' flags the og:image vs detail shots.
            return {
                'host_url': self.domain,
                'tag': category,
                'product_name': product_name,
                'image_url': make_url(self.domain, img_src),
                'product_price': amount,
                'currency_unit': currency,
                'product_url': page_url,
                'product_no': index_no,
                'main': is_main,
                'nation': 'ko'
            }

        if main_image not in inserted_image:
            yield record(main_image, '1')
            inserted_image.append(main_image)

        for src in detail_images:
            if src not in inserted_image:
                yield record(src, '0')
                inserted_image.append(src)
|
# Sample records; the comprehension below drops the one with id == 2
# without mutating the original list.
test_list = [{"id": 1, "data": "HappY"},
             {"id": 2, "data": "BirthDaY"},
             {"id": 3, "data": "Rash"}]

res = [record for record in test_list if record["id"] != 2]

print(test_list)
print(res)
from argparse import ArgumentParser
import airsimneurips as airsim
import cv2
import threading
import time
import utils
import numpy as np
import math
# Params
level_name = "Final_Tier_1"
tier=1
drone_name = "drone_1"
takeoff_height = 1.0
viz_traj = True
# RGBA color used to draw the planned trajectory in the simulator.
viz_traj_color_rgba = [1.0, 0.0, 0.0, 1.0]

# Setup: connect to the AirSim simulator and load the race level,
# then reconnect and give the sim time to settle.
client = airsim.MultirotorClient()
client.confirmConnection()
client.simLoadLevel(level_name)
client.confirmConnection()
time.sleep(2.0)

# Arm drone
client.enableApiControl(vehicle_name=drone_name)
client.arm(vehicle_name=drone_name)

# Set default values for trajectory tracker gains
traj_tracker_gains = airsim.TrajectoryTrackerGains(kp_cross_track = 5.0, kd_cross_track = 0.0,
                                                   kp_vel_cross_track = 3.0, kd_vel_cross_track = 0.0,
                                                   kp_along_track = 0.4, kd_along_track = 0.0,
                                                   kp_vel_along_track = 0.04, kd_vel_along_track = 0.0,
                                                   kp_z_track = 2.0, kd_z_track = 0.0,
                                                   kp_vel_z = 0.4, kd_vel_z = 0.0,
                                                   kp_yaw = 3.0, kd_yaw = 0.1)
client.setTrajectoryTrackerGains(traj_tracker_gains, vehicle_name=drone_name)
time.sleep(0.2)

# Start race
client.simStartRace(tier=tier)

# Take off by flying a spline to a point takeoff_height above the start
# (NED convention: negative z is up — presumably, confirm with AirSim docs).
# client.takeoffAsync().join()
# client.reset()
start_position = client.simGetVehiclePose(vehicle_name=drone_name).position
# # print(start_position)
takeoff_waypoint = airsim.Vector3r(start_position.x_val, start_position.y_val, start_position.z_val-takeoff_height)
client.moveOnSplineAsync([takeoff_waypoint], vel_max=15.0, acc_max=5.0, add_position_constraint=True, add_velocity_constraint=False, add_acceleration_constraint=False, viz_traj=viz_traj, viz_traj_color_rgba=viz_traj_color_rgba, vehicle_name=drone_name).join()
print(client.simGetLastGatePassed(drone_name))

# Gates: scene objects named "GateNN_..." sort lexicographically, so
# re-sort them by the numeric index parsed after the "Gate" prefix.
gate_names_sorted_bad = sorted(client.simListSceneObjects("Gate.*"))
gate_indices_bad = [int(gate_name.split('_')[0][4:]) for gate_name in gate_names_sorted_bad]
gate_indices_correct = sorted(range(len(gate_indices_bad)), key=lambda k: gate_indices_bad[k])
gate_names_sorted = [gate_names_sorted_bad[gate_idx] for gate_idx in gate_indices_correct]

# Fly a spline to the first gate and report the last gate passed.
next_pose = client.simGetObjectPose(gate_names_sorted[0])
client.moveOnSplineAsync([next_pose.position], vel_max=15.0, acc_max=5.0, add_position_constraint=True, add_velocity_constraint=False, add_acceleration_constraint=False, viz_traj=viz_traj, viz_traj_color_rgba=viz_traj_color_rgba, vehicle_name=drone_name).join()
print(client.simGetLastGatePassed(drone_name))
# print(client.client.call('simGetObjectScale', "Gate00"))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.