text stringlengths 38 1.54M |
|---|
from accounts.models import CustomUser
from .models import Todo
import time
from django.shortcuts import render,HttpResponse
import json
from django.views.decorators.csrf import csrf_exempt
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Model
from django.db.models.fields.files import ImageFieldFile
from django.forms import model_to_dict
class ExtendedEncoderAllFields(DjangoJSONEncoder):
    """DjangoJSONEncoder extension that also serializes model instances and
    image fields.

    - ImageFieldFile -> its filesystem path, or '' when no file is attached.
    - Model -> a dict of its fields via model_to_dict; nested non-serializable
      values come back through default() again, so related objects are
      expanded recursively.  (Return ``o.id`` here instead for a shallow
      representation.)
    """

    def default(self, o):
        if isinstance(o, ImageFieldFile):
            # FieldFile.path raises ValueError when no file is associated.
            # The original bare `except:` also swallowed KeyboardInterrupt,
            # SystemExit and genuine bugs — catch only the documented case.
            try:
                return o.path
            except ValueError:
                return ''
        elif isinstance(o, Model):
            return model_to_dict(o)
        return super().default(o)
# Create your views here.
def index(request):
    """Render the landing page with all users and all tasks, newest task first.

    Arguments:
        request -- the incoming HttpRequest.
    Returns:
        HttpResponse rendered from todoapp/index.html with 'users' and 'tasks'.
    """
    template = "todoapp/index.html"
    users = CustomUser.objects.all()
    # list(...)[::-1] keeps the original "reverse default ordering" behavior
    # without the pointless identity comprehension; the leftover debug print
    # of the user queryset is removed.
    tasks = list(Todo.objects.all())[::-1]
    args = {'users': users, 'tasks': tasks}
    return render(request, template, args)
def onboarding(request):
    """Render the static onboarding page with an empty context."""
    context = {}
    return render(request, "todoapp/onboarding.html", context)
def checkKey(key):
    """Return True when *key* is the accepted API key, False otherwise."""
    return key == "araba"
@csrf_exempt
def create_task(request):
    """API endpoint: create a Todo from a JSON request body.

    Requires an `Api-Key` header (seen here as META key ``HTTP_API_KEY``)
    validated by checkKey().  The body must be JSON with 'title',
    'description' and 'due_date'.  Always returns a JSON HttpResponse with
    'success' and 'message' keys; on success also echoes the 'user'.

    NOTE(review): the user is hard-coded — presumably a placeholder until
    authentication is wired in; confirm before production use.
    """
    user = CustomUser.objects.get(username="TheoElia")
    objects = {}
    response = {}
    headers = {}
    head_json = request.META
    # Collect only the HTTP_* entries of request.META (the actual headers).
    for key,val in head_json.items():
        if key.startswith('HTTP_'):
            headers[key]=val
    # print(headers)
    try:
        api_key = headers['HTTP_API_KEY']
    except Exception as e:
        # No api key header: reply immediately without touching the body.
        response['success']=False
        response['message']= "Please provide an api key"
        dump = json.dumps(response,cls=ExtendedEncoderAllFields)
        return HttpResponse(dump, content_type='application/json')
    if checkKey(api_key):
        # Getting data posted by user
        json_data = json.loads(str(request.body, encoding='utf-8'))
        # putting data posted by user into our own dictionary
        for key,val in json_data.items():
            objects[key]=val
        # create a new task
        # time.sleep(10)
        # Trying to make sure the api caller provided all required fields
        try:
            task = Todo()
            task.title = objects['title']
            task.description = objects['description']
            task.due_date = objects['due_date']
        except KeyError as e:
            # if a field is missing, ask the api caller for it
            response['success']=False
            response['message']= "Please provide "+str(e)
        except Exception as e:
            # if there's an unknown error, tell the api caller
            response['success']=False
            response['message']=str(e)
        else:
            # if everything is fine, let's save
            task.save()
            response['success']=True
            response['message']="Task created"
            # Serialized by ExtendedEncoderAllFields into a field dict.
            response['user']=user
    else:
        response['success']=False
        response['message']= "Wrong api key provided"
    # converting the response dictionary to json
    dump = json.dumps(response,cls=ExtendedEncoderAllFields)
    return HttpResponse(dump, content_type='application/json')
|
import os
import random
from Clustering.K_Means.HTML_CLUSTERING.main import run
from Clustering.K_Means.HTML_CLUSTERING.utils import HtmlPage
def get_test_data():
    """Yield the path of every file under ./download, deepest directories first."""
    for directory, _, filenames in os.walk("./download", topdown=False):
        for filename in filenames:
            yield os.path.join(directory, filename)
def load_page(file_path):
    """Read the file at *file_path* and wrap its text in an HtmlPage."""
    with open(file_path, 'r') as handle:
        return HtmlPage(body=handle.read())
def test():
    """Grow a cluster seed set until every downloaded template classifies.

    Starts from one random template, builds clusters over the seed set,
    feeds in a slice of templates, and restarts (recursively) whenever a
    template fails to classify (classify() == -1), adding it to the seeds.
    NOTE(review): recursion depth grows with every unclassified page, and
    the for/else pairing below is reconstructed from de-indented source —
    confirm it is a for-else (print once after a clean pass), not an
    if-else.
    """
    test_templates = list(get_test_data())
    print("templates count:", len(test_templates))
    # Seed the cluster set with one random template.
    k = {random.choice(test_templates)}

    def add(k_clusters):
        # Build clusters from the current seed pages.
        _clt = run(load_page(t) for t in k_clusters)
        print(k_clusters)
        # Feed in a window of templates proportional to the seed count.
        for i in test_templates[1:15 * len(k)]:
            _clt.add_page(load_page(i))
        # Re-check every template; restart with a bigger seed set on failure.
        for i in test_templates[:]:
            classify = _clt.classify(load_page(i))
            if classify == -1:
                k.add(i)
                return add(k)
        else:
            # All templates classified — report the final seed set.
            print(k)

    add(k)


if __name__ == '__main__':
    test()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
from ClientMachineRest import ClientMachineRest
from Rectangle import Rectangle
from Logger import Logger
class ClientScreenParserRest(object):
    """ScreenParser client calling a set of RESTful services.

    Binds the REST base URLs at construction time; each public method wraps
    one detection/recognition endpoint and retries on connection failure.
    (Python 2 code — note ``xrange`` here and the ``print`` statements in
    the module's __main__ block.)
    """

    def __init__(self,
                 text_detector_url, text_recognizer_url,
                 button_detector_url, button_recognizer_url,
                 image_detector_url, image_recognizer_url):
        """Constructor of Screen Parser Client.

        Binds base URLs for text_detect/text_recognize,
        button_detect/button_recognize and image_detect/image_recognize.

        Arguments:
            text_detector_url {str} -- base_url for text_detect
            text_recognizer_url {str} -- base_url for text_recognize
            button_detector_url {str} -- base_url for button_detect
            button_recognizer_url {str} -- base_url for button_recognize
            image_detector_url {str} -- base_url for image_detect
            image_recognizer_url {str} -- base_url for image_recognize
        """
        super(ClientScreenParserRest, self).__init__()
        self.__text_detector_url = text_detector_url
        self.__text_recognizer_url = text_recognizer_url
        self.__button_detector_url = button_detector_url
        self.__button_recognizer_url = button_recognizer_url
        self.__image_detector_url = image_detector_url
        self.__image_recognizer_url = image_recognizer_url
        self.__logger = Logger("ClientScreenParserRest").get()

    def multiple_connection_server_dec(func):
        """Decorator: retry the wrapped call when it raises RuntimeError.

        Consumed at class-definition time (hence no ``self`` parameter).
        ``args[0].__logger`` resolves because name mangling applies inside
        the class body.

        Arguments:
            func {function} -- the endpoint wrapper to retry

        Returns:
            function -- the retrying wrapper
        """
        def execute(*args, **kw):
            """Retry wrapper: three guarded attempts, then one final
            unguarded attempt whose RuntimeError propagates to the caller.

            Arguments:
                *args {[type]} -- args[0] --> self
                **kw {[type]} -- forwarded keyword arguments
            """
            # args[0].__logger.info("Start connecting to the server")
            # Try to connect to the server up to three times.
            for i in xrange(0, 3):
                try:
                    # args[0].__logger.info("Connection times : [%d]", i)
                    return func(*args, **kw)
                except RuntimeError:
                    args[0].__logger.warning(
                        "%d-connect to the server failed.", i)
            args[0].__logger.info("Multiple connection server failed.")
            # Fourth and final attempt; failures are no longer swallowed.
            return func(*args, **kw)
        return execute

    @multiple_connection_server_dec
    def text_detect(self, machine_id):
        """Text region detect handler of a specific machine.

        Detects text regions via the restful service.

        Arguments:
            machine_id {str} -- machine_id

        Returns:
            list<Rectangle> -- regions detected

        Raises:
            RuntimeError -- on non-OK status or a null JSON payload.
        """
        url_addi = r'Texts/Detection?Machine=' + machine_id
        rest_url = self.__text_detector_url + r'/' + url_addi
        response = requests.get(rest_url)
        if response.status_code == requests.codes.ok:
            raw_list = json.loads(response.text)
            if raw_list is None:
                self.__logger.error(
                    "Error:The text regions detection failed. Response:%s",
                    response.text)
                raise RuntimeError(
                    "The requested failed in the text regions detection.")
            # Convert each raw JSON dict into a Rectangle, in place.
            for i in range(0, len(raw_list)):
                raw_list[i] = Rectangle.json_hock(raw_list[i])
            return raw_list
        self.__logger.error(
            "Error:The requested failed in the text\
            regions detection. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError(
            "The requested failed in the text regions detection.")

    @multiple_connection_server_dec
    def text_recognize(self, machine_id, regions):
        """Text content recognize handler of a specific machine.

        Recognizes text via the restful service at the given regions.

        Arguments:
            machine_id {string} -- machine_id
            regions {list<Rectangle>} -- regions to recognize

        Returns:
            list<str> -- contents recognized

        Raises:
            RuntimeError -- on non-OK status.
        """
        url_addi = r'Texts/Recognition'
        rest_url = self.__text_recognizer_url + r'/' + url_addi
        post_dict = {}
        post_dict['rects'] = regions
        post_dict['id'] = machine_id
        # Rectangle.json_default serializes the Rectangle objects for POST.
        post_json = json.dumps(
            post_dict, default=Rectangle.json_default, sort_keys=True)
        headers = {'Content-type': 'application/json'}
        response = requests.post(rest_url, data=post_json, headers=headers)
        if response.status_code == requests.codes.ok:
            return json.loads(response.text)
        self.__logger.warning(
            "Warning:The requested failed in the text\
            recognition. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError("The requested failed in the text recognition.")

    @multiple_connection_server_dec
    def button_detect(self, machine_id):
        """Button region detect handler of a specific machine.

        Detects button regions via the restful service.

        Arguments:
            machine_id {str} -- machine_id

        Returns:
            list<Rectangle> -- regions detected

        Raises:
            RuntimeError -- on non-OK status or a null JSON payload.
        """
        url_addi = r'Buttons/Detection?Machine=' + machine_id
        rest_url = self.__button_detector_url + r'/' + url_addi
        response = requests.get(rest_url)
        if response.status_code == requests.codes.ok:
            raw_list = json.loads(response.text)
            if raw_list is None:
                self.__logger.warning(
                    "Warning:The button regions detection\
                    failed. Response:%s", response.text)
                raise RuntimeError(
                    "The requested failed in the button regions detection.")
            # Convert each raw JSON dict into a Rectangle, in place.
            for i in range(0, len(raw_list)):
                raw_list[i] = Rectangle.json_hock(raw_list[i])
            return raw_list
        self.__logger.warning(
            "Warning:The requested failed in the button\
            regions detection. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError(
            "The requested failed in the button regions detection.")

    @multiple_connection_server_dec
    def button_recognize(self, machine_id, regions):
        """Button status recognize handler of a specific machine.

        Recognizes button status via the restful service at the given regions.

        Arguments:
            machine_id {string} -- machine_id
            regions {list<Rectangle>} -- regions to recognize

        Returns:
            list<str> -- statuses recognized

        Raises:
            RuntimeError -- on non-OK status.
        """
        url_addi = r'Buttons/Recognition'
        rest_url = self.__button_recognizer_url + r'/' + url_addi
        post_dict = {}
        post_dict['rects'] = regions
        post_dict['id'] = machine_id
        post_json = json.dumps(
            post_dict, default=Rectangle.json_default, sort_keys=True)
        headers = {'Content-type': 'application/json'}
        response = requests.post(rest_url, data=post_json, headers=headers)
        if response.status_code == requests.codes.ok:
            return json.loads(response.text)
        self.__logger.warning(
            "Warning:The requested failed in the button\
            recognition. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError("The requested failed in the button recognition.")

    @multiple_connection_server_dec
    def image_detect(self, machine_id):
        """Image region detect handler of a specific machine.

        Detects image regions via the restful service.

        Arguments:
            machine_id {str} -- machine_id

        Returns:
            list<Rectangle> -- regions detected

        Raises:
            RuntimeError -- on non-OK status or a null JSON payload.
        """
        url_addi = r'Image/Detection?Machine=' + machine_id
        rest_url = self.__image_detector_url + r'/' + url_addi
        response = requests.get(rest_url)
        if response.status_code == requests.codes.ok:
            raw_list = json.loads(response.text)
            if raw_list is None:
                self.__logger.warning(
                    "Warning:The image regions detection\
                    failed. Response:%s", response.text)
                raise RuntimeError(
                    "Requested failed in the image regions detection.")
            # Convert each raw JSON dict into a Rectangle, in place.
            for i in range(0, len(raw_list)):
                raw_list[i] = Rectangle.json_hock(raw_list[i])
            return raw_list
        self.__logger.warning("Warning:Requested failed in the image regions\
            detection. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError(
            "Requested failed in the image regions detection.")

    @multiple_connection_server_dec
    def image_recognize(self, machine_id, regions):
        """Image label recognize handler of a specific machine.

        Recognizes image labels via the restful service at the given regions.

        Arguments:
            machine_id {string} -- machine_id
            regions {list<Rectangle>} -- regions to recognize

        Returns:
            list<str> -- labels recognized

        Raises:
            RuntimeError -- on non-OK status.
        """
        url_addi = r'Image/Recognition'
        rest_url = self.__image_recognizer_url + r'/' + url_addi
        post_dict = {}
        post_dict['rects'] = regions
        post_dict['id'] = machine_id
        post_json = json.dumps(
            post_dict, default=Rectangle.json_default, sort_keys=True)
        headers = {'Content-type': 'application/json'}
        response = requests.post(rest_url, data=post_json, headers=headers)
        if response.status_code == requests.codes.ok:
            return json.loads(response.text)
        self.__logger.warning(
            "Warning:Requested failed in the images\
            recognize. Response:%s:%s", response.status_code, response.text)
        raise RuntimeError("Requested failed in the images recognize.")
if __name__ == '__main__':
    # Manual smoke test (Python 2 — note the print statements): look up a
    # machine id, then run text detection + recognition against it.
    machine = ClientMachineRest("http://APBSH0675:64001")
    machines_info = machine.get_machine_info()
    machine_id = machines_info.get("machineID")
    print machine_id
    # NOTE(review): only the text endpoints point at real services here;
    # the image URLs are intentionally left empty.
    parser = ClientScreenParserRest(
        "http://Local:64002/LogScreenParser",
        #"http://localhost:65002/LogScreenParser",
        "http://Local:64004/TesseractScreenParser",
        "http://Local:64002/LogScreenParser",
        "http://Local:64003/FeatureScreenParser",
        "",
        "")
    # regions = []
    # regions.append(Rectangle([4,35,305,40]))
    # print regions
    # print parser.text_recognize(machine_id, regions)
    regions = parser.text_detect(machine_id)
    print regions
    print parser.text_recognize(machine_id, regions)
    # regions = parser.button_detect(machine_id)
    # print regions
    # print parser.button_recognize(machine_id, regions)
|
import names
import pandas as pd
import numpy as np
import random as rand
# SECTION 1
# Build a roster of random student names with random scores for 5 subjects.
from pandas import DataFrame

num = 100  # number of students
name_list = [names.get_full_name() for _ in range(num)]

subjects = ['Japanese', 'English', 'Mathematics', 'Social_Studies', 'Science']
# One rounded random-score array per subject, keyed by subject name.
# Bug fix: the original multiplied by 101, which can produce scores above
# 100; * 100 keeps every score in the documented 0-100 range.  A plain dict
# replaces the vars()[x] dynamic-variable hack.
scores = {
    subject: np.around([rand.random() * 100 for _ in range(num)], decimals=1)
    for subject in subjects
}

# Creating the pandas dataframe.
col_names = ['Student_Name'] + subjects  # column names, in order
df: DataFrame = pd.DataFrame(
    list(zip(name_list, *(scores[s] for s in subjects))), columns=col_names)

# SECTION 2
# Calculating the HENSACHI (standard score) for all students.
df['Total'] = df.loc[:, 'Japanese':'Science'].sum(axis=1)
df['Average'] = df.loc[:, 'Japanese':'Science'].mean(axis=1, numeric_only=True)
df.sort_values(by='Total', inplace=True, ascending=False)
df = df.reset_index(drop=True)

# Per-subject mean / standard deviation, computed before the stat rows are
# appended so they only reflect real students.
subject_avgs = [df[s].mean(axis=0) for s in subjects]
subject_stds = [df[s].std(axis=0) for s in subjects]
total_avg = df['Average'].mean(axis=0)
total_std = df['Total'].std(axis=0)
# Stash the stats in two pseudo-rows at index labels -1 / -2; the hensachi
# loop below reads the mean/std back out of these rows by column name.
df.loc[-1, 'Student_Name':'Average'] = ['Subject Averages'] + subject_avgs + [total_avg, None]
df.loc[-2, 'Student_Name':'Average'] = ['Subject Standard Deviations'] + subject_stds + [total_std, None]

new_labels = ['JPN_HENSACHI', 'ENG_HENSACHI', 'MA_HENSACHI', 'SS_HENSACHI', 'SC_HENSACHI', 'SOGO_HENSACHI']
col_names = col_names + ['Total']
ct = 1  # index into col_names: the subject column feeding each hensachi label
for x in new_labels:
    for y in range(num):
        if x != 'SOGO_HENSACHI':
            # hensachi = 50 + 10 * (score - mean) / std
            df.loc[y, x] = 50 + (df.loc[y, col_names[ct]] - df.loc[-1, col_names[ct]]) * 10 / df.loc[-2, col_names[ct]]
        else:
            # Overall hensachi = mean of the five subject hensachi values.
            df.loc[y, x] = df.loc[y, 'JPN_HENSACHI':'SC_HENSACHI'].mean()
    ct += 1
df = df.round(2)
df.to_excel('hensachi_sample.xlsx', sheet_name='Sheet1', engine='xlsxwriter')  # doctest: +SKIP
|
from frw_tester import *
from logger import *
import os
import shutil
from supervisor import supervisor
import timeout_decorator
class ScenarioMaker:
    """Drives firmware test scenarios (reset, video capture, image capture,
    flashing) on a camera platform, writing RTOS/Linux logs for each test
    under ~/Logs.

    NOTE(review): `time` is used throughout but never imported in this
    module; presumably it is re-exported by `from frw_tester import *` —
    verify.
    """

    # ------------------------------------------------ constructor ----------------------------------------------------------
    def __init__(self):
        # Collaborators: firmware tester driver, log writer, result checker.
        self.m_frw_tester = frw_tester()
        self.m_logger = logger()
        self.supervisor = supervisor()
        # Move the tester into ~/BANZAI_EP and remember its absolute path.
        self.m_frw_tester.goto_path("/BANZAI_EP")
        self.home_path = self.m_frw_tester.abspath
        # Root path for all test logs.
        self.logs_path = os.environ["HOME"] + "/Logs"
        # Create the logs directory if missing, then wipe previous-run logs.
        self.checkdir(self.logs_path)
        self.cleandir(self.logs_path)

    # def reset(self):
    #     self.m_frw_tester.__del__()
    #     self.m_frw_tester.__init__()

    def checkdir(self, arg_path):
        """Create *arg_path* if it does not already exist as a directory."""
        if os.path.isdir(arg_path):
            pass
        else:
            os.mkdir(arg_path)

    def cleandir(self, arg_path):
        """Remove every file and subdirectory inside *arg_path* (best effort)."""
        for the_file in os.listdir(arg_path):
            file_path = os.path.join(arg_path, the_file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
            except Exception as e:
                # Best-effort cleanup: report and continue with other entries.
                print(e)

    # ------------------------------------------------ destructor --------------------------------------------------------------
    def __del__(self):
        # Explicitly tear down the tester driver with the scenario maker.
        self.m_frw_tester.__del__()

    def copy_files(self, from_path, to_path):
        """Copy the tree at *from_path* to *to_path*, replacing any existing
        copy.  (Candidate for moving into the logger class.)
        """
        if os.path.exists(to_path):
            # self.cleandir(to_path)
            shutil.rmtree(to_path)
        shutil.copytree(from_path, to_path)

    # ------------------------------------------------ called before a test starts ---------------------------------------------
    @timeout_decorator.timeout(180)  # 3 min of delay
    def cleanup(self):
        """Pre-test cleanup: reset the camera and clear capture leftovers."""
        # Start with a valid firmware: reflash the platform using arduino
        # (ref-firmware) --> enable once the power-cut driver is developed.
        # self.m_frw_tester.flash_camera(arg_mode='arduino', arg_frw_type="spherical")
        # time.sleep(10)
        self.m_frw_tester.reset_camera()
        # Give the camera time to reset.
        time.sleep(2)
        # Clean test_capt (in case a previous test crashed).
        self.cleandir(self.home_path + "/Desktop/test_capt")
        time.sleep(1)
        # Remove all files in DCIM/100GOPRO (in case a previous test crashed).
        clean_cmd = self.m_frw_tester.tcmdAgent.getCmd(clean=1)
        self.m_frw_tester.Execute(clean_cmd)
        time.sleep(0.5)

    @timeout_decorator.timeout(180)  # 3 min of delay
    def Reset_Test(self):
        """Reset the camera while capturing RTOS/Linux logs into Logs/ResetTest."""
        loc_path = self.logs_path + "/ResetTest"
        self.checkdir(loc_path)
        # Start log (rtos, linux) acquisition.
        self.m_frw_tester.start_acquisition()
        self.m_frw_tester.reset_camera()
        # Wait for the reset to complete.
        time.sleep(5)
        self.m_frw_tester.stop_acquisition()
        # get_data() is used as (linux_log, rtos_log) — see filenames below.
        linux_rtos_logs = self.m_frw_tester.get_data()
        self.m_logger.write(loc_path + "/linux_ResetTest_log.txt", linux_rtos_logs[0])
        self.m_logger.write(loc_path + "/rtos_ResetTest_log.txt", linux_rtos_logs[1])

    @timeout_decorator.timeout(300)  # 5 min of delay
    def Test_video(self, test_mode, flare=1, arg_time=4):
        """Run a video capture test and archive its logs and captures.

        Arguments:
            test_mode -- label used for the log folder and file names.
            flare -- forwarded to runTest.
            arg_time -- capture duration forwarded to runTest.
        """
        loc_path = self.logs_path + "/test_video_" + test_mode
        self.checkdir(loc_path)
        self.m_frw_tester.start_acquisition()
        # still=0 selects video capture.
        # NOTE(review): runTest's test_mode is hard-coded here even though
        # this method takes a test_mode parameter — confirm intent.
        self.m_frw_tester.runTest(still=0, test_mode="5K_EAC_30_W_HEVC_IMX577", flare=flare, time=arg_time)
        time.sleep(5)
        self.m_frw_tester.stop_acquisition()
        linux_rtos_logs = self.m_frw_tester.get_data()
        # Write logs (rtos, linux) into the logs path.
        self.m_logger.write(loc_path + "/linux_" + test_mode + "video_log.txt", linux_rtos_logs[0])
        self.m_logger.write(loc_path + "/rtos_" + test_mode + "video_log.txt", linux_rtos_logs[1])
        # Copy the captures into the log directory, then clean test_capt so
        # the next test starts from an empty capture folder.
        self.copy_files(self.home_path + "/Desktop/test_capt", loc_path + "/test_capt")
        self.cleandir(self.home_path + "/Desktop/test_capt")
        time.sleep(1.5)

    @timeout_decorator.timeout(300)  # 5 min of delay
    def Test_image(self, test_mode="5K_EAC_30_W_HEVC_IMX577", test_option=None):
        """Run a still-image capture test and archive its logs and captures.

        Arguments:
            test_mode -- capture mode forwarded to runTest.
            test_option -- optional variant ('PANO', 'CALIB', ...) forwarded
                to runTest; also part of the log folder name.
        """
        loc_path = self.logs_path + "/still_" + test_mode + str(test_option)
        self.checkdir(loc_path)
        self.m_frw_tester.start_acquisition()
        # still=1 selects image capture.
        self.m_frw_tester.runTest(still=1, test_mode=test_mode, test_option=test_option)
        time.sleep(5)
        self.m_frw_tester.stop_acquisition()
        linux_rtos_logs = self.m_frw_tester.get_data()
        self.m_logger.write(loc_path + "/linux_ImageTest_log.txt", linux_rtos_logs[0])
        self.m_logger.write(loc_path + "/rtos_ImageTest_log.txt", linux_rtos_logs[1])
        # Copy the captures into the log directory, then clean test_capt so
        # the next test starts from an empty capture folder.
        self.copy_files(self.home_path + "/Desktop/test_capt", loc_path + "/test_capt")
        self.cleandir(self.home_path + "/Desktop/test_capt")  # for each test delete the generated files
        time.sleep(1.5)

    @timeout_decorator.timeout(360)  # 6 min of delay
    def flash_Test(self, arg_frw_type):
        """Flash the camera with a new firmware build and verify it boots.

        Arguments:
            arg_frw_type -- firmware flavor ('spherical', ...) forwarded to
                flash_camera; also part of the log folder name.
        """
        loc_path = self.logs_path + "/flashTest" + arg_frw_type
        self.checkdir(loc_path)
        self.m_frw_tester.start_acquisition()
        # Flash the camera with the new firmware (this reboots the platform).
        self.m_frw_tester.flash_camera(arg_mode='make', arg_frw_type=arg_frw_type)
        # Time for flashing.
        time.sleep(10)
        # Run the command list that checks whether the RTOS boots.
        check_boot_cmd = self.m_frw_tester.tcmdAgent.getCmd(rtos_version_test=1)
        self.m_frw_tester.runScenario(check_boot_cmd)
        time.sleep(2)
        self.m_frw_tester.stop_acquisition()
        linux_rtos_logs = self.m_frw_tester.get_data()
        rtos_log_path = loc_path + "/rtos_flashTest_log.txt"
        self.m_logger.write(loc_path + "/linux_flashTest_log.txt", linux_rtos_logs[0])  # write data
        self.m_logger.write(rtos_log_path, linux_rtos_logs[1])
        # Check from the RTOS log whether the firmware booted correctly.
        self.supervisor.isfirmwareBooted(rtos_log_path)
#
# s= ScenarioMaker()
# s.cleandir(s.logs_path+"/still_5K_EAC_30_W_HEVC_IMX577None")
|
# --> smtp lib, ssl
import smtplib, ssl
def sendmail(message):
    """Send *message* from the project Gmail account to the fixed recipient
    over SMTP with STARTTLS.  Errors are printed, not raised (best-effort).

    Arguments:
        message -- the raw RFC 2822 message string to send.
    """
    s_server = "smtp.gmail.com"
    port = 587
    send_mail = "seceminiproject@gmail.com"
    # SECURITY: hard-coded credential — move to an environment variable or
    # secret store before committing/shipping.
    mail_password = "sriram@raghu"
    recv_mail = "r4ghunandhan@gmail.com"
    con = ssl.create_default_context()
    server = None
    try:
        server = smtplib.SMTP(s_server, port)
        server.ehlo()
        server.starttls(context=con)
        server.ehlo()
        server.login(send_mail, mail_password)
        # Bug fix: the original passed the function `sendmail` as the
        # from-address instead of the `send_mail` string.
        server.sendmail(send_mail, recv_mail, message)
    except Exception as e:
        print(e)
    finally:
        # Bug fix: only quit if the connection was actually established;
        # the original raised NameError here when SMTP() itself failed.
        if server is not None:
            server.quit()
#coding=utf-8
import io
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
# Report generation
import unittest
from HTMLTestRunner import HTMLTestRunner
from time import sleep
import time  # used to generate timestamps
import os  # used for AutoIt-based uploads
import sys
"""解决vscode中不能引用别的模块的问题"""
# ^ works around VS Code not resolving sibling modules: the project root
#   is appended to sys.path below.
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
# print(sys.path)
# Force UTF-8 stdout so Chinese text prints correctly on Windows consoles.
sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='utf8')
# Import the shared helper functions.
from common.comfunction import OpenBrowser
from common.comfunction import User
from common.comfunction import comHtml,com_path
# Destination directory for the HTML test report.
resultpath = os.path.join(com_path(),"报告")
# 上传office相关文件
class up_office(unittest.TestCase):
    '''Upload office-format files (doc/docx/xls/xlsx/ppt) and preview each.'''

    def _preview_and_capture(self, driver, item_xpath, report_title, picturePath, settle_secs):
        '''Open one file preview, screenshot it into the HTML report, go back.

        Arguments:
            driver -- the active WebDriver.
            item_xpath -- XPath of the clickable list entry for the file.
            report_title -- caption written next to the screenshot.
            picturePath -- directory the screenshot .png is saved into.
            settle_secs -- extra seconds to let the preview render.
        '''
        driver.find_element_by_xpath(item_xpath).click()
        # Wait for the preview iframe to load before taking the screenshot.
        WebDriverWait(driver, 15, 0.2).until(ec.presence_of_element_located((By.XPATH, "//iframe")))
        sleep(settle_secs)
        stamp = str(int(time.time()))
        driver.get_screenshot_as_file(picturePath + stamp + ".png")
        comHtml().print_html(report_title, picturePath, stamp)
        # Navigate back and wait for the iframe to disappear.
        driver.find_element_by_xpath("//span[contains(text(),'返回')]/..").click()
        WebDriverWait(driver, 5, 0.2).until_not(ec.presence_of_element_located((By.XPATH, "//iframe")))

    def upload_office(self):
        '''Upload the office fixture files, screenshot the file list and every
        preview, and write each screenshot into the HTML report.
        '''
        mode = 2
        driver = OpenBrowser(mode)
        User().login(driver)
        # Common parameters
        picturePath = com_path() + "截图\\" + "19种上传格式截图\\office\\"
        if not (os.path.exists(picturePath)):
            os.makedirs(picturePath)
        showPath = "file:///C:/work/1测试/10自动化/截图保存/19种上传格式截图/office/"
        waitTime = 5
        uploadwait = 15  # wait time after an upload finishes
        # Create a timestamp-named folder at the private-documents root.
        el1 = driver.find_element_by_xpath("//span[text()='新建']")
        sleep(waitTime)
        ActionChains(driver).move_to_element(el1).perform()
        driver.find_element_by_xpath("//li[text()='文件夹']").click()
        folder1 = int(time.time())
        print("文件夹:%s " % folder1)
        driver.switch_to.active_element.send_keys(folder1)
        driver.switch_to.active_element.send_keys(Keys.ENTER)
        # Enter the new folder.
        driver.find_element_by_xpath("//span[text()=" + str(folder1) + "]").click()
        # Office fixture files to upload.
        fpath = com_path() + "19种格式\\office\\"
        word1name = "2017年12月11日-2017年12月15日发行监管部"
        word2name = "带图片表格文档"
        excel1name = "003_模板_TestLink测试用例导入"
        excle2name = "cyprex1.3测试用例"
        pptname = "小z素材-商务炫酷风格动态模板-003"
        # Create an "office" sub-folder and enter it.
        el1 = driver.find_element_by_xpath("//span[text()='新建']")
        sleep(waitTime)
        ActionChains(driver).move_to_element(el1).perform()
        driver.find_element_by_xpath("//li[text()='文件夹']").click()
        office = "office"
        driver.switch_to.active_element.send_keys(office)
        driver.switch_to.active_element.send_keys(Keys.ENTER)
        print("创建office分类文件夹成功: %s" % office)
        sleep(1)
        driver.find_element_by_xpath("//span[text()=" + "'" + office + "'" + "]").click()
        # Upload the office files.
        driver.find_element_by_xpath("//input[@type='file']").send_keys(fpath + word1name + ".doc")
        driver.find_element_by_xpath("//input[@type='file']").send_keys(fpath + word2name + ".docx")
        driver.find_element_by_xpath("//input[@type='file']").send_keys(fpath + excel1name + ".xls")
        driver.find_element_by_xpath("//input[@type='file']").send_keys(fpath + excle2name + ".xlsx")
        driver.find_element_by_xpath("//input[@type='file']").send_keys(fpath + pptname + ".ppt")
        sleep(20)
        # Screenshot the uploaded-file list and emit it to the report.
        date1 = str(int(time.time()))
        driver.get_screenshot_as_file(picturePath + date1 + ".png")
        comHtml().print_html("office文件列表", picturePath, date1)
        # Preview each uploaded file in turn (the first entry uses a slightly
        # different locator; the ppt preview gets extra settle time).
        self._preview_and_capture(driver, "//span[text()=\'" + word1name + "\']/..", word1name, picturePath, 1)
        self._preview_and_capture(driver, "//div/span[text()=\'" + word2name + "\']/..", word2name, picturePath, 1)
        self._preview_and_capture(driver, "//div/span[text()=\'" + excel1name + "\']/..", excel1name, picturePath, 1)
        self._preview_and_capture(driver, "//div/span[text()=\'" + excle2name + "\']/..", excle2name, picturePath, 1)
        self._preview_and_capture(driver, "//div/span[text()=\'" + pptname + "\']/..", pptname, picturePath, 5)
        # Return to the format-collection folder.
        # Bug fix: folder1 is an int — the original concatenated it straight
        # into the XPath, which raised TypeError; wrap in str().
        driver.find_element_by_xpath("//a[text()=\'" + str(folder1) + "\']")
        # driver.quit()
if __name__ == "__main__":
    # Assemble a suite containing only the upload test.
    testunite = unittest.TestSuite()
    testunite.addTest(up_office("upload_office"))
    # Run it through HTMLTestRunner and write the report file.
    fp = open(resultpath+'up_office.html','wb')
    runner = HTMLTestRunner(stream=fp, title='upoffice', description='执行情况:')
    runner.run(testunite)
    fp.close()
|
from model.contact import Contact
import random
def test_delete_contact_by_id(app, db, check_ui):
    """Delete a randomly chosen contact and verify DB (and optionally UI) state."""
    # Precondition: make sure there is at least one contact to delete.
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="qqqqqqqq", middlename="wwwwwww", nickname="eeefdeeee", title="vvvvvvvvvv",
                                   lastname="eeeeeeeee", company="xccccccccc",
                                   adress="ffcvcxvcvcxvxcvx", home="23144124214", mobile="45565656678",
                                   work="56678678678", fax="67867868686",
                                   email="wap@mail.ru", email2="trest@mail.ru", email3="big@mail.ru",
                                   homepage="http://wwwww.ru", byear="1985", ayear="2000",
                                   address2="sdfdsfsdfsdfsd", phone2="sdfsdfsdfsdfsdf", notes="sfsdfsdfdssdfsdfs"))
    contacts_before = db.get_contact_list()
    victim = random.choice(contacts_before)
    app.contact.delete_contact_by_id(victim.id)
    contacts_after = db.get_contact_list()
    # Exactly one contact disappears, and it is the chosen one.
    assert len(contacts_before) - 1 == len(contacts_after)
    contacts_before.remove(victim)
    assert contacts_before == contacts_after
    if check_ui:
        # NOTE(review): comparing contacts against get_group_list() looks like a
        # copy-paste slip — presumably get_contact_list(); verify before changing.
        assert sorted(contacts_after, key=Contact.id_or_max) == sorted(app.contact.get_group_list(), key=Contact.id_or_max)
# Удаление контакта по индексу
#def test_delete_some_contact(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(firstname="qqqqqqqq", middlename="wwwwwww", nickname="eeefdeeee", title="vvvvvvvvvv",
# lastname="eeeeeeeee", company="xccccccccc",
# adress="ffcvcxvcvcxvxcvx", home="23144124214", mobile="45565656678",
# work="56678678678", fax="67867868686",
# email="wap@mail.ru", email2="trest@mail.ru", email3="big@mail.ru",
# homepage="http://wwwww.ru", byear="1985", ayear="2000",
# address2="sdfdsfsdfsdfsd", phone2="sdfsdfsdfsdfsdf", notes="sfsdfsdfdssdfsdfs"))
# old_contacts = app.contact.get_contact_list()
# index = randrange(len(old_contacts))
# app.contact.delete_contact_by_index(index)
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) - 1 == len(new_contacts)
# old_contacts[index:index + 1] = []
# assert old_contacts == new_contacts
|
#!/usr/bin/env python
# coding: utf-8
# # Q1.Given an array of integers and a number, perform left rotations on the array.
# In[1]:
def rotate(arr, n):
    """Rotate the first *n* items of *arr* right by one position, in place.

    (Despite the exercise title mentioning left rotations, this moves the
    last of the first n elements to the front — same as the original.)
    """
    last = arr[n - 1]
    arr[1:n] = arr[0:n - 1]
    arr[0] = last
# Demo: rotate a sample array once and show it before and after.
arr=[1,2,3,4,5]
n = len(arr)
print("Given array is")
for i in range(0,n):
    print(arr[i],end=" ")
rotate(arr,n)
print("\nRotated array is")
for i in range(0,n):
    print(arr[i], end=" ")
# # Q2.Arrange the array elements so that all negative numbers appears before all positive numbers.
# In[5]:
def rearrange(arr, n):
    """Partition *arr* in place so all negatives precede all positives,
    then print the result. Relative order of negatives is preserved.
    """
    write = 0  # next slot for a negative value
    for read in range(n):
        if arr[read] < 0:
            arr[read], arr[write] = arr[write], arr[read]
            write += 1
    print(arr)
# Demo: partition a mixed-sign array so negatives come first.
arr=[-12,11,-13,-5,6,-7,5,-3,-6]
n=len(arr)
rearrange(arr, n)
# # Q3.Given an array of positive and negative numbers arrange them in an alteenate fashion such that every positive number is followed by negative.
# In[2]:
def rearrange(arr, n):
    """Sort *arr*, then swap each negative at an odd index with the next
    positive so the signs alternate where possible. Mutates *arr* in place
    and also returns it.
    """
    arr.sort()
    # j: index of the first positive element (n if there is none).
    j = 1
    while j < n:
        if arr[j] > 0:
            break
        j += 1
    i = 1
    # Bounds guard `i < n` fixes a potential IndexError in the original,
    # which evaluated arr[i] before checking any range limit.
    while i < n and arr[i] < 0 and j < n:
        arr[i], arr[j] = arr[j], arr[i]
        i += 2
        j += 1
    return (arr)

arr = [-2,3,4,-1]
ans = rearrange(arr, len(arr))
for num in ans:
    print(num, end=" ")
# In[3]:
def rearrange(arr, n):
    """Identical to the cell above (notebook export duplicates the def):
    sort, then alternate signs by swapping odd-index negatives with the
    next positive. Returns the mutated *arr*.
    """
    arr.sort()
    j = 1
    while j < n:
        if arr[j] > 0:
            break
        j += 1
    i = 1
    # `i < n` guard prevents the IndexError the original could raise by
    # reading arr[i] before any bounds check.
    while i < n and arr[i] < 0 and j < n:
        arr[i], arr[j] = arr[j], arr[i]
        i += 2
        j += 1
    return (arr)

arr = [-2,3,1]
ans = rearrange(arr, len(arr))
for num in ans:
    print(num, end=" ")
# In[4]:
def rearrange(arr, n):
    """Identical to the two cells above (notebook export duplicates the
    def): sort, then alternate signs. Returns the mutated *arr*.
    """
    arr.sort()
    j = 1
    while j < n:
        if arr[j] > 0:
            break
        j += 1
    i = 1
    # `i < n` guard prevents the IndexError the original could raise by
    # reading arr[i] before any bounds check.
    while i < n and arr[i] < 0 and j < n:
        arr[i], arr[j] = arr[j], arr[i]
        i += 2
        j += 1
    return (arr)

arr = [-5,3,4,5,-6,-2,8,9,-1,-4]
ans = rearrange(arr, len(arr))
for num in ans:
    print(num, end=" ")
# # Q4. Program for Caesar Cipher in Python.
# In[6]:
def encrypt(string, shift):
    """Caesar-cipher *string* by *shift* positions.

    Letters rotate within their own case. Any non-letter character
    (space, digit, punctuation) passes through unchanged — the original
    only special-cased spaces and corrupted other non-letters by running
    them through the lowercase branch.
    """
    cipher = ''
    for char in string:
        if char.isupper():
            cipher = cipher + chr((ord(char) + shift - 65) % 26 + 65)
        elif char.islower():
            cipher = cipher + chr((ord(char) + shift - 97) % 26 + 97)
        else:
            cipher = cipher + char
    return cipher
# Interactive demo: read plaintext and shift key from stdin, then encrypt.
text = input("enter string: ")
s = int(input("enter the key: "))
print("original string: ", text)
print("after encryption: ", encrypt(text, s))
|
# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
#####################################################
#####################################################
# Please enter the number of hours you spent on this
# assignment here
num_hours_i_spent_on_this_assignment = 0
#####################################################
#####################################################
#####################################################
#####################################################
# Give one short piece of feedback about the course so far. What
# have you found most interesting? Is there a topic that you had trouble
# understanding? Are there any changes that could improve the value of the
# course to you? (We will anonymize these before reading them.)
"""
I found that the course is very fun to learn and entertaining
This assignment is hard and requires too much time to finish
I truly found this assignment very interesting and fun because I learned a lot from it
However, I have spent more than 30 hours to finish this assignment
I hope the change that we are allowed to work the assignment in groups of 2 or 3 to lower the spending time on the assignment
"""
#####################################################
#####################################################
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
from util import PriorityQueue
from util import Stack
from util import Queue
from game import Directions
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """
    # Every method below is abstract: calling it on the base class aborts
    # via util.raiseNotDefined(); concrete problems override all four.
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    # Hard-coded solution path for the tinyMaze layout only.
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Question 1.1 — depth-first graph search.

    Returns a list of actions reaching the goal. The fringe is a Stack of
    (priority, node, path, visited) tuples where node is a
    (state, action, cost) triple from getSuccessors.
    """
    visited = []
    direction = []
    fringe = Stack()
    start_node = (problem.getStartState(), "", 0)
    fringe.push((0, start_node, direction, visited))
    while not fringe.isEmpty():
        depth, current_node, direction, visited = fringe.pop()
        # Goal test on pop: `direction` already includes the action that led
        # here, so return it as-is. (The original did
        # `direction + current_node[1]`, i.e. list + str → TypeError.)
        if problem.isGoalState(current_node[0]):
            return direction
        if current_node[0] not in visited:
            visited = visited + [current_node[0]]
            for successor in problem.getSuccessors(current_node[0]):
                if successor[0] not in visited:
                    # Early goal test saves one expansion (DFS need not be
                    # optimal, so this is safe here).
                    if problem.isGoalState(successor[0]):
                        return direction + [successor[1]]
                    depth_of_node = len(direction)
                    fringe.push((-depth_of_node, successor,
                                 direction + [successor[1]], visited))
    return direction
def breadthFirstSearch(problem):
    """Question 1.2 — breadth-first graph search.

    Returns the list of actions to the shallowest goal.
    """
    visited = []
    direction = []
    fringe = Queue()
    fringe.push((problem.getStartState(), direction))
    while not fringe.isEmpty():
        current_state, direction = fringe.pop()
        # Goal test on pop: `direction` is already the full action path.
        # (The original appended Successor[1] — an entry of a *stale*
        # successor list from the previous iteration — which corrupted the
        # answer and raised IndexError when the start state was the goal.)
        if problem.isGoalState(current_state):
            return direction
        if current_state not in visited:
            visited = visited + [current_state]
            for successor in problem.getSuccessors(current_state):
                if successor[0] not in visited:
                    # BFS explores in cost-1 layers, so returning on
                    # generation still yields a shallowest goal.
                    if problem.isGoalState(successor[0]):
                        return direction + [successor[1]]
                    fringe.push((successor[0], direction + [successor[1]]))
    return direction
def nullHeuristic(state, problem=None):
    """Trivial admissible heuristic: always estimate zero remaining cost."""
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Question 1.3 — A* graph search (f = g + h).

    Two fixes vs. the original: (1) the goal test now happens only when a
    node is *popped* — returning as soon as a goal is generated can give a
    suboptimal path; (2) the popped node's path is returned unchanged
    instead of re-appending its own action (which duplicated the last step
    and appended "" for the start state).
    """
    visited = []
    direction = []
    fringe = PriorityQueue()
    start_node = (problem.getStartState(), "", 0)
    fringe.push((start_node, direction), 0)
    while not fringe.isEmpty():
        current_node, direction = fringe.pop()
        if problem.isGoalState(current_node[0]):
            return direction
        if current_node[0] not in visited:
            visited.append(current_node[0])
            for successor in problem.getSuccessors(current_node[0]):
                if successor[0] not in visited:
                    # f = g (path cost so far) + h (heuristic estimate).
                    # int() coercion kept from the original behaviour.
                    h = heuristic(successor[0], problem)
                    g = problem.getCostOfActions(direction + [successor[1]])
                    fringe.push((successor, direction + [successor[1]]), g + int(h))
    return direction
# Abbreviations
# Short aliases used by the Pacman command line (e.g. `-a fn=astar`).
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
|
#!/usr/bin/python
import time, os, subprocess
import RPi.GPIO as GPIO
# set up GPIO on pin 17 for button press
GPIO.setmode(GPIO.BCM)  # Broadcom (BCM) pin numbering
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # button 1, pulled up (active low)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # button 2, pulled up (active low)
GPIO.setup(22, GPIO.OUT)  # output pin toggled by the buttons — presumably an LED; verify wiring
def say_something(something):
    """Speak *something* aloud by invoking the local speech.sh helper script."""
    # Re-apply the executable bit before every call (cheap and idempotent).
    subprocess.call(["chmod", "u+x", "./speech.sh"])
    subprocess.call(["./speech.sh", something])
try:
    # Poll both buttons forever; inputs are pulled up, so a press reads False.
    while True:
        if GPIO.input(17)== False:
            print('Button 1 Pressed')
            GPIO.output(22, GPIO.HIGH)
            say_something('You pressed button 1')
        if GPIO.input(18) == False:
            print('Button 2 Pressed')
            GPIO.output(22, GPIO.LOW)
            say_something('You pressed button 2')
        # Debounce / CPU-relief delay between polls.
        time.sleep(0.2)
finally:
    # Always release the GPIO pins, even on Ctrl-C.
    GPIO.cleanup()
|
import asynctest
from asynctest.mock import patch
from asynctest.mock import call
from charlesbot.slack.slack_message import SlackMessage
class TestPagerdutyEscalations(asynctest.TestCase):
    """Unit tests for PagerdutyEscalations.process_message routing.

    The three collaborator methods (load_config, print_service_list,
    trigger_escalation_incident) are patched out, so each test only checks
    which of them process_message dispatches to.

    NOTE(review): the test methods use `yield from`, i.e. they are
    generator functions — this relies on asynctest running generator-based
    coroutine tests; confirm against the asynctest version in use.
    """

    def setUp(self):
        # Patch collaborators before importing/instantiating the plugin so
        # __init__ never touches real config or PagerDuty.
        patcher1 = patch('charlesbot_pagerduty_escalations.pagerdutyescalations.PagerdutyEscalations.load_config')  # NOQA
        self.addCleanup(patcher1.stop)
        self.mock_load_config = patcher1.start()

        patcher2 = patch('charlesbot_pagerduty_escalations.pagerdutyescalations.PagerdutyEscalations.print_service_list')  # NOQA
        self.addCleanup(patcher2.stop)
        self.mock_print_service_list = patcher2.start()

        patcher3 = patch('charlesbot_pagerduty_escalations.pagerdutyescalations.PagerdutyEscalations.trigger_escalation_incident')  # NOQA
        self.addCleanup(patcher3.stop)
        self.mock_trigger_escalation_incident = patcher3.start()

        from charlesbot_pagerduty_escalations.pagerdutyescalations import PagerdutyEscalations  # NOQA
        self.pd = PagerdutyEscalations()

    def test_process_message_non_slack_message(self):
        # A bare string (not a SlackMessage) must be ignored entirely.
        message = "!lassie services this is my message to you ooh ooh"
        yield from self.pd.process_message(message)
        self.assertEqual(self.mock_print_service_list.mock_calls, [])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls, [])

    def test_process_message_non_lassie_message(self):
        # Messages without the !lassie prefix are ignored.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="This is my message to you ooh ooh")
        yield from self.pd.process_message(message)
        self.assertEqual(self.mock_print_service_list.mock_calls, [])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls, [])

    def test_process_message_invalid_lassie_message(self):
        # "!lassie" with no arguments does nothing.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="!lassie")
        yield from self.pd.process_message(message)
        self.assertEqual(self.mock_print_service_list.mock_calls, [])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls, [])

    def test_process_message_services_message_one(self):
        # "!lassie services" lists services.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="!lassie services")
        yield from self.pd.process_message(message)
        expected_call = call(message)
        self.assertEqual(self.mock_print_service_list.mock_calls,
                         [expected_call])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls, [])

    def test_process_message_services_message_two(self):
        # Trailing words after "services" are ignored.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="!lassie services some filler we ignore")
        yield from self.pd.process_message(message)
        expected_call = call(message)
        self.assertEqual(self.mock_print_service_list.mock_calls,
                         [expected_call])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls, [])

    def test_process_message_escalate_one(self):
        # Any other single token is treated as a service to escalate.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="!lassie website")
        yield from self.pd.process_message(message)
        expected_call = call("website", message)
        self.assertEqual(self.mock_print_service_list.mock_calls, [])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls,
                         [expected_call])

    def test_process_message_escalate_two(self):
        # Everything after "!lassie " is forwarded verbatim.
        message = SlackMessage(type="message",
                               user="U2147483697",
                               channel="C2147483705",
                               text="!lassie website halp plz")
        yield from self.pd.process_message(message)
        expected_call = call("website halp plz", message)
        self.assertEqual(self.mock_print_service_list.mock_calls, [])
        self.assertEqual(self.mock_trigger_escalation_incident.mock_calls,
                         [expected_call])
|
# -*- coding:utf-8 -*-
# @Desc :
# @Author : Administrator
# @Date : 2019-07-31 15:54
from django.core.mail import send_mail
from django.conf import settings
import string
import random
from users.models import EmailVerifyCode
# 生成验证码(随机字符串)
def get_random_code(slen):
    """Return a random verification code of *slen* distinct letters/digits.

    random.sample draws without replacement, so slen must be <= 62.
    """
    pool = string.ascii_letters + string.digits
    return ''.join(random.sample(pool, slen))
# 发送验证码邮件
def send_mail_code(email, send_type):
    """Create and persist an EmailVerifyCode for *email*, then mail it.

    send_type: 1 = account activation, 2 = password reset,
    3 = change-email verification. Any other value only saves the code and
    sends nothing (same as the original behaviour).

    SMTP settings (EMAIL_HOST, EMAIL_PORT, EMAIL_HOST_USER,
    EMAIL_HOST_PASSWORD, EMAIL_FROM) come from settings.py.
    """
    # 1. Persist the verification record so it can be compared later.
    email_verify_code = EmailVerifyCode()
    email_verify_code.email = email
    email_verify_code.send_type = send_type
    code = get_random_code(8)
    email_verify_code.code = code
    email_verify_code.save()
    # 2. Pick subject/body per message type. An elif chain replaces the
    # original's three independent ifs, and the triplicated send_mail call
    # is collapsed into one. Runtime strings are unchanged.
    if send_type == 1:
        send_title = "欢迎注册谷粒教育网站:"
        send_body = "请点击以下链接,进行激活账号: \n http://127.0.0.1:8000/users/user_active/" + code
    elif send_type == 2:
        send_title = "谷粒教育重置密码系统:"
        send_body = "请点击以下链接,进行重置密码: \n http://127.0.0.1:8000/users/user_reset/" + code
    elif send_type == 3:
        send_title = "谷粒教育重置邮箱验证码:"
        send_body = "你的邮箱验证码是: " + code
    else:
        # Unknown type: keep the original silent no-send behaviour.
        return
    send_mail(send_title, send_body, settings.EMAIL_FROM, [email])
|
"""
Exercicios com strings
"""
# Banner and prompt for the palindrome demo.
print("*"*40)
print("")
print("BRINCANDO COM STRING USANDO PALINDROMO")
print("")
print("*"*40)
# Read the word/phrase to test from stdin.
enter = input("Digite uma palavra ou frase: ")
def palindromo(enter):
    """Return a Portuguese message saying whether *enter* reads the same reversed."""
    reversed_text = enter[::-1]
    if reversed_text != enter:
        return f"A palavra {enter} não é um palindromo"
    return f"A palavra {enter} é um palindromo"
# Show the verdict for the text entered above.
print(palindromo(enter))
|
from mcpi.minecraft import Minecraft

mc = Minecraft.create()

WOOL = 35  # Minecraft block id for wool

RainbowList = [0, 1, 2, 3, 4, 5]  # wool data values for six colours

pos = mc.player.getTilePos()
x = pos.x
y = pos.y
z = pos.z

# Fixes the three bugs the original comments pointed out:
#  - the loop iterated the misspelled name `Rainbowlist`
#  - setBlock was missing the block id and colour-data arguments
#  - `y + 1` was a no-op expression instead of `y += 1`
# Builds a vertical column of wool, one colour per block.
for color in RainbowList:
    mc.setBlock(x, y, z, WOOL, color)
    y += 1
|
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this setup script.

    Uses a context manager so the file handle is closed deterministically
    (the original leaked it until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
setup(
    name = "pacmang2",
    version = "0.0.1",
    author = "Gaetan Gourdin",
    author_email = "bouleetbil@frogdev.info",
    description = ("Python binding for pacman-g2"),
    license = "GPL2",
    keywords = "pacman-g2",
    url = "http://www.frugalware.org",
    packages=['pacmang2'],
    long_description=read('README'),
    classifiers=[
        # Fixed to valid trove classifiers: the original used the
        # non-existent "Development Status :: Alpha" / ":: Python Module",
        # and had the list comma *inside* the License string.
        "Development Status :: 3 - Alpha",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
    ],
)
|
#MenuTitle: Wet Paint
# -*- coding: utf-8 -*-
__doc__="""
Wet Paint
"""
import GlyphsApp
from NaNGFGraphikshared import *
from NaNGFAngularizzle import *
from NaNGFNoise import *
from NaNFilter import NaNFilter
from NaNGlyphsEnvironment import glyphsEnvironment as G
from math import atan2, degrees
import math
import random
class Drip(NaNFilter):
    """Wet Paint filter: finds near-horizontal path segments and drags
    them downward with noise-shaped "drips"."""

    # Per-size tuning: maximum drip length and number of passes.
    params = {
        "S": { "maxdrip": 150, "iterations": 1 },
        "M": { "maxdrip": 350, "iterations": 2 },
        "L": { "maxdrip": 400, "iterations": 2 }
    }

    def getDrippableSegments(self, outlinedata):
        # find drippable segments within path and store indices
        # Returns, per path, a list of [start, end] node-index pairs.
        indices = []
        degvariance = 40
        for direction, nodes in outlinedata:
            index = []
            index_start, index_end = -999, -999  # -999 == "no open segment"
            # collect all possible drippable segments within certain angle
            for n in range(0,len(nodes)-1):
                x1,y1 = nodes[n]
                x2,y2 = nodes[n+1]
                a = atan2(y1-y2, x1-x2)
                deg = degrees(a)
                # NOTE(review): abs(deg) can never exceed 180, so the second
                # comparison is always true; presumably a symmetric band
                # around horizontal was intended — verify.
                if abs(deg)>(180-degvariance) and abs(deg)<(180+degvariance):
                    if index_start == -999:
                        index_start = n
                    else:
                        index_end = n
                else:
                    if index_start != -999:
                        # NOTE(review): a one-node run leaves index_end at
                        # -999; doDrip's range() then iterates nothing.
                        index.append( [ index_start, index_end ] )
                        index_start = -999
            indices.append(index)
        return indices

    def doDrip(self, thislayer, indices, outlinedata, maxdrip):
        # run through each drippable segment and do something
        # Mutates outlinedata in place: lowers each segment node's y by a
        # sine-enveloped, Perlin-noise-scaled amount.
        for p in range(0, len(outlinedata)):
            direction, structure = outlinedata[p]
            nodelen = len(structure)
            segs = indices[p]
            for seg in segs:
                index_start, index_end = seg
                seedx = random.randrange(0,100000)  # new noise seed per segment
                noisescale = 0.01
                steps = float(index_end - index_start)
                steppos = 0
                for n in range(index_start, index_end):
                    # Sine envelope: drips taper to zero at both segment ends.
                    angle = 180/steps * steppos
                    t = math.sin(math.radians(angle))
                    adjust = 1
                    x,y = structure[n]
                    if n < index_end:
                        x2, y2 = structure[n+1]
                        # Shorten drips that would collide with nearby ink.
                        searchblack = DistanceToNextBlack(thislayer, [x, y], [x2, y2], outlinedata, searchlimit=200)
                        #print searchblack
                        if searchblack is not None and searchblack < 200:
                            adjust = 0.2
                    # insert distance to next black checker here
                    noiz = pnoise1( (n+seedx)*noisescale, 4)
                    size = noiseMap( noiz, 0, maxdrip )
                    # Counters (clockwise paths) get much smaller drips.
                    if direction==Direction.CLOCKWISE: size*=0.2
                    size = t * abs(size) * adjust
                    structure[n][1] = y - size
                    steppos+=1

    def processLayer(self, thislayer, params):
        """Apply the drip effect `iterations` times and rebuild the outlines."""
        for n in range(0, params["iterations"]):
            pathlist = ConvertPathsToSkeleton(thislayer.paths, 4) # small seg size = quicker
            outlinedata = setGlyphCoords(pathlist)
            indices = self.getDrippableSegments(outlinedata)
            # Modifies outlinedata
            self.doDrip(thislayer, indices, outlinedata, params["maxdrip"])
            # draw updated outline
            for path in outlinedata:
                p = convertToFitpath(path[1], True)
                thislayer.paths.append(p)
            G.remove_overlap(thislayer)
        self.CleanOutlines(thislayer, remSmallPaths=True, remSmallSegments=True, remStrayPoints=True, remOpenPaths=True, keepshape=True)
# Instantiating runs/registers the filter — NaNFilter convention; verify.
Drip()
|
# -*- coding: utf-8 -*-
# Tools.py
from scipy import *
from scipy import signal
from scipy import fftpack
import pandas as pd
def FFT(TD, T=None):
    """
    Single time domain array (TD) to FFT series and powerspectrum.

    Parameters
    ----------
    TD : array-like, flattened to a 1-D time series.
    T : int, optional — analysis window length. When omitted, the power of
        two (up to 2**13) closest to len(TD) is used, and the *trailing*
        T samples are analysed.

    Returns a dict: 'fXr' (non-negative frequencies), 'PSr' (power
    spectrum at those frequencies), 'FD' (full complex FFT).
    """
    TD = array(TD).flatten()
    Tfull = TD.shape[0]
    if T is None:  # fixed: identity check instead of `== None`
        nn = argmin(abs(Tfull - array([2 ** n for n in range(14)])))
        T = 2 ** nn
    # Keep only the trailing T samples.
    TD = TD[-T:]
    # Detrend
    TD = signal.detrend(TD)
    # Windowing. signal.hamming was removed from SciPy's top-level signal
    # namespace; signal.windows.hamming is the supported spelling.
    TD = TD * signal.windows.hamming(T)
    # Edge taper after Tsukiyama [2013].
    a = 10.0
    b = 10.0
    def TK(t):
        if t > T / 2.:
            return 1.0 / (1.0 + b * exp((t - T) / a))
        else:
            return 1.0 / (1.0 + b * exp(-t / a))
    # Fixed: on Python 3 `map` returns an iterator, which cannot multiply
    # an ndarray — materialise the taper as an array first.
    TD = TD * array([TK(t) for t in arange(T)])
    # Do FFT
    FD = fftpack.fft(TD)
    fX = fftpack.fftfreq(T)  # freq list
    PS = array([sqrt(c.real ** 2 + c.imag ** 2) for c in FD]) / T
    fXr = fX[fX >= 0]
    PSr = PS[fX >= 0]
    return {'fXr': fXr, 'PSr': PSr, "FD": FD}
# Agent list operation support -----------------------------
def ID_pick(agents, ID):
    """Return the agent whose .ID equals *ID* (ValueError if absent)."""
    id_list = [agent.ID for agent in agents]
    return agents[id_list.index(ID)]
def ID_ind(agents, ID):
    """Return the index of the agent whose .ID equals *ID* (ValueError if absent)."""
    id_list = [agent.ID for agent in agents]
    return id_list.index(ID)
# DataFrame ala Distibution as a cell ----------------------
def Data_agr(Data, varlst, how='mean'):
    """Aggregate distribution-valued cells of *Data* per column in *varlst*.

    Each cell is expected to hold a sequence; 'mean'/'sum' reduce it with
    nanmean/nansum. Any other *how* returns None (as before).
    """
    if how == 'mean':
        agg = nanmean
    elif how == 'sum':
        agg = nansum
    else:
        return None
    rows = [[agg(Data[varlst][v].iloc[i]) for v in varlst]
            for i in range(Data.shape[0])]
    return pd.DataFrame(rows, index=Data.index, columns=varlst)
# Dictionary List manuplation ------------------------------------------------------
def dict_dst(lst_of_dict, com_keys=None):
    """
    Return value distributions of common keys in multiple dictionaries:
    each key maps to the list of its values across *lst_of_dict*.
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    return {key: [d[key] for d in lst_of_dict] for key in com_keys}
def dict_sum(lst_of_dict, com_keys=None):
    """
    Return a summed dict of common keys in multiple dictionaries.
    Each key's values must support numeric addition (sum starts at 0).
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    return {key: sum(d[key] for d in lst_of_dict) for key in com_keys}
def dict_sumprod(lst_of_dict, weight, com_keys=None):
    """
    Return a sumproduct dict of common keys in multiple dictionaries:
    value[key] = sum_i weight[i] * lst_of_dict[i][key].
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    n = len(lst_of_dict)
    return {key: sum(weight[i] * lst_of_dict[i][key] for i in range(n))
            for key in com_keys}
def dict_mean(lst_of_dict, com_keys=None):
    """
    Return a mean dict of common keys in multiple dictionaries.
    Accumulates (1/N) * value per dict, matching the original's float
    arithmetic order exactly.
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    n = len(lst_of_dict)
    result = {}
    for key in com_keys:
        acc = 0
        for d in lst_of_dict:
            acc = acc + (1.0 / n) * d[key]
        result[key] = acc
    return result
def dict_std(lst_of_dict, com_keys=None):
    """
    Return a dict of per-key population standard deviations (axis=0)
    across the dictionaries in *lst_of_dict*.
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    result = {}
    for key in com_keys:
        values = [d[key] for d in lst_of_dict]
        result[key] = std(array(values), axis=0)
    return result
def dict_median(lst_of_dict, com_keys=None):
    """
    Return a dict of per-key medians (axis=0) across the dictionaries
    in *lst_of_dict*.
    """
    if com_keys is None:
        com_keys = lst_of_dict[0].keys()
    result = {}
    for key in com_keys:
        values = [d[key] for d in lst_of_dict]
        result[key] = median(array(values), axis=0)
    return result
# Read three integers (one per line) and print their arithmetic mean.
primeiro = int(input(""))
segundo = int(input(""))
terceiro = int(input(""))
media = (primeiro + segundo + terceiro) / 3
print(media)
|
import json
import os
import requests
from utils.config import Config
from utils.singleton import Singleton
GITHUB_HOST = 'https://api.github.com'
class Github(Singleton):
    """Minimal GitHub API client used to mirror Gerrit projects to
    the linuxdeepin organisation."""

    def __init__(self):
        # Singleton guard: initialise only once per process.
        if hasattr(self, '_init'):
            return
        self._init = True
        token = Config().data('github', 'token')
        self.basic_headers = {
            'Authorization': 'token %s' % token,
            'Content-Type': 'application/json'
        }

    def create_project(self, proj_full_name):
        """Create linuxdeepin/<basename> on GitHub, then grant push rights.

        Returns (status_code, response_text) from whichever call ran last.
        """
        proj_name = os.path.basename(proj_full_name)
        url = '%s/orgs/linuxdeepin/repos' % GITHUB_HOST
        description = 'mirrored from https://cr.deepin.io/#/admin/projects/%s' % proj_full_name
        payload = {
            "name": proj_name,
            "description": description
        }
        r = requests.post(url, data=json.dumps(payload), headers=self.basic_headers)
        if not str(r.status_code).startswith('2'):
            # not normal
            return r.status_code, r.text
        print('finish creation')
        print('granting deepin-gerrit push permission')
        return self.grant_permission(proj_name)

    def grant_permission(self, proj_name):
        """Give the deepin-gerrit replication user push access to *proj_name*."""
        replication_user = 'deepin-gerrit'
        url = '%s/repos/linuxdeepin/%s/collaborators/%s' % (GITHUB_HOST, proj_name, replication_user)
        payload = {
            "permission": 'push'
        }
        r = requests.put(url, data=json.dumps(payload), headers=self.basic_headers)
        return r.status_code, r.text
|
import torch
import torchvision.transforms as transforms
from newevaluate import evaluate
from itertools import chain
import numpy as np
def post_process(image):
    """Reshape a batch to NCHW 3x32x32 and map values from [-1, 1] to [0, 1]."""
    return image.view(-1, 3, 32, 32).mul(0.5).add(0.5)
def generate_image(image, frame, name):
    """Build a PIL image grid from a batch of generated samples.

    NOTE(review): the original referenced `vutils` without importing it
    (NameError at call time) and silently discarded the finished image;
    `frame`/`name` look intended for saving to disk — TODO confirm and
    implement the save step.
    """
    import torchvision.utils as vutils  # local import: fixes the missing name
    image = post_process(image.cpu())
    grid = vutils.make_grid(image, padding=2, normalize=False)
    return transforms.ToPILImage()(grid)
def reconstruction_loss(image1, image2):
    """Per-sample L2 distance between two (C, H, W) images after de-normalisation.

    Returns a flat numpy array of norms.
    """
    nc, image_size, _ = image1.shape
    image1, image2 = post_process(image1), post_process(image2)
    diff = (image2 - image1).view(-1, nc * image_size * image_size)
    return torch.norm(diff, dim=1).view(-1).data.cpu().numpy()
#Calculates the L2 loss between image1 and image2
def latent_reconstruction_loss(image1, image2):
    """Per-row L2 distance between two latent batches, as a numpy array."""
    return torch.norm(image2 - image1, dim=1).view(-1).data.cpu().numpy()
def l1_latent_reconstruction_loss(image1, image2):
    """Per-row L1 distance between two latent batches, as a numpy array."""
    return torch.abs(image2 - image1).sum(dim=1).view(-1).data.cpu().numpy()
def adjust_learning_rate(optimizer, epoch, num_epochs, lrate):
    """Linearly decay the lr from `lrate` (at epoch 45) to 0 (at num_epochs),
    writing it into every parameter group.

    (The original docstring claimed a step decay; the formula is linear.)
    """
    lr = lrate - lrate * (epoch - 45) / (num_epochs - 45)
    print('use learning rate %f' % lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
def half_adjust_learning_rate(optimizer, epoch, num_epochs, lrate):
    """Set every parameter group's lr to `lrate - 1e-4`.

    `epoch` and `num_epochs` are accepted but unused (kept for signature
    parity with adjust_learning_rate).
    """
    lr = lrate - 1e-4
    print('use learning rate %f' % lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
def anomaly_score(data, netG, netE, netD2, ngpu=1):
    """Feature-matching anomaly score: L1 distance between D2's features of
    (data, data) and (data, reconstruction-of-data).

    With ngpu > 1 the nets are DataParallel-wrapped, so features live on
    `netD2.module`.
    """
    d2 = netD2.module if ngpu > 1 else netD2
    recon = netG(netE(data)).detach()
    a1 = d2.feature(torch.cat((data, data), dim=1))
    a2 = d2.feature(torch.cat((data, recon), dim=1))
    return l1_latent_reconstruction_loss(a1, a2)
def score_and_auc(dataLoader, netG, netE, netD2, device, ngpu=1, break_iters = 100):
    """Score every batch of dataLoader and return (auc, mean anomaly score).

    AUC is 0 when the loader contains no anomalous labels. NOTE(review):
    `break_iters` is currently ignored — the early-exit check is commented
    out in the original and is intentionally left disabled here.
    """
    scores = []
    labels = []
    with torch.no_grad():
        for _, batch in enumerate(dataLoader, 0):
            real = batch[0].to(device)
            labels.append(batch[1].to(device).tolist())
            scores.append(anomaly_score(real, netG, netE, netD2, ngpu))
    scores = list(chain.from_iterable(scores))
    labels = list(chain.from_iterable(labels))
    print(scores[:5], scores[-5:])
    print(labels[:5], labels[-5:])
    mean_score = np.array(scores).mean()
    if np.sum(labels) == 0:
        print('only non anomolous samples are passed' + str(len(labels)))
        auc = 0
    else:
        auc = evaluate(labels, scores)
    return auc, mean_score
import datetime
from openpyxl import load_workbook, Workbook
import sqlite3
from telebot import types
from misc import DB_DIR, REPORTS_DIR, CHATS_ID, OO_DIR
# Snapshot of "now" taken once at import time; day/hour are used for report
# bookkeeping below. NOTE: these are not refreshed while the bot runs.
now = datetime.datetime.now()
current_day = now.day
current_hour = now.hour
# -----------------------ФУНКЦИИ ДЛЯ ДЕЛИШЕК----------------------------------
def print_in_console(text, date, fullname=""):
    """Print *date* on one line, then *text* (plus *fullname* when given)."""
    print(str(date))
    if fullname == "":
        print("{}\n".format(text))
    else:
        print("{} {}\n".format(text, fullname))
def auth(bot, uid):
    """Return True if *uid* exists in the staff table; otherwise tell the
    user to register via /addmeplease and return False.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    # Parameterised query: the original interpolated uid straight into the
    # SQL string, which is injection-prone for non-numeric input.
    cursor.execute("SELECT uid FROM staff WHERE uid=?", (uid,))
    if cursor.fetchone() is None:  # no row — user unknown to the bot
        bot.send_message(
            chat_id=uid,
            text="Похоже, что бот тебя не узнал. Напиши команду /addmeplease")
        return False
    return True
def is_intern(uid):
    """Return True if *uid* is a staff member in department 3 (interns).

    Uses a parameterised WHERE instead of the original fetch-all + Python
    scan (safer and one row instead of the whole table).
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM staff WHERE department=3 AND uid=?", (uid,))
    return cursor.fetchone() is not None
def is_mentor(uid):
    """Return True if *uid* is present in the mentors table.

    Parameterised membership query instead of fetch-all + Python scan.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM mentors WHERE uid=?", (uid,))
    return cursor.fetchone() is not None
def is_helper(uid):
    """Return True if *uid* is present in the helpers table.

    Parameterised membership query instead of fetch-all + Python scan.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM helpers WHERE uid=?", (uid,))
    return cursor.fetchone() is not None
def is_super(uid):
    """Return True if *uid* is present in the supers (supervisors) table.

    Parameterised membership query instead of fetch-all + Python scan.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM supers WHERE uid=?", (uid,))
    return cursor.fetchone() is not None
def is_pip(uid):
    """Return True if *uid* is present in the pips table.

    Parameterised membership query instead of fetch-all + Python scan.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM pips WHERE uid=?", (uid,))
    return cursor.fetchone() is not None
def is_oo(uid):
    """Return True if *uid* is a staff member in department 4 (OO).

    Parameterised membership query instead of fetch-all + Python scan.
    """
    db = sqlite3.connect(DB_DIR)
    cursor = db.cursor()
    cursor.execute("SELECT 1 FROM staff WHERE department=4 AND uid=?", (uid,))
    return cursor.fetchone() is not None
def create_keyboard(department):
    """Build the keyboard for a department or special mode.

    department: 1 (consultants) or 3 (interns) returns a ReplyKeyboardMarkup;
    the strings "cancel" / "dezh_cancel" return an InlineKeyboardMarkup with
    a single cancel button. Any other value returns None (unchanged).
    """
    kbrd = types.ReplyKeyboardMarkup(resize_keyboard=True)
    ping_btn = types.KeyboardButton(text="Пинг")
    dezh_btn = types.KeyboardButton(text="Дежурный")
    #change_btn = types.KeyboardButton(text="Обратная связь")
    etc_btn = types.KeyboardButton(text="Прочие")
    oo_btn = types.KeyboardButton(text="ОО")
    if department == 1:
        kbrd.row(ping_btn, dezh_btn)
        kbrd.row(etc_btn)#, change_btn)
        return kbrd
    elif department == 3:
        kbrd.row(ping_btn, dezh_btn)
        kbrd.row(oo_btn)
        return kbrd
    elif department == "cancel":
        kbrd = types.InlineKeyboardMarkup()
        cancel_btn = types.InlineKeyboardButton(
            text="Отменить пинг.", callback_data="cancel")
        kbrd.row(cancel_btn)
        # Fixed: this branch previously fell through without returning,
        # so callers got None instead of the cancel keyboard.
        return kbrd
    elif department == "dezh_cancel":
        kbrd = types.InlineKeyboardMarkup()
        cancel_btn = types.InlineKeyboardButton(
            text="Отменить дежурного.", callback_data="dezh_cancel")
        kbrd.row(cancel_btn)
        return kbrd
def get_department(where, uid):
    """Return the department number of user *uid* from table *where*.

    *where* is interpolated as a table name (SQL cannot bind identifiers),
    so it must come from trusted code; *uid* is bound as a parameter to
    avoid SQL injection.  Raises IndexError if the user is not found.
    """
    db = sqlite3.connect(DB_DIR)
    try:
        cursor = db.cursor()
        cursor.execute(
            "SELECT department FROM {} WHERE uid=?".format(where), (uid,))
        return cursor.fetchall()[0][0]
    finally:
        # The original never closed the connection.
        db.close()
def get_admin_chat_id(where, uid):
    """Return the admin chat id for the user's department.

    Department 1 maps to CHATS_ID[0], department 3 to CHATS_ID[4];
    any other department yields None (implicit return).
    """
    department = get_department(where, uid)
    chat_index = {1: 0, 3: 4}
    if department in chat_index:
        return CHATS_ID[chat_index[department]]
def send_members_list(bot, user_id, message, sql, m_text):
    """Edit message *message* in chat *user_id* with the member list for *sql*.

    Rows with fewer than four columns are rendered "uid last first", wider
    rows as "uid department last first".  An empty or failed query yields
    the placeholder text instead of an empty message.
    """
    mydb = sqlite3.connect(DB_DIR)
    try:
        mycursor = mydb.cursor()
        try:
            mycursor.execute(sql)
            member_list = mycursor.fetchall()
        except sqlite3.Error:
            member_list = []
        lines = []
        for row in member_list:
            if len(row) < 4:
                uid, last_name, first_name, *etc = row
                lines.append("{} {} {}".format(uid, last_name, first_name))
            else:
                uid, department, last_name, first_name, *etc = row
                lines.append("{} {} {} {}".format(
                    uid, department, last_name, first_name))
        # BUG FIX: the original set "Список пуст!" in the except branch and
        # then unconditionally overwrote it with "\n".join of an empty list.
        text4 = "\n".join(lines) if lines else "Список пуст!"
    finally:
        mydb.close()
    bot.edit_message_text(
        chat_id=user_id,
        message_id=message,
        text='{} {}'.format(m_text, text4))
def update_calls(what, department):
    """Increment counter column *what* in the stats row for *department*.

    Also stamps today's date (YYYY-MM-DD) into the row.  *what* is a
    column name and cannot be bound as a parameter, so it must come from
    trusted code; all values are bound to avoid SQL injection.
    """
    mydb = sqlite3.connect(DB_DIR)
    try:
        mycursor = mydb.cursor()
        today = datetime.date.today()
        mycursor.execute(
            "UPDATE stats SET date=? WHERE department=?",
            ("{}-{:0>2}-{:0>2}".format(today.year, today.month, today.day),
             department))
        mycursor.execute(
            "SELECT {} FROM stats WHERE department=?".format(what),
            (department,))
        value = mycursor.fetchall()[0][0] + 1
        mycursor.execute(
            "UPDATE stats SET {}=? WHERE department=?".format(what),
            (value, department))
        mydb.commit()
    finally:
        # The original never closed the connection.
        mydb.close()
def update_answers(uid, what, chat):
    """Increment counter column *what* for the staff member who answered.

    The target table depends on the user's role: mentors (only when the
    answer happened in the mentors' admin chat CHATS_ID[4]), then helpers,
    then supervisors.  Unknown users are ignored.  *what* is a trusted
    column name; uid and the value are bound as parameters.
    """
    # Role resolution replaces three copy-pasted branches that differed
    # only in the table name.
    if is_mentor(uid) and chat == CHATS_ID[4]:
        table = "mentors"
    elif is_helper(uid):
        table = "helpers"
    elif is_super(uid):
        table = "supers"
    else:
        table = None
    mydb = sqlite3.connect(DB_DIR)
    try:
        mycursor = mydb.cursor()
        if table is not None:
            mycursor.execute(
                "SELECT {} FROM {} WHERE uid=?".format(what, table), (uid,))
            value = mycursor.fetchall()[0][0] + 1
            mycursor.execute(
                "UPDATE {} SET {}=? WHERE uid=?".format(table, what),
                (value, uid))
        mydb.commit()
        mycursor.close()
    finally:
        mydb.close()
def update_ping_status(uid, m_id, ping_type, ping_status, is_reply='False'):
    """Persist the state of a ping for staff member *uid*.

    For a new ping (falsy is_reply) it records the ping message id, type,
    status and send time.  For a reply it adds the elapsed answer time to
    the running totals in both the staff row and the department stats row,
    and bumps the per-type counter on the staff row.

    NOTE(review): the default is_reply='False' is a non-empty *string* and
    therefore truthy, so with the default both `not is_reply` and
    `is_reply == False` evaluate to False — callers presumably pass real
    booleans; confirm before relying on the default.
    """
    # Current wall-clock time as seconds since midnight (not epoch time),
    # so elapsed-time math breaks across a midnight boundary.
    now = datetime.datetime.now()
    hours = now.hour
    minutes = now.minute
    seconds = now.second
    total_time = hours*60*60 + minutes*60 + seconds
    mydb = sqlite3.connect(DB_DIR)
    mycursor = mydb.cursor()
    if not is_reply:
        # New ping: store its message id/type/status and the send timestamp.
        sql = """UPDATE staff
            SET last_ping_mid={}, ping_type='{}', ping_status={}, time_to_answer={}
            WHERE uid={}""".format(m_id, ping_type, ping_status, total_time, uid)
    else:
        # Reply: fetch the send time and running totals...
        sql1 = """SELECT time_to_answer, total_time, department
            FROM staff
            WHERE uid={}""".format(uid)
        first_time, staff_t_time, department = mycursor.execute(sql1).fetchall()[0]
        sql = """SELECT total_time
            FROM stats
            WHERE department={}""".format(department)
        stats_t_time = mycursor.execute(sql).fetchall()[0][0]
        # ...then add the elapsed answer time to the staff member's total.
        sql = """UPDATE staff
            SET last_ping_mid={}, ping_type='{}', ping_status={}, time_to_answer=0, total_time={}
            WHERE uid={}""".format(m_id, ping_type, ping_status, staff_t_time+(total_time-first_time), uid)
        mycursor.execute(sql)
        # Mirror the elapsed time into the department-wide stats total.
        sql = """UPDATE stats
            SET total_time={}
            WHERE department={}""".format(stats_t_time+(total_time-first_time), department)
    mycursor.execute(sql)
    # Count the ping only when it is new (the string default never equals
    # the boolean False — see the NOTE in the docstring).
    if is_reply == False:
        value = 1
    else:
        value = 0
    if ping_type != 'empty':
        # Bump the per-type counter column named by ping_type.
        sql = "SELECT {} FROM staff WHERE uid={}".format(ping_type, uid)
        res = mycursor.execute(sql)
        data = res.fetchall()[0][0] + value
        sql = "UPDATE staff SET {}={} WHERE uid={}".format(ping_type, data, uid)
    else:
        sql = "UPDATE staff SET ping_type='empty' WHERE uid={}".format(uid)
    mycursor.execute(sql)
    mydb.commit()
def update_dezh_status(uid, last_dezh_mid, dezh_type, dezh_status, is_reply='False'):
    """Persist the state of a duty ("dezh") call for staff member *uid*.

    Mirrors update_ping_status: a new call (falsy is_reply) stores the
    message id, status and send time; a reply folds the elapsed answer
    time into the staff and department running totals and bumps the
    per-type counter.

    NOTE(review): the default is_reply='False' is a truthy *string*, so
    with the default both `not is_reply` and `is_reply == False` are
    False — callers presumably pass real booleans; confirm.
    """
    # Current wall-clock time as seconds since midnight (breaks across a
    # midnight boundary, like update_ping_status).
    now = datetime.datetime.now()
    hours = now.hour
    minutes = now.minute
    seconds = now.second
    total_time = hours*60*60 + minutes*60 + seconds
    mydb = sqlite3.connect(DB_DIR)
    mycursor = mydb.cursor()
    if not is_reply:
        # New duty call: store its message id/status and the send timestamp.
        sql = "UPDATE staff SET dezh_status={}, last_dezh_mid={}, time_to_answer_dezh={} WHERE uid={}".format(
            dezh_status, last_dezh_mid, total_time, uid)
    else:
        # Reply: fetch the send time and running totals...
        sql1 = """SELECT time_to_answer_dezh, total_dezh_time, department
            FROM staff
            WHERE uid={}""".format(uid)
        first_time, staff_t_time, department = mycursor.execute(sql1).fetchall()[0]
        sql = """SELECT total_time
            FROM stats
            WHERE department={}""".format(department)
        stats_t_time = mycursor.execute(sql).fetchall()[0][0]
        # ...then add the elapsed answer time to the staff member's total.
        sql = """UPDATE staff
            SET dezh_status={}, last_dezh_mid={}, time_to_answer_dezh=0, total_dezh_time={}
            WHERE uid={}""".format(dezh_status, last_dezh_mid, staff_t_time+(total_time-first_time), uid)
        mycursor.execute(sql)
        # Mirror the elapsed time into the department-wide stats total.
        sql = """UPDATE stats
            SET total_dezh_time={}
            WHERE department={}""".format(stats_t_time+(total_time-first_time), department)
    mycursor.execute(sql)
    # Count the call only when it is new (string default never equals the
    # boolean False — see the NOTE in the docstring).
    if is_reply == False:
        value = 1
    else:
        value = 0
    if dezh_type != 'empty':
        # Bump the per-type counter column named by dezh_type.
        sql = "SELECT {} FROM staff WHERE uid={}".format(dezh_type, uid)
        mycursor.execute(sql)
        data = mycursor.fetchall()[0][0]
        amt = data + value
        sql2 = "UPDATE staff SET {}={}, dezh_status={}, last_dezh_mid={} WHERE uid={}".format(
            dezh_type, amt, dezh_status, last_dezh_mid, uid)
    else:
        sql2 = "UPDATE staff SET dezh_status={}, last_dezh_mid={} WHERE uid={}".format(
            dezh_status, last_dezh_mid, uid)
    mycursor.execute(sql2)
    mydb.commit()
def mass_message(bot, text, uid):
    """Broadcast the quoted part of *text* to every staff user.

    *uid* is the admin who invoked the broadcast; delivery failures and
    the final confirmation are reported to that chat.
    """
    message = text[text.find("\"")+1 : text.rfind("\"")]
    mydb = sqlite3.connect(DB_DIR)
    try:
        mycursor = mydb.cursor()
        mycursor.execute("SELECT uid FROM staff")
        recipients = {row[0] for row in mycursor.fetchall()}
    finally:
        mydb.close()
    for target in recipients:
        try:
            bot.send_message(
                chat_id=target,
                text=message)
        except Exception:
            # BUG FIX: the original loop variable shadowed the admin's uid,
            # so the failure notice was sent to the very chat that just
            # failed (and raised again).  Report to the admin instead.
            bot.send_message(
                chat_id=uid,
                text="для {} не отправилось".format(target))
    # BUG FIX: the confirmation now goes to the admin, not the last recipient.
    bot.send_message(
        chat_id=uid,
        text="Все получили сообщение.")
def group_message(bot, text, uid):
    """Send the quoted part of *text* to a named group of users.

    *text* looks like '<cmd> <group> "message"'.  Known groups map to a
    SELECT returning the member uids; an unknown group sends to nobody
    but still confirms.  Failures are reported to the admin chat *uid*.
    """
    group = text.split(" ")[1]
    message = text[text.find("\"")+1 : text.rfind("\"")]
    # One dispatch table replaces five copy-pasted branches.
    group_queries = {
        "экстерн": "SELECT uid FROM staff WHERE department=1",
        "стажеры": "SELECT uid FROM staff WHERE department=3",
        "помогаторы": "SELECT uid FROM helpers",
        "пипы": "SELECT uid FROM pips",
        "суперыкэ": "SELECT uid FROM supers WHERE department=1",
    }
    users = set()
    sql = group_queries.get(group)
    if sql is not None:
        mydb = sqlite3.connect(DB_DIR)
        try:
            mycursor = mydb.cursor()
            for row in mycursor.execute(sql).fetchall():
                users.add(row[0])
        finally:
            mydb.close()
    for user in users:
        try:
            bot.send_message(
                chat_id=user,
                text=message)
        except Exception:
            # BUG FIX: the original try wrapped the whole loop, so one dead
            # chat aborted delivery to everyone after it.  Per-user handling
            # reports the failure and keeps going.
            bot.send_message(
                chat_id=uid,
                text="Не доставлено {}".format(user))
    bot.send_message(
        chat_id=uid,
        text="Все получили сообщение.")
def private_message(bot, text, uid):
    """Send the quoted part of *text* to one user: '<cmd> <uid> "message"'.

    Delivery problems are swallowed (best-effort), matching the original.
    """
    try:
        target = text.split(" ")[1]
        # BUG FIX: the original sliced [...:-1], which silently dropped the
        # last character whenever anything followed the closing quote.  Use
        # rfind('"') like mass_message/group_message do.
        msg = text[text.find("\"")+1 : text.rfind("\"")]
        bot.send_message(
            chat_id=target,
            text=msg)
    except Exception:
        print("OOOOOOOOOOOOOOOOOOOOOPS!!!")
def status(bot, adress, uid):
    """Update the stored address (or name) for user *uid* per a command text.

    *adress* is the full command, e.g. "Установить адрес Радищева 8"; the
    matched prefix selects table and column, the remainder is the value.
    An unrecognized command is ignored (the original raised NameError on
    the unbound ``sql``).
    """
    # (prefix, table, column) — longest/most specific prefixes first so
    # "Установить адрес" does not swallow the supervisor/helper variants.
    rules = (
        ("Установить супервизорский адрес", "supers", "adress"),
        ("Установить помогаторский адрес", "helpers", "adress"),
        ("Установить адрес", "staff", "adress"),
        ("Установить имя", "staff", "first_name"),
        ("Установить фамилия", "staff", "last_name"),
    )
    mydb = sqlite3.connect(DB_DIR)
    try:
        cursor = mydb.cursor()
        for prefix, table, column in rules:
            if adress.startswith(prefix):
                value = adress[len(prefix) + 1:]
                # Table/column come from the trusted whitelist above; the
                # user-supplied value and uid are bound as parameters.
                cursor.execute(
                    "UPDATE {} SET {}=? WHERE uid=?".format(table, column),
                    (value, uid))
                mydb.commit()
                bot.send_message(
                    chat_id=uid,
                    text="Адрес изменен на {}. Теперь напиши боту /start.".format(value))
                break
        cursor.close()
    finally:
        mydb.close()
def oo_info(bot, message):
    """Handle the "ОО" (training department) menu tree.

    Routes on message.text: top-level menu, submenus (regulations, staff,
    "what if...", contacts) and the individual FAQ answers.  Replies go to
    message.from_user.id.
    """
    department = get_department('staff', message.from_user.id)
    if message.text in ["ОО", "Меню ОО"]:
        oo_kbrd = types.ReplyKeyboardMarkup(resize_keyboard=True)
        reg_btn = types.KeyboardButton(text="Регламенты")
        staff_btn = types.KeyboardButton(text="Сотрудники ОО")
        wtf_btn = types.KeyboardButton(text="Что делать, если...")
        cont_btn = types.KeyboardButton(text="Контакты")
        back_btn = types.KeyboardButton(text="Главное меню")
        oo_kbrd.row(reg_btn, staff_btn)
        oo_kbrd.row(wtf_btn, cont_btn)
        oo_kbrd.row(back_btn)
        bot.send_message(
            chat_id=message.from_user.id,
            text="Отдел Обучения",
            reply_markup=oo_kbrd)
    elif message.text == "Главное меню":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Главное меню.",
            reply_markup=create_keyboard(department))
    elif message.text == "Регламенты":
        reg_kbrd = types.ReplyKeyboardMarkup(resize_keyboard=True)
        codes_btn = types.KeyboardButton(text="Коды перерывов для Yealink")
        vw_btn = types.KeyboardButton(text="Very Want и Want")
        codes2_btn = types.KeyboardButton(text="Для чего какой код")
        money_btn = types.KeyboardButton(text="Показатели премии")
        money2_btn = types.KeyboardButton(text="Дни зарплаты")
        back_btn = types.KeyboardButton(text="Главное меню")
        oo_menu_btn = types.KeyboardButton(text="Меню ОО")
        reg_kbrd.row(codes_btn, vw_btn)
        reg_kbrd.row(codes2_btn, money_btn)
        reg_kbrd.row(money2_btn)
        reg_kbrd.row(back_btn, oo_menu_btn)
        bot.send_message(
            chat_id=message.from_user.id,
            text="Регламенты.",
            reply_markup=reg_kbrd)
    elif message.text == "Коды перерывов для Yealink":
        # Best-effort: send whichever of the jpg/pdf versions exists.
        try:
            with open(OO_DIR+"yealink.jpg", 'rb') as file:
                bot.send_document(message.from_user.id, file)
        except OSError:
            pass
        try:
            with open(OO_DIR+"yealink.pdf", 'rb') as file:
                bot.send_document(message.from_user.id, file)
        except OSError:
            pass
    elif message.text == "Very Want и Want":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Кнопка «WANT» для перерыва до 60 мин."\
            "\nНе стоит тратить его за раз или задерживаться — твои коллеги с тобой в общей очереди."\
            "\n\nКнопка «VERY WANT» для перерыва на 10 мин."\
            "\n\nПосле того, как ты нажал «GO» на callider, поменяй тип перерыва на телефоне на «Личный».")
    elif message.text == "Для чего какой код":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Другое поручение — поручения наставников и супервизоров."\
            "\n\nПроверка знаний — повышение квалификации."\
            "\n\nБеседа с супервизором — ОС по качеству твоей работы."\
            "\n\nПочта — только для писем пользователей."\
            "\n\nЧаты — только для чатов."\
            "\n\nАльтработа — чтобы анализировать обращения от пользователей, собирать статистику."\
            "\n\nЛичный — для перерывов на обед или отдыха."\
            "\n\nУдаленный доступ — для подключения к ПК пользователя."\
            "\n\nОбучение — лекции, тренинги и самопрослушивание."\
            "\n\nИсходящий вызов — чтобы позвонить пользователю."\
            "\n\nСложный инцидент — чтобы заполнить инцидент для экспертов. "\
            "В этом коде нужно указывать номер инцидента."\
            "\n\nБлиц-опрос — тесты на знание материала и регламентов."\
            "\n\nТайм-аут с клиентом — чтобы подготовить ответ для клиента, который не хочет ожидать на линии."\
            "В этом коде нужно указывать номер инцидента.")
    elif message.text == "Показатели премии":
        bot.send_message(
            chat_id=message.from_user.id,
            text="После перехода на трудовой договор ты можешь получать премию в 20% каждый месяц."\
            "\n\nПремия зависит от:"\
            "\n1. Качества консультаций."\
            "\n2. Процента рассказанных уведомлений и зарегистрированных обращений."\
            "\n3. Целевая занятость."\
            "\n\nПодробнее о расчёте премии читай на "\
            "wiki https://wiki.skbkontur.ru/pages/viewpage.action?pageId=307512810")
    elif message.text == "Дни зарплаты":
        bot.send_message(
            chat_id=message.from_user.id,
            text="В день заключения трудового договора тебе придет стипендия."\
            "\n\nДень зарплаты — 10 число каждого месяца. Аванс— 25 число.")
    elif message.text == "Сотрудники ОО":
        staff_kbrd = types.ReplyKeyboardMarkup(resize_keyboard=True)
        head_btn = types.KeyboardButton(text="Руководители")
        vor_btn = types.KeyboardButton(text="Воронеж")
        trainers_btn = types.KeyboardButton(text="Тренеры")
        back_btn = types.KeyboardButton(text="Главное меню")
        oo_menu_btn = types.KeyboardButton(text="Меню ОО")
        staff_kbrd.row(head_btn)
        # BUG FIX: the original added ekb_btn/vlg_btn/novosib_btn here, but
        # their definitions were commented out -> NameError on this branch.
        # Only the live buttons remain.
        staff_kbrd.row(vor_btn)
        staff_kbrd.row(trainers_btn)
        staff_kbrd.row(back_btn, oo_menu_btn)
        bot.send_message(
            chat_id=message.from_user.id,
            text="Сотрудники ОО.",
            reply_markup=staff_kbrd)
    elif message.text == "Руководители":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Третьякова Екатерина — руководитель отдела обучения."\
            "\n@trekaterina, +7 909 018-25-21.")
    elif message.text == "Воронеж":
        bot.send_message(
            chat_id=message.from_user.id,
            text="❇️ Новикова Марина @m_malkova"\
            "\nчитает лекции по сервисам ОПВС."\
            "\n\n❇️ Зотьев Роман @MarcusAvrelius"\
            "\nчитает лекции по сервисам ОПВС.")
    elif message.text == "Тренеры":
        bot.send_message(
            chat_id=message.from_user.id,
            text="❇️ Грефенштейн Петр @PeterGref"\
            "\n\n❇️ Сергеева Дарья @Dasha_Sergeeva"
            "\n\n❇️ Черных Людмила @lyukache")
    elif message.text == "Что делать, если...":
        wtf_kbrd = types.ReplyKeyboardMarkup(resize_keyboard=True)
        sick_btn = types.KeyboardButton(text="Я заболел")
        late_btn = types.KeyboardButton(text="Я опаздываю")
        otgul_btn = types.KeyboardButton(text="Нужен отгул")
        blood_btn = types.KeyboardButton(text="Я сдаю кровь")
        army_btn = types.KeyboardButton(text="Пришла повестка")
        insurance_btn = types.KeyboardButton(text="Нужна медицинская страховка")
        out_btn = types.KeyboardButton(text="Надо отпроситься")
        back_btn = types.KeyboardButton(text="Главное меню")
        oo_menu_btn = types.KeyboardButton(text="Меню ОО")
        wtf_kbrd.row(sick_btn, late_btn)
        # BUG FIX: the original paired otgul_btn with hungry_btn, whose
        # definition was commented out -> NameError on this branch.
        wtf_kbrd.row(otgul_btn)
        wtf_kbrd.row(blood_btn, army_btn)
        wtf_kbrd.row(insurance_btn, out_btn)
        wtf_kbrd.row(back_btn, oo_menu_btn)
        bot.send_message(
            chat_id=message.from_user.id,
            text="Сотрудники ОО.",
            reply_markup=wtf_kbrd)
    elif message.text == "Я заболел":
        bot.send_message(
            chat_id=message.from_user.id,
            text="1. Напиши @m_malkova или наставнику, что болеешь. "\
            "\n2. Посети врача и оформи больничный. "\
            "Сообщи нам день начала больничного и день повторного приема: "\
            "«Дали больничный 1.02, повторный приём 4.02. Иванов Иван»."\
            "\n3. Сходи на повторный приём и напиши нам, когда больничный закроют. "\
            "Если больничный продлили, напиши день повторного приёма.")
    elif message.text == "Я опаздываю":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Напиши @m_malkova или или наставнику, что опаздываешь. "\
            "В сообщении должны быть:"\
            "\n\n— ФИО"\
            "\n— дата набора"\
            "\n— причина опоздания"\
            "\n— время, когда появишься на работе.")
    elif message.text == "Нужен отгул":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Если с тобой ещё не заключили ТД, напиши @m_malkova."\
            "\n\nЕсли с тобой заключили ТД, то:"\
            "\n1. Договорись с наставником о днях, когда тебя не будет."\
            "\n2. Отправь заявление на отпуск."\
            "\n\nЧтобы написать заявление зайди на старый callider в раздел «График» "\
            "→ «Заявления на отпуск». Выбери «Отпуск без сохранения заработной платы»."\
            "\n\nВ поле для причины укажи «семейные обстоятельства» даже если реальная "\
            "причина не такая."\
            "\n\nЕсли отпуск нуже на 1 день, то в периоде укажи только этот день: "\
            "если тебя не будет 8 июля, то период с 08.07.2019 по 08.07.2019."\
            "\n\nЕсли заявление не получилось написать в день отпуска, то напиши его на "\
            "следующий день в отделе персонала в 616.")
    elif message.text == "Я сдаю кровь":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Напиши @m_malkova, что собираешься сдать кровь. "\
            "Если ты работаешь на ТД, то пиши наставнику."\
            "\nЕсли ты выйдешь на работу в день, когда сдаёшь кровь, передай в отдел персонала "\
            "фотографию или оригинал справки о сдаче крови в этот же день до 13:00. "\
            "Работник отдела пресонала оформит приказ о допуске к работе в день сдачи крови и "\
            "её компонентов."\
            "\n\nЕсли в день сдачи крови ты не выйдешь на работу, напиши заявление об освобождении "\
            "от работы в отделе персонала. Сделай этого заранее или утром в день, когда сдаёшь кровь."\
            "\n\nЕсли ты сдаёшь кровь в выходной день или во время отпуска, "\
            "то никакие документы оформлять не надо."\
            "\n\nПосле ты можешь взять два дополнительных дня оплачиваемого отпуска. "\
            "Для этого зайди на старый callider в раздел «График» → «Заявления на отпуск». "\
            "Выбери «Дополнительный день отдыха за сдачу крови и ее компонентов».")
    elif message.text == "Пришла повестка":
        bot.send_message(
            chat_id=message.from_user.id,
            text="1. Напиши @m_malkova или наставнику, что тебе надо быть в военкомате. "\
            "\n2. Принеси в отдел персонала оригинал повестки. Копия не подойдёт."\
            "\n3. Там же напиши заявление об отсутствии на рабочем месте."\
            "\n\nВсё это надо сделать до дня визита в военкомат.")
    elif message.text == "Нужна медицинская страховка":
        bot.send_message(
            chat_id=message.from_user.id,
            text="К программе ДМС можно подключиться бесплатно, если твой стаж работы в компании 1 год. "\
            "В ином случае подключиться можно за свой счет.")
    elif message.text == "Надо отпроситься":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Напиши @m_malkova или наставнику, что тебе надо уйти раньше. "\
            "\n\nЕсли ты работаешь на ТД, напиши наставнику.")
    elif message.text == "Контакты":
        bot.send_message(
            chat_id=message.from_user.id,
            text="Пиши нам на OO_owner@skbkontur.ru"\
            "\n\nПиши коллегам в Telegram или СМС. Звони, если вопрос срочный. "\
            "Наши контакты по кнопке «Сотрудники ОО»."\
            "\n\nЕсли есть идеи для бота, пиши @fancyAndBeauty.")
def make_statistic(department):
"""Состаление отчета по пингам за сутки"""
if department == 1:
# путь до папки с отчетами
PATH = REPORTS_DIR+"КЭ_{}_{}.xlsx".format(datetime.datetime.now().year, datetime.datetime.now().month)
# перечисление нужных столбцов для отчета
needed_losts = True
needed_kcrs = True
needed_exps = True
needed_visions = True
needed_pzs = True
needed_miss = True
needed_study = True
# elif department == 2:
# PATH = REPORTS_DIR+"ФМС_{}_{}.xlsx".format(datetime.datetime.now().year, datetime.datetime.now().month)
# needed_losts = False
# needed_kcrs = False
# needed_exps = False
# needed_visions = True
# needed_pzs = True
# needed_miss = False
# needed_study = False
elif department == 3:
PATH = REPORTS_DIR+"СТ_{}_{}.xlsx".format(datetime.datetime.now().year, datetime.datetime.now().month)
needed_losts = False
needed_kcrs = False
needed_exps = False
needed_visions = False
needed_pzs = False
needed_miss = False
needed_study = False
# elif department == 5:
# PATH = REPORTS_DIR+"КБ_{}_{}.xlsx".format(datetime.datetime.now().year, datetime.datetime.now().month)
# needed_losts = False
# needed_kcrs = False
# needed_exps = False
# needed_visions = False
# needed_pzs = False
# needed_miss = False
# needed_study = False
# elif department == 6:
# PATH = REPORTS_DIR+"ЭЛЬБА_{}_{}.xlsx".format(datetime.datetime.now().year, datetime.datetime.now().month)
# needed_losts = False
# needed_kcrs = False
# needed_exps = False
# needed_visions = False
# needed_pzs = False
# needed_miss = False
# needed_study = False
try:
wb = load_workbook(PATH)
except:
wb = Workbook()
last_ws = wb[wb.sheetnames[-1]]
last_date = last_ws.title
today = str(datetime.date.today())
if today == last_date:
target = wb[wb.sheetnames[-1]]
wb.remove(target)
ws = wb.create_sheet(today)
else:
ws = wb.create_sheet(today)
mydb = sqlite3.connect(DB_DIR)
mycursor = mydb.cursor()
sql = """SELECT pings, knowledges, checks, dezh, miss, study, break_time, reboot, op_time, losts, exps, kcrs, visions, pzs, pings_canceled, dezh_canceled, date, total_time, total_dezh_time, hards
FROM stats
WHERE department={}""".format(department)
res = mycursor.execute(sql)
data = res.fetchall()
data = data[0]
# sql1 = """SELECT SUM(pings), SUM(knowledges), SUM(checks), SUM(dezh), SUM(losts), SUM(kcrs), SUM(exps)
# FROM stats
# WHERE department={}""".format(department)
# res = mycursor.execute(sql1)
# total = 0
# for qty in res.fetchall()[0]:
# total += qty
# s_sql = """SELECT SUM(dezh), SUM(study), SUM(break_time), SUM(reboot), SUM(op_time), SUM(miss)
# FROM staff
# WHERE department={}""".format(department)
# res = mycursor.execute(s_sql)
# total_dezh = 0
# for qty in res.fetchall()[0]:
# total_dezh += qty
pings, knowledges, checks, dezh, miss, study, break_time, reboot, op_time, losts, exps, kcrs, visions, pzs, pings_canceled, dezh_canceled, date, total_time, total_dezh_time, hards = data
for col in ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T"]:
ws.column_dimensions[col].width = 15
ws.append(["Всего вызовов", pings+knowledges+checks+dezh+miss+study+losts+kcrs+exps+visions+pzs+break_time+op_time+reboot])
if pings > 0:
ws.append(["Всего пингов", pings])
if knowledges > 0:
ws.append(["Всего привязок знаний", knowledges])
if hards > 0:
ws.append(["Всего сложных", hards])
if checks > 0:
ws.append(["Всего проверок", checks])
if losts > 0 and needed_losts:
ws.append(["Всего потеряшек", losts])
if kcrs > 0 and needed_kcrs:
ws.append(["Всего КЦР", kcrs])
if exps > 0 and needed_exps:
ws.append(["Всего эксперт", exps])
if visions > 0 and needed_visions:
ws.append(["Всего визий", visions])
if pzs > 0 and needed_pzs:
ws.append(["Всего проверок знаний", pzs])
if dezh > 0:
ws.append(["Всего дежурок", dezh])
if break_time > 0:
ws.append(["Всего пропусков обеда", break_time])
if miss > 0 and needed_miss:
ws.append(["Всего пропусков фикс.перерыва", miss])
if study > 0 and needed_study:
ws.append(["Всего обучениё/Прослушок", study])
if reboot > 0:
ws.append(["Всего перезагрузок", reboot])
if op_time > 0:
ws.append(["Всего ОП", op_time])
ws.append(["Всего отмен", pings_canceled+dezh_canceled])
if pings_canceled > 0:
ws.append(["Всего пингов отменено", pings_canceled])
if dezh_canceled > 0:
ws.append(["Всего дежурок отменено", dezh_canceled])
# if total == 0:
# ws.append(["Ожидание ответа пинга", "Неизвестно"])
# else:
# ws.append(["Ожидание ответа пинга", "{}:{}".format((total_time/total)//60, (total_time/total)%60)])
# if total_dezh == 0:
# ws.append(["Ожидание ответа дежурки", "Неизвестно"])
# else:
# ws.append(["Ожидание ответа дежурки", "{}:{}".format((total_dezh_time/total_dezh)//60, (total_dezh_time/total_dezh)%60)])
if department != 3:
ws.append(["--------------"])
ws.append(["СУПЕРВИЗОРСКАЯ"])
ws.append(["--------------"])
sql = """SELECT last_name, first_name, pings, knowledges, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, losts, kcrs, visions, pzs, miss
FROM supers
WHERE department={}
ORDER BY last_name ASC""".format(department)
mycursor.execute(sql)
su_list = mycursor.fetchall()
if department == 1:
ws.append(["Фамилия","Имя","Пинги","Знания","Гл.деж","Вр.деж","Обуч./Прос(ДА)","Обуч./Прос(НЕТ)","Перерыв","Перезагрузка(ДА)","Перезагрузка(НЕТ)","ОП","Потеряхи","КЦР","Визия","Проверка знаний","Фикс.перерыв","Всего"])
# elif department == 2:
# ws.append(["Фамилия","Имя","Пинги","Знания","Гл.деж","Вр.деж","Визия","Проверка знаний","Всего"])
# elif department in [5,6]:
# ws.append(["Фамилия","Имя","Пинги","Гл.деж","Вр.деж","Всего"])
for row in su_list:
try:
last_name, first_name, pings, knowledges, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, losts, kcrs, visions, pzs, miss = row
summary = pings+knowledges+f_dezh+h_dezh+study_yes+study_no+break_time+reboot_yes+reboot_no+op_time+losts+kcrs+visions+pzs+miss
if department == 1:
ws.append([last_name, first_name, pings, knowledges, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, losts, kcrs, visions, pzs, miss, summary])
# elif department == 2:
# ws.append([last_name, first_name, pings, f_dezh, h_dezh, visions, pzs, summary])
# elif department in [5,6]:
# ws.append([last_name, first_name, pings, f_dezh, h_dezh, summary])
except:
pass
su_sql = """SELECT SUM(pings), SUM(knowledges), SUM(f_dezh), SUM(h_dezh), SUM(study_yes), SUM(study_no), SUM(break_time), SUM(reboot_yes), SUM(reboot_no), SUM(op_time), SUM(losts), SUM(kcrs), SUM(visions), SUM(pzs), SUM(miss)
FROM supers
WHERE department={}""".format(department)
mycursor.execute(su_sql)
su_data = mycursor.fetchall()[0]
t_pings, t_knowledges, t_f_dezh, t_h_dezh, t_study_yes, t_study_no, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_losts, t_kcrs, t_visions, t_pzs, t_miss = su_data
t_total = t_pings+t_knowledges+t_f_dezh+t_h_dezh+t_study_yes+t_study_no+t_break_time+t_reboot_yes+t_reboot_no+t_op_time+t_losts+t_kcrs+t_visions+t_pzs+t_miss
if department == 1:
ws.append(["----","ИТОГО", t_pings, t_knowledges, t_f_dezh, t_h_dezh, t_study_yes, t_study_no, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_losts, t_kcrs, t_visions, t_pzs, t_miss, t_total])
# elif department == 2:
# ws.append(["----","ИТОГО", t_pings, t_knowledges, t_f_dezh, t_h_dezh, t_visions, t_pzs, t_total])
# elif department in [5,6]:
# ws.append(["----","ИТОГО", t_pings, t_f_dezh, t_h_dezh, t_total])
# if department != 3:
# ws.append(["--------------"])
# ws.append(["ПОМОГАТОРСКАЯ"])
# ws.append(["--------------"])
# sql = """SELECT last_name, first_name, pings, knowledges, checks, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, visions, pzs, hards_yes, hards_no
# FROM helpers
# WHERE department={}
# ORDER BY last_name ASC""".format(department)
# mycursor.execute(sql)
# he_list = mycursor.fetchall()
# if department == 1:
# ws.append(["Фамилия","Имя","Пинги","Знания","Гл.деж","Вр.деж","Обуч./Прос(ДА)","Обуч./Прос(НЕТ)","Перерыв","Перезагрузка(ДА)","Перезагрузка(НЕТ)","ОП","Визия","Проверка знаний","Всего"])
# # elif department == 2:
# # ws.append(["Фамилия","Имя","Пинги","Знания","Гл.деж","Вр.деж","Визия","Проверка знаний","Всего"])
# # elif department in [5,6]:
# # ws.append(["Фамилия","Имя","Пинги","Гл.деж","Вр.деж","Всего"])
# for row in he_list:
# try:
# last_name, first_name, pings, knowledges, checks, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, visions, pzs, hards_yes, hards_no = row
# summary = pings+knowledges+checks+f_dezh+h_dezh+study_yes+study_no+break_time+reboot_yes+reboot_no+op_time+visions+pzs+hards_yes+hards_no
# if summary != 0:
# if department == 1:
# ws.append([last_name, first_name, pings, knowledges, f_dezh, h_dezh, study_yes, study_no, break_time, reboot_yes, reboot_no, op_time, visions, pzs, summary])
# # elif department == 2:
# # ws.append([last_name, first_name, pings, knowledges, f_dezh, h_dezh, visions, pzs, summary])
# # elif department in [5,6]:
# # ws.append([last_name, first_name, pings, f_dezh, h_dezh, summary])
# except:
# pass
# h_sql = """SELECT SUM(pings), SUM(knowledges), SUM(checks), SUM(f_dezh), SUM(h_dezh), SUM(study_yes), SUM(study_no), SUM(break_time), SUM(reboot_yes), SUM(reboot_no), SUM(op_time), SUM(visions), SUM(pzs), sum(hards_yes), sum(hards_no)
# FROM helpers
# WHERE department={}""".format(department)
# mycursor.execute(h_sql)
# h_data = mycursor.fetchall()[0]
# t_pings, t_knowledges, t_checks, t_f_dezh, t_h_dezh, t_study_yes, t_study_no, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_visions, t_pzs, t_hards_yes, t_hards_no = h_data
# t_total = t_pings+t_knowledges+t_checks+t_f_dezh+t_h_dezh+t_study_yes+t_study_no+t_break_time+t_reboot_yes+t_reboot_no+t_op_time+t_visions+t_pzs+t_hards_yes+t_hards_no
# if department == 1:
# ws.append(["----","ИТОГО",t_pings, t_knowledges, t_f_dezh, t_h_dezh, t_study_yes, t_study_no, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_visions, t_pzs, t_total])
# # elif department == 2:
# # ws.append(["----","ИТОГО",t_pings, t_knowledges, t_f_dezh, t_h_dezh, t_visions, t_pzs, t_total])
# # elif department in [5,6]:
# # ws.append(["----","ИТОГО",t_pings, t_f_dezh, t_h_dezh, t_total])
else:
ws.append(["--------------"])
ws.append(["НАСТАВНИЧЕСКАЯ"])
ws.append(["--------------"])
sql = """SELECT last_name, first_name, pings, knowledges, checks, hards_yes, hards_no, f_dezh, break_time, reboot_yes, reboot_no, op_time, visions, pzs
FROM mentors
WHERE department={}
ORDER BY last_name ASC""".format(department)
mycursor.execute(sql)
he_list = mycursor.fetchall()
ws.append(["Фамилия","Имя","Пинги","Знания","Проверки","Сложный(ДА)","Сложный(НЕТ)","Дежурный","Перерыв","Перезагрузка(ДА)","Перезагрузка(НЕТ)","ОП","Визия","Всего"])
for row in he_list:
try:
last_name, first_name, pings, knowledges, checks, hards_yes, hards_no, f_dezh, break_time, reboot_yes, reboot_no, op_time, visions = row
summary = pings+knowledges+checks+f_dezh+break_time+reboot_yes+reboot_no+op_time+hards_yes+hards_no+visions
if summary != 0:
ws.append([last_name, first_name, pings, knowledges, checks, hards_yes, hards_no, f_dezh, break_time, reboot_yes, reboot_no, op_time, visions, summary])
except:
pass
h_sql = """SELECT SUM(pings), SUM(knowledges), SUM(checks), sum(hards_yes), sum(hards_no), SUM(f_dezh), SUM(break_time), SUM(reboot_yes), SUM(reboot_no), SUM(op_time), sum(visions)
FROM mentors
WHERE department={}""".format(department)
mycursor.execute(h_sql)
h_data = mycursor.fetchall()[0]
t_pings, t_knowledges, t_checks, t_hards_yes, t_hards_no, t_f_dezh, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_visions = h_data
t_total = t_pings+t_knowledges+t_checks+t_f_dezh+t_break_time+t_reboot_yes+t_reboot_no+t_op_time+t_hards_yes+t_hards_no+t_visions
ws.append(["----","ИТОГО",t_pings, t_knowledges, t_checks, t_hards_yes, t_hards_no, t_f_dezh, t_break_time, t_reboot_yes, t_reboot_no, t_op_time, t_visions, t_total])
ws.append(["--------------"])
ws.append(["РАБОТЯГИ"])
ws.append(["--------------"])
sql = """SELECT adress, last_name, first_name, pings, knowledges, hards, checks, dezh, study, break_time, reboot, op_time, losts, kcrs, exps, miss, pings_canceled, dezh_canceled
FROM staff
WHERE department={}
ORDER BY adress, last_name""".format(department)
mycursor.execute(sql)
st_list = mycursor.fetchall()
if department == 1:
ws.append(["Фамилия","Имя","Пинги","Знания","Пинг отмен.","Дежурка","Обуч./Прос.","Перерыв","Перезагрузка","ОП","Дежурка отмен.","Потеряхи","КЦР","Эксперты","Фикс.перерыв","Всего","Всего отмен."])
# elif department == 2:
# ws.append(["Фамилия","Имя","Пинги","Знания","Пинг отмен.","Дежурка","Перерыв","Перезагрузка","ОП","Дежурка отмен.","Эксперты","Всего","Всего отмен."])
elif department == 3:
ws.append(["Набор","Фамилия","Имя","Пинги","Знания","Сложный","Проверки","Пинг отмен.","Дежурка","Перерыв","Перезагрузка","ОП","Дежурка отмен.","Всего","Всего отмен."])
# elif department in [5,6]:
# ws.append(["Фамилия","Имя","Пинги","Пинг отмен.","Дежурка","Дежурка отмен.","Всего","Всего отмен."])
for row in st_list:
try:
adress, last_name, first_name, pings, knowledges, hards, checks, dezh, study, break_time, reboot, op_time, losts, kcrs, exps, miss, pings_canceled, dezh_canceled = row
summary = pings+knowledges+checks+dezh+study+break_time+reboot+op_time+losts+kcrs+exps+miss+hards
canceled_summary = pings_canceled + dezh_canceled
if summary != 0:
if department == 1:
ws.append([last_name, first_name, pings, knowledges, pings_canceled, dezh, study, break_time, reboot, op_time, dezh_canceled, losts, kcrs, exps, miss, summary, canceled_summary])
# elif department == 2:
# ws.append([last_name, first_name, pings, knowledges, pings_canceled, dezh, break_time, reboot, op_time, dezh_canceled, exps, summary, canceled_summary])
elif department == 3:
ws.append([adress, last_name, first_name, pings, knowledges, hards, checks, pings_canceled, dezh, break_time, reboot, op_time, dezh_canceled, summary, canceled_summary])
# elif department in [5,6]:
# ws.append([last_name, first_name, pings, pings_canceled, dezh, dezh_canceled, summary, canceled_summary])
except:
pass
s_sql = """SELECT SUM(pings), SUM(knowledges), sum(hards), SUM(checks), SUM(dezh), SUM(study), SUM(break_time), SUM(reboot), SUM(op_time), SUM(losts), SUM(kcrs), SUM(exps), SUM(miss), SUM(pings_canceled), SUM(dezh_canceled)
FROM staff
WHERE department={}""".format(department)
mycursor.execute(s_sql)
s_data = mycursor.fetchall()[0]
t_pings, t_knowledges, t_hards, t_checks, t_dezh, t_study, t_break_time, t_reboot, t_op_time, t_losts, t_kcrs, t_exps, t_miss, t_canceled_dezh, t_canceled_pings = s_data
t_total = t_pings+t_knowledges+t_hards+t_checks+t_dezh+t_study+t_break_time+t_reboot+t_op_time+t_losts+t_kcrs+t_exps+t_miss
t_canceled_total= t_canceled_dezh + t_canceled_pings
if department == 1:
ws.append(["----","ИТОГО",t_pings, t_knowledges, t_canceled_pings, t_dezh, t_study, t_break_time, t_reboot, t_op_time, t_canceled_dezh, t_losts, t_kcrs, t_exps, t_miss, t_total, t_canceled_total])
# elif department == 2:
# ws.append(["----","ИТОГО",t_pings, t_knowledges, t_canceled_pings, t_dezh, t_break_time, t_reboot, t_op_time, t_canceled_dezh, t_exps, t_total, t_canceled_total])
elif department == 3:
ws.append(["----","----","ИТОГО",t_pings, t_knowledges, t_hards, t_checks, t_canceled_pings, t_dezh, t_break_time, t_reboot, t_op_time, t_canceled_dezh, t_total, t_canceled_total])
# elif department in [5,6]:
# ws.append(["----","ИТОГО",t_pings, t_canceled_pings, t_dezh, t_canceled_dezh, t_total, t_canceled_total])
wb.save(PATH)
def is_today(department):
    """Roll the daily statistics over when the stored date is stale.

    Reads the last-recorded date for *department* from the ``stats`` table;
    if it differs from today's date, exports the accumulated numbers to the
    spreadsheet via ``make_statistic`` and then zeroes the daily counters.

    NOTE(review): the UPDATE statements below carry no WHERE clause, so the
    counters are reset for ALL departments, not just the one passed in —
    confirm this is intentional.
    """
    mydb = sqlite3.connect(DB_DIR)
    mycursor = mydb.cursor()
    today = datetime.date.today()
    # Last date statistics were generated for this department.
    sql = "SELECT date FROM stats WHERE department={}".format(department)
    mycursor.execute(sql)
    result = mycursor.fetchall()
    s_date = result[0][0].split("-")
    # Normalise to zero-padded YYYY-MM-DD so it compares with str(date.today()).
    date2 = "{}-{:0>2}-{:0>2}".format(s_date[0], s_date[1], s_date[2])
    if str(today) != str(date2):
        # Export the previous day's numbers before wiping them.
        if department == 1:
            make_statistic(1)
        # elif department == 2:
        #     make_statistic(2)
        elif department == 3:
            make_statistic(3)
        # elif department == 5:
        #     make_statistic(5)
        # elif department == 6:
        #     make_statistic(6)
        # Reset the daily counters in every bookkeeping table.
        sql = """UPDATE stats
        SET total_time=0, total_dezh_time=0, pings=0, knowledges=0, hards=0, checks=0, dezh=0, study=0, break_time=0, reboot=0, op_time=0, checks=0, losts=0, exps=0, kcrs=0, visions=0, pzs=0, pings_canceled=0, dezh_canceled=0, miss=0"""
        mycursor.execute(sql)
        sql = """UPDATE staff
        SET total_time=0, total_dezh_time=0, hards=0, time_to_answer=0, time_to_answer_dezh=0, last_ping_mid=-1, ping_type='empty', ping_status=0, last_dezh_mid=-1, dezh_status=0, pings_canceled=0, dezh_canceled=0, pings=0, knowledges=0, checks=0, dezh=0, study=0, break_time=0, reboot=0, op_time=0, checks=0, kcrs=0, losts=0, exps=0, miss=0"""
        mycursor.execute(sql)
        sql = """UPDATE helpers
        SET pings=0, knowledges=0, hards_yes=0, hards_no=0, checks=0, f_dezh=0, h_dezh=0, study_yes=0, study_no=0, break_time=0, reboot_yes=0, reboot_no=0, op_time=0, checks=0, kcrs=0, visions=0, pzs=0"""
        mycursor.execute(sql)
        sql = """UPDATE supers
        SET pings=0, knowledges=0, hards_yes=0, hards_no=0, checks=0, f_dezh=0, h_dezh=0, study_yes=0, study_no=0, break_time=0, reboot_yes=0, reboot_no=0, op_time=0, losts=0, exps=0, kcrs=0, visions=0, pzs=0, miss=0"""
        mycursor.execute(sql)
        sql = """UPDATE mentors
        SET pings=0, knowledges=0, hards_yes=0, hards_no=0, checks=0, f_dezh=0, break_time=0, reboot_yes=0, reboot_no=0, op_time=0"""
        mycursor.execute(sql)
        mydb.commit()
|
import datetime
import smtplib
from django.contrib.auth.forms import PasswordChangeForm
import pytz
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import Student, Student_Course, CustomUser, Course, Faculty, Faculty_Course, Faculty_Assignment, \
Student_Assignment, Student_Grade, Resource
def loginPage(request):
    """Authenticate a visitor and route them to the matching dashboard.

    Already-authenticated users are redirected straight away; a POST
    attempts login and redirects on success, otherwise an error message
    is queued and the login form is re-rendered.
    """
    status = None
    if request.user.is_authenticated:
        for record in CustomUser.objects.filter(email=request.user):
            status = record.designation
        target = 'dashboard' if status == 'student' else 'faculty_dashboard'
        return redirect(target)
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        user = authenticate(request, email=email, password=password)
        if user is None:
            messages.info(request, 'Username OR password is incorrect')
        else:
            login(request, user)
            for record in CustomUser.objects.filter(email=request.user):
                status = record.designation
            target = 'dashboard' if status == 'student' else 'faculty_dashboard'
            return redirect(target)
    return render(request, 'lms/login.html', {})
def logoutUser(request):
    """Terminate the current session and return to the login page."""
    logout(request)
    return redirect('login')
@login_required(login_url='login')
def dashboard(request):
    """Render the student dashboard: enrolled courses, credits and due work."""
    student_info = None
    f_name = None
    l_name = None
    for student in Student.objects.filter(email_id=request.user):
        student_info = student.email_id
        f_name = student.f_name
        l_name = student.l_name
    course = []
    no_of_course = 0
    total_credits = 0
    due_assignments = 0
    for enrolment in Student_Course.objects.filter(email=student_info):
        total = Faculty_Assignment.objects.filter(course_id=enrolment.course_id).count()
        done = Student_Assignment.objects.filter(course_id=enrolment.course_id,
                                                 student_id=request.user).count()
        due = total - done
        due_assignments += due
        for detail in Course.objects.filter(course_id=enrolment.course_id):
            course.append({"course_id": enrolment.course_id,
                           "course_name": detail.course_name,
                           "due": due})
            no_of_course += 1
            total_credits += detail.course_credits
    return render(request, 'lms/dashboard.html',
                  context={"name": f_name, "l_name": l_name, "course": course, "no_of_course": no_of_course,
                           "total_credits": total_credits, "due_assignments": due_assignments})
## Faculty dashboard
@login_required(login_url='login')
def faculty_dashboard(request):
    """Render the faculty dashboard: taught courses, enrolment and ungraded work."""
    fac_info = Faculty.objects.get(email_id=request.user)
    course = []
    no_of_course = 0
    ungraded_assignments = 0
    # NOTE: never populated in the original; kept as-is for template compatibility.
    no_of_students = []
    for membership in Faculty_Course.objects.filter(email=fac_info):
        submitted = Student_Assignment.objects.filter(course_id=membership.course_id).count()
        graded = Student_Grade.objects.filter(course_id=membership.course_id).count()
        due = submitted - graded
        ungraded_assignments += due
        for detail in Course.objects.filter(course_id=membership.course_id):
            enrolled = Student_Course.objects.filter(course_id=membership.course_id).count()
            course.append({"course_id": membership.course_id,
                           "course_name": detail.course_name,
                           "course_count": enrolled,
                           "due": due})
            no_of_course += 1
    return render(request, 'lms/faculty_dashboard.html',
                  context={"name": fac_info.f_name, "l_name": fac_info.l_name, "course": course,
                           "no_of_course": no_of_course, "total_students": no_of_students,
                           "ungraded_assignments": ungraded_assignments})
## view where faculty can view its added assignment, deadline etc..
def faculty_assignment(request, course_name, course_id, assignment_id):
    """List the assignments a faculty member created for *course_id*,
    optionally deleting one first.

    Fix: the URL routes pass the literal string "None" as a placeholder
    (see the redirect built in edit_upload_assignment), but the original
    compared against the None object, so every plain page view issued a
    pointless DELETE query matching nothing.
    """
    if assignment_id is not None and assignment_id != "None":
        Faculty_Assignment.objects.filter(course_id=course_id, assign_id=assignment_id).delete()
    course_info = Faculty_Assignment.objects.filter(course_id=course_id)
    # Sidebar data: every course this faculty member teaches.
    fac_course = Faculty_Course.objects.filter(email=request.user)
    course = []
    designation = "faculty"
    for i in fac_course:
        for j in Course.objects.filter(course_id=i.course_id):
            course.append({"course_id": i.course_id,
                           "course_name": j.course_name,
                           })
    assign_info = []
    total_assignment = 0
    for i in course_info:
        assign_info.append({"assign_id": i.assign_id,
                            "marks": i.marks,
                            "PDF": i.PDF,
                            "deadline": i.deadline
                            })
        total_assignment = total_assignment + 1
    return render(request, 'lms/faculty_assignments.html',
                  context={"course_id": course_id, "assign_info": assign_info, "course_name": course_name,
                           "course": course, "designation": designation})
## views where faculty can add and edit their assignments
def edit_upload_assignment(request, course_id, course_name, assign_id):
    """Create a new assignment (assign_id == "None") or edit an existing one.

    On POST the uploaded PDF is stored via FileSystemStorage, a
    Faculty_Assignment row is created or updated, and enrolled students are
    notified by email.  On GET the upload form is rendered.
    """
    # Sidebar data: every course this faculty member teaches.
    fac_course = Faculty_Course.objects.filter(email=request.user)
    course = []
    designation = "faculty"
    for i in fac_course:
        cn = Course.objects.filter(course_id=i.course_id)
        for j in cn:
            d = {"course_id": i.course_id,
                 "course_name": j.course_name,
                 }
            course.append(d)
    email = request.user
    faculty = Faculty.objects.get(email_id=email)
    if request.method == 'POST':
        if request.POST:
            post = Faculty_Assignment()
            post.marks = request.POST.get('marks')
            post.deadline = request.POST.get('deadline')
            post.faculty_id = faculty.email_id
            post.course_id = course_id
            # NOTE(review): raises MultiValueDictKeyError if no file field
            # named 'PDF' was submitted — confirm the form always supplies one.
            file = request.FILES['PDF']
            f = FileSystemStorage()
            fileName = f.save(file.name, file)
            # Reuse of `f` here: it now holds the relative path, not the storage.
            f = 'static/files/' + fileName
            post.PDF = f
            if assign_id == "None":
                # New assignment: derive the next id "<course_id>_<max+1>"
                # from the numeric suffix of the course's existing ids.
                assig = Faculty_Assignment.objects.filter(course_id=course_id)
                arr = [0]
                for i in assig:
                    a = i.assign_id.split("_", 1)
                    arr.append(int(a[1]))
                id = course_id + '_' + str(max(arr) + 1)
                post.assign_id = id
                post.save()
                text = 'New assignment has been added under course ' + str(course_name)
                subject = 'Check out new assignment on lms'
                email_sender(subject, text, course_id)
            else:
                # Existing assignment: update in place, then notify students.
                Faculty_Assignment.objects.filter(assign_id=assign_id).update(PDF=post.PDF, marks=post.marks,
                                                                              deadline=post.deadline)
                text = 'Assignment ' + str(assign_id) + ' has been edited under course ' + str(course_name)
                subject = 'Check out changes in assignment ' + str(assign_id) + ' on lms'
                email_sender(subject, text, course_id)
            s = '/faculty_assignment/' + str(course_id) + '/' + str(course_name) + '/' + 'None'
            return redirect(s)
    return render(request, 'lms/upload.html',
                  context={"course_id": course_id, "course_name": course_name, "course": course,
                           "designation": designation})
## Assignment-Marks-Upload-Resources
def static_page(request, course_name, course_id):
    """Render the course landing page with a role-appropriate course list."""
    designation = None
    for account in CustomUser.objects.filter(email=request.user):
        designation = account.designation
    course = []
    if designation == "faculty":
        for membership in Faculty_Course.objects.filter(email=request.user):
            for detail in Course.objects.filter(course_id=membership.course_id):
                course.append({"course_id": membership.course_id,
                               "course_name": detail.course_name,
                               })
    else:
        email = None
        for student in Student.objects.filter(email_id=request.user):
            email = student.email_id
        for enrolment in Student_Course.objects.filter(email=email):
            for detail in Course.objects.filter(course_id=enrolment.course_id):
                course.append({"course_id": enrolment.course_id,
                               "course_name": detail.course_name,
                               })
    return render(request, 'lms/course_page.html',
                  context={"course_id": course_id, "designation": designation, "course_name": course_name,
                           "course": course})
## views where student can view the assignments uploaded by repective faculties
def student_assignment(request, course_id, course_name):
    """List every assignment of *course_id* for the logged-in student with an
    Upload / Edit / View Grades status.

    Fix: the original re-fetched each assignment row by its own id inside the
    loop (a redundant query per assignment) and shadowed the outer loop
    variable ``i`` while doing so; the row already carries the deadline.
    """
    email = Student.objects.get(email_id=request.user)
    course = []
    designation = "student"
    # Sidebar data: every course this student is enrolled in.
    for enrolment in Student_Course.objects.filter(email=email):
        for detail in Course.objects.filter(course_id=enrolment.course_id):
            course.append({"course_id": enrolment.course_id,
                           "course_name": detail.course_name,
                           })
    assign_info = []
    for assignment in Faculty_Assignment.objects.filter(course_id=course_id):
        submitted = Student_Assignment.objects.filter(assign_id=assignment.assign_id,
                                                      student_id=request.user).count()
        if submitted == 0:
            status = "Upload"
        elif Student_Grade.objects.filter(assign_id=assignment.assign_id,
                                          student_id=request.user).count() == 0:
            status = "Edit"
        else:
            status = "View Grades"
        assign_info.append({"assign_id": assignment.assign_id,
                            "deadline": assignment.deadline,
                            "status": status,
                            })
    return render(request, 'lms/student_assignment.html',
                  context={"assign_info": assign_info, "course_id": course_id, "course_name": course_name,
                           "course": course, "designation": designation})
# Where student can upload or edit assignments
def upload_student_assignment(request, assign_id, course_id, course_name):
    """Let a student upload (or re-upload) their submission for *assign_id*.

    GET renders the upload form together with a human-readable time-to/since
    deadline; POST stores the PDF and creates or updates the
    Student_Assignment row, then redirects to the assignment list.
    """
    d = Faculty_Assignment.objects.filter(assign_id=assign_id)
    PDF = None
    weightage = None
    email = Student.objects.get(email_id=request.user)
    stu_course = Student_Course.objects.filter(email=email)
    course = []
    designation = "student"
    # Sidebar data: every course this student is enrolled in.
    for i in stu_course:
        cn = Course.objects.filter(course_id=i.course_id)
        for j in cn:
            data = {"course_id": i.course_id,
                    "course_name": j.course_name,
                    }
            course.append(data)
    # Pull deadline/marks/PDF from the (expected single) assignment row.
    # NOTE(review): if the filter matches nothing, `deadline` (and later
    # `diff`) are never bound and the comparisons below raise — confirm
    # assign_id is always valid when this view is reached.
    for i in d:
        deadline = i.deadline
        weightage = i.marks
        PDF = i.PDF
    count_check = Student_Assignment.objects.filter(assign_id=assign_id, student_id=request.user).count()
    if (count_check == 0):
        # No submission yet: show time remaining until (or elapsed since) the deadline.
        # NOTE(review): lowercase zone name relies on pytz's case-insensitive
        # lookup (present in modern pytz releases) — confirm; canonical name
        # is 'Asia/Kolkata'.
        tz = pytz.timezone('asia/kolkata')
        ct = datetime.datetime.now(tz=tz)
        if (deadline > ct):
            sub = deadline - ct
            string_format = str(sub)
            # partition('.') drops the fractional seconds of the timedelta.
            k = string_format.partition('.')
            diff = k[0] + ' Left for submission'
        else:
            sub = ct - deadline
            string_format = str(sub)
            k = string_format.partition('.')
            diff = k[0] + ' Due for submission'
    else:
        # Already submitted: show how early/late the submission was.
        time_of_sub = Student_Assignment.objects.filter(assign_id=assign_id, student_id=request.user)
        for i in time_of_sub:
            submisson = i.time_of_submission
        if deadline > submisson:
            sub = deadline - submisson
            string_format = str(sub)
            k = string_format.partition('.')
            diff = 'Submitted before ' + k[0]
        else:
            sub = submisson - deadline
            string_format = str(sub)
            k = string_format.partition('.')
            diff = 'Submitted after ' + k[0]
    if request.method == 'POST':
        if request.POST:
            file = request.FILES['PDF']
            f = FileSystemStorage()
            fileName = f.save(file.name, file)
            # `f` is reused: now the stored file's relative path.
            f = 'static/files/' + fileName
            status_check = Student_Assignment.objects.filter(assign_id=assign_id, student_id=request.user).count()
            if (status_check == 0):
                # First submission: create the row with the current timestamp.
                post = Student_Assignment()
                post.assign_id = assign_id
                post.course_id = course_id
                post.student_id = request.user
                tz = pytz.timezone('asia/kolkata')
                ct = datetime.datetime.now(tz=tz)
                post.time_of_submission = ct
                post.PDF = f
                post.save()
            else:
                # Re-submission: update the PDF and timestamp in place.
                post = Student_Assignment()
                post.PDF = f
                tz = pytz.timezone('asia/kolkata')
                ct = datetime.datetime.now(tz=tz)
                post.time_of_submission = ct
                Student_Assignment.objects.filter(assign_id=assign_id, student_id=request.user).update(PDF=post.PDF,
                                                                                                       time_of_submission=post.time_of_submission)
            s = '/student_assignment/' + str(course_id) + '/' + str(course_name)
            return redirect(s)
    return render(request, 'lms/student_upload.html',
                  context={"assign_id": assign_id, "course_name": course_name, "weightage": weightage, "PDF": PDF,
                           "status": diff, "deadline": deadline, "designation": designation, "course": course})
## Where faculty gets list of their assignment for grading
def faculty_assignment_list_for_grading(request, course_id, course_name):
    """List each assignment of *course_id* with its grading progress."""
    course = [
        {"course_id": membership.course_id,
         "course_name": detail.course_name,
         }
        for membership in Faculty_Course.objects.filter(email=request.user)
        for detail in Course.objects.filter(course_id=membership.course_id)
    ]
    designation = "faculty"
    assign_info = []
    for assignment in Faculty_Assignment.objects.filter(course_id=course_id):
        graded = Student_Grade.objects.filter(assign_id=assignment.assign_id).count()
        submitted = Student_Assignment.objects.filter(assign_id=assignment.assign_id).count()
        if graded == submitted and graded != 0:
            status = "completed"
        elif graded != submitted:
            status = "Pending"
        else:
            status = "No submissions yet "
        assign_info.append({"assign_id": assignment.assign_id,
                            "deadline": assignment.deadline,
                            "status": status
                            })
    return render(request, 'lms/faculty_assignment_list_for_grading.html',
                  context={"course_id": course_id, "assign_info": assign_info, "course_name": course_name,
                           "course": course, "designation": designation})
# Where faculty can view students submission
def students_submission_list(request, assign_id, course_id, course_name):
    """Show the faculty every student submission for *assign_id* with
    late/on-time status and current marks (or a prompt to enter them)."""
    # if request.method == 'GET':
    #     assign_id = request.GET.get('assign_id')
    #     course_name = request.GET.get('course_name')
    # Sidebar data: every course this faculty member teaches.
    fac_course = Faculty_Course.objects.filter(email=request.user)
    course = []
    designation = "faculty"
    for i in fac_course:
        cn = Course.objects.filter(course_id=i.course_id)
        for j in cn:
            d = {"course_id": i.course_id,
                 "course_name": j.course_name,
                 }
            course.append(d)
    deadline = 0
    total_marks = 0
    marks = None
    # Deadline and maximum marks come from the assignment definition.
    course_info = Faculty_Assignment.objects.filter(course_id=course_id, assign_id=assign_id)
    for i in course_info:
        deadline = i.deadline
        total_marks = i.marks
    list = Student_Assignment.objects.filter(assign_id=assign_id)
    student_list = []
    total_submissions = 0
    for i in list:
        id = CustomUser.objects.get(email=i.student_id)
        # Marks cell: "scored/total" if graded, otherwise a prompt string.
        student_grade = Student_Grade.objects.filter(assign_id=assign_id, student_id=i.student_id).count()
        if (student_grade > 0):
            stu = Student_Grade.objects.filter(assign_id=assign_id, student_id=i.student_id)
            for j in stu:
                marks = str(j.marks) + "/" + str(total_marks)
        else:
            marks = "Enter marks"
        # Late/on-time: compare submission timestamp with the deadline and
        # keep only the whole-seconds part of the timedelta string.
        if (i.time_of_submission > deadline):
            status_check = i.time_of_submission - deadline
            string_format = str(status_check)
            k = string_format.partition('.')
            t = k[0]
            status = "Late"
        else:
            status_check = deadline - i.time_of_submission
            status = "On Time"
            string_format = str(status_check)
            k = string_format.partition('.')
            t = k[0]
        data = {"assign_id": i.assign_id,
                "student_id": i.student_id,
                "identification": id.identification,
                "status_check": t,
                "status": status,
                "marks": marks
                }
        total_submissions = total_submissions + 1
        student_list.append(data)
    return render(request, 'lms/students_submission_list.html',
                  context={"course_id": course_id, "assign_id": assign_id, "student_list": student_list,
                           "total_submissions": total_submissions, "course_name": course_name, "course": course,
                           "designation": designation})
# where faculty can upload marks of each student
def _send_grade_notifications(course_id, subject, text):
    """Email *text*/*subject* to every student who has a grade entry for
    *course_id*.  One SMTP session is shared for the whole batch and always
    closed, even on failure."""
    addresslist = []
    for grade in Student_Grade.objects.filter(course_id=course_id):
        for user in CustomUser.objects.filter(email=grade.student_id):
            addresslist.append(user.email)
    if not addresslist:
        return
    fromaddr = 'seas.gict@gmail.com'
    username = 'seas.gict@gmail.com'
    # SECURITY: hard-coded SMTP credentials; move to settings / environment.
    password = 'admin@7016176980'
    msg = 'Subject: %s\n\n%s' % (subject, text)
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login(username, password)
        for address in addresslist:
            server.sendmail(fromaddr, address, msg)
    finally:
        server.quit()


def faculty_grades(request, assign_id, course_id, student_id, course_name):
    """Let a faculty member grade (or re-grade) one student's submission.

    GET shows the submitted PDF plus an early/late status line; POST creates
    or updates the Student_Grade row and emails the course's graded students.

    Fix: the original duplicated ~20 lines of per-recipient SMTP code in both
    the create and the update branch, opening a fresh authenticated
    connection for every single recipient with no cleanup on error; that is
    now factored into _send_grade_notifications.
    """
    # Sidebar data: every course this faculty member teaches.
    course = []
    designation = "faculty"
    for membership in Faculty_Course.objects.filter(email=request.user):
        for detail in Course.objects.filter(course_id=membership.course_id):
            course.append({"course_id": membership.course_id,
                           "course_name": detail.course_name,
                           })
    get_pdf = Student_Assignment.objects.filter(assign_id=assign_id, student_id=student_id)
    time_of_sub = 0
    deadline = 0
    pdf = None
    total_marks = None
    for submission in get_pdf:
        pdf = submission.PDF
        time_of_sub = submission.time_of_submission
    for assignment in Faculty_Assignment.objects.filter(assign_id=assign_id):
        total_marks = assignment.marks
        deadline = assignment.deadline
    # Build the status line; partition('.') trims fractional seconds.
    if (time_of_sub > deadline):
        delta = time_of_sub - deadline
        status = "Submited Late after " + str(delta).partition('.')[0]
    else:
        delta = deadline - time_of_sub
        status = "Submited before " + str(delta).partition('.')[0]
    if request.method == 'POST':
        if request.POST:
            existing = Student_Grade.objects.filter(assign_id=assign_id, student_id=student_id).count()
            if (existing == 0):
                post = Student_Grade()
                post.assign_id = assign_id
                post.course_id = course_id
                post.student_id = student_id
                post.marks = request.POST.get('marks')
                post.comments = request.POST.get('comments')
                post.save()
                _send_grade_notifications(
                    course_id,
                    'Check out your grades for ' + str(assign_id) + ' on lms',
                    'Assignment Grade for ' + str(assign_id) + ' has been added under course ' + str(course_name))
            else:
                new_marks = request.POST.get('marks')
                new_comments = request.POST.get('comments')
                Student_Grade.objects.filter(assign_id=assign_id, student_id=student_id).update(marks=new_marks,
                                                                                                comments=new_comments)
                _send_grade_notifications(
                    course_id,
                    'Check out your reviewed grades for ' + str(assign_id) + ' on lms',
                    'Assignment Grade for ' + str(assign_id) + ' has been reviewed under course ' + str(
                        course_name))
            s = '/students_submission_list/' + str(assign_id) + '/' + str(course_id) + '/' + str(course_name)
            return redirect(s)
    return render(request, 'lms/faculty_grades.html',
                  context={"course_id": course_id, "assign_id": assign_id, "pdf": pdf, "course_name": course_name,
                           "total_marks": total_marks, "status": status, "course": course, "designation": designation})
# students get their grades after evaluation
def get_students_grade(request, course_id, course_name):
    """Show the logged-in student their graded assignments for *course_id*."""
    student = Student.objects.get(email_id=request.user)
    course = [
        {"course_id": enrolment.course_id,
         "course_name": detail.course_name,
         }
        for enrolment in Student_Course.objects.filter(email=student)
        for detail in Course.objects.filter(course_id=enrolment.course_id)
    ]
    designation = "student"
    marks = None
    graded_assignments = []
    # Avoid shadowing the builtin `list` the original used for this queryset.
    for grade in Student_Grade.objects.filter(course_id=course_id, student_id=request.user):
        for assignment in Faculty_Assignment.objects.filter(assign_id=grade.assign_id):
            marks = str(grade.marks) + "/" + str(assignment.marks)
        graded_assignments.append({"assign_id": grade.assign_id,
                                   "marks": marks,
                                   "comments": grade.comments})
    return render(request, 'lms/students_submission_list.html',
                  context={"course_id": course_id, "graded_assignments": graded_assignments, "course_name": course_name,
                           "course": course, "designation": designation})
#### function to list resources on faculty side
def resources_list(request, course_id, course_name, resource_id):
    """List a course's resources on the faculty side, optionally deleting one.

    Fix: the URL routes pass the literal string "None" as a placeholder (see
    the redirect built in add_resource), which is truthy, so the original
    issued a pointless DELETE query on every plain page view.
    """
    if resource_id and resource_id != "None":
        Resource.objects.filter(course_id=course_id, resource_id=resource_id).delete()
    res = Resource.objects.filter(course_id=course_id)
    resource_info = []
    for i in res:
        resource_info.append({"resource_id": i.resource_id,
                              "description": i.description,
                              "PDF": i.resource_material})
    # Sidebar data: every course this faculty member teaches.
    fac_course = Faculty_Course.objects.filter(email=request.user)
    course = []
    designation = "faculty"
    for i in fac_course:
        for j in Course.objects.filter(course_id=i.course_id):
            course.append({"course_id": i.course_id,
                           "course_name": j.course_name,
                           })
    return render(request, 'lms/resources_list.html',
                  context={"course": course, "designation": designation, "course_name": course_name,
                           "course_id": course_id, "resource_info": resource_info})
def add_resource(request, course_id, course_name, resource_id):
    """Create a new course resource (resource_id == "None") or edit one.

    On POST the uploaded PDF is stored via FileSystemStorage and a Resource
    row is created or updated; on GET the upload form is rendered.
    """
    # Sidebar data: every course this faculty member teaches.
    fac_course = Faculty_Course.objects.filter(email=request.user)
    course = []
    designation = "faculty"
    for i in fac_course:
        cn = Course.objects.filter(course_id=i.course_id)
        for j in cn:
            d = {"course_id": i.course_id,
                 "course_name": j.course_name,
                 }
            course.append(d)
    if request.method == 'POST':
        if request.POST:
            post = Resource()
            post.course_id = course_id
            email = CustomUser.objects.get(email=request.user)
            post.faculty_id = email.email
            post.description = request.POST.get('description')
            # NOTE(review): raises MultiValueDictKeyError if no file named
            # 'PDF' is submitted — confirm the form always supplies one.
            file = request.FILES['PDF']
            f = FileSystemStorage()
            fileName = f.save(file.name, file)
            # `f` is reused: now the stored file's relative path.
            f = 'static/files/' + fileName
            post.resource_material = f
            if (resource_id == "None"):
                # New resource: derive the next id "R_<course_id>_<max+1>"
                # from the numeric suffix of the course's existing ids.
                res = Resource.objects.filter(course_id=course_id)
                arr = [0]
                for i in res:
                    r = i.resource_id.split("_", 2)
                    arr.append(int(r[2]))
                id = 'R' + '_' + course_id + '_' + str(max(arr) + 1)
                post.resource_id = id
                post.save()
            else:
                # Existing resource: update description and file in place.
                Resource.objects.filter(course_id=course_id, resource_id=resource_id).update(
                    description=post.description,
                    resource_material=post.resource_material)
            s = '/resources_list/' + str(course_id) + '/' + str(course_name) + '/None'
            return redirect(s)
    return render(request, 'lms/add_resource.html',
                  context={"course_id": course_id, "course_name": course_name, "course": course,
                           "designation": designation})
### student can view and download resources uploaded by faculty
def download_resources(request, course_id, course_name):
    """Let a student view and download the resources uploaded for *course_id*."""
    resource_info = [
        {"resource_id": item.resource_id,
         "description": item.description,
         "PDF": item.resource_material}
        for item in Resource.objects.filter(course_id=course_id)
    ]
    student = Student.objects.get(email_id=request.user)
    course = [
        {"course_id": enrolment.course_id,
         "course_name": detail.course_name,
         }
        for enrolment in Student_Course.objects.filter(email=student)
        for detail in Course.objects.filter(course_id=enrolment.course_id)
    ]
    designation = "student"
    return render(request, 'lms/download_resources.html',
                  context={"course_id": course_id, "course_name": course_name, "resource_info": resource_info,
                           "designation": designation, "course": course})
def edit_profile(request):
    """Show the user's profile and handle password changes.

    Resolves the account's designation, loads name/id from the matching
    Student or Faculty record plus the course list for the sidebar, and on
    POST runs Django's PasswordChangeForm (logging the user out on success).
    """
    d = CustomUser.objects.filter(email=request.user)
    des = None
    f_name = None
    l_name = None
    stud_fac_id = None
    for i in d:
        des = i.designation
    if des == "student":
        # Student profile: name/id from Student, courses from Student_Course.
        email = Student.objects.get(email_id=request.user)
        details = Student.objects.filter(email_id=email)
        stu_course = Student_Course.objects.filter(email=email)
        course = []
        for i in stu_course:
            cn = Course.objects.filter(course_id=i.course_id)
            for j in cn:
                d = {"course_id": i.course_id,
                     "course_name": j.course_name,
                     }
                course.append(d)
        for i in details:
            f_name = i.f_name
            l_name = i.l_name
            stud_fac_id = i.s_id
    else:
        # Faculty profile: name/id from Faculty, courses from Faculty_Course.
        email = Faculty.objects.get(email_id=request.user)
        details = Faculty.objects.filter(email_id=email)
        fac_course = Faculty_Course.objects.filter(email=request.user)
        course = []
        for i in fac_course:
            cn = Course.objects.filter(course_id=i.course_id)
            for j in cn:
                d = {"course_id": i.course_id,
                     "course_name": j.course_name,
                     }
                course.append(d)
        for i in details:
            f_name = i.f_name
            l_name = i.l_name
            stud_fac_id = i.f_id
    if request.method == 'POST':
        form = PasswordChangeForm(request.user, request.POST)
        if form.is_valid():
            user = form.save()
            # Keep the session valid after the password change.
            update_session_auth_hash(request, user)  # Important!
            messages.success(request, 'Your password was successfully updated!')
            return redirect('logout')
        # An invalid form falls through and re-renders with its errors bound.
    else:
        form = PasswordChangeForm(request.user)
    return render(request, 'lms/edit_profile.html',
                  context={"course": course, "form": form, "f_name": f_name, "l_name": l_name, "id": stud_fac_id,"designation":des})
def email_sender(subject, text, course_id):
    """Send an email with `subject`/`text` to every student enrolled in course_id.

    Improvements over the original: a single SMTP session is opened and reused
    for all recipients (the original reconnected and re-authenticated per
    address), and the session is always closed via try/finally.
    """
    email_list = Student_Course.objects.filter(course_id=course_id)
    addresslist = []
    for enrollment in email_list:
        # Resolve each enrolment email to the CustomUser's address.
        for user in CustomUser.objects.filter(email=enrollment.email):
            addresslist.append(user.email)
    fromaddr = 'seas.gict@gmail.com'
    msg = 'Subject: %s\n\n%s' % (subject, text)
    username = 'seas.gict@gmail.com'
    # SECURITY: credentials are hard-coded; move them to settings/environment variables.
    password = 'admin@7016176980'
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()
        server.login(username, password)
        for toaddrs in addresslist:
            server.sendmail(fromaddr, toaddrs, msg)
    finally:
        server.quit()
    return
def enrolled_students(request, course_id, course_name):
    """List every student enrolled in course_id, for a faculty member's view."""
    designation = "faculty"
    # Courses taught by the requesting faculty member (for the sidebar/nav).
    course = [
        {"course_id": fac_course.course_id,
         "course_name": matched.course_name,
         }
        for fac_course in Faculty_Course.objects.filter(email=request.user)
        for matched in Course.objects.filter(course_id=fac_course.course_id)
    ]
    info = []
    for enrollment in Student_Course.objects.filter(course_id=course_id):
        for student in CustomUser.objects.filter(email=enrollment.email):
            info.append({
                "email": enrollment.email,
                "roll": student.identification,
                "first_name": student.first_name,
                "last_name": student.last_name,
            })
    return render(request, 'lms/enrolled_students.html',
                  context={"course_id": course_id, "info": info, "course_name": course_name,
                           "course": course, "designation": designation})
# if request.method == 'GET':
# course_id = request.GET.get('course_id')
# course_name = request.GET.get('course_name')
# else:
# course_id = []
# course_name = []
|
import sys
import copy
import traceback
from collections import defaultdict
from mysql_merge.utils import MiniLogger, create_connection, handle_exception
from mysql_merge.mysql_mapper import Mapper
from mysql_merge.mysql_merger import Merger
import mysql_merge.config as config
# VALIDATE CONFIG:
# Python 2 script: merges several MySQL databases into one destination DB.
if len(config.merged_dbs) == 0:
    print "You must specify at least one database to merge"
    sys.exit()
# Prepare logger
#####################################################################
# STEP 1 - map database schema, relations and indexes
print "STEP 1. Initial mapping of DB schema"
print " -> 1.1 First merged db"
# The first merged database serves as the reference schema layout.
mapped_db = config.merged_dbs[0]
conn = create_connection(mapped_db, config.common_data)
mapper = Mapper(conn, mapped_db['db'], config, MiniLogger())
db_map = mapper.map_db()
conn.close()
print " -> 1.2 Destination db"
conn = create_connection(config.destination_db, config.common_data)
mapper = Mapper(conn, config.destination_db['db'], config, MiniLogger())
mapper.execute_preprocess_queries_target()
destination_db_map = mapper.map_db()
conn.commit()
conn.close()
print ""
print "STEP 2. Actually merge all the databases"
print ""
# Merge every configured source database (except the main one) into the destination.
counter = 0
for source_db in config.merged_dbs:
    if (source_db['db'] != config.main_db):
        counter = counter + 1
        try:
            # Connection settings = shared defaults overridden by the per-db entry.
            source_db_tpl = copy.deepcopy(config.common_data)
            source_db_tpl.update(source_db)
            destination_db_tpl = copy.deepcopy(config.common_data)
            destination_db_tpl.update(config.destination_db)
            merger = Merger(destination_db_map, source_db_tpl, destination_db_tpl, config, counter, MiniLogger())
            merger.merge()
        except Exception,e:
            # Reuse the merger's connection for diagnostics if it was created.
            conn = merger._conn if globals().has_key('merger') else None
            handle_exception("There was an unexpected error while merging db %s" % source_db['db'], e, conn)
print ""
print "STEP 3. Performing post-processing"
print ""
conn = create_connection(config.destination_db, config.common_data)
mapper = Mapper(conn, config.destination_db['db'], config, MiniLogger())
# NOTE(review): "postrocess" looks like a typo but must match the Mapper
# method name — confirm against mysql_merge.mysql_mapper before renaming.
mapper.execute_postrocess_queries_target()
conn.commit()
conn.close()
print "Merge is finished"
|
"""
A simple way for objects to subscribe to other objects for a "feed-like"
functionality in a Django project
"""
__version__ = '0.1.5'
__author__ = 'Rick Vause'
__email__ = 'rvause@gmail.com'
|
# Copyright Mark Jenkins, 2013
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
# http://www.gnu.org/prep/maintain/html_node/License-Notices-for-Other-Files.html
# @author Mark Jenkins <mark@markjenkins.ca>
from binascii import unhexlify
from string import hexdigits
from .key_from_random import (
make_entropy_source_from_existing_bytes_plus_urandom,
make_key_building_from_existing_bytes_plus_urandom,
)
def bytes_from_hex_strings(hex_strings):
    """Concatenate an iterable of hex fragments and decode them to raw bytes."""
    joined = ''.join(hex_strings)
    return unhexlify(bytes(joined, 'ascii'))
def make_key_from_hex_strings(hex_strings):
    """Build a key from caller-supplied hex entropy mixed with urandom."""
    existing_bytes = bytes_from_hex_strings(hex_strings)
    return make_key_building_from_existing_bytes_plus_urandom(existing_bytes)
def make_entropy_source_from_hex_prompt():
    """Prompt the user for hex characters and mix them into an entropy source.

    Non-hex characters are silently discarded; the number of usable bytes is
    echoed back before returning.
    """
    typed = input(
        "Enter some hex as additional entropy, needs to be an even number "
        "of hex characters\n> ")
    hex_line = ''.join(c for c in typed if c in hexdigits)
    print("thanks, got %i bytes (%i bits) out of that" % (
        len(hex_line) // 2, len(hex_line) * 8 // 2))
    print()
    return make_entropy_source_from_existing_bytes_plus_urandom(
        bytes_from_hex_strings((hex_line,)))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 17:04:29 2019
@author: domin
"""
import matplotlib.pyplot as plt
from numpy import linalg as LA
import numpy as np
from scipy import fftpack
#f=8,4,2,1 for each additional qubit
f = 2  # Frequency, in cycles per second, or Hertz
f_s = 40  # Sampling rate, or number of measurements per second
# One-second time grid with f_s evenly spaced samples.
t = np.linspace(0, 1, f_s, endpoint=False)
# Two identical half-amplitude complex exponentials representing the qubit states.
x0=np.exp(1j*2*np.pi*f*t)/2;
x1=np.exp(1j*2*np.pi*f*t)/2;
#changing the negative sign changes the frequency
# Hadamard gate matrix.
H=(1/np.sqrt(2))*np.array([[1, 1],[1, -1]])
def Arraymul(A, x):
    """Apply the 2x2 real matrix A to the (real, imag) components of x.

    Returns (total, row0, row1) where row0 = A[0,0]*Re(x) + A[0,1]*Im(x),
    row1 = A[1,0]*Re(x) + A[1,1]*Im(x) and total = row0 + row1.
    """
    real_part = np.real(x)
    imag_part = np.imag(x)
    row0 = A[0, 0] * real_part + A[0, 1] * imag_part
    row1 = A[1, 0] * real_part + A[1, 1] * imag_part
    return row0 + row1, row0, row1
#gate applied to qubit0 only
XS,xt1,xt2 = Arraymul(H,x0)
#add both states
Xq=XS+x1
# Time-domain plot of the combined signal (real and imaginary parts).
fig, ax = plt.subplots()
ax.plot(t, np.real(Xq),t,np.imag(Xq))
ax.set_xlabel('Time [s]')
ax.set_ylabel('Signal amplitude');
# FFT to inspect the spectrum; frequencies scaled back to Hz via f_s.
Xff = fftpack.fft(Xq)
freqs = fftpack.fftfreq(len(Xq)) *f_s
#freqs = np.fft.fftshift(freqs)
fig, ax = plt.subplots()
ax.stem(freqs, np.abs(Xff)/f_s)
#ax.stem(freqs/f_s, np.abs(np.imag(X1))/f_s)
ax.set_xlabel('Frequency in Hertz [Hz]')
ax.set_ylabel('Frequency Domain (Spectrum) Magnitude')
ax.set_xlim(-3.25 , 3.25 )
ax.set_ylim(-1.25, 1.25)
import logging
from contextlib import contextmanager
from datetime import datetime
from typing import List, Dict
from records import Database
logger = logging.getLogger(__name__)
@contextmanager
def db_connection(db_config: dict):
    """Yield a records.Database connection for the public-transport-stops DSN.

    Bug fix: the original closed the connection only on the happy path; an
    exception raised inside the `with` body leaked it. Cleanup now runs in a
    finally clause, as required for @contextmanager generators.
    """
    connection = Database(db_config['public-transport-stops'])
    try:
        yield connection
    finally:
        connection.close()
def get_count_of_distinct_next_stops(db: Database, relevant_stops: List[str]) -> Dict[int, int]:
    """Count, per uic_ref in relevant_stops, the distinct immediately-following
    stops on rail/metro trips (route_type 1 or 2).

    Returns a mapping uic_ref -> number of distinct next stops.
    """
    # nsm2 is joined one stop_sequence ahead of nsm1 on the same trip, so each
    # (nsm1, nsm2) pair is a stop and the stop directly after it.
    rows = db.query("""WITH relevant_stops AS (
    SELECT unnest(:relevant_stops) AS uic_ref
),
next_station_mapping AS (
    SELECT DISTINCT
        s.stop_name,
        t.trip_id,
        st.stop_sequence,
        s.uic_ref
    FROM stops s
        INNER JOIN stop_times st ON s.stop_id = st.stop_id
        INNER JOIN trips t ON st.trip_id = t.trip_id
        INNER JOIN routes r ON t.route_id = r.route_id
    WHERE r.route_type = 2 OR r.route_type = 1
)
SELECT distinct
    nsm1.uic_ref,
    COUNT(nsm2.stop_name)
    OVER (PARTITION BY nsm1.uic_ref )
FROM relevant_stops
    LEFT JOIN next_station_mapping nsm1 ON relevant_stops.uic_ref = nsm1.uic_ref
    INNER JOIN next_station_mapping nsm2 ON nsm1.trip_id = nsm2.trip_id
WHERE nsm1.stop_sequence = (nsm2.stop_sequence - 1)
GROUP BY nsm1.uic_ref, nsm2.stop_name;""",
                    relevant_stops=relevant_stops).all()
    return {int(row['uic_ref']): row['count'] for row in rows}
def get_all_departure_times(db_config: dict, due_date: datetime) -> Dict[int, List[datetime]]:
    """Collect all departure times per uic_ref on due_date, merging plain
    stop_times trips with frequency-modeled trips."""
    with db_connection(db_config) as db:
        merged: Dict[int, List[datetime]] = _query_stop_times_departures(db, due_date)
        frequency_based: Dict[int, List[datetime]] = _query_frequency_departure_times(db, due_date)
        # Fold the frequency-based departures into the stop_times result.
        for stop_ref, departures in frequency_based.items():
            if stop_ref not in merged:
                merged[stop_ref] = departures
            else:
                merged[stop_ref].extend(departures)
    return merged
def _query_stop_times_departures(db: Database, due_date: datetime) -> Dict[int, List[datetime]]:
    """Fetch per-stop departure times for trips NOT modeled in the frequencies
    table, restricted to services running on due_date."""
    due_date_gtfs: str = _format_gtfs_date(due_date)
    rows = db.query("""WITH calendar_trip_mapping AS (
    SELECT
        st.departure_time,
        s.uic_ref
    FROM stop_times st
        INNER JOIN stops s ON st.stop_id = s.stop_id
        INNER JOIN trips t ON st.trip_id = t.trip_id
        LEFT JOIN calendar_dates c ON t.service_id = c.service_id
    WHERE NOT EXISTS(SELECT 1
                     FROM frequencies f
                     WHERE f.trip_id = t.trip_id)
        AND (c.date = :date OR t.service_id = '000000')
)
SELECT
    uic_ref,
    array_agg(departure_time) AS departure_times
FROM calendar_trip_mapping
GROUP BY uic_ref""",
                    date=due_date_gtfs).all()
    # service_id 000000 represents the whole schedule
    return {row['uic_ref']: _combine_departure_time(row, due_date) for row in rows}
def _query_frequency_departure_times(db: Database, due_date: datetime) -> Dict[int, List[datetime]]:
    """Get departure times for stops which have trips that are modeled in the frequencies table"""
    due_date_gtfs: str = _format_gtfs_date(due_date)
    # generate_series expands each headway into concrete second offsets from the
    # trip's first departure, capped at the frequency's end_time (max one day).
    rows = db.query("""SELECT
    s.uic_ref,
    array_agg(st.departure_time + (INTERVAL '1s' * intervals)) AS departure_times
FROM stop_times st
    INNER JOIN frequencies f on st.trip_id = f.trip_id
    INNER JOIN trips t on f.trip_id = t.trip_id
    INNER JOIN stops s on st.stop_id = s.stop_id
    LEFT JOIN calendar_dates c ON t.service_id = c.service_id,
    generate_series(0, 86400, f.headway_secs) intervals
WHERE (st.departure_time + (INTERVAL '1s' * intervals)) <= f.end_time
    AND (c.date = :date OR t.service_id = '000000')
GROUP BY s.uic_ref""",
                    date=due_date_gtfs).all()
    return {row['uic_ref']: _combine_departure_time(row, due_date) for row in rows}
def _format_gtfs_date(due_date: datetime) -> str:
"""Format datetime into gtfs date format yyymmdd"""
return f"{due_date.year}{due_date.month:02d}{due_date.day:02d}"
def _combine_departure_time(row: dict, due_date: datetime) -> List[datetime]:
"""Convert row of departure times with due date to form a complete datetime object"""
departure_times: List[datetime] = list()
for departure_time in row['departure_times']:
if departure_time:
departure_times.append(due_date + departure_time)
return departure_times
|
from src.bayes.utils import load_data
def test_load_data(file_path):
    """Smoke-test load_data against the fixture at file_path."""
    symptoms, diseases = load_data(file_path)
    expectations = ((symptoms, 0.96), (diseases, 0.99))
    for collection, probability in expectations:
        assert collection[0][1] == probability
|
# Code-Jam-style solver: reconstruct a rock-paper-scissors tournament bracket
# from the round count n and the desired counts of R, P and S players.
read = open('in.in', 'r')
write = open('out.out', 'w')
cases = int(read.readline())
for case in range(cases):
    line = read.readline()[:-1]  # strip the trailing newline
    fields = line.split(" ")
    n = int(fields[0])
    r = int(fields[1])
    p = int(fields[2])
    s = int(fields[3])
    # counts = []
    # counts += [(r, p, s)]
    # Work backwards round by round: a surviving R must have beaten an S, etc.,
    # which fixes the previous round's counts; negative counts mean impossible.
    imposs = False
    for i in range(n):
        rp = (r + s - p) // 2
        pp = (r + p - s) // 2
        sp = (s + p - r) // 2
        if rp < 0 or pp < 0 or sp < 0:
            imposs = True
            break
        r = rp
        p = pp
        s = sp
        # counts = [(r, p, s)] + [counts]
    if imposs:
        write.write("Case #{0}: {1}\n".format(case+1, "IMPOSSIBLE"))
        continue
    # print(counts)
    # After n halvings exactly one player remains; seed the bracket with them.
    string = ''
    if r == 1:
        string = 'R'
    if p == 1:
        string = 'P'
    if s == 1:
        string = 'S'
    # Expand each symbol back into the pair that produced it.
    for i in range(n):
        stringp = ''
        for j in string:
            # NOTE(review): the i == n - 1 and i < n - 2 special cases are
            # asymmetric — confirm against the problem's ordering rules.
            if i == n - 1 and j == 'R':
                stringp += 'RS'
            elif j == 'R':
                stringp += 'SR'
            if j == 'P':
                stringp += 'PR'
            if i < n - 2 and j == 'S':
                stringp += 'SP'
            elif j == 'S':
                stringp += 'PS'
        string = stringp
    # print(string)
    write.write("Case #{0}: {1}\n".format(case+1, string))
read.close()
write.close()
|
import torch
# Training hyper-parameters shared by the rest of the project.
learning_rate = 0.8  # NOTE(review): 0.8 is very high for most optimizers — confirm intentional
batch_size = 128
epochs = 10
classes = 10  # number of output classes
# Prefer the GPU when one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
def fact(n):
    """Compute the factorial of n.

    Bug fix: the original recursed with base case n == 1 only, so fact(0)
    recursed forever (RecursionError). The iterative form handles n == 0 and
    avoids deep recursion for large n; negative n raises ValueError.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result

print("2!:", fact(2))
print("5!:", fact(5))
print("13!:", fact(13))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MeterOpenModel import MeterOpenModel
class ExerciseItemOpenModelThird(object):
    """Auto-generated Alipay OpenAPI model for an exercise (venue) item.

    Plain data holder: private fields, property accessors, and dict
    (de)serialization helpers used by the SDK transport layer.
    """

    def __init__(self):
        # All fields start as None; they are populated through the property
        # setters or by from_alipay_dict().
        self._desc = None
        self._external_item_id = None
        self._item_code = None
        self._meter_list = None
        self._name = None
        self._parent_item_code = None

    @property
    def desc(self):
        # Free-text description of the item.
        return self._desc

    @desc.setter
    def desc(self, value):
        self._desc = value

    @property
    def external_item_id(self):
        # Identifier of the item in the partner's own system.
        return self._external_item_id

    @external_item_id.setter
    def external_item_id(self, value):
        self._external_item_id = value

    @property
    def item_code(self):
        return self._item_code

    @item_code.setter
    def item_code(self, value):
        self._item_code = value

    @property
    def meter_list(self):
        return self._meter_list

    @meter_list.setter
    def meter_list(self, value):
        # Coerce a raw dict into a MeterOpenModel instance.
        # NOTE(review): despite the plural name this treats the value as a
        # single object; a Python list would be handed to from_alipay_dict
        # unchanged — confirm the field's upstream type.
        if isinstance(value, MeterOpenModel):
            self._meter_list = value
        else:
            self._meter_list = MeterOpenModel.from_alipay_dict(value)

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def parent_item_code(self):
        # Code of the parent item, when this item is nested under another.
        return self._parent_item_code

    @parent_item_code.setter
    def parent_item_code(self, value):
        self._parent_item_code = value

    def to_alipay_dict(self):
        # Serialize to a plain dict for the API payload.
        # Note: falsy values (None, '', 0) are skipped entirely.
        params = dict()
        if self.desc:
            if hasattr(self.desc, 'to_alipay_dict'):
                params['desc'] = self.desc.to_alipay_dict()
            else:
                params['desc'] = self.desc
        if self.external_item_id:
            if hasattr(self.external_item_id, 'to_alipay_dict'):
                params['external_item_id'] = self.external_item_id.to_alipay_dict()
            else:
                params['external_item_id'] = self.external_item_id
        if self.item_code:
            if hasattr(self.item_code, 'to_alipay_dict'):
                params['item_code'] = self.item_code.to_alipay_dict()
            else:
                params['item_code'] = self.item_code
        if self.meter_list:
            if hasattr(self.meter_list, 'to_alipay_dict'):
                params['meter_list'] = self.meter_list.to_alipay_dict()
            else:
                params['meter_list'] = self.meter_list
        if self.name:
            if hasattr(self.name, 'to_alipay_dict'):
                params['name'] = self.name.to_alipay_dict()
            else:
                params['name'] = self.name
        if self.parent_item_code:
            if hasattr(self.parent_item_code, 'to_alipay_dict'):
                params['parent_item_code'] = self.parent_item_code.to_alipay_dict()
            else:
                params['parent_item_code'] = self.parent_item_code
        return params

    @staticmethod
    def from_alipay_dict(d):
        # Deserialize from an API response dict; returns None for empty input.
        if not d:
            return None
        o = ExerciseItemOpenModelThird()
        if 'desc' in d:
            o.desc = d['desc']
        if 'external_item_id' in d:
            o.external_item_id = d['external_item_id']
        if 'item_code' in d:
            o.item_code = d['item_code']
        if 'meter_list' in d:
            o.meter_list = d['meter_list']
        if 'name' in d:
            o.name = d['name']
        if 'parent_item_code' in d:
            o.parent_item_code = d['parent_item_code']
        return o
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
"""
形态学操作 - 开操作
开操作 = 腐蚀 + 膨胀
opencv关于形态学操作进行了封装,所有的形态学操作可使用一个api进行,即
cv.morphologyEx(src, option, kernel, anchor, iterations)
- src: 任意输入图像,可以为灰度、彩色或二值
- option: 形态学操作的枚举
- kernel: 结构元素 或 卷积核
- anchor: 结构元素或卷积核的中心像素点坐标
- iterations: 形态学操作的次数
关于开操作可以理解如下,先对图像进行腐蚀操作,之后对腐蚀的结果进行膨胀。可以删除二值图像中的干扰快,降低图像二值化之后噪点过多的
问题,在api中,他的枚举为`cv.MORPH_OPEN`
"""
def main():
    """Demonstrate morphological opening on an adaptively thresholded image."""
    src = cv.imread("../../pic/cement_road.jpeg")
    # Resizable display windows, created in the original order.
    for window in ("binary", "dst", "src"):
        cv.namedWindow(window, cv.WINDOW_KEEPRATIO)
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    smoothed = cv.GaussianBlur(gray, (3, 3), 0)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))
    binary = cv.adaptiveThreshold(smoothed, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 25, 10)
    opened = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel)
    for window, image in (("binary", binary), ("dst", opened), ("src", src)):
        cv.imshow(window, image)
    cv.waitKey(0)
    cv.destroyAllWindows()

if "__main__" == __name__:
    main()
|
from roger.components.data_conversion_utils import TypeConversionUtil
def test_type_comparision():
datatype_1 = list.__name__
datatype_2 = str.__name__
datatype_3 = bool.__name__
datatype_4 = float.__name__
datatype_5 = int.__name__
# list should always come first
assert datatype_1 == TypeConversionUtil.compare_types(datatype_1, datatype_2)
assert datatype_1 == TypeConversionUtil.compare_types(datatype_1, datatype_3)
assert datatype_1 == TypeConversionUtil.compare_types(datatype_1, datatype_4)
assert datatype_1 == TypeConversionUtil.compare_types(datatype_1, datatype_5)
# then string
assert datatype_2 == TypeConversionUtil.compare_types(datatype_2, datatype_3)
assert datatype_2 == TypeConversionUtil.compare_types(datatype_2, datatype_4)
assert datatype_2 == TypeConversionUtil.compare_types(datatype_2, datatype_5)
# the rest should always be casted up to string
assert datatype_2 == TypeConversionUtil.compare_types(datatype_3, datatype_4)
assert datatype_2 == TypeConversionUtil.compare_types(datatype_4, datatype_5)
assert datatype_2 == TypeConversionUtil.compare_types(datatype_5, datatype_3)
# should raise error when sent 'Unknown' data types
bogus_dt = "bogus"
try:
TypeConversionUtil.compare_types(bogus_dt, datatype_1)
except AssertionError as error:
exception_raised = True
assert exception_raised
try:
TypeConversionUtil.compare_types(datatype_1, bogus_dt)
except AssertionError as error:
exception_raised = True
assert exception_raised
def test_casting_values():
castable = [
["True", bool.__name__, True],
[1 , bool.__name__, True],
[1.0, bool.__name__, True],
[[], bool.__name__, False]
]
for items in castable:
assert items[-1] == TypeConversionUtil.cast(*items[:-1]) # cast (value, type)
|
#coding=utf-8
import re,urllib,sys,MySQLdb as mdb
import chardet
reload(sys)
#sys.setdefaultencoding('utf-8')
#s='#NAME?' #或者用raw_input()输入也行
def geturl():
#db =mdb.connect(host='127.0.0.1',user='root',passwd='hehe',db='public_opinion',charset='utf8')
#cur=db.cursor()
s=raw_input("请输入关键词:")
s=s.decode(sys.stdin.encoding).encode('utf-8')
#print chardet.detect(s)
#query='insert into key_word (keyword) values("'+s+'")'
#print query
#cur.execute(query)
#db.commit()
#cur.close()
#s_utf=s.decode(sys.stdin.encoding).encode('utf-8') #编码需要转换为utf-8
return urllib.quote(s)
'''
if __name__=='__main__':
geturl()
'''
|
__author__ = 'Evenvi'
import tesseract
import cv
#image = cv.LoadImage("./img/plateBinary.jpg",cv.CV_LOAD_IMAGE_GRAYSCALE)
# Load the binarised licence-plate image in grayscale for OCR (old cv API, Python 2).
image = cv.LoadImage("./img/plateBinary.jpg",cv.CV_LOAD_IMAGE_GRAYSCALE)
#chiImage = cv.LoadImage("./img/chiPlate.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
def recognize(image):
    """Run English OCR over an OpenCV image and return the UTF-8 text."""
    ocr = tesseract.TessBaseAPI()
    ocr.Init(".","eng",tesseract.OEM_DEFAULT)
    ocr.SetPageSegMode(tesseract.PSM_AUTO)
    tesseract.SetCvImage(image, ocr)
    text = ocr.GetUTF8Text()
    conf = ocr.MeanTextConf()  # confidence computed but not returned (kept for parity)
    return text
def chiRecognize(image):
    """Run simplified-Chinese OCR over an OpenCV image and return the UTF-8 text."""
    ocr = tesseract.TessBaseAPI()
    ocr.Init(".","chi_sim",tesseract.OEM_DEFAULT)
    ocr.SetPageSegMode(tesseract.PSM_AUTO)
    tesseract.SetCvImage(image, ocr)
    text = ocr.GetUTF8Text()
    conf = ocr.MeanTextConf()  # confidence computed but not returned (kept for parity)
    return text
#print "Plate NO. is:"+recognize(image)
print "PlateCHI NO. is:"+chiRecognize(image) |
from pymongo import MongoClient
# The call to MongoClient is outside the connectCollection function so the
# client (and its connection pool) is created only once per process.
client = MongoClient()
def connectCollection(database, collection):
    """Return (database, collection) handles from the shared MongoClient."""
    database_handle = client[database]
    collection_handle = database_handle[collection]
    return database_handle, collection_handle
def getLocation(lst):
    """Build a GeoJSON Point dict from a Google-Places-style result dict."""
    position = lst['geometry']['location']
    return {
        'type': 'Point',
        'coordinates': [position['lng'], position['lat']],
    }
def creating_geoindex(collection_name):
    """Add a GeoJSON 'location' field to every place with non-null lat/lng."""
    db, coll = connectCollection('companies', collection_name)
    query = {"geometry.location.lat": {"$ne": None}, "geometry.location.lng": {"$ne": None}}
    # Materialize the cursor first so updates don't interfere with iteration.
    places = list(coll.find(query))
    for place in places:
        coll.update_one(place, {"$set": {'location': getLocation(place)}})
# _*_ coding: utf-8 _*_
import os
# Python 2 script: copy test.txt to result.txt, dropping comment lines
# (those starting with '#') and blank lines.
ifile = open('test.txt','r')
ofile = open('result.txt','w')
num = 1
for line in ifile.readlines():
    print line  # echo each input line (debug)
    print num   # and its 1-based line number
    num = num + 1
    # Keep the line only if it is neither a comment nor empty.
    if ((line.startswith('#') != 1) and (line != '\n')):
        ofile.write(line)
# if (line != '\n')
# for line in ifile.readline():
#     if line.startswith('#'):
#         ofile.write(line)
ifile.close()
ofile.close()
|
"""MyGame URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
# Top-level URL routing: each app mounts its API under its own namespace.
urlpatterns = [
    path('admin/', admin.site.urls),
    # Custom admin-panel app, namespaced separately from the Django admin.
    path('admin/panel/', include(('admin_panel.urls', 'admin_panel'), namespace="admin-panel")),
    path('web/',include('extra.urls',namespace='extra_web')),
    path('registration/',include('registration.api.urls',namespace='registration')),
    path('extra/',include('extra.api.urls',namespace='extra')),
    path('solo/',include('solo.api.urls',namespace='solo')),
    path('game/',include('game.api.urls',namespace='game')),
    path('subscription/',include('subscription.api.urls',namespace='subscription')),
]
# urlpatterns += [url(r'^silk/', include('silk.urls', namespace='silk'))]
# Serve uploaded media from Django itself during development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from django.shortcuts import render
from .models import StudentsInfo,GuardianInfo
# , GuardianInfo
# Create your views here.
def GuardianInfoListVW(request):
    """Render the list of all guardians."""
    guardians = GuardianInfo.objects.all()
    return render(request, 'students/guardian_info_list.html', {"Guardian_list": guardians})
def GuardianInfoDtlsVW(request, phone_no):
    """Render details for the guardian with the given phone number."""
    guardian = GuardianInfo.objects.get(phone=phone_no)
    return render(request, 'students/guardian_info_dtls.html', {"Guardian_Info": guardian})
def StudentsInfoListVW(request):
    """Render the list of all students."""
    students = StudentsInfo.objects.all()
    return render(request, 'students/students_info_list.html', {"students_list": students})
def StudentsInfoDetailsVW(request, student_roll):
    """Render details for the student with the given roll number."""
    student = StudentsInfo.objects.get(roll=student_roll)
    return render(request, 'students/students_info_dtls.html', {"student_dtls": student})
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 10:47:04 2019
@author: 19233292
"""
def cumulative_sum(l):
    """Return the running (prefix) sums of l as a new list.

    Uses the stdlib itertools.accumulate instead of a hand-rolled loop;
    works for any iterable and returns [] for empty input.
    """
    from itertools import accumulate  # local import keeps the file's imports untouched
    return list(accumulate(l))

print(cumulative_sum([1,2,3]))
# https://towardsdatascience.com/lstm-for-time-series-prediction-de8aeb26f2ca
# https://romanorac.github.io/machine/learning/2019/09/27/time-series-prediction-with-lstm.html
import sys
import torch
import torch.nn as nn
import os
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os.path
from sklearn.preprocessing import MinMaxScaler
# Input/output directory layout for the experiment.
data_txt = '../data/txt/'
data_csv = '../data/csv/'
results_folder = '../data/results/'
# First CLI argument names the output sub-folder; refuse to overwrite an existing one.
if len(sys.argv) < 2:
    print('Error: Output folder prefix is needed')
    sys.exit(0)
elif os.path.exists(results_folder+sys.argv[1]):
    print('Error: Output folder {} already exists'.format(results_folder+sys.argv[1]))
    sys.exit(0)
else:
    output_folder_prefix = sys.argv[1]
    os.mkdir(results_folder+'/'+output_folder_prefix)
# Load the NO2 time series and impute sentinel -1 readings with the column mean.
dataset = pd.read_csv(data_csv+'035-08-sequence_air_all.csv', header=0, index_col=0)
# Bug fix: DataFrame.replace is not in-place; the original discarded the
# result, so the -1 sentinels were never actually imputed.
dataset = dataset.replace(-1, dataset.mean())
all_data = dataset['measure'].values.astype(float)
all_data = dataset[(dataset.index >= "2013-12-01") & (dataset.index <= "2019-11-30")]['measure'].values.astype(float)
# Chronological train/validation/test split on the string-typed date index.
# NOTE(review): "2018-09-31"/"2019-09-31" are not real dates; as lexicographic
# string bounds they still include all of September — confirm intended.
df_train = dataset[dataset.index < "2017-12-01"]['measure'].to_frame(name='NO2')
print(df_train.shape)
df_val = dataset[(dataset.index >= "2017-12-01") & (dataset.index < "2018-09-31")]['measure'].to_frame(name='NO2')
print(df_val.shape)
df_test = dataset[(dataset.index >= "2018-12-01") & (dataset.index < "2019-09-31")]['measure'].to_frame(name='NO2')
print(df_test.shape)
from sklearn.preprocessing import StandardScaler
# Fit the scaler on the training window only to avoid leakage.
scaler = StandardScaler()
train_arr = scaler.fit_transform(df_train)
val_arr = scaler.transform(df_val)
test_arr = scaler.transform(df_test)
def transform_data(arr, seq_len):
    """Slide a window of length seq_len over arr to build (feature, target) pairs.

    Returns (x, y) float tensors of shape (num_windows, seq_len), where y is x
    shifted forward by one timestep.

    Fix: the original wrapped the tensors in torch.autograd.Variable, which has
    been a no-op since torch 0.4 and was imported *after* this definition;
    plain tensors are equivalent and remove that ordering hazard.
    """
    x, y = [], []
    for start in range(len(arr) - seq_len):
        x.append(arr[start : start + seq_len])
        y.append(arr[start + 1 : start + seq_len + 1])
    x_arr = np.array(x).reshape(-1, seq_len)
    y_arr = np.array(y).reshape(-1, seq_len)
    return torch.from_numpy(x_arr).float(), torch.from_numpy(y_arr).float()
from torch.autograd import Variable
# NOTE(review): this import sits below transform_data's definition; it works
# because the function is only called here, but it belongs at the top of the file.
seq_len = 72  # window length: 72 hourly samples = 3 days
x_train, y_train = transform_data(train_arr, seq_len)
x_val, y_val = transform_data(val_arr, seq_len)
x_test, y_test = transform_data(test_arr, seq_len)
def plot_sequence(axes, i, x_train, y_train):
    """Plot the i-th feature/target window pair on axes[i].

    Relies on the module-global seq_len. The "VWAP" labels are left over from
    the tutorial this was adapted from; the data here is NO2.
    """
    axes[i].set_title("%d. Sequence" % (i + 1))
    axes[i].set_xlabel("Time Bars")
    axes[i].set_ylabel("Scaled VWAP")
    axes[i].plot(range(seq_len), x_train[i].cpu().numpy(), color="r", label="Feature")
    axes[i].plot(range(1, seq_len + 1), y_train[i].cpu().numpy(), color="b", label="Target")
    axes[i].legend()
# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14, 7))
# plot_sequence(axes, 0, x_train, y_train)
# plot_sequence(axes, 1, x_train, y_train)
# plt.show()
# If we have a GPU available, we'll set our device to GPU. We'll use this device variable later in our code.
# Pick the compute device once; used by Model and Optimization below.
is_cuda = torch.cuda.is_available()
if is_cuda:
    device = torch.device("cuda")
    print("GPU is available")
else:
    device = torch.device("cpu")
    print("GPU not available, CPU used")
import torch.nn as nn
import torch.optim as optim
class Model(nn.Module):
    """Single-cell LSTM sequence model with optional free-running generation.

    Relies on the module-global `device` (and on `random`, imported later in
    the file) at call time.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.lstm = nn.LSTMCell(self.input_size, self.hidden_size).to(device)
        self.linear = nn.Linear(self.hidden_size, self.output_size).to(device)

    def forward(self, input, future=0, y=None):
        """Run the LSTM over `input` one timestep at a time.

        future: extra steps to generate beyond the input, feeding each output
        back in as the next input.
        y: optional targets enabling ~50% teacher forcing during generation.
        """
        outputs = []
        # reset the state of LSTM
        # the state is kept till the end of the sequence
        h_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32).to(device)
        c_t = torch.zeros(input.size(0), self.hidden_size, dtype=torch.float32).to(device)
        # Feed the sequence one column (timestep) at a time.
        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm(input_t, (h_t, c_t))
            output = self.linear(h_t).to(device)
            outputs += [output]
        for i in range(future):
            # With probability ~0.5 use the ground-truth target instead of the
            # model's own previous output.
            if y is not None and random.random() > 0.5:
                output = y[:, [i]]  # teacher forcing
            h_t, c_t = self.lstm(output, (h_t, c_t))
            output = self.linear(h_t)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
import time
import random
class Optimization:
    """A helper class to train, test and diagnose the LSTM.

    Fixes over the original:
    - `_validation` checked its arguments for None *after* calling .to(device),
      so passing x_val=None crashed; the guard now comes first.
    - Losses were divided by the last batch *index* (off by one; crashed with a
      single batch); they are now averaged over the batch count.
    - `_predict` passed a float upper bound to random.randint (rejected on
      modern Python); it now uses integer floor division.
    """

    def __init__(self, model, loss_fn, optimizer, scheduler):
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_losses = []
        self.val_losses = []
        self.futures = []

    @staticmethod
    def generate_batch_data(x, y, batch_size):
        """Yield (x_batch, y_batch, batch_index) slices of size batch_size.

        The trailing partial batch is dropped, matching the original behavior.
        """
        for batch, i in enumerate(range(0, len(x) - batch_size, batch_size)):
            x_batch = x[i : i + batch_size]
            y_batch = y[i : i + batch_size]
            yield x_batch, y_batch, batch

    def train(
        self,
        x_train,
        y_train,
        x_val=None,
        y_val=None,
        batch_size=72,
        n_epochs=0,
        do_teacher_forcing=None,
    ):
        """Train for n_epochs, validating after each epoch when x_val/y_val given."""
        seq_len = x_train.shape[1]
        for epoch in range(n_epochs):
            start_time = time.time()
            self.futures = []
            train_loss = 0
            batch_count = 0
            for x_batch, y_batch, batch in self.generate_batch_data(x_train, y_train, batch_size):
                x_batch = x_batch.to(device)
                y_batch = y_batch.to(device)
                y_pred = self._predict(x_batch, y_batch, seq_len, do_teacher_forcing)
                self.optimizer.zero_grad()
                loss = self.loss_fn(y_pred, y_batch)
                loss.backward()
                self.optimizer.step()
                train_loss += loss.item()
                batch_count += 1
            self.scheduler.step()
            # Average over the number of batches (see class docstring).
            train_loss /= max(batch_count, 1)
            self.train_losses.append(train_loss)
            self._validation(x_val, y_val, batch_size)
            elapsed = time.time() - start_time
            # NOTE: this print assumes validation data was supplied (val_losses non-empty).
            print(
                "Epoch %d Train loss: %.6f. Validation loss: %.6f. Avg future: %.2f. Elapsed time: %.2fs."
                % (epoch + 1, train_loss, self.val_losses[-1], np.average(self.futures), elapsed)
            )

    def _predict(self, x_batch, y_batch, seq_len, do_teacher_forcing):
        """Forward pass; with teacher forcing, hold back a random-length tail."""
        x_batch = x_batch.to(device)
        y_batch = y_batch.to(device)
        if do_teacher_forcing:
            # // keeps the bound an int, as random.randint requires.
            future = random.randint(1, seq_len // 2)
            limit = x_batch.size(1) - future
            y_pred = self.model(x_batch[:, :limit], future=future, y=y_batch[:, limit:])
        else:
            future = 0
            y_pred = self.model(x_batch)
        self.futures.append(future)
        return y_pred

    def _validation(self, x_val, y_val, batch_size):
        """Compute and record the validation loss; no-op without validation data."""
        # Guard before touching the tensors (the original dereferenced None here).
        if x_val is None or y_val is None:
            return
        x_val = x_val.to(device)
        y_val = y_val.to(device)
        with torch.no_grad():
            val_loss = 0
            batch_count = 0
            for x_batch, y_batch, batch in self.generate_batch_data(x_val, y_val, batch_size):
                y_pred = self.model(x_batch)
                loss = self.loss_fn(y_pred, y_batch)
                val_loss += loss.item()
                batch_count += 1
            val_loss /= max(batch_count, 1)
            self.val_losses.append(val_loss)

    def evaluate(self, x_test, y_test, batch_size, future=1):
        """Return (actual, predicted, test_loss) on the test set.

        actual/predicted collect the last timestep of each window per batch.
        """
        with torch.no_grad():
            test_loss = 0
            batch_count = 0
            actual, predicted = [], []
            for x_batch, y_batch, batch in self.generate_batch_data(x_test, y_test, batch_size):
                x_batch = x_batch.to(device)
                y_batch = y_batch.to(device)
                y_pred = self.model(x_batch, future=future)
                print(x_batch.shape[1])
                print(y_batch.shape[1])
                print(batch)
                # NOTE(review): len(y_batch) is the batch dimension, not the
                # sequence length — confirm this trim is intended.
                if y_pred.shape[1] > y_batch.shape[1]:
                    y_pred = y_pred[:, -len(y_batch):]
                loss = self.loss_fn(y_pred, y_batch)
                test_loss += loss.item()
                actual += torch.squeeze(y_batch[:, -1]).data.cpu().numpy().tolist()
                predicted += torch.squeeze(y_pred[:, -1]).data.cpu().numpy().tolist()
                batch_count += 1
            test_loss /= max(batch_count, 1)
            return actual, predicted, test_loss

    def plot_losses(self):
        """Plot training vs validation loss curves."""
        plt.plot(self.train_losses, label="Training loss")
        plt.plot(self.val_losses, label="Validation loss")
        plt.legend()
        plt.title("Losses")
def generate_sequence(scaler, model, x_sample, future=1000):
    """Generate `future` values beyond x_sample with the model, unscaled back
    to the original units via the scaler."""
    prediction = model(x_sample, future=future)
    rows = prediction.cpu().tolist()
    return scaler.inverse_transform(rows)
def to_dataframe(actual, predicted):
    """Pack the actual and predicted series into a two-column dataframe."""
    columns = {"actual": actual, "predicted": predicted}
    return pd.DataFrame(columns)
def inverse_transform(scalar, df, columns):
    """Undo scaling on the given dataframe columns in place and return df.

    Bug fix: the original ignored the `scalar` parameter and silently used the
    module-global `scaler` instead; the parameter is now actually used.
    """
    for col in columns:
        df[col] = scalar.inverse_transform(df[col])
    return df
# model_1 = Model(input_size=1, hidden_size=21, output_size=1)
# loss_fn_1 = nn.MSELoss()
# optimizer_1 = optim.Adam(model_1.parameters(), lr=1e-3)
# scheduler_1 = optim.lr_scheduler.StepLR(optimizer_1, step_size=5, gamma=0.1)
# optimization_1 = Optimization(model_1, loss_fn_1, optimizer_1, scheduler_1)
#
#
# optimization_1.train(x_train, y_train, x_val, y_val, do_teacher_forcing=False)
# optimization_1.plot_losses()
#
# actual_1, predicted_1, test_loss_1 = optimization_1.evaluate(x_test, y_test, future=5, batch_size=100)
# df_result_1 = to_dataframe(actual_1, predicted_1)
# df_result_1 = inverse_transform(scaler, df_result_1, ['actual', 'predicted'])
# df_result_1.plot(figsize=(14, 7))
# print("Test loss %.6f" % test_loss_1)
#
# x_sample = x_test[0].reshape(1, -1)
# y_sample = df_test.vwap[:1100]
#
# y_pred1 = generate_sequence(scaler, optimization_1.model, x_sample)
# plt.figure(figsize=(14, 7))
# plt.plot(range(100), y_pred1[0][:100], color="blue", lw=2, label="Predicted VWAP")
# plt.plot(range(100, 1100), y_pred1[0][100:], "--", color="blue", lw=2, label="Generated VWAP")
# plt.plot(range(0, 1100), y_sample, color="red", label="Actual VWAP")
# plt.legend()
#
# Train the 50-unit model with teacher forcing, then evaluate on the
# validation window (pre-MC) and the test window (post-MC), persisting both.
model_2 = Model(input_size=1, hidden_size=50, output_size=1)
loss_fn_2 = nn.MSELoss()
optimizer_2 = optim.Adam(model_2.parameters(), lr=1e-3)
scheduler_2 = optim.lr_scheduler.StepLR(optimizer_2, step_size=5, gamma=0.1)
optimization_2 = Optimization(model_2, loss_fn_2, optimizer_2, scheduler_2)
# NOTE(review): n_epochs=0 means no training actually happens — confirm intended.
optimization_2.train(x_train, y_train, x_val, y_val, batch_size=72, n_epochs=0, do_teacher_forcing=True)
optimization_2.plot_losses()
actual_3, predicted_3, test_loss_3 = optimization_2.evaluate(x_val, y_val, batch_size=72, future=5)
df_result_3 = to_dataframe(actual_3, predicted_3)
df_result_3 = inverse_transform(scaler, df_result_3, ["actual", "predicted"])
df_result_3.plot(figsize=(14, 7))
df_result_3['difference'] = df_result_3['actual'] - df_result_3['predicted']
df_result_3.to_csv(results_folder + '/' + output_folder_prefix + '/pre-MC.csv')
# Mean signed error (bias), not an absolute error metric.
diff_3 = sum(df_result_3['difference'])/len(df_result_3['difference'])
print("Test PRE-MC loss %.6f" % test_loss_3)
print("Test PRE-MC diff %.6f" % diff_3)
actual_2, predicted_2, test_loss_2 = optimization_2.evaluate(x_test, y_test, batch_size=72, future=5)
df_result_2 = to_dataframe(actual_2, predicted_2)
df_result_2 = inverse_transform(scaler, df_result_2, ["actual", "predicted"])
df_result_2.plot(figsize=(14, 7))
df_result_2['difference'] = df_result_2['actual'] - df_result_2['predicted']
df_result_2.to_csv(results_folder + '/' + output_folder_prefix + '/post-MC.csv')
diff_2 = sum(df_result_2['difference'])/len(df_result_2['difference'])
print("Test Post-MC loss %.6f" % test_loss_2)
print("Test Post-MC diff %.6f" % diff_2)
#plt.show()
|
import fasttext
import sys
import os
import argparse
'''
No real magic here (or rather: too much of it).
We use the fasttext library to train our model, and then append the predictions with the __label__ value, since that is the expected output format.
'''
def train_and_test():
    """Train on args.train_data and print the fastText test metrics."""
    print(test(train(args.train_data), args.test_data))
def train_and_predict():
    """Train on args.train_data and print the predictions for args.test_data."""
    print(predict(train(args.train_data), args.test_data))
def train(train_data):
    """Train a supervised fastText classifier (5 epochs, word bigrams)."""
    return fasttext.train_supervised(train_data, epoch=5, wordNgrams=2, verbose=2)
def test(model, test_data):
    """Return fastText's (n_samples, precision, recall) tuple for test_data."""
    return model.test(test_data)
def predict(model, predict_file):
    """Predict one label per line of *predict_file* and write them to result.txt.

    Each output line is the top '__label__...' prediction for the
    corresponding input line.
    """
    with open(predict_file, 'r') as sentences, open('result.txt', 'w') as fout:
        for line in sentences:
            result = model.predict(line.replace('\n', ''))
            # The original called get_result_label(result) twice and discarded
            # the first result; call it once.
            fout.write(get_result_label(result))
            fout.write('\n')
def get_result_label(result):
    """Return the top predicted label from a fastText predict() result tuple."""
    labels = result[0]
    return labels[0]
# Command-line driver: fasttext.py TRAIN_DATA TEST_DATA {test|predict}
ap = argparse.ArgumentParser()
ap.add_argument('train_data')
ap.add_argument('test_data')
ap.add_argument('function')
args = ap.parse_args()
if args.function == 'test':
    train_and_test()
elif args.function == 'predict':
    train_and_predict()
else:
    raise Exception('{} is not test or predict'.format(args.function))
|
#!/usr/bin/python
#
# Given v7 and v8 objects at 6m, create versions at different heights
#
# Python 2 script (print statement, file()): rewrite 6m DGS objects at other heights.
refheight=6
# Vertices/animations above this Y value (metres) get shifted by the height delta.
cut=3
for obj in ['Safedock2S-%sm.obj',
            'Safedock2S-%sm-pole.obj',
            'Safegate-%sm.obj',
            'Safegate-%sm-pole.obj']:
    for height in [3, 3.5, 4, 4.5, 5, 5.5, 6.5, 7, 7.5, 8]:
        infilename=("DGSs/"+obj) % refheight
        infile=file(infilename, 'rt')
        outfilename=("DGSs/"+obj)% height
        outfile=file(outfilename, 'wt')
        print outfilename
        for line in infile:
            tokens=line.split()
            if not tokens:
                outfile.write('\n')
                continue
            # VT lines: shift the Y coordinate (tokens[2]) when above the cut.
            if tokens[0]=='VT' and float(tokens[2])>cut:
                outfile.write("VT\t%9.4f %9.4f %9.4f\t%6.3f %6.3f %6.3f\t%-6s %-6s\n" % (
                    float(tokens[1]),
                    float(tokens[2])-refheight+height,
                    float(tokens[3]),
                    float(tokens[4]),float(tokens[5]),float(tokens[6]),
                    float(tokens[7]),float(tokens[8])))
            # ANIM_trans lines: shift both Y endpoints; the tail of the
            # original line (from column 71) is preserved verbatim.
            elif tokens[0]=='ANIM_trans' and float(tokens[2])>cut:
                #print line
                assert(line[71]=='\t')
                outfile.write("\tANIM_trans\t%9.4f %9.4f %9.4f\t%9.4f %9.4f %9.4f%s" % (
                    float(tokens[1]),
                    float(tokens[2])-refheight+height,
                    float(tokens[3]),
                    float(tokens[4]),
                    float(tokens[5])-refheight+height,
                    float(tokens[6]),
                    line[71:]))
            else:
                # Everything else passes through unchanged.
                outfile.write(line)
        outfile.close()
        infile.close()
# Standalone DGS objects: vary both mounting height and stand-off distance.
refdist=20
for obj in ['SA-%sm-Safedock2S-%sm.obj',
            'SA-%sm-Safegate-%sm.obj']:
    for dist in [16, 18, 20, 22, 24]:
        for height in [4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8]:
            # Skip the reference object itself.
            if height==refheight and dist==refdist: continue
            infilename=("Standalone_DGSs/"+obj) % (refdist, refheight)
            infile=file(infilename, 'rt')
            outfilename=("Standalone_DGSs/"+obj)% (dist, height)
            outfile=file(outfilename, 'wt')
            print outfilename
            for line in infile:
                tokens=line.split()
                if not tokens:
                    outfile.write('\n')
                    continue
                if tokens[0]=='VT':
                    # Shift Y (height) above the cut, Z (distance) beyond -cut.
                    if float(tokens[2])>cut:
                        newheight=float(tokens[2])-refheight+height
                    else:
                        newheight=float(tokens[2])
                    if float(tokens[3])<-cut:
                        newdist=float(tokens[3])+refdist-dist
                    else:
                        newdist=float(tokens[3])
                    outfile.write("VT\t%9.4f %9.4f %9.4f\t%6.3f %6.3f %6.3f\t%-6s %-6s\n" % (
                        float(tokens[1]),
                        newheight,
                        newdist,
                        float(tokens[4]),float(tokens[5]),float(tokens[6]),
                        float(tokens[7]),float(tokens[8])))
                elif tokens[0]=='ANIM_trans' and line[71]=='\t':
                    # Shift both animation endpoints the same way.
                    if float(tokens[2])>cut:
                        newheight1=float(tokens[2])-refheight+height
                        newheight2=float(tokens[5])-refheight+height
                    else:
                        newheight1=float(tokens[2])
                        newheight2=float(tokens[5])
                    if float(tokens[3])<-cut:
                        newdist1=float(tokens[3])+refdist-dist
                        newdist2=float(tokens[6])+refdist-dist
                    else:
                        newdist1=float(tokens[3])
                        newdist2=float(tokens[6])
                    outfile.write("\tANIM_trans\t%9.4f %9.4f %9.4f\t%9.4f %9.4f %9.4f%s" % (
                        float(tokens[1]),
                        newheight1,
                        newdist1,
                        float(tokens[4]),
                        newheight2,
                        newdist2,
                        line[71:]))
                else:
                    outfile.write(line)
            outfile.close()
            infile.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 18 13:54:26 2019
@author: gdussert
"""
import keras
# import keras_retinanet
from keras_retinanet import models
from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image
from keras_retinanet.utils.visualization import draw_box, draw_caption
from keras_retinanet.utils.colors import label_color
from keras_retinanet.utils.gpu import setup_gpu
# import miscellaneous modules
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
import time
# Trained RetinaNet snapshot to load.
model_path = "snapshots_all/resnet50_csv_10.h5"
# Detection confidence threshold (sic: "tresh"); used by comp_exif below.
tresh=0.5
# load retinanet model and convert the training model to inference mode
model = models.load_model(model_path, backbone_name='resnet50')
model = models.convert_model(model)
# Class-index -> French species name for the CSV-trained classes.
id_to_class=["blaireaux","chamois","chat forestier","chevreuil","lièvre","lynx","renard","sangliers","cerf"]
def show_detection(filename, score_threshold=0.5):
    """Run the detector on one image and display the annotated result.

    Parameters
    ----------
    filename : path of the image to process.
    score_threshold : minimum score for a detection to be drawn. Defaults
        to 0.5, the value the original hard-coded (note the module-level
        `tresh` global is NOT used here).
    """
    # load image
    image = read_image_bgr(filename)
    # copy to draw on (matplotlib expects RGB)
    draw = image.copy()
    draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)
    # preprocess image for network
    image = preprocess_image(image)
    image, scale = resize_image(image)
    # process image
    start = time.time()
    boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
    print("processing time: ", time.time() - start)
    # boxes are in resized-image coordinates; map back to the original image
    boxes /= scale
    # visualize detections
    for box, score, label in zip(boxes[0], scores[0], labels[0]):
        # scores are sorted in descending order, so we can stop early
        if score < score_threshold:
            break
        color = label_color(label)
        b = box.astype(int)
        draw_box(draw, b, color=color)
        caption = "{} {:.3f}".format(id_to_class[label], score)
        draw_caption(draw, b, caption)
    plt.figure(figsize=(15, 15))
    plt.axis('off')
    plt.imshow(draw)
    plt.show()
def show_detection_folder(folder):
    """Display detections for every .jpg/.JPG/.png image in *folder*."""
    for entry in os.listdir(folder):
        if entry.endswith((".jpg", ".JPG", ".png")):
            path = os.path.join(folder, entry)
            print(path)
            show_detection(path)
def pred_bbox(filename):
    """Run the detector on one image and return (boxes, scores, labels).

    Note: boxes are left in resized-image coordinates (not divided by the
    resize scale, unlike show_detection).
    """
    # load image
    image = read_image_bgr(filename)
    # preprocess image for network
    # (the original also built an RGB drawing copy here that was never used)
    image = preprocess_image(image)
    image, scale = resize_image(image)
    # process image
    boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
    return boxes, scores, labels
def comp_exif(folder):
    """Compare detections against IPTC ground-truth keywords for every image in *folder*.

    The ground-truth species is read from the image's first IPTC keyword.
    Prints per-class TP/FP/FN counts and per-species tallies, and returns
    the summary DataFrame.
    """
    from iptcinfo3 import IPTCInfo
    import pandas as pd
    TP = [0] * len(id_to_class)        # true positives per class
    FP = [0] * len(id_to_class)        # false positives per class
    FN_void = [0] * len(id_to_class)   # missed: nothing detected at all
    FN_false = [0] * len(id_to_class)  # missed: detected as the wrong class
    dict_error_FP = {}                 # species -> count of images causing a FP
    dict_nb = {}                       # species -> total image count
    start = time.time()
    nb = 0
    for file in os.listdir(folder):
        if file.endswith(".jpg") or file.endswith(".JPG") or file.endswith(".png"):
            nb += 1
            if nb % 100 == 0:
                print("Done {0} images in ".format(nb), time.time() - start)
            file_path = os.path.join(folder, file)
            species = IPTCInfo(file_path)['keywords'][0].decode("utf-8")
            boxes, scores, labels = pred_bbox(file_path)
            # Detections are score-sorted, so only the top one is examined.
            if scores[0][0] > tresh:
                pred_class_name = id_to_class[labels[0][0]]
                pred_id = labels[0][0]
            else:
                pred_class_name = None
            # dict.get replaces the original bare try/except around the increment.
            dict_nb[species] = dict_nb.get(species, 0) + 1
            print(file_path, species, pred_class_name)
            # No prediction at all.
            if pred_class_name is None:
                if species in id_to_class:
                    species_id = id_to_class.index(species)
                    FN_void[species_id] += 1
            # Otherwise judge the highest-confidence prediction.
            else:
                if pred_class_name == species:  # correct prediction
                    TP[pred_id] += 1
                else:
                    # Wrong class: a false positive, plus a false negative
                    # when the true species is one of the known classes.
                    FP[pred_id] += 1
                    if species in id_to_class:
                        species_id = id_to_class.index(species)
                        FN_false[species_id] += 1
                    dict_error_FP[species] = dict_error_FP.get(species, 0) + 1
    d = {'species': id_to_class, 'TP': TP, 'FP': FP, 'FN_false': FN_false, 'FN_void': FN_void}
    df = pd.DataFrame(data=d)
    print(df)
    print("\nImages source de FP par classe :")
    for k in dict_error_FP.keys():
        print(k, dict_error_FP[k])
    print("\nNombre total d'image par classe:")
    for k in dict_nb.keys():
        print(k, dict_nb[k])
    return df
comp_exif("/beegfs/data/gdussert/projects/olivier_pipeline/all_classes/test/")
|
import logging
import torch
import torch.nn as nn
from kma.modules.attention import Attention
# Module-wide logging configuration (INFO level, timestamped records).
LOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
logging.basicConfig(format=LOG_FORMAT, level=getattr(logging, 'INFO'))
logger = logging.getLogger(__name__)
class RNNDecoder(nn.Module):
    """RNN (GRU/LSTM) decoder with optional attention over encoder outputs."""
    # Keys for the decoder's output dictionary (used by downstream code).
    KEY_ATTN_SCORE = 'attention_score'
    KEY_LENGTH = 'length'
    KEY_SEQUENCE = 'sequence'

    def __init__(self, vocab_size, hidden_size, sos_id, eos_id, pad_id, embeddings,
                 bidirectional_encoder, use_attention, **kwargs):
        """Build the decoder.

        kwargs must contain: vocab_embed_dim, input_dropout, rnn_type
        ('LSTM' or 'GRU'), rnn_layer, rnn_dropout.
        """
        super(RNNDecoder, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, kwargs['vocab_embed_dim'])
        self.embeddings.padding_idx = pad_id
        if embeddings is not None:
            # Use the provided pre-trained embedding matrix and freeze it.
            self.embeddings.weight = nn.Parameter(embeddings)
            self.embeddings.weight.requires_grad = False
        self.input_dropout = nn.Dropout(kwargs['input_dropout'])
        self.vocab_embed_dim = self.embeddings.embedding_dim
        self.hidden_size = hidden_size
        # Instantiate nn.LSTM / nn.GRU selected by name.
        self.rnn = getattr(nn, kwargs['rnn_type'])(input_size=self.embeddings.embedding_dim,
                                                   hidden_size=self.hidden_size,
                                                   num_layers=kwargs['rnn_layer'],
                                                   dropout=kwargs['rnn_dropout'],
                                                   batch_first=True)
        self.sos_id = sos_id
        self.eos_id = eos_id
        self.use_attention = use_attention
        self.bidirectional_encoder = bidirectional_encoder
        if self.use_attention:
            self.attention = Attention(self.hidden_size)

    def _init_state(self, encoder_hidden):
        """ Init decoder start with last state of the encoder """
        def _fix_enc_hidden(h):
            """ If encoder is bidirectional, do the following transformation.
                [layer*directions x batch x dim] -> [layer x batch x directions*dim]
            """
            if self.bidirectional_encoder:
                # Interleave forward/backward layer states along the feature dim.
                h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
            return h
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):  # LSTM: (h, c) pair
            encoder_hidden = tuple([_fix_enc_hidden(h) for h in encoder_hidden])
        else:
            encoder_hidden = _fix_enc_hidden(encoder_hidden)
        return encoder_hidden

    def forward_step(self, input_seqs, hidden, encoder_outputs):
        """Run one decoding pass over a [batch x len] block of token ids."""
        embedded = self.embeddings(input_seqs)
        assert embedded.dim() == 3  # [batch x len x emb_dim]
        embedded = self.input_dropout(embedded)
        output, hidden = self.rnn(embedded, hidden)
        attn = None
        if self.use_attention:
            output, attn = self.attention(output, encoder_outputs)
        # TODO: check dropout layer for output
        return output, hidden, attn, embedded

    def forward(self, input_seqs, encoder_hidden, encoder_outputs,
                teacher_forcing_ratio=0,
                src_input=None):
        # NOTE(review): not implemented in this chunk — confirm intended.
        pass
def validate_args(input_seqs, encoder_hidden, encoder_output, use_attention, rnn_type, sos_id, teacher_forcing_ratio):
    """Validate decoder inputs and derive (input_seqs, batch_size, max_length).

    When input_seqs is None, a [batch x 1] tensor of sos_id start tokens is
    created on the encoder-hidden device and the max decoding length is
    derived from the encoder output (with attention) or fixed at 200.
    """
    if use_attention and encoder_output is None:
        raise ValueError("Argument encoder_output cannot be None "
                         "when attention is used.")
    # --- batch size ---
    if input_seqs is None and encoder_hidden is None:
        batch_size = 1
    elif input_seqs is not None:
        batch_size = input_seqs.size(0)  # [batch x max_len]
    elif rnn_type == 'LSTM':
        batch_size = encoder_hidden[0].size(1)
    elif rnn_type == 'GRU':
        batch_size = encoder_hidden.size(1)
    else:
        raise ValueError("Unknown rnn mode is provided.")
    # --- default input and max decoding length ---
    if input_seqs is not None:
        max_length = input_seqs.size(1) - 1  # minus the start of sequence symbol
    else:
        if teacher_forcing_ratio > 0:
            raise ValueError("Teacher forcing has to be disabled (set 0) when no inputs is provided.")
        if rnn_type == 'LSTM':
            device = encoder_hidden[0].device
        elif rnn_type == 'GRU':
            device = encoder_hidden.device
        else:
            raise ValueError("Unknown rnn mode is provided.")
        input_seqs = torch.LongTensor([sos_id] * batch_size).view(batch_size, 1).to(device)
        max_length = int(encoder_output.size(1) * 1.5) if use_attention else 200
    return input_seqs, batch_size, max_length
|
'''
WSGI interface: a web developer only needs to implement one function to respond to HTTP requests.
'''
def HelloWorld(environ, start_response):
    """Minimal WSGI application.

    environ: dict carrying all HTTP request information.
    start_response: callable taking (status, response_headers); must be
    invoked exactly once before the body iterable is returned.
    Returns a list with a single bytes body (WSGI requires byte strings).
    """
    status = '200 OK'
    response_headers = [('Content-Type', 'text/html')]
    start_response(status, response_headers)
    body = b'<h1 style background:red>HelloWorld, I am python server</h1>'
    return [body]
# Python 2.7 expects str bodies; Python 3.6 expects bytes.
# Why must the returned content be a list? WSGI requires an iterable of byte strings.
# environ: a dict object containing all HTTP request information.
# start_response: note that headers can only be sent once, i.e. start_response()
# may only be called once. It takes two arguments: an HTTP status code and a
# list of HTTP headers, each header being a tuple of two strs.
'''
@author: xilh
@since: 20200127
'''
class Father:
    """Base class for the single-inheritance demo."""

    def f1(self):
        """Print a marker showing the inherited method ran."""
        print("Father.f1 ...")
class Son(Father):
    """Subclass: inherits f1 from Father and adds f2."""

    def f2(self):
        """Print a marker showing the subclass method ran."""
        print("Son.f2 ...")
print("== 单继承 ==")
son = Son()
print(son.f2())
print(son.f1()) |
'''
Created on Aug 29, 2014
'''
import os,sys,subprocess,time,shutil
from comMethods import *
from numpy import ceil
class Fitter:
    """ This class performs the UNCLE fits to the VASP data that has been gathered so far. It also
        keeps track of the fitting errors, prediction errors, and summaries of cluster expansion
        terms from iteration to iteration. """
    # Python 2 code (xrange); helpers readfile/zeros/int32/sort/structuresWrite/
    # parallel* come from `from comMethods import *` — confirm against comMethods.

    def __init__(self, atoms, M_fitStructures, N_subsets, vstructsFinished, uncleOutput):
        """ CONSTRUCTOR """
        self.vstructsFinished = vstructsFinished
        self.atoms = atoms
        self.M_fitStructures = M_fitStructures
        self.N_subsets = N_subsets
        self.enumFolder = os.getcwd() + '/enum/'
        self.neededFilesDir = os.getcwd() + '/needed_files/'
        self.uncleExec = os.getcwd() + '/needed_files/uncle.x'
        self.uncleOut = uncleOutput
        #self.header = "peratom\nnoweights\nposcar\n"
        # NOTE(review): duplicate assignment — vstructsFinished is already set above.
        self.vstructsFinished = vstructsFinished

    def filterStructuresIn(self, fitsDir, iteration, maxE):
        '''Remove structs from structures.in, just before fitting, that don't fit criteria here.
        For now, structures above maxE will be removed'''
        if maxE < 100:  # < the absurd default
            inFile = fitsDir + '/structures.in'
            lines = readfile(inFile)
            # Keep the unfiltered file alongside, tagged with the iteration.
            subprocess.call(['mv',inFile,inFile + '_{}full'.format(iteration)])
            outFile = open(fitsDir + '/structures.in','w')
            outFile.write("peratom\nnoweights\nposcar\n")
            for j in xrange(len(lines)):
                # Structure records start with a "#-" header line.
                if list(lines[j].strip().split()[0])[:2] == ['#','-']:
                    if j != len(lines) - 1:
                        # Formation energy is field 7 of the following line.
                        FE = float(lines[j+1].strip().split()[6].strip(','))
                        if FE <= maxE:
                            natomsList = [int(i) for i in lines[j+6].strip().split()]
                            natoms = natomsList[0]+natomsList[1]
                            outFile.writelines(lines[j:j+10+natoms])
                        else:
                            struct = lines[j + 1].strip().split()[3]
                            subprocess.call(['echo','\tNot including struct {} in fit (exceeds maxE)'.format(struct)])
            outFile.close()

    def filterStructuresInFrac(self, fitsDir, iteration, cullFrac):
        '''Remove structs from structures.in, just before fitting, that don't fit criteria here.
        For now, the top cullFrac of uncle formation energies will be removed'''
        if cullFrac > 0:
            # Get the structs and FE ordered by energy
            FEdata = zeros(20000,dtype = [('struct', int32),('FE', float),('keep',bool)])
            inFile = fitsDir + '/structures.in'
            lines = readfile(inFile)
            subprocess.call(['mv',inFile,inFile + '_{}full'.format(iteration)])
            nstructs = 0
            for j in xrange(len(lines)):
                if list(lines[j].strip().split()[0])[:2] == ['#','-']:
                    if j != len(lines) - 1:
                        struct = lines[j + 1].strip().split()[3]
                        FE = float(lines[j+1].strip().split()[6].strip(','))
                        # NOTE(review): nstructs is incremented before use, so
                        # FEdata[0] is never filled — confirm this is intended.
                        nstructs += 1
                        FEdata[nstructs]['FE'] = FE
                        FEdata[nstructs]['struct'] = struct
            # NOTE(review): numpy's sort() returns a sorted COPY; the result is
            # discarded here, leaving FEdata unsorted. FEdata.sort(...) was
            # probably intended — confirm.
            sort(FEdata,order = ['FE','struct']) #from low to high
            #keep only part of them
            nkeep = ceil((1-cullFrac)*nstructs)
            FEdata[0:nkeep]['keep'] = [True]*nkeep
            # NOTE(review): slice length (nstructs-2*nkeep+1) does not match the
            # assigned list length (nstructs-nkeep) — verify these bounds.
            FEdata[nkeep:nstructs-nkeep+1]['keep'] = [False]*(nstructs-nkeep)
            #scan lines again and write only the ones to keep
            outFile = open(fitsDir + '/structures.in','w')
            outFile.write("peratom\nnoweights\nposcar\n")
            for j in xrange(len(lines)):
                if list(lines[j].strip().split()[0])[:2] == ['#','-']:
                    if j != len(lines) - 1:
                        struct = int(lines[j + 1].strip().split()[3])
                        istruct = next(i for i in FEdata[:nstructs+1]['struct'] if i == struct)
                        if FEdata[istruct]['keep']:
                            natomsList = [int(i) for i in lines[j+6].strip().split()]
                            natoms = natomsList[0]+natomsList[1]
                            outFile.writelines(lines[j:j+10+natoms])
                        else:
                            subprocess.call(['echo','\tNot including struct {} in fit (removing top {} in FE))'.format(struct,cullFrac)])
            outFile.close()

    def fitVASPData(self, iteration, maxE):
        """ Performs the UNCLE fit to the VASP data. """
        natoms = len(self.atoms)
        lastDir = os.getcwd()
        subdir = 'fits'
        #prep for all
        for iatom, atom in enumerate(self.atoms):
            nfinished = len(self.vstructsFinished[iatom])
            if nfinished > 1: #don't try fitting if structures.in is too small
                atomDir = lastDir + '/' + atom
                if nfinished < 100:
                    cullFrac = 0
                else:
                    cullFrac = 0.01
                if os.path.isdir(atomDir):
                    subprocess.call(['echo','\nFitting VASP data for ' + atom + '. . .\n'])
                    fitsDir = atomDir + '/fits'
                    if os.path.isdir(fitsDir):
                        os.chdir(fitsDir)
                        subprocess.call(['cp', atomDir + '/structures.in', '.' ]) #so we have the latest version here
#                        self.filterStructuresInFrac(fitsDir,iteration, cullFrac) #remove some structures at the top of the FE list.
#                        check = subprocess.check_output([self.uncleExec, '15'])
#                        subprocess.call(['echo','Uncle 15 feedback'+ check])
        # Single-atom runs execute UNCLE inline; otherwise fan out to cluster jobs.
        if natoms ==1:
            os.chdir(lastDir + '/' + self.atoms[0] + '/' + subdir)
            subprocess.call([self.uncleExec, '15'], stdout=self.uncleOut)
            os.chdir(lastDir)
        else:#parallelize the atom jobs
            #make job files
            os.chdir(lastDir)
            mem = '16' #Gb
            walltime = 2.0 #hrs
            execString = self.uncleExec + ' 15'
            atomStrings = ['']*natoms
            parallelJobFiles(self.atoms,subdir,walltime,mem,execString,atomStrings)
            #submit jobs
            jobIds = parallelAtomsSubmit(self.atoms[1:],subdir)
            #use this job to calculate the first atom:
            os.chdir(lastDir + '/' + self.atoms[0] + '/' + subdir)
            subprocess.call(['echo','\n\tThis job calculating the first atom: {}. Submitted jobs for the others.\n'.format(self.atoms[0])])
            subprocess.call([self.uncleExec, '15'], stdout=self.uncleOut)
            os.chdir(lastDir)
            #wait
            parallelAtomsWait(jobIds)
        #post calc work for all
        for iatom, atom in enumerate(self.atoms):
            if len(self.vstructsFinished[iatom]) > 1: #don't try fitting if structures.in is too small
                atomDir = lastDir + '/' + atom
                if os.path.isdir(atomDir):
                    fitsDir = atomDir + '/fits'
                    if os.path.isdir(fitsDir):
                        os.chdir(fitsDir)
                        # Archive the per-iteration outputs so later fits don't clobber them.
                        subprocess.call(['mv','fitting_errors.out','fitting_errors_' + str(iteration) + '.out'])
                        subprocess.call(['mv','prediction_errors.out','prediction_errors_' + str(iteration) + '.out'])
                        subprocess.call(['mv','J.1.summary.out','J.1.summary_' + str(iteration) + '.out'])
                        subprocess.call(['cp','structures.in', 'structures.in_' + str(iteration)]) #also leaves a copy of the file to be appended to
                        subprocess.call(['cp','structures.holdout', 'structures.holdout_' + str(iteration)]) #leave the file in case a
                        os.chdir(lastDir)

    def makeFitDirectories(self):
        """ Creates the 'fits' directories for each atom and populates the directories with the
            files that UNCLE needs to perform a fit.  These files are lat.in, CS.in, clusters.out,
            and the current structures.in and structures.holdout files. """
        for iatom, atom in enumerate(self.atoms):
            atomDir = os.path.abspath(atom)
            fitsDir = atomDir + '/fits'
            if os.path.isdir(fitsDir): #remove it...start clean because must have current files
                try:
                    check = subprocess.check_output(['rm','-r',fitsDir])
                except:
                    subprocess.call(['echo','ERROR in removing /fits for atom {}'.format(atom)])
            subprocess.call(['mkdir',fitsDir])
            # Symlink the shared enumeration inputs into the fresh fits dir.
            subprocess.call(['ln','-s',self.enumFolder + '/struct_enum.out',fitsDir])
            subprocess.call(['ln','-s',self.enumFolder + '/lat.in',fitsDir])
            subprocess.call(['ln','-s',self.enumFolder + '/clusters.out',fitsDir])
            infile = open(self.neededFilesDir + '/CS.in','r')
            inlines = [line for line in infile]
            infile.close()
            # TODO: This doesn't work right now unless it's a negative number in settings.in
            # Rewrite CS.in with per-atom values at fixed line positions 60 and 62.
            outfile = open(fitsDir + '/CS.in','w')
            for i in xrange(len(inlines)):
                if i == 60:
                    if (self.M_fitStructures > len(self.vstructsFinished[iatom]) and self.M_fitStructures > 0):
                        outfile.write(str(len(self.vstructsFinished[iatom])) + "\n")
                    else:
                        outfile.write(str(self.M_fitStructures) + "\n")
                elif i == 62:
                    outfile.write(str(self.N_subsets) + "\n")
                else:
                    outfile.write(inlines[i])
            outfile.close()

    def writeHoldout(self, N, structs,vdata):
        '''Writes structures.holdout from a list of struct names for each atom.'''
        for iatom in xrange(len(self.atoms)):
            nmax = min(N,len(structs[iatom]))
            atomDir = os.path.abspath(self.atoms[iatom])
            structuresWrite(nmax,atomDir,self.vstructsFinished[iatom],\
                            vdata[iatom,:nmax]['FE'],vdata[iatom,:nmax]['conc'],\
                            vdata[iatom,:nmax]['energy'],'.holdout','w')
            subprocess.call(['cp', atomDir + '/structures.holdout', atomDir + '/fits/structures.holdout'])
|
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import gettext_lazy as _
# Create your models here.
class StudentField(models.Model):
    """A field of study that a student profile can belong to."""
    title = models.CharField(max_length=255)

    def __str__(self):
        return self.title
def handle_avatar_upload_path(instance, filename):
    """Build the storage path for a user's avatar: avatars/<instance id>.<ext>.

    NOTE(review): instance.id may be None before the first save — confirm
    uploads only happen on existing rows.
    """
    extension = filename.rsplit('.', 1)[-1]
    return 'avatars/' + str(instance.id) + '.' + extension
class UserProfile(models.Model):
    """Extra per-user data attached one-to-one to the auth User."""
    # NOTE(review): plain integer, not a FK/unique field — confirm intended.
    student_id = models.IntegerField()
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
    field = models.ForeignKey(StudentField, on_delete=models.CASCADE)
    avatar = models.ImageField(upload_to=handle_avatar_upload_path, default='/avatars/default.jpg')

    def __str__(self):
        # "پروفایل" is Persian for "profile"; runtime string kept as-is.
        return 'پروفایل ' + str(self.user)
class Notification(models.Model):
    """A notification message shown to a user."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): "Notifictation" looks like a typo in the translatable
    # string; fixing it requires a migration/translation update, so left as-is.
    text = models.TextField(verbose_name=_('Notifictation text'))
    seen = models.BooleanField(verbose_name=_('Seen'))
|
def get_final_line(fname):
    """Return the last line of *fname* (including its trailing newline, if any).

    Returns None for an empty file; the original raised UnboundLocalError
    in that case because the loop variable was never bound.
    """
    last = None
    with open(fname) as f:
        for last in f:
            pass
    return last
if __name__ == "__main__":
print(get_final_line("/etc/passwd"))
|
#import the os module
import os
#import the csv module
import csv
# Input CSV with "Date,Profit/Losses" rows.
budget_data_csv = os.path.join("Resource", "budget_data.csv")
# variable to hold total profits and losses
total = 0
# list to hold all csv data
BudgetInfo = []
# list to hold months column from csv data
Months = []
# list to hold profits and losses column from csv data (as ints)
Amounts = []
# open and read the csv file
with open(budget_data_csv, newline="") as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    header = next(csvreader)
    for row in csvreader:
        # updating the total
        total += int(row[1])
        BudgetInfo.append(row)
        Months.append(row[0])
        Amounts.append(int(row[1]))
# total months presented in the csv file
TotalMonths = len(BudgetInfo)
# number of month-to-month changes
Period = len(BudgetInfo) - 1
# first/last month amounts (Amounts already holds ints, so no re-casting needed)
FirstMonthTotal = Amounts[0]
LastMonthTotal = Amounts[-1]
# average change for the year (telescoping sum of monthly changes)
AvgChange = (LastMonthTotal - FirstMonthTotal) / Period
# monthly amount changes
DifList = [y - x for x, y in zip(Amounts, Amounts[1:])]
# Build the report once and reuse it for both the terminal and the CSV file
# (the original duplicated every line in the two output sections).
max_month = Months[DifList.index(max(DifList)) + 1]
min_month = Months[DifList.index(min(DifList)) + 1]
report_lines = [
    "Financial Analysis",
    "----------------------------",
    f"Total Months: {TotalMonths}",
    f"Total: ${total}",
    "Average Change: $%.2f" % AvgChange,
    f"Greatest Increase in Profits: {max_month} (${max(DifList)})",
    f"Greatest Decrease in Profits: {min_month} (${min(DifList)})",
]
# print results to terminal
for report_line in report_lines:
    print(report_line)
# Specify the file to write to
output_path = os.path.join("output", "PyBankCSV.csv")
# Open the file using "write" mode and emit the same report rows
with open(output_path, 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',')
    for report_line in report_lines:
        csvwriter.writerow([report_line])
import re
class server_cvar:
    """Parses a Source-engine log line of the form:
    server_cvar: "mp_friendlyfire" "1"
    """
    # Raw string for the regex (the original used a plain string with escapes).
    pattern = re.compile(r'server_cvar: "(?P<cvar>.*)" "(?P<value>.*)"')

    @staticmethod
    def isMatch(instr):
        """Return True if *instr* is a server_cvar line."""
        # `is not None` replaces the original `!= None` comparison.
        return server_cvar.pattern.match(instr) is not None

    def __init__(self, instr):
        obj = server_cvar.pattern.match(instr)
        self.cvar = obj.group("cvar")
        self.value = obj.group("value")

    def __str__(self):
        return "server_cvar: %s changed to \"%s\"" % (self.cvar, self.value)
# Register this parser with the global event dispatcher at import time.
from eventhandler import eventhandler
eventhandler.registerEvent(server_cvar)
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Websocket server for testing purposes."""
import asyncio
import json
from qiskit.providers.ibmq.api.clients.websocket import WebsocketResponseMethod
# Access tokens recognised by the mock server; each one selects a different
# scenario in websocket_handler below.
TOKEN_JOB_COMPLETED = 'token_job_completed'
TOKEN_JOB_TRANSITION = 'token_job_transition'
TOKEN_TIMEOUT = 'token_timeout'
TOKEN_WRONG_FORMAT = 'token_wrong_format'
TOKEN_WEBSOCKET_RETRY_SUCCESS = 'token_websocket_retry_success'
TOKEN_WEBSOCKET_RETRY_FAILURE = 'token_websocket_retry_failure'
TOKEN_WEBSOCKET_JOB_NOT_FOUND = 'token_websocket_job_not_found'
async def websocket_handler(websocket, path):
    """Entry point for the websocket mock server.

    Reads the authentication message, acknowledges valid test tokens, then
    dispatches to the scenario handler for the token. Unknown tokens get
    the connection closed immediately.
    """
    # pylint: disable=unused-argument
    # Receive the authentication message.
    msg_in = await websocket.recv()
    auth_message = json.loads(msg_in)
    token = auth_message['data']
    # Map each valid access token to its scenario handler.
    handlers = {
        TOKEN_JOB_COMPLETED: handle_token_job_completed,
        TOKEN_JOB_TRANSITION: handle_token_job_transition,
        TOKEN_TIMEOUT: handle_token_timeout,
        TOKEN_WRONG_FORMAT: handle_token_wrong_format,
        TOKEN_WEBSOCKET_RETRY_SUCCESS: handle_token_retry_success,
        TOKEN_WEBSOCKET_RETRY_FAILURE: handle_token_retry_failure,
        TOKEN_WEBSOCKET_JOB_NOT_FOUND: handle_token_job_not_found,
    }
    handler = handlers.get(token)
    if handler is None:
        # Invalid token: close and stop. (The original fell through to a
        # dead if-chain after closing; no handler matched, so behavior is
        # unchanged, but the early return makes the intent explicit.)
        await websocket.close()
        return
    msg_out = json.dumps({'type': 'authenticated'})
    await websocket.send(msg_out.encode('utf8'))
    await handler(websocket)
async def handle_token_job_completed(websocket):
    """Return a final job status, and close with 4002."""
    msg_out = WebsocketResponseMethod(type_='job-status',
                                      data={'status': 'COMPLETED'})
    await websocket.send(msg_out.as_json().encode('utf8'))
    # 4002 is the "job done" close code expected by the client under test.
    await websocket.close(code=4002)
async def handle_token_job_transition(websocket):
    """Send several job status, and close with 4002."""
    # RUNNING first, then COMPLETED a second later, mimicking a state change.
    msg_out = WebsocketResponseMethod(type_='job-status',
                                      data={'status': 'RUNNING'})
    await websocket.send(msg_out.as_json().encode('utf8'))
    await asyncio.sleep(1)
    msg_out = WebsocketResponseMethod(type_='job-status',
                                      data={'status': 'COMPLETED'})
    await websocket.send(msg_out.as_json().encode('utf8'))
    await websocket.close(code=4002)
async def handle_token_timeout(websocket):
    """Close the socket after 10 seconds, without replying."""
    await asyncio.sleep(10)
    await websocket.close()
async def handle_token_wrong_format(websocket):
    """Send a payload that is not valid JSON, then close the connection."""
    # Same bytes as 'INVALID'.encode('utf8').
    await websocket.send(b'INVALID')
    await websocket.close()
async def handle_token_retry_success(websocket):
    """Fail the first attempt, then succeed on the retry.

    Uses a function attribute as a cross-call flag, as the original did.
    """
    if getattr(handle_token_retry_success, 'retry_attempt', None) is None:
        # First call: remember we've been here, then force a failure.
        handle_token_retry_success.retry_attempt = True
        await handle_token_retry_failure(websocket)
    else:
        await handle_token_job_completed(websocket)
async def handle_token_retry_failure(websocket):
    """Continually close the socket, until both the first attempt and retry fail."""
    # 4001 is the "retryable failure" close code checked by the client.
    await websocket.close(code=4001)
async def handle_token_job_not_found(websocket):
    """Close the socket, specifying code for job not found."""
    await websocket.close(code=4003)
|
from __future__ import division
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import ttest_ind, levene, f_oneway, f
from math import sqrt
from itertools import combinations
from qsturng import psturng
# ===== STATISTICAL TESTS USED BY CEP =====
def get_cohens(sample_a, sample_b):
    '''
    Calculate the absolute value of Cohen's d from two samples.
    Sample A and Sample B are array-like data stores (ideally pandas
    Series / numpy arrays) supporting .mean() and .std().
    '''
    # Pooled standard deviation of the two groups.
    pooled_sd = sqrt((sample_a.std() ** 2 + sample_b.std() ** 2) / 2)
    cohens_d = (sample_a.mean() - sample_b.mean()) / pooled_sd
    return abs(cohens_d)
def welch_anova(*samples):
    '''
    Welch's one-way ANOVA for groups whose homogeneity-of-variance
    assumption is violated. *samples* are array-like stores (ideally
    pandas Series) supporting .mean(), .var() and .count().
    Returns (f_stat, pval).
    Formula reference:
    http://www.uvm.edu/~dhowell/gradstat/psych340/Lectures/Anova/anova2.html
    '''
    n_groups = len(samples)
    weights, means, counts = [], [], []
    sum_w = 0
    sum_wm = 0
    for grp in samples:
        grp_mean = grp.mean()
        grp_var = grp.var()
        grp_count = grp.count()
        grp_weight = grp_count / grp_var  # weight = n / s^2
        means.append(grp_mean)
        counts.append(grp_count)
        weights.append(grp_weight)
        sum_w += grp_weight
        sum_wm += grp_weight * grp_mean
    grand_mean = sum_wm / sum_w
    # Weighted between-group variability and the adjustment term used by
    # both the F denominator and the degrees of freedom.
    between = 0
    adj_sum = 0
    for w, m, c in zip(weights, means, counts):
        between += w * ((m - grand_mean) ** 2)
        adj_sum += ((1 - w / sum_w) ** 2) / (c - 1)
    f_numer = between / (n_groups - 1)
    f_denom = 1 + 2 * (n_groups - 2) * adj_sum / (n_groups**2 - 1)
    f_stat = f_numer / f_denom
    # Welch's degrees of freedom.
    df = (n_groups**2 - 1) / (3 * adj_sum)
    pval = 1 - f.cdf(f_stat, n_groups - 1, df)
    return f_stat, pval
def get_msw_et_al(*samples):
    '''
    Calculate the mean squares within for a list of samples.
    *samples* are array-like stores (ideally pandas Series).
    Returns three values:
        msw: Mean Squares Within
        k: number of groups
        df_within: degrees of freedom (N - k)
    '''
    k = len(samples)
    total_n = 0   # N: total number of cases across all groups
    wss = 0       # within-group sum of squares
    for grp in samples:
        grp_count = grp.count()
        total_n += grp_count
        # var * (n - 1) recovers the group's sum of squared deviations.
        wss += grp.var() * (grp_count - 1)
    df_within = total_n - k
    return wss / df_within, k, df_within
def tukey(sample_a, sample_b, **kwargs):
    '''
    Tukey's HSD test for two samples (array-like, ideally pandas Series).
    Keyword arguments:
        msw: Mean Squares Within (from get_msw_et_al)
        r:   total number of samples
        df:  degrees of freedom
    Returns (mean_diff, p-value from the studentized range distribution).
    '''
    msw = kwargs.get('msw')
    r = kwargs.get('r')
    df = kwargs.get('df')
    count_a = sample_a.count()
    count_b = sample_b.count()
    mean_diff = sample_a.mean() - sample_b.mean()
    # Standard error based on the pooled within-group variance.
    standard_error = sqrt(msw * (1/2) * (1/count_a + 1/count_b))
    q = abs(mean_diff) / standard_error
    p = psturng(q, r, df)
    return mean_diff, p
def gh(sample_a, sample_b, **kwargs):
    """Games-Howell pairwise comparison of two samples.

    Used as the posthoc after Welch's ANOVA; unlike Tukey it derives its
    own standard error and degrees of freedom from the per-sample
    variance-to-size ratios.

    kwargs:
        r: total number of samples

    Returns (mean difference, p-value from the studentized range dist).
    """
    r = kwargs.get('r')
    n_a = sample_a.count()
    n_b = sample_b.count()
    # Per-sample variance-to-size ratios (s^2 / n).
    ratio_a = sample_a.var() / n_a
    ratio_b = sample_b.var() / n_b
    mean_diff = sample_a.mean() - sample_b.mean()
    standard_error = sqrt((1/2) * (ratio_a + ratio_b))
    q = abs(mean_diff) / standard_error
    # Welch-Satterthwaite style degrees of freedom from the same ratios.
    df = (ratio_a + ratio_b)**2 / (
        ratio_a**2 / (n_a - 1) + ratio_b**2 / (n_b - 1))
    return mean_diff, psturng(q, r, df)
def translate_result(pval, mean_diff, sample_a, sample_b):
    """Translate a test result into a (verdict, sign, cohens_d) tuple.

    verdict is "Not significant" or an effect-size description ("Small
    effect size", etc.); sign is a formatted mean-difference string;
    cohens_d is the computed Cohen's d (None when not significant).
    """
    # Non-significant (or NaN) p-value: nothing further to report.
    if not (pval < SIG_LEVEL):
        return "Not significant", None, None
    cohens_d = get_cohens(sample_a, sample_b)
    # Map Cohen's d onto the effect-size buckets.
    if cohens_d < .15:
        lang = 'NONE'
    elif .15 <= cohens_d < .45:
        lang = 'SMALL'
    elif .45 <= cohens_d < .75:
        lang = 'MEDIUM'
    elif .75 <= cohens_d:
        lang = 'LARGE'
    verdict = EFF_LANG_DICT[lang].title()
    cohen_sign = EFF_SIGN_DICT[lang]
    # ===== Mike 10/06 =====
    # Temporarily report the raw mean difference rounded to 4 decimal
    # points and suppress cohen_sign so Ramya can do checking.
    sign = '%0.4f' % mean_diff
    # sign = '{diff}{cs}'.format(diff=sign, cs=cohen_sign)
    return verdict, sign, cohens_d
def cep_ttest(sample_a, sample_b):
    '''
    Compare two samples with an equal- or unequal-variance t-test.

    Sample A and Sample B are array-like data stores, ideally numpy arrays
    or pandas Series, so we can perform mean and count calculations on them.
    Returns a dictionary with the following entries:
        "test":    "Standard" (equal variance), "Welch" (unequal variance),
                   or "N/A" when Levene's test yields no result
        "pval":    p-value of the test performed (None when "test" is "N/A")
        "verdict": "Not significant" or the effect size description
        "cohen":   Cohen's d value
        "sign":    formatted mean-difference string (see translate_result)
        "g1_n"/"g2_n":       response counts of the two samples
        "g1_mean"/"g2_mean": means of the two samples
    '''
    result_dict = {}
    # Levene's test decides whether the samples have equal variances; its
    # significance statistic is the second element of the result tuple.
    equal_var_test_sig = levene(sample_a, sample_b, center='mean')[1]
    if pd.isnull(equal_var_test_sig):
        # BUG FIX: the original fell through after setting 'N/A' and crashed
        # with a NameError on equal_var_arg; report an empty result instead.
        result_dict['test'] = 'N/A'
        result_dict['pval'] = None
        result_dict['cohen'] = None
        result_dict['verdict'] = None
        result_dict['sign'] = None
    else:
        equal_var_arg = equal_var_test_sig >= SIG_LEVEL
        result_dict['test'] = 'Standard' if equal_var_arg else 'Welch'
        # The t-test's p-value is the second element of the result tuple.
        ttest_result_sig = ttest_ind(sample_a, sample_b, axis=0,
                                     equal_var=equal_var_arg)[1]
        result_dict['pval'] = ttest_result_sig
        mean_diff = sample_a.mean() - sample_b.mean()
        verdict, sign, cohens_d = translate_result(
            ttest_result_sig, mean_diff, sample_a, sample_b)
        result_dict['cohen'] = cohens_d
        result_dict['verdict'] = verdict
        result_dict['sign'] = sign
    result_dict['g1_n'] = sample_a.count()
    result_dict['g2_n'] = sample_b.count()
    result_dict['g1_mean'] = sample_a.mean()
    result_dict['g2_mean'] = sample_b.mean()
    return result_dict
def cep_anova(samples_dict):
    '''
    Perform a one-way ANOVA (plus posthoc tests) for the given samples.

    samples_dict maps a label to an array-like sample.  Returns a dict with:
        "test":    'Standard', 'Welch', or 'N/A' (Levene inconclusive)
        "anova_p": omnibus p-value
        "posthoc": 'Tukey' or 'Games-Howell' (only when omnibus significant)
        plus one (label_a, label_b) entry per pair with the translated
        posthoc result tuple (verdict, sign, cohens_d).
    '''
    samples_list = samples_dict.values()
    result_dict = {}
    # Levene's test determines homogeneity of variance; its significance
    # statistic is the second element of the result tuple.
    equal_var_test_sig = levene(*samples_list, center='mean')[1]
    if pd.isnull(equal_var_test_sig):
        # BUG FIX: the original fell through after setting 'N/A' and crashed
        # with a NameError on anova_result; report 'N/A' and stop instead.
        result_dict['test'] = 'N/A'
        return result_dict
    if equal_var_test_sig >= SIG_LEVEL:
        # Homogeneous variances: standard one-way ANOVA.
        result_dict['test'] = 'Standard'
        anova_result = f_oneway(*samples_list)
    else:
        # Heterogeneous variances: Welch's ANOVA.
        result_dict['test'] = 'Welch'
        anova_result = welch_anova(*samples_list)
    anova_result_sig = anova_result[1]
    result_dict['anova_p'] = anova_result_sig
    if anova_result_sig < SIG_LEVEL:
        # Significant omnibus result: run pairwise posthoc tests on every
        # pair of samples.
        pairs_dict = {}
        for left, right in combinations(samples_dict.items(), 2):
            pairs_dict[(left[0], right[0])] = (left[1], right[1])
        # msw, r and df feed into the posthoc tests.
        msw, r, df = get_msw_et_al(*samples_list)
        kwargs_dict = {'r': r}
        if result_dict['test'] == 'Standard':
            # Standard ANOVA is followed by Tukey's HSD.
            result_dict['posthoc'] = 'Tukey'
            posthoc = tukey
            kwargs_dict['msw'] = msw
            kwargs_dict['df'] = df
        else:
            # Welch's ANOVA is followed by Games-Howell.
            result_dict['posthoc'] = 'Games-Howell'
            posthoc = gh
        for key, (sample_a, sample_b) in pairs_dict.items():
            mean_diff, pval = posthoc(sample_a, sample_b, **kwargs_dict)
            # Translate into (verdict, sign, cohens_d) and store per pair.
            result_dict[key] = translate_result(pval, mean_diff, sample_a, sample_b)
    return result_dict
|
from recommendations import recommendation
import MySQLdb
import numpy as np
import math
from scipy import spatial
# Module-level connection/cursor shared by the classes and script below.
# NOTE(review): credentials are hard-coded in source — move to configuration.
conn1 = MySQLdb.connect(host = "localhost", user = "root", passwd = "40OZlike", db = "plalyst")
cur= conn1.cursor()
class Song():
    """A user-input song loaded from the database by name.

    Fetches the song's id and its list of tag ids from the Song/SongTag
    tables using the module-level cursor.

    NOTE(review): SQL is built by string concatenation — vulnerable to SQL
    injection; should use parameterized queries (cur.execute(sql, params)).
    """
    def __init__(self, name):
        self.name = name
        cur.execute('select id from Song where name ="'+name+'"')
        # fetchall() returns rows as tuples; first row, first column is the id.
        self.id = cur.fetchall()[0][0]
        self.tags = []
        cur.execute('select tag from SongTag where song ="'+str(self.id)+'"')
        tags = cur.fetchall()
        for tag in tags:
            self.tags.append(tag[0])
class RecommendedSong():
    """A candidate song scored against the user's input songs.

    For every song in *songList* a cosine similarity is computed between
    tag-count vectors built over the union of both songs' tags; avgCos is
    the mean of those similarities and is used to rank candidates.

    NOTE(review): SQL built by string concatenation — see Song.
    """
    def __init__(self, name, id, songList):
        self.name = name
        self.id = id
        self.tags = []
        cur.execute('select tag from SongTag where song ="'+str(self.id)+'"')
        tags = cur.fetchall()
        for tag in tags:
            self.tags.append(tag[0])
        self.cosineTag =[]
        # NOTE(review): j is incremented but never read — dead counter.
        j = 0;
        for inpSong in songList:
            # Vocabulary: union of this song's and the input song's tags.
            tagList = list(self.tags)
            tagList.extend(inpSong.tags)
            tagList = list(set(tagList))
            inpMatrix = []
            retMatrix = []
            # Tag-count vectors over the shared vocabulary.
            for i in range(0,len(tagList)):
                inpMatrix.append(inpSong.tags.count(tagList[i]))
                retMatrix.append(self.tags.count(tagList[i]))
            result = cosine_similarity(inpMatrix, retMatrix)
            self.cosineTag.append(result)
            j+=1
        # Average similarity across all input songs — the ranking key.
        self.avgCos = np.mean(self.cosineTag)
def cosine_similarity(v1, v2):
    """Compute the cosine similarity of v1 to v2: (v1 . v2)/(||v1||*||v2||)."""
    distance = spatial.distance.cosine(v1, v2)
    return 1 - distance
# Self-test script: rebuild the recommendation list locally and compare it
# against the output of recommendations.recommendation().
# Take the 8 most recently added songs as the simulated user input.
cur.execute('select name from Song order by id desc limit 8')
songs1Empty = cur.fetchall()
songs = []
for songN in songs1Empty:
    songs.append(songN[0])
print("input taken")
# Load each input song (with its tags) and collect quoted names for SQL.
songList = []
inputByUser = []
for songName in songs:
    inputByUser.append('"'+songName+'"')
    s = Song(songName)
    songList.append(s)
# Union of all tags across the input songs.
tagList = []
for song in songList:
    for tag in song.tags:
        tagList.append(str(tag))
tagList=list(set(tagList))
tagList = ",".join(tagList)
inputByUser=list(set(inputByUser))
inputByUser = ",".join(inputByUser)
# Candidates: every song sharing at least one tag, excluding the inputs.
# NOTE(review): SQL built by string concatenation — injection-prone.
sql = 'select distinct Song.name, Song.id from Song join SongTag on SongTag.song = Song.id where SongTag.tag in ('+tagList+') and Song.name not in ('+inputByUser+')'
cur.execute(sql)
recSongs = cur.fetchall()
# Score each candidate against the input songs and keep the top 30.
recSongList = []
for recSongName in recSongs:
    r = RecommendedSong(recSongName[0],recSongName[1],songList)
    recSongList.append(r)
recSongList.sort(key=lambda x: x.avgCos, reverse=True)
recommended30 = recSongList[:30]
cur.close()
conn1.close()
# Compare against the library's recommendation for the same input.
received=recommendation(songs)
cond = True
for i in range(0,30):
    if received[i].name!=recommended30[i].name:
        cond = False
if(cond):
    print('Test Passed.. the received list matches with the generated one')
else:
    print('Test Failed')
|
import pygeo.geocelery_conf
def url_to_download_filepath(user_url, url):
    """Map *user_url* and an ESGF file *url* onto a local download filepath.

    NOTE(review): url[6:] drops a fixed 6-character prefix of *url*
    (presumably its scheme) — confirm against callers.
    """
    base = '%s/%s' % (download_dir(), user_url_to_filepath(user_url))
    return base + url[6:]
def download_dir():
    """Return the root directory for ESGF downloads (DOWNLOAD_DIR + '/esgf')."""
    return "%s/esgf" % pygeo.geocelery_conf.DOWNLOAD_DIR
def user_url_to_filepath(user_url):
    """Strip the URL scheme from *user_url* so it can be used as a path.

    Note: str.replace removes every occurrence of the scheme substring,
    not just a leading one (matches the original behavior).
    """
    for scheme in ('https://', 'http://'):
        user_url = user_url.replace(scheme, '')
    return user_url
def user_cert_file(user_url):
    """Return the path of the ESGF certificate file for *user_url*."""
    user_dir = user_url_to_filepath(user_url)
    return '%s/%s/cert.esgf' % (download_dir(), user_dir)
|
"""
Problem 120 - Triangle
Given a triangle, find the minimum path sum from top to bottom. Each
step you may move to adjacent numbers on the row below.
"""
from typing import List
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        """Return the minimum top-to-bottom path sum of *triangle*.

        Bottom-up DP: each row is folded into the row above it, in place
        (the input list is mutated, as in the original), so the apex ends
        up holding the overall minimum.
        """
        for row_idx in range(len(triangle) - 2, -1, -1):
            row, below = triangle[row_idx], triangle[row_idx + 1]
            for col_idx, value in enumerate(row):
                row[col_idx] = value + min(below[col_idx], below[col_idx + 1])
        return triangle[0][0]
if __name__ == "__main__":
    # Sample triangle from the problem statement.
    triangle = [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]
    # Should return 11
    print(Solution().minimumTotal(triangle))
|
#!/usr/bin/env python
# coding: utf-8
import binascii
def MD2(input_string):
    """ Calculates the MD2 hash (RFC 1319) of any string input.
    Arguments:
        input_string
    Returns:
        Hexadecimal MD2 hash of the input string.
    """
    #------------------------------------------------------------------------------------------------
    # Step 1: Append Padding Bytes
    #------------------------------------------------------------------------------------------------
    #Convert the input string to a bytearray:
    m_bytes = bytearray(input_string, 'utf-8')
    #16-Bytes minus the length of the message in bytes, modulo 16 gives the
    #length of padding needed (in Bytes):
    len_padding = 16-(len(m_bytes) % 16)
    #The value for each of the padding Bytes is the value of the length of the padding required:
    padding = bytearray(len_padding for i in range(len_padding))
    #Now add the padding to the original message, so that it is divisible by 16:
    padded_message = m_bytes + padding
    # The 256-entry substitution table from RFC 1319 (a permutation of 0..255
    # derived from the digits of pi):
    S = [41, 46, 67, 201, 162, 216, 124, 1, 61, 54, 84, 161, 236, 240, 6, 19, 98, 167, 5, 243, 192, 199,
         115, 140, 152, 147, 43, 217, 188, 76, 130, 202, 30, 155, 87, 60, 253, 212, 224, 22, 103, 66, 111,
         24, 138, 23, 229, 18, 190, 78, 196, 214, 218, 158, 222, 73, 160, 251, 245, 142, 187, 47, 238, 122,
         169, 104, 121, 145, 21, 178, 7, 63, 148, 194, 16, 137, 11, 34, 95, 33, 128, 127, 93, 154, 90, 144,
         50, 39, 53, 62, 204, 231, 191, 247, 151, 3, 255, 25, 48, 179, 72, 165, 181, 209, 215, 94, 146, 42,
         172, 86, 170, 198, 79, 184, 56, 210, 150, 164, 125, 182, 118, 252, 107, 226, 156, 116, 4, 241, 69,
         157, 112, 89, 100, 113, 135, 32, 134, 91, 207, 101, 230, 45, 168, 2, 27, 96, 37, 173, 174, 176, 185,
         246, 28, 70, 97, 105, 52, 64, 126, 15, 85, 71, 163, 35, 221, 81, 175, 58, 195, 92, 249, 206, 186,
         197, 234, 38, 44, 83, 13, 110, 133, 40, 132, 9, 211, 223, 205, 244, 65, 129, 77, 82, 106, 220, 55,
         200, 108, 193, 171, 250, 36, 225, 123, 8, 12, 189, 177, 74, 120, 136, 149, 139, 227, 99, 232, 109,
         233, 203, 213, 254, 59, 0, 29, 57, 242, 239, 183, 14, 102, 88, 208, 228, 166, 119, 114, 248, 235,
         117, 75, 10, 49, 68, 80, 180, 143, 237, 31, 26, 219, 153, 141, 51, 159, 17, 131, 20]
    #------------------------------------------------------------------------------------------------
    # Step 2: Append a 16-byte checksum to the result of step 1
    #------------------------------------------------------------------------------------------------
    # Part A: Clear the Checksum bytearray of 16 bytes:
    C = bytearray(0 for i in range(16))
    # Part B: Set L to zero:
    L=0
    # Part C: Process each 16-Byte block:
    M = padded_message
    N = len(M)
    for i in range (0, int(N/16)):
        #Calculate the checksum block i, using each Byte within the 16-Byte block:
        for j in range (0, 16):
            c = M[i * 16 + j]
            # NOTE: C[j] ^= S[c ^ L] follows the RFC 1319 *reference
            # implementation* (the RFC's prose omits the XOR — a known erratum).
            C[j] = C[j] ^ (S[c^L])
            L = C[j]
    #Append the calculated checksum to the padded message:
    padded_message_checksum = M + C
    #------------------------------------------------------------------------------------------------
    # Step 3: Initialise MD Buffer
    #------------------------------------------------------------------------------------------------
    # A 48-Byte buffer is used to compute the message digest and this is initialised to zero.
    X = bytearray([0 for i in range(48)])
    #------------------------------------------------------------------------------------------------
    # Step 4: Process the Message in 16-Byte Blocks
    #------------------------------------------------------------------------------------------------
    M_PC = padded_message_checksum
    # Process each 16-Byte block:
    for i in range(0, int(len(M_PC)/16)):
        #Copy block i into X (middle third), and XOR it with the first third
        #into the last third:
        for j in range(0, 16):
            X[16+j] = M_PC[(i*16) + j]
            X[32+j] = (X[16+j] ^ X[j])
        t = 0
        # Do 18 Rounds:
        for j in range(0, 18):
            # Round j: mix every byte of the 48-byte buffer through S.
            for k in range(0, 48):
                t = X[k] = (X[k] ^ S[t])
            t = (t+j) % 256
    #------------------------------------------------------------------------------------------------
    # Step 5: Output
    #------------------------------------------------------------------------------------------------
    # The digest is the first 16 bytes of the buffer, rendered as hex.
    binary_output = X[:16]
    hex_output = binascii.hexlify(binary_output).decode('utf-8')
    return (hex_output)
|
import random
# Build a 4x4 matrix of random ints.
# NOTE(review): randint(0, 16) is inclusive on both ends (17 possible
# values); 0..15 may have been intended — confirm.
l = [0] * 4
m = [list(l) for x in range(4)]
for i in range(4):
    for j in range(4):
        m[i][j] = random.randint(0, 16)
print(m)
# Precompute the menu answers: both diagonals, the border cells, the total.
diag1 = [m[i][i] for i in range(4)]
diag2 = [m[i][4 - i - 1] for i in range(4)]
border = [m[0][i] for i in range(4)] + [m[3][i] for i in range(4)] + [m[i][0] for i in range(1, 4)] + [m[i][3] for i in range(1, 4)]
sum_of_matrix = 0
for i in range(4):
    sum_of_matrix += sum(m[i])
d = {}
d[1] = diag1
d[2] = diag2
d[3] = border
d[4] = sum_of_matrix
d[5] = "exit"
# Menu loop: print the answer for the chosen option; option 5 prints
# "exit" first (intentional: d[5] is the string) and then stops.
while True:
    s = int(input())
    print(d[s])
    if s == 5:
        break
|
from __future__ import division
import argparse
# from . import main, utils
import pkg_resources
import pandas as pd
import ast
import subprocess
import numpy as np
def run():
    """Build Diffacto input files from scavager results and run Diffacto.

    Collects the *_proteins.tsv / *_peptides.tsv / *_PSMs_full.tsv files
    for up to four sample groups (S1..S4), assembles a peptide-intensity
    table plus a sample map, writes both to disk, and invokes the external
    Diffacto script via subprocess.
    """
    parser = argparse.ArgumentParser(
        description='run diffacto for scavager results',
        epilog='''
    Example usage
    -------------
    $ scav2diffacto -S1 sample1_1_proteins.tsv sample1_n_proteins.tsv -S2 sample2_1_proteins.tsv sample2_n_proteins.tsv
    -------------
    ''',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-dif', help='path to Diffacto', required=True)
    parser.add_argument('-S1', nargs='+', help='input files for S1 sample', required=True)
    parser.add_argument('-S2', nargs='+', help='input files for S2 sample', required=True)
    parser.add_argument('-S3', nargs='+', help='input files for S3 sample')
    parser.add_argument('-S4', nargs='+', help='input files for S4 sample')
    parser.add_argument('-peptides', help='name of output peptides file', default='peptides.txt')
    parser.add_argument('-samples', help='name of output samples file', default='sample.txt')
    parser.add_argument('-out', help='name of diffacto output file', default='diffacto_out.txt')
    parser.add_argument('-norm', help='normalization method. Can be average, median, GMM or None', default='None')
    parser.add_argument('-impute_threshold', help='impute_threshold for missing values fraction', default='0.25')
    parser.add_argument('-min_samples', help='minimum number of samples for peptide usage', default='3')
    parser.add_argument('-version', action='version', version='%s' % (pkg_resources.require("scavager")[0], ))
    args = vars(parser.parse_args())
    # df_final accumulates the merged peptide table; False means "not built yet".
    df_final = False
    allowed_prots = set()
    allowed_peptides = set()
    # Pass 1: collect every protein dbname seen in any sample group.
    for sample_num in ['S1', 'S2', 'S3', 'S4']:
        if args[sample_num]:
            for z in args[sample_num]:
                df0 = pd.read_table(z)
                allowed_prots.update(df0['dbname'])
    # Pass 2: collect every peptide seen in any sample group.
    for sample_num in ['S1', 'S2', 'S3', 'S4']:
        if args[sample_num]:
            for z in args[sample_num]:
                df0 = pd.read_table(z.replace('_proteins.tsv', '_peptides.tsv'))
                allowed_peptides.update(df0['peptide'])
    # Pass 3: build a per-file intensity column and outer-merge on peptide.
    for sample_num in ['S1', 'S2', 'S3', 'S4']:
        if args[sample_num]:
            for z in args[sample_num]:
                label = z.replace('_proteins.tsv', '')
                df1 = pd.read_table(z.replace('_proteins.tsv', '_PSMs_full.tsv'))
                df1 = df1[df1['peptide'].apply(lambda pep: pep in allowed_peptides)]
                # Temporarily suffix the charge onto the peptide so the
                # strongest PSM per peptide+charge survives, then strip it
                # and sum intensities per bare peptide.
                df1['peptide'] = df1.apply(lambda row: row['peptide'] + str(row['assumed_charge']), axis=1)
                df1 = df1.sort_values('MS1Intensity', ascending=False).drop_duplicates(['peptide'])
                df1['peptide'] = df1['peptide'].apply(lambda pep: pep[:-1])
                df1['MS1Intensity'] = df1.groupby('peptide')['MS1Intensity'].transform(sum)
                df1 = df1.sort_values('q', ascending=True).drop_duplicates(['peptide'])
                df1[label] = df1['MS1Intensity']
                # Zero intensity means "missing" for Diffacto.
                df1[label] = df1[label].replace([0, 0.0], np.nan)
                # Keep only proteins from the allowed set; drop peptides with
                # no remaining protein.
                df1['protein'] = df1['protein'].apply(
                    lambda prots: ';'.join([u for u in ast.literal_eval(prots) if u in allowed_prots]))
                df1 = df1[df1['protein'].apply(lambda prots: prots != '')]
                df1 = df1[['peptide', 'protein', label]]
                if df_final is False:
                    df_final = df1
                else:
                    df_final = df_final.reset_index().merge(df1.reset_index(), on='peptide', how='outer')
                    # Reconcile the duplicated protein columns from the merge.
                    df_final.protein_x.fillna(value=df_final.protein_y, inplace=True)
                    df_final['protein'] = df_final['protein_x']
                    df_final = df_final.drop(columns=['protein_x', 'protein_y', 'index_x', 'index_y'])
    print(df_final.columns)
    df_final = df_final.set_index('peptide')
    # Diffacto expects a 'proteins' column, placed first.
    df_final['proteins'] = df_final['protein']
    df_final = df_final.drop(columns=['protein'])
    cols = df_final.columns.tolist()
    cols.remove('proteins')
    cols.insert(0, 'proteins')
    df_final = df_final[cols]
    # BUG FIX: fillna() is not in-place; the original discarded its result.
    df_final = df_final.fillna(value='')
    df_final.to_csv(args['peptides'], sep=',')
    # Write the sample map (file label -> sample group).
    with open(args['samples'], 'w') as out:
        for sample_num in ['S1', 'S2', 'S3', 'S4']:
            if args[sample_num]:
                for z in args[sample_num]:
                    label = z.replace('_proteins.tsv', '')
                    out.write(label + '\t' + sample_num + '\n')
    subprocess.call(['python3', args['dif'], '-i', args['peptides'], '-samples', args['samples'], '-out',
                     args['out'], '-normalize', args['norm'], '-impute_threshold', args['impute_threshold'],
                     '-min_samples', args['min_samples']])
if __name__ == '__main__':
    # Script entry point.
    run()
|
import pytest
from denorm.models import DirtyInstance
from django.core.exceptions import ValidationError
from django.db.models import ProtectedError
from model_bakery import baker
from bpp.models import Wydawca
from bpp.models.wydawca import Poziom_Wydawcy
@pytest.mark.django_db
def test_wydawnictwo_zwarte_wydawca_delete(wydawnictwo_zwarte, wydawca, denorms):
    """A publisher referenced by a monograph cannot be deleted (ProtectedError);
    after unlinking, deleting it leaves exactly one dirty denorm instance."""
    wydawnictwo_zwarte.wydawca = wydawca
    wydawnictwo_zwarte.save()
    denorms.flush()
    assert DirtyInstance.objects.count() == 0
    with pytest.raises(ProtectedError):
        wydawca.delete()
    wydawnictwo_zwarte.wydawca = None
    wydawnictwo_zwarte.save()
    wydawca.delete()
    assert DirtyInstance.objects.count() == 1
    denorms.flush()
@pytest.mark.django_db
def test_wydawnictwo_zwarte_wydawca_change_nazwa(wydawnictwo_zwarte, wydawca, denorms):
    """Renaming a publisher marks its dependent records dirty (2 instances)."""
    wydawnictwo_zwarte.wydawca = wydawca
    wydawnictwo_zwarte.save()
    denorms.flush()
    assert DirtyInstance.objects.all().count() == 0
    wydawca.nazwa = wydawca.nazwa + "X"
    wydawca.save()
    assert DirtyInstance.objects.all().count() == 2
@pytest.mark.django_db
def test_wydawnictwo_zwarte_wydawca_change_alias_dla(
    wydawnictwo_zwarte, wydawca, denorms
):
    """Pointing a publisher at an alias target dirties the related records
    (5 instances)."""
    wydawnictwo_zwarte.wydawca = wydawca
    wydawnictwo_zwarte.save()
    denorms.flush()
    assert DirtyInstance.objects.all().count() == 0
    wydawca2 = baker.make(Wydawca)
    wydawca.alias_dla = wydawca2
    wydawca.save()
    assert DirtyInstance.objects.all().count() == 5
@pytest.mark.django_db
def test_wydawnictwo_zwarte_wydawca_change_poziom_ten_sam_rok(
    wydawnictwo_zwarte, wydawca, rok, denorms
):
    """Changing a publisher's level for the publication's year cascades:
    first the publisher rebuilds, then the dependent publication."""
    wydawnictwo_zwarte.wydawca = wydawca
    wydawnictwo_zwarte.rok = rok
    wydawnictwo_zwarte.save()
    denorms.flush()
    assert DirtyInstance.objects.all().count() == 0
    pw = wydawca.poziom_wydawcy_set.create(rok=rok, poziom=1)
    assert DirtyInstance.objects.all().count() == 1
    denorms.flush()
    pw.poziom = 2
    pw.save()
    # This rebuilds the publisher + the list of levels on the publisher
    denorms.flush(run_once=True)
    # ... and now it will rebuild the monograph (wydawnictwo zwarte)
    assert DirtyInstance.objects.all().count() == 1
    assert DirtyInstance.objects.first().object_id == wydawnictwo_zwarte.pk
@pytest.mark.django_db
def test_wydawca_get_tier(wydawca, rok):
    """get_tier: -1 with no level row, None when the level is None,
    otherwise the stored level."""
    assert wydawca.get_tier(rok) == -1
    pw = wydawca.poziom_wydawcy_set.create(rok=rok, poziom=None)
    assert wydawca.get_tier(rok) is None
    pw.poziom = 1
    pw.save()
    assert wydawca.get_tier(rok) == 1
@pytest.mark.django_db
def test_wydawca_alias_get_tier(wydawca, alias_wydawcy, rok):
    """An alias delegates get_tier to its target; unknown years give -1."""
    wydawca.poziom_wydawcy_set.create(rok=rok, poziom=1)
    assert wydawca.get_tier(rok) == 1
    assert alias_wydawcy.get_tier(rok) == 1
    assert alias_wydawcy.get_tier(rok + 10) == -1
def test_wydawca_alias_nie_pozwol_stworzyc_poziomu_dla_aliasu(alias_wydawcy):
    """Creating a publisher level on an alias raises ValidationError."""
    with pytest.raises(ValidationError):
        alias_wydawcy.poziom_wydawcy_set.create(rok=2020, poziom=1)
def test_wydawca_alias_nie_pozwol_zrobic_aliasu_dla_posiadajacego_poziomy(wydawca):
    """A publisher that already has levels cannot become an alias."""
    wydawca.poziom_wydawcy_set.create(rok=2020, poziom=2)
    w2 = baker.make(Wydawca)
    wydawca.alias_dla = w2
    with pytest.raises(ValidationError):
        wydawca.save()
def test_wydawca_alias_sam_do_siebie(wydawca):
    """A publisher cannot be an alias of itself."""
    wydawca.alias_dla = wydawca
    with pytest.raises(ValidationError):
        wydawca.save()
def test_poziom_wydawcy_str(wydawca):
    """Poziom_Wydawcy's str() includes the publisher name and the year."""
    pw = Poziom_Wydawcy.objects.create(wydawca=wydawca, rok=2020, poziom=1)
    assert str(pw) == 'Poziom wydawcy "Wydawca Testowy" za rok 2020'
@pytest.mark.django_db
def test_denorm_wydawca_ilosc_aliasow(denorms):
    """The denormalized alias count (ile_aliasow) updates after a flush."""
    w1 = Wydawca.objects.create(nazwa="123")
    assert w1.ile_aliasow == 0
    Wydawca.objects.create(nazwa="456", alias_dla=w1)
    denorms.flush()
    w1.refresh_from_db()
    assert w1.ile_aliasow == 1
@pytest.mark.django_db
def test_denorm_wydawca_poziomy_wydawcy(denorms):
    """The denormalized level list (lista_poziomow) updates after a flush."""
    w1 = Wydawca.objects.create(nazwa="123")
    assert w1.lista_poziomow == []
    Poziom_Wydawcy.objects.create(wydawca=w1, poziom=2, rok=3333)
    denorms.flush()
    w1.refresh_from_db()
    assert w1.lista_poziomow[0] == [3333, 2]
@pytest.mark.django_db
def test_wydawca_str():
    """An alias publisher's str() mentions its target."""
    w1 = baker.make(Wydawca, nazwa="Foo")
    w2 = baker.make(Wydawca, nazwa="Bar", alias_dla=w1)
    assert str(w2) == "Bar (alias dla Foo)"
|
# Greedily count how many of the listed pieces fit into x, scanning the
# list in order (largest first); reads x from stdin, prints the count.
x = int(input())
l = [64, 32, 32, 16, 16, 8, 8, 4, 4, 2, 2, 1]
num = 0
for value in l:
    # The original tested both `l[i] <= x` and `x - l[i] >= 0`, which are
    # equivalent conditions; one check suffices.
    if value <= x:
        x -= value
        num += 1
print(num)
|
# -*- coding: utf-8 -*-
from deptreevis import *
from deptree import *
from eval import *
from macrodef import *
def make_foo():
    """Build the macro 'foo': takes one argument and returns the result of
    calling member 'b' of that argument on the argument itself.

    NOTE(review): StatementList/access_macro/user_macro semantics come from
    the star-imported macrodef module — confirm against its definitions.
    """
    sl = StatementList()
    arg = sl.make_var()
    ret = sl.call(access_macro('b'), arg)
    # Mark the variable roles after wiring up the call.
    arg.make_arg()
    ret.make_ret()
    return user_macro(lambda:sl)
def make_bar(foo):
    """Build the statement list 'bar': computes b = x(arg), wraps it into a
    tuple/closure {'b': b}, applies *foo* to it, and returns a copy of the
    result.

    NOTE(review): semantics of tuple_macro/copy_macro come from the
    star-imported macrodef module — confirm against its definitions.
    """
    sl = StatementList()
    arg = sl.make_var()
    b = sl.call(access_macro('x'), arg)
    clo = sl.call(tuple_macro({'b':b}), None)
    a = sl.call(foo, clo)
    ret = sl.call(copy_macro, a)
    # Mark the variable roles after wiring up the calls.
    arg.make_arg()
    ret.make_ret()
    return sl
# Build bar(foo), extract its dependency tree, and render it via Graphviz.
sl = make_bar(make_foo())
tree = sl.get_tree()
graph = deptree_make_graph(tree)
graph.draw('foo.png', prog='dot')
|
def is_nan(string):
    """Return True when *string* is NaN.

    Relies on NaN being the only value that compares unequal to itself,
    so it works for any cell type (floats, strings, ...).
    """
    equals_itself = (string == string)
    return not equals_itself
# Usado pra pegar a primeira linha das tabelas normais
def get_begin_row(data, begin_string):
    """Return the index of the first data row of a normal table.

    Scans for the row whose first cell equals *begin_string* (the index
    returned is 1-based, i.e. the row right after the match), then skips
    any filler rows whose first cell is NaN.
    """
    marker_at = 0
    for idx, row in enumerate(data, start=1):
        marker_at = idx
        if row[0] == begin_string:
            break
    # Skip filler rows with a NaN first cell.
    while is_nan(data[marker_at][0]):
        marker_at += 1
    return marker_at
# Usado pra pegar a primeira linha das tabelas diferentes que começam com um valor "nan"
def get_begin_row_nan(data, begin_string):
    """Return the index of the first data row of a table whose rows start
    with a NaN value (so the marker is looked up in column 1).

    Same contract as get_begin_row, but matches/skips on row[1].
    """
    marker_at = 0
    for idx, row in enumerate(data, start=1):
        marker_at = idx
        if row[1] == begin_string:
            break
    # Skip filler rows with a NaN second cell.
    while is_nan(data[marker_at][1]):
        marker_at += 1
    return marker_at
# Usado pra pegar a última linha das tabelas normais
def get_end_row(data, end_string):
    """Return the index of the last data row of a normal table.

    Finds the 1-based position of the row whose first cell equals
    *end_string* and returns that position minus 2 (the last row before
    the marker, 0-based).
    """
    position = 0
    for idx, row in enumerate(data, start=1):
        position = idx
        if row[0] == end_string:
            break
    return position - 2
# Usado pra pegar a última linha das tabelas diferentes que começam com um valor "nan"
def get_end_row_nan(data, end_string):
    """Return the index of the last data row of a table whose rows start
    with a NaN value (so the marker is looked up in column 1).

    Same contract as get_end_row, but matches on row[1].
    """
    position = 0
    for idx, row in enumerate(data, start=1):
        position = idx
        if row[1] == end_string:
            break
    return position - 2
# Usado para limpar a tabela, remover vírgulas de valores e colocar ponto, e onde tem nan colocar 0.0,
# caso for número
def clean_cell(element):
    """Normalise a spreadsheet cell to a float.

    NaN cells become 0.0.  String cells may use European formatting
    ('3.045,99') or US formatting ('3,045.99' — a value with this
    formatting was observed in the data); both are converted to Python
    float notation before parsing.

    Raises ValueError for non-numeric strings (as before).
    """
    # NaN is the only value that compares unequal to itself; this inlined
    # check replaces the is_nan() helper call but behaves identically.
    if element != element:
        return 0.0
    if type(element) == str:
        if "." in element and "," in element:
            # BUG FIX: the original assumed European format for every
            # mixed-separator value, mangling US-format '3,045.99' into
            # 3.04599.  Decide by which separator appears last.
            if element.rfind(",") < element.rfind("."):
                # US style: ',' is the thousands separator.
                element = element.replace(",", "")
            else:
                # European style: '.' thousands, ',' decimal.
                element = element.replace(".", "").replace(",", ".")
        elif "," in element:
            element = element.replace(",", ".")
    return float(element)
from flask import g
from werkzeug.local import LocalProxy
from flask_dance.consumer import OAuth2ConsumerBlueprint
__maintainer__ = "Oleg Lavrovsky <oleg@datalets.ch>"
def make_hitobito_blueprint(
    client_id=None,
    secret=None,
    domain=None,
    *,
    scope=None,
    redirect_url=None,
    redirect_to=None,
    login_url=None,
    authorized_url=None,
    session_class=None,
    storage=None,
    rule_kwargs=None,
):
    """
    Make a blueprint for authenticating with hitobito using OAuth 2. Requires
    an OAuth consumer from hitobito. Either pass the domain, ID, secret
    to this constructor, or make sure that the Flask application config defines
    them, using the variables :envvar:`HITOBITO_CLIENT_DOMAIN`,
    :envvar:`HITOBITO_CLIENT_ID` and
    :envvar:`HITOBITO_CLIENT_SECRET`.
    Args:
        client_id (str): The hitobito Client ID for your application
        secret (str): The hitobito Client Secret for your application
        domain (str): The hitobito Domain for your application
        scope (str, optional): comma-separated list of scopes for OAuth token
        redirect_url (str): the URL to redirect to after the authentication
            dance is complete
        redirect_to (str): if ``redirect_url`` is not defined, the name of the
            view to redirect to after the authentication dance is complete.
            The actual URL will be determined by :func:`flask.url_for`
        login_url (str, optional): the URL path for the ``login`` view.
            Defaults to ``/login``
        authorized_url (str, optional): URL path for the ``authorized`` view.
            Defaults to ``/authorized``.
        session_class (class, optional): The class to use for creating a
            Requests session. Defaults to
            :class:`~flask_dance.consumer.requests.OAuth2Session`.
        storage: A token storage class, or an instance of a token storage
            class, to use for this blueprint. Defaults to
            :class:`~flask_dance.consumer.storage.session.SessionStorage`.
        rule_kwargs (dict, optional): Additional arguments that should be
            passed when adding the login and authorized routes. Defaults to
            ``None``.
    :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint`
    :returns: A :doc:`blueprint <flask:blueprints>` to attach to Flask app.
    """
    scope = scope or ['']
    # NOTE(review): if domain is None here, "https://" + domain raises a
    # TypeError even when the Flask config supplies the domain — confirm
    # whether config-only usage is expected to work.
    hitobito_bp = OAuth2ConsumerBlueprint(
        "hitobito",
        __name__,
        client_id=client_id,
        client_secret=secret,
        scope=scope,
        base_url="https://" + domain,
        authorization_url="https://" + domain + "/oauth/authorize",
        token_url="https://" + domain + "/oauth/token",
        redirect_url=redirect_url,
        redirect_to=redirect_to,
        login_url=login_url,
        authorized_url=authorized_url,
        session_class=session_class,
        storage=storage,
        rule_kwargs=rule_kwargs,
    )
    # Allow Flask config values to override the constructor arguments.
    # NOTE(review): mapping base_url to HITOBITO_CLIENT_DOMAIN replaces the
    # "https://"-prefixed URL above with the raw config value — confirm the
    # config value includes the scheme.
    hitobito_bp.from_config["base_url"] = "HITOBITO_CLIENT_DOMAIN"
    hitobito_bp.from_config["client_id"] = "HITOBITO_CLIENT_ID"
    hitobito_bp.from_config["client_secret"] = "HITOBITO_CLIENT_SECRET"
    @hitobito_bp.before_app_request
    def set_applocal_session():
        # Expose the blueprint's OAuth session on flask.g for each request.
        g.flask_dance_hitobito = hitobito_bp.session
    return hitobito_bp
# Module-level proxy to the blueprint's OAuth session for the current request.
hitobito = LocalProxy(lambda: g.flask_dance_hitobito)
|
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
# Node registration constants: UI name, classification under shader/surface,
# and the unique type id used by Maya's dependency graph.
kPluginNodeName = "MitsubaHKShader"
kPluginNodeClassify = "/shader/surface"
kPluginNodeId = OpenMaya.MTypeId(0x87015)
class hk(OpenMayaMPx.MPxNode):
    """Maya dependency-graph node for the Mitsuba Hanrahan-Krueger (HK)
    surface shader.

    The MObject placeholders below are replaced with the real attribute
    objects inside nodeInitializer().
    """
    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)
    # Attribute placeholders, populated by nodeInitializer().
    mMaterial = OpenMaya.MObject()
    mUseSigmaSA = OpenMaya.MObject()
    mSigmaS = OpenMaya.MObject()
    mSigmaA = OpenMaya.MObject()
    mUseSigmaTAlbedo = OpenMaya.MObject()
    mSigmaT = OpenMaya.MObject()
    mAlbedo = OpenMaya.MObject()
    mThickness = OpenMaya.MObject()
    mPhaseFunction = OpenMaya.MObject()
    mPhaseFunctionHGG = OpenMaya.MObject()
    mPhaseFunctionMicroFlakeStdDev = OpenMaya.MObject()
    mOutColor = OpenMaya.MObject()
    def compute(self, plug, block):
        # Only outColor is computed here (as a constant black); any other
        # plug is reported as unknown so Maya falls back to defaults.
        if plug == hk.mOutColor:
            resultColor = OpenMaya.MFloatVector(0.0,0.0,0.0)
            outColorHandle = block.outputValue( hk.mOutColor )
            outColorHandle.setMFloatVector(resultColor)
            outColorHandle.setClean()
        else:
            return OpenMaya.kUnknownParameter
def nodeCreator():
    """Factory used by Maya to instantiate the hk node."""
    return hk()
def _set_input_attr_flags(attr_fn):
    """Mark the attribute currently held by *attr_fn* (an attribute
    function-set) as a standard keyable/storable/readable/writable input."""
    attr_fn.setKeyable(1)
    attr_fn.setStorable(1)
    attr_fn.setReadable(1)
    attr_fn.setWritable(1)

def nodeInitializer():
    """Create and register all attributes of the hk node.

    BUG FIX: the original created the 'thickness' attribute twice with
    identical arguments; the duplicate creation has been removed.  The
    repeated keyable/storable/readable/writable flag boilerplate is now
    factored into _set_input_attr_flags().
    """
    nAttr = OpenMaya.MFnNumericAttribute()
    eAttr = OpenMaya.MFnEnumAttribute()
    try:
        # Named-material preset selector.
        hk.mMaterial = eAttr.create("material", "mat")
        _set_input_attr_flags(eAttr)
        Materials = ["Apple",
                     "Cream",
                     "Skimmilk",
                     "Spectralon",
                     "Chicken1",
                     "Ketchup",
                     "Skin1",
                     "Wholemilk",
                     "Chicken2",
                     "Potato",
                     "Skin2",
                     "Lowfat Milk",
                     "Reduced Milk",
                     "Regular Milk",
                     "Espresso",
                     "Mint Mocha Coffee",
                     "Lowfat Soy Milk",
                     "Regular Soy Milk",
                     "Lowfat Chocolate Milk",
                     "Regular Chocolate Milk",
                     "Coke",
                     "Pepsi Sprite",
                     "Gatorade",
                     "Chardonnay",
                     "White Zinfandel",
                     "Merlot",
                     "Budweiser Beer",
                     "Coors Light Beer",
                     "Clorox",
                     "Apple Juice",
                     "Cranberry Juice",
                     "Grape Juice",
                     "Ruby Grapefruit Juice",
                     "White Grapefruit Juice",
                     "Shampoo",
                     "Strawberry Shampoo",
                     "Head & Shoulders Shampoo",
                     "Lemon Tea Powder",
                     "Orange Juice Powder",
                     "Pink Lemonade Powder",
                     "Cappuccino Powder",
                     "Salt Powder",
                     "Sugar Powder",
                     "Suisse Mocha"
                     ]
        for i in range(len(Materials)):
            eAttr.addField(Materials[i], i)
        # Default to Skin1
        eAttr.setDefault(6)
        # Scattering/absorption pair (used when useSigmaSA is on).
        hk.mUseSigmaSA = nAttr.create("useSigmaSA", "ussa", OpenMaya.MFnNumericData.kBoolean, False)
        _set_input_attr_flags(nAttr)
        hk.mSigmaS = nAttr.createColor("sigmaS", "ss")
        _set_input_attr_flags(nAttr)
        nAttr.setDefault(0.0, 0.0, 0.0)
        hk.mSigmaA = nAttr.createColor("sigmaA", "sa")
        _set_input_attr_flags(nAttr)
        nAttr.setDefault(0.0, 0.0, 0.0)
        # Extinction/albedo pair (used when useSigmaTAlbedo is on).
        hk.mUseSigmaTAlbedo = nAttr.create("useSigmaTAlbedo", "usta", OpenMaya.MFnNumericData.kBoolean, False)
        _set_input_attr_flags(nAttr)
        hk.mSigmaT = nAttr.createColor("sigmaT", "st")
        _set_input_attr_flags(nAttr)
        nAttr.setDefault(0.0, 0.0, 0.0)
        hk.mAlbedo = nAttr.createColor("albedo", "albedo")
        _set_input_attr_flags(nAttr)
        nAttr.setDefault(0.0, 0.0, 0.0)
        # Slab thickness (created once — the duplicate was removed).
        hk.mThickness = nAttr.create("thickness", "t", OpenMaya.MFnNumericData.kFloat, 1.0)
        _set_input_attr_flags(nAttr)
        # Phase-function selector plus its two tunable parameters.
        hk.mPhaseFunction = eAttr.create("phaseFunction", "pf")
        _set_input_attr_flags(eAttr)
        PhaseFunctions = ["Isotropic",
                          "Henyey-Greenstein",
                          "Rayleigh",
                          "Kajiya-Kay",
                          "Micro-Flake"
                          ]
        for i in range(len(PhaseFunctions)):
            eAttr.addField(PhaseFunctions[i], i)
        # Default to Isotropic
        eAttr.setDefault(0)
        hk.mPhaseFunctionHGG = nAttr.create("phaseFunctionHGG", "pfhgg", OpenMaya.MFnNumericData.kFloat, 0.0)
        _set_input_attr_flags(nAttr)
        hk.mPhaseFunctionMicroFlakeStdDev = nAttr.create("phaseFunctionMFSD", "pfmfsd", OpenMaya.MFnNumericData.kFloat, 0.05)
        _set_input_attr_flags(nAttr)
        # Output color: readable only, not stored or written by users.
        hk.mOutColor = nAttr.createColor("outColor", "oc")
        nAttr.setStorable(0)
        nAttr.setHidden(0)
        nAttr.setReadable(1)
        nAttr.setWritable(0)
    except:
        sys.stderr.write("Failed to create attributes\n")
        raise
    try:
        hk.addAttribute(hk.mMaterial)
        hk.addAttribute(hk.mUseSigmaSA)
        hk.addAttribute(hk.mSigmaS)
        hk.addAttribute(hk.mSigmaA)
        hk.addAttribute(hk.mUseSigmaTAlbedo)
        hk.addAttribute(hk.mSigmaT)
        hk.addAttribute(hk.mAlbedo)
        hk.addAttribute(hk.mThickness)
        hk.addAttribute(hk.mPhaseFunction)
        hk.addAttribute(hk.mPhaseFunctionHGG)
        hk.addAttribute(hk.mPhaseFunctionMicroFlakeStdDev)
        hk.addAttribute(hk.mOutColor)
    except:
        sys.stderr.write("Failed to add attributes\n")
        raise
# initialize the script plug-in
def initializePlugin(mobject):
    """Entry point Maya calls on plug-in load: register the shader node."""
    fn_plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        fn_plugin.registerNode(
            kPluginNodeName,
            kPluginNodeId,
            nodeCreator,
            nodeInitializer,
            OpenMayaMPx.MPxNode.kDependNode,
            kPluginNodeClassify,
        )
    except:
        sys.stderr.write("Failed to register node: %s" % kPluginNodeName)
        raise
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Entry point Maya calls on plug-in unload: deregister the shader node."""
    fn_plugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        fn_plugin.deregisterNode(kPluginNodeId)
    except:
        sys.stderr.write("Failed to deregister node: %s" % kPluginNodeName)
        raise
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
import pdb
import time
import json
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from scrapy.http import FormRequest
from collections import OrderedDict
import pandas
class RealtycompassdataPySpider(scrapy.Spider):
    """Crawl realtycompass.com search results, rendering pages through Splash."""

    name = "realtycompassdata.py"
    allowed_domains = ["realtycompass.com"]
    # start_urls =['https://www.realtycompass.com/search-realtycompass.php?q=Yelahanka%20bangalore']

    def __init__(self, *args, **kwargs):
        # Seed the crawl with a single hard-coded search query.
        self.start_url = []
        url = 'https://www.realtycompass.com/search-realtycompass.php?q=Yelahanka%20bangalore'
        self.start_url.append(url)
        super(RealtycompassdataPySpider, self).__init__(*args, **kwargs)

    def start_requests(self):
        # Render every seed URL through Splash so JS-generated content loads.
        for url in self.start_url:
            yield SplashRequest(url, callback=self.parse)

    def parse(self, response):
        # BUG FIX: a leftover pdb.set_trace() here suspended every crawl at
        # the first response.  Extraction logic still needs to be written.
        pass
|
import numpy as np
import math
from function import khoang_cach, mid_point
def head_pose_ratio(nose, left_eye, right_eye):
    """Compute head-pose ratios from the nose tip and the two eye regions.

    :param nose: (x, y) nose-tip landmark
    :param left_eye: landmark array; left_eye[0][0] and left_eye[0][3] are the
        eye corners (assumed from the indexing -- TODO confirm against caller)
    :param right_eye: same layout as left_eye
    :return: (n_ratio_1, n_ratio_2, c_ratio_1, c_ratio_2, x5, x6) where the
        ratios compare projected nose-to-eye-center distances and x5/x6 are
        the eye-center-to-nose slopes in integer degrees.
    """
    g_t = mid_point(left_eye[0][0], left_eye[0][3])
    g_p = mid_point(right_eye[0][0], right_eye[0][3])
    # Projections of each eye center onto the nose's horizontal/vertical axes.
    x2 = (g_t[0], nose[1])
    x1 = (g_p[0], nose[1])
    y2 = (nose[0], g_t[1])
    y1 = (nose[0], g_p[1])
    # Distances, with zero replaced by 1 so the ratios below never divide by
    # zero (same policy the original applied with explicit if-checks).
    kc_gt_x2 = khoang_cach(g_t, x2) or 1
    kc_gp_x1 = khoang_cach(g_p, x1) or 1
    kc_gt_y2 = khoang_cach(g_t, y2) or 1
    kc_gp_y1 = khoang_cach(g_p, y1) or 1
    # abs() replaces the manual negative flips; the distances are already
    # non-negative, so this is purely defensive.
    n_ratio_1 = abs(kc_gp_x1 / kc_gt_x2)
    n_ratio_2 = abs(kc_gp_y1 / kc_gt_y2)
    c_ratio_1 = abs(kc_gp_x1 / kc_gp_y1)
    c_ratio_2 = abs(kc_gt_x2 / kc_gt_y2)
    # BUG FIX: the original divided by (g_p[0] - nose[0]) and
    # (g_t[0] - nose[0]) unguarded, raising ZeroDivisionError whenever the
    # nose aligned vertically with an eye center; reuse the zero->1 policy.
    dx_p = (g_p[0] - nose[0]) or 1
    dx_t = (g_t[0] - nose[0]) or 1
    n = (g_p[1] - nose[1]) / dx_p
    m = (g_t[1] - nose[1]) / dx_t
    x5 = int(math.degrees(math.atan(n)))
    x6 = int(math.degrees(math.atan(m))) * (-1)
    return n_ratio_1, n_ratio_2, c_ratio_1, c_ratio_2, x5, x6
|
# -*- coding: utf-8 -*-
"""Admin models and registration for trivia app."""
# Part of Trebek (https://github.com/whutch/trebek)
# :copyright: (c) 2018 Will Hutcheson
# :license: MIT (https://github.com/whutch/trebek/blob/master/LICENSE.txt)
from django.contrib import admin
from . import models
# Register every trivia model with the default admin site.
for _model in (
    models.UserData,
    models.Category,
    models.Question,
    models.Game,
    models.GameRound,
    models.CategoryState,
    models.QuestionState,
    models.Player,
):
    admin.site.register(_model)
|
import string

# Demo sentence (the typos are part of the original sample text).
s = 'The quick brown for jumped over the lazy doc.'
print(s)
# string.capwords() upper-cases the first letter of every word.
print(string.capwords(s))
|
import argparse
import json
import os.path as osp

import gym

from agent import MaxPressureAgent, IntersectionAgent
from environment import TSCEnv
from generator import LaneVehicleGenerator
from metric import TravelTimeMetric
from plan import *
from world import World
# parse args
parser = argparse.ArgumentParser(description='Run Example')
parser.add_argument('config_file', type=str, help='path of config file')
parser.add_argument('--thread', type=int, default=16, help='number of threads')
parser.add_argument('--episodes', type=int, default=5, help='number of episodes')
parser.add_argument('--steps', type=int, default=3600, help='number of steps')
args = parser.parse_args()
# open files
# Resolve roadnet/flow file paths relative to the config's "dir" entry.
with open(args.config_file) as f:
    config = json.load(f)
    roadnet_file = osp.join(config["dir"], config["roadnetFile"])
    flow_file = osp.join(config["dir"], config["flowFile"])
# create world
world = World(args.config_file, thread_num=args.thread)
# create agents
# One MaxPressure signal-control agent per intersection, observing the
# incoming lane vehicle counts.
agents = []
for i in world.intersections:
    action_space = gym.spaces.Discrete(len(i.phases))
    agents.append(MaxPressureAgent(
        action_space, i, world,
        LaneVehicleGenerator(world, i, ["lane_count"], in_only=True)
    ))
# route plan agents
# A separate guidance agent per intersection for vehicle re-routing.
guide_agents = []
for inter in world.intersections:
    guide_agents.append(IntersectionAgent(inter))
# create metric
metric = TravelTimeMetric(world)
# create env
env = TSCEnv(world, agents, metric)
# Plan agent
a_star_plan = A_Star_Plan(roadnet_file)
# Record agent
# Aggregates travel statistics every 300 steps (min 5 samples per edge).
record = Record(a_star_plan, interval=300, min_reference=5)
# Vehicle control agent
vc = VehicleControl()
def train():
    """Run args.episodes training episodes of args.steps simulation steps,
    combining MaxPressure signal control with learning route-guidance agents.
    """
    indexs = {}
    for agent in guide_agents:
        #agent.load_model()
        indexs[agent.id] = agent.index
    for e in range(args.episodes):
        obs = env.reset()
        for i in range(args.steps):
            if i % 1000 == 0:
                print("Episode: ", e, " Time: ", i)
            # Refresh travel-time statistics consumed by the A* planner.
            record.update(world, i)
            waiting = world.eng.get_lane_waiting_vehicle_count()
            # Each guidance agent suggests an action from the current queues.
            suggestions = {}
            for agent in guide_agents:
                suggestions[agent.id] = agent.get_action(waiting)
            vc.replan(world, a_star_plan, record, suggestions, indexs)
            for agent in guide_agents:
                agent.remember(waiting, i)
                # Periodic experience replay and target-network sync.
                if i % agent.update_model_freq == agent.update_model_freq - 1:
                    agent.replay()
                if i % agent.update_target_model_freq == agent.update_target_model_freq - 1:
                    agent.update_target_network()
            # Signal-control agents act on their own per-intersection obs.
            actions = []
            for agent_id, agent in enumerate(agents):
                actions.append(agent.get_action(obs[agent_id]))
            obs, rewards, dones, info = env.step(actions)
        vc.summary()
        vc.reset()
        print("Episode: ", e, " Final travel time: ", env.eng.get_average_travel_time())
        print()
def test():
    """Run one evaluation episode with route guidance (no learning updates).

    NOTE(review): ``running_v`` below is not defined anywhere in this module
    (train() passes the lane-waiting counts instead), so this function raises
    NameError as written -- confirm the intended input before use.
    """
    obs = env.reset()
    for i in range(args.steps):
        record.update(world, i)
        Q_values = {}
        indexs = {}
        for agent in guide_agents:
            Q_values[agent.id] = agent.get_values(running_v)
            indexs[agent.id] = agent.index
        vc.replan(world, a_star_plan, record, Q_values, indexs)
        actions = []
        for agent_id, agent in enumerate(agents):
            actions.append(agent.get_action(obs[agent_id]))
        obs, rewards, dones, info = env.step(actions)
    vc.summary()
    print("Final travel time: ", env.eng.get_average_travel_time())
def main():
    """Entry point: run the training loop."""
    train()


if __name__ == '__main__':
    # Guarding the call lets this module be imported without side effects.
    main()
# This coding is to make a local backup of following mathematics competition problems and solutions
# provided on AOPS website, in case they do not provide contents in the future, and also for convenience
# to practice the problems on local.
#
# AMC 8
# AMC 10
# AMC 12
# AIME
# USAJMO
# USAMO
#
import urllib.request
import urllib.error
import logging
def crawl_url(url, length, withSeperatedPage):
    """Download a contest's overview, problems list, optional answer key and
    every individual problem page into ../html/.

    :param url: AOPS wiki page URL for the contest (no trailing slash)
    :param length: number of problems to fetch
    :param withSeperatedPage: when True also fetch the _Answer_Key page
        (parameter name kept as-is to preserve the public interface)
    """
    # get filename after the last / character
    filenamebase = url[url.rindex('/') + 1 : ]
    urllib.request.urlretrieve(url, '../html/' + filenamebase + '.html')
    urllib.request.urlretrieve(url + '_Problems', '../html/' + filenamebase + '_Problems' + '.html')
    if (withSeperatedPage):
        urllib.request.urlretrieve(url + '_Answer_Key', '../html/' + filenamebase + '_Answer_Key' + '.html')
    for i in range(length):
        index = str(i + 1)
        # BUG FIX: '../html/' was passed to logging.info as a second
        # positional (%-format) argument with no placeholder in the message,
        # making the logging module report a formatting error on every call.
        logging.info('Downloading %s_Problems/Problem_%s', url, index)
        try:
            urllib.request.urlretrieve(url + '_Problems/Problem_' + index, '../html/' + filenamebase + '_Problem_' + index + '.html')
        except urllib.error.URLError as e:
            # Best effort: log and continue with the next problem page.
            logging.warning('Error in Downloading ' + url + '_Problems/Problem_' + index)
            # raise RuntimeError("Failed to download '{}'. '{}'".format(url, e.reason))
WIKI = 'https://artofproblemsolving.com/wiki/index.php/'

# AMC 8 (named AJHSME before 1999).
for year in range(1985, 1999):
    crawl_url(WIKI + str(year) + '_AJHSME', 25, True)
for year in range(1999, 2020):
    crawl_url(WIKI + str(year) + '_AMC_8', 25, True)

# AMC 10 (a single test before 2002, A/B versions afterwards).
for year in range(2000, 2021):
    if year < 2002:
        crawl_url(WIKI + str(year) + '_AMC_10', 25, True)
    else:
        crawl_url(WIKI + str(year) + '_AMC_10A', 25, True)
        crawl_url(WIKI + str(year) + '_AMC_10B', 25, True)

# AMC 12 (AHSME until 2000, with a problem count that shrank over the years).
for year in range(1950, 2021):
    if year < 1960:
        crawl_url(WIKI + str(year) + '_AHSME', 50, True)
    elif year < 1968:
        crawl_url(WIKI + str(year) + '_AHSME', 40, True)
    elif year < 1974:
        crawl_url(WIKI + str(year) + '_AHSME', 35, True)
    elif year < 2000:
        crawl_url(WIKI + str(year) + '_AHSME', 30, True)
    elif year < 2002:
        crawl_url(WIKI + str(year) + '_AMC_12', 25, True)
    else:
        crawl_url(WIKI + str(year) + '_AMC_12A', 25, True)
        crawl_url(WIKI + str(year) + '_AMC_12B', 25, True)

# AIME (split into I/II from 2000).
for year in range(1983, 2021):
    if year < 2000:
        crawl_url(WIKI + str(year) + '_AIME', 15, True)
    else:
        crawl_url(WIKI + str(year) + '_AIME_I', 15, True)
        crawl_url(WIKI + str(year) + '_AIME_II', 15, True)

# USAJMO and USAMO (olympiads carry no separate answer-key page).
for year in range(2010, 2021):
    crawl_url(WIKI + str(year) + '_USAJMO', 6, False)
for year in range(1972, 2021):
    crawl_url(WIKI + str(year) + '_USAMO', 5 if year < 1996 else 6, False)
|
import tensorflow as tf
from tensorflow.python.keras import backend as K
# Load MNIST and scale pixel values from [0, 255] to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Debug output: dumps the first two raw training images to stdout.
print(x_train[0:2])
# Deliberately deep stack of nine 128-unit layers -- this script exists to
# demonstrate per-layer gradient logging (see ExtendedTensorBoard below).
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu', name='l_1st'),
    tf.keras.layers.Dense(128, activation='relu', name='l_2nd'),
    tf.keras.layers.Dense(128, activation='relu', name='l_3rd'),
    tf.keras.layers.Dense(128, activation='relu', name='l_4th'),
    tf.keras.layers.Dense(128, activation='relu', name='l_5th'),
    tf.keras.layers.Dense(128, activation='relu', name='l_6th'),
    tf.keras.layers.Dense(128, activation='relu', name='l_7th'),
    tf.keras.layers.Dense(128, activation='relu', name='l_8th'),
    tf.keras.layers.Dense(128, activation='relu', name='l_9th'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax', name='dense10')
])
l = tf.keras.losses.SparseCategoricalCrossentropy()
opt = tf.keras.optimizers.Adam(0.001)
model.compile(optimizer=opt, loss=l, metrics=['accuracy'])
class ExtendedTensorBoard(tf.keras.callbacks.TensorBoard):
    """TensorBoard callback extended to also log weight gradients as histograms."""

    def _log_gradients(self, epoch):
        """Compute gradients on the first 100 training samples and write one
        histogram per trainable weight tensor to the train writer.
        """
        step = tf.cast(epoch, dtype=tf.int64)
        writer = self._train_writer
        # writer = self._get_writer(self._train_run_name)
        with writer.as_default(), tf.GradientTape() as g:
            # here we use test data to calculate the gradients
            # NOTE(review): despite the comment above, this slices x_train,
            # not test data -- confirm which was intended.
            _x_batch = x_train[:100]
            _y_batch = y_train[:100]
            g.watch(tf.convert_to_tensor(_x_batch))
            _y_pred = self.model(_x_batch)  # forward-propagation
            loss = self.model.loss(y_true=_y_batch, y_pred=_y_pred)  # calculate loss
            gradients = g.gradient(loss, self.model.trainable_weights)  # back-propagation
            # In eager mode, grads does not have name, so we get names from model.trainable_weights
            for weights, grads in zip(self.model.trainable_weights, gradients):
                tf.summary.histogram(
                    weights.name.replace(':', '_')+'_grads', data=grads, step=step)
        writer.flush()

    def on_epoch_end(self, epoch, logs=None):
        # def on_train_batch_end(self, batch, logs=None):
        # This function overwrites the on_epoch_end in tf.keras.callbacks.TensorBoard
        # but we do need to run the original on_epoch_end, so here we use the super function.
        super(ExtendedTensorBoard, self).on_epoch_end(epoch, logs=logs)
        # super(ExtendedTensorBoard, self).on_train_batch_end(batch, logs=logs)
        if self.histogram_freq and epoch % self.histogram_freq == 0:
            self._log_gradients(epoch)
# histogram_freq=1 -> gradients are logged after every epoch (on_epoch_end).
ee = ExtendedTensorBoard(histogram_freq=1, write_images=True, update_freq='batch')
model.fit(x_train, y_train, epochs=10, callbacks=[ee], validation_data=(x_test, y_test), )
# model.fit(x_train, y_train, epochs=5, callbacks=[gradient_cb, tensorboard_cb])
"""
__init__.py adding folders to the __path__ object
"""
from sys import path
path.insert(0, "src/controllers")
|
import os
import csv
import time
import datetime
import subprocess
from multiprocessing.pool import ThreadPool
from config import *
multi_result = []
def pull_price(resource_name, size, now_time):
    """Probe the Azure spot price of *size* in resource group *resource_name*.

    Runs ``az vm create`` with the configured ``max_price`` and parses the
    "... is lower than the current spot price ... for Azure Spot VM" error
    text to extract the quoted price.

    :return: (now_time, resource_name, size, price); price is -inf when the
        expected error text is absent (creation succeeded, quota error, or
        the CLI output format changed).
    """
    # NOTE(review): the fixed '111' suffix makes every probe use the same VM
    # name -- confirm concurrent probes in one resource group cannot collide.
    vm_name = resource_prefix + 'vm-' + '111'
    command = " az vm create \
    --resource-group {} \
    --name {} \
    --image {} \
    --admin-username azureuser \
    --generate-ssh-keys \
    --priority {}\
    --max-price {} \
    --eviction-policy {} \
    --size {}\
    --subscription {} \
    ".format(
        resource_name,
        vm_name,
        image,
        priority,
        max_price,
        eviction_policy,
        size,
        subscription
    )
    # print(command)
    status, res = subprocess.getstatusoutput(command)
    #print(res)
    # The spot price is quoted between these two phrases in the CLI output.
    result = res.split("is lower than the current spot price")
    try:
        result = result[1].split("for Azure Spot VM")
        price = result[0]
        return (now_time, resource_name, size, float(price.replace('USD', '').replace("'",'').strip()))
    except Exception as e:
        # Best effort: any parse failure is reported as "no price available".
        # print("Exception occurred with {}, {}".format(size, resource_name), str(e))
        # print('************************')
        return (now_time, resource_name, size, float('-inf'))
def callback(data):
    """Thread-pool callback: turn a pull_price result tuple into a result row.

    *data* is (time, region, size, cli_price); every other price column is
    filled with a -inf placeholder.
    """
    now_time, region, size, cli_price = data
    row = {
        'time': now_time,
        'region': region,
        'size': size,
        'api_price': float('-inf'),
        'cli_price': cli_price,
        'pay_as_you_go_price': float('-inf'),
        '1_year_reserved_price': float('-inf'),
        '3_year_reserved_price': float('-inf'),
        '%_saving_pay_as_you_go': float('-inf'),
        '%_saving_1y_reserved': float('-inf'),
        '%_saving_3y_reserved': float('-inf'),
    }
    multi_result.append(row)
def pull_multiple(resource_names, instance_sizes, now_time, is_windows_instances=False):
    """Probe spot prices for every (region, size) pair on a thread pool.

    Only sizes listed in config's ``spot_region_map`` for a region are probed.
    Results accumulate through callback() into the module-level
    ``multi_result``, which is cleared at the start of every call.

    :param is_windows_instances: unused here; kept for interface compatibility.
    :return: list of result-row dicts (see callback()).
    """
    global multi_result
    multi_result = []
    p = ThreadPool(n_threads)
    count = 0
    for resource_name in resource_names:
        for size in instance_sizes:
            # Region key = resource group name minus the test prefix.
            if size in spot_region_map[resource_name.replace('roshan-test-', '')]:
                print(resource_name, size)
                count += 1
                p.apply_async(pull_price, args=(resource_name.strip(), size.strip(), now_time,), callback=callback)
    print("Count ", count)
    p.close()
    p.join()
    return multi_result
# print(pull_multiple(['roshan-test-ukwest'], ['Standard_D2_v3'], str(datetime.datetime.now()))) |
## Given a list of points as a tuple (x, y) and an integer k,
## find the k closest points to the origin (0,0).
# Calculates a point's distance from the origin by
# adding up how many "steps" it would take to reach (0,0).
def distance(point):
    """Return the Manhattan (taxicab) distance of *point* from the origin."""
    return abs(point[0]) + abs(point[1])


def closest_points(points, k):
    """Return the k points closest to the origin by Manhattan distance.

    The result is ordered by distance; points at equal distance keep their
    original relative order (sorted() is stable), which matches the ordering
    the previous bucketed-dictionary implementation produced.

    :param points: list of (x, y) tuples
    :param k: number of points to return
    :return: list of k points, or an error string when k > len(points)
        (string return kept for backward compatibility with callers)
    """
    # Throws an error if the user asks for more points than are given.
    if len(points) < k:
        return "ERROR: Asking for {} closest points when only {} points exist.".format(k, len(points))
    # BUG FIX: the original returned ALL points when k == 0 (its early-exit
    # length check could never match); slicing yields the empty list.
    return sorted(points, key=distance)[:k]
if __name__ == "__main__":
print(closest_points([(0,0),(1,2), (-3, 4), (3, 1)], 2)) |
import numpy as np
import matplotlib.pyplot as plt
import argparse
import warnings
warnings.filterwarnings("ignore")
from Map import makeMap
from Dijkstra import runDijkstra
from AStar import runAStar
def booleanParser(val):
    """Interpret a CLI flag string: 't'/'T' means True, anything else False."""
    return val.lower() == 't'
def checkRes(sPts, gPts, res):
    """Check that the start and goal points lie on the resolution grid.

    :return: (start_ok, goal_ok) booleans -- True when both coordinates of
        the respective point are exact multiples of *res*.
    """
    start_ok = sPts[0] % res == 0 and sPts[1] % res == 0
    goal_ok = gPts[0] % res == 0 and gPts[1] % res == 0
    return start_ok, goal_ok
def run():
    """Build the map(s) and run Dijkstra / A* between the CLI start and goal.

    Reads its inputs from module-level globals assigned by the argument
    parser below (stNode, gNode, res, roboDia, clear, anime, w, dijk,
    AStar, All).
    """
    # The map uses image coordinates, so the y-axis is flipped: y -> 150 - y.
    stPt = (stNode[0],150-stNode[1])
    gPt = (gNode[0],150-gNode[1])
    cS,cG = checkRes(stPt,gPt,res)
    if cS and cG:
        if All:
            # Case 1: point robot (no obstacle dilation).
            print("Making the map for point robot")
            Map = makeMap(0,0)
            if Map.shape[1]<=stPt[0] or Map.shape[1]<=gPt[0] or gPt[1]<0 or stPt[1]<0:
                print("Start or Goal point is outside the Map")
                return
            # 255 marks free space; anything else is an obstacle cell.
            if Map[int(stPt[1]),int(stPt[0])] != 255 or Map[int(gPt[1]),int(gPt[0])] != 255:
                print("Start or Goal point is on the obstacle")
                return
            print("Starting Dijkstra")
            runDijkstra(Map, stPt, gPt, res, anime= anime)
            print("Starting A*")
            runAStar(Map, stPt, gPt, res, anime= anime, weight= w)
            # Case 2: rigid body -- obstacles inflated by diameter+clearance.
            print("Making the map for the rigid body so as to consider it as point")
            Map = makeMap(roboDia, clear)
            if Map.shape[1]<=stPt[0] or Map.shape[1]<=gPt[0] or gPt[1]<0 or stPt[1]<0:
                print("Start or Goal point is outside the Map")
                return
            if Map[int(stPt[1]),int(stPt[0])] != 255 or Map[int(gPt[1]),int(gPt[0])] != 255:
                print("Start or Goal point is on the obstacle")
                return
            print("Starting Dijkstra")
            runDijkstra(Map, stPt, gPt, res, anime= anime)
            print("Starting A*")
            runAStar(Map, stPt, gPt, res, anime= anime, weight= w)
        else:
            print("Making the map so that the rigid body is considered as point")
            Map = makeMap(roboDia, clear)
            # Preview the map with start (blue) and goal (red) markers.
            plt.imshow(Map)
            plt.plot(stPt[0],stPt[1],'bo')
            plt.plot(gPt[0],gPt[1],'ro')
            plt.show()
            if Map.shape[1]<=stPt[0] or Map.shape[1]<=gPt[0] or gPt[1]<0 or stPt[1]<0:
                print("Start or Goal point is outside the Map")
                return
            if Map[int(stPt[1]),int(stPt[0])] != 255 or Map[int(gPt[1]),int(gPt[0])] != 255:
                print("Start or Goal point is on the obstacle")
                return
            if dijk:
                print("Starting Dijkstra")
                runDijkstra(Map, stPt, gPt, res, anime= anime)
            if AStar:
                print("Starting A*")
                runAStar(Map, stPt, gPt, res, anime= anime, weight= w)
    else:
        # A point not aligned to the resolution grid can never be visited.
        if not cS:
            print("Change the start point or resolution, path cannot be found with this resolution")
        else:
            print("Change the goal point or resolution, path cannot be found with this resolution")
    plt.show()
# Command-line interface.  NOTE: parse_args() runs at import time, so
# importing this module executes the planner configuration.
Parser = argparse.ArgumentParser()
# BUG FIX: several help strings below stated defaults that did not match the
# actual `default=` values ((40,20) vs [40,130], (220,140) vs [220,10],
# "5.0" vs the integer 5); the help text now reflects the real defaults.
Parser.add_argument('--Start', default=[40, 130], nargs='+', type=float,
                    help='enter the start point as tuple, Default is (40,130)')
Parser.add_argument('--Goal', default=[220, 10], nargs='+', type=float,
                    help='enter the goal point as tuple, Default is (220,10)')
Parser.add_argument('--Res', default=5, type=int,
                    help='enter the resolution required, Default is 5')
Parser.add_argument('--RoboDia', default=10, type=float,
                    help='enter the size of the robot, for a point please enter 0. Default is 10')
Parser.add_argument('--Clearance', default=0, type=float,
                    help='enter the clearance required, Default is 0')
Parser.add_argument('--AStar', default='T',
                    help='boolean to determine whether to run A* or not, Default is True')
Parser.add_argument('--Weight', default=1.0, type=float,
                    help='Weight value for weighted A*, Default is 1')
Parser.add_argument('--Dijk', default='T',
                    help='boolean to determine whether to run Dijkstra or not, Default is True')
Parser.add_argument('--Anima', default='T',
                    help='boolean to show the live animation of path exploration, Default is True')
Parser.add_argument('--checkAll', default='T',
                    help='If True runs all the 4 cases given the robot dia and clearance, Default is True')
Args = Parser.parse_args()

# Unpack the parsed arguments into the module-level names run() reads.
stNode = tuple(Args.Start)
gNode = tuple(Args.Goal)
res = Args.Res
roboDia = Args.RoboDia
clear = Args.Clearance
AStar = booleanParser(Args.AStar)
dijk = booleanParser(Args.Dijk)
anime = booleanParser(Args.Anima)
w = Args.Weight
All = booleanParser(Args.checkAll)

if __name__ == '__main__':
    run()
#!/usr/bin/env python
import os
import pytest
import pexpect
# Expect timeout (seconds) used by every check below.
TIMEOUT_SECONDS = 2

# Module-level handle to the spawned pexpect child; (re)bound by the fixtures.
child = None
def check_result(pattern):
    """Expect *pattern* on the spawned child within TIMEOUT_SECONDS.

    child.expect returns the index of the first matching alternative, so 0
    means *pattern* matched before EOF or a timeout occurred.
    """
    index = child.expect(
        [pattern, pexpect.EOF, pexpect.TIMEOUT], timeout=TIMEOUT_SECONDS
    )
    try:
        assert index == 0
    except AssertionError:
        # Disabled debug dump of pexpect's screen buffer, kept around for
        # troubleshooting failing expectations.
        """ print(
        "\n==== Screen buffer raw ====\n",
        child._buffer.getvalue(),
        "\n^^^^ Screen buffer raw ^^^^",
        )
        print(
        "==== Screen buffer ====\n",
        child._buffer.getvalue().decode("utf8"),
        "\n^^^^ Screen buffer ^^^^",
        )"""
        raise
def check_prompt(keys, pattern):
    """Type *keys* without a newline, expect *pattern*, then reset the prompt."""
    child.write(keys)
    check_result(pattern)
    child.write("\003")  # ctrl-c: reset inputs
def check_command(command, pattern):
    """Send *command* (with newline) and expect *pattern* in the output."""
    child.sendline(command)
    check_result(pattern)
@pytest.fixture
def child():
    """Spawn the DebugLibrary interactive shell under coverage.

    NOTE(review): this fixture rebinds the module-level ``child`` that the
    helper functions above read; the fixture function itself is shadowed by
    the spawned process object -- confirm this pattern is intended.
    """
    global child
    child = pexpect.spawn("coverage", ["run", "--append", "DebugLibrary/shell.py"])
    child.expect("Enter interactive shell", timeout=TIMEOUT_SECONDS * 3)
    yield child
    check_command("exit", "Exit shell.")
    child.wait()
@pytest.fixture
def robot_child():
    """Spawn DebugLibrary's shell running tests/step.robot under coverage."""
    global child
    # Command "coverage run robot tests/step.robot" does not work,
    # so start the program using DebugLibrary's shell instead of "robot".
    child = pexpect.spawn(
        "coverage", ["run", "--append", "DebugLibrary/shell.py", "tests/step.robot"]
    )
    child.expect('Type "help" for more information.*>', timeout=TIMEOUT_SECONDS * 3)
    yield child
    # Exit the debug mode started by Debug keyword.
    check_command("c", "Exit shell.*" "another test case.*" "end")  # continue
    # Exit the interactive shell started by "DebugLibrary/shell.py".
    check_command("c", "Report: ")
    child.wait()
    # Clean up robot test output
    os.remove("log.html")
    os.remove("output.xml")
    os.remove("report.html")
def test_autocomplete(child):
    """Tab-completion suggests commands, keywords and library names."""
    check_prompt("key\t", "keywords")
    check_prompt("key\t", "Keyword Should Exist")
    check_prompt("k \t", "keywords.*Keyword Should Exist")
    check_prompt("keywords \t", "BuiltIn.*DebugLibrary")
    check_prompt("keywords debug\t", "DebugLibrary")
    # check_prompt('debu\t', 'DebugLibrary')
    # check_prompt('DebugLibrary.\t', 'Debug If')
    check_prompt("get\t", "Get Count")
    check_prompt("get\t", "Get Time")
    # check_prompt('selenium http://google.com \t', 'firefox.*chrome')
    # check_prompt('selenium http://google.com fire\t', 'firefox')
def test_help(child):
    """The libs/keywords/help commands list libraries and keyword docs."""
    check_command("libs", "Imported libraries:.*DebugLibrary.*Builtin libraries:")
    check_command("help libs", "Print imported and builtin libraries,")
    check_command("libs \t", "-s")
    check_command("libs -s", "ibraries/BuiltIn.py.*Builtin libraries:")
    check_command("?keywords", "Print keywords of libraries,")
    check_command("k debuglibrary", "Debug")
    check_command("k nothing", "not found library")
    check_command("d Debug", "Open a interactive shell,")
def test_variables(child):
    """Assigning and reading robot variables through the shell."""
    # NOTE(review): the doubled braces @{{list}} are sent literally (no
    # str.format is applied anywhere); confirm the shell/expect round trip
    # really relies on the doubled form.
    check_command(
        "@{{list}} = Create List hello world", "@{{list}} = ['hello', 'world']"
    )
    check_command("${list}", "['hello', 'world']")
    check_command(
        "&{dict} = Create Dictionary name=admin", "&{dict} = {'name': 'admin'}"
    )
    check_command("${dict.name}", "admin")
def test_auto_suggest(child):
    """History-based auto-suggestion completes a previously run command."""
    check_command("get time", "'*'")
    check_prompt("g", "et time")
def test_errors(child):
    """Failing keywords and unknown names surface readable error messages."""
    check_command("fail", "AssertionError")
    check_command("nothing", "No keyword with name 'nothing' found.")
    check_command("get", "execution failed:.*No keyword with name 'get' found.")
def test_debug_if(child):
    """Debug If opens a nested shell only when its condition is true."""
    check_command("${secs} = Get Time epoch", "secs.* = ")
    check_command("Debug If ${secs} > 1", "Enter interactive shell")
    check_command("exit", "Exit shell.")
    check_command("Debug If ${secs} < 1", "> ")
def test_some_rf_core_keywords(child):
    """A couple of Robot Framework BuiltIn keywords run inside the shell."""
    check_command("log to console hello", "hello")
    check_command("get time", ".*-.*-.* .*:.*:.*")
def test_step_functionality(robot_child):
    """Stepping commands (s/n/l/ll and bare repeat) inside a robot run."""
    check_command("list", "Please run `step` or `next` command first.")
    check_command(
        "s",  # step
        "/tests/step.robot.7..*"
        "-> log to console working.*"
        "=> BuiltIn.Log To Console working",
    )
    check_command("l", " 7 -> log to console working")  # list
    check_command(
        "n",  # next
        "/tests/step.robot.8..*"
        "@.* = Create List hello world.*"
        "@.* = BuiltIn.Create List hello world",
    )
    check_command(
        "",  # just repeat last command
        "/tests/step.robot.11..*"
        "-> log to console another test case.*"
        "=> BuiltIn.Log To Console another test case",
    )
    check_command(
        "l",  # list
        " 6 debug.*"
        " 7 log to console working.*"
        " 8 @.* = Create List hello world.*"
        " 9.*"
        " 10 test2.*"
        " 11 -> log to console another test case.*"
        " 12 log to console end",
    )
    check_command(
        "ll",  # longlist
        " 10 test2.*"
        " 11 -> log to console another test case.*"
        " 12 log to console end",
    )
|
import re
import json
from typing import Union
from grandpybot.helpers import base_path
class Parser:
    """The parser used for user questions input.

    Attributes:
        _stopwords (set): Words considered as non-keywords.
        _punctuation (set): Punctuation characters treated as separators.
        _original_string (str): The string which will be parsed.
        _parsed_string (list): The parsed string as a list of keywords.
    """

    _stopwords: set
    _original_string: Union[str, None]
    _parsed_string: list
    _punctuation: set = {".", ",", "!", "?", ":", ";"}

    def __init__(self, language: str = 'fr', string=None):
        """Parser constructor.

        :param language: The language of the stopwords file to load.
        :param string: An optional string to parse later.
        """
        self._original_string = string
        self._parsed_string = []
        try:
            with open(base_path(f'stopwords_{language.lower()}.json')) as file:
                self._stopwords = set(json.load(file))
        except IOError:
            # Missing stopwords file: degrade gracefully and keep every word.
            self._stopwords = set()

    @property
    def original_string(self):
        """Get the original string."""
        return self._original_string

    @original_string.setter
    def original_string(self, value):
        """Set the original string to be parsed.

        Clears any previous parse result so it cannot leak into the next
        parse() call.

        :param value: The string to be parsed.
        """
        self._parsed_string.clear()
        self._original_string = value

    def parse(self, question: Union[str, None] = None):
        """Parse a string and return the considered keywords.

        :param question: The question input (falls back to original_string).
        :raises ValueError: When no string was supplied here or earlier.
        """
        if question is not None:
            self.original_string = question
        elif not self.original_string:
            raise ValueError("No string to be parsed.")
        cleaned = self._clean_string(self.original_string)
        pieces = list(dict.fromkeys(cleaned.split()))  # Removes duplicates
        # Finally, remove stopwords...
        self._parsed_string = [
            kw for kw in pieces
            if kw not in self._stopwords
        ]
        return self._parsed_string

    def _clean_string(self, string: str):
        """Clean a string for parsing (lowercase, split punctuation, drop
        apostrophe contractions).

        :param string: The string to clean.
        """
        string = string.lower().strip()
        return self._clean_apostrophe(self._split_punctuation(string))

    def _split_punctuation(self, string):
        """Surround all punctuation characters with spaces.

        :param string: The string on which to operate.
        :return: str
        """
        # Reserved characters for RegExp which require a '\' escape.
        reserved_chars = {'.', '?'}
        pattern = '|'.join({
            '\\' + char if char in reserved_chars else char
            for char in self._punctuation
        })
        return re.sub(pattern, self._surround_char, string)

    @staticmethod
    def _clean_apostrophe(string):
        """Remove all apostrophes and their prefixed letters.

        :param string: The string on which to operate.
        """
        return re.sub(r"[a-zA-Z]+\s*'", '', string)

    @staticmethod
    def _surround_char(match: re.Match, surrounded_by=" "):
        """Surround a regexp match with a given character.

        :param match: The Match object of a regexp.
        :param surrounded_by: the character used to surround the match.
        :return: str
        """
        # BUG FIX (simplification): the original computed
        # ``match.group(0).replace(match.group(0), match.group(0).center(3, c))``
        # which is exactly ``center(3, c)`` with a redundant replace() pass.
        return match.group(0).center(3, surrounded_by)

    def find_address(self, address_keywords: list = None):
        """Find and extract an address from the parsed string.

        :param address_keywords: A list of strings after which an
            address/place can follow.
        :return: str
        """
        address = ''
        if not isinstance(address_keywords, list):
            address_keywords = ["trouve", "situe", "adresse"]
        if len(self._parsed_string) < 1 and self.original_string:
            self.parse()
        for address_keyword in address_keywords:
            if address_keyword in self._parsed_string:
                i = self._parsed_string.index(address_keyword) + 1
                break
        else:
            # The for loop has gone through all the items without finding
            # an address keyword: start from the beginning.
            i = 0
        for word in self._parsed_string[i:]:
            if word in self._punctuation:
                break
            address += f'{word} '
        return address.rstrip()  # Remove the trailing whitespace at the end.
|
from django.db import models
# Create your models here.
class Code(models.Model):
    """Hierarchical classification code (a self-referencing tree)."""
    # Explicit default manager (equivalent to Django's implicit one).
    objects = models.Manager()
    id = models.CharField(max_length = 10, primary_key=True)
    name = models.CharField(max_length=512)
    # Parent code in the hierarchy; root nodes have parent=None.  Deleting a
    # parent detaches its children (SET_NULL) rather than cascading.
    parent = models.ForeignKey('self', models.SET_NULL, blank=True, null=True)
|
# Reads a count n (unused after parsing) and a sequence of tokens where "1"
# pushes onto a stack and any other token pops.  Prints -- inferred from the
# bookkeeping below; confirm against the original problem statement:
#   f  - maximum nesting depth reached
#   s  - 1-based position where a new maximum depth was last reached
#   t  - length of the longest balanced (depth back to 0) segment
#   fo - 1-based start position of that longest balanced segment
n = int(input())
from collections import deque
stack = deque()
l = []
l.append(0)
l1 = []          # positions where a new maximum depth was reached
l2 = []          # lengths of completed balanced segments
l2.append(0)
l3 = []          # end positions of completed balanced segments
l3.append(0)
l4 = []          # start positions of record-length balanced segments
l4.append(0)
s = input().split()
j = 0            # current 1-based token position
m2 = 0           # running maximum depth
m1 = 0           # running maximum balanced-segment length
k = 0            # number of completed balanced segments
for i in s:
    j = j+1
    if i == "1":
        stack.append("Y")
        if len(stack)>m2:
            m2 = len(stack)
            l1.append(j)
    else:
        stack.pop()
        if len(stack) == 0:
            # A balanced segment just closed; record its length and end.
            l2.append(j-l3[k])
            l3.append(j)
            k = k+1
            if(l3[k] -l3[k-1])>m1:
                m1 = l3[k] - l3[k-1]
                l4.append(l3[k-1]+1)
# NOTE(review): 's' is rebound here, shadowing the token list read above.
s = l1[-1]
f = m2
t = max(l2)
fo = l4[-1]
print(f, s, t, fo)
"""
Copyright 2014 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.engine.behaviors import BaseBehavior
from cloudcafe.cloudkeep.barbican.containers.models.container import SecretRef
from cloudcafe.cloudkeep.common.responses import CloudkeepResponse
class ContainerBehaviors(BaseBehavior):
    """High-level helpers around ContainerClient that track every container
    created so tests can clean them all up afterwards.
    """

    def __init__(self, client, secret_behaviors):
        # client: ContainerClient used for all container CRUD calls.
        # secret_behaviors: helper used to create secrets for containers.
        self.client = client
        self.secret_behaviors = secret_behaviors
        # HATOAS references of every container created through this class.
        self.created_containers = []

    def create_container(self, name, container_type, secret_refs):
        """ Proxies the ContainerClient's create_container and captures
        the resulting HATOAS ref for cleanup.
        """
        resp = self.client.create_container(
            name=name, container_type=container_type, secret_refs=secret_refs)
        if resp.entity and resp.entity.reference:
            self.created_containers.append(resp.entity.reference)
        return CloudkeepResponse(resp=resp, entity=resp.entity)

    def create_container_with_secret(self, name="test_container",
                                     secret_name="test_secret"):
        """Create a secret and a generic container with that secret inside.

        :param name: The name of the container
        :param secret_name: The name of the secret in the container
        :returns: A tuple containing the responses from the secret creation and
            the container creation, in that order
        """
        secret_resp = self.secret_behaviors.create_secret_from_config()
        secret_refs = [SecretRef(name=secret_name, ref=secret_resp.ref)]
        container_resp = self.create_container(name, "generic", secret_refs)
        return (secret_resp, container_resp)

    def remove_from_created_containers(self, container_ref):
        """ Clean-up helper method """
        if container_ref in self.created_containers:
            self.created_containers.remove(container_ref)

    def delete_container(self, container_ref):
        """ Proxies the ContainerClient's delete_container"""
        # Drop the tracking entry first so a failed delete is not retried
        # by delete_all_created_containers.
        self.remove_from_created_containers(container_ref)
        return self.client.delete_container(container_ref)

    def delete_all_created_containers(self):
        """ Clean-up helper method to delete all containers created by
        using this class.
        """
        # Iterate a copy: delete_container mutates created_containers.
        for container_ref in list(self.created_containers):
            self.delete_container(container_ref)
        self.created_containers = []

    def create_rsa_container(self, name, priv_key_ref, pub_key_ref,
                             priv_pass_ref=None):
        """ Creates an RSA container based on pre-created HATOAS secret
        reference urls.

        :param name: Container name
        :param priv_key_ref: HATOAS reference to a secret containing
            a private key.
        :param pub_key_ref: HATOAS reference to a secret containing
            a public key.
        :param priv_pass_ref: optional HATOAS reference to a secret
            containing a private key passphrase.
        """
        priv_pass = SecretRef(name='private_key_passphrase', ref=priv_pass_ref)
        priv = SecretRef(name='private_key', ref=priv_key_ref)
        pub = SecretRef(name='public_key', ref=pub_key_ref)
        refs = [pub, priv]
        # The passphrase ref is only attached when one was supplied.
        if priv_pass_ref:
            refs.append(priv_pass)
        return self.create_container(name=name,
                                     container_type='rsa',
                                     secret_refs=refs)
|
import torch
from torch import nn
class ClampLoss(nn.Module):
    """Evaluate a base criterion on value-clamped tensors, plus a small
    eta-weighted penalty computed on the raw (unclamped) tensors.

    Equivalent to::

        module(clamp(input), clamp(target)) + eta * module(input, target)
    """

    def __init__(self, module, min_value=0, max_value=1, eta=0.001):
        super().__init__()
        self.module = module
        self.min_value = min_value
        self.max_value = max_value
        self.eta = eta

    def forward(self, input, target):
        # Raw loss keeps a gradient signal for values outside the clamp range.
        raw_loss = self.module(input, target)
        clamped_input = torch.clamp(input, self.min_value, self.max_value)
        clamped_target = torch.clamp(target, self.min_value, self.max_value)
        clamped_loss = self.module(clamped_input, clamped_target)
        return clamped_loss + raw_loss * self.eta
|
"""create results schema
Revision ID: 2e7dac655911
Revises:
Create Date: 2020-09-15 21:06:39.157407
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2e7dac655911'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``results`` schema (no-op when it already exists)."""
    op.execute("CREATE SCHEMA IF NOT EXISTS results")
def downgrade():
    """Drop the ``results`` schema.

    NOTE(review): a plain DROP fails if the schema still contains objects;
    presumably intended since upgrade creates it empty — confirm.
    """
    op.execute("DROP SCHEMA results")
|
import time
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from numpy import random, mean, median
import GiniCoef
import candidates
import citizens
import financers
import parameters
import os
# Generate agents
def generate_citizens(num_cit, inc_list):
    """Create *num_cit* Citizens agents.

    The i-th citizen gets id ``i`` and income ``inc_list[i]``.

    :param num_cit: number of citizens to create
    :param inc_list: indexable sequence of incomes (must have >= num_cit items)
    :returns: list of ``citizens.Citizens`` instances
    """
    # comprehension instead of the original append loop (same order, same args)
    return [citizens.Citizens(i, inc_list[i]) for i in range(num_cit)]
def reset_universe(ag, num_candidates, num_donors):
    """Sample candidates and donors from the population *ag*.

    Citizens are converted in place into Candidates/Financers.
    NOTE: numpy ``random.choice`` samples WITH replacement, so duplicates
    are possible — TODO confirm that is intended.

    :returns: tuple ``(candidates_array, donors_list)``
    """
    # Choosing candidates and signing-in with TSE ;)
    cand_ = random.choice(ag, num_candidates)
    # Plain loops instead of list comprehensions used only for side effects
    # Converting Citizens into Candidates class
    for c in cand_:
        candidates.Candidates.convert_to_candidate(c)
    # Initiating Candidates variables
    for c in cand_:
        c.start()
    # Selecting donors from general population
    don_ = random.choice(ag, num_donors)
    # Excluding Candidates from the Donors base
    don_ = [d for d in don_ if not isinstance(d, candidates.Candidates)]
    # Converting Citizens into Donors class
    for d in don_:
        financers.Financers.convert_to_donor(d)
    return cand_, don_
# Initiate simulation
def run_the_game(my_agents, num_candidates, num_donors):
    """Run one donation round for each of the three ceiling scenarios.

    For every case: sample candidates/donors, let each donor give to a random
    candidate (capped by the case's ceiling), then plot and record the Gini
    coefficient and the median donation.

    :returns: two dicts (case name -> list with this round's Gini / median)
    """
    # Testing three case scenarios
    print('')
    print('Testing three cases')
    # case name -> [description, ceiling value (None = no ceiling), plot color]
    cases = {'Caso 1 Teto percentual': ['Donation ceiling set at {}% of income'
                                        .format(parameters.income_percentage_case1 * 100),
                                        parameters.income_percentage_case1, 'blue'],
             'Caso 2 Teto nominal': ['Donation ceiling set at Nominal value of {}'
                                     .format(parameters.ceiling_amount), parameters.ceiling_amount, 'red'],
             'Caso 3 Sem teto': ['Donation with no ceiling', None, 'green']}
    average_gini = {'Caso 1 Teto percentual': [],
                    'Caso 2 Teto nominal': [],
                    'Caso 3 Sem teto': []}
    average_donation = {'Caso 1 Teto percentual': [],
                        'Caso 2 Teto nominal': [],
                        'Caso 3 Sem teto': []}
    for each in cases.keys():
        print('')
        print('{}: {}'.format(each, cases[each][0]))
        cand_, don_ = reset_universe(my_agents, num_candidates, num_donors)
        print('Number of candidates {}'.format(len(cand_)))
        print('Number of donors {}'.format(len(don_)))
        # Resetting total donated
        [d.reset_total_donated() for d in don_]
        # Donation
        # One random draw in [0, 1) per donor; used either as a percentage of
        # income (case 1) or directly as a nominal amount (cases 2 and 3)
        a = random.uniform(0, 1, len(don_))
        for i, d in enumerate(don_):
            c = random.choice(cand_)
            # Donation based on percentage of income or given amount
            if each == 'Caso 1 Teto percentual':
                # Choosing value from 0 to 1 and truncating at ceiling
                if a[i] > cases[each][1]:
                    a[i] = cases[each][1]
                c.update_treasure(d.donate(percentage=a[i]))
            elif each == 'Caso 2 Teto nominal':
                # Truncate the nominal amount at the ceiling
                if a[i] > cases[each][1]:
                    a[i] = cases[each][1]
                c.update_treasure(d.donate(amount=a[i]))
            else:
                # No ceiling: donate the drawn amount as-is
                c.update_treasure(d.donate(amount=a[i]))
        gini, m = call_plot([d.get_cumulative_donation() for d in don_], each, cases[each][2])
        average_gini[each].append(gini)
        average_donation[each].append(m)
    return average_gini, average_donation
def call_plot(values, case, color):
    """Compute the Gini coefficient of *values*, print stats and add the
    Lorenz curve to the current matplotlib figure.

    :param values: sequence of amounts (incomes or donations)
    :param case: label for prints/legend; 'Ex-ante' gets a thicker line
    :param color: matplotlib color for this curve
    :returns: tuple ``(gini_coefficient, median_of_values)``
    """
    # GiniCoef.Gini presumably returns (coefficient, (x_curve, y_curve)) — its
    # usage below relies on that shape
    some_results = GiniCoef.Gini(values)
    print("{} GINI is {:.4f}".format(case, some_results[0]))
    m = median(values)
    lw = .1
    if case == 'Ex-ante':
        # highlight the baseline income distribution
        lw = 1.5
        print('Renda mediana - {}: {:.4f}'.format(case, m))
    else:
        print('Valor doação mediano - {}: {:.4f}'.format(case, m))
    # Plot
    # dashed yellow diagonal = perfect-equality reference line
    plt.plot([0, 100], [0, 100], '--', color='yellow')
    plt.plot(some_results[1][0], some_results[1][1], color=color, label=case, lw=lw)
    plt.xlabel('% da população')
    plt.ylabel('% dos valores')
    return some_results[0], m
def repetition():
    """Run the full simulation ``parameters.number_runs`` times, then write
    median Gini/donation results to output.csv and save the Lorenz plot as
    png/pdf/eps."""
    # Running the game
    start = time.time()
    my_agents = generate_citizens(parameters.num_citizens, parameters.income_list)
    # Ex-ante GINI
    call_plot(parameters.income_list, 'Ex-ante', 'black')
    # Empty dictionaries
    average_gini = {'Caso 1 Teto percentual': [],
                    'Caso 2 Teto nominal': [],
                    'Caso 3 Sem teto': []}
    average_donation = {'Caso 1 Teto percentual': [],
                        'Caso 2 Teto nominal': [],
                        'Caso 3 Sem teto': []}
    # Numerous runs
    for i in range(parameters.number_runs):
        gini, donation = run_the_game(my_agents, parameters.num_candidates, parameters.num_donors)
        print('')
        print('Total citizens {}'.format(len(my_agents)))
        print('')
        print('Time spent in seconds {:.2f}'.format(time.time() - start))
        # NOTE(review): gini[each]/donation[each] are lists (one value per
        # call), so the medians below are taken over nested lists — relies on
        # numpy broadcasting; confirm this aggregation is intended.
        for each in gini.keys():
            average_gini[each].append(gini[each])
            average_donation[each].append(donation[each])
    # General output
    m_g_1 = median(average_gini['Caso 1 Teto percentual'])
    m_d_1 = median(average_donation['Caso 1 Teto percentual'])
    m_g_2 = median(average_gini['Caso 2 Teto nominal'])
    m_d_2 = median(average_donation['Caso 2 Teto nominal'])
    m_g_3 = median(average_gini['Caso 3 Sem teto'])
    m_d_3 = median(average_donation['Caso 3 Sem teto'])
    print('')
    print('Overall Gini averages')
    print('Caso 1 Teto percentual: median Gini {:.4} Donated value median {:.4}'.format(m_g_1, m_d_1))
    print('Caso 2 Teto nominal: median Gini {:.4} Donated value {:.4}'.format(m_g_2, m_d_2))
    print('Caso 3 Sem teto: median Gini{:.4} Donated value median {:.4}'
          .format(m_g_3, m_d_3))
    # Append this parameter combination's results to the CSV log
    with open('output.csv', 'a') as f:
        f.write('perc_{}_nominal_{}\n'.format(parameters.income_percentage_case1, parameters.ceiling_amount))
        f.write('{:.12f};{:.12f}\n'.format(m_g_1, m_d_1))
        f.write('{:.12f};{:.12f}\n'.format(m_g_2, m_d_2))
        f.write('{:.12f};{:.12f}\n'.format(m_g_3, m_d_3))
    # Legend patches for the four curves on the shared figure
    dark_patch = mpatches.Patch(color='black', label='Renda da população')
    blue_patch = mpatches.Patch(color='blue', label='Caso 1 Teto percentual: {}%'
                                .format(parameters.income_percentage_case1 * 100))
    red_patch = mpatches.Patch(color='red', label='Caso 2 Teto nominal: {}'.format(parameters.ceiling_amount))
    green_patch = mpatches.Patch(color='green', label='Caso 3 Sem teto')
    plt.legend(handles=[dark_patch, blue_patch, red_patch, green_patch], loc='upper left', frameon=False)
    # Save the same figure in three formats (directories must already exist)
    plt.savefig('figures_png/fig_perc{}_nom{}.png'
                .format(parameters.income_percentage_case1, parameters.ceiling_amount),
                format='png')
    plt.savefig('figures_pdf/fig_perc{}_nom{}.pdf'
                .format(parameters.income_percentage_case1, parameters.ceiling_amount),
                format='pdf', transparent=True)
    plt.savefig('figures_eps/fig_perc{}_nom{}.eps'
                .format(parameters.income_percentage_case1, parameters.ceiling_amount),
                format='eps', transparent=True)
def overriding_parameters():
    """Sweep paired (percentage ceiling, nominal ceiling) settings and run
    the full simulation for each pair.

    Overwrites the module-level values in ``parameters`` before each run;
    deletes any previous output.csv so results start fresh.
    """
    if os.path.exists('output.csv'):
        os.remove('output.csv')
    perc = [.05, .1, .2, .3]
    nominal = [.01, .05, .1, .25]
    # zip over the paired lists instead of indexing with range(len(...))
    for pct, nom in zip(perc, nominal):
        parameters.income_percentage_case1 = pct
        parameters.ceiling_amount = nom
        repetition()
if __name__ == '__main__':
    # Adjust parameters in parameters.py
    # Call the simulation
    # repetition()
    # To run multiple comparatives, use the function below and set them in the respective function above
    overriding_parameters()
|
import numpy as np
import pandas as pd
class Sudoku:
    """9x9 sudoku board plus a pandas DataFrame of per-cell candidate lists.

    ``sudoku_matrix`` holds the given digits (0 = empty cell).
    ``possibility_matrix`` is indexed column-first (``[col][row]``, matching
    how matrix_to_possibility_transfer writes it) and starts with every cell
    allowed the candidates 1..9.
    """

    def __init__(self, matrix):
        try:
            self.sudoku_matrix = np.zeros([9, 9]) + np.array(matrix)
        except Exception as exc:
            # BUG FIX: the original `raise "Matriz de tamanho errado."` raises
            # a TypeError in Python 3 (strings are not exceptions); raise a
            # proper ValueError with the same message instead.
            raise ValueError("Matriz de tamanho errado.") from exc
        # every cell starts with all nine candidates
        aux_possibility_matrix_dict = {}
        for i in range(0, 9):
            aux_possibility_matrix_dict[i] = [list(range(1, 10))]
        self.possibility_matrix = pd.DataFrame(aux_possibility_matrix_dict, index=np.arange(9))
        print(self.possibility_matrix[[0,1,2,3,4,5]])
        self.matrix_to_possibility_transfer()
        print(self.possibility_matrix[[0,1,2,3,4,5]])
        print("")

    def matrix_to_possibility_transfer(self):
        """Collapse the candidate list of every pre-filled cell to its digit."""
        for idx_x, row_x in enumerate(self.sudoku_matrix):
            for idx_y, element_x_y in enumerate(row_x):
                if(element_x_y != 0):
                    # DataFrame is addressed column-first: [col][row]
                    self.possibility_matrix[idx_y][idx_x] = [element_x_y]

    def access_index(self, quadrand, _x, _y):
        """Return the digit at row ``_x``, column ``_y``.

        NOTE: ``quadrand`` is unused; kept for interface compatibility.
        BUG FIX: the original referenced undefined names ``x``/``y``
        (NameError at call time); the parameters are used now.
        """
        return(self.sudoku_matrix[_x][_y])

    def access_possibility_index(self, _x, _y):
        """Return the candidate list for row ``_x``, column ``_y``.

        BUG FIX: the original ``self.possibility_matrix[9*x][9*y]`` used
        undefined names and an index scale that is always out of range for
        x, y > 0.  Column-first indexing here mirrors
        matrix_to_possibility_transfer — TODO confirm intended semantics.
        """
        return(self.possibility_matrix[_y][_x])

    def get_quadrant(self, full_matrix, _x, _y):
        """Return the 3x3 sub-block at block coordinates (_x, _y) as a list
        of three row slices."""
        aux = []
        for x in full_matrix[_x*3:(_x*3)+3]:
            aux2 = x[_y*3:(_y*3)+3]
            aux.extend([aux2])
        return aux

    def clear_quadrants_from_quadrand_element(self):
        """For every filled cell, remove its digit from the candidate lists of
        the other cells in the same 3x3 block."""
        for idx_x, matrix_row in enumerate(self.sudoku_matrix):
            for idx_y, matrix_element in enumerate(matrix_row):
                if(matrix_element != 0):
                    quadrant_x = idx_x//3
                    quadrant_y = idx_y//3
                    aux_quadrant = self.get_quadrant(
                        self.possibility_matrix.to_numpy(),
                        quadrant_x,
                        quadrant_y
                    )
                    for possibility_idx_x, possibility_element_x in enumerate(
                        aux_quadrant
                    ):
                        print([x for x,y in enumerate(possibility_element_x)])
                        for possibility_idx_y, possibility_element_y in enumerate(possibility_element_x):
                            aux = possibility_element_y.copy()
                            print("looking for element ", idx_x, idx_y, " no indice (pssblt) ",possibility_idx_x+quadrant_x*3, possibility_idx_y+quadrant_y*3, aux)
                            # skip the cell that holds the digit itself
                            if(matrix_element in list(aux) and [idx_x, idx_y] != [possibility_idx_x+quadrant_x*3, possibility_idx_y+quadrant_y*3]):
                                print(self.possibility_matrix[[0,1,2,3,4,5]])
                                # copy-modify-assign to avoid mutating the shared list in place
                                aux_list = self.possibility_matrix[possibility_idx_y+quadrant_y*3][possibility_idx_x+quadrant_x*3].copy()
                                aux_list.remove(matrix_element)
                                self.possibility_matrix[possibility_idx_y+quadrant_y*3][possibility_idx_x+quadrant_x*3] = aux_list
                                print(self.possibility_matrix[[0,1,2,3,4,5]])
                                print("---------------------------------------------")
                                print("")
print("")
# Sample puzzle: 0 marks an empty cell.
base_matrix = [[0, 0, 7, 0, 0, 0, 2, 0, 8],
               [0, 8, 9, 0, 7, 3, 0, 0, 0],
               [0, 0, 0, 0, 0, 2, 0, 1, 7],
               [8, 0, 0, 0, 0, 6, 0, 2, 0],
               [0, 0, 0, 2, 1, 0, 0, 4, 0],
               [6, 4, 0, 0, 0, 9, 0, 0, 0],
               [5, 0, 0, 6, 0, 0, 8, 0, 0],
               [0, 9, 0, 3, 4, 0, 1, 0, 0],
               [4, 0, 0, 0, 2, 7, 3, 0, 0]]
sudoku_obj = Sudoku(base_matrix)
# Show the top-right 3x3 block, prune candidates, show it again
print(sudoku_obj.get_quadrant(sudoku_obj.sudoku_matrix,0,2))
sudoku_obj.clear_quadrants_from_quadrand_element()
print(sudoku_obj.get_quadrant(sudoku_obj.sudoku_matrix,0,2))
# print(sudoku_obj.get_quadrant(sudoku_obj.sudoku_matrix,1,1))
|
import numpy as np
def get_k_means_plus_plus_center_indices(n, n_cluster, x, generator=np.random):
    '''

    :param n: number of samples in the data
    :param n_cluster: the number of cluster centers required
    :param x: data- numpy array of points
    :param generator: random number generator from 0 to n for choosing the first cluster at random
            The default is np.random here but in grading, to calculate deterministic results,
            We will be using our own random number generator.
    :return: the center points array of length n_clusters with each entry being index to a sample
             which is chosen as centroid.
    '''
    # TODO:
    # implement the Kmeans++ algorithm of how to choose the centers according to the lecture and notebook
    # Choose 1st center randomly and use Euclidean distance to calculate other centers.

    center_points = []
    # First center: reuse the plain (Lloyd) random initializer, keep one index
    center_points.append(get_lloyd_k_means(n,n_cluster, x, generator)[0])
    centroid_points = np.array([x[center_points[0]]])
    for k in range(0, n_cluster-1, 1):
        # (n_centers, 1, D) - (N, D) broadcasts to per-center differences
        updated_centroid = np.expand_dims(centroid_points, 1)
        square = (updated_centroid - x)**2
        euclid_dist = np.sum(square,2)
        # distance of every point to its nearest already-chosen center
        min_euclid_dist = np.min(euclid_dist, 0)
        # NOTE(review): deterministic k-means++ variant — picks the farthest
        # point instead of sampling proportionally to squared distance.
        arg_updated_center = np.argmax(min_euclid_dist)
        centroid_points = np.vstack([centroid_points, x[arg_updated_center]])
        center_points.append(arg_updated_center)
    centers = center_points



    # DO NOT CHANGE CODE BELOW THIS LINE

    print("[+] returning center for [{}, {}] points: {}".format(n, len(x), centers))
    return centers
def get_lloyd_k_means(n, n_cluster, x, generator):
    """Vanilla (Lloyd) initialization: draw ``n_cluster`` sample indices from
    ``range(n)`` with the supplied generator. ``x`` is unused but kept so the
    signature matches the k-means++ initializer."""
    chosen = generator.choice(n, size=n_cluster)
    return chosen
class KMeans():

    '''
        Class KMeans:
        Attr:
            n_cluster - Number of cluster for kmeans clustering (Int)
            max_iter - maximum updates for kmeans clustering (Int)
            e - error tolerance (Float)
            generator - random number generator from 0 to n for choosing the first cluster at random
            The default is np.random here but in grading, to calculate deterministic results,
            We will be using our own random number generator.
    '''

    def __init__(self, n_cluster, max_iter=100, e=0.0001, generator=np.random):
        self.n_cluster = n_cluster
        self.max_iter = max_iter
        self.e = e
        self.generator = generator

    def fit(self, x, centroid_func=get_lloyd_k_means):

        '''
            Finds n_cluster in the data x
            params:
                x - N X D numpy array
                centroid_func - To specify which algorithm we are using to compute the centers(Lloyd(regular) or Kmeans++)
            returns:
                A tuple
                (centroids a n_cluster X D numpy array, y a length (N,) numpy array where cell i is the ith sample's assigned cluster, number_of_updates a Int)
            Note: Number of iterations is the number of time you update the assignment
        '''
        assert len(x.shape) == 2, "fit function takes 2-D numpy arrays as input"
        N, D = x.shape

        self.centers = centroid_func(len(x), self.n_cluster, x, self.generator)
        # TODO:
        # - comment/remove the exception.
        # - Initialize means by picking self.n_cluster from N data points
        # - Update means and membership until convergence or until you have made self.max_iter updates.
        # - return (means, membership, number_of_updates)

        # DONOT CHANGE CODE ABOVE THIS LINE
        # gamma_func: one-hot cluster-membership matrix (N x K)
        gamma_func = np.zeros((N, self.n_cluster))
        cluster_centroids = x[self.centers]
        # alpha: previous total distortion; start huge so iteration 1 never "converges"
        alpha = 10**10
        row = np.arange(N)
        new_axis = np.newaxis
        # replicate x along a third axis so centroids broadcast per-cluster
        updated_x = np.repeat(x[:, :, new_axis], self.n_cluster,2)
        for i in range(0,self.max_iter,1):
            gamma_func = 0 * gamma_func
            updated_alpha = 0
            square = (cluster_centroids.T - updated_x)**2
            edist = np.sum(square, 1)
            edist_root = edist **(0.5)
            # E-step: assign each point to its nearest centroid
            arg_min_dist = np.argmin(edist_root, 1)
            gamma_func[row, arg_min_dist] = 1
            non_zero_cnt = np.count_nonzero(gamma_func,0)
            # total distortion (sum of distances to assigned centroids)
            updated_alpha = np.sum(edist_root * gamma_func)
            difference = abs(updated_alpha - alpha)
            if self.e >= difference:
                # distortion moved less than the tolerance: converged
                break
            # M-step: recompute centroids; empty clusters keep a zero centroid
            tmp = np.matmul(gamma_func.T, x)
            non_zero_cnt = non_zero_cnt .reshape(self.n_cluster, 1)
            cluster_centroids = 0 * cluster_centroids
            np.divide(tmp, non_zero_cnt, out = cluster_centroids, where = non_zero_cnt != 0)
            alpha = updated_alpha
        y = arg_min_dist
        centroids = cluster_centroids
        # DO NOT CHANGE CODE BELOW THIS LINE
        # NOTE(review): returns self.max_iter even when the loop broke early —
        # confirm the update count is intended to be reported this way.
        return centroids, y, self.max_iter
class KMeansClassifier():

    '''
        Class KMeansClassifier:
        Attr:
            n_cluster - Number of cluster for kmeans clustering (Int)
            max_iter - maximum updates for kmeans clustering (Int)
            e - error tolerance (Float)
            generator - random number generator from 0 to n for choosing the first cluster at random
            The default is np.random here but in grading, to calculate deterministic results,
            We will be using our own random number generator.
    '''

    def __init__(self, n_cluster, max_iter=100, e=1e-6, generator=np.random):
        self.n_cluster = n_cluster
        self.max_iter = max_iter
        self.e = e
        self.generator = generator

    def fit(self, x, y, centroid_func=get_lloyd_k_means):

        '''
            Train the classifier
            params:
                x - N X D size  numpy array
                y - (N,) size numpy array of labels
                centroid_func - To specify which algorithm we are using to compute the centers(Lloyd(regular) or Kmeans++)
            returns:
                None
            Stores following attributes:
                self.centroids : centroids obtained by kmeans clustering (n_cluster X D numpy array)
                self.centroid_labels : labels of each centroid obtained by
                    majority voting (N,) numpy array)
        '''

        assert len(x.shape) == 2, "x should be a 2-D numpy array"
        assert len(y.shape) == 1, "y should be a 1-D numpy array"
        assert y.shape[0] == x.shape[0], "y and x should have same rows"

        self.generator.seed(42)
        N, D = x.shape
        # TODO:
        # - comment/remove the exception.
        # - Implement the classifier
        # - assign means to centroids
        # - assign labels to centroid_labels

        # DONOT CHANGE CODE ABOVE THIS LINE
        # Cluster x, then give each centroid the majority label of its members
        k_means_module = KMeans(self.n_cluster, self.max_iter, self.e, self.generator)
        cluster_centroids, allocated_tags, z = k_means_module.fit(x, centroid_func)
        cluster_centroid_tags = np.zeros(self.n_cluster)
        for i in range(0, self.n_cluster,1):
            # labels of the points assigned to cluster i
            current_division = y[np.where(allocated_tags==i)]
            len_div = len(current_division)
            if len_div !=0:
                # majority vote; NOTE(review): np.bincount requires
                # non-negative integer labels — confirm y satisfies this
                binary_cnt = np.bincount(current_division)
                cluster_centroid_tags[i] = np.argmax(binary_cnt)
            else:
                # empty cluster defaults to label 0
                cluster_centroid_tags[i] = 0
        centroid_labels = cluster_centroid_tags
        centroids = cluster_centroids
        # DONOT CHANGE CODE BELOW THIS LINE

        self.centroid_labels = centroid_labels
        self.centroids = centroids

        assert self.centroid_labels.shape == (
            self.n_cluster,), 'centroid_labels should be a numpy array of shape ({},)'.format(self.n_cluster)

        assert self.centroids.shape == (
            self.n_cluster, D), 'centroid should be a numpy array of shape {} X {}'.format(self.n_cluster, D)

    def predict(self, x):

        '''
            Predict function
            params:
                x - N X D size  numpy array
            returns:
                predicted labels - numpy array of size (N,)
        '''

        assert len(x.shape) == 2, "x should be a 2-D numpy array"

        self.generator.seed(42)
        N, D = x.shape
        # TODO:
        # - comment/remove the exception.
        # - Implement the prediction algorithm
        # - return labels

        # DONOT CHANGE CODE ABOVE THIS LINE
        # Nearest-centroid classification: distance from every sample to every
        # centroid, pick the closest, return that centroid's majority label
        new_axis = np.newaxis
        updated_x = np.repeat(x[:,:,new_axis], self.n_cluster, 2)
        square = (self.centroids.T - updated_x)**2
        edist = np.sum(square, 1)
        edist_root = edist **(0.5)
        edist_argmin = np.argmin(edist_root,1)
        tags = self.centroid_labels[edist_argmin]
        labels = tags
        # DO NOT CHANGE CODE BELOW THIS LINE
        return np.array(labels)
def transform_image(image, code_vectors):
    '''
        Quantize image using the code_vectors

        Return new image from the image by replacing each RGB value in image
        with the nearest code vector (nearest in euclidean distance sense;
        the first of tied vectors wins)

        BUG FIX: the original wrote chosen code vectors into a *view* of the
        input image while still measuring distances against that same buffer,
        so (a) the caller's image was mutated in place and (b) later distance
        comparisons ran against already-replaced pixels. Distances are also
        now computed in float to avoid unsigned-integer overflow for uint8
        images.

        returns:
            numpy array of shape image.shape
    '''

    assert image.shape[2] == 3 and len(image.shape) == 3, \
        'Image should be a 3-D array with size (?,?,3)'

    assert code_vectors.shape[1] == 3 and len(code_vectors.shape) == 2, \
        'code_vectors should be a 2-D array with size (?,3)'

    # DONOT CHANGE CODE ABOVE THIS LINE
    rows, cols, _ = image.shape
    pixels = image.reshape(rows * cols, 3)
    # float copy for distance math only; the source pixels stay untouched
    pixels_f = pixels.astype(np.float64)

    best_dist = np.full(rows * cols, np.inf)  # best squared distance so far
    quantized = np.zeros_like(pixels)         # output buffer, image's dtype

    for vec in code_vectors:
        dist = np.sum((pixels_f - vec) ** 2, axis=1)
        closer = dist < best_dist             # strict '<' keeps the first of tied vectors
        quantized[closer] = vec
        best_dist[closer] = dist[closer]

    new_im = quantized.reshape(rows, cols, 3)
    # DONOT CHANGE CODE BELOW THIS LINE
    return new_im
|
import csv  # NOTE(review): unused in this script
import plotly.express as px  # NOTE(review): unused; only figure_factory is used
import plotly.figure_factory as ff
import pandas as pd
# Load the sample data and plot a smooth distribution of the Weight column;
# show_hist=False draws only the density curve, no histogram bars.
dataFile=pd.read_csv("csv/normal.csv")
fig=ff.create_distplot([dataFile["Weight"].tolist()],["Weight"],show_hist=False)
fig.show()
# 226. Invert Binary Tree
# Given the root of a binary tree, invert the tree, and return its root.
# Example 1:
# Input: root = [4,2,7,1,3,6,9]
# Output: [4,7,2,9,6,3,1]
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node: ``val`` payload, ``left``/``right`` children (or None)."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def invertTree(self, root: "Optional[TreeNode]") -> "Optional[TreeNode]":
        """Return the root of the tree with every left/right pair swapped.

        BUG FIX: the annotations are now string literals — ``Optional`` was
        never imported, so evaluating the original annotations raised
        NameError at class-definition time; quoting defers evaluation
        (PEP 563 style) without changing the documented types.
        """
        return self.switcheroo(root)

    def switcheroo(self, root):
        """Recursively swap the children of every node; returns *root*."""
        if not root:
            return root
        # invert both subtrees first, then swap them into place
        root.left, root.right \
            = self.switcheroo(root.right), self.switcheroo(root.left)
        return root
|
#-*- coding:utf-8 -*-
import os
class CodeReplace(object):
    """Recursively rewrite ``*.java`` sources under ``rootPath``.

    Replaces every occurrence of ``self.old`` with ``self.new`` and logs the
    paths of modified files to ``replog.log``.
    """
    def __init__(self,rootPath):
        # literal snippets to swap (old -> new)
        self.old = '"code", java.getCode()'
        self.new = '"id", java.getId()'
        self.rootPath = rootPath
        # paths of files actually modified, written to the log at the end
        self.repFileList = []
    def run(self):
        """Entry point: sweep the tree, then write the replacement log."""
        self.recursionReplace(self.rootPath)
        self.log_write()
    # Replace the target text inside one file
    def replace(self,_file):
        rep = None
        with open(_file, 'r', encoding='UTF-8') as f:
            content = f.read()
            if(self.old in content):
                rep = content.replace(self.old, self.new)
        # only rewrite the file when a replacement actually happened
        if not rep is None:
            with open(_file, 'w', encoding='UTF-8') as f:
                f.write(rep)
                self.repFileList.append(_file)
                print('替换文件:', _file, '文件完成')
    # Recursively walk the directory tree
    def recursionReplace(self,path):
        pathDir = os.listdir(path)  # names of entries in the current directory
        for s in pathDir:
            newDir = os.path.join(path, s)  # full path of the entry
            if os.path.isfile(newDir):  # plain file
                if os.path.splitext(newDir)[1] == ".java":  # only process java files
                    self.replace(newDir)  # replace text in place
                    pass
            else:
                self.recursionReplace(newDir)  # directory: recurse into it
    # Write the list of modified files to replog.log next to the root path
    def log_write(self):
        # NOTE(review): backslash path join is Windows-specific
        log = os.path.abspath(os.path.join(os.path.dirname(self.rootPath), ".."))+'\\replog.log'
        with open(log, 'w', encoding='UTF-8') as f:
            for content in self.repFileList:
                f.write(content)
                f.write('\n')
        print('替换文件记录写入:',log,'文件')
if __name__ == '__main__':
    # FIX: raw string — '\c', '\w', '\d' are invalid escape sequences
    # (DeprecationWarning); the value is unchanged because none of them are
    # recognized escapes.
    rootPath = r'E:\code\workspace\workspace\dataAnalyseCommons'
    codeReplace = CodeReplace(rootPath)
    codeReplace.run()
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.items = []

    def push(self, item):
        """Place *item* on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item; warn and return None when empty."""
        if not self.items:
            print('Stack is empty')
            return None
        return self.items.pop()

    # show the next value that is ready to be popped
    def peek(self):
        """Return (without removing) the top item; warn and return None when empty."""
        if not self.items:
            print('Stack is empty')
            return None
        return self.items[-1]

    def size(self):
        """Number of stored items."""
        return len(self.items)

    def is_empty(self):
        """True when the stack holds nothing."""
        return len(self.items) == 0
# Announce at import/run time which Stack implementation is in use.
print('Using local Stack class')
import chess
from chessboard import display
def evaluateScore():
    """Static evaluation of the module-level ``board``.

    Combines material (pawn 100, knight/bishop 300, rook 500, queen 900) with
    piece-square-table positional bonuses; checkmate short-circuits to ±9999.
    The result uses the negamax convention: positive favors the side to move.
    """
    if board.is_checkmate():
        if board.turn: #If White turn return -9999 meaning Black won. Else return 9999 meaning White won
            return -9999
        else:
            return 9999
    #Score pieces based on position. Piece square tables found online
    # Tables are indexed by square from White's point of view; Black squares
    # are mirrored with chess.square_mirror() in the sums below.
    pawnTable = [
        0, 0, 0, 0, 0, 0, 0, 0,
        5, 10, 10, -20, -20, 10, 10, 5,
        5, -5, -10, 0, 0, -10, -5, 5,
        0, 0, 0, 20, 20, 0, 0, 0,
        5, 5, 10, 25, 25, 10, 5, 5,
        10, 10, 20, 30, 30, 20, 10, 10,
        50, 50, 50, 50, 50, 50, 50, 50,
        0, 0, 0, 0, 0, 0, 0, 0]
    knightTable = [
        -50, -40, -30, -30, -30, -30, -40, -50,
        -40, -20, 0, 5, 5, 0, -20, -40,
        -30, 5, 10, 15, 15, 10, 5, -30,
        -30, 0, 15, 20, 20, 15, 0, -30,
        -30, 5, 15, 20, 20, 15, 5, -30,
        -30, 0, 10, 15, 15, 10, 0, -30,
        -40, -20, 0, 0, 0, 0, -20, -40,
        -50, -40, -30, -30, -30, -30, -40, -50]
    bishopTable = [
        -20, -10, -10, -10, -10, -10, -10, -20,
        -10, 5, 0, 0, 0, 0, 5, -10,
        -10, 10, 10, 10, 10, 10, 10, -10,
        -10, 0, 10, 10, 10, 10, 0, -10,
        -10, 5, 5, 10, 10, 5, 5, -10,
        -10, 0, 5, 10, 10, 5, 0, -10,
        -10, 0, 0, 0, 0, 0, 0, -10,
        -20, -10, -10, -10, -10, -10, -10, -20]
    rookTable = [
        0, 0, 0, 5, 5, 0, 0, 0,
        -5, 0, 0, 0, 0, 0, 0, -5,
        -5, 0, 0, 0, 0, 0, 0, -5,
        -5, 0, 0, 0, 0, 0, 0, -5,
        -5, 0, 0, 0, 0, 0, 0, -5,
        -5, 0, 0, 0, 0, 0, 0, -5,
        5, 10, 10, 10, 10, 10, 10, 5,
        0, 0, 0, 0, 0, 0, 0, 0]
    queenTable = [
        -20, -10, -10, -5, -5, -10, -10, -20,
        -10, 0, 5, 0, 0, 0, 0, -10,
        -10, 5, 5, 5, 5, 5, 0, -10,
        0, 0, 5, 5, 5, 5, 0, -5,
        -5, 0, 5, 5, 5, 5, 0, -5,
        -10, 0, 5, 5, 5, 5, 0, -10,
        -10, 0, 0, 0, 0, 0, 0, -10,
        -20, -10, -10, -5, -5, -10, -10, -20]
    kingTable = [
        20, 30, 10, 0, 0, 10, 30, 20,
        20, 20, 0, 0, 0, 0, 20, 20,
        -10, -20, -20, -20, -20, -20, -20, -10,
        -20, -30, -30, -40, -40, -30, -30, -20,
        -30, -40, -40, -50, -50, -40, -40, -30,
        -30, -40, -40, -50, -50, -40, -40, -30,
        -30, -40, -40, -50, -50, -40, -40, -30,
        -30, -40, -40, -50, -50, -40, -40, -30]
    kingEndTable = [
        -50, -30, -30, -30, -30, -30, -30, -50,
        -30, -30, 0, 0, 0, 0, -30,-30,
        -30, -10, 20, 30, 30, 20, -10, -30,
        -30, -10, 30, 40, 40, 30, -10, -30,
        -30, -10, 30, 40, 40, 30, -10, -30,
        -30, -10, 20, 30, 30, 20, -10, -30,
        -30, -20, -10, 0, 0, -10, -20, -30,
        -50, -40, -30, -20, -20, -30, -40, -50]
    #Number of each pieces on current board
    whitePawn = len(board.pieces(chess.PAWN, chess.WHITE))
    blackPawn = len(board.pieces(chess.PAWN, chess.BLACK))
    whiteKnight = len(board.pieces(chess.KNIGHT, chess.WHITE))
    blackKnight = len(board.pieces(chess.KNIGHT, chess.BLACK))
    whiteBishop = len(board.pieces(chess.BISHOP, chess.WHITE))
    blackBishop = len(board.pieces(chess.BISHOP, chess.BLACK))
    whiteRook = len(board.pieces(chess.ROOK, chess.WHITE))
    blackRook = len(board.pieces(chess.ROOK, chess.BLACK))
    whiteQueen = len(board.pieces(chess.QUEEN, chess.WHITE))
    blackQueen = len(board.pieces(chess.QUEEN, chess.BLACK))
    # Positional score per piece type: White squares minus mirrored Black squares
    pawnScore = sum([pawnTable[i] for i in board.pieces(chess.PAWN, chess.WHITE)]) - sum([pawnTable[chess.square_mirror(i)] for i in board.pieces(chess.PAWN, chess.BLACK)])
    knightScore = sum([knightTable[i] for i in board.pieces(chess.KNIGHT, chess.WHITE)]) - sum([knightTable[chess.square_mirror(i)] for i in board.pieces(chess.KNIGHT, chess.BLACK)])
    bishopScore = sum([bishopTable[i] for i in board.pieces(chess.BISHOP, chess.WHITE)]) - sum([bishopTable[chess.square_mirror(i)] for i in board.pieces(chess.BISHOP, chess.BLACK)])
    rookScore = sum([rookTable[i] for i in board.pieces(chess.ROOK, chess.WHITE)]) - sum([rookTable[chess.square_mirror(i)] for i in board.pieces(chess.ROOK, chess.BLACK)])
    queenScore = sum([queenTable[i] for i in board.pieces(chess.QUEEN, chess.WHITE)]) - sum([queenTable[chess.square_mirror(i)] for i in board.pieces(chess.QUEEN, chess.BLACK)])
    if 100 * whitePawn + 300 * whiteKnight + 300 * whiteBishop + 500 * whiteRook + 900 * whiteQueen < 1400 and 100 * blackPawn + 300 * blackKnight + 300 * blackBishop + 500 * blackRook + 900 * blackQueen < 1400:
        #If materialScore on both sides are less than 1400 (queen + rook = 900 + 500 = 1400) use kingEndTable
        kingScore = sum([kingEndTable[i] for i in board.pieces(chess.KING, chess.WHITE)]) - sum([kingEndTable[chess.square_mirror(i)] for i in board.pieces(chess.KING, chess.BLACK)])
    else:
        kingScore = sum([kingTable[i] for i in board.pieces(chess.KING, chess.WHITE)]) - sum([kingTable[chess.square_mirror(i)] for i in board.pieces(chess.KING, chess.BLACK)])
    #Pawns are worth 100, knights 300, bishops 300, rooks 500 and queen 900
    materialScore = 100 * (whitePawn - blackPawn) + 300 * (whiteKnight - blackKnight) + 300 * (whiteBishop - blackBishop) + 500 * (whiteRook - blackRook) + 900 * (whiteQueen - blackQueen)
    totalScore = materialScore + pawnScore + knightScore + bishopScore + rookScore + queenScore + kingScore
    if board.turn: #Return positive score if White turn. Negative score if Black turn
        return totalScore
    else:
        return -totalScore
#Search best move using minimax and alphabeta algorithm with negamax implementation
def negamax(alpha, beta, depth):
    """Alpha-beta minimax in negamax form on the module-level ``board``.

    Returns the best score reachable in *depth* plies from the perspective of
    the side to move; leaves are resolved with the quiescence search.
    """
    if depth == 0:
        return quiescence(alpha, beta)
    best = -9999  # worst possible score for the side to move
    for candidate in sortMoves(False):
        board.push(candidate)
        score = -negamax(-beta, -alpha, depth - 1)
        board.pop()
        if score > best:
            best = score
        if score > alpha:
            alpha = score
        if beta <= alpha:  # beta cutoff: opponent will avoid this line
            break
    return best
#Used to get rid of horizon effect
def quiescence(alpha, beta):
    """Capture-only search that settles tactical noise (horizon effect).

    Stands pat on the static evaluation, then explores capture moves only,
    returning the best achievable score for the side to move.
    """
    stand_pat = evaluateScore()
    if stand_pat > alpha:
        alpha = stand_pat
    if beta <= alpha:
        return stand_pat
    for capture in sortMoves(True):
        board.push(capture)
        score = -quiescence(-beta, -alpha)
        board.pop()
        if score > alpha:
            alpha = score
        if beta <= alpha:  # opponent refutes this line; stop searching
            return score
    return alpha
#Sort moves in ascending order based on score
def sortMoves(capturesOnly):
    """Score each legal move (optionally captures only) by evaluating the
    resulting position, and return a dict ordered by ascending score.

    Ascending order is deliberate: after board.push the evaluation is from
    the opponent's perspective, so a low score means a good move for us.
    """
    if capturesOnly:
        candidates = (m for m in board.legal_moves if board.is_capture(m))
    else:
        candidates = board.legal_moves
    scored = {}
    for m in candidates:
        board.push(m)
        scored[m] = evaluateScore()
        board.pop()
    return dict(sorted(scored.items(), key=lambda pair: pair[1]))
def selectMove(depth):
    """Pick the best legal move for the side to move via negamax search.

    BUG FIX: ``bestMove`` is now initialized to None; previously, if every
    legal move was skipped (each leads to a claimable threefold repetition)
    or there were no legal moves at all, the final ``return bestMove`` raised
    UnboundLocalError. Callers should treat a None result as "no move found".
    """
    bestScore = -99999 #Initialize bestScore as worst possible White score
    alpha = -100000 #Alpha is bestScore for maximizing player (White). Initialize as worst possible White score
    beta = 100000 #Beta is bestScore for minimizing player (Black). Initialize as worst possible Black score
    bestMove = None
    for move in board.legal_moves:
        #Skip move if threefold repetition for AI vs AI. For human vs AI you can comment these 5 lines since humans usually won't repeat moves
        board.push(move)
        if board.can_claim_threefold_repetition():
            board.pop()
            continue
        board.pop()
        if board.is_castling(move): #Don't need to check castling rights if move is a castle
            board.push(move)
            moveScore = -negamax(-beta, -alpha, depth - 1)
        else:
            # Compare castling rights before and after. If before is true and
            # after is false the move prevents castling.
            if board.turn:
                castleRightsBefore = board.has_castling_rights(chess.WHITE)
                board.push(move)
                castleRightsAfter = board.has_castling_rights(chess.WHITE)
            else:
                castleRightsBefore = board.has_castling_rights(chess.BLACK)
                board.push(move)
                castleRightsAfter = board.has_castling_rights(chess.BLACK)
            if castleRightsBefore and not castleRightsAfter: #Subtract 50 (Value of half pawn) if move prevents castling
                moveScore = -negamax(-beta, -alpha, depth - 1) - 50
            else:
                moveScore = -negamax(-beta, -alpha, depth - 1)
        if moveScore > bestScore:
            bestScore = moveScore
            bestMove = move
        alpha = max(alpha, moveScore)
        board.pop()
    return bestMove
#Main
# AI-vs-AI game loop: render the board with chessboard.display and let
# selectMove choose both sides' moves until the game ends or the window quits.
board = chess.Board()
display.start(board.fen())
printEndMessage = False
while not display.checkForQuit():
    if not board.is_game_over():
        #Uncomment to play vs AI by typing move. A move is square of piece you want to move and square where you want piece to move to. Ex: e2e4 moves e2 pawn to e4. Put AIMove first to play as Black
        #playerMove = input()
        #board.push_san(playerMove)
        #display.update(board.fen())
        #print("Player move:", playerMove)
        AImove = selectMove(1) #Increase number for harder AI at the cost of it taking longer
        if board.turn:
            print("White move:", AImove)
        else:
            print("Black move:", AImove)
        board.push(AImove)
        display.update(board.fen())
    elif not printEndMessage:
        # Game over: report the outcome exactly once, keep the window open
        if board.is_checkmate():
            if board.turn:
                print("BLACK WON!")
            else:
                print("WHITE WON!")
        else:
            print("STALEMATE/DRAW!")
        printEndMessage = True
display.terminate()
|
import pytest
import os
from polyglotdb.io import inspect_mfa, inspect_textgrid
from polyglotdb import CorpusContext
def test_load_discourse(graph_db, mfa_test_dir, textgrid_test_dir):
    """Load an MFA TextGrid and a plain TextGrid into a fresh corpus and
    verify words, phones, syllables, discourses, speakers and the sound file
    are all queryable afterwards."""
    test_file_path = os.path.join(mfa_test_dir, "mfa_test.TextGrid")
    acoustic_path = os.path.join(textgrid_test_dir, 'acoustic_corpus.TextGrid')
    mfa_parser = inspect_mfa(test_file_path)
    parser = inspect_textgrid(acoustic_path)
    with CorpusContext('load_remove_test', **graph_db) as c:
        c.reset()
        c.load_discourse(parser, acoustic_path)
        c.load_discourse(mfa_parser, test_file_path)
        # both upper- and lower-case phone labels: one set per source corpus
        syllabics = ['ER', 'AE', 'IH', 'EH', 'ae', 'ih', 'er', 'eh']
        c.encode_syllabic_segments(syllabics)
        c.encode_syllables()

        # graph-level annotations from the MFA discourse
        q = c.query_graph(c.word).filter(c.word.label == 'JURASSIC')
        assert q.count() > 0

        q = c.query_graph(c.phone).filter(c.phone.label == 'AE')
        assert q.count() > 0

        q = c.query_lexicon(c.syllable).filter(c.syllable.label == 'JH.ER')
        assert q.count() > 0

        # lexicon-level entries for both discourses
        q = c.query_lexicon(c.lexicon_word).filter(c.lexicon_word.label == 'JURASSIC')
        assert q.count() > 0

        q = c.query_lexicon(c.lexicon_phone).filter(c.lexicon_phone.label == 'AE')
        assert q.count() > 0

        q = c.query_lexicon(c.lexicon_phone).filter(c.lexicon_phone.label == 'ae')
        assert q.count() > 0

        q = c.query_lexicon(c.lexicon_syllable).filter(c.lexicon_syllable.label == 'JH.ER')
        assert q.count() > 0

        q = c.query_discourses().filter(c.discourse.name == 'mfa_test')
        assert q.count() > 0
        q = c.query_speakers().filter(c.speaker.name == 'mfa')
        assert q.count() > 0

        d = c.discourse_sound_file('acoustic_corpus')
        assert os.path.exists(d['consonant_file_path'])
def test_remove_discourse(graph_db):
    """Remove the MFA discourse (then the acoustic one) and verify its
    annotations disappear while data from the other discourse survives.

    NOTE: depends on test_load_discourse having populated 'load_remove_test'.
    """
    with CorpusContext('load_remove_test', **graph_db) as c:
        c.remove_discourse('mfa_test')

        q = c.query_graph(c.word).filter(c.word.label == 'JURASSIC')
        assert q.count() == 0

        q = c.query_graph(c.phone).filter(c.phone.label == 'AE')
        assert q.count() == 0

        q = c.query_lexicon(c.syllable).filter(c.syllable.label == 'JH.ER')
        assert q.count() == 0

        q = c.query_lexicon(c.lexicon_word).filter(c.lexicon_word.label == 'JURASSIC')
        assert q.count() == 0

        q = c.query_lexicon(c.lexicon_phone).filter(c.lexicon_phone.label == 'AE')
        assert q.count() == 0

        # lower-case 'ae' belongs to the remaining acoustic discourse, so it
        # must still be present after removing only 'mfa_test'
        q = c.query_lexicon(c.lexicon_phone).filter(c.lexicon_phone.label == 'ae')
        assert q.count() > 0

        q = c.query_lexicon(c.lexicon_syllable).filter(c.lexicon_syllable.label == 'JH.ER')
        assert q.count() == 0

        q = c.query_discourses().filter(c.discourse.name == 'mfa_test')
        assert q.count() == 0
        q = c.query_speakers().filter(c.speaker.name == 'mfa')
        assert q.count() == 0

        # removing the acoustic discourse also deletes its sound file on disk
        d = c.discourse_sound_file('acoustic_corpus')
        assert os.path.exists(d['consonant_file_path'])

        c.remove_discourse('acoustic_corpus')
        assert not os.path.exists(d['consonant_file_path'])
|
'''
Driver for KMTRonic RS485 Relay
the board has 8 relays and a status command
note: KMT status seems quite buggy
(oscilloscope measures noisy response)
'''
from . import rs485
# Number of relays on the board.
size = 8 # no. relays
# Board address as set by the id-select switches (currently position 4).
ID = 4 # id-select-switches currently toggled for ID4
# Command code for the status request; added to ID to form the second byte.
stat_byte = 0xA0
# Fixed start byte of every KMTRonic command frame.
byte1 = 0xFF
def status():
    '''
    Return the relay states as an 8-byte frame (one 0/1 byte per relay).

    Retries because dirty_status() is intermittently corrupted; empirically,
    of 1000 tries, 994 were correct on the second request.

    Raises
    ------
    RuntimeError
        If no clean status frame is received within ``n_tries`` attempts.
        (RuntimeError subclasses Exception, so existing callers that
        caught the old bare Exception still work.)
    '''
    n_tries = 100
    for _ in range(n_tries):
        msg = dirty_status()
        # A clean frame is exactly 8 bytes, each of which is 0 or 1.
        has_expected_length = len(msg) == 8
        is_boolean = all(bit in (0, 1) for bit in msg)
        if has_expected_length and is_boolean:
            return msg
    raise RuntimeError('no clean status frame after %d tries' % n_tries)
def dirty_status():
    '''
    Issue one raw status request and return the 8-byte response as-is.

    The response is intermittently corrupted (KMT status seems buggy), e.g.:
    In [83]: status()
    Out[83]: b'\x01\x00\x01\x00\x00\x00\x00\x00'
    In [84]: status()
    Out[84]: b'@@@@\x80\x80\x80\x00'
    '''
    # Frame layout: start byte, status code + board id, zero padding byte.
    frame = bytearray([byte1, stat_byte + ID, 0x00])
    # NOTE: delay is empirical from observed behavior
    rs485.write(frame, delay=1)
    return rs485.read(8)
def send(idx, relay_on):
    '''Set relay ``idx`` (0-based) on this board to the given on/off state.'''
    # Each board occupies a consecutive range of 8 command ids, offset by ID.
    relay_id = (ID - 1) * 8 + idx + 1
    command = bytearray([byte1, relay_id, relay_on])
    rs485.write(command)
def turn_on(idx):
    '''Switch relay ``idx`` on.'''
    send(idx, relay_on=True)
def turn_off(idx):
    '''Switch relay ``idx`` off.'''
    send(idx, relay_on=False)
def reset():
    '''Switch every relay on the board off.'''
    for relay in range(size):
        turn_off(relay)
|
"""
Спросить имя пользователя и сохранить его.
Прочитать файл questions.txt и последовательно задать вопросы пользователю.
Проверить ответы из файла answers.txt
Записать результаты в файл и назвать его именем пользователя.
В результатх указать количество правильных и не правильных ответов.
"""
score_right = 0
score_wrong = 0
name = input('Введите свое имя: ')
# Очищаем файл если он уже существует
answers_p = open(name + '.txt', 'w'); answers_p.close()
file = open('questions.txt', 'r')
# Задаем вопросы и получем ответы, записываем в файл с именем игрока
for quest in file:
print(quest)
a = input('Введите ответ: ')
answers_p = open(name + '.txt', 'a')
answers_p.write(a + '\n')
file.close()
answers_p.close()
number = 0
file_1 = open(name + '.txt', 'r')
file_2 = open('answers.txt', 'r')
answers_p = file_1.readlines()
right_answers = file_2.readlines()
for ans in right_answers:
if answers_p[:number] == right_answers[:number]:
score_right += 1
else:
score_wrong += 1
number += 1
file_1.close(); file_1 = open(name + '.txt', 'a')
file_2.close()
file_1.write('Right answers: ' + str(score_right) + '\nWrong answers: ' + str(score_wrong))
|
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Container constraint tests
"""
import doctest
import unittest
class TestUnaddableError(unittest.TestCase):

    def test_str(self):
        """The message names the object, the container, and the extra info."""
        from zope.container.interfaces import UnaddableError
        error = UnaddableError("'container'", "'obj'", "Extra")
        expected = "'obj' cannot be added to 'container': Extra"
        self.assertEqual(str(error), expected)
class TestCheckObject(unittest.TestCase):

    def test_non_container(self):
        """checkObject rejects targets that are not Zope containers."""
        from zope.container.constraints import checkObject
        expected_message = "Container is not a valid Zope container."
        with self.assertRaisesRegex(TypeError, expected_message):
            checkObject({}, 'foo', 42)
class TestCheckFactory(unittest.TestCase):

    def test_no_validate_on_parent(self):
        """checkFactory passes when the factory declares no constraints."""
        from zope.container.constraints import checkFactory

        class ConstraintFreeFactory:
            def getInterfaces(self):
                return {}

        self.assertTrue(checkFactory({}, 'name', ConstraintFreeFactory()))
class TestTypesBased(unittest.TestCase):

    def test_raw_types(self):
        """A relative dotted name resolves to the class it refers to."""
        from zope.container.constraints import _TypesBased
        based = _TypesBased('.TestTypesBased', module=__name__)
        self.assertEqual(based.types, [TestTypesBased])

    def test_raw_types_property(self):
        """On the class itself, ``types`` is a descriptor, not a list."""
        from zope.container.constraints import _TypesBased
        descriptor = _TypesBased.types
        self.assertTrue(hasattr(descriptor, '__get__'))
def test_suite():
    """Collect this module's unit tests plus the constraints doctests."""
    suites = [
        unittest.defaultTestLoader.loadTestsFromName(__name__),
        doctest.DocTestSuite('zope.container.constraints'),
        doctest.DocFileSuite('../constraints.rst'),
    ]
    return unittest.TestSuite(suites)
|
def get_variable_type(variable_type):
    """Normalize a case-insensitive type alias to its canonical name.

    Returns a tuple ``(name_as_given, canonical_upper_case_type)``.
    Names with no alias entry pass through upper-cased but otherwise
    unchanged (e.g. 'DOUB', 'CSTR', or unknown names).
    """
    aliases = {
        'BYTE': 'INT1',
        'WORD': 'INT2',
        'INT': 'INT4',
        'LONG': 'INT8',
        'DOUBT': 'DOUB', 'DO_UB': 'DOUB', 'DOUBLE': 'DOUB',
        'DARK': 'DARR',
        'BARK': 'IARR1', 'BARR': 'IARR1',
        'WAR': 'IARR2', 'WARR': 'IARR2',
        'LIAR': 'IARR4', 'IARR': 'IARR4',
        'QUARK': 'IARR8', 'QARR': 'IARR8',
    }
    canonical = variable_type.upper()
    canonical = aliases.get(canonical, canonical)
    return (variable_type, canonical)
# Canonical scalar types: sized integers, double, and C string.
SCALAR_VAR_TYPES = ['INT1', 'INT2', 'INT4', 'INT8']+['DOUB']+['CSTR']
# Canonical array types: integer arrays by element size, plus double arrays.
ARRAY_VAR_TYPES = ['IARR1', 'IARR2','IARR4', 'IARR8']+['DARR']
ALL_VAR_TYPES = SCALAR_VAR_TYPES+ARRAY_VAR_TYPES
# Every accepted spelling: canonical names plus all of their aliases,
# grouped to mirror the mapping in get_variable_type.
ALL_VALID_VAR_TYPE_NAMES = ALL_VAR_TYPES + \
    ['BYTE', 'WORD', 'INT', 'LONG'] + ['DOUBT', 'DO_UB', 'DOUBLE'] + \
    ['DARK']+['BARK', 'BARR']+['WAR' , 'WARR']+ \
    ['LIAR', 'IARR']+['QUARK','QARR']
def sizeof_variable_type(var_type):
    """Return the size in bytes of a canonical type name, or None if unknown.

    For array types ('IARR*', 'DARR') the value is the size of one element.
    """
    if len(var_type) == 4 and var_type.startswith('INT'):
        return int(var_type[3:])
    if var_type == 'DOUB':
        return 8
    if len(var_type) == 5 and var_type.startswith('IARR'):
        return int(var_type[4:])
    if var_type == 'DARR':
        return 8
    # MAYBE: add a whole-ARRAY size... or not (not really that important)
    return None
class TypeofName:
    # Bit-flag categories for names, written as xx_yy_z groups
    # (presumably var bits / func bits / label bit -- TODO confirm).
    no_one = 0b00_00_0    # no category at all
    label = 0b00_00_1
    func = 0b00_11_0
    var = 0b11_00_0
    not_var = 0b00_11_1   # equals func | label
    local = 0b01_01_1     # one var bit + one func bit + label -- verify intent
    any_name = 0b11_11_1  # union of every category
class TypeofNameAction:
    """Actions that can be applied to a name (companion to TypeofName)."""
    _min_value = 0
    ERROR = 0
    PTR = 1
    SZ = 2   # when SZ(x) only for vars
    LEN = 3  # when LEN(arr) only for vars
    # BUGFIX: LEN (3) was added without bumping _max_value (left at 2),
    # so any _min_value.._max_value range check excluded LEN.  Keep it
    # equal to the highest defined action value.
    _max_value = 3
from bellman_ford import BellmanFord
from dijkstra import Dijkstra
class Johnson:
    @staticmethod
    def johnson(network):
        """
        Calculates all-pairs shortest distances using Johnson's algorithm.

        Parameters
        ----------
        network : object
            The network to process; presumably an STN-like object exposing
            ``length`` (node count) and ``flag`` attributes -- TODO confirm
            against the BellmanFord/Dijkstra implementations.

        Returns
        -------
        distance_matrix : List[List[int]] or bool
            A 2-D list representing the shortest distances between all the
            nodes, or False when Bellman-Ford yields no potential function
            (i.e. the network admits no consistent reweighting).
        """
        distance_matrix = [[] for x in range(network.length)]
        # Reweighting potentials; a falsy result aborts the whole run.
        potential_function = BellmanFord.bellman_ford(network)
        if not potential_function:
            return False
        # One reweighted Dijkstra pass per source node fills one row each.
        for node_idx in range(network.length):
            distance_matrix[node_idx] = Dijkstra.dijkstra(
                network, node_idx, potential_function=potential_function)
        # NOTE(review): flag is cleared as a side effect -- purpose is not
        # visible from this file; verify against the network class.
        if network.flag:
            network.flag = False
        # network.distance_matrix = distance_matrix
        return distance_matrix
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.