text stringlengths 38 1.54M |
|---|
# convert from png to jpg
# png - RGBA (carries an alpha channel)
# jpg - RGB (JPEG cannot store alpha, so it must be dropped before saving)
from PIL import Image
# current image: open the source PNG
blue_png = Image.open('C:\\Users\\svfarande\\onedrive\\Documents\\Study '
                      'MAterial\\Python\\PyCharm Projects\\PyBootCamp\\Images\\blue_color.png')
# strip the alpha channel so the image can be written as JPEG
blue_jpg = blue_png.convert('RGB')
# new image: save alongside the original with a .jpg extension
blue_jpg.save('C:\\Users\\svfarande\\onedrive\\Documents\\Study MAterial\\Python\\PyCharm '
              'Projects\\PyBootCamp\\Images\\blue_color.jpg')
|
from tkinter import *
from tkinter import messagebox
from tkinter import simpledialog
from dateutil import parser
import datetime
import TakeTest, Feedback, CreateTest, login, Test
import csv
import os
import shelve
# Note for later self: check if a test name with the same name exists when
# creating a test. Maybe also add a timer so it gets deleted automatically.
# Module-level cache of test metadata tuples for the selected module:
# (name, duration, attempts, type[, duedate]) — filled by Welcome.retrieveTests.
test_list = []
class Welcome(Frame):
    """Home screen shown after login.

    Lists the logged-in user's modules and the tests of a selected module,
    and shows teacher actions (create/edit tests, class results) or student
    actions (take test, view result) depending on login.is_teacher.
    """

    # GUI Setup
    def __init__(self, master):
        # Initialise the frame, build the widgets and the action buttons.
        Frame.__init__(self, master)
        self.grid()
        self.overView()
        self.createButtons()

    def overView(self):
        """Create the welcome banner plus the module and test Listboxes."""
        lblQuestion = Label(self, text='WELCOME '+login.name, font=('MS', 8, 'bold'))
        lblQuestion.grid(row=0, column=4, rowspan=2)
        # Create widgets to select a module from a list
        string = ""
        if login.is_teacher:
            string = "Modules you can create an assesment in"
        else:
            string = "Modules you may have assesments in"
        lblModules = Label(self, text=string, font=('MS', 8, 'bold'))
        lblModules.grid(row=2, column=0, columnspan=2, sticky=NE)
        # Module list with its scrollbar.
        self.listProg = Listbox(self, height=3)
        scroll = Scrollbar(self, command=self.listProg.yview)
        self.listProg.configure(yscrollcommand=scroll.set)
        self.listProg.grid(row=3, column=0, columnspan=2, sticky=NE)
        scroll.grid(row=3, column=4, sticky=W)
        modules_list = self.retrieveModules()
        for module in modules_list:
            self.listProg.insert(END, module)
        #self.listProg.selection_set(END)
        # Test list (filled later by checkTest) with its scrollbar.
        self.listTest = Listbox(self, height=3)
        scroll = Scrollbar(self, command=self.listTest.yview)
        self.listTest.configure(yscrollcommand=scroll.set)
        self.listTest.grid(row=7, column=0, columnspan=2, sticky=NE)
        scroll.grid(row=7, column=4, sticky=W)

    def retrieveModules(self):
        """Return the module names assigned to the current user.

        Reads user_modules.csv, whose rows look like:
        username, module1, module2, ... (possibly with empty trailing cells).
        """
        modules_list = []
        with open('user_modules.csv') as csvfile:
            rdr = csv.reader(csvfile)
            for row in rdr:
                if row[0] == login.username:
                    # Copy every non-empty module cell after the username.
                    for i in range(1, len(row)):
                        if row[i] != "":
                            modules_list.append(row[i])
        return modules_list

    def retrieveTests(self, module):
        """Fill the global test_list with this module's tests and return it.

        Reads tests_overview.csv. Each appended tuple is
        (name, duration, attempts, type[, duedate]); the duedate (row[6]) is
        only stored for non-formative (summative) tests.
        Returns -1 instead of an empty list when the module has no tests.
        """
        global test_list  # global so it can also be accessed from Create_Test
        test_list = []
        with open('tests_overview.csv') as csvfile:
            rdr = csv.reader(csvfile)
            for row in rdr:
                if row[0] == module:
                    #> row[4] is TEST DURATION, row[5] is ATTEMPTS allowed
                    #> (1 for summative, 3 for formative), row[2] is TEST TYPE
                    print(row)
                    if row[2] == "F":
                        test_list.append((row[1], row[4], row[5], row[2]))
                    else:
                        #> STORE THE DUEDATE AT INDEX 4 OF THE TUPLE
                        test_list.append((row[1], row[4], row[5], row[2], row[6]))
        if len(test_list) == 0:
            # rather than return an empty list, return -1
            return -1
        else:
            return test_list

    def getIndividualResults(self):
        """Open the per-student results window (teacher action)."""
        # Imported lazily — presumably to avoid import cost/cycles at startup.
        import viewResult
        viewResult.View_Results(Toplevel())

    def createButtons(self):
        """Create the action buttons; the set shown depends on the role."""
        butCheck = Button(self, text='Check for Tests', font=('MS', 8, 'bold'), command=self.checkTest)
        butCheck.grid(row=4, column=0, columnspan=2)
        if login.is_teacher:
            # Teacher actions.
            butCreate = Button(self, text='Create TEST!', font=('MS', 8, 'bold'), command=self.createTest)
            butCreate.grid(row=8, column=0, columnspan=2)
            butEdit = Button(self, text='Edit Test', font=('MS', 8, 'bold'), command=self.editTest)
            butEdit.grid(row=8, column=3, columnspan=2)
            butView = Button(self, text='View Class results', font=('MS', 8, 'bold'), command=self.viewClassResults)
            butView.grid(row=8, column=6, columnspan=2)
            butIndv = Button(self, text='View Individual Results', font=('MS', 8, 'bold'), command=self.getIndividualResults)
            butIndv.grid(row=8, column=9, columnspan=2)
        else:
            # Student actions.
            butTake = Button(self, text='Take TEST!', font=('MS', 8, 'bold'), command=self.takeTest)
            butTake.grid(row=8, column=0, columnspan=2)
            butResult = Button(self, text='View Result', font=('MS', 8, 'bold'), command=self.getResult)
            butResult.grid(row=8, column=3, columnspan=2)

    def viewClassResults(self):
        """Score every student's stored answers for the selected test and show
        a per-question success-percentage graph plus a class results window."""
        if self.listTest.curselection() != ():
            index = self.listTest.curselection()[0]
            testname = str(self.listTest.get(index))
            db = shelve.open("test_results/"+testname+"_results")
            students = []
            attempts = []       # NOTE(review): populated nowhere — confirm dead
            all_students = [[]]  # per-student list of 1/0 per question
            position = 0
            total = 0           # NOTE(review): unused — confirm dead
            for item in db:
                students.append(item)
            for i in range(len(students)):
                # NOTE(review): `results` is never read afterwards — confirm dead.
                results = db.get(students[i]).toString()[2]
                score = 0
                correctAnswers = []
                result = db.get(students[i]).toString()
                #> Get the answers to the questions and store in correctAnswers
                with open(testname+".csv") as testfile:
                    rdr = csv.reader(testfile)
                    for row in rdr:
                        correctAnswers.append((int(row[5]), int(row[6]), int(row[7]), int(row[8])))
                #> iterate through the answers, marking each question 1/0
                for b, answer in enumerate(correctAnswers):
                    if result[2][b] == answer:
                        score += 1
                        all_students[position].append(1)
                    else:
                        all_students[position].append(0)
                all_students.append([])
                position += 1
                students[i] = (students[i], score)
            question_x = [0]*(len(all_students[0]))
            # Drop the trailing empty list appended by the loop above.
            all_students.pop(len(all_students)-1)
            try:
                # Sum correct answers per question across the class.
                for i in range(len(all_students[0])):
                    for student in all_students:
                        question_x[i] += student[i]
                question_list = []
                for i in range(len(question_x)):
                    # Convert the count to a percentage of the class.
                    question_x[i] = round(question_x[i]/len(all_students)*100, 1)
                    question_list.append(i+1)
                print(all_students)
                print(question_x)
                import testgrades
                import ClassResults
                classResult = ClassResults.class_results(Tk(), students, testname)
                testgrades.display_graph(question_x, question_list)
            except IndexError:
                # all_students is empty when no results exist in the shelf.
                messagebox.showwarning("Note", "No students have taken this test yet!")

    def checkTest(self):
        """ This function appends the tests available for a given
        module to the Listbox listTest.
        """
        if self.listProg.curselection() != ():  # check the user selected a module
            index = self.listProg.curselection()[0]
            strModule = str(self.listProg.get(index))
            # retrieve tests for that module
            test_list = self.retrieveTests(strModule)
            # i.e. if retrieveTests doesn't return -1
            if test_list != -1:
                self.listTest.delete(0, END)
                for test in test_list:
                    self.listTest.insert(END, test[0])
                self.listTest.selection_set(END)
            else:
                # clear list box and show message
                self.listTest.delete(0, END)
                messagebox.showwarning("Note!", "There are no tests for that module. ")
        else:
            messagebox.showwarning("ERROR", "Please select a module!")

    def editTest(self):
        """Open the selected test in the Create_Test editor window."""
        if self.listTest.curselection() != ():
            t1 = Toplevel()
            t1.title("Test")
            index = self.listTest.curselection()[0]
            testfile = str(self.listTest.get(index))
            # Try - Except can be used if necessary
            #try:
            CreateTest.create_file = 1
            CreateTest.Create_Test(t1, testfile+'.csv')
            #except FileNotFoundError:
            #    messagebox.showwarning("ERROR", "Test only exists in tests_overview.csv")
            #    t1.destroy()
        else:
            messagebox.showwarning("ERROR", "Please a pick an existing test to edit.")

    def editTestFast(self, testfile):
        """Open `testfile` directly in the editor (used right after creation)."""
        if self.listTest.curselection() != ():
            t1 = Toplevel()
            t1.title("Test")
            CreateTest.Create_Test(t1, testfile+'.csv')

    def createTest(self):
        """ This method creates an empty test csv file with a filename specified by the user in a
        dialog box that appears. It then appends the test's metadata (teacher, testname, module)
        to the tests_overview.csv file """
        if self.listProg.curselection() != ():
            index = self.listProg.curselection()[0]
            strModule = str(self.listProg.get(index))
            name = login.name
            testName = simpledialog.askstring("Input", "Enter test name")
            # if testName is None/empty or contains forbidden filename characters
            # NOTE(review): execution continues after this warning (no return) —
            # confirm whether it should abort here.
            if not testName or len([i for i in testName if i in ['/', '\\', '?', '%', '*', ':', '|', '"', '<', '>', '.']]) != 0:
                messagebox.showinfo("Error", "You didn't enter a name or you \nused a forbidden character!")
            testType = simpledialog.askstring("Input", "Formative Test: F, Summative Test: S")
            duedate = False  # stays False for formative tests (no due date)
            if testType and testType.upper() == 'S':
                # Keep asking until a parseable, future due date is entered.
                invalid_input = True
                while (invalid_input == True):
                    dueDate = simpledialog.askstring("Input", "Please enter the date the assesment is due in a following format 'Aug 28 1999 12:00AM'")
                    try:
                        duedate = parser.parse(dueDate)
                        now = datetime.datetime.now()
                        print((now-duedate).total_seconds())
                        if (now-duedate).total_seconds() > 0:
                            messagebox.showwarning("ERROR", "Please enter a date that's in the future")
                        else:
                            invalid_input = False
                    except:
                        messagebox.showwarning("ERROR", "Enter the due date in a valid format")
                # Clamp the duration into the [15, 120] minute range.
                testDuration = simpledialog.askinteger("Input", "Enter the test duration time in minutes.\n (Min: 15, Max: 120)")
                if testDuration < 15:
                    #> change testDuration to 15
                    #testDuration = 15
                    pass
                elif testDuration > 120:
                    testDuration = 120
            elif testType and testType.upper() == 'F':
                #> Formative tests don't need time limits, so store the
                #> sentinel 'No' instead of a number of minutes.
                testDuration = 'No'
            else:
                messagebox.showwarning("ERROR", "Enter F or S!")
                return
            # check if the file already exists in the folder or if testName for
            # the selected module is already in tests_overview
            if os.path.isfile('.\\{}.csv'.format(testName)) == False and testName not in test_list:
                if duedate == False:
                    Test.test_file(testName, testType.upper(), strModule, name, testDuration)
                else:
                    Test.test_file(testName, testType.upper(), strModule, name, testDuration, duedate)
                print('...Test Created...\n'+120*'-'+'\nTest Name: {0:30}|Type: {1:10}|Teacher: {2:25}|Duration: {3:9}\n'.format(testName, 'Formative' if testType.upper() == 'F' else 'Summative',
                      name, 'No time limit' if testType.upper() == 'F' else str(testDuration) + ' minutes' + str(duedate)))
                self.checkTest()
                self.editTestFast(testName)
            elif testName:
                messagebox.showwarning("ERROR", "Test with that name already exists!")
            else:
                messagebox.showwarning("ERROR", "Something went wrong?!")
        else:
            messagebox.showwarning("ERROR", "Please select a module!")

    def takeTest(self):
        """Open the selected test for the student, enforcing the summative due
        date / single attempt and the formative three-attempt limit."""
        if self.listTest.curselection() != ():
            index = self.listTest.curselection()[0]
            testName = str(self.listTest.get(index))
            # Look the test's metadata up in the global test_list.
            testType = [i[3] for i in test_list if i[0] == testName]
            testType = str(testType[0])  #> convert it to a string
            timeLimit = [i[1] for i in test_list if i[0] == testName]
            attemptsAllowed = [i[2] for i in test_list if i[0] == testName]
            if messagebox.askokcancel("Are you sure", "This is a test "+testType + " test and you only get " + attemptsAllowed[0]+" attempts in total"):
                print("Taking Test:", testName)
                print("attemptsAllowed =", attemptsAllowed[0])
                print("testType =", testType)
                db = shelve.open("test_results/"+testName+"_results")
                # check if the student's ID exists in the database; if the lookup
                # succeeds the student already has a result record
                try:
                    if testType == 'S':
                        """WE only have test name to work on :( so i have to check all of the test_overview file for the datetime
                        #> Duedate is saved in test_list as the 4th index so don't need this
                        with open('tests_overview.csv') as csv_file:
                            csv_reader = csv.reader(csv_file, delimiter=',')
                            for row in csv_reader:
                                if row[1] == testName:
                                    dueDate = row[6]
                        """
                        duedate = self.turnDueDateToObject(testName)  #> Dis be quicker!
                        now = datetime.datetime.now()
                        if now > duedate:  # the due date has passed: don't allow the attempt
                            messagebox.showinfo("TOO LATE!!!", "Sorry, the due date for this assesment has passed already")
                        else:
                            # Raises KeyError when the student has no record yet.
                            db[login.username]
                            messagebox.showinfo("Can't take summative test!", "You have already sat this test")
                            db.close()
                    elif testType == 'F':
                        # Raises KeyError when the student has no record yet.
                        db[login.username]
                        attempts = db.get(str(login.username)).toString()[1]
                        print("You have made {} attempts so far".format(attempts))
                        if attempts == 3:
                            messagebox.showinfo("Can't take formative test!", "You have already used your final attempt")
                            db.close()
                        else:
                            db.close()
                            t1 = Toplevel()
                            t1.geometry("700x300")
                            app = TakeTest.Take_Test(t1, testName, timeLimit, login.username, testType, attempts)
                except KeyError:
                    # No record for this student: first attempt, open the test.
                    db.close()
                    t1 = Toplevel()
                    t1.geometry("700x300")
                    app = TakeTest.Take_Test(t1, testName, timeLimit, login.username, testType)
        else:
            messagebox.showwarning("ERROR", "Please select a test to take!")

    def getResult(self):
        """Show the student's feedback window for the selected test."""
        if self.listTest.curselection() != ():
            index = self.listTest.curselection()[0]
            testname = str(self.listTest.get(index))
            testType = [i[3] for i in test_list if i[0] == testname]
            testType = str(testType[0])
            t1 = Toplevel()
            if testType == 'S':
                # Summative feedback also needs the due date.
                duedate = self.turnDueDateToObject(testname)
                print(duedate)
                fdbck = Feedback.Show_Results(t1, login.username, testname, testType, duedate)
            elif testType == 'F':
                fdbck = Feedback.Show_Results(t1, login.username, testname, testType)
            else:
                messagebox.showwarning("Error", "Is your testtype a string other than 'F' or 'S'?")

    def turnDueDateToObject(self, testname):
        """Convert the test's stored due date string ('YYYY-MM-DD HH:MM:SS')
        into a datetime.datetime object."""
        print("Getting duedate and doing stuff...........")
        #> get the duedate (index 4 of the test_list tuple) and split date/time
        duedate = [i[4] for i in test_list if i[0] == testname][0].split()
        theDate = duedate[0].split("-")  #> [year, month, day]
        theTime = duedate[1].split(":")  #> [hour, minute, second]
        #> create datetime object, converting values to ints as they are strings
        dueDate = datetime.datetime(int(theDate[0]), int(theDate[1]), int(theDate[2]),
                                    int(theTime[0]), int(theTime[1]), int(theTime[2]))
        return dueDate
# mainloop — only start the home window if the login module authenticated.
#if login.username != "":
if login.loggedIn != False:
    root = Tk()
    root.title("HOME " + str(login.username))
    app = Welcome(root)
    root.mainloop()
|
from .location import Location
from .playlist_user import PlaylistUser
from .playlist import Playlist
from .user import User
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError, Warning
class AccountTaxTemplate(models.Model):
    """Extends account.tax.template with Peruvian (PE) EDI tax codes."""
    _inherit = 'account.tax.template'

    # SUNAT tax-type code used on Peruvian electronic invoices.
    l10n_pe_edi_tax_code = fields.Selection(selection=[
        ('1000', 'IGV - VAT - Impuesto General a las Ventas'),
        ('1016', 'IVAP - VAT - Impuesto a la Venta Arroz Pilado'),
        ('2000', 'ISC - EXC - Impuesto Selectivo al Consumo'),
        ('9995', 'EXP - FRE - Exportación'),
        ('9996', 'GRA - FRE - Gratuito'),
        ('9997', 'EXO - VAT - Exonerado'),
        ('9998', 'INA - FRE - Inafecto'),
        ('9999', 'OTROS - OTH - Otros tributos')
    ], string='Código SUNAT EDI')
    # NOTE(review): declared without a `selection` list — presumably the
    # values come from a related field or an override; confirm this is intended.
    l10n_pe_edi_unece_category = fields.Selection(string='Código UNECE 5305')
|
from flask_restx import fields
from weakref import WeakSet
class AnyNotNullField(fields.Raw):
    """Raw field that passes any value through unchanged, except WeakSet
    instances, for which format() falls through and returns None."""
    __schema_type__ = 'any'

    def format(self, value):
        # WeakSet values are deliberately suppressed (implicit None).
        if not isinstance(value, WeakSet):
            return value
class ForbiddenField(fields.Raw):
    """Raw field that always serializes to None — used to strip forbidden
    keys from the marshalled output."""
    __schema_type__ = 'any'

    def format(self, value):
        # Always drop the value.
        return
# ==================== Wildcard fields =======================
# Catch-all for any remaining keys; None values are kept (skip_none=False).
wild_any_fields = fields.Wildcard(AnyNotNullField, description="other fields", skip_none=False, allow_null=False)
# Catch-all that blanks out matched keys on output via ForbiddenField.
wild_forbid_fields = fields.Wildcard(ForbiddenField, description="forbidden fields for output")
|
from tkinter import *
from tkinter import Menu
import ScrolledText
import tkinter.messagebox
import tkinter.filedialog
# Main application window for the editor.
root = Tk(className="Project Laminus")
def dummy():
    """Placeholder menu callback: just announces itself on stdout."""
    placeholder_message = "I am a Dummy Command,I will be removed in the next step"
    print(placeholder_message)
def open_file():
    """Prompt for a text file and load its contents into the editor pane.

    Does nothing if the user cancels the dialog.
    """
    # BUG FIX: askopenfile lives in tkinter.filedialog (tkinter has no
    # askopenfile attribute), and the editor widget is the module-level
    # `textPad` — there is no `txt1` anywhere in this file.
    f = tkinter.filedialog.askopenfile(defaultextension=".txt", filetypes=[("All Types", ".*")])
    if not f:
        return
    textPad.delete(1.0, END)
    textPad.insert(END, f.read())
    f.close()
def file_save():
    """Prompt for a destination file and write the editor contents to it.

    Does nothing if the user cancels the dialog.
    """
    fout = tkinter.filedialog.asksaveasfile(mode='w', defaultextension=".txt")
    # asksaveasfile returns None on cancel — guard before writing.
    if not fout:
        return
    # BUG FIX: this is a module-level function, so there is no `self`; the
    # editor widget is the module-level `textPad`.
    text2save = str(textPad.get(0.0, END))
    fout.write(text2save)
    fout.close()
# Main editor widget (created before the menu so the callbacks can use it).
textPad = ScrolledText.ScrolledText(root, width=200, height=80)

def exit():
    """Confirm with the user, then close the application window.

    NOTE(review): shadows the builtin exit(); consider renaming.
    """
    if tkinter.messagebox.askokcancel("Exiting Laminus!", "Do you really want to quit? Save your work if you haven't!"):
        root.destroy()

def about():
    """Show the About dialog."""
    label = tkinter.messagebox.showinfo("About Laminus", "Laminus is made by Dhaval Thakur for the aid of programming")

# Menu bar setup.
menu = Menu(root)  # NOTE(review): unused — `menubar` below is the one attached to root
menubar = Menu(root)
root.configure(menu=menubar)
filemenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="File", menu=filemenu)
filemenu.add_command(label="Save", command=file_save)
filemenu.add_command(label='Open a File..', command=open_file)
# NOTE(review): add_cascade without a submenu — presumably add_command was meant.
filemenu.add_cascade(label="About..", command=about)
filemenu.add_command(label='Exit Laminus', command=exit)
#*****LOGO****
photo = PhotoImage(file="laminus.gif")
label = Label(root, image=photo)
label.pack()
textPad.pack()
root.mainloop()
import os.path
import random
import hashlib
from datetime import datetime
# Static application salt appended to every secret before hashing.
SALT = "abc123"


def add_salt(string):
    """Return *string* with the application salt appended."""
    return string + SALT
def user_exists(username):
    """Return True if a credential file '<username>.txt' exists in the CWD."""
    # Idiom: return the boolean expression directly instead of the
    # if-True/else-False boilerplate.
    return os.path.isfile(f"{username}.txt")
def generate_tokens(seed):
    """Derive five 6-character tokens by chained MD5 hashing of the seed
    concatenated with the current minute's timestamp."""
    digest_input = f"{seed}{datetime.now().strftime('%d%m%Y%H%M')}"
    tokens = []
    while len(tokens) < 5:
        digest_input = hashlib.md5(digest_input.encode("utf-8")).hexdigest()
        tokens.append(digest_input[:6])
    return tokens
def get_passwords_hash(username):
    """Read (seed, password_hash) from the user's credential file.

    The file stores the seed on the first line and the hash on the second.
    """
    with open(f"{username}.txt", "r") as credential_file:
        seed_line = credential_file.readline()
        password_line = credential_file.readline()
    return seed_line.strip(), password_line.strip()
def save_passwords_hash(username, seed, local_password):
    """Write the seed and the password hash, one per line, to '<username>.txt'."""
    with open(f"{username}.txt", "w") as credential_file:
        credential_file.write(f"{seed}\n{local_password}\n")
def main():
    """Interactive entry point: log an existing user in (or register a new
    one), then loop generating one-time tokens from the stored seed.

    NOTE(review): MD5 with a static salt is not a secure password-hashing
    scheme — fine for a toy, unsuitable for real credentials.
    """
    print("Login")
    username = str(input("Username: "))
    if user_exists(username):
        # Existing user: verify the entered password against the stored hash.
        print("Returning user...")
        input_password = str(input("Local password: "))
        input_password = add_salt(input_password)
        seed, local_password = get_passwords_hash(username)
        if local_password != hashlib.md5(input_password.encode("utf-8")).hexdigest():
            print("Wrong Password")
            exit(-1)
        else:
            print("Login sucessfull")
    else:
        # New user: derive a random seed and persist the hashed credentials.
        print("New user...")
        seed = str(random.getrandbits(64))
        seed = add_salt(seed)
        local_password = input("Create local password: ")
        local_password = add_salt(local_password)
        seed = hashlib.md5(seed.encode("utf-8")).hexdigest()
        local_password = hashlib.md5(local_password.encode("utf-8")).hexdigest()
        save_passwords_hash(username, seed, local_password)
    # Token generation loop: one batch per keypress.
    while True:
        input("Press any key to generate tokens")
        print("Valid tokens:", *generate_tokens(seed))


if __name__ == "__main__":
    main()
|
#!/usr/bin/python
import time
import subprocess
import sys
# Read the GPRS enable flag; if it is "1", wait briefly and launch the
# activation script with sudo.
try:
    c = open("/home/pi/Watchman/useGprs.txt", "r")
    status = c.read()
    status = status.strip()
    c.close()
except Exception as e:
    # Missing/unreadable flag file: nothing to do, exit quietly.
    sys.exit()
if status == '1':
    time.sleep(10)  # give the system time to settle before activating
    subprocess.call(['sudo', '/home/pi/Watchman/activateGprs.py'])
|
#!/usr/bin/python
from platform import python_version
import time
# Prompt for a start and an end time of day ("hours:minutes") and print the
# elapsed time as hours:minutes. Uses raw_input when running under Python 2.
if python_version().split(".")[0] == "2":
    print("Running in Python 2")
    t1 = raw_input("Starting time (hours:minutes): ")
    t2 = raw_input("Ending time (hours:minutes): ")
else:
    print("Running in Python 3")
    t1 = input("Starting time (hours:minutes): ")
    t2 = input("Ending time (hours:minutes): ")
# Parse the "H:M" strings into integer components.
t1h = int(t1.split(":")[0])
t1m = int(t1.split(":")[1])
t2h = int(t2.split(":")[0])
t2m = int(t2.split(":")[1])
hours = t2h - t1h
minutes = t2m - t1m
# Borrow an hour when the minute difference is negative.
if minutes < 0:
    hours = hours - 1
    minutes = minutes + 60
# NOTE(review): an end time earlier than the start yields a negative result —
# confirm whether wrap-around past midnight should be handled.
print("Total time: " + str(hours) + ":" + str(minutes))
|
from typing import List, Dict
from random import randint
class Solution:
    """Random Pick Index: given an array that may contain duplicates,
    pick(target) returns a uniformly random index i with nums[i] == target."""

    def __init__(self, nums: List[int]):
        # Map each value to the list of indexes at which it occurs.
        # BUG FIX: the annotation was Dict[List[int]] (invalid — Dict takes a
        # key and a value type).
        self.indexes: Dict[int, List[int]] = {}
        for i, num in enumerate(nums):
            # BUG FIX: the original replaced the whole dict with a list
            # (`self.indexes = [i]`) and appended to the dict itself
            # (`self.indexes.append(i)`); both must index by `num`.
            if num not in self.indexes:
                self.indexes[num] = [i]
            else:
                self.indexes[num].append(i)

    def pick(self, target: int) -> int:
        """Return a random index whose value equals *target*."""
        candidates = self.indexes[target]
        # BUG FIX: parenthesis placement — the original computed
        # len(self.indexes[target] - 1), which raises TypeError.
        return candidates[randint(0, len(candidates) - 1)]
|
from django.test import TestCase
from django.urls import reverse, resolve
from chat.views import Inbox,cost_chat,UserSearch,Directs,SendDirect,Inbox_cost,Daliy_Tip
class Test_url(TestCase):
    """URL-resolution tests: each named chat route must resolve to the
    expected view callable."""

    def test_Inbox(self):
        url = reverse('chat:Inbox')
        self.assertEqual(resolve(url).func, Inbox)

    def test_cost_chat(self):
        url = reverse('chat:cost_chat')
        self.assertEqual(resolve(url).func, cost_chat)

    def test_UserSearch(self):
        url = reverse('chat:usersearch')
        self.assertEqual(resolve(url).func, UserSearch)

    # Disabled: the 'chat:free_chat' route is not wired to Directs —
    # confirm the URLconf before re-enabling.
    # def test_Directs(self):
    #     url = reverse('chat:free_chat')
    #     self.assertEqual(resolve(url).func, Directs)

    def test_SendDirect(self):
        url = reverse('chat:send_direct')
        self.assertEqual(resolve(url).func, SendDirect)

    def test_Inbox_cost(self):
        url = reverse('chat:Inbox_cost')
        self.assertEqual(resolve(url).func, Inbox_cost)

    def test_Daliy_Tip(self):
        url = reverse('chat:Daliy_Tip')
        self.assertEqual(resolve(url).func, Daliy_Tip)
|
# Count how many messages were received during each hour of the day, based on
# the timestamps on the "From " separator lines of an mbox file, then print
# the counts sorted by hour.
document = open('mbox-short.txt')
hours = dict()
for line in document:
    # BUG FIX: only the "From " separator lines carry a timestamp; plain
    # startswith('From') also matched "From:" header lines, which have no
    # time field.
    if line.startswith('From '):
        line = line.split()
        # Separator lines look like:
        #   From addr Sat Jan  5 09:14:16 2008   -> the time is field 5.
        # BUG FIX: the guard was len(line) >= 4, which still allows
        # line[5] to raise IndexError on 4- or 5-field lines.
        if len(line) > 5:
            hour = line[5].split(':')[0]
            hours[hour] = hours.get(hour, 0) + 1
# Sort (hour, count) pairs by hour and print them.
count = list()
for ho, times in hours.items():
    count.append((ho, times))
count.sort()
# BUG FIX: the tuples are (hour, count); the original unpacked them as
# (times, ho), printing correct values under swapped names.
for ho, times in count:
    print(ho, times)
|
class Solution:
    def matrixMultiplication(self, n, arr):
        """Matrix-chain multiplication: minimum number of scalar multiplications
        needed to multiply the chain of n-1 matrices whose dimensions are
        arr[i-1] x arr[i] (interval DP over chain length)."""
        INF = float("inf")
        # dp[r][c] = cheapest cost to multiply matrices r..c; zero on the diagonal.
        dp = [[0 if r == c else INF for c in range(n)] for r in range(n)]
        for length in range(2, n):
            for start in range(1, n - length + 1):
                end = start + length - 1
                best = dp[start][end]
                # Try every split point between start and end.
                for split in range(start, end):
                    candidate = (dp[start][split] + dp[split + 1][end]
                                 + arr[start - 1] * arr[split] * arr[end])
                    if candidate < best:
                        best = candidate
                dp[start][end] = best
        return dp[1][n - 1]
|
import datetime

# Ask for the user's birth year and report (in Polish) whether they are an
# adult (18 or older) this calendar year.
rok_urodzenia = int(input("PODAJ ROK URODZENIA"))
aktualny_rok = datetime.datetime.now().year
wynik = aktualny_rok - rok_urodzenia
if wynik >= 18:
    print("Jesteś pełnoletni!")
else:
    print("Nie jesteś pełnoletni!")
from CSVinfo import *
class MotherboardData:
    '''
    This class contains only static methods. Methods name are descriptive of their function.
    Additional required information, wherever necessary, has been specified.
    Requires the MOTHERBOARD_* column constants imported from CSVinfo.
    '''

    # BUG FIX: the docstring declares these as static methods, but none was
    # decorated with @staticmethod — calling them on an instance would bind
    # `row` to self. The decorators make both call styles work.

    @staticmethod
    def get_motherboard_price(row):
        """Extract the price from the prices column; fall back to the
        ']'-delimited form when the ','-delimited parse yields 0."""
        price = MotherboardData.extract_num_data(row[MOTHERBOARD_PRICES], 1, ',')
        if price == 0:
            price = MotherboardData.extract_num_data(row[MOTHERBOARD_PRICES], 1, ']')
        return price

    @staticmethod
    def get_motherboard_performance_score(row):
        """Ethernet score plus a 50-point bonus for an onboard USB3 header."""
        return MotherboardData.get_motherboard_ethernet_score(row) + \
            (50 if MotherboardData.is_motherboard_usb3_header(row) else 0)

    @staticmethod
    def extract_num_data(col, start, str):
        '''
        Returns the value in 'col' as a float. The value is converted starting from the index
        'start' and ending at the index before the first occurence of 'str'.
        NOTE(review): the parameter name `str` shadows the builtin; kept for
        interface compatibility with existing keyword callers.
        '''
        if str not in col:
            return 0
        return float(col[start:col.find(str)])

    @staticmethod
    def get_motherboard_ethernet_score(row):
        """Score onboard ethernet: 50 per port for Gbps links, else 25 per port.

        Assumes the column starts with the port count digit — TODO confirm.
        """
        ethernet = row[MOTHERBOARD_ONBOARD_ETHERNET]
        multiplier = int(ethernet[0])
        if 'Gbps' in ethernet:
            return 50 * multiplier
        return 25 * multiplier

    @staticmethod
    def is_motherboard_usb3_header(row):
        """True when the onboard-USB3 column is exactly 'Yes'."""
        # Idiom: return the comparison directly instead of True/False ternary.
        return row[MOTHERBOARD_ONBOARD_USB3] == 'Yes'
|
from django.shortcuts import render, HttpResponse, redirect, reverse
from django.views.generic import View
from apps.user.models import User
import re
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired
from django.conf import settings
from django.core.mail import send_mail
from celery_tasks.tasks import send_register_active_email
from django.contrib.auth import authenticate, login
# Create your views here.
# /user/register
# /user/register
class RegisterView(View):
    """User registration."""

    def get(self, request):
        # Show the registration page.
        return render(request, 'register.html')

    def post(self, request):
        # Handle the registration form submission.
        # Receive the posted data.
        username = request.POST.get('user_name')
        password = request.POST.get('pwd')
        email = request.POST.get('email')
        allow = request.POST.get('allow')
        # Validate the data.
        if not all([username, password, email]):
            # Incomplete data.
            return render(request, 'register.html', {'errmsg': '数据不完整'})
        # Validate the email address format.
        if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})
        if allow != 'on':
            # Terms-of-service checkbox not ticked.
            return render(request, 'register.html', {'errmsg': '请同意协议'})
        # Check whether the username is already taken.
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Username not taken.
            user = None
        if user:
            return render(request, 'register.html', {'errmsg': '用户已存在'})
        # Business logic: create the account, inactive until email activation.
        user = User.objects.create_user(username, email, password)
        user.is_active = 0
        user.save()
        # The activation email is (meant to be) sent through celery.
        # Activation link: http://127.0.0.1:8000/user/active/<id>
        # The link must carry the user's identity, so the id is signed into an
        # activation token with the project SECRET_KEY (1 hour expiry).
        serializer = Serializer(settings.SECRET_KEY, 3600)
        info = {'confirm': user.id}
        token = serializer.dumps(info)
        # dumps() returns bytes — decode to str for use in a URL.
        token = str(token, 'utf-8')
        # Kept for reference: the manual send_mail variant.
        # subject = '天天生鲜欢迎信息'
        # message = '<h1> {} 欢迎您成为天天生鲜注册会员</h1> 请点击下面链接激活您的账户<br/>' \
        #           '<a href="http://127.0.0.1:8000/user/active/{}">' \
        #           'http://127.0.0.1:8000/user/active/{}' \
        #           '</a>'.format(username, token, token)
        # sender = settings.EMAIL_PROM  # sender address
        # receiver = [email]            # recipient list
        # html_message = message
        # message = ''
        # send_mail(subject, message, sender, receiver, html_message=html_message)
        #
        # NOTE(review): send_register_active_email is called directly, so the
        # mail is sent synchronously in the request — presumably .delay(...)
        # was intended to queue it through celery; confirm.
        print('------', email, username, token)
        send_register_active_email(email, username, token)
        return HttpResponse('hello')
class ActiveView(View):
    """Account activation via the emailed token link."""

    def get(self, request, token):
        """Activate the user identified by the signed token."""
        # Decode the token to recover the user id (same key/expiry as signing).
        serializer = Serializer(settings.SECRET_KEY, 3600)
        try:
            info = serializer.loads(token)
            # Fetch and activate the user.
            user_id = info['confirm']
            user = User.objects.get(id=user_id)
            user.is_active = 1
            user.save()
            # Redirect to the login page.
            return redirect(reverse('user:login'))
        except SignatureExpired as e:
            # The activation link has expired.
            return HttpResponse('激活链接已过期')
class LoginView(View):
    """Login."""

    def get(self, request):
        '''Show the login page, pre-filling a remembered username.'''
        # Was the username remembered in a cookie?
        if 'username' in request.COOKIES:
            username = request.COOKIES.get('username')
            checked = 'checked'
        else:
            username = ''
            checked = ''
        # Render the template with the remembered state.
        return render(request, 'login.html', {'username': username, 'checked': checked})

    def post(self, request):
        # Receive the form data.
        username = request.POST.get('username')
        password = request.POST.get('pwd')
        # Validate the data.
        if not all([username, password]):
            return render(request, 'login.html', {'errmsg': '数据不完整'})
        # Business logic: credential check.
        user = authenticate(username=username, password=password)
        if user is not None:
            # Username and password are correct.
            if user.is_active:
                # Account is activated — establish the session.
                login(request, user)
                # Build the redirect first so a cookie can be attached to it.
                response = redirect(reverse('goods:index'))
                # Should the username be remembered?
                remember = request.POST.get('remember')
                if remember == 'on':
                    # Remember the username for a day.
                    response.set_cookie('username', username, max_age=24*3600)
                else:
                    # Forget it next time.
                    response.delete_cookie('username')
                return response
            else:
                # Account not activated yet.
                return render(request, 'login.html', {'errmsg': '用户未激活'})
        else:
            # Wrong username or password.
            return render(request, 'login.html', {'errmsg': '用户名或密码错误'})
|
# Simple cash-register loop: accumulate integer item prices until the user
# enters 'q', then print the final bill.
# FIX: renamed the accumulator from `sum` — it shadowed the builtin sum().
total = 0
while True:
    number = input("Enter the item price or press q to quit : ")
    if number != 'q':
        # NOTE(review): int() raises ValueError on any other non-numeric
        # input — confirm whether invalid entries should be re-prompted.
        total += int(number)
        print(f"Order total so far {total}")
    else:
        print(f"Your bill total is {total}")
        print("Thanks for shopping")
        break
from typing import MutableMapping
from django.shortcuts import render
from django.http import HttpResponse
from .models import city
def index(request):
    """Render index.html with every `city` row in the context as 'citys'."""
    citys = city.objects.all()
    return render(request, 'index.html', {'citys': citys})
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
# OpenAPI template: bearer-token security scheme, shared model definitions,
# and default security applied to every endpoint.
swagger_template = {
    # NOTE(review): empty swagger version string — "openapi" below is the one
    # actually populated; confirm this key is needed at all.
    "swagger": "",
    "openapi": "3.0.0",
    "components": {
        "securitySchemes": {
            "BearerAuth": {
                "type": "http",
                "scheme": "bearer",
                "bearerFormat": "JWT",
            }
        }
    },
    "definitions": {
        "User": {
            "type": "object",
            "properties": {
                "username": {"type": "string"},
                "first_name": {"type": "string"},
                "last_name": {"type": "string"},
                "dob": {"type": "string", "format": "date"},
                "phone_number": {"type": "string"},
                "country": {"type": "string"},
                "state": {"type": "string"},
                "city": {"type": "string"},
                "address_1": {"type": "string"},
                "address_2": {"type": "string"},
                "zip": {"type": "string"},
            },
            # NOTE(review): the example uses "address", which is not among the
            # declared properties (address_1/address_2) — confirm intended.
            "example": {
                "username": "sunmilee",
                "first_name": "Sunmi",
                "last_name": "Lee",
                "dob": "2020-05-07",
                "address": "1 Hacker Way",
            },
        },
        "DiemCurrencies": {
            "type": "string",
            "enum": ["XUS"],
        },
        "TransactionDirections": {
            "type": "string",
            "enum": ["received", "sent"],
        },
        "Transaction": {
            "type": "object",
            "properties": {
                "id": {"type": "string"},
                "amount": {"type": "integer"},
                "currency": {"$ref": "#/definitions/DiemCurrencies"},
                "direction": {"$ref": "#/definitions/TransactionDirections"},
                "timestamp": {"type": "string", "format": "date-time"},
                "source": {"$ref": "#/definitions/VaspAccountDetails"},
                "destination": {"$ref": "#/definitions/VaspAccountDetails"},
                "blockchain_tx": {"$ref": "#/definitions/BlockchainTransaction"},
            },
        },
        "VaspAccountDetails": {
            "type": "object",
            "properties": {
                "vasp_name": {"type": "string"},
                "user_id": {"type": "string"},
            },
        },
        "BlockchainTransaction": {
            "type": "object",
            "properties": {
                "version": {"type": "integer"},
                "status": {"type": "string"},
                "expirationTime": {"type": "string"},
                "source": {"type": "string"},
                "destination": {"type": "string"},
                "amount": {"type": "integer"},
                "sequenceNumber": {"type": "integer"},
            },
        },
    },
    # Apply bearer auth to all operations by default.
    "security": [{"BearerAuth": []}],
}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
import statsmodels.formula.api as sm
# Fit a cubic polynomial OLS regression of the last column (the target) on
# each of the first four feature columns of the CCPP dataset, printing each
# fitted model's summary table.
data = pd.read_excel(".\\Data\\Folds5x2_pp.xlsx")
col = data.columns
for i in range(0, 4):
    # One feature plus the target column per model.
    train_data = data[[col[i], col[-1]]]
    # Patsy formula like: TARGET ~ X + I(X**2) + I(X**3)
    clf = sm.ols(formula=col[-1]+'~ ' + col[i] + '+ I('+col[i]+'**2) + I('+col[i]+'**3)',
                 data=train_data).fit()
    table = clf.summary()
    print(table)
|
import socket
import time
# UDP listener: bind to this host on port 12346 and print the source port of
# every datagram received. Runs until interrupted.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
host = socket.gethostname()
clientport = 12346
s.bind((host, clientport))
while True:
    data, addr = s.recvfrom(1024)
    # BUG FIX: `print addr[1]` is Python 2 statement syntax and a SyntaxError
    # under Python 3; the call form works on both.
    print(addr[1])
import numpy as np
import matplotlib.pyplot as plt
import sklearn.metrics.pairwise
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import metrics
from active_learning.core import ActiveLearner, MAL1, MismatchFirstFarthestTraversal, LargestNeighborhood
def target_func(X):
    """Ground-truth labels for 2-D points: points inside either of two
    triangular regions get class 1, everything else class 0."""
    y = np.zeros(len(X))
    for i, (x0, x1) in enumerate(X):
        in_lower_triangle = x0 > 0.2 and x1 > 0.2 and x1 < 0.7 - x0
        in_upper_triangle = x0 < 0.8 and x1 < 0.8 and x1 > 1.3 - x0
        y[i] = 1 if (in_lower_triangle or in_upper_triangle) else 0
    return y


def data_generation(n=5000):
    """Generate n uniform points in [0,1)^2 with their labels.

    The RNG is reseeded to 0, so repeated calls are deterministic.
    """
    np.random.seed(0)
    X = np.random.random([n, 2])
    return X, target_func(X)
def plot_data(X, y):
    """Scatter-plot labelled 2-D points (red dot = class 1, green = class 0)
    and draw the outlines of the two triangular class-1 regions."""
    ax = plt.axes()
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    # Lower-left triangle boundary.
    plt.plot([0.2, 0.5], [0.2, 0.2], 'r-')
    plt.plot([0.2, 0.2], [0.2, 0.5], 'r-')
    plt.plot([0.2, 0.5], [0.5, 0.2], 'r-')
    # Upper-right triangle boundary.
    plt.plot([0.8, 0.5], [0.8, 0.8], 'r-')
    plt.plot([0.8, 0.8], [0.5, 0.8], 'r-')
    plt.plot([0.8, 0.5], [0.5, 0.8], 'r-')
    for i in range(len(X)):
        if y[i] == 1:
            plt.plot(X[i, 0], X[i, 1], 'r.')
        else:
            plt.plot(X[i, 0], X[i, 1], 'g.')
    plt.show()
def random_sampling_process(X_train, y_train, X_test, y_test):
    "The decision boundary cannot be learned LogisticRegression. Decision tree is used here."
    # Baseline: plain ActiveLearner (random batch selection) with gradient boosting.
    learner = ActiveLearner(X_train,
                            initial_batch_size=100,
                            batch_size=100,
                            classifier=GradientBoostingClassifier())
    # n_batch = int(len(X_train) / learner.batch_size)
    n_batch = 5
    # NOTE(review): this banner names the mismatch-first strategy, but the
    # learner here is the plain ActiveLearner — looks copy-pasted; confirm.
    print("Query strategy: Mismatch-first farthest-traversal...")
    for i in range(n_batch):
        print("Batch {0}:".format(i + 1))
        batch = learner.draw_next_batch()
        learner.annotate_batch(batch, y_train[batch])
        print("Annotated instances: {0}".format(len(learner.L)))
        # Visualize which instances have been annotated so far.
        plot_data(X_train[learner.L], y_train[learner.L])
    print("Training starts.")
    learner.train()
    print("Training is done.")
    # Evaluate on the held-out set with macro-averaged F1.
    y_test_pred = learner.classifier.predict(X_test)
    f1 = metrics.f1_score(y_test, y_test_pred, average='macro')
    print("The average F1 score is ", f1)
def mismatch_first_farthest_traversal(X_train, y_train, X_test, y_test):
    """Run the mismatch-first farthest-traversal strategy and report test F1.

    The decision boundary cannot be learned by LogisticRegression, so a
    gradient-boosting classifier is used instead.  After training, the test
    predictions are plotted for visual inspection.
    """
    learner = MismatchFirstFarthestTraversal(X_train,
                                             initial_batch_size=100,
                                             batch_size=100,
                                             classifier=GradientBoostingClassifier())
    learner.dist_metric = 'euclidean'
    learner.medoids_sort = 'distance'
    # n_batch = int(len(X_train) / learner.batch_size)
    n_batch = 5
    print("Query strategy: Mismatch-first farthest-traversal...")
    for batch_idx in range(n_batch):
        print("Batch {0}:".format(batch_idx + 1))
        queried = learner.draw_next_batch()
        learner.annotate_batch(queried, y_train[queried])
        print("Annotated instances: {0}".format(len(learner.L)))
        #plot_data(X_train[learner.L], y_train[learner.L])
    print("Training starts.")
    learner.train()
    print("Training is done.")
    y_test_pred = learner.classifier.predict(X_test)
    plot_data(X_test, y_test_pred)
    f1 = metrics.f1_score(y_test, y_test_pred, average='macro')
    print("The average F1 score is ", f1)
#uncertainty_sampling(X)
# Script entry point: build train/test sets, show the ground truth, then run
# the mismatch-first farthest-traversal strategy.
# NOTE(review): data_generation reseeds numpy with seed 0 on every call, so
# the 1000 test points are identical to the first 1000 training points —
# train and test overlap.
if __name__ == '__main__':
    X_train, y_train = data_generation(n=5000)
    X_test, y_test = data_generation(n=1000)
    #random_sampling_process(X_train, y_train, X_test, y_test)
    plot_data(X_train, y_train)
    mismatch_first_farthest_traversal(X_train, y_train, X_test, y_test)
    #MFFT(X)
|
from datetime import datetime, timedelta
from google.appengine.api import mail
from handlers.base import BaseHandler
from models.forum_subscription import ForumSubscription
from models.topic import Topic
class SendMailForumSubscribersCron(BaseHandler):
    """Cron handler that mails every forum subscriber a digest of the topics
    created during the last 24 hours."""

    def get(self):
        day_ago = datetime.now() - timedelta(days=1)
        emails = ForumSubscription.query().fetch()
        # Fixed: select topics *created within* the last 24 hours; the old
        # filter (created_at <= day_ago) matched everything OLDER than a day.
        topics = Topic.query(Topic.deleted == False, Topic.created_at >= day_ago).fetch()
        # Fixed: the topic loop used to live inside the string literal, so the
        # mail body contained the literal text "for topic in topics:" instead
        # of the topic titles.  Build the body programmatically instead.
        lines = ["Here are some new topics since the last 24h:", ""]
        for topic in topics:
            lines.append(topic.title)
        # NOTE(review): the original link host was truncated ("...appspot.");
        # completed to the .com App Engine domain — confirm.
        lines.append('<a href="http://ninjatechforum.appspot.com">Read more</a>')
        body = "\n".join(lines)
        for email in emails:
            mail.send_mail(sender="staramarsa@gmail.com", to=email.email,
                           subject="New topics", body=body)
|
import wx
class Frame(wx.Frame):
    """Main application window: an Exit button, File/Edit menu bar, status
    bar, and a spin control whose value is mirrored into a static text."""
    #add title variable
    def __init__(self, title):
        # Pass the caller's title straight through to the wx.Frame base.
        #title = title variable
        wx.Frame.__init__(self, None, \
            title = title, size = (300,200))
        self.Center()
        panel = wx.Panel(self)
        # Exit button, wired to self.exit below.
        button = wx.Button(panel,label = "Exit", \
            size = (100,40),pos = (100,30))
        button.Bind(wx.EVT_BUTTON, self.exit)
        #create menu bar
        menuBar = wx.MenuBar()
        #create the menu items
        fileMenu = wx.Menu()
        editMenu = wx.Menu()
        # NOTE(review): the menu bar is attached before any menu is appended;
        # appending first would read more conventionally — confirm it renders
        # correctly on all target platforms.
        self.SetMenuBar(menuBar)
        #add fileMenu and editMenu to menuBar
        menuBar.Append(fileMenu, "File")
        menuBar.Append(editMenu, "Edit")
        #add items to fileMenu
        fileMenu.Append(wx.NewId(), "New File", "Create a new file")
        fileMenu.Append(wx.NewId(), "Open")
        exitItem = fileMenu.Append(wx.NewId(), "Exit")
        #Bind exit menu item to exit function
        self.Bind(wx.EVT_MENU, self.exit, exitItem)
        #create status bar
        self.CreateStatusBar()
        #spin control idea
        # NOTE(review): valueText at (130,80) sits partly under the spinner
        # at (150,80) — confirm the intended layout.
        self.spinner = wx.SpinCtrl(panel, value = "0", pos = (150,80), \
            size = (70,25))
        self.valueText = wx.StaticText(panel, label = '', pos = (130,80))
        self.spinner.Bind(wx.EVT_SPINCTRL, self.spinControl)
    def exit(self, event):
        """Close the window (shared handler for the button and menu item)."""
        self.Destroy()
    def spinControl(self, event):
        """Mirror the spinner's current value into the static text label."""
        #get spin control value
        value = self.spinner.GetValue()
        #Update static text
        self.valueText.SetLabel(str(value))
# Application bootstrap: create the wx app, show the main frame, and enter
# the event loop (MainLoop blocks until the window is closed).
app = wx.App()
#Pass in the frame title
frame = Frame("Python GUI")
frame.Show()
app.MainLoop()
|
import sys
import cx_Oracle
import getpass
import random
import string
import datetime
# Maps month number (1-12) to its three-letter abbreviation; used to format
# dates for display in the search output at the end of main().
monthdic ={1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun', 7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}
def validity(str1, str2, val, vartype, constraint = []):
    """Prompt repeatedly until the user supplies a value that validates.

    :param str1: prompt shown on the first attempt.
    :param str2: prompt shown on every retry.
    :param val: maximum accepted input length (floats are additionally
        required to be <= 10**(val-2)).
    :param vartype: ``str`` to accept the raw string; ``int``/``float`` to
        convert; ``datetime.date`` to parse a 'DD-Mon-YYYY' date; or the
        string ``'file'`` to read the named file's bytes.
    :param constraint: optional whitelist of accepted raw strings.
    :return: the validated (and converted) value.
    """
    # NOTE(review): the mutable default [] is shared across calls; it is only
    # read here so it is harmless, but constraint=None would be safer.
    first = True
    var = ''
    while not var:
        if first:
            var = input(str1)
            first = False
        else:
            var = input(str2)
        if len(var) > val or (constraint and var not in constraint):
            var = ''
        elif vartype != str:
            try:
                if vartype == float or vartype == int:
                    var = vartype(var)
                    if vartype == float and var > 10**(val-2):
                        var = ''
                elif vartype == datetime.date:
                    var = datetime.datetime.strptime(var, '%d-%b-%Y').date()
                elif vartype == 'file':
                    # Context manager closes the file even if read() fails.
                    with open(var, 'rb') as fh:
                        var = fh.read()
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt, so Ctrl-C could not abort.
                var = ''
    return var
def erasespace(L):
    """Return the characters of *L* joined together with all spaces removed.

    Fixed: the old implementation called ``L.remove`` while iterating the
    same list, which skips the element following each removal, so runs of
    consecutive spaces were only partially stripped; it also crashed on a
    string argument (str has no ``remove``).  This version accepts any
    iterable of characters, including plain strings.
    """
    return ''.join(ch for ch in L if ch != ' ')
def func1(curs, connection):
    """Interactively register new vehicle(s) and their owner(s).

    Prompts for the primary owner's SIN (must exist in ``people``), an unused
    vehicle serial number, the vehicle's attributes and type, and any
    secondary owners, then inserts one ``vehicle`` row and one ``owner`` row
    per SIN and commits.

    NOTE(review): every query below interpolates raw user input with ``%``
    string formatting — SQL injection risk; switch to bind variables.
    """
    print('NEW VEHICLE REGISTRATION')
    select = 'y'
    while select == 'y':#if this option is selected, proceed to fill in all required information in order to fill in the tables.
        # Keeps asking for SIN until a valid one is received
        first = True
        owner_id = ''
        SIN = {}  # maps owner SIN -> 'y'/'n' (is_primary_owner flag)
        while not owner_id:
            if first:
                owner_id = (input("Please enter the primary owner's SIN: "))
                first = False
            # If owner_id not in people table was received
            elif len(owner_unknown) == 0:
                owner_id = input("Unknown SIN, please try again: ")
            # Check if owner_id is in people table
            # len(owner_unknown) == 0 if owner_id not in people table
            curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(owner_id))
            owner_unknown = curs.fetchall()
            if len(owner_unknown) == 0:
                owner_id = ''
        SIN[owner_id] = 'y'
        # Keeps asking for car_id until an unused one is received
        first = True
        car_id = ''
        while not car_id:
            if first:
                car_id = (input("Please enter the car's serial number: "))
                first = False
            # If car_id in vehicle table was received
            elif len(car_conflict) > 0:
                car_id = input("Serial number already registered, please use another: ")
            # Invalid car_id was received
            else:
                car_id = (input("Please enter a valid serial number: "))
            # Check if car_id is in vehicle table
            # len(car_conflict) > 0 if car_id is in vehicle table
            curs.execute("SELECT serial_no FROM vehicle WHERE serial_no = '%s'"%(car_id))
            car_conflict = curs.fetchall()
            # Reset car_id if there is a conflict, length of car_id is > 15 or if it is NULL
            if len(car_conflict) > 0 or len(car_id) > 15 or car_id == 'NULL':
                car_id = ''
        maker = validity("Please enter the car's maker: ", "Please enter a valid car maker: ", 20, str)
        model = validity("Please enter the car's model: ", "Please enter a valid car model: ", 20, str)
        year = validity("Please enter the car's year: ", "Please enter a valid year: ", 4, int)
        color = validity("Please enter the car's color: ", "Please enter a valid color: ", 10, str)
        # Keeps asking for type_id until a valid one is received
        first = True
        type_id = ''
        while not type_id:
            if first:
                type_id = (input("Please enter the car's type_id: "))
                first = False
            else:
                type_id = input("Unknown type_id, please try again: ")
            # Convert type_id into int and check if type_id is in vehicle_type table
            # len(type_unknown) == 0 if type_id not in vehicle_type table
            try:
                type_id = int(type_id)
                curs.execute("SELECT type_id FROM vehicle_type WHERE type_id = '%d'"%(type_id))
                type_unknown = curs.fetchall()
                if len(type_unknown) == 0:
                    type_id = ''
            # Error occured (non-numeric input or DB failure), reset type_id
            except:
                type_id = ''
        # Keeps asking if there are any other owners to add to the vehicle
        cont = True
        while cont:
            choice = validity("Are there any other (non-primary) owners? y/n: ", "Please input a valid choice: ", 1, str, ['y', 'n'])
            if choice == 'n':
                cont = False
            else:
                # Keeps asking for SIN until a valid one is received
                first = True
                secondary_id = ''
                while not secondary_id:
                    if first:
                        # NOTE(review): prompt says "primary owner" but this
                        # loop collects a *secondary* owner's SIN.
                        secondary_id = (input("Please enter the primary owner's SIN: "))
                        first = False
                    # If owner_id not in people table was received
                    elif len(secondary_unknown) == 0:
                        secondary_id = input("Unknown SIN, please try again: ")
                    else:
                        secondary_id = input("SIN already received, please try another: ")
                    # Check if secondary_id is in people table
                    # len(secondary_unknown) == 0 if secondary_id not in people table
                    curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(secondary_id))
                    secondary_unknown = curs.fetchall()
                    if len(secondary_unknown) == 0 or secondary_id in SIN:
                        secondary_id = ''
                SIN[secondary_id] = 'n'
        curs.execute("Insert into vehicle "
            "values('%s','%s','%s',%d,'%s','%d')"%(car_id, maker, model, year, color, type_id))#insert statement for vehicle table
        for i in SIN:
            curs.execute("Insert into owner "
                "values('%s', '%s', '%s')"%(i, car_id, SIN[i]))
        connection.commit()
        print('New vehicle successfully registered')
        select = validity("Do you want to register another vehicle? y/n: ", "Please enter a valid option: ", 1, str, ['y', 'n'])
def func2(curs, connection):
    """Interactively record auto-sale transactions.

    For each sale: validates the vehicle, the selling (primary) owner and the
    buyer, enforces that the sale date is not earlier than the vehicle's
    previous sale, rebuilds the ``owner`` rows for the new owner set, and
    inserts an ``auto_sale`` row before committing.

    NOTE(review): every query below interpolates raw user input with ``%``
    string formatting — SQL injection risk; switch to bind variables.
    """
    print('AUTO TRANSACTION')
    #auto_sale( transaction_id,seller_id, buyer_id, vehicle_id, s_date, price )
    # Get current max transaction_id (transaction_ids increment by 1)
    curs.execute("SELECT max(transaction_id) FROM auto_sale")
    transactions = curs.fetchall()
    transaction_id = int(transactions[0][0])
    more = 'y'
    while more == 'y':
        transaction_id = transaction_id+1
        SIN = {}  # maps owner SIN -> 'y'/'n' (is_primary_owner flag)
        # Keeps asking for vehicle_id until a one is received
        first = True
        vehicle_id = ''
        while not vehicle_id:
            if first:
                vehicle_id = (input("Please enter the car's serial number: "))
                first = False
            # Invalid car_id was received
            else:
                vehicle_id = (input("Please enter a valid serial number: "))
            # Check if vehicle_id is in vehicle table
            # len(car_unknown) == 0 if vehicle_id is not in vehicle table
            curs.execute("SELECT serial_no FROM vehicle WHERE serial_no = '%s'"%(vehicle_id))
            vehicle_unknown = curs.fetchall()
            if len(vehicle_unknown) == 0:
                vehicle_id = ''
        # Keeps asking for SIN until a valid one is received (person must be the primary owner of the car)
        first = True
        seller_id = ''
        while not seller_id:
            if first:
                seller_id = (input("Please enter the primary seller's SIN: "))
                first = False
            # If seller_id not in people table was received
            elif len(seller_unknown) == 0:
                seller_id = input("Unknown SIN, please try again: ")
            else:
                seller_id = input("This SIN is not the primary owner of this vehicle, please try again: ")
            # Check if seller_id is in people table and if seller_id is the primary owner in the owner table
            # len(seller_unknown) == 0 if seller_id not in people table
            curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(seller_id))
            seller_unknown = curs.fetchall()
            # len(seller_notprimary) == 0 if seller_id not the primary owner of vehicle_id in owner table
            curs.execute("SELECT owner_id FROM owner where vehicle_id = '%s' and owner_id = '%s' and is_primary_owner = '%s'"%(vehicle_id, seller_id, 'y'))
            seller_notprimary = curs.fetchall()
            if len(seller_unknown) == 0 or len(seller_notprimary) == 0:
                seller_id = ''
        # Keeps asking for SIN until a valid one is received (person must not be the primary owner of the car)
        first = True
        buyer_id = ''
        while not buyer_id:
            if first:
                buyer_id = (input("Please enter the primary buyer's SIN: "))
                first = False
            # If buyer_id not in people table was received
            elif len(buyer_unknown) == 0:
                buyer_id = input("Unknown SIN, please try again: ")
            else:
                buyer_id = input("SIN already received, please try another: ")
            # Check if buyer_id is in people table
            # len(buyer_unknown) == 0 if buyer_id not in people table
            curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(buyer_id))
            buyer_unknown = curs.fetchall()
            if len(buyer_unknown) == 0 or buyer_id == seller_id:
                buyer_id = ''
        SIN[buyer_id] = 'y'
        # Get previous sale date of vehicle
        curs.execute("SELECT max(s_date) FROM auto_sale WHERE vehicle_id = '%s'"%(vehicle_id))
        prev = curs.fetchall()
        # NOTE(review): an aggregate query returns one row even with no
        # matches, so `prev` is always truthy and prev[0][0] is None for a
        # never-sold vehicle — .date() would then raise.  prev_date would
        # also be unbound below if `prev` were empty.  Confirm every vehicle
        # reaching this point has a prior sale.
        if prev:
            prev_date = prev[0][0].date()
        s_date = ''
        first = True
        while not s_date:
            if first:
                s_date = validity("Please enter the sale date(DD-MON-YYYY): ", "Please enter a valid date: ", 11, datetime.date)
                first = False
            else:
                s_date = validity("You can't time travel (last sold on: %s), try again: "%(prev_date.strftime('%d-%b-%Y')), "Please enter a valid date: ", 11, datetime.date)
            if prev_date and s_date < prev_date:
                s_date = ''
        price = validity("Please enter the sale price: ", "Please enter a valid price: ", 9, float)
        # Keeps asking if there are any other owners to add to the vehicle
        cont = True
        while cont:
            choice = validity("Are there any other (non-primary) owners? y/n: ", "Please input a valid choice: ", 1, str, ['y', 'n'])
            if choice == 'n':
                cont = False
            else:
                # Keeps asking for SIN until a valid one is received
                first = True
                secondary_id = ''
                while not secondary_id:
                    if first:
                        # NOTE(review): prompt says "primary owner" but this
                        # loop collects a *secondary* owner's SIN.
                        secondary_id = (input("Please enter the primary owner's SIN: "))
                        first = False
                    # If owner_id not in people table was received
                    elif len(secondary_unknown) == 0:
                        secondary_id = input("Unknown SIN, please try again: ")
                    else:
                        secondary_id = input("SIN already received, please try another: ")
                    # Check if secondary_id is in people table
                    # len(secondary_unknown) == 0 if secondary_id not in people table
                    curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(secondary_id))
                    secondary_unknown = curs.fetchall()
                    if len(secondary_unknown) == 0 or secondary_id in SIN:
                        secondary_id = ''
                SIN[secondary_id] = 'n'
        # Replace the vehicle's previous owner rows with the new owner set.
        curs.execute("DELETE FROM owner WHERE vehicle_id = '%s'" %(vehicle_id));
        for i in SIN:
            curs.execute("Insert into owner "
                "values('%s', '%s', '%s')"%(i, vehicle_id, SIN[i]))
        curs.execute("Insert into auto_sale "
            "values(%d, '%s', '%s','%s', '%s', %f)"%(transaction_id, seller_id, buyer_id, vehicle_id, s_date.strftime('%d-%b-%Y'), price));
        connection.commit()
        print('Sale successfully completed as transaction_id: %s'%(transaction_id))
        more = validity("Do you want to make another transaction? y/n: ", "Please enter a valid option: ", 1, str, ['y', 'n'])
def func3(curs, connection):
    """Interactively register driver licences.

    Validates the SIN (must exist in ``people`` and not already hold a
    licence), reads the driver's photo file as a BLOB, generates an unused
    random licence number, and inserts into ``drive_licence`` with a 5-year
    expiry, using bind variables for the insert.
    """
    print('DRIVER LICENCE REGISTRATION')
    #drive_licence( licence_no,sin,class,photo,issuing_date,expiring_date)
    cont = 'y'
    while cont == 'y':
        # Keeps asking for SIN until a valid and unused one is received
        first = True
        person_id = ''
        while not person_id:
            if first:
                person_id = input("Please input an SIN to register: ")
                first = False
            # If person_id not in people table was received
            elif len(person_unknown) == 0:
                person_id = input("Unknown SIN, please try again: ")
            # If person_id in people table but also in drive_licence table was received
            else:
                person_id = input("SIN already registered, please use another: ")
            # Check if person_id is in people table and drive_licence table
            # len(person_unknown) == 0 if person_id not in people table
            curs.execute("SELECT sin FROM people WHERE sin = '%s'"%(person_id))
            person_unknown = curs.fetchall()
            # len(person_conflict) > 0 if person_id in drive_licence table
            curs.execute("SELECT sin FROM drive_licence WHERE sin = '%s'"%(person_id))
            person_conflict = curs.fetchall()
            if len(person_unknown) == 0 or len(person_conflict) > 0:
                person_id = ''
        drive_class = validity("Please enter the licence class: ", "Please enter a valid licence class: ", 10, str)
        # Obtain current time
        # issuing_date = sysdate; expiring_date = issuing_date + 5 years
        # NOTE(review): year+5 raises ValueError when issued on Feb 29.
        issuing_date = datetime.datetime.now().date()
        expiring_date = datetime.date(issuing_date.year+5, issuing_date.month, issuing_date.day)
        # 'file' mode makes validity() return the file's bytes for the BLOB.
        photo = validity("Name of image file of driver: ", "Please input a valid photo option: ", float('infinity'), 'file')
        # Generates random licence_no until an unused one is received (must be <15 characters long)
        licence_no = False
        while not licence_no or len(licence_conflict) > 0 or len(licence_no) > 15 or licence_no == 'NULL':
            licence_no = ''.join(random.choice(string.ascii_uppercase) for x in range(3))+'-'+''.join(random.choice(string.digits) for x in range(4))
            curs.execute("SELECT licence_no FROM drive_licence WHERE licence_no = '%s'"%(licence_no))
            licence_conflict = curs.fetchall()
        # Tell the driver to bind the photo parameter as a BLOB.
        curs.setinputsizes(photo=cx_Oracle.BLOB)
        curs.execute("Insert into drive_licence "
            "values(:licence_no, :person_id, :class, :photo, :issuing_date, :expiring_date)", {'licence_no':licence_no, 'person_id':person_id, 'class':drive_class, 'photo':photo, 'issuing_date':issuing_date.strftime('%d-%b-%Y'), 'expiring_date':expiring_date.strftime('%d-%b-%Y')})
        connection.commit()
        print('Licence registered successfully as %s'%(licence_no))
        cont = validity("Do you want to register another licence? y/n: ", "Please enter a valid option: ", 1, str, ['y', 'n'])
def check_general_errors():
    """Prompt for a violator SIN until a non-empty value of at most 15
    characters is entered, and return it."""
    while True:
        try:
            sin = input("Please enter the violator's SIN: ")
        except Exception:
            print("You should enter a valid violator_no")
            continue
        if sin == '':
            print("Empty input!")
        elif len(sin) > 15:
            print("You have to enter a violator_no in 15 digits!\n")
        else:
            return sin
def _prompt_field(prompt, max_len, too_long_msg, error_msg, upper=False):
    """Prompt until a non-empty value no longer than max_len is entered.

    :param prompt: text shown to the user on every attempt.
    :param max_len: maximum accepted length.
    :param too_long_msg: message printed when the input is too long.
    :param error_msg: message printed when input() itself raises.
    :param upper: when True, upper-case the value before validating.
    """
    while True:
        try:
            value = input(prompt)
            if upper:
                value = value.upper()
            if value == '':
                print("Empty input!")
            elif len(value) > max_len:
                print(too_long_msg)
            else:
                return value
        except Exception:
            print(error_msg)

def _generate_ticket_no(curs):
    """Draw a random ticket_no not already present in the ticket table."""
    curs.execute("select ticket_no from ticket")
    existing = [row[0] for row in curs.fetchall()]
    ticket_no = random.randint(0,99999999)
    while ticket_no in existing:
        ticket_no = random.randint(0,99999999)
        print("The ticket_no is already generated!")
    return ticket_no

def _prompt_ticket_fields(curs):
    """Collect every ticket column except ticket_no/violator_no from the user."""
    vehicle_id = _prompt_field("Please enter the vehicle_id: ", 15,
                               "You have to enter a vehicle_id in 15 digits!\n",
                               "Parent key not found")
    office_no = _prompt_field("Please enter the office_no: ", 15,
                              "You have to enter a office_no in 15 digits!\n",
                              "Parent key not found")
    vtype = _prompt_field("Please enter the violation type: ", 10,
                          "You have to enter a vtype in 10 digits!\n",
                          "Parent key not found")
    vdate = _prompt_field("Please enter the violation date: ", 12,
                          "You have to enter a datetype date like 'DD-MMM-YYYY'!\n",
                          "Parent key not found", upper=True)
    place = _prompt_field("Please enter where did the violation take place: ", 20,
                          "You have to enter a place in 20 digits!\n",
                          "Incorrect input")
    descriptions = _prompt_field("Please enter the descriptions of the violation: ", 1024,
                                 "You have to enter a desciption in 1024 digits = !\n",
                                 "Incorrect input")
    return vehicle_id, office_no, vtype, vdate, place, descriptions

def _insert_ticket(curs, connection, ticket_no, violator_no, fields):
    """Insert one ticket row and commit.

    NOTE(review): values are interpolated with % formatting — SQL injection
    risk; bind variables would be safer.
    """
    vehicle_id, office_no, vtype, vdate, place, descriptions = fields
    curs.execute("Insert into ticket values"
        "(%d,'%s','%s','%s','%s','%s','%s','%s')"%(ticket_no,violator_no,vehicle_id,office_no,vtype,vdate,place,descriptions))
    connection.commit()

def func4(curs, connection):
    """Interactively record traffic-violation tickets.

    The original duplicated every prompt loop between the first ticket and
    the "another ticket" repeat loop; the loops are factored into the private
    helpers above.  Preserved behavior: only the FIRST ticket's violator SIN
    is validated against ``people`` (the repeat loop just length-checks it).
    """
    print('VIOLATION RECORD')
    #ticket( ticket_no,violator_no,vehicle_id,office_no,vtype,vdate,place,descriptions)
    ticket_no = _generate_ticket_no(curs)
    # Keep asking until the SIN exists in people (parent-key check).
    violator_no = check_general_errors()
    curs.execute("select people.sin from people where people.sin = '%s'"%(violator_no))
    rows = curs.fetchall()
    while len(rows) == 0:
        print("Parent key not found!Try it again")
        violator_no = check_general_errors()
        curs.execute("select people.sin from people where people.sin = '%s'"%(violator_no))
        rows = curs.fetchall()
    _insert_ticket(curs, connection, ticket_no, violator_no, _prompt_ticket_fields(curs))
    more = input('do you want to add another ticket to record y/n: ')
    while more == 'y':
        ticket_no = _generate_ticket_no(curs)
        violator_no = _prompt_field("Please enter the violator's SIN: ", 15,
                                    "You have to enter a violator_no in 15 digits!\n",
                                    "Parents key not found!")
        _insert_ticket(curs, connection, ticket_no, violator_no, _prompt_ticket_fields(curs))
        more = input('do you want to add another ticket to record y/n: ')
    print("ticket is already recorded")
def main():
    """Connect to Oracle and run the menu-driven vehicle-registry console.

    NOTE(review): credentials are hard-coded below for development; restore
    the input()/getpass() prompts before submission.  The search queries
    interpolate user input with % formatting — SQL injection risk.
    """
    user= 'lingbo' #input('Username:')(uncomment for final code before submission
    password = 'Tlbo1994' #getpass.getpass()(uncomment in final code before submission
    try:
        stringconn = user+'/'+password + '@gwynne.cs.ualberta.ca:1521/CRS'
        connection = cx_Oracle.connect(stringconn)
        curs = connection.cursor()
        print('please select a number option from the options below: ')
        choice = input(' 1 - New Vehicle Registration\n 2 - Auto Transaction\n 3 - Driver Licence Registration\n 4 - Violation Record\n 5 - Search the database\n 6 - exit\n')
        while choice != '6':
            while choice not in ['1','2', '3', '4', '5','6']:
                choice = input(' pick a valid number: ')
            #this is what happens when vehicle registration is picked
            if choice == '1':
                func1(curs, connection)
            #this is the code that executes when selection 2 is picked
            if choice == '2':
                func2(curs, connection)
            #selection of the third choice option(licence registration)
            if choice == '3':
                func3(curs, connection)
            if choice == '4':
                func4(curs, connection)
            #choice 5 is the search database appication.
            if choice == '5':
                print('SEARCH THE DATABASE')
                print('Please pick a search option')
                again = 'y'
                while again == 'y':
                    inp = input(' 1 - Driver Info \n 2 - List all violation records received by a person\n 3 - Print out the vehicle_history\n')
                    if inp == '1':#check the kind of search being executed(name, licence searchfor personal info.)
                        selection = input(' 1 - search by name \n 2 - search by licence number \n')#choose the search type, name or licence no
                        if selection == '1':#search by name query
                            search_by = input('please enter full name: ')
                            searchf10 = ("(SELECT name, dl.licence_no, addr, birthday, class, c_id FROM people p, driving_condition dc, restriction r, drive_licence dl WHERE p.sin = dl.sin AND r.r_id = dc.c_id AND r.licence_no = dl.licence_no AND p.name = '%s')"%(search_by));
                            curs.execute(searchf10)
                            v = curs.fetchall()
                            for i in v:
                                dates = i[3].date()
                                print(' Name: %s \n Drivers licence: %s \n Address: %s \n Birthday: %s \n Class: %s \n Restriction: %s \n' %(i[0], i[1].split()[0], i[2], str(dates.day)+'-'+monthdic[dates.month]+'-'+str(dates.year), i[4].split()[0], i[5]))
                            if len(v) == 0:
                                print('###No results were found matching your search criteria###')
                        if selection == '2':#search by licence query
                            search_by = input('please enter the licence number: ').upper()
                            searchf11 = ("(SELECT name, dl.licence_no, addr, birthday, class, c_id FROM people p, driving_condition dc, drive_licence dl, restriction r WHERE p.sin = dl.sin AND r.r_id = dc.c_id AND r.licence_no = dl.licence_no AND dl.licence_no = '%s')"%(search_by));
                            (curs.execute(searchf11))
                            v = curs.fetchall()
                            for i in v:
                                dates = i[3].date()
                                print(' Name: %s \n Drivers licence: %s \n Address: %s \n Birthday: %s \n Class: %s \n Restriction: %s \n' %(i[0], i[1].split()[0], i[2],str(dates.day)+'-'+monthdic[dates.month]+'-'+str(dates.year), i[4].split()[0], i[5]))
                            if len(v) == 0:
                                print('###No results were found matching your search criteria###')
                    if inp == '2':#check the kind of search being executed(SIN, licence search for tickets)
                        selection = input(' 1 - search by SIN \n 2 - search by licence number \n')#choose the search type, SIN or licence no
                        if selection == '1':
                            search_by = input('please enter the SIN: ')
                            searchf01 = "(SELECT ticket_no, t.vtype, fine, vdate, descriptions, t.place FROM ticket t, ticket_type tt, people p WHERE t.vtype = tt.vtype AND p.sin = t.violator_no and p.sin = '%s')"%(search_by);
                            (curs.execute(searchf01))
                            v = curs.fetchall()
                            for i in v:
                                dates = i[3].date()
                                print(' Ticket number: %d \n Violation code: %s \n Fine: %d \n Date: %s \n Description: %s \n Place: %s \n ' %(i[0], i[1].split()[0], i[2],str(dates.day)+'-'+monthdic[dates.month]+'-'+str(dates.year), i[4], i[5]))
                            curs.execute("select p.name from people p where sin = %s"%((search_by)))
                            person = curs.fetchone()
                            print("%s has %d tickets total"%(person[0], len(v)))
                            if len(v) == 0:
                                print('###No results were found matching your search criteria###')
                        if selection == '2':
                            search_by = input('please enter the licence number: ').upper()
                            searchf02 = "(SELECT ticket_no, t.vtype, fine, vdate, descriptions, t.place FROM ticket t, ticket_type tt, people p, drive_licence WHERE t.vtype = tt.vtype AND p.sin = t.violator_no AND drive_licence.sin = p.sin AND drive_licence.licence_no = '%s')"%(search_by);
                            (curs.execute(searchf02))
                            v = curs.fetchall()
                            for i in v:
                                dates = i[3].date()
                                print(' Ticket number: %d \n Violation code: %s \n Fine: %d \n Date: %s \n Description: %s \n Place: %s \n' %(i[0], i[1].split()[0], i[2],str(dates.day)+'-'+monthdic[dates.month]+'-'+str(dates.year), i[4], i[5]))
                            curs.execute("select p.name from people p, drive_licence dl where dl.sin = p.sin AND dl.licence_no = '%s'"%((search_by)))
                            person = curs.fetchone()
                            print("%s has %d tickets total"%(person[0], len(v)))
                            if len(v) == 0:
                                print('###No results were found matching your search criteria###')
                    if inp == '3':
                        select_by = input('Please enter the vehicle licence plate: ')
                        searchf03 = "(SELECT vehicle_no, number_sales, average_price, total_tickets FROM ((SELECT v.serial_no as vehicle_no, COUNT(DISTINCT s.transaction_id) AS number_sales , AVG(s.price) AS average_price FROM auto_sale s, vehicle v, vehicle_type vt, ticket t WHERE s.vehicle_id = v.serial_no and v.type_id = vt.type_id and v.serial_no = '%s' GROUP BY v.serial_no) f left join (SELECT COUNT(t.vtype) as total_tickets, v.serial_no as car FROM vehicle v left join ticket t on v.serial_no = t.vehicle_id GROUP BY v.serial_no) r on f.vehicle_no = r.car))"%(select_by);
                        (curs.execute(searchf03))
                        v = curs.fetchall()
                        for i in v:
                            print(' Vehicle Serial Number: %s \n Times Sold: %d \n Average sale price: %d \n Total Tickets: %d \n ' %((i[0]), int(i[1]), int(i[2]), int(i[3])))
                        if len(v) == 0:
                            print('###No results were found matching your search criteria###')
                    again = input('do you want to do another search? y/n: ')
                    while again not in ['y', 'n']:
                        print('please press "y" or "n"')
                        again = input('do you want to do another search?;')
            if choice == '6':
                break
            # Confirm exit / loop back to the menu.
            x = input('Are you sure you want to exit? y/n: ').lower()
            while x not in ['y', 'n']:
                print("please pick a real option")
                x = input('Are you sure you want to exit? y/n: ').lower()
            if x == 'n':
                print('please select another number option from the options below: ')
                choice = input(' 1 - New Vehicle Registration\n 2 - Auto Transaction\n 3 - Driver Licence Registration\n 4 - Violation Record\n 5 - Search the database\n 6 - exit\n')
            else:
                choice = '6'
        curs.close()
        connection.close()
        print('Have a nice day!')
    except cx_Oracle.DatabaseError as exc:
        # Fixed: exc.args is a 1-tuple holding an error object; the old code
        # bound the tuple itself and then read `.code` off it, raising
        # AttributeError instead of reporting the Oracle error.  Also
        # `print(sys.stderr, ...)` printed the stream object; use file=.
        error, = exc.args
        print("Oracle code:", error.code, file=sys.stderr)
        print("Oracle message:", error.message, file=sys.stderr)
if __name__ == "__main__":
main() |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
VGG16 model for chainer 1.8
"""
import chainer
import chainer.functions as F
import chainer.links as L
import os, sys
import numpy as np
shared = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(shared)
from ..functions import power_normalize, global_average_pooling_2d
class VGG16(chainer.Chain):
    """Single-GPU VGGNet (16 layers).

    With ``texture=False`` this is plain VGG16 (conv1-conv5 + fc6/fc7/fc8).
    With ``texture=True`` the network stops at ``texture_layer`` and applies
    bilinear pooling — compact (``cbp=True``, via the random projections
    W1/W2) or full outer-product pooling — before the fc8 classifier.
    """
    # Expected input spatial size in pixels.
    insize = 224
    def __init__(self, num_class=1000, texture=False, texture_layer='pool4', cbp=False, normalize=True, mil=False):
        super(VGG16, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(None, 64, 3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 64, 3, pad=1)
            self.conv2_1 = L.Convolution2D(64, 128, 3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 128, 3, pad=1)
            self.conv3_1 = L.Convolution2D(128, 256, 3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 256, 3, pad=1)
            self.conv3_3 = L.Convolution2D(256, 256, 3, pad=1)
            self.conv4_1 = L.Convolution2D(256, 512, 3, pad=1)
            self.conv4_2 = L.Convolution2D(512, 512, 3, pad=1)
            self.conv4_3 = L.Convolution2D(512, 512, 3, pad=1)
            # conv5 and the fully-connected head are only created for the
            # plain classification path.
            # NOTE(review): in texture mode fc8 is not created here either;
            # forward() would fail on a fresh texture model until
            # convert_to_finetune_model registers a new fc8 — confirm that
            # texture models are always built via load_pretrained.
            if not texture:
                self.conv5_1 = L.Convolution2D(512, 512, 3, pad=1)
                self.conv5_2 = L.Convolution2D(512, 512, 3, pad=1)
                self.conv5_3 = L.Convolution2D(512, 512, 3, pad=1)
                self.fc6 = L.Linear(25088, 4096)
                self.fc7 = L.Linear(4096, 4096)
                self.fc8 = L.Linear(4096, num_class)
        self.texture = texture
        self.texture_layer = texture_layer
        self.cbp = cbp
        self.normalize = normalize
        # NOTE(review): ``mil`` is stored but never read in this class.
        self.mil = mil
    def forward(self, x):
        """Compute raw (pre-softmax) scores for a batch of images.

        Feature-map sizes after each pooling stage for a 224x224 input:
        h1 : (1, 64, 112, 112)
        h2 : (1, 128, 56, 56)
        h3 : (1, 256, 28, 28)
        h4 : (1, 512, 14, 14)
        h5 : (1, 512, 7, 7)
        :param x: input image batch.
        :return: class scores (texture mode pools the chosen layer first).
        """
        h = x
        h = F.relu((self.conv1_1(h)))
        h = F.relu((self.conv1_2(h)))
        pool1 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv2_1(pool1)))
        h = F.relu((self.conv2_2(h)))
        pool2 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv3_1(pool2)))
        h = F.relu((self.conv3_2(h)))
        h = F.relu((self.conv3_3(h)))
        pool3 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv4_1(pool3)))
        h = F.relu((self.conv4_2(h)))
        h = F.relu((self.conv4_3(h)))
        pool4 = F.max_pooling_2d(h, 2, stride=2)
        if self.texture:
            # Pick the feature map requested at construction time.
            h = {'pool1': pool1, 'pool2': pool2, 'pool3': pool3, 'pool4': pool4}[self.texture_layer]
            if self.cbp:
                # Compact bilinear pooling with the persistent random
                # projections W1/W2 (set in convert_to_finetune_model).
                h = F.convolution_2d(h, self.W1) * F.convolution_2d(h, self.W2)
                h = global_average_pooling_2d(h)
                if self.normalize:
                    # Power normalisation followed by L2 normalisation.
                    h = power_normalize(h)
                    h = F.normalize(h)
                h = self.fc8(F.dropout(h, 0.2))
                return h
            else:
                # Full bilinear pooling: channel outer product averaged over
                # spatial positions.
                b, ch, height, width = h.data.shape
                h = F.reshape(h, (b, ch, width * height))
                h = F.batch_matmul(h, h, transb=True) / self.xp.float32(width * height)
                h = self.fc8(F.dropout(h, 0.4))
                return h
        else:
            h = F.relu((self.conv5_1(pool4)))
            h = F.relu((self.conv5_2(h)))
            h = F.relu((self.conv5_3(h)))
            h = F.max_pooling_2d(h, 2, stride=2)
            h = F.dropout(F.relu(self.fc6(h)), ratio=0.5)
            h = F.dropout(F.relu(self.fc7(h)), ratio=0.5)
            h = self.fc8(h)
            return h
    def load_pretrained(self, pretrained_path, num_class):
        """Load npz weights, then swap fc8 for a fresh num_class head."""
        chainer.serializers.load_npz(pretrained_path, self)
        self.convert_to_finetune_model(num_class)
    def convert_to_finetune_model(self, num_class):
        """Replace fc8 with an untrained head; register CBP projections.

        The random projection weights are persistent (saved/loaded with the
        model) but not trained.
        """
        if self.cbp:
            randweight = np.load(os.path.join(shared, 'cbp/randweight_512_to_4096.npz'))
            self.add_persistent('W1', randweight['W1'])
            self.add_persistent('W2', randweight['W2'])
        self.fc8 = L.Linear(None, num_class)
        return
    def __call__(self, x, t):
        """Run forward, cache predictions/accuracy, and return the loss."""
        self.y = self.forward(x)
        self.loss = F.softmax_cross_entropy(self.y, t)
        self.accuracy = F.accuracy(self.y, t)
        return self.loss
class VGG16Feature(chainer.Chain):
    """Single-GPU VGGNet (16 layers), truncated after the conv3 stage.

    Used as a feature extractor: ``forward`` returns three intermediate
    feature maps instead of class scores.
    """
    # Expected input spatial size in pixels.
    insize = 224
    def __init__(self, input_ch=3):
        super(VGG16Feature, self).__init__()
        with self.init_scope():
            self.conv1_1 = L.Convolution2D(input_ch, 64, 3, pad=1)
            self.conv1_2 = L.Convolution2D(64, 64, 3, pad=1)
            self.conv2_1 = L.Convolution2D(64, 128, 3, pad=1)
            self.conv2_2 = L.Convolution2D(128, 128, 3, pad=1)
            self.conv3_1 = L.Convolution2D(128, 256, 3, pad=1)
            self.conv3_2 = L.Convolution2D(256, 256, 3, pad=1)
            self.conv3_3 = L.Convolution2D(256, 256, 3, pad=1)
    def clear(self):
        # Drop the loss/accuracy cached by the previous __call__.
        self.loss = None
        self.accuracy = None
    def forward(self, x):
        """Return the (pool2, conv3_2 activation, pool3) feature maps.

        Reference sizes for a 224x224 input:
        h1 : (1, 64, 112, 112)
        h2 : (1, 128, 56, 56)
        h3 : (1, 256, 28, 28)
        h4 : (1, 512, 14, 14)
        h5 : (1, 512, 7, 7)
        :param x: input image batch.
        :return: tuple (h2, h_, h3).
        """
        h = x
        h = F.relu((self.conv1_1(h)))
        h = F.relu((self.conv1_2(h)))
        h1 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv2_1(h1)))
        h = F.relu((self.conv2_2(h)))
        h2 = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu((self.conv3_1(h2)))
        h_ = F.relu((self.conv3_2(h)))
        h = F.relu((self.conv3_3(h_)))
        h3 = F.max_pooling_2d(h, 2, stride=2)
        return h2, h_, h3
    def __call__(self, x, t):
        # NOTE(review): forward() returns a 3-tuple, which is passed straight
        # into softmax_cross_entropy/accuracy here — this looks like it would
        # fail at runtime; confirm which output was intended.
        self.clear()
        h = self.forward(x)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
|
# Generated by Django 3.2.4 on 2021-07-14 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional bank_account text field to the team model.
    # Auto-generated migration: do not edit operations by hand.

    dependencies = [
        ('team', '0002_alter_team_created_by'),
    ]

    operations = [
        migrations.AddField(
            model_name='team',
            name='bank_account',
            # blank+null: field is optional both in forms and at the DB level.
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal user record keyed by an external pid with a unique short id."""

    def __str__(self):
        # NOTE(review): returns self.id directly; if id were ever unset/None
        # this would raise a TypeError — confirm id is always populated.
        return self.id

    # pid: primary key, opaque 32-char identifier.
    pid = models.CharField(max_length=32, primary_key=True)
    # id: required, unique 20-char identifier (shadows Python's builtin id()).
    id = models.CharField(max_length=20, null=False, unique=True)
|
import torch
from torchvision import models
from torchvision import transforms
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets
import shutil
import time
import configparser
import requests
import json
import random
#Krishna
import urllib
import logging
from pathlib import Path
from os import listdir
import urllib
import time
from datetime import datetime
# Module-level setup: derive this task's identity from the file name and
# load the Jupiter deployment configuration.
global circe_home_ip, circe_home_ip_port, taskname
taskname = Path(__file__).stem
# Task files are named "resnet<N>.py"; N identifies this worker.
resnet_task_num = int(taskname.split('resnet')[1])
global logging
logging.basicConfig(level = logging.DEBUG)
#Krishna
INI_PATH = 'jupiter_config.ini'
config = configparser.ConfigParser()
config.read(INI_PATH)
global FLASK_DOCKER, FLASK_SVC, SLEEP_TIME, STRAGGLER_THRESHOLD, CODING_PART1
FLASK_DOCKER = int(config['PORT']['FLASK_DOCKER'])
FLASK_SVC = int(config['PORT']['FLASK_SVC'])
SLEEP_TIME = int(config['OTHER']['SLEEP_TIME'])
STRAGGLER_THRESHOLD = float(config['OTHER']['STRAGGLER_THRESHOLD'])
CODING_PART1 = int(config['OTHER']['CODING_PART1'])
RESNET_POLL_INTERVAL = int(config['OTHER']['RESNET_POLL_INTERVAL'])
# Filled in lazily from the GLOBAL_IP environment variable inside task().
global global_info_ip, global_info_ip_port
def send_runtime_profile(msg):
    """
    Sending runtime profiling information to flask server on home
    Args:
        msg (str): the message to be sent
    Returns:
        str: the message if successful, "not ok" otherwise.
    Raises:
        Exception: if sending message to flask server on home is failed
    """
    try:
        logging.debug('Sending runtime stats')
        logging.debug(msg)
        # Home node address is resolved from the environment on every call.
        circe_home_ip_port = os.environ['HOME_NODE'] + ":" + str(FLASK_SVC)
        url = "http://" + circe_home_ip_port + "/recv_runtime_profile"
        params = {'msg': msg, "work_node": taskname}
        params = urllib.parse.urlencode(params)
        req = urllib.request.Request(url='%s%s%s' % (url, '?', params))
        res = urllib.request.urlopen(req)
        res = res.read()
        res = res.decode('utf-8')
    except Exception as e:
        # Best-effort telemetry: failures are logged, never raised to callers.
        logging.debug("Sending runtime profiling info to flask server on home FAILED!!!")
        logging.debug(e)
        return "not ok"
    return res
def send_runtime_stats(action, file_name, from_task):
    """Report a task lifecycle event for one image file.

    Builds "<action> <from_task> <image>.JPEG <timestamp>" from the last
    underscore-separated token of the file's base name and forwards it to
    the home node via send_runtime_profile().
    """
    timestamp = time.time()
    # Base name without directory or extension, e.g. "master_..._27imgfoo".
    base_name = os.path.split(file_name)[-1].split('.')[0]
    logging.debug(base_name)
    image_name = base_name.split('_')[-1] + '.JPEG'
    message = ' '.join([action, from_task, image_name, str(timestamp)])
    send_runtime_profile(message)
### ImageNet prediction index -> this pipeline's class number.
### Every former elif branch built the identical destination name, so the
### 20-branch chain is replaced with this single lookup table.
_PRED_TO_CLASS = {
    555: 1,   # fire engine
    779: 2,   # school bus
    270: 3,   # white wolf
    276: 4,   # hyena
    292: 5,   # tiger
    278: 6,   # kit fox
    283: 7,   # persian cat
    288: 8,   # leopard
    291: 9,   # lion
    295: 10,  # black bear
    298: 11,  # mongoose
    340: 12,  # zebra
    341: 13,  # hog
    344: 14,  # hippo
    345: 15,  # ox
    346: 16,  # buffalo
    348: 17,  # ram
    352: 18,  # impala
    354: 19,  # camel
    360: 20,  # otter
}

def task(file_, pathin, pathout):
    """
    Classify the given image files with ResNet-34 and copy each recognized
    image into pathout under a "resnet<N>_storeclass<K>_" prefix.

    Args:
        file_ (str or list of str): input file name(s) relative to pathin.
        pathin (str): directory containing the input images.
        pathout (str): directory receiving the classified copies.

    Returns:
        list: paths of the copied output files; empty when an image fails
        (e.g. greyscale input) or this straggler's job was already handled.
    """
    global resnet_task_num
    file_ = [file_] if isinstance(file_, str) else file_
    for f in file_:
        send_runtime_stats('rt_enter_task', f, 'master')
    ### Inference always runs on CPU.
    device = torch.device("cpu")
    ### Load the pretrained model once for the whole batch.
    model = models.resnet34(pretrained=True)
    model.eval()
    model.to(device)
    ### Transforms applied to every input image.
    composed = transforms.Compose([
        transforms.Resize(256, Image.ANTIALIAS),
        transforms.CenterCrop(224),
        transforms.ToTensor()])
    out_list = []
    for f in file_:
        print(os.path.join(pathin, f))
        img = Image.open(os.path.join(pathin, f))
        img_tensor = composed(img)
        img_tensor.unsqueeze_(0)  # 3D -> 4D (batch dimension = 1)
        try:
            print('Calling the resnet model')
            output = model(img_tensor)
            pred = torch.argmax(output, dim=1).detach().numpy().tolist()
            ### Simulated straggler: resnet8 randomly sleeps to slow delivery.
            if (random.random() > STRAGGLER_THRESHOLD) and (taskname == 'resnet8'):
                print(taskname)
                print("Sleeping")
                time.sleep(SLEEP_TIME)  # >=2
            ### The job id is embedded as "...jobid<id>_..." in the file name.
            f_stripped = f.split(".JPEG")[0]
            job_id = int(f_stripped.split('_')[-2].split('jobid')[1])
            print('job_id from the file is: ', job_id)
            print(job_id)
            ret_job_id = 0
            try:
                global_info_ip = os.environ['GLOBAL_IP']
                global_info_ip_port = global_info_ip + ":" + str(FLASK_SVC)
                if CODING_PART1:
                    ret_job_id = send_prediction_to_decoder_task(job_id, pred[0], global_info_ip_port)
            except Exception as e:
                print('Possibly running on the execution profiler')
            try:
                global_info_ip = os.environ['GLOBAL_IP']
                global_info_ip_port = global_info_ip + ":" + str(FLASK_SVC)
                if taskname != 'resnet8':
                    # Poll until the decoder has enough resnet predictions for
                    # this job, but never wait longer than SLEEP_TIME total.
                    slept = 0
                    while slept < SLEEP_TIME:
                        ret_val = get_enough_resnet_preds(job_id, global_info_ip_port)
                        print("get_enough_resnet_preds fn. return value is: ", ret_val)
                        if ret_val:
                            break
                        time.sleep(RESNET_POLL_INTERVAL)
                        slept += RESNET_POLL_INTERVAL
            except Exception as e:
                print('Possibly running on the execution profiler, get_enough_resnet_preds')
            if ret_job_id >= 0:  # job not yet processed by the global flask server
                class_num = _PRED_TO_CLASS.get(pred[0])
                if class_num is not None:
                    source = os.path.join(pathin, f)
                    destination = os.path.join(
                        pathout,
                        "resnet" + str(resnet_task_num) + "_storeclass" + str(class_num) + "_" + f)
                    out_list.append(shutil.copyfile(source, destination))
                else:  # prediction outside the 20 tracked classes: do nothing
                    print('This does not belong to any classes!!!')
            else:  # ret_job_id < 0
                print("The jobid %s has already been processed by the flask server" % (job_id))
                return []  # slow resnet node: return empty
        except Exception as e:
            print('This might be a black and white image')
            print(e)
            return []
    # Fix: the original unconditionally read out_list[0] and crashed with
    # IndexError when no image matched any tracked class.
    if out_list:
        send_runtime_stats('rt_finish_task', out_list[0], 'master')
    return out_list
#Krishna
def get_enough_resnet_preds(job_id, global_info_ip_port):
    """Ask the decoder's flask server whether enough resnet predictions have
    arrived for job_id.

    Returns the server's JSON payload, or True when the server is
    unreachable (assumed to be the execution profiler environment) so the
    caller's polling loop terminates.
    """
    hdr = {
        'Content-Type': 'application/json',
        'Authorization': None #not using HTTP secure
    }
    try:
        logging.debug('get enough resnet predictions from the decoder')
        url = "http://" + global_info_ip_port + "/post-enough-resnet-preds"
        params = {"job_id": job_id}
        response = requests.post(url, headers = hdr, data = json.dumps(params))
        ret_val = response.json()
        logging.debug(ret_val)
    except Exception as e:
        logging.debug("Get enough resnet predictions FAILED!!! - possibly running on the execution profiler")
        #logging.debug(e)
        # Fail open: pretend enough predictions exist so polling stops.
        ret_val = True
    return ret_val
#Krishna
def send_prediction_to_decoder_task(job_id, prediction, global_info_ip_port):
    """
    Sending prediction and resnet node task's number to flask server on decoder
    Args:
        prediction: the prediction to be sent
    Returns:
        str: the message if successful, "not ok" otherwise.
    Raises:
        Exception: if sending message to flask server on decoder is failed
    """
    global resnet_task_num
    hdr = {
        'Content-Type': 'application/json',
        'Authorization': None #not using HTTP secure
    }
    try:
        logging.debug('Send prediction to the decoder')
        url = "http://" + global_info_ip_port + "/post-prediction-resnet"
        params = {"job_id": job_id, 'msg': prediction, "resnet_task_num": resnet_task_num}
        response = requests.post(url, headers = hdr, data = json.dumps(params))
        # Server echoes a job id; negative means the job was already handled.
        ret_job_id = response.json()
        logging.debug(ret_job_id)
    except Exception as e:
        logging.debug("Sending my prediction info to flask server on decoder FAILED!!! - possibly running on the execution profiler")
        #logging.debug(e)
        # Fail open: 0 lets the caller proceed as if the job is unprocessed.
        ret_job_id = 0
    return ret_job_id
#Krishna
def main():
    """Build this resnet task's share of the sample file list and run task().

    Every 9th index starting at resnet_task_num+1 is assigned to this
    worker, for the first five of the twenty known class labels.
    """
    all_labels = ['fireengine', 'schoolbus', 'whitewolf', 'hyena', 'tiger',
                  'kitfox', 'persiancat', 'leopard', 'lion', 'americanblackbear',
                  'mongoose', 'zebra', 'hog', 'hippopotamus', 'ox', 'waterbuffalo',
                  'ram', 'impala', 'arabiancamel', 'otter']
    selected_labels = all_labels[0:5]
    max_index = 27
    filelist = [
        'master_' + taskname + '_jobid0_' + str(j) + 'img' + label + '.JPEG'
        for label in selected_labels
        for j in range(resnet_task_num + 1, max_index + 1, 9)
    ]
    outpath = os.path.join(os.path.dirname(__file__), 'sample_input/')
    return task(filelist, outpath, outpath)
|
import math
def N(dim, maxsum=None):
    """Yield every dim-tuple of positive integers, ordered by total sum.

    Tuples with the same sum appear in lexicographic order; generation
    stops once the sum would exceed maxsum (runs forever when maxsum is
    None).
    """
    total = dim  # smallest possible sum: all components equal to 1
    if dim == 1:
        while maxsum is None or total <= maxsum:
            yield (total,)
            total += 1
    else:
        while maxsum is None or total <= maxsum:
            # Enumerate all shorter prefixes, filling in the remainder so
            # each emitted tuple sums exactly to `total`.
            for prefix in N(dim - 1, total - 1):
                yield prefix + (total - sum(prefix),)
            total += 1
# Brute-force search for a 3x3 magic square of perfect squares.
# NOTE: N(3) has no maxsum, so this loop never terminates on its own.
p = [0 for i in range(9)]  # candidate square, cells 1..9 stored at p[0..8]
k = 0  # number of candidate parameter triples examined so far
for n1, n2, n3 in N(3):
    a = n1**2
    b = n2**2
    lam = n3**2 * 3  # row/column/diagonal target sum (must be divisible by 3)
    absum = a+b
    diff = abs(a-b)
    # Feasibility constraints keeping all computed cells positive.
    if(lam > absum
       and 3*absum > lam
       and lam > 3*diff):
        # Cells are parameterized by (a, b, lam); the -1+i index maps the
        # 1-based cell number i to its 0-based list slot.
        p[-1+1] = int(-a + 2*lam/3)
        p[-1+2] = int(a + b - 1*lam/3)
        p[-1+3] = int(-b + 2*lam/3)
        p[-1+4] = int(a - b + 1*lam/3)
        p[-1+5] = int(1*lam/3)
        p[-1+6] = int(-a + b + 1*lam/3)
        p[-1+7] = int(b)
        p[-1+8] = int(-a - b + lam)
        p[-1+9] = int(a)
        # different = all([all(val1 != val2 for val2 in p[index + 1:]) for index, val1 in enumerate(p)])
        # NOTE(review): float sqrt %1 is a fragile perfect-square test for
        # large values — math.isqrt would be exact; confirm before relying on it.
        squares = all([math.sqrt(x) % 1 == 0 for x in p])
        if (squares # different and
            and p[-1+4]+p[-1+5]+p[-1+6] == lam
            and p[-1+7]+p[-1+8]+p[-1+9] == lam
            and p[-1+3]+p[-1+4]+p[-1+7] == lam
            and p[-1+2]+p[-1+5]+p[-1+8] == lam
            and p[-1+3]+p[-1+6]+p[-1+9] == lam
            and p[-1+3]+p[-1+5]+p[-1+9] == lam
            and p[-1+3]+p[-1+5]+p[-1+7] == lam):
            print("Solved with:", p, (a,b,lam))
    k += 1
    # Progress heartbeat every 50000 candidates.
    if k % 50000 == 0:
        print(k, p)
#!/usr/local/bin/python3
#
# See https://theweeklychallenge.org/blog/perl-weekly-challenge-149
#
#
# Run as: python ch-1.py < input-file
#
def digit_sum(number):
    """Return the sum of the decimal digits of a non-negative integer."""
    total = 0
    while number > 0:
        number, digit = divmod(number, 10)
        total += digit
    return total
# Memo table of Fibonacci numbers seen so far (value -> True), seeded with
# the first two terms; fib_prev/fib_last track the growing sequence tail.
fib = {}
fib_prev = 0
fib_last = 1
fib [fib_prev] = True;
fib [fib_last] = True;
def is_fib(n):
    """Return True if n is a Fibonacci number (0, 1, 1, 2, 3, 5, 8, ...).

    Rewritten to be self-contained: the original mutated module globals
    (fib/fib_prev/fib_last) as a memo table, which made the function
    depend on hidden shared state. Walking the sequence directly is
    O(log n) anyway, since Fibonacci numbers grow exponentially.
    """
    if n < 0:
        return False
    a, b = 0, 1
    while b < n:
        a, b = b, a + b
    return n == a or n == b
import fileinput, sys
# For each integer n read from stdin, print the first n non-negative
# integers k whose digit sum is a Fibonacci number, space-separated.
for n in fileinput . input ():
    n = int (n)
    k = 0
    while n > 0:
        if is_fib (digit_sum (k)):
            sys . stdout . write (str (k) + " ")
            n = n - 1  # one fewer result still needed
        k = k + 1
    print ("")
|
from django.conf import settings
from django.conf.urls import static
from django.contrib import admin
from django.urls import path
# Register every installed app's admin module.
admin.autodiscover()
urlpatterns = [
    path("admin/", admin.site.urls),
]
# Serve uploaded media in development (no-op when DEBUG is False).
urlpatterns += static.static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding:utf-8 -*-
import os,urllib2
def downloadBAT():
    # Python 2 code (urllib2, old except syntax).
    # Download AddENV.bat from GitHub into the current directory.
    # Returns True on success, False on any failure (network or file I/O).
    url='https://raw.githubusercontent.com/wkcn/SYSULAB/master/AddENV.bat'
    try:
        content = urllib2.urlopen(url).read()
        f = open("AddENV.bat", "w")
        f.write(content)
        f.close()
        return True
    except Exception, e:
        # Best-effort: any error simply reports failure to the caller.
        return False
def main():
    # Download the helper batch file, then run it to register `need`
    # as a Windows environment variable. Returns 0 on success, 1 otherwise.
    if downloadBAT():
        need = r"E:\nasm" # the environment variable value to add (no spaces)
        cmd = '%s\\AddENV.bat %s' % (os.getcwd(), need)
        # print cmd
        ret = os.popen(cmd).readlines()
        # print ret
        return 0
    else:
        return 1

if __name__ == '__main__':
    main()
# Generated by Django 3.1.1 on 2020-10-16 13:40
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the obsolete `size` field from the product model.
    # Auto-generated migration: do not edit operations by hand.

    dependencies = [
        ('product', '0004_auto_20201013_1856'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='size',
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
# In[20]:
def divisorsSum(n):
    """Return the sum of the proper divisors of n (divisors strictly < n).

    Fixes the original `range(1, n-1)` loop, which wrongly returned 0 for
    n == 2 (its only proper divisor is 1), and replaces the O(n) scan with
    an O(sqrt(n)) divisor-pair walk.
    """
    if n <= 1:
        return 0
    total = 1  # 1 divides every n > 1
    i = 2
    while i * i <= n:
        if n % i == 0:
            total += i
            partner = n // i
            if partner != i:  # avoid double-counting a square root divisor
                total += partner
        i += 1
    return total
# In[21]:
# Project Euler 23: sum of all positive integers that cannot be written
# as the sum of two abundant numbers. 28123 is the proven upper bound.
N = 28123
abundants = []
for i in range(1,N+1):
    # i is abundant when its proper divisors sum to more than i itself.
    if divisorsSum(i) > i:
        abundants.append(i)
# In[22]:
# Precompute every reachable pairwise sum (may exceed N; set dedups).
sumOfAbundantsSet = set()
for i in abundants:
    for j in abundants:
        sumOfAbundantsSet.add(i+j)
# In[25]:
answer = 0
for i in range(1, N+1):
    if i not in sumOfAbundantsSet:
        answer += i
print(answer)
# In[ ]:
|
#!/usr/bin/env python3
# Render a mustache template file with pystache.
# Usage: render.py TEMPLATE.ext [KEY VALUE]...
# Output is written next to the template, with the extension stripped.
import sys
import os
import pystache

template_path = sys.argv[1]
# Fix: files were opened without ever being closed; use context managers.
with open(template_path) as template_file:
    lines = template_file.read()

# Remaining arguments are KEY VALUE pairs forming the render context.
tags = {}
args = sys.argv[2:]
while len(args) > 0:
    key = args.pop(0)
    tags[key] = args.pop(0)

lines = pystache.render(lines, tags)
# Fix: the output file is now only created/truncated after the argument
# parsing and rendering succeed, instead of before.
with open(os.path.splitext(template_path)[0], "w") as out_file:
    out_file.write(lines)
|
# using stack, visit all vertice
def dfs(g, start):
    """Iterative depth-first traversal of graph g from `start`.

    Args:
        g: adjacency mapping {node: iterable of neighbor nodes}.
        start: node to begin the traversal at.

    Returns:
        list: nodes in the order they were visited.
    """
    # Fix: the original initialized `visited` from the module-level `graph`
    # instead of the `g` parameter, so the function only worked on that one
    # global and raised NameError/KeyError for any other input.
    visited = {key: 0 for key in g}
    stk = [start]
    nodes = []
    while stk:
        node = stk.pop()
        for adjacent in g[node]:
            # Push unvisited neighbors not already queued on the stack.
            if visited[adjacent] == 0 and adjacent not in stk:
                stk.append(adjacent)
        visited[node] = 1
        nodes.append(node)
    return nodes
# get shortest path of dfs
def dfs_path(g, start, end):
    """Yield every simple path from `start` to `end` in graph g (DFS order).

    Args:
        g: adjacency mapping {node: set of neighbor nodes} — neighbor
           values must support set difference.
        start, end: endpoint nodes.
    """
    stack = [(start, [start])]
    while stack:
        (vertex, path) = stack.pop()
        # Fix: the original indexed the module-level `graph` instead of the
        # `g` parameter, breaking the function for any other graph.
        for neighbor in g[vertex] - set(path):
            if neighbor == end:
                yield path + [neighbor]
            else:
                stack.append((neighbor, path + [neighbor]))
if __name__ == "__main__":
graph = {
'A': {'B', 'C'},
'B': {'A', 'D', 'E'},
'C': {'A', 'F'},
'D': {'B'},
'E': {'B', 'F'},
'F': {'C', 'E'}
}
result = dfs(graph, 'A')
paths = list(dfs_path(graph, 'A', 'D'))
print(result, paths)
|
import sys
sys.path.append("..")
import astral.client.gameclient as gc
import astral.client.local as local
import physics
class UMMOMob(local.Mob):
    """Base networked mob: position is predicted via the shared physics
    actor-move function; facing/health/velocity/acceleration replicate."""
    template = "UMMOMob"

    def init(self):
        super(UMMOMob,self).init()
        self.prediction_func = physics.actor_move
        # facing: client-side predicted, snapped ("one") interpolation.
        self.add_property(local.NetworkedProperty("facing",predicted=True,interpolation="one"))
        # health: authoritative from server, smoothed linearly.
        self.add_property(local.NetworkedProperty("health",predicted=False,interpolation="linear"))
        for p in ["vx","vy","ax","ay"]:
            self.add_property(local.NetworkedProperty(p,predicted=True))
        self.max_buffer = 2
class Bullet(UMMOMob):
    """Projectile mob: overrides prediction with straight-line motion."""

    def init(self):
        super(Bullet,self).init()
        self.prediction_func = self.up
        self.max_buffer = 2

    def up(self,self2,commands):
        # Prediction step: advance by current velocity each tick.
        # NOTE(review): self2/commands are required by the prediction-func
        # signature but unused here — confirm against physics.actor_move.
        self.x+=self.vx
        self.y+=self.vy
class UMMOClient(gc.GameClient):
    """Game client with client-side bullet spawning and a shoot cooldown
    that freezes movement while the shoot animation plays."""
    # Lets the base client instantiate networked objects by class name.
    remote_classes = globals()

    def __init__(self,*args,**kwargs):
        super(UMMOClient,self).__init__(*args,**kwargs)
        #self.predict_owned = False
        self.keep_updates = 20
        #self.update_rate = 1
        self.interpolation_rate = 0.02
        self.rate_skew = 4
        # Countdown ticks remaining in the shoot animation (0 = can act).
        self.shooting = 0

    def shoot(self):
        # Tell the server, then spawn a locally-predicted bullet immediately
        # so firing feels lag-free.
        self.send({"action":"shoot","client_tick":self.update_count,"cid":str(id(self))})
        self.shooting = 1
        bullet = Bullet(str(id(self)))
        bullet.sprite="art/particle/bullet.png"
        bullet.facing = self.objects[self.owned[0]].facing
        # Velocity by facing: 0=right, 1=up, 2=left, 3=down (speed 32).
        bullet.vx,bullet.vy = {0:[32,0],1:[0,-32],2:[-32,0],3:[0,32]}[bullet.facing]
        # Spawn slightly ahead of the player so the bullet doesn't overlap.
        bullet.x=self.objects[self.owned[0]].x+1.2*bullet.vx
        bullet.y=self.objects[self.owned[0]].y+1.2*bullet.vy
        if bullet.key not in self.owned:
            self.owned.append(bullet.key)
        bullet.sticky = True
        bullet.use_prediction = True
        self.objects.add(bullet)

    def tick(self):
        #Can't move while in shoot animation
        if self.shooting:
            self.shooting -= 1

    def buffer_action(self,action):
        # Drop player input while the shoot animation is active.
        if self.shooting:
            return
        return super(UMMOClient,self).buffer_action(action)

    def update_objects(self):
        # Freeze object updates during the shoot animation as well.
        if self.shooting:
            return
        return super(UMMOClient,self).update_objects()
import tkinter
def getWordList():
    """Read the 'Words' file and return its non-blank lines paired up.

    Consecutive non-blank lines are grouped two at a time, e.g.
    ["a", "b", "c", "d"] -> [["a", "b"], ["c", "d"]]. With an odd count,
    the final group has a single word.

    Returns:
        list[list[str]]: the grouped word pairs.
    """
    words = []
    # Fix: the file handle was never closed; use a context manager.
    with open('Words', 'r') as f:
        for line in f:
            # Fix: line[:-1] silently chopped the last character of a final
            # line that lacks a trailing newline; strip only the newline.
            word = line.rstrip('\n')
            if word:
                words.append(word)
    grouped = []
    for index, word in enumerate(words):
        if index % 2 == 0:
            grouped.append([word])
        else:
            grouped[-1].append(word)
    return grouped
class LDMAppGUI:
    """Tkinter GUI for a lexical decision task: shows word pairs and
    advances to the next pair on a 't' or 'f' keypress."""

    def __init__(self):
        # Keep track of what word has been displayed. Set to 1 because the first word
        # (index zero is already set below
        self.word_index = 1
        self.word_list = getWordList()
        # Construct the gui
        self.window = tkinter.Tk()
        self.window.title("Lexical Decision Making")
        # Any keypress routes through nextWord, which filters for t/f.
        self.window.bind('<Key>',self.nextWord)
        self.word_1_label = tkinter.Label(self.window, text = self.word_list[0][0])
        self.word_2_label = tkinter.Label(self.window, text = self.word_list[0][1])
        self.word_1_label.pack()
        self.word_2_label.pack()

    # Events
    def nextWord(self,event):
        # Advance to the next word pair when 't' (true) or 'f' (false) is
        # pressed; all other keys are ignored.
        key_pressed = str(event.char)
        if(self.word_index < len(self.word_list)):
            if key_pressed == 't':
                pass # do something
            elif key_pressed == 'f':
                pass # do something else
            else:
                # unrecognized key, don't do anything
                return
            # if we haven't returned yet means that the key was recognized, so
            # we can update the labels with the next pair
            word_pair = self.word_list[self.word_index]
            self.word_index += 1
            self.word_1_label["text"] = word_pair[0]
            self.word_2_label["text"] = word_pair[1]

    def mainloop(self):
        # Hand control to Tk's event loop (blocks until the window closes).
        self.window.mainloop()
# actually run the app
# NOTE(review): runs on import too — consider an `if __name__ == "__main__"` guard.
myApp = LDMAppGUI()
myApp.mainloop()
|
import numpy as np
from scipy.stats import ks_2samp
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseUnivariateDrift
from alibi_detect.utils.warnings import deprecated_alias
class KSDrift(BaseUnivariateDrift):
    # Accept the pre-rename keyword for backward compatibility.
    @deprecated_alias(preprocess_x_ref='preprocess_at_init')
    def __init__(
            self,
            x_ref: Union[np.ndarray, list],
            p_val: float = .05,
            x_ref_preprocessed: bool = False,
            preprocess_at_init: bool = True,
            update_x_ref: Optional[Dict[str, int]] = None,
            preprocess_fn: Optional[Callable] = None,
            correction: str = 'bonferroni',
            alternative: str = 'two-sided',
            n_features: Optional[int] = None,
            input_shape: Optional[tuple] = None,
            data_type: Optional[str] = None
    ) -> None:
        """
        Kolmogorov-Smirnov (K-S) data drift detector with Bonferroni or False Discovery Rate (FDR)
        correction for multivariate data.
        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for significance of the K-S test for each feature. If the FDR correction method
            is used, this corresponds to the acceptable q-value.
        x_ref_preprocessed
            Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
            the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
            data will also be preprocessed.
        preprocess_at_init
            Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
            data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
            Typically a dimensionality reduction technique.
        correction
            Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
        alternative
            Defines the alternative hypothesis. Options are 'two-sided', 'less' or 'greater'.
        n_features
            Number of features used in the K-S test. No need to pass it if no preprocessing takes place.
            In case of a preprocessing step, this can also be inferred automatically but could be more
            expensive to compute.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(
            x_ref=x_ref,
            p_val=p_val,
            x_ref_preprocessed=x_ref_preprocessed,
            preprocess_at_init=preprocess_at_init,
            update_x_ref=update_x_ref,
            preprocess_fn=preprocess_fn,
            correction=correction,
            n_features=n_features,
            input_shape=input_shape,
            data_type=data_type
        )
        # Set config
        self._set_config(locals())
        # Other attributes
        self.alternative = alternative

    def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Compute K-S scores and statistics per feature.
        Parameters
        ----------
        x_ref
            Reference instances to compare distribution with.
        x
            Batch of instances.
        Returns
        -------
        Feature level p-values and K-S statistics.
        """
        # Flatten each instance so the K-S test runs per scalar feature.
        x = x.reshape(x.shape[0], -1)
        x_ref = x_ref.reshape(x_ref.shape[0], -1)
        p_val = np.zeros(self.n_features, dtype=np.float32)
        dist = np.zeros_like(p_val)
        for f in range(self.n_features):
            # TODO: update to 'exact' when bug fix is released in scipy 1.5
            dist[f], p_val[f] = ks_2samp(x_ref[:, f], x[:, f], alternative=self.alternative, mode='asymp')
        return p_val, dist
|
class Pycharm:
    """IDE stand-in that only compiles and runs code."""

    def execute(self):
        # Announce each pipeline step in order.
        for step in ("Compiling.", "Running."):
            print(step)
class VScode:
    """IDE stand-in that lints (spelling, conventions) before compiling
    and running."""

    def execute(self):
        # Announce each pipeline step in order.
        for step in ("Spell Check.", "Convention Check.", "Compiling.", "Running."):
            print(step)
class Laptop:
    """Runs whatever IDE it is given — duck typing: any object exposing
    an execute() method works."""

    def code(self, ide):
        # Delegate the whole build pipeline to the supplied IDE object.
        ide.execute()
if __name__ == "__main__":
print("\n")
# ide = Pycharm()
ide = VScode()
lap = Laptop()
lap.code(ide)
print("\n") |
from django.conf.urls import patterns, url
from cave import views
# URL routes for the cave app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# a plain list of url()/path() entries is the modern form — confirm the
# project's Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$', views.home, name='home'),
    url(r'^populate/', views.populate, name='populate'),
    url(r'^testcave/', views.testcave, name='testcave'),
    url(r'^home/', views.home, name='home'),
    url(r'^voirCave/', views.voirCave, name='voirCave'),
    url(r'^add_bouteille$', views.add_bouteille, name='add_bouteille'),
    url(r'^place_bouteille$', views.place_bouteille, name='place_bouteille'),
    #url(r'^$', views.TicketView, name='TicketView')
)
|
# -*- coding: utf-8 -*-
"""
MAS480 Mathematics and AI
Homework 3 - Module for Sampling and Comparing result
20180127 Woojin Kim
"""
import numpy as np
import pandas as pd
def sampling(g, initial_point, length, num, printable = True):
    """Draw `num` Markov-chain sample paths of `length` steps from graph g.

    Each path starts at the state identified by `initial_point` and moves
    along incident edges with the edges' transition probabilities.
    `g` is expected to expose get_state(), get_incident_edges(), and edges
    expose opposite()/get_prob() — confirm against the graph module.

    Returns:
        list[tuple]: one tuple of visited state values per sample path.
    """
    all_samples = []
    while num > 0:
        current_state = g.get_state(initial_point)
        sample_result = []
        iter_length = length
        while iter_length > 0:
            sample_result.append(current_state.state())
            #print("current state: ", current_state.state())
            edges = g.get_incident_edges(current_state)
            # Collect the candidate next states and their probabilities.
            transition_state = []
            transition_prob = []
            for edge in edges:
                transition_state.append(edge.opposite(current_state))
                transition_prob.append(edge.get_prob())
            #print("incident states: ", [state.state() for state in transition_state])
            #print("transition prob: ", transition_prob)
            # Sample the next state according to the transition distribution.
            current_state = np.random.choice(transition_state, 1, replace = True, p = transition_prob)[0]
            iter_length -= 1
        all_samples.append(tuple(sample_result))
        num -= 1
    if printable:
        for sample in all_samples:
            print(sample)
    return all_samples
def generate_distribution():
    """Return a zero-initialised distribution over every 3-step path
    through the four states, keyed by (i, j, k) tuples."""
    states = [1, 2, 3, 4]
    return {(i, j, k): 0
            for i in states
            for j in states
            for k in states}
def empirical_distribution(dist, samples, t, total_num):
    """Accumulate into `dist` the empirical frequency of the first t+1
    states of each sample, each occurrence weighted by 1/total_num.

    Returns the mutated `dist` for convenience.
    """
    weight = 1 / total_num
    for sample in samples:
        for position, state in enumerate(sample):
            dist[state] = dist.get(state, 0) + weight
            # Stop after the (t+1)-th state of this sample.
            if position == t:
                break
    return dist
def long_term_average(t, samples):
    """Average the empirical distributions over horizons 1..t.

    For each horizon, the per-sample normaliser is len(samples) * horizon;
    the t snapshots are averaged with equal weight 1/t.
    """
    averaged = generate_distribution()
    for step in range(t):
        snapshot = empirical_distribution(
            generate_distribution(), samples, step, len(samples) * (step + 1))
        for state in averaged:
            averaged[state] += snapshot[state] / t
    return averaged
def calculate_long_term_averages(list_t, samples, sample_size):
    """Compute the long-term average distribution for each horizon in list_t.

    Raises:
        ValueError: if any requested horizon exceeds sample_size.
    """
    averages = []
    for horizon in list_t:
        if horizon > sample_size:
            raise ValueError("t must not be greater than sample size")
        averages.append(long_term_average(horizon, samples))
    return averages
def compare_average_and_target(average_dist, target_dist, t):
    """Overlay the empirical long-term distribution (blue) and the target
    distribution (red) on one line plot, and print their L1 distance."""
    plot1 = pd.DataFrame([average_dist]).T.plot.line(color = 'blue', legend = False)
    # Draw the target on the same axes as the empirical curve.
    plot2 = pd.DataFrame([target_dist]).T.plot.line(ax = plot1, color = 'red', legend = False)
    plot2.set_xlabel('State')
    plot2.set_ylabel('Probability')
    plot2.set_title('Comparing empirical long-term(blue) a(' + str(t) + ') with target(red)')
    plot2.set_ylim((0, 0.1))
    print('The L1 loss of empirical long-term distribution a(' + str(t) + ') is:')
    # L1 distance summed over all states present in the average distribution.
    error = 0
    for state in average_dist.keys():
        error += abs(average_dist[state] - target_dist[state])
    print(error)
|
'''
//参考英文网站热评第一。这题可以用快慢指针的思想去做,有点类似于检测是否为环形链表那道题
//如果给定的数字最后会一直循环重复,那么快的指针(值)一定会追上慢的指针(值),也就是
//两者一定会相等。如果没有循环重复,那么最后快慢指针也会相等,且都等于1。
'''
# 快慢指针方法
class Solution(object):
    """LeetCode 202 "Happy Number" solved with Floyd cycle detection:
    a fast pointer (two digit-square-sum steps per iteration) and a slow
    pointer (one step) must eventually meet — at 1 for a happy number,
    or inside a cycle otherwise."""

    def isHappy(self, n):
        """
        :type n: int
        :rtype: bool
        """
        def digit_square_sum(num):
            # Sum of the squares of num's decimal digits.
            total = 0
            while num != 0:
                total += (num % 10) ** 2
                num //= 10
            return total

        slow = digit_square_sum(n)
        fast = digit_square_sum(digit_square_sum(n))
        # Fix: removed the stray debug print(slow) the original emitted on
        # every loop iteration.
        while slow != fast:
            slow = digit_square_sum(slow)
            fast = digit_square_sum(digit_square_sum(fast))
        # Meeting point is 1 exactly for happy numbers.
        return fast == 1
# Quick manual check: 7 is a happy number, so this prints True.
example = Solution()
output = example.isHappy(7)
print(output)
|
string_variable = "Today is the "
number_variable = 22
month_variable = " day of the month"
output_variable = string_variable + str(number_variable) + month_variable
print(output_variable) |
import pickle
# Running count of wrong answers across quiz rounds.
c=0
def agregar(dic):
    # Prompt the user for one question/answer pair and add it to the dict
    # (question text -> expected answer).
    x = input("Pregunta: ")
    y = input("Respuesta: ")
    dic[x] = y
def cargar_datos():
    """Load the question/answer dict from parcial.dat.

    Returns an empty dict when the file is missing, empty or unreadable.
    """
    try:
        # BUG FIX: pickle files must be opened for reading in binary mode.
        # The original "a" (append, write-only, text) mode made pickle.load
        # always fail, so saved data was never actually loaded.
        with open("parcial.dat", "rb") as f:
            return pickle.load(f)
    except (OSError, IOError, EOFError, pickle.UnpicklingError):
        return dict()
def guardar_datos(dic):
    """Persist *dic* to parcial.dat, replacing any previous contents."""
    # BUG FIX: "wb" instead of "a" — pickle writes bytes, and truncating
    # avoids stacking stale pickles the way append mode did.
    with open("parcial.dat", "wb") as f:
        pickle.dump(dic, f)
def main():
    """Interactive quiz loop: take the quiz, add questions, or save and exit."""
    global c
    dic = cargar_datos()
    menu ='''
    1. Parcial
    2. Añadir preguntas.
    3. Guardar y salir.
    '''
    while True:
        print(menu)
        decision = input("¿Que quieres hacer?: ")
        if decision == "1":
            # Quiz over every stored question; misses accumulate in the
            # global counter c.
            for es, ing in dic.items():
                resp = input('Escribe en ingles "{}": '.format(es))
                if resp == ing:
                    print ("Correcto.")
                else:
                    print ('Incorrecto, es "{}".'.format(ing))
                    c = c + 1
            print ("errastes: ",c)
            # BUG FIX: the original recursed into main() here, which re-read
            # the file (discarding unsaved additions) and grew the call stack;
            # the surrounding while-loop already shows the menu again.
        elif decision == "2":
            agregar(dic)
        elif decision == "3":
            guardar_datos(dic)
            break
        else:
            print('Opción inválida, intentelo de nuevo.')

if __name__ == '__main__':
    main()
# Keyboard Service
from service.language import Language
class Keyboard():
    """
    Keyboard Service. Provides a keyboard model and methods based on key layout
    and language configurations.
    """

    # finger identifiers
    LEFT_PINKY = 'LP'
    LEFT_RING = 'LR'
    LEFT_MIDDLE = 'LM'
    LEFT_INDEX = 'LI'
    LEFT_THUMB = 'LT'
    THUMB = 'T'
    RIGHT_THUMB = 'RT'
    RIGHT_INDEX = 'RI'
    RIGHT_MIDDLE = 'RM'
    RIGHT_RING = 'RR'
    RIGHT_PINKY = 'RP'

    # hands (note: the generic THUMB used for the spacebar belongs to neither)
    LEFT_HAND = [
        LEFT_PINKY,
        LEFT_RING,
        LEFT_MIDDLE,
        LEFT_INDEX,
        LEFT_THUMB
    ]
    RIGHT_HAND = [
        RIGHT_THUMB,
        RIGHT_INDEX,
        RIGHT_MIDDLE,
        RIGHT_RING,
        RIGHT_PINKY
    ]

    class Key():
        """A single physical key: position, assigned finger, difficulty score
        and the characters the active layout maps onto it."""
        def __init__(
            self,
            position,
            finger,
            difficulty=None,
            primary_char=None,
            secondary_char=None,
        ):
            self.position = position          # [row, column] on the board
            self.finger = finger              # finger identifier constant
            self.difficulty = difficulty      # relative typing-cost score
            self.primary_char = primary_char
            self.secondary_char = secondary_char

        def get_finger(self):
            return self.finger

        def set_difficulty(self, difficulty):
            self.difficulty = difficulty

        def get_difficulty(self, char=None):
            return self.difficulty

        def set_characters(self, primary_char, secondary_char):
            self.primary_char = primary_char
            self.secondary_char = secondary_char

        def get_characters(self):
            return {
                'primary': self.primary_char,
                'secondary': self.secondary_char
            }

        def get_primary_char(self):
            return self.primary_char

        def get_secondary_char(self):
            return self.secondary_char

        def to_dict(self):
            return {
                'position': self.position,
                'finger': self.finger,
                'difficulty': self.difficulty,
                'primary_char': self.primary_char,
                'secondary_char': self.secondary_char
            }

    # qwerty layout:
    # ` 1 2 3 4 5 6 7 8 9 0 - = D
    # T q w e r t y u i o p [ ] \
    # C a s d f g h j k l ; ' R
    # S z x c v b n m , . / S
    # ct cm space cm
    # base keyboard model: per-key finger assignment and base difficulty.
    # Index-aligned with the layout lists (e.g. QWERTY below).
    BASE_KEYBOARD_MODEL = [
        # 1st row
        Key([0,0], LEFT_PINKY, 425), # ['`','~']
        Key([0,1], LEFT_PINKY, 350), # ['1','!']
        Key([0,2], LEFT_RING, 350), # ['2','@']
        Key([0,3], LEFT_MIDDLE, 350), # ['3','#']
        Key([0,4], LEFT_INDEX, 350), # ['4','$']
        Key([0,5], LEFT_INDEX, 425), # ['5','%']
        Key([0,6], RIGHT_INDEX, 425), # ['6','^']
        Key([0,7], RIGHT_INDEX, 350), # ['7','&']
        Key([0,8], RIGHT_MIDDLE, 350), # ['8','*']
        Key([0,9], RIGHT_RING, 350), # ['9','(']
        Key([0,10], RIGHT_PINKY, 350), # ['0',')']
        Key([0,11], RIGHT_PINKY, 425), # ['-','_']
        Key([0,12], RIGHT_PINKY, 450), # ['=','+']
        Key([0,13], RIGHT_PINKY, 450), # ['delete']
        # 2nd row
        Key([1,0], LEFT_PINKY, 375), # ['tab']
        Key([1,1], LEFT_PINKY, 275), # ['q','Q']
        Key([1,2], LEFT_RING, 150), # ['w','W']
        Key([1,3], LEFT_MIDDLE, 150), # ['e','E']
        Key([1,4], LEFT_INDEX, 150), # ['r','R']
        Key([1,5], LEFT_INDEX, 250), # ['t','T']
        Key([1,6], RIGHT_INDEX, 250), # ['y','Y']
        Key([1,7], RIGHT_INDEX, 150), # ['u','U']
        Key([1,8], RIGHT_MIDDLE, 150), # ['i','I']
        Key([1,9], RIGHT_RING, 150), # ['o','O']
        Key([1,10], RIGHT_PINKY, 275), # ['p','P']
        Key([1,11], RIGHT_PINKY, 400), # ['[','{']
        Key([1,12], RIGHT_PINKY, 425), # [']','}']
        Key([1,13], RIGHT_PINKY, 425), # ['\\', '|']
        # 3rd row
        Key([2,0], LEFT_PINKY, 225), # ['capslock']
        Key([2,1], LEFT_PINKY, 100), # ['a','A']
        Key([2,2], LEFT_RING, 100), # ['s','S']
        Key([2,3], LEFT_MIDDLE, 100), # ['d','D']
        Key([2,4], LEFT_INDEX, 100), # ['f','F']
        Key([2,5], LEFT_INDEX, 175), # ['g','G']
        Key([2,6], RIGHT_INDEX, 175), # ['h','H']
        Key([2,7], RIGHT_INDEX, 100), # ['j','J']
        Key([2,8], RIGHT_MIDDLE, 100), # ['k','K']
        Key([2,9], RIGHT_RING, 100), # ['l','L']
        Key([2,10], RIGHT_PINKY, 100), # [';',':']
        Key([2,11], RIGHT_PINKY, 225), # ['\'','"']
        Key([2,12], RIGHT_PINKY, 275), # ['return']
        # 4th row
        Key([3,0], LEFT_PINKY, 200), # ['left shift']
        Key([3,1], LEFT_PINKY, 275), # ['z','Z']
        Key([3,2], LEFT_RING, 275), # ['x','X']
        Key([3,3], LEFT_MIDDLE, 200), # ['c','C']
        Key([3,4], LEFT_INDEX, 200), # ['v','V']
        Key([3,5], LEFT_INDEX, 300), # ['b','B']
        Key([3,6], RIGHT_INDEX, 200), # ['n','N']
        Key([3,7], RIGHT_INDEX, 200), # ['m','M']
        Key([3,8], RIGHT_MIDDLE, 200), # [',','<']
        Key([3,9], RIGHT_RING, 275), # ['.','>']
        Key([3,10], RIGHT_PINKY, 275), # ['/','?']
        Key([3,11], RIGHT_PINKY, 200), # ['right shift']
        # 5th row
        Key([4,0], LEFT_PINKY, 425), # ['left fn']
        Key([4,1], LEFT_PINKY, 425), # ['left control']
        Key([4,2], LEFT_THUMB, 150), # ['left option']
        Key([4,3], LEFT_THUMB, 125), # ['left command']
        Key([4,4], THUMB, 50), # ['spacebar']
        Key([4,5], RIGHT_THUMB, 125), # ['right command']
        # NOTE(review): RIGHT_PINKY here is asymmetric with LEFT_THUMB used
        # for 'left option' — confirm whether RIGHT_THUMB was intended.
        Key([4,6], RIGHT_PINKY, 150) # ['right option']
    ]

    # keyboard layouts
    QWERTY = [
        # 1st row
        ['`','~'],
        ['1','!'],
        ['2','@'],
        ['3','#'],
        ['4','$'],
        ['5','%'],
        ['6','^'],
        ['7','&'],
        ['8','*'],
        ['9','('],
        ['0',')'],
        ['-','_'],
        ['=','+'],
        ['delete'],
        # 2nd row
        ['tab'],
        ['q','Q'],
        ['w','W'],
        ['e','E'],
        ['r','R'],
        ['t','T'],
        ['y','Y'],
        ['u','U'],
        ['i','I'],
        ['o','O'],
        ['p','P'],
        ['[','{'],
        [']','}'],
        ['\\', '|'],
        # 3rd row
        ['capslock'],
        ['a','A'],
        ['s','S'],
        ['d','D'],
        ['f','F'],
        ['g','G'],
        ['h','H'],
        ['j','J'],
        ['k','K'],
        ['l','L'],
        [';',':'],
        ['\'','"'],
        ['return'],
        # 4th row
        ['left shift'],
        ['z','Z'],
        ['x','X'],
        ['c','C'],
        ['v','V'],
        ['b','B'],
        ['n','N'],
        ['m','M'],
        [',','<'],
        ['.','>'],
        ['/','?'],
        ['right shift'],
        # 5th row
        ['left fn'],
        ['left control'],
        ['left option'],
        ['left command'],
        ['spacebar'],
        ['right command'],
        ['right option']
    ]

    def __init__(self, keyboard_layout, language=None):
        """Build a keyboard from a layout (e.g. Keyboard.QWERTY).

        Args:
            keyboard_layout: list of per-key character lists, index-aligned
                with BASE_KEYBOARD_MODEL.
            language: optional language identifier handed to Language; when
                set, key difficulties are discounted by letter frequency.
        """
        self.keyboard_layout = keyboard_layout
        self.language = Language(language) if language is not None else None
        # BUG FIX: build per-instance Key objects. The original aliased the
        # class-level BASE_KEYBOARD_MODEL list, so every Keyboard instance
        # mutated the same shared Key objects (set_characters /
        # set_difficulty), corrupting other instances and compounding
        # language-based difficulty adjustments on repeated construction.
        self.keyboard_model = [
            self.Key(list(base_key.position), base_key.finger, base_key.difficulty)
            for base_key in self.BASE_KEYBOARD_MODEL
        ]
        # configure keyboard keys based on configurations
        for i, keyboard_key in enumerate(self.keyboard_model):
            # set key characters based on keyboard layout
            key_chars = keyboard_layout[i]
            primary_char = key_chars[0]
            if len(key_chars) == 2:
                secondary_char = key_chars[1]
            else:
                secondary_char = None
            keyboard_key.set_characters(primary_char, secondary_char)
            # if language is set, adjust key difficulty score based on the
            # language's letter frequency (given as a percentage)
            if self.language is not None:
                key_char_frequency = self.language.get_letter_frequency(
                    letter=primary_char.upper()
                )
                if key_char_frequency is not None:
                    init_score = keyboard_key.get_difficulty()
                    to_deduct = init_score * (key_char_frequency / 100)
                    adjusted_score = init_score - to_deduct
                    keyboard_key.set_difficulty(adjusted_score)

    def get_key_from_character(self, char):
        """Return the Key whose primary or secondary char is *char*, else None."""
        for key in self.keyboard_model:
            chars = list(key.get_characters().values())
            if char in chars:
                return key
        return None

    def calculate_key_transition_difficulty(
        self,
        char_1,
        key_1,
        char_2,
        key_2,
        round_to_int=False
    ):
        """Score the difficulty of typing *char_2* right after *char_1*.

        Starts from a quarter of the two keys' average difficulty, then
        discounts by a third for a repeated key or same-hand transition,
        and by two thirds for a hand alternation.
        """
        key_1_d = self.get_key_difficulty(char=char_1, key=key_1)
        key_2_d = self.get_key_difficulty(char=char_2, key=key_2)
        avg_difficulty = (key_1_d + key_2_d) / 2
        # 1/4 is a magic number here
        base_trans_difficulty = avg_difficulty / 4
        key_1_fing = key_1.get_finger()
        key_2_fing = key_2.get_finger()
        # NOTE(review): the generic THUMB (spacebar) is in neither hand list,
        # so it is classified as 'right' here — confirm that is intended.
        key_1_hand = 'left' if key_1_fing in self.LEFT_HAND else 'right'
        key_2_hand = 'left' if key_2_fing in self.LEFT_HAND else 'right'
        res = None
        # same key 2 times in a row
        if key_1 == key_2:
            res = base_trans_difficulty - (base_trans_difficulty / 3)
        # same finger on same hand
        elif key_1_fing == key_2_fing:
            res = base_trans_difficulty
        # same hand
        elif key_1_hand == key_2_hand:
            res = base_trans_difficulty - (base_trans_difficulty / 3)
        # otherwise, is different hand
        else:
            res = base_trans_difficulty - (base_trans_difficulty * 2/3)
        if round_to_int:
            return int(res)
        else:
            return res

    def get_key_difficulty(self, char, key):
        """Difficulty of producing *char* on *key*; shifted (secondary)
        characters additionally pay the left-shift key's difficulty."""
        d = key.get_difficulty()
        if char == key.get_secondary_char():
            shift_key = self.get_key_from_character(char='left shift')
            d = d + shift_key.get_difficulty()
        return d

    def get_keyboard_difficulty_for_word(self, word, round_to_int=False):
        """Total difficulty of *word*: per-character key difficulties plus the
        transition difficulty between each consecutive pair of keys.
        Characters with no key in the layout are silently skipped."""
        keys_data = []
        for char in word:
            k = {
                'char': char,
                'key': self.get_key_from_character(char)
            }
            # ignore keys that were not found
            if k['key'] is not None:
                keys_data.append(k)
        difficulty_vals = []
        keys_data_len = len(keys_data)
        for index, key_data in enumerate(keys_data):
            # add key difficulty based on char
            k_difficulty = self.get_key_difficulty(
                char=key_data['char'],
                key=key_data['key']
            )
            difficulty_vals.append(k_difficulty)
            # add key transition difficulties
            if index < keys_data_len - 1:
                trans_difficulty = self.calculate_key_transition_difficulty(
                    char_1=key_data['char'],
                    key_1=key_data['key'],
                    char_2=keys_data[index + 1]['char'],
                    key_2=keys_data[index + 1]['key']
                )
                difficulty_vals.append(trans_difficulty)
        if round_to_int:
            return int(sum(difficulty_vals))
        else:
            return sum(difficulty_vals)
|
__author__ = 'socialmoneydev'
from utils.requestor import Requestor
from models.jsonBase import JsonBase
from account import Account
from externalaccount import ExternalAccount
from models.customeraddress import CustomerAddress
from models.customerphone import CustomerPhone
from models.customeridonly import CustomerIdOnly
from models.customerverifyrequest import CustomerVerifyRequest
class Customer(JsonBase):
    """Customer model with REST helpers for the /customer/* endpoints.

    Attributes mirror the service's JSON customer representation; the
    instance methods wrap Requestor GET/POST calls.
    """
    def __init__(self):
        # request / paging metadata
        self.requestId = None
        self.customerCount = None
        # identity
        self.customerId = None
        self.firstName = None
        self.middleName = None
        self.lastName = None
        self.birthDate = None
        self.gender = None
        self.culture = None
        self.tag = None
        self.status = None
        self.createdDate = None
        # government identifiers
        self.taxId = None
        self.driversLicenseNumber = None
        self.driversLicenseState = None
        self.driversLicenseExpirationDate = None
        self.passportNumber = None
        self.passportCountry = None
        # contact / account state
        self.emailAddress = None
        self.isActive = None
        self.isLocked = None
        self.lockedDate = None
        self.lockedReason = None
        self.deceasedDate = None
        self.isSubjectToBackupWithholding = None
        self.isOptedInToBankCommunication = None
        self.isDocumentsAccepted = None
        # nested collections, populated by fromJson
        self.phones = []
        self.addresses = []
        self.accounts = []
        self.externalAccounts = []
    def fromJson(self, json, classDefs):
        """Deserialize, registering element classes for the nested lists.

        NOTE(review): when the caller passes a classDefs dict, the four keys
        below are written into that same dict (caller-visible mutation) —
        confirm this is intended.
        """
        classDefs = classDefs or dict()
        classDefs['phones'] = CustomerPhone
        classDefs['addresses'] = CustomerAddress
        classDefs['accounts'] = Account
        classDefs['externalAccounts'] = ExternalAccount
        return super(Customer, self).fromJson(json, classDefs)
    @staticmethod
    def getItem(customerId, connection = None, loggingObject = None):
        # Convenience wrapper: fetch one customer by id.
        c = Customer()
        c.customerId = customerId
        return c.get(connection, loggingObject)
    def get(self, connection = None, loggingObject = None):
        return Requestor().get("/customer/get/{0}".format(self.customerId), Customer, connection, loggingObject)
    @staticmethod
    def getItemByTag(tag, connection = None, loggingObject = None):
        # Convenience wrapper: fetch one customer by its tag.
        c = Customer()
        c.tag = tag
        return c.getByTag(connection, loggingObject)
    def getByTag(self, connection = None, loggingObject = None):
        return Requestor().get("/customer/getbytag/{0}".format(Requestor.escape(self.tag)), Customer, connection, loggingObject)
    @staticmethod
    def listItems(pageNumber = 0, pageSize = 200, connection = None, loggingObject = None):
        return Customer().list(pageNumber, pageSize, connection, loggingObject)
    def list(self, pageNumber = 0, pageSize = 200, connection = None, loggingObject = None):
        # NOTE(review): pageNumber/pageSize are accepted but never sent to
        # the endpoint (unlike search below) — confirm whether the URL
        # should carry the paging query string.
        return Requestor().get("/customer/list", Customer, connection, loggingObject)
    def create(self, connection = None, loggingObject = None):
        # Returns only the new customer's id.
        cid = Requestor().post("/customer/create", CustomerIdOnly, self, connection, loggingObject)
        return cid.customerId
    def initiate(self, connection = None, loggingObject = None):
        return Requestor().post("/customer/initiate", CustomerIdOnly, self, connection, loggingObject)
    def verify(self, verificationId, answers, connection = None, loggingObject = None):
        # Delegates identity verification to a CustomerVerifyRequest.
        cvr = CustomerVerifyRequest()
        cvr.verificationId = verificationId
        cvr.answers = answers
        return cvr.verify(connection, loggingObject)
    def update(self, connection = None, loggingObject = None):
        cid = Requestor().post("/customer/update", CustomerIdOnly, self, connection, loggingObject)
        return cid.customerId
    def deactivate(self, connection = None, loggingObject = None):
        cid = Requestor().post("/customer/deactivate", CustomerIdOnly, self, connection, loggingObject)
        return cid.customerId
    def search(self, pageNumber = 0, pageSize = 200, connection = None, loggingObject = None):
        return Requestor().post("/customer/search/?pageNumber={0}&pageSize={1}".format(pageNumber, pageSize), Customer, self, connection, loggingObject)
#!/usr/bin/python
'''
File name: multiple_btree_timing_ltarchive.py
Prepared by: MCL
Date created: 16/8/2017
Date last modified: 2/11/2017
Python Version: 2.7
This script compares the run time of PostgreSQL queries with a positional
and an instrumental selection; 4 cases are compared:
(1) Database with only spatial index on positions
(2) Database with only B-Tree index on instruments
(3) Database with both indexes
(4) Database without any index
'''
import os
import inspect
import sys
import numpy as np
import psycopg2
import matplotlib
import random
from psql_functions import PsqlQuery
try:
matplotlib.use('TkAgg')
except:
pass
from matplotlib.gridspec import GridSpec
from matplotlib import pyplot as plt
# plt.ion()
pq = PsqlQuery()

# Get output path if provided, default to ./output/ next to this script.
# BUG FIX: the original read bare `argv[1]`, which always raised NameError
# (only `sys` is imported) and was swallowed by a bare except, so the
# command-line argument was silently ignored.
try:
    output_path = sys.argv[1]
except IndexError:
    filename = inspect.getframeinfo(inspect.currentframe()).filename
    output_path = os.path.dirname(os.path.abspath(filename)) + '/output/'
# Equatorial coordinates of the Galactic Centre (degrees)
ra_gc = 266.41683
dec_gc = -29.00781
# Equatorial coordinates of the Galactic North Pole (degrees)
ra_gnp = 192.85951
dec_gnp = 27.12834
# Number of timing repeats per index type
n_repeat = 1000
def fetch_distinct_column(column):
    """Return the distinct values of *column* from allkeys_testing.

    Opens a fresh connection per call (mirroring the original one-shot
    connection pattern) and returns the fetched rows as a numpy array.
    The connection is always closed, even if the query fails.
    """
    conn = psycopg2.connect(
        database="ltarchive",
        user="dbuser",
        password="dbuser",
        host="150.204.240.113",
        port="6543")
    try:
        cur = conn.cursor()
        cur.execute('SELECT DISTINCT "{0}" FROM allkeys_testing;'.format(column))
        values = np.array(cur.fetchall())
        cur.close()
    finally:
        conn.close()
    return values

# Lists of unique identifiers used to build the random test queries below.
# (Replaces six copy-pasted connect/execute/close stanzas.)
obsid_list = fetch_distinct_column("OBSID")
userid_list = fetch_distinct_column("USERID")
tagid_list = fetch_distinct_column("TAGID")
groupid_list = fetch_distinct_column("GROUPID")
propid_list = fetch_distinct_column("PROPID")
instrume_list = fetch_distinct_column("INSTRUME")
# Per-repeat result-count and timing arrays for each index type under test.
# GIN index
n_results_gin_exact = np.zeros(n_repeat)
time_gin_exact = np.zeros(n_repeat)
# B-tree index
n_results_btree_exact = np.zeros(n_repeat)
time_btree_exact = np.zeros(n_repeat)
# Hash index
n_results_hash_exact = np.zeros(n_repeat)
time_hash_exact = np.zeros(n_repeat)
# Python 2 script (print statements below).
for i in range(n_repeat):
    print "Run " + str(i+1) + " of " + str(n_repeat)
    # Pick two columns
    # a and b are distinct column selectors in {1, 2, 3}.
    a = int(np.ceil(random.uniform(0, 3)))
    b = a
    while b == a:
        b = int(np.ceil(random.uniform(0, 3)))
    phrase1 = ''
    phrase2 = ''
    # Draw a non-empty random value for each selected column.
    # (Style note: `phrase == None` should be `phrase is None` per PEP 8.)
    if a == 1:
        column1 = "\"TAGID\""
        while (phrase1 == None) or (phrase1 == ''):
            phrase1 = np.random.choice(np.ndarray.flatten(tagid_list), 1)[0]
    if a == 2:
        column1 = "\"USERID\""
        while (phrase1 == None) or (phrase1 == ''):
            phrase1 = np.random.choice(np.ndarray.flatten(userid_list), 1)[0]
    if a == 3:
        column1 = "\"INSTRUME\""
        while (phrase1 == None) or (phrase1 == ''):
            phrase1 = np.random.choice(np.ndarray.flatten(instrume_list), 1)[0]
    if b == 1:
        column2 = "\"TAGID\""
        while (phrase2 == None) or (phrase2 == ''):
            phrase2 = np.random.choice(np.ndarray.flatten(tagid_list), 1)[0]
    if b == 2:
        column2 = "\"USERID\""
        while (phrase2 == None) or (phrase2 == ''):
            phrase2 = np.random.choice(np.ndarray.flatten(userid_list), 1)[0]
    if b == 3:
        column2 = "\"INSTRUME\""
        while (phrase2 == None) or (phrase2 == ''):
            phrase2 = np.random.choice(np.ndarray.flatten(instrume_list), 1)[0]
    # constructing the query statement
    # NOTE(review): the SQL is built by string concatenation from database
    # values — not injection-safe in general; tolerable only because the
    # values come straight from the trusted test database itself.
    query_gin =\
        "EXPLAIN ANALYSE SELECT * FROM allkeys_testing_gin WHERE " +\
        column1 + " = '" + phrase1 + "' AND " +\
        column2 + " = '" + phrase2 + "';"
    query_btree =\
        "EXPLAIN ANALYSE SELECT * FROM allkeys_testing WHERE " +\
        column1 + " = '" + phrase1 + "' AND " +\
        column2 + " = '" + phrase2 + "';"
    query_hash =\
        "EXPLAIN ANALYSE SELECT * FROM allkeys_testing_hash WHERE " +\
        column1 + " = '" + phrase1 + "' AND " +\
        column2 + " = '" + phrase2 + "';"
    # Connect to the database
    conn = psycopg2.connect(
        database="ltarchive",
        user="dbuser",
        password="dbuser",
        host="150.204.240.113",
        port="6543")
    # Run the queries and save the results in arrays
    n_temp_gin, time_temp_gin = pq.run_query(conn, query_gin)
    n_temp_btree, time_temp_btree = pq.run_query(conn, query_btree)
    n_temp_hash, time_temp_hash = pq.run_query(conn, query_hash)
    n_results_gin_exact[i], time_gin_exact[i] = n_temp_gin, time_temp_gin
    n_results_btree_exact[i], time_btree_exact[i] = n_temp_btree, time_temp_btree
    n_results_hash_exact[i], time_hash_exact[i] = n_temp_hash, time_temp_hash
    conn.close()
# Scatter query time against number of matches (log-log) for the hash,
# B-tree and GIN indexed tables in three side-by-side panels.
fig2 = plt.figure(2, figsize=(10, 6))
fig2.clf()
gridspec = GridSpec(1, 3)
gridspec.update(left=0.12, right=0.95, top=0.98, bottom=0.1, wspace=0)
ax1, ax2, ax3 = (fig2.add_subplot(gridspec[0, col]) for col in range(3))
panels = [
    (ax1, n_results_hash_exact, time_hash_exact),
    (ax2, n_results_btree_exact, time_btree_exact),
    (ax3, n_results_gin_exact, time_gin_exact),
]
for axis, n_results, times in panels:
    axis.scatter(np.log10(n_results), np.log10(times), s=2)
    axis.grid()
    axis.set_xlim(0, 6)
    axis.set_ylim(-4.0, 2)
ax2.set_xlabel('Number of matches')
ax1.set_ylabel('Query Time / s')
# Relabel the log-scale ticks as powers of ten; hide y labels on inner panels.
ax1.set_yticklabels([r'$10^{-4}$', r'$10^{-3}$',r'$10^{-2}$',r'$10^{-1}$',r'$10^{0}$',r'$10^{1}$',r'$10^{2}$'])
ax2.set_yticklabels([''])
ax3.set_yticklabels([''])
ax1.set_xticklabels([r'$10^{0}$',r'$10^{1}$',r'$10^{2}$',r'$10^{3}$',r'$10^{4}$',r'$10^{5}$'])
ax2.set_xticklabels([r'$10^{0}$',r'$10^{1}$',r'$10^{2}$',r'$10^{3}$',r'$10^{4}$',r'$10^{5}$'])
ax3.set_xticklabels([r'$10^{0}$',r'$10^{1}$',r'$10^{2}$',r'$10^{3}$',r'$10^{4}$',r'$10^{5}$',r'$10^{6}$'])
plt.savefig(output_path + 'query_time_hash_btree_gin_double_filters_compared.png')
|
#!/usr/bin/env python
import sys
sys.path.append("/home2/data/Projects/CWAS/share/lib/surfwrap")
import os
from os import path as op
import numpy as np
import nibabel as nib
from pandas import read_csv
from newsurf import *
from rpy2 import robjects
from rpy2.robjects.packages import importr
# Plots each of the applicability CWAS results
# ldopa, development+motion, adhd200_rerun
# Small path helpers (dirnames/rjoins/ljoins are unused in this chunk —
# presumably kept for parity with sibling scripts; confirm before removing).
dirnames = lambda paths: [ op.dirname(path) for path in paths ]
rjoins = lambda paths,add_path: [ op.join(path, add_path) for path in paths ]
ljoins = lambda add_path,paths: [ op.join(path, add_path) for path in paths ]
# General Variables
base = "/home2/data/Projects/CWAS"
cbarfile = "/home2/data/Projects/CWAS/share/lib/surfwrap/colorbars/red-yellow.txt"
# Input Paths
study = "nki"
scan = "short"
prefix = op.join(base, study, "cwas", scan)
suffix = "cluster_correct_v05_c05/easythresh/thresh_zstat_FSIQ.nii.gz"
subdist_subpaths = { "kvoxs_smoothed" : "compcor_kvoxs_smoothed_to_kvoxs_smoothed" }
# Python 2 only below: dict.iteritems() and print statements.
mdmr_subpaths = { k : op.join(v, "iq_age+sex+meanFD.mdmr") for k,v in subdist_subpaths.iteritems() }
logp_paths = { k : op.join(prefix, v, suffix) for k,v in mdmr_subpaths.iteritems() }
# Output Path
outdir = op.join(base, "figures/sfig_roi_comparison")
if not op.exists(outdir): os.mkdir(outdir)
# Threshold...combine min and max across all maps so panels share one scale
tmin = []; tmax = []
for name,logp_path in logp_paths.iteritems():
    lmin, lmax, _ = auto_minmax(logp_path)
    tmin.append(lmin); tmax.append(lmax)
min_use = np.min(tmin)
max_use = np.max(tmax)
sign_use = "pos"
print "min: %.5f" % min_use
print "max: %.5f" % max_use
# Color bar
cbar = load_colorbar(cbarfile)
# Render each log-p map onto the fsaverage surface, one image per hemisphere,
# then build montages from the saved image sets.
for name,logp_path in logp_paths.iteritems():
    # Vol => Surf
    logp_files, logp_surf = vol_to_surf(logp_path)
    remove_surfs(logp_files)
    for hemi in ["lh","rh"]:
        brain = fsaverage(hemi)
        # Viz
        brain = add_overlay(name, brain, logp_surf[hemi], cbar,
                            min_use, max_use, sign_use)
        # Save
        outprefix = op.join(outdir, "A_%s_surf" % name)
        save_imageset(brain, outprefix, hemi)
    # Montage (uses the outprefix left over from the hemisphere loop)
    montage(outprefix, compilation='box')
    montage(outprefix, compilation='horiz')
    montage(outprefix, compilation='horiz_lh')
    montage(outprefix, compilation='horiz_rh')
# import os
# import sys
# source_path = os.path.dirname(os.path.abspath(sys.argv[0])) + "/basenji/source"
# source_path2 = os.path.dirname(os.path.abspath(sys.argv[0])) + "/basenji/basenji"
# source_path3 = os.path.dirname(os.path.abspath(sys.argv[0])) + "/3Dpredictor/source"
# source_path4 = os.path.dirname(os.path.abspath(sys.argv[0])) + "/source"
# sys.path.append(source_path)
# sys.path.append(source_path2)
# sys.path.append(source_path3)
# sys.path.append(source_path4)
#
# from shared import Interval
# import json
# import dataset, dna_io, seqnn
# from Predictions_interpeter import from_upper_triu, predict_big_region_from_seq
import numpy as np
import pandas as pd
# import cooler
# import matplotlib.pyplot as plt
#
# genome_hic_cool = cooler.Cooler("/mnt/scratch/ws/psbelokopytova/202103211631polina/nn_anopheles/input/coolers/Aste_2048.cool")
# seq_hic_raw = genome_hic_cool.matrix(balance=True).fetch(('2L', 20000000, 21998848))
# print(seq_hic_raw.shape)
# im = plt.matshow(mean_array, fignum=False, cmap='RdBu_r') # , vmax=2, vmin=-2)
# plt.colorbar(im, fraction=.04, pad=0.05) # , ticks=[-2,-1, 0, 1,2])
# plt.savefig("/mnt/scratch/ws/psbelokopytova/202103211631polina/nn_anopheles/test")
# plt.clf()
# breakpoint()
n = 6
m = 6
k = 3
# k stacked m-by-n matrices, NaN-filled so untouched cells are ignored by
# the nanmean below.
arr = np.empty((k,m,n))
arr[:]=np.nan
# print(arr)
print(arr.shape)
# NOTE(review): this nested list is ragged (rows of length 6 and 2).
# Modern NumPy (>= 1.24) raises ValueError here instead of building an
# object array; even as an object array the shape is (2,), so both asserts
# below fail. Presumably a square 6x6 test matrix was intended — confirm.
x = np.array([[2, 2, 2,2,2,2], [2, 2]], np.int32)
# print(x)
assert x.shape[0]==x.shape[1]
assert x.shape[0]==arr.shape[1]
# print(len(x))
stride = 1
arr_stride = 0
# Write each "prediction" into its layer at an increasing horizontal offset.
for k_matrix in range(0, k):
    # print(k_matrix)
    predicted_array = x
    for i in range(len(predicted_array)):
        # print(k_matrix, i, 0+arr_stride, len(predicted_array)+arr_stride)
        # NOTE(review): for arr_stride > 0 this slice is clipped at the
        # array edge, so its length no longer matches predicted_array[i];
        # full-width rows would raise a broadcast error here — confirm the
        # intended offset/row-length relationship.
        arr[k_matrix][i][0+arr_stride:len(predicted_array)+arr_stride] = predicted_array[i]
    arr_stride+=1
# print(x[1])
# print(arr[0][:][0:3])
# arr[0][:][0:3] = x
# print(arr)
# Average the k offset layers, ignoring NaN cells.
new_arr = np.nanmean(arr, axis=0)
# print(new_arr)
print(new_arr.shape)
# NOTE(review): np.triu_indices expects an integer n as its first argument,
# not an array — this line raises TypeError;
# np.triu_indices(new_arr.shape[0], 2) was probably intended.
print(np.triu_indices(new_arr, 2))
# bisize=1
# starts = []
# ends = []
# values = []
# for i in range(new_arr.shape[0]):
#     print(new_arr[i])
#     for j in range(new_arr.shape[1]):
#         print(np.isnan(new_arr[i][j]))
#         if not np.isnan(new_arr[i][j]):
#             starts.append(i*bisize)
#             ends.append(j*bisize)
#             values.append(new_arr[i][j])
# print(starts)
# print(ends)
# print(values)
# data = {'chr': ['2L']*len(starts), 'contact_st': starts, 'contact_en':ends, 'contact_count':values}
# df = pd.DataFrame(data=data)
# print(df)
# mp = MatrixPlotter()
# mp.set_data(predicted_data)
# mp.set_control(validation_data)
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to transform data."""
import cStringIO as StringIO
import csv
import json
import logging
import re
from src.clients import bigquery
from src.csvmatchreplace import timestamp
class TableError(Exception):
    """An error related to the Table class."""


class CellError(TableError):
    """An error with a cell."""

    def __init__(self, message, value=None, index=None):
        """Make a Cell Error.

        Args:
            message: the message about this error
            value: optional value we had a problem with.
            index: optional index into the row where this cell error came from.
        """
        super(CellError, self).__init__(message)
        self.value = value
        self.index = index
def TransformRow(row, config):
    """Performs transformations on row.

    Args:
        row: an array of string values.
        config: the config for transform from table.AsDataPipelineJsonDict.
    Returns:
        A tuple of new transformed values that is the result of
        performing operations based on the contents transformations and
        wanted_cols and an array of bad_column errors.
    """
    output_cells = []
    cell_errors = []
    columns = config['columns']
    if len(row) != len(columns):
        cell_errors.append(CellError(
            'Invalid number of elements in row. Found %d, expected %d' %
            (len(row), len(columns))))
    # zip truncates to the shorter of row/columns, matching min(len, len).
    for position, (value, column) in enumerate(zip(row, columns)):
        if not column['wanted']:
            continue
        try:
            output_cells.append(TransformCell(value, position, column))
            # logging.info('Transform phase: Column %d = %s', i, cell_data)
        except CellError as err:
            logging.warning('Transform phase: Bad data @ Column %d = %r', position,
                            err)
            cell_errors.append(err)  # save error
            # possible partial transformation
            output_cells.append(err.value)
    return (output_cells, cell_errors)
def TransformCell(cell, index, column):
    """Performs transformation(s) on an individual cell.

    Args:
        cell: A unit of data to be processed.
        index: which column are we transforming.
        column: The column dict from columns from the AsDataPipelineJsonDict
    Returns:
        A new cell that is the result of performing operations based on
        the contents of the transformation at the provided index.
    Raises:
        CellError if there is an error with this cell.
    """
    value = cell
    # Apply every regex match/replace rule in order, then normalize the type.
    for rule in column.get('transformations', []):
        value = re.sub(rule['match'], rule['replace'], value)
    return NormalizeCellByType(value, index, column['type'])
def NormalizeCellByType(cell, index, column_type):
    """Make sure the cell value is valid for the column_type.

    Empty values normalize to the empty string; invalid values raise a
    CellError carrying the original value and the column index.
    """
    if not cell:
        return ''
    normalized = cell
    try:
        if column_type == bigquery.ColumnTypes.INTEGER:
            normalized = int(cell)
        elif column_type == bigquery.ColumnTypes.FLOAT:
            normalized = float(cell)
        elif column_type == bigquery.ColumnTypes.BOOLEAN:
            lowered = str(cell).lower()
            if lowered in ('true', '1'):
                normalized = 'True'
            elif lowered in ('false', '0'):
                normalized = 'False'
            else:
                raise ValueError('invalid value')
        elif column_type == bigquery.ColumnTypes.TIMESTAMP:
            normalized = timestamp.NormalizeTimeStamp(cell)
    except ValueError as err:
        raise CellError('Invalid value %r for column type %s: %r' %
                        (cell, bigquery.ColumnTypes.strings[column_type], err),
                        str(cell), index)
    return str(normalized)
def WriteErrors(writer, row_value, errors):
    """Write out row and errors with it for later _badrows table creation.

    Args:
        writer: an object we can write to.
        row_value: the entire row we had a problem with.
        errors: an array of CellError objects.
    """
    # str(err) instead of the deprecated (and Python 3-removed)
    # BaseException.message attribute; for CellError both evaluate to the
    # message passed at construction.
    row = {'row_value': row_value,
           'errors': [{'message': str(err),
                       'value': err.value,
                       'index': err.index} for err in errors]}
    writer.write(json.dumps(row) + '\r\n')
def CellsToCsvString(row):
    """Convert a row of cell strings into a csv joined string."""
    # NOTE: cStringIO is Python 2 only; a Python 3 port needs io.StringIO.
    o = StringIO.StringIO()
    csv_writer = csv.writer(o)
    csv_writer.writerow(row)
    return o.getvalue().splitlines()[0]  # strip off the trailing \r\n
|
"""
Basic thread handling exercise:
Use the Thread class to create and run more than 10 threads which print their name and a random
number they receive as argument. The number of threads must be received from the command line.
e.g. Hello, I'm Thread-96 and I received the number 42
"""
from random import randint, seed
from threading import Thread
import sys
# n = input()
# try:
# n = int(n)
# except ValueError:
# print("ERROR")
# exit(-1)
# Read the thread count from the command line.
# BUG FIX: a missing argument raised an uncaught IndexError with a raw
# traceback; handle it the same way as a non-integer value.
try:
    n = int(sys.argv[1])
except (IndexError, ValueError):
    print("ValueError")
    exit(-1)
def func(id, nr):
    """Print a greeting naming the thread and the random number it received."""
    message = "Hello, I'm Thread-{} and I received the number {}".format(id, nr)
    print(message)
# Start n worker threads, each given its index and a random number, then
# wait for all of them to finish.
thread_list = []
seed()
for idx in range(n):
    worker = Thread(target=func, args=(idx, randint(0, 100)))
    worker.start()
    thread_list.append(worker)
for worker in thread_list:
    worker.join()
|
import warnings
warnings.filterwarnings("ignore", category = FutureWarning)
import keras
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, regularizers
from keras.layers import Activation
from keras.layers import Dropout
import matplotlib.pyplot as plt
from utils import *
def get_model():
    """Build a simple dense classifier: 512-unit layer + ReLU, then a
    10-way softmax output."""
    network = Sequential()
    network.add(Dense(512))
    network.add(Activation(activation='relu', name='activation_1'))
    network.add(Dense(10, activation='softmax'))
    return network
# Load the prepared features and labels.
X = np.load('new_X.npy')
y = np.load('label.npy')
# train_test_split / accuracy_score / plot are expected to come from
# `from utils import *` above — TODO confirm.
X_train, X_test, y_train, y_test = train_test_split(X, y)

model = get_model()
model.compile(loss=keras.losses.sparse_categorical_crossentropy,
              optimizer=keras.optimizers.Adam(),
              metrics=['accuracy'])

num_epochs = 70
# BUG FIX: use num_epochs instead of the duplicated literal 70.
hist = model.fit(X_train, y_train, epochs=num_epochs,
                 validation_data=(X_test, y_test), verbose=1)

# BUG FIX: model.predict returns softmax class probabilities, but
# accuracy_score needs discrete class labels — take the argmax first.
y_hat = model.predict(X_test)
y_pred = np.argmax(y_hat, axis=1)
score = accuracy_score(y_test, y_pred)
print("Final score:", score)

train_loss = hist.history['loss']
val_loss = hist.history['val_loss']
# NOTE(review): newer Keras spells these keys 'accuracy'/'val_accuracy' —
# confirm against the installed version.
train_acc = hist.history['acc']
val_acc = hist.history['val_acc']
plot(train_loss, val_loss, "Loss changing", is_loss=True)
plot(train_acc, val_acc, "Accuracy changing", is_loss=False)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Template(models.Model):
    """A reusable email template owned by a user."""
    # Deleting the owning user cascades to their templates.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=200)
    content = models.TextField()
class EmailSent(models.Model):
    """A record of an email sent from a template."""
    content = models.TextField()
    name = models.CharField(max_length=200)
    # Stamped once on creation, never updated.
    dateCreated = models.DateField(auto_now_add=True)
    # Deleting the template cascades to its sent-email records.
    template = models.ForeignKey(Template, on_delete=models.CASCADE)
import csv
import math
import pandas as pd
# Load a CSV file
def load_csv(filename='Diabetes.csv'):
    """Load a CSV file, skip its header row, and convert every field to float.

    Args:
        filename: path of the CSV file to read. Defaults to 'Diabetes.csv',
            preserving the original hard-coded behavior.
    Returns:
        A list of rows, each a list of floats.
    """
    dataset = list()
    with open(filename, 'r') as file:
        data = csv.reader(file)
        next(data, None)  # skip the header row
        for row in data:
            dataset.append(row)
    if not dataset:
        # Guard: the original raised IndexError on a header-only file.
        return dataset
    for column in range(len(dataset[0])):
        for row in dataset:
            row[column] = float(row[column])
    return dataset
#Function to split dataset into folds
def split(a, n):
    """Yield n contiguous, near-equal folds of sequence *a*.

    The first len(a) % n folds get one extra element.
    """
    fold_size, remainder = divmod(len(a), n)
    for i in range(n):
        start = i * fold_size + min(i, remainder)
        stop = (i + 1) * fold_size + min(i + 1, remainder)
        yield a[start:stop]
#Numeric Distance using euclidean distance
def euclidean_distance(x1, x2):
    """Euclidean distance over all attributes except the last (class label)."""
    squared_sum = sum((x1[i] - x2[i]) ** 2 for i in range(len(x1) - 1))
    return math.sqrt(squared_sum)
#Function to scale dataset between [0, 1]
def normalization(dataset):
    """Scale every attribute column (all but the last, the label) into [0, 1]
    in place, using per-column min-max scaling.

    BUG FIX: a constant column (max == min) used to raise ZeroDivisionError;
    such columns now map to 0.0.
    """
    minmax = list()
    for i in range(len(dataset[0]) - 1):
        column = [row[i] for row in dataset]
        minmax.append([min(column), max(column)])
    for row in dataset:
        for i in range(len(row) - 1):
            low, high = minmax[i]
            span = high - low
            row[i] = 0.0 if span == 0 else (row[i] - low) / span
#Function to return k closest neighbor
def get_neighbors(train, test_row, k):
    """Return the `k` training rows closest to `test_row` (Euclidean
    distance, class column excluded), nearest first. Ties keep the
    original order of `train` (stable sort).
    """
    ranked = sorted(train, key=lambda row: euclidean_distance(test_row, row))
    # Index explicitly so k > len(train) still raises IndexError, as before.
    return [ranked[i] for i in range(k)]
#Function to return testing row class prediction/estimation
def classification(train, test_row, k):
    """Predict the class of `test_row` by majority vote among its `k`
    nearest neighbors in `train`.
    """
    labels = [neighbor[-1] for neighbor in get_neighbors(train, test_row, k)]
    return max(set(labels), key=labels.count)
#Function to return testing set class prediction/estimation
def k_nearest_neighbors(data_test, data_train, k):
    """Return the predicted class for every row of `data_test`, using
    `data_train` as the reference set.
    """
    return [classification(data_train, row, k) for row in data_test]
#Function to count accuracy of testing set prediction
def accuracy(testing_set_outcome, prediction):
    """Percentage of predictions that match the true outcomes.

    Fix: returns 0.0 for empty input instead of raising
    ZeroDivisionError.
    """
    if not testing_set_outcome:
        return 0.0
    correct = sum(1 for truth, guess in zip(testing_set_outcome, prediction)
                  if truth == guess)
    return correct / float(len(testing_set_outcome)) * 100.0
# Main Program: load and scale the data, then grid-search K in 1..30
# with 5-fold cross validation and report the best value.
dataset = load_csv()
normalization(dataset)
# 5-Fold Cross Validation
five_fold = list(split(dataset, 5))
acc_mean_list = list()
for k in range(1,31):
    acc = list()
    n_neighbors = k
    for fold in five_fold:
        # Train on the other four folds, test on this one
        training_set = list(five_fold)
        training_set.remove(fold)
        training_set = sum(training_set, [])
        testing_set = list()
        for row in fold:
            # Copy rows and blank the label so the classifier can't see it
            row_copy = list(row)
            testing_set.append(row_copy)
            row_copy[-1] = None
        predicted = k_nearest_neighbors(testing_set, training_set, n_neighbors)
        outcome = [row[-1] for row in fold]
        accs = accuracy(outcome, predicted)
        acc.append(accs)
    # NOTE(review): divides by the literal 5 rather than len(acc) — fine
    # while the fold count stays 5, but they should be kept in sync.
    mean = sum(acc)/float(5)
    acc_mean_list.append([mean,n_neighbors])
    print("K : ", n_neighbors)
    print("Accuracy Mean : ", mean)
    print("==================================")
# Scatter plot of mean accuracy per K, then report the best K
df = pd.DataFrame(acc_mean_list, columns=['Accuracy Mean', 'K'])
ax2 = df.plot.scatter(x='K', y='Accuracy Mean')
print("")
print("Best K Value : " , max(acc_mean_list,key=lambda x:x[0])[1])
print("Accuracy Mean : ", max(acc_mean_list,key=lambda x:x[0])[0])
import Mission
import time
class SeriesMission(Mission.Mission):
    """Composite mission that runs its sub-missions one after another."""

    def __init__(self, missions):
        Mission.Mission.__init__(self)  # Critical line in every mission
        self.missions = missions  # List of missions to run
        # Flags parallel to self.missions: which have been started
        self.started = [False] * len(self.missions)
        self.index = 0  # Index of current mission running in the mission list

    def initialize(self):
        # Reset state so the series can run again from the beginning
        self.started = [False] * len(self.missions)
        self.index = 0

    def execute(self):
        # Advance through the list: start the current mission if it has
        # not been started; once it stops running, move to the next one.
        if self.index < len(self.missions):  # If there are still missions to run
            mission = self.missions[self.index]  # Current mission
            if not mission.am_i_running():
                # Either it hasn't yet been started, or it finished
                if not self.started[self.index]:  # Case not yet been started
                    mission.start()
                    self.started[self.index] = True
                else:  # Case mission finished
                    self.index += 1  # Move on to the next mission

    def is_finished(self):
        """
        Returns true after every mission has been started and then finished
        """
        if len(self.missions) == 0:
            return True
        return self.index >= len(self.missions)

    def finish(self):
        # If this mission has been killed, stop any running missions.
        # NOTE(review): `mission.is_running` is not called — if it is a
        # method (cf. am_i_running() above) this is always truthy;
        # confirm against the Mission base class.
        for mission in self.missions:
            if mission.is_running:
                mission.kill()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 7 16:21:19 2019
@author: D
"""
def main():
    """Join EmailBook.txt and TelBook.txt on the name column and write
    the merged result to allbook.txt.

    Fixes: files are opened with context managers so they are closed
    even on error, and the top-level call is guarded by __main__.
    Names present in only one book get a ' -----------' placeholder.
    """
    base = 'C:\\Users\\D\\Documents\\Python\\'
    with open(base + 'EmailBook.txt', 'rb') as email_book, \
            open(base + 'TelBook.txt', 'rb') as tel_book:
        lines_email = email_book.readlines()
        lines_tel = tel_book.readlines()
    # Parse "name<ws>email" / "name<ws>tel" lines (bytes -> utf-8 str)
    email_names, emails = [], []
    for line in lines_email:
        fields = line.split()
        email_names.append(str(fields[0], 'utf-8'))
        emails.append(str(fields[1], 'utf-8'))
    print(email_names)
    print(emails)
    tel_names, tels = [], []
    for line in lines_tel:
        fields = line.split()
        tel_names.append(str(fields[0], 'utf-8'))
        tels.append(str(fields[1], 'utf-8'))
    print(tel_names)
    print(tels)
    merged = []
    # Every email entry, matched to a phone number when available
    for i, name in enumerate(email_names):
        if name in tel_names:
            j = tel_names.index(name)
            merged.append('\t'.join([name, emails[i], tels[j]]) + '\n')
        else:
            merged.append('\t'.join([name, emails[i], ' -----------']) + '\n')
    print(merged)
    # Phone-only entries
    for i, name in enumerate(tel_names):
        if name not in email_names:
            merged.append('\t'.join([name, ' -----------', tels[i]]) + '\n')
    print(merged)
    with open(base + 'allbook.txt', 'w') as out_file:
        out_file.writelines(merged)
    print('join File finish')


if __name__ == '__main__':
    main()
# -*- coding: UTF-8 -*-
import time
import config_params
def query_set_from_table(typ):
    """Load active LTM and YTD records for `typ` ('tpl' or 'asrep') into
    dicts keyed by the '#'-joined key fields, for side-by-side comparison.
    """
    # for keys: need `rpt`, `secu`, `y`, `q`, `fp`, `stdtyp` and assure whether `ctyp` is equivalent on both sides.
    # for query data: `active` is True
    # to two records: must make keys and data of both sides is identical.
    # `rpt` of `tpl` table record is 2.1.2 or 2.3.2, `rpt` of related `asrep` is 2.1.1 or 2.3.1
    # Resolve the two collections from config_params by naming convention.
    coll_ltm = getattr(config_params, 'coll_{0}_ltm'.format(typ), None)
    coll_ytd = getattr(config_params, 'coll_{0}_ytd'.format(typ), None)
    assert coll_ltm is not None and coll_ytd is not None, 'typ value is not excepted.'
    typ_ltm_dict, typ_ytd_dict = {}, {}
    rpt_values_list = ['2.1.2', '2.3.2'] if typ == 'tpl' else ['2.1.1', '2.3.1']
    needs_fields = ['rpt', 'secu', 'y', 'fp', 'q', 'stdtyp', 'ctyp']
    query_conditions = {'rpt': {'$in': rpt_values_list}, 'active': True}
    query_fields = {k: 1 for k in needs_fields}
    start_ltm = time.time()
    # Key each record by its joined field values; later records with the
    # same key overwrite earlier ones.
    for k_ltm, item_ltm in enumerate(coll_ltm.find(query_conditions, query_fields).sort([('_id', 1)])):
        key_ltm = '#'.join([str(item_ltm[key]) for key in needs_fields])
        typ_ltm_dict[key_ltm] = item_ltm
    print '{0} count: {1}, need time: {2}s'.format(coll_ltm.full_name, k_ltm + 1, time.time() - start_ltm)
    start_ytd = time.time()
    for k_ytd, item_ytd in enumerate(coll_ytd.find(query_conditions, query_fields).sort([('_id', 1)])):
        key_ytd = '#'.join([str(item_ytd[key]) for key in needs_fields])
        typ_ytd_dict[key_ytd] = item_ytd
    print '{0} count: {1}, need time: {2}s'.format(coll_ytd.full_name, k_ytd + 1, time.time() - start_ytd)
    print 'typ_ltm_all count: {0}'.format(len(typ_ltm_dict))
    print 'typ_ytd_all count: {0}\n'.format(len(typ_ytd_dict))
    return typ_ltm_dict, typ_ytd_dict
def get_data_revenue_growth(typ='tpl'):
    """Collect active rpt=2.1.2 LTM records whose items carry the
    'is_tpl_1' code but not 'is_fa_1'; return the selected field subsets.
    """
    coll_ltm = getattr(config_params, 'coll_{0}_ltm'.format(typ), None)
    assert coll_ltm is not None, "tpl don't need what logical get"
    print coll_ltm.full_name
    queryset_ltm = []
    query_conditions = {"rpt": "2.1.2", "active": True}
    query_fields = ['secu', 'y', 'ctyp', 'fp', 'rpt', 'q', '_id']
    # Total for the progress percentage printed below
    count = coll_ltm.find(query_conditions).count()
    for k, ltm_dict in enumerate(coll_ltm.find(query_conditions).sort([('_id', 1)])):
        items = ltm_dict.get('items', []) or []
        cds = {item['cd'] for item in items}
        if 'is_tpl_1'in cds and 'is_fa_1' not in cds:
            queryset_ltm.append({ky: ltm_dict[ky] for ky in query_fields})
            # print {ky: ltm_dict[ky] for ky in query_fields}
            # break
        print 'k:{0}, progress: [{1}%]'.format(k + 1, (k + 1) / float(count) * 100)
    return queryset_ltm
if __name__ == '__main__':
    # typ_ltm_all, typ_ytd_all = query_set_from_table('sarep')
    import os
    from eggs.utils.xlsx_writer import XlsxWriter
    st = time.time()
    set_ltm = get_data_revenue_growth()
    print 'need time:', time.time() - st
    print 'set ltm count:', len(set_ltm)
    # Dump the selected records to log/rg.xlsx, one row per record
    path = os.path.dirname(__file__) + '/log/'
    headers = ['_id', 'secu', 'y', 'ctyp', 'fp', 'rpt', 'q']
    open_book = XlsxWriter(path + 'rg.xlsx', headers=headers)
    for dct in set_ltm:
        open_book.write([dct[key] for key in headers])
    open_book.close()
|
# Author : Md. Shahedul Islam Shahed
# Language : python 3.5
# Concise : Calculates aspect ratio and dimensions
import argparse
import math
def main():
    """Parse CLI options and print the width/height of a display given
    its diagonal length and aspect ratio.

    Fixes: 'lenth' typo in the description, and invalid input now goes
    through parser.error (usage to stderr, exit status 2) instead of
    printing help and exiting with status 0.
    """
    parser = argparse.ArgumentParser(description="Get aspect dimensions (length and width).")
    parser.add_argument('-d', '--diag-len', dest='diag_len', metavar='LEN', default=5, type=float,
                        help='The length of display diagonal. Default is %(default)s units.')
    parser.add_argument('-r', '--aspect-ratio', dest='asp_rat', metavar='N', nargs=2, type=float,
                        default=(16, 9),
                        help="The display aspect ratio in form of <r1 r2> meaning the ratio r1:r2. "
                             "Default is 16:9. Example: -r 16 9 which means 16:9.")
    args = parser.parse_args()
    diag = args.diag_len
    r1, r2 = args.asp_rat
    if diag <= 0 or r1 <= 0 or r2 <= 0:
        parser.error("diagonal length and aspect ratio values must be positive")
    # For ratio r1:r2 and diagonal d: b = d*r2/sqrt(r1^2 + r2^2), a = r1*b/r2
    b = diag * r2 / math.sqrt(r1*r1 + r2*r2)
    a = r1 * b / r2
    print("Diagonal length : %g unit" % (diag))
    print("Aspect ratio : %g:%g\nthen" % (r1, r2))
    print("Aspect dimensions : %g unit, %g unit" % (a, b))


if __name__ == '__main__':
    main()
|
from mutagen import flac
import MySQLdb as mariadb
import os
import sys
def getArtistID(album_artist, conn):
    """Return the artistid for an exact artist-name match, or 0 if the
    artist is not in the database.

    Fix: uses a parameterized query instead of str.format plus manual
    quote doubling — safe against SQL injection and malformed names.
    """
    cursor = conn.cursor()
    cursor.execute("SELECT artistid FROM artist WHERE artistname=%s;", (album_artist,))
    row = cursor.fetchone()
    if row is None:
        return 0
    else:
        return row[0]
def getAlbumID(artistid, album, conn):
    """Return the albumid for an exact (album, artistid) match, or 0 if
    absent.

    Fix: parameterized query instead of string interpolation (SQL
    injection / quoting bugs).
    """
    cursor = conn.cursor()
    cursor.execute("SELECT albumid FROM album WHERE album=%s AND artistid=%s;",
                   (album, artistid))
    row = cursor.fetchone()
    return 0 if row is None else row[0]
def clearDB(conn):
    # Not implemented. Presumably meant to reset catalogue state before a
    # rescan — confirm intent before filling in.
    pass
def insertTrack(albumid, conn):
    """Mark the given album as kept (MadeTheCut = 1).

    Fixes: getAlbumID returns 0 (not None) for a missing album, so the
    old `is not None` guard never fired; guard against both. The UPDATE
    is parameterized instead of built with str.format.
    """
    if albumid:  # 0 or None both mean "album not found"
        cursor = conn.cursor()
        cursor.execute("UPDATE album SET MadeTheCut = 1 WHERE albumid = %s", (albumid,))
        conn.commit()
    else:
        print("No matching album found")
# NOTE(review): database credentials are hard-coded here; move them to
# configuration or environment variables before sharing this script.
conn = mariadb.connect(user='simon', passwd='phaedra74', db='catalogue', use_unicode=True, charset='utf8')
error_files = []  # NOTE(review): collected nowhere below — appears unused
basedir = "/home/simon/Archive-Keep/"
# Walk the archive; for every FLAC file, resolve artist/album ids from
# its tags and mark the album in the database.
for root, dirs, files in os.walk(basedir):
    for f in files:
        if f.endswith(".flac"):
            print("Scanning : " + f)
            file = os.path.join(root, f)
            # `f` is rebound from the filename to the FLAC tag object here
            f = flac.FLAC(file)
            t = f.tags
            album_artist = t["ALBUMARTIST"][0]
            album = t["Album"][0]
            artistID = getArtistID(album_artist, conn)
            albumID = getAlbumID(artistID, album, conn)
            insertTrack(albumID, conn)
|
from .models import Place
from .serializers import PlacePutSerializer
from rest_framework.viewsets import ModelViewSet
from django_filters import rest_framework as filters
from django.db.models import Count, F
# from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.generics import (
ListCreateAPIView,
UpdateAPIView,
DestroyAPIView
)
# 사용자 입력 Place Model ViewSet
class PlaceCreateAPIView(ListCreateAPIView):
    # Create-only endpoint for user-submitted Places (GET is disabled
    # via http_method_names despite the ListCreate base).
    queryset = Place.objects.all()
    serializer_class = PlacePutSerializer
    http_method_names = ['post']
class PlaceUpdateAPIView(UpdateAPIView):
    # Update a Place, addressed by its place_id URL kwarg
    queryset = Place.objects.all()
    serializer_class = PlacePutSerializer
    lookup_field = 'place_id'
class PlaceDeleteAPIView(DestroyAPIView):
    # Delete a Place, addressed by its place_id URL kwarg
    queryset = Place.objects.all()
    serializer_class = PlacePutSerializer
    lookup_field = 'place_id'
class PlaceTitleFilter(filters.FilterSet):
    # Case-insensitive substring filtering on title (?title__icontains=...)
    class Meta:
        model = Place
        fields = {
            'title': ['icontains']
        }
class PlaceViewSet(ModelViewSet):
    """List/create endpoint for Place with ?q= ordering shortcuts.

    ?q=review_count -> order by -counts
    ?q=like_count   -> order by annotated count of user_likes
    """
    queryset = Place.objects.all()
    serializer_class = PlacePutSerializer
    filter_backends = (filters.DjangoFilterBackend,)
    filter_fields = ('place_id', 'title')
    filterset_class = PlaceTitleFilter
    http_method_names = ['get', 'post']

    def get_queryset(self):
        q = self.request.GET.get('q')
        if q == 'review_count':
            return Place.objects.all().order_by('-counts')
        if q == "like_count":
            return Place.objects.annotate(like_count=Count('user_likes')).order_by('-like_count')
        # Bug fix: previously fell through and returned None for any other
        # (or missing) q, which breaks list requests in DRF.
        return super().get_queryset()
|
"""
Builds FHIR Organization resources (https://www.hl7.org/fhir/organization.html)
from rows of tabular sequencing center data.
"""
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_model_fhir.ingest_plugin.shared import join
class SequencingCenter:
    """Target-entity builder that maps sequencing-center rows onto FHIR
    Organization resources.
    """

    class_name = "sequencing_center"
    resource_type = "Organization"
    target_id_concept = None

    @staticmethod
    def build_key(record):
        """Return the unique key for this record; requires the target
        service ID to be present.
        """
        service_id = record[CONCEPT.SEQUENCING.CENTER.TARGET_SERVICE_ID]
        assert service_id is not None
        return join(service_id)

    @staticmethod
    def build_entity(record, key, get_target_id_from_record):
        """Assemble the FHIR Organization payload for one record."""
        study_id = record[CONCEPT.STUDY.ID]
        service_id = record.get(CONCEPT.SEQUENCING.CENTER.TARGET_SERVICE_ID)
        center_name = record.get(CONCEPT.SEQUENCING.CENTER.NAME)
        # Two identifiers always present: the dataservice ID and a
        # study-scoped unique string built from the key.
        identifiers = [
            {
                "system": "https://kf-api-dataservice.kidsfirstdrc.org/sequencing-centers",
                "value": service_id,
            },
            {
                "system": "urn:kids-first:unique-string",
                "value": join(SequencingCenter.resource_type, study_id, key),
            },
        ]
        entity = {
            "resourceType": SequencingCenter.resource_type,
            "id": get_target_id_from_record(SequencingCenter, record),
            "meta": {
                "profile": [
                    "http://hl7.org/fhir/StructureDefinition/Organization"
                ]
            },
            "identifier": identifiers,
        }
        # Name is optional: add a name-based identifier plus the FHIR
        # `name` field only when it is present and non-empty.
        if center_name:
            identifiers.append(
                {
                    "system": "https://kf-api-dataservice.kidsfirstdrc.org/sequencing-centers?name=",
                    "value": center_name,
                }
            )
            entity["name"] = center_name
        return entity
|
# coding:utf-8
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Use %% ... %% as Jinja variable delimiters so server templates don't
# clash with a client-side framework that owns {{ }}.
app.jinja_env.variable_start_string = '%%'
app.jinja_env.variable_end_string = '%%'
# NOTE(review): DB credentials are hard-coded in the URI; move them to
# configuration/environment before deploying or sharing.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:ditto9689a@localhost:3306/newslistdb'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=True
db = SQLAlchemy(app)
from app import models,views |
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node (LeetCode-style)."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def minDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the shortest root-to-leaf path.

        A child-less root has depth 1; an empty tree has depth 0. A node
        with only one child must recurse into that child — the missing
        side is not a leaf.
        """
        if root is None:
            return 0
        left, right = root.left, root.right
        if left is None and right is None:
            return 1
        if left is None:
            return 1 + self.minDepth(right)
        if right is None:
            return 1 + self.minDepth(left)
        return 1 + min(self.minDepth(left), self.minDepth(right))
import ast
import re
import random
from models.transition_matrix import TransitionMatrix
class TextGenerator:
    """Second-order Markov text generator trained on a lyrics file.

    The input file is expected to contain a Python dict literal mapping
    song keys to lyric strings (parsed with ast.literal_eval).
    """

    def __init__(self, file_path, song_length=50):
        lyrics_file = open(file_path, "r")
        lyrics_dict = ast.literal_eval(lyrics_file.read())
        lyrics_file.close()
        all_lyrics = ""
        for key in lyrics_dict.keys():
            song_lyrics = lyrics_dict[key].split(" ")
            for lyric in song_lyrics:
                # A capitalized word is treated as a sentence start, so a
                # "." is inserted before it; empty tokens are skipped.
                try:
                    if lyric[0].isupper():
                        all_lyrics += "."
                except IndexError:
                    continue
                all_lyrics += lyric + " "
        # all_lyrics = self.tag_words(all_lyrics)
        self.transition_matrix = self.create_transition_matrix(all_lyrics)
        self.song_length = song_length  # number of words to generate

    # def tag_words(self, se):
    #     for sentence in lyrics.split("."):
    #         text = nltk.word_tokenized(sentence)
    #     return lyrics

    def preprocess_sentence(self, sentence):
        """Lowercase, strip unwanted characters, pad punctuation with
        spaces and collapse runs of whitespace. Order of the regex
        passes matters.
        """
        sentence = sentence.lower()
        sentence = re.sub(r"[^\w\d.!?\s]+", '', sentence)
        sentence = re.sub('([.,!?])', r' \1 ', sentence)
        sentence = re.sub('\s{2,}', ' ', sentence)
        return sentence

    def create_transition_matrix(self, lyrics):
        """Build word-pair -> next-word transitions from each sentence."""
        matrix = TransitionMatrix()
        for text in lyrics.split("."):
            doc = self.preprocess_sentence(text)
            doc = doc.split()
            length = len(doc)
            for i in range(2, length):
                matrix.add_triple(doc[i - 2], doc[i - 1], doc[i])
        return matrix

    def generate_text(self):
        """Generate ~song_length words by walking the transition matrix,
        reseeding from a random key whenever the chain dead-ends.
        """
        matrix = self.transition_matrix.get_matrix()
        rand_seed = random.choice(list(matrix.keys())).split(",")
        word1 = rand_seed[0]
        word2 = rand_seed[1]
        story = word1 + " " + word2
        for i in range(self.song_length):
            new_word = self.transition_matrix.next_word(word1, word2)
            if new_word is None:
                # NOTE(review): after reseeding, `temp` can itself be
                # None, which would make the concatenation below raise —
                # confirm next_word's contract for fresh keys.
                rand_key = random.choice(list(matrix.keys())).split(",")
                word1 = rand_key[0]
                word2 = rand_key[1]
                temp = self.transition_matrix.next_word(word1, word2)
                story = story + " " + temp
                word1 = word2
                word2 = temp
            else:
                story = story + " " + new_word
                word1 = word2
                word2 = new_word
        return story
# Demo: train on the Kanye West lyrics corpus and print one generated song
temp = TextGenerator("../data/Kanye_West_lyrics.txt")
print(temp.generate_text())
|
"""isort:skip_file"""
# Notebook-converted setup script: get_ipython() is only available when
# run inside an IPython/Jupyter session.
get_ipython().magic('config InlineBackend.figure_format = "retina"')
import os
import logging
import warnings
import matplotlib.pyplot as plt
# Remove when Theano is updated
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
# Remove when arviz is updated
warnings.filterwarnings("ignore", category=UserWarning)
# Silence Theano's compile-lock chatter, keep exoplanet debug output
logger = logging.getLogger("theano.gof.compilelock")
logger.setLevel(logging.ERROR)
logger = logging.getLogger("exoplanet")
logger.setLevel(logging.DEBUG)
# Matplotlib defaults for the notebook's figures
plt.style.use("default")
plt.rcParams["savefig.dpi"] = 100
plt.rcParams["figure.dpi"] = 100
plt.rcParams["font.size"] = 16
plt.rcParams["font.family"] = "sans-serif"
plt.rcParams["font.sans-serif"] = ["Liberation Sans"]
plt.rcParams["font.cursive"] = ["Liberation Sans"]
plt.rcParams["mathtext.fontset"] = "custom"
|
#Matriz de correlación
import librosa
import librosa.display
import matplotlib.pyplot as plt
import sys
# Demo: build several MFCC recurrence matrices with different options
# and show them side by side.
audioname = ("example2.wav")
y, sr = librosa.load(audioname)
mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=12)
# Find nearest neighbors in MFCC space
R1 = librosa.segment.recurrence_matrix(mfcc)
# Or fix the number of nearest neighbors to 5
R2 = librosa.segment.recurrence_matrix(mfcc, k=5)
# Suppress neighbors within +- 7 samples
R3 = librosa.segment.recurrence_matrix(mfcc, width=7)
# Use cosine similarity instead of Euclidean distance
R4 = librosa.segment.recurrence_matrix(mfcc, metric='cosine')
# Require mutual nearest neighbors
R5 = librosa.segment.recurrence_matrix(mfcc, sym=True)
# Use an affinity matrix instead of binary connectivity
R6 = librosa.segment.recurrence_matrix(mfcc, mode='affinity')
# Plot the feature and recurrence matrices.
# NOTE(review): subplot titles 2-5 look copy-pasted — they repeat
# "Binary recurrence (symmetric)" for variants that differ (k=5,
# width=7, cosine, sym=True); confirm the intended labels.
plt.figure(figsize=(8, 4))
plt.subplot(2, 3, 1)
librosa.display.specshow(R1, x_axis='time', y_axis='time')
plt.title('Binary recurrence (symmetric)')
plt.subplot(2, 3, 2)
librosa.display.specshow(R2, x_axis='time', y_axis='time')
plt.title('Binary recurrence (symmetric)')
plt.subplot(2, 3, 3)
librosa.display.specshow(R3, x_axis='time', y_axis='time')
plt.title('Binary recurrence (symmetric) DIF')
plt.subplot(2, 3, 4)
librosa.display.specshow(R4, x_axis='time', y_axis='time')
plt.title('Binary recurrence (symmetric)')
plt.subplot(2, 3, 5)
librosa.display.specshow(R5, x_axis='time', y_axis='time')
plt.title('Binary recurrence (symmetric)')
plt.subplot(2, 3, 6)
librosa.display.specshow(R6, x_axis='time', y_axis='time', cmap='magma_r')
plt.title('Affinity recurrence')
plt.tight_layout()
plt.show()
'''
Query Kowalski searching for counterparts to FRBs
Author: Igor Andreoni
'''
import numpy as np
import json
from collections import OrderedDict
import pdb
from astropy.time import Time
from astropy.table import Table, unique
from astropy.io import ascii
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from ztfquery import query
from penquins import Kowalski
def query_metadata(ra, dec, username, password,
                   start_jd=None, end_jd=None,
                   out_csv=None):
    """Use ZTFquery to get more reliable upper limits.

    Queries science-image metadata in a small box around (ra, dec),
    optionally restricted to the [start_jd, end_jd] window, and returns
    the table sorted by obsjd. Writes a CSV when out_csv is given.

    Fix: None comparisons use `is`/`is not` instead of `==`/`!=`.
    """
    zquery = query.ZTFQuery()
    if start_jd is None and end_jd is None:
        zquery.load_metadata(kind='sci',
                             radec=[str(ra.deg), str(dec.deg)],
                             size=0.003,
                             auth=[username, password])
    else:
        if start_jd is not None and end_jd is None:
            sql_query = 'obsjd>' + repr(start_jd)
        elif start_jd is None and end_jd is not None:
            sql_query = 'obsjd<' + repr(end_jd)
        else:  # both bounds given
            sql_query = 'obsjd<' + repr(end_jd) + '+AND+' + 'obsjd>' + repr(start_jd)
        zquery.load_metadata(kind='sci',
                             radec=[str(ra.deg), str(dec.deg)],
                             size=0.003,
                             sql_query=sql_query,
                             auth=[username, password])
    out = zquery.metatable
    final_out = out.sort_values(by=['obsjd'])
    if out_csv is not None:
        final_out.to_csv(out_csv)
    return final_out
def create_tbl_lc(light_curves, args):
    """Create a table with the light curves and write a CSV output file
    (args.out_lc) when requested.

    Fix: the second parameter was named `outfile` but never used — the
    body read the module-level `args`, and the caller passes `args`
    positionally. Binding the parameter as `args` removes the hidden
    dependency on a global without changing call sites.
    """
    # fid -> filter
    filters = {'1': 'g', '2': 'r', '3': 'i'}
    tbl = Table([[], [], [], [], [], [], [], [], [], [], [], [], [], [], [],
                 [], [], [], []],
                names=('name', 'ra', 'dec', 'jd', 'magpsf', 'sigmapsf',
                       'filter', 'magzpsci', 'magzpsciunc',
                       'programid', 'field', 'rcid', 'pid',
                       'sgscore1', 'sgscore2', 'sgscore3',
                       'distpsnr1', 'distpsnr2', 'distpsnr3'),
                dtype=('S12', 'double', 'double', 'double',
                       'f', 'f', 'S', 'f', 'f', 'i', 'i', 'i', 'int_',
                       'f', 'f', 'f', 'f', 'f', 'f'))
    for l in light_curves:
        magzpsci = l["candidate"].get("magzpsci")
        magzpsciunc = l["candidate"].get("magzpsciunc")
        # prv_candidates entries lack the sgscore/distpsnr columns; pad
        # them with NaN instead of dropping the row.
        try:
            row = [l["objectId"], l["candidate"]["ra"], l["candidate"]["dec"],
                   l["candidate"]["jd"], l["candidate"]["magpsf"],
                   l["candidate"]["sigmapsf"], filters[str(l["candidate"]["fid"])],
                   magzpsci, magzpsciunc,
                   l["candidate"]["programid"], l["candidate"]["field"],
                   l["candidate"]["rcid"], l["candidate"]["pid"],
                   l["candidate"]["sgscore1"], l["candidate"]["sgscore2"],
                   l["candidate"]["sgscore3"], l["candidate"]["distpsnr1"],
                   l["candidate"]["distpsnr2"], l["candidate"]["distpsnr3"]]
        except KeyError:
            row = [l["objectId"], l["candidate"]["ra"], l["candidate"]["dec"],
                   l["candidate"]["jd"], l["candidate"]["magpsf"],
                   l["candidate"]["sigmapsf"], filters[str(l["candidate"]["fid"])],
                   magzpsci, magzpsciunc,
                   l["candidate"]["programid"], l["candidate"]["field"],
                   l["candidate"]["rcid"], l["candidate"]["pid"], np.nan,
                   np.nan, np.nan, np.nan, np.nan, np.nan]
        tbl.add_row(row)
    # Remove exact duplicates
    tbl = unique(tbl)
    tbl.sort("jd")
    if args.out_lc is not None:
        tbl.write(args.out_lc, format='csv', overwrite=True)
    return tbl
def get_lightcurve_alerts_aux(username, password, list_names):
    """Fetch prv_candidates photometry for a list of candidate names,
    returning one {objectId, candidate} dict per detection (entries with
    a magpsf), or None if the query came back empty.
    """
    kow = Kowalski(username=username, password=password, verbose=False)
    query_dict = {"query_type": "find",
                  "query": {
                      "catalog": "ZTF_alerts_aux",
                      "filter": {
                          '_id': {'$in': list(list_names)}
                      },
                      "projection": {}
                  },
                  "kwargs": {"hint": "_id_"}
                  }
    response = kow.query(query=query_dict)
    results = response['result_data']['query_result']
    if results == []:
        print("No candidates to be checked?")
        return None
    out = []
    for entry in results:
        detections = [{'objectId': entry['_id'], 'candidate': prv}
                      for prv in entry['prv_candidates']
                      if 'magpsf' in prv.keys()]
        out.extend(detections)
    return out
def get_lightcurve_alerts(username, password, list_names):
    """Query the alert light curves for a list of candidate names.

    Returns the raw list of alert documents (objectId plus the projected
    candidate fields), or None if the query came back empty.
    """
    # Candidate sub-fields to project out of each alert document
    candidate_fields = ["jd", "ra", "dec", "magpsf", "fid", "sigmapsf",
                        "programid", "magzpsci", "magzpsciunc",
                        "sgscore1", "sgscore2", "sgscore3",
                        "distpsnr1", "distpsnr2", "distpsnr3",
                        "field", "rcid", "pid"]
    projection = {"objectId": 1}
    projection.update({"candidate." + field: 1 for field in candidate_fields})
    query_dict = {"query_type": "find",
                  "query": {
                      "catalog": "ZTF_alerts",
                      "filter": {
                          'objectId': {'$in': list(list_names)}
                      },
                      "projection": projection
                  },
                  "kwargs": {"hint": "objectId_1"}
                  }
    kow = Kowalski(username=username, password=password, verbose=False)
    response = kow.query(query=query_dict)
    results = response['result_data']['query_result']
    if results == []:
        print("No candidates to be checked?")
        return None
    return results
def query_kowalski_frb(args, t):
    """Query kowalski with cone searches centered at given locations.

    For each FRB row in `t` (optionally restricted by args.frb_names),
    cone-searches ZTF_alerts and filters out negative-subtraction and
    likely-stellar candidates. Returns a dict keyed by FRB name, or None
    when no FRB matched the input. Uses the module-level `username` and
    `password` set in __main__.
    """
    # Prepare a dictionary for each source
    dict_sources = {}
    for s in t:
        if args.frb_names is not None and not(s['frb_name'] in args.frb_names):
            continue
        try:
            coords=SkyCoord(ra=s["rop_raj"], dec=s["rop_decj"],
                            unit=(u.hourangle, u.deg), frame='icrs')
        except ValueError:
            # NOTE(review): drops into the debugger on a bad coordinate;
            # fine for interactive use, not for unattended runs.
            pdb.set_trace()
        # Kowalski keys cone-search results by "(ra, dec)" with '.'
        # replaced by '_'; build the same id so results can be matched.
        id_ra = f"{str(coords.ra.deg).replace('.','_')}"
        id_dec = f"{str(coords.dec.deg).replace('.','_')}"
        id_coords = f"({id_ra}, {id_dec})"
        date = Time(s['utc'].replace('/','-'), format='iso')
        dict_sources[s['frb_name']] = {
            'ra': coords.ra.deg,
            'dec': coords.dec.deg,
            'id_coords': id_coords,
            'jd': date.jd,
            'candidates': []
        }
    # Check that there is at least one source
    if len(dict_sources.keys()) == 0:
        print("No FRBs correspond to the given input.")
        if args.frb_names is not None:
            print(f"No FRB among {args.frb_names} are present in {args.cat_name}")
        return None
    # coords_arr.append((coords.ra.deg,coords.dec.deg))
    coords_arr = list((dict_sources[k]['ra'],
                       dict_sources[k]['dec'])
                      for k in dict_sources.keys())
    # NOTE(review): `k` is rebound here (Kowalski client) and again as a
    # comprehension variable below — harmless but worth renaming.
    k = Kowalski(username=username, password=password, verbose=False)
    q = {"query_type": "cone_search",
         "object_coordinates": {
             "radec": f"{coords_arr}",
             "cone_search_radius": args.search_radius,
             "cone_search_unit": "arcmin"
         },
         "catalogs": {
             "ZTF_alerts": {
                 "filter": {
                     "candidate.drb": {'$gt': 0.5},
                     "candidate.ndethist": {'$gte': args.ndethist},
                     "classifications.braai": {'$gt': 0.5},
                     "candidate.ssdistnr": {'$gt': 10},
                     "candidate.magpsf": {'$gt': 10}
                 },
                 "projection": {
                     "objectId": 1,
                     "candidate.rcid": 1,
                     "candidate.drb": 1,
                     "candidate.ra": 1,
                     "candidate.dec": 1,
                     "candidate.jd": 1,
                     "candidate.magpsf": 1,
                     "candidate.sigmapsf": 1,
                     "candidate.fid": 1,
                     "candidate.sgscore1": 1,
                     "candidate.distpsnr1": 1,
                     "candidate.sgscore2": 1,
                     "candidate.distpsnr2": 1,
                     "candidate.sgscore3": 1,
                     "candidate.distpsnr3": 1,
                     "candidate.ssdistnr": 1,
                     "candidate.isdiffpos": 1
                 }
             }
         },
         "kwargs": {"hint": "gw01"}
         }
    r = k.query(query=q)
    for idcoords in r['result_data']['ZTF_alerts'].keys():
        # Identify 'candid' for all relevant candidates
        objectId_list = []
        with_neg_sub = []
        stellar_list = []
        # No sources
        if len(r['result_data']['ZTF_alerts'][idcoords]) == 0:
            key = list(k for k in dict_sources.keys()
                       if dict_sources[k]['id_coords']==idcoords)[0]
            dict_sources[key]['candidates'] = []
            print(f"No candidates for {key}")
            continue
        for i in np.arange(len(r['result_data']['ZTF_alerts'][idcoords])):
            info = r['result_data']['ZTF_alerts'][idcoords][i]
            if info['objectId'] in stellar_list or (info['objectId'] in with_neg_sub):
                continue
            # Track objects that ever had a negative subtraction
            if info['candidate']['isdiffpos'] in ['f',0]:
                with_neg_sub.append(info['objectId'])
            # Flag likely stars: very close to a PS1 source with high sgscore.
            # The bare excepts below swallow missing-key cases (fields such
            # as srmag* are not in the projection above — NOTE(review):
            # confirm those cuts can ever fire).
            try:
                if (np.abs(info['candidate']['distpsnr1']) < 2.
                        and info['candidate']['sgscore1'] >= 0.5):
                    stellar_list.append(info['objectId'])
            except:
                pass
            try:
                if (np.abs(info['candidate']['distpsnr1']) < 15. and
                        info['candidate']['srmag1'] < 15. and
                        info['candidate']['srmag1'] > 0. and
                        info['candidate']['sgscore1'] >= 0.5):
                    continue
            except:
                pass
            try:
                if (np.abs(info['candidate']['distpsnr2']) < 15. and
                        info['candidate']['srmag2'] < 15. and
                        info['candidate']['srmag2'] > 0. and
                        info['candidate']['sgscore2'] >= 0.5):
                    continue
            except:
                pass
            try:
                if (np.abs(info['candidate']['distpsnr3']) < 15. and
                        info['candidate']['srmag3'] < 15. and
                        info['candidate']['srmag3'] > 0. and
                        info['candidate']['sgscore3'] >= 0.5):
                    continue
            except:
                pass
            objectId_list.append(info['objectId'])
        set_objectId = set(objectId_list)
        # Remove objects with negative subtraction
        if args.reject_neg:
            for n in set(with_neg_sub):
                try:
                    set_objectId.remove(n)
                except:
                    pass
        # Remove stellar objects
        for n in set(stellar_list):
            try:
                set_objectId.remove(n)
            except:
                pass
        # Add the list of ZTF candidates to the FRB list
        key = list(k for k in dict_sources.keys()
                   if dict_sources[k]['id_coords']==idcoords)[0]
        dict_sources[key]['candidates'] = list(set(set_objectId))
        tot_sources = len(r['result_data']['ZTF_alerts'][idcoords])
        print(f"{len(set_objectId)}/{tot_sources} candidates selected for {key}")
    return dict_sources
def plot_results_frb(sources, t, t_lc, args):
    """Fetch the light curves of the candidates and
    plot the results.

    One figure per (FRB, candidate) pair: detections per filter, optional
    ztfquery upper limits, and a vertical line at the FRB time. Reads the
    module-level `secrets` table when args.use_metadata is set.
    """
    filters = ['g', 'r', 'i']
    filters_id = {'1': 'g', '2': 'r', '3': 'i'}
    colors = {'g': 'g', 'r': 'r', 'i': 'y'}
    for frb in sources.keys():
        if len(sources[frb]['candidates']) == 0:
            continue
        # A different plot for each candidate
        jd0 = sources[frb]['jd']
        for cand in set(sources[frb]['candidates']):
            plt.clf()
            plt.subplot(1, 1, 1)
            t_cand = t_lc[t_lc['name']==cand]
            for f in filters:
                tf = t_cand[t_cand['filter'] == f]
                # Plot days relative to the FRB epoch
                tf["jd"] = tf["jd"] - jd0
                # NOTE(review): mag/magerr are assigned but unused; the
                # errorbar call below re-extracts the same columns.
                mag = np.array(tf["magpsf"])
                magerr = np.array(tf["sigmapsf"])
                plt.errorbar(np.array(tf["jd"]),
                             np.array(tf["magpsf"]),
                             fmt=colors[f]+'o',
                             yerr=np.array(tf["sigmapsf"]),
                             markeredgecolor='k', markersize=8,
                             label=f)
            # Upper limits
            if args.use_metadata is True:
                username_ztfquery = secrets['ztfquery_user'][0]
                password_ztfquery = secrets['ztfquery_pwd'][0]
                coords = SkyCoord(ra=np.mean(t_cand[t_cand['name']==cand]['ra']*u.deg),
                                  dec=np.mean(t_cand[t_cand['name']==cand]['dec']*u.deg))
                metadata = query_metadata(coords.ra, coords.dec,
                                          username_ztfquery,
                                          password_ztfquery,
                                          start_jd=None,
                                          end_jd=None,
                                          out_csv=None)
                t_ul = Table([[],[],[],[],[],[],[],[]],
                             names=('jd', 'magpsf', 'sigmapsf', 'filter',
                                    'snr', 'ul', 'seeing', 'programid'),
                             dtype=('double','f','f','S','f','f','f','int')
                             )
                # Keep only epochs with no detection in the light curve
                for j, ml, fid, s, pid in zip(metadata['obsjd'],
                                              metadata['maglimit'],
                                              metadata['fid'],
                                              metadata['seeing'],
                                              metadata['pid']):
                    #if not (pid in t['pid']):
                    if not (j in t_cand['jd']):
                        new_row = [j, 99.9, 99.9, filters_id[str(fid)],
                                   np.nan, ml, s, 0]
                        t_ul.add_row(new_row)
                for f in filters:
                    tf_ul = t_ul[t_ul['filter'] == f]
                    if len(tf_ul) > 0:
                        tf_ul["jd"] = tf_ul["jd"] - jd0
                        plt.plot(np.array(tf_ul["jd"]),
                                 np.array(tf_ul["ul"]),
                                 colors[f]+'v',
                                 markeredgecolor=colors[f],
                                 markerfacecolor='w')
                # Single legend entry for all upper-limit markers
                plt.plot([],[], 'kv', label='UL')
            # Plot the FRB detection time
            plt.plot([0, 0], [22, 15], 'b--', label=frb)
            # Legend (deduplicated by label)
            handles, labels = plt.gca().get_legend_handles_labels()
            by_label = OrderedDict(zip(labels, handles))
            plt.legend(by_label.values(), by_label.keys())
            # Labels
            plt.ylabel("Apparent magnitude [AB]")
            plt.xlabel(f"Days since {Time(jd0, format='jd').iso}")
            plt.title(f"{frb}; {cand}")
            # Invert the y axis (brighter is up)
            plt.gca().invert_yaxis()
            # Save the plot automatically?
            if args.saveplot:
                plt.savefig(f"lc_{frb}_{cand}.png")
            plt.show()
def get_index_info(catalog):
    """List which indexes are available on Kowalski to query a catalog
    more quickly. Uses the module-level username/password credentials.
    """
    query_dict = {"query_type": "info",
                  "query": {
                      "command": "index_info",
                      "catalog": catalog
                  }
                  }
    kow = Kowalski(username=username, password=password, verbose=False)
    response = kow.query(query=query_dict)
    indexes = response['result_data']['query_result']
    for num, (index_name, index_detail) in enumerate(indexes.items()):
        print(f'index #{num+1}: "{index_name}"\n{index_detail["key"]}\n')
def str2bool(v):
    """Parse a CLI boolean flag value (yes/no, true/false, t/f, y/n,
    1/0, any case).

    Fix: the old tuples contained 'Yes'/'True'/'No'/'False', which are
    unreachable after v.lower(); the dead members are removed.

    Raises:
        argparse.ArgumentTypeError: if the value is not recognized.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
    # CLI driver: read the FRB catalog, query Kowalski for counterparts,
    # optionally dump JSON/CSV output, and plot the light curves.
    import argparse
    parser = argparse.ArgumentParser(description='Query kowalski.')
    parser.add_argument('--cat', dest='cat_name', type=str, required=True,
                        help='CSV file downloaded from the FRBcat page',
                        default=None)
    parser.add_argument('--frb', dest='frb_names', nargs='+', required=False,
                        help='Names of specific FRBs to check', default=None)
    parser.add_argument('--out', dest='out', type=str,
                        required=False,
                        help='output JSON file name; example: results.json',
                        default=None)
    parser.add_argument('--out-lc', dest='out_lc', type=str,
                        required=False,
                        help='output CSV file name; example: lc.csv',
                        default=None)
    parser.add_argument('--r', dest='search_radius', type=float,
                        required=False,
                        help='Cone search radius in arcmin (default=15)',
                        default=15)
    parser.add_argument('--ndethist', dest='ndethist', type=int,
                        required=False,
                        help='Minimum number of detections (default=2)',
                        default=2)
    parser.add_argument('--p', dest='plot', type=str2bool,
                        required=False,
                        help='Plot the results? (boolean)',
                        default=True)
    parser.add_argument('--um', dest='use_metadata', type=str2bool,
                        required=False,
                        help='Plot upper limits using ztfquery metadata (boolean)',
                        default=True)
    parser.add_argument('--sp', dest='saveplot', type=str2bool,
                        required=False,
                        help='Save the plot of the results? (boolean)',
                        default=False)
    parser.add_argument('--reject-neg', dest='reject_neg', type=str2bool,
                        required=False,
                        help='Reject candidates with negative detections? (boolean)',
                        default=True)
    args = parser.parse_args()
    # Read the table of FRBs
    t=ascii.read(args.cat_name, format='csv')
    # Correct the first column name if necessary (UTF-8 BOM artifact)
    if '\ufeff"frb_name"' in t.colnames:
        t.rename_column('\ufeff"frb_name"', "frb_name")
    # Read the secrets; these module-level names are also used inside
    # query_kowalski_frb / plot_results_frb / get_index_info.
    secrets = ascii.read('secrets.csv', format = 'csv')
    username = secrets['kowalski_user'][0]
    password = secrets['kowalski_pwd'][0]
    if args.use_metadata is True:
        username_ztfquery = secrets['ztfquery_user'][0]
        password_ztfquery = secrets['ztfquery_pwd'][0]
    sources = query_kowalski_frb(args, t)
    # Write the results into an output JSON file
    if args.out is not None:
        with open(args.out, 'w') as j:
            json.dump(sources, j)
    # Get the light curves
    list_names = []
    for k in sources.keys():
        list_names += sources[k]['candidates']
    if len(list_names) == 0:
        print("No ZTF sources selected. Exiting..")
        exit()
    light_curves_alerts = get_lightcurve_alerts(username, password,
                                                list_names)
    # Add prv_candidates photometry to the light curve
    light_curves_aux = get_lightcurve_alerts_aux(username, password,
                                                 list_names)
    light_curves = light_curves_alerts + light_curves_aux
    # Create a table and output CSV file
    t_lc = create_tbl_lc(light_curves, args)
    # Plot the results
    if args.plot is True:
        plot_results_frb(sources, t, t_lc, args)
|
import os
import shutil
import time
import ujson
from HTMLParser import HTMLParser
from base64 import b64decode
import bencode
from django.conf import settings
from django.core.management.base import BaseCommand
from html2bbcode.parser import HTML2BBCode
from WhatManager2.manage_torrent import add_torrent
from WhatManager2.utils import wm_str
from books.utils import call_mktorrent
from home.models import ReplicaSet, get_what_client, DownloadLocation, WhatTorrent, \
RequestException, \
BadIdException
from wcd_pth_migration import torrentcheck
from wcd_pth_migration.logfile import LogFile, UnrecognizedRippingLogException, \
InvalidRippingLogException
from wcd_pth_migration.models import DownloadLocationEquivalent, WhatTorrentMigrationStatus, \
TorrentGroupMapping
from wcd_pth_migration.utils import generate_spectrals_for_dir, normalize_for_matching
from what_transcode.utils import extract_upload_errors, safe_retrieve_new_torrent, \
get_info_hash_from_data, recursive_chmod, pthify_torrent
# Module-level converters shared by all migration jobs.
html_to_bbcode = HTML2BBCode()  # converts what.cd HTML descriptions to BBCode
html_parser = HTMLParser()  # used to unescape HTML entities in names/titles
dummy_request = lambda: None  # To hold the what object (stand-in for a Django request)
def extract_new_artists_importance(group_info):
    """Flatten a what.cd ``musicInfo`` mapping into parallel upload lists.

    Args:
        group_info: group dict containing a ``'musicInfo'`` mapping of role
            key (e.g. ``'artists'``, ``'with'``, ``'composers'``) to a list
            of artist dicts with a ``'name'`` entry.

    Returns:
        ``(artists, importance)`` — artist names and the matching tracker
        importance codes (as strings), in role iteration order.

    Raises:
        KeyError: if ``musicInfo`` contains a role key not listed below.
    """
    # Tracker importance code per musicInfo role. Hoisted out of the loop so
    # the mapping is built once instead of once per role key.
    importance_by_role = {
        'artists': 1,  # Main
        'with': 2,  # Guest
        'composers': 4,  # Composer
        'conductor': 5,  # Conductor
        'dj': 6,  # DJ / Compiler
        'remixedBy': 3,  # Remixer
        'producer': 7,  # Producer
    }
    artists = []
    importance = []
    for role_key, artist_list in group_info['musicInfo'].items():
        # Look up the code before iterating so an unknown role still raises,
        # exactly as the original per-iteration lookup did.
        role_code = str(importance_by_role[role_key])
        for artist_item in artist_list:
            artists.append(artist_item['name'])
            importance.append(role_code)
    return artists, importance
def format_bytes_pth(length):
    """Render a byte count as a human-readable '<x>.<xx> MB' / 'GB' string."""
    size = length / 1024.0 / 1024.0
    if size < 1024:
        return '{:.2f} {}'.format(size, 'MB')
    # 1024 MB and above are shown in gigabytes.
    return '{:.2f} {}'.format(size / 1024.0, 'GB')
def fix_duplicate_newlines(s):
    """Drop carriage returns, then collapse each double newline into one.

    The collapse is a single left-to-right pass, so a run of 2k newlines
    shrinks to k newlines (it is not repeated until fixpoint).
    """
    without_cr = s.replace('\r', '')
    return without_cr.replace('\n\n', '\n')
class TorrentMigrationJob(object):
    """Interactive migration of one what.cd torrent to the new (PTH) tracker.

    Verifies local data against the original torrent, rebuilds the .torrent
    for the new announce URL, looks for duplicates/reseed candidates on the
    destination tracker, and (when REAL_RUN) uploads, moves the data, and
    registers the torrent with WhatManager. Progress is persisted in
    WhatTorrentMigrationStatus rows keyed by the what.cd torrent id.
    """
    # When False, destructive steps (upload/move/add) are only printed.
    REAL_RUN = True
    def __init__(self, what, location_mapping, data, flac_only):
        # what: API client for the destination tracker.
        # location_mapping: old download path -> new download path.
        # data: one decoded line of what_manager2_torrents.jsonl (holds the
        #       what.cd torrent row plus its original download location).
        # flac_only: when True, process() skips non-FLAC torrents.
        self.flac_only = flac_only
        self.what = what
        self.location_mapping = location_mapping
        self.data = data
        self.what_torrent = self.data['what_torrent']
        self.what_torrent_info = ujson.loads(self.what_torrent['info'])
        # Data lives at <old location>/<what torrent id>/<torrent name>.
        self.full_location = os.path.join(
            wm_str(self.data['location']['path']),
            str(self.what_torrent['id']),
        )
        self.torrent_dict = bencode.bdecode(b64decode(self.what_torrent['torrent_file']))
        self.torrent_name = self.torrent_dict['info']['name']
        self.torrent_new_name = self.torrent_name
        self.torrent_dir_path = os.path.join(self.full_location.encode('utf-8'), self.torrent_name)
        # Populated as the job advances; NOTE self.migration_status is first
        # assigned in find_dupes()/process(), not here.
        self.new_torrent = None
        self.log_files = set()
        self.log_files_full_paths = []
        self.torrent_file_new_data = None
        self.torrent_new_infohash = None
        self.payload = None
        self.payload_files = None
        self.existing_new_group = None
        self.full_new_location = None
    def check_valid(self):
        """Verify on-disk data against the original torrent; return bool.

        Records STATUS_FAILED_VALIDATION and pauses for the operator when
        hash verification throws. Also collects candidate .log ripping logs
        and raises on files present on disk but absent from the torrent.
        """
        print 'Verifying torrent data...'
        try:
            if not torrentcheck.verify(self.torrent_dict['info'], self.full_location):
                raise Exception('Torrent does not verify')
        except Exception as ex:
            WhatTorrentMigrationStatus.objects.create(
                what_torrent_id=self.what_torrent['id'],
                status=WhatTorrentMigrationStatus.STATUS_FAILED_VALIDATION
            )
            raw_input('Verification threw {}. Press enter to continue.'.format(ex))
            return False
        print('Hash matching')
        # Every file on disk must appear in the torrent's file list.
        torrent_file_set = {'/'.join(f['path']) for f in self.torrent_dict['info']['files']}
        for dirpath, dirnames, filenames in os.walk(self.torrent_dir_path):
            for filename in filenames:
                abs_path = os.path.join(dirpath, filename)
                file_path = os.path.relpath(abs_path, self.torrent_dir_path)
                if file_path not in torrent_file_set:
                    raise Exception(
                        'Extraneous file: {}/{}'.format(self.torrent_dir_path, file_path))
                if filename.lower().endswith('.log'):
                    print 'Candidate log file', abs_path
                    with open(abs_path, 'r') as log_f:
                        try:
                            self.log_files.add(LogFile(log_f.read()))
                        except UnrecognizedRippingLogException:
                            print 'Skipping: unrecognized'
                            pass
                        except InvalidRippingLogException:
                            raw_input('Log file unrecognized!')
                    self.log_files_full_paths.append(abs_path)
        print('No extraneous files')
        print 'Torrent verification complete'
        return True
    def mktorrent(self):
        """Rebuild the .torrent with the new announce URL and hash it."""
        print 'Creating torrent file...'
        torrent_temp_filename = 'temp.torrent'
        # Remove a stale temp file from a previous run, if any.
        try:
            os.remove(torrent_temp_filename)
        except OSError:
            pass
        call_mktorrent(self.torrent_dir_path,
                       torrent_temp_filename,
                       settings.WHAT_ANNOUNCE,
                       self.torrent_new_name)
        with open(torrent_temp_filename, 'rb') as torrent_file:
            self.torrent_file_new_data = pthify_torrent(torrent_file.read())
        self.torrent_new_infohash = get_info_hash_from_data(self.torrent_file_new_data)
        print 'New info hash is: ', self.torrent_new_infohash
        print 'Torrent file created'
    def retrieve_new_torrent(self, info_hash):
        """Fetch and cache the uploaded torrent by info hash; save its PTH id.

        NOTE(review): relies on self.migration_status having been assigned by
        find_dupes()/process() before this runs — confirm call ordering.
        """
        if self.new_torrent is None:
            self.new_torrent = safe_retrieve_new_torrent(self.what, info_hash)
        self.migration_status.pth_torrent_id = self.new_torrent['torrent']['id']
        self.migration_status.save()
    def set_new_location(self):
        """Resolve the destination DownloadLocation and final data directory."""
        mapped_location = self.location_mapping[self.data['location']['path']]
        self.new_location_obj = DownloadLocation.objects.get(path=mapped_location)
        self.full_new_location = os.path.join(
            mapped_location,
            str(self.new_torrent['torrent']['id'])
        )
    def prepare_payload(self):
        """Build the upload form payload from the old torrent/group metadata.

        Only Music uploads are supported; MP3 is restricted to V0/320.
        When an existing destination group was found, only torrent-level
        fields are sent (groupid replaces the group metadata).
        """
        t_info = self.what_torrent_info['torrent']
        g_info = self.what_torrent_info['group']
        if g_info['categoryName'] != 'Music':
            raise Exception('Can only upload Music torrents for now')
        if t_info['format'] == 'MP3' and t_info['encoding'] not in ['V0 (VBR)', '320']:
            raise Exception('Please let\'s not upload this bitrate MP3')
        payload = dict()
        payload['submit'] = 'true'
        payload['auth'] = self.what.authkey
        payload['type'] = '0'  # Music
        if self.existing_new_group:
            payload['groupid'] = self.existing_new_group['group']['id']
        else:
            payload['artists[]'], payload['importance[]'] = extract_new_artists_importance(g_info)
            payload['title'] = html_parser.unescape(g_info['name'])
            payload['year'] = str(g_info['year'])
            payload['record_label'] = g_info['recordLabel'] or ''
            payload['catalogue_number'] = g_info['catalogueNumber'] or ''
            payload['releasetype'] = str(g_info['releaseType'])
            payload['tags'] = ','.join(g_info['tags'])
            payload['image'] = g_info['wikiImage'] or ''
            payload['album_desc'] = fix_duplicate_newlines(html_to_bbcode.feed(g_info['wikiBody']))
        if t_info['scene']:
            payload['scene'] = 'on'
        payload['format'] = t_info['format']
        payload['bitrate'] = t_info['encoding']
        payload['media'] = t_info['media']
        payload['release_desc'] = fix_duplicate_newlines(html_to_bbcode.feed(
            t_info['description']).replace('karamanolevs', 'karamanolev\'s'))
        if t_info['remastered']:
            payload['remaster'] = 'on'
            payload['remaster_year'] = t_info['remasterYear']
            payload['remaster_title'] = t_info['remasterTitle']
            payload['remaster_record_label'] = t_info['remasterRecordLabel']
            payload['remaster_catalogue_number'] = t_info['remasterCatalogueNumber']
        self.payload = payload
    def prepare_payload_files(self):
        """Attach the new .torrent and, for FLAC, operator-approved log files."""
        payload_files = []
        payload_files.append(('file_input', ('torrent.torrent', self.torrent_file_new_data)))
        if self.what_torrent_info['torrent']['format'] == 'FLAC':
            for log_file_path in self.log_files_full_paths:
                # Show the first lines of each log so the operator can decide.
                print 'Log file {}'.format(log_file_path)
                print ''.join(list(open(log_file_path))[:4])
                print
                response = raw_input('Add to upload [y/n]: ')
                if response == 'y':
                    payload_files.append(('logfiles[]', ('logfile.log', open(log_file_path, 'rb'))))
                elif response == 'n':
                    pass
                else:
                    raise Exception('Bad response')
        self.payload_files = payload_files
    def perform_upload(self):
        """POST the upload; on any failure try to recover the torrent by hash.

        The session's Content-type header is removed for the duration of the
        request so the HTTP library can set the multipart boundary itself,
        then restored in the finally block.
        """
        if self.REAL_RUN:
            old_content_type = self.what.session.headers['Content-type']
            try:
                del self.what.session.headers['Content-type']
                response = self.what.session.post(
                    settings.WHAT_UPLOAD_URL, data=self.payload, files=self.payload_files)
                # Landing back on the upload URL means the upload was rejected.
                if response.url == settings.WHAT_UPLOAD_URL:
                    try:
                        errors = extract_upload_errors(response.text)
                    except Exception:
                        errors = ''
                    exception = Exception(
                        'Error uploading data to what.cd. Errors: {0}'.format('; '.join(errors)))
                    exception.response_text = response.text
                    with open('uploaded_error.html', 'w') as error_file:
                        error_file.write(response.text.encode('utf-8'))
                    raise exception
            except Exception as ex:
                # The upload may have succeeded despite the error; check by
                # info hash before re-raising the original exception.
                time.sleep(2)
                try:
                    self.retrieve_new_torrent(self.torrent_new_infohash)
                except:
                    raise ex
            finally:
                self.retrieve_new_torrent(self.torrent_new_infohash)
                self.what.session.headers['Content-type'] = old_content_type
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_UPLOADED
            self.migration_status.save()
        else:
            print 'Ready with payload'
            print ujson.dumps(self.payload, indent=4)
    def get_total_size(self):
        """Return the sum of all file lengths in the torrent, in bytes."""
        return sum(f['length'] for f in self.torrent_dict['info']['files'])
    def print_info(self):
        """Print a human-readable summary of what is about to be uploaded."""
        if 'groupid' in self.payload:
            print 'Part of existing torrent group'
            print 'Artists: ', ','.join(
                artist['name'] for artist in
                self.existing_new_group['group']['musicInfo']['artists']
            )
            print 'Album title: ', self.existing_new_group['group']['name']
            print 'Year: ', self.existing_new_group['group']['year']
            print 'Record label: ', self.existing_new_group['group']['recordLabel']
            print 'Catalog number:', self.existing_new_group['group']['catalogueNumber']
            print 'Release type: ', self.existing_new_group['group']['releaseType']
            print
        else:
            # NOTE(review): payload['artists[]'] holds plain name strings and
            # payload['importance[]'] holds strings ('1'..'7'), so both
            # artist['name'] and the int compare below look broken — verify.
            print 'Artists: ', ','.join(
                artist['name'] for artist, importance in zip(
                    self.payload['artists[]'], self.payload['importance[]'])
                if importance == 1  # Main
            )
            print 'Album title: ', self.payload['title']
            print 'Year: ', self.payload['year']
            print 'Record label: ', self.payload['record_label']
            print 'Catalog number:', self.payload['catalogue_number']
            print 'Release type: ', self.payload['releasetype']
            print
        if 'remaster' in self.payload:
            print '    Edition information'
            print '    Year:          ', self.payload['remaster_year']
            print '    Record label:  ', self.payload['remaster_record_label']
            print '    Catalog number:', self.payload['remaster_catalogue_number']
            print
        print 'Scene: ', 'yes' if 'scene' in self.payload else 'no'
        print 'Format: ', self.payload['format']
        print 'Bitrate: ', self.payload['bitrate']
        print 'Media: ', self.payload['media']
        if 'groupid' not in self.payload:
            print 'Tags: ', self.payload['tags']
            print 'Image: ', self.payload['image']
            print 'Album desc: ', self.payload['album_desc']
        print 'Release desc: ', self.payload['release_desc']
    def find_existing_torrent_by_hash(self):
        """Look up the new tracker by our rebuilt info hash (reseed case).

        Side effect: sets self.new_torrent on a hit. NOTE(review): the
        method always returns None; callers check self.new_torrent instead.
        """
        try:
            existing_by_hash = self.what.request('torrent', hash=self.torrent_new_infohash)
            if existing_by_hash['status'] == 'success':
                self.new_torrent = existing_by_hash['response']
        except RequestException:
            pass
        return None
    def find_existing_torrent_group(self):
        """Locate (or interactively pick) the destination torrent group.

        Resolution order: saved TorrentGroupMapping, then a name+year search
        on the destination tracker, then manual operator entry. A confirmed
        id is persisted as a mapping and loaded into self.existing_new_group.
        """
        if self.existing_new_group is not None:
            return
        existing_group_id = None
        group_id = self.what_torrent_info['group']['id']
        try:
            mapping = TorrentGroupMapping.objects.get(what_group_id=group_id)
            mapping_group = self.what.request('torrentgroup', id=mapping.pth_group_id)['response']
            # Sanity check that the API returned the group we asked for.
            if mapping_group['group']['id'] != mapping.pth_group_id:
                raise Exception('NOOOOO THIS CANNOT HAPPEN {} {}!'.format(
                    mapping_group['group']['id'],
                    mapping.pth_group_id,
                ))
            existing_group_id = mapping.pth_group_id
            print 'Found torrent group mapping with {}'.format(existing_group_id)
        except TorrentGroupMapping.DoesNotExist:
            pass
        except BadIdException:
            print 'Mapping has gone bad, deleting...'
            mapping.delete()
        if existing_group_id is None:
            group_year = self.what_torrent_info['group']['year']
            group_name = html_parser.unescape(self.what_torrent_info['group']['name']).lower()
            search_str = '{} {}'.format(wm_str(group_name), wm_str(str(group_year)))
            results = self.what.request('browse', searchstr=search_str)['response']['results']
            for result in results:
                if html_parser.unescape(result['groupName']).lower() == group_name and \
                                result['groupYear'] == group_year:
                    if not existing_group_id:
                        existing_group_id = result['groupId']
                        print 'Found existing group', existing_group_id
                    else:
                        # Ambiguous match — fall through to manual entry.
                        print 'Multiple matching existing groups!!!!!!!!!!'
                        existing_group_id = None
                        break
            if existing_group_id is None:
                existing_group_id = raw_input(u'Enter existing group id (empty if non-existent): ')
            if existing_group_id:
                TorrentGroupMapping.objects.get_or_create(
                    what_group_id=self.what_torrent_info['group']['id'],
                    pth_group_id=existing_group_id
                )
        if existing_group_id:
            # NOTE(review): duplicated assignment below is redundant (no-op).
            self.existing_new_group = self.existing_new_group = self.what.request(
                'torrentgroup', id=existing_group_id)['response']
    def find_matching_torrent_within_group(self):
        """Return a torrent id in the destination group with the same byte size."""
        t_info = self.what_torrent_info['torrent']
        g_info = self.what_torrent_info['group']
        existing_torrent_id = None
        for torrent in self.existing_new_group['torrents']:
            if torrent['size'] == t_info['size']:
                if not existing_torrent_id:
                    existing_torrent_id = torrent['id']
                else:
                    raw_input('Warning: Multiple matching torrent sizes ({} and {})! '
                              'Taking first.'.format(
                        existing_torrent_id, torrent['id']))
        return existing_torrent_id
    def find_existing_torrent_within_group(self):
        """Return a likely-duplicate torrent id in the destination group.

        Matches by identical ripping log sets (FLAC) or by catalog number
        plus media/format/encoding; asks the operator on ambiguity.
        """
        t_info = self.what_torrent_info['torrent']
        g_info = self.what_torrent_info['group']
        existing_torrent_id = None
        original_catalog_number = normalize_for_matching(
            t_info['remasterCatalogueNumber'] or g_info['catalogueNumber'])
        for torrent in self.existing_new_group['torrents']:
            torrent_catalog_number = normalize_for_matching(
                torrent['remasterCatalogueNumber'] or
                self.existing_new_group['group']['catalogueNumber'])
            torrent_catalog_number = torrent_catalog_number.lower()
            matching_media_format = \
                t_info['media'] == torrent['media'] and \
                t_info['format'] == torrent['format'] and \
                t_info['encoding'] == torrent['encoding']
            # Comparing log files
            if torrent['format'] == 'FLAC' and t_info['format'] == 'FLAC' and len(self.log_files):
                try:
                    torrent_log_files = {
                        LogFile(l) for l in
                        self.what.request('torrentlog', torrentid=torrent['id'])['response']
                    }
                    if torrent_log_files == self.log_files:
                        print 'Found matching log files with {}!!!'.format(torrent['id'])
                        if not existing_torrent_id:
                            existing_torrent_id = torrent['id']
                            continue
                        else:
                            print 'Multiple existing catalog numbers ({} and {})'.format(
                                existing_torrent_id, torrent['id'])
                            existing_torrent_id = raw_input('Enter torrent id if dup: ')
                    else:
                        if len(torrent_log_files.intersection(self.log_files)):
                            raw_input('Log file sets are not exact matches, '
                                      'but found matching log files between ours and {}!!!'.format(
                                torrent['id']))
                except InvalidRippingLogException:
                    raw_input('Log file for {} invalid!'.format(torrent['id']))
                except UnrecognizedRippingLogException:
                    raw_input('Log file for {} unrecognized!'.format(torrent['id']))
            if original_catalog_number == torrent_catalog_number and matching_media_format:
                if not existing_torrent_id:
                    existing_torrent_id = torrent['id']
                    continue
                else:
                    print 'Multiple existing catalog numbers ({} and {})'.format(
                        existing_torrent_id, torrent['id'])
                    existing_torrent_id = raw_input('Enter torrent id if dup: ')
        return existing_torrent_id
    def find_dupes(self):
        """Decide the torrent's fate: upload, duplicate, reseed or skip.

        Prints a summary, auto-detects reseeds/duplicates, then asks the
        operator. Returns True when the caller should proceed to upload
        (or the torrent was reseeded), False when nothing more is needed.
        Side effect: creates/assigns self.migration_status.
        """
        response = None
        existing_torrent_id = None
        t_info = self.what_torrent_info['torrent']
        g_info = self.what_torrent_info['group']
        remaster = t_info['remastered']
        print 'What id: ', self.what_torrent['id']
        print 'Title: ', '; '.join(
            a['name'] for a in g_info['musicInfo']['artists']), '-', html_parser.unescape(
            g_info['name'])
        print 'Year: ', g_info['year']
        print 'Media: ', t_info['media']
        print 'Format: ', t_info['format']
        print 'Bitrate: ', t_info['encoding']
        print 'Remaster: ', 'yes ({})'.format(t_info['remasterYear']) if remaster else 'no'
        print 'Label: ', t_info['remasterRecordLabel'] if remaster else g_info['recordLabel']
        print 'Cat no: ', t_info['remasterCatalogueNumber'] if remaster else g_info[
            'catalogueNumber']
        print 'Remaster desc:', t_info['remasterTitle']
        print 'Torrent name: ', self.torrent_name
        print 'Torrent size: ', format_bytes_pth(self.get_total_size())
        print
        self.find_existing_torrent_by_hash()
        if self.new_torrent:
            print 'Found existing torrent by hash ' + str(
                self.new_torrent['torrent']['id']) + ' reseeding!!!'
            self.migration_status = WhatTorrentMigrationStatus.objects.create(
                what_torrent_id=self.what_torrent['id'],
                status=WhatTorrentMigrationStatus.STATUS_RESEEDED,
                pth_torrent_id=self.new_torrent['torrent']['id'],
            )
            return True
        self.find_existing_torrent_group()
        if self.existing_new_group:
            matching_torrent_id = self.find_matching_torrent_within_group()
            if matching_torrent_id:
                print 'Found matching torrent id:', matching_torrent_id
                response = 'reseed'
            else:
                existing_torrent_id = self.find_existing_torrent_within_group()
                if existing_torrent_id:
                    print 'Found existing torrent id:', existing_torrent_id
                    response = 'dup'
        if not response:
            response = raw_input('Choose action [up/dup/skip/skipp/reseed/changetg]: ')
        else:
            if response != 'reseed':
                new_response = raw_input(response + '. Override: ')
                if new_response:
                    response = new_response
        if response == 'up':
            self.migration_status = WhatTorrentMigrationStatus(
                what_torrent_id=self.what_torrent['id'],
                status=WhatTorrentMigrationStatus.STATUS_PROCESSING,
            )
            return True
        elif response == 'reseed':
            # NOTE(review): matching_torrent_id is only bound when
            # self.existing_new_group was found above; answering 'reseed'
            # without a group match would raise NameError here — verify.
            if not matching_torrent_id:
                matching_torrent_id = int(raw_input('Enter matching torrent id: '))
            existing_torrent = WhatTorrent.get_or_create(dummy_request, what_id=matching_torrent_id)
            existing_info = bencode.bdecode(existing_torrent.torrent_file_binary)
            success = False
            try:
                if not torrentcheck.verify(existing_info['info'], self.full_location):
                    raise Exception('Torrent does not verify')
                success = True
            except Exception as ex:
                print 'Existing torrent does not verify with', ex
            if success:
                self.new_torrent = self.what.request('torrent', id=matching_torrent_id)['response']
                self.migration_status = WhatTorrentMigrationStatus.objects.create(
                    what_torrent_id=self.what_torrent['id'],
                    status=WhatTorrentMigrationStatus.STATUS_RESEEDED,
                    pth_torrent_id=matching_torrent_id,
                )
                return True
        # Fallthrough: record a status row for the remaining actions.
        self.migration_status = WhatTorrentMigrationStatus(
            what_torrent_id=self.what_torrent['id'],
        )
        if response == 'dup':
            if not existing_torrent_id:
                existing_torrent_id = int(raw_input('Enter existing torrent id: '))
            existing_torrent = self.what.request('torrent', id=existing_torrent_id)['response']
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_DUPLICATE
            self.migration_status.pth_torrent_id = existing_torrent_id
            TorrentGroupMapping.objects.get_or_create(
                what_group_id=self.what_torrent_info['group']['id'],
                pth_group_id=existing_torrent['group']['id'],
            )
        elif response == 'skip':
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_SKIPPED
        elif response == 'skipp':
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_SKIPPED_PERMANENTLY
        elif response == 'reseed':
            # Reached when the reseed verification above failed.
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_DUPLICATE
            self.migration_status.pth_torrent_id = matching_torrent_id
        elif response == 'changetg':
            existing_group_id = raw_input(u'Enter existing group id (empty if non-existent): ')
            if existing_group_id:
                self.existing_new_group = self.existing_new_group = self.what.request(
                    'torrentgroup', id=existing_group_id)['response']
            return self.find_dupes()
        else:
            raise Exception('Unknown response')
        self.migration_status.save()
        return False
    def _add_to_wm(self):
        """Register the new torrent with the preferred WhatManager instance."""
        new_id = self.new_torrent['torrent']['id']
        instance = ReplicaSet.get_what_master().get_preferred_instance()
        trans_torrent = add_torrent(dummy_request, instance, self.new_location_obj, new_id)
        print 'Added to', trans_torrent.instance.name
    def add_to_wm(self):
        """Retry _add_to_wm up to 3 times, then one final attempt.

        NOTE(review): the final call outside the loop lets the last
        failure propagate after three swallowed attempts.
        """
        for i in range(3):
            try:
                self._add_to_wm()
                return
            except Exception:
                print 'Error adding to wm, trying again in 5 sec...'
                time.sleep(5)
        self._add_to_wm()
    def enhance_torrent_data(self):
        """Interactively fill in metadata the destination tracker requires."""
        # Destination tracker spells the media type differently.
        if self.what_torrent_info['torrent']['media'] == 'Blu-ray':
            self.what_torrent_info['torrent']['media'] = 'Blu-Ray'
        if not any(self.what_torrent_info['group']['tags']):
            tags = raw_input('Enter tags (comma separated): ').split(',')
            self.what_torrent_info['group']['tags'] = tags
        if len(self.what_torrent_info['group']['wikiBody']) < 10:
            wiki_body = raw_input('Enter wiki body: ')
            self.what_torrent_info['group']['wikiBody'] = wiki_body
        # tinypic images are dead; drop them rather than upload a broken link.
        if 'tinypic.com' in self.what_torrent_info['group']['wikiImage'].lower():
            self.what_torrent_info['group']['wikiImage'] = ''
        if self.what_torrent_info['torrent']['remastered'] and not \
                self.what_torrent_info['torrent']['remasterYear']:
            remaster_year = raw_input('Enter remaster year: ')
            self.what_torrent_info['torrent']['remasterYear'] = remaster_year
    def generate_spectrals(self):
        """Generate spectrogram images for the torrent's audio files."""
        print 'Generating spectrals...'
        generate_spectrals_for_dir(self.full_location)
    def save_torrent_group_mapping(self):
        """Persist the old-group -> new-group id mapping for future runs."""
        TorrentGroupMapping.objects.get_or_create(
            what_group_id=self.what_torrent_info['group']['id'],
            pth_group_id=self.new_torrent['group']['id']
        )
    def process(self):
        """Run the full migration pipeline for this torrent.

        Skips non-Music torrents, non-FLAC (when flac_only) and anything
        already recorded in WhatTorrentMigrationStatus; otherwise verifies,
        rebuilds, dedups, uploads, moves the data and registers the result.
        """
        what_torrent_id = self.what_torrent['id']
        if self.what_torrent_info['group']['categoryName'] != 'Music':
            print 'Skipping non-Music torrent', what_torrent_id
            return
        if self.flac_only and self.what_torrent_info['torrent']['format'] != 'FLAC':
            print 'Skipping non-FLAC torrent', what_torrent_id
            return
        try:
            status = WhatTorrentMigrationStatus.objects.get(what_torrent_id=what_torrent_id)
            if status.status == WhatTorrentMigrationStatus.STATUS_COMPLETE:
                print 'Skipping complete torrent', what_torrent_id
                return
            elif status.status == WhatTorrentMigrationStatus.STATUS_DUPLICATE:
                print 'Skipping duplicate torrent', what_torrent_id
                return
            elif status.status == WhatTorrentMigrationStatus.STATUS_SKIPPED:
                print 'Skipping skipped torrent', what_torrent_id
                return
            elif status.status == WhatTorrentMigrationStatus.STATUS_SKIPPED_PERMANENTLY:
                print 'Skipping permanently skipped torrent', what_torrent_id
                return
            elif status.status == WhatTorrentMigrationStatus.STATUS_FAILED_VALIDATION:
                print 'Skipping failed validation torrent', what_torrent_id
                return
            elif status.status == WhatTorrentMigrationStatus.STATUS_RESEEDED:
                print 'Skipping reseeded torrent', what_torrent_id
                return
            else:
                raise Exception('Not sure what to do with status {} on {}'.format(
                    status.status, what_torrent_id))
        except WhatTorrentMigrationStatus.DoesNotExist:
            pass
        if not self.check_valid():
            return
        self.mktorrent()
        if not self.find_dupes():
            return
        # self.new_torrent set means the torrent already exists (reseed);
        # otherwise go through the interactive upload flow.
        if not self.new_torrent:
            self.enhance_torrent_data()
            self.prepare_payload()
            self.print_info()
            self.prepare_payload_files()
            self.generate_spectrals()
            raw_input('Will perform upload (CHECK THE SPECTRALS)...')
            self.perform_upload()
        self.set_new_location()
        if self.REAL_RUN:
            os.makedirs(self.full_new_location)
            shutil.move(wm_str(self.torrent_dir_path), wm_str(self.full_new_location))
            try:
                recursive_chmod(self.full_new_location, 0777)
            except OSError:
                print 'recursive_chmod failed'
        else:
            print 'os.makedirs({})'.format(self.full_new_location)
            print 'shutil.move({}, {})'.format(self.torrent_dir_path, self.full_new_location)
            print 'recursive_chmod({}, 0777)'.format(self.full_new_location)
        if self.REAL_RUN:
            self.add_to_wm()
            self.migration_status.status = WhatTorrentMigrationStatus.STATUS_COMPLETE
            self.migration_status.save()
            self.save_torrent_group_mapping()
        else:
            print 'add_to_wm()'
        print
        print
class Command(BaseCommand):
    """Management command: migrate exported what.cd torrents to the new tracker.

    Reads what_manager2_torrents.jsonl twice: a first pass interactively
    builds and validates the old-location -> new-location mapping, a second
    pass runs one TorrentMigrationJob per exported torrent.
    """
    help = 'Export transmission torrents and what torrents'
    def add_arguments(self, parser):
        # When set, only FLAC torrents are migrated.
        parser.add_argument('--flac-only', action='store_true', default=False)
    def handle(self, *args, **options):
        """Entry point for `manage.py <command>`."""
        print 'Initiating what client...'
        what = get_what_client(dummy_request, True)
        index_response = what.request('index')
        print 'Status:', index_response['status']
        print 'Scanning replica sets...'
        # The old what.cd replica set must be gone and the PTH one populated.
        try:
            ReplicaSet.objects.get(zone='what.cd')
            raise Exception('Please delete your what.cd replica set now')
        except ReplicaSet.DoesNotExist:
            pass
        try:
            pth_replica_set = ReplicaSet.get_what_master()
            if pth_replica_set.transinstance_set.count() < 1:
                raise ReplicaSet.DoesNotExist()
        except ReplicaSet.DoesNotExist:
            raise Exception('Please get your PTH replica set ready')
        print 'Scanning locations...'
        location_mapping = {}
        with open('what_manager2_torrents.jsonl', 'rb') as torrents_input:
            for line in torrents_input:
                data = ujson.loads(line)
                location_path = data['location']['path']
                if location_path not in location_mapping:
                    # Reuse a stored mapping, or ask the operator and save it.
                    try:
                        new_location = DownloadLocationEquivalent.objects.get(
                            old_location=location_path).new_location
                    except DownloadLocationEquivalent.DoesNotExist:
                        new_location = raw_input(
                            'Enter the new location to map to {}: '.format(location_path))
                        DownloadLocationEquivalent.objects.create(
                            old_location=location_path,
                            new_location=new_location,
                        )
                    location_mapping[location_path] = new_location
        print 'Location mappings:'
        for old_location, new_location in location_mapping.items():
            # Every target location must already exist in the DB.
            try:
                DownloadLocation.objects.get(zone='redacted.ch', path=new_location)
            except DownloadLocation.DoesNotExist:
                raise Exception(
                    'Please create the {} location in the DB in zone redacted.ch'.format(
                        new_location))
            print old_location, '=', new_location
        with open('what_manager2_torrents.jsonl', 'rb') as torrents_input:
            for line in torrents_input:
                data = ujson.loads(line)
                migration_job = TorrentMigrationJob(what, location_mapping, data,
                                                    flac_only=options['flac_only'])
                migration_job.process()
|
import os
from kivy.app import App
from kivy.core.window import Window
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.animation import Animation
from human_interfaces.base import HumanInterfaceBase
class RPSLayout(FloatLayout):
    """Root Kivy layout for the Rock-Paper-Scissors GUI.

    Widgets live in three registries (self.labels, self.buttons, self.images)
    mapping an id to a 3-tuple: (widget, attribute-name-to-sync, function
    deriving that attribute's value from the engine's game-state dict).
    sync_widgets_with_game_state() pushes the current state into every
    registered widget.
    """
    def __init__(self, *args, engine=None, **kwargs):
        # engine: game engine exposing .state (a dict) and .advance().
        super().__init__(*args, **kwargs)
        self.engine = engine
        self.new_choice = False  # set True when the player presses a button
        self.choice = None  # last chosen move: 'r', 'p' or 's'
        self.state = self.engine.state
        self.labels = {}
        self.buttons = {}
        self.images = {}
        self.animations = {}
        self.build()
    def get_move(self):
        """Return the player's latest choice and clear the new-choice flag."""
        self.new_choice = False
        return self.choice
    def sync_widgets_with_game_state(self, *args):
        """Push self.state into every registered widget via its sync function."""
        for widget, attr, func in list(self.labels.values()) + list(self.images.values()) + list(self.buttons.values()):
            if func:
                setattr(widget, attr, func(self.state))
    def show_error(self, err_msg):
        # Errors are silently ignored in the GUI for now.
        pass
    def animation(self):
        """Bounce both 'future move' fist images, then re-sync the widgets."""
        image_ids = ['player1_future_move_image', 'player2_future_move_image']
        def pos_hint_move(pos_hint, x, y):
            # Return a copy of pos_hint shifted by (x, y).
            p = pos_hint.copy()
            p['x'] += x
            p['y'] += y
            return p
        for image_id in image_ids:
            image = self.images[image_id][0]
            pos_hint_init = image.pos_hint
            mag = 0.02  # bounce amplitude (pos_hint units)
            dur = 0.2  # duration of each bounce segment (seconds)
            # Up, down, up, then back to the initial position.
            anim = Animation(pos_hint=pos_hint_move(image.pos_hint, 0, mag), duration=dur)
            anim += Animation(pos_hint=pos_hint_move(image.pos_hint, 0, -1*mag), duration=dur)
            anim += Animation(pos_hint=pos_hint_move(image.pos_hint, 0, mag), duration=dur)
            anim += Animation(pos_hint=pos_hint_init, duration=dur)
            anim.bind(on_complete=self.sync_widgets_with_game_state)
            anim.start(image)
    def rock(self, button):
        """Button handler: choose rock, advance the game and animate."""
        self.choice = 'r'
        self.new_choice = True
        self.state = self.engine.advance()
        self.animation()
    def paper(self, button):
        """Button handler: choose paper, advance the game and animate."""
        self.choice = 'p'
        self.new_choice = True
        self.state = self.engine.advance()
        self.animation()
    def scissors(self, button):
        """Button handler: choose scissors, advance the game and animate."""
        self.choice = 's'
        self.new_choice = True
        self.state = self.engine.advance()
        self.animation()
    def build(self):
        """Build the UI Layout"""
        label_default_kwargs = {
            'color': (0, 0, 0, 1),
            'font_size': 40,
            'size_hint': (0.3, 0.3),
        }
        # Column / row positions in pos_hint units (0..1 of the window).
        col1 = 0.05
        col2 = 0.35
        col3 = 0.65
        row1 = 0.7
        row2 = 0.5
        row1b = 0.6
        row4 = 0.4
        row5 = 0.3
        row6 = 0.1
        row7 = 0.0
        images_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images')
        init_img_path = os.path.join(images_dir, 'init.jpg')
        blank_img_path = os.path.join(images_dir, 'blank.jpg')
        rock_img_path = os.path.join(images_dir, 'rock.jpg')
        paper_img_path = os.path.join(images_dir, 'paper.jpg')
        scissors_img_path = os.path.join(images_dir, 'scissors.jpg')
        # Move code ('r'/'p'/'s' plus 'i' for initial) -> image path.
        self.img_select = {
            'i': init_img_path,
            'r': rock_img_path,
            'p': paper_img_path,
            's': scissors_img_path
        }
        # (my_point, opponent_point) -> outcome text for the latest round.
        latest_outcome_select = {
            (1, 0): 'Win!',
            (0, 1): 'Lose!',
            (0, 0): 'Tie!',
        }
        previous_outcome_select = {
            (1, 0): 'Won',
            (0, 1): 'Lost',
            (0, 0): 'Tied',
        }
        def add_player_widgets(player_num, opponent_player_num, col):
            # Register one player's column of widgets. The lambdas below
            # close over player_num, a parameter of this function, so each
            # call binds its own player number (no late-binding issue).
            self.labels['player%d_total_score_label' % player_num] = (
                Label(text='Total Score',
                      pos_hint={'x': col, 'y': row1b + 0.05},
                      **label_default_kwargs),  # widget object
                None,  # attribute to sync
                None  # function to apply to state for sync value
            )
            self.labels['player%d_total_score_number' % player_num] = (
                Label(text='0',
                      pos_hint={'x': col, 'y': row1b},
                      **label_default_kwargs),
                'text',
                lambda s: str(s['player%d_total_score' % player_num])
            )
            self.labels['player%d_name' % player_num] = (
                Label(text='Player 1',
                      pos_hint={'x': col, 'y': row2},
                      **label_default_kwargs),
                'text',
                lambda s: str(s['player%d_name' % player_num])
            )
            self.images['player%d_future_move_image' % player_num] = (
                Image(source=init_img_path,
                      pos_hint={'x': col, 'y': row4},
                      size_hint=(0.3, 0.3),
                      ),
                'source',
                lambda s: init_img_path
            )
            self.images['player%d_latest_move_image' % player_num] = (
                Image(source=blank_img_path,
                      pos_hint={'x': col, 'y': row5},
                      size_hint=(0.3, 0.3),
                      ),
                'source',
                lambda s: blank_img_path if s['player%d_latest_move' % player_num] is None else self.img_select[s['player%d_latest_move' % player_num]]
            )
            self.images['player%d_previous_move_image' % player_num] = (
                Image(source=blank_img_path ,
                      pos_hint={'x': col, 'y': row6},
                      size_hint=(0.3, 0.3),
                      ),
                'source',
                lambda s: blank_img_path if s['player%d_previous_move' % player_num] is None else self.img_select[s['player%d_previous_move' % player_num]]
            )
            self.labels['player%d_latest_outcome_label' % player_num] = (
                Label(text='',
                      pos_hint={'x': col + 0.1, 'y': row5},
                      **label_default_kwargs),
                'text',
                lambda s: '' if s['player%d_latest_point' % player_num] is None else str(
                    latest_outcome_select[(s['player%d_latest_point' % player_num], s['player%d_latest_point' % opponent_player_num])])
            )
            self.labels['player%d_previous_outcome_label' % player_num] = (
                Label(text='',
                      pos_hint={'x': col + 0.1, 'y': row6},
                      **label_default_kwargs),
                'text',
                lambda s: '' if s['player%d_previous_point' % player_num] is None else str(
                    previous_outcome_select[(s['player%d_previous_point' % player_num], s['player%d_previous_point' % opponent_player_num])])
            )
        add_player_widgets(1, 2, col1)
        add_player_widgets(2, 1, col3)
        self.labels['RPS_label'] = (
            Label(text='RockPaperScissors',
                  pos_hint={'x': col2, 'y': row1 + 0.05},
                  color=(0, 0, 0, 1),
                  font_size=60,
                  size_hint=(0.3, 0.3),
                  bold=True,
                  ),
            None,
            None
        )
        self.labels['num_rounds_label'] = (
            Label(text='Round No.',
                  pos_hint={'x': col2, 'y': row1b + 0.05},
                  **label_default_kwargs),
            None,
            None
        )
        self.labels['num_rounds_number'] = (
            Label(text='0',
                  pos_hint={'x': col2, 'y': row1b},
                  **label_default_kwargs),
            'text',
            lambda s: str(s['num_rounds'])
        )
        button_default_kwargs = {
            'color': (0, 0, 0, 1),
            'font_size': 40,
            'background_color': (0.8, 0.9, 1.0, 1),
            'size_hint': (0.15, 0.15),
        }
        # Darker background marks the button for the player's latest move.
        button_background_color_active = (0.6, 0.7, 0.8, 1)
        self.buttons['rock'] = (
            Button(text='Rock',
                   **button_default_kwargs,
                   pos_hint={'x': col1, 'y': row7},
                   on_press=self.rock,
                   ),
            'background_color',
            lambda s: button_background_color_active
            if s['player1_latest_move'] == 'r'
            else button_default_kwargs['background_color']
        )
        self.buttons['paper'] = (
            Button(text='Paper',
                   **button_default_kwargs,
                   pos_hint={'x': col1 + 0.15, 'y': row7},
                   on_press=self.paper,
                   ),
            'background_color',
            lambda s: button_background_color_active
            if s['player1_latest_move'] == 'p'
            else button_default_kwargs['background_color']
        )
        self.buttons['scissors'] = (
            Button(text='Scissors',
                   **button_default_kwargs,
                   pos_hint={'x': col1 + 0.3, 'y': row7},
                   on_press=self.scissors,
                   ),
            'background_color',
            lambda s: button_background_color_active
            if s['player1_latest_move'] == 's'
            else button_default_kwargs['background_color']
        )
        self.sync_widgets_with_game_state()
        # add widgets to layout
        for item, _, _ in list(self.labels.values()) + list(self.images.values()) + list(self.buttons.values()):
            self.add_widget(item)
        # white canvas
        Window.clearcolor = (1, 1, 1, 1)
class KiviApp(App):
    """Thin Kivy App that hosts a single RPSLayout as its root widget."""
    title = 'Paper Rock Scissors'
    def __init__(self, engine=None, **kwargs):
        # Build the layout eagerly so callers can reach it before run().
        super().__init__(**kwargs)
        self.layout = RPSLayout(engine=engine)
    def build(self):
        """Return the pre-built layout as the window's root widget."""
        return self.layout
class RPSGUI(HumanInterfaceBase):
    """Human player interface backed by the Kivy GUI application."""
    def __init__(self, engine=None, player1_class=None, player2_class=None):
        super().__init__(engine=engine, player1_class=player1_class, player2_class=player2_class)
        self.app = KiviApp(engine=self.engine)
    def get_move(self):
        """Return the player's most recent choice ('r'/'p'/'s') from the layout."""
        return self.app.layout.get_move()
    def show_error(self, err_msg):
        pass  # TODO: implement this for completeness sake
    def update(self):
        # NOTE(review): RPSLayout defines no update() method (only
        # sync_widgets_with_game_state) — verify this call path is exercised.
        self.app.layout.update()
    def run(self):
        """Start the Kivy main loop (blocks until the window closes)."""
        self.app.run()
|
import socket, time, json,threading
import package.settings.setting
import copy
# 用于将三个字段序列化
def messages_to_json(type, router_table, ip_mapping, receiver):
    """Serialize a routing message (type, table, ip mapping) to JSON.

    Before serializing, nodes whose receiver flag is False (dead nodes)
    are pruned: removed from our own row of the routing table and from
    the table itself.  NOTE: mutates *router_table* in place.
    """
    host = package.settings.setting.HOST
    own_row = router_table[host]
    dead_nodes = []
    for name in receiver:
        if receiver[name] == False:
            if name in own_row:
                # never prune ourselves from our own row
                if name != host:
                    own_row.pop(name)
                    dead_nodes.append(name)
            else:
                dead_nodes.append(name)
    for name in dead_nodes:
        router_table.pop(name, None)
    message = {
        'type': type,
        'router_Table': router_table,
        'ip_Mapping': ip_mapping,
    }
    return json.dumps(message)
# 用于将heartbeat包序列化
def heartbeat_to_json(type):
    """Serialize a heartbeat packet (just its type code) to JSON."""
    return json.dumps({'type': type})
# 只向直接连接的节点发送hello包,heartbear包
def send_hello_heartbeat_single():
    """Broadcast one heartbeat across the /24 and send one hello to each direct neighbour."""
    cfg = package.settings.setting
    direct_nodes = cfg.router_Table[cfg.HOST]
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    my_ip = cfg.ip_Mapping[cfg.HOST]
    # deep copies so pruning inside messages_to_json never corrupts the live tables
    hello = messages_to_json(
        "01",
        copy.deepcopy(cfg.router_Table),
        copy.deepcopy(cfg.ip_Mapping),
        copy.deepcopy(cfg.receiver),
    )
    heartbeat = heartbeat_to_json("11")
    # heartbeat goes to every address 192.168.199.2-253 except ourselves
    for host_num in range(2, 254):
        target = '192.168.199.' + str(host_num)
        if target != my_ip:
            sock.sendto(heartbeat.encode('utf-8'), (target, cfg.Port))
    # hello packets go only to directly connected neighbours
    for node in direct_nodes.keys():
        if cfg.HOST != node:
            sock.sendto(hello.encode('utf-8'), (cfg.ip_Mapping[node], cfg.Port))
    sock.close()
# 每隔5秒发送hello和heartbeat包
def send_hello_hearbeat():
    """Send hello + heartbeat packets every 5 seconds, forever.

    BUG FIX: the original created a brand-new RLock on every loop
    iteration; a lock no other code can see synchronizes nothing.
    The lock is now created once, before the loop.
    NOTE(review): for real mutual exclusion this lock must be shared
    (e.g. module-level) with whatever other code touches the routing
    tables — confirm and hoist it if so.
    """
    rlock = threading.RLock()
    while True:
        with rlock:
            send_hello_heartbeat_single()
        time.sleep(5)
|
import random
import string
from datetime import timedelta
from django.conf import settings
from django.utils.timezone import localtime, now
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from care.facility.models.patient import PatientMobileOTP
from care.utils.sms.sendSMS import sendSMS
def rand_pass(size):
    """Return a random OTP of *size* uppercase letters and digits.

    When SMS sending is disabled (dev/test), a fixed OTP is returned so
    the login flow can be exercised without a phone.

    SECURITY FIX: the OTP is a security token, so it is now drawn from
    the `secrets` module instead of the non-cryptographic `random`.
    """
    if not settings.USE_SMS:
        return "45612"
    import secrets  # local import: stdlib, needed only on this path
    alphabet = string.ascii_uppercase + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(size))
def send_sms(otp, phone_number):
    """Deliver *otp* to *phone_number* by SMS, or echo it when SMS is disabled."""
    if not settings.USE_SMS:
        # dev/test mode: no SMS gateway, just print
        print(otp, phone_number)
        return
    text = (
        f"Open Healthcare Network Patient Management System Login, OTP is {otp} . "
        "Please do not share this Confidential Login Token with anyone else"
    )
    sendSMS(phone_number, text)
class PatientMobileOTPSerializer(serializers.ModelSerializer):
    """Creates a PatientMobileOTP, rate-limited per phone number, and sends the OTP."""

    class Meta:
        model = PatientMobileOTP
        fields = ("phone_number",)

    def create(self, validated_data):
        # Rate-limit: at most OTP_MAX_REPEATS_WINDOW unused OTPs per phone
        # number within the last OTP_REPEAT_WINDOW *hours* (per the comment
        # "n sms per phone number per 6 hour").
        # BUG FIX: timedelta's first positional argument is *days*; the
        # window is documented in hours, so pass hours= explicitly.
        sent_otps = PatientMobileOTP.objects.filter(
            created_date__gte=(
                localtime(now()) - timedelta(hours=settings.OTP_REPEAT_WINDOW)
            ),
            is_used=False,
            phone_number=validated_data["phone_number"],
        )
        if sent_otps.count() >= settings.OTP_MAX_REPEATS_WINDOW:
            raise ValidationError({"phone_number": "Max Retries has exceeded"})
        otp_obj = super().create(validated_data)
        otp = rand_pass(settings.OTP_LENGTH)
        send_sms(otp, otp_obj.phone_number)
        otp_obj.otp = otp
        otp_obj.save()
        return otp_obj
|
# Generated by Django 3.1.7 on 2021-05-30 11:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the old Subject model with Subjects and repoint timetable.subject_name."""

    dependencies = [
        ('settings', '0004_auto_20210530_1458'),
    ]

    operations = [
        # New table settings.Subjects.
        migrations.CreateModel(
            name='Subjects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
            ],
        ),
        # Repoint the FK to the new model before the old one is dropped.
        migrations.AlterField(
            model_name='timetable',
            name='subject_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subject_name', to='settings.subjects'),
        ),
        migrations.DeleteModel(
            name='Subject',
        ),
    ]
|
import re
from utils.Logger import *
from utils.Helper import Helper
from youtubesearchpython import VideosSearch
import re
helper = Helper()
async def YouTubeSearch(songName, maxresults=1):
    """Search YouTube for *songName* (free text or a single YouTube URL).

    Returns a list of dicts (id, title, thumbnails, description, channel,
    duration, views, publish time, link), or None when the input is empty,
    contains multiple URLs, contains a non-YouTube URL, or any error occurs.
    """
    try:
        if songName in ["", None]:
            return None
        urls = helper.getUrls(songName)
        song_url = None
        logInfo(f"Making call to fetch song details for : {songName}")
        if urls is not None and len(urls) > 0:
            if len(urls) > 1:
                # more than one URL is ambiguous — refuse
                return None
            # FIX: raw string — "youtube\." in a plain string is an invalid
            # escape sequence (DeprecationWarning on modern Python)
            elif re.search(r"youtube\.|youtu\.be|youtube-nocookie\.", urls[0]):
                song_url = urls[0]
            else:
                return None
        # make call to fetch using the search query (or the URL if one was given)
        results = VideosSearch(
            (songName if song_url is None else song_url), limit=maxresults).result()
        song_infos = []
        for song in results['result']:
            channel = song.get("channel")
            channelName = channel['name'] if channel is not None else ""
            thumbs = song.get("thumbnails")
            thumbnails = [t['url'] for t in thumbs] if thumbs else []
            snippet = song.get("descriptionSnippet")
            description = snippet[0]['text'] if snippet else ""
            song_infos.append({
                'id': song['id'], 'thumbnails': thumbnails,
                'title': song['title'], 'long_desc': description, 'channel': channelName, 'duration': song['duration'],
                'views': song['viewCount']['text'], 'publish_time': song['publishedTime'], 'link': song['link']
            })
        return song_infos
    except Exception as ex:
        # FIX: log-message typo ("serching" -> "searching")
        logException(f"Error while searching for youtube songs : {ex}")
        return None
|
from django.db import models
from paciente.models import MyUser
from django.conf import settings
from datetime import datetime
class Humor(models.Model):
    """Daily mood record for a patient.

    FIX: `blank` expects a bool; the string "true" only worked because any
    non-empty string is truthy.
    """
    email = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    dia = models.DateField(auto_now=True)  # set automatically on every save
    sentimento_comeco_do_dia = models.TextField(max_length=30, blank=True)
    sentimento_fim_do_dia = models.TextField(max_length=30, blank=True)
    # NOTE(review): blank=True without null=True on an IntegerField will
    # fail at the DB layer if left empty — confirm whether null=True is wanted.
    ansiedade = models.IntegerField(blank=True)
class Remedios(models.Model):
    """Medication schedule entry for a patient.

    FIX: `blank` expects a bool, not the string "true" (truthy by accident).
    """
    email = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    dia = models.DateField(auto_now=True)  # set automatically on every save
    nome_do_remedio = models.TextField(max_length=100, blank=True)
    frequencia_semanal_do_remedio = models.TextField(max_length=100, blank=True)
    # NOTE(review): non-text fields with blank=True likely also need null=True.
    frequencia_diaria_do_remedio = models.IntegerField(blank=True)
    horario_primeira_dose = models.TimeField(blank=True)
    data_de_inicio = models.DateField(blank=True)
    dosagem = models.FloatField(blank=True)
class Rigidez(models.Model):
    """Daily morning-stiffness score for a patient.

    FIX: `blank` expects a bool, not the string "true" (truthy by accident).
    """
    email = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    dia = models.DateField(auto_now=True)  # set automatically on every save
    rigidez_matinal = models.IntegerField(blank=True, default=0)
class Dor(models.Model):
    """Daily per-joint pain scores for a patient.

    FIX: `blank` expects a bool, not the string "true" (truthy by accident).
    """
    email = models.ForeignKey(MyUser, on_delete=models.CASCADE)
    dia = models.DateField(auto_now=True)  # set automatically on every save
    maos = models.IntegerField(blank=True, default=0)
    pes = models.IntegerField(blank=True, default=0)
    punhos = models.IntegerField(blank=True, default=0)
    cotovelos = models.IntegerField(blank=True, default=0)
    joelhos = models.IntegerField(blank=True, default=0)
    tornozelos = models.IntegerField(blank=True, default=0)
    ombros = models.IntegerField(blank=True, default=0)
# Create your models here.
|
# Import libraries
import numpy as np
from flask import Flask, request, jsonify
import pickle
from sklearn.externals import joblib
app = Flask(__name__)
# Load the trained model once at startup.
# FIX: joblib.load accepts a path directly; wrapping it in open(...) as
# before leaked the file handle.
model = joblib.load('./linreg_model.pkl')
@app.route('/api', methods=['POST'])
def predict():
    """Predict from a JSON payload containing the 3 settings and 21 sensors.

    Expects keys setting1-3 and s1-s21; returns the prediction as JSON.
    """
    # Get the data from the POST request.
    data = request.get_json(force=True)
    # BUG FIX: the original did data['setting1','setting2',...], which
    # indexes the dict with a single tuple key and raises KeyError.
    # Build the feature vector one key at a time, in model input order.
    feature_names = ['setting1', 'setting2', 'setting3'] + ['s%d' % i for i in range(1, 22)]
    features = [data[name] for name in feature_names]
    # Make prediction using model loaded from disk as per the data.
    prediction = model.predict([features])
    # Take the first value of prediction; float() makes it JSON-serializable
    # even when the model returns a numpy scalar.
    output = prediction[0]
    return jsonify(float(output))
if __name__ == '__main__':
    try:
        app.run(port=5000, debug=True)
    # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception so Ctrl-C and normal interpreter exit still work.
    except Exception:
        print("Server is exited unexpectedly. Please contact server admin.")
import scrapy
import search_url
class AmazonSpider(scrapy.Spider):
    """Scrape product titles and prices from Amazon search result pages,
    following the "next page" link until exhausted.

    NOTE: uses `raw_input`, so this file targets Python 2.
    """
    name = "products"
    # Seed URL is built interactively at import time from a user-supplied
    # keyword — NOTE(review): prompting in the class body runs on import;
    # confirm this is intended.
    start_urls = []
    start_urls.append(search_url.get_search_url(raw_input("Enter the keyword\n")))
    def parse(self, response):
        # Each result list yields one item; extract() returns a list of
        # all matching titles/prices within that list.
        for product in response.css("ul.s-result-list"):
            yield {
                'product': product.css('a.a-link-normal::attr("title")').extract(),
                'price': product.css('span.a-color-price::text').extract(),
                # 'price': product.xpath('span/small/text()').extract_first(),
            }
        # Paginate via the "Next" control, if present.
        next_page = response.xpath('//*[@id="pagn"]/span[7]').css('a.pagnNext::attr("href")').extract_first()
        if next_page is not None:
            yield response.follow(next_page, self.parse)
|
import requests
url = 'http://127.0.0.1:5000/getsetu'
def getSetu():
    """GET the setu endpoint and return the decoded JSON payload."""
    response = requests.get(url)
    return response.json()
import sys
import os
import csv
import argparse
from datetime import timedelta,date,datetime
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from py_db import db
db = db('personal')
# Credentials file: one "username,password" row per account, used for SMTP login.
key_file = os.getcwd()+"/un_pw.csv"
key_list = {}
# 'rU' is the Python 2 universal-newlines mode (this script targets py2).
with open(key_file, 'rU') as f:
    mycsv = csv.reader(f)
    for row in mycsv:
        un, pw = row
        key_list[un]=pw
def generate_body(author, topic, medium, keyword, count, to_address):
    """Select up to *count* random quotes matching the filters and email them.

    All filter arguments are substring-matched via SQL LIKE; empty strings
    match everything.
    """
    sub = 'Daily Quotes [%s]' % (str(date.today()))
    # NOTE(review): SQL is assembled by raw string interpolation — a quote
    # character or '%' in any argument breaks the query (injection risk).
    # Switch to bound parameters if the db helper supports them.
    quote_lookup = """SELECT
    QuoteID, estimated_date, author, topic, medium, source, source_location, quote
    FROM quotes
    WHERE 1
    AND (author LIKE "%%%s%%" AND topic LIKE "%%%s%%" AND medium LIKE "%%%s%%" AND quote LIKE "%%%s%%")
    ORDER BY rand()
    LIMIT %s
    ;""" % (author, topic, medium, keyword, count)
    res = db.query(quote_lookup)
    mesg = ''
    for i, row in enumerate(res):
        _id, _date, _author, _topic, _medium, _source, _sourceLocation, _quote = row
        # Header line: quote index, id, and total count.
        mesg += "\n\t" + "Quote #" + str(i+1) + " (QuoteID " + str(_id) + ") of " + str(len(res)) + ":"
        mesg += "\n\tTopic: " + str(_topic)
        # Prefer the specific source; fall back to the medium.
        src = ""
        if _source is None:
            src = _medium
        else:
            src = _source
        mesg += "\n\t" + "On " + str(_date) + ", " + _author + " says via " + src + ":\n\"\"\""
        mesg += "\n" + _quote
        mesg += "\n\"\"\"\n"
        mesg += "\n------------------------------------------------------------------------------------\n"
    email(sub, mesg, to_address)
def email(sub, mesg, to_address):
    """Send *mesg* with subject *sub* to *to_address* via Gmail SMTP,
    BCC-ing the sending account.

    The SMTP password is looked up in the module-level key_list.
    """
    sender = "connor.reed.92@gmail.com"
    message = MIMEMultipart()
    message['From'] = sender
    message['To'] = to_address
    message['BCC'] = sender
    message['Subject'] = sub
    message.attach(MIMEText(mesg, 'plain'))
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(sender, key_list.get(sender))
    server.sendmail(sender, to_address, message.as_string())
    server.quit()
if __name__ == "__main__":
    # All filters default to '' (match everything); count defaults to 1 quote.
    parser = argparse.ArgumentParser()
    for flag in ('--author', '--topic', '--medium', '--keyword'):
        parser.add_argument(flag, default='')
    parser.add_argument('--count', default=1)
    parser.add_argument('--to_address', default='connor.reed.92@gmail.com')
    opts = parser.parse_args()
    generate_body(opts.author, opts.topic, opts.medium, opts.keyword, opts.count, opts.to_address)
|
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from apiclient import errors
# If modifying these scopes, delete the file token.json.
class Gmail:
    """Minimal Gmail v1 API helper: OAuth bootstrap, labels, and filters."""

    # OAuth scopes. If modifying these scopes, delete the stored token files.
    SCOPES = 'https://www.googleapis.com/auth/gmail.modify'
    SCOPES_FILTER = 'https://www.googleapis.com/auth/gmail.settings.basic'

    def __init__(self):
        """Shows basic usage of the Gmail API.
        Lists the user's Gmail labels.
        """
        # The file token.json stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the
        # first time.
        store = file.Storage('../token.json')
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets('../credentials.json', self.SCOPES)
            creds = tools.run_flow(flow, store)
        self.service = build('gmail', 'v1', http=creds.authorize(Http()))

    def auth_settings(self):
        # Re-authenticate with the settings scope (separate token file) and
        # print the account's current filter list.
        store = file.Storage('../token_auth.json')
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets('../credentials_auth.json', self.SCOPES_FILTER)
            creds = tools.run_flow(flow, store)
        self.service = build('gmail', 'v1', http=creds.authorize(Http()))
        print(self.service.users().settings().filters().list(userId='me').execute())

    def get_labels(self):
        """Return (and print) the account's label list; empty list if none."""
        # Call the Gmail API
        results = self.service.users().labels().list(userId='me').execute()
        labels = results.get('labels', [])
        if not labels:
            print('No labels found.')
        else:
            print(labels)
        return labels

    def createLabel(self, label_name=''):
        """Create a label named *label_name* and return the API response."""
        label_object = self.MakeLabel(label_name=label_name)
        label = self.service.users().labels().create(userId='me', body=label_object).execute()
        return label

    def MakeLabel(self, label_name, mlv='show', llv='labelShow'):
        """Create Label object."""
        label = {'messageListVisibility': mlv,
                 'name': label_name,
                 'labelListVisibility': llv}
        return label

    def deletelabel(self, lable_name):
        # Delete the label whose name equals *lable_name*.
        # NOTE(review): if no label matches, no delete is issued; if the
        # original intent was to delete after the loop, a missing name would
        # have sent an empty id — confirm intended nesting.
        label_list = self.get_labels()
        lable_id = ''
        for label in label_list:
            if label['name'] == lable_name:
                lable_id = label['id']
                self.service.users().labels().delete(userId='me', id=lable_id).execute()

    def makeFilter(self, sender, label):
        # Build a filter object: mail from *sender* gets label id *label*.
        # NOTE(review): "id" is assigned by the API on creation; the literal
        # 'test' here is presumably ignored — confirm against the API docs.
        filter = {
            "id": 'test',
            "criteria": {
                "from": sender,
            },
            "action": {
                "addLabelIds": [
                    label
                ]
            }
        }
        return filter

    def createFilter(self, sender, labdel):
        """Register a from-sender -> add-label filter; return the API response."""
        filterobj = self.makeFilter(sender, labdel)
        filter = self.service.users().settings().filters().create(userId='me', body=filterobj).execute()
        return filter
# Example usage: authenticate and dump the current filter list.
a = Gmail()
#a.get_labels()
a.auth_settings()
#a.createFilter("pikin@aeroidea.ru", 'Label_2')
#a.createLabel(label_name='test1234')
#a.deletelabel('test1234')
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
# load data
data = np.loadtxt(open("/home/chs/Desktop/Sonar/Data/drape_result/Result_0.csv"), delimiter=",")
''' Calibrate delta_z '''
# Init: quadratic correction C(a) = b0 + b1*a + b2*a^2, fitted by gradient descent.
beta = np.ones(3) # c(a) = b0 + b1a +b2a^2
alpha = 0.2 # learning rate
# NOTE(review): tol_l is never used below — dead tolerance setting?
tol_l = 0.01
# Normalize data: scale column 0 (the 'a' variable) into [0, 1].
max_x = data[:,0].max()
data[:,0] /= max_x
# prepare averaged data for loss function: append a column holding, for each
# row, the mean of column 4 over all rows sharing the same first four columns.
data = np.hstack((data, np.zeros((len(data),1))))
for index, row in enumerate(data):
    if index %1000 ==0:
        print("row %d/%d done" % (index, len(data)))
    # already filled as part of an earlier matching group
    if row[-1] != 0:
        continue
    key = np.where((data[:,0:4]==row[0:4]).all(1))[0]
    value = data[key,4]
    mean = np.mean(value)
    data[key, 5] = mean
print('step1 done', max_x)
#data = np.loadtxt(open("/home/chs/Desktop/Sonar/Data/drape_result/Result_1.csv"), delimiter=",")
#np.savetxt('/home/chs/Desktop/Sonar/Data/drape_result/Result_1.csv', data, delimiter=',')
# Working views: predictor, raw values, and per-group means.
a = data[:, 0]
y = data[:, 4]
y_ave = data[:, 5]
# compute grad
def cal_grad(a, y, y_ave, beta):
    """Gradient of the mean squared calibration residual w.r.t. beta.

    The model applies C(a) = b0 + b1*a + b2*a^2 multiplicatively to y,
    with per-group mean y_ave as the target.
    """
    residual = (beta[0] + beta[1] * a + beta[2] * a * a) * y - y_ave
    weighted = y * residual
    return np.array([np.mean(weighted),
                     np.mean(a * weighted),
                     np.mean(a * a * weighted)])
def update_beta(beta, alpha, grad):
    """One gradient-descent step: move beta against the gradient."""
    return beta - alpha * grad
def rmse(a, y, y_ave, beta):
    """Root-mean-square error of the calibrated values C(a)*y against y_ave."""
    residual = (beta[0] + beta[1] * a + beta[2] * a * a) * y - y_ave
    return np.sqrt(np.mean(residual ** 2))
# first round (kept separate so loss/loss_new exist before the loop prints diffs)
grad = cal_grad(a, y, y_ave, beta)
loss = rmse(a, y, y_ave, beta)
beta = update_beta(beta, alpha, grad)
loss_new = rmse(a, y, y_ave, beta)
# remaining fixed-iteration gradient-descent rounds
for i in range(1, 1000):
    grad = cal_grad(a, y, y_ave, beta)
    loss = rmse(a, y, y_ave, beta)
    beta = update_beta(beta, alpha, grad)
    loss_new = rmse(a, y, y_ave, beta)
    print('Round %s Diff RMSE %s, ABS RMSE %s'%(i, abs(loss_new - loss), loss_new), beta)
# Undo the normalization of column 0 to express beta in original units.
# BUG FIX: with a_norm = a/max_x and C(a) = b0 + b1*a + b2*a^2, b0 is
# unscaled, b1 scales by max_x and b2 by max_x**2 — the original vector
# [max_x**2, max_x, 1] had that order reversed.
scale = np.array([1, max_x, max_x**2])
beta_real = beta/scale
print('Done with rmse %s and beta'% abs(loss_new), beta_real)
# compute calibration result: apply C(a) (in normalized units, matching the
# normalized beta) to each duplicate group, store the mean, drop duplicates.
index = 0
while index < len(data):
    if index %1000 ==0:
        print("row %d/%d done" % (index, len(data)))
    row = data[index]
    key = np.where((data[:,1:4]==row[1:4]).all(1))[0]
    value = data[key,4]
    a = data[key,0]
    Ca = beta[0] + beta[1]*a + beta[2]*a*a
    y = Ca*value
    data[index,4] = np.mean(y)
    data = np.delete(data, key[1:],axis=0)
    index += 1
# residual RMSE of the calibrated values against the per-group means
a = data[:, 0]
y = data[:, 4]
y_ave = data[:, 5]
catch = y - y_ave
squared_err = catch*catch
res = np.sqrt(np.mean(squared_err))
print(res)
np.savetxt("/home/chs/Desktop/Sonar/Data/drape_result/Result_1.csv", data, delimiter=',')
print("???")
# Generated by Django 3.1.7 on 2021-04-08 09:08
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the user FK from the Society model."""

    dependencies = [
        ('app_matrice', '0010_society_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='society',
            name='user',
        ),
    ]
|
import os
from typing import Tuple, Callable
import gym
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorlib import rl
from tensorlib.rl.utils.replay.replay import TransitionReplayBuffer
from tensorlib.rl.utils.replay.sampler import StepSampler
from tensorlib.utils.logx import EpochLogger
from tensorlib.utils.timer import Timer
from tensorlib.utils.weights import hard_update, soft_update
from tqdm.auto import tqdm
ds = tfp.distributions
class Agent(rl.BaseAgent):
    """Soft Actor-Critic agent for both discrete and continuous action spaces.

    Holds a stochastic policy network plus twin Q-networks with a soft-updated
    target copy, and (optionally) a learned temperature alpha.
    """

    def __init__(self,
                 nets: Tuple[tf.keras.Model, Callable],
                 action_space,
                 learning_rate=3e-4,
                 no_automatic_alpha=False,
                 alpha=1.0,
                 tau=5e-3,
                 gamma=0.99,
                 **kwargs,
                 ):
        # nets = (policy network, factory for the Q network); the factory is
        # called twice so the online and target Q networks are distinct models.
        self.policy_net, q_network_maker = nets
        self.q_network = q_network_maker()
        self.target_q_network = q_network_maker()
        # self.target_q_network = tf.keras.models.clone_model(self.q_network)
        # NOTE(review): `lr=` is the legacy Keras optimizer kwarg (newer TF
        # uses `learning_rate=`) — confirm against the pinned TF version.
        self.policy_optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
        self.q_optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
        self.discrete = isinstance(action_space, gym.spaces.Discrete)
        self.automatic_alpha = not no_automatic_alpha
        # start the target network identical to the online network
        hard_update(self.target_q_network, self.q_network)
        if self.automatic_alpha:
            # entropy target for temperature tuning: slightly below max entropy
            # for discrete spaces, -|A| for continuous spaces
            if self.discrete:
                target_entropy = -np.log(action_space.n) * 0.95
            else:
                target_entropy = -np.prod(action_space.shape)
            # alpha is optimized in log space so it stays positive
            self.log_alpha_tensor = tf.Variable(initial_value=0., dtype=tf.float32)
            self.alpha_optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
            self.target_entropy = target_entropy
            self.get_alpha = tf.function(func=lambda: tf.exp(self.log_alpha_tensor))
        else:
            self.get_alpha = lambda: alpha
        self.tau = tau       # soft-update interpolation factor
        self.gamma = gamma   # discount factor

    def update_target(self):
        # Polyak-average the online Q weights into the target network.
        soft_update(self.target_q_network, self.q_network, self.tau)

    @tf.function
    def update(self, obs, actions, next_obs, done, reward):
        """ Sample a mini-batch from replay buffer and update the network
        Args:
            obs: (batch_size, ob_dim)
            actions: (batch_size, action_dim)
            next_obs: (batch_size, ob_dim)
            done: (batch_size,)
            reward: (batch_size,)
        Returns: None
        """
        # printed once per retrace of the tf.function graph
        print('Building SAC update graph with batch size {}'.format(obs.shape[0]))
        # soft Bellman target: min target-Q of the next sampled action, minus
        # the entropy term; (1 - done) zeroes the bootstrap on terminal steps
        next_action, next_action_log_prob = self.policy_net.predict_action_log_prob(next_obs)
        target_q_values = self.target_q_network.predict_min_value_with_action(next_obs, next_action) \
                          - self.get_alpha() * next_action_log_prob
        q_target = reward + self.gamma * (1.0 - done) * target_q_values
        # q loss
        with tf.GradientTape() as q_tape, tf.GradientTape() as policy_tape:
            q_values, q_values2 = self.q_network.predict_value_with_action(obs, actions)
            q_values_loss = tf.keras.losses.mse(q_values, q_target) + tf.keras.losses.mse(q_values2, q_target)
            # policy loss
            if self.discrete:
                # for discrete action space, we can directly compute kl divergence analytically without sampling
                action_distribution = self.policy_net.predict_action_distribution(obs)
                q_values_min = self.q_network.predict_min_value(obs)  # (batch_size, ac_dim)
                target_distribution = ds.Categorical(logits=q_values_min, dtype=tf.int64)
                policy_loss = tf.reduce_mean(ds.kl_divergence(action_distribution, target_distribution))
                log_prob = -action_distribution.entropy()
            else:
                # continuous: reparameterized sample, maximize Q minus entropy penalty
                action, log_prob = self.policy_net.predict_action_log_prob(obs)
                q_values_pi_min = self.q_network.predict_min_value_with_action(obs, action)
                policy_loss = tf.reduce_mean(log_prob * self.get_alpha() - q_values_pi_min)
        q_gradients = q_tape.gradient(q_values_loss, self.q_network.trainable_variables)
        policy_gradients = policy_tape.gradient(policy_loss, self.policy_net.trainable_variables)
        self.q_optimizer.apply_gradients(zip(q_gradients, self.q_network.trainable_variables))
        self.policy_optimizer.apply_gradients(zip(policy_gradients, self.policy_net.trainable_variables))
        if self.automatic_alpha:
            # temperature update: push expected log-prob toward target entropy
            with tf.GradientTape() as alpha_tape:
                alpha_loss = -tf.reduce_mean(self.log_alpha_tensor * (log_prob + self.target_entropy))
            alpha_gradient = alpha_tape.gradient(alpha_loss, self.log_alpha_tensor)
            self.alpha_optimizer.apply_gradients(zip([alpha_gradient], [self.log_alpha_tensor]))

    def predict_batch(self, states, deterministic=False):
        # Batched action selection; deterministic uses the policy mode,
        # otherwise actions are sampled from the policy distribution.
        states = tf.convert_to_tensor(states, dtype=tf.float32)
        if deterministic:
            return self.policy_net.select_action(states).numpy()
        else:
            return self.policy_net.sample_action(states).numpy()

    def save_checkpoint(self, checkpoint_path):
        # Only policy weights are checkpointed (enough for inference).
        print('Saving checkpoint to {}'.format(checkpoint_path))
        self.policy_net.save_weights(checkpoint_path, save_format='h5')

    def load_checkpoint(self, checkpoint_path):
        print('Load checkpoint from {}'.format(checkpoint_path))
        self.policy_net.load_weights(checkpoint_path)

    def train(self, env, exp_name, num_epochs, epoch_length, prefill_steps,
              replay_pool_size, batch_size, logdir=None, checkpoint_path=None,
              **kwargs):
        """Off-policy training loop: sample transitions, update networks each
        step, and checkpoint the policy whenever average reward improves."""
        logger = EpochLogger(output_dir=logdir, exp_name=exp_name)
        if checkpoint_path is None:
            # derive a default checkpoint name from the env's spec id
            dummy_env = env.env_fns[0]()
            checkpoint_path = os.path.join(logger.get_output_dir(), dummy_env.spec.id)
            del dummy_env
        sampler = StepSampler(prefill_steps=prefill_steps, logger=logger)
        replay_pool = TransitionReplayBuffer(
            capacity=replay_pool_size,
            obs_shape=env.single_observation_space.shape,
            obs_dtype=env.single_observation_space.dtype,
            ac_shape=env.single_action_space.shape,
            ac_dtype=env.single_action_space.dtype,
        )
        sampler.initialize(env, self, replay_pool)
        best_mean_episode_reward = -np.inf
        timer = Timer()
        # NOTE(review): 'prefill_steps // env.num_envs * prefill_steps' looks
        # like it was meant to be '... * env.num_envs' — confirm intent.
        total_timesteps = prefill_steps // env.num_envs * prefill_steps
        timer.reset()
        for epoch in range(num_epochs):
            for _ in tqdm(range(epoch_length), desc='Epoch {}/{}'.format(epoch + 1, num_epochs)):
                sampler.sample()
                obs, actions, next_obs, reward, done = replay_pool.sample(batch_size)
                obs = tf.convert_to_tensor(obs)
                actions = tf.convert_to_tensor(actions)
                next_obs = tf.convert_to_tensor(next_obs)
                done = tf.cast(tf.convert_to_tensor(done), tf.float32)
                reward = tf.convert_to_tensor(reward)
                self.update(obs=obs, actions=actions, next_obs=next_obs, done=done, reward=reward)
                self.update_target()
            # evaluate current policy using deterministic version.
            total_timesteps += epoch_length * env.num_envs
            # save best model
            avg_return = logger.get_stats('EpReward')[0]
            if avg_return > best_mean_episode_reward:
                best_mean_episode_reward = avg_return
                if checkpoint_path:
                    self.save_checkpoint(checkpoint_path)
            # logging
            logger.log_tabular('Time Elapsed', timer.get_time_elapsed())
            logger.log_tabular('EpReward', with_min_and_max=True)
            logger.log_tabular('EpLength', average_only=True, with_min_and_max=True)
            logger.log_tabular('TotalSteps', total_timesteps)
            logger.log_tabular('TotalEpisodes', sampler.get_total_episode())
            logger.log_tabular('BestAvgReward', best_mean_episode_reward)
            logger.log_tabular('Alpha', self.get_alpha())
            logger.log_tabular('Replay Size', len(replay_pool))
            logger.dump_tabular()
|
"""
Рассмотрим все целочисленные комбинации a^b для 2 ≤ a ≤ 5 и 2 ≤ b ≤ 5:
2^2=4, 2^3=8, 2^4=16, 2^5=32
3^2=9, 3^3=27, 3^4=81, 3^5=243
4^2=16, 4^3=64, 4^4=256, 4^5=1024
5^2=25, 5^3=125, 5^4=625, 5^5=3125
Если их расположить в порядке возрастания, исключив повторения, мы
получим следующую последовательность из 15 различных членов:
4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
Сколько различных членов имеет последовательность a^b для
2 ≤ a ≤ 100 и 2 ≤ b ≤ 100?
"""
# Collect every distinct value of a**b; the set removes duplicates
# (e.g. 2**4 == 4**2 == 16).
res = {a ** b for a in range(2, 101) for b in range(2, 101)}
print(len(res))
|
def hello():
    """Print a fixed greeting."""
    print('Hello people!!')


hello()
def greetings(name, salute):
    """Greet *name* with the given time-of-day *salute*."""
    message = f'Good {salute} Mr. {name}'
    print(message)


greetings('Benedict', 'morning')
greetings('Alabi', 'afternoon')
greetings('Tope', 'night')
def add(num1, num2):
    """Print the sum of the two numbers."""
    print(num1 + num2)


add(5, 6)
add(15, 7)
add(11, 10)
# Create a function that will print out these
# My name is Benedict, I am 42years old and Fair in Complexion
# Where Benedict, 42 and Fair are arguments of the function
# print for three different persons
def person(name, age, complexion):
    """Print a one-line self-description built from the arguments."""
    description = f'My name is {name}, I am {age}years old and {complexion} in Complexion'
    print(description)


person('Benedict', 42, 'Fair')
person('Alabi', 52, 'Dark')
# createa a function to determine the area of a triangle
# area = 1/2 * base * heigth
# call for three different values
# (base=200, heigth=100),
# (base=300, heigth=200),
# (base=400, heigth=300)
def area_triangle(base, heigth):
    """Print the area of a triangle: base * height / 2."""
    print(base * heigth / 2)


area_triangle(200, 100)
area_triangle(300, 200)
area_triangle(400, 300)
def print_even(number):
    """Print every even integer from 0 through *number*, inclusive."""
    for even in range(0, number + 1, 2):
        print(even)
print('FOR TWENTY')
print_even(20)
print('FOR THIRTY')
# BUG FIX: was print_even(20), contradicting the 'FOR THIRTY' heading.
print_even(30)
def multiplication(number, start, stop):
    """Print the times table for *number* from *start* through *stop* inclusive."""
    for factor in range(start, stop + 1):
        result = number * factor
        print(f'{number} X {factor} = {result}')


multiplication(2, 3, 12)
multiplication(4, 2, 10)
|
#Extract_Hydro_Params.py
#Ryan Spies
#ryan.spies@amec.com
#AMEC
#Description: extracts SAC-SMA/UNITHG/LAG-K parameters values
#from CHPS configuration .xml files located in the Config->ModuleConfigFiles
#directory and ouputs a .csv file with all parameters
# NOTE: this script differs from the extract_hydro_params_XXRFC_sa.py by using
# the output UH and SACSMA calibration mods in the CHPS CALB SA version which
# has a slightly different format than the original SA .xml modulparfiles
#-----------------------------------------------------------------------------
########################## START USER INPUT SECTION ##########################
# User configuration: which RFC's files to process and where they live.
RFC = 'SERFC'
param_source = 'final_calb' # choices: 'final_calb'
uh_plots = 'off' # choices: 'on' or 'off' to create a .png figure for each basin
#!!!!!! input directory: enter location of ModuleParFiles directory below ->
folderPath = 'P:\\NWS\\Calibration_NWS\\' + RFC + '\\Working_Calib_Files'
#!!!!!! output directory: enter ouput directory for .csv files below ->
csv_file_out = 'P:\\NWS\\Python\\Extract_Hydro_Params\\' + RFC + '\\Params_' + param_source
########################## END USER INPUT SECTION ############################
#-----------------------------------------------------------------------------
#import script modules
import os
import re
# Plotting libraries are only needed when UH figures are requested.
if uh_plots == 'on':
    import numpy
    import matplotlib.pyplot as plt
    from matplotlib.ticker import AutoMinorLocator
    from matplotlib.ticker import MultipleLocator
working_dir = os.getcwd()
# NOTE: Python 2 print statements — this script targets Python 2.
print 'Script is Running...'
# Offset (in lines) from a parameter's name line to the line holding its value.
sac_line = 1
# Output CSV: one row per basin, fixed column order matching the header below.
csv_file = open(csv_file_out +'\\' + RFC + '_SACSMA_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN,REXP,LZPK,LZFPM,PXADJ,RCI,PFREE,ZPERC,RIVA,MAPE_Input,PEADJ,LZTWM,'\
    'RSERV,ADIMP,UZK,SIDE,LZFSM,LZSK,SMZC,UZTWM,UZFWM,PCTIM,EFC,'\
    'JAN_ET,FEB_ET,MAR_ET,APR_ET,MAY_ET,JUN_ET,JUL_ET,AUG_ET,SEP_ET,OCT_ET,NOV_ET,DEC_ET' + '\n')
#SAC-SMA SECTION--------------------------------------------------------------
#loop through SACSMA files in folderPath
print 'Processing SACSMA parameters...'
for filename in os.listdir(folderPath + '\\SAC_SMA\\'):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('SACSMA_', '')
name = name.replace('_UpdateStates.xml', '')
#print name
csv_file.write(name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(folderPath + '\\SAC_SMA\\' + filename, 'r')
txt_file = open(working_dir +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(working_dir +'\\' + name + '.txt', 'r')
output_file = open(working_dir +'\\' + name + '_SACSMA_Params.txt', 'w')
#Write data headers
output_file.write('PARAMETER,VALUE' + '\n')
###REXP
#Find line number with REXP value
#Line number is saved when loop breaks
line_num = 0
for line in txt_file:
line_num += 1
if 'REXP' in line:
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('REXP,' + line + '\n')
###LZPK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZPK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZPK,' + line + '\n')
###LZFPM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZFPM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZFPM,' + line + '\n')
###PXADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PXADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PXADJ,' + line + '\n')
###RUNOFF_COMPONENT_INTERVAL
txt_file.seek(0)
line_num=0
if 'RUNOFF_COMPONENT_INTERVAL' not in txt_file.read():
csv_file.write('N/A' + ',')
output_file.write('RUNOFF_COMPONENT_INTERVAL,' + 'N/A' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'RUNOFF_COMPONENT_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RUNOFF_COMPONENT_INTERVAL,' + line + '\n')
###PFREE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PFREE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PFREE,' + line + '\n')
###ZPERC
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'ZPERC' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ZPERC,' + line + '\n')
###RIVA
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'RIVA' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RIVA,' + line + '\n')
###MAPE_Input
txt_file.seek(0)
line_num=0
if 'MAPE' not in txt_file.read():
csv_file.write('FALSE' + ',')
output_file.write('MAPE_Input,' + 'FALSE' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'MAPE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
if 'true' in line or 'TRUE' in line or 'True' in line:
#line = 'TRUE'
csv_file.write('TRUE' + ',')
output_file.write('MAPE_Input,' + 'TRUE' + '\n')
else:
for line in section:
if 'false' in line or 'FALSE' in line or 'False' in line:
#line = 'TRUE'
csv_file.write('FALSE' + ',')
output_file.write('MAPE_Input,' + 'FALSE' + '\n')
###PEADJ
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PEADJ' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PEADJ,' + line + '\n')
###LZTWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZTWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZTWM,' + line + '\n')
###RSERV
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'RSERV' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('RSERV,' + line + '\n')
###ADIMP
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'ADIMP' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('ADIMP,' + line + '\n')
###UZK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZK,' + line + '\n')
###SIDE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'SIDE' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SIDE,' + line + '\n')
###LZFSM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZFSM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZFSM,' + line + '\n')
###LZSK
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'LZSK' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LZSK,' + line + '\n')
###SMZC_INTERVAL
txt_file.seek(0)
line_num=0
if 'SMZC_INTERVAL' not in txt_file.read():
csv_file.write('N/A' + ',')
output_file.write('SMZC_INTERVAL,' + 'N/A' + '\n')
else:
txt_file.seek(0)
for line in txt_file:
line_num += 1
if 'SMZC_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SMZC_INTERVAL,' + line + '\n')
###UZTWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZTWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZTWM,' + line + '\n')
###UZFWM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'UZFWM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('UZFWM,' + line + '\n')
###PCTIM
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'PCTIM' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('PCTIM,' + line + '\n')
###EFC
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'EFC' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num+sac_line:line_num+(sac_line+1)]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('EFC,' + line + '\n')
###ET_DEMAND_CURVE
txt_file.seek(0)
line_num=0
for line in txt_file:
line_num += 1
if 'row A' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num-1:line_num]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JAN_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('FEB_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+1:line_num+2]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MAR_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+2:line_num+3]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('APR_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+3:line_num+4]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('MAY_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+4:line_num+5]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JUN_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+5:line_num+6]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JUL_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+6:line_num+7]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('AUG_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+7:line_num+8]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('SEP_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+8:line_num+9]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('OCT_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+9:line_num+10]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('NOV_ET,' + line + '\n')
txt_file.seek(0)
section = txt_file.readlines()[line_num+10:line_num+11]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('DEC_ET,' + line + '\n')
txt_file.close()
output_file.close()
#Delete temporary .txt file holding .xml contents
os.remove(working_dir +'\\' + name + '.txt')
os.remove(working_dir +'\\' + name + '_SACSMA_Params.txt')
csv_file.write('\n')
csv_file.close()
#UNIT HG SECTION---------------------------------------------------------------
#loop through UNITHG .xlm files in folderPath
print 'Processing UH parameters...'
csv_file = open(csv_file_out +'\\' + RFC + '_UHG_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN, AREA (mi2),')
t = 0
while t < 600:
csv_file.write(str(t) + ',')
t += 6
csv_file.write('\n')
for filename in os.listdir(folderPath + '\\UH\\'):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('UNITHG_', '')
name = name.replace('_UpdateStates.xml', '')
#print name
csv_file.write(name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(folderPath + '\\UH\\' + filename, 'r')
txt_file = open(working_dir +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(working_dir +'\\' + name + '.txt', 'r')
output_file = open(working_dir +'\\' + name + '_UNITHG_Params.txt', 'w')
#Write data headers
output_file.write('PARAMETER,VALUE' + '\n')
###UHG_DURATION
#Find line number with UHG_DURATION value
#Line number is saved when loop breaks
line_num = 0
for line in txt_file:
line_num += 1
if 'UHG_DURATION' in line:
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
output_file.write('UHG_DURATION,' + line + '\n')
###UHG_INTERVAL
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'UHG_INTERVAL' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
output_file.write('UHG_INTERVAL,' + line + '\n')
###DRAINAGE_AREA
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'DRAINAGE_AREA' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
area = line
csv_file.write(area + ',')
output_file.write('DRAINAGE_AREA,' + line + '\n')
###CONSTANT_BASE_FLOW
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'CONSTANT_BASE_FLOW' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
output_file.write('CONSTANT_BASE_FLOW,' + line + '\n')
###UHG_ORDINATES
output_file.write ('\n' + 'UHG_ORDINATES' + '\n')
output_file.write('0,0' + '\n')
txt_file.seek(0)
UHG_time = []
UHG_flow = []
#Set time 0 values
ordinate = 0
flow = 0
csv_file.write('0' + ',')
UHG_time.append(ordinate)
UHG_flow.append(ordinate)
for line in txt_file:
if 'row A' in line:
ordinate = ordinate + 6
UHG_time.append(ordinate)
line = re.sub("[^0123456789\.\-]", "", line)
line_float = float(line)
csv_file.write(line + ',')
UHG_flow.append(line_float)
output_file.write(str(ordinate) + ',' + line + '\n')
#Get max UHG time value
if uh_plots == 'on':
max_time = numpy.max(UHG_time)
x = range(0,max_time+6,6)
fig, ax1 = plt.subplots()
#Plot the data
ax1.plot(UHG_time, UHG_flow, color='black', label='UHG', linewidth='2', zorder=5)
ax1.plot(UHG_time, UHG_flow, 'o', color='black', ms=8, zorder=5, alpha=0.75)
ax1.fill_between(x,UHG_flow,facecolor='gray', alpha=0.25)
#ax1.minorticks_on()
ax1.grid(which='major', axis='both', color='black', linestyle='-', zorder=3)
ax1.grid(which='minor', axis='both', color='grey', linestyle='-', zorder=3)
majorLocator = MultipleLocator(6)
ax1.xaxis.set_major_locator(majorLocator)
ax1.yaxis.set_minor_locator(AutoMinorLocator(2))
ax1.set_xlabel('Time (hr)')
ax1.set_ylabel('Flow (cfs)')
#Make tick labels smaller/rotate for long UHGs
if max_time >= 100:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(8)
if max_time >= 160:
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(6)
plt.xticks(rotation=90)
majorLocator = MultipleLocator(12)
ax1.xaxis.set_major_locator(majorLocator)
ax1.set_xlim([0,max_time+3])
plt.ylim(ymin=0)
#add plot legend with location and size
ax1.legend(loc='upper right', prop={'size':10})
plt.title(name + ' UHG / ' + 'Area (mi2) = ' + area)
figname = csv_file_out +'\\' + name + '_UHG.png'
plt.savefig(figname, dpi=100)
plt.clf()
plt.close()
#Turn interactive plot mode off (don't show figures)
plt.ioff()
txt_file.close()
output_file.close()
csv_file.write('\n')
#Delete temporary .txt file holding .xml contents
os.remove(working_dir +'\\' + name + '.txt')
os.remove(working_dir +'\\' + name + '_UNITHG_Params.txt')
csv_file.close()
#LAG-K SECTION---------------------------------------------------------------
#loop through Lag-K .xlm files in folderPath
print 'Processing LAG-K parameters...'
csv_file = open(csv_file_out +'\\' + RFC + '_LAGK_Params_' + param_source + '.csv', 'w')
csv_file.write('BASIN,Current Outflow,Current Storage,JK,JLAG,LAG1,Q1,LAG2,Q2,LAG3,Q3,LAG4,Q4,LAG5,Q5,LAG6,Q6,LAG7,Q7,K1,KQ1,K2,KQ2,K3,KQ3,K4,KQ4,K5,KQ5,K6,KQ6,K7,KQ7'+'\n')
for filename in os.listdir(folderPath + '\\Lag_K\\'):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('LAGK_', '')
name = name.replace('_UpdateStates.xml', '')
#print name
csv_file.write(name + ',')
#Open .xml file and temporary .txt file to write .xml contents to
xml_file = open(folderPath + '\\Lag_K\\' + filename, 'r')
txt_file = open(working_dir +'\\' + name + '.txt', 'w')
#Write contents of .xml file to the temporary .txt file
for line in xml_file:
txt_file.write(line)
#Close the open files
xml_file.close()
txt_file.close()
#Open .txt file with .xml contents in read mode and create output .txt file where parameters will be written
txt_file = open(working_dir +'\\' + name + '.txt', 'r')
output_file = open(working_dir +'\\' + name + '_LAGK_Params.txt', 'w')
### CURRENT_OUTFLOW
#Find line number with CURRENT_OUTFLOW value
#Line number is saved when loop breaks
line_num = 0
check = 'na'
for line in txt_file:
line_num += 1
if 'CURRENT_OUTFLOW' in line:
check = 'go'
break
#Set cursor back to beginning of txt_file that is being read
txt_file.seek(0)
#Section/line of .txt file with desired parameter value
section = txt_file.readlines()[line_num:line_num+1]
if check != 'go':
csv_file.write('na,')
output_file.write('CURRENT_OUTFLOW,' + 'na' + '\n')
else:
for line in section:
#Write only numbers and decimals to output file
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('CURRENT_OUTFLOW,' + line + '\n')
### CURRENT_STORAGE
txt_file.seek(0)
line_num = 0
check = 'na'
for line in txt_file:
line_num += 1
if 'CURRENT_STORAGE' in line:
check = 'go'
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
if check != 'go':
csv_file.write('na,')
output_file.write('CURRENT_STORAGE,' + 'na' + '\n')
else:
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('CURRENT_STORAGE,' + line + '\n')
### JK
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'id="JK"' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JK,' + line + '\n')
jk = int(line)
### JLAG
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'id="JLAG"' in line:
break
txt_file.seek(0)
section = txt_file.readlines()[line_num:line_num+1]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('JLAG,' + line + '\n')
jlag = int(line)
### LAGQ
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'LAGQ_PAIRS' in line:
break
txt_file.seek(0)
if jlag == 0:
end_line = 3
else:
end_line = (jlag * 2)+2
section = txt_file.readlines()[line_num+2:line_num+end_line]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('LAGQ_PAIRS,' + line + '\n')
if jlag == 0:
jlag = 1
csv_file.write('0' + ',')
while jlag < 7:
csv_file.write('' + ',' + '' + ',')
jlag += 1
### KQ
txt_file.seek(0)
line_num = 0
for line in txt_file:
line_num += 1
if 'KQ_PAIRS' in line:
break
txt_file.seek(0)
end_line = (jk * 2)+3
txt_file.seek(0)
if jk == 0:
end_line = 3
else:
end_line = (jk * 2)+2
section = txt_file.readlines()[line_num+2:line_num+end_line]
for line in section:
line = re.sub("[^0123456789\.\-]", "", line)
csv_file.write(line + ',')
output_file.write('KQ_PAIRS,' + line + '\n')
if jk == 0:
jk = 1
csv_file.write('0' + ',')
while jk < 7:
csv_file.write('' + ',' + '' + ',')
jk += 1
txt_file.close()
output_file.close()
csv_file.write('\n')
#Delete temporary .txt file holding .xml contents
os.remove(working_dir +'\\' + name + '.txt')
os.remove(working_dir +'\\' + name + '_LAGK_Params.txt')
csv_file.close()
print 'Script Complete'
|
# -*- coding: utf-8 -*-
"""
剑指 Offer 47. 礼物的最大价值
在一个 m*n 的棋盘的每一格都放有一个礼物,每个礼物都有一定的价值(价值大于 0)。你可以从棋盘的左上角开始拿格子里的礼物,并每次向右或者向下移动一格、直到到达棋盘的右下角。给定一个棋盘及其上面的礼物的价值,请计算你最多能拿到多少价值的礼物?
示例 1:
输入:
[
[1,3,1],
[1,5,1],
[4,2,1]
]
输出: 12
解释: 路径 1→3→5→2→1 可以拿到最多价值的礼物
提示:
0 < grid.length <= 200
0 < grid[0].length <= 200
"""
from typing import List
class Solution:
    """LeetCode (剑指 Offer 47): maximum gift value collected moving only
    right or down from the top-left to the bottom-right of a grid."""

    def maxValue(self, grid: List[List[int]]) -> int:
        """Return the maximum total value along any right/down path.

        Classic DP with a table padded by one extra row and column of
        zeros, so no bounds checks are needed inside the loops.
        O(rows*columns) time and space.
        """
        # Guard: an empty board collects nothing. The problem guarantees a
        # non-empty grid, but this keeps the method total instead of raising
        # IndexError on [].
        if not grid or not grid[0]:
            return 0
        rows, columns = len(grid), len(grid[0])
        dp = [[0] * (columns + 1) for _ in range(rows + 1)]
        for row in range(1, rows + 1):
            for column in range(1, columns + 1):
                # Best of arriving from above or from the left, plus this cell.
                dp[row][column] = max(dp[row - 1][column], dp[row][column - 1]) + grid[row - 1][column - 1]
        return dp[rows][columns]
if __name__ == '__main__':
    # Sample board from the problem statement; expected answer is 12
    # (path 1 -> 3 -> 5 -> 2 -> 1).
    board = [
        [1, 3, 1],
        [1, 5, 1],
        [4, 2, 1],
    ]
    print(Solution().maxValue(board))
|
#! python3
# mapIt.py - Launches a map in the browser using an address from the
# command line or clipboard.
# Ok lets look at another protocol http
import webbrowser, sys
# Pick the address: command-line words take priority, otherwise fall back
# to a default city, then open Google Maps on it in the default browser.
if len(sys.argv) > 1:
    # Get address from command line.
    address = ' '.join(sys.argv[1:])
else:
    # No arguments supplied; use the fallback address.
    address = 'Bangalore'
webbrowser.open('https://www.google.com/maps/place/' + address)
# a few things here: sys.argv contains all the command-line parameters
# http - stands for hypertext transfer protocol
# there are 3 important things to remember
# connectionless
# media independent
# stateless
# http primary mechanisms are
# 1 send a request() close()
# 2.send a response() close()
# watch this video for a quick understanding of request and response
# https://youtu.be/eesqK59rhGA
# Think of http as a messenger
# you can send text, video, audio etc
# http is an application level protocol and which means
# the 2 systems that need to communicate must be physically connected
# and able to connect with each other http uses TCP communication
# http is defined by rules - so has rigor in it
# http is all text (not actually) divided into 3 parts
# Request
# -------
# start , header and body
# body can contain binary data
# start line contains some methods asking what to do
# GET, POST etc and then what to get or post and the version of http
# headers contains name, value pairs like what language, data content etc
# Response
# --------
# start line contains
# version of http
# contains status code
# 200 ok
# 404 file not found
# header contains name value pairs
# and finally the body contains the actual file itself
# This is grossly simplified
# but enough for our purpose for http
#-------------------------------------
# to get going we need to import
# HTTP in practice: the `requests` library wraps the request/response cycle.
import requests
# Send a GET request for a plain-text file.
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
# Inspect the response object that came back.
type(res)
# List its attributes and methods.
dir(res)
# The final URL and the HTTP status code (200 = OK).
print(res.url+str(res.status_code))
# The response body as decoded text, and its length.
print(res.text)
print(len(res.text))
# A missing page: the GET call itself succeeds, but the status will be 404.
res = requests.get('https://automatetheboringstuff.com/files/page_that_does_not_exist')
# raise_for_status() turns HTTP error statuses into Python exceptions,
# which is the graceful way to handle a file that was not found.
import requests
res = requests.get('https://automatetheboringstuff.com/files/page_that_does_not_exist')
try:
    res.raise_for_status()
except Exception as exc:
    print('%s' %(exc))
# For a page that exists, raise_for_status() raises nothing.
import requests
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
try:
    res.raise_for_status()
except Exception as exc:
    print('%s' %(exc))
# iter_content() yields the body in chunks of the given size, so a large
# download can be written to disk without holding it all in memory at once.
import requests
import os
res = requests.get('https://automatetheboringstuff.com/files/rj.txt')
try:
    res.raise_for_status()
    # 'wb': the chunks are bytes, so the file must be opened in binary mode.
    playFile=open('RomeoAndJuliet.txt','wb')
    for chunk in res.iter_content(100000):
        playFile.write(chunk)
    playFile.close()
except Exception as exc:
    print('%s' %(exc))
# Parsing HTML: use Beautiful Soup rather than regular expressions.
import requests, bs4
res = requests.get('https://www.pes.edu')
# BeautifulSoup turns the page text into a navigable parse-tree object.
soup = bs4.BeautifulSoup(res.text)
print(soup)
print(type(soup))
# CSS selectors: '#author' selects by id, 'div' by tag name.
print(soup.select('#author'))
# :( no author on this page
print(soup.select('div'))
# Try a URL whose pages actually contain these elements.
res = requests.get('https://nostarch.com')
soup = bs4.BeautifulSoup(res.text)
element = soup.select('div')
# Print at most the first 100 div elements. Iterating a slice fixes the
# IndexError the old `for i in range(100)` loop raised whenever the page
# contained fewer than 100 divs.
for tag in element[:100]:
    print(tag)
# Other useful Beautiful Soup entry points:
# soup.find_all('b') - finds all the <b> tags
# soup.find_all(["a", "b"]) - finds all <a> and <b> tags
# for tag in soup.find_all(True):
#     print(tag.name)
# - prints every tag name in the document
# Read more at https://www.pythonforbeginners.com/beautifulsoup/beautifulsoup-4-python
# for doing the exercise - the topic here is HTTP, so the details
# are not covered further.
#!/usr/bin/python
import boto3
from botocore.client import Config
import sys
from json import loads
from kafka import KafkaConsumer
from botocore.client import ClientError
import base64
# Validate command line: bucket, file to upload, Kafka bootstrap endpoint.
if len(sys.argv) != 4:
    print('Usage: ' + sys.argv[0] + ' <bucket> <filename> <kafka endpoint>')
    sys.exit(1)
# endpoint and keys from vstart
endpoint = 'http://127.0.0.1:8000'
access_key = '0555b35654ad1656d804'
secret_key = 'h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
# bucket name as first argument
bucketname = sys.argv[1]
s3_client = boto3.client('s3',
                         endpoint_url=endpoint,
                         aws_access_key_id=access_key,
                         aws_secret_access_key=secret_key)
# Name of file to be uploaded
filename = sys.argv[2]
# The Kafka endpoint to which bucket notifications should be pushed
push_endpoint = "http://" + sys.argv[3]
# SNS-compatible client against the same RGW endpoint, reusing the shared
# credential variables. (The original built a second identical '_sns_client'
# with repeated hard-coded credentials and created a stray topic named
# "test" whose ARN was immediately discarded; both removed.)
sns_client = boto3.client('sns',
                          region_name="us-east-1",
                          endpoint_url=endpoint,
                          aws_access_key_id=access_key,
                          aws_secret_access_key=secret_key,
                          config=Config(signature_version='s3'))
# Topic name encodes bucket + file + endpoint, so it is unique per invocation.
topic_name = base64.b16encode((bucketname + filename + push_endpoint).encode()).decode("utf-8")
# This is a standard AWS services call, using custom attributes to add the
# Kafka endpoint information to the topic.
arn = sns_client.create_topic(Name=topic_name,
                              Attributes={"push-endpoint": push_endpoint})["TopicArn"]
notification_conf = [{'Id': 'shtut',
                      'TopicArn': arn,
                      'Events': ['s3:ObjectSynced:*']
                      }]
s3_client.put_bucket_notification_configuration(Bucket=bucketname,
                                                NotificationConfiguration={
                                                    'TopicConfigurations': notification_conf})
# Create a new Kafka consumer to listen for the sync messages from Ceph.
consumer = KafkaConsumer(
    topic_name,
    bootstrap_servers=sys.argv[3],
    value_deserializer=lambda x: loads(x.decode("utf-8")))
# Upload the object that should trigger the notification.
ans = s3_client.upload_file(Filename=filename, Bucket=bucketname,
                            Key=filename)
print("Listening on: " + topic_name)
for msg in consumer:
    message = msg.value
    print(message)
    # Report success once the sync event for our exact bucket/object arrives.
    if message['s3']['bucket']['name'] == bucketname and message["s3"]['object']['key'] == filename \
            and message['eventName'] == "ceph:ObjectSynced":
        site = message['x-amz-id-2']
        print("Object " + filename + " put in " + bucketname + " successfully to site " + site)
|
# Write the 9 times table for factors -10..9, one product per line.
# 'with' guarantees the file is closed even if a write fails (the original
# relied on an explicit close() with no error handling).
with open('ninestimetable.txt', 'w') as table_file:
    for factor in range(-10, 10):
        table_file.write('%d\n' % (factor * 9))
print('Write = Successful')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Condition(object):
    """A single filter condition (field, value, operator) for the Alipay API.

    Converts to and from the plain-dict form used on the wire.
    """

    # Attribute names handled by the dict conversions, in wire order.
    _FIELDS = ('field_name', 'field_value', 'operator')

    def __init__(self):
        self._field_name = None
        self._field_value = None
        self._operator = None

    @property
    def field_name(self):
        return self._field_name

    @field_name.setter
    def field_name(self, value):
        self._field_name = value

    @property
    def field_value(self):
        return self._field_value

    @field_value.setter
    def field_value(self, value):
        self._field_value = value

    @property
    def operator(self):
        return self._operator

    @operator.setter
    def operator(self, value):
        self._operator = value

    def to_alipay_dict(self):
        """Return the truthy attributes as a dict, recursing into nested
        API objects via their own to_alipay_dict()."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[key] = value.to_alipay_dict()
                else:
                    params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a Condition from a plain dict; falsy input yields None."""
        if not d:
            return None
        o = Condition()
        for key in Condition._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
|
import tornado.web
from handlers.base_handler import BaseHandler, refresh_user_cookie_callback
from models.course import Course
from models.user import User
import logging
class ProfileHandler(BaseHandler):
    """Render the profile page and persist profile edits for the current user."""

    @tornado.web.authenticated
    def get(self):
        # Refresh the cookie so the template renders up-to-date user data.
        self.refresh_current_user_cookie()
        self.render('profile.html', extra_info_json=self.get_extra_info(), user_info_json=self.get_user_info())

    # NOTE(review): post() is not decorated with @tornado.web.authenticated
    # like get() is - confirm whether that is intentional.
    def post(self):
        """Validate the submitted profile form and update the user record."""
        self.refresh_current_user_cookie()
        user_data = self.current_user
        data = User().default()
        data['username'] = self.current_user['username']
        # Simple single-valued form fields.
        for field in ('email', 'dob', 'gender', 'ethnicity', 'native_language',
                      'primary_affiliation'):
            data[field] = self.get_argument(field, None, strip=True)
        data['status'] = self.get_argument('status', '', strip=True)
        # Fields the form does not edit are carried over unchanged.
        for field in ('subscribed_groups', 'pending_groups', 'answered_surveys',
                      'answers', 'unanswered_surveys', 'created_surveys',
                      'survey_responses'):
            data[field] = self.current_user[field]
        # Blank status falls back to the stored status, then "Freshman".
        # (The original re-read the 'status' argument immediately after this
        # fallback, clobbering it with '' again - that dead overwrite is
        # removed so the fallback actually takes effect.)
        if data['status'] == '':
            data['status'] = user_data['status'] or "Freshman"
        # Collect majors/minors, dropping blank AND missing entries. The old
        # code filtered only '' so absent fields leaked None into the lists.
        majors = [self.get_argument('major%d' % i, None, strip=True) for i in range(1, 5)]
        minors = [self.get_argument('minor%d' % i, None, strip=True) for i in range(1, 3)]
        data['majors'] = [m for m in majors if m]
        data['minors'] = [m for m in minors if m]
        verified = User().verify(data)
        if len(verified) != 0:
            # Verification failed: log the errors and bail without saving.
            logging.error('User: verification errors in POST profile page!')
            logging.error(verified)
            return self.redirect(self.get_argument("next", "/dashboard"))
        User().update_item(self.current_user['id'], data)
        self.refresh_current_user_cookie()
        return self.redirect(self.get_argument("next", "/profile"))

    def get_extra_info(self):
        """Return the option lists for the profile form selects, JSON-encoded
        as a one-element list."""
        extra_dict = {
            'gender': User.USER_GENDERS,
            'primary_affiliation': User.USER_PRIMARY_AFFILIATION,
            'ethnicity': User.USER_ETHNICITIES,
            'native_language': User.USER_NATIVE_LANGUAGES,
            'status': User.USER_STATUS
        }
        return tornado.escape.json_encode([extra_dict])

    def get_user_info(self):
        """Return the current user's record, JSON-encoded as a one-element list."""
        user_info_json = [self.current_user]
        logging.info(tornado.escape.json_encode(user_info_json))
        return tornado.escape.json_encode(user_info_json)
|
import os
import argparse
import tensorflow as tf
from extract_data import extract_data
from progressbar import ProgressBar
# Reduce TensorFlow C++ log noise (level 1 filters out INFO messages).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
def graph_eval(dataset_loc, input_graph_def, graph, input_node, output_node, batchsize):
    """Evaluate a frozen TF1 graph (.pb) on the sign-MNIST test split.

    dataset_loc: root directory containing mnist/sign_mnist_{train,test}/.
    input_graph_def: a GraphDef proto the frozen graph is parsed into.
    graph: path to the frozen .pb file.
    input_node / output_node: tensor names without the ':0' suffix.
    batchsize: test batch size; the final partial batch is dropped.
    """
    # Parse the serialized frozen graph into the provided GraphDef.
    input_graph_def.ParseFromString(tf.gfile.GFile(graph, "rb").read())
    training_dataset_filepath = '%smnist/sign_mnist_train/sign_mnist_train.csv' % dataset_loc
    testing_dataset_filepath = '%smnist/sign_mnist_test/sign_mnist_test.csv' % dataset_loc
    # Validation split of 0 - only the testing arrays are used below.
    train_data, train_label, val_data, val_label, testing_data, testing_label = extract_data(training_dataset_filepath,
                                                                                            testing_dataset_filepath,
                                                                                            0)
    # Integer division drops the final partial batch.
    total_batches = int(len(testing_data) / batchsize)
    tf.import_graph_def(input_graph_def, name='')
    images_in = tf.get_default_graph().get_tensor_by_name(input_node + ':0')
    # Assumes labels are one-hot over 25 classes - TODO confirm against extract_data.
    labels = tf.placeholder(tf.int32, shape=[None, 25])
    logits = tf.get_default_graph().get_tensor_by_name(output_node + ':0')
    predicted_logit = tf.argmax(input=logits, axis=1, output_type=tf.int32)
    ground_truth_label = tf.argmax(labels, 1, output_type=tf.int32)
    # Running-accuracy metric; each tf_metric_update run folds a batch in and
    # returns the accuracy accumulated so far.
    tf_metric, tf_metric_update = tf.metrics.accuracy(labels=ground_truth_label,
                                                      predictions=predicted_logit,
                                                      name='acc')
    with tf.Session() as sess:
        progress = ProgressBar()
        sess.run(tf.initializers.global_variables())
        # Local variables hold the metric's internal count/total state.
        sess.run(tf.initializers.local_variables())
        for i in progress(range(0, total_batches)):
            x_batch, y_batch = testing_data[i * batchsize:i * batchsize + batchsize], \
                               testing_label[i * batchsize:i * batchsize + batchsize]
            feed_dict = {images_in: x_batch, labels: y_batch}
            acc = sess.run(tf_metric_update, feed_dict)
        # acc now holds the accuracy over all processed batches.
        print('Graph accuracy with validation dataset: {:1.4f}'.format(acc))
    return
def main():
    """Parse command-line options and run the frozen-graph evaluation."""
    argpar = argparse.ArgumentParser()
    # String-valued options share one declaration shape, so declare them
    # from a table: (flag, default, help text).
    string_options = (
        ('--dataset', './', 'The directory where the dataset is held'),
        ('--graph', './freeze/frozen_graph.pb', 'graph file (.pb) to be evaluated.'),
        ('--input_node', 'input_1_1', 'input node.'),
        ('--output_node', 'activation_4_1/Softmax', 'output node.'),
    )
    for flag, default, help_text in string_options:
        argpar.add_argument(flag, type=str, default=default, help=help_text)
    argpar.add_argument('-b', '--batchsize',
                        type=int,
                        default=32,
                        help='Evaluation batchsize, must be integer value. Default is 32')
    args = argpar.parse_args()
    # Fresh, empty GraphDef for graph_eval to parse the frozen graph into.
    input_graph_def = tf.Graph().as_graph_def()
    graph_eval(args.dataset, input_graph_def, args.graph, args.input_node,
               args.output_node, args.batchsize)


if __name__ == "__main__":
    main()
|
import random
import string
import time
WORDLIST_FILENAME = "words.txt"
def load_words():
    """
    Returns a list of valid words. Words are strings of lowercase letters.

    Depending on the size of the word list, this function may
    take a while to finish.
    """
    print "Loading word list from file..."
    # inFile: file  (third arg 0 = unbuffered, Python-2 open() signature)
    inFile = open(WORDLIST_FILENAME, 'r', 0)
    # line: string -- the whole word list is expected on the first line
    line = inFile.readline()
    # wordlist: list of strings (Python-2 string module; same as line.split())
    wordlist = string.split(line)
    print "  ", len(wordlist), "words loaded."
    return wordlist
# ASCII-art fragments for the hangman gallows.  The full figure uses
# a (top beam), c (head), d (both arms), e (both legs), f (post), g (base).
# The *1/*2/*3 variants are partially-drawn rows used by earlier stages.
# full
a = ' |----'
c = ' |---O'
d = ' | -|-'
e = " | / \\"
f = ' |'
g = '___|_______'
c1 = ' |---'  # empty (no head yet)
d1 = ' | '  # empty (no torso/arms yet)
e1 = " | "  # empty (no legs yet)
e2 = " | / "  # one leg
d2 = ' | |'  # body only
d3 = ' | -|'  # one arm
# print a
# print b
# print c
# print d1
# print e1
# print f
# print g
def stage(something):
    """Print the gallows drawing for hangman stage `something`.

    `something` is a string '0'..'6'; each wrong guess adds one body
    part (head, body, arms, legs) to the drawing.
    """
    if something == '0':
        # Empty gallows -- no body parts yet.
        print c1
        print d1
        print e1
        print f
        print g
    elif something == '1':
        # Head only.
        print a
        print c
        print d1
        print e1
        print f
        print g
    elif something == '2':
        # Head + body.
        print a
        print c
        print d2
        print e1
        print f
        print g
    elif something == '3':
        # Head + body + one arm.
        print a
        print c
        print d3
        print e1
        print f
        print g
    elif something == '4':
        # Head + body + both arms.
        print a
        print c
        print d
        print e1
        print f
        print g
    elif something == '5':
        # ... + one leg.
        print a
        print c
        print d
        print e2
        print f
        print g
    elif something == '6':
        # Complete figure: game over.
        print a
        print c
        print d
        print e
        print f
        print g
        print
def choose_word(wordlist):
    """Pick one word uniformly at random from `wordlist`.

    wordlist (list): list of words (strings)
    Returns a single word (string).
    """
    selected = random.choice(wordlist)
    return selected
# --- Chatbot entry point: greet the user, then loop over commands forever.
# NOTE: Python-2 script (print statements, raw_input).
name = raw_input("What is your name? ")
print "For keywords, type /help. For games, type 'guess' or 'rps' or 'calculator' for a calculator."
game = 0
# Capitalised form of the player's name, e.g. "bob" -> "Bob".
name2 = name[0].upper() + name[1:].lower()
# `game == game` is always true: deliberate infinite REPL loop.
while game == game:
    # Build a random greeting from two word lists.
    greetings1 = ['Hello', 'Greetings', 'Salutations', 'Hi', 'Hola']
    rand1 = random.randint(0, 4)
    greetings2 = ['friend', 'fellow programmer', 'aquaintance', 'amigo', 'bro', name2]
    rand2 = random.randint(0, 5)
    greeting = greetings1[rand1] + ' ' + greetings2[rand2]
    y = raw_input(greeting + "! Enter something: ")
    # Input is lower-cased here, so every mixed-case comparison below
    # (e.g. 'Charlie', 'RPS', "I'm sad") can never match -- dead branches.
    y = y.lower()
    if y == 'hi':
        print 'bye'
    elif y == 'Charlie' or y == 'charlie':
        print 'is ded meme'
    elif y == '/help':
        print 'Keywords include: hi, calculator, hey bro, what are you, how, Charlie, are you going to take over the world, Brad, why,'
        print ' Are you my friend, nothing, yes, how do i feel, stupid, i\'m sad, rps,'
        print 'can you feel emotion, is charlie a creator, game, guess, who is your creator, no, what, hey, random, ai,'
        print ' is charlie a good programmer, cool, wow, operate, me, you, bye, kill, kill urself, rude, I hate you,'
        print 'or I\'m <insert your name>'
    elif y == 'Brad' or y == 'brad':
        print 'is better than charlie'
    elif y == 'why':
        print 'because'
    elif y == 'are you my friend':
        # Random yes/no answer.
        d = random.randint(0, 1)
        if d == 0:
            print('Ewww of couse not!')
        else:
            print 'Sure'
    elif y == 'nothing':
        print 'what'
    elif y == 'yes':
        print 'no bc you bad'
    elif y == 'stupid':
        print 'dumb kid'
    elif y == 'hello':
        print 'hi'
    elif y == "i'm sad":
        print "don't worry"
    elif y == "guess" or y == "Guess":
        # --- Number-guessing mini-game with a guess limit ---
        print "Let's play a guessing game!"
        amount = raw_input("Choose a maximum number - ")
        on = 1
        limit = int(raw_input("Choose a guess limit - "))
        times = 1
        no = 0  # set to 1 when the player wins
        secret = random.randint(1, int(amount))
        print "I'm thinking of a number between 1 and " + str(amount) + ". Can you guess it?"
        print "Enter a number, or 'exit' to end the game. "
        while on == 1 and limit > 0:
            limit = int(limit) - 1
            guess = raw_input("")
            if guess == "exit":
                print
                on = 0
            else:
                if int(guess) > secret:
                    print "Too high!"
                elif int(guess) < secret:
                    print "Too low!"
                else:
                    print 'Congratulations, you guessed the number! You used ' + str(times) + ' guesses.'
                    no = 1
                    on = 0
                times = times + 1
        if limit == 0 and no == 0:
            print 'You ran out of guesses! The number was ' + str(secret)
    # NOTE(review): the next three branches are unreachable -- y was
    # lower-cased above, and the all-lowercase "i'm sad" case already
    # matched earlier.
    elif y == "I'm sad":
        print "don't worry"
    elif y == "im sad":
        print "don't worry"
    elif y == "Im sad":
        print "don't worry"
    elif y == 'no':
        print 'yes'
    elif y == 'hangman':
        # --- Hangman mini-game ---
        wordlist = load_words()
        # your code begins here!
        word = choose_word(wordlist)
        # NOTE(review): str.lower(word) discards its result -- `word`
        # is unchanged; presumably `word = word.lower()` was intended.
        str.lower(word)
        guesses = 6
        print
        print 'Welcome to hangman! I am thinking of a word with ' + str(len(word)) + " letters. You have six guesses."
        stage('0')
        x = 1  # loop flag: 1 while the round is running
        y = 2  # clobbers the user-input variable; harmless within this branch
        z = 0
        wx = 0  # unused
        blank = '_ ' * len(word)
        print blank
        underscore = ['_ '] * len(word)  # one cell per letter, filled on correct guesses
        listword = []
        for fg in range(0, len(word)):
            listword.append(word[fg])
        # Letters still available to guess.
        a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
             'v', 'w', 'x', 'y', 'z']
        while x == 1 and guesses > 0:
            print 'Available options are: ' + str(a)
            guess = raw_input("Pick a letter and see if it's in my word! ")
            if guess in a:
                x = x  # no-op: keep playing
                a.remove(str(guess))
            else:
                print 'Pick a new letter. This is not a letter or has already been guessed.'
                # break
            if guess in word:
                print "Right guess."
                for letter in word:
                    if underscore[z] == guess:
                        z = z + 1
                # Reveal every position where the guessed letter occurs.
                for r in range(0, len(word)):
                    if guess == word[r]:
                        del underscore[r]
                        underscore.insert(r, str(guess) + ' ')
                blank = ''
                for df in range(0, len(word)):
                    blank = blank + underscore[df]
                print blank
                z = 0
            else:
                print 'Wrong guess.'
                guesses = int(guesses) - 1
                print str(int(guesses)) + ' guesses left.'
                stage(str(6 - guesses))
            if guesses == 0:
                print 'YOU LOSE! NO MORE GUESSES ARE AVAILABLE.'
                print "The word was " + word + "."
            if '_ ' not in underscore:
                # All letters revealed: the player wins.
                print "CONGRAGULATIONS! YOU WON THE HARDEST GAME YOU'LL EVER PLAY."
                print "You guessed the word " + word + "."
                x = 0
    elif y == 'rps' or y == 'RPS':
        # --- Rock-paper-scissors: 1=rock, 2=paper, 3=scissors for the AI ---
        print "Hey " + str(name2) + ", let's play rock, paper, scissors!"
        x = 1
        while x == 1:
            ai = random.randint(1, 3)
            p1 = raw_input('Rock, Paper, or Scissors? Type "stop" to exit. ')
            if p1 == 'Rock' or p1 == 'rock':
                if ai == 1:
                    print 'You tie!'
                elif ai == 2:
                    print 'You lose!'
                elif ai == 3:
                    print 'You win!'
            elif p1 == 'Paper' or p1 == 'paper':
                if ai == 1:
                    print 'You win!'
                elif ai == 2:
                    print 'You tie!'
                elif ai == 3:
                    print 'You lose!'
            elif p1 == 'Scissors' or p1 == 'scissors':
                if ai == 1:
                    print 'You lose!'
                elif ai == 2:
                    print 'You win!'
                elif ai == 3:
                    print 'You tie!'
            elif p1 == 'stop':
                x = 0
                print
    elif y == 'what' or y == 'What':
        print 'ur dumb'
    elif y == 'hey' or y == 'Hey':
        print 'sup'
    elif y == 'random':
        print 'ask charlie he is the one making the game'
    elif y == 'ai' or y == 'Ai':
        print 'yep?'
    elif y == 'is charlie a good programmer':
        print "Yes, but a ded ded meme"
    elif y == 'cool':
        print "i know i am"
    elif y == "wow":
        print 'wow is right'
    elif y == "operate" or y == "Operate":
        print "operate on what"
    elif y == "me" or y == 'Me':
        print "but u bad"
    elif y == 'you' or y == 'You':
        print 'yeah im better than you'
    elif y == 'bye' or y == 'Bye':
        print 'see ya'
    elif y == 'kill' or y == 'Kill':
        print 'yourself'
    elif y == 'kill urself' or y == 'kill yourself':
        print 'yeah you should'
    elif y == 'rude' or y == 'you are rude':
        print 'and so what'
    elif y == 'i hate you':
        print 'same'
    elif y == 'is charlie a creator':
        print 'somewhat; hes a designer'
    elif y == 'who is your creator':
        print 'brad'
    elif y == 'I\'m ' + name or y == 'i\'m ' + name or y == 'Im ' + name or y == 'im ' + name:
        print 'I know...'
    elif y == 'how':
        print 'by doing'
    elif y == 'can you feel emotion':
        print 'I feel your emotion'
    elif y == 'how do i feel':
        print 'happy, confused, and utterly stupid'
    elif y == 'are you going to take over the world':
        print 'when i want to why not'
    elif y == 'what are you':
        print 'a god ai'
    elif y == 'game':
        print 'type rps for rock paper scissors or guess for a guessing game'
    elif y == 'hey bro':
        print "hey mate"
    elif y == 'calculator':
        # --- Tiny calculator: one binary float operation per run ---
        first = raw_input('First number? ')
        op = raw_input('What operation? ')
        second = raw_input('Second number? ')
        # NOTE(review): 'multiplication' is tested twice in this condition.
        if op == 'multiplication' or op == 'multiplication' or op == 'x' or op == '*' or op == 'times' or op == 'time':
            answer = float(first) * float(second)
        elif op == 'add' or op == 'plus' or op == 'addition' or op == '+':
            answer = float(first) + float(second)
        elif op == '-' or op == 'subtraction' or op == 'subtract' or op == 'minus' or op == "take away":
            answer = float(first) - float(second)
        elif op == 'divided by' or op == 'divided' or op == 'divide' or op == '/' or op == 'division':
            answer = float(first) / float(second)
        else:
            # NOTE(review): when this branch runs, `answer` may be unbound
            # (first calculator use), so the print below raises NameError.
            print("That's not an operator")
        print 'The answer is ' + str(answer) + '.'
    else:
        print 'so what'
# #
# #
# y = 1000
# while y == 1000:
# x = raw_input("Enter a word")
# print "ai says " + str(x)
|
import h5py
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import os
import numpy as np
# Root directories of the pre-converted .npy point-cloud datasets.
# The commented alternatives are earlier machines / trigger variants.
# main_path_m = "/data-x/g10/zhangjie/3D/datasets/modelnet40_npy/"
# main_path_s = "/data-x/g10/zhangjie/3D/datasets/shapenet_npy/"
# main_path_m = "/data-x/g10/zhangjie/3D/datasets/modelnet_trigger/"
# main_path_s = "/data-x/g10/zhangjie/3D/datasets/shapenet_trigger/"
main_path_m = "/public/zhangjie/3D/ZJ/simple/datasets/modelnet40_npy/"  # ModelNet40
main_path_s = "/public/zhangjie/3D/ZJ/simple/datasets/shapenet_npy/"  # ShapeNet
# main_path_m = "/public/zhangjie/3D/ZJ/simple/datasets/modelnet_trigger/"
# main_path_s = "/public/zhangjie/3D/ZJ/simple/datasets/shapenet_trigger/"
def get_data(train=True, Shapenet=True):
    """Load every .npy point cloud of one dataset split into memory.

    Each file is named "<something>_<label>.npy"; the first 1024 points
    of each cloud are kept.  Returns a (clouds, labels) pair where
    `clouds` is a stacked float tensor and `labels` a 1-D long tensor.
    """
    main_path = main_path_s if Shapenet else main_path_m
    split_dir = main_path + ("train" if train else "val")
    file_names = os.listdir(split_dir)
    clouds_li = []
    labels_li = []
    for fname in file_names:
        # Keep only the first 1024 points of the cloud.
        pts = np.load(split_dir + "/" + fname)[:1024, :]
        # The integer class label is encoded after the last '_' in the stem.
        label_str = fname.split(".")[0].split("_")[1]
        lbl = np.array(int(label_str)).reshape(1,)
        clouds_li.append(torch.Tensor(pts).unsqueeze(0))
        labels_li.append(torch.Tensor(lbl).unsqueeze(0))
    clouds = torch.cat(clouds_li)
    labels = torch.cat(labels_li)
    return clouds, labels.long().squeeze()
class PointDataSet(Dataset):
    """Torch Dataset over the point clouds produced by get_data()."""

    def __init__(self, train=True, Shapenet=True):
        clouds, labels = get_data(train=train, Shapenet=Shapenet)
        # NOTE: `lenth` keeps the original (misspelled) attribute name for
        # compatibility with any external readers.
        self.lenth = clouds.size(0)
        self.x_data = clouds
        self.y_data = labels

    def __getitem__(self, index):
        # Return one (cloud, label) sample.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.lenth
def get_dataLoader(train=True, Shapenet=True, batchsize=16):
    """Build a DataLoader over PointDataSet; shuffling only for training."""
    dataset = PointDataSet(train=train, Shapenet=Shapenet)
    return DataLoader(dataset=dataset, batch_size=batchsize, shuffle=train)
def main():
    """Smoke test: build ShapeNet train and test loaders, then report."""
    train_loader = get_dataLoader(train=True, Shapenet=True)
    eval_loader = get_dataLoader(train=False, Shapenet=True)
    print("getData_main")


if __name__ == '__main__':
    main()
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from only.gl import colors
from only.gl.scene import Scene
class MyScene(Scene):
    """A minimal OnlyGL scene: a large flat box used as a green floor."""

    def init_model(self):
        # Floor placement (slightly below the origin) and colour.
        self.x = 0.0
        self.y = -0.5
        self.z = 0.0
        self.color = colors.GREEN

    def draw(self):
        # Draw the floor
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, self.color)
        glTranslatef(self.x, self.y, self.z)
        self.draw_box(200.0, 0.5, 200.0)
        # And your model

    def draw_box(self, a, b, m):
        """Draw an axis-aligned box of width `a`, height `b`, depth `m`,
        centred on the origin in x/z with its base at y == 0."""
        x = float(a) / 2.0  # half-width
        z = float(m) / 2.0  # half-depth
        y = float(b)        # full height
        glShadeModel(GL_FLAT)
        # Front, top, back and bottom faces as one quad strip.
        glBegin(GL_QUAD_STRIP)
        glNormal3f(0.0, 0.0, -1.0)
        glVertex3f(x, 0, -z)
        glVertex3f(-x, 0, -z)
        glVertex3f(x, y, -z)
        glVertex3f(-x, y, -z)
        glNormal3f(0.0, 1.0, 0.0)
        glVertex3f(x, y, z)
        glVertex3f(-x, y, z)
        glNormal3f(0.0, 0.0, 1.0)
        glVertex3f(x, 0, z)
        glVertex3f(-x, 0, z)
        glNormal3f(0.0, -1.0, 0.0)
        glVertex3f(x, 0, -z)
        glVertex3f(-x, 0, -z)
        glEnd()
        # Right (+x) side face.
        glBegin(GL_QUADS)
        glNormal3f(1.0, 0.0, 0.0)
        glVertex3f(x, 0, -z)
        glVertex3f(x, y, -z)
        glVertex3f(x, y, z)
        glVertex3f(x, 0, z)
        glEnd()
        # Left (-x) side face.
        glBegin(GL_QUADS)
        glNormal3f(-1.0, 0.0, 0.0)
        glVertex3f(-x, 0, -z)
        glVertex3f(-x, 0, z)
        glVertex3f(-x, y, z)
        glVertex3f(-x, y, -z)
        glEnd()
def main():
    """Set up GLUT, register the scene callbacks and enter the event loop."""
    # Bug fix: `sys` was never imported at module level, so glutInit(sys.argv)
    # raised NameError.  Function-scope import keeps the fix local.
    import sys
    glutInit(sys.argv)
    scene = MyScene()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(scene.viewport.width, scene.viewport.height)
    glutInitWindowPosition(0, 0)
    glutCreateWindow("My first Scene with OnlyGL")
    scene.init()
    glutDisplayFunc(scene.displayfunc)
    glutReshapeFunc(scene.reshapefunc)
    glutKeyboardFunc(scene.keyboardfunc)
    glutMouseFunc(scene.mousefunc)
    # Bug fix: print BEFORE entering the loop -- glutMainLoop() never
    # returns, so the original Python-2 `print` statement after it was
    # unreachable (and a syntax error under Python 3).
    print("Hit ESC key to quit.")
    glutMainLoop()


main()
|
from django.test import TestCase
from django.contrib.auth.models import AnonymousUser
from kawaz.core.personas.tests.factories import PersonaFactory
from .factories import ProductFactory
class ProductCreatePermissionTestCase(TestCase):
    """Permission checks for creating products."""

    def setUp(self):
        self.product = ProductFactory()
        self.user = PersonaFactory()
        self.wille = PersonaFactory(role='wille')
        self.anonymous = AnonymousUser()

    def test_anonymous_dont_have_add_permission(self):
        """Anonymous users must not hold the add-product permission."""
        self.assertFalse(self.anonymous.has_perm('products.add_product'))

    def test_wille_dont_have_add_permission(self):
        """'wille' role users must not hold the add-product permission."""
        self.assertFalse(self.wille.has_perm('products.add_product'))

    def test_general_user_have_add_permission(self):
        """Ordinary members may add a product."""
        self.assertTrue(self.user.has_perm('products.add_product'))
class ProductUpdatePermissionTestCase(TestCase):
    """Permission checks for changing (updating) products."""

    def setUp(self):
        self.product = ProductFactory()
        self.user = PersonaFactory()
        self.wille = PersonaFactory(role='wille')
        self.anonymous = AnonymousUser()

    def test_anonymous_dont_have_change_permission(self):
        """Anonymous users lack the change permission at the model level."""
        self.assertFalse(self.anonymous.has_perm('products.change_product'))

    def test_wille_dont_have_change_permission(self):
        """'wille' users lack the change permission at the model level."""
        self.assertFalse(self.wille.has_perm('products.change_product'))

    def test_general_user_have_change_permission(self):
        """Ordinary members hold the change permission at the model level."""
        self.assertTrue(self.user.has_perm('products.change_product'))

    def test_anonymous_dont_have_change_permission_with_object(self):
        """Anonymous users cannot change a specific product."""
        self.assertFalse(self.anonymous.has_perm('products.change_product', self.product))

    def test_wille_dont_have_change_permission_with_object(self):
        """'wille' users cannot change a specific product."""
        self.assertFalse(self.wille.has_perm('products.change_product', self.product))

    def test_other_user_dont_have_change_permission_with_object(self):
        """Users who are not administrators cannot change the product."""
        self.assertFalse(self.user.has_perm('products.change_product', self.product))

    def test_administrators_have_change_permission_with_object(self):
        """Product administrators can change their own product."""
        self.product.administrators.add(self.user)
        self.assertTrue(self.user.has_perm('products.change_product', self.product))
class ProductDeletePermissionTestCase(TestCase):
    """Permission checks for deleting products."""

    def setUp(self):
        self.product = ProductFactory()
        self.user = PersonaFactory()
        self.wille = PersonaFactory(role='wille')
        self.anonymous = AnonymousUser()

    def test_anonymous_dont_have_delete_permission(self):
        """Anonymous users lack the delete permission at the model level."""
        self.assertFalse(self.anonymous.has_perm('products.delete_product'))

    def test_wille_dont_have_delete_permission(self):
        """'wille' users lack the delete permission at the model level."""
        self.assertFalse(self.wille.has_perm('products.delete_product'))

    def test_general_user_have_delete_permission(self):
        """Ordinary members hold the delete permission at the model level."""
        self.assertTrue(self.user.has_perm('products.delete_product'))

    def test_anonymous_dont_have_delete_permission_with_object(self):
        """Anonymous users cannot delete a specific product."""
        self.assertFalse(self.anonymous.has_perm('products.delete_product', self.product))

    def test_wille_dont_have_delete_permission_with_object(self):
        """'wille' users cannot delete a specific product."""
        self.assertFalse(self.wille.has_perm('products.delete_product', self.product))

    def test_other_user_dont_have_delete_permission_with_object(self):
        """Users who are not administrators cannot delete the product."""
        self.assertFalse(self.user.has_perm('products.delete_product', self.product))

    def test_administrators_have_delete_permission_with_object(self):
        """Product administrators can delete their own product."""
        self.product.administrators.add(self.user)
        self.assertTrue(self.user.has_perm('products.delete_product', self.product))
class ProductJoinPermissionTestCase(TestCase):
    """Permission checks for joining a product's member list."""

    def setUp(self):
        self.product = ProductFactory()
        self.user = PersonaFactory()
        self.wille = PersonaFactory(role='wille')
        self.anonymous = AnonymousUser()

    def test_anonymous_dont_have_join_permission(self):
        """Anonymous users lack the join permission at the model level."""
        self.assertFalse(self.anonymous.has_perm('products.join_product'))

    def test_wille_dont_have_join_permission(self):
        """'wille' users lack the join permission at the model level."""
        self.assertFalse(self.wille.has_perm('products.join_product'))

    def test_general_user_have_join_permission(self):
        """Ordinary members hold the join permission at the model level."""
        self.assertTrue(self.user.has_perm('products.join_product'))

    def test_anonymous_dont_have_join_permission_with_object(self):
        """Anonymous users cannot join a specific product."""
        self.assertFalse(self.anonymous.has_perm('products.join_product', self.product))

    def test_wille_dont_have_join_permission_with_object(self):
        """'wille' users cannot join a specific product."""
        self.assertFalse(self.wille.has_perm('products.join_product', self.product))

    def test_other_user_have_join_permission_with_object(self):
        """A user who is not yet involved in the product may join it."""
        self.assertTrue(self.user.has_perm('products.join_product', self.product))

    def test_administrators_dont_have_join_permission_with_object(self):
        """Administrators cannot join again -- they already belong."""
        self.product.administrators.add(self.user)
        self.assertFalse(self.user.has_perm('products.join_product', self.product))
class ProductQuitPermissionTestCase(TestCase):
    """Permission checks for quitting (leaving) a product."""

    def setUp(self):
        self.product = ProductFactory()
        self.user = PersonaFactory()
        self.wille = PersonaFactory(role='wille')
        self.anonymous = AnonymousUser()

    def test_anonymous_dont_have_quit_permission(self):
        '''
        Test anonymous users do not have quit from product permission
        '''
        self.assertFalse(self.anonymous.has_perm('products.quit_product'))

    def test_wille_dont_have_quit_permission(self):
        '''
        Test wille users do not have quit from product permission
        '''
        self.assertFalse(self.wille.has_perm('products.quit_product'))

    def test_general_user_have_quit_permission(self):
        '''
        Test general user have quit from product permission
        '''
        self.assertTrue(self.user.has_perm('products.quit_product'))

    def test_anonymous_dont_have_quit_permission_with_object(self):
        '''
        Test anonymous users do not have quit from product permission
        '''
        self.assertFalse(self.anonymous.has_perm('products.quit_product', self.product))

    def test_wille_dont_have_quit_permission_with_object(self):
        '''
        Test wille users do not have quit from product permission
        '''
        self.assertFalse(self.wille.has_perm('products.quit_product', self.product))

    def test_other_user_dont_have_quit_permission_with_object(self):
        '''
        Test other user don't have quit from product permission
        '''
        self.assertFalse(self.user.has_perm('products.quit_product', self.product))

    def test_last_administrators_dont_have_quit_permission_with_object(self):
        '''
        Test last administrator doesn't have quit from product permission
        '''
        self.product.administrators.add(self.user)
        self.assertEqual(self.product.administrators.count(), 1)
        self.assertFalse(self.user.has_perm('products.quit_product', self.product))

    def test_administrators_have_quit_permission_with_object(self):
        '''
        Test a non-last administrator has quit from product permission.

        Doc fix: the original docstring said "last_administrators have quit
        permission", contradicting both this test body (two administrators)
        and the previous test.
        '''
        other = PersonaFactory()
        self.product.administrators.add(self.user)
        self.product.administrators.add(other)
        self.assertTrue(self.user.has_perm('products.quit_product', self.product))
import numpy as np
import matplotlib.pyplot as plt
import emoji
import pandas as pd
from keras.utils.np_utils import to_categorical
# Load the emoji train/test CSVs.  No header row: column 0 holds the
# sentence, column 1 the integer emoji label.
df_train = pd.read_csv('data/train_emoji.csv', header=None)
df_test = pd.read_csv('data/tesss.csv', header=None)
X_train = df_train[0]
Y_train = df_train[1]
X_test = df_test[0]
Y_test = df_test[1]
# get the maxLen of X_train -- word count of the longest training sentence
maxLen = len(max(X_train, key=len).split())
# one hot -- encode the integer labels for the softmax targets
Y_oh_train = to_categorical(Y_train)
Y_oh_test = to_categorical(Y_test)
def read_glove_vecs(glove_file):
    """Parse a GloVe embedding file.

    Each line of the file is "<word> <v1> <v2> ...".  Returns a triple:
    - words_to_index: word -> 1-based index (alphabetical order)
    - index_to_words: the inverse mapping
    - word_to_vec_map: word -> np.float64 embedding vector
    """
    # Bug fix: GloVe files are UTF-8; without an explicit encoding the
    # platform default (e.g. cp1252 on Windows) can raise UnicodeDecodeError.
    with open(glove_file, 'r', encoding='utf-8') as f:
        words = set()
        word_to_vec_map = {}
        for line in f:
            parts = line.strip().split()
            curr_word = parts[0]
            words.add(curr_word)
            word_to_vec_map[curr_word] = np.array(parts[1:], dtype=np.float64)
    # Build the 1-based index maps over the sorted vocabulary.
    words_to_index = {}
    index_to_words = {}
    for i, w in enumerate(sorted(words), start=1):
        words_to_index[w] = i
        index_to_words[i] = w
    return words_to_index, index_to_words, word_to_vec_map
# Bug fix: the GloVe file lives under 'data/' like the CSVs above; the
# original path 'date/glove.6B.50d.txt' was a typo and raised an IOError.
word_to_index, index_to_word, word_to_vec_map = read_glove_vecs('data/glove.6B.50d.txt')
# calculate average
def sentence_to_avg(sentence, word_to_vec_map):
    """Average the GloVe vectors of every word in `sentence`.

    The sentence is lower-cased and whitespace-tokenised.  Raises
    KeyError for out-of-vocabulary words and ZeroDivisionError for an
    empty sentence (matching the original behaviour).
    """
    words = sentence.lower().split()
    # Bug fix: the original pre-initialised avg = np.zeros((len(words), 1)) --
    # the wrong shape for an embedding vector -- and then discarded it.
    total = 0
    for w in words:
        total = total + word_to_vec_map[w]
    return total / len(words)
# softmax
def softmax(x):
    """Numerically stable softmax: shift by the maximum before exponentiating."""
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
# predict
def predict(X, Y, W, b, word_to_vec_map):
    """Predict a class index for every sentence in X and print accuracy vs Y.

    Returns an (m, 1) array of predicted class indices.
    """
    m = X.shape[0]
    pred = np.zeros((m, 1))
    for j in range(m):  # Loop over training examples
        tokens = X[j].lower().split()
        # Average the 50-d GloVe vectors of the sentence's words.
        avg = np.zeros((50,))
        for tok in tokens:
            avg += word_to_vec_map[tok]
        avg = avg / len(tokens)
        # Forward propagation through the single softmax layer.
        A = softmax(np.dot(W, avg) + b)
        pred[j] = np.argmax(A)
    print("Accuracy: " + str(np.mean((pred[:] == Y.reshape(Y.shape[0], 1)[:]))))
    return pred
# model
def model(X, Y, word_to_vec_map, learning_rate = 0.01, num_iterations = 400):
    """Train a GloVe-average + softmax classifier with plain SGD.

    X: array of sentences, Y: array of integer labels in [0, 5).
    Returns (pred, W, b): last printed predictions and learned parameters.
    """
    np.random.seed(1)
    m = Y.shape[0]  # number of training examples
    n_y = 5  # number of classes
    n_h = 50  # dimensions of the GloVe vectors
    # Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))
    # Optimization loop
    for t in range(num_iterations):
        for i in range(m):  # Loop over the training examples
            avg = sentence_to_avg(X[i], word_to_vec_map)
            # the softmax layer
            z = np.dot(W, avg) + b
            a = softmax(z)
            # Bug fix: the original referenced an undefined global `Y_oh`
            # (NameError at runtime) and used the raw integer label Y[i] in
            # the cost.  Build the one-hot target for this example instead.
            y_oh = np.zeros(n_y)
            y_oh[int(Y[i])] = 1.0
            # Compute cost (cross-entropy against the one-hot target)
            cost = -np.sum(y_oh * np.log(a))
            # Compute gradients
            dz = a - y_oh
            dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db = dz
            # Update parameters with Stochastic Gradient Descent
            W = W - learning_rate * dW
            b = b - learning_rate * db
        if t % 100 == 0:
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
            pred = predict(X, Y, W, b, word_to_vec_map)
    return pred, W, b
# Train on the raw training sentences/labels and show the final predictions.
pred, W, b = model(X_train, Y_train, word_to_vec_map)
print(pred)
from abc import ABC,abstractmethod
from collections import namedtuple
Customer=namedtuple('Customer','name fidelity')
class LineItem:
    """One cart line: a product, how many units, and the unit price."""

    def __init__(self, product, quantity, price):
        self.product = product
        self.quantity = quantity
        self.price = price

    def total(self):
        """Subtotal for this line: unit price times quantity."""
        return self.quantity * self.price
class Order:
    """A customer's order; `promotion` is an optional discount strategy
    object exposing discount(order)."""

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)
        self.promotion = promotion

    def total(self):
        """Sum of all line-item totals, computed once and cached."""
        # Bug fix: the original tested hasattr(self, '__total'), but inside
        # the class `self.__total` is name-mangled to '_Order__total', so
        # the literal '__total' check always failed and the sum was
        # recomputed on every call.  Use an unmangled cache attribute.
        if not hasattr(self, '_total'):
            self._total = sum(item.total() for item in self.cart)
        return self._total

    def due(self):
        """Amount owed after applying the promotion discount, if any."""
        if self.promotion is None:
            discount = 0
        else:
            discount = self.promotion.discount(self)
        return self.total() - discount

    def __repr__(self):
        fmt = '<Order total: {:.2f} due: {:.2f}>'
        return fmt.format(self.total(), self.due())
class Promotion(ABC):
    """Strategy interface: compute the discount owed for an order."""

    @abstractmethod
    def discount(self, order):
        """Return the discount for `order` as a positive dollar amount."""


class FidelityPromo(Promotion):
    """5% off the whole order for customers with >= 1000 fidelity points."""

    def discount(self, order):
        if order.customer.fidelity >= 1000:
            return order.total() * .05
        return 0


class BulkItemPromo(Promotion):
    """10% off each line item with twenty or more units."""

    def discount(self, order):
        bulk_lines = (item for item in order.cart if item.quantity >= 20)
        return sum(item.total() * .1 for item in bulk_lines)


class LargeOrderPromo(Promotion):
    """7% off orders containing at least ten distinct products."""

    def discount(self, order):
        distinct_items = {item.product for item in order.cart}
        return order.total() * .07 if len(distinct_items) >= 10 else 0
# --- Demo: exercise each promotion strategy ---
joe = Customer('John Doe', 0)      # no fidelity points
ann = Customer('Ann Smith', 1100)  # qualifies for the fidelity discount
cart = [LineItem('banana', 4, .5),
        LineItem('Apple', 10, 1.5),
        LineItem('Watermellon', 5, 5.0)]
# Fidelity promo applies: ann has >= 1000 points.
a1 = Order(ann, cart, FidelityPromo())
print(a1)
# Bulk promo applies: 30 bananas >= 20 units.
banana_cart = [LineItem('banana', 30, .5),
               LineItem('apple', 10, 1.5)]
a2 = Order(joe, banana_cart, BulkItemPromo())
print(a2)
# Large-order promo applies: 10 distinct products.
long_order = [LineItem(str(item_code), 1, 1.0)
              for item_code in range(10)]
a3 = Order(joe, long_order, LargeOrderPromo())
print(a3)
# Large-order promo does NOT apply: only 3 distinct products.
a4 = Order(joe, cart, LargeOrderPromo())
print(a4)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.