index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,400 | 626ff92911719b473f6087f7ac90c7b1fc6e1446 | """
Copyright 2019 BBC. Licensed under the terms of the Apache License 2.0.
"""
from io import StringIO
import json
import os
import subprocess
import sys
import git
from foxglove.shared import LogLevel
class LockDoc:
    """
    Build a document that captures the parameters needed to repeat the current
    environmental conditions: the code version (git commit-ish), resolved data
    connections and installed package versions.
    """

    def __init__(self, target_model):
        """
        :param target_model: subclass of :class:`foxglove.Model`
        """
        # Minimal in-memory log sink; a proper handler could be injected later
        # (see TODO in log()).
        self.logger = StringIO()
        self.target_model = target_model

    def log(self, msg, log_level=LogLevel.INFO):
        """
        Log a string.

        :param msg: message to record
        :param log_level: severity; currently accepted but not used for
            filtering.
        """
        # TODO levels and allow injection of a log handler
        if self.logger:
            self.logger.write(msg.strip() + "\n")

    def get_code_references(self):
        """
        Get parameters needed to return code to this state at a later date.
        Currently only supports git.

        :returns: dict that is safe to serialise to JSON
        :raises NotImplementedError: when the executing file isn't in a git repo
        """
        # TODO - this assumes TaBS module is executed as "python my_tabs.py"
        # also need "./my_tabs.py" and pipenv variants
        target_module = self.target_model.__class__.__module__
        executing_file = os.path.abspath(sys.modules[target_module].__file__)
        executing_file_path = os.path.abspath(os.path.dirname(executing_file))
        try:
            git_repo = git.Repo(executing_file_path, search_parent_directories=True)
        except (git.exc.InvalidGitRepositoryError, git.exc.NoSuchPathError):
            # BUG FIX: previously constructed the message then raised; either
            # way only git is supported so fail loudly and clearly.
            raise NotImplementedError(
                "Not a git repo and only git is currently supported")
        if git_repo.is_dirty():
            # Warn but continue: the recorded commit won't include these edits.
            self.log("There are uncommitted changes so can't get committish",
                     LogLevel.WARNING)
        if git_repo.untracked_files:
            # log as warning but don't stop
            untracked = ", ".join(git_repo.untracked_files)
            self.log(f"There are untracked files: {untracked}", LogLevel.WARNING)
        # The branch doesn't need to be recorded, just the commit-ish, e.g.
        # 7f75cef7239ad8582187d7fbebddd4af3f410616 as shown by `git log`.
        current_commit_ish = git_repo.head.reference.commit.hexsha
        d = {'commit_ish': current_commit_ish}
        try:
            # BUG FIX: narrowed a bare `except:` — a missing 'origin' remote
            # surfaces as an AttributeError from GitPython's remotes accessor.
            d['origin_url'] = git_repo.remotes.origin.url
        except AttributeError:
            pass
        # Without an origin remote, fall back to the local path; they could
        # both be given but there is no reason to give away local info when
        # an origin URL exists.
        if 'origin_url' not in d:
            d['local_dir'] = git_repo.git_dir
        return {'git': d}

    def get_data_sources(self):
        """
        Examine the target model's data connections and find those that were
        resolved into connection parameters by using a catalogue lookup.

        :returns: dict that is safe to serialise to JSON, keyed by the
            connection name.
        """
        d = {}
        for k, connector in self.target_model.datasets().items():
            if not connector.uses_dataset_discovery:
                continue
            # can foxglove eval on demand or is this needed?
            # # force dataset to load, this will ensure dataset discovery has
            # # evaluated connection parameters.
            # assert connection.data
            d[k] = connector.engine_params
        return d

    def get_code_dependencies(self):
        """
        Just pip freeze output for now.

        :returns: list of requirement strings in pip freeze format
        :raises ValueError: when no pip executable can be found
        """
        pip_commands = ['pip', 'pip3', '/usr/local/bin/pip3']
        for pip_cmd in pip_commands:
            try:
                raw_stdout = subprocess.check_output([pip_cmd, 'freeze'])
            except FileNotFoundError:
                continue
            # BUG FIX: decode as utf-8, not ascii — package names and VCS URLs
            # in freeze output are not guaranteed to be ascii.
            dependencies = raw_stdout.decode('utf-8').splitlines()
            if dependencies:
                return dependencies
        raise ValueError(
            "Couldn't find pip executable in: {}".format(','.join(pip_commands)))

    def get_document(self):
        """
        Assemble the data and code parts. This method assumes sub documents
        don't make a namespace that overwrites anothers'.

        :returns: dict combining code references, data connections,
            dependencies and any log lines produced while locking
        """
        d = {
            'code_local': self.get_code_references(),
            'data_connections': self.get_data_sources(),
            'code_dependencies': self.get_code_dependencies(),
        }
        # any additional info generated by the locking process
        self.logger.seek(0)
        logs = [line.strip() for line in self.logger.readlines()]
        if logs:
            d['lock_log'] = logs
        return d

    def relock(self, lock_doc):
        """
        Apply parameters from a previous build to self.target_model in order
        to re-create an old build.

        Throws an exception if not possible. Bit limiting, next step is to
        provide info needed at a system level to re-create the environment,
        e.g. package version numbers to apply.

        :param lock_doc: (str) in JSON format.
        :returns: True when self.target_model is at the correct state.
        """
        lock_info = json.loads(lock_doc)
        if 'data_connections' in lock_info:
            for dataset_name, dataset_new_details in lock_info['data_connections'].items():
                # BUG FIX: this read `self.tabs_module`, which is never set
                # anywhere in the class — __init__ stores `target_model`.
                dataset_connection = getattr(self.target_model, dataset_name)
                for k, v in dataset_new_details.items():
                    # (removed leftover debug print of every key/value pair)
                    setattr(dataset_connection, k, v)
        return True
|
17,401 | 4c875283c8aed2a3e9ae18e415a2e59aa13cd122 | #Importerar Tkinter (GUI), PIL (Bilder), PyPDF2 (För att läsa & skriva PDF), reportlab (För att skapa PDF), tkcalendar (Datepicker) och MySQL Connector (SQL)
from tkinter import *
from tkinter import ttk, messagebox
from PIL import ImageTk,Image
from PyPDF2 import PdfFileWriter, PdfFileReader
import io
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter
import PIL
import mysql.connector
from tkinter.simpledialog import askstring
from tkinter import filedialog
from tkcalendar import DateEntry
from datetime import datetime,date
import os
import traceback
from python_mysql_dbconfig import read_db_config
# Create and name the main window, and fix its size.
root = Tk()
root.title("T-schakts rapportgenerator")
root.geometry("800x340")
root.resizable(False, False)
# Read the database connection settings from a config.ini file.
db_config=read_db_config()
#Database helper class wrapping the connector's built-in operations as methods.
class DB():
    """Thin convenience wrapper around a mysql.connector connection."""

    def __init__(self, db_local):
        # Set to None first so __del__ is safe even if connect() raises.
        self.connection = None
        self.connection = mysql.connector.connect(**db_local)

    def query(self, sql, args):
        """Execute sql with args on a fresh cursor and return the cursor.

        The caller is responsible for closing the returned cursor.
        """
        markor = self.connection.cursor()
        markor.execute(sql, args)
        return markor

    def fetch(self, sql, args):
        """Run the query and return every row (empty list when none)."""
        markor = self.query(sql, args)
        rader = markor.fetchall() if markor.with_rows else []
        markor.close()
        return rader

    def fetchone(self, sql, args):
        """Run the query and return the first row, or None."""
        markor = self.query(sql, args)
        rad = markor.fetchone() if markor.with_rows else None
        markor.close()
        return rad

    def insert(self, sql, args):
        """Run an INSERT, commit, and return the new row id."""
        markor = self.query(sql, args)
        ny_rad_id = markor.lastrowid
        self.connection.commit()
        markor.close()
        return ny_rad_id

    def update(self, sql, args):
        """Run an UPDATE, commit, and return the affected-row count."""
        markor = self.query(sql, args)
        antal = markor.rowcount
        self.connection.commit()
        markor.close()
        return antal

    def __del__(self):
        # Close the connection when the object is garbage collected.
        if self.connection is not None:
            self.connection.close()
#GUI class: the window layout and most of the application behaviour live here.
class GUI:
    """
    Main application window.

    Lets the user search owners ("delägare"), machines and accessories
    ("tillbehör") in the database and generate two PDF reports
    (environmental declaration and machine presentation) by stamping text
    onto PDF templates with reportlab and merging them with PyPDF2.
    """

    def __init__(self, master):
        """Build all widgets of the Home tab and load the owner list."""
        # Frame that everything else is placed in.
        home = Frame(master)
        home.pack()
        # Search entries: typing filters the matching listbox live.
        self.EntMedlemsnummer = Entry(home, width=5, text="Medlemsnummer")
        self.EntMedlemsnummer.grid(row=1, column=1, sticky=W, pady=(10, 0), padx=(10, 0))
        self.EntMedlemsnummer.bind("<KeyRelease>", lambda args: self.hamtaDelagareFranEntry())
        self.EntMaskinnummer = Entry(home, width=5, text="Maskinnummer")
        self.EntMaskinnummer.grid(row=1, column=3, sticky=W, pady=(10, 0), padx=(10, 0))
        self.EntMaskinnummer.bind("<KeyRelease>", lambda args: self.hamtaMaskinerFranEntry())
        # Read-only display of the driver linked to the selected machine.
        self.lblForare = Label(home, text="Kopplad förare.")
        self.lblForare.grid(column=5, row=3, sticky=N, pady=(10, 0))
        self.entForare = Entry(home, state=DISABLED)
        self.entForare.grid(column=5, row=3, columnspan=2, sticky=W + E + S, padx=(10, 0), pady=(10, 0))
        # The three listboxes: owners, machines and accessories.
        self.LbDelagare = Listbox(home, width=60, height=15, exportselection=0)
        self.LbDelagare.grid(row=2, column=1, columnspan=2, rowspan=2, pady=(10, 0), padx=(10, 0))
        self.LbDelagare.bind('<<ListboxSelect>>', lambda x: self.hamtaAllaMaskiner())
        self.LblDelagare = Label(home, text="Delägare")
        self.LblDelagare.grid(row=1, column=1, pady=(10, 0), padx=(0, 0), sticky=E)
        self.LbMaskiner = Listbox(home, width=30, height=15, exportselection=0)
        self.LbMaskiner.grid(row=2, column=3, columnspan=2, rowspan=2, pady=(10, 0), padx=(10, 0))
        self.LbMaskiner.bind('<<ListboxSelect>>', lambda args: self.fyllTillbehorOchForare())
        self.LblMaskiner = Label(home, text="Maskiner")
        self.LblMaskiner.grid(row=1, column=4, pady=(10, 0), padx=(0, 0), sticky=W)
        self.LbTillbehor = Listbox(home, width=30, exportselection=0)
        self.LbTillbehor.grid(row=2, column=5, columnspan=2, pady=(10, 0), padx=(10, 0), sticky=N + S + W + E)
        # Scrollbars wired to their listboxes.
        self.ScbDelagare = Scrollbar(home, orient="vertical")
        self.ScbDelagare.grid(row=2, column=2, sticky=N + S + E, rowspan=2)
        self.ScbDelagare.config(command=self.LbDelagare.yview)
        self.ScbDMaskiner = Scrollbar(home, orient="vertical")
        self.ScbDMaskiner.grid(row=2, column=4, sticky=N + S + E, rowspan=2)
        self.ScbDMaskiner.config(command=self.LbMaskiner.yview)
        self.ScbTillbehor = Scrollbar(home, orient="vertical")
        self.ScbTillbehor.grid(row=2, column=6, sticky=N + S + E)
        self.ScbTillbehor.config(command=self.LbTillbehor.yview)
        self.LbDelagare.config(yscrollcommand=self.ScbDelagare.set)
        self.LbMaskiner.config(yscrollcommand=self.ScbDMaskiner.set)
        self.LbTillbehor.config(yscrollcommand=self.ScbTillbehor.set)
        # Report buttons and the accessory/driver search controls.
        self.BtnMiljodeklaration = Button(home, text="Miljödeklaration", command=lambda: self.miljodeklaration())
        self.BtnMiljodeklaration.grid(row=4, column=0, pady=(10, 0), padx=(10, 15), sticky=W, columnspan=2)
        self.BtnMaskinpresentation = Button(home, text="Maskinpresentation", command=lambda: self.maskinpresentation())
        self.BtnMaskinpresentation.grid(row=4, column=1, pady=(10, 0), padx=(0, 140), sticky=E, columnspan=2)
        self.EntSokTillbehor = Entry(home, width=15)
        self.EntSokTillbehor.grid(row=4, column=3, columnspan=2, sticky=E, pady=(10, 0), padx=(0, 90))
        self.BtnSokTillbehor = Button(home, text=("Sök tillbehör"), command=self.hamtaMaskinerGenomTillbehor)
        self.BtnSokTillbehor.grid(row=4, column=4, sticky=E, pady=(10, 0), padx=(0, 10))
        self.entSokForare = Entry(home)
        self.entSokForare.grid(row=4, column=5, sticky=E, pady=(10, 0), padx=(10, 0))
        self.btnSokForare = Button(home, text=("Sök förare"), command=self.hamtaMaskinerGenomForare)
        self.btnSokForare.grid(row=4, column=6, sticky=E, pady=(10, 0), padx=(10, 0))
        self.LblTillbehor = Label(home, text="Tillbehör")
        self.LblTillbehor.grid(row=1, column=5, pady=(10, 0), padx=(10, 0), sticky=E)
        self.fyllListboxDelagare()

    # ------------------------------------------------------------------
    # Private helpers (de-duplicate the row formatting that was repeated
    # five times in the original listbox-filling methods).
    # ------------------------------------------------------------------
    def _valt_nummer(self, listboxrad):
        """Return the leading number of a listbox row (text before the first space)."""
        return listboxrad[:listboxrad.index(" ")]

    def _formatera_maskinrad(self, rad):
        """Format one (Maskinnummer, MarkeModell, Arsmodell) row for display."""
        nummer, modell, arsmodell = rad
        s = str(nummer)
        if modell not in (None, ""):
            s += " - " + str(modell)
        if arsmodell in (None, ""):
            s += " "
        else:
            s += " - " + str(arsmodell)
        return s

    def _fyll_maskinlista(self, rader):
        """Replace the contents of LbMaskiner with the formatted rows."""
        self.LbMaskiner.delete(0, "end")
        for rad in rader:
            self.LbMaskiner.insert("end", self._formatera_maskinrad(rad))

    def _formatera_delagarrad(self, rad):
        """Format one (Medlemsnummer, Fornamn, Efternamn, Foretagsnamn) row."""
        nummer, fornamn, efternamn, foretag = rad
        s = str(nummer)
        if fornamn not in (None, ""):
            s += " - " + str(fornamn)
        s += " " + ("" if efternamn is None else str(efternamn))
        s += " - " + str(foretag)
        return s

    def _rita_janej(self, c, x, y, varde):
        """Draw "Ja" for 1 and "Nej" for 0 at (x, y); draw nothing otherwise."""
        if varde == 1:
            c.drawString(x, y, "Ja")
        elif varde == 0:
            c.drawString(x, y, "Nej")

    def _rita_kanske_liten(self, c, x, y, text):
        """Draw text at (x, y), dropping to font size 9 for texts of 25+ chars."""
        if text == "":
            return
        if len(text) < 25:
            c.drawString(x, y, str(text))
        else:
            c.setFontSize(9)
            c.drawString(x, y, str(text))
            c.setFontSize(11)

    # ------------------------------------------------------------------
    # Search / listbox-filling callbacks.
    # ------------------------------------------------------------------
    def hamtaMaskinerGenomForare(self):
        """Fill LbMaskiner with machines whose linked driver matches the search text."""
        sokt = self.entSokForare.get()
        # BUG FIX: the empty check previously ran after '%' had been appended,
        # so the error dialog could never appear.
        if len(sokt) == 0:
            messagebox.showerror("Fel", "Du måste skriva i något i förar sökrutan.")
            return
        sql_query = """SELECT Maskinnummer, MarkeModell, Arsmodell FROM maskinregister WHERE forarid in (select forarid from forare where namn like %s)"""
        databas = DB(db_config)
        result = databas.fetch(sql_query, ('{}%'.format(sokt),))
        self._fyll_maskinlista(result)

    def hamtaMaskinerGenomTillbehor(self):
        """Fill LbMaskiner with machines that have an accessory matching the search text."""
        self.LbTillbehor.delete(0, 'end')
        sokt = self.EntSokTillbehor.get()
        # BUG FIX: same always-false empty check as hamtaMaskinerGenomForare.
        if len(sokt) == 0:
            messagebox.showerror("Fel", "Du måste skriva i något i tillbehörs sökrutan.")
            return
        sql_query = """SELECT Maskinnummer, MarkeModell, Arsmodell FROM maskinregister WHERE maskinnummer in (select maskinnummer from tillbehor where tillbehor like %s)"""
        databas = DB(db_config)
        result = databas.fetch(sql_query, ('{}%'.format(sokt),))
        self._fyll_maskinlista(result)

    def hamtaAllaMaskiner(self):
        """Show the selected owner's machines in LbMaskiner."""
        selectedDelagare = self.LbDelagare.get(self.LbDelagare.curselection())
        delagare = self._valt_nummer(selectedDelagare)
        self.LbTillbehor.delete(0, 'end')
        sql_query = """SELECT Maskinnummer, MarkeModell, Arsmodell FROM maskinregister WHERE Medlemsnummer = %s"""
        # BUG FIX: `result` used to be unbound (NameError) if the query failed.
        result = []
        try:
            databas = DB(db_config)
            result = databas.fetch(sql_query, (delagare,))
        except mysql.connector.Error:
            pass
        self._fyll_maskinlista(result)

    def fyllListboxDelagare(self):
        """Load every owner from the database into LbDelagare (run at startup)."""
        sql = "SELECT Medlemsnummer, Fornamn, Efternamn, Foretagsnamn FROM foretagsregister"
        self.LbDelagare.delete(0, 'end')
        # BUG FIX: `delagareLista` used to be unbound if the connection failed.
        delagareLista = []
        try:
            databas = DB(db_config)
            delagareLista = databas.fetch(sql, None)
        except mysql.connector.Error:
            pass
        for rad in delagareLista:
            self.LbDelagare.insert("end", self._formatera_delagarrad(rad))

    def hamtaDelagareFranEntry(self):
        """Filter LbDelagare by the member-number digits typed so far."""
        entry = '{}%'.format(self.EntMedlemsnummer.get())
        sql_query = """SELECT Medlemsnummer, Fornamn, Efternamn, Foretagsnamn FROM foretagsregister WHERE Medlemsnummer LIKE %s"""
        delagareLista = []
        try:
            databas = DB(db_config)
            delagareLista = databas.fetch(sql_query, (entry,))
        except mysql.connector.Error:
            pass
        self.LbDelagare.delete(0, 'end')
        for rad in delagareLista:
            self.LbDelagare.insert("end", self._formatera_delagarrad(rad))

    def hamtaMaskinerFranEntry(self):
        """Filter LbMaskiner by the machine-number digits typed so far."""
        entry = '{}%'.format(self.EntMaskinnummer.get())
        sql_query = """SELECT Maskinnummer, MarkeModell, Arsmodell FROM maskinregister WHERE Maskinnummer LIKE %s"""
        databas = DB(db_config)
        result = databas.fetch(sql_query, (entry,))
        # Consistency fix: uses the shared row formatter (the old copy inserted
        # a stray space when the model column was empty).
        self._fyll_maskinlista(result)

    # ------------------------------------------------------------------
    # PDF reports.
    # ------------------------------------------------------------------
    def miljodeklaration(self):
        """Create the "Miljödeklaration" PDF for the selected machine and open it."""
        maskinnummer = ""
        try:
            maskinnummer = self.LbMaskiner.get(self.LbMaskiner.curselection())
        except TclError:
            # No selection; handled by the length check below.
            pass
        if len(maskinnummer) == 0:
            messagebox.showerror("Fel", "Ingen maskin är vald.")
            return
        maskin = self._valt_nummer(maskinnummer)
        maskin_sql_query = """select * from maskinregister where maskinnummer = %s"""
        databas = DB(db_config)
        maskin_resultat = databas.fetchone(maskin_sql_query, (maskin,))
        delagare_sql_query = """SELECT Fornamn, Efternamn, Foretagsnamn, Gatuadress, Postnummer, Postadress FROM foretagsregister WHERE Medlemsnummer = %s"""
        delagarInfoLista = databas.fetchone(delagare_sql_query, (maskin_resultat[4],))
        forsakring_sql_query = """SELECT forsakringsgivare FROM forsakringsgivare WHERE idforsakringsgivare = '1'"""
        forsakring = databas.fetchone(forsakring_sql_query, None)
        # Replace NULL columns with empty strings so nothing renders as "None".
        # BUG FIX: the old loops iterated over range() and compared the *index*
        # to None, so they never normalised anything — and would have crashed
        # trying to assign into a row tuple.
        maskin_resultat = ["" if falt is None else falt for falt in maskin_resultat]
        delagarInfoLista = ["" if falt is None else falt for falt in delagarInfoLista]
        packet = io.BytesIO()
        c = canvas.Canvas(packet, pagesize=letter)
        c.setFontSize(11)
        # Top section: owner and machine identification.
        c.drawString(130, 722, str(maskin_resultat[4]))
        c.drawString(130, 702, str(delagarInfoLista[2]))
        c.drawString(130, 682, str(delagarInfoLista[0]))
        c.drawString(195, 682, str(delagarInfoLista[1]))
        c.drawString(130, 662, str(delagarInfoLista[3]))
        c.drawString(130, 642, str(delagarInfoLista[4]))
        c.drawString(190, 642, str(delagarInfoLista[5]))
        c.drawString(470, 722, str(maskin_resultat[0]))
        c.drawString(458, 702, str(maskin_resultat[1]))
        if maskin_resultat[6] != "":
            c.drawString(458, 682, str(maskin_resultat[6]))
        c.drawString(458, 662, str(maskin_resultat[26]))
        if maskin_resultat[2] != "":
            c.drawString(458, 642, str(maskin_resultat[2]))
        c.drawString(458, 622, str(maskin_resultat[27]))
        # Engine.
        c.drawString(50, 540, str(maskin_resultat[8]))
        c.drawString(160, 540, str(maskin_resultat[9]))
        if maskin_resultat[10] != "":
            c.drawString(270, 540, str(maskin_resultat[10]))
        # Retrofitted exhaust-cleaning equipment: 1/0 flags rendered as Ja/Nej.
        self._rita_janej(c, 50, 482, maskin_resultat[14])
        self._rita_janej(c, 120, 482, maskin_resultat[15])
        self._rita_janej(c, 195, 482, maskin_resultat[12])
        self._rita_janej(c, 280, 482, maskin_resultat[11])
        # Noise level.
        c.drawString(340, 482, str(maskin_resultat[29]))
        c.drawString(430, 482, str(maskin_resultat[31]))
        # Oils and lubricants - volume, litres. Long names get a smaller font.
        self._rita_kanske_liten(c, 50, 417, maskin_resultat[16])
        self._rita_kanske_liten(c, 50, 385, maskin_resultat[18])
        self._rita_kanske_liten(c, 50, 355, maskin_resultat[20])
        c.drawString(50, 325, str(maskin_resultat[24]))
        c.drawString(205, 420, str(maskin_resultat[17]))
        c.drawString(205, 390, str(maskin_resultat[19]))
        c.drawString(205, 360, str(maskin_resultat[21]))
        # Environmental classification.
        c.drawString(340, 420, str(maskin_resultat[30]))
        self._rita_janej(c, 345, 330, maskin_resultat[22])
        # Miscellaneous.
        c.drawString(50, 244, str(maskin_resultat[13]))
        self._rita_janej(c, 125, 244, maskin_resultat[37])
        c.drawString(205, 244, str(maskin_resultat[25]))
        self._rita_janej(c, 375, 244, maskin_resultat[35])
        # NOTE(review): columns 38 and 39 are both drawn at (470, 210) and will
        # overlap — confirm the intended coordinates against the PDF template.
        c.drawString(470, 210, str(maskin_resultat[38]))
        self._rita_kanske_liten(c, 50, 210, maskin_resultat[33])
        c.drawString(205, 210, str(maskin_resultat[34]))
        self._rita_janej(c, 375, 210, maskin_resultat[36])
        c.drawString(470, 210, str(maskin_resultat[39]))
        # Fuel.
        c.drawString(50, 155, str(maskin_resultat[23]))
        # Insurance.
        if maskin_resultat[3] == 1:
            c.drawString(50, 102, forsakring[0])
        # BUG FIX: the dash and column 42 used to be drawn even when column 7
        # was NULL; only draw the group when a policy number exists.
        if maskin_resultat[7] != "":
            c.drawString(240, 102, str(maskin_resultat[7]))
            c.drawString(305, 102, "-")
            c.drawString(315, 102, str(maskin_resultat[42]))
        # Date.
        c.drawString(435, 52, str(datetime.date(datetime.now())))
        c.save()
        # Merge the generated overlay onto the template and open the result.
        packet.seek(0)
        new_pdf = PdfFileReader(packet)
        filnamn = "Miljödeklaration - " + str(maskin) + ".pdf"
        with open("PDFMallar/Miljödeklaration.pdf", "rb") as mall:
            existing_pdf = PdfFileReader(mall)
            output = PdfFileWriter()
            page = existing_pdf.getPage(0)
            page.mergePage(new_pdf.getPage(0))
            output.addPage(page)
            with open(filnamn, "wb") as outputStream:
                output.write(outputStream)
        # os.startfile is Windows-only.
        os.startfile(filnamn)

    def maskinpresentation(self):
        """Create the "Maskinpresentation" PDF for the selected machine and open it."""
        maskinnummer = ""
        try:
            maskinnummer = self.LbMaskiner.get(self.LbMaskiner.curselection())
        except TclError:
            pass
        if len(maskinnummer) == 0:
            messagebox.showerror("Fel", "Ingen maskin är vald.")
            return
        maskin = self._valt_nummer(maskinnummer)
        maskin_sql_query = """SELECT Medlemsnummer, MarkeModell, Arsmodell, Registreringsnummer, ME_Klass, Maskintyp, Forarid FROM maskinregister WHERE Maskinnummer = %s"""
        # BUG FIX: a failed connection used to be swallowed by "except: pass",
        # which then crashed with NameError on the unbound variables below.
        try:
            databas = DB(db_config)
            maskin_resultat = databas.fetchone(maskin_sql_query, (maskin,))
        except mysql.connector.Error:
            messagebox.showerror("Fel", "Kunde inte hämta maskinen från databasen.")
            return
        foretags_sql_query = """SELECT Foretagsnamn FROM foretagsregister WHERE medlemsnummer = %s"""
        foretag = databas.fetchone(foretags_sql_query, (str(maskin_resultat[0]),))
        tillbehor_sql_query = """SELECT tillbehor FROM tillbehor WHERE Maskinnummer =%s"""
        tillbehor = databas.fetch(tillbehor_sql_query, (maskin,))
        bild_sql_query = """SELECT sokvag FROM bilder WHERE Maskinnummer = %s order by bildid desc LIMIT 1;"""
        bild = databas.fetchone(bild_sql_query, (maskin,))
        if maskin_resultat[6] is not None:
            forare_sql_query = """select namn from forare where forarid = %s"""
            forarnamn = databas.fetchone(forare_sql_query, (str(maskin_resultat[6]),))
            referens_sql_query = """SELECT Beskrivning FROM referens WHERE forarid = %s"""
            referenser = list(databas.fetch(referens_sql_query, (str(maskin_resultat[6]),)))
        else:
            forarnamn = None
            referenser = None
        packet = io.BytesIO()
        c = canvas.Canvas(packet, pagesize=letter)
        # Latest photo of the machine, if one is registered.
        if bild is not None:
            c.drawImage(bild[0], 72, 134, 450, 340)
        # Header fields; NULL columns are skipped rather than printed as "None".
        if maskin_resultat[0] is not None:
            c.drawString(133, 710, str(maskin_resultat[0]))
        if maskin_resultat[1] is not None:
            c.drawString(455, 690, str(maskin_resultat[1]))
        if maskin_resultat[2] is not None:
            c.drawString(455, 670, str(maskin_resultat[2]))
        if maskin_resultat[3] is not None:
            c.drawString(455, 650, str(maskin_resultat[3]))
        if maskin_resultat[4] is not None:
            c.drawString(455, 630, str(maskin_resultat[4]))
        if maskin_resultat[5] is not None:
            c.drawString(455, 610, str(maskin_resultat[5]))
        if forarnamn is not None:
            c.drawString(133, 670, str(forarnamn[0]))
        if foretag[0] is not None:
            c.drawString(133, 690, str(foretag[0]))
        c.drawString(470, 712, str(maskin))
        # Spread the comma-separated accessory list over up to five lines,
        # three per line; the fifth line takes any overflow.
        rad1 = rad2 = rad3 = rad4 = rad5 = ""
        y = 1
        for counter, tillbehorsrad in enumerate(tillbehor, start=1):
            s = tillbehorsrad[0]
            if counter != len(tillbehor):
                s += ", "
            if y > 12:
                rad5 += s
            elif y > 9:
                y += 1
                rad4 += s
            elif y > 6:
                y += 1
                rad3 += s
            elif y > 3:
                y += 1
                rad2 += s
            else:
                y += 1
                rad1 += s
        c.drawString(142, 561, str(rad1))
        c.drawString(142, 541, str(rad2))
        c.drawString(142, 521, str(rad3))
        c.drawString(142, 501, str(rad4))
        c.drawString(142, 481, str(rad5))
        # BUG FIX: only index referenser[1] when a second reference exists
        # (used to raise IndexError for drivers with a single reference).
        if referenser:
            c.drawString(152, 112, str(referenser[0][0]))
            if len(referenser) > 1:
                c.drawString(152, 86, str(referenser[1][0]))
        c.save()
        packet.seek(0)
        new_pdf = PdfFileReader(packet)
        # TODO: support custom (server) paths for saving the documents.
        filnamn = os.path.join("Maskinpresentationer", "Maskinpresentation - " + maskin + ".pdf")
        with open("PDFMallar/Maskinpresentation.pdf", "rb") as mall:
            existing_pdf = PdfFileReader(mall)
            output = PdfFileWriter()
            page = existing_pdf.getPage(0)
            page.mergePage(new_pdf.getPage(0))
            output.addPage(page)
            with open(filnamn, "wb") as outputStream:
                output.write(outputStream)
        # Open the finished document (os.startfile is Windows-only).
        os.startfile(filnamn)

    def fyllTillbehorOchForare(self):
        """Fill LbTillbehor and the driver field when a machine is selected."""
        sql = "SELECT Tillbehor FROM tillbehor WHERE Maskinnummer =%s"
        sql_forare = """select namn from forare where forarid = (select forarid from maskinregister where maskinnummer =%s)"""
        maskinnummer = self.LbMaskiner.get(self.LbMaskiner.curselection())
        maskin = self._valt_nummer(maskinnummer)
        databas = DB(db_config)
        tillbehor_resultat = databas.fetch(sql, (maskin,))
        forare_namn = databas.fetchone(sql_forare, (maskin,))
        self.LbTillbehor.delete(0, 'end')
        for rad in tillbehor_resultat:
            self.LbTillbehor.insert('end', rad[0])
        # The entry is kept DISABLED for the user; enable briefly to update it.
        self.entForare.config(state=NORMAL)
        self.entForare.delete(0, 'end')
        if forare_namn is not None:
            self.entForare.insert(0, forare_namn[0])
        self.entForare.config(state=DISABLED)
# Runs only when this file is executed as the main program; if the module is
# ever imported from another file, nothing below executes automatically.
if __name__ == "__main__":
    # Instantiate the GUI inside the root window.
    Gui = GUI(root)
    # Keeps the window running — do not remove or move!
    root.mainloop()
|
17,402 | 79275ae0ff9bce9fd4806a50ac62ca7ae4daf8ca | import nltk
import pickle
import random
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix, classification_report
# Three inputs: model, X_test and y_test
def model_Evaluate(model, X_test, y_test):
    """
    Evaluate a fitted binary sentiment classifier on the test split.

    Prints the classification report, then plots the confusion matrix as a
    heatmap annotated with group names and percentages of the total.

    :param model: fitted estimator exposing .predict()
    :param X_test: test features
    :param y_test: true labels for X_test
    """
    # BUG FIX: np and sns were used below but never imported anywhere in
    # this file, so the function raised NameError on first call.
    import numpy as np
    import seaborn as sns

    # Predict values for the test dataset.
    y_pred = model.predict(X_test)
    # Print the evaluation metrics for the dataset.
    print(classification_report(y_test, y_pred))
    # Compute and plot the confusion matrix.
    cf_matrix = confusion_matrix(y_test, y_pred)
    categories = ['Negative', 'Positive']
    group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
    # Cell percentages relative to the total number of test samples.
    group_percentages = ['{0:.2%}'.format(value)
                         for value in cf_matrix.flatten() / np.sum(cf_matrix)]
    labels = [f'{v1}\n{v2}' for v1, v2 in zip(group_names, group_percentages)]
    labels = np.asarray(labels).reshape(2, 2)
    sns.heatmap(cf_matrix, annot=labels, cmap='Blues', fmt='',
                xticklabels=categories, yticklabels=categories)
    plt.xlabel("Predicted values", fontdict={'size': 14}, labelpad=10)
    plt.ylabel("Actual values", fontdict={'size': 14}, labelpad=10)
    plt.title("Confusion Matrix", fontdict={'size': 18}, pad=20)
# NOTE(review): model_Evaluate takes (model, X_test, y_test) but is called
# with a single argument here — and `model` is not defined in this file.
# This line would raise as written; confirm where the arguments were meant
# to come from.
model_Evaluate(model)
17,403 | 505cf442f640f068c196d1cfa907593738f52385 | """Support for IPX800 switches."""
import logging
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.components.switch import SwitchEntity
from pypx800 import *
from .device import *
from .const import *
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the IPX800 switches.

    Collapses the three copy-pasted add-entities calls into one loop over a
    device-type -> entity-class mapping; each type is still added in its own
    async_add_entities call, in the same order as before.
    """
    entity_classes = {
        TYPE_RELAY: RelaySwitch,
        TYPE_VIRTUALOUT: VirtualOutSwitch,
        TYPE_VIRTUALIN: VirtualInSwitch,
    }
    for device_type, entity_class in entity_classes.items():
        async_add_entities(
            [
                entity_class(device)
                for device in discovery_info
                if device.get("config").get(CONF_TYPE) == device_type
            ],
            True,  # request an immediate state refresh
        )
class RelaySwitch(IpxDevice, SwitchEntity):
    """Representation of a IPX Switch through relay."""

    def __init__(self, ipx_device):
        super().__init__(ipx_device)
        # pypx800 Relay object used to command the physical output.
        self.control = Relay(self.controller.ipx, self._id)

    @property
    def is_on(self) -> bool:
        """Return True when the coordinator reports relay "R<id>" as 1."""
        return self.coordinator.data[f"R{self._id}"] == 1

    def turn_on(self, **kwargs):
        """Switch the relay on; state updates arrive via the coordinator."""
        self.control.on()

    def turn_off(self, **kwargs):
        """Switch the relay off."""
        self.control.off()
class VirtualOutSwitch(IpxDevice, SwitchEntity):
    """Representation of a IPX Virtual Out."""

    def __init__(self, ipx_device):
        super().__init__(ipx_device)
        # pypx800 virtual output used to command the IPX800.
        self.control = VOutput(self.controller.ipx, self._id)

    @property
    def is_on(self) -> bool:
        """Return True when the coordinator reports "VO<id>" as 1."""
        return self.coordinator.data[f"VO{self._id}"] == 1

    def turn_on(self, **kwargs):
        """Set the virtual output on."""
        self.control.on()

    def turn_off(self, **kwargs):
        """Set the virtual output off."""
        self.control.off()

    def toggle(self, **kwargs):
        """Invert the virtual output state."""
        self.control.toggle()
class VirtualInSwitch(IpxDevice, SwitchEntity):
    """Representation of a IPX Virtual In."""

    def __init__(self, ipx_device):
        super().__init__(ipx_device)
        # pypx800 virtual input used to command the IPX800.
        self.control = VInput(self.controller.ipx, self._id)

    @property
    def is_on(self) -> bool:
        """Return True when the coordinator reports "VI<id>" as 1."""
        return self.coordinator.data[f"VI{self._id}"] == 1

    def turn_on(self, **kwargs):
        """Set the virtual input on."""
        self.control.on()

    def turn_off(self, **kwargs):
        """Set the virtual input off."""
        self.control.off()

    def toggle(self, **kwargs):
        """Invert the virtual input state."""
        self.control.toggle()
|
17,404 | bbbd50a40349d383590b984fa46a1b5b3d621b06 | import dataclasses
from reviews.notifications import PullRequestNotification
def test_model_with_required_fields():
    """A notification built from only the required fields round-trips to a dict."""
    expected = {
        "org": "apoclyps",
        "repository": "Code Review Manager",
        "name": "Pull Request Approved",
        "number": 1,
    }
    model = PullRequestNotification(**expected)
    assert dataclasses.asdict(model) == expected
|
17,405 | 8f5cd513f1f556032c1f64b1e89dd1acbc631165 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-26 07:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Payments/PaymentHistory models and auto-sets timestamps.

    Auto-generated by Django 1.10.6; applied on top of the previous
    serverctl migration.
    """

    dependencies = [
        ('serverctl', '0004_auto_20170326_0550'),
    ]

    operations = [
        migrations.CreateModel(
            name='PaymentHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'CREATED' = billed, 'PAID' = settled (Japanese display labels).
                ('type', models.CharField(choices=[('CREATED', '請求'), ('PAID', '決済')], max_length=12)),
            ],
        ),
        migrations.CreateModel(
            name='Payments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('amount', models.IntegerField(default=0)),
                ('paid', models.BooleanField(default=False)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='serverctl.GameServerGroup')),
                ('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='serverctl.Player')),
            ],
        ),
        # Make both creation timestamps populate automatically on insert.
        migrations.AlterField(
            model_name='gameserver',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='serverhistory',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True),
        ),
        # Added after both CreateModel operations because PaymentHistory and
        # Payments are created in this same migration.
        migrations.AddField(
            model_name='paymenthistory',
            name='payment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='serverctl.Payments'),
        ),
    ]
|
17,406 | 8d4bb95dc0c53e90ff93a2931fbe928676fa3eda | #Embedded file name: carbon/client/script/entities\simpleTestClient.py
"""
A module providing a simple test component and service, in part as an example of how to
create one, but also as placeholder for developing and testing other CEF systems.
Provides:
class SimpleTestClientComponent
class SimpleTestClient
See also:
simpleTestServer.py
"""
import service
import collections
class SimpleTestClientComponent:
    """Placeholder client-side component used for CEF development/testing."""

    __guid__ = 'entity.SimpleTestClientComponent'

    def __init__(self):
        """Start every new component in the same well-known default state."""
        self.someState = 'DefaultState'
class SimpleTestClient(service.Service):
    """Client-side counterpart of the simple test service (see simpleTestServer.py)."""

    __guid__ = 'svc.simpleTestClient'
    __notifyevents__ = []
    __componentTypes__ = ['simpleTestComponent']

    def Run(self, *etc):
        """Start the underlying service and flag this instance as running."""
        service.Service.Run(self, etc)
        self.Running = True

    def CreateComponent(self, name, state):
        """Build a client component from the server's packed-up state.

        ``state`` is the dictionary created by the server component's
        ``PackUpForClientTransfer`` method; its entries become attributes.
        """
        component = SimpleTestClientComponent()
        component.__dict__.update(state)
        return component

    def PrepareComponent(self, sceneID, entityID, component):
        """No client-side preparation is needed for this test component."""
        pass

    def SetupComponent(self, entity, component):
        """Mark the component as fully set up."""
        component.isSetup = True

    def RegisterComponent(self, entity, component):
        """No registration bookkeeping is required for this component."""
        pass

    def ReportState(self, component, entity):
        """Return the component state as an attribute-name-sorted OrderedDict."""
        return collections.OrderedDict(sorted(component.__dict__.items()))

    def UnRegisterComponent(self, entity, component):
        """No unregistration bookkeeping is required."""
        pass

    def PreTearDownComponent(self, entity, component):
        """Flag the component as no longer set up before teardown starts."""
        component.isSetup = False

    def TearDownComponent(self, entity, component):
        """Nothing to clean up for this test component."""
        pass
|
17,407 | ae43f21c69ef4687f8052251485bc6573f94c406 | import requests
import sqlite3
from bs4 import BeautifulSoup
import json
import time
SCHOOL = 'michigan'
ECE_URL = 'https://ece.engin.umich.edu/people/directory/faculty/'
CSE_URL = 'https://cse.engin.umich.edu/people/faculty/'
headers = {'User-Agent': 'UMSI 507 Course Project - Python Web Scraping'}
time_now = time.strftime('%Y-%m-%d',time.localtime(time.time()))
def check_data():
    """Fetch University of Michigan ECE/CSE faculty data, with monthly caching.

    Reads ``cache/michigan.json`` if present; when the cache is from the
    current month ("YYYY-MM" prefix) and non-empty it is used as-is.
    Otherwise the ECE and CSE directory pages are scraped, the cache file is
    rewritten, and the ``faculty`` rows for this school in ``faculty.sqlite``
    are replaced.

    Fixes relative to the previous version: file handles and the DB
    connection are closed via context managers / ``finally``, the bare
    ``except`` clauses are narrowed, and the INSERT is parameterized (the
    old f-string SQL was injectable and broke on values containing quotes).
    """
    try:
        with open('cache/michigan.json', 'r') as cache_file:
            faculty = json.loads(cache_file.read())
    except (OSError, ValueError):
        # Missing or corrupt cache: start from an empty structure.
        faculty = {'cache_time': time_now, 'total_number': 0, 'detail': []}

    if faculty['cache_time'][:7] != time_now[:7] or faculty['total_number'] == 0:
        print('University of Michigan: Fetching from website...')
        faculty['cache_time'] = time_now
        ece_response = requests.get(ECE_URL, headers=headers)
        cse_response = requests.get(CSE_URL, headers=headers)
        ece_soup = BeautifulSoup(ece_response.text, 'html.parser')
        cse_soup = BeautifulSoup(cse_response.text, 'html.parser')
        # Both directory pages share the same markup, so parse them alike.
        for soup in [ece_soup, cse_soup]:
            people_lists_html = soup.find_all('div', class_='eecs_person_copy')
            for person in people_lists_html:
                name = person.find('h4').text.split(', ')
                lastname = name[0]
                firstname = name[1]
                title = person.find('span', class_='person_title_section').text
                # find() returns None when the element is absent, so .text
                # raises AttributeError (was a bare except before).
                try:
                    research_interests = person.find('span', class_='person_copy_section pcs_tall').text
                except AttributeError:
                    research_interests = None
                try:
                    web = person.find('a', class_='person_web').text
                except AttributeError:
                    web = None
                # The page obfuscates e-mail addresses in a small JS snippet;
                # reassemble the address from its "one"/"two" fragments.
                email_script = str(person.find('script'))
                email = email_script[email_script.index('one')+7:email_script.index('two')-6] + '@' + email_script[email_script.index('two')+7:email_script.index('document')-2]
                faculty['total_number'] += 1
                faculty['detail'].append({
                    'firstname': firstname,
                    'lastname': lastname,
                    'title': title,
                    'research_interests': research_interests,
                    'personal_web': web,
                    'email': email
                })
        with open('cache/michigan.json', 'w') as cache_file:
            cache_file.write(json.dumps(faculty))
        print('Updating database...')
        connection = sqlite3.connect('faculty.sqlite')
        try:
            cursor = connection.cursor()
            cursor.execute('''
                DELETE FROM faculty
                WHERE SchoolId in (SELECT Id from school WHERE name = "michigan")''')
            connection.commit()
            # Parameterized INSERT; note that missing fields are now stored
            # as NULL instead of the literal string "None".
            insert_sql = (
                'INSERT INTO faculty ("FirstName", "LastName", "SchoolId", "Title", '
                '"ResearchInterests", "PersonalWeb", "Email") '
                'VALUES (?, ?, 1, ?, ?, ?, ?)')
            for data in faculty['detail']:
                cursor.execute(insert_sql, (
                    data['firstname'], data['lastname'], data['title'],
                    data['research_interests'], data['personal_web'], data['email']))
            connection.commit()
        finally:
            connection.close()
    else:
        print('University of Michigan: Using cache')
|
17,408 | c18d4f856e98f725835d2e72ac6e0d5f55e19dc3 | from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import numpy as np
class PcaEV(Pipeline):
    """Principal component analysis (PCA) with the number of components
    set to reach a required explained variance.

    Parameters
    ----------
    required_ev : float (Default 0.1)
        Required Explained Variance threshold.  Must satisfy
        ``0.0 < required_ev < 1.0`` so sklearn's PCA interprets it as an
        explained-variance target rather than a component count.

    Example
    -------
    trans = PcaEV()
    trans.set_params(**{'required_ev': 0.2})
    trans.fit(X_train)
    X_new = trans.transform(X_train)

    Notes
    -----
    Equivalent to ``Pipeline([('scl', StandardScaler()), ('pca',
    PCA(n_components=required_ev, svd_solver='full'))])`` whose transformed
    outputs are down-cast to memory-friendly ``np.float16``.
    """

    def __init__(self, required_ev=0.1):
        self.required_ev = required_ev
        super().__init__(steps=[
            ('scl', StandardScaler(with_mean=True, with_std=True, copy=True)),
            # With 0 < n_components < 1 and svd_solver='full', PCA picks the
            # smallest number of components reaching that explained variance.
            ('pca', PCA(
                n_components=self.required_ev,
                svd_solver='full', whiten=False, copy=True))
        ])

    def __str__(self):
        return 'PcaEV(required_ev={}, steps={})'.format(
            self.required_ev, self.steps)

    def __repr__(self):
        return self.__str__()

    def set_params(self, **kwargs):
        """Update ``required_ev`` (any other keyword arguments are ignored).

        Returns
        -------
        self : PcaEV
            Returned to honour the sklearn estimator contract (``set_params``
            must return the estimator); the previous version returned None,
            which broke call chaining.
        """
        if 'required_ev' in kwargs:
            self.required_ev = kwargs['required_ev']
            self.steps[1][1].set_params(**{'n_components': self.required_ev})
        return self

    def transform(self, X, y=None):
        """Transform ``X`` and down-cast the result to ``np.float16``."""
        return super().transform(X).astype(np.float16)
# Module-level, ready-to-use transformer instance.
trans = PcaEV(required_ev=0.1)

# Descriptive metadata consumed by the surrounding transformer registry.
meta = {
    'id': 'dim3',
    'name': 'PCA req EV',
    'description': (
        "Number of components is determined by "
        "a required Explained Variance threshold"),
    'keywords': [
        # Fixed typo: 'anlysis' -> 'analysis' (keyword search would miss it).
        'dimensionality reduction', 'principal component analysis',
        'StandardScaler', 'PCA', 'Explained Variance'],
    'feature_names_prefix': 'dim_ev'
}

"""Example
from verto.dim3 import trans, meta
from datasets.demo1 import X_train
from seasalt import create_feature_names
import pandas as pd
import numpy as np
trans.set_params(**{'required_ev': 0.8})
X_new = trans.fit_transform(X_train).astype(np.float16)
names = create_feature_names(meta['feature_names_prefix'], X_new.shape[1])
df = pd.DataFrame(data=X_new, columns=names)
df
"""
|
17,409 | 9e6b62689523c3210608b0364205c06f35ec146d | from abc import ABC, abstractmethod
from typing import Dict, List, Any
class Template(ABC):
    """Abstract interface for a template-engine binding.

    Concrete implementations expose the underlying engine environment, its
    search paths, and the callables made available to rendered templates.
    """

    @property
    @abstractmethod
    def env(self) -> Any:
        """The underlying template environment object."""

    @property
    @abstractmethod
    def paths(self) -> List[str]:
        """Directories searched for template files."""

    @property
    @abstractmethod
    def context_functions(self) -> Dict:
        """Functions injected into every render context."""

    @property
    @abstractmethod
    def context_filters(self) -> Dict:
        """Filters injected into every render context."""

    @property
    @abstractmethod
    def filters(self) -> Dict:
        """Engine-level filters."""

    @property
    @abstractmethod
    def tests(self) -> Dict:
        """Engine-level tests."""
|
17,410 | 93f8b39f4c5081c206f8f1547e057e6f62bdb04b | # Haoxuan Li
# Student ID: 10434197
from haoxuanli_810_09.People import People
class Student(People):
    """A student: identity, major, and the courses completed so far."""

    def __init__(self, cwid: str, name: str, major: str):
        self.cwid = cwid
        self.name = name
        self.major = major
        # course name -> score earned
        self.Courses = dict()

    def say(self):
        """Polite student greeting."""
        print("Thank you professor!")

    def add_course(self, course_name: str, score: str):
        """Record (or overwrite) the score for a completed course."""
        self.Courses[course_name] = score

    def pt_show(self):
        """Return one pretty-table row: CWID, name, completed course names."""
        return [self.cwid, self.name, list(self.Courses.keys())]

    @staticmethod
    def get_fields():
        """Column headers matching :meth:`pt_show`."""
        return ["CWID", "Name", "Completed Course"]
|
17,411 | 21b0e4224538738501c3487991445a68345d9edd | def Atcoder_Crackers(n , k):
return 0 if n % k == 0 else 1
def main():
    """Read "n k" from stdin and print the minimum cracker difference."""
    n, k = (int(token) for token in input().split())
    print(Atcoder_Crackers(n, k))


if __name__ == '__main__':
    main()
17,412 | 39748d4522f9975ce329b8813645a18adf6fdb87 | from fastapi import FastAPI, Request, Form, Response
from fastapi.responses import RedirectResponse
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
import psycopg2
import psycopg2.extras
from config import configdb, configemail
from datetime import date
from mlsc_utilities import db_connect, sql_count, get_market_list, get_exchange_list
# Fast API - Initial Load
app = FastAPI()
# Serve static assets (css/js/images) from ./static under the /static route.
app.mount("/static", StaticFiles(directory="static"), name="static")
# Jinja2 templates rendered by the route handlers below.
templates = Jinja2Templates(directory="templates")
# path operation decorators
@app.get("/")
async def index(request: Request):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
exchanges = sql_count("exchanges")
strategies = sql_count("strategies")
trades = sql_count("strategies_symbol")
instruments = sql_count("instruments")
return templates.TemplateResponse("index.html", {"request": request, "exchanges": exchanges, "strategies": strategies, "trades": trades, "instruments": instruments})
@app.get("/exchanges")
async def exchanges(request: Request):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
cursor.execute("""
SELECT
id,
exchange
FROM
exchanges
""")
rows = cursor.fetchall()
return templates.TemplateResponse("exchanges.html", {"request": request, "exchanges": rows})
@app.get("/settings")
async def settings(request: Request):
params = configemail()
del params['email_password']
dbparams = configdb()
del dbparams['password']
return templates.TemplateResponse("settings.html", {"request": request, "email_settings": params, "db_settings": dbparams })
@app.get("/instruments/p/{page}")
async def instrument(request: Request, page):
markets = get_market_list()
instrument_filters = request._query_params.get('filter', False)
# Pagination
page_current = int(page)
records_per_page = 15
offset = (page_current - 1) * records_per_page
# Database
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
# Filters
cursor.execute("""
SELECT
count(*)
FROM
instruments
WHERE
market_id <> 1
""")
total_pages = cursor.fetchone()
total_pages = round(total_pages[0] / records_per_page)
cursor.execute("""
SELECT
t1.name,
t1.id id,
t2.name market,
t3.exchange
FROM
instruments t1,
markets t2,
exchanges t3
WHERE
t1.market_id = t2.id and
t1.market_id <> 1 and
t1.exchange_id = t3.id
ORDER BY
t1.market_id, t1.name
LIMIT %s
OFFSET %s
""", (records_per_page, offset,))
rows = cursor.fetchall()
pagination = {"page_current": page_current, "records_per_page": records_per_page, "offset": offset }
return templates.TemplateResponse("instruments.html", {"request": request, "instruments": rows, "total_pages": total_pages, "pagination": pagination, "markets": markets})
@app.get("/strategy/{strategy_id}")
async def strategy(request: Request, strategy_id):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
cursor.execute("""
SELECT
t1.id,
t1.symbol_id,
t1.strategy_id,
t2.name,
t1.strategy_bias,
t1.entry_point,
t1.stop_loss,
t1.take_profit,
t1.date,
t1.status
FROM
strategies_symbol t1,
instruments t2
WHERE
t1.symbol_id = t2.id and
t1.strategy_id = %s and
t1.status in ('new', 'trading')
ORDER by
t1.status
""", (strategy_id,))
strategies = cursor.fetchall()
cursor.execute("""
SELECT
name
FROM
strategies_symbol t1,
strategies t2
WHERE
t1.strategy_id = t2.id and
t1.strategy_id = %s
GROUP BY
name
""", (strategy_id,))
strategy_name = cursor.fetchone()['name']
return templates.TemplateResponse("strategy.html", {"request": request, "strategies": strategies, "strategy_name": strategy_name })
@app.get("/strategies")
async def strategies(request: Request):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
cursor.execute("""
SELECT id, name FROM strategies
""")
rows = cursor.fetchall()
return templates.TemplateResponse("strategies.html", {"request": request, "strategies": rows})
@app.get("/instrument/{symbolid}")
async def symbol_details(request: Request, symbolid):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
# get symbol name
cursor.execute("""
SELECT name FROM instruments WHERE id = %s
""", (symbolid,))
row = cursor.fetchone()['name']
symbolname = row.replace('/', '')
# get symbol price list
cursor.execute("""
SELECT
date,
timeframe,
bidopen,
bidclose,
bidhigh,
bidlow,
symbolid,
name
FROM
prices_fxcm_api,
instruments
WHERE
instruments.id = symbolid and
symbolid = %s
ORDER BY
timeframe,
date desc
""", (symbolid,))
rows = cursor.fetchall()
# get strategies list
cursor.execute("""
SELECT id, name FROM strategies
""")
strategies = cursor.fetchall()
return templates.TemplateResponse("instrument_details.html", {"request": request, "prices": rows, "symbolid": symbolid, "symbolname": symbolname, "strategies": strategies })
@app.get("/instruments/new")
async def intruments_new(request: Request):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
markets = get_market_list()
exchanges = get_exchange_list()
return templates.TemplateResponse("instruments_new.html", {"request": request, "markets": markets, "exchanges": exchanges })
@app.post("/create_instrument")
async def create_instrument(request: Request, symbol: str = Form(...), market_id: int = Form(...), exchange_id: int = Form(...) ):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
sql = "INSERT INTO instruments(name, market_id, exchange) VALUES (%s, %s, %s);"
cursor.execute(sql, (symbol, market_id, exchange_id,))
conn.commit()
message = f'New instrument created {symbol}'
return templates.TemplateResponse("message_create.html", {"request": request, "message": message })
@app.post("/apply_strategy")
async def apply_strategy(strategy_id: int = Form(...), symbolid: int = Form(...), strategy_bias: str = Form(...), ):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
# check for existent symbol_strategies rows
cursor.execute("""
SELECT
t1.id,
t1.symbol_id,
t1.strategy_id,
t2.name
FROM
strategies_symbol t1,
instruments t2
WHERE
t1.symbol_id = t2.id and
t1.strategy_id = %s and
t1.symbol_id = %s and
t1.status = 'new'
""", (strategy_id, symbolid))
strategy = cursor.fetchone()
if not strategy:
cursor.execute("""
INSERT INTO strategies_symbol( symbol_id, strategy_id, strategy_bias, entry_point, stop_loss, take_profit, date, status ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s);
""", (symbolid, strategy_id, strategy_bias, 0, 0, 0, date.today(), 'new', ))
conn.commit()
return RedirectResponse(url=f"/strategy/{strategy_id}", status_code = 303)
@app.post("/delete_instrument/{trading_id}")
async def delete_instrument(request: Request, trading_id):
conn = db_connect()
cursor = conn.cursor(cursor_factory = psycopg2.extras.DictCursor)
cursor.execute("""
DELETE FROM strategies_symbol WHERE id = %s
""", (trading_id,))
conn.commit()
message = f'Trading deleted {trading_id}'
if Response(status_code=200):
return templates.TemplateResponse("message_create.html", {"request": request, "message": message })
@app.get("/login")
async def user_login(request: Request):
return templates.TemplateResponse("login.html", {"request": request}) |
17,413 | 45c1b0e5d991f99d0e1b8ba2f309a3a82ace06c3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
#import random
import utils
import time
np.random.seed(1729)  # fix the NumPy RNG (before the Keras imports below) for reproducible runs
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.regularizers import l2, l1
from keras import backend as K
datafolder = 'data'
resultsfolder = 'results/results2'
start_at = 1
#run_only = (4,16,27,36,51,63,75,87,99)
run_only = range(150)  # run all

# Load and normalize data
xs, ys, vs = utils.load_data(datafolder)
xs, ys = utils.normalize_data(xs, ys)

# Set filenames
runs_filename = resultsfolder + '/runs.csv'
results_filename = resultsfolder + '/results.csv'

# Load the parameters of the runs
runs = utils.load_runs(runs_filename)
nfolds = np.unique(vs).size


def build_model(hidden_layers, input_dim, output_dim, dropout, reg, optimizer):
    """Create and compile the linear-activation MLP used by every run.

    Factors out the model construction that was previously copy-pasted once
    per CV fold and once for the final model (the old TODO asked for this).

    Args:
        hidden_layers (sequence of int): neurons per hidden layer.
        input_dim (int): number of input features.
        output_dim (int): number of regression targets.
        dropout (float): dropout rate applied after the last hidden layer.
        reg: keras kernel regularizer shared by every Dense layer.
        optimizer (str): keras optimizer name.

    Returns:
        keras.models.Sequential: compiled model (MSE loss).
    """
    model = Sequential()
    model.add(Dense(hidden_layers[0],
                    input_dim=input_dim,
                    bias_initializer="zeros",
                    kernel_initializer="normal",
                    activation='linear',
                    kernel_regularizer=reg))
    for neurons in hidden_layers[1:]:
        model.add(Dense(neurons,
                        bias_initializer="zeros",
                        kernel_initializer="normal",
                        activation='linear',
                        kernel_regularizer=reg))
    model.add(Dropout(dropout))
    model.add(Dense(output_dim,
                    bias_initializer="zeros",
                    kernel_initializer="normal",
                    activation='linear',
                    kernel_regularizer=reg))
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=[])
    return model


for r, params in enumerate(runs[start_at-1:], start=start_at):
    # hacky shortcut to repeat important runs
    if r not in run_only:
        continue

    # Network architecture
    features = params['features']
    targets = params['targets']
    input_dim = len(features)
    hidden_layers = params['hidden_layers']
    output_dim = len(targets)

    # Regularization
    reg_type = params['reg_type']
    reg_v = params['reg_v']
    reg = {"l1": l1, "l2": l2}[reg_type](reg_v)

    batch_size = params['batch_size']
    epochs = params['epochs']
    optimizer = "adam"

    # results[fold, target, metric]; the extra final row holds fold means.
    results = np.zeros((nfolds+1, len(targets), 4))
    print('')
    for fold in range(0, nfolds):
        # Model creation
        model = build_model(hidden_layers, input_dim, output_dim,
                            params['dropout'], reg, optimizer)
        # Training: hold out the rows whose fold id matches `fold`.
        xs_train = xs[vs != fold,:][:,features]
        xs_val = xs[vs == fold,:][:,features]
        ys_train = ys[vs != fold,:][:,targets]
        ys_val = ys[vs == fold,:][:,targets]
        print('Run {}/{}, split {}/{}'.format(r, len(runs), fold+1, nfolds))
        model.fit(xs_train, ys_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=1,
                  )
        # Validation
        results[fold], _ = utils.evaluate_model(model, xs_val, ys_val)
        del(model)
        K.clear_session()

    # Train and save final model on the full data set.
    print('Run {}/{}, final model training'.format(r, len(runs)))
    model = build_model(hidden_layers, input_dim, output_dim,
                        params['dropout'], reg, optimizer)
    xs_train = xs[:,features]
    ys_train = ys[:,targets]
    t0 = time.time()
    model.fit(xs_train, ys_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              )
    train_dt = time.time() - t0
    t0 = time.time()
    model.predict(xs_train)
    test_dt = time.time() - t0
    model.save(resultsfolder + '/run{}.h5'.format(r))
    del(model)
    K.clear_session()

    # Record mean errors across folds in the extra results row.
    for t, tar in enumerate(targets):
        for e in range(4):
            results[nfolds,t,e] = np.mean(results[:-1,t,e])

    # Print results
    print('\n Run {}/{} results \n'.format(r, len(runs)))
    utils.print_all_results(results[nfolds], targets)

    # Log results: append to results.csv, writing the header only when the
    # file does not exist yet (narrowed from a bare `except`).
    delimiter = ','
    try:
        results_file = open(results_filename, 'r+')
        results_file.read()  # consume contents so subsequent writes append
    except FileNotFoundError:
        results_file = open(results_filename, 'w')
        colnames = ('Run', 'Target', 'ME', 'RMSE', 'MAE',
                    'Pearson', 'Train time', 'Test time')
        header = delimiter.join(colnames)
        results_file.write(header + '\n')
    temp = '{},{},{r[0]:.5f},{r[1]:.5f},{r[2]:.5f},{r[3]:.5f},{:.4f},{:.4f}'
    for t, tar in enumerate(targets):
        row = temp.format(r, tar, train_dt, test_dt, r=results[nfolds,t,:])
        results_file.write(row + '\n')
    results_file.close()
|
17,414 | f12ed6926ed3990fe719d0c384446d2a847839ae |
from worldquant.api import WQClient
from worldquant.api.submission import WQSubmissionClient
from worldquant.exceptions import WQException
import random
import os
import time
user = '***'
pswd = '***'

client = WQClient()
client.login(user, pswd)
print(5)

submit = WQSubmissionClient(client)
overview = client.myalphas.alphasoverview()

# Submit every sufficiently good alpha listed in AlphaIds2.txt (one id per
# line).  Improvements over the previous version: the ids file is closed via
# a context manager, blank lines are skipped (previously "\n" passed the
# truthiness check before being stripped), and the loop variable no longer
# shadows the `id` builtin.
with open('AlphaIds2.txt') as ids_file:
    for raw_line in ids_file:
        alpha_id = raw_line.strip()
        if not alpha_id:
            continue
        try:
            info = client.myalphas.alphainfo(alpha_id)
            settings = info['AlphaSettings']
            sim_sum = info['AlphaSimSum']
            average_sum = sim_sum[-1]
            # Submission thresholds: decent Sharpe and a minimally
            # diversified book (more than 10 combined positions).
            if average_sum['Sharpe'] > 1.25:
                if average_sum['ShortCount'] + average_sum['LongCount'] > 10:
                    result = submit.start(alpha_id)
                    print(f"{alpha_id} : {result}")
                    time.sleep(10)
        except KeyError:
            print("Err with:", alpha_id)
            time.sleep(10)
|
17,415 | 607eb771aa4970f36e4d219dac61c679cb21410e | #!/usr/bin/env python
"""Helper script that auto-updates all response data for integration tests"""
# TODO: rework implementation to use logging instead of print so we can see timestamps of operations
# TODO: add a nested progress bar to show number of remaining tests to fix
# TODO: add support for verbosity levels (ie: by default just show progress bars)
from time import sleep
import math
import sys
import shlex
import json
from pathlib import Path
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime
from dateutil import tz
import pytest
from pytest import ExitCode
from tqdm import trange
import humanize
import click
from click.exceptions import ClickException, Abort
from friendlypins.utils.rest_io import RestIO
CUR_PATH = Path(__file__).parent
DEFAULT_KEY_FILE = CUR_PATH.joinpath("key.txt")
DEBUG_LOG_FILE = CUR_PATH.joinpath("debug.log")
CASSETTE_PATH = CUR_PATH.joinpath("tests").joinpath("cassettes")
REPORT_FILE = CUR_PATH.joinpath(".report.json")
PREVIOUS_REPORT = None
def get_secret():
    """Loads authentication token for Pinterest

    Returns:
        str: authentication token parsed from the file

    Raises:
        Exception: if the key file is missing or the token looks invalid
    """
    if not DEFAULT_KEY_FILE.exists():
        raise Exception("Authentication key must be stored in a file named " + DEFAULT_KEY_FILE.name)
    token = DEFAULT_KEY_FILE.read_text().strip()
    # An empty string has length 0, so this also rejects a blank file.
    if len(token) < 10:
        raise Exception("Invalid authentication token")
    return token
def load_report():
    """Loads unit test data from the latest pytest report

    Requires the pytest-json-report plugin and assumes the output is stored
    in a file named .report.json in the current folder.

    Returns:
        dict: parsed report data
    """
    if not REPORT_FILE.exists():
        raise Exception("pytest report file not found: " + REPORT_FILE.name)
    report = json.loads(REPORT_FILE.read_text())
    # Pretty-print the report back to disk so it is easier to inspect by hand.
    REPORT_FILE.write_text(json.dumps(report, indent=4))
    return report
def analyse_report(report):
    """Analyses a pytest report, and displays summary information to the console

    Args:
        report (dict):
            pytest report data, as generated by the :meth:`load_report` method

    Returns:
        int: number of failing unit tests still remaining
    """
    global PREVIOUS_REPORT  # pylint: disable=global-statement
    if report["summary"]["total"] == 0:
        raise Exception("pytest report has no test results")
    current_failures = list()
    for cur_test in report["tests"]:
        if cur_test["outcome"] in ("passed", "skipped"):
            continue
        # Only rate-limit / blocked-network failures are expected here; any
        # other failure means the test itself is genuinely broken.
        if "RateLimitException" not in str(cur_test) and "Network is disabled" not in str(cur_test):
            raise Exception("Unit test {0} has failed for unexpected reasons. See debug.log for details".format(
                cur_test["nodeid"]))
        current_failures.append(cur_test["nodeid"])
    click.secho(
        "{0} of the {1} selected tests were successful".format(
            report["summary"].get("passed", 0),
            report["summary"]["total"]
        ),
        fg="green"
    )
    # Compare against the previous iteration's report to list newly-fixed tests.
    if PREVIOUS_REPORT:
        fixed_tests = list()
        for cur_test in PREVIOUS_REPORT["tests"]:
            if cur_test["outcome"] in ("passed", "skipped"):
                continue
            if cur_test["nodeid"] not in current_failures:
                fixed_tests.append(cur_test["nodeid"])
        if fixed_tests:
            click.secho("Fixed the following {0} tests:".format(len(fixed_tests)), fg="green")
            for cur_test in fixed_tests:
                click.secho("\t{0}".format(cur_test), fg="green")
    PREVIOUS_REPORT = report
    # "failed" is absent from the summary when everything passed.
    return report["summary"].get("failed", 0)
def sanity_check(secret):
    """Makes sure there are no further mentions of our auth token anywhere in any cassette

    Args:
        secret (str):
            Auth token to detect

    Returns:
        bool:
            True if everything looks OK, False if there are still mentions of
            the auth token in 1 or more cassettes
    """
    leaky = [cassette for cassette in CASSETTE_PATH.rglob("*.yaml")
             if secret in cassette.read_text()]
    if not leaky:
        click.secho("Cassettes look clean - no mentions of auth tokens!", fg="green")
        return True
    click.secho("Found {0} cassettes that still mention auth token:".format(len(leaky)), fg="red")
    for cassette in leaky:
        click.secho("\t{0}".format(cassette.name), fg="red")
    return False
def run_tests(params):
    """Launches pytest to orchestrate a test run

    All output from the test runner will be hidden to keep the console clean

    Args:
        params (list of str):
            command line parameters to pass to the test runner
            these options will be combined with a default set defined internally

    Returns:
        int: return code produced by the test run
    """
    default_test_params = [
        "./tests",
        "-vv",
        "--json-report",
        "--key-file",
        DEFAULT_KEY_FILE.name
    ]
    # Redirect stdout to the debug log; stderr is then pointed at the
    # (already redirected) sys.stdout so both streams land in the same file.
    with DEBUG_LOG_FILE.open("a") as debug_out:
        with redirect_stdout(debug_out):
            with redirect_stderr(sys.stdout):
                return pytest.main(default_test_params + params)
@click.command()
@click.option("--force", is_flag=True,
              help="Forces overwrite of all cassettes even if their tests are currently passing")
def main(force):
    """Regenerates vcrpy cassettes for integration tests, accounting for rate limits
    enforced by the Pinterest REST APIs
    """
    secret = get_secret()
    service = RestIO(secret)
    # Make sure we re-create our debug log for each run
    if DEBUG_LOG_FILE.exists():
        DEBUG_LOG_FILE.unlink()
    if force:
        # Regenerate all cassette data until we hit our rate limit
        click.secho("Regenerating all recorded cassettes")
        result = run_tests(shlex.split("--record-mode=rewrite"))
        num_failures = analyse_report(load_report())
    else:
        click.secho("Generating baseline...")
        # Start by generating a baseline state without using any API calls
        run_tests(shlex.split("--record-mode=none --block-network"))
        num_failures = analyse_report(load_report())
        if num_failures == 0:
            click.secho("All unit tests passed. Aborting rebuild.", fg="yellow")
            click.secho("To force a rebuild of all cassettes try --force", fg="yellow")
            return
        # The re-run any failed tests, forcing the cassettes to get regenerated
        # We append --lf to only rerun the tests that failed on the last pass
        click.secho("Rebuilding initial cassettes...")
        result = run_tests(shlex.split("--record-mode=rewrite --lf"))
        num_failures = analyse_report(load_report())
    # Keep re-running the remaining failures, sleeping out the API rate
    # limit between iterations, until everything passes or we stall.
    iteration = 1
    while result == ExitCode.TESTS_FAILED and num_failures != 0:
        # check headers to see when the next token renewal is
        now = datetime.now(tz=tz.tzlocal())
        renewal = service.headers.time_to_refresh
        service.refresh_headers()
        wait_time = renewal - now
        minutes = math.ceil(wait_time.total_seconds() / 60)
        # if the rate limit has expired wait until the limit has been refreshed
        if minutes > 0:
            click.secho("Next renewal: {0}".format(renewal.astimezone(tz.tzlocal())))
            click.secho("Sleeping for {0} minutes...".format(minutes))
            # Give regular status updates to the user via a progress bar once every minute
            for _ in trange(minutes):
                sleep(60)
            # Give the API a few additional seconds before we try again to account for clock skew
            sleep(10)
        click.secho("Running test iteration " + str(iteration))
        # We append --lf to only rerun the tests that failed on the previous run
        result = run_tests(shlex.split("--record-mode=rewrite --lf"))
        # If the number of failing tests hasn't changed or has gotten worse, we are not making any progress
        # and thus we should exit to avoid a deadlock
        temp = analyse_report(load_report())
        if temp >= num_failures:
            raise Exception("Last unit test run had {0} failures and current run had {1}".format(num_failures, temp))
        num_failures = temp
        # repeat until all tests pass
        iteration += 1
    # return the final test run result to the caller
    if result != ExitCode.OK:
        raise ClickException("Regeneration failed for unexpected reason: " + str(result))
def _main(args):
    """Primary entry point function

    Args:
        args (list of str):
            command line arguments to pass to the command interpreter

    Returns:
        int:
            return code to pass back to the shell
    """
    start = datetime.now()
    try:
        main.main(args, standalone_mode=False)
    except Abort:
        click.secho("Operation aborted!", fg="yellow", bold=True)
    except Exception as err:  # pylint: disable=broad-except
        click.secho("Error: " + str(err), fg="red")
        return 1
    finally:
        if "--help" not in sys.argv:
            # display overall runtime for reference when performing update
            end = datetime.now()
            runtime = end - start
            click.secho("Operation complete. Total runtime: " + humanize.naturaldelta(runtime), fg="green")
    # NOTE(review): this `return 0` must stay OUTSIDE the `finally` block — a
    # return inside `finally` would silently swallow the `return 1` above.
    return 0


if __name__ == "__main__":
    sys.exit(_main(sys.argv[1:]))
|
17,416 | 46d6bb4b59fd9142dd9d32e4c5461e5675d414a6 | #!/usr/bin/env python
import argparse
import os
import bs4
import requests
PAGE = 'https://www.ksi.is/mot/felog/adildarfelog/'
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_absolute_url(absolute_path):
    """Prefix a site-relative path with the ksi.is domain."""
    return f'https://www.ksi.is{absolute_path}'
def main(club_id, out_folder, club_name=None):
    """Download a club's logo from ksi.is and register it in the JS sources.

    Fetches the club page, derives the club name from the <h1> when not
    given, saves the logo image into ``out_folder``, and inserts sorted
    entries into src/club-ids.js and src/images/clubLogos.js.
    """
    r = requests.get(get_absolute_url(f'/mot/felag/?lid={club_id}'))
    r.raise_for_status()
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    h1 = soup.find('h1')
    if not h1:
        raise RuntimeError("No h1 found")
    if not club_name:
        # Heading format is "<something> - <club name>".
        club_name = h1.text.split('-')[1].strip()
    if not club_name:
        raise RuntimeError("Club name not found")
    # The logo <img> is identified by its unrendered alt template text.
    for img_tag in soup.findAll('img'):
        if img_tag.get('alt', '') == 'Model.BasicInfo.ShortName':
            img_url = img_tag['src']
            break
    else:
        raise RuntimeError("Did not find img!")
    # Reuse a cached file with either the original extension or .svg.
    exts = [os.path.splitext(str(img_url))[1], '.svg']
    for ext in exts:
        path = os.path.join(out_folder, '%s%s' % (club_name, ext))
        if os.path.isfile(path):
            print('%s exists' % (path,))
            break
    else:
        # No cached copy: download the image in 1 KiB chunks.
        path = os.path.join(out_folder, '%s%s' % (club_name, exts[0]))
        r2 = requests.get(get_absolute_url(img_url))
        r2.raise_for_status()
        with open(path, 'wb') as f:
            for chunk in r2.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        print(
            'Saved %s for %s'
            % (
                path,
                club_name,
            )
        )
    # Insert the id mapping, keeping the lines between the first and last
    # (the object literal's braces) sorted.
    club_ids = os.path.join(BASE, 'src', 'club-ids.js')
    line = " '%s': '%s',\n" % (club_name, club_id)
    with open(club_ids, 'r') as f:
        lines = f.readlines()
    if line not in lines:
        lines[1:-1] = sorted(lines[1:-1] + [line])
        with open(club_ids, 'w') as f:
            for line in lines:
                f.write(line)
    # Same for the logo require() map; its header is two lines long.
    club_logos = os.path.join(BASE, 'src', 'images', 'clubLogos.js')
    line = " %s: require('./%s'),\n" % (
        club_name,
        os.path.relpath(path, os.path.dirname(club_logos)),
    )
    with open(club_logos, 'r') as f:
        lines = f.readlines()
    if line not in lines:
        lines[2:-1] = sorted(lines[2:-1] + [line])
        with open(club_logos, 'w') as f:
            for line in lines:
                f.write(line)
def get_club_id(club_name):
    """Search ksi.is for ``club_name`` and return its numeric club id.

    Returns None (implicitly) when no search-result heading matches the
    name exactly.
    """
    response = requests.get(
        get_absolute_url(
            f'/leit/?searchstring={club_name}&contentcategories=F%c3%a9l%c3%b3g'
        )
    )
    response.raise_for_status()
    soup = bs4.BeautifulSoup(response.text, 'html.parser')
    for heading in soup.findAll('h2'):
        if heading.text != club_name:
            continue
        link = heading.find('a')
        if not link:
            raise RuntimeError("No link found in search result")
        return int(link['href'].replace('/mot/lid/?lid=', ''))
if __name__ == '__main__':
    # CLI: accept either a numeric club id or a club name (resolved via search).
    parser = argparse.ArgumentParser()
    folder = os.path.join(BASE, 'src', 'images', 'club-logos')
    parser.add_argument('club_id', type=str)
    args = parser.parse_args()
    club_name = None
    if args.club_id.isdigit():
        club_id = int(args.club_id)
    else:
        # A non-numeric argument is treated as a club name to look up.
        club_id = get_club_id(args.club_id)
        club_name = args.club_id
    main(club_id, folder, club_name=club_name)
|
17,417 | 3f53452dc58ad42c1c9e0ef0ac40223b6ba544da | import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3, decode_predictions, preprocess_input
from tensorflow.keras.preprocessing import image
"""
This files uses the pre-trained model Inception_v3 which is a CNN used for image analysis and object detection.
It is trained on the ImageNet data set and has state of the art performance.
"""
# Enable on-demand GPU memory allocation for every visible GPU.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # This line allows the network to use the GPU VRAM uncapped. !!! NEED THIS LINE FOR NETWORK TO RUN !!!
        for idx, g in enumerate(gpus):
            tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[idx], True)
        # tf.config.experimental.set_visible_devices(gpus[1], 'GPU')
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError once a GPU is initialised.
        print(e)
def main():
    """Entry point: classify the demo image and print the top classes."""
    classify_images()
def classify_images(img_path='dataset/colorize_images/n02085782_919.jpg', top=3):
    """Classify an image with pre-trained InceptionV3 and print the result.

    The image is resized to the 299x299 input InceptionV3 expects,
    preprocessed, and run through the ImageNet-trained network
    (1000 classes).

    :param img_path: path of the image to classify; the default keeps the
        original hard-coded demo image so existing callers are unaffected
    :param top: number of highest-probability classes to report
    :return: list of (class_id, description, probability) tuples for the
        top predictions (the original returned nothing, so returning a
        value is backward-compatible)
    """
    # Load and preprocess the image into a (1, 299, 299, 3) batch.
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    model = InceptionV3(weights="imagenet")
    preds = model.predict(x)
    # decode_predictions maps raw scores to (class, description, probability)
    # tuples, one list per sample in the batch.
    decoded = decode_predictions(preds, top=top)[0]
    print('Predicted:', decoded)
    return decoded
if __name__ == "__main__":
main()
|
17,418 | 64c9abd82b1edd9f2af2b4094ae62eb90d221beb | import pickle as pkl
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import sys
from collections import defaultdict
import os
def identify_cascades(GTD_event_dict, id_to_groups, groups_to_id):
    """Group attacks into cascades keyed by their categorical attributes.

    Each attack is converted to a fractional-year timestamp (years since
    1970) and recorded as a (group_id, timestamp) pair under the tuple
    (weaptype1, attacktype1, targtype1, region).

    :param GTD_event_dict: mapping of attack id -> attack attribute dict
        with keys iday/imonth/iyear/weaptype1/attacktype1/targtype1/
        region/gname
    :param id_to_groups: unused here; kept for interface compatibility
    :param groups_to_id: mapping of group name -> numeric group id
    :return: defaultdict mapping the attribute tuple -> list of
        (group_id, timestamp) pairs
    """
    attack_times = defaultdict(list)
    # .items() works on both Python 2 and 3; the original .iteritems()
    # raised AttributeError under Python 3.
    for attack, attack_info in GTD_event_dict.items():
        # NOTE(review): the day term is normalised by 31*365 rather than
        # by days-in-year -- looks suspicious but is preserved as-is to
        # keep downstream results reproducible; confirm intent.
        timestamp = (attack_info['iday']*1.0)/(31.0*365.0) + (attack_info['iyear']-1970) + (attack_info['imonth']/12.0)
        attack_times[(attack_info['weaptype1'], attack_info['attacktype1'], attack_info['targtype1'], attack_info['region'])].append((groups_to_id[attack_info['gname']], timestamp))
    return attack_times
def print_attacks(attack_times, id_to_groups):
    # Write the cascades to a fastinf-format text file: first one
    # "id,id" line per group, a blank separator line, then one line per
    # cascade of comma-separated "group_id,timestamp" pairs.
    # NOTE(review): Python 2 idioms (dict.iteritems, relative seek on a
    # text-mode file) -- this function will not run unmodified on
    # Python 3.
    data = open("../../data/full_cascade_fastinf_noun.txt", 'w+')
    for group in id_to_groups:
        # data.write('%d,%s\n' % (group, id_to_groups[group]))
        data.write('%d,%d\n' % (group, group))
    data.write('\n')
    for casc,timepair in attack_times.iteritems():
        used = set()
        # Group 1741 is unconditionally excluded from every cascade --
        # presumably a known-bad/placeholder group id; confirm why.
        used.add(1741)
        # Skip cascades with fewer than 5 attacks.
        if (len(timepair) < 5): continue
        for val in timepair:
            # Each group contributes at most one event per cascade.
            if val[0] not in used:
                tag = "%d,%f," % (val[0],val[1])
                data.write(str(tag).rstrip('\n'))
                used.add(val[0])
        # Back up over the trailing comma and remove it, then terminate
        # the cascade line.
        data.seek(-1, os.SEEK_END)
        data.truncate()
        data.write("\n")
    data.close()
def main():
    """Load the GTD pickles, build the attack cascades, and write them out."""
    # Pickle files must be opened in binary mode: the original opened
    # the last two in the default text mode, which raises on Python 3
    # and can corrupt reads on Windows.
    with open("../../data/pkl/GTD_dict.p", 'rb') as f:
        GTD_event_dict = pkl.load(f)
    with open("../../data/pkl/id_to_groups.p", 'rb') as f:
        id_to_groups = pkl.load(f)
    with open("../../data/pkl/groups_to_id.p", 'rb') as f:
        groups_to_id = pkl.load(f)
    attack_times = identify_cascades(GTD_event_dict, id_to_groups, groups_to_id)
    print_attacks(attack_times, id_to_groups)
main() |
17,419 | 41086a4cf68befccac3b76559e6681d0a936e896 | import os.path
import sys
import unittest
# Point Django at the test settings and make this directory importable
# before any tests are loaded.
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
test_dir = os.path.dirname(__file__)
sys.path.insert(0, test_dir)
def get_tests():
    """Discover and return the test suite rooted at this file's directory.

    Anchoring discovery at ``start_dir`` (instead of the process CWD)
    makes the result independent of where the runner was launched from;
    the original computed ``start_dir`` but never used it and always
    discovered from ".".
    """
    # dirname() is '' when __file__ is a bare relative name; fall back
    # to the current directory so discover() still gets a valid path.
    start_dir = os.path.dirname(__file__) or "."
    return unittest.TestLoader().discover(start_dir, pattern="test*.py")
|
17,420 | df0c7dc00e1a7ddf30136272b6e270ae12e6cd8d | import logging
import os
from mandrill import Mandrill, Error
import pystache
from shuffle.config import config
from shuffle.services.gravatar_service import GravatarService
class EmailService:
    """Sends templated group e-mails through the Mandrill API."""

    def __init__(self):
        self.__email_api = Mandrill(config.MANDRILL_API_KEY)

    def send_emails_to_groups_with_template(self, randomized_groups, email_from, email_subject, email_template_file):
        """Render the template once per group and e-mail all its members.

        Args:
            randomized_groups: iterable of group objects exposing
                get_members(); each member exposes get_email().
            email_from: sender address.
            email_subject: subject line used for every message.
            email_template_file: template path relative to the config
                package directory.
        """
        logging.info("Emailing groups")
        for group in randomized_groups:
            recipients = []
            template_recipients = {"recipients": []}
            for user in group.get_members():
                recipients.append({
                    "email": user.get_email(),
                    "type": "to",
                })
                template_recipients["recipients"].append({
                    "gravatar_link": GravatarService.get_gravatar_link(user.get_email()),
                    "email": user.get_email()
                })
            email_body = self.__create_message_body(email_template_file, template_recipients)
            message = self.__create_message(email_from, recipients, email_subject, email_body)
            # By sending from 'me' it will send the message as the currently authenticated user
            self.__send_message(message)

    @staticmethod
    def __create_message_body(email_template_file, recipients):
        """Render the pystache e-mail template with the recipient context.

        Args:
            email_template_file: template path relative to the config
                package directory.
            recipients: context dict with a "recipients" list.

        Returns:
            The rendered HTML body.

        Raises:
            IOError: if the template file cannot be read.
        """
        email_template_file = os.path.join(os.path.dirname(config.__file__), email_template_file)
        try:
            # 'with' guarantees the handle is closed; the original
            # leaked an open file object on every call.
            with open(email_template_file) as f:
                template_body = f.read()
        except IOError as error:
            logging.error("Could not find the email template file. This is unrecoverable, please create a email template file and try again. {0}".format(error))
            raise error
        return pystache.render(template_body, recipients)

    @staticmethod
    def __create_message(sender, recipients, subject, message_text):
        """Build the Mandrill message payload.

        Args:
            sender: Email address of the sender.
            recipients: list of Mandrill recipient dicts ("email"/"type").
            subject: The subject of the email message.
            message_text: HTML body of the email message.

        Returns:
            A dict in the shape expected by the Mandrill messages API.
        """
        message = {
            "to": recipients,
            "from_email": sender,
            "subject": subject,
            "html": message_text,
        }
        return message

    def __send_message(self, message):
        """Send a message through the Mandrill API.

        Args:
            message: Mandrill message dict produced by __create_message.

        Returns:
            The Mandrill API response for the sent message.

        Raises:
            mandrill.Error: if the API rejects the message.
        """
        logging.debug("Sending message")
        try:
            message = self.__email_api.messages.send(message=message)
            return message
        except Error as error:
            logging.error('An error occurred emailing a user: {0}'.format(error))
            raise error
|
17,421 | b34178065776e7e3dcba9fbaf23d76e53f5a6deb | import board
import busio
import time
import sys
import RPi.GPIO as GPIO
sys.path.insert(0, "/home/pi/packages")
from RaspberryPiCommon.pidev import stepper, RPiMIB
sys.path.insert(0, "/home/pi/packages/Adafruit_16_Channel_PWM_Module_Easy_Library")
from Adafruit_Ease_Lib import Adafruit_Ease_Lib as ael
led = ael()
sys.path.insert(0, "/home/pi/packages/Adafruit_Python_ADS1x15/examples")
# Import the ADS1x15 module.
import Adafruit_ADS1x15
# Create an ADS1115 ADC (16-bit) instance.
adc = Adafruit_ADS1x15.ADS1115()
GAIN = 1
# Clamp n into the inclusive range [min_n, max_n].
clamp = lambda n, min_n, max_n: max(min(max_n, n), min_n)
import Slush
import spidev
# Position step applied per unit of joystick deflection per loop pass.
increment = 5
motor_1 = stepper(port = 0, speed = 20, micro_steps = 128)
print("g")
motor_1.home(0)
# Motor position (in the stepper's position units) corresponding to the
# mechanical centre after homing -- TODO confirm units.
home_pos_1 = 11.34
current_pos_x = home_pos_1
# Poll the joystick ADC forever and jog the motor proportionally.
while True:
    # Channel 1 reads the X axis; 9408 is presumably the resting-centre
    # ADC count and 12000 a full-scale divisor, giving a signed
    # deflection roughly in [-1, 1] -- verify against the hardware.
    joy_val_x = (adc.read_adc(1, gain=GAIN)-9408)/12000
    #joy_val_y = abs(9500 - adc.read_adc(2, gain=GAIN))
    print(joy_val_x)
    #print(joy_val_y)
    current_pos_x = current_pos_x + joy_val_x*increment
    #current_pos_x = current_pos_x + joy_val_x*increment
    # Keep the target within +/-13 units of home so the stage cannot
    # run past its travel limits.
    current_pos_x = clamp(current_pos_x, home_pos_1 - 13, home_pos_1 + 13)
    # current_pos_x = clamp(current_pos_x, home_pos_1 - 15.77, home_pos_ + 15.77)
    motor_1.start_go_to_position(current_pos_x)
|
17,422 | 7b3fc1b2dc0a542781064c69015b93d9af537f84 | class Thresholds:
submission = 800
comment = 1000
pm = 1000
class Messages:
    """Message templates per channel; filled in with str.format.

    Placeholders: {min}, {upvotes}, {break_even}, {link}.
    """
    # NOTE(review): the "recieving" typo in `pm` is user-visible runtime
    # text and is deliberately left untouched by this doc-only change.
    comment = "I **strongly advise** investing! This meme hit #1 on [hot](https://www.reddit.com/r/memeeconomy/hot/) within **{min}**, at **{upvotes}** upvotes. If you invest now, you'll break even at **{break_even}** upvotes.\n\n[Click here](https://www.param.me/meme/calculator/break-even) to calculate the current break-even point. [Click here](https://www.reddit.com/message/compose?to=MemeAdviser&subject=Subscribe&message=Subscribe) to subscribe to daily market updates.\n***\n^(Beep boop, I'm a bot | [Contact me](https://www.reddit.com/message/compose?to=hypnotic-hippo&subject=MemeAdviser))"
    submission = "This meme just hit #1 on MemeEconomy with only {upvotes} upvotes! Invest now and break even at {break_even} upvotes"
    pm = "[This meme](https://reddit.com{link}) just hit #1 on MemeEconomy with only {upvotes} upvotes! Invest now and break even at {break_even} upvotes\n***\n^(You're recieving this message because you've subscribed to this bot. To unsubscribe, reply 'Unsubscribe')"
17,423 | 9a2f6dd7e0a7ac4a2f5e9aa004cf4e19f2f8fb3e | import tkinter as tk
import random
class PmuDataDisplay(tk.Frame):
    """Scrolling strip-chart widget plotting two PMU signal levels."""

    def __init__(self, *args, **kwargs):
        tk.Frame.__init__(self, *args, **kwargs)
        # White drawing surface that fills the whole frame.
        self.canvas = tk.Canvas(self, background="white")
        self.canvas.pack(side="top", fill="both", expand=True)
        # One polyline per signal; both start as degenerate segments at
        # the origin and grow as samples arrive.
        self.level_line1 = self.canvas.create_line(0, 0, 0, 0, fill="red")
        self.level_line2 = self.canvas.create_line(0, 0, 0, 0, fill="blue")

    def update_plot(self, lev1, lev2, spoof_status, cybergrid_status):
        """Append one sample to each trace and scroll to the newest data.

        spoof_status and cybergrid_status are accepted but currently
        unused.
        """
        for line, level in ((self.level_line1, lev1), (self.level_line2, lev2)):
            self.add_point(line, level)
        self.canvas.xview_moveto(1.0)
        return

    def add_point(self, line, y):
        """Extend *line* by one point placed 10 px right of its last point."""
        pts = self.canvas.coords(line)
        next_x = pts[-2] + 10
        pts.extend((next_x, y))
        # Keep at most 800 coordinates (400 points) so redraws stay cheap.
        self.canvas.coords(line, *pts[-800:])
        self.canvas.configure(scrollregion=self.canvas.bbox("all"))
        return
|
17,424 | 5b4d375ec48c5a1d08b7ed5dc389c2c664e9f1e2 | # -*- coding: utf-8 -*-
"""
ppstore.feedback
~~~~~~
This module has been developed to take an IP address and a set of countries predicted by Speed of Light
constraints, use this information to see if only one country is predicted. If
only one country is predicted then gather information from all the geolocation
sources and insert/update the ground truth label for that IP address. It either
updates (if the IP address exists in ground truth) or adds a new entry for the
IP address.
:author: Muzammil Abdul Rehman
:copyright: Northeastern University © 2018.
:license: Custom BSD, see LICENSE for more details.
:email: passport@ccs.neu.edu
"""
###remove-me-later-muz###import settings as DJANOG_SETTINGS
import configs.system
from ppstore.models import CLASSIFIER_DATA_TRAIN
from ppstore.models import DDEC_Hostname
from ppstore.models import Hints_DDEC_Location_Lat_Long
from ppstore.models import IP_WHOIS_INFORMATION
from ppstore.models import Hints_AS_INFO
from ppstore.models import Loc_Source_DB_IP
from ppstore.models import Loc_Source_EUREKAPI
from ppstore.models import Loc_Source_IP2LOCATION
from ppstore.models import Loc_Source_IPINFO_IO
from ppstore.models import Loc_Source_MAXMIND_GEOLITE_CITY
#####################################################################
# remove feedback-rewrite
#####################################################################
#####################################################################
# add feedback
#####################################################################
def add_feedback_to_ground(ip_address, real_country_list, hst_nm = ''):
    """Insert or update the ground-truth training row for ip_address.

    If feedback is enabled and the predicted country list is small and
    non-empty, gather country labels for the IP from every geolocation
    source plus WHOIS/AS metadata, then update the existing training row
    or create a new one.

    NOTE(review): Python 2 module (print statements, bare excepts); the
    broad try/excepts deliberately degrade each missing source to '' /
    -1 rather than failing.
    """
    if not configs.system.APPLY_FEEDBACK:
        return
    num_countries = len(real_country_list)
    # Too ambiguous to use as ground truth.
    if num_countries > configs.system.FEEDBACK_MAX_COUNTRIES:
        return
    # no countries.
    if num_countries == 0:
        return
    for real_cntry in real_country_list:
        dataset = CLASSIFIER_DATA_TRAIN.objects.filter(ip=ip_address,
                                                       realcountry=real_cntry)
        # see if IP-real_country pair exists.
        if dataset.count() > 0:
            return
    # update if a copy exists
    dataset = CLASSIFIER_DATA_TRAIN.objects.filter(ip=ip_address)
    # see if the IP already exists.
    # NOTE(review): real_cntry below is the leaked loop variable from
    # the for-loop above, i.e. the LAST country in real_country_list --
    # confirm this is the intended choice when the list has >1 entry.
    try:
        if dataset.count() > 0:
            #update the ip address real country tuple.if it already exists.
            training_instance = dataset[0]
            training_instance.realcountry=real_cntry
            training_instance.save()
            return
    except:
        print "Couldn't update instance after feedback:", ip_address
    # add to training dataset.
    ip_str = ip_address
    #all_hsts = Host.objects.filter(ip=ip_str)
    #try:
    #    cur_hst = all_hsts[0]
    #    ip_str = cur_hst.ip
    #    hst_nm = cur_hst.hostname
    #except:
    #    hst_nm = ''
    # DDEC hostname-derived country hint.
    try:
        host_objs = DDEC_Hostname.objects.filter(hostname=hst_nm)
        loc = host_objs[0].location
        x = Hints_DDEC_Location_Lat_Long.objects.filter(location=loc)
        # NOTE(review): x is a QuerySet, so x.country raises and the
        # bare except always yields '' -- likely meant x[0].country.
        ddeccountry = x.country
    except:
        ddeccountry = ''
    # Per-source country labels; each falls back to '' when the source
    # has no row for this IP.
    try:
        db_ipcountry = Loc_Source_DB_IP.objects.filter(ip=ip_str)[0].country
    except:
        db_ipcountry = ''
    try:
        ipinfocountry = Loc_Source_IPINFO_IO.objects.filter(ip=ip_str)[0].country
    except:
        ipinfocountry = ''
    try:
        eurekapicountry = Loc_Source_EUREKAPI.objects.filter(ip=ip_str)[0].country
    except:
        eurekapicountry = ''
    try:
        ip2locationcountry = Loc_Source_IP2LOCATION.objects.filter(ip=ip_str)[0].country
    except:
        ip2locationcountry = ''
    try:
        maxmindcountry = Loc_Source_MAXMIND_GEOLITE_CITY.objects.filter(ip=ip_str)[0].country
    except:
        maxmindcountry = ''
    # WHOIS-derived ISP/AS attributes; defaults mark "unknown".
    asn_num = -1
    try:
        ip_object = IP_WHOIS_INFORMATION.objects.filter(ip=ip_str)[0]
        asn_num = ip_object.asn
        asn_cidr_bgp1 = ip_object.asn_cidr_bgp
        asn1 = ip_object.asn
        asn_registry1 = ip_object.asn_registry
        isp1 = ip_object.isp
        isp_city1 = ip_object.isp_city
        isp_region1 = ip_object.isp_region
        ISPCountry1 = ip_object.isp_country
        ASCountry1 = ip_object.asn_country
    except:
        asn_registry1 = ''
        isp1 = ''
        isp_city1 = ''
        isp_region1 = ''
        ISPCountry1 = ''
        ASCountry1 = ''
        asn1 = -1
        asn_cidr_bgp1 = ''
    # AS-level organisation statistics, keyed by the ASN found above.
    as_name1 = ''
    num_as_in_org1 = -1
    num_ipv4_prefix_in_org1 = -1
    num_ipv4_ip_in_org1 = -1
    try:
        asn_object = Hints_AS_INFO.objects.filter(as_number=asn_num)[0]
        as_name1 = asn_object.as_name
        num_as_in_org1 = asn_object.num_as_in_org
        num_ipv4_prefix_in_org1 = asn_object.num_ipv4_prefix_in_org
        num_ipv4_ip_in_org1 = asn_object.num_ipv4_ip_in_org
    except:
        pass
    # Create the new training row from everything gathered above.
    try:
        #update the ip address real country tuple.if it already exists.
        training_instance = CLASSIFIER_DATA_TRAIN(ip=ip_address,
            realcountry=real_cntry, DDECcountry=ddeccountry,
            db_ip_country=db_ipcountry, eurekapi_country=eurekapicountry,
            ip2location_country=ip2locationcountry,
            ipinfo_country=ipinfocountry,
            maxmind_country=maxmindcountry, asn=asn1,
            asn_registry=asn_registry1, hostname=hst_nm, isp=isp1,
            isp_region=isp_region1, ISPcountry=ISPCountry1,
            AScountry=ASCountry1, isp_city=isp_city1, as_name=as_name1,
            num_as_in_org=num_as_in_org1,
            num_ipv4_prefix_in_org=num_ipv4_prefix_in_org1,
            num_ipv4_ip_in_org=num_ipv4_ip_in_org1,
            asn_cidr_bgp=asn_cidr_bgp1)
        training_instance.save()
    except:
        #traceback.print_exc()
        print "Couldn't add instance after feedback:", ip_address
|
17,425 | 91999416584dc2d0c9a998ced1ad3c07e03f6751 | # -*- coding: utf-8 -*-
from celery import task
from termcolor import colored
from mapshop.models import Preorder
from django.template import loader, Context
from django.contrib.sites.models import get_current_site
from django.contrib.sites.models import Site
from django.core.mail import EmailMultiAlternatives
from settings import EMAIL_REPLY
import logging
logger = logging.getLogger(__name__)
def sendm(email, title, body):
    """Send *body* as an HTML e-mail with subject *title* to *email*."""
    message = EmailMultiAlternatives(title, body, EMAIL_REPLY, (email,))
    # Mark the primary body as HTML rather than plain text.
    message.content_subtype = "html"
    message.send()
@task(name='test_task')
def test_task(product):
    # Notify everyone holding a pre-order: e-mail subscribers get the
    # rendered "item back in stock" template, phone subscribers are only
    # logged (SMS sending is not implemented).
    # NOTE(review): Python 2 print statements; the *product* argument is
    # unused -- notifications go out for every Preorder row.
    site = Site.objects.get_current()
    for i in Preorder.objects.all().filter(type='email'):
        t = loader.get_template('mapshop/mail_templates/remaind_mail.tpl')
        print colored('send email to %s' % i.contact, 'red')
        link_url = ''.join(['http://', site.domain, i.product.get_absolute_url()])
        link_html = '<a href="%s">%s</a>' % (link_url,i.product)
        c = Context({'site_name': site.name, 'product': i.product, 'link': link_html})
        print colored(t.render(c), 'yellow')
        # Subject (Russian): "Notification that the item is in stock".
        sendm(i.contact,u'Уведомление о поступлении товара', t.render(c))
    for i in Preorder.objects.all().filter(type='phone'):
        logger.info('Sending SMS to %s' % i.contact)
@task(name='change_order_status_task')
def change_order_status_task(order):
    """E-mail the client when an order reaches status 4, 5 or 6.

    Status 4 = paid, 5 = handed to the delivery service, 6 = delivered
    (subjects are in Russian); any other status sends nothing.  The
    original left ``t``/``title`` unbound for unhandled statuses and hid
    the resulting NameError behind a bare ``except: pass``; mail
    failures are now logged instead of silently swallowed.
    """
    status = int(order.status)
    # Map each notifiable status to its (template, subject) pair.
    notifications = {
        6: ('mapshop/mail_templates/order_delivered.tpl', u'Ваш товар доставлен.'),
        5: ('mapshop/mail_templates/order_delivering.tpl', u'Ваш товар передан в службу доставки.'),
        4: ('mapshop/mail_templates/order_paied.tpl', u'Ваш товар оплачен.'),
    }
    if status not in notifications:
        return
    template_name, title = notifications[status]
    t = loader.get_template(template_name)
    try:
        c = Context({'order': order})
        logger.info(t.render(c))
        sendm(order.client.email, title, t.render(c))
    except Exception:
        # Best-effort notification: keep the task alive but record the
        # failure instead of hiding it.
        logger.exception('Failed to send status-%s mail for order %s', status, order)
@task(name='mapshop_create_user_email')
def mapshop_create_user_email(user,password):
    """Mail a freshly-registered user their account credentials."""
    current_site = Site.objects.get_current()
    template = loader.get_template('mapshop/mail_templates/new_user_created.tpl')
    context = Context({'user': user, 'password': password, 'site_name': current_site.name})
    rendered = template.render(context)
    logger.info(rendered)
    # Subject (Russian): "You are registered on the site."
    sendm(user.email, u'Вы зарегистрированы на сайте.', rendered)
|
17,426 | ecbcced37b4f9b941042178d23111f67c5ae9145 | #!/usr/bin/python3
# coding=utf-8
import sys
import os
import inspect
import configparser
from util import configSectionMap
from peewee import MySQLDatabase, Model
# read database config file
config = configparser.ConfigParser()
# Directory containing this module, resolved via the interpreter frame
# so the path works regardless of the current working directory.
dbDir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe()))
)
config.read(dbDir + '/config.ini')
# Pull the [DB] section into a plain dict of connection settings.
ConfigMap = configSectionMap('DB', config)
mHost = str(ConfigMap['host'])
mPort = int(ConfigMap['port'])
mUser = str(ConfigMap['user'])
mPasswd = str(ConfigMap['passwd'])
mDb = str(ConfigMap['db'])
# Single shared peewee connection used by all models via BaseModel.
conn = MySQLDatabase(mDb, host=mHost, port=mPort, user=mUser, passwd=mPasswd)
class BaseModel(Model):
    """Base class for all ORM models; binds them to the shared connection."""
    class Meta:
        # peewee reads the target database from the model's Meta.
        database = conn
def connectMysql():
    """Open the shared MySQL connection."""
    conn.connect()
def closeConnect():
    """Close the shared MySQL connection."""
    conn.close()
|
17,427 | d5bb7370aca7a8ac9e3b132d1684aca0219259a2 | # __author: ZhengNengjin
# __date: 2018/10/14
import socket, subprocess
# Minimal remote-shell server: accepts a TCP connection, receives shell
# commands, executes them locally, and streams the output back.
# WARNING(review): executes arbitrary received input with shell=True --
# never expose beyond a trusted loopback demo.
# socket() defaults to AF_INET / SOCK_STREAM (IPv4 TCP).
sk = socket.socket()
print(sk)
address = ('127.0.0.1', 8888)  # IP address and port to listen on
sk.bind(address)  # bind() takes an (ip, port) tuple
sk.listen(3)
print("服务端启动...")
while True:
    conn, address = sk.accept()  # blocks until a client connects
    print(address)
    while True:
        try:
            data = conn.recv(1024)  # receive one command
        except Exception:
            print("意外中断")
            break
        if not data: break
        print(str(data, 'utf8'))  # echo the received command
        obj = subprocess.Popen(str(data,'utf8'), shell=True, stdout=subprocess.PIPE)
        cmd_result = obj.stdout.read()
        # Send the payload length first so the client knows how much to read.
        result_len = bytes(str(len(cmd_result)),'utf8')
        conn.sendall(result_len)
        # inp = input(">>>")  # (unused) manual input hook
        # Wait for the client's ack so the two sendall() calls are not
        # coalesced into one TCP segment (avoids the "sticky packet" issue).
        conn.recv(1021)
        conn.sendall(cmd_result)  # send the command output
sk.close()
|
17,428 | c1c47d102e737237625567d388d94370d11faadf | #
# PySNMP MIB module WLSX-USER6-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WLSX-USER6-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:30:11 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
wlsxEnterpriseMibModules, = mibBuilder.importSymbols("ARUBA-MIB", "wlsxEnterpriseMibModules")
ArubaPhyType, ArubaUserForwardMode, ArubaAuthenticationMethods, ArubaSubAuthenticationMethods, ArubaEncryptionType, ArubaHTMode = mibBuilder.importSymbols("ARUBA-TC", "ArubaPhyType", "ArubaUserForwardMode", "ArubaAuthenticationMethods", "ArubaSubAuthenticationMethods", "ArubaEncryptionType", "ArubaHTMode")
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Unsigned32, Bits, ObjectIdentity, iso, Integer32, snmpModules, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, TimeTicks, Gauge32, Counter64, Counter32, NotificationType, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Bits", "ObjectIdentity", "iso", "Integer32", "snmpModules", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "TimeTicks", "Gauge32", "Counter64", "Counter32", "NotificationType", "IpAddress")
TAddress, TDomain, TextualConvention, TimeInterval, DisplayString, TestAndIncr, RowStatus, MacAddress, StorageType, TruthValue, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "TAddress", "TDomain", "TextualConvention", "TimeInterval", "DisplayString", "TestAndIncr", "RowStatus", "MacAddress", "StorageType", "TruthValue", "PhysAddress")
wlsxSwitchMIB, = mibBuilder.importSymbols("WLSX-SWITCH-MIB", "wlsxSwitchMIB")
wlanESSID, = mibBuilder.importSymbols("WLSX-WLAN-MIB", "wlanESSID")
wlsxUser6MIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14))
wlsxUser6MIB.setRevisions(('1910-01-26 18:06',))
if mibBuilder.loadTexts: wlsxUser6MIB.setLastUpdated('1001261806Z')
if mibBuilder.loadTexts: wlsxUser6MIB.setOrganization('Aruba Wireless Networks')
wlsxUser6AllInfoGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1))
wlsxUser6InfoGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4))
wlsxTotalNumOfUsers6 = MibScalar((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wlsxTotalNumOfUsers6.setStatus('current')
wlsxUser6Table = MibTable((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2), )
if mibBuilder.loadTexts: wlsxUser6Table.setStatus('current')
wlsxUser6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1), ).setIndexNames((0, "WLSX-USER6-MIB", "nUser6PhyAddress"), (0, "WLSX-USER6-MIB", "nUser6IpAddress"))
if mibBuilder.loadTexts: wlsxUser6Entry.setStatus('current')
nUser6PhyAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 1), MacAddress())
if mibBuilder.loadTexts: nUser6PhyAddress.setStatus('current')
nUser6IpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128)))
if mibBuilder.loadTexts: nUser6IpAddress.setStatus('current')
nUser6Name = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6Name.setStatus('current')
nUser6Role = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6Role.setStatus('current')
nUser6UpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6UpTime.setStatus('current')
nUser6AuthenticationMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 6), ArubaAuthenticationMethods()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6AuthenticationMethod.setStatus('current')
nUser6SubAuthenticationMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 7), ArubaSubAuthenticationMethods()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6SubAuthenticationMethod.setStatus('current')
nUser6AuthServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6AuthServerName.setStatus('current')
nUser6ExtVPNAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 9), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ExtVPNAddress.setStatus('current')
nUser6ApLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ApLocation.setStatus('current')
nUser6ApBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 11), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ApBSSID.setStatus('current')
nUser6IsOnHomeAgent = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 12), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6IsOnHomeAgent.setStatus('current')
nUser6HomeAgentIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 13), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6HomeAgentIpAddress.setStatus('current')
nUser6MobilityStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("visitor", 1), ("away", 2), ("associated", 3), ("wired", 4), ("wireless", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6MobilityStatus.setStatus('current')
nUser6HomeVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6HomeVlan.setStatus('current')
nUser6DefaultVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DefaultVlan.setStatus('current')
nUser6AssignedVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6AssignedVlan.setStatus('current')
nUser6BWContractName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 18), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6BWContractName.setStatus('deprecated')
nUser6BWContractUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("user", 1), ("shared", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6BWContractUsage.setStatus('deprecated')
nUser6BWContractId = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6BWContractId.setStatus('deprecated')
nUser6IsProxyArpEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 21), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6IsProxyArpEnabled.setStatus('current')
nUser6CurrentVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6CurrentVlan.setStatus('current')
nUser6IsWired = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 23), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6IsWired.setStatus('current')
nUser6ConnectedSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ConnectedSlot.setStatus('current')
nUser6ConnectedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ConnectedPort.setStatus('current')
nUser6PhyType = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 26), ArubaPhyType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6PhyType.setStatus('current')
nUser6MobilityDomainName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 27), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6MobilityDomainName.setStatus('current')
nUser6UPBWContractName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 28), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6UPBWContractName.setStatus('current')
nUser6UPBWContractUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("user", 1), ("shared", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6UPBWContractUsage.setStatus('current')
nUser6UPBWContractId = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6UPBWContractId.setStatus('current')
nUser6DNBWContractName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 31), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DNBWContractName.setStatus('current')
nUser6DNBWContractUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("user", 1), ("shared", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DNBWContractUsage.setStatus('current')
nUser6DNBWContractId = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 33), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DNBWContractId.setStatus('current')
nUser6HTMode = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 34), ArubaHTMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6HTMode.setStatus('current')
nUser6DeviceID = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 35), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DeviceID.setStatus('current')
nUser6DeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 36), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6DeviceType.setStatus('current')
nUser6ConnectedModule = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 37), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ConnectedModule.setStatus('current')
nUser6RxDataPkts64 = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 38), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6RxDataPkts64.setStatus('current')
nUser6TxDataPkts64 = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 39), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6TxDataPkts64.setStatus('current')
nUser6RxDataOctets64 = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 40), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6RxDataOctets64.setStatus('current')
nUser6TxDataOctets64 = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 41), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6TxDataOctets64.setStatus('current')
nUser6ForwardMode = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 42), ArubaUserForwardMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6ForwardMode.setStatus('current')
nUser6EncryptionMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 43), ArubaEncryptionType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nUser6EncryptionMethod.setStatus('current')
nVIAUser6DeviceID = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 2, 1, 44), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nVIAUser6DeviceID.setStatus('current')
wlsxUser6SessionTimeTable = MibTable((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 3), )
if mibBuilder.loadTexts: wlsxUser6SessionTimeTable.setStatus('current')
wlsxUser6SessionTimeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 3, 1), ).setIndexNames((0, "WLSX-WLAN-MIB", "wlanESSID"), (0, "WLSX-USER6-MIB", "wlsxUser6SessionTimeLength"))
if mibBuilder.loadTexts: wlsxUser6SessionTimeEntry.setStatus('current')
wlsxUser6SessionTimeLength = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: wlsxUser6SessionTimeLength.setStatus('current')
wlsxUser6SessionTimeCount = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 14, 1, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wlsxUser6SessionTimeCount.setStatus('current')
wlsxSwitchUser6Table = MibTable((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1), )
if mibBuilder.loadTexts: wlsxSwitchUser6Table.setStatus('current')
wlsxSwitchUser6Entry = MibTableRow((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1), ).setIndexNames((0, "WLSX-USER6-MIB", "user6IpAddress"))
if mibBuilder.loadTexts: wlsxSwitchUser6Entry.setStatus('current')
user6IpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32)))
if mibBuilder.loadTexts: user6IpAddress.setStatus('current')
user6PhyAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6PhyAddress.setStatus('current')
user6Name = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6Name.setStatus('current')
user6Role = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6Role.setStatus('current')
user6UpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6UpTime.setStatus('current')
user6AuthenticationMethod = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("none", 1), ("other", 2), ("web", 3), ("dot1x", 4), ("vpn", 5), ("mac", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6AuthenticationMethod.setStatus('current')
user6Location = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6Location.setStatus('current')
user6ServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6ServerName.setStatus('current')
user6ConnectedVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6ConnectedVlan.setStatus('current')
user6ConnectedSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6ConnectedSlot.setStatus('current')
user6ConnectedPort = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6ConnectedPort.setStatus('current')
user6BWContractName = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6BWContractName.setStatus('current')
user6BWContractUsage = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("user", 1), ("shared", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6BWContractUsage.setStatus('current')
user6ConnectedModule = MibTableColumn((1, 3, 6, 1, 4, 1, 14823, 2, 2, 1, 1, 4, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: user6ConnectedModule.setStatus('current')
mibBuilder.exportSymbols("WLSX-USER6-MIB", nUser6UPBWContractName=nUser6UPBWContractName, user6Name=user6Name, nUser6BWContractName=nUser6BWContractName, nUser6IsProxyArpEnabled=nUser6IsProxyArpEnabled, user6IpAddress=user6IpAddress, nUser6DNBWContractId=nUser6DNBWContractId, user6ConnectedVlan=user6ConnectedVlan, nUser6PhyAddress=nUser6PhyAddress, nUser6TxDataOctets64=nUser6TxDataOctets64, PYSNMP_MODULE_ID=wlsxUser6MIB, nUser6AuthServerName=nUser6AuthServerName, nUser6DNBWContractUsage=nUser6DNBWContractUsage, wlsxTotalNumOfUsers6=wlsxTotalNumOfUsers6, nUser6IpAddress=nUser6IpAddress, nUser6DeviceID=nUser6DeviceID, nUser6BWContractUsage=nUser6BWContractUsage, nVIAUser6DeviceID=nVIAUser6DeviceID, wlsxUser6SessionTimeTable=wlsxUser6SessionTimeTable, nUser6ExtVPNAddress=nUser6ExtVPNAddress, wlsxUser6SessionTimeLength=wlsxUser6SessionTimeLength, user6ServerName=user6ServerName, nUser6UpTime=nUser6UpTime, nUser6DeviceType=nUser6DeviceType, nUser6HomeAgentIpAddress=nUser6HomeAgentIpAddress, nUser6TxDataPkts64=nUser6TxDataPkts64, nUser6AssignedVlan=nUser6AssignedVlan, user6AuthenticationMethod=user6AuthenticationMethod, nUser6AuthenticationMethod=nUser6AuthenticationMethod, user6ConnectedModule=user6ConnectedModule, user6Role=user6Role, user6ConnectedPort=user6ConnectedPort, nUser6SubAuthenticationMethod=nUser6SubAuthenticationMethod, nUser6MobilityStatus=nUser6MobilityStatus, nUser6DNBWContractName=nUser6DNBWContractName, user6BWContractName=user6BWContractName, user6BWContractUsage=user6BWContractUsage, wlsxSwitchUser6Entry=wlsxSwitchUser6Entry, nUser6BWContractId=nUser6BWContractId, user6Location=user6Location, nUser6PhyType=nUser6PhyType, nUser6CurrentVlan=nUser6CurrentVlan, nUser6ConnectedPort=nUser6ConnectedPort, nUser6ApBSSID=nUser6ApBSSID, nUser6RxDataPkts64=nUser6RxDataPkts64, wlsxUser6MIB=wlsxUser6MIB, nUser6HomeVlan=nUser6HomeVlan, nUser6Name=nUser6Name, wlsxUser6SessionTimeCount=wlsxUser6SessionTimeCount, wlsxUser6SessionTimeEntry=wlsxUser6SessionTimeEntry, 
nUser6ConnectedModule=nUser6ConnectedModule, wlsxUser6Table=wlsxUser6Table, nUser6ApLocation=nUser6ApLocation, nUser6IsWired=nUser6IsWired, nUser6ConnectedSlot=nUser6ConnectedSlot, wlsxUser6InfoGroup=wlsxUser6InfoGroup, nUser6UPBWContractId=nUser6UPBWContractId, nUser6EncryptionMethod=nUser6EncryptionMethod, user6PhyAddress=user6PhyAddress, wlsxUser6AllInfoGroup=wlsxUser6AllInfoGroup, user6ConnectedSlot=user6ConnectedSlot, user6UpTime=user6UpTime, wlsxUser6Entry=wlsxUser6Entry, wlsxSwitchUser6Table=wlsxSwitchUser6Table, nUser6RxDataOctets64=nUser6RxDataOctets64, nUser6IsOnHomeAgent=nUser6IsOnHomeAgent, nUser6DefaultVlan=nUser6DefaultVlan, nUser6HTMode=nUser6HTMode, nUser6MobilityDomainName=nUser6MobilityDomainName, nUser6UPBWContractUsage=nUser6UPBWContractUsage, nUser6ForwardMode=nUser6ForwardMode, nUser6Role=nUser6Role)
|
17,429 | c762692e4d01853ccd1ba403ac1c29fcff86dad9 | # %load q02_data_split/build.py
from greyatomlib.multivariate_regression_project.q01_load_data.build import load_data
from sklearn.model_selection import train_test_split
import pandas as pd
# Load the raw student dataset and keep an untouched copy for later reference.
df = load_data('data/student-mat.csv')
df1 = df.copy()
# Write your code below
def split_dataset(df):
    """Split *df* into an 80/20 train/test partition.

    The last column is treated as the target; every preceding column is a
    feature.  With ``test_size=0.8`` sklearn's "test" chunk holds 80% of
    the rows, and that larger chunk is returned as the training data, so
    the effective split is 80% train / 20% test.

    Returns (x_train, x_test, y_train, y_test).
    """
    features = df.iloc[:, :-1]
    target = df.iloc[:, -1]
    # train_test_split returns (train, test) pairs; here the 20% "train"
    # chunk becomes our held-out test set and the 80% chunk our train set.
    small_X, big_X, small_y, big_y = train_test_split(
        features, target, test_size=0.8, random_state=42)
    return big_X, small_X, big_y, small_y
|
17,430 | c4ebb158d27df39f698d102d26789e2839f93f67 | from tkinter import *
import csv
# Print the contents of champions.csv, then show a small Tk window with a
# greeting label and a list box populated from a fixed tuple of names.
root = Tk()

# Echo every CSV row to stdout (the CSV data is not used by the GUI).
with open('champions.csv') as csvfile:
    championsCSV = csv.reader(csvfile, delimiter=',')
    for row in championsCSV:
        print(row)

# Renamed from `list` so the builtin is no longer shadowed.
champion_names = ("Morgana", "Perl", "one", "Two", "Three")

myLabel = Label(root, text="Hello World!")
myLabel.pack()

myList = Listbox(root)
# enumerate() replaces the manual `j = j + 1` index counter.
for j, name in enumerate(champion_names):
    myList.insert(j, name)
myList.pack()

root.mainloop()
|
17,431 | 633009c25f056ea87b65822a275a8284da6406f1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-04-13 09:38:20
# @Author : Linsir (root@linsir.org)
# @Link : http://linsir.org
# @Version : 0.1
import subprocess
import time
from datetime import datetime, timedelta
import logging
# Directory that receives the gzipped dumps and the daily log file.
backup_file_path = "/home/data/mysqlbak"
# backup_file_path = "./"
# One entry per database to back up; extend as needed.
# NOTE(review): credentials are stored in plain text here - consider
# loading them from a protected configuration file instead.
data = [
    {
        "db_host": "127.0.0.1",
        "db_name": "db1",
        "db_user": "user",
        "db_password": "password",
    },
    {
        "db_host": "127.0.0.1",
        "db_name": "dbv2",
        "db_user": "user",
        "db_password": "password",
    },
]
# Format a timestamp; defaults to the current time.
def fmt_time(fmt='%Y-%m-%d %H:%M:%S', seconds=None):
    """Format *seconds* (unix epoch) in the UTC+8 timezone.

    Falsy *seconds* (None or 0) means "now".
    """
    stamp = seconds if seconds else time.time()
    # Shift the naive UTC datetime into UTC+8 by hand.
    local = datetime.utcfromtimestamp(stamp) + timedelta(hours=8)
    return local.strftime(fmt)
# One log file per day inside the backup directory, truncated ('w') on
# every run of the script.
log_name = '%s/mysql_backup_%s.log'%(backup_file_path, fmt_time('%Y-%m-%d'))
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s %(levelname)s %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    filename=log_name,
    filemode='w',
)
def backup_db(db_name, db_user, db_password, db_host='127.0.0.1'):
    """Dump one MySQL database to a gzipped file under backup_file_path.

    Returns a one-line 'name : Sucess/Failed' status string; on failure
    the (possibly partial) dump file is removed.
    """
    # Renamed from `time` so the stdlib time module is not shadowed.
    day = fmt_time('%Y-%m-%d')
    db_filename = "%s/%s_%s_sql.gz" %(backup_file_path, db_name, day)
    command = "mysqldump -h%s -u%s -p%s %s |gzip >%s " %(db_host, db_user, db_password, db_name, db_filename)
    p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
    info = p.stderr.read()
    p.wait()  # reap the child and make sure the dump has finished before judging it
    # Bug fix: stderr yields bytes on Python 3, so the old `info == ''`
    # comparison was always False there; `not info` works for str and bytes.
    if not info:
        logging.info("Backup %s Sucessful..."%db_name)
        return '%s : Sucess\n'%db_name
    else:
        logging.error("Failed to backup %s ..."%db_name)
        logging.error(info)
        # Remove the broken partial dump.
        command = 'rm -f %s'%db_filename
        subprocess.call(command,shell=True)
        return '%s : Failed\n'%db_name
def backup_from_list(list=data):
    """Back up every database described in *list* (defaults to `data`).

    Returns the concatenated per-database status lines.  The parameter
    keeps its historical name `list` (shadowing the builtin) so existing
    keyword callers are not broken.
    """
    starttime = time.time()
    line = "\n----------------------\n"
    backup_result = ''
    logging.info(line + 'Backup stared..')
    # Bug fix: iterate the parameter instead of the module-level `data`,
    # which silently ignored any caller-supplied list.
    for db in list:
        backup_result = backup_result + backup_db(db["db_name"], db["db_user"],
                                                  db["db_password"], db['db_host'])
    # Prune dumps/logs older than the retention window.
    delete_expires_files()
    endtime = time.time()
    time_info = line + "Total used time: %.2fs." %(endtime - starttime)
    logging.info(line + backup_result + time_info)
    return backup_result
def delete_expires_files(day=7):
    """Delete dump and log files under backup_file_path older than *day* days."""
    # Bug fix: the age predicate is `-mtime +N`; the previous `+mtime +N`
    # made find fail with an unknown-predicate error, so expired files
    # were never actually deleted.
    command = 'find %s \( -name "*_sql.gz" -or -name "*.log" \) -type f -mtime +%s -exec rm -f {} \;' %(backup_file_path, day)
    subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)
    info = "Already delelte the expires files %s days ago.."%day
    logging.info(info)
if __name__ == '__main__':
    # Run the full backup cycle (all databases + expiry pruning).
    backup_from_list()
    # delete_expires_files()
    # print fmt_time()
    # backup_db("db_name", "db_user", "db_password")
    # delete_expires_files()
|
17,432 | 6b9b44adc8653e5a933ce60f11655d08f07c9885 | # @project : Pytorch implementation of RefineGAN
# @author : Bingyu Xin
# @Institute : CS@Rutgers
# @Code : https://github.com/hellopipu/RefineGAN
import torch
from utils import RF
def total_variant(images):
    '''
    Anisotropic total variation of each image in the batch.

    :param images: [B,C,W,H]
    :return: 1-D tensor of length B with the per-sample total variation
    '''
    # Neighbour differences along the two spatial axes.
    diff_h = images[:, :, 1:, :] - images[:, :, :-1, :]
    diff_w = images[:, :, :, 1:] - images[:, :, :, :-1]
    # Sum of absolute differences over channel and both spatial dims.
    return diff_h.abs().sum(dim=[1, 2, 3]) + diff_w.abs().sum(dim=[1, 2, 3])
def build_loss(dis_real, dis_fake):
    '''
    WGAN losses from the critic outputs on real and generated samples.

    Returns (generator loss, discriminator loss).
    '''
    critic_gap = dis_fake - dis_real
    d_loss = critic_gap.mean()
    g_loss = -dis_fake.mean()
    return g_loss, d_loss
def cal_loss(S01, S01_k_un, S02, S02_k_un, mask, Sp1, S1, Tp1, T1, Sp2, S2, Tp2, T2, S1_dis_real, S1_dis_fake,
             T1_dis_fake, S2_dis_real, S2_dis_fake,
             T2_dis_fake, cal_G=True):
    '''
    Total generator (cal_G=True) or discriminator (cal_G=False) loss.

    Combines, per image pair (1/2) and per output stage (S*/T*):
      * WGAN adversarial terms from the critic outputs,
      * k-space data-consistency terms |S0*_k_un - RF(pred, mask)|,
      * image-domain L1 terms against S01/S02 (presumably the reference
        images - confirm against the training loop),
      * a total-variation smoothness penalty.
    Returns (loss, [component losses, e.g. for logging]).

    TODO: input arguments are too much, and some calculation is redundant
    '''
    # Adversarial losses for every (real critic output, fake critic output)
    # pairing used in the totals below; upper-case suffix = S outputs,
    # lower-case = T outputs.
    G_loss_AA, D_loss_AA = build_loss(S1_dis_real, S1_dis_fake)
    G_loss_Aa, D_loss_Aa = build_loss(S1_dis_real, T1_dis_fake)
    G_loss_BB, D_loss_BB = build_loss(S2_dis_real, S2_dis_fake)
    G_loss_Bb, D_loss_Bb = build_loss(S2_dis_real, T2_dis_fake)
    G_loss_AB, D_loss_AB = build_loss(S1_dis_real, S2_dis_fake)
    G_loss_Ab, D_loss_Ab = build_loss(S1_dis_real, T2_dis_fake)
    G_loss_BA, D_loss_BA = build_loss(S2_dis_real, S1_dis_fake)
    G_loss_Ba, D_loss_Ba = build_loss(S2_dis_real, T1_dis_fake)
    if cal_G:
        # k-space consistency: re-undersample the predictions with RF and
        # compare against the measured undersampled k-space data.
        recon_frq_AA = torch.mean(torch.abs(S01_k_un - RF(Sp1, mask)))
        recon_frq_BB = torch.mean(torch.abs(S02_k_un - RF(Sp2, mask)))
        recon_frq_Aa = torch.mean(torch.abs(S01_k_un - RF(Tp1, mask)))
        recon_frq_Bb = torch.mean(torch.abs(S02_k_un - RF(Tp2, mask)))
        # Image-domain L1 distances between S01/S02 and the network outputs.
        recon_img_AA = torch.mean((torch.abs((S01) - (S1))))
        recon_img_BB = torch.mean((torch.abs((S02) - (S2))))
        error_img_AA = torch.mean(torch.abs((S01) - (Sp1)))
        error_img_BB = torch.mean(torch.abs((S02) - (Sp2)))
        smoothness_AA = torch.mean(total_variant(S1))
        smoothness_BB = torch.mean(total_variant(S2))
        recon_img_Aa = torch.mean(torch.abs((S01) - (T1)))
        recon_img_Bb = torch.mean(torch.abs((S02) - (T2)))
        error_img_Aa = torch.mean(torch.abs((S01) - (Tp1)))
        error_img_Bb = torch.mean(torch.abs((S02) - (Tp2)))
        smoothness_Aa = torch.mean(total_variant(T1))
        smoothness_Bb = torch.mean(total_variant(T2))
        # Loss weights.  GAMMA is immediately overwritten by RATES, the
        # ratio of total to sampled mask entries (halved).
        ALPHA = 1e+1
        GAMMA = 1e-0
        DELTA = 1e-4
        RATES = torch.count_nonzero(torch.ones_like(mask)) / 2. / torch.count_nonzero(mask)
        GAMMA = RATES
        g_loss = \
            (G_loss_AA + G_loss_BB + G_loss_AB + G_loss_BA) + \
            (G_loss_Aa + G_loss_Bb + G_loss_Ab + G_loss_Ba) + \
            (recon_img_AA + recon_img_BB) * 1.00 * ALPHA * RATES + \
            (recon_img_Aa + recon_img_Bb) * 1.00 * ALPHA * RATES + \
            (error_img_AA + error_img_BB) * 1e+2 * ALPHA * RATES + \
            (error_img_Aa + error_img_Bb) * 1e+2 * ALPHA * RATES + \
            (recon_frq_AA + recon_frq_BB) * 1.00 * GAMMA * RATES + \
            (recon_frq_Aa + recon_frq_Bb) * 1.00 * GAMMA * RATES + \
            (smoothness_AA + smoothness_BB + smoothness_Aa + smoothness_Bb) * DELTA
        return g_loss, [G_loss_AA, G_loss_Aa, recon_img_AA, recon_img_Aa, error_img_AA, error_img_Aa, recon_frq_AA,
                        recon_frq_Aa, smoothness_AA, smoothness_Aa]
    else:
        # Discriminator update: sum of all critic losses only.
        d_loss = \
            D_loss_AA + D_loss_BB + D_loss_AB + D_loss_BA + \
            D_loss_Aa + D_loss_Bb + D_loss_Ab + D_loss_Ba
        return d_loss, [D_loss_AA, D_loss_Aa, D_loss_AB, D_loss_Ab]
|
17,433 | d20e95a57a7dcedcc867188aa7b4f6a4aed4271d | import os
import re
def LD(s, t):
    """Levenshtein (edit) distance between strings *s* and *t*.

    Iterative dynamic programming, O(len(s)*len(t)) time and O(len(t))
    space.  The previous naive recursion recomputed overlapping
    subproblems and was exponential in the string lengths.
    """
    if s == "":
        return len(t)
    if t == "":
        return len(s)
    # prev[j] = distance between the current prefix of s and t[:j].
    prev = list(range(len(t) + 1))
    for i, sc in enumerate(s, 1):
        cur = [i]
        for j, tc in enumerate(t, 1):
            cost = 0 if sc == tc else 1
            cur.append(min(prev[j] + 1,          # deletion
                           cur[j - 1] + 1,       # insertion
                           prev[j - 1] + cost))  # substitution / match
        prev = cur
    return prev[-1]
# Normalised frequencies for words "near" the prefix 'she', written to TaskB.
file_item = open('./count_1w.txt')
# Each line is "word<TAB>count".
words = file_item.read().split('\n')
# Total count of the matching words (shadows the builtin `sum`).
sum = 0
for word in words:
    wor = word.split('\t')[0]
    # NOTE(review): wor[:4] is a 4-char slice compared against the 3-char
    # 'she', so this only matches the literal word 'she' - the author may
    # have intended wor[:3] == 'she' (prefix test). Confirm the intent.
    if(wor[:4]=='she'):
        sum = sum + int(word.split('\t')[1])
# Candidate words within edit distance 3 of 'shep' (shadows builtin `list`).
list = []
for word in words:
    wor = word.split('\t')[0]
    if wor[:4]=='she' and LD('shep',wor)<=3:
        list.append(word)
my_file = open('TaskB', 'w')
for word in list:
    wor = word.split('\t')[0]
    rep = float(word.split('\t')[1])
    # Write "word<TAB>relative frequency".
    my_file.write(wor+'\t'+str(rep/sum)+'\n')
my_file.close()
17,434 | ad7bb90d47163248eaa72c5cdf8c1063d736de16 | class Persona:
def __init__(self, edad, nombre):
self.edad = edad
self.nombre = nombre
print("Se ha creado a",self.nombre,"de",self.edad)
def hablar (self,*palabras):
for frase in palabras:
print(self.nombre,':',frase)
class Deportista(Persona):
    """A Persona that can additionally announce sport practice."""

    def practicarDeporte (self):
        # Prints "<name> : Voy a practicar" (print's default separator
        # inserts the space before the colon).
        print(self.nombre,": Voy a practicar")
# Demo: create a plain Persona and a Deportista and exercise their methods.
Juan = Persona(18,"Juan")
Juan.hablar("Hola estoy hablando", "Este soy yo")
Luis = Deportista(20,"Luis")
Luis.hablar("Hola estoy hablando", "Este soy yo")
Luis.practicarDeporte()
|
17,435 | b8fff37da58405a44eec0a07d530c15a6b436bcd | def SortInput(f,ButterFly,Size,Bits):
for i,n in enumerate(ButterFly):
k=Size-n*Bits
f(f"\nassign X0[{i}][0]=Xn_vect_real[{k-1}:{k-Bits}];")
f(f"\nassign X0[{i}][1]=Xn_vect_imag[{k-1}:{k-Bits}];")
f('\n')
def GenerateMACBlocks(f,MAC):
    # Instantiate *MAC* radix-2 FFT butterfly modules, each wired to its
    # own MAC_in/MAC_out slots. f is the sink callable (e.g. file.write).
    for m in range(MAC):
        f(f"""
radix_2_fft r2_{m}
(MAC_in[{m}][0][0],MAC_in [{m}][0][1],
MAC_in [{m}][1][0],MAC_in [{m}][1][1],
MAC_in [{m}][2][0],MAC_in [{m}][2][1],
MAC_out[{m}][0][0],MAC_out[{m}][0][1],
MAC_out[{m}][1][0],MAC_out[{m}][1][1]);""")
    f('\n')
def ConnectOutputs(f,N,Size,Bits,Layers):
    # Wire the final pipeline layer's registers X_reg[Layers] onto the
    # packed output vectors Xk_vect_real/imag, one Bits-wide slice per word.
    for n in range(N):
        k=Size-n*Bits
        f(f"""
assign Xk_vect_real[{k-1}:{k-Bits}]=X_reg[{Layers}][{n}][0];
assign Xk_vect_imag[{k-1}:{k-Bits}]=X_reg[{Layers}][{n}][1];""")
    f('\n')
|
17,436 | 6404a665997081333d464e3127e3bf0758b5631f | """Philips Hue bridge discovery using N-UPnP.
Philips Hue bridge limits how many SSDP lookups you can make. To work
around this they recommend to do N-UPnP lookup simultaneously with
SSDP lookup: https://developers.meethue.com/documentation/hue-bridge-discovery
"""
import xml.etree.ElementTree as ElementTree
import logging
import requests
from netdisco.util import etree_to_dict
_LOGGER = logging.getLogger(__name__)
# pylint: disable=too-few-public-methods
class PHueBridge(object):
    """Philips Hue bridge description parsed from its description XML.

    Mimics the shape of a UPNPEntry object.
    """

    def __init__(self, location, description_xml):
        self.location = location
        root_element = ElementTree.fromstring(description_xml)
        # Keep only the contents of the <root> element, as a plain dict.
        self.description = etree_to_dict(root_element).get("root", {})

    def __repr__(self):
        device = self.description['device']
        return str((device['friendlyName'], self.description['URLBase']))
class PHueNUPnPDiscovery(object):
    """Philips Hue bridge discovery using N-UPnP."""

    PHUE_NUPNP_URL = "https://www.meethue.com/api/nupnp"
    DESCRIPTION_URL_TMPL = "http://{}/description.xml"

    def __init__(self):
        self.entries = []

    def scan(self):
        """Scan the network."""
        try:
            response = requests.get(self.PHUE_NUPNP_URL, timeout=5)
            response.raise_for_status()
            # Replace previous results only once the portal answered.
            self.entries = []
            for bridge in response.json():
                parsed = self.fetch_description(bridge)
                if parsed:
                    self.entries.append(parsed)
        except requests.exceptions.RequestException as err:
            _LOGGER.warning('Could not query server %s: %s',
                            self.PHUE_NUPNP_URL,
                            err)

    def fetch_description(self, bridge):
        """Fetches description XML of a Philips Hue bridge."""
        url = self.bridge_description_url(bridge)
        try:
            response = requests.get(url, timeout=5)
            response.raise_for_status()
            return PHueBridge(url, response.text)
        except requests.exceptions.RequestException as err:
            # Returns None implicitly: the bridge is skipped by scan().
            _LOGGER.warning('Could not query server %s: %s',
                            url, err)

    def bridge_description_url(self, bridge):
        """Returns URL for fetching description XML"""
        return self.DESCRIPTION_URL_TMPL.format(bridge["internalipaddress"])
def main():
    """Test N-UPnP discovery."""
    from pprint import pprint

    discovery = PHueNUPnPDiscovery()
    discovery.scan()
    pprint(discovery.entries)
if __name__ == "__main__":
main()
|
17,437 | 5ea46219f49696d5ad41846d7d7f7a2f67d4ec7e | from django.contrib import admin
from .models import Genre, Movie
@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    # Show id and name in the changelist; both columns link to the edit page.
    list_display = ['id', 'name']
    list_display_links = ['id', 'name']
@admin.register(Movie)
class MovieAdmin(admin.ModelAdmin):
    # Changelist shows only the title, which links to the edit page.
    list_display = ['title']
    list_display_links = ['title']
|
17,438 | 13b3c03fd905e3e5be8037ccc3e18ce10afd420c | """File to copy the LCPS ICU admission programme"""
import cvxpy as cp
import numpy as np
from sklearn.metrics import mean_absolute_error
class LCPSModel:
    """
    Class to recreate LCPS model
    Minimization with trend penalty term

    Fits, via cvxpy, a per-observation trend x plus a 7-value day-of-week
    effect s to log-transformed observations y, where w holds the weekday
    index of each observation.
    """

    def __init__(self, y, w, gamma=10):
        # y: observed series (log is taken inside loss); w: weekday index
        # per observation; gamma: weight of the trend-change penalty.
        self.y = y
        self.gamma = gamma
        self.w = w

    def loss(self, x, s):
        """Sum of absolute residuals of x + weekday effect vs. log(y)."""
        return sum(cp.abs(x + s[self.w] - np.log(self.y)))

    def regularizer(self, x):
        """
        Penalty term that penalizes trend changes
        (sum of absolute second differences of x).
        """
        return sum(cp.abs((x[2:] - x[1:-1]) - (x[1:-1] - x[:-2])))

    def objective(self, x, s):
        # L1 fit plus gamma-weighted trend-change penalty.
        return self.loss(x, s) + self.gamma * self.regularizer(x)

    def predict(self, x, s, w_train, t):
        """
        Function to get the t-day ahead prediction
        """
        # w_pred is the weekday of the day we want to predict. Given the weekday
        # of x[-1]
        w_pred = w_train[-7 + (t - 1)]
        # Linear extrapolation of the trend plus the weekday effect,
        # transformed back from the log scale.
        return np.exp(x[-1] + t * (x[-1] - x[-2]) + s[w_pred])

    def solve(self):
        """Solve the convex program; stores x and s as numpy arrays."""
        p = self.y.shape
        x = cp.Variable(p)
        # variable for days of the week
        s = cp.Variable((7,))
        obj = cp.Minimize(self.objective(x, s))
        problem = cp.Problem(obj)
        # different solver?
        problem.solve('ECOS')
        self.x = np.array(x.value)
        self.s = np.array(s.value)
def rolling_pred_LCPS(method, y_train, y_test, w_train, w_test, t=1, gamma=10):
    """Rolling t-step-ahead forecasts over the test period.

    The model class *method* is re-fitted before every prediction; after
    each step the realised test observation (and its weekday index) is
    folded into the growing training window.

    Returns a list with one prediction per element of y_test.
    """
    forecasts = []
    for step in range(len(y_test)):
        # From the second step on, grow the window with the previously
        # observed test point.
        if step:
            y_train = np.append(y_train, y_test[step - 1])
            w_train = np.append(w_train, w_test[step - 1])
        model = method(y_train, w_train, gamma=gamma)
        model.solve()
        forecasts.append(model.predict(model.x, model.s, w_train, t=t))
    return forecasts
def gridsearch_LCPS(y, w, splits_list, grid=None, t=1):
    """
    Find the optimal value for the smoothing parameter lambda by a block-
    time series split. We optimzie based on the mean absolute error of rolling
    predictions

    y, w        -- log-scale series and weekday indices for the full sample
    splits_list -- list of dicts with "train"/"validation" (start, end)
                   index pairs, one dict per fold
    grid        -- iterable of gamma values to evaluate.  NOTE(review):
                   the default of None is unusable (iterating None raises
                   TypeError) - callers must always supply a grid.
    t           -- forecast horizon, passed through to rolling_pred_LCPS

    :return:
    the key (stringified gamma) with the lowest average MAE, plus the
    dict of average MAEs per parameter
    """
    # repeat loop for every parameter in grid
    average_mae_per_par = dict()
    for parameter in grid:
        mae_list = []
        # for loop for each set of indices per fold
        for index_dict in splits_list:
            # perform rolling predictions using train set on the validation set
            y_pred = rolling_pred_LCPS(LCPSModel,
                                       y[index_dict["train"][0]:
                                         index_dict["train"][1]],
                                       y[index_dict["validation"][0]:
                                         index_dict["validation"][1]],
                                       w[index_dict["train"][0]:
                                         index_dict["train"][1]],
                                       w[index_dict["validation"][0]:
                                         index_dict["validation"][1]],
                                       t=t, gamma=parameter)
            # add the mean absolute error on validation set to the list
            # (series and predictions are on log scale, hence np.exp)
            mae_list.append(mean_absolute_error(
                np.exp(y[
                       index_dict["validation"][0]:
                       index_dict["validation"][1]]),
                np.exp(y_pred)))
        # add average mae for parameter to dict
        average_mae_per_par["{}".format(parameter)] = np.mean(mae_list)
    # return parameter with average mae
    return min(average_mae_per_par, key=average_mae_per_par.get), \
        average_mae_per_par
|
17,439 | 9e738f74aa3309af80b055123943cb2df7f864af | #!/usr/bin/python3
# Password manager
# This program will store passwords for each account name
# Running account name as argument will put its password into clipboard
# Insecure! There is no file encryption involved
import sys, pyperclip
# Account-name -> password lookup table (stored unencrypted; see warning
# in the header comment above).
passwords = {'email': 'emailpasswordhere',
             'blog': 'blogpasswordhere',
             'luggage': '12345'}

# Require exactly one account-name argument.
if len(sys.argv) < 2:
    print('Usage: python3 pw.py [account] - copy account password')
    sys.exit()

account = sys.argv[1] # first cmd line argument is account name
if account in passwords:
    # Put the password on the system clipboard instead of echoing it.
    pyperclip.copy(passwords[account])
    print('Password for ' + account + ' copied to clipboard.')
else:
    print('There is no account named ' + account)
|
17,440 | ff8935114f91ef986084703c4f0262192eb5fe81 | #coding=UTF-8
import json
import time
# Python 2 script: average the "textscore" of FM comments per hour of day
# (24 buckets) across 32 input files and dump the result as one JSON line.
hour_score=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]  # running mean per hour
hour_count=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]  # samples per hour
output = open("FM_general_interaction",'w')
for i in range(0,32):
    # Input files are numbered FM_comment_00 .. FM_comment_31.
    if i < 10:
        file_detail = "0"+str(i)
    else:
        file_detail = str(i)
    print file_detail
    input = open("FM_comment_"+file_detail,'r')
    for line in input:
        # One JSON object per line with at least "time" and "textscore".
        data_dict = json.loads(line, encoding='UTF-8')
        old_time = data_dict["time"]
        value = time.localtime(float(old_time))
        year = time.strftime('%Y', value)
        mouth = time.strftime('%m', value)
        day = time.strftime('%d', value)
        hour = time.strftime('%H', value)
        # Shift by -8 hours, wrapping around midnight (timezone adjustment;
        # presumably converting to/from UTC+8 - confirm against the data).
        hour = int(hour) -8
        if hour <0:
            hour = hour +24
        textscore = data_dict["textscore"]
        if hour >= 24:
            hour = 0
        # Incremental running mean for this hour bucket.
        hour_score[hour] = round((hour_score[hour]*hour_count[hour]+float(textscore))/(hour_count[hour]+1),2)
        hour_count[hour] = hour_count[hour] + 1
result = {}
result["hour_score"] = hour_score
json.dump(result, output)
output.write('\n')
output.close()
|
17,441 | 8eb4f9a4889e6a3cb36810d7ca52ba4ee6bebf0b | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_raises, assert_allclose
from statsmodels.multivariate.manova import MANOVA
from statsmodels.multivariate.multivariate_ols import MultivariateTestResults
from statsmodels.tools import add_constant
# Example data
# https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
# viewer.htm#statug_introreg_sect012.htm
# Three numeric responses per observation, grouped by the 'Loc' factor;
# this is the SAS reference data set used by the tests below.
X = pd.DataFrame([['Minas Graes', 2.068, 2.070, 1.580],
                  ['Minas Graes', 2.068, 2.074, 1.602],
                  ['Minas Graes', 2.090, 2.090, 1.613],
                  ['Minas Graes', 2.097, 2.093, 1.613],
                  ['Minas Graes', 2.117, 2.125, 1.663],
                  ['Minas Graes', 2.140, 2.146, 1.681],
                  ['Matto Grosso', 2.045, 2.054, 1.580],
                  ['Matto Grosso', 2.076, 2.088, 1.602],
                  ['Matto Grosso', 2.090, 2.093, 1.643],
                  ['Matto Grosso', 2.111, 2.114, 1.643],
                  ['Santa Cruz', 2.093, 2.098, 1.653],
                  ['Santa Cruz', 2.100, 2.106, 1.623],
                  ['Santa Cruz', 2.104, 2.101, 1.653]],
                 columns=['Loc', 'Basal', 'Occ', 'Max'])
def test_manova_sas_example():
    # Results should be the same as figure 4.5 of
    # https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/
    # viewer.htm#statug_introreg_sect012.htm
    mod = MANOVA.from_formula('Basal + Occ + Max ~ Loc', data=X)
    stat = mod.mv_test()['Loc']['stat']
    # (row label, column, SAS reference value, decimal places) triples.
    expected = [
        ("Wilks' lambda", 'Value', 0.60143661, 8),
        ("Pillai's trace", 'Value', 0.44702843, 8),
        ("Hotelling-Lawley trace", 'Value', 0.58210348, 8),
        ("Roy's greatest root", 'Value', 0.35530890, 8),
        ("Wilks' lambda", 'F Value', 0.77, 2),
        ("Pillai's trace", 'F Value', 0.86, 2),
        ("Hotelling-Lawley trace", 'F Value', 0.75, 2),
        ("Roy's greatest root", 'F Value', 1.07, 2),
        ("Wilks' lambda", 'Num DF', 6, 3),
        ("Pillai's trace", 'Num DF', 6, 3),
        ("Hotelling-Lawley trace", 'Num DF', 6, 3),
        ("Roy's greatest root", 'Num DF', 3, 3),
        ("Wilks' lambda", 'Den DF', 16, 3),
        ("Pillai's trace", 'Den DF', 18, 3),
        ("Hotelling-Lawley trace", 'Den DF', 9.0909, 4),
        ("Roy's greatest root", 'Den DF', 9, 3),
        ("Wilks' lambda", 'Pr > F', 0.6032, 4),
        ("Pillai's trace", 'Pr > F', 0.5397, 4),
        ("Hotelling-Lawley trace", 'Pr > F', 0.6272, 4),
        ("Roy's greatest root", 'Pr > F', 0.4109, 4),
    ]
    for row, col, value, decimal in expected:
        assert_almost_equal(stat.loc[row, col], value, decimal=decimal)
def test_manova_no_formula():
    # Same as previous test only skipping formula interface
    exog = add_constant(pd.get_dummies(X[['Loc']], drop_first=True,
                                       dtype=float))
    endog = X[['Basal', 'Occ', 'Max']]
    mod = MANOVA(endog, exog)
    # Contrast matrices equivalent to the formula's Intercept and Loc terms.
    intercept = np.zeros((1, 3))
    intercept[0, 0] = 1
    loc = np.zeros((2, 3))
    loc[0, 1] = loc[1, 2] = 1
    r = mod.mv_test([('Intercept', intercept), ('Loc', loc)])
    stat = r['Loc']['stat']
    # Same SAS reference values as test_manova_sas_example.
    expected = [
        ("Wilks' lambda", 'Value', 0.60143661, 8),
        ("Pillai's trace", 'Value', 0.44702843, 8),
        ("Hotelling-Lawley trace", 'Value', 0.58210348, 8),
        ("Roy's greatest root", 'Value', 0.35530890, 8),
        ("Wilks' lambda", 'F Value', 0.77, 2),
        ("Pillai's trace", 'F Value', 0.86, 2),
        ("Hotelling-Lawley trace", 'F Value', 0.75, 2),
        ("Roy's greatest root", 'F Value', 1.07, 2),
        ("Wilks' lambda", 'Num DF', 6, 3),
        ("Pillai's trace", 'Num DF', 6, 3),
        ("Hotelling-Lawley trace", 'Num DF', 6, 3),
        ("Roy's greatest root", 'Num DF', 3, 3),
        ("Wilks' lambda", 'Den DF', 16, 3),
        ("Pillai's trace", 'Den DF', 18, 3),
        ("Hotelling-Lawley trace", 'Den DF', 9.0909, 4),
        ("Roy's greatest root", 'Den DF', 9, 3),
        ("Wilks' lambda", 'Pr > F', 0.6032, 4),
        ("Pillai's trace", 'Pr > F', 0.5397, 4),
        ("Hotelling-Lawley trace", 'Pr > F', 0.6272, 4),
        ("Roy's greatest root", 'Pr > F', 0.4109, 4),
    ]
    for row, col, value, decimal in expected:
        assert_almost_equal(stat.loc[row, col], value, decimal=decimal)
@pytest.mark.smoke
def test_manova_no_formula_no_hypothesis():
    """Smoke test: default hypotheses via the array (non-formula) interface."""
    dummies = pd.get_dummies(X[['Loc']], drop_first=True, dtype=float)
    mod = MANOVA(X[['Basal', 'Occ', 'Max']], add_constant(dummies))
    result = mod.mv_test()
    assert isinstance(result, MultivariateTestResults)
def test_manova_test_input_validation():
    # Hypotheses are (name, L, M) tuples: L is the contrast matrix with one
    # column per exog column, M the optional transform with one row per
    # endog column.
    mod = MANOVA.from_formula('Basal + Occ + Max ~ Loc', data=X)
    # L with 3 columns matches the 3 exog columns -> accepted.
    hypothesis = [('test', np.array([[1, 1, 1]]), None)]
    mod.mv_test(hypothesis)
    # L with only 2 columns must be rejected.
    hypothesis = [('test', np.array([[1, 1]]), None)]
    assert_raises(ValueError, mod.mv_test, hypothesis)
    """
    assert_raises_regex(ValueError,
                        ('Contrast matrix L should have the same number of '
                         'columns as exog! 2 != 3'),
                        mod.mv_test, hypothesis)
    """
    # M with 3 rows matches the 3 endog columns -> accepted.
    hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1], [1]]))]
    mod.mv_test(hypothesis)
    # M with only 2 rows must be rejected.
    hypothesis = [('test', np.array([[1, 1, 1]]), np.array([[1], [1]]))]
    assert_raises(ValueError, mod.mv_test, hypothesis)
    """
    assert_raises_regex(ValueError,
                        ('Transform matrix M should have the same number of '
                         'rows as the number of columns of endog! 2 != 3'),
                        mod.mv_test, hypothesis)
    """
def test_endog_1D_array():
    # A single dependent variable is univariate; MANOVA must reject it.
    assert_raises(ValueError, MANOVA.from_formula, 'Basal ~ Loc', X)
def test_manova_demeaned():
    # see last example in #8713
    # If a term has no effect, all eigenvalues below threshold, then computaion
    # raised numpy exception with empty arrays.
    # currently we have an option to skip the intercept test, but don't handle
    # empty arrays directly
    ng = 5  # observations per location label
    loc = ["Basal", "Occ", "Max"] * ng
    # Random responses with different group means (unseeded, so run-to-run
    # values differ; the test only compares the two fits against each other).
    y1 = (np.random.randn(ng, 3) + [0, 0.5, 1]).ravel()
    y2 = (np.random.randn(ng, 3) + [0.25, 0.75, 1]).ravel()
    y3 = (np.random.randn(ng, 3) + [0.3, 0.6, 1]).ravel()
    dta = pd.DataFrame(dict(Loc=loc, Basal=y1, Occ=y2, Max=y3))
    mod = MANOVA.from_formula('Basal + Occ + Max ~ C(Loc, Helmert)', data=dta)
    res1 = mod.mv_test()
    # subtract sample means to have insignificant intercept
    means = dta[["Basal", "Occ", "Max"]].mean()
    dta[["Basal", "Occ", "Max"]] = dta[["Basal", "Occ", "Max"]] - means
    mod = MANOVA.from_formula('Basal + Occ + Max ~ C(Loc, Helmert)', data=dta)
    res2 = mod.mv_test(skip_intercept_test=True)
    # The Loc statistics must be unaffected by demeaning the responses.
    stat1 = res1.results["C(Loc, Helmert)"]["stat"].to_numpy(float)
    stat2 = res2.results["C(Loc, Helmert)"]["stat"].to_numpy(float)
    assert_allclose(stat1, stat2, rtol=1e-10)
|
17,442 | 5d40f804e0bcc2e1dc483fc8a031cb9b5800e8b4 | """
Представьте себе бухгалтерскую процедуру, используемую в книжном магазине. Он работает в списке с подсписками, которые выглядят так:
+--------------+------------------------------------+----------+----------------+
| Order Number | Book Title and Author | Quantity | Price per Item |
+--------------+------------------------------------+----------+----------------+
| 34587 | Learning Python, Mark Lutz | 4 | 40.95 |
| 98762 | Programming Python, Mark Lutz | 5 | 56.80 |
| 77226 | Head First Python, Paul Barry | 3 | 32.95 |
| 88112 | Einführung in Python3, Bernd Klein | 3 | 24.99 |
+--------------+------------------------------------+----------+----------------+
(каждая строка таблицы, это подсписок:
[
[34587, 'Learning Python, Mark Lutz', 4, 40.95],
[98762, 'Programming Python, Mark Lutz', 5, 56.80]
]
и т.д.
)
Напишите программу на Python, которая возвращает список кортежей.
Каждый кортеж состоит из номера заказа и произведения цены на товары и количества.
Сумма заказа должена быть увеличен на 10€, если стоимость заказа меньше 100,00 €.
Напишите программу на Python, используя лямбду и карту.
"""
from random import randint
from random import uniform

# Horizontal rule printed between table rows.
line = '+--------------+------------------------------------+------------+----------------+\n'
# line = ('+{}+{}+{}+{}+\n'.format('-'*14, '-'*36, '-'*12, '-'*16))

# Demo orders: [order number, "title, author", quantity, unit price].
lst = [[randint(1, 10000), 'Learning Python, Mark Lutz', randint(1, 10), round(float(uniform(5, 40)), 2)],
       [randint(1, 10000), 'Programming Python, Mark Lutz', randint(1, 10), round(float(uniform(5, 50)), 2)],
       [randint(1, 10000), 'Head First Python, Paul Barry', randint(1, 10), round(float(uniform(5, 30)), 2)],
       [randint(1, 10000), 'Einführung in Python3, Bernd Klein', randint(1, 10), round(float(uniform(5, 25)), 2)]]


def order_totals(orders):
    """Return [(order_number, total), ...] for *orders*.

    Total is quantity * unit price rounded to cents; orders worth less
    than 100.00 € get a 10 € surcharge.  Implemented with map+lambda as
    the task requires.
    """
    # BUG FIX: the original lambda used `x[0] and ...` (a confusing no-op
    # that would misbehave for order number 0) and surcharged a total of
    # exactly 100 € although the spec says "less than 100 €".
    return list(map(lambda x: (x[0], round(x[2] * x[3], 2)) if x[2] * x[3] >= 100
                    else (x[0], round(x[2] * x[3] + 10, 2)), orders))


print(line, end='')
print('|{OrderNumber:^14}|{TitleAndAuthor:^36}|{Quantity:^12}|{Price:^16}|\n'.format(
    OrderNumber='Номер заказа', TitleAndAuthor="Автор и название книги", Quantity="Количество", Price="Цена"), end='')
print(line, end='')
for row in lst:
    print('|{:>13} | {:<35}|{:>11} |{:>15} |'.format(row[0], row[1], row[2], row[3]))
print(line, end='')

res = order_totals(lst)
print(res)
# Pretty-printed variant:
# for order_number, total in res:
#     print('Цена заказа №{} = {}'.format(order_number, total))
|
17,443 | 864f3eefd3cb32af834a2efc1e898e1bf2e4e153 | """Problem 145
16 March 2007
Some positive integers n have the property that the sum [ n +
reverse(n) ] consists entirely of odd (decimal) digits. For instance,
36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are
not allowed in either n or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (10**9)?
This is very slow. Check euler145.c (runs in 233 seconds)
This one finished in more than 1 hour. =/
"""
from eulerlib import reverseNum
# Added to eulerlib!
def isReversible(n):
    """Return True if *n* is reversible.

    A number is reversible when n + reverse(n) consists entirely of odd
    decimal digits.  Numbers ending in 0 are excluded because their
    reversal would carry a leading zero.
    """
    if n % 10 == 0:
        return False
    # Reverse via string slicing — avoids the external eulerlib helper
    # and keeps this function self-contained.
    s = n + int(str(n)[::-1])
    # Every digit of the sum must be odd.
    return all(int(d) % 2 == 1 for d in str(s))
if __name__ == '__main__':
    # BUG FIX: this brute-force scan of 10**9 numbers used to run at
    # import time; guard it so importing the module for isReversible()
    # does not trigger the hour-long computation.
    count = sum(1 for n in range(1, 1000000000) if isReversible(n))
    print(count)
|
17,444 | 67689b81217a01582bf0994937858e9339165254 | import json
import requests
import configs
import urllib.parse
from slugbot import botutils
import facebook
import random
class PagePost:
    """Lightweight view of a Facebook Graph API post object."""

    def __init__(self, post: dict):
        """Copy the interesting fields out of *post*, defaulting to ''.

        :param post: decoded Graph API post dict (may miss any key).
        """
        # BUG FIX: the original tested `'id' in dict` — membership in the
        # builtin *type*, which raises TypeError — instead of `post`.
        self.time = post.get('created_time', '')
        self.id = post.get('id', '')
        self.message = post.get('message', '')
class CrawlingSlug:
    """Pager/search helper over a Facebook Graph API (v2.10) page feed."""

    def __init__(self, page_id: str, token: str):
        """Remember the target page, access token and request page size."""
        self.page_id = page_id
        self.token = token
        self.requestLimit = 100  # posts requested per Graph API call

    def crawl(self, url='') -> dict:
        """Fetch one page of posts; an empty *url* means the first page.

        Returns the decoded JSON payload, or {} on any request failure.
        """
        if url == '':
            url = 'https://graph.facebook.com/v2.10/{}/posts?limit={}&access_token={}'.format(self.page_id,
                                                                                              self.requestLimit,
                                                                                              self.token)
        try:
            req = requests.get(url)
        except Exception:
            # NOTE(review): every transport error is collapsed into an
            # empty result; callers cannot tell failure from "no posts".
            print('connection error')
            return {}
        return req.json()

    def find(self, wannafind: str, max_posts=4096, exclusion=False) -> list:
        """Collect up to *max_posts* posts whose message contains *wannafind*.

        With exclusion=True the first and last lines of each message are
        stripped before matching (only when the message has at least two
        distinct line breaks).
        """
        goodies = self.crawl()
        posts = []
        res = []
        cnt = 0
        while True:
            if 'data' in goodies:
                posts = goodies['data']
                for post in posts:
                    if 'message' in post:
                        msg = str(post['message'])
                        if cnt >= max_posts:
                            return res
                        if exclusion:
                            gab = msg.find('\n')
                            mic = msg.rfind('\n')
                            if gab != -1 and mic != -1 and gab != mic:
                                # Drop the first and last lines.
                                msg = msg[gab+1:mic]
                        if msg.find(wannafind) != -1:
                            res.append(post)
                            cnt += 1
            # Follow the Graph API pagination cursor until exhausted.
            if 'paging' in goodies:
                if 'next' in goodies['paging']:
                    goodies = self.crawl(goodies['paging']['next'])
                else:
                    break
            else:
                break
        return res

    def view(self, trgt: str, solo=False) -> list:
        """Return posts whose first message line equals *trgt* exactly.

        solo=True stops and returns at the first match.
        """
        goodies = self.crawl()
        posts = []
        res = []
        while True:
            if 'data' in goodies:
                posts = goodies['data']
                for post in posts:
                    if 'message' in post:
                        msg = str(post['message'])
                        mic = msg.find('\n')
                        if mic != -1:
                            # Compare only the first line of the message.
                            msg = msg[0:mic]
                        if msg == trgt:
                            res.append(post)
                            if solo:
                                return res
            if 'paging' in goodies:
                if 'next' in goodies['paging']:
                    goodies = self.crawl(goodies['paging']['next'])
                else:
                    break
            else:
                break
        return res

    def view_comments(self, post_id) -> list:
        """Return every comment of *post_id*, following pagination.

        Request errors end the walk early and return what was gathered.
        """
        url = 'https://graph.facebook.com/v2.10/{}/comments?access_token={}'.format(post_id, self.token)
        res = []
        try:
            goodies = requests.get(url).json()
        except Exception:
            return res
        while True:
            if 'data' in goodies:
                for cmt in goodies['data']:
                    res.append(cmt)
            if 'paging' in goodies:
                if 'next' in goodies['paging']:
                    try:
                        goodies = requests.get(goodies['paging']['next']).json()
                    except Exception:
                        # Treat a failed page fetch as end-of-feed.
                        goodies = {}
                else:
                    break
            else:
                break
        return res
class EasterEggHandler:
    """Replies with a canned easter-egg answer when a message mentions a
    name configured in source/easter/name.json."""

    def __init__(self, bot: botutils.SlugBot):
        self._bot = bot
        # Mapping of trigger name -> canned reply text.
        with open('source/easter/name.json', 'r') as f:
            self.easter_names = json.load(f)

    @property
    def the_bot(self):
        """The bot used to send replies."""
        return self._bot

    def on_message(self, user: botutils.User, message: str):
        """Send the canned reply for every trigger name found in *message*."""
        # Cleanup: the original also kept a `value` flag here that was
        # assigned but never read.
        for name in self.easter_names:
            if message.find(name) != -1:
                self.the_bot.smart_send_msg(user, self.easter_names[name])
class ChatHandler:
    """Fallback small-talk handler: answers with a random canned line in
    the user's language."""

    def __init__(self, bot: botutils.SlugBot):
        self._bot = bot
        # list_chat[lang] is a list of canned replies for that language.
        with open('source/text/chitchat.json', 'r') as f:
            self.list_chat = json.load(f)

    @property
    def the_bot(self):
        """The bot used to send replies."""
        return self._bot

    def on_message(self, user: botutils.User, message: str):
        """Reply with one random canned line for the user's language.

        TEMPORARY — TODO implement natural language processing.
        """
        # BUG FIX: the original called random.choice(user, ...);
        # random.choice() takes a single sequence, so every message
        # raised TypeError.  The user is the send target and choice()
        # picks the reply.
        self.the_bot.smart_send_msg(user, random.choice(self.list_chat[user.lang]))
def get_slug(site):
    """Return the CrawlingSlug registered for *site*.

    Unknown sites get a newline-terminated listing of every known site
    key instead, so the caller can show the valid options.
    """
    try:
        return slug_map[site]
    except KeyError:
        return ''.join(key + '\n' for key in slug_map)
def localize(user: botutils.User):
    """Attach the stored language code to *user* (default 0).

    Unknown users are registered in userstat.json with zero score and
    the default language before the attribute is set.
    """
    with open('userstat.json', 'r') as f:
        users = json.load(f)
    if user.userid not in users:
        # First sighting of this user: persist a fresh record right away.
        users[user.userid] = {'score': 0, 'lang': 0}
        with open('userstat.json', 'w') as f:
            json.dump(users, f)
        user.lang = 0
    else:
        user.lang = users[user.userid]['lang']
def page_post(ID, access_token, msg):
    """Publish *msg* to the page feed identified by *ID*.

    Both values are URL-quoted into the query string; returns the raw
    response body from the Graph API.
    """
    id = urllib.parse.quote(ID)  # NOTE(review): shadows the builtin id()
    nmsg = urllib.parse.quote(msg)
    r = requests.post('https://graph.facebook.com/v2.10/{}/feed?message={}&access_token={}'.format(id, nmsg,
                                                                                                   access_token))
    return r.content
def find_in_saved(filename, target):
    """Search previously parsed posts stored under source/parsed/.

    NOTE(review): unfinished — the loop body is a stub and *target* is
    never used; currently this only loads the JSON file and discards it.
    """
    saved = []
    with open('source/parsed/' + filename, 'r') as f:
        saved = json.load(f)
    for sv in saved:
        pass
# One crawler per target page.  NOTE(review): the first two entries use
# the positional configs.PAGE_ID list while the rest use the
# configs.PAGE_IDS mapping — confirm both shapes exist in configs.
aqua_slug = CrawlingSlug(configs.PAGE_ID[0], configs.ACCESS_TOKEN)
ignis_slug = CrawlingSlug(configs.PAGE_ID[1], configs.ACCESS_TOKEN)
flamma_slug = CrawlingSlug(configs.PAGE_IDS['hn'], configs.ACCESS_TOKEN)
terra_slug = CrawlingSlug(configs.PAGE_IDS['c8'], configs.ACCESS_TOKEN)
aer_slug = CrawlingSlug(configs.PAGE_IDS['h8'], configs.ACCESS_TOKEN)
tenebrae_slug = CrawlingSlug(configs.PAGE_IDS['hhchs'], configs.ACCESS_TOKEN)
lux_slug = CrawlingSlug(configs.PAGE_IDS['chchs'], configs.ACCESS_TOKEN)
test_slug = CrawlingSlug('1181443615334961', configs.ACCESS_TOKEN)
# Site keyword -> crawler, used by get_slug().  NOTE(review): ignis_slug
# is created above but never added to this map.
slug_map = {
    'cn': aqua_slug,
    'hn': flamma_slug,
    'c8': terra_slug,
    'h8': aer_slug,
    'hhchs': tenebrae_slug,
    'chchs': lux_slug,
    'test30182384': test_slug
}
|
17,445 | ccf0bdbbe2ce2426fafc7b897f1986f03de925e1 | # encoding:utf-8
from urllib.parse import urlparse
import requests
telegram_autoplay_limit = 10 * 1024 * 1024
def get_url(submission):
    """Classify a reddit submission's link.

    Returns a (kind, url) pair: ('gif', direct gif url) for gif/gifv
    links, ('text', None) for reddit-hosted links, or ('other', url).
    """
    # TODO: Better url validation
    link = submission.url
    if link.endswith('.gifv'):
        # Imgur .gifv pages hide a direct .gif behind them: drop the 'v'.
        return 'gif', link[:-1]
    if link.endswith('.gif'):
        return 'gif', link
    if urlparse(link).netloc == 'www.reddit.com':
        return 'text', None
    return 'other', link
def download_file(url, filename):
    """Stream *url* into *filename* in 1 KiB chunks; always returns True.

    Streaming keeps memory usage flat regardless of file size.
    """
    # http://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
    # NOTE the stream=True parameter
    r = requests.get(url, stream=True)
    with open(filename, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
            #f.flush() commented by recommendation from J.F.Sebastian
    return True
|
17,446 | 53b58f19be67114755741ea978dc49ffcbc395eb | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import os,time,sys
from utils import plot_all_complex,SimpleDataIterator
################################################################
# data and parameters
ITERATIONS = 40000      # total training iterations
CRITIC_ITERS = 5        # critic updates per generator update (WGAN recipe)
DATA = "Geometry"       # tag used only in output file names
LOSS = "Sqrt"
MODE = "wgan-gp"        # enables the gradient-penalty terms below
X_dim = 2               # samples live in the 2-D plane
Z_dim = 2               # latent dimension
H_dim = 500             # generator hidden-layer width
data_type = tf.float32
LAMBDA = float(sys.argv[1])  # gradient-penalty weight, from the command line
BATCH_SIZE = 256
######################################################
# define model
# Real dataset 1: an annulus (ring) centred on (Xc, Yc) with radii R1..R2.
R2 = 1;
R1 = np.sqrt(0.5);
Xc = 0.5;
Yc = 0.5;
circle_angle = tf.random_uniform([BATCH_SIZE, 1],0,1,dtype=data_type)* (2*np.pi)
# sqrt of a uniform radius^2 makes the points uniform in *area*.
circle_radius = tf.sqrt(tf.random_uniform([BATCH_SIZE, 1],0,1,dtype=data_type)* (R2**2- R1**2) + R1**2)
circle_x = Xc + circle_radius*tf.cos(circle_angle);
circle_y = Yc + circle_radius*tf.sin(circle_angle);
real_data_circle = tf.concat([circle_x,circle_y],axis=1)
# Real dataset 2: the unit square [0,1]^2.
square_x = tf.random_uniform([BATCH_SIZE, 1],0,1,dtype=data_type)
square_y = tf.random_uniform([BATCH_SIZE, 1],0,1,dtype=data_type)
real_data_square = tf.concat([square_x,square_y],axis=1)
def xavier_init(size):
    """Xavier/Glorot-style normal initializer for a [in_dim, out_dim] shape.

    NOTE(review): the stddev is doubled relative to the plain
    1/sqrt(in_dim/2) value — presumably intentional; confirm.
    """
    in_dim = size[0]
    xavier_stddev = 1. / np.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev*2,dtype=data_type)
def bias_init(shape):
    """Truncated-normal (stddev 1) initial value for a bias vector.

    NOTE(review): returns the tensor itself, not a tf.Variable — callers
    wrap it via tf.get_variable(initializer=...).
    """
    initial = tf.truncated_normal(shape, stddev=1,dtype=data_type)
    return initial
def generator(z,name,scope_reuse=False):
    """Two-layer ReLU MLP mapping latent z -> 2-D sample.

    *name* scopes the variables; pass scope_reuse=True to share weights
    with a previously built copy of the same scope.
    """
    with tf.variable_scope(name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        G_W1 = tf.get_variable('W1',initializer=xavier_init([Z_dim, H_dim]))
        G_b1 = tf.get_variable('b1',initializer=bias_init([H_dim]))
        G_W2 = tf.get_variable('W2',initializer=xavier_init([H_dim, X_dim]))
        G_b2 = tf.get_variable('b2',initializer=bias_init([X_dim]))
        G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
        out = tf.matmul(G_h1, G_W2) + G_b2
        return out
def discriminator(x,name,scope_reuse=False):
    """Four-layer tanh MLP critic returning an unbounded scalar score.

    *name* scopes the variables; pass scope_reuse=True to evaluate an
    already-built critic on new inputs (fake data, interpolates).
    """
    with tf.variable_scope(name) as scope:
        if scope_reuse:
            scope.reuse_variables()
        D_h1_dim = 512
        D_h2_dim = 512
        D_h3_dim = 512
        D_W0 = tf.get_variable('W0',initializer=xavier_init([X_dim, D_h1_dim]))
        D_b0 = tf.get_variable('b0',initializer=tf.zeros(shape=[D_h1_dim]))
        D_W1 = tf.get_variable('W1',initializer=xavier_init([D_h1_dim, D_h2_dim]))
        D_b1 = tf.get_variable('b1',initializer=tf.zeros(shape=[D_h2_dim]))
        D_W2 = tf.get_variable('W2',initializer=xavier_init([D_h2_dim, D_h3_dim]))
        D_b2 = tf.get_variable('b2',initializer=tf.zeros(shape=[D_h3_dim]))
        D_W3 = tf.get_variable('W3',initializer=xavier_init([D_h3_dim, 1]))
        D_b3 = tf.get_variable('b3',initializer=tf.zeros(shape=[1]))
        D_h1 = tf.tanh(tf.matmul(x, D_W0) + D_b0)
        D_h2 = tf.tanh(tf.matmul(D_h1, D_W1) + D_b1)
        D_h3 = tf.tanh(tf.matmul(D_h2, D_W2) + D_b2)
        out = tf.matmul(D_h3, D_W3) + D_b3
        return out
# Build the graph: one generator, two independent critics (one per
# real dataset), WGAN losses, optional gradient penalties, optimizers.
Z = tf.random_uniform([BATCH_SIZE, Z_dim],0,1,dtype=data_type)
fake_data = generator(Z,'Generator')
D1_real = discriminator(real_data_circle,'Discriminator1')
D2_real = discriminator(real_data_square,'Discriminator2')
D1_fake = discriminator(fake_data,'Discriminator1',True)
D2_fake = discriminator(fake_data,'Discriminator2',True)
# Wasserstein critic losses: E[D(fake)] - E[D(real)].
D1_loss = tf.reduce_mean(D1_fake) - tf.reduce_mean(D1_real)
D2_loss = tf.reduce_mean(D2_fake) - tf.reduce_mean(D2_real)
# NOTE(review): the generator is trained against critic 2 only; the
# commented term would also pull it towards the circle distribution.
G_loss = (-tf.reduce_mean(D2_fake) ) #-tf.reduce_mean(D1_fake) + (-tf.reduce_mean(D2_fake) )
# Fixed latent batch so snapshots across iterations are comparable.
Z_fix = tf.constant(np.random.uniform(low=0.0, high=1.0, size=(3000,Z_dim)),dtype=data_type)
Fixed_sample = generator(Z_fix,'Generator',True)
# Split trainables by variable-scope prefix for the three optimizers.
train_variables = tf.trainable_variables()
generator_variables = [v for v in train_variables if v.name.startswith("Generator")]
discriminator1_variables = [v for v in train_variables if v.name.startswith("Discriminator1")]
discriminator2_variables = [v for v in train_variables if v.name.startswith("Discriminator2")]
# WGAN gradient penalty: penalise critic gradients away from norm 1 on
# random interpolates between real and fake samples.
if MODE == 'wgan-gp':
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.,maxval=1.)
    interpolates = alpha*real_data_circle + ((1-alpha)*fake_data)
    disc_interpolates = discriminator(interpolates,'Discriminator1',True)
    gradients = tf.gradients(disc_interpolates, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1)**2)
    D1_loss += LAMBDA*gradient_penalty
    alpha = tf.random_uniform(shape=[BATCH_SIZE,1], minval=0.,maxval=1.)
    interpolates = alpha*real_data_square + ((1-alpha)*fake_data)
    disc_interpolates = discriminator(interpolates,'Discriminator2',True)
    gradients = tf.gradients(disc_interpolates, [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1)**2)
    D2_loss += LAMBDA*gradient_penalty
# Adam settings follow the WGAN-GP paper (lr 1e-4, beta1 0.5, beta2 0.9).
disc1_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(D1_loss, var_list=discriminator1_variables)
disc2_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(D2_loss, var_list=discriminator2_variables)
gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(G_loss, var_list=generator_variables)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0, allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
# Plot one batch of each real distribution for visual reference.
samples_circle,samples_square = sess.run([real_data_circle,real_data_square])
fig = plt.figure()
plt.scatter(samples_circle[:,0],samples_circle[:,1])
plt.savefig('out/{}.png'
            .format('real_circle'), bbox_inches='tight')
plt.close(fig)
fig = plt.figure()
plt.scatter(samples_square[:,0],samples_square[:,1])
plt.savefig('out/{}.png'
            .format('real_square'), bbox_inches='tight')
plt.close(fig)
# Main training loop: several critic steps per generator step.
for it in range(ITERATIONS):
    for _ in range(CRITIC_ITERS):
        D1_loss_curr, _ = sess.run([D1_loss, disc1_train_op])
        D2_loss_curr, _ = sess.run([D2_loss, disc2_train_op])
    G_loss_curr, _ = sess.run([G_loss, gen_train_op])
    if it % 100 == 0:
        # BUG FIX: the original printed D1's loss twice and never D2's.
        print('Iter: {}; D1 loss: {:.4}; D2 loss: {:.4}; G_loss: {:.4}'
              .format(it, D1_loss_curr, D2_loss_curr, G_loss_curr))
    if it % 4000 == 0:
        # Periodic snapshot of the fixed latent batch.
        samples = sess.run(Fixed_sample)
        fig = plt.figure()
        plt.scatter(samples[:, 0], samples[:, 1])
        plt.savefig('out/{}_{}_{}_{}.png'
                    .format(DATA, 'dual_square_', LAMBDA, str(it).zfill(3)), bbox_inches='tight')
        plt.close(fig)
|
17,447 | e384609bd4c2a988a89cfd8c43fc8c0e2e9dc09a | #!/home/porosya/.local/share/virtualenvs/checkio-VEsvC6M1/bin/checkio --domain=py run sendgrid-geo-stats
# https://py.checkio.org/mission/sendgrid-geo-stats/
# To solve this mission you must use theSendGrid API Key. When you click "Run" you will see the results of using your API key with your data, but if you click "Check" your solution will be tested using our data.
#
# You should be able to operate with your statistical email data and SendGrid has a lot of APIs that provide information you may need.
#
# Your mission is to figure out which country opens your emails the most. You can use this information in order to focus on a specific segment.
#
# Input:Day as a string in format 'YYYY-MM-DD'
#
# Output:String, Two-digit country code of country that has more unique clicks.
#
# Example:
#
#
# best_country('2016-01-01') == 'UA'
#
# END_DESC
import sendgrid
# SECURITY(review): a SendGrid API key is hard-coded and committed with
# the source.  It should be revoked and loaded from an environment
# variable or secrets store instead.
API_KEY = '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'
sg = sendgrid.SendGridAPIClient(apikey=API_KEY)
def best_country(str_date):
    """Return the two-letter code of the country with the most unique
    opens on *str_date*.

    Placeholder implementation: the answer is hard-coded rather than
    queried from the SendGrid geo-stats API.
    """
    return 'UA'
if __name__ == '__main__':
    # Self-check only; not needed for auto-testing.
    print('Your best country in 2016-01-01 was ' + best_country('2016-01-01'))
    print('Check your results')
17,448 | fa01670f87d775db0df0721678540e7d85d3f64e | StringBuilder text = new StringBuilder();
// deal with potential null variables
if(sentenceVariance == null){
sentenceVariance = 0;
}
if(includeEnochian == null){
includeEnochian = false;
}
if(enochianWeight == null){
enochianWeight = 1;
}
int sentenceLengthMin = SENTENCE_LENGTH_MIN - sentenceVariance;
int sentenceLengthMax = SENTENCE_LENGTH_MAX - sentenceVariance;
ArrayList<String> words = new ArrayList<String>();
words.addAll(WORDS);
// append Enochian words to list if includeEnochian is true,
// and add n times according to the weighting.
if(includeEnochian.booleanValue()){
while(enochianWeight >= 1){
words.addAll(ENOCHIAN);
enochianWeight--;
}
}
// randomize array order
Collections.shuffle(words);
for(int p=0;p<nParagraphs;p++){
StringBuilder paragraph = new StringBuilder();
int paragraphSentenceCount = randomInRange(PARAGRAPH_SENTENCE_COUNT_MIN,PARAGRAPH_SENTENCE_COUNT_MAX);
// add sentences to paragraph
for(int i=0;i<paragraphSentenceCount;i++){
StringBuilder sentence = new StringBuilder();
int sentenceLength = randomInRange(sentenceLengthMin,sentenceLengthMax);
int previousWordIndex = 0;
// add words to sentence
for(int l=0;l<sentenceLength;l++){
int index = randomInRange(0,words.size());
// if index is the same as the previous word index, get a new one
while (index == previousWordIndex){
index = randomInRange(0,words.size());
}
previousWordIndex = index;
// append the word
sentence.append(words.get(index));
// unless it is the last word in the sentence, add a space
if(l < sentenceLength-1){
sentence.append(" ");
}
}
sentence.append(". ");
// capitalize first letter of the sentence
sentence.setCharAt(0,Character.toUpperCase(sentence.charAt(0)));
paragraph.append(sentence);
}
// if it is the first paragraph, prepend Satan ipsum to paragraph
if(p == 0){
String leaderText = "Satan ipsum ";
paragraph.insert(0,leaderText);
paragraph.setCharAt(leaderText.length(),Character.toLowerCase(paragraph.charAt(leaderText.length())));
}
text.append(paragraph);
text.append("<br/><br/>");
}
return text.toString(); |
17,449 | 4d5bfe69da3790bb6de401a137af3ad4617c4aa7 | #!/usr/bin/python3
""" module for states query """
from api.v1.views import app_views
from flask import jsonify, abort, request
import models
@app_views.route("/states/<state_id>/cities", methods=["POST"],
                 strict_slashes=False)
def create_city(state_id):
    """Create a City under the given State.

    404 for an unknown state, 400 for a missing/invalid JSON body or a
    missing "name" field; 201 with the new city on success.
    """
    if models.storage.get("State", state_id) is None:
        abort(404)
    payload = request.get_json()
    if payload is None:
        abort(400, "Not a JSON")
    if payload.get("name") is None:
        abort(400, "Missing name")
    city = models.city.City(name=payload.get("name"), state_id=state_id)
    city.save()
    return jsonify(city.to_dict()), 201
@app_views.route("/states/<state_id>/cities", methods=["GET"],
                 strict_slashes=False)
def citiesId(state_id):
    """List every city of the given state as JSON; 404 if unknown."""
    state = models.storage.get("State", state_id)
    if state is None:
        abort(404)
    return jsonify([city.to_dict() for city in state.cities])
@app_views.route("/cities/<city_id>",
                 methods=["GET"], strict_slashes=False)
def retrieve_city(city_id):
    """Return one City as JSON, or 404 when the id is unknown."""
    city = models.storage.get("City", city_id)
    if city is None:
        abort(404)
    return jsonify(city.to_dict())
@app_views.route("/cities/<city_id>", methods=["DELETE"],
                 strict_slashes=False)
def city_del(city_id):
    """Delete a City and answer with an empty JSON object (200)."""
    city = models.storage.get("City", city_id)
    if city is None:
        abort(404)
    models.storage.delete(city)
    models.storage.save()
    return jsonify({})
@app_views.route("/cities/<city_id>", methods=["PUT"], strict_slashes=False)
def update_city(city_id):
    """Update mutable attributes of a City from the JSON body."""
    city = models.storage.get("City", city_id)
    payload = request.get_json()
    if city is None:
        abort(404)
    if payload is None:
        abort(400, "Not a JSON")
    # id, timestamps and the owning state are immutable.
    immutable = ("id", "updated_at", "created_at", "state_id")
    for key, value in payload.items():
        if key not in immutable:
            setattr(city, key, value)
    city.save()
    return jsonify(city.to_dict())
|
17,450 | d97bb4ecafcaf635b678a2223fef53628148f686 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, jsonify, request
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTIONS'] = True
# Last IP reported via /save_ip.  NOTE(review): module-level mutable
# state — one value shared by all clients and not guarded against
# concurrent requests; confirm that is acceptable for this service.
current_ip = ''
@app.route('/save_ip', methods=['GET', 'POST'])
def get_service_info():
    """Store the ?ip=... query argument for later retrieval.

    NOTE(review): the function name looks copy-pasted; the route name
    ('save_ip') describes the behaviour.  Renaming would change the
    Flask endpoint name, so it is only flagged here.
    """
    global current_ip
    current_ip = request.args.get('ip', 'No ip')
    return jsonify({'result': 'done'})
@app.route('/get_ip', methods=['GET', 'POST'])
def update_subscription_statistics():
    """Return the most recently stored IP (empty string initially)."""
    return jsonify({'result': current_ip})
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=10000)
|
17,451 | 25ad44f87b14357fdc4e3b79b48261edc21718b7 | """
Task
Given an integer, n, and n space-separated integers as input, create a tuple,t , of those n integers. Then compute and print the result of hash(t).
Note: hash() is one of the functions in the __builtins__ module, so it need not be imported.
Input Format
The first line contains an integer, n, denoting the number of elements in the tuple.
The second line contains n space-separated integers describing the elements in tuple t.
Output Format
Print the result of hash(t).
Sample Input 0
2
1 2
Sample Output 0
3713081631934410656
"""
def tuple_hash(values):
    """Return hash(tuple(int(v) for v in values)) — the puzzle answer."""
    return hash(tuple(int(v) for v in values))


if __name__ == '__main__':
    # First input line is the element count (only part of the protocol,
    # unused); the second holds the space-separated integers.
    input()
    print(tuple_hash(input().split()))
17,452 | 3640a16389dbb26c7325d1629d165519674e6850 | import os.path
from nltk.classify import NaiveBayesClassifier
import json
# Feature files live next to this module.
_here = os.path.dirname(os.path.realpath(__file__))
posfeats_file = os.path.join(_here, 'posfeats.txt')
negfeats_file = os.path.join(_here, 'negfeats.txt')
# BUG FIX: `print x` is Python-2-only statement syntax; the call form
# works on both Python 2 and 3.
print(posfeats_file)
posfeats = []
negfeats = []
def word_feats(words):
    """Map every word to True — the bag-of-words feature-dict format
    expected by nltk's NaiveBayesClassifier."""
    # Dict comprehension instead of dict(list-of-pairs): clearer and
    # avoids building an intermediate list.
    return {word: True for word in words}
# Each feature file holds one JSON array of labelled feature records.
with open(posfeats_file, 'r') as f:
    posfeats = json.loads(f.readline())
with open(negfeats_file, 'r') as f:
    negfeats = json.loads(f.readline())
# Cut off: 6/7 for train and 1/7 for test (the old comment said 4/5).
# BUG FIX: use floor division so the slice indices stay integers on
# Python 3; `len(...)*6/7` is a float there and slicing raises TypeError.
negcutoff = len(negfeats)*6//7
poscutoff = len(posfeats)*6//7
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
# Train the Naive Bayes classifier and show the strongest features.
classifier = NaiveBayesClassifier.train(trainfeats)
classifier.show_most_informative_features(10)
def get_sentiment(raw):
    """Classify *raw* text with the module-level classifier.

    Words are lower-cased and stripped of common punctuation before
    being turned into a bag-of-words feature dict.
    """
    tweet = [word.lower().strip('\'$"?,.!') for word in raw.split()]
    return classifier.classify(word_feats(tweet))
|
17,453 | f2dd645112c5e2f2b13cec059fbbb7a035f943fa | '''Dashboard views for the swimapp'''
import json
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.http import (HttpResponseRedirect, HttpResponse,
HttpResponseBadRequest)
from django.views.generic import UpdateView, ListView
from django.views.generic import TemplateView
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin
from swimapp.forms.fileupload import FileUploadForm
from swimapp.models.fileupload import FileUpload
from swimapp.tasks import process_hy3_upload
class FileUploadView(TemplateView):
    '''Render the file-upload page with an (unbound) upload form.'''
    model = FileUpload
    template_name = 'swimapp/file_upload.html'
    form_class = FileUploadForm
    #success_url = reverse_lazy('swimapp_team_list')

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        # Require an authenticated user for every HTTP method.
        return super(FileUploadView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        '''Add the upload form class to the template context.'''
        context = super(FileUploadView, self).get_context_data(
            *args, **kwargs)
        context['form'] = FileUploadForm
        #context['teams'] = Team.objects.filter(users=self.request.user) \
        #.select_related('team_reg', 'team_type')
        return context
class FileUploadList(ListView):
    '''Login-protected list of every FileUpload.'''
    model = FileUpload
    template_name = 'swimapp/file_upload_list.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(FileUploadList, self).dispatch(*args, **kwargs)
class FileUploadCreate(SingleObjectTemplateResponseMixin,
                       ModelFormMixin, View):
    '''Create a FileUpload and, for HY3 files, kick off async processing.

    Handles both regular form posts and AJAX posts (plain-text "OK" on
    success, a JSON error dict with status 400 on failure).
    '''
    model = FileUpload
    template_name = 'swimapp/file_upload_edit.html'
    form_class = FileUploadForm
    #success_url = reverse_lazy('swimapp_file_upload_list')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(FileUploadCreate, self).dispatch(*args, **kwargs)

    def form_valid(self, form, request):
        """
        If the form is valid, save the associated model.
        """
        self.object = form.save()
        # HY3 meet files are parsed asynchronously (Celery-style .delay).
        if (self.object.filetype == FileUpload.HY3_FILE):
            process_hy3_upload.delay(self.object.id)
        if request.is_ajax():
            return HttpResponse('OK')
        else:
            return HttpResponseRedirect(self.get_success_url())

    def form_invalid(self, form, request):
        """
        If the form is invalid, re-render the context data with the
        data-filled form and errors.
        """
        if request.is_ajax():
            # Serialise the per-field errors for the client-side handler.
            errors_dict = {}
            if form.errors:
                for error in form.errors:
                    e = form.errors[error]
                    # NOTE(review): `unicode` exists on Python 2 only;
                    # this breaks on Python 3 — confirm target runtime.
                    errors_dict[error] = unicode(e)
            return HttpResponseBadRequest(json.dumps(errors_dict))
        else:
            return self.render_to_response(self.get_context_data(form=form))

    def post(self, request, *args, **kwargs):
        """
        Handles POST requests, instantiating a form instance with the passed
        POST variables and then checked for validity.
        """
        self.object = None
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        if form.is_valid():
            return self.form_valid(form, request)
        else:
            return self.form_invalid(form, request)

    def put(self, *args, **kwargs):
        # PUT behaves exactly like POST for this endpoint.
        return self.post(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        # Render an empty form.
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        return self.render_to_response(self.get_context_data(form=form))
class FileUploadUpdate(UpdateView):
    '''Login-protected edit view for an existing FileUpload.'''
    model = FileUpload
    template_name = 'swimapp/file_upload_edit.html'
    form_class = FileUploadForm
    success_url = reverse_lazy('swimapp_file_upload_list')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(FileUploadUpdate, self).dispatch(*args, **kwargs)
|
17,454 | afcd3d15dfed8e26f7ce98c4d2ae81f56dd3a73b | #coding:utf-8
from itertools import islice
# Demo of itertools.islice over a file and a shared iterator (Python 2).
f=open("testText")
# l=f.readlines()
# print l
# i=islice(f,0,5)
# i=islice(f,8)
# Take every line from the 5th one onward (stop=None means "to the end").
i=islice(f,4,None)
# for x in i:
# print x
# for x in range(6):
# print i.next()
l=[x for x in range(20)]
t=iter(l)
# islice consumes from the shared iterator, so the second loop resumes
# where the first stopped (items 10..19).
for x in islice(t,5,10):
    print x
for x in t:
    print x
|
17,455 | a3b1ef7bc7ec6d27d05b6bded2d06475c0b9cb1d | """
Module: device_scanner.py
Author: Chris Lawton, DLS Solutions, Inc.
Description: This module is responsible for performing a system scan (aka an inventory)
of devices that are expected to be present on the system. These devices are defined in
the ../config/LunaSrv/LunaSrcDeviceConfig.xml file. There are currently three types of
devices supported and are categorized by how they communicate with the OS.
1) USB-Serial: Devices that communicate via a serial port (e.g. /dev/ttyXXXX
2) USB: Devices that communicate via a vendor supplied API
3) Ethernet: Devices that communicate via TCP/IP
"""
import xml.etree.ElementTree as ET
import usb_serial_device
import usb_device
import ethernet_device
import subprocess
import xml_config_subprocess
import re
import socket
#import serial #sudo apt-get install python3-serial
import os
import logbase
class DeviceScanner(logbase.LogBase):
    """
    A class to scan (i.e. look for) devices on the system
    It uses an XML file as directions for what to look for.
    """
    configurationFile = ""
    # NOTE(review): these lists are *class* attributes, shared by every
    # DeviceScanner instance; anything appended to them accumulates
    # across instances.  Confirm only one scanner is ever created, or
    # move them into __init__.
    expectedDevices = [] # List of expected devices
    foundDevices = [] # List of devices actually found on the system
def __init__(self, fullPathToConfigFile):
"""
Constructor, will scan the config file and record info found there.
:param fullPathToConfigFile: Full path to file to read
"""
self.configurationFile=fullPathToConfigFile
self.logger.debug("Read XML file: " + self.configurationFile)
# Read XML file
tree = ET.parse(self.configurationFile)
root = tree.getroot()
# Parse the expected USBSerial devices
for elemUSBSerial in root.iter('USBSerial'):
#print(elemUSBSerial.attrib)
aUSBSerialDevice = usb_serial_device.USBSerialDevice()
aUSBSerialDevice.name = elemUSBSerial.attrib['Name']
aUSBSerialDevice.pid = elemUSBSerial.attrib['Pid']
aUSBSerialDevice.vid = elemUSBSerial.attrib['Vid']
aUSBSerialDevice.uid = elemUSBSerial.attrib['Uid']
for x in elemUSBSerial:
# A serial USB device should have port settings associated with it
elemPortSettings = None
if x.tag == 'PortSettings':
elemPortSettings = x
if elemPortSettings is not None:
aUSBSerialDevice.portSettings.baud = int(elemPortSettings.attrib['Baud'])
aUSBSerialDevice.portSettings.parity = elemPortSettings.attrib['Parity']
aUSBSerialDevice.portSettings.dataBits = int(elemPortSettings.attrib['DataBits'])
aUSBSerialDevice.portSettings.stopBits = int(elemPortSettings.attrib['StopBits'])
# Some of our serial USB devices have a special intermediate process that does the
# actual communication with the device. How to start that process is defined in the
# subprocess element.
elemSubProcess = None
if x.tag == 'SubProcess':
elemSubProcess = x
if elemSubProcess is not None:
aUSBSerialDevice.subProc.cmd = elemSubProcess.attrib['cmd']
for anArg in elemSubProcess.iter('Arg'):
aUSBSerialDevice.subProc.args.append(anArg.attrib['arg'])
if len(aUSBSerialDevice.subProc.cmd) == 0:
aUSBSerialDevice.subProc = None
# Remember what we've read.
self.expectedDevices.append(aUSBSerialDevice)
# Parse the expected USB devices
for elemUSB in root.iter('USB'):
#print(elemUSB.attrib)
aUSBDevice = usb_device.USBDevice()
aUSBDevice.name = elemUSB.attrib['Name']
aUSBDevice.pid = elemUSB.attrib['Pid']
aUSBDevice.vid = elemUSB.attrib['Vid']
aUSBDevice.uid = elemUSB.attrib['Uid']
self.expectedDevices.append(aUSBDevice)
# Parse the expected Ethernet devices
for elemEthernet in root.iter('Ethernet'):
#print(elemEthernet.attrib)
anEthernetDevice = ethernet_device.EthernetDevice()
anEthernetDevice.name = elemEthernet.attrib['Name']
anEthernetDevice.host = elemEthernet.attrib['Host']
anEthernetDevice.port = int(elemEthernet.attrib['Port'])
self.expectedDevices.append(anEthernetDevice)
def InventoryDevices(self):
    """
    Perform an inventory of the expected devices. Record the results in foundDevices.
    :return: None

    Three passes:
      1. udevadm over every /dev node (yields vendor id, product id, serial).
      2. lsusb fallback for USB devices that create no /dev entry
         (lsusb reports no serial number, so uid stays "").
      3. TCP connect probe for each expected Ethernet device.
    """
    self.logger.debug("Start Inventory...")
    # Find our desired usb devices. These should be present in /dev somewhere.
    osDevices = os.listdir("/dev")
    osDevices.sort()
    # Loop through all devices in /dev asking them what they are.
    for anOSDevice in osDevices:
        deviceName = "/dev/" + anOSDevice
        # We're making use of the unix command "udevadm". Read up on it!
        cmd = ["udevadm", "info", "-q", "all", "-n", deviceName]
        #print(cmd)
        pid=""
        vid=""
        uid=""
        # Launch udevadm for the current device name.
        # NOTE(review): FNULL is re-opened on every loop iteration but only the
        # handle from the final iteration is closed after the loop -- fd leak.
        FNULL = open(os.devnull, 'w')
        proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=FNULL)
        while True:
            line = proc.stdout.readline()
            if len(line) != 0:
                #print(line.rstrip())
                # Parse out the pieces of the output lines looking for the relevant information.
                # NOTE(review): readline() returns bytes here; line.__str__() is the
                # "b'...'" repr string, and the [:-1] slices below trim the repr's
                # trailing artefact. Fragile -- confirm against real udevadm output.
                parts = re.split("[ ]", line.__str__())
                #print(parts)
                if len(parts) > 1:
                    kvParts = re.split("[=]", parts[1].__str__())
                    #print(kvParts)
                    # We care about product id, vendor id and serial number.
                    if (kvParts[0] == "ID_VENDOR_ID"):
                        vid = kvParts[1][:-1]
                    if (kvParts[0] == "ID_MODEL_ID"):
                        pid = kvParts[1][:-1]
                    if (kvParts[0] == "ID_SERIAL"):
                        uid = kvParts[1][:-1]
                    # ID_SERIAL_SHORT, when present, overrides ID_SERIAL.
                    if (kvParts[0] == "ID_SERIAL_SHORT"):
                        uid = kvParts[1][:-1]
            else:
                break
        # We found a device with a Product ID and Vendor ID. Is it one we're expecting?
        if len(pid) > 0 and len(vid) > 0:
            self.logger.info( "Checking if device with ProductID: " + pid + " and VendorID: " + vid + " on " + deviceName + " is needed...")
            # First expected USB device matching pid/vid/uid that is not yet inventoried.
            foundItem = next((x for x in self.expectedDevices if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and
                              x.pid == pid and
                              x.vid == vid and
                              x.uid == uid and
                              x.inventoried == False), None)
            if foundItem is not None:
                if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:
                    if anOSDevice.startswith( 'tty') == True:
                        # Device is a Serial USB device.
                        foundItem.devPath = deviceName
                        foundItem.inventoried = True
                        foundItem.checked = True
                else:
                    #Device is a plain USB device.
                    foundItem.devPath = deviceName
                    foundItem.inventoried = True
                    foundItem.checked = True
    FNULL.close()
    # At this point, we may still not have all the found devices. So we'll fall back to using "lsusb" to look for devices.
    # The reason they are not found is that some devices do not add an entry to /dev. However, lsusb does not give a
    # serial number.
    cmd = ["lsusb"]
    # print(cmd)
    pid = ""
    vid = ""
    uid = ""
    # Launch lsusb once for the whole bus.
    FNULL = open(os.devnull, 'w')
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=FNULL)
    while True:
        line = proc.stdout.readline()
        if len(line) != 0:
            # print(line.rstrip())
            # Parse out the pieces of the output lines looking for the relevant information.
            parts = re.split("[ ]", line.__str__())
            # print(parts)
            if len(parts) > 1:
                # lsusb field 5 is "vid:pid".
                kvParts = re.split("[:]", parts[5].__str__())
                # print(kvParts)
                # We care about product id and vendor id only.
                vid = kvParts[0]
                pid = kvParts[1]
            # We found a device with a Product ID and Vendor ID. Is it one we're expecting?
            # NOTE(review): uid is still "" here, so this only matches expected
            # devices with an empty uid; deviceName/anOSDevice below are stale
            # leftovers from the /dev loop above, so devPath set here looks wrong
            # -- confirm intent.
            if len(pid) > 0 and len(vid) > 0:
                self.logger.info(
                    "Checking if device with ProductID: " + pid + " and VendorID: " + vid + " is needed...")
                foundItem = next((x for x in self.expectedDevices if
                                  isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and
                                  x.pid == pid and
                                  x.vid == vid and
                                  x.uid == uid and
                                  x.inventoried == False), None)
                if foundItem is not None:
                    if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:
                        if anOSDevice.startswith('tty') == True:
                            # Device is a Serial USB device.
                            foundItem.devPath = deviceName
                            foundItem.inventoried = True
                            foundItem.checked = True
                    else:
                        # Device is a plain USB device.
                        foundItem.devPath = deviceName
                        foundItem.inventoried = True
                        foundItem.checked = True
        else:
            break
    FNULL.close()
    # Here, we probe to see if any ethernet connected devices are up and listening for connections.
    while True:
        # Next expected Ethernet device that has not been probed yet.
        foundItem = next((x for x in self.expectedDevices if isinstance(x, (ethernet_device.EthernetDevice)) and
                          x.inventoried == False and x.checked == False), None)
        if foundItem is not None:
            #socket.setdefaulttimeout(10.0)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(10.0)
            try:
                s.connect((foundItem.host, foundItem.port))
                foundItem.inventoried = True;
            except:
                # A refused/timed-out connection just means "device not present".
                foundItem.inventoried = False;
                # Okay to swallow!
                pass
            finally:
                s.close()
                foundItem.checked = True;
        else:
            break
    # Record what we found.
    self.logger.info("The following devices were inventoried:")
    for x in self.expectedDevices:
        if x.inventoried == True:
            if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) == True:
                self.logger.info(x.name + " Device Node: " + x.devPath)
            else:
                self.logger.info(x.name)
            self.foundDevices.append(x)
|
17,456 | 0b5df71f4cd8926aa8b5dc72d4e970a810c75ac1 | from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
products = []
class Device(Resource):
    """REST resource for a single device, addressed by device_id.

    Backed by the module-level in-memory `products` list.
    """

    def get(self, device_id):
        """Return the device, or a 404 payload with device=None."""
        device = next(filter(lambda x: x['device_id'] == device_id, products), None)
        return {'device': device}, 200 if device else 404

    def post(self, device_id):
        """Create a device with default fields and return it with 201."""
        device = {'device_name': 'sevket', 'device_id': device_id, 'status': False, 'alarm': False}
        products.append(device)
        return device, 201

    def put(self, device_id):
        """Create or replace the device from the JSON request body."""
        request_data = request.get_json()
        device = {
            'device_id': device_id,
            'device_name': request_data['device_name'],
            'status': request_data['status'],
            'alarm': request_data['alarm'],
        }
        existing = next(filter(lambda x: x['device_id'] == device_id, products), None)
        # Bug fix: the original unconditionally called products.remove(existing),
        # which raised ValueError (HTTP 500) when the device did not exist.
        if existing is not None:
            products.remove(existing)
        products.append(device)
        return device
class DeviceList(Resource):
    """Read-only listing of every registered device."""

    def get(self):
        """Return all devices under the 'products' key."""
        payload = {'products': products}
        return payload
api.add_resource(Device, '/products/<string:device_id>')
api.add_resource(DeviceList, '/devicesList')
app.run(port = 5000)
|
17,457 | e5d1ee1bbe6878d92b8259ae47ae6bad42ff373d | from .db import db
import mongoengine_goodjson as gj
class Transaction(gj.Document):
    """A cash-register transaction: totals, change and the bill breakdown."""

    # Bug fix: StringField has no min_value/max_value keywords -- the intent
    # is clearly a length constraint on the CPF digit string (10-12 chars).
    client_cpf = db.StringField(min_length=10, max_length=12, required=True)
    # min_value/max_value of None are the FloatField defaults; dropped.
    total = db.FloatField(required=True)
    received = db.FloatField(required=True)
    change = db.FloatField(required=True)
    # One dict per bill denomination handed back as change.
    bills_quantities = db.ListField(db.DictField(), required=True)
17,458 | 5c360ef6e82e3bd88fc0f53dbb05546029022ca9 | from django.apps import AppConfig
class StatisticConfig(AppConfig):
    """Django application configuration for the `statistic` app."""
    name = 'statistic'
|
17,459 | d205979c2cfeb2140dc0f14da26d7f83f290285b |
from os import error
from datetime import datetime
def format_error(e):
    """Render *e* as a timestamped log line, prefixed with a newline."""
    timestamp = datetime.now()
    return '\n{}: {}'.format(timestamp, repr(e))
def log(e):
    """Append the formatted error *e* to log.txt; never raise from logging.

    Failures to write are printed to stdout instead of propagating.
    """
    try:
        with open('log.txt', 'a') as f:
            f.write(format_error(e))
    except IOError as io_err:
        print(io_err)
    except Exception as exc:
        # Bug fix: the original `except e:` evaluated the unbound name `e`
        # (a NameError at exception-handling time) instead of catching a type.
        print(exc)
17,460 | 53e39d767bc5d8d9b2f3b52d36f95524e64ab522 | from typing import List # noqa: F401
from libqtile import bar, layout, widget, hook, extension
from libqtile.config import Click, Drag, Group, Key, Screen, ScratchPad, DropDown, Match
from libqtile.lazy import lazy
from libqtile.utils import guess_terminal
from libqtile.dgroups import simple_key_binder
mod = "mod4"
alt = "mod1"
extension_defaults = dict(
background = "#3B4252",
foreground = "#D8DEE9",
selected_background = "#434C5E",
selected_foreground = "#E5E9F0",
dmenu_height = 24,
fontsize = 9
)
keys = [
# Switch focus
Key([mod], "h", lazy.layout.left()),
Key([mod], "j", lazy.layout.next()),
Key([mod], "k", lazy.layout.up()),
Key([mod], "l", lazy.layout.right()),
# Swap windows
Key([mod, "shift"], "h", lazy.layout.shuffle_left()),
Key([mod, "shift"], "j", lazy.layout.shuffle_down()),
Key([mod, "shift"], "k", lazy.layout.shuffle_up()),
Key([mod, "shift"], "l", lazy.layout.shuffle_right()),
Key([mod, "shift"], "semicolon", lazy.layout.flip()),
# Change windows sizes
Key([mod], "equal", lazy.layout.grow()),
Key([mod], "minus", lazy.layout.shrink()),
Key([mod, "shift"], "equal", lazy.layout.normalize()),
Key([mod, "shift"], "minus", lazy.layout.maximize()),
Key([mod], "bracketleft", lazy.prev_screen()),
Key([mod], "bracketright", lazy.next_screen()),
# Toggle between different layouts as defined below
Key([mod], "t", lazy.group.setlayout('monadtall')),
Key([mod], "y", lazy.group.setlayout('monadwide')),
Key([mod], "m", lazy.group.setlayout('max')),
Key([mod], "s", lazy.window.toggle_floating(), desc = "Toggle floating"),
Key([mod], "f", lazy.window.toggle_fullscreen(), desc = "Toggle fullscreen"),
Key([mod], "w", lazy.window.kill(), desc = "Kill focused window"),
Key([mod, "control"], "r", lazy.restart(), desc = "Restart qtile"),
Key([mod, "control"], "q", lazy.shutdown(), desc = "Shutdown qtile"),
# brightness
Key([], "XF86MonBrightnessUp", lazy.spawn("light -A 5")),
Key([], "XF86MonBrightnessDown", lazy.spawn("light -U 5")),
Key(["shift"], "XF86MonBrightnessUp", lazy.spawn("light -A 20")),
Key(["shift"], "XF86MonBrightnessDown", lazy.spawn("light -U 20")),
Key(["control"], "XF86MonBrightnessUp", lazy.spawn("light -S 75")),
Key(["control"], "XF86MonBrightnessDown", lazy.spawn("light -S 25")),
# volume
Key([], "XF86AudioMute", lazy.spawn("mute-toggle")),
Key([], "XF86AudioRaiseVolume", lazy.spawn("change-volume +5%")),
Key([], "XF86AudioLowerVolume", lazy.spawn("change-volume -5%")),
# media
Key([], "XF86AudioPlay", lazy.spawn("playerctl play-pause")),
Key([], "XF86AudioStop", lazy.spawn("playerctl stop")),
Key([], "XF86AudioNext", lazy.spawn("playerctl next")),
Key([], "XF86AudioPrev", lazy.spawn("playerctl previous")),
# screeshots
Key([], "Print", lazy.spawn('screenshot')),
Key([mod], "Return", lazy.spawn("alacritty")),
Key([mod], "r", lazy.spawn("alacritty -e ranger")),
Key([mod], "v", lazy.spawn("alacritty -e nvim")),
Key([mod], "space", lazy.spawn("rofi -show drun")),
Key([mod], "c", lazy.spawn("clipmenu")),
Key([mod], "p", lazy.spawn("bwmenu")),
Key([mod], "p", lazy.spawn("bwmenu")),
Key([mod], "o", lazy.spawn("rofi -show calc -modi calc -no-show-match -no-sort")),
Key([mod], "q", lazy.run_extension(extension.CommandSet(commands = {
'lock': 'slock',
'suspend': 'systemctl suspend',
'logout': 'qtile-cmd -o cmd -f shutdown',
'restart': 'systemctl reboot',
'poweroff': 'systemctl poweroff -i',
}))),
]
groups = [
Group(" MAIN "),
Group(" CODE ", matches=[Match(wm_class=["jetbrains-idea"])]),
Group(" TOOL "),
Group(" PLAY ", matches=[Match(wm_class=["spotify", "pocket-casts-linux"])]),
Group(" GAME ", matches=[Match(wm_class=["Steam", "FantasyGrounds.x86_64"])]),
Group(" VIRT "),
Group(" FILE "),
Group(" CONF "),
Group(" CHAT "),
Group(" WORK "),
]
dgroups_key_binder = simple_key_binder("mod4")
dgroups_app_rules = []
layout_defaults = dict(
border_focus = "#434c5e",
border_normal = "#2E3440",
border_width = 1,
margin = 5,
)
layouts = [
layout.MonadTall(align = layout.MonadTall._left, **layout_defaults),
layout.MonadWide(align = layout.MonadTall._left, **layout_defaults),
layout.Max(**layout_defaults),
]
floating_layout = layout.Floating(**layout_defaults, float_rules = [
# Run the utility of `xprop` to see the wm class and name of an X client.
{'wmclass': 'confirm'},
{'wmclass': 'dialog'},
{'wmclass': 'download'},
{'wmclass': 'error'},
{'wmclass': 'file_progress'},
{'wmclass': 'notification'},
{'wmclass': 'splash'},
{'wmclass': 'toolbar'},
{'wmclass': 'confirmreset'}, # gitk
{'wmclass': 'makebranch'}, # gitk
{'wmclass': 'maketag'}, # gitk
{'wname': 'branchdialog'}, # gitk
{'wname': 'pinentry'}, # GPG key password entry
{'wmclass': 'ssh-askpass'}, # ssh-askpass
])
widget_defaults = dict(
font = 'DejaVuSansMono Nerd Font',
fontsize = 12,
background = "#3B4252",
foreground = "#D8DEE9",
padding = 5,
)
sep_defaults = dict(
linewidth = 0,
padding = 15,
)
extension_defaults = widget_defaults.copy()
bar_defaults = dict(
background = "#3B4252",
)
bar_groups = [
widget.Sep(**sep_defaults),
widget.Image(**widget_defaults, filename = "~/.config/qtile/icon.png",
mouse_callbacks = {'Button1': lambda qtile: qtile.cmd_spawn("rofi -show drun")}),
widget.Sep(**sep_defaults),
widget.GroupBox(**widget_defaults, highlight_method = "block", borderwidth = 0, rounded = False, spacing = 0,
active = "#D8DEE9", inactive = "#D8DEE9", urgent_border = "#BF616A", urgent_text = "#D8DEE9",
this_current_screen_border = "#4C566A", this_screen_border = "#4C566A",
other_current_screen_border = "#4C566A", other_screen_boder = "#4C566A"),
widget.Sep(**sep_defaults),
widget.TextBox("", **widget_defaults),
widget.WindowName(width = bar.STRETCH, **widget_defaults, for_current_screen = True),
]
bar_notification = [
widget.Sep(**sep_defaults),
widget.TextBox("墳", **widget_defaults),
widget.Volume(**widget_defaults),
widget.Sep(**sep_defaults),
widget.TextBox("", **widget_defaults),
widget.Wlan(**widget_defaults, format = "{essid} - {percent:2.0%}", interface = "wlp3s0"),
widget.Sep(**sep_defaults),
widget.TextBox("襤", **widget_defaults),
widget.Battery(**widget_defaults, format = "{percent:2.0%} - {hour:d}:{min:02d}",
update_interval = 15, notify_below = 0.1),
widget.Sep(**sep_defaults),
widget.TextBox("", **widget_defaults),
widget.Clock(**widget_defaults, format = '%a %d %b %Y %H:%M:%S'),
widget.Sep(**sep_defaults),
]
main_screen = Screen(top = bar.Bar(
[*bar_groups, *bar_notification], 24, **bar_defaults
))
hdmi_screen = Screen(top = bar.Bar(
[*bar_groups, *bar_notification], 24, **bar_defaults
))
# The previous `if (True):` was a dead toggle; swap the comment below to
# fall back to a single-monitor setup.
screens = [main_screen, hdmi_screen]
# screens = [main_screen]
# Drag floating layouts.
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(), start = lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(), start = lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
main = None # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
auto_fullscreen = True
focus_on_window_activation = "smart"
# for java apps to function
wmname = "LG3D"
import os, subprocess
@hook.subscribe.startup_once
def autostart():
    """Run the user's autostart script once, on the first qtile startup."""
    script = os.path.expanduser('~/.config/qtile/autostart.sh')
    subprocess.call([script])
@hook.subscribe.screen_change
def restart_on_randr(event):
    """Restart qtile when the monitor layout changes so screens/bars rebuild."""
    # Bug fix: `qtile` was an undefined global here; import the running
    # root object from libqtile (function-scope to avoid config-load issues).
    from libqtile import qtile
    qtile.cmd_restart()
@hook.subscribe.client_new
def floating_size_hints(window):
    # Float any new client whose WM size hints cap its width below 960px
    # (e.g. fixed-size dialogs). NOTE(review): assumes get_wm_normal_hints()
    # returns a mapping exposing a 'max_width' key -- confirm against qtile's API.
    hints = window.window.get_wm_normal_hints()
    if hints and 0 < hints['max_width'] < 960:
        window.floating = True
|
17,461 | 938c9a8307e1d448b40fb979eb7e30eb063ab74b | from functional.state.phone_states import *
class Phone:
    """State-pattern phone: delegates ring() to the current state object."""

    def __init__(self, state=None):
        # Bug fix: the default was `state=NormalState()`, a mutable default
        # evaluated once at import time and shared by every Phone instance.
        # A fresh NormalState is now created per instance.
        self.state = NormalState() if state is None else state

    def set_state(self, state):
        """Replace the current state object."""
        self.state = state

    def ring(self):
        """Delegate ringing behaviour to the current state."""
        self.state.ring()
|
17,462 | 2fcba1040a811a64d84f8d20565fe9aac821c58d | import config as cfg
from tinydb import TinyDB, Query
# from operator import itemgetter
import datetime
def set_expire():
    """Backfill a `timestamp_epoch` field on every record that lacks one.

    The epoch value is derived from the record's `pubdate_api` ISO string.
    """
    db = TinyDB('db.json')
    record_query = Query()
    epoch = datetime.datetime(1970, 1, 1)
    for entry in db.all():
        if 'timestamp_epoch' in entry:
            continue
        published = datetime.datetime.strptime(entry['pubdate_api'], '%Y-%m-%dT%H:%M:%S')
        seconds = int((published - epoch).total_seconds())
        db.update({'timestamp_epoch': seconds}, record_query.asset_id == entry['asset_id'])
set_expire()
|
17,463 | d232cdcb6a602d0d648a2a3efe2841d71e3dc994 | assert True # interpritter ignores this
assert False # raises AssertionError here, so control flows out of the program
assert True # this won't run because the program terminated above
|
17,464 | 8250d0f16adb3736ecde9e223e32ce1660426f5e | #!/usr/bin/python3
'''List all states in the DB'''
import MySQLdb
import sys
argv = sys.argv
if __name__ == "__main__":
    # Usage: ./script.py <mysql_user> <mysql_password> <db_name> <state_name>
    user = argv[1]
    passwd = argv[2]
    db_name = argv[3]
    state = argv[4]
    db = MySQLdb.connect(host="localhost", port=3306, user=user,
                         passwd=passwd, db=db_name, charset="utf8")
    cursor = db.cursor()
    # Security fix: the previous string-concatenated query was an SQL
    # injection vector for the user-supplied state name; use a
    # parameterized query instead.
    query = "SELECT * FROM states WHERE name LIKE BINARY %s ORDER BY id"
    cursor.execute(query, (state,))
    for item in cursor.fetchall():
        print(item)
    cursor.close()
    db.close()
|
17,465 | 9afe5b2d576b889ef900c0d2947690ad9b7a2ad6 | from django.shortcuts import render
from rest_framework.viewsets import ViewSet, GenericViewSet
from utils.response import APIResponse
from user.models import User
from user.serializer import UserModelserializer
from rest_framework.response import Response
# Create your views here.
class UserAPIView(ViewSet):
    """Login and registration endpoints for the user app.

    NOTE(review): login filters on the plaintext password straight against
    the database, and the numeric codes handed to APIResponse
    (200/201/400/401) look inconsistent (400 on success) -- confirm against
    the project's APIResponse contract.
    """

    # Handle a user login request.
    def user_login(self, request, *args, **kwargs):
        request_data = request.data
        serializer = UserModelserializer(data=request_data)
        # serializer.is_valid(raise_exception=True)
        user_obj = User.objects.filter(username=request_data['username'], password=request_data['password'])
        if user_obj:
            return APIResponse(200, "登陆成功", results=request_data)
        else:
            return APIResponse(201, "登陆失败")

    # Handle a user registration request.
    def user_register(self, request, *args, **kwargs):
        request_data = request.data
        # Hand the client-supplied fields to the deserializer for validation.
        serializer = UserModelserializer(data=request_data)
        # Validate; raise_exception=True aborts with an error response as
        # soon as validation fails.
        serializer.is_valid(raise_exception=True)
        user_obj = serializer.save()
        if user_obj:
            return APIResponse(400, "注册成功", results=request_data)
        else:
            return APIResponse(401, "注册失败", results=request_data)
# class BookGenericAPIView(ListModelMixin,
# RetrieveModelMixin,
# CreateModelMixin,
# DestroyModelMixin,
# UpdateModelMixin,
# GenericAPIView):
# # 获取当前视图类要操作的模型
# queryset = Book.objects.all()
# # 指定当前视图要使用的序列化器类
# serializer_class = BookModelSerializer
# # 指定获取单个对象的主键的名称
# lookup_field = "id"
#
# # 混合视图 查询所有
# def get(self, request, *args, **kwargs):
# if "id" in kwargs:
# # 查询单个
# return self.retrieve(request, *args, **kwargs)
# return self.list(request, *args, **kwargs)
#
# def post(self, request, *args, **kwargs):
# return self.create(request, *args, **kwargs)
#
# def delete(self, request, *args, **kwargs):
# return self.destroy(request, *args, **kwargs)
#
# # 整体修改
# def put(self, request, *args, **kwargs):
# return self.update(request, *args, **kwargs)
#
# # 局部修改
# def patch(self, request, *args, **kwargs):
# response = self.partial_update(request, *args, **kwargs)
# return APIResponse(results=response.data)
|
17,466 | af609485c68bc165e73c3de9e81e8300cb307c7d | """
Linear Regression with one variable.
@author GalenS <galen.scovell@gmail.com>
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import seaborn as sns
sns.set_style('white')
ITERATIONS = 1500 # Number of iterations to use for gradient descent
ALPHA = 0.01 # Learning rate: how big steps are, larger is more aggressive
def scatterplot(x, y):
"""
Make scatterlot from initial data.
:param x: x values
:type x: 2d ndarray [[1., x-val], [1., x-val], ...]
:param y: y values
:type y: 2d ndarray [[y-val], [y-val], ...]
"""
plt.figure(figsize=(14, 8), dpi=80)
plt.scatter(x[:, 1], y, s=30, c='r', marker='x', linewidths=1)
plt.grid(True)
plt.xlim(4, 24)
plt.ylabel('Profit ($10k)')
plt.xlabel('Population (10k)')
plt.show()
plt.close()
def compute_cost(x, y, theta=[[0], [0]]):
    """
    Compute the least-squares cost J for the given theta.

    :param x: design matrix, 2d ndarray [[1., x-val], [1., x-val], ...]
    :param y: targets, 2d ndarray [[y-val], [y-val], ...]
    :param theta: parameter vector [[theta0], [theta1]]
    :return: float cost J(theta)
    """
    sample_count = y.size
    residuals = x.dot(theta) - y
    return np.sum(np.square(residuals)) / (2 * sample_count)
def gradient_descent(x, y, theta=[[0], [0]], iterations=None, alpha=None):
    """
    Minimize cost using batch gradient descent.

    :param x: design matrix, 2d ndarray [[1., x-val], [1., x-val], ...]
    :param y: targets, 2d ndarray [[y-val], [y-val], ...]
    :param theta: starting parameter vector [[theta0], [theta1]]
    :param iterations: number of update steps; defaults to module ITERATIONS
        (generalized: previously hard-coded)
    :param alpha: learning rate; defaults to module ALPHA
    :return: tuple (theta 2d ndarray, list of per-iteration costs)
    """
    if iterations is None:
        iterations = ITERATIONS
    if alpha is None:
        alpha = ALPHA
    # Normalize list inputs up front; the original only became an ndarray
    # after the first update step.
    theta = np.asarray(theta, dtype=float)
    m = y.size
    j_history = []
    for _ in range(iterations):
        h = x.dot(theta)
        theta = theta - (alpha / m) * (x.T.dot(h - y))
        j_history.append(compute_cost(x, y, theta))
    return theta, j_history
def plot_costs(j_history):
"""
Plot line of costs calculated in gradient descent (J's).
:param j_history: costs calculated from descent
:type j_history: list of floats
"""
plt.figure(figsize=(14, 8))
plt.plot(range(len(j_history)), j_history)
plt.grid(True)
plt.title('J (Cost)')
plt.xlabel('Iteration')
plt.ylabel('Cost function')
plt.xlim([0, 1.05 * ITERATIONS])
plt.ylim([4, 7])
plt.show()
plt.close()
def plot_descent(x, y, theta):
"""
Plot gradient descent thetas as line over dataset scatterplot.
:param x: x values
:type x: 2d ndarray [[1., x-val], [1., x-val], ...]
:param y: y values
:type y: 2d ndarray [[y-val], [y-val], ...]
:param theta: calculated theta values
:type theta: 2d ndarray [[theta0 float], [theta1 float]]
"""
# Compute prediction for each point in xx range using calculated theta values
# h(x) = (theta0 * x0) + (theta1 * x1)
xx = np.arange(5, 23)
yy = theta[0] + theta[1] * xx
plt.figure(figsize=(14, 8), dpi=80)
plt.scatter(x[:, 1], y, s=30, c='r', marker='x', linewidths=1)
plt.plot(xx, yy, label='Hypothesis: h(x) = {0:.2f} + {1:.2f}x'.format(float(theta[0]), float(theta[1])))
plt.grid(True)
plt.xlim(4, 24) # Extend plot slightly beyond data bounds
plt.xlabel('Population of City (10k)')
plt.ylabel('Profit ($10k)')
plt.legend(loc=4)
plt.show()
plt.close()
def make_prediction(theta, value):
    """
    Predict profit in dollars for a city population of *value* (10k units).

    :param theta: fitted parameter vector [[theta0], [theta1]]
    :param value: population in units of 10k
    :return: 1-element ndarray, predicted profit in dollars
    """
    # x0 is always 1.0 (theta0 has no coefficient in the hypothesis equation).
    features = [1.0, value]
    return theta.T.dot(features) * 10000
def plot_3d(x, y):
"""
Plot x vs y vs z (cost, j value) 3D plot.
:param x: x values
:type x: 2d ndarray [[1., x-val], [1., x-val], ...]
:param y: y values
:type y: 2d ndarray [[y-val], [y-val], ...]
"""
# Create grid coordinates
x_axis = np.linspace(-10, 10, 50)
y_axis = np.linspace(-1, 4, 50)
xx, yy = np.meshgrid(x_axis, y_axis, indexing='xy')
z = np.zeros((x_axis.size, y_axis.size))
# Calculate z-values based on grid coefficients
for (i, j), v in np.ndenumerate(z):
z[i, j] = compute_cost(x, y, theta=[[xx[i, j]], [yy[i, j]]])
# Construct plot
fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(xx, yy, z, rstride=1, cstride=1, alpha=0.6, cmap=plt.cm.jet)
ax.set_zlabel('Cost')
ax.set_zlim(z.min(), z.max())
ax.view_init(elev=15, azim=230)
plt.title('X vs. Y vs. Cost')
ax.set_xlabel(r'$\theta_0$', fontsize=17)
ax.set_ylabel(r'$\theta_1$', fontsize=17)
plt.show()
plt.close()
if __name__ == '__main__':
# Read in data and visualize
data = np.loadtxt('ex1data1.txt', delimiter=',')
x = np.c_[np.ones(data.shape[0]), data[:,0]] # data.shape[0] = 97 (rows in 1st column)
# np.ones(data.shape[0]) = list of 97 1's
# data[:, 0] = all data in 1st column
# np.c_[] = combine results above:
# list of lists, each inner list
# is [1., column val]
y = np.c_[data[:, 1]] # list of lists, each inner list is single entry [2nd column val]
scatterplot(x, y)
# Gradient descent and visualize
theta, j_history = gradient_descent(x, y)
plot_costs(j_history)
plot_descent(x, y, theta)
# Make some predictions
print('Predicted profit for 3.5k population: {0}'.format(make_prediction(theta, 3.5)))
print('Predicted profit for 7k population: {0}'.format(make_prediction(theta, 7)))
plot_3d(x, y)
|
17,467 | 6ec97e1e31dd8c5520cc1171cd67b8d62b0afade | """
Question 1: What are the key terms? e.g. explain convolution in your own words, pooling in your own words
for a 2D convolution, the input tensor and conv2d output tensor consist of two spatial dimensions (width & height), and
one feature dimension (rgb color for images in image input).
The kernel or filter is a 4d tensor that stores weights and biases used for recognizing "patterns" of the layer,
whether that's an edge or line earlier on in the network or the makeup of a face or vehicle later on. Each of the kernel
weights correspond to a region of the input, and when the region of a particular filter is multiplied by the corresponding
weight/bias of the filter, the output value is some number that varies depending on how well the input matched some
pattern of a given class.
the CNN explainer site seems to regard the relu activation layer as worth highlighting just as prominently as the conv
layer, and while nonlinearity is important for differentiation between classes since you don't want the class
probability prediction to simply be some linear combination of the inputs, I don't think it's as interesting except that
it sort of acts to emphasize the fact that the output is just another 3D tensor, a better approximation below:
https://www.youtube.com/watch?v=eMXuk97NeSI&t=254s
Feature map/activation map/rectified feature map all mean the same exact thing, it's called an activation map because it
is a mapping that corresponds to the activation of different parts of the image.
The pooling layer is responsible for 'blurring' the spatial extent of the network, so a 9x9 region could become a 1x1
or similar, it reduces the # of parameters used later on in the network. This also helps to reduce overfitting, the
inclusion of maxpooling2D layers reduced the # of parameters by 50x and led to an improved validation score.
The flatten layer simply removes any spatial organization of a feature map. A 4x4x10 3D tensor becomes a vector with
160 values in it.
Question 2:
What is the kernel size?
What is the stride?
How could you adjust each of these in TensorFlow code?
kernel size is the dimensions of the sliding window over the input.
- prefer smaller kernels in order to stack more layers deeper in the network to learn more complex features.
stride indicates how many pixels the kernel should be shifted over at a time, a larger stride is akin to downsampling
or compressing the media.
- ensure that the kernel slides across the input symmetrically when implementing a CNN.
you would change it with the conv2d layer params
tf.keras.layers.Conv2D(
filters,
kernel_size,
strides=(1, 1),
padding="valid",
data_format=None,
dilation_rate=(1, 1),
groups=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs
)
kernel_size: An integer or tuple/list of 2 integers, specifying the height and width of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height and width. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.
""" |
17,468 | 06a68e2c2cbf07d9900c024c94acd44efff0442a | import os
import h5py
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import tables
from bpz_explorer.plots import PlotBPZ
from config import cat_version
alhambra_fit_file = h5py.File('kk_alhambra_fit_fl1.hdf5')
file_dir = '/Users/william/data/alhambra_images'
catalog = alhambra_fit_file['bpz_catalog']
spec_file = os.path.expanduser('~/workspace/pzT_templates/templates/eB11.list')
template_names = [x.split('_')[0] for x in np.loadtxt(spec_file, dtype="S20")]
pdf_file = '/Users/william/data/alhambra_gold_feb2016/alhambragold_added_%s_1e-4_B13v6_eB11.h5' % cat_version
pdf = tables.File(pdf_file)
# tmp:
field = 2
pointing = 1
ccd = 2
# mask = np.bitwise_and(np.bitwise_and(catalog['Field'] == field, catalog['Pointing'] == pointing), catalog['CCD'] == ccd)
mask = np.ones(len(catalog['Field']), dtype=bool)
# mask = np.bitwise_and(catalog['Field'] == field, mask)
mask = np.bitwise_and(mask, catalog["stell"] < .4)
mask = np.bitwise_and(mask, catalog["MS"] > 0)
mask = np.bitwise_and(mask, catalog["MS"] > 0)
mask = np.bitwise_and(mask, catalog['F814W'] < 22.764)
# mask = np.bitwise_and(mask, catalog['F814W'] > 18)
mask = np.bitwise_and(mask, pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["odds"] > .8)
mask = np.bitwise_and(mask, pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Mabs"] < -17)
mask = np.bitwise_and(mask, pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["zml"] > 0.05)
mask = np.bitwise_and(mask, pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["chi2"] < .5)
catalog = catalog[mask]
# tmp end
gal_parameters_bins = alhambra_fit_file["gal_parameters_bins"]
gal_parameters_likelihood = alhambra_fit_file["gal_parameters_likelihood"][mask, :, :]
i_par = int(np.argwhere(alhambra_fit_file['gal_parameters_names'].value == 'mass'))
### Plot mass W vs. mass Tx.
aux_mass = [np.average(np.log10(gal_parameters_bins[..., i_par]), weights=gal_parameters_likelihood[..., i_par][i]) for
i in range(len(catalog))]
plt.figure(1)
plt.clf()
plt.scatter(catalog["MS"], aux_mass, c=pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Tml"][mask])
plt.plot([7, 12], [7, 12])
plt.xlim(7, 12)
plt.ylim(7, 12)
plt.xlabel("BPZ - Taylor")
plt.ylabel("Willy")
plt.colorbar()
plt.draw()
plt.figure(4)
plt.clf()
plt.hist(catalog["MS"] - aux_mass, bins=50)
plt.title("%3.2f +/- %3.2f" % (np.mean(catalog["MS"] - aux_mass), np.std(catalog["MS"] - aux_mass)))
plt.xlabel("BPZ - Willy")
plt.draw()
### Plot check abs mags
pzt = np.zeros((len(alhambra_fit_file["gal_alhambra_seq_id"]), len(pdf.root.z), len(pdf.root.xt)), "float")
# pzt = np.zeros((n_galaxies, len(h5file.root.z), len(h5file.root.xt)), "float")
# for j, x in enumerate(h5file.root.Posterior[:pzt.shape[0]]):
i_gal = 0
for j in alhambra_fit_file["gal_alhambra_seq_id"]:
gz = pdf.root.goodz[j]
gt = pdf.root.goodt[j]
if pdf.root.Posterior[j].sum() > 0:
pzt[i_gal][np.outer(gz, gt)] += (pdf.root.Posterior[j] / pdf.root.Posterior[j].sum())
i_gal += 1
plt.figure(3)
plt.clf()
aux_absmag = pdf.root.Absolute_Magnitude_zT_for_m0eq20[:100] - 20 + \
pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["m0"][:, np.newaxis, np.newaxis]
pzt = pzt[:, :aux_absmag.shape[1], :]
pzt /= pzt.sum(axis=(1, 2))[:, np.newaxis, np.newaxis]
absmag = np.average(aux_absmag, weights=pzt, axis=(1, 2))[mask]
plt.scatter(pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Mabs"][mask], absmag,
c=pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Tml"][mask])
plt.plot([-20, -10], [-20, -10])
plt.colorbar()
plt.xlabel('BPZ cat')
plt.ylabel('BPZ pdf')
plt.draw()
old_mass = None
old_absmag = None
# for i_gal in np.argsort(catalog["area"])[::-1]:
for i_gal in np.argsort((catalog["MS"] - aux_mass) ** 2)[::-1]:
# for i_gal in np.argsort((pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Mabs"][mask] - absmag) ** 2)[
# ::-1]:
img_file = '%s/f0%dp0%d_OPTICAL_%d.png' % (file_dir, catalog["Field"][i_gal], catalog["Pointing"][i_gal],
catalog["CCD"][i_gal])
img = mpimg.imread(img_file)[::-1, ...]
bpz_plot = PlotBPZ(img, catalog, pdf, bpz_template_names=template_names, i_figure=2)
bpz_plot.plot_dossier(i_gal)
bpz_plot.figure.axes[3].hist(np.log10(gal_parameters_bins[..., i_par]),
weights=gal_parameters_likelihood[i_gal, ..., i_par], bins=20, normed=True)
# bpz_plot.figure.axes[3].hist(np.log10(alhambra_fit_file["gal_parameters_bins"][..., i_par]),
bpz_plot.figure.axes[3].plot([catalog[i_gal]["MS"]], [.2], "*", color="yellow")
plt.figure(1)
if old_mass is not None:
plt.plot(old_mass[0], old_mass[1], '*', color="red")
plt.plot(catalog[i_gal]["MS"], aux_mass[i_gal], '*', color="yellow")
old_mass = [catalog[i_gal]["MS"], aux_mass[i_gal]]
plt.draw()
plt.figure(3)
if old_absmag is not None:
plt.plot(old_absmag[0], old_absmag[1], '*', color="red")
plt.plot(pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Mabs"][mask][i_gal], absmag[i_gal], '*',
color="yellow")
old_absmag = [pdf.root.bpz[alhambra_fit_file["gal_alhambra_seq_id"].value]["Mabs"][mask][i_gal], absmag[i_gal]]
plt.draw()
raw_input('delta_M = %3.2f. ENTER for next...' % (aux_mass[i_gal] - catalog[i_gal]["MS"]))
|
17,469 | be480436fe4b84af640eb647465c54c488fd52d5 | # -*- coding:utf-8 -*-
__author__ = 'zhaojm'
import pymongo
import logging
import random
# MONGO
# MONGO_URI = "localhost:27017"
# mongo_client = pymongo.MongoClient(MONGO_URI)
mongo_client = pymongo.MongoClient()
job_58_db = mongo_client["job_58"]
class Job58DB(object):
    """Thin static wrapper around the module-level `job_58` MongoDB database."""

    def __init__(self):
        # Stateless: all access goes through the module-level client/collections.
        pass
    # @staticmethod
    # def upsert_company(item):
    #     logging.info("<MONGO> %s" % item)
    #     job_58_db.company_info.update({'company_url': item['company_url']}, {'$set': item}, True, True)
    #
    # @staticmethod
    # def check_have(company_url):
    #     if job_58_db.company_info.find_one({"company_url": company_url}):
    #         return True
    #     else:
    #         return False
    # @staticmethod
    # def get_one_random_company_id():
    #     cur = job_58_db.company_info.find()
    #     count = cur.count()
    #     r = random.randint(count)
    #     company = cur[r]
    #     return company['company_id']
    # @staticmethod
    # def check_have_job(url):
    #     if job_58_db.job_info.find_one({"url": url}):
    #         return True
    #     else:
    #         return False
    @staticmethod
    def upsert_job(item):
        """Upsert a job document keyed by its `job_url`.

        NOTE(review): Collection.update(filter, doc, upsert, multi) is
        deprecated and removed in pymongo >= 4 -- migrate to
        update_one(..., upsert=True) when the driver is upgraded.
        """
        logging.info("<MONGO> %s" % item)
        job_58_db.job_info.update({'job_url': item['job_url']}, {'$set': item}, True, True)
|
17,470 | ee516dcfafc36a430333253372a5d9ff71fd6f7d | import pandas as pd
def loadAndMergeMovieData():
'''
This function loads the movie name dataset and the user rating dataset
Returns merged dataframe with user-movie ratings
'''
rating_cols = ['user_id', 'movie_id', 'rating']
ratings = pd.read_csv('Z:/ML/DataScience/DataScience/ml-100k/u.data', sep = '\t',
names = rating_cols, usecols = range(3))
movie_cols = ['movie_id', 'title']
movies = pd.read_csv('Z:/ML/DataScience/DataScience/ml-100k/u.item', sep = '|',
names = movie_cols, usecols = range(2))
ratings = pd.merge(movies, ratings)
return ratings
def createRatingsPivot(ratings):
    """
    Pivot the merged ratings into a user-by-movie matrix.

    :param ratings: DataFrame with user_id, title and rating columns
    :return: DataFrame indexed by user_id, one column per title,
             cell values are (mean) ratings, NaN where unrated
    """
    return ratings.pivot_table(index=['user_id'], columns=['title'], values='rating')
def createCorrMatrix(userRatings):
    '''
    Build the movie-by-movie Pearson correlation matrix of user ratings.
    Pairs of movies rated in common by fewer than 100 users yield NaN
    (min_periods), which filters out poorly-supported similarities.
    '''
    return userRatings.corr(method='pearson', min_periods=100)
def selectUserForRecommendation(userRatings):
    '''
    Read the target user's id(s) from ``userid.csv`` and return that
    user's rows from the ratings pivot with missing ratings dropped.
    '''
    userIndex = pd.read_csv('Z:/ML/DataScience/DataScience/ml-100k/userid.csv')['userId']
    return userRatings.loc[userIndex].dropna()
def getSimilarMovies(userData, corrMatrix):
    '''
    Score candidate movies by similarity to the movies the user rated.

    For each movie the user rated, take its correlation column, weight it
    by the user's rating, accumulate all weighted scores, sum repeated
    candidates, sort descending, and drop the user's own movies.

    BUG FIX: the original assigned ``simCandidates.append(...)`` to a new
    variable each iteration, so only the LAST rated movie ever contributed
    to the result. Scores are now accumulated across all rated movies
    (``pd.concat`` also replaces the ``Series.append`` API removed in
    pandas 2.0, and ``iloc`` makes the positional lookup explicit).

    :param userData: Series of the user's ratings, indexed by movie title
    :param corrMatrix: movie-by-movie Pearson correlation DataFrame
    :return: Series of candidate movies with aggregated similarity scores
    '''
    scored = []
    for i in range(0, len(userData.index)):
        # Movies similar to the i-th movie the user rated, weighted by
        # how much the user liked it
        weight = userData.iloc[i]
        similarMovies = corrMatrix[userData.index[i]].dropna()
        scored.append(similarMovies * weight)
    if scored:
        similarMovieCandidates = pd.concat(scored)
    else:
        similarMovieCandidates = pd.Series(dtype=float)
    # Grouping the repeated results and similarity scores are added
    similarMovieCandidates = similarMovieCandidates.groupby(similarMovieCandidates.index).sum()
    similarMovieCandidates.sort_values(inplace=True, ascending=False)
    # Removing the movies that are already rated by the user
    filteredSimilarMovies = similarMovieCandidates.drop(userData.index, errors='ignore')
    return filteredSimilarMovies
def main():
    """Run the end-to-end pipeline: load and merge the ratings, pivot them
    per user, build the movie correlation matrix, pick the target user and
    write the recommended movies (with scores) to CSV."""
    ratings = loadAndMergeMovieData()
    userRatings = createRatingsPivot(ratings)
    corrMatrix = createCorrMatrix(userRatings)
    userData = selectUserForRecommendation(userRatings)
    similarMovies = getSimilarMovies(userData,corrMatrix)
    # Persist the recommendation scores for later inspection
    similarMovies.to_csv('Z:/ML/DataScience/DataScience/ml-100k/similarMovies.csv', sep =',')
# BUG FIX: a script executed directly runs under the module name
# '__main__', not 'main', so the original guard never fired and main()
# was never called.
if __name__ == '__main__':
    main()
|
17,471 | 9d28da833ee390259399530fbc038bb26424703b | import json
import codecs
from gensim.models import LdaModel
from gensim.corpora import Dictionary
from gensim import corpora, models
import tomotopy as tp
# file = open("D:/final_result.json", 'r', encoding='utf-8')
# line = file.readline()
from collections import Counter

# For each of the 14 sub-topics: scan the corpus once, collect the publish
# times of documents strongly associated both with sub-topic i and with
# main topic index 5, then write per-timestamp counts to one file per
# sub-topic. `with` blocks fix the original's never-closed output files.
for i in range(14):
    publish_time_all = []
    number = 0
    with open("D:/final_result.json", 'r', encoding='utf-8') as file:
        for line in file:
            number = number + 1
            dic = json.loads(line)
            topic = dic["topic_6"]
            topic_main = dic["topic_main"]
            print(topic)
            print(len(topic))
            print(number)
            # A weight above 0.25 marks the document as belonging to the topic
            if topic[i] > 0.25 and topic_main[5] > 0.25:
                print("找到")
                publish_time_all.append(dic["publish_time"])
    # Count documents per distinct publish time. Counter replaces the
    # original O(n^2) nested counting loop; the (time, count) tuples and
    # their sorted order are identical.
    counts = Counter(publish_time_all)
    time = sorted(counts)
    final_result = [(time_stap, counts[time_stap]) for time_stap in time]
    title_file_name = r"D:/topic_6_time/" + str(i) + ".txt"
    with open(title_file_name, 'w', encoding='utf-8') as ms:
        for element in final_result:
            ms.write(str(element))
            ms.write('\n')
    print('写入完成')
|
17,472 | cb4e55626395251b9adc40b10e70e94f28b6fa1e | # -*- coding: utf-8 -*-
# Copyright 2017 Jarvis (www.odoomod.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import pycnnum
def amount2cn(num, counting_type=pycnnum.COUNTING_TYPES[1],
              big=True, traditional=False, alt_zero=False, alt_two=False,
              use_zeros=True):
    """Convert a numeric amount into a Chinese currency string.

    The integer part is rendered via ``pycnnum.num2cn`` and suffixed with
    元 (yuan); the first two decimal digits become 角 (jiao, 0.1 yuan) and
    分 (fen, 0.01 yuan). Whole amounts end with 整 ("exactly").

    :param num: amount to convert (int or float)
    :param counting_type: passed through to ``pycnnum.num2cn``
    :return: Chinese currency string such as 壹拾元零伍分
    """
    result = pycnnum.num2cn(num, counting_type, big, traditional, alt_zero, alt_two, use_zeros)
    if result == '':
        result = '零'
    jiao, fen = 0, 0
    # num2cn renders the fractional part after the 点 (decimal point)
    # character; strip it and recover the two digits from the original
    # decimal string instead, so 角/分 can be formatted explicitly.
    jiaofen_index = result.find('点')
    if jiaofen_index > -1:
        result = result[:jiaofen_index]
        num_str = str(num)
        jiaofen_index = num_str.find('.')
        try:
            jiao = int(num_str[jiaofen_index + 1:jiaofen_index + 2])
            fen = int(num_str[jiaofen_index + 2:jiaofen_index + 3])
        except ValueError:
            # Fewer than two decimal digits (e.g. "1.5" -> fen slice is
            # empty): missing digits stay 0. The original bare `except:`
            # also swallowed unrelated errors; only the empty-slice
            # int('') case can actually occur here.
            pass
    if jiao == 0 and fen > 0:
        return '%s元%s%s分' % (result, pycnnum.big_number_s[jiao], pycnnum.big_number_s[fen])
    elif jiao > 0 and fen == 0:
        return '%s元%s角' % (result, pycnnum.big_number_s[jiao])
    elif jiao > 0 and fen > 0:
        return '%s元%s角%s分' % (result, pycnnum.big_number_s[jiao], pycnnum.big_number_s[fen])
    else:
        return '%s元整' % result
|
17,473 | 9f00a6243279cde8594b0f53d35315c5d7aa0f7e | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Aaron-Yang [code@jieyu.ai]
Contributors:
"""
import logging
import arrow
import jqdatasdk as jq
from pandas import DataFrame
import numpy as np
from alpha.core.signal import moving_average, polyfit
from alpha.core.stocks import stocks
logger = logging.getLogger(__name__)
class One:
    """Momentum screener over daily bars.

    Fits second-order polynomials to the 5-day and 10-day moving averages
    of each stock's closing price and collects the codes whose fit is
    tight and whose parabola coefficients/vertices meet the thresholds.
    """
    def screen(self,frame, end_dt=None, adv_lim=25, win=7, a5=0.02, a10=0.001):
        """Scan all stocks and return their fitted-curve features.

        :param frame: bar frequency, passed through to ``stocks.get_bars``
        :param end_dt: last bar date to consider; defaults to now
        :param adv_lim: maximum allowed 30-bar advance, in percent
        :param win: number of trailing MA points used for each polynomial fit
        :param a5: minimum quadratic coefficient for the 5-day MA fit
        :param a10: minimum quadratic coefficient for the 10-day MA fit
        :return: DataFrame of every examined code with its fit features.
            NOTE(review): codes passing all filters are printed and
            collected in ``fired`` but ``fired`` is never returned —
            confirm whether that is intentional.
        """
        # NOTE(review): `all` shadows the builtin; consider renaming.
        all = []
        fired = []
        if end_dt is None:
            end_dt = arrow.now().datetime
        for i, code in enumerate(stocks.all_stocks()):
            try:
                name = stocks.name_of(code)
                # Skip delisted (name ends with 退) and ST-flagged stocks
                if name.endswith("退"):
                    continue
                if name.find("ST") != -1:
                    continue
                bars = stocks.get_bars(code, 30, frame, end_dt=end_dt)
                if len(bars) == 0:
                    print("get 0 bars", code)
                    continue
                # Require the last bar to be exactly at end_dt (skip stale data)
                if arrow.get(bars['date'].iat[-1]).date() != arrow.get(end_dt).date():
                    continue
                # The 30-bar advance must stay below adv_lim percent
                if bars['close'].iat[-1] / bars['close'].min() >= 1 + adv_lim / 100:
                    print(f"{code}涨幅大于", adv_lim)
                    continue
                ma5 = np.array(moving_average(bars['close'], 5))
                ma10 = np.array(moving_average(bars['close'], 10))
                # Quadratic fit over the trailing `win` MA points:
                # err = fit error, coef[0] = curvature, vertex = (x, y)
                err5, coef5, vertex5 = polyfit(ma5[-win:])
                err10, coef10, vertex10 = polyfit(ma10[-win:])
                vx5, _ = vertex5
                vx10, _ = vertex10
                _a5 = coef5[0]
                _a10 = coef10[0]
                all.append([code, _a5, _a10, vx5, vx10, err5, err10])
                # print(code, round_list([err5, vx, pred_up, y5, ma5[-1], y10, ma10[-1]],3))
                # Fire when the fit is tight, both parabolas open upward
                # steeply enough, and the 5-day vertex sits late in the
                # window while the 10-day vertex is already behind win/2
                t1 = err5 <= 0.003 and err10 <=0.003
                t2 = _a5 > a5 and _a10 > a10
                t3 = (win - 1 > vx5 >= win/2-1) and (vx10 < win/2 - 1)
                if t1 and t2 and t3:
                    c1, c0 = bars['close'].iat[-2], bars['close'].iat[-1]
                    if stocks.check_buy_limit(c1, c0, name):  # skip stocks already at limit-up
                        continue
                    print(f"{stocks.name_of(code)} {code}",[_a5,_a10,vx5,vx10,err5,
                                                           err10])
                    fired.append([code, _a5, _a10, vx5, vx10, err5, err10])
            except Exception as e:
                # Best-effort scan: a bad symbol must not abort the whole run
                print(i, e)
                continue
        return DataFrame(data=all,
                         columns=['code', 'a5', 'a10', 'vx5', 'vx10', 'err_5',
                                  'err_10'])
17,474 | 612a3b2bfb0d98a331ebce49c02aca503f4633f8 | from torch.utils.data import Dataset
import torch
from PIL import Image
import numpy as np
import torchvision.transforms as ttf
device = "cuda" if torch.cuda.is_available() else "cpu"
class GAN_Data(Dataset):
    """Dataset yielding (low-res, high-res) image pairs for GAN training.

    Each item is a 512x512 RGB image; its low-resolution counterpart is a
    256x256, Gaussian-blurred copy. Both are returned as float CHW tensors
    scaled to [0, 1] on the module-level ``device``.
    """

    def __init__(self, path_list, transforms=None,
                 root=r"D:/Desktop/Medical Imaging/MRI_512/"):
        """
        :param path_list: image file names, resolved relative to ``root``
        :param transforms: optional transform applied to the HR tensor
        :param root: directory containing the images. Generalized from the
            previously hard-coded path; the default preserves old behavior.
        """
        super().__init__()
        self.path_list = path_list
        self.transforms = transforms
        self.root = root
        self.t = ttf.Resize((256, 256))
        self.blur = ttf.GaussianBlur(3, sigma=(0.1, 2.0))

    def __getitem__(self, idx):
        img_path = self.path_list[idx]
        img = np.array(Image.open(self.root + img_path).convert('RGB').resize((512, 512)))
        # HWC uint8 array -> CHW float tensor
        img = torch.tensor(img, dtype=torch.float).permute(2, 0, 1)
        if self.transforms:
            img = self.transforms(img)
        # Low-res input: downscale to 256x256, then blur
        lr_img = self.blur(self.t(img))
        return lr_img.to(device) / 255., img.to(device) / 255.

    def __len__(self):
        return len(self.path_list)
|
17,475 | c389fdb06bcbd16b162990a7d05d00e925869681 | num=input("Enter Number")
print(num) |
17,476 | 67af01671d92f07233c609500702ee66201fe81a | # Generated by Django 3.1.2 on 2020-11-14 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: updates the choice lists on the
    Appointment model's ``service`` and ``time`` fields."""

    dependencies = [
        ('appointments', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='service',
            field=models.CharField(choices=[('Massage', 'Massage'), ('Manicure', 'Manicure'), ('Pedicure', 'Pedicure'), ('Facial Cleansing', 'Facial Cleansing'), ('Permanent Hair Removal', 'Permanent Hair Removal'), ('Cryotherapy', 'Cryotherapy')], max_length=25),
        ),
        migrations.AlterField(
            model_name='appointment',
            name='time',
            field=models.CharField(choices=[('8:00 AM', '8:00 AM'), ('9:00 AM', '9:00 AM'), ('10:00 AM', '10:00 AM'), ('11:00 AM', '11:00 AM'), ('1:00 PM', '1:00 PM'), ('2:00 PM', '2:00 PM'), ('3:00 PM', '3:00 PM'), ('4:00 PM', '4:00 PM')], max_length=10),
        ),
    ]
|
17,477 | ae5684b5484232ca2304f96499fee990f5fdf76d | from __future__ import absolute_import
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint_factory.__pkginfo__ import BASE_ID
class FactoryBoyInstalledChecker(BaseChecker):
    """Pylint checker that reports a fatal message when the Factory Boy
    package cannot be imported (the rest of the plugin depends on it)."""
    name = 'factory-installed-checker'
    msgs = {
        'F%s01' % BASE_ID: ("Factory Boy is not available on the PYTHONPATH",
                            'factory-not-available',
                            "Factory Boy could not be imported by the pylint-factory plugin, so most Factory Boy related "
                            "improvements to pylint will fail."),
        'W%s99' % BASE_ID: ('Placeholder message to prevent disabling of checker',
                            'factory-not-available-placeholder',
                            'PyLint does not recognise checkers as being enabled unless they have at least'
                            ' one message which is not fatal...')
    }
    @check_messages('factory-not-available')
    def close(self):
        """Probe for Factory Boy and emit 'factory-not-available' if the
        import fails. (``close`` is a BaseChecker hook — presumably invoked
        once at the end of the lint run; confirm against pylint docs.)"""
        try:
            # Imported purely as an availability probe; intentionally unused.
            import factory
        except ImportError:
            self.add_message('F%s01' % BASE_ID)
|
17,478 | b1dd692fa9c19c9f5ed7fb7ad60b24cd90a9ba0c | from PyTest import *
##//////////////////// PROBLEM STATEMENT ////////////////////////
## Given a 24 hour time of day as hours minutes seconds, add //
## a time interval which is specified as hours minutes seconds //
## //
## hrs mins secs hrs mins secs hrs mins secs //
## 13 24 30 2 40 40 -> 16 5 10 //
##///////////////////////////////////////////////////////////////
|
17,479 | 943da56bd84e1005b15666f0ef4513d246246f55 | from decouple import config
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
# Time zone is read from the environment via python-decouple, default UTC.
TIME_ZONE = config('TIME_ZONE', default='UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
17,480 | d88d163bca1ce74920765604a9389aa3a413b532 | import numpy as np
import matplotlib.pyplot as plt
from hipe4ml.tree_handler import TreeHandler
import matplotlib.backends.backend_pdf
results_dir = "../../Results/"
# Collect both resolution histograms into a single multi-page PDF.
pdf = matplotlib.backends.backend_pdf.PdfPages(results_dir + "resolutions.pdf")
hndl = TreeHandler("/data/fmazzasc/PbPb_3body/pass3/tables/SignalTable_20g7.root", "SignalTable")
# Keep reconstructed candidates only.
hndl.apply_preselections("gReconstructed==1")
# Residuals: generated quantity minus reconstructed quantity.
hndl.eval_data_frame("pt_res = gPt - pt", inplace=True)
hndl.eval_data_frame("ct_res = gCt - ct", inplace=True)
plt.hist(hndl["pt_res"], bins=1000, range=[-1,1])
plt.xlabel(r"p$_T$ resolution")
pdf.savefig()
plt.figure()
plt.hist(hndl["ct_res"], bins=1000, range=[-5,5])
plt.xlabel(r"$c$t resolution")
pdf.savefig()
pdf.close()
|
17,481 | f39bfe6d79d3bf4981fdcf326a3a0105247e3dd5 | import json
import numpy as np
from example_functions import visualization_function_dict
from line_search_methods import line_search_dict
from main_methods import main_method_dict
from config import visualization_params as v_params
from helpers import generate_x0
def run_one(_theta, _main_method, _ls_method, params, ls_params, x0=None):
    """Instantiate the objective, line search and main method, then run a
    single optimization from ``x0`` (generated randomly within the
    objective's bounds when not supplied)."""
    objective = _theta()
    if x0 is None:
        x0 = generate_x0(objective.n, *objective.bounds)
    searcher = _ls_method(ls_params)
    optimizer = _main_method(params, searcher)
    return optimizer(objective, np.array(x0))
def result_to_string(result):
    """Format a solver result as
    'status, iterations, duration ms, ls-iterations, ls-duration ms'."""
    perf = result['performance']
    ls = perf['line_search']
    fields = [
        result['status'],
        perf['iterations'],
        f"{perf['duration']} ms",
        ls['iterations'],
        f"{round(ls['duration'], 2)} ms",
    ]
    return ', '.join(str(field) for field in fields)
import warnings

# FIX: the `np.warnings` alias was deprecated and removed in NumPy 1.24+;
# filter RuntimeWarnings through the stdlib module directly.
warnings.filterwarnings('ignore', category=RuntimeWarning)
# for i, theta in enumerate(visualization_function_dict):
# output = {}
theta = 'Himmelblau'
# for j, main_method in enumerate(v_params):
main_method = 'NewtonsMethod'
output = {theta: {main_method: {}}}
# Run every configured line search for the fixed objective/method pair and
# record the iterate paths for the JS visualization.
for k, line_search in enumerate(v_params[main_method]):
    print(
        f'\nNow running: {theta} + {main_method} + ' +
        f'{line_search}'
    )
    print(
        # f'Total progress - theta: {i + 1}/' +
        f'{len(visualization_function_dict)}, ' +
        # f'main method: {j + 1}/{len(v_params)}, ' +
        f'line search: {k + 1}/{len(v_params[main_method])}, '
    )
    # line_search = 'ConstantSearch'
    result = run_one(
        visualization_function_dict[theta],
        main_method_dict[main_method],
        line_search_dict[line_search],
        v_params[main_method][line_search]['params'],
        v_params[main_method][line_search]['ls_params'],
        x0=[[0.0], [0.0]]
    )
    status = result['status']
    if not status:
        print(f">>> FAILURE {theta},{main_method},{line_search}")
    # Flatten each 2x1 iterate into a plain [x, y] pair for JSON
    steps = [
        p.reshape(1, 2).flatten().tolist() for p in result['steps']
    ]
    print(steps[0])
    # print(steps)
    # output[theta] = {main_method: {line_search: steps}}
    output[theta][main_method][line_search] = steps
# Emit the collected paths as a JS constant consumed by the visualization
with open('visualization/steps.js', 'w') as f:
    f.write(f'const stepData = {json.dumps(output)}')
|
17,482 | 87a9920b94f68f7b2539bf5bd3b9311df6678a91 | from django.contrib import admin
from .models import Donor
from .models import RequestModel
from .models import DonorDate
from .models import RequesterDate
# Register your models here.
# Expose the donation/request models in the Django admin with default ModelAdmins.
admin.site.register(Donor)
admin.site.register(RequestModel)
admin.site.register(DonorDate)
admin.site.register(RequesterDate)
|
17,483 | d21163b89e9b5f9ad69db2e450bab0547db6c034 | s = "Мой дядя самых честных правил, Когда не в шутку занемог, Он уважать себя заставил И лучше выдумать не мог"
print(" ".join([x for x in s.split() if not x.startswith(("м", "М"))])) |
17,484 | 720e6ebd54caed759bf149793e78b40e92ae58b9 | from django.contrib import admin
from django.urls import path
from core.views import HomeView, ChartData
urlpatterns = [
    path('admin/', admin.site.urls),
    # Site root: the dashboard page
    path('', HomeView.as_view()),
    # Chart data endpoint (reverse name: 'api-data')
    path('api/', ChartData.as_view(), name='api-data'),
]
|
17,485 | 42b0e893b4ddcc8c2ca256151d4bbb40505587a9 | from behave import *
from page import Brochure_Page
from locator import Brochure_Locators
@given('I am on page with brochures')
def step_impl(context):
    """Open the EPAM brochures page in the browser stored on the context."""
    context.browser.get("https://www.epam.com/our-work/brochures/epams-services-for-direct-to-learner-solution-providers");
@when('I push on a {button}')
def step_impl(context, button):
    """Click the button at numeric index {button} among the elements
    matched by the SOCIAL_BUTTON locator."""
    base_page = Brochure_Page(context)
    base_page.click_button(int(button),Brochure_Locators.SOCIAL_BUTTON)
@then('a {site} window should be opended')
def step_impl(context, site):
    """Verify via the page object that a window for {site} was opened.

    NOTE(review): the step text contains a typo ('opended'); it must stay
    in sync with the .feature files, so fix both together if correcting.
    """
    base_page = Brochure_Page(context)
    base_page.is_site_opened(site)
17,486 | 86510c1ec9fa3eed04d9b412876b1d1d5c1cf826 | import argparse
import numpy as np
import sys
import tensorflow as tf
from CAModel import CAModel
from DataLoader import float_to_note
from MIDIConverter import midi_to_chroma, midi_to_piano_roll
parser = argparse.ArgumentParser('Train a model on a midi file')
parser.add_argument('-a', '--piano-roll', action='store_true', dest='piano_roll', default=False, help='Use piano roll instead of chromagraph')
parser.add_argument('-b', '--batch-size', type=int, dest='batch_size', default=8, help='Set batch size')
parser.add_argument('-c', '--chorale', type=int, dest='chorale', default=0, help='Which chorale to use as a model')
parser.add_argument('-e', '--epochs', type=int, dest='epochs', default=8000, help='Number of learning epochs')
parser.add_argument('-f', '--framerate', type=int, dest='framerate', default=20, help='Number of epochs between graph updates')
parser.add_argument('-g', '--graphing', action='store_true', dest='graphing', default=False, help='Print chorale and exit')
parser.add_argument('-i', '--filters', type=int, dest='filters', default=128, help='Number of convolutional filters')
parser.add_argument('-l', '--load-model', type=str, dest='model', default=None, help='Continue learning from existing weights')
parser.add_argument('-m', '--midi-file', type=str, dest='midi_file', default=None, help='MIDI file to process, will override chorale')
parser.add_argument('-n', '--name', type=str, dest='output_name', default='output', help='Name of the weight output file')
parser.add_argument('-p', '--past-notes', type=int, dest='past_notes', default=16, help='How far into the past to stretch the convolutional window')
parser.add_argument('-o', '--output-destination', type=str, dest='output_destination', default='./outputs', help='Folder to save figures')
parser.add_argument('-r', '--chroma-frequency', type=int, dest='chroma_frequency', default=4, help='MIDI to chroma sampling frequency')
parser.add_argument('-s', '--slurm', action='store_true', dest='slurm', default=False, help='Just the learning')
parser.add_argument('-t', '--testing', type=int, default=None, dest='testing', help='How many rounds to test the model')
parser.add_argument('-w', '--width', type=int, dest='width', default=1, help='The width of the convolutional window, how many other notes the model can see')
args = parser.parse_args()
if not args.slurm:
import ffmpeg
import matplotlib.pyplot as plt
if args.midi_file is None:
from DataLoader import list_chorales
chorale = list_chorales()[args.chorale]
note_chorale = [float_to_note(i) for i in chorale]
notes = range(len(chorale))
chorale = np.array(chorale).reshape((1, -1))
else:
chorale = midi_to_piano_roll(args.midi_file, fs=args.chroma_frequency) if args.piano_roll else midi_to_chroma(args.midi_file, fs=args.chroma_frequency)
note_chorale = (chorale - np.min(chorale))/(np.max(chorale) - np.min(chorale))
notes = range(note_chorale.shape[1])
if args.graphing:
    import matplotlib.pyplot as plt
    # BUG FIX: `note_chorale is list` compared the object against the
    # `list` type by identity and was always False, so the multi-channel
    # branch ran even for bundled chorales (plain Python lists built at
    # the top of this script), where `note_chorale[i].tolist()` fails.
    # isinstance() restores the intended dispatch: list -> single plot,
    # numpy array (MIDI-derived) -> 3x4 grid of channels.
    if isinstance(note_chorale, list):
        plt.plot(notes, note_chorale)
        plt.title(f'Chorale {args.chorale}')
        plt.ylim(55, 80)
        plt.xlabel('Time step')
        plt.ylabel('Note')
    else:
        fig, axs = plt.subplots(3, 4, sharex=True, sharey=True)
        for i, a in enumerate(np.asarray(axs).flatten()):
            a.plot(notes, note_chorale[i].tolist())
        fig.suptitle(args.midi_file)
        fig.text(0.5, 0.04, 'Time step', ha='center')
        fig.text(0.04, 0.5, 'Note', va='center', rotation='vertical')
    plt.show()
    sys.exit('Graphing complete! Exiting..')
target = tf.pad(np.array(note_chorale).astype('float32'), [(0, 0), (args.past_notes - 1, 0)])
seed = np.zeros([target.shape[0],target.shape[1],args.past_notes + 1], np.float32)
seed[:, args.past_notes-1, -1] = note_chorale[:, 0]
def loss_f(x): return tf.reduce_mean(tf.square(x[..., -1] - target))
def scale(x): return x if args.midi_file is not None else float_to_note(x)
ca = CAModel(past_notes=args.past_notes, width=args.width, filters=args.filters, piano_roll=args.piano_roll)
if not args.model is None:
ca.load_weights(args.model)
loss_log = []
if not args.testing is None:
from tqdm import tqdm
import matplotlib.pyplot as plt
x0 = np.repeat(seed[None, ...], args.batch_size, 0)
for i in tqdm(range(args.testing)):
x0 = ca(x0)
loss_log.append(np.log10(tf.reduce_mean(loss_f(x0))))
plt.plot(loss_log)
plt.show()
sys.exit('Testing complete')
lr = 2e-3
lr_sched = tf.keras.optimizers.schedules.PiecewiseConstantDecay([2000], [lr, lr * 0.1])
trainer = tf.keras.optimizers.Adam(lr_sched)
loss0 = loss_f(seed).numpy()
@tf.function
def train_step(x):
    """One training step: roll the CA forward a random number of
    iterations (64 to 95), then apply a normalized-gradient Adam update
    to the CA's weights.

    :param x: batch of CA states (``seed`` repeated along a leading batch dim)
    :return: tuple of (final state batch, scalar mean training loss)
    """
    iter_n = tf.random.uniform([], 64, 96, tf.int32)
    with tf.GradientTape() as g:
        for i in tf.range(iter_n):
            x = ca(x)
        loss = tf.reduce_mean(loss_f(x))
    grads = g.gradient(loss, ca.weights, unconnected_gradients='zero')
    # Normalize each gradient tensor to (near) unit norm before applying
    grads = [g/(tf.norm(g)+1e-8) for g in grads]
    trainer.apply_gradients(zip(grads, ca.weights))
    return x, loss
if not args.slurm:
plt.ion()
if args.piano_roll:
fig, axs = plt.subplots(2, 1)
axs[0].imshow(target, aspect='auto')
axs[1].imshow(tf.reduce_mean(np.repeat(seed[None, ...], args.batch_size, 0)[..., -1], 0), aspect='auto')
else:
lines = []
plt.rcParams['axes.grid'] = True
music_graphs = 1 if args.midi_file is None else 12
batch_graphs = args.batch_size if args.midi_file is None else 0
total_graphs = music_graphs + batch_graphs
root = np.sqrt(total_graphs)
rows = np.floor(root)
cols = np.ceil(root)
if rows * cols < total_graphs:
rows += 1
fig, axs = plt.subplots(int(rows), int(cols), sharex=True, sharey=True)
for i, a in enumerate(np.asarray(axs).flatten()):
if i < music_graphs:
a.set_title(f'Music Channel {i + 1}')
a.plot(notes, note_chorale[i])
lines.append(a.plot(notes, [0] * max(chorale.shape))[0])
elif i < total_graphs:
a.set_title(f'Batch {i - music_graphs + 1}')
a.plot(notes, np.mean(note_chorale, axis=0))
lines.append(a.plot(notes, [0] * max(chorale.shape))[0])
else:
fig.delaxes(a)
fig.suptitle('Epoch 0')
fig.text(0.5, 0.04, 'Time step', ha='center')
fig.text(0.04, 0.5, 'Note', va='center', rotation='vertical')
mgr = plt.get_current_fig_manager().window.state('zoomed')
plt.show()
framenum = 0
for i in range(1, args.epochs + 1):
x0 = np.repeat(seed[None, ...], args.batch_size, 0)
x, loss = train_step(x0)
step_i = len(loss_log)
loss_log.append(loss.numpy())
print('\r step: %d, log10(loss): %.3f'%(i+1, np.log10(loss)), end='')
if not args.slurm and step_i % args.framerate == 0:
xn = x.numpy()
if args.piano_roll:
axs[1].imshow(tf.reduce_mean(xn[..., -1], 0), aspect='auto')
else:
for j in range(music_graphs):
lines[j].set_ydata([scale(k) for k in np.mean(xn, axis=0)[j, :, -1].flatten().tolist()[args.past_notes - 1:]])
for j in range(batch_graphs):
lines[music_graphs + j].set_ydata([scale(k) for k in np.mean(xn, axis=1)[j, :, -1].flatten().tolist()[args.past_notes - 1:]])
fig.suptitle(f'Epoch {i - 1}')
plt.gcf().canvas.draw()
plt.gcf().canvas.flush_events()
plt.savefig(f'{args.output_destination}/frame-{str(framenum).zfill(5)}.jpg')
framenum += 1
ca.save_weights(args.output_name or 'weights', overwrite=True)
if not args.slurm:
#ffmpeg.input('/outputs/*.jpg', framerate=25).output('output.gif').run()
input('\nPress ENTER to exit')
import json
with open((args.model or '') + '-loss.json', 'w') as filename:
json.dump(np.array(loss_log).tolist(), filename) |
17,487 | bc2e2a85efcb1eda7f8df113e216219be2855e88 | from ansible.module_utils.selvpc_utils.licenses import \
get_project_licenses_quantity
from ansible.module_utils.selvpc_utils.floatingips import \
get_project_ips_quantity
from ansible.module_utils.selvpc_utils.subnets import \
get_project_subnets_quantity
from ansible.module_utils.selvpc_utils.vrrp import \
get_project_vrrp_subnets_quantity
from ansible.module_utils.selvpc_utils.keypairs import \
keypair_exists
from tests import params
from tests.mock_objects import get_mocked_client
FLOATING_IPS_PARSED_OUTPUT = {
"ru-1": {"ACTIVE": 2, "DOWN": 1}, "ru-2": {"ACTIVE": 0, "DOWN": 2}
}
SUBNETS_PARSED_OUTPUT = {('ru-1', 'ipv4', 25): {'ACTIVE': 1, 'DOWN': 1},
('ru-1', 'ipv4', 29): {'ACTIVE': 1, 'DOWN': 0},
('ru-2', 'ipv4', 29): {'ACTIVE': 1, 'DOWN': 1}
}
LICENSES_PARSED_OUTPUT = {
('ru-1', 'license_windows_2012_standard'): {'ACTIVE': 3, 'DOWN': 1},
('ru-2', 'license_windows_2012_standard'): {'ACTIVE': 1, 'DOWN': 1}
}
VRRP_PARSED_OUTPUT = {
(29, 'ipv4', 'ru-1', 'ru-7'): {'ACTIVE': 1, 'DOWN': 1},
(29, 'ipv4', 'ru-2', 'ru-7'): {'ACTIVE': 1, 'DOWN': 0},
}
KEYPAIR_EXISTS_OUTPUT = [
True, True, False,
True, False, False,
False, False, True
]
def test_parse_existing_floating_ips():
client = get_mocked_client()
assert get_project_ips_quantity(
client, params.PROJECT_ID) == FLOATING_IPS_PARSED_OUTPUT
def test_parse_existing_subnets():
client = get_mocked_client()
assert get_project_subnets_quantity(
client, params.PROJECT_ID) == SUBNETS_PARSED_OUTPUT
def test_parse_existing_licenses():
client = get_mocked_client()
assert get_project_licenses_quantity(
client, params.PROJECT_ID) == LICENSES_PARSED_OUTPUT
def test_parse_existing_vrrp():
client = get_mocked_client()
assert get_project_vrrp_subnets_quantity(
client, params.PROJECT_ID) == VRRP_PARSED_OUTPUT
def test_keypair_exists():
client = get_mocked_client()
for kp, r in zip(params.KEYPAIRS, KEYPAIR_EXISTS_OUTPUT):
assert keypair_exists(client, kp[0], kp[1]) == r
|
17,488 | 7a45a3de59cba31f913f6a90940002711f33189b | # Copyright 2018 Xiaomi, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filelock
import hashlib
import os
import re
import sh
import urllib
from model_list import BENCHMARK_MODELS
FRAMEWORKS = (
"MACE",
"SNPE",
"NCNN",
"TFLITE"
)
RUNTIMES = (
"CPU",
"GPU",
"DSP"
)
def strip_invalid_utf8(text):
    """Drop bytes that are not valid UTF-8 from *text* via ``iconv -c``.

    FIX: the parameter was named ``str``, shadowing the builtin; the only
    in-file caller passes it positionally, so the rename is safe.
    """
    return sh.iconv(text, "-c", "-t", "UTF-8")
def split_stdout(stdout_str):
    """Split command output into stripped, non-empty lines after removing
    invalid UTF-8 bytes (this also drops the trailing empty line)."""
    cleaned = strip_invalid_utf8(stdout_str)
    return [line.strip() for line in cleaned.split('\n') if line.strip()]
def make_output_processor(buff):
    """Return a line callback that echoes each line to stdout (without its
    trailing newline) and records the raw line in *buff*."""
    def _capture(line):
        print(line.rstrip())
        buff.append(line)
    return _capture
def device_lock_path(serialno):
    """Return the /tmp lock-file path used to serialize access to the
    device with the given serial number."""
    return "/tmp/device-lock-{0}".format(serialno)
def device_lock(serialno, timeout=3600):
    """Return a ``filelock.FileLock`` guarding the device's lock file.

    :param serialno: adb serial number identifying the device
    :param timeout: seconds to wait for the lock before giving up
    """
    return filelock.FileLock(device_lock_path(serialno), timeout=timeout)
def adb_devices():
serialnos = []
p = re.compile(r'(\w+)\s+device')
for line in split_stdout(sh.adb("devices")):
m = p.match(line)
if m:
serialnos.append(m.group(1))
return serialnos
def adb_getprop_by_serialno(serialno):
outputs = sh.adb("-s", serialno, "shell", "getprop")
raw_props = split_stdout(outputs)
props = {}
p = re.compile(r'\[(.+)\]: \[(.+)\]')
for raw_prop in raw_props:
m = p.match(raw_prop)
if m:
props[m.group(1)] = m.group(2)
return props
def adb_supported_abis(serialno):
props = adb_getprop_by_serialno(serialno)
abilist_str = props["ro.product.cpu.abilist"]
abis = [abi.strip() for abi in abilist_str.split(',')]
return abis
def file_checksum(fname):
    """Return the hex MD5 digest of *fname*, read in 4 KiB chunks so large
    files are never fully loaded into memory."""
    digest = hashlib.md5()
    with open(fname, "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def adb_push_file(src_file, dst_dir, serialno):
src_checksum = file_checksum(src_file)
dst_file = os.path.join(dst_dir, os.path.basename(src_file))
stdout_buff = []
sh.adb("-s", serialno, "shell", "md5sum", dst_file,
_out=lambda line: stdout_buff.append(line))
dst_checksum = stdout_buff[0].split()[0]
if src_checksum == dst_checksum:
print("Equal checksum with %s and %s" % (src_file, dst_file))
else:
print("Push %s to %s" % (src_file, dst_dir))
sh.adb("-s", serialno, "push", src_file, dst_dir)
def adb_push(src_path, dst_dir, serialno):
if os.path.isdir(src_path):
for src_file in os.listdir(src_path):
adb_push_file(os.path.join(src_path, src_file), dst_dir, serialno)
else:
adb_push_file(src_path, dst_dir, serialno)
def get_soc_serialnos_map():
serialnos = adb_devices()
soc_serialnos_map = {}
for serialno in serialnos:
props = adb_getprop_by_serialno(serialno)
soc_serialnos_map.setdefault(props["ro.board.platform"], []) \
.append(serialno)
return soc_serialnos_map
def get_target_socs_serialnos(target_socs=None):
soc_serialnos_map = get_soc_serialnos_map()
serialnos = []
if target_socs is None:
target_socs = soc_serialnos_map.keys()
for target_soc in target_socs:
serialnos.extend(soc_serialnos_map[target_soc])
return serialnos
def download_file(configs, file_name, output_dir):
file_path = output_dir + "/" + file_name
url = configs[file_name]
checksum = configs[file_name + "_md5_checksum"]
if not os.path.exists(file_path) or file_checksum(file_path) != checksum:
print("downloading %s..." % file_name)
urllib.urlretrieve(url, file_path)
if file_checksum(file_path) != checksum:
print("file %s md5 checksum not match" % file_name)
exit(1)
return file_path
def get_mace(configs, abis, output_dir, build_mace):
if build_mace:
sh.bash("tools/build_mace.sh", abis, os.path.abspath(output_dir),
_fg=True)
else:
file_path = download_file(configs, "libmace.zip", output_dir)
sh.unzip("-o", file_path, "-d", "third_party/mace")
def get_tflite(configs, output_dir):
file_path = download_file(configs, "tensorflow-1.9.0-rc1.zip", output_dir)
sh.unzip("-o", file_path, "-d", "third_party/tflite")
def bazel_build(target,
abi="armeabi-v7a", frameworks=None):
print("* Build %s with ABI %s" % (target, abi))
if abi == "host":
bazel_args = (
"build",
target,
)
else:
bazel_args = (
"build",
target,
"--config",
"android",
"--cpu=%s" % abi,
"--action_env=ANDROID_NDK_HOME=%s"
% os.environ["ANDROID_NDK_HOME"],
)
for framework in frameworks:
bazel_args += ("--define", "%s=true" % framework.lower())
sh.bazel(
_fg=True,
*bazel_args)
print("Build done!\n")
def bazel_target_to_bin(target):
    """Map a bazel label to its bazel-bin output location.

    Example: ``//aibench/a/b:c`` -> (``bazel-bin/aibench/a/b``, ``c``).
    """
    package, bin_name = target.split(':')
    package = package.replace('//', '/')
    if package.startswith('/'):
        package = package[1:]
    return "bazel-bin/%s" % package, bin_name
def prepare_device_env(serialno, abi, device_bin_path, frameworks):
# for snpe
if "SNPE" in frameworks and abi == "armeabi-v7a":
snpe_lib_path = \
"bazel-mobile-ai-bench/external/snpe/lib/arm-android-gcc4.9"
adb_push("bazel-mobile-ai-bench/external/snpe/lib/dsp",
device_bin_path, serialno)
if snpe_lib_path:
adb_push(snpe_lib_path, device_bin_path, serialno)
libgnustl_path = os.environ["ANDROID_NDK_HOME"] + \
"/sources/cxx-stl/gnu-libstdc++/4.9/libs/%s/" \
"libgnustl_shared.so" % abi
adb_push(libgnustl_path, device_bin_path, serialno)
# for mace
if "MACE" in frameworks and abi == "armeabi-v7a":
adb_push("third_party/nnlib/libhexagon_controller.so",
device_bin_path, serialno)
# for tflite
if "TFLITE" in frameworks:
tflite_lib_path = ""
if abi == "armeabi-v7a":
tflite_lib_path = \
"third_party/tflite/tensorflow/contrib/lite/" + \
"lib/armeabi-v7a/libtensorflowLite.so"
elif abi == "arm64-v8a":
tflite_lib_path = \
"third_party/tflite/tensorflow/contrib/lite/" + \
"lib/arm64-v8a/libtensorflowLite.so"
if tflite_lib_path:
adb_push(tflite_lib_path, device_bin_path, serialno)
def prepare_model_and_input(serialno, models_inputs, device_bin_path,
output_dir):
file_names = [f for f in models_inputs if not f.endswith("_md5_checksum")]
for file_name in file_names:
file_path = models_inputs[file_name]
local_file_path = file_path
if file_path.startswith("http"):
local_file_path = \
download_file(models_inputs, file_name, output_dir)
else:
checksum = models_inputs[file_name + "_md5_checksum"]
if file_checksum(local_file_path) != checksum:
print("file %s md5 checksum not match" % file_name)
exit(1)
adb_push(local_file_path, device_bin_path, serialno)
def prepare_all_model_and_input(serialno, configs, device_bin_path, output_dir,
frameworks, build_mace):
models_inputs = configs["models_and_inputs"]
if "MACE" in frameworks:
if build_mace:
# mace model files are generated from source
for model_file in os.listdir(output_dir):
if model_file.endswith(".pb") or model_file.endswith(".data"):
model_file_path = output_dir + '/' + model_file
adb_push(model_file_path, device_bin_path, serialno)
else:
prepare_model_and_input(serialno, models_inputs["MACE"],
device_bin_path, output_dir)
if "SNPE" in frameworks:
prepare_model_and_input(serialno, models_inputs["SNPE"],
device_bin_path, output_dir)
if "TFLITE" in frameworks:
prepare_model_and_input(serialno, models_inputs["TFLITE"],
device_bin_path, output_dir)
# ncnn model files are generated from source
if "NCNN" in frameworks:
ncnn_model_path = "bazel-genfiles/external/ncnn/models/"
adb_push(ncnn_model_path, device_bin_path, serialno)
prepare_model_and_input(serialno, models_inputs["NCNN"],
device_bin_path, output_dir)
def adb_run(abi,
            serialno,
            configs,
            host_bin_path,
            bin_name,
            run_interval,
            num_threads,
            build_mace,
            frameworks=None,
            model_names=None,
            runtimes=None,
            device_bin_path="/data/local/tmp/aibench",
            output_dir="output",
            ):
    """Run the benchmark binary on one device for every
    (runtime, framework, model) combination and return the combined
    stdout of all runs.

    The device is locked for the whole session so concurrent callers do
    not interleave output.  The literal ['all'] in frameworks/runtimes/
    model_names expands to the module-level FRAMEWORKS/RUNTIMES/
    BENCHMARK_MODELS lists.
    """
    host_bin_full_path = "%s/%s" % (host_bin_path, bin_name)
    device_bin_full_path = "%s/%s" % (device_bin_path, bin_name)
    props = adb_getprop_by_serialno(serialno)
    print(
        "====================================================================="
    )
    print("Trying to lock device %s" % serialno)
    with device_lock(serialno):
        print("Run on device: %s, %s, %s" %
              (serialno, props["ro.board.platform"],
               props["ro.product.model"]))
        try:
            # Best effort: tools/power.sh presumably configures the
            # device's power/clock state for stable numbers -- confirm.
            sh.bash("tools/power.sh",
                    serialno, props["ro.board.platform"],
                    _fg=True)
        except Exception as e:  # fixed: "except Exception, e" is py2-only syntax
            print("Config power exception %s" % str(e))
        # Fresh scratch directory on the device for this session.
        sh.adb("-s", serialno, "shell", "mkdir -p %s" % device_bin_path)
        sh.adb("-s", serialno, "shell", "rm -rf %s"
               % os.path.join(device_bin_path, "interior"))
        sh.adb("-s", serialno, "shell", "mkdir %s"
               % os.path.join(device_bin_path, "interior"))
        prepare_device_env(serialno, abi, device_bin_path, frameworks)
        prepare_all_model_and_input(serialno, configs, device_bin_path,
                                    output_dir, frameworks, build_mace)
        adb_push(host_bin_full_path, device_bin_path, serialno)
        print("Run %s" % device_bin_full_path)
        stdout_buff = []
        process_output = make_output_processor(stdout_buff)
        # ADSP_LIBRARY_PATH is presumably needed for SNPE's DSP runtime.
        cmd = "cd %s; ADSP_LIBRARY_PATH='.;/system/lib/rfsa/adsp;/system" \
              "/vendor/lib/rfsa/adsp;/dsp'; LD_LIBRARY_PATH=. " \
              "./model_benchmark" % device_bin_path
        if frameworks == ['all']:
            frameworks = FRAMEWORKS
        if runtimes == ['all']:
            runtimes = RUNTIMES
        if model_names == ['all']:
            model_names = BENCHMARK_MODELS
        for runtime in runtimes:
            for framework in frameworks:
                for model_name in model_names:
                    print(framework, runtime, model_name)
                    args = "--run_interval=%d --num_threads=%d " \
                           "--framework=%s --runtime=%s --model_name=%s " \
                           "--product_soc=%s.%s" % \
                           (run_interval, num_threads, framework, runtime,
                            model_name,
                            props["ro.product.model"].replace(" ", ""),
                            props["ro.board.platform"])
                    sh.adb(
                        "-s",
                        serialno,
                        "shell",
                        "%s %s" % (cmd, args),
                        _tty_in=True,
                        _out=process_output,
                        _err_to_out=True)
    return "".join(stdout_buff)
|
17,489 | 2173ee617edbb8d6d9d9c06747f96d26e89bacc4 | # O(n**2)
import random
import time
from typing import List
def time_measurement(sort_func):
    """Decorator replacing *sort_func*'s return value with its average
    wall-clock running time.

    The wrapped function is invoked 10 times per call; the mean elapsed
    time in seconds is returned and the function's own return value is
    discarded.

    Fixes: the original annotated the decorator itself "-> float",
    which is wrong (a decorator returns the wrapper callable); timing
    now uses the monotonic time.perf_counter instead of time.time.
    """
    def wrapper(*args, **kwargs):
        elapsed = 0.0
        for _ in range(10):
            started = time.perf_counter()
            sort_func(*args, **kwargs)
            elapsed += time.perf_counter() - started
        return elapsed / 10
    return wrapper
@time_measurement
def bubble_sort(numbers: List[int]) -> List[int]:
    """Sort *numbers* in place with bubble sort.

    Note: the time_measurement decorator discards this return value and
    hands callers the average runtime instead; the list itself is still
    sorted in place as a side effect.
    """
    count = len(numbers)
    for done in range(count):
        # Each pass bubbles the largest remaining value to the end.
        for idx in range(count - done - 1):
            if numbers[idx] > numbers[idx + 1]:
                numbers[idx], numbers[idx + 1] = \
                    numbers[idx + 1], numbers[idx]
    return numbers
if __name__ == '__main__':
    # Benchmark bubble sort on 1000 random ints; the decorator makes
    # bubble_sort return its average runtime, which is what is printed.
    sample = [random.randint(0, 1000) for _ in range(1000)]
    print(bubble_sort(sample))
17,490 | a0695f3a68e32a9dd30bfaf63aff45394fe73afd | # Example 6.15
# Unsteady heat equation
# Implicit time stepping for u_t = alpha*u_xx on [0, 1] with
# u(0, t) = u(1, t) = 0 and u(x, 0) = sin(pi*x), compared at a few
# times against the analytic solution sin(pi*x)*exp(-alpha*pi^2*t).
# NOTE(review): makef() and tri_diag() come from the local tri_diag
# module; presumably they build and solve the tridiagonal system for
# one step -- confirm against that module.
from pylab import*
from tri_diag import*
clf()
N=8           # number of grid intervals
alpha = 0.1   # diffusivity
dt = 0.1      # time step
xbig = linspace(0., 1., N+1)
# Interior points only; the boundary values are fixed at zero.
x = xbig[1:N]
dx = 1./N
# Coupling coefficient alpha*dt/(2*dx^2) passed to the system builder.
beta = alpha*dt/(2.*dx**2)
t0 = 0.
t1 = 1.5
# Initial condition on the interior grid.
phi0 = sin(pi*x)
phi = phi0
A, f = makef(phi, beta)
for t in r_[t0:t1 + dt:dt]:
    # Plot only at selected times.
    # NOTE(review): exact float equality relies on r_/arange producing
    # exact multiples of 0.1 at these points -- confirm.
    if (t == 0.0 or t == .5 or t == 1.0 or t == 1.5):
        # Dense grid for the analytic solution.
        xexact = linspace(0., 1., 1000)
        uexact = multiply(sin(pi*xexact), exp(-alpha*pi**2*t))
        # Re-attach the zero boundary values for plotting.
        phibig = hstack((0., phi, 0.))
        annotate("t=%.2f" % t, xy = (x[x.shape[0]//2], phi.max()), xytext = (0., 5.), textcoords = "offset points")
        if t == 0.:
            # Label curves only once so the legend stays compact.
            plot(xbig, phibig, "ko", mfc = "none", label = "N=%d" % N)
            plot(xexact, uexact, "k", label = "exact")
        else:
            plot(xexact, uexact, "k")
            plot(xbig, phibig, "ko", mfc = "none")
    # Advance one step: rebuild the system from the current phi, then
    # solve for the new interior values.
    A, f = makef(phi, beta)
    phi = tri_diag(A, f)
legend(loc = 0)
grid("on")
show()
|
17,491 | 658ae4fead0b447a416ca609f91ccfc72b817e84 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# 使用原始文件名执行文件IO操作 也就是说文件名并没有经过系统默认编码去解码或编码
# 默认情况下 文件名都会根据 sys.getfilesystemencoding() 返回的文本编码
# 或解码
import sys
print(sys.getfilesystemencoding())
# 如果因为某种原因想忽略这种编码 可以使用原始字节字符串指定一个文件名即可
# Wrte a file using a unicode filename
with open('jalape\xf1o.txt', 'w') as f:
f.write('Spicy!')
import os
os.listdir('.')
os.listdir(b'.') # Note: byte string
# Open file with raw filename
with open(b'jalapen\xcc\x83o.txt') as f:
print(f.read())
# 在最后两个操作中 你给文件相关函数 如 open 和 os.listdir() 传递
# 字节字符串时候 文件名的处理方式会稍有不同
# 通常来讲 你不需要担心文件名的编码和解码 普通文件名操作应该就没有问题
# 但是 有些操作系统允许用户通过偶然或者恶意方式区创建名字不符合默认编码的文件
# 这可能会中断需要处理大量文件的python程序
# 读取目录并通过原始未解码方式处理文件名可以有效的避免这个问题
|
17,492 | 973014ada78ac62a8b1c58f59d1ac5619de47fee | #!/usr/bin/env python
# Define a bunch of useful functions
class myfuncs:
    """
    Define all of my functions within a class to keep the global name
    space clean.
    """

    @staticmethod
    def qp(F, V):
        """
        Quickly plot a surface defined by a face and vertex list, F and
        V respectively.  The faces are colored blue.  This is simply a
        rewrite of Ken's qp in MATLAB.

        F : Nx3 NumPy array of faces (V1, V2, V3)
        V : Nx3 NumPy array of vertexes ( X, Y, Z)

        Returns the matplotlib Figure (shown before returning).
        """
        import matplotlib.pyplot
        from mpl_toolkits.mplot3d import Axes3D  # noqa -- registers the "3d" projection
        #
        # Plot the surface
        fig = matplotlib.pyplot.figure()
        axs = fig.add_subplot(1, 1, 1, projection="3d")
        axs.plot_trisurf(V[:, 0], V[:, 1], V[:, 2], triangles=F)
        #
        # Label the axes and set them equal.
        # NOTE(review): newer matplotlib releases reject
        # Axes3D.axis("equal") -- confirm the installed version.
        axs.set_xlabel("x")
        axs.set_ylabel("y")
        axs.set_zlabel("z")
        axs.axis("equal")
        #
        # And show the figure
        matplotlib.pyplot.show()
        return fig

    @staticmethod
    def reload(mod):
        """
        Reload *mod*, logging its full source on first load and a
        unified diff of source changes on later reloads, to record what
        version of the module was used during development.

        Returns the reloaded module object.
        """
        import difflib
        # Fix: the deprecated 'imp' module (removed in Python 3.12) is
        # replaced by importlib, the supported reload API.
        import importlib
        import logging
        # Set the logger
        logger = logging.getLogger("myfuncs.reload")
        logger.addHandler(logging.NullHandler())
        logger.setLevel(logging.DEBUG)
        #
        # Track the source file, not the cached bytecode (.pyo/.pyc).
        if mod.__file__[-1] in "oc":
            mod.__file__ = mod.__file__[:-1]
        #
        # Previous source snapshot, if any, from an earlier reload.
        orig = mod.__dict__.get("__track_source__")
        #
        # Read the source file in its current state.
        with open(mod.__file__, "r") as fid:
            mod.__track_source__ = fid.readlines()
        #
        # Check for differences and report any changes.
        logger.debug(mod.__file__)
        if orig is None:
            # First load: log every numbered source line.
            for it, line in enumerate(mod.__track_source__, start=1):
                logger.debug("{:d} {:s}".format(it, line.rstrip()))
        else:
            diffs = difflib.unified_diff(
                orig, mod.__track_source__,
                fromfile="Original", tofile="Updated")
            for line in diffs:
                logger.debug(line.rstrip())
        return importlib.reload(mod)
|
17,493 | e8be1ffaf76126de2cbf034d36b858502a8a94da | from rest_framework.viewsets import ModelViewSet
from website.api.serializer import AccountSerializer
from website.models import Account
class AccountViewSet(ModelViewSet):
    """REST CRUD endpoints for Account records.

    ModelViewSet supplies the list/retrieve/create/update/
    partial_update/destroy actions over the full Account queryset,
    serialized with AccountSerializer.
    """
    serializer_class = AccountSerializer
    queryset = Account.objects.all()
|
17,494 | cda07b46d35317590ef613faf18c7bcdfd94a30e | import json
# Build a skeleton record for units 1..376 and persist it as JSON.
# Each unit starts out unfound, with a single all-empty metadata entry.
units = {
    i: {
        "found": False,
        "values": [{
            "units": "",
            "name": "",
            "description": "",
            "uploadDate": "",
            "uploader": "",
            "upvotes": "",
            "downvotes": "",
        }],
    }
    for i in range(1, 377)
}
with open('unit.json', 'w') as outfile:
    json.dump(units, outfile)
|
17,495 | c6c87816a405fb0f91d605b25f146b6d389933e9 | import gym
import os
from Arguments import get_args
from RL_Agent_Models import DDPGAgent
###########################################################################
# Name: get_env_params
# Function: get the parameters of the environment provided by gym
# Comment:
###########################################################################
def get_env_params(env):
    """Collect the dimensions and limits an agent needs from a gym env.

    Returns a dict with observation/desired-goal/action sizes, the
    maximum action magnitude, and the episode horizon.
    """
    first_obs = env.reset()
    params = {
        'obs': first_obs['observation'].shape[0],
        'd_goal': first_obs['desired_goal'].shape[0],
        'action': env.action_space.shape[0],
        'action_max': env.action_space.high[0],
    }
    # Episode horizon comes from gym's TimeLimit wrapper.
    params['max_timesteps'] = env._max_episode_steps
    return params
def launch(args):
    """Build the environment and DDPG agent, then start training."""
    environment = gym.make(args.env_name)
    # The agent needs the env's dimensions/limits up front.
    trainer = DDPGAgent(args, environment, get_env_params(environment))
    trainer.learning()
if __name__ == '__main__':
    # Force single-threaded numeric libraries and flag MPI mode (per
    # the HER configuration noted in the original) before training.
    os.environ.update({
        'OMP_NUM_THREADS': '1',
        'MKL_NUM_THREADS': '1',
        'IN_MPI': '1',
    })
    launch(get_args())
|
17,496 | 293920cb24c2d7e11abb15c2da2ba9865aa259bf | import globals
# Chat-command registry.  Option semantics are inferred from how
# initalizeCommands() and the usage strings use them -- confirm against
# the dispatcher:
#   limit      -- per-channel cooldown, presumably in seconds
#   argc       -- number of arguments the command expects
#   return     -- 'command' means handled by code elsewhere; any other
#                 value is presumably a literal reply string
#   space_case -- TODO confirm: argument text may contain spaces
#   user_level -- minimum role required ('mod' / 'reg')
#   usage      -- help text shown to users
#   user_limit -- per-user cooldown, presumably in seconds
commands = {
    '!report': {
        'limit': 200,
        'argc': 1,
        'return': 'command',
        'space_case': True,
        'user_level': 'mod',
        'usage': "!report [insert bug report text here]"
    },
    '!opinion': {
        'limit': 0,
        'argc': 0,
        'return': 'command',
        'user_level': 'reg',
        'usage': '!opinion',
        'user_limit': 30
    },
    '!ammo': {
        'limit': 0,
        'argc': 3,
        'return': 'command',
        'usage': "!ammo *['add'/'remove'] [username] [amount]",
        'optional': True,
        'user_limit': 30,
        'user_level': 'mod'
    },
    '!help': {
        'limit': 15,
        'return': 'There is a super useful README for the bot at at github.com/singlerider/jadebot',
        'usage': '!help',
        'user_limit': 30
    },
    '!followers': {
        'limit': 30,
        'user_level': 'mod',
        'return': 'command',
        'argc': 0,
        'usage': '!followers',
        'user_limit': 30,
    },
    '!follower': {
        'limit': 0,
        'return': 'command',
        'argc': 1,
        'usage': '!follower [username]',
        'user_level': 'mod'
    },
    '!uptime': {
        'limit': 15,
        'return': 'command',
        'argc': 0,
        'usage': '!uptime',
        'user_limit': 30,
    },
    '!stream': {
        'limit': 0,
        'return': 'command',
        'argc': 0,
        'usage': '!stream'
    },
    '!winner': {
        'limit': 0,
        'argc': 0,
        'return': 'command',
        'usage': '!winner',
        'user_limit': 30,
    },
    '!popularity': {
        'limit': 0,
        'argc': 1,
        'return': 'command',
        'space_case': True,
        'usage': '!popularity [name_of_game]'
    },
    '!caster': {
        'limit': 0,
        'argc': 1,
        'return': 'command',
        'usage': '!caster [streamer_username]',
        'user_level': 'mod'
    },
    '!donation': {
        'limit': 0,
        'argc': 2,
        'return': 'command',
        'usage': '!donation [username] [currency_amount]',
        'user_level': 'mod'
    },
    '!tip': {
        'limit': 0,
        'argc': 2,
        'return': 'command',
        'usage': '!tip [username] [currency_amount]',
        'user_level': 'mod'
    },
    '!reload': {
        'limit': 0,
        'argc': 0,
        'return': 'command',
        'usage': '!reload'
    },
    '!drop': {
        'limit': 0,
        'argc': 0,
        'return': 'command',
        'usage': '!drop'
    },
    '!leaderboard': {
        'limit': 300,
        'argc': 0,
        'return': 'command',
        'usage': '!leaderboard',
        'user_level': 'mod'
    }
}
# Per-channel -> per-command -> per-user cooldown bookkeeping; the
# nested structure is populated by initalizeCommands().
user_cooldowns = {"channels": {}}
def initalizeCommands(config):
    """Seed per-channel state for every configured channel.

    For each channel: registers an empty drop record in
    globals.CHANNEL_INFO (keyed without the leading '#'), resets each
    command's per-channel last-used timestamp, and creates per-user
    cooldown slots for commands that declare a user_limit.
    (Function name typo is kept -- callers use it.)
    """
    for channel in config['channels']:
        globals.CHANNEL_INFO[channel.lstrip("#")] = {"drop": {}}
        user_cooldowns["channels"][channel] = {"commands": {}}
        for name, info in commands.items():
            info[channel] = {'last_used': 0}
            if "user_limit" in info:
                user_cooldowns["channels"][channel]["commands"][name] = {
                    "users": {}}
if __name__ == "__main__":  # pragma: no cover
    # Debug dump of every command's usage string as a JSON-like map.
    # Fixes: py2-only print statement replaced with a py2/py3-valid
    # parenthesized call, and each line now closes its quoted usage
    # string (the original only closed the final entry's quote).
    print("{\n" + ",\n".join(
        "    \"" + key + "\": \"" + commands[key]["usage"] + "\""
        for key in commands) + "\n}")
|
17,497 | 0c859fa168004a39982e094ab2a2ee6bf2e41777 | # Generated by Django 2.2.6 on 2020-01-19 06:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Team.kakao_chat_url.
    # The default URL below only back-fills rows that exist when the
    # migration runs; preserve_default=False then removes the default
    # from the model state.
    dependencies = [
        ('teams', '0013_team_status'),
    ]
    operations = [
        migrations.AddField(
            model_name='team',
            name='kakao_chat_url',
            field=models.URLField(default='https://open.kakao.com/o/gbIUlwTbc'),
            preserve_default=False,
        ),
    ]
|
17,498 | 6e39bc0f817acc84f3f3f00e6328f8f06c619421 | from . import ir_mail_server |
17,499 | 06714441bf2c68ebdebaecd50ec217ce32386f17 | class Sample:
def __init__(self, data, words, steps, label, flag_word):
self.input_ = data[0:steps]
self.sentence = words[0:steps]
self.length = 0
self.label = label
for word in self.input_:
if word == flag_word:
break
self.length += 1
class Batch:
    """Thin container pairing a list of samples with its cached size."""

    def __init__(self, samples):
        # Cache the size so consumers need not call len() repeatedly.
        self.batch_size = len(samples)
        self.samples = samples
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.