index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,700 | 6f61398f1b2985d69b92d175ccde8d3ef42df926 | class Node:
def __init__(self, val=None):
    """Doubly-linked-list node; val=None marks a DLL sentinel."""
    self.val = val
    self.prev = None  # neighbor links; wired up by DLL.insert / cleared by DLL.getTop
    self.next = None
    self.freq = 1  # push-count of val at the time this node was created (see FreqStack)
class DLL:
    """Doubly linked list with sentinel head/tail; newest node sits at the front."""

    def __init__(self):
        # Two sentinel nodes bracket the payload so insert/remove never
        # need empty-list edge cases.
        self.head = Node()
        self.tail = Node()
        self.head.next = self.tail
        self.tail.prev = self.head
        self.size = 0

    def insert(self, node):
        """Splice *node* in right after the head sentinel (stack push)."""
        first = self.head.next
        node.next = first
        node.prev = self.head
        first.prev = node
        self.head.next = node
        self.size += 1

    def getTop(self):
        """Unlink and return the node right after the head sentinel (stack pop)."""
        top = self.head.next
        self.head.next = top.next
        top.next.prev = self.head
        top.prev = None
        top.next = None
        self.size -= 1
        return top
class FreqStack:
    """Stack that pops the most frequent value; ties break by recency.

    nodeDict maps value -> its current frequency (pushes minus pops).
    freqDict maps frequency f -> DLL of nodes pushed at that frequency,
    most recent at the front.  max_freq is the highest frequency present.
    """

    def __init__(self):
        self.nodeDict = defaultdict(int)  # value -> current frequency
        self.freqDict = defaultdict(DLL)  # frequency -> DLL of nodes, recency-ordered
        self.max_freq = 0

    def push(self, val: int) -> None:
        """Record one more push of *val* at its new frequency level.

        BUG FIX: the original never updated nodeDict past 1 and hard-coded
        repeat pushes to frequency 2, so a third push of the same value was
        still filed under freq 2.  Track the true running count instead.
        """
        freq = self.nodeDict[val] + 1
        self.nodeDict[val] = freq
        node = Node(val)
        node.freq = freq
        self.freqDict[freq].insert(node)
        if freq > self.max_freq:
            self.max_freq = freq

    def pop(self) -> int:
        """Remove and return the most frequent value (most recent on ties).

        Returns -1 when the stack is empty, matching the original contract.
        """
        if not self.freqDict[self.max_freq].size:
            return -1
        node = self.freqDict[self.max_freq].getTop()
        if not self.freqDict[self.max_freq].size:
            # That was the last node at this level; the next-lower level
            # (if any) now holds the maximum.
            self.max_freq -= 1
        # BUG FIX: keep nodeDict in sync on pop as well.
        self.nodeDict[node.val] -= 1
        if not self.nodeDict[node.val]:
            del self.nodeDict[node.val]
        return node.val
# Your FreqStack object will be instantiated and called as such:
# obj = FreqStack()
# obj.push(x)
# param_2 = obj.pop() |
993,701 | faa74b0ae538ec87c4053785a3f64738ca4766ea | __author__ = "Safal Khanal"
__copyright__ = "Copyright 2021"
__credits__ = ["Safal Khanal"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Safal Khanal"
__email__ = "skhanal@respiro.com.au"
__status__ = "In Development"
import os
import smtplib
import sys
import subprocess
import tkinter as tk
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from os.path import basename
from tkinter.filedialog import asksaveasfilename, askopenfilename
from tkinter import messagebox
import tkinter.scrolledtext as st
import tkinter.ttk as ttk
import csv
import time
from os import path
# Best-effort install of runtime dependencies at program start.
# NOTE(review): shelling out to pip at import time is slow and failures are
# silent (os.system return codes are ignored); a requirements.txt would be
# the conventional replacement — confirm before changing.
os.system('pip3 install xlrd')
os.system('pip3 install pyats')
os.system('pip3 install genie')
os.system('pip3 install pyats.contrib')
try:
    import pandas as pd
except ModuleNotFoundError:
    # Install pandas on demand, then retry the import once.
    os.system('pip3 install pandas')
    import pandas as pd
# Date-stamped subfolder name under log/ shared by every job run (e.g. "2021-05-04").
DIR_PATH_NAME = time.strftime("%Y-%m-%d")
def generatesourcetestbed():
    """Pick a source-switch *.xls file and convert it to sourcetestbed.yml via pyats.

    On success the "upload target" button is unlocked; status messages go to
    the shared read-only text widget.
    """
    filepath = askopenfilename(initialdir=os.getcwd(), filetypes=[("Excel file", "*.xls")])
    if not filepath:
        return  # dialog cancelled
    if len(pd.read_excel(filepath)) == 0:
        txt_edit.config(state=tk.NORMAL)
        txt_edit.delete("1.0", tk.END)
        txt_edit.insert(tk.END, "Source switch testbed file is empty.\n")
        txt_edit.config(state=tk.DISABLED)
    else:
        try:
            # BUG FIX: the original interpolated the path into a shell string
            # via os.system, which breaks on paths containing spaces and is a
            # shell-injection hazard; pass an argument list instead.
            subprocess.run(['pyats', 'create', 'testbed', 'file',
                            '--path', filepath, '--output', 'sourcetestbed.yml'],
                           check=True)
            txt_edit.config(state=tk.NORMAL)
            txt_edit.delete("1.0", tk.END)
            txt_edit.insert(tk.END, "Source switch testbed file created.\n")
            txt_edit.config(state=tk.DISABLED)
            btn_load_target["state"] = "active"
        except Exception:
            # Narrowed from a bare except.  BUG FIX: the widget may be
            # disabled here, in which case the original insert was silently
            # dropped — enable it around the error message.
            txt_edit.config(state=tk.NORMAL)
            txt_edit.insert(tk.END, "Testbed file format is wrong\n")
            txt_edit.config(state=tk.DISABLED)
def generatetargettestbed():
    """Pick a target-switch *.xls file and convert it to targettestbed.yml via pyats.

    On success the "run script" button is unlocked; status messages go to
    the shared read-only text widget.
    """
    filepath = askopenfilename(initialdir=os.getcwd(), filetypes=[("Excel file", "*.xls")])
    if not filepath:
        return  # dialog cancelled
    if len(pd.read_excel(filepath)) == 0:
        txt_edit.config(state=tk.NORMAL)
        txt_edit.delete("1.0", tk.END)
        txt_edit.insert(tk.END, "Target switch testbed file is empty.\n")
        txt_edit.config(state=tk.DISABLED)
        return
    else:
        try:
            # BUG FIX: argument list instead of a shell string (see
            # generatesourcetestbed) — handles spaces, avoids injection.
            subprocess.run(['pyats', 'create', 'testbed', 'file',
                            '--path', filepath, '--output', 'targettestbed.yml'],
                           check=True)
            txt_edit.config(state=tk.NORMAL)
            txt_edit.delete("1.0", tk.END)
            txt_edit.insert(tk.END, "Target switch testbed file created.\n")
            txt_edit.config(state=tk.DISABLED)
            btn_script1["state"] = "active"
        except Exception:
            # Narrowed from a bare except; enable the widget so the error
            # message is actually shown (it may be disabled here).
            txt_edit.config(state=tk.NORMAL)
            txt_edit.insert(tk.END, "Testbed file format is wrong\n")
            txt_edit.config(state=tk.DISABLED)
# Run pyats job and display the output
# Run pyats job and display the output
def run_script1():
    """Run the pyats port-status job, then show source_up.csv and report.txt.

    Unlocks the save/email/report/verify buttons on success.
    """
    txt_edit.config(state=tk.NORMAL)
    txt_edit.delete("1.0", tk.END)
    value = messagebox.askokcancel("askokcancel", "This action takes few minutes to execute. Do you want to continue?")
    if value:
        try:
            # Blocking call; job output lands under log/<date>/.
            os.system('pyats run job job.py --html-logs log/' + DIR_PATH_NAME)
            filepath = "log/" + DIR_PATH_NAME + "/source_up.csv"
            report_filepath = "log/" + DIR_PATH_NAME + "/report.txt"
            with open(filepath, "r") as input_file:
                txt_edit.insert(tk.END, input_file.read())
            txt_edit.insert(tk.END, '\n')
            with open(report_filepath, "r") as input_file:
                txt_edit.insert(tk.END, input_file.read())
            btn_save["state"] = "active"
            btn_email["state"] = "active"
            btn_report["state"] = "active"
            btn_script3["state"] = "active"
        except Exception:
            # Narrowed from a bare except; typically FileNotFoundError when
            # the job failed and produced no output files.
            txt_edit.insert(tk.END, "Error occurred while running the script")
        finally:
            # BUG FIX: the original left the widget editable on the error path.
            txt_edit.config(state=tk.DISABLED)
# Display console output live in tkinter text box (Currently not in use- In development)
# def invoke_process_popen_poll_live(cmd, timeout=None):
# txt_edit.config(state=tk.NORMAL)
# p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# output = ''
# try:
# txt_edit.insert(tk.END, "Success!!" + '\n')
# for line in p.stdout:
# line = line.decode(encoding=sys.stdout.encoding,
# errors='replace' if sys.version_info < (3, 5)
# else 'backslashreplace').rstrip()
# txt_edit.insert(tk.END, line + '\n')
# output += line
# retval = p.wait(timeout)
# return retval, output
# except:
# txt_edit.insert(tk.END, "Failed!!" + '\n')
# txt_edit.insert(tk.END, "There was some error while running the script" + '\n')
def view_report():
    """Load the recommended-migration CSV (report_log.csv) into the text widget."""
    value = messagebox.askokcancel("askokcancel", "This action takes few minutes to execute. Do you want to continue?")
    if value:
        txt_edit.config(state=tk.NORMAL)
        txt_edit.delete("1.0", tk.END)
        filepath = "log/" + DIR_PATH_NAME + "/report_log.csv"
        with open(filepath, "r") as input_file:
            txt_edit.insert(tk.END, input_file.read())
        # BUG FIX: the original inserted this newline *after* disabling the
        # widget, so the insert was silently ignored; write it first.
        txt_edit.insert(tk.END, '\n')
        window.title(f"Switch port Consolidation - {filepath}")
        txt_edit.config(state=tk.DISABLED)
# Display CSV file content in a tabular format (In development)
# def generate_csvtable(filepath):
# header = pd.read_csv(filepath, index_col=0, nrows=0).columns.tolist()
# txt_edit.pack(side=tk.TOP)
# tree = ''
# for items in header:
# tree = ttk.Treeview(txt_edit, columns=items, selectmode="extended")
#
# with open(filepath) as f:
# reader = csv.reader(f, delimiter=',')
# for row in reader:
# ss = row[0]
# sp = row[1]
# ts = row[2]
# tp = row[3]
# tree.insert("", 0, values=(ss, sp, ts, tp))
# tree.pack()
def run_targetconfig():
    """Run the migration-verification pyats job and show its status CSV."""
    value = messagebox.askokcancel("askokcancel", "This action takes few minutes to execute. Do you want to continue?")
    if value:
        txt_edit.config(state=tk.NORMAL)
        txt_edit.delete("1.0", tk.END)
        # Blocking call; writes its results under log/<date>/.
        os.system('pyats run job check_migration_job.py --html-logs log/' + DIR_PATH_NAME)
        filepath = "log/" + DIR_PATH_NAME + "/switch_migration_status.csv"
        # NOTE(review): no try/except here — a failed job leaves this file
        # missing and the open() below raises; confirm that is intended.
        with open(filepath, "r") as input_file:
            text = input_file.read()
        txt_edit.insert(tk.END, text)
        txt_edit.config(state=tk.DISABLED)
def save_file():
    """Dump the text widget's current contents into a user-chosen file."""
    target = asksaveasfilename(
        defaultextension="txt",
        filetypes=[("Text Files", "*.txt"), ("All Files", "*.*")],
    )
    if not target:
        return  # dialog cancelled
    contents = txt_edit.get("1.0", tk.END)
    with open(target, "w") as out:
        out.write(contents)
    window.title(f"Switch port Consolidation - {target}")
def openNewWindow():
    # Child dialog that collects a recipient address for the log-file email.
    newWindow = tk.Toplevel(window)
    newWindow.rowconfigure(0, minsize=100, weight=1)
    newWindow.columnconfigure(1, minsize=100, weight=1)
    newWindow.configure(bg='#ededed')
    newWindow.title("Send email")
    l1 = tk.Label(newWindow, text="Email address: ", bg='#ededed')
    l1.grid(row=0, column=0, padx=(10, 10))
    e1 = tk.Entry(newWindow, bg='white')
    e1.grid(row=0, column=1, columnspan=10)
    # Both the entry widget and the window are handed to send_email so it can
    # read the address and close the dialog after a successful send.
    btn_send_email = tk.Button(newWindow, text="Send email", command=lambda: send_email(e1, newWindow), fg='green',
                               bg='light green')
    btn_send_email.grid(row=1, column=1)
def send_email(e1, newWindow):
    """Email today's log files to the address typed into *e1*.

    Attaches whichever of the run's log files exist, sends via Gmail SMTP
    over SSL, and closes the dialog window on success.  Errors are reported
    through message boxes.
    """
    subject = "Switch Port Consolidation log files"
    body = "Hi, \n\nFew of the log files generated during switch port migration are attached in the mail. \n\nThank " \
           "you, \nRespiro team. "
    # SECURITY: credentials are hard-coded in source; move them to an
    # environment variable or config file and rotate this password.
    sender_email = "respirotest0@gmail.com"
    receiver_email = e1.get()
    password = "respiroemail"
    message = MIMEMultipart()
    message["From"] = sender_email
    message["To"] = receiver_email  # reuse the value read above (was a second e1.get())
    message["Subject"] = subject
    message["Date"] = formatdate(localtime=True)
    message.attach(MIMEText(body, "plain"))
    current_dir = os.getcwd()
    base = "log/" + DIR_PATH_NAME
    # Use a list (the original used a set) so attachments keep a
    # deterministic order in the outgoing mail.
    filename = [base + "/source_up.csv", base + "/TaskLog.job.html",
                base + "/target_down.csv", base + "/report_log.csv",
                base + "/report.txt"]
    if path.exists(current_dir + '/log/' + DIR_PATH_NAME + '/switch_migration_status.csv'):
        # The verification job has run too — include its outputs.
        filename += [base + "/switch_migration_status.csv",
                     base + "/TaskLog.check_migration_job.html"]
    for items in filename:
        try:
            with open(items, "rb") as fil:
                part = MIMEApplication(
                    fil.read(),
                    Name=basename(items)
                )
            # After the file is closed
            part['Content-Disposition'] = 'attachment; filename="%s"' % basename(items)
            message.attach(part)
        except OSError:
            # Narrowed from a bare except: only a missing/unreadable file is expected here.
            messagebox.showerror("Error", "There was an error while sending attachments. Run the script to generate "
                                          "the attachments")
    try:
        # BUG FIX: context manager closes the SMTP connection (the original
        # never called quit()).
        with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:
            server.ehlo()
            server.login(sender_email, password)
            server.sendmail(sender_email, receiver_email, message.as_string())
        newWindow.destroy()
    except (smtplib.SMTPException, OSError):
        messagebox.showerror("Error", "There was an error while sending email")
def viewlog():
    """Open a previously generated log file and display it read-only."""
    filepath = askopenfilename(initialdir=os.getcwd() + "/log",
                               filetypes=[("Text Files", "*.txt"), ("CSV files", "*.csv")])
    if not filepath:
        return  # dialog cancelled (widget state untouched)
    txt_edit.config(state=tk.NORMAL)
    txt_edit.delete("1.0", tk.END)
    with open(filepath, "r") as input_file:
        txt_edit.insert(tk.END, input_file.read())
    # BUG FIX: removed a leftover debug print(text) and changed the stale
    # "Simple Text Editor" title to match the rest of the application.
    window.title(f"Switch port Consolidation - {filepath}")
    # BUG FIX: re-disable the widget; the original left it editable here.
    txt_edit.config(state=tk.DISABLED)
# ---- Main window layout and wiring ----------------------------------------
window = tk.Tk()
window.title("Respiro | Switch port Consolidation")
window.rowconfigure(0, minsize=500, weight=1)
window.columnconfigure(1, minsize=800, weight=1)
# Shared read-only output area; handlers enable it temporarily to write.
txt_edit = st.ScrolledText(window, bg='white')
fr_buttons = tk.Frame(window, bg='#7db1c9')
txt_edit.config(state=tk.DISABLED)
btn_load_source = tk.Button(fr_buttons, text="Upload source switch testbed excel file(*.xls)",
                            command=generatesourcetestbed, activebackground="#717982")
btn_load_target = tk.Button(fr_buttons, text="Upload target switch testbed excel file(*.xls)",
                            command=generatetargettestbed, activebackground="#717982")
btn_script1 = tk.Button(fr_buttons, text="Run script to check port status on switches", command=run_script1)
btn_report = tk.Button(fr_buttons, text="View recommended port migration log", command=view_report)
btn_script3 = tk.Button(fr_buttons, text="Run script to verify the port migration", command=run_targetconfig)
btn_save = tk.Button(fr_buttons, text="Save As...", command=save_file)
btn_email = tk.Button(fr_buttons, text="Send log files as email", command=openNewWindow)
btn_logs = tk.Button(fr_buttons, text="View all logs", command=viewlog)
# One column of workflow buttons down the left side, in execution order.
btn_load_source.grid(row=0, column=0, sticky="ew", padx=5, pady=5)
btn_load_target.grid(row=1, column=0, sticky="ew", padx=5, pady=5)
btn_script1.grid(row=2, column=0, sticky="ew", padx=5, pady=5)
btn_report.grid(row=3, column=0, sticky="ew", padx=5, pady=5)
btn_script3.grid(row=4, column=0, sticky="ew", padx=5, pady=5)
btn_save.grid(row=5, column=0, sticky="ew", padx=5, pady=5)
btn_email.grid(row=6, column=0, sticky="ew", padx=5, pady=5)
btn_logs.grid(row=7, column=0, sticky="ew", padx=5, pady=5)
fr_buttons.grid(row=0, column=0, sticky="ns")
txt_edit.grid(row=0, column=1, sticky="nsew")
# All workflow buttons start locked and are unlocked step by step by the
# handlers above as each stage completes.
# NOTE(review): the canonical Tk state name is "disabled"; "disable" appears
# to rely on Tk's option-prefix matching — confirm before changing.
btn_save["state"] = "disable"
btn_report["state"] = "disable"
btn_script3["state"] = "disable"
btn_script1["state"] = "disable"
btn_email["state"] = "disable"
btn_load_target["state"] = "disable"
window.mainloop()
|
993,702 | cab5f6a1adb47ac07fe22df43872e5be66ca3ebb | """
Generated by leslie in shanghai, @leadrive.
Version 2.0
name: main.py
time: 2019年3月20日10:18:32
kep o mvi
"""
from packages.for_file.modi_init_mcan_port import Modi_Init_Macan_Port
from packages.for_file.modi_Com_cbk_Adap import Modi_Com_Cbk_Adap
from packages.for_file.modi_PduR_PBcfg import Modi_PduR_PBcfg
from packages.for_file.modi_Can_PBCfg import Modi_Can_PBCfg
from packages.for_file.modi_Can_Cfg import Modi_Can_Cfg
from packages.for_file.modi_CanIf_cfg import Modi_CanIf_cfg
from packages.for_file.modi_Com_cfg import Modi_Com_cfg
from packages.basic.log import Set_Log
from packages.basic.basic_functions import Check_File,Copy_Files
import os
import sys
import csv
import logging
log_name = 'run_log.log'
log_path = os.path.abspath('.')
log = Set_Log(log_name, log_path)
def Mainfuction():
    """File-processing driver (docstring translated from Chinese '文件处理').

    Verifies the pending BSW source files, reads Configuration.csv and the
    DBC file name from it, then rewrites each generated CAN/COM
    configuration file in turn via the Modi_* helpers.  On any input error
    it prints/logs a message, waits for Enter, and exits.
    NOTE(review): 'Mainfuction' is a typo for 'Mainfunction'; kept because
    external callers may reference this name.
    """
    str_path = 'pending_file\\'
    # Files that must already exist in pending_file\ before processing starts.
    files_name = ['Can_Cfg.h', 'Can_PBCfg.c', 'CanIf_cfg.h',
                  'CanIf_PBcfg.c', 'Com_Cbk.h', 'Com_Cbk_Adap.c',
                  'Com_cfg.c', 'Com_cfg.h', 'init_mcan_port.c',
                  'PduR_cfg.h', 'PduR_PBcfg.c', 'rte_com.c',
                  'rte_com.h'
                  ]
    Check_File(files_name, files_path=str_path, logger_path=log_path)
    # copy all files
    source_dir = 'pending_file'
    target_dir = 'processed_file'
    Copy_Files(source_dir, target_dir)
    # Check and Read CSV file and initial
    try:
        with open('Configuration.csv') as file_object:
            dic = {}
            reader = csv.reader(file_object)
            header_line = ' '
            # Read "key,value" rows into dic until the reader is exhausted
            # (StopIteration) or a short row raises IndexError.
            while header_line:
                try:
                    header_line = next(reader)
                    key = header_line[0].strip()
                    dic[key] = header_line[1].strip()
                except:
                    break
            print('\nCSV file is ready.')
    except:
        print('# Configuration.csv file is not found.')
        log.info('# Configuration.csv file is not found.')
        sys.stdin.readline()  # wait for Enter so the console window stays open
        exit()
    # Expected Configuration.csv keys.
    frame_type = 'FrameType'
    bswtimebase = 'BSWTimeBase'
    dbcname = 'DBCName'
    dbcfile_name = []
    if frame_type not in dic.keys():
        print(dic)
        log.info("Spelling Error in Configuration.csv: Frame type.")
        print("Spelling Error in Configuration.csv: Frame type.")
        sys.stdin.readline()
        exit()
    # FrameType must be '0' or '1'; it selects the CAN frame mode handed to
    # the Modi_* helpers below.
    str_CAN_mode = dic[frame_type]
    if str_CAN_mode != '0' and str_CAN_mode != '1':
        log.info('FrameType can only be set as 1 and 0 in Configuration.csv. please recheck for it.')
        print('FrameType can only be set as 1 and 0 in Configuration.csv. please recheck for it.')
        sys.stdin.readline()
        exit()
    # NOTE(review): unlike FrameType, these two keys are not checked for
    # presence first — a missing BSWTimeBase/DBCName row raises KeyError here.
    str_bswtimebase = dic[bswtimebase]
    # Check DBC file
    if dic[dbcname]:
        dbcfile_name.append(dic[dbcname])
    else:
        log.info('no valid dbc file name in CSV file.')
        print('no valid dbc file name in CSV file.')
        sys.stdin.readline()
        exit()
    Check_File(dbcfile_name, logger_path=log_path)
    print(dic[dbcname]+' is ready.')
    print("\t****************All Flies are ready.*********************\t\n\n")
    log.info("**********************All Flies are ready.*************************************\n\n")
    # modify init_mcan_port
    print("#####################Step1: process init_mcan_port.c#######################")
    log.info("#####################Step1: process init_mcan_port.c#########################")
    Modi_Init_Macan_Port()
    print("**********************End process init_mcan_port.c*************************\n\n")
    log.info("**********************End process init_mcan_port.c*************************************\n\n")
    # modify Com_Cbk_Adap
    print("#####################Step2: process Com_Cbk_Adap.c#########################")
    log.info("#####################Step2: process Com_Cbk_Adap.c##########################")
    Modi_Com_Cbk_Adap(dic[dbcname],dic)
    print("*******************End process Com_Cbk_Adap.c******************************\n\n")
    log.info("**********************End process Com_Cbk_Adap.c***************************\n\n")
    # modify PduR_PBcfg
    print("#####################Step3: process PduR_PBcfg.c###########################")
    log.info("#####################Step3: process PduR_PBcfg.c##############################")
    Modi_PduR_PBcfg(dic[dbcname])
    print("***********************End process PduR_PBcfg.c****************************\n\n")
    log.info("***********************End process PduR_PBcfg.c****************************\n\n")
    # modify Can_PBCfg
    print("#####################Step4: process Can_PBCfg.c############################")
    log.info("#####################Step4: process Can_PBCfg.c###############################")
    Modi_Can_PBCfg(str_CAN_mode, dic[dbcname])
    print("***********************End process Can_PBCfg.c*****************************\n\n")
    log.info("***********************End process Can_PBCfg.c****************************\n\n")
    # modify Can_Cfg
    print("#####################Step5: process Can_Cfg.h############################")
    log.info("#####################Step5: process Can_Cfg.h###############################")
    Modi_Can_Cfg(str_CAN_mode)
    print("***********************End process Can_Cfg.h*****************************\n\n")
    log.info("***********************End process Can_Cfg.h****************************\n\n")
    # modify CanIf_cfg
    print("#####################Step6: process CanIf_cfg.h############################")
    log.info("#####################Step6: process CanIf_cfg.h###############################")
    Modi_CanIf_cfg(str_CAN_mode)
    print("***********************End process CanIf_cfg.h*****************************\n\n")
    log.info("***********************End process CanIf_cfg.h****************************\n\n")
    # modify Com_cfg.c
    print("#####################Step7: process Com_cfg.c############################")
    log.info("#####################Step7: process Com_cfg.c###############################")
    Modi_Com_cfg(str_bswtimebase, dic[dbcname])
    print("***********************End process Com_cfg.c*****************************\n\n")
    log.info("***********************End process Com_cfg.c****************************\n\n")
    print("It's OK.")
    sys.stdin.readline()  # keep the console open until the user presses Enter
|
993,703 | 255c407d1872e7906a0be1281e70f3ec8e97969c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 17:53:08 2017
@author: cbilgili
"""
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Clear the default TF graph and seed TF + NumPy RNGs for reproducible runs."""
    # Relies on the module-level `import tensorflow as tf` that appears later
    # in the file — resolved at call time, not at definition time.
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# To plot pretty figures
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rnn"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as images/<chapter>/<fig_id>.png at 300 dpi."""
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # BUG FIX: create the target directory if it is missing; the original
    # raised FileNotFoundError from savefig on a fresh checkout.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
# Starts
reset_graph()
n_inputs = 3
n_neurons = 5
X0 = tf.placeholder(tf.float32, [None, n_inputs])
X1 = tf.placeholder(tf.float32, [None, n_inputs])
Wx = tf.Variable(tf.random_normal(shape=[n_inputs, n_neurons],dtype=tf.float32))
Wy = tf.Variable(tf.random_normal(shape=[n_neurons,n_neurons],dtype=tf.float32))
b = tf.Variable(tf.zeros([1, n_neurons], dtype=tf.float32))
Y0 = tf.tanh(tf.matmul(X0, Wx) + b)
Y1 = tf.tanh(tf.matmul(Y0, Wy) + tf.matmul(X1, Wx) + b)
init = tf.global_variables_initializer()
X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0
X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1
with tf.Session() as sess:
init.run()
Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})
print(Y0_val)
print(Y1_val)
# =============================================================================
# Using static_rnn()
# =============================================================================
n_inputs = 3
n_neurons = 5
reset_graph()
X0 = tf.placeholder(tf.float32, [None, n_inputs])
X1 = tf.placeholder(tf.float32, [None, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
output_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, [X0, X1], dtype=tf.float32)
Y0, Y1 = output_seqs
init = tf.global_variables_initializer()
X0_batch = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 0, 1]]) # t = 0
X1_batch = np.array([[9, 8, 7], [0, 0, 0], [6, 5, 4], [3, 2, 1]]) # t = 1
with tf.Session() as sess:
init.run()
Y0_val, Y1_val = sess.run([Y0, Y1], feed_dict={X0: X0_batch, X1: X1_batch})
print(Y0_val)
print(Y1_val)
# Packing sequences
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
X_seqs = tf.unstack(tf.transpose(X, perm=[1, 0, 2]))
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
output_seqs, states = tf.contrib.rnn.static_rnn(basic_cell, X_seqs,
dtype=tf.float32)
outputs = tf.transpose(tf.stack(output_seqs), perm=[1, 0, 2])
init = tf.global_variables_initializer()
X_batch = np.array([
# t = 0 t = 1
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
with tf.Session() as sess:
init.run()
outputs_val = outputs.eval(feed_dict={X: X_batch})
print(outputs_val)
print(np.transpose(outputs_val, axes=[1, 0, 2])[1])
# =============================================================================
# Using dynamic_rnn()
# =============================================================================
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
with tf.Session() as sess:
init.run()
outputs_val = outputs.eval(feed_dict={X: X_batch})
print(outputs_val)
# Setting the sequence lengths
n_steps = 2
n_inputs = 3
n_neurons = 5
reset_graph()
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
seq_length = tf.placeholder(tf.int32, [None])
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,
sequence_length=seq_length)
init = tf.global_variables_initializer()
X_batch = np.array([
# step 0 step 1
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2 (padded with zero vectors)
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2, 1, 2, 2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch})
print(outputs_val)
print(states_val)
# =============================================================================
# Training a sequence classifier
# =============================================================================
reset_graph()
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)
logits = tf.layers.dense(states, n_outputs)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
n_epochs = 100
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
# =============================================================================
# Multi-layer RNN - was not on the book
# =============================================================================
reset_graph()
n_steps = 28
n_inputs = 28
n_outputs = 10
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
n_neurons = 100
n_layers = 3
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons,
activation=tf.nn.relu)
for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
states_concat = tf.concat(axis=1, values=states)
logits = tf.layers.dense(states_concat, n_outputs)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
X_batch = X_batch.reshape((-1, n_steps, n_inputs))
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
# =============================================================================
# Time series
# =============================================================================
t_min, t_max = 0, 30
resolution = 0.1
def time_series(t):
    """Synthetic signal t*sin(t)/3 + 2*sin(5t); accepts scalars or ndarrays."""
    slow_component = t * np.sin(t) / 3
    fast_component = 2 * np.sin(t * 5)
    return slow_component + fast_component
def next_batch(batch_size, n_steps):
    """Sample `batch_size` random windows of the series.

    Returns (inputs, targets), each of shape (batch_size, n_steps, 1),
    where targets are the inputs shifted one step into the future.
    """
    starts = np.random.rand(batch_size, 1) * (t_max - t_min - n_steps * resolution)
    sample_times = starts + np.arange(0., n_steps + 1) * resolution
    values = time_series(sample_times)
    inputs = values[:, :-1].reshape(-1, n_steps, 1)
    targets = values[:, 1:].reshape(-1, n_steps, 1)
    return inputs, targets
t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))
n_steps = 20
t_instance = np.linspace(12.2, 12.2 + resolution * (n_steps + 1), n_steps + 1)
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.title("A time series (generated)", fontsize=14)
plt.plot(t, time_series(t), label=r"$t . \sin(t) / 3 + 2 . \sin(5t)$")
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "b-", linewidth=3, label="A training instance")
plt.legend(loc="lower left", fontsize=14)
plt.axis([0, 30, -17, 13])
plt.xlabel("Time")
plt.ylabel("Value")
plt.subplot(122)
plt.title("A training instance", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.legend(loc="upper left")
plt.xlabel("Time")
save_fig("time_series_plot")
plt.show()
X_batch, y_batch = next_batch(1, n_steps)
np.c_[X_batch[0], y_batch[0]]
# Using an OuputProjectionWrapper
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# Using an OuputProjectionWrapper
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.OutputProjectionWrapper(
tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu),
output_size=n_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
learning_rate = 0.001
loss = tf.reduce_mean(tf.square(outputs - y)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(iteration, "\tMSE:", mse)
saver.save(sess, "./my_time_series_model") # not shown in the book
with tf.Session() as sess: # not shown in the book
saver.restore(sess, "./my_time_series_model") # not shown
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
print(y_pred)
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
save_fig("time_series_pred_plot")
plt.show()
# Without using an OutputProjectionWrapper
reset_graph()
n_steps = 20
n_inputs = 1
n_neurons = 100
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons, activation=tf.nn.relu)
rnn_outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
n_outputs = 1
learning_rate = 0.001
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
X_batch, y_batch = next_batch(batch_size, n_steps)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
if iteration % 100 == 0:
mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
print(iteration, "\tMSE:", mse)
X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
y_pred = sess.run(outputs, feed_dict={X: X_new})
saver.save(sess, "./my_time_series_model")
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
# Generating a creative new sequence
# Seed the model with zeros and repeatedly feed its own last prediction back in.
with tf.Session() as sess:                        # not shown in the book
    saver.restore(sess, "./my_time_series_model") # not shown
    sequence = [0.] * n_steps
    for iteration in range(300):
        # always predict from the most recent n_steps values
        X_batch = np.array(sequence[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence.append(y_pred[0, -1, 0])
plt.figure(figsize=(8,4))
plt.plot(np.arange(len(sequence)), sequence, "b-")
plt.plot(t[:n_steps], sequence[:n_steps], "b-", linewidth=3)
plt.xlabel("Time")
plt.ylabel("Value")
plt.show()
# 2
# Same free-running generation, from two different seeds: all-zeros and a
# window of the true series.
with tf.Session() as sess:
    saver.restore(sess, "./my_time_series_model")
    sequence1 = [0. for i in range(n_steps)]
    for iteration in range(len(t) - n_steps):
        X_batch = np.array(sequence1[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence1.append(y_pred[0, -1, 0])
    # NOTE(review): `(t_max-t_min/3)` divides only t_min by 3; the book's
    # version is `(t_max-t_min)/3` — likely an operator-precedence slip, confirm.
    sequence2 = [time_series(i * resolution + t_min + (t_max-t_min/3)) for i in range(n_steps)]
    for iteration in range(len(t) - n_steps):
        X_batch = np.array(sequence2[-n_steps:]).reshape(1, n_steps, 1)
        y_pred = sess.run(outputs, feed_dict={X: X_batch})
        sequence2.append(y_pred[0, -1, 0])
plt.figure(figsize=(11,4))
plt.subplot(121)
plt.plot(t, sequence1, "b-")
plt.plot(t[:n_steps], sequence1[:n_steps], "b-", linewidth=3)
plt.xlabel("Time")
plt.ylabel("Value")
plt.subplot(122)
plt.plot(t, sequence2, "b-")
plt.plot(t[:n_steps], sequence2[:n_steps], "b-", linewidth=3)
plt.xlabel("Time")
save_fig("creative_sequence_plot")
plt.show()
# =============================================================================
# Deep RNN
# =============================================================================
# Stack 3 basic RNN cells into a MultiRNNCell and run one forward pass on
# random data, just to demonstrate the output shape (batch, steps, neurons).
reset_graph()
n_inputs = 2
n_steps = 5
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
n_neurons = 100
n_layers = 3
# one fresh cell object per layer — they must not be shared
layers = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
          for layer in range(n_layers)]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(layers)
outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.random.rand(2, n_steps, n_inputs)
with tf.Session() as sess:
    init.run()
    outputs_val, states_val = sess.run([outputs, states], feed_dict={X: X_batch})
    print(outputs_val.shape)
# =============================================================================
# Dropout
# =============================================================================
# Deep RNN with input dropout, trained on the same synthetic time series.
reset_graph()
n_inputs = 1
n_neurons = 100
n_layers = 3
n_steps = 20
n_outputs = 1
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_steps, n_outputs])
# defaults to 1.0 (no dropout), so evaluation/inference needs no feed
keep_prob = tf.placeholder_with_default(1.0, shape=())
cells = [tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
         for layer in range(n_layers)]
# dropout is applied to each cell's INPUTS only (input_keep_prob)
cells_drop = [tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=keep_prob)
              for cell in cells]
multi_layer_cell = tf.contrib.rnn.MultiRNNCell(cells_drop)
rnn_outputs, states = tf.nn.dynamic_rnn(multi_layer_cell, X, dtype=tf.float32)
learning_rate = 0.01
# per-step linear projection of the RNN outputs, as in the earlier section
stacked_rnn_outputs = tf.reshape(rnn_outputs, [-1, n_neurons])
stacked_outputs = tf.layers.dense(stacked_rnn_outputs, n_outputs)
outputs = tf.reshape(stacked_outputs, [-1, n_steps, n_outputs])
loss = tf.reduce_mean(tf.square(outputs - y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_iterations = 1500
batch_size = 50
train_keep_prob = 0.5
with tf.Session() as sess:
    init.run()
    for iteration in range(n_iterations):
        X_batch, y_batch = next_batch(batch_size, n_steps)
        _, mse = sess.run([training_op, loss],
                          feed_dict={X: X_batch, y: y_batch,
                                     keep_prob: train_keep_prob})
        if iteration % 100 == 0: # not shown in the book
            print(iteration, "Training MSE:", mse) # not shown
    saver.save(sess, "./my_dropout_time_series_model")
with tf.Session() as sess:
    saver.restore(sess, "./my_dropout_time_series_model")
    # keep_prob is not fed here, so it takes its default 1.0 (dropout off)
    X_new = time_series(np.array(t_instance[:-1].reshape(-1, n_steps, n_inputs)))
    y_pred = sess.run(outputs, feed_dict={X: X_new})
plt.title("Testing the model", fontsize=14)
plt.plot(t_instance[:-1], time_series(t_instance[:-1]), "bo", markersize=10, label="instance")
plt.plot(t_instance[1:], time_series(t_instance[1:]), "w*", markersize=10, label="target")
plt.plot(t_instance[1:], y_pred[0,:,0], "r.", markersize=10, label="prediction")
plt.legend(loc="upper left")
plt.xlabel("Time")
plt.show()
# =============================================================================
# LSTM - Long Short-Term Memory
# =============================================================================
# 3-layer LSTM classifying MNIST digits: each 28x28 image is treated as a
# sequence of 28 rows (steps) of 28 pixels (inputs).
reset_graph()
# NOTE(review): this cell is built with the OLD value of n_neurons (set in the
# previous section) and is never used anywhere below — looks like dead code.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
n_steps = 28
n_inputs = 28
n_neurons = 150
n_outputs = 10
n_layers = 3
learning_rate = 0.001
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.int32, [None])
# NOTE(review): depends on an `mnist` dataset object loaded earlier in the file.
X_test = mnist.test.images.reshape((-1, n_steps, n_inputs))
y_test = mnist.test.labels
lstm_cells = [tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
              for layer in range(n_layers)]
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
# states[-1] is the top layer's LSTMStateTuple (c, h); index [1] takes h
top_layer_h_state = states[-1][1]
logits = tf.layers.dense(top_layer_h_state, n_outputs, name="softmax")
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(mnist.train.num_examples // batch_size):
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            X_batch = X_batch.reshape((batch_size, n_steps, n_inputs))
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        # per-epoch accuracy: last training batch vs. full test set
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print("Epoch", epoch, "Train accuracy =", acc_train, "Test accuracy =", acc_test)
993,704 | bc7d46854265c1ff86bf9cc1e951118c5fd7c50f | from commands.base_command import BaseCommand
import utils
class End(BaseCommand):
    """Bot command that ends the current match.

    Delegates the actual member moves to ``utils.end``.
    """

    def __init__(self):
        # no parameters; a fixed description registered with the base command
        super().__init__(
            "Moves everyone from attacking and defending to lobby",
            None,
        )

    async def handle(self, params, message, client):
        """Execute the command: move all participants back to the lobby."""
        await utils.end(client)
|
993,705 | d08fcfe5aab257d07870d021de0c4c70829ae9be | from __future__ import absolute_import
from collections import defaultdict
import json
import requests
import os
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group, User
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render_to_response, render
from django.core import exceptions as ex
from django.template import RequestContext
from django.core import signing
from django import forms
from rest_framework.decorators import api_view
from mezzanine.conf import settings
from mezzanine.pages.page_processors import processor_for
import autocomplete_light
from inplaceeditform.commons import get_dict_from_obj, apply_filters
from inplaceeditform.views import _get_http_response, _get_adaptor
from django_irods.storage import IrodsStorage
from hs_core import hydroshare
from hs_core.hydroshare import get_resource_list
from hs_core.hydroshare.utils import get_resource_by_shortkey, resource_modified, user_from_id
from .utils import authorize, upload_from_irods
from hs_core.models import ResourceFile, GenericResource, resource_processor, CoreMetaData
from . import resource_rest_api
from . import user_rest_api
from hs_core.hydroshare import utils
from . import utils as view_utils
from hs_core.hydroshare import file_size_limit_for_display
from hs_core.signals import *
def short_url(request, *args, **kwargs):
    """Redirect a short resource URL to the resource's landing page."""
    if 'shortkey' not in kwargs:
        raise TypeError('shortkey must be specified...')
    resource = get_resource_by_shortkey(kwargs['shortkey'])
    return HttpResponseRedirect(resource.get_absolute_url())
def verify(request, *args, **kwargs):
    """Activate a user account from a signed email-verification token.

    The signed token decodes to '<tag>:<pk>:<email>'.  If the email matches
    the stored user, the account is activated (if needed), added to the
    "Hydroshare Author" group, logged in, and redirected to the account
    update page; otherwise an error message is flashed and the user is sent
    to the home page.
    """
    _, pk, email = signing.loads(kwargs['token']).split(':')
    u = User.objects.get(pk=pk)
    if u.email == email:
        if not u.is_active:
            u.is_active=True
            u.save()
            u.groups.add(Group.objects.get(name="Hydroshare Author"))
        from django.contrib.auth import login
        # log in directly by stamping the first configured auth backend
        u.backend = settings.AUTHENTICATION_BACKENDS[0]
        login(request, u)
        return HttpResponseRedirect('/account/update/')
    else:
        from django.contrib import messages
        messages.error(request, "Your verification token was invalid.")
    return HttpResponseRedirect('/')
def add_file_to_resource(request, shortkey, *args, **kwargs):
    """Upload one or more files to an existing resource.

    Runs the pre-process validation hook and then the add-process hook; any
    size or validation failure is stashed in the session and the user is
    redirected back to the referring page.

    NOTE(review): on full success this view falls off the end and returns
    None (no HTTP response) — confirm whether a redirect is missing here.
    """
    resource, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    res_files = request.FILES.getlist('files')
    # 'extract-metadata' arrives as 'Yes'/'No' from the upload form
    extract_metadata = request.REQUEST.get('extract-metadata', 'No')
    extract_metadata = True if extract_metadata.lower() == 'yes' else False
    try:
        utils.resource_file_add_pre_process(resource=resource, files=res_files, user=request.user,
                                            extract_metadata=extract_metadata)
    except hydroshare.utils.ResourceFileSizeException as ex:
        request.session['file_size_error'] = ex.message
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        # NOTE(review): `(SpecificException, Exception)` catches everything;
        # presumably intentional best-effort handling — confirm.
        request.session['file_validation_error'] = ex.message
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
    try:
        hydroshare.utils.resource_file_add_process(resource=resource, files=res_files, user=request.user,
                                                   extract_metadata=extract_metadata)
    except (hydroshare.utils.ResourceFileValidationException, Exception) as ex:
        request.session['file_validation_error'] = ex.message
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
def _get_resource_sender(element_name, resource):
    """Return the class to use as the signal sender for *element_name*.

    Core metadata elements are dispatched with the generic resource class;
    everything else uses the concrete class of *resource*.
    """
    core_names = {name.lower() for name in CoreMetaData.get_supported_element_names()}
    if element_name in core_names:
        return GenericResource().__class__
    return resource.__class__
def get_supported_file_types_for_resource_type(request, resource_type, *args, **kwargs):
    """Return, as JSON, the upload file types supported by *resource_type*.

    BUG FIX: the original tested ``request.is_ajax`` — the bound method
    object, which is always truthy — instead of calling ``request.is_ajax()``,
    so the non-AJAX fallback branch was unreachable.
    """
    resource_cls = hydroshare.check_resource_type(resource_type)
    if request.is_ajax():
        # TODO: use try catch
        # value is double-encoded (JSON string inside a JSON object), kept as
        # the original did for client compatibility
        ajax_response_data = {'file_types': json.dumps(resource_cls.get_supported_upload_file_types())}
        return HttpResponse(json.dumps(ajax_response_data))
    else:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
def is_multiple_file_allowed_for_resource_type(request, resource_type, *args, **kwargs):
    """Return, as JSON, whether *resource_type* accepts multiple files.

    BUG FIX: the original tested ``request.is_ajax`` (the method object,
    always truthy) instead of calling ``request.is_ajax()``, making the
    non-AJAX redirect branch unreachable.
    """
    resource_cls = hydroshare.check_resource_type(resource_type)
    if request.is_ajax():
        # TODO: use try catch
        ajax_response_data = {'allow_multiple_file': resource_cls.can_have_multiple_files()}
        return HttpResponse(json.dumps(ajax_response_data))
    else:
        return HttpResponseRedirect(request.META['HTTP_REFERER'])
def add_metadata_element(request, shortkey, element_name, *args, **kwargs):
    """Create a metadata element on a resource from a signal-validated form.

    Sends the pre-create signal so the resource type can validate the posted
    data; on success creates the element ('subject' is special-cased: the
    comma-separated value replaces all existing subjects).  Responds with a
    JSON status for AJAX requests, otherwise redirects to the referrer.
    """
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    sender_resource = _get_resource_sender(element_name, res)
    handler_response = pre_metadata_element_create.send(sender=sender_resource, element_name=element_name,
                                                        request=request)
    is_add_success = False
    for receiver, response in handler_response:
        if 'is_valid' in response:
            if response['is_valid']:
                element_data_dict = response['element_data_dict']
                if element_name == 'subject':
                    # replace all existing keywords with the posted list
                    keywords = [k.strip() for k in element_data_dict['value'].split(',')]
                    if res.metadata.subjects.all().count() > 0:
                        res.metadata.subjects.all().delete()
                    for kw in keywords:
                        res.metadata.create_element(element_name, value=kw)
                else:
                    element = res.metadata.create_element(element_name, **element_data_dict)
                is_add_success = True
                resource_modified(res, request.user)
    if request.is_ajax():
        if is_add_success:
            if res.metadata.has_all_required_elements():
                metadata_status = "Sufficient to make public"
            else:
                metadata_status = "Insufficient to make public"
            if element_name == 'subject':
                # no single element id exists for subjects (many were created)
                ajax_response_data = {'status': 'success', 'element_name': element_name, 'metadata_status': metadata_status}
            else:
                ajax_response_data = {'status': 'success', 'element_id': element.id, 'element_name': element_name, 'metadata_status': metadata_status}
            return HttpResponse(json.dumps(ajax_response_data))
        else:
            ajax_response_data = {'status': 'error'}
            return HttpResponse (json.dumps(ajax_response_data))
    if 'resource-mode' in request.POST:
        request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def update_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """Update an existing metadata element after signal-based validation.

    Mirrors add_metadata_element: the pre-update signal validates the posted
    data; on success the element is updated.  A title change is copied onto
    the resource itself, and a resource that no longer satisfies the
    public-visibility requirements is made private again.
    """
    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    sender_resource = _get_resource_sender(element_name, res)
    handler_response = pre_metadata_element_update.send(sender=sender_resource, element_name=element_name,
                                                        element_id=element_id, request=request)
    is_update_success = False
    is_redirect = False
    for receiver, response in handler_response:
        if 'is_valid' in response:
            if response['is_valid']:
                element_data_dict = response['element_data_dict']
                res.metadata.update_element(element_name, element_id, **element_data_dict)
                if element_name == 'title':
                    # keep the page title in sync with the metadata title
                    res.title = res.metadata.title.value
                    res.save()
                    if res.public:
                        # demote to private if required metadata is now missing
                        if not res.can_be_public:
                            res.public = False
                            res.save()
                            is_redirect = True
                resource_modified(res, request.user)
                is_update_success = True
    if request.is_ajax():
        if is_update_success:
            if res.metadata.has_all_required_elements():
                metadata_status = "Sufficient to make public"
            else:
                metadata_status = "Insufficient to make public"
            ajax_response_data = {'status': 'success', 'element_name': element_name, 'metadata_status': metadata_status}
            return HttpResponse(json.dumps(ajax_response_data))
        else:
            ajax_response_data = {'status': 'error'}
            return HttpResponse(json.dumps(ajax_response_data))
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
@api_view(['GET'])
def file_download_url_mapper(request, shortkey, filename):
    """ maps the file URIs in resourcemap document to django_irods download view function"""
    # any granted access level is acceptable for downloading
    authorize(request, shortkey, view=True, edit=True, full=True, superuser=True)
    # rebuild the iRODS path from the URL, dropping the leading prefix
    # segments and the trailing component
    irods_file_path = '/'.join(request.path.split('/')[2:-1])
    istorage = IrodsStorage()
    file_download_url = istorage.url(irods_file_path)
    return HttpResponseRedirect(file_download_url)
def delete_metadata_element(request, shortkey, element_name, element_id, *args, **kwargs):
    """Delete one metadata element and return to the referrer in edit mode."""
    resource, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    resource.metadata.delete_element(element_name, element_id)
    resource_modified(resource, request.user)
    request.session['resource-mode'] = 'edit'
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_file(request, shortkey, f, *args, **kwargs):
    """Remove file *f* from the resource identified by *shortkey*."""
    resource, _, acting_user = authorize(request, shortkey, edit=True, full=True, superuser=True)
    hydroshare.delete_resource_file(shortkey, f, acting_user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def delete_resource(request, shortkey, *args, **kwargs):
    """Delete an entire resource, then go back to the user's resource list."""
    resource, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    resource.delete()
    return HttpResponseRedirect('/my-resources/')
def publish(request, shortkey, *args, **kwargs):
    """Formally publish a resource: revoke edit access, freeze it, and
    reserve a DOI, then bounce back to the referring page."""
    resource, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    # once published, nobody may edit the resource any longer
    resource.edit_users = []
    resource.edit_groups = []
    resource.published_and_frozen = True
    resource.doi = "to be assigned"
    resource.save()
    resource_modified(resource, request.user)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
def change_permissions(request, shortkey, *args, **kwargs):
    """Update access control for a resource.

    The POST parameter ``t`` selects the action: wholesale replacement of an
    access list ('owners', 'edit_users', 'edit_groups', 'view_users',
    'view_groups'), incremental additions ('add_view_user', 'add_edit_user',
    'add_view_group', 'add_edit_group', 'add_owner'), or publicity toggles
    ('make_public', 'make_private').  Always redirects back to the referrer.
    """
    class AddUserForm(forms.Form):
        user = forms.ModelChoiceField(User.objects.all(), widget=autocomplete_light.ChoiceWidget("UserAutocomplete"))

    class AddGroupForm(forms.Form):
        group = forms.ModelChoiceField(Group.objects.all(), widget=autocomplete_light.ChoiceWidget("GroupAutocomplete"))

    res, _, _ = authorize(request, shortkey, edit=True, full=True, superuser=True)
    t = request.POST['t']
    values = [int(k) for k in request.POST.getlist('designees', [])]
    if t == 'owners':
        res.owners = User.objects.in_bulk(values)
    elif t == 'edit_users':
        res.edit_users = User.objects.in_bulk(values)
    elif t == 'edit_groups':
        res.edit_groups = Group.objects.in_bulk(values)
    elif t == 'view_users':
        res.view_users = User.objects.in_bulk(values)
    elif t == 'view_groups':
        res.view_groups = Group.objects.in_bulk(values)
    elif t == 'add_view_user':
        frm = AddUserForm(data=request.POST)
        if frm.is_valid():
            res.view_users.add(frm.cleaned_data['user'])
    elif t == 'add_edit_user':
        frm = AddUserForm(data=request.POST)
        if frm.is_valid():
            res.edit_users.add(frm.cleaned_data['user'])
    elif t == 'add_view_group':
        frm = AddGroupForm(data=request.POST)
        if frm.is_valid():
            res.view_groups.add(frm.cleaned_data['group'])
    elif t == 'add_edit_group':
        # BUG FIX: this branch was previously labelled 'add_view_group' a
        # second time, making it unreachable — edit groups could never be
        # added even though the body adds to res.edit_groups.
        frm = AddGroupForm(data=request.POST)
        if frm.is_valid():
            res.edit_groups.add(frm.cleaned_data['group'])
    elif t == 'add_owner':
        frm = AddUserForm(data=request.POST)
        if frm.is_valid():
            res.owners.add(frm.cleaned_data['user'])
    elif t == 'make_public':
        #if res.metadata.has_all_required_elements():
        # only resources meeting the public-visibility requirements may go public
        if res.can_be_public:
            res.public = True
            res.save()
    elif t == 'make_private':
        res.public = False
        res.save()
    return HttpResponseRedirect(request.META['HTTP_REFERER'])
# view functions mapped with INPLACE_SAVE_URL(/hsapi/save_inline/) for Django inplace editing
def save_ajax(request):
    """Persist an inplace-edit change submitted by django-inplaceedit.

    Resolves the adaptor for the posted field, validates the new value with
    the model form, saves it, and returns a JSON response with either the
    rendered value or the validation errors.
    NOTE(review): uses the py2-only builtin `unicode` — this view will raise
    NameError under Python 3.
    """
    if not request.method == 'POST':
        return _get_http_response({'errors': 'It is not a POST request'})
    adaptor = _get_adaptor(request, 'POST')
    if not adaptor:
        return _get_http_response({'errors': 'Params insufficient'})
    if not adaptor.can_edit():
        return _get_http_response({'errors': 'You can not edit this content'})
    value = adaptor.loads_to_post(request)
    new_data = get_dict_from_obj(adaptor.obj)
    form_class = adaptor.get_form_class()
    field_name = adaptor.field_name
    # mezzanine pages require in_menus to be present for form validation
    new_data['in_menus'] = ''
    form = form_class(data=new_data, instance=adaptor.obj)
    try:
        value_edit = adaptor.get_value_editor(value)
        value_edit_with_filter = apply_filters(value_edit, adaptor.filters_to_edit)
        new_data[field_name] = value_edit_with_filter
        if form.is_valid():
            adaptor.save(value_edit_with_filter)
            return _get_http_response({'errors': False,
                                       'value': adaptor.render_value_edit()})
        messages = [] # The error is for another field that you are editing
        for field_name_error, errors_field in form.errors.items():
            for error in errors_field:
                messages.append("%s: %s" % (field_name_error, unicode(error)))
        message_i18n = ','.join(messages)
        return _get_http_response({'errors': message_i18n})
    except ValidationError as error: # The error is for a field that you are editing
        message_i18n = ', '.join([u"%s" % m for m in error.messages])
        return _get_http_response({'errors': message_i18n})
class CaptchaVerifyForm(forms.Form):
    # legacy reCAPTCHA verification payload: the challenge string shown to
    # the user and the user's typed response
    challenge = forms.CharField()
    response = forms.CharField()
def verify_captcha(request):
    """Validate a reCAPTCHA challenge/response pair against Google.

    Returns a plain-text 'true' response on success and raises
    PermissionDenied when the captcha check fails.
    NOTE(review): when the form itself is invalid this view falls through and
    implicitly returns None — confirm whether that is intended.
    """
    f = CaptchaVerifyForm(request.POST)
    if f.is_valid():
        params = dict(f.cleaned_data)
        # fall back to a hard-coded key when RECAPTCHA_PRIVATE_KEY is unset
        params['privatekey'] = getattr(settings, 'RECAPTCHA_PRIVATE_KEY', '6LdNC_USAAAAADNdzytMK2-qmDCzJcgybFkw8Z5x')
        params['remoteip'] = request.META['REMOTE_ADDR']
        # return HttpResponse('true', content_type='text/plain')
        resp = requests.post('http://www.google.com/recaptcha/api/verify', params=params)
        # the legacy verify API answers 'true' or 'false' on the first line
        lines = resp.text.split('\n')
        if lines[0].startswith('false'):
            raise ex.PermissionDenied('captcha failed')
        else:
            return HttpResponse('true', content_type='text/plain')
def verify_account(request, *args, **kwargs):
    """Render the account-verification page for the given username/email."""
    return render_to_response(
        'pages/verify-account.html',
        {'username': request.GET['username'], 'email': request.GET['email']},
        context_instance=RequestContext(request),
    )
@processor_for('resend-verification-email')
def resend_verification_email(request):
    """Re-send the account-verification email for the given username/email.

    Best-effort: a delivery failure must not break page rendering, but it is
    now logged instead of being silently discarded (the original used a bare
    ``except: pass``, which hid every failure including programming errors).
    """
    import logging
    u = get_object_or_404(User, username=request.GET['username'], email=request.GET['email'])
    try:
        # signed token embeds the user's pk and email for later verification
        token = signing.dumps('verify_user_email:{0}:{1}'.format(u.pk, u.email))
        u.email_user(
            'Please verify your new Hydroshare account.',
            """
        This is an automated email from Hydroshare.org. If you requested a Hydroshare account, please
        go to http://{domain}/verify/{token}/ and verify your account.
        """.format(
            domain=Site.objects.get_current().domain,
            token=token
        ))
        context = {
            'is_email_sent' : True
        }
        return render_to_response('pages/verify-account.html', context, context_instance=RequestContext(request))
    except Exception:
        # keep swallowing so the page does not 500, but record the failure
        logging.getLogger(__name__).exception('Failed to resend verification email')
class FilterForm(forms.Form):
    # filter/pagination parameters for the my-resources listing
    start = forms.IntegerField(required=False)          # pagination offset
    published = forms.BooleanField(required=False)      # only published resources
    edit_permission = forms.BooleanField(required=False)  # only editable resources
    owner = forms.CharField(required=False)             # filter by owner name
    user = forms.ModelChoiceField(queryset=User.objects.all(), required=False)
    from_date = forms.DateTimeField(required=False)     # modified-since filter
@processor_for('my-resources')
def my_resources(request, page):
    """Build the template context for the my-resources listing page.

    Validates the filter form, gathers matching resources via
    get_resource_list, then paginates and sorts them in Python.
    NOTE(review): `startno < 0` compares None with int when 'start' is
    omitted — that only works under Python 2; confirm before porting.
    """
    # import sys
    # sys.path.append("/home/docker/pycharm-debug")
    # import pydevd
    # pydevd.settrace('172.17.42.1', port=21000, suspend=False)
    frm = FilterForm(data=request.REQUEST)
    if frm.is_valid():
        res_cnt = 20 # 20 is hardcoded for the number of resources to show on one page, which is also hardcoded in my-resources.html
        owner = frm.cleaned_data['owner'] or None
        user = frm.cleaned_data['user'] or (request.user if request.user.is_authenticated() else None)
        edit_permission = frm.cleaned_data['edit_permission'] or False
        published = frm.cleaned_data['published'] or False
        startno = frm.cleaned_data['start']
        if(startno < 0):
            startno = 0
        start = startno or 0
        from_date = frm.cleaned_data['from_date'] or None
        words = request.REQUEST.get('text', None)
        # anonymous visitors only ever see public resources
        public = not request.user.is_authenticated()
        search_items = dict(
            (item_type, [t.strip() for t in request.REQUEST.getlist(item_type)])
            for item_type in ("type", "author", "contributor", "subject")
        )
        # TODO ten separate SQL queries for basically the same data
        res = set()
        for lst in get_resource_list(
                user=user,
                owner= owner,
                published=published,
                edit_permission=edit_permission,
                from_date=from_date,
                full_text_search=words,
                public=public,
                **search_items
        ).values():
            res = res.union(lst)
        total_res_cnt = len(res)
        reslst = list(res)
        # need to return total number of resources as 'ct' so have to get all resources
        # and then filter by start and count
        # TODO this is doing some pagination/limits before sorting, so it won't be consistent
        # clamp the requested window so it lies inside [0, total_res_cnt)
        if(start>=total_res_cnt):
            start = total_res_cnt-res_cnt
        if(start < 0):
            start = 0
        if(start+res_cnt > total_res_cnt):
            res_cnt = total_res_cnt-start
        reslst = reslst[start:start+res_cnt]
        # TODO sorts should be in SQL not python
        res = sorted(reslst, key=lambda x: x.title)
        return {
            'resources': res,
            'first': start,
            'last': start+len(res),
            'ct': total_res_cnt,
        }
@processor_for(GenericResource)
def add_generic_context(request, page):
    """Template context shared by all generic resource landing pages:
    current access lists plus the autocomplete forms used to extend them."""
    class AddUserForm(forms.Form):
        user = forms.ModelChoiceField(User.objects.all(), widget=autocomplete_light.ChoiceWidget("UserAutocomplete"))

    class AddGroupForm(forms.Form):
        group = forms.ModelChoiceField(Group.objects.all(), widget=autocomplete_light.ChoiceWidget("GroupAutocomplete"))

    cm = page.get_content_model()
    return {
        'resource_type': cm._meta.verbose_name,
        'bag': cm.bags.first(),
        'users': User.objects.all(),
        'groups': Group.objects.all(),
        'owners': set(cm.owners.all()),
        'view_users': set(cm.view_users.all()),
        'view_groups': set(cm.view_groups.all()),
        'edit_users': set(cm.edit_users.all()),
        'edit_groups': set(cm.edit_groups.all()),
        'add_owner_user_form': AddUserForm(),
        'add_view_user_form': AddUserForm(),
        'add_edit_user_form': AddUserForm(),
        'add_view_group_form': AddGroupForm(),
        'add_edit_group_form': AddGroupForm(),
    }
# Module-level scratch globals; nothing in the views below appears to read
# them — NOTE(review): confirm they are unused before removing.
res_cls = ""
resource = None
@login_required
def create_resource_select_resource_type(request, *args, **kwargs):
    """Show the page where the user picks the type of resource to create."""
    return render_to_response(
        'pages/create-resource.html',
        context_instance=RequestContext(request),
    )
@login_required
def create_resource(request, *args, **kwargs):
    """Create a new resource from the posted form.

    Optionally pulls a file down from iRODS first, runs the pre-create hooks
    (which may redirect to a type-specific page), creates the resource, runs
    the post-create hooks, and redirects to the new landing page.  Any
    failure re-renders the create page with an error message.
    """
    resource_type = request.POST['resource-type']
    res_title = request.POST['title']
    resource_files = request.FILES.getlist('files')
    irods_fname = request.POST.get('irods_file_name')
    if irods_fname:
        # fetch the named file from the user's iRODS account into resource_files
        user = request.POST.get('irods-username')
        password = request.POST.get("irods-password")
        port = request.POST.get("irods-port")
        host = request.POST.get("irods-host")
        zone = request.POST.get("irods-zone")
        try:
            upload_from_irods(username=user, password=password, host=host, port=port,
                              zone=zone, irods_fname=irods_fname, res_files=resource_files)
        except Exception as ex:
            context = {'resource_creation_error': ex.message}
            return render_to_response('pages/create-resource.html', context, context_instance=RequestContext(request))
    url_key = "page_redirect_url"
    try:
        page_url_dict, res_title, metadata = hydroshare.utils.resource_pre_create_actions(resource_type=resource_type, files=resource_files,
                                                                                          resource_title=res_title,
                                                                                          page_redirect_url_key=url_key, **kwargs)
    except utils.ResourceFileSizeException as ex:
        context = {'file_size_error': ex.message}
        return render_to_response('pages/create-resource.html', context, context_instance=RequestContext(request))
    except utils.ResourceFileValidationException as ex:
        context = {'file_validation_error': ex.message}
        return render_to_response('pages/create-resource.html', context, context_instance=RequestContext(request))
    except Exception as ex:
        context = {'resource_creation_error': ex.message}
        return render_to_response('pages/create-resource.html', context, context_instance=RequestContext(request))
    # a resource type may request an intermediate page before actual creation
    if url_key in page_url_dict:
        return render(request, page_url_dict[url_key], {'title': res_title, 'metadata': metadata})
    try:
        resource = hydroshare.create_resource(
                resource_type=request.POST['resource-type'],
                owner=request.user,
                title=res_title,
                keywords=None,
                metadata=metadata,
                files=resource_files,
                content=res_title
        )
    except Exception as ex:
        context = {'resource_creation_error': ex.message }
        return render_to_response('pages/create-resource.html', context, context_instance=RequestContext(request))
    try:
        utils.resource_post_create_actions(resource=resource, user=request.user, metadata=metadata, **kwargs)
    except (utils.ResourceFileValidationException, Exception) as ex:
        # post-create validation failures are surfaced on the landing page
        request.session['file_validation_error'] = ex.message
    # go to resource landing page
    request.session['just_created'] = True
    return HttpResponseRedirect(resource.get_absolute_url())
@login_required
def get_file(request, *args, **kwargs):
    """Fetch the iRODS object *name* and stream it back as binary.

    BUG FIX: ``iget`` downloads the object into the local file
    ``'tempfile.' + name``, but the original opened ``name`` (the remote
    path) — a file that does not exist locally.
    """
    from django_irods.icommands import RodsSession
    name = kwargs['name']
    session = RodsSession("./", "/usr/bin")
    session.runCmd("iinit")
    # download iRODS object `name` into the local file 'tempfile.<name>'
    session.runCmd('iget', [name, 'tempfile.' + name])
    return HttpResponse(open('tempfile.' + name), content_type='x-binary/octet-stream')
# register the shared resource page processor for GenericResource pages
processor_for(GenericResource)(resource_processor)
@processor_for('resources')
def resource_listing_processor(request, page):
    """Context for the resource listing page: owned, editable, and public
    resources for the current user.

    NOTE(review): editable_resources uses the exact same owners__pk filter
    as owned_resources — it presumably should filter on edit users; confirm.
    """
    owned_resources = list(GenericResource.objects.filter(owners__pk=request.user.pk))
    editable_resources = list(GenericResource.objects.filter(owners__pk=request.user.pk))
    viewable_resources = list(GenericResource.objects.filter(public=True))
    # locals() exposes the three lists above (plus request/page) to the template
    return locals()
# FIXME need a task somewhere that amounts to checking inactive accounts and deleting them after 30 days.
|
993,706 | 8424f45487a60966851de466d9282a2056be8e24 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['DedicatedIpAssignmentArgs', 'DedicatedIpAssignment']
@pulumi.input_type
class DedicatedIpAssignmentArgs:
    def __init__(__self__, *,
                 destination_pool_name: pulumi.Input[str],
                 ip: pulumi.Input[str]):
        """
        The set of arguments for constructing a DedicatedIpAssignment resource.
        :param pulumi.Input[str] destination_pool_name: Name of the dedicated IP pool to which the IP address is assigned.
        :param pulumi.Input[str] ip: Dedicated IP address.
        """
        pulumi.set(__self__, "destination_pool_name", destination_pool_name)
        pulumi.set(__self__, "ip", ip)
    @property
    @pulumi.getter(name="destinationPoolName")
    def destination_pool_name(self) -> pulumi.Input[str]:
        """
        Name of the dedicated IP pool to which the IP address is assigned.
        """
        return pulumi.get(self, "destination_pool_name")
    @destination_pool_name.setter
    def destination_pool_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination_pool_name", value)
    @property
    @pulumi.getter
    def ip(self) -> pulumi.Input[str]:
        """
        Dedicated IP address.
        """
        return pulumi.get(self, "ip")
    @ip.setter
    def ip(self, value: pulumi.Input[str]):
        pulumi.set(self, "ip", value)
@pulumi.input_type
class _DedicatedIpAssignmentState:
    def __init__(__self__, *,
                 destination_pool_name: Optional[pulumi.Input[str]] = None,
                 ip: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering DedicatedIpAssignment resources.
        :param pulumi.Input[str] destination_pool_name: Name of the dedicated IP pool to which the IP address is assigned.
        :param pulumi.Input[str] ip: Dedicated IP address.
        """
        if destination_pool_name is not None:
            pulumi.set(__self__, "destination_pool_name", destination_pool_name)
        if ip is not None:
            pulumi.set(__self__, "ip", ip)
    @property
    @pulumi.getter(name="destinationPoolName")
    def destination_pool_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the dedicated IP pool to which the IP address is assigned.
        """
        return pulumi.get(self, "destination_pool_name")
    @destination_pool_name.setter
    def destination_pool_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_pool_name", value)
    @property
    @pulumi.getter
    def ip(self) -> Optional[pulumi.Input[str]]:
        """
        Dedicated IP address.
        """
        return pulumi.get(self, "ip")
    @ip.setter
    def ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip", value)
class DedicatedIpAssignment(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_pool_name: Optional[pulumi.Input[str]] = None,
ip: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource for managing an AWS SESv2 (Simple Email V2) Dedicated IP Assignment.
This resource is used with "Standard" dedicated IP addresses. This includes addresses [requested and relinquished manually](https://docs.aws.amazon.com/ses/latest/dg/dedicated-ip-case.html) via an AWS support case, or [Bring Your Own IP](https://docs.aws.amazon.com/ses/latest/dg/dedicated-ip-byo.html) addresses. Once no longer assigned, this resource returns the IP to the [`ses-default-dedicated-pool`](https://docs.aws.amazon.com/ses/latest/dg/managing-ip-pools.html), managed by AWS.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sesv2.DedicatedIpAssignment("example",
destination_pool_name="my-pool",
ip="0.0.0.0")
```
## Import
SESv2 (Simple Email V2) Dedicated IP Assignment can be imported using the `id`, which is a comma-separated string made up of `ip` and `destination_pool_name`, e.g.,
```sh
$ pulumi import aws:sesv2/dedicatedIpAssignment:DedicatedIpAssignment example "0.0.0.0,my-pool"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_pool_name: Dedicated IP address.
:param pulumi.Input[str] ip: Dedicated IP address.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DedicatedIpAssignmentArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource for managing an AWS SESv2 (Simple Email V2) Dedicated IP Assignment.
This resource is used with "Standard" dedicated IP addresses. This includes addresses [requested and relinquished manually](https://docs.aws.amazon.com/ses/latest/dg/dedicated-ip-case.html) via an AWS support case, or [Bring Your Own IP](https://docs.aws.amazon.com/ses/latest/dg/dedicated-ip-byo.html) addresses. Once no longer assigned, this resource returns the IP to the [`ses-default-dedicated-pool`](https://docs.aws.amazon.com/ses/latest/dg/managing-ip-pools.html), managed by AWS.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.sesv2.DedicatedIpAssignment("example",
destination_pool_name="my-pool",
ip="0.0.0.0")
```
## Import
SESv2 (Simple Email V2) Dedicated IP Assignment can be imported using the `id`, which is a comma-separated string made up of `ip` and `destination_pool_name`, e.g.,
```sh
$ pulumi import aws:sesv2/dedicatedIpAssignment:DedicatedIpAssignment example "0.0.0.0,my-pool"
```
:param str resource_name: The name of the resource.
:param DedicatedIpAssignmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch construction to ``_internal_init``.

    Supports both calling conventions: a typed ``DedicatedIpAssignmentArgs``
    object, or the individual properties passed as keyword arguments.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        DedicatedIpAssignmentArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # No typed args object supplied: forward raw positional/keyword args.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Unpack the typed args object into individual keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   destination_pool_name: Optional[pulumi.Input[str]] = None,
                   ip: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    """Validate inputs and register the resource with the Pulumi engine.

    ``__props__`` may only be supplied together with ``opts.id`` (the
    ``get`` lookup path); otherwise a fresh property bag is built here and
    both required inputs are checked.
    """
    opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.id is None:
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = DedicatedIpAssignmentArgs.__new__(DedicatedIpAssignmentArgs)
        # Both properties are required unless the resource is being
        # rehydrated from an existing URN.
        for prop_name, prop_value in (("destination_pool_name", destination_pool_name),
                                      ("ip", ip)):
            if prop_value is None and not opts.urn:
                raise TypeError("Missing required property '%s'" % prop_name)
            __props__.__dict__[prop_name] = prop_value
    super(DedicatedIpAssignment, __self__).__init__(
        'aws:sesv2/dedicatedIpAssignment:DedicatedIpAssignment',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        destination_pool_name: Optional[pulumi.Input[str]] = None,
        ip: Optional[pulumi.Input[str]] = None) -> 'DedicatedIpAssignment':
    """
    Get an existing DedicatedIpAssignment resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] destination_pool_name: Dedicated IP pool name.
    :param pulumi.Input[str] ip: Dedicated IP address.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _DedicatedIpAssignmentState.__new__(_DedicatedIpAssignmentState)
    __props__.__dict__["destination_pool_name"] = destination_pool_name
    __props__.__dict__["ip"] = ip
    return DedicatedIpAssignment(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationPoolName")
def destination_pool_name(self) -> pulumi.Output[str]:
    """
    Dedicated IP pool name.
    """
    return pulumi.get(self, "destination_pool_name")
@property
@pulumi.getter
def ip(self) -> pulumi.Output[str]:
    """
    Dedicated IP address.
    """
    value = pulumi.get(self, "ip")
    return value
|
993,707 | 2eef94a7041c9c32e54ec48913ba2dae131f14ce | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import xlwt
from report_engine_xls import report_xls
from ad_budget_report.report.report_budget_utilization import report_budgets
import cStringIO
from tools.translate import _
import pooler
class budget_utilization_xls(report_xls):
def _get_start_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_from', False):
return data['form']['date_from']
return ''
def _get_end_date(self, data):
# ok
if data.get('form', False) and data['form'].get('date_to', False):
return data['form']['date_to']
return ''
def get_start_period(self, data):
    """Return the name of the 'period_from' account period, or ''."""
    form = data.get('form', False) or {}
    period_id = form.get('period_from', False)
    if not period_id:
        return ''
    period_obj = pooler.get_pool(self.cr.dbname).get('account.period')
    return period_obj.browse(self.cr, self.uid, period_id).name
def get_end_period(self, data):
    """Return the name of the 'period_to' account period, or ''."""
    form = data.get('form', False) or {}
    period_id = form.get('period_to', False)
    if not period_id:
        return ''
    period_obj = pooler.get_pool(self.cr.dbname).get('account.period')
    return period_obj.browse(self.cr, self.uid, period_id).name
def _get_target_move(self, data):
    """Translated label for the 'target_move' filter ('' when unset)."""
    form = data.get('form', False) or {}
    move = form.get('target_move', False)
    if not move:
        return ''
    return _('All Entries') if move == 'all' else _('All Posted Entries')
def _get_filter(self, data):
    """Translated label for the wizard's 'filter' selection.

    Falls back to _('No Filter') for any unrecognised or missing value.
    """
    form = data.get('form', False) or {}
    selected = form.get('filter', False)
    if selected:
        if selected == 'filter_date':
            return _('Date')
        if selected == 'filter_period':
            return _('Periods')
    return _('No Filter')
def _display_filter(self, parser, data):
    """Build the header line describing the selected fiscal year and periods.

    NOTE(review): ``filter_string``, ``moves_string`` and
    ``display_acct_string`` are computed below but never used -- the
    returned text only contains the fiscal-year and period/date strings.
    Presumably leftovers from an earlier report layout; confirm at the
    call sites before removing them.
    """
    filter_mode = self._get_filter(data)
    filter_string = filter_mode
    # NOTE(review): compares against the literal 'Date'/'Periods', while
    # _get_filter returns the translated _('Date')/_('Periods') -- these
    # branches will not match in non-English locales.
    if filter_mode == 'Date':
        filter_string = '%s -> %s' % (parser.formatLang(self._get_start_date(data), date=True),
                                      parser.formatLang(self._get_end_date(data), date=True))
    elif filter_mode == 'Periods':
        filter_string = '%s -> %s' % (self.get_start_period(data),
                                      self.get_end_period(data))
    moves_string = self._get_target_move(data)
    display_acct_string = ''
    # Raises KeyError when 'display_account' is absent from the wizard form.
    if data['form']['display_account'] == 'bal_all':
        display_acct_string = 'All'
    elif data['form']['display_account'] == 'bal_movement':
        display_acct_string = 'With movements'
    else:
        display_acct_string = 'With balance is not equal to 0'
    fiscal_year_str = parser.get_fiscalyear_text(data['form'])
    period_date_str = parser.get_periods_and_date_text(data['form'])
    return 'Fiscal Year: %s, Period & Date By: %s' % (fiscal_year_str, period_date_str)
def _display_fiscalyear(self, parser, data):
"""k = parser.get_fiscalyear_text(data)
if k:
k = 'Fiscal Year: %s' % (k)"""
k = "asdfasdfasdfasdf"
return k
## Modules Begin
def _size_col(sheet, col):
    # Return the stored width of column `col` on the xlwt worksheet.
    # NOTE(review): defined inside the class without 'self' -- if called as
    # an instance method, 'sheet' would receive the instance. Looks like it
    # was meant to be a @staticmethod (or module-level helper); appears
    # unused in this chunk -- confirm call sites before changing.
    return sheet.col_width(col)
def _size_row(sheet, row):
    # Return the stored height of row `row` on the xlwt worksheet.
    # NOTE(review): same 'self'-less signature issue as _size_col above;
    # confirm call sites before converting to @staticmethod.
    return sheet.row_height(row)
## Modules End
def _department_list(self, data):
# if data.get('form', False) and data['form'].get('dept_relation2', False):
# return pooler.get_pool(self.cr.dbname).get('hr.department').browse(self.cr,self.uid,data['form']['dept_relation2']).name
# return ''
if data.get('form', False) and data['form'].get('dept_relation2', False):
return data['form']['dept_relation2']
return ''
"""def id_generator(self, size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))"""
def generate_xls_report(self, parser, data, obj, wb):
c = parser.localcontext['company']
ws = wb.add_sheet(('Utilization'))
ws.panes_frozen = True
ws.remove_splits = True
ws.portrait = 0 # Landscape
ws.fit_width_to_pages = 1
ws.col(0).width = len("ABCD")*1024
ws.col(1).width = len("ABCD")*2048
ws.col(2).width = len("ABC")*256
ws.col(3).width = len("ABCDEF")*1024
ws.col(4).width = len("AB")*256
ws.col(6).width = len("AB")*256
ws.col(8).width = len("AB")*256
ws.col(10).width = len("AB")*256
ws.col(12).width = len("AB")*256
ws.col(15).width = len("AB")*256
ws.col(18).width = len("AB")*256
ws.col(20).width = len("AB")*256
# ws.col(24).width = len("AB")*256
# ws.col(26).width = len("AB")*256
#ws.col(3).width = len("ABC")*256
#ws.col(4).width = len("A bunch of longer text not wrapped")*256
ws.row(7).height = len("AB")*256
company = "%s" % (c.name)
#act_ytd_view = parser.compute_view_xls(data, False, i['id'], data['form']['period_id'], data['form']['cut_date'])['act_ytd']
styles = dict(
bold = 'font: bold 1',
italic = 'font: italic 1',
# Wrap text in the cell
wrap_bold = 'font: bold 1; align: wrap 1;',
# White text on a blue background
reversed = 'pattern: pattern solid, fore_color blue; font: color black;',
# Light orange checkered background
light_orange_bg = 'pattern: pattern fine_dots, fore_color white, back_color orange;',
# Heavy borders
bordered = 'border: top thick, right thick, bottom thick, left thick;',
# 16 pt red text
big_red = 'font: height 320, color red;',
)
#print styles['light_orange_bg']
cols_specs = [
# Headers data
('Kosong', 1, 0, 'text',
lambda x, d, p: ""),
('Note', 1, 0, 'text',
lambda x, d, p: 'Note:'),
('Note1', 6, 0, 'text',
lambda x, d, p: "1. This rolling report should include P&L, cashflow & balance sheet"),
('Note2', 6, 0, 'text',
lambda x, d, p: "2. ERP should produce both detail & summary (high level, major accounts)"),
('Note3', 6, 0, 'text',
lambda x, d, p: "3. Need to add Revenue"),
('Space', 22, 0, 'text',
lambda x, d, p: ""),
('Company', 22, 0, 'text',
lambda x, d, p: company.upper()),
('Judul', 22, 0, 'text',
lambda x, d, p: "Budget Reporting"),
('Dept', 22, 0, 'text',
lambda x, d, p: parser.get_dept_text(data)),
('Department', 22, 0, 'text',
lambda x, d, p: x.name),
('Div', 22, 0, 'text',
lambda x, d, p: 'Division'),
# ('inIDR', 23, 0, 'text',
# lambda x, d, p: 'in IDR'),
('YearEnded', 22, 0, 'text',
lambda x, d, p: 'for year ended %s' % (parser.formatLang(data['form']['cut_date'], date=True))),
('inIDR', 22, 0, 'text',
lambda x, d, p: "in IDR"),
('HeaderCOA', 2, 0, 'text',
lambda x, d, p: "COA"),
('HeaderDesc', 1, 0, 'text',
lambda x, d, p: ""),
('HAPY', 1, 0, 'text',
lambda x, d, p: "Actual Previous Year"),
('HACY', 3, 0, 'text',
lambda x, d, p: "Actual Current Year"),
('HBCY', 3, 0, 'text',
lambda x, d, p: "Budget Current Year"),
('VRC', 5, 0, 'text',
lambda x, d, p: "(Over)/Under"),
('RB', 5, 0, 'text',
lambda x, d, p: "Remaining Budget"),
('UTB', 3, 0, 'text',
lambda x, d, p: "vs Total Budget Dept."),#compute_view(data, dept,item,o.period_id,o.cut_date)
('Code', 1, 0, 'text',
lambda x, d, p: x['code']),
('Desc', 1, 0, 'text',
lambda x, d, p: ' '*x['level'] +x['name']),
#LIST ALL EXPENSE
('AmtHAPY', 1, 0, 'number',
lambda x, d, p: parser._lastyear(i['id'], i['type'], data['form']['period_id'], data['form']['cut_date'], False)),
('AmtActMonth', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtActMonth', False)),
('AmtActYear', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtActYear', False)),
('AmtBgtMonth', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtBgtMonth', False)),
('AmtBgtYear', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtBgtYear', False)),
('AmtVarMonth', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtVarMonth', False)),
('AmtVarYear', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtVarYear', False)),
('PreVarMonth', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreVarMonth', False)),
('PreVarYear', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreVarYear', False)),
('AmtTotBudget', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtTotBudget', False)),
('PreTotBudget', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreTotBudget', False)),
#TOTAL ALL EXPENSE
('TotalDesc', 3, 0, 'text', lambda x, d, p: "TOTAL OPERATING EXPENSES"),
('AmtActMonthTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtActMonthTot', False)),
('AmtActYearTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtActYearTot', False)),
('AmtBgtMonthTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtBgtMonthTot', False)),
('AmtBgtYearTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtBgtYearTot', False)),
('AmtVarMonthTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtVarMonthTot', False)),
('PreVarMonthTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'PreVarMonthTot', False)),
('AmtVarYearTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtVarYearTot', False)),
('PreVarYearTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'PreVarYearTot', False)),
('AmtTotBudgetTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'AmtTotBudgetTot', False)),
('PreTotBudgetTot', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], exps, False, 'PreTotBudgetTot', False)),
#TOTAL ALL COGS
('TotalCOGSDesc', 3, 0, 'text', lambda x, d, p: "TOTAL PRODUCTION COST"),
('AmtActMonthTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtActMonthTot', False)),
('AmtActYearTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtActYearTot', False)),
('AmtBgtMonthTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtBgtMonthTot', False)),
('AmtBgtYearTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtBgtYearTot', False)),
('AmtVarMonthTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtVarMonthTot', False)),
('PreVarMonthTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'PreVarMonthTot', False)),
('AmtVarYearTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtVarYearTot', False)),
('PreVarYearTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'PreVarYearTot', False)),
('AmtTotBudgetTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'AmtTotBudgetTot', False)),
('PreTotBudgetTotCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogs, False, 'PreTotBudgetTot', False)),
#LIST DEPT
('AmtHAPYDEP', 1, 0, 'number',
lambda x, d, p: parser._lastyear(i['id'], i['type'], data['form']['period_id'], data['form']['cut_date'], dep['id'])),
('AmtActMonthDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtActMonth', dep['id'])),
('AmtActYearDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtActYear', dep['id'])),
('AmtBgtMonthDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtBgtMonth', dep['id'])),
('AmtBgtYearDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtBgtYear', dep['id'])),
('AmtVarMonthDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtVarMonth', dep['id'])),
('AmtVarYearDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtVarYear', dep['id'])),
('PreVarMonthDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreVarMonth', dep['id'])),
('PreVarYearDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreVarYear', dep['id'])),
('AmtTotBudgetDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'AmtTotBudget', dep['id'])),
('PreTotBudgetDEP', 1, 0, 'number',
lambda x, d, p: parser.get_period_actual(data['form']['cut_date'], data['form']['period_id'], i['id'], i['type'], 'PreTotBudget', dep['id'])),
#TOTAL DEPT EXPENSE
('TotalDepDesc', 3, 0, 'text', lambda x, d, p: "TOTAL OPERATING EXPENSES"),
('AmtActMonthTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtActMonthTot', dep['id'])),
('AmtActYearTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtActYearTot', dep['id'])),
('AmtBgtMonthTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtBgtMonthTot', dep['id'])),
('AmtBgtYearTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtBgtYearTot', dep['id'])),
('AmtVarMonthTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtVarMonthTot', dep['id'])),
('PreVarMonthTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'PreVarMonthTot', dep['id'])),
('AmtVarYearTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtVarYearTot', dep['id'])),
('PreVarYearTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'PreVarYearTot', dep['id'])),
('AmtTotBudgetTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'AmtTotBudgetTot', dep['id'])),
('PreTotBudgetTotDEP', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], expsD, False, 'PreTotBudgetTot', dep['id'])),
#TOTAL DEPT COGS
('TotalCOGSDepDesc', 3, 0, 'text', lambda x, d, p: "TOTAL PRODUCTION COST"),
('AmtActMonthTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtActMonthTot', dep['id'])),
('AmtActYearTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtActYearTot', dep['id'])),
('AmtBgtMonthTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtBgtMonthTot', dep['id'])),
('AmtBgtYearTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtBgtYearTot', dep['id'])),
('AmtVarMonthTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtVarMonthTot', dep['id'])),
('PreVarMonthTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'PreVarMonthTot', dep['id'])),
('AmtVarYearTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtVarYearTot', dep['id'])),
('PreVarYearTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'PreVarYearTot', dep['id'])),
('AmtTotBudgetTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'AmtTotBudgetTot', dep['id'])),
('PreTotBudgetTotDEPCOGS', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], cogsD, False, 'PreTotBudgetTot', dep['id'])),
#Total DEPT CAPEX
('TotalCAPEXDepDesc', 3, 0, 'text', lambda x, d, p: "TOTAL CAPITAL EXPENSES"),
('AmtActMonthTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtActMonthTot', dep['id'])),
('AmtActYearTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtActYearTot', dep['id'])),
('AmtBgtMonthTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtBgtMonthTot', dep['id'])),
('AmtBgtYearTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtBgtYearTot', dep['id'])),
('AmtVarMonthTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtVarMonthTot', dep['id'])),
('PreVarMonthTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'PreVarMonthTot', dep['id'])),
('AmtVarYearTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtVarYearTot', dep['id'])),
('PreVarYearTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'PreVarYearTot', dep['id'])),
('AmtTotBudgetTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'AmtTotBudgetTot', dep['id'])),
('PreTotBudgetTotDEPCAPEX', 1, 0, 'number',
lambda x, d, p: '%s' % parser.get_period_total(data['form']['cut_date'], data['form']['period_id'], capexD, False, 'PreTotBudgetTot', dep['id'])),
]
row_hdr0 = self.xls_row_template(cols_specs, ['Kosong','Note','Note1'])
row_hdr1 = self.xls_row_template(cols_specs, ['Kosong','Kosong','Note2'])
row_hdr2 = self.xls_row_template(cols_specs, ['Kosong','Kosong','Note3'])
row_hdr3 = self.xls_row_template(cols_specs, ['Space'])
row_hdr4 = self.xls_row_template(cols_specs, ['Company'])
row_hdr5a = self.xls_row_template(cols_specs, ['Judul'])
row_hdr5b = self.xls_row_template(cols_specs, ['YearEnded'])
row_hdr5c = self.xls_row_template(cols_specs, ['Div'])
row_hdr5d = self.xls_row_template(cols_specs, ['Dept'])
row_hdr5e = self.xls_row_template(cols_specs, ['inIDR'])
#row_hdr6 = self.xls_row_template(cols_specs, ['Kosong','AsOff'])
row_hdr7 = self.xls_row_template(cols_specs, ['Space'])
row_hdr8 = self.xls_row_template(cols_specs, ['Kosong','HeaderDesc','Kosong','HAPY','Kosong','HACY','Kosong','HBCY','Kosong','VRC','Kosong','UTB'])
# row_hdr9 = self.xls_row_template(cols_specs, ['Kosong','StateCOA','StateDesc','StateM1','StateM2','StateM3','StateM4','StateM5','StateM6','StateM7','StateM8','StateM9','StateM10','StateM11','StateM12','Kosong','Kosong','Kosong'])
#row_hdr9 = self.xls_row_template(cols_specs, ['Kosong','Space'])
row_hdr10 = self.xls_row_template(cols_specs, ['Space'])
row_hdr11 = self.xls_row_template(cols_specs, ['Department'])
# row_loopDep = self.xls_row_template(cols_specs, ['Kosong','Code','Name','MD1','MD2','MD3','MD4','MD5','MD6','MD7','MD8','MD9','MD10','MD11','MD12','TotalD','BudgetD','VarianceD'])#row_loop_test[17][2]['PreVarYear'],row_loop_test[21][2]['PreTotBudget']
# row_loop = self.xls_row_template(cols_specs, ['Code','Desc','Kosong','AmtHAPY','Kosong','AmtActMonth','Kosong','AmtActYear','Kosong','AmtBgtMonth','Kosong','AmtBgtYear','Kosong','AmtVarMonth','PreVarMonth','Kosong','AmtVarYear','PreVarYear','Kosong','AmtTotBudget','Kosong','PreTotBudget'])
row_loop_test = self.xls_row_template(cols_specs, ['Code','Desc','Kosong','AmtHAPY','Kosong','AmtActMonth','Kosong','AmtActYear','Kosong','AmtBgtMonth','Kosong','AmtBgtYear','Kosong','AmtVarMonth','PreVarMonth','Kosong','AmtVarYear','PreVarYear','Kosong','AmtTotBudget','Kosong','PreTotBudget'])
row_loop_dep = self.xls_row_template(cols_specs, ['Code','Desc','Kosong','AmtHAPYDEP','Kosong','AmtActMonthDEP','Kosong','AmtActYearDEP','Kosong','AmtBgtMonthDEP','Kosong','AmtBgtYearDEP','Kosong','AmtVarMonthDEP','PreVarMonthDEP','Kosong','AmtVarYearDEP','PreVarYearDEP','Kosong','AmtTotBudgetDEP','Kosong','PreTotBudgetDEP'])
# row_total_cogs = self.xls_row_template(cols_specs, ['Kosong','TotalCOGSDesc','MtotCOGS1','MtotCOGS2','MtotCOGS3','MtotCOGS4','MtotCOGS5','MtotCOGS6','MtotCOGS7','MtotCOGS8','MtotCOGS9','MtotCOGS10','MtotCOGS11','MtotCOGS12','TotalCOGS','BudgetCOGS','VarianceCOGS'])
row_total_expense = self.xls_row_template(cols_specs, ['TotalDesc','Kosong','Kosong','AmtActMonthTot','Kosong','AmtActYearTot','Kosong','AmtBgtMonthTot','Kosong','AmtBgtYearTot','Kosong','AmtVarMonthTot','PreVarMonthTot','Kosong','AmtVarYearTot','PreVarYearTot','Kosong','AmtTotBudgetTot','Kosong','PreTotBudgetTot'])
row_total_expense_dep = self.xls_row_template(cols_specs, ['TotalDepDesc','Kosong','Kosong','AmtActMonthTotDEP','Kosong','AmtActYearTotDEP','Kosong','AmtBgtMonthTotDEP','Kosong','AmtBgtYearTotDEP','Kosong','AmtVarMonthTotDEP','PreVarMonthTotDEP','Kosong','AmtVarYearTotDEP','PreVarYearTotDEP','Kosong','AmtTotBudgetTotDEP','Kosong','PreTotBudgetTotDEP'])
#
row_total_cogs = self.xls_row_template(cols_specs, ['TotalCOGSDesc','Kosong','Kosong','AmtActMonthTotCOGS','Kosong','AmtActYearTotCOGS','Kosong','AmtBgtMonthTotCOGS','Kosong','AmtBgtYearTotCOGS','Kosong','AmtVarMonthTotCOGS','PreVarMonthTotCOGS','Kosong','AmtVarYearTotCOGS','PreVarYearTotCOGS','Kosong','AmtTotBudgetTotCOGS','Kosong','PreTotBudgetTotCOGS'])
row_total_cogs_dep = self.xls_row_template(cols_specs, ['TotalCOGSDepDesc','Kosong','Kosong','AmtActMonthTotDEPCOGS','Kosong','AmtActYearTotDEPCOGS','Kosong','AmtBgtMonthTotDEPCOGS','Kosong','AmtBgtYearTotDEPCOGS','Kosong','AmtVarMonthTotDEPCOGS','PreVarMonthTotDEPCOGS','Kosong','AmtVarYearTotDEPCOGS','PreVarYearTotDEPCOGS','Kosong','AmtTotBudgetTotDEPCOGS','Kosong','PreTotBudgetTotDEPCOGS'])
row_total_capex = self.xls_row_template(cols_specs, ['TotalCAPEXDesc','Kosong','Kosong','AmtActMonthTotCAPEX','Kosong','AmtActYearTotCAPEX','Kosong','AmtBgtMonthTotCAPEX','Kosong','AmtBgtYearTotCAPEX','Kosong','AmtVarMonthTotCAPEX','PreVarMonthTotCAPEX','Kosong','AmtVarYearTotCAPEX','PreVarYearTotCAPEX','Kosong','AmtTotBudgetTotCAPEX','Kosong','PreTotBudgetTotCAPEX'])
row_total_capex_dep = self.xls_row_template(cols_specs, ['TotalCAPEXDepDesc','Kosong','Kosong','AmtActMonthTotDEPCAPEX','Kosong','AmtActYearTotDEPCAPEX','Kosong','AmtBgtMonthTotDEPCAPEX','Kosong','AmtBgtYearTotDEPCAPEX','Kosong','AmtVarMonthTotDEPCAPEX','PreVarMonthTotDEPCAPEX','Kosong','AmtVarYearTotDEPCAPEX','PreVarYearTotDEPCAPEX','Kosong','AmtTotBudgetTotDEPCAPEX','Kosong','PreTotBudgetTotDEPCAPEX'])
#
##
# row_total_cogsDep = self.xls_row_template(cols_specs, ['Kosong','TotalCOGSDepDesc','MtotCOGSDep1','MtotCOGSDep2','MtotCOGSDep3','MtotCOGSDep4','MtotCOGSDep5','MtotCOGSDep6','MtotCOGSDep7','MtotCOGSDep8','MtotCOGSDep9','MtotCOGSDep10','MtotCOGSDep11','MtotCOGSDep12','TotalCOGSDep','BudgetCOGSDep','VarianceCOGSDep'])
# row_total_expenseDep = self.xls_row_template(cols_specs, ['Kosong','TotalExpenseDep','MtotEXPDep1','MtotEXPDep2','MtotEXPDep3','MtotEXPDep4','MtotEXPDep5','MtotEXPDep6','MtotEXPDep7','MtotEXPDep8','MtotEXPDep9','MtotEXPDep10','MtotEXPDep11','MtotEXPDep12','TotalEXPDep','BudgetEXPDep','VarianceEXPDep'])
## Style variable Begin borders: top thick, bottom solid, left double, right double;
hdr_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25;')
row_normal_style= xlwt.easyxf('font: height 170, colour_index black;pattern: pattern solid, fore_color white;',num_format_str='#,##0.00;(#,##0.00)')
row_bold_style = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white;',num_format_str='#,##0.00;(#,##0.00)')
row_normal_style_pre = xlwt.easyxf('font: height 180, colour_index black;pattern: pattern solid, fore_color white;',num_format_str='#,##0.00;(#,##0.00)')
row_bold_style_pre = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white;',num_format_str='#,##0.00;(#,##0.00)')
row_bold_style_total = xlwt.easyxf('font: height 180, colour_index black, bold on;pattern: pattern solid, fore_color white;borders: top thin, bottom medium;',num_format_str='#,##0.00;(#,##0.00)')
style = xlwt.easyxf(styles['reversed'])
tittle_style = xlwt.easyxf('font: height 180,name Arial, colour_index white, bold on; pattern: pattern solid, fore_color brown;')
tittle_style2 = xlwt.easyxf('font: height 180,name Arial, colour_index white, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_left_style = xlwt.easyxf('font: height 240, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_left_style2 = xlwt.easyxf('font: height 200, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;borders: bottom double;')
tittle_left_italic_style = xlwt.easyxf('font: height 190, name Arial, colour_index black, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;')
tittle_bold_center_style = xlwt.easyxf('font: height 210, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz centre; pattern: pattern solid, fore_color gray50;')
tittle_bold_center_style3 = xlwt.easyxf('font: height 210, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz centre; pattern: pattern solid, fore_color gray50;borders: top thin;')
tittle_bold_center_style2 = xlwt.easyxf('font: height 200, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz centre; pattern: pattern solid, fore_color gray50;borders: top thin;')
tittle_bold_left = xlwt.easyxf('font: height 210, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray50;borders: top thin;')
tittle_bold_right = xlwt.easyxf('font: height 210, name Times New Roman, colour_index black, bold on; align: wrap on, vert centre, horiz right; pattern: pattern solid, fore_color gray50;borders: top thin;')
#row_normal_style = xlwt.easyxf('font: height 170, name Arial, colour_index black; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
#row_bold_style = xlwt.easyxf('font: height 180, name Arial, colour_index black, bold on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color white;',num_format_str='#,##0;(#,##0)')
subtittle_right_style = xlwt.easyxf('font: height 170, name Arial, colour_index black, bold on, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
subtittle_top_and_bottom_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off, italic on; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
blank_style = xlwt.easyxf('font: height 650, name Arial, colour_index brown, bold off; align: wrap on, vert centre, horiz left; pattern: pattern solid, fore_color gray25;')
normal_style = xlwt.easyxf('font: height 240, name Arial, colour_index black, bold off; align: wrap on, vert centre, horiz left;')
total_style = xlwt.easyxf('font: height 240, name Arial, colour_index brown, bold on, italic on; align: wrap on, vert centre;', num_format_str='#,##0.00;(#,##0.00)')
## Style variable End
# Write headers
# ws.write(0, 0, '', tittle_style2)
# ws.write(0, 1, '', tittle_style2)
# ws.write(0, 2, 'Note: ', tittle_style)
# ws.write(0, 3, '1.', tittle_style)
# ws.write(0, 4, 'This rolling report should include P&L, cashflow & balance sheet', tittle_style)
# for x in [5,6,7,8,9]:
# ws.write(0, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(0, x, '', tittle_style2)
#
# ws.write(1, 0, '', tittle_style2)
# ws.write(1, 1, '', tittle_style2)
# ws.write(1, 2, '', tittle_style)
# ws.write(1, 3, '2.', tittle_style)
# ws.write(1, 4, 'ERP should produce both detail & summary (high level, major accounts)', tittle_style)
# for x in [5,6,7,8,9]:
# ws.write(1, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(1, x, '', tittle_style2)
#
# ws.write(2, 0, '', tittle_style2)
# ws.write(2, 1, '', tittle_style2)
# ws.write(2, 2, '', tittle_style)
# ws.write(2, 3, '3.', tittle_style)
# ws.write(2, 4, 'Need to add Revenue', tittle_style)
# for x in [5,6,7,8,9]:
# ws.write(2, x, '', tittle_style)
# for x in [10,11,12,13,14,15,16,17,18,19,20,21,22,23]:
# ws.write(2, x, '', tittle_style2)
#====================================================================
# self.xls_write_row(ws, None, data, parser, 3, row_hdr0, tittle_style)
# self.xls_write_row(ws, None, data, parser, 4, row_hdr1, tittle_style)
# self.xls_write_row(ws, None, data, parser, 5, row_hdr2, tittle_style)
# self.xls_write_row(ws, None, data, parser, 3, row_hdr3, tittle_style2)#Space
self.xls_write_row(ws, None, data, parser, 0, row_hdr4, tittle_bold_left_style)#Company
self.xls_write_row(ws, None, data, parser, 1, row_hdr5a, tittle_bold_left_style)#Budget Rolling
self.xls_write_row(ws, None, data, parser, 2, row_hdr5b, tittle_bold_left_style)#Budget Rolling
self.xls_write_row(ws, None, data, parser, 3, row_hdr5c, tittle_bold_left_style)#Budget Rolling
self.xls_write_row(ws, None, data, parser, 4, row_hdr5d, tittle_bold_left_style)#Budget Rolling
self.xls_write_row(ws, None, data, parser, 5, row_hdr5e, tittle_bold_left_style)#Budget Rolling
#self.xls_write_row(ws, None, data, parser, 6, row_hdr6, tittle_left_italic_style)#As of
self.xls_write_row(ws, None, data, parser, 6, row_hdr7, tittle_style2)#Space
self.xls_write_row(ws, None, data, parser, 7, row_hdr8, tittle_bold_center_style)
ws.write(8, 0, 'ACCOUNT', tittle_bold_center_style)
ws.write(8, 1, 'DESCRIPTION', tittle_bold_center_style)
#ws.write(8, 2, '', tittle_bold_center_style)
#ws.write(8, 3, '', tittle_bold_center_style)
#ws.write(8, 4, '', tittle_bold_center_style)
ws.write(8, 5, 'Month', tittle_bold_center_style3)
#ws.write(8, 6, '', tittle_bold_center_style3)
ws.write(8, 7, 'Ytd', tittle_bold_center_style3)
#ws.write(8, 8, '', tittle_bold_center_style)
ws.write(8, 9, 'Month', tittle_bold_center_style3)
#ws.write(8, 10, '', tittle_bold_center_style3)
ws.write(8, 11, 'Ytd', tittle_bold_center_style3)
#ws.write(8, 12, '', tittle_bold_center_style)
ws.write(8, 13, 'Monthly', tittle_bold_right)
#ws.write(8, 14, '', tittle_bold_center_style3)
#ws.write(8, 15, '', tittle_bold_center_style3)
#ws.write(8, 16, '', tittle_bold_center_style3)
ws.write(8, 17, 'Ytd', tittle_bold_left)
#ws.write(8, 18, '', tittle_bold_center_style)
#ws.write(8, 19, 'Monthly', tittle_bold_right)
#ws.write(8, 20, '', tittle_bold_center_style3)
#ws.write(8, 21, '', tittle_bold_center_style3)
#ws.write(8, 22, '', tittle_bold_center_style3)
#ws.write(8, 23, 'Ytd', tittle_bold_left)
#ws.write(8, 24, '', tittle_bold_center_style)
# ws.write(8, 19, '', tittle_bold_center_style)
#ws.write(8, 26, '', tittle_bold_center_style3)
# ws.write(8, 21, '', tittle_bold_center_style)
for x in [2,3,4,8,12,18,19,20,21]:
ws.write(8, x, '', tittle_bold_center_style)
for x in [6,10,14,15,16]:
ws.write(8, x, '', tittle_bold_center_style3)
# 9
#ws.write(9, 0, '', tittle_bold_center_style)
#ws.write(9, 1, '', tittle_bold_center_style)
#ws.write(9, 2, '', tittle_bold_center_style)
ws.write(9, 3, 'Amt', tittle_bold_center_style2)
#ws.write(9, 4, '', tittle_bold_center_style)
ws.write(9, 5, 'Amt', tittle_bold_center_style2)
#ws.write(9, 6, '', tittle_bold_center_style)
ws.write(9, 7, 'Amt', tittle_bold_center_style2)
#ws.write(9, 8, '', tittle_bold_center_style)
ws.write(9, 9, 'Amt', tittle_bold_center_style2)
#ws.write(9, 10, '', tittle_bold_center_style)
ws.write(9, 11, 'Amt', tittle_bold_center_style2)
#ws.write(9, 12, '', tittle_bold_center_style)
ws.write(9, 13, 'Amt', tittle_bold_center_style2)
ws.write(9, 14, '%', tittle_bold_center_style2)
#ws.write(9, 15, '', tittle_bold_center_style)
ws.write(9, 16, 'Amt', tittle_bold_center_style2)
ws.write(9, 17, '%', tittle_bold_center_style2)
#ws.write(9, 18, '', tittle_bold_center_style)
#ws.write(9, 19, 'Amt', tittle_bold_center_style2)
#ws.write(9, 20, '%', tittle_bold_center_style2)
#ws.write(9, 21, '', tittle_bold_center_style)
#ws.write(9, 22, 'Amt', tittle_bold_center_style2)
#ws.write(9, 23, '%', tittle_bold_center_style2)
#ws.write(9, 24, '', tittle_bold_center_style)
ws.write(9, 19, 'Amt', tittle_bold_center_style2)
#ws.write(9, 26, '', tittle_bold_center_style)
ws.write(9, 21, '% Remain', tittle_bold_center_style2)
for x in [0,1,2,4,6,8,10,12,15,18,20]:
ws.write(9, x, '', tittle_bold_center_style)
# for x in [3,9,11,13,14,16,17,19,20,22,23,25,27]:
# ws.write(9, x, '', tittle_bold_center_style2)
#self.xls_write_row(ws, None, data, parser, 6, row_hdr9, tittle_bold_center_style)
row_count = 10
ws.horz_split_pos = row_count
if len(parser.get_dept(data)) > 0:
for dep in parser.get_dept(data):
self.xls_write_row(ws, dep, data, parser, row_count, row_hdr11, tittle_bold_left_style2)
row_count += 1
##################CAPEX######################
capexD = []
for i in parser.get_data(data):
if i['type_budget'] == 'capex':
capexD.append(i['id'])
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
# if i['type'] == 'view':
# if row_loop_dep[14][2][0] == 'PreVarMonthDEP' or row_loop_dep[17][2][0] == 'PreVarYearDEP' or row_loop_dep[21][2][0] == 'PreTotBudgetDEP':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
else:
# if row_loop_test[14][2][0] == 'PreVarMonthDEP' or row_loop_test[17][2][0] == 'PreVarYearDEP' or row_loop_test[21][2][0] == 'PreTotBudgetDEP':
# if i['type'] == 'view':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
if capexD:
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# if row_total_expense_dep[12][2][0] == 'PreVarMonthTotDEP' or row_total_expense_dep[15][2][0] == 'PreVarYearTotDEP' or row_total_expense_dep[19][2][0] == 'PreTotBudgetTotDEP':
# self.xls_write_row(ws, expsD, data, parser, row_count, row_total_expense_dep, row_bold_style_pre)
# else:
self.xls_write_row(ws, capexD, data, parser, row_count, row_total_capex_dep, row_bold_style_total)
#self.xls_write_row(ws, expsD, data, parser, row_count, row_total_expense_dep, row_bold_style_total)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
cogsD = []
for i in parser.get_data(data):
if i['type_budget'] == 'cogs':
cogsD.append(i['id'])
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
# if i['type'] == 'view':
# if row_loop_dep[14][2][0] == 'PreVarMonthDEP' or row_loop_dep[17][2][0] == 'PreVarYearDEP' or row_loop_dep[21][2][0] == 'PreTotBudgetDEP':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
else:
# if i['type'] == 'view':
# if row_loop_dep[14][2][0] == 'PreVarMonthDEP' or row_loop_dep[17][2][0] == 'PreVarYearDEP' or row_loop_dep[21][2][0] == 'PreTotBudgetDEP':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
if cogsD:
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# if row_total_cogs_dep[12][2][0] == 'PreVarMonthTotDEPCOGS' or row_total_cogs_dep[15][2][0] == 'PreVarYearTotDEPCOGS' or row_total_cogs_dep[19][2][0] == 'PreTotBudgetTotDEPCOGS':
# self.xls_write_row(ws, cogsD, data, parser, row_count, row_total_cogs_dep, row_bold_style_pre)
# else:
self.xls_write_row(ws, cogsD, data, parser, row_count, row_total_cogs_dep, row_bold_style_total)
#self.xls_write_row(ws, cogsD, data, parser, row_count, row_total_cogs_dep, row_bold_style_total)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
expsD = []
for i in parser.get_data(data):
if i['type_budget'] == 'expense':
expsD.append(i['id'])
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
# if i['type'] == 'view':
# if row_loop_dep[14][2][0] == 'PreVarMonthDEP' or row_loop_dep[17][2][0] == 'PreVarYearDEP' or row_loop_dep[21][2][0] == 'PreTotBudgetDEP':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
else:
# if row_loop_test[14][2][0] == 'PreVarMonthDEP' or row_loop_test[17][2][0] == 'PreVarYearDEP' or row_loop_test[21][2][0] == 'PreTotBudgetDEP':
# if i['type'] == 'view':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_dep, style)
row_count += 1
if expsD:
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# if row_total_expense_dep[12][2][0] == 'PreVarMonthTotDEP' or row_total_expense_dep[15][2][0] == 'PreVarYearTotDEP' or row_total_expense_dep[19][2][0] == 'PreTotBudgetTotDEP':
# self.xls_write_row(ws, expsD, data, parser, row_count, row_total_expense_dep, row_bold_style_pre)
# else:
self.xls_write_row(ws, expsD, data, parser, row_count, row_total_expense_dep, row_bold_style_total)
#self.xls_write_row(ws, expsD, data, parser, row_count, row_total_expense_dep, row_bold_style_total)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
else:
cogs = []
for i in parser.get_data(data):
if i['type_budget'] == 'cogs':
cogs.append(i['id'])
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
# if row_loop_test[14][2][0] == 'PreVarMonth' or row_loop_test[17][2][0] == 'PreVarYear' or row_loop_test[21][2][0] == 'PreTotBudget':
# if i['type'] == 'view':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, style)
row_count += 1
else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, style)
row_count += 1
if cogs:
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# if row_total_cogs[12][2][0] == 'PreVarMonthTotCOGS' or row_total_cogs[15][2][0] == 'PreVarYearTotCOGS' or row_total_cogs[19][2][0] == 'PreTotBudgetTotCOGS':
# self.xls_write_row(ws, cogs, data, parser, row_count, row_total_cogs, row_bold_style_pre)
# else:
self.xls_write_row(ws, cogs, data, parser, row_count, row_total_cogs, row_bold_style_total)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
exps = []
for i in parser.get_data(data):
if i['type_budget'] == 'expense':
exps.append(i['id'])
if i['type'] == 'view':
style = row_bold_style
else:
style = row_normal_style
if data['form']['without_zero']:
if i['balance']:
# if row_loop_test[14][2][0] == 'PreVarMonth' or row_loop_test[17][2][0] == 'PreVarYear' or row_loop_test[21][2][0] == 'PreTotBudget':
# if i['type'] == 'view':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, style)
row_count += 1
else:#row_loop_test[17][2]['PreVarYear'],row_loop_test[21][2]['PreTotBudget']
# if row_loop_test[14][2][0] == 'PreVarMonth' or row_loop_test[17][2][0] == 'PreVarYear' or row_loop_test[21][2][0] == 'PreTotBudget':
# if i['type'] == 'view':
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_bold_style_pre)
# else:
# self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, row_normal_style_pre)
# else:
self.xls_write_row(ws, i, data, parser, row_count, row_loop_test, style)
row_count += 1
if exps:
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# print "xxxxxxxxxxxxxxx12",row_total_expense[12][2]
# print "xxxxxxxxxxxxxxx13",row_total_expense[13][2]
# print "xxxxxxxxxxxxxxx14",row_total_expense[14][2]
# print "xxxxxxxxxxxxxxx15",row_total_expense[15][2]
# print "xxxxxxxxxxxxxxx16",row_total_expense[16][2]
# print "xxxxxxxxxxxxxxx17",row_total_expense[17][2]
# print "xxxxxxxxxxxxxxx18",row_total_expense[18][2]
# print "xxxxxxxxxxxxxxx19",row_total_expense[19][2]
# print "xxxxxxxxxxxxxxx20",row_total_expense[20][2]
# print "xxxxxxxxxxxxxxx21",row_total_expense[21][2]
# if row_total_expense[12][2][0] == 'PreVarMonthTot':# or row_total_expense[15][2][0] == 'PreVarYearTot' or row_total_expense[19][2][0] == 'PreTotBudgetTot':
# self.xls_write_row(ws, exps, data, parser, row_count, row_total_expense, row_bold_style_pre)
# else:
self.xls_write_row(ws, exps, data, parser, row_count, row_total_expense, row_bold_style_total)
row_count += 1
self.xls_write_row(ws, None, data, parser, row_count, row_hdr3, tittle_style2)
row_count += 1
# Register the XLS variant of the budget report with the report engine:
# (service name, source model, mako template path, parser class, no header).
# NOTE(review): `budget_utilization_xls` and `report_budgets` are defined
# earlier in this file (outside this view) — confirm before relying on
# this description.
budget_utilization_xls(
    'report.budgets.report.xls',
    'ad_budget.item',
    'addons/ad_budget_report/report/print_budgets_report.mako',
    parser=report_budgets,
    header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
993,708 | f2d390799ca8527a6ff6f4e32e37db36f6e530ed | from kombu import Exchange, Queue
# A direct-type exchange named 'tasks'; queues below bind to it by routing key.
task_exchange = Exchange('tasks', type='direct')
print("task_exchange: ", task_exchange)
# Three priority queues on the same exchange, one binding per routing key.
task_queues = [Queue('hipri', task_exchange, routing_key='hipri'),
               Queue('midpri', task_exchange, routing_key='midpri'),
               Queue('lopri', task_exchange, routing_key='lopri')]
print("task_queues: ", task_queues)
993,709 | 2b5b691244afb2572641a4bd705a4a719823ce73 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '..')))
sys.path.append(os.path.abspath(os.path.join(__dir__, '..', '..', 'PaddleSlim')))
from ppocr.data import build_dataloader
from ppocr.modeling.architectures import build_model
from ppocr.postprocess import build_post_process
from ppocr.losses import build_loss
from ppocr.optimizer import build_optimizer
from ppocr.metrics import build_metric
from ppocr.utils.save_load import init_model
from ppocr.utils.utility import print_dict
import tools.program as program
import paddle
from paddle.jit import to_static
import numpy as np
from paddleslim.dygraph import FPGMFilterPruner
from paddleslim.analysis import dygraph_flops as flops
from paddleslim.analysis import model_size
from tqdm import tqdm
def get_size(file_path):
    """Return the total size of a file or directory tree, in bytes.

    Args:
        file_path (str): Path to a file or a directory.

    Returns:
        int: Size in bytes; directories are summed recursively, and a
        path that is neither a file nor a directory yields 0.
    """
    # Directories: walk the tree and add up every contained file.
    if os.path.isdir(file_path):
        return sum(
            os.path.getsize(os.path.join(parent, name))
            for parent, _subdirs, names in os.walk(file_path)
            for name in names)
    # Plain files report their own size.
    if os.path.isfile(file_path):
        return os.path.getsize(file_path)
    # Missing / special paths count as empty.
    return 0
def main():
    """Randomly search for an FPGM filter-pruning plan for a PaddleOCR rec
    model, finetune the pruned network, and export it as an inference model.

    Relies on the module-level ``config``, ``device``, ``logger`` and
    ``vdl_writer`` produced by ``program.preprocess()`` in ``__main__``.
    """
    global_config = config['Global']
    # build dataloader
    train_dataloader = build_dataloader(config, 'Train', device, logger)
    if len(train_dataloader) == 0:
        logger.error(
            "No Images in train dataset, please ensure\n" +
            "\t1. The images num in the train label_file_list should be larger than or equal with batch size.\n"
            +
            "\t2. The annotation file and path in the configuration file are provided normally."
        )
        return
    valid_dataloader = build_dataloader(config, 'Eval', device, logger)
    if len(valid_dataloader) == 0:
        logger.error(
            "No Images in eval dataset, please ensure\n" +
            "\t1. The images num in the train label_file_list should be larger than or equal with batch size.\n"
            +
            "\t2. The annotation file and path in the configuration file are provided normally."
        )
        return
    # build post process
    post_process_class = build_post_process(config['PostProcess'],
                                            global_config)
    # build metric
    eval_class = build_metric(config['Metric'])
    # build model
    # for rec algorithm
    if hasattr(post_process_class, 'character'):
        config['Architecture']["Head"]['out_channels'] = len(
            getattr(post_process_class, 'character'))
    model = build_model(config['Architecture'])
    # NOTE(review): assumes transform index 3 is RecResizeImg — confirm
    # against the YAML config this script is run with.
    shape = config['Train']['dataset']['transforms'][3]['RecResizeImg']['image_shape']
    use_srn = config['Architecture']['algorithm'] == "SRN"
    if (not global_config.get('checkpoints')) and (not global_config.get('pretrained_model')):
        logger.error(
            "No checkpoints or pretrained_model found.\n"
        )
        return
    # build loss
    loss_class = build_loss(config['Loss'])
    # build optim
    optimizer, lr_scheduler = build_optimizer(
        config['Optimizer'],
        epochs=config['Global']['epoch_num'],
        step_each_epoch=len(train_dataloader),
        parameters=model.parameters())
    # build metric
    eval_class = build_metric(config['Metric'])
    # load pretrain model
    # Temporarily hide 'checkpoints' so init_model loads only the pretrained
    # weights here; the checkpoint path is restored for the finetune stage.
    checkpoints = global_config.get('checkpoints')
    config['Global']['checkpoints'] = None
    best_model_dict = init_model(config, model, logger)
    config['Global']['checkpoints'] = checkpoints
    logger.info("Model before pruning: ")
    summary_dict = paddle.summary(model, (1, shape[0], shape[1], shape[2]))
    # Baseline numbers each pruned candidate is compared against.
    baseline_metric = {}
    baseline_metric['acc'] = best_model_dict.get('acc')
    baseline_metric['flops'] = flops(model, [1, shape[0], shape[1], shape[2]])
    baseline_metric['params'] = summary_dict['total_params']
    logger.info('baseline metric:')
    for k, v in baseline_metric.items():
        logger.info('{}:{}'.format(k, v))
    # pruner
    pruner = FPGMFilterPruner(model, [1, shape[0], shape[1], shape[2]])
    FILTER_DIM = [0]  # prune along the output-channel axis
    # condition: which baseline metric the random search must reduce
    condition = "flops"
    assert condition in ["acc", "flops", "params"]
    target = 0.2  # accept a plan once the metric drops by more than 20%
    while(True):
        # random pruning startegy
        max_rate = 0.95
        min_rate = 0
        ratios = {}
        skip_vars = ['conv_last_weights'] # for mobilenetv3
        # skip_vars = ['res5b_branch2b_weights','res5b_branch2a_weights','res5a_branch2b_weights','res5a_branch1_weights']
        # skip_vars = ['res5a_branch2b_weights','res5a_branch1_weights'] # for resnet
        pruner.skip_vars = skip_vars
        for group in pruner.var_group.groups:
            var_name = group[0][0]
            if var_name in skip_vars:
                continue
            # Draw a random prune ratio in [min_rate, min_rate + max_rate).
            ratios[var_name] = float(np.random.rand(1) * max_rate + min_rate)
        plan = pruner.prune_vars(ratios, FILTER_DIM)
        logger.info("Model after pruning: ")
        summary_dict = paddle.summary(model, (1, shape[0], shape[1], shape[2]))
        # Adaptive-BN: forward ~1/30 of the train set (no gradients) so the
        # batch-norm statistics adapt to the pruned architecture before eval.
        model.train()
        max_iter = int(train_dataloader.batch_sampler.total_size / 30 / train_dataloader.batch_sampler.batch_size)
        with paddle.no_grad():
            pbar = tqdm(total=max_iter, desc='adaptiveBN model:')
            for idx, batch in enumerate(train_dataloader):
                if idx > max_iter:
                    break
                model.forward(batch[0])
                pbar.update(1)
            pbar.close()
        # Eval
        eval_metric = program.eval(model, valid_dataloader, post_process_class,
                                   eval_class, use_srn)
        pruned_metric = {}
        pruned_metric['acc'] = eval_metric['acc']
        pruned_metric['flops'] = flops(model, [1, shape[0], shape[1], shape[2]])
        pruned_metric['params'] = summary_dict['total_params']
        logger.info('pruned metric:')
        for k, v in pruned_metric.items():
            logger.info('{}:{}'.format(k, v))
        # Relative reduction of the chosen metric vs. the baseline.
        ratio = (baseline_metric[condition] - pruned_metric[condition]) / baseline_metric[condition]
        logger.info('ratio:{}'.format(ratio))
        if ratio > target: # For acc
            logger.info('Save model')
            break
        else:
            # Candidate did not reduce the metric enough: undo the pruning
            # and try another random plan.
            logger.info('Restore model')
            plan.restore(model)
    # Finetune
    if global_config.get('checkpoints'):
        checkpoints_model_dict = init_model(config, model, logger, optimizer)
        if len(checkpoints_model_dict):
            logger.info('metric in ckpt ***************')
            for k, v in checkpoints_model_dict.items():
                logger.info('{}:{}'.format(k, v))
    else:
        checkpoints_model_dict = {}
    logger.info("Model after pruning: ")
    summary_dict = paddle.summary(model, (1, shape[0], shape[1], shape[2]))
    program.train(config, train_dataloader, valid_dataloader, device, model,
                  loss_class, optimizer, lr_scheduler, post_process_class,
                  eval_class, checkpoints_model_dict, logger, vdl_writer)
    # Eval after finetune
    metric = program.eval(model, valid_dataloader, post_process_class,
                          eval_class, use_srn)
    logger.info('metric finetune ***************')
    for k, v in metric.items():
        logger.info('{}:{}'.format(k, v))
    logger.info("Model after finetune: ")
    summary_dict = paddle.summary(model, (1, shape[0], shape[1], shape[2]))
    # Save
    model.eval()
    save_path = '{}/prune'.format(config['Global']['save_model_dir'])
    infer_shape = [3, 32, -1] # for rec model, H must be 32
    model = to_static(
        model,
        input_spec=[
            paddle.static.InputSpec(
                shape=[None] + infer_shape, dtype='float32')
        ])
    paddle.jit.save(model, save_path)
    logger.info('pruned model is saved to {}'.format(save_path))
    # Calculate model size
    model_size = get_size(os.path.join(save_path + '.pdiparams')) + get_size(os.path.join(save_path + '.pdmodel'))
    logger.info('pruned model size is {}MB'.format(model_size/1000/1000))
if __name__ == '__main__':
    # program.preprocess() parses the YAML config, selects the device, and
    # sets up logging / the VisualDL writer before the pruning pipeline runs.
    config, device, logger, vdl_writer = program.preprocess()
    main()
|
993,710 | 5457e494dd50b9173eb2ef7cc6f593ef5ea7ab0e | from ply.lex import lex
class QLSLexer:
    """PLY-based lexer for the QLS stylesheet language.

    NOTE: PLY derives its behaviour from this class's attributes — token
    function docstrings are the regular expressions, and function
    definition order fixes matching priority — so do not reorder rules.
    """

    def __init__(self):
        # Lexing errors collected by t_error(); exposed read-only via `errors`.
        self.__errors = []
        # The underlying PLY lexer; created lazily by build().
        self.__lexer = None

    @property
    def lexer(self):
        return self.__lexer

    @property
    def errors(self):
        return self.__errors

    def build(self):
        # Construct the PLY lexer from the rules declared on this class.
        # Must be called before tokenize().
        self.__lexer = lex(module=self)

    def tokenize(self, data):
        """Tokenize `data`, resetting any previously collected errors.

        NOTE(review): the returned list always ends with the `None`
        end-of-input sentinel (and is `[None]` for empty input) — confirm
        that callers expect this trailing value.
        """
        self.__errors = []
        self.lexer.input(data)
        token = self.lexer.token()
        tokens = [token]
        while token:
            token = self.lexer.token()
            tokens.append(token)
        return tokens

    # Token names PLY is allowed to emit (reserved words are appended below).
    tokens = [
        'COLON', 'COMMA',
        'LEFT_BRACE', 'RIGHT_BRACE',
        'LEFT_BRACKET', 'RIGHT_BRACKET',
        'IDENTIFIER',
        'HEX_COLOR',
        'INTEGER_LITERAL', 'STRING_LITERAL',
    ]

    # List of reserved keywords
    reserved = {
        'stylesheet': 'STYLESHEET',
        'page': 'PAGE',
        'section': 'SECTION',
        'question': 'QUESTION',
        'default': 'DEFAULT',
        # Properties
        'height': 'HEIGHT',
        'width': 'WIDTH',
        'font': 'FONT',
        'fontsize': 'FONT_SIZE',
        'color': 'COLOR',
        'widget': 'WIDGET',
        # QL types
        'boolean': 'BOOLEAN',
        'date': 'DATE',
        'decimal': 'DECIMAL',
        'integer': 'INTEGER',
        'money': 'MONEY',
        'string': 'STRING',
        # Widgets
        'calendar': 'CALENDAR',
        'checkbox': 'CHECKBOX',
        'line_edit': 'LINE_EDIT',
        'spinbox': 'SPINBOX',
        'radio': 'RADIO',
    }
    tokens += list(reserved.values())

    # Regular expression rules for simple tokens
    t_ignore = ' \t'
    t_COLON = r':'
    t_COMMA = r','
    t_LEFT_BRACE = r'\{'
    t_RIGHT_BRACE = r'\}'
    t_LEFT_BRACKET = r'\('
    t_RIGHT_BRACKET = r'\)'
    # Hex colour: '#' followed by 6 or 3 hex digits.
    t_HEX_COLOR = r'\#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})'

    @staticmethod
    def t_newline(token):
        r'\n+'
        # Keep PLY's line counter in sync for error reporting.
        token.lexer.lineno += len(token.value)

    @staticmethod
    def t_eof(token):
        # Reset the line counter so the lexer can be reused on new input.
        token.lexer.lineno = 1

    @staticmethod
    def t_INTEGER_LITERAL(token):
        r'\d+'
        token.value = int(token.value)
        return token

    @staticmethod
    def t_STRING_LITERAL(token):
        r'\"(.+?)\"'
        # Strip the surrounding double quotes.
        token.value = token.value[1:-1]
        return token

    def t_IDENTIFIER(self, token):
        r'[a-zA-Z][a-zA-Z_0-9]*'
        token.type = self.reserved.get(token.value, 'IDENTIFIER')  # Check for reserved words
        return token

    def t_error(self, token):
        # Record the offending character and keep lexing.
        self.errors.append("Illegal character '%s'" % token.value[0])
        token.lexer.skip(1)
|
993,711 | 638657c6b24bb39c1d434b1f22d9fbe288a6f84c | #coding=utf-8
"""
Simple iOS tests, showing accessing elements and getting/setting text from them.
"""
import unittest
import os
from random import randint
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from time import sleep
class testLogin(unittest.TestCase):
    """Login-flow UI test for the NetEase YanXuan iOS app on the simulator.

    Drives a local Appium server (port 4723) against a debug build and
    walks through: open profile tab -> enter credentials -> tap login.
    """

    def setUp(self):
        # Start an Appium session with the simulator debug build; alerts
        # (e.g. permission prompts) are auto-accepted.
        app = os.path.abspath('../../apps/TestYanXuan/build/debug-iphonesimulator/NeteaseYanxuan.app')
        self.driver = webdriver.Remote(
            command_executor='http://127.0.0.1:4723/wd/hub',
            desired_capabilities={
                'app': app,
                'platformName': 'iOS',
                'platformVersion': '9.3',
                'deviceName': 'iPhone 6',
                'autoAcceptAlerts': 'true'
            })

    def testLogin(self):
        # Tap the "profile" tab in the tab bar.
        els = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIATabBar[1]/UIAScrollView[1]/UIAImage[5]')
        action = TouchAction(self.driver)
        action.tap(els).perform()
        # Enter the account name into the username field.
        elsUsrname = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIATextField[1]')
        elsUsrname.send_keys('yanxuantest1999@163.com')
        # Enter the password and dismiss the keyboard.
        elsPassword = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIASecureTextField[1]')
        elsPassword.send_keys('abc123')
        self.driver.hide_keyboard()
        # Tap the login button.
        # BUG FIX: the original referenced `.click` without calling it
        # (`...).click`), so the button was never actually tapped.
        self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIAButton[1]').click()
        # Wait for the login round-trip to complete.
        sleep(30)

    def tearDown(self):
        # Always close the Appium session, even if the test failed.
        self.driver.quit()
if __name__ == '__main__':
    # Build and run the suite directly so the script can be executed standalone.
    suite = unittest.TestLoader().loadTestsFromTestCase(testLogin)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
993,712 | ee13f4348f1a88eaf6a97bfda53bade5ffeba5b8 | from coapthon.client.helperclient import HelperClient
from coapthon import defines
from scale_client.networks.util import DEFAULT_COAP_PORT, msg_fits_one_coap_packet
import logging
log = logging.getLogger(__name__)
class CoapClient(HelperClient):
    """
    This helper class performs CoAP requests in a simplified way.
    It patches the coapthon HelperClient that has a bug in which
    the send_request call creates a thread that will never go away
    even after the response is received and handled. This results
    in several outstanding requests getting in each others' way and
    responses going to the wrong callback as well as the client not
    quitting properly since only one of the threads properly exits
    and the others are left waiting on the Queue.

    We also patch this class to allow sending NON-confirmable messages.
    """

    def __init__(self, server_hostname, server_port=DEFAULT_COAP_PORT, sock=None, src_port=None,
                 cb_ignore_read_exception=None, cb_ignore_write_exception=None,
                 confirmable_messages=True):
        # TODO: make some @properties to keep these variables in sync with self.server
        self.server_hostname = server_hostname
        self.server_port = server_port

        # Older and newer coapthon HelperClient versions take the server as a
        # (host, port) tuple; newer ones additionally accept the callbacks.
        coapthon_server_arg = (self.server_hostname, self.server_port)
        try:
            super(CoapClient, self).__init__(coapthon_server_arg, sock=sock,
                                             cb_ignore_read_exception=cb_ignore_read_exception,
                                             cb_ignore_write_exception=cb_ignore_write_exception)
        except TypeError:
            # Fall back to the older constructor signature.
            super(CoapClient, self).__init__(coapthon_server_arg, sock=sock)
            assert cb_ignore_read_exception is None and cb_ignore_write_exception is None, "this coapthon version doesn't support callbacks in client constructor!"

        self.confirmable_messages = confirmable_messages

        # XXX: to request a specific source port, we can do this:
        if src_port is not None:
            self.protocol._socket.bind(('', src_port))

    ##### XXX: to allow sending non-confirmable messages, we hack/patch HelperClient
    def mk_request(self, method, path):
        """Build a request, downgrading it to NON when configured to do so."""
        request = super(CoapClient, self).mk_request(method, path)
        if not self.confirmable_messages:
            request.type = defines.Types["NON"]
        return request

    def put(self, path, payload, callback=None, timeout=None):
        """PUT `payload` to `path`, warning when it won't fit one packet."""
        if not msg_fits_one_coap_packet(payload):
            # BUG FIX: the original passed no argument for the %d
            # placeholder, so the offending size was never logged.
            log.error("requested payload size of %d is too large to fit in a single CoAP packet!"
                      " Sending anyway but expect errors from the receiver...", len(payload))
        super(CoapClient, self).put(path, payload=payload, callback=callback, timeout=timeout)
993,713 | d282dabffe40c75395e08298d6930f7beb17a62d | """
Training module using TF Boosted Trees Classifier to predict if Customer will make specific
transaction in the future or not!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import pandas as pd
import argparse
def parse_arguments(argv=None):
    """Parse command-line arguments for the boosted-trees training job.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads ``sys.argv[1:]`` (backward compatible with
            the original zero-argument call).

    Returns:
        argparse.Namespace with the training/eval configuration.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf-data-dir',
                        type=str,
                        default='/tmp/data/',
                        help='GCS path or local path of training data.')
    parser.add_argument('--tf-model-dir',
                        type=str,
                        help='GCS path or local directory.')
    parser.add_argument('--tf-export-dir',
                        type=str,
                        default='export/',
                        help='GCS path or local directory to export model')
    parser.add_argument('--tf-learning-rate',
                        type=float,
                        default=0.01,
                        help='Learning rate for training.')
    parser.add_argument('--n-trees',
                        type=int,
                        default=100,
                        help='Number of trees.')
    parser.add_argument('--max-depth',
                        type=int,
                        default=6,
                        help='Maximum depth for the trees.')
    parser.add_argument('--train-start',
                        type=int,
                        default=0,
                        help='Start index of train examples within the data.')
    parser.add_argument('--train-count',
                        type=int,
                        default=200,
                        help='Number of train examples within the data.')
    parser.add_argument('--eval-start',
                        type=int,
                        default=200,
                        help='Start index of eval examples within the data.')
    parser.add_argument('--eval-count',
                        type=int,
                        default=200,
                        help='Number of eval examples within the data.')
    # Passing argv explicitly (e.g. []) makes the function testable without
    # touching sys.argv; None preserves the original behaviour.
    args = parser.parse_args(argv)
    return args
def read_ct_data(train_start, train_count, eval_start, eval_count,
                 data_path='/opt/train.csv'):
    """
    Read a Santander training data
    :param train_start: Start index for the training set
    :param train_count: Number of instances to be used for training
    :param eval_start: Start index for the eval set
    :param eval_count: Number of instances to be used for evaluation
    :param data_path: CSV path or file-like object accepted by
        pandas.read_csv; defaults to the original hard-coded location.
    :return: The required division between train and test set
    """
    data = pd.read_csv(data_path)
    # Dropping the id column; the remaining columns are label + features.
    data.drop(['ID_code'], axis=1, inplace=True)
    data = data.values
    return (data[train_start:train_start + train_count],
            data[eval_start:eval_start + eval_count])
def make_inputs_from_np_arrays(features_np, label_np):
    """Makes and returns input_fn and feature_columns from numpy arrays.

    The generated input_fn will return tf.data.Dataset of feature dictionary
    and a label, and feature_columns will consist of the list of
    tf.feature_column.BucketizedColumn.

    Note, for in-memory training, tf.data.Dataset should contain the whole
    data as a single tensor. Don't use batch.

    Args:
      features_np: A numpy ndarray (shape=[batch_size, num_features]) for
          float32 features.
      label_np: A numpy ndarray (shape=[batch_size, 1]) for labels.
    Returns:
      input_fn: A function returning a Dataset of feature dict and label.
      feature_names: A list of feature names.
      feature_column: A list of tf.feature_column.BucketizedColumn.
    """
    n_features = features_np.shape[1]
    per_feature = np.split(features_np, n_features, axis=1)
    # 1-based feature names.
    names = ["feature_%02d" % (idx + 1) for idx in range(n_features)]

    def bucket_boundaries(values):
        """Returns bucket boundaries for one feature by percentiles 0..99."""
        return np.unique(np.percentile(values, range(0, 100))).tolist()

    # Source numeric columns, then one bucketized column per feature.
    numeric_cols = [
        tf.feature_column.numeric_column(name, dtype=tf.float32,
                                         default_value=0.0)
        for name in names
    ]
    bucketized = [
        tf.feature_column.bucketized_column(
            col, boundaries=bucket_boundaries(values))
        for col, values in zip(numeric_cols, per_feature)
    ]

    def input_fn():
        """Returns features as a dictionary of numpy arrays, and a label."""
        feature_dict = {
            name: tf.constant(column)
            for name, column in zip(names, per_feature)
        }
        return tf.data.Dataset.zip((tf.data.Dataset.from_tensors(feature_dict),
                                    tf.data.Dataset.from_tensors(label_np),))

    return input_fn, names, bucketized
def make_eval_inputs_from_np_arrays(features_np, label_np):
    """Makes eval input as streaming batches of 1000 examples."""
    n_features = features_np.shape[1]
    per_feature = np.split(features_np, n_features, axis=1)
    # 1-based feature names, matching the training-side naming scheme.
    names = ["feature_%02d" % (idx + 1) for idx in range(n_features)]

    def input_fn():
        feature_dict = {
            name: tf.constant(column)
            for name, column in zip(names, per_feature)
        }
        return tf.data.Dataset.zip((
            tf.data.Dataset.from_tensor_slices(feature_dict),
            tf.data.Dataset.from_tensor_slices(label_np),)).batch(1000)

    return input_fn
def _make_csv_serving_input_receiver_fn(column_names, column_defaults):
    """Returns serving_input_receiver_fn for csv.

    The input arguments are relevant to `tf.decode_csv()`.

    Args:
      column_names: a list of column names in the order within input csv.
      column_defaults: a list of default values with the same size of
          column_names. Each entity must be either a list of one scalar, or
          an empty list to denote the corresponding column is required.
          e.g. [[""], [2.5], []] indicates the third column is required
          while the first column must be string and the second must be
          float/double.

    Returns:
      a serving_input_receiver_fn that handles csv for serving.
    """
    def serving_input_receiver_fn():
        # One CSV line per example; decode_csv splits it into the columns.
        csv = tf.placeholder(dtype=tf.string, shape=[None], name="csv")
        parsed = tf.decode_csv(csv, column_defaults)
        features = dict(zip(column_names, parsed))
        receiver_tensors = {"inputs": csv}
        return tf.estimator.export.ServingInputReceiver(features,
                                                        receiver_tensors)

    return serving_input_receiver_fn
def main(_):
    """Train, evaluate and export the boosted-trees classifier."""
    tf.logging.set_verbosity(tf.logging.INFO)
    args = parse_arguments()
    train_data, eval_data = read_ct_data(args.train_start, args.train_count,
                                         args.eval_start, args.eval_count)
    # Column 0 is the label; the remaining columns are the features.
    train_input_fn, feature_names, feature_columns = make_inputs_from_np_arrays(
        features_np=train_data[:, 1:], label_np=train_data[:, 0:1])
    eval_input_fn = make_eval_inputs_from_np_arrays(
        features_np=eval_data[:, 1:], label_np=eval_data[:, 0:1])
    print("Training starting...")
    classifier = tf.contrib.estimator.boosted_trees_classifier_train_in_memory(
        train_input_fn,
        feature_columns,
        model_dir=args.tf_model_dir,
        n_trees=args.n_trees,
        max_depth=args.max_depth,
        learning_rate=args.tf_learning_rate)
    print("Training Finished Successfully...")
    eval_results = classifier.evaluate(eval_input_fn)
    # Bug fix: the evaluation metrics were computed but never reported.
    print("Evaluation results: %s" % (eval_results,))
    print("Export saved model...")
    export_dir = args.tf_export_dir
    classifier.export_savedmodel(
        export_dir,
        _make_csv_serving_input_receiver_fn(
            column_names=feature_names,
            # columns are all floats.
            column_defaults=[[0.0]] * len(feature_names)),
        strip_default_attrs=True)
    print("Done exporting the model...")
if __name__ == '__main__':
    # tf.app.run parses TF flags and then invokes main(_) above.
    tf.app.run()
|
993,714 | 56f7087b1f9c7eadd4eda5b06d4cac7800d44685 | """
tests.py
Unit tests for toggl-cli.
Usage: python tests.py [CLASSNAME[.METHODNAME]]
NB: These tests add and modify entries in your toggl account. All of the
entries have the prefix given below, and they should all be removed after
the tests are complete.
"""
PREFIX = "unittest_"
import datetime
import unittest
import pytz
import time
import toggl
def desc(description):
    """Prefix *description* so entries created by the tests are
    identifiable (and removable) later."""
    return PREFIX + description
#----------------------------------------------------------------------------
# TestClientList
#----------------------------------------------------------------------------
class TestClientList(unittest.TestCase):
    """Exercises iteration over toggl.ClientList."""

    def setUp(self):
        self.list = toggl.ClientList()

    def test_iterator(self):
        # The iterator must visit every client exactly once.
        expected = len(self.list.client_list)
        seen = sum(1 for _ in self.list)
        self.assertEquals(seen, expected)
#----------------------------------------------------------------------------
# TestDateAndTime
#----------------------------------------------------------------------------
class TestDateAndTime(unittest.TestCase):
    """Tests the duration helpers on toggl.DateAndTime."""

    def setUp(self):
        self.dat = toggl.DateAndTime()

    def test_duration_since_epoch(self):
        # 1970-01-01 01:00 UTC is exactly one hour (3600 s) into the epoch.
        one_hour_in = datetime.datetime(1970, 1, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
        self.assertEquals(self.dat.duration_since_epoch(one_hour_in), 3600)

    def test_duration_str_to_seconds(self):
        # "S", "M:S" and "H:M:S" forms must all be accepted.
        for duration_str, seconds in (("1", 1), ("1:1", 61), ("1:1:1", 3661)):
            self.assertEquals(self.dat.duration_str_to_seconds(duration_str),
                              seconds)
#----------------------------------------------------------------------------
# TestProjectList
#----------------------------------------------------------------------------
class TestProjectList(unittest.TestCase):
    """Tests lookup and iteration on toggl.ProjectList."""

    def setUp(self):
        self.list = toggl.ProjectList()

    def test_iterator(self):
        # The iterator must visit every project exactly once.
        expected = len(self.list.project_list)
        seen = sum(1 for _ in self.list)
        self.assertEquals(seen, expected)

    def test_find_by_id(self):
        # Unknown ids yield None...
        self.assertIsNone(self.list.find_by_id(-1))
        # ...while a known id round-trips to the same project.
        known_id = self.list.project_list[0]['id']
        self.assertEquals(self.list.find_by_id(known_id)['id'], known_id)

    def test_find_by_name(self):
        # Unknown names yield None.
        self.assertIsNone(self.list.find_by_name('XYZ'))
        # Lookup works by prefix: the first three characters are enough.
        prefix = self.list.project_list[0]['name'][0:3]
        self.assertEquals(self.list.find_by_name(prefix)['name'][0:3], prefix)
#----------------------------------------------------------------------------
# TestTimeEntry
#----------------------------------------------------------------------------
class TestTimeEntry(unittest.TestCase):
    """Integration tests for toggl.TimeEntry.

    NOTE: these tests talk to the live toggl API. Every entry created here
    carries the unittest prefix (see desc()) and is removed again by
    tearDownModule().
    """

    def setUp(self):
        self.entry = toggl.TimeEntry()
        # force timezone to be UTC
        toggl.DateAndTime.tz = pytz.UTC

    def find_time_entry(self, description):
        # Helper: re-fetch the entry list from the server, then search it.
        return toggl.TimeEntryList().reload().find_by_description(description)

    def mock_time_time(self):
        """Mock time.time()"""
        # Fixed "current time" so negative-duration arithmetic is predictable.
        return 10

    def test_add(self):
        # time entry has no data, raises an exception
        self.assertRaises(Exception, self.entry.add)
        # create basic entry and add it
        start_time = toggl.DateAndTime().now()
        self.entry = toggl.TimeEntry(description=desc('add'),
            start_time=start_time, duration=10)
        self.entry.add()
        # make sure it shows up in the list
        entry = self.find_time_entry(desc('add'))
        self.assertIsNotNone(entry)
        self.assertEquals(entry.get('duration'), 10)

    def test_continue_from_today(self):
        # create a completed time today
        now = datetime.datetime.utcnow().isoformat()
        toggl.CLI()._add_time_entry([desc('continue2'), now, 'd1:0:0'])
        # find it
        entry = self.find_time_entry(desc('continue2'))
        self.assertIsNotNone(entry)
        # continue it
        entry.continue_entry()
        # find it again, this time, it should be running.
        # (a negative duration marks a currently-running entry)
        entry = self.find_time_entry(desc('continue2'))
        self.assertTrue(int(entry.get('duration')) < 0)

    def test_continue_from_yesterday(self):
        # create a completed time yesterday
        yesterday = (datetime.datetime.utcnow() - datetime.timedelta(days=1)).isoformat()
        toggl.CLI()._add_time_entry([desc('continue'), yesterday, 'd1:0:0'])
        # find it
        entry = self.find_time_entry(desc('continue'))
        self.assertIsNotNone(entry)
        # continue it
        entry.continue_entry()
        # find the new one; continuing a past entry creates a fresh entry,
        # so the durations must differ.
        entry2 = self.find_time_entry(desc('continue'))
        self.assertNotEqual(entry.get('duration'), entry2.get('duration'))

    def test_delete(self):
        # start a time entry
        self.entry = toggl.TimeEntry(description=desc('delete'))
        self.entry.start()
        # deleting an entry without an id is an error
        self.assertRaises(Exception, self.entry.delete)
        # make sure it shows up in the list, this also fetches the id
        entry = self.find_time_entry(desc('delete'))
        self.assertIsNotNone(entry)
        # delete it
        entry.delete()
        # make sure it no longer shows up in the list
        entry = self.find_time_entry(desc('delete'))
        self.assertIsNone(entry)

    def test_get(self):
        # test invalid property
        self.assertIsNone( self.entry.get('foobar') )
        # test valid property
        self.assertIsNotNone( self.entry.get('created_with') )

    def test_has(self):
        # test nonexistant property
        self.assertFalse( self.entry.has('foobar') )
        # test existing, but None property
        self.entry.set('foobar', None)
        self.assertFalse( self.entry.has('foobar') )
        # test existing, non-None property
        self.entry.set('foobar', True)
        self.assertTrue( self.entry.has('foobar') )

    def test_normalized_duration(self):
        # no duration set, raise an exception
        self.assertRaises(Exception, self.entry.normalized_duration)
        # positive duration
        self.entry.set('duration', 1)
        self.assertEquals( self.entry.normalized_duration(), 1 )
        # negative duration. mock time.time() for this test only.
        # (with time.time() pinned to 10, duration -1 normalizes to 10-1=9)
        self.entry.set('duration', -1)
        old_time = time.time
        time.time = self.mock_time_time
        self.assertEquals( self.entry.normalized_duration(), 9 )
        time.time = old_time

    def test_set(self):
        # basic test
        self.entry.set('foo', 'bar')
        self.assertEquals( self.entry.data['foo'], 'bar' )
        # setting a property to None removes it entirely
        self.entry.set('foo', None)
        self.assertFalse('foo' in self.entry.data)

    def test_start_simple(self):
        # test with simpliest entry
        self.entry = toggl.TimeEntry(description=desc('start'))
        self.entry.start()
        orig_start = self.entry.get('start')
        # fetch the entry from toggl and compare with what we created
        entry = self.find_time_entry(desc('start'))
        self.assertIsNotNone(entry)
        # round duration to nearest integer
        #self.assertEqual(entry.get('start'), orig_start)

    def test_start_complex(self):
        # test with preset start time one hour ago UTC
        one_hour_ago = pytz.UTC.localize(datetime.datetime.utcnow() - datetime.timedelta(hours=1))
        self.entry = toggl.TimeEntry(description=desc('start2'),
            start_time=one_hour_ago)
        self.entry.start()
        orig_duration = self.entry.get('duration')
        # see what toggl has
        entry = self.find_time_entry(desc('start2'))
        self.assertIsNotNone(entry)
        # toggl duration should be 1 hour
        self.assertGreaterEqual(entry.normalized_duration(), 3600)

    def test_stop_simple(self):
        # empty time entry raises an exception
        self.assertRaises(Exception, self.entry.stop)
        # non-running entry raises an exception
        self.entry.set('duration', 10)
        self.assertRaises(Exception, self.entry.stop)
        # missing an id raises an exception
        self.entry.set('duration', -10)
        self.assertRaises(Exception, self.entry.stop)
        # start an entry now
        self.entry = toggl.TimeEntry(description=desc('stop'))
        self.entry.start()
        # find it
        entry = self.find_time_entry(desc('stop'))
        self.assertIsNotNone(entry)
        # stop it
        entry.stop()
        # find it again
        entry = self.find_time_entry(desc('stop'))
        # make sure duration is positive. we can't be more specific because
        # we don't know the lag between us and toggl.
        self.assertGreaterEqual(entry.get('duration'), 0)

    def test_stop_complex(self):
        # start an entry now
        self.entry = toggl.TimeEntry(description=desc('stop2'))
        self.entry.start()
        # find it
        entry = self.find_time_entry(desc('stop2'))
        self.assertIsNotNone(entry)
        # stop it an hour from now
        one_hour_ahead = pytz.UTC.localize(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
        entry.stop(one_hour_ahead)
        # find it again
        entry = self.find_time_entry(desc('stop2'))
        self.assertIsNotNone(entry)
        # make sure duration is at least 1 hour (3600 seconds)
        self.assertGreaterEqual(entry.get('duration'), 3600)

    def test_validate(self):
        # entry must have 'start', 'duration', and 'description' properties.
        self.assertRaises(Exception, self.entry.validate)
        self.entry.set('start', 'start')
        self.assertRaises(Exception, self.entry.validate)
        self.entry.set('duration', 'duration')
        self.assertRaises(Exception, self.entry.validate)
        self.entry.set('description', 'description')
        self.assertTrue( self.entry.validate() )
#----------------------------------------------------------------------------
# TestTimeEntryList
#----------------------------------------------------------------------------
class TestTimeEntryList(unittest.TestCase):
    """Integration tests for toggl.TimeEntryList (talks to the live API)."""

    def setUp(self):
        self.list = toggl.TimeEntryList()

    def test_find_by_description(self):
        toggl.CLI()._start_time_entry([desc('find_by_description')])
        self.list.reload()
        # A description that was never created comes back as None.
        self.assertIsNone(self.list.find_by_description('foobar'))
        # Otherwise the entry with the matching description is returned.
        entry1 = self.list.find_by_description(desc('find_by_description'))
        self.assertEquals(entry1.get('description'),
                          desc('find_by_description'))
        # Start a second entry with the same description.
        toggl.CLI()._start_time_entry([desc('find_by_description')])
        self.list.reload()
        # Searching should return the newer of the two entries.
        entry2 = self.list.find_by_description(desc('find_by_description'))
        #self.assertNotEquals( entry1.get('start'), entry2.get('start') )

    def test_iterator(self):
        # The iterator must visit every entry exactly once.
        expected = len(self.list.time_entries)
        seen = sum(1 for _ in self.list)
        self.assertEquals(seen, expected)

    def test_now(self):
        # With nothing running, now() returns None.
        toggl.CLI()._stop_time_entry([])
        self.list.reload()
        self.assertIsNone(self.list.now())
        # With a running entry, now() returns it.
        toggl.CLI()._start_time_entry([desc('now')])
        self.list.reload()
        current = self.list.now()
        self.assertIsNotNone(current)
        self.assertEquals(current.get('description'), desc('now'))
        current.stop()
def tearDownModule():
    """
    Cleans up toggl with all the unittest entries we just created. This
    relies on TimeEntryList and TimeEntry.delete.
    """
    print("Removing toggl entries created by the test...")
    for entry in toggl.TimeEntryList():
        description = entry.get('description')
        # Consistency fix: match on the PREFIX constant instead of the
        # duplicated literal 'unittest_', so the two cannot drift apart.
        if description is not None and description.startswith(PREFIX):
            entry.delete()
if __name__ == '__main__':
    toggl.CLI() # this initializes Logger to INFO
    #toggl.Logger.level = toggl.Logger.DEBUG
    # Silence toggl's logging for the duration of the test run.
    toggl.Logger.level = toggl.Logger.NONE
    unittest.main()
|
993,715 | 98a9c8deb9dc5e929055ecf9ecacfbde96cce1b5 | from rest_framework import serializers
from django.contrib.auth.models import User
class RegisterSerializer(serializers.ModelSerializer):
    """Serializer used to register a new Django ``User``."""

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password')
        # Never echo the password back in API responses.
        extra_kwargs = {'password': {'write_only': True}}

    def validate(self, attrs):
        """Validate the incoming attributes.

        NOTE(review): the original read ``email`` and ``username`` into
        unused locals and performed no checks; only the ModelSerializer
        field validation applies. The dead locals were removed.
        """
        return attrs

    def create(self, validated_data, *args):
        """Create the user via ``create_user`` so the password is hashed."""
        user = User.objects.create_user(**validated_data)
        return user
|
993,716 | 15df1f144c3146cf63c792c2f107c700128b315b | #coding=utf-8
from b2.stop2 import StopWords
# NOTE(review): "__ALL__" is not the standard "__all__" spelling, so star
# imports ignore it and export everything public.
__ALL__ = [ "get_url_site"]
# Recognized URL protocol prefixes; StopWords.startswith() is used below to
# detect and strip them.
_url_protocls = StopWords(words = ["http://" , "https://"] )
def get_url_site(url):
    """Extract the site (host) part of a link, mainly by splitting on the
    first "/".

    params
        url: the link string
    return
        The site string on success; None when *url* is empty/falsy.

    Test:
    >>> get_url_site("http://www.test.com/index.php")
    >>> get_url_site("123.12.21.0:81/look")
    """
    if url:
        url = url.lower()
        # startswith() presumably returns (matched_prefix_length, message);
        # TODO confirm against b2.stop2.StopWords.
        value , msg = _url_protocls.startswith(url)
        # Strip the matched protocol prefix (if any), then keep everything
        # before the first "/".
        url = url[value:] if value else url
        return url.split("/")[0]
    return None
def join_url(site , url ,protocl = "http://"):
    """Complete *url* against *site*.

    A relative url (one with no site of its own) is joined onto *site*
    with *protocl*; a url that already carries a protocol is returned
    unchanged; otherwise only the protocol is prepended.
    """
    url_site = get_url_site(url)
    if url_site == "":
        # Relative path such as "/index.html": prepend protocol and site.
        return "{protocl}{site}{url}".format(protocl = protocl ,site = site , url = url)
    value , msg = _url_protocls.startswith(url)
    if value:
        # Already an absolute URL with a protocol.
        return url
    # Bug fix: the original formatted with protocl=value (the startswith
    # result) instead of the protocl argument, yielding strings like
    # "0www.example.com". Also removed the unreachable trailing return.
    return "{protocl}{url}".format(protocl = protocl , url = url)
class TranslateLink(object):
    # NOTE(review): unimplemented stub -- the constructor accepts *rules*
    # but discards them; no translation behavior exists yet.
    def __init__(self , rules ):
        pass
|
993,717 | d5f72ea9644fd225062b999506ddeb158ca16ffd | #!/usr/bin/python2.7
#from OpenSSL import SSL
#context = SSL.Context(SSL.SSLv23_METHOD)
#context.use_privatekey_file('server.key')
#context.use_certificate_file('server.crt')
from app import app
#app.run(debug = True,ssl_context = context)
app.run(debug = True)
|
993,718 | bc0325b21b418e5f4bd16cc531d9c0683600117f | a=int(input("Introduce el lado a: "))
b=int(input("Introduce el lado b: "))
c=int(input("Introduce el lado c: "))
if a!=b and a!=c and b!=c:
print("Es escaleno")
else:
print("No es escaleno")
|
993,719 | a76b17d7e48a3b997eba01b485e7ce3e91613dbc | # This is a demo of the main bci system. It will run the task defined here
# using the parameters file passed to it.
def main():
    """Run the default RSVP Calibration task through bci_main using the
    stock parameters file and a demo user."""
    import bci_main
    from bcipy.helpers.load import load_json_parameters
    from bcipy.tasks.task_registry import TaskType
    from bcipy.helpers.parameters import DEFAULT_PARAMETERS_PATH

    # Stock parameters file shipped with bcipy.
    parameters = DEFAULT_PARAMETERS_PATH
    # Task. Ex. `RSVP Calibration`
    task = TaskType.by_value('RSVP Calibration')
    # Experiment. Use the default registered experiment!
    experiment = 'default'
    # Define a user
    user = 'bci_main_demo_user'

    # Try and initialize with bci main; report (not re-raise) any failure.
    try:
        bci_main.bci_main(parameters, user, task, experiment)
    except Exception as e:
        print("BCI MAIN Fail. Exiting. Error: \n")
        print(e)
if __name__ == "__main__":
    # Run the demo only when executed directly.
    main()
|
993,720 | 916851a71d0aa43de5ea3229a1b1a2198944d451 | from django.db import models
PROJECT_IMAGES_PATH = 'media'
class Project(models.Model):
    # Human-readable project title; required (neither null nor blank).
    project_name = models.TextField(null=False,blank=False)
class Technology(models.Model):
    # Name of a technology used by projects; required (neither null nor blank).
    technology_name = models.TextField(null=False,blank=False)
# class ProjectImages(models.Model):
# cover = models.ImageField("Image",upload_to=PROJECT_IMAGES_PATH,max_length=500, default=None)
# obj = models.ForeignKey(Project)
class ProjectImages(models.Model):
    # Optional caption for the uploaded file.
    description = models.CharField(max_length=255, blank=True)
    # NOTE(review): declared as FileField although the name suggests images
    # (an earlier commented-out variant used ImageField) -- confirm intent.
    images = models.FileField(upload_to=PROJECT_IMAGES_PATH,blank=True)
    # NOTE(review): no on_delete argument -- required from Django 2.0 onward.
    project = models.ForeignKey(Project)
993,721 | 20ce1f1be3e0d8fb16b279ba47a6957d08e286fb | import numpy as np
import matplotlib.pyplot as plt; plt.ion()
import netgraph
# Construct sparse, directed, weighted graph
# with positive and negative edges:
total_nodes = 20
weights = np.random.randn(total_nodes, total_nodes)
connection_probability = 0.2
is_connected = np.random.rand(total_nodes, total_nodes) <= connection_probability
graph = np.zeros((total_nodes, total_nodes))
graph[is_connected] = weights[is_connected]
# Make a standard plot:
netgraph.draw(graph, node_shape='^')
|
993,722 | 3fbcd3d12586a4d642d6f5f1b3be010e0cf820c6 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: RpcHeader.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='RpcHeader.proto',
package='hadoop.common',
syntax='proto2',
serialized_pb=_b('\n\x0fRpcHeader.proto\x12\rhadoop.common\"\xa2\x02\n\x15RpcRequestHeaderProto\x12,\n\x07rpcKind\x18\x01 \x01(\x0e\x32\x1b.hadoop.common.RpcKindProto\x12\x42\n\x05rpcOp\x18\x02 \x01(\x0e\x32\x33.hadoop.common.RpcRequestHeaderProto.OperationProto\x12\x0e\n\x06\x63\x61llId\x18\x03 \x02(\x11\x12\x10\n\x08\x63lientId\x18\x04 \x02(\x0c\x12\x16\n\nretryCount\x18\x05 \x01(\x11:\x02-1\"]\n\x0eOperationProto\x12\x14\n\x10RPC_FINAL_PACKET\x10\x00\x12\x1b\n\x17RPC_CONTINUATION_PACKET\x10\x01\x12\x18\n\x14RPC_CLOSE_CONNECTION\x10\x02\"\xca\x05\n\x16RpcResponseHeaderProto\x12\x0e\n\x06\x63\x61llId\x18\x01 \x02(\r\x12\x44\n\x06status\x18\x02 \x02(\x0e\x32\x34.hadoop.common.RpcResponseHeaderProto.RpcStatusProto\x12\x1b\n\x13serverIpcVersionNum\x18\x03 \x01(\r\x12\x1a\n\x12\x65xceptionClassName\x18\x04 \x01(\t\x12\x10\n\x08\x65rrorMsg\x18\x05 \x01(\t\x12L\n\x0b\x65rrorDetail\x18\x06 \x01(\x0e\x32\x37.hadoop.common.RpcResponseHeaderProto.RpcErrorCodeProto\x12\x10\n\x08\x63lientId\x18\x07 \x01(\x0c\x12\x16\n\nretryCount\x18\x08 \x01(\x11:\x02-1\"3\n\x0eRpcStatusProto\x12\x0b\n\x07SUCCESS\x10\x00\x12\t\n\x05\x45RROR\x10\x01\x12\t\n\x05\x46\x41TAL\x10\x02\"\xe1\x02\n\x11RpcErrorCodeProto\x12\x15\n\x11\x45RROR_APPLICATION\x10\x01\x12\x18\n\x14\x45RROR_NO_SUCH_METHOD\x10\x02\x12\x1a\n\x16\x45RROR_NO_SUCH_PROTOCOL\x10\x03\x12\x14\n\x10\x45RROR_RPC_SERVER\x10\x04\x12\x1e\n\x1a\x45RROR_SERIALIZING_RESPONSE\x10\x05\x12\x1e\n\x1a\x45RROR_RPC_VERSION_MISMATCH\x10\x06\x12\x11\n\rFATAL_UNKNOWN\x10\n\x12#\n\x1f\x46\x41TAL_UNSUPPORTED_SERIALIZATION\x10\x0b\x12\x1c\n\x18\x46\x41TAL_INVALID_RPC_HEADER\x10\x0c\x12\x1f\n\x1b\x46\x41TAL_DESERIALIZING_REQUEST\x10\r\x12\x1a\n\x16\x46\x41TAL_VERSION_MISMATCH\x10\x0e\x12\x16\n\x12\x46\x41TAL_UNAUTHORIZED\x10\x0f\"\xdd\x02\n\x0cRpcSaslProto\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x34\n\x05state\x18\x02 \x02(\x0e\x32%.hadoop.common.RpcSaslProto.SaslState\x12\r\n\x05token\x18\x03 \x01(\x0c\x12\x33\n\x05\x61uths\x18\x04 
\x03(\x0b\x32$.hadoop.common.RpcSaslProto.SaslAuth\x1a\x64\n\x08SaslAuth\x12\x0e\n\x06method\x18\x01 \x02(\t\x12\x11\n\tmechanism\x18\x02 \x02(\t\x12\x10\n\x08protocol\x18\x03 \x01(\t\x12\x10\n\x08serverId\x18\x04 \x01(\t\x12\x11\n\tchallenge\x18\x05 \x01(\x0c\"\\\n\tSaslState\x12\x0b\n\x07SUCCESS\x10\x00\x12\r\n\tNEGOTIATE\x10\x01\x12\x0c\n\x08INITIATE\x10\x02\x12\r\n\tCHALLENGE\x10\x03\x12\x0c\n\x08RESPONSE\x10\x04\x12\x08\n\x04WRAP\x10\x05*J\n\x0cRpcKindProto\x12\x0f\n\x0bRPC_BUILTIN\x10\x00\x12\x10\n\x0cRPC_WRITABLE\x10\x01\x12\x17\n\x13RPC_PROTOCOL_BUFFER\x10\x02\x42\x34\n\x1eorg.apache.hadoop.ipc.protobufB\x0fRpcHeaderProtos\xa0\x01\x01')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_RPCKINDPROTO = _descriptor.EnumDescriptor(
name='RpcKindProto',
full_name='hadoop.common.RpcKindProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RPC_BUILTIN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_WRITABLE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_PROTOCOL_BUFFER', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1396,
serialized_end=1470,
)
_sym_db.RegisterEnumDescriptor(_RPCKINDPROTO)
RpcKindProto = enum_type_wrapper.EnumTypeWrapper(_RPCKINDPROTO)
RPC_BUILTIN = 0
RPC_WRITABLE = 1
RPC_PROTOCOL_BUFFER = 2
_RPCREQUESTHEADERPROTO_OPERATIONPROTO = _descriptor.EnumDescriptor(
name='OperationProto',
full_name='hadoop.common.RpcRequestHeaderProto.OperationProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RPC_FINAL_PACKET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_CONTINUATION_PACKET', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RPC_CLOSE_CONNECTION', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=232,
serialized_end=325,
)
_sym_db.RegisterEnumDescriptor(_RPCREQUESTHEADERPROTO_OPERATIONPROTO)
_RPCRESPONSEHEADERPROTO_RPCSTATUSPROTO = _descriptor.EnumDescriptor(
name='RpcStatusProto',
full_name='hadoop.common.RpcResponseHeaderProto.RpcStatusProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=635,
serialized_end=686,
)
_sym_db.RegisterEnumDescriptor(_RPCRESPONSEHEADERPROTO_RPCSTATUSPROTO)
_RPCRESPONSEHEADERPROTO_RPCERRORCODEPROTO = _descriptor.EnumDescriptor(
name='RpcErrorCodeProto',
full_name='hadoop.common.RpcResponseHeaderProto.RpcErrorCodeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ERROR_APPLICATION', index=0, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_NO_SUCH_METHOD', index=1, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_NO_SUCH_PROTOCOL', index=2, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_RPC_SERVER', index=3, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_SERIALIZING_RESPONSE', index=4, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_RPC_VERSION_MISMATCH', index=5, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_UNKNOWN', index=6, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_UNSUPPORTED_SERIALIZATION', index=7, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_INVALID_RPC_HEADER', index=8, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_DESERIALIZING_REQUEST', index=9, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_VERSION_MISMATCH', index=10, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FATAL_UNAUTHORIZED', index=11, number=15,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=689,
serialized_end=1042,
)
_sym_db.RegisterEnumDescriptor(_RPCRESPONSEHEADERPROTO_RPCERRORCODEPROTO)
_RPCSASLPROTO_SASLSTATE = _descriptor.EnumDescriptor(
name='SaslState',
full_name='hadoop.common.RpcSaslProto.SaslState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEGOTIATE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INITIATE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHALLENGE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESPONSE', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRAP', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1302,
serialized_end=1394,
)
_sym_db.RegisterEnumDescriptor(_RPCSASLPROTO_SASLSTATE)
_RPCREQUESTHEADERPROTO = _descriptor.Descriptor(
name='RpcRequestHeaderProto',
full_name='hadoop.common.RpcRequestHeaderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='rpcKind', full_name='hadoop.common.RpcRequestHeaderProto.rpcKind', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rpcOp', full_name='hadoop.common.RpcRequestHeaderProto.rpcOp', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='callId', full_name='hadoop.common.RpcRequestHeaderProto.callId', index=2,
number=3, type=17, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clientId', full_name='hadoop.common.RpcRequestHeaderProto.clientId', index=3,
number=4, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='retryCount', full_name='hadoop.common.RpcRequestHeaderProto.retryCount', index=4,
number=5, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RPCREQUESTHEADERPROTO_OPERATIONPROTO,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=35,
serialized_end=325,
)
_RPCRESPONSEHEADERPROTO = _descriptor.Descriptor(
name='RpcResponseHeaderProto',
full_name='hadoop.common.RpcResponseHeaderProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='callId', full_name='hadoop.common.RpcResponseHeaderProto.callId', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='status', full_name='hadoop.common.RpcResponseHeaderProto.status', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='serverIpcVersionNum', full_name='hadoop.common.RpcResponseHeaderProto.serverIpcVersionNum', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='exceptionClassName', full_name='hadoop.common.RpcResponseHeaderProto.exceptionClassName', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='errorMsg', full_name='hadoop.common.RpcResponseHeaderProto.errorMsg', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='errorDetail', full_name='hadoop.common.RpcResponseHeaderProto.errorDetail', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clientId', full_name='hadoop.common.RpcResponseHeaderProto.clientId', index=6,
number=7, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='retryCount', full_name='hadoop.common.RpcResponseHeaderProto.retryCount', index=7,
number=8, type=17, cpp_type=1, label=1,
has_default_value=True, default_value=-1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RPCRESPONSEHEADERPROTO_RPCSTATUSPROTO,
_RPCRESPONSEHEADERPROTO_RPCERRORCODEPROTO,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=328,
serialized_end=1042,
)
# NOTE: protoc-generated descriptor code (RpcHeader_pb2 for hadoop.common).
# Do not edit by hand — regenerate from the .proto file instead.
_RPCSASLPROTO_SASLAUTH = _descriptor.Descriptor(
  name='SaslAuth',
  full_name='hadoop.common.RpcSaslProto.SaslAuth',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='method', full_name='hadoop.common.RpcSaslProto.SaslAuth.method', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='mechanism', full_name='hadoop.common.RpcSaslProto.SaslAuth.mechanism', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='protocol', full_name='hadoop.common.RpcSaslProto.SaslAuth.protocol', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='serverId', full_name='hadoop.common.RpcSaslProto.SaslAuth.serverId', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='challenge', full_name='hadoop.common.RpcSaslProto.SaslAuth.challenge', index=4,
      number=5, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1200,
  serialized_end=1300,
)

_RPCSASLPROTO = _descriptor.Descriptor(
  name='RpcSaslProto',
  full_name='hadoop.common.RpcSaslProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='version', full_name='hadoop.common.RpcSaslProto.version', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='state', full_name='hadoop.common.RpcSaslProto.state', index=1,
      number=2, type=14, cpp_type=8, label=2,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='token', full_name='hadoop.common.RpcSaslProto.token', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='auths', full_name='hadoop.common.RpcSaslProto.auths', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_RPCSASLPROTO_SASLAUTH, ],
  enum_types=[
    _RPCSASLPROTO_SASLSTATE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto2',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1045,
  serialized_end=1394,
)

# Cross-link field/enum descriptors and register the messages with the
# symbol database (standard protoc boilerplate).
_RPCREQUESTHEADERPROTO.fields_by_name['rpcKind'].enum_type = _RPCKINDPROTO
_RPCREQUESTHEADERPROTO.fields_by_name['rpcOp'].enum_type = _RPCREQUESTHEADERPROTO_OPERATIONPROTO
_RPCREQUESTHEADERPROTO_OPERATIONPROTO.containing_type = _RPCREQUESTHEADERPROTO
_RPCRESPONSEHEADERPROTO.fields_by_name['status'].enum_type = _RPCRESPONSEHEADERPROTO_RPCSTATUSPROTO
_RPCRESPONSEHEADERPROTO.fields_by_name['errorDetail'].enum_type = _RPCRESPONSEHEADERPROTO_RPCERRORCODEPROTO
_RPCRESPONSEHEADERPROTO_RPCSTATUSPROTO.containing_type = _RPCRESPONSEHEADERPROTO
_RPCRESPONSEHEADERPROTO_RPCERRORCODEPROTO.containing_type = _RPCRESPONSEHEADERPROTO
_RPCSASLPROTO_SASLAUTH.containing_type = _RPCSASLPROTO
_RPCSASLPROTO.fields_by_name['state'].enum_type = _RPCSASLPROTO_SASLSTATE
_RPCSASLPROTO.fields_by_name['auths'].message_type = _RPCSASLPROTO_SASLAUTH
_RPCSASLPROTO_SASLSTATE.containing_type = _RPCSASLPROTO
DESCRIPTOR.message_types_by_name['RpcRequestHeaderProto'] = _RPCREQUESTHEADERPROTO
DESCRIPTOR.message_types_by_name['RpcResponseHeaderProto'] = _RPCRESPONSEHEADERPROTO
DESCRIPTOR.message_types_by_name['RpcSaslProto'] = _RPCSASLPROTO
DESCRIPTOR.enum_types_by_name['RpcKindProto'] = _RPCKINDPROTO

RpcRequestHeaderProto = _reflection.GeneratedProtocolMessageType('RpcRequestHeaderProto', (_message.Message,), dict(
  DESCRIPTOR = _RPCREQUESTHEADERPROTO,
  __module__ = 'RpcHeader_pb2'
  # @@protoc_insertion_point(class_scope:hadoop.common.RpcRequestHeaderProto)
  ))
_sym_db.RegisterMessage(RpcRequestHeaderProto)

RpcResponseHeaderProto = _reflection.GeneratedProtocolMessageType('RpcResponseHeaderProto', (_message.Message,), dict(
  DESCRIPTOR = _RPCRESPONSEHEADERPROTO,
  __module__ = 'RpcHeader_pb2'
  # @@protoc_insertion_point(class_scope:hadoop.common.RpcResponseHeaderProto)
  ))
_sym_db.RegisterMessage(RpcResponseHeaderProto)

RpcSaslProto = _reflection.GeneratedProtocolMessageType('RpcSaslProto', (_message.Message,), dict(
  SaslAuth = _reflection.GeneratedProtocolMessageType('SaslAuth', (_message.Message,), dict(
    DESCRIPTOR = _RPCSASLPROTO_SASLAUTH,
    __module__ = 'RpcHeader_pb2'
    # @@protoc_insertion_point(class_scope:hadoop.common.RpcSaslProto.SaslAuth)
    ))
  ,
  DESCRIPTOR = _RPCSASLPROTO,
  __module__ = 'RpcHeader_pb2'
  # @@protoc_insertion_point(class_scope:hadoop.common.RpcSaslProto)
  ))
_sym_db.RegisterMessage(RpcSaslProto)
_sym_db.RegisterMessage(RpcSaslProto.SaslAuth)

DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\036org.apache.hadoop.ipc.protobufB\017RpcHeaderProtos\240\001\001'))
# @@protoc_insertion_point(module_scope)
# @@protoc_insertion_point(module_scope)
|
993,723 | b6f71b4e021f7448bf85c4a93fc040eb1a2a7c04 | from random import randint, choice
class CanAdd:
    """Mixin providing addition questions."""

    def addition(self, difficulty=0):
        """
        Create an addition question.

        :param difficulty: 0 (easy), 1 (medium), anything else (hard)
        :return: [question, answer]
        """
        if difficulty == 0:
            low, high = 0, 20
        elif difficulty == 1:
            low, high = 9, 50
        else:
            low, high = 15, 100
        first, second = randint(low, high), randint(low, high)
        return [f"What is {first} plus {second} ?", first + second]
class CanSubtract:
    """Mixin providing subtraction questions with non-negative answers."""

    def subtraction(self, difficulty=0):
        """
        Create a subtraction question whose answer is never negative.

        Fix: the original re-rolled both operands in an unbounded retry
        loop until num1 >= num2; drawing once and ordering the operands
        yields the same distribution without the retries.

        :param difficulty: 0 (easy), 1 (medium), anything else (hard)
        :return: [question, answer]
        """
        if difficulty == 0:
            low, high = 0, 20
        elif difficulty == 1:
            low, high = 9, 50
        else:
            low, high = 15, 100
        # Order the two draws so the difference is always >= 0.
        num1, num2 = sorted((randint(low, high), randint(low, high)), reverse=True)
        answer = num1 - num2
        question = f"What is {num1} minus {num2} ?"
        return [question, answer]
class CanDivide:
    """Mixin providing integer division questions."""

    def division(self, difficulty=0):
        """
        Create a division question with an exact integer answer.

        Re-rolls until the dividend is a multiple of the divisor.

        :param difficulty: 0 (easy), 1 (medium), anything else (hard)
        :return: [question, answer]
        """
        while True:
            if difficulty == 0:
                dividend = randint(6, 20)
            elif difficulty == 1:
                dividend = randint(15, 50)
            else:
                dividend = randint(20, 100)
            divisor = randint(2, int(dividend / 2))
            if dividend % divisor == 0:
                break
        question = f"What is {dividend} divided by {divisor} ?"
        return [question, dividend // divisor]
class CanMultiply:
    """Mixin providing multiplication questions."""

    def multiplication(self, difficulty=0):
        """
        Create a multiplication question.

        :param difficulty: 0 (easy), 1 (medium), anything else (hard)
        :return: [question, answer]
        """
        if difficulty == 0:
            bounds = (3, 9)
        elif difficulty == 1:
            bounds = (3, 14)
        else:
            bounds = (4, 20)
        left, right = randint(*bounds), randint(*bounds)
        return [f"What is {left} times {right} ?", left * right]
class CanSquare:
    """Mixin providing exponentiation (square/cube) questions."""

    def square(self, difficulty=0):
        """
        Create an exponent question (squares on easy/medium, squares or
        cubes on hard).

        :param difficulty: 0 (easy), 1 (medium), anything else (hard)
        :return: [question, answer]
        """
        if difficulty == 0:
            base, exponent = randint(2, 5), 2
        elif difficulty == 1:
            base, exponent = randint(2, 10), 2
        else:
            base, exponent = randint(2, 13), randint(2, 3)
        word = "squared" if exponent == 2 else "cubed"
        question = f"What is {base} {word} ?"
        return [question, base ** exponent]
class CanRoot:
    """Mixin providing square/cube root questions."""

    def __integer_root(self, num, degree):
        # Fix: the old int(num ** (1/3) + 0.1) truncation hack breaks as
        # soon as floating-point error exceeds 0.1; round() is exact for
        # perfect powers of any size.
        return round(num ** (1.0 / degree))

    def root(self, difficulty=0):
        """
        Create a radical (root) question.

        :param difficulty: 0/1 -> square roots, anything else -> cube roots
        :return: [question, answer]
        """
        squares = [4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225]
        cubes = [8, 27, 64, 125, 216, 343]
        if difficulty == 0:
            num, degree = choice(squares[:10]), 2
        elif difficulty == 1:
            num, degree = choice(squares[5:]), 2
        else:
            num, degree = choice(cubes), 3
        root_name = "square" if degree == 2 else "cube"
        answer = self.__integer_root(num, degree)
        question = f"What is the {root_name} root of {num} ?"
        return [question, answer]
# Manual smoke test: print one hard (cube-root) question/answer pair.
if __name__ == "__main__":
    r = CanRoot()
    print(r.root(3))
993,724 | 189d7a99a004a23227af1de4d078f16694d471f0 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
class ActionList(models.Model):
    """An action/contract with its quality threshold, prize and fine."""

    # Unique human-readable name of the action.
    action = models.CharField(max_length=255, unique=True)
    # Contractual defect rate, percent in [0, 100].
    quality = models.FloatField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=0,
        verbose_name=u'Процент брака по контракту'
    )
    # Prize amount (non-negative).
    prize = models.FloatField(
        verbose_name=u'Премия',
        validators=[MinValueValidator(0), ],
        default=0
    )
    # Fine amount (non-negative).
    fine = models.FloatField(
        verbose_name=u'Штраф',
        validators=[MinValueValidator(0), ],
        default=0
    )

    def __str__(self):
        return self.action
class EventList(models.Model):
    """An event with its defect percentage and probability of occurring."""

    class Meta:
        # No two events may share the same (percent, probability) pair.
        unique_together = ('percent', 'probability')

    event = models.CharField(max_length=255)
    # Defect rate for this event, percent in [0, 100].
    percent = models.FloatField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=0,
        verbose_name=u'Процент брака по делу'
    )
    # Probability of the event, in [0, 1].
    probability = models.FloatField(
        validators=[
            MinValueValidator(0),
            MaxValueValidator(1)],
        default=1
    )

    def __str__(self):
        return self.event
class ConditionalProfit(models.Model):
    """Expected (conditional) profit of a particular (action, event) pair."""

    # Fix: on_delete is mandatory on Django >= 2.0 (and a keyword argument
    # since long before); CASCADE removes derived profit rows together with
    # their action/event.
    action = models.ForeignKey(ActionList, on_delete=models.CASCADE)
    event = models.ForeignKey(EventList, on_delete=models.CASCADE)
    probability = models.FloatField(default=0)
    # Contractual defect rate, percent in [0, 100] (mirrors ActionList.quality).
    quality = models.FloatField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=0,
        verbose_name=u'Процент брака по контракту'
    )
    # Event defect rate, percent in [0, 100] (mirrors EventList.percent).
    percent = models.FloatField(
        validators=[MinValueValidator(0), MaxValueValidator(100)],
        default=0,
        verbose_name=u'Процент брака по делу'
    )
    # Fix: corrected typo in the user-facing label ('Прибль' -> 'Прибыль').
    conditionalProfit = models.FloatField(default=0, verbose_name=u'Прибыль в $')
|
993,725 | 2dd3ebf033277062b231ff8f6e9c93552721d5d6 | from pyplasm import *
import numpy
"""
roundCoordinates is a function that given a list of vertices, round the coordinates of every vertex,
if the vertex has a coordinate smaller than 0.001 it will be rounded to 0, alternatively it will
be rounded to the first decimal.
@param vertsList: a list containing vertices
"""
def roundCoordinates(vertsList):
    """
    Round, in place, every coordinate of every vertex in vertsList.

    Coordinates with absolute value below 0.001 are snapped to 0; all
    other coordinates are rounded to one decimal place.

    :param vertsList: list of vertices (each a mutable list of numbers)
    """
    for vertex in vertsList:
        for axis, value in enumerate(vertex):
            vertex[axis] = 0 if abs(value) < 0.001 else round(value, 1)
"""
point2Cells is a function that given the result of UKPOL() function, build a dictionary in which every key
represent a vertex, and every value represent the list of incident faces on the vertex.
@param listUkpol: result of UKPOL() function call
@return dictionary: dictionary containing keys and values as described above (vertex:[faces])
"""
def point2Cells(listUkpol):
    """
    Map every vertex (stringified, after rounding) to the list of labels
    of the convex cells incident on it.

    :param listUkpol: result of a UKPOL() call: [verts, cells, ...]
    :return: dict of str(vertex) -> list of incident cell labels
    """
    verts, cells = listUkpol[0], listUkpol[1]
    roundCoordinates(verts)
    incidence = {}
    for cell in cells:
        for label in cell:
            key = str(verts[int(label) - 1])
            incidence.setdefault(key, []).append(label)
    return incidence
"""
planarSurfaces is a function that given a list of vertices and the corrisponding
list of convex cells, check the complanarity of the vertices composing every convex cells.
@see https://en.wikipedia.org/wiki/Coplanarity: for better understanding about the resolution method adopted
@param verts: list of vertices
@param cells: list of convex cells, according to the list of vertices passed as first argument
@return Boolean: True if every face is composed by coplanar vertices, False otherwise
"""
def planarSurfaces(verts, cells):
    """
    Check that every convex cell consists of coplanar vertices.

    For each cell with more than three vertices, builds the matrix of
    differences between each vertex and the cell's last vertex; the points
    are coplanar iff that matrix has rank <= 2.

    :param verts: list of vertices (rounded in place as a side effect)
    :param cells: list of convex cells (1-based labels into verts)
    :return: True if every face is planar, False otherwise
    """
    #rounding coordinates
    roundCoordinates(verts)
    for cell in cells:
        # Any three points are coplanar, so triangles are skipped.
        if(len(cell) > 3):
            #building matrix
            matrix = []
            lastpoint = cell[-1]
            for label in cell:
                point = verts[int(label)-1]
                row = []
                for i in range(len(point)):
                    # Difference vector from the cell's last vertex.
                    row.append(point[i]-verts[lastpoint-1][i])
                matrix.append(row)
            #calculating matrix rank
            A = numpy.matrix(matrix)
            dim = numpy.linalg.matrix_rank(A)
            #the points are coplanar if the matrix has rank 2 or less
            if(dim > 2):
                return False
    return True
"""
removeBaseCells is a function that, given a list of vertices and the corresponding list of convex
cells, removes the cells in which every vertex, that compose the convex cells, has a Z-coordinate of value 0.
This function is used to create an opened roof on the bottom side.
@param verts: list of vertices
@param cells: list of convex cells, according to the list of vertices passed as first argument
@return cleaned: list of convex cells without the cells described above
"""
def removeBaseCells(cells, verts):
    """
    Return only the cells that are NOT base cells, i.e. cells having at
    least one vertex with a non-zero Z coordinate. Used to leave the roof
    open at the bottom.

    Fix: the original iterated `range(len(cells) - 1)`, which silently
    dropped the LAST cell regardless of whether it was a base cell,
    contradicting the documented behavior. All cells are now examined.

    :param cells: list of convex cells (1-based vertex labels)
    :param verts: list of vertices, indexed by label - 1
    :return: filtered list of cells
    """
    return [
        cell for cell in cells
        if any(verts[label - 1][2] != 0 for label in cell)
    ]
"""
ggpl_L_and_U_roof_builder is a function that given a list of vertices and the corrisponding list of convex
cells, build an HPC model of a L/U roof and of its beam structure.
@param verts: list of vertices
@param cells: list of convex cells, according to the list of vertices passed as first argument
@return HPCmodel: the HPC model of the roof and its beam structure
"""
def ggpl_L_and_U_roof_builder(verts, cells):
    """
    Build an HPC model of an L- or U-shaped roof plus its beam skeleton.

    :param verts: list of vertices
    :param cells: list of convex cells (1-based labels into verts)
    :return: HPC model (roof + beams), or None if any face is non-planar
    """
    # Refuse to build anything if some face is not planar.
    if(not planarSurfaces(verts, cells)):
        return None
    #roofModel used to construct the beam structure
    roofModel = MKPOL([verts,cells, None])
    #cleaning the cells
    cells = removeBaseCells(cells,verts)
    #building top roof
    roof = MKPOL([verts,cells,None])
    # Thicken the surface and lift it by the same amount so the beam
    # skeleton below remains visible.
    roof = OFFSET([.1,.1,.1])(roof)
    roof = T([3])([.1])(roof)
    roof = COLOR(Color4f([1/255., 61/255., 31/255.,1]))(roof)
    #building beam structure
    beams = OFFSET([.1,.1,.1])(SKEL_1(roofModel))
    beams = S([3])(.95)(beams)
    beams = COLOR(Color4f([132/255., 54/255., 9/255.,1]))(beams)
    #returning the result roof+beams
    return STRUCT([roof,beams])
#verts = [[0,0,0],[0,3,0],[6,3,0],[6,9,0],[9,9,0],[9,0,0],[1.5,1.5,2],[7.5,1.5,2],[7.5,7.5,2]]
#cells = [[1,7,2],[2,7,8,3],[3,8,9,4],[4,9,5],[8,6,5,9],[1,6,8,7],[1,6,3,2],[3,6,5,4]]
# Demo: U-shaped roof — the first 8 vertices form the z=0 footprint,
# the last 4 the raised ridge line.
verts = [[0,0,0], [0,10,0],[6,10,0],[6,8,0],[3,8,0],[3,4,0],[6,4,0],[6,0,0],[5,2,2],[1,2,2],[1,9,2],[5,9,2]]
cells = [[9,8,7],[7,6,10,9],[9,10,1,8],[1,10,11,2],[2,11,12,3],[3,12,4],[4,12,11,5],[5,11,10,6],[8,7,6,1],[1,6,5,2],[2,5,4,3]]
#verts = [[0,0,0],[6,0,0],[6,-12,0],[3,-12,0],[3,-3,0],[0,-3,0],[1.5,-1.5,3],[4.5,-1.5,3],[4.5,-10.5,3]]
#cells = [[1,7,6],[2,8,7,1],[2,3,9,8],[4,3,9],[4,9,8,5],[5,8,7,6],[6,5,2,1],[5,4,3,2]]
VIEW(ggpl_L_and_U_roof_builder(verts,cells))
|
# Sum the elements of the list and print the total.
# Fixes: `Range` -> built-in `range`; the broken statements
# `output:output + c[index]` and `+ =([index]` -> a plain accumulation.
c = [1, 2, 4, 6, 10]
output = 0
for index in range(len(c)):
    output += c[index]
print(output)
|
993,727 | bc13721de28a04732246ccce14f5769f56ea7d77 | import requests
def test_valid_api_token():
    """The health endpoint must accept the known-good access token."""
    url = "http://localhost/wp-json/anxapi/v1/up/?access_token=test_access_token"
    response = requests.get(url)
    assert response.status_code == 200
    assert response.text == "OK"
def test_invalid_api_token():
    """The health endpoint must reject a bad access token with 401."""
    url = "http://localhost/wp-json/anxapi/v1/up/?access_token=invalid_access_token"
    response = requests.get(url)
    assert response.status_code == 401
    assert response.text == "You are not authorized to do this"
|
def A(a):
    """
    Return a! (the factorial of a) computed recursively.

    :param a: non-negative integer
    :raises ValueError: if a is negative (the original recursed until
        RecursionError on negative input)
    """
    if a < 0:
        raise ValueError("factorial is undefined for negative numbers")
    if a == 0:
        return 1
    return a * A(a - 1)
# Demo: print 8! (40320).
a = 8
print(A(a))
|
993,729 | 4a2243d4a97e35af3093fc8ae4d2705abc0666cf | import numpy as np
from models.LogisticRegression import LogisticRegression
from utils import optimizer, Accuracy
np.random.seed(10)
Dataset = np.loadtxt('data/logistic_check_data.txt')
x_data, y_data = Dataset[:, :-1], Dataset[:, -1]
_epoch = 100
_batch_size = 5
_lr = 0.01
_optim = 'SGD'
#======================================================================================================
print('='*20, 'Sigmoid Test', '='*20)
test_case_1 = np.array([0.5, 0.5, 0.5])
test_case_2 = np.array([
[6.23, -7.234, 8.3],
[-1, -6.23, -9]
])
test_case_3 = np.array([
[[1.0, 1.1], [5.672, -4]],
[[0.0, 9], [-9, 0.1]]
])
test_result_1 = LogisticRegression._sigmoid(None, test_case_1)
test_result_2 = LogisticRegression._sigmoid(None, test_case_2)
test_result_3 = LogisticRegression._sigmoid(None, test_case_3)
print('## Test case 1')
print('Input:\n', test_case_1)
print('Output:\n', test_result_1, end='\n\n')
print('## Test case 2')
print('Input:\n', test_case_2)
print('Output:\n', test_result_2, end='\n\n')
print('## Test case 3')
print('Input:\n', test_case_3)
print('Output:\n', test_result_3, end='\n\n')
'''
You should get results as:
## Test case 1
Input:
[0.5 0.5 0.5]
Output:
[0.62245933 0.62245933 0.62245933]
## Test case 2
Input:
[[ 6.23 -7.234 8.3 ]
[-1. -6.23 -9. ]]
Output:
[[9.98034419e-01 7.21108196e-04 9.99751545e-01]
[2.68941421e-01 1.96558078e-03 1.23394576e-04]]
## Test case 3
Input:
[[[ 1. 1.1 ]
[ 5.672 -4. ]]
[[ 0. 9. ]
[-9. 0.1 ]]]
Output:
[[[7.31058579e-01 7.50260106e-01]
[9.96570823e-01 1.79862100e-02]]
[[5.00000000e-01 9.99876605e-01]
[1.23394576e-04 5.24979187e-01]]]
'''
#======================================================================================================
print('='*20, 'Logistic Regression Test', '='*20)
model = LogisticRegression(num_features=x_data.shape[1])
optimizer = optimizer(_optim)
print('Initial weight: \n', model.W.reshape(-1))
print()
model.fit(x=x_data, y=y_data, epochs=_epoch, batch_size=_batch_size, lr=_lr, optim=optimizer)
print('Trained weight: \n', model.W.reshape(-1))
print()
# model evaluation
inference = model.eval(x_data)
# Error calculation
error = Accuracy(inference, y_data)
print('Accuracy on Check Data : %.4f \n' % error)
'''
You should get results as:
Initial weight:
[0. 0. 0. 0.]
Trained weight:
[-0.30839267 0.07120854 0.27459075 0.08573039 0.34718609]
Accuracy on Check Data : 0.8000
'''
|
993,730 | 776f431c87b1354aa2ef3bdf3c00c09687bf97f0 | import datetime
import time
# Ask how long each record should last; fall back to 5 seconds on bad input.
# Fix: the original `if rec_time_limit is None` check was dead code —
# input() returns a string and int() either succeeds or raises ValueError,
# it never returns None; a non-numeric answer crashed the program.
raw_limit = input("What duration of each record do you want?")
try:
    rec_time_limit = int(raw_limit)
except ValueError:
    rec_time_limit = 5  # seconds
class Timer:
    """
    Wall-clock timer used to cap the duration of a recording.

    Fixes over the original:
      * `class Timer(rec_time_limit)` tried to inherit from an int
        instance, which raises TypeError; the limit is now a constructor
        argument.
      * methods read/returned undefined locals (`srart_time` typo,
        `start_time`/`stop_time`/`timedelta` never in scope); the timer
        state now lives on `self`.
    """

    def __init__(self, rec_time_limit=5):
        # Maximum record duration in seconds.
        self.rec_time_limit = rec_time_limit
        self.start_time = None
        self.stop_time = None

    def to_set_start_time(self):
        """Record the current time as the start and return it (epoch seconds)."""
        self.start_time = int(time.time())
        return self.start_time

    def to_set_stop_time(self):
        """Record the current time as the stop and return it (epoch seconds)."""
        self.stop_time = int(time.time())
        return self.stop_time

    def to_find_start_stop_delta(self):
        """Return stop - start in whole seconds."""
        return self.stop_time - self.start_time

    def to_stop_working_process_by_time(self):
        """Return True when the elapsed time has reached the record limit."""
        return self.to_find_start_stop_delta() >= self.rec_time_limit
|
993,731 | da8628ea6d9f1c17cb16057c89f4e7d02a13a583 | from myapp.models import Credentials
from django.forms import ModelForm
from django import forms
class RegForm(ModelForm):
    """Registration form backed by the Credentials model."""

    class Meta:
        model = Credentials
        # Only expose the password and username fields of Credentials.
        fields = ['passw', 'usern']
class login_form(forms.Form):
    """Plain (non-model) login form with a username and a password."""

    #your_name = forms.CharField(label='Your name', max_length=100)
    user_name = forms.CharField(max_length=30)
    # NOTE(review): consider widget=forms.PasswordInput so the password is
    # masked in the browser — confirm with the template before changing.
    pass_word = forms.CharField(max_length=20)
|
993,732 | 0bbd006c5cf221f1182b25b224503c6ad9ae62a7 |
# ===================
# pigen.py
# ===================
# ====================================================
# Calculating Pi using Liebniz Formula and Generator
# ====================================================
# An infinite series can be useful even if you will never end up generating an infinite number of values.
# Example:
# We will calculate Pi using an infinite series using Liebniz formula:
# https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80
# In mathematics, the Leibniz formula for Pi, states that:
# 1 - 1/3 + 1/5 - 1/7 + 1/9 - 1/11 ... = Pi/4
# We will calculate the value of Pi using an infinite generator
# ==========
# Challenge
# ==========
# Create a generator to return an infinite sequence of odd numbers, starting at 1
# Print the first 100 numbers, to check that the generator is working correctly
# NOTE that 100 is just for testing. We will need more than 100 numbers but we don't know how many
# Thats why we are creating our own generator instead of just using a range.
# First we create the generator called oddnumbers
def oddnumbers():
    """Yield the infinite sequence of odd numbers: 1, 3, 5, 7, ..."""
    current = 1
    while True:
        yield current
        current += 2
# Test code to test the generator:
# ================================
# Then we call our generator and assign it to variable odds.
# Then we call our generator and assign it to variable odds.
odds = oddnumbers()

# Optional test
# This shows you that odds is a <generator object oddnumbers at 0x008BDED0>
# print(odds)

# We print the first 100 values by calling next() on the generator object.
# Results show 1, 3, 5, 7 etc up to 199 — all odd numbers.
# NOTE: we must bound the loop with range() because the odds generator is
# infinite and would otherwise run forever.
for i in range(100):
    print(next(odds))
# ========================================================
# Calculating Pi
# ========================================================
# How to use above odd number generator to calculate Pi
#
# In this case, we don't need the test code above, so we delete it.
# We are going to add the _=input(enter) sections to understand how the program works.
# Then remove the _=input(enter) in the next section after we understood how the program works
def oddnumbers():
    """Endless generator of odd numbers starting from 1."""
    value = -1
    while True:
        value += 2
        yield value
# Now we write function (pi_series) to calculate Pi using odd numbers generated by oddnumbers function
# NOTE that under the while loop, when using conventional code, we may want to keep track of whether number is added or subtracted.
# But this is unnecessary because a generator will continue from where it left after yielding a value.
# So we can do the addition part, yield, then do the subtraction part, and yield again
def pi_series():
    """
    Debug version of the Leibniz-series generator: it pauses on input()
    and prints the running approximation around every term so the
    generator's control flow (resuming after each yield) can be observed.
    """
    odds = oddnumbers() # Calls oddsnumbers functions to generate odd numbers and assign them to odds
    approximation = 0 # This initializes the first number to 0
    while True:
        _=input("First approximation and Initial next(odds):")
        print("Initial approximation = {}".format(approximation))
        # print("Initial next(odds) = {}".format(next(odds))) # NOTE: we comment this out because it was calling next(odds) and going to next odd
        print("="*20)
        approximation += (4 / next(odds)) # new approximation is 0 + 4/1 = 4.0
        yield approximation # Yields 4.0
        _=input("First updated approximation yielded after Plus:")
        print("First updated approximation = {}".format(approximation))
        print("="*20)
        approximation -= (4 / next(odds)) # Second approximation = 4 - 4/3 = 2.666
        yield approximation # Yields 2.666
        _=input("Second approximation and Second next(odds):")
        print("Second approximation = {}".format(approximation))
        # print("Second next(odds) = {}".format(next(odds))) # NOTE: we comment this out because it was calling next(odds) and going to next odd
        print("="*20)
# We now call pi_series to obtain the approximation generator.
approx_pi = pi_series()
# We print it using a for loop and give it a range
for x in range(2): # NOTE: The bigger range you give, the more accurate the approximation comes closer to 3.14
    # print(next(approx_pi))
    print("pi_series results = {}".format(next(approx_pi)))
    print("="*20)
# ======================================================
# Calculate Pi
# ======================================================
# We remove the _input(enter) code here
def oddnumbers():
    """Infinite odd-number generator used to drive the Leibniz series."""
    odd = 1
    while True:
        yield odd
        odd = odd + 2
# Now we write function (pi_series) to calculate Pi using odd numbers generated by oddnumbers function
def pi_series():
    """
    Yield successive partial sums of the Leibniz series for pi:
    4/1, 4/1 - 4/3, 4/1 - 4/3 + 4/5, ...
    """
    odds = oddnumbers()
    approximation = 0
    sign = 1
    while True:
        # Alternate adding and subtracting 4/(next odd denominator),
        # yielding the running approximation after every term.
        approximation += sign * (4 / next(odds))
        yield approximation
        sign = -sign
# We now call pi_series to obtain the approximation generator.
approx_pi = pi_series()
# We print it using a for loop and give it a range
for x in range(10): # NOTE: The bigger range you give, the more accurate the approximation comes closer to 3.14
    print(next(approx_pi))
    # print("pi_series results = {}".format(next(approx_pi)))
# ==================================================
# Importance of using Infinite Generator
# ==================================================
# As we can see in above code, we are now getting a more accurate value of Pi as we increase the range
# The value of using infinite generator is when you don't know what range you want to achieve your goal
# For example, in Google indexing, they send robots to index websites, but they initially don't know how
# many websites are there to index, so the robots run using infinite generator until they reach all the websites available.
# When they start reaching websites that are already index, then the generator terminates
|
993,733 | df7fbeca3d397f239602a490fc51b23bad67dd6a | import websockets
import json
from typing import Any, Callable, Coroutine, List
from src.models import Item
class Response:
    """
    A response to a query to send back to the server.
    """

    def __init__(self, found: bool, items: List[Item]):
        self.found = found
        self.items = items

    def serialize(self) -> dict:
        """
        Return a plain-dict representation of this response.
        """
        return {
            "found": self.found,
            "items": [item.serialize() for item in self.items],
        }

    def __repr__(self) -> str:
        return str(self.serialize())
class Request:
    """
    A request sent by the server, with a way to answer it.
    """

    def __init__(self, connection, item: Item):
        self._connection = connection
        self.item = item

    async def reply(self, response: Response):
        """
        Serialize `response` to UTF-8 JSON and send it over the connection.
        """
        payload = json.dumps(response.serialize(), ensure_ascii=False).encode()
        await self._connection.send(payload)

    def __repr__(self) -> str:
        return str(self.item)
class Client:
    """
    A client built on top of WebSockets to communicate with the server.
    """
    def __init__(self, server_uri: str):
        self._uri = server_uri

    async def connect(self):
        """
        Connects to the server and dispatches incoming requests to the
        registered handler, reconnecting forever on connection loss.
        """
        # NOTE(review): `ssl=True` is not an ssl.SSLContext; the websockets
        # library normally infers TLS from the wss:// scheme by itself —
        # confirm the pinned library version actually accepts a bool here.
        ssl = True if self._uri.startswith("wss") else False
        # `websockets.connect` used as an async iterator yields a fresh
        # connection after each failure (backoff handled by the library).
        async for websocket in websockets.connect(
            self._uri, ssl=ssl
        ) if ssl else websockets.connect(self._uri):
            # Try-except-continue used for automatic reconnection with exponential backoff
            try:
                self._connection = websocket
                async for message in self._connection:
                    # Each message is a JSON-encoded item lookup request.
                    json_obj = json.loads(message.decode())
                    item = Item(
                        json_obj["type"], json_obj["manufacturer"], json_obj["model"]
                    )
                    request = Request(self._connection, item)
                    await self.on_message_handler(request)
            except websockets.ConnectionClosed:
                continue

    async def close(self):
        """
        Closes the connection to the server.
        """
        await self._connection.close()

    def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):
        """
        Registers the coroutine invoked for every incoming request.
        """
        self.on_message_handler = handler
|
# Read n values and k; build suffix sums of the array.
n,k=map(int,input().split())
a=list(map(int,input().split()))
# suf[i] = a[i] + a[i+1] + ... + a[n-1]; suf[n] = 0.
suf=[0]*(n+1)
for i in range(n-1,-1,-1):
    suf[i]=suf[i+1]+a[i]
ans=suf[0]
# aft = sorted suffix sums excluding suf[0], padded with a leading 0 so
# the largest values sit at the highest indices.
aft=[0]
aft.extend(sorted(suf[1:]))
# Subtract the largest suffix sums from the total.
# NOTE(review): this loop runs k-2 times (i from n-1 down to n-k+2) —
# confirm against the problem statement whether k-1 iterations were meant.
for i in range(n-1,n-k+1,-1):
    ans-=aft[i]
print(ans)
993,735 | f7744ca9002900320893ad7d0a92284d02c17e4a | #!/usr/bin/python
import RPi.GPIO as GPIO
import pygame
import time
import random
from threading import Thread, Lock
from serial import Serial
# Use Broadcom (BCM) pin numbering for all GPIO calls below.
GPIO.setmode(GPIO.BCM)
class Bug:
    """A sound-playing bug: a GPIO button that triggers a WAV sample (Python 2)."""

    # Number of mixer channels claimed so far (one per Bug instance).
    channels = 0

    def __init__(self, pinNumber, soundFile, duration):
        # Minimum number of seconds between two plays of this sound.
        self.duration = duration
        self.pin = pinNumber
        self.soundFile = soundFile
        self.sound = pygame.mixer.Sound("/home/pi/sounds/%s" % soundFile)
        # NOTE(review): channel 0 is skipped (channels+1) — presumably
        # reserved for something else; confirm.
        self.channel = pygame.mixer.Channel(Bug.channels+1)
        self.channel.set_volume(.2)
        self.lastPlayTime = time.time()
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        Bug.channels += 1

        def playSound(pin):
            # Interrupt callback: re-check the pin after 50 ms (debounce)
            # and rate-limit replays by self.duration.
            print "detected interrupt on pin %s" % pin
            time.sleep(.05)
            if GPIO.input(pin) == False and time.time()-self.lastPlayTime > self.duration:
                self.lastPlayTime = time.time()
                print "playing %s" % self.soundFile
                self.channel.play(self.sound)

        GPIO.add_event_detect(self.pin, GPIO.FALLING, playSound, bouncetime=1000)
class Firefly:
    """A GPIO button that sends randomized flash commands to the LED controller (Python 2)."""

    def __init__(self, pinNumber):
        self.pin = pinNumber
        self.lastPlayTime = time.time()
        # Duration of the last flash sequence; replays are suppressed while
        # a sequence is still running.
        self.totalTime = 1
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

        def lightFirefly(pin):
            # Interrupt callback: debounce, then send `repeat` randomized
            # 7-byte flash commands (RGB, flash count, period, speed) over
            # the serial link, pausing between repeats.
            time.sleep(.05)
            if GPIO.input(pin) == False and time.time()-self.lastPlayTime > self.totalTime:
                print "detected interrupt on pin %s" % pin
                self.lastPlayTime = time.time()
                colors = [ (220,229,30), (234,100,15), (255,255,0) ]
                color = random.choice(colors)
                # 1-in-10 chance of a long burst of up to 9 flashes.
                if random.randint(0, 10) == 0:
                    flashes = random.randint(1, 9)
                else:
                    flashes = random.randint(1, 2)
                # Per-flash period in ms, split into two bytes for the wire.
                flashTime = 1000/(flashes)+random.randint(0,100)
                flashTimeMSB = flashTime >> 8
                flashTimeLSB = flashTime & 255
                speed = random.randint(5, 25)
                repeat = random.randint(2, 4)
                pause = random.uniform(1.5, 3.5)
                # Total sequence length: flashes plus inter-repeat pauses.
                self.totalTime = ((flashes*flashTime)/1000)*repeat+pause*(repeat-1)
                # print "color: %s" % (color,)
                # print "flashes: %s" % flashes
                # print "flashTime: %s = (%s << 8) | %s" % (flashTime, flashTimeMSB, flashTimeLSB)
                # print "speed: %s" % speed
                for i in range(repeat):
                    # Jitter the speed a little on every repeat, floor at 5.
                    speed += random.randint(-5, 5)
                    if speed < 5:
                        speed = 5
                    bytes = bytearray([color[0], color[1], color[2], flashes, flashTimeMSB, flashTimeLSB, speed])
                    print "%s" % [ b for b in bytes ]
                    ser.write(bytes)
                    if i < repeat-1:
                        time.sleep(pause)

        GPIO.add_event_detect(self.pin, GPIO.FALLING, lightFirefly, bouncetime=1000)
# Serial link to the microcontroller driving the firefly LEDs.
ser = Serial('/dev/ttyACM0', 9600);
pygame.mixer.init()
# One Bug per button: (GPIO pin, sample file, replay cooldown in seconds).
bugs = [ Bug(6, "bee.wav", 7.5),
         Bug(5, "clickbeetle.wav", 9),
         Bug(23, "cricket.wav", 10.5),
         Bug(17, "fly.wav", 10.5),
         Bug(16, "cockroach.wav", 10) ]
firefly = Firefly(24)
try:
    # All work happens in GPIO interrupt callbacks; just idle here.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    GPIO.cleanup()
|
993,736 | 87f110b6c1e4a7e9b6e822c3d64dfa684820c0d9 | from RPi import GPIO
import time
# Left motor pins: enable (digital), direction (digital), PWM speed.
#left
e1 = 27#digital
m1 = 18#digital
ena = 13#pwm
# Right motor pins.
#right
e2 = 6#digital
m2 = 12#digital
enb = 19#pwm
#GPIO.setwarnings(False)
GPIO.setmode (GPIO.BCM)
GPIO.setup(e1,GPIO.OUT)
GPIO.setup(e2,GPIO.OUT)
GPIO.setup(m1,GPIO.OUT)
GPIO.setup(m2,GPIO.OUT)
GPIO.setup(ena,GPIO.OUT)
GPIO.setup(enb,GPIO.OUT)
#To create a PWM instance: p = GPIO.PWM(channel, frequency)
# 100 Hz PWM on each enable pin, started at 0% duty cycle (stopped).
p_w1 = GPIO.PWM(ena,100)
p_w2 = GPIO.PWM(enb,100)
p_w1.start(0)
p_w2.start(0)
def motor_1(x):
    """Drive the left motor.

    x: signed duty cycle in percent; positive runs forward, negative
    runs in reverse, and 0 leaves the outputs untouched.
    """
    duty = abs(x)
    if x > 0:
        p_w1.ChangeDutyCycle(duty)
        GPIO.output(e1, GPIO.HIGH)
        GPIO.output(m1, GPIO.LOW)
    elif x < 0:
        p_w1.ChangeDutyCycle(duty)
        GPIO.output(m1, GPIO.HIGH)
        GPIO.output(e1, GPIO.LOW)
def motor_2(x):
    """Drive the right motor.

    x: signed duty cycle in percent; positive runs forward, negative
    runs in reverse, and 0 leaves the outputs untouched.

    Bug fix: both branches used to call p_w1.ChangeDutyCycle (the LEFT
    motor's PWM on ENA), so the right motor's ENB stayed at 0% and the
    right motor never turned; speed now goes to p_w2.
    """
    duty = abs(x)
    if x > 0:
        p_w2.ChangeDutyCycle(duty)
        GPIO.output(e2, GPIO.HIGH)
        GPIO.output(m2, GPIO.LOW)
    elif x < 0:
        p_w2.ChangeDutyCycle(duty)
        GPIO.output(m2, GPIO.HIGH)
        GPIO.output(e2, GPIO.LOW)
|
993,737 | bd8467cb8cc28bca621a704642498d3502061eab | # 위상정렬 Topology Sort
from collections import deque
def topology_sort():
    """Kahn's algorithm: repeatedly pop zero in-degree nodes into `result`.

    Uses the module-level `v`, `indegree`, `graph`, `q` and `result`.
    """
    # Seed the queue with every node whose in-degree starts at 0.
    for i in range(1, v+1):
        if indegree[i] == 0:
            q.append(i)
    while q:
        now = q.popleft()
        result.append(now)
        # Removing `now` lowers the in-degree of each of its successors.
        for i in graph[now]:
            indegree[i] -= 1
            if indegree[i] == 0: # push nodes whose in-degree just became 0
                q.append(i)
v, e = map(int, input().split()) # v = number of nodes, e = number of edges
indegree = [0] * (v+1) # in-degree per node (1-indexed)
graph = [[] for _ in range(v+1)] # adjacency list: graph[a] = nodes reachable from a
result = [] # topological order is accumulated here
q = deque()
for _ in range(e):
    a, b = map(int, input().split())
    graph[a].append(b) # directed edge a -> b
    indegree[b] += 1 # the incoming edge raises b's in-degree
topology_sort()
for i in result:
    print(i, end=' ')
|
993,738 | c1670ad9f70bdeec21a86e3646c8c8bb1c8734b2 | from flask import render_template, url_for, flash, redirect, request, Blueprint, abort, jsonify
from flask_login import login_user, current_user, logout_user, login_required
from codearena import db, bcrypt
from codearena.models import User, Team
from codearena.teams.forms import NewTeamForm, EditTeamForm, SearchTeamForm
from codearena.teams.utils import save_picture, search_teams
teams = Blueprint('teams', __name__)
@teams.route("/new-team", methods=['get', 'post'])
@login_required
def new_team():
    """Render the team-creation form; create the team on a valid POST."""
    form = NewTeamForm()
    if not form.validate_on_submit():
        return render_template('new-team.jinja', title='New Team', form=form)
    team = Team(name=form.name.data, about=form.about.data, leader=current_user.id)
    if form.image_file.data:
        team.image_file = save_picture(form.image_file.data)
    # Optional fields: only copy over values the user actually supplied.
    for attr in ('github', 'discord', 'tags', 'bio'):
        value = getattr(form, attr).data
        if value:
            setattr(team, attr, value)
    team.members.append(current_user)
    db.session.add(team)
    db.session.commit()
    flash("Team Successfully created!", category="success")
    return redirect(url_for('users.dashboard'))
@teams.route("/team/<uuid>")
@login_required
def view_team(uuid):
    """Show a single team's page; 404 when the uuid is unknown."""
    team = Team.query.get(uuid)
    if team is None:
        abort(404, description="Team not found.")
    def get_user_from_id(uuid):
        # Template helper: resolve a member id to its User row.
        return User.query.get(uuid)
    return render_template('team-page.jinja', title=team.name, team=team,
                           get_user=get_user_from_id)
@teams.route("/team/join/<uuid>")
@login_required
def join_team(uuid):
    """Add the logged-in user to the team, then redirect to its page.

    Bug fix: the append used to run unconditionally, so a user who
    re-visited the join URL was appended to team.members again,
    inserting a duplicate membership row; joining is now idempotent.
    """
    team = Team.query.get(uuid)
    if not team:
        abort(404, description="Team not found.")
    if current_user not in team.members:
        team.members.append(current_user)
        db.session.commit()
    return redirect(url_for('teams.view_team', uuid=uuid))
@teams.route("/team/edit/<uuid>", methods=['get', 'post'])
@login_required
def edit_team(uuid):
    """Leader-only team editor: prefill on GET, persist on a valid POST."""
    team = Team.query.get(uuid)
    if team is None:
        abort(404, description="Team not found.")
    if team.leader != current_user.id:
        abort(403, description="Permission Denied.")
    form = EditTeamForm()
    simple_fields = ('name', 'about', 'github', 'discord', 'bio')
    if request.method == "GET":
        # Pre-populate the form with the team's current values.
        for attr in simple_fields:
            getattr(form, attr).data = getattr(team, attr)
    if form.validate_on_submit():
        for attr in simple_fields + ('tags',):
            setattr(team, attr, getattr(form, attr).data)
        if form.image_file.data:
            team.image_file = save_picture(form.image_file.data)
        db.session.commit()
        flash("Team edited successfully!", category="success")
        return redirect(url_for('users.dashboard'))
    current_tags = team.tags.split(',') if team.tags else []
    return render_template('edit-team.jinja', title='New Team',
                           form=form, tags=current_tags)
@teams.route("/search/team", methods=['get', 'post'])
@login_required
def search_team():
    """Serve the team-search page; results come from search_api_team."""
    page_title = 'Search Team'
    return render_template('search-team.jinja', title=page_title)
@teams.route('/search/team/api', methods=['post'])
@login_required
def search_api_team():
    """JSON search endpoint for teams.

    Reads free text ("text") and a comma-separated tag list ("tags")
    from the POST form, filters all teams through search_teams, and
    returns a JSON array of lightweight team summaries.

    Bug fix: request.form.get() returns None for a missing field, so
    tags.strip() used to raise AttributeError on requests without a
    "tags" field; both fields now default to "".  The local result list
    also no longer shadows the module-level `teams` blueprint.
    """
    search = request.form.get("text", "")
    raw_tags = request.form.get("tags", "")
    tags = raw_tags.split(',') if raw_tags.strip() != "" else []
    matches = search_teams(Team.query.all(), tags, search)
    result = [{
        'name': team.name,
        'uuid': team.id,
        'image': team.image_file,
        'about': team.about,
        'tags': team.tags,
    } for team in matches]
    return jsonify(result)
|
993,739 | 6a6b8bef5340e419590d50cddc54f562ed498254 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
import copy
# klasa definiująca cechy pojedynczego automatu (agenta)
class Agent:
    """A single automaton (agent) in the Axelrod-Hammond simulation.

    color -- group marker ("tag") of the agent
    inner -- True when the agent cooperates with its own group
    outer -- True when the agent cooperates with other groups
    ptr   -- potential to reproduce; donating lowers it, receiving a
             donation raises it, and reset() restores the base rate.
    """

    BASE_PTR = 0.12   # reproduction potential every round starts from
    GIVE_COST = 0.01  # cost paid by a donor
    GAIN = 0.03       # benefit collected by a recipient

    def __init__(self, color, ingroup_coop, outgroup_coop):
        self.color = color
        self.inner = ingroup_coop
        self.outer = outgroup_coop
        self.ptr = self.BASE_PTR

    def give(self):
        """Pay the cost of donating to a partner."""
        self.ptr -= self.GIVE_COST

    def receive(self):
        """Collect the benefit of a partner's donation."""
        self.ptr += self.GAIN

    def reset(self):
        """Restore the base reproduction potential for the next round."""
        self.ptr = self.BASE_PTR
# klasa modelu przestrzeni symulacyjnej (tablica dwuwymiarowa)
class World:
    """Square lattice the agents live on; None marks an empty cell."""

    def __init__(self, size):
        self.size = size
        # size x size grid, every cell initially unoccupied.
        self.grid = [[None] * size for _ in range(size)]

    def visualization(self):
        """Collect scatter-plot data (positions, face and edge colors).

        The face/edge color pair encodes the agent's strategy:
        samaritan    (inner & outer): face = group color, white edge
        ethnocentric (inner only):    face = edge = group color
        traitor      (outer only):    white face, group-color edge
        egoist       (neither):       black face, group-color edge
        """
        data = {'x': [], 'y': [], 'c': [], 'edgecolors': []}
        for x in range(self.size):
            for y in range(self.size):
                agent = self.grid[x][y]
                if agent is None:
                    continue
                data['x'].append(x)
                data['y'].append(y)
                if agent.inner:
                    face = agent.color
                    edge = 'w' if agent.outer else agent.color
                else:
                    face = 'w' if agent.outer else 'k'
                    edge = agent.color
                data['c'].append(face)
                data['edgecolors'].append(edge)
        return data
# Object that runs the simulation process itself.
class Simulation:
    """Axelrod-Hammond ethnocentrism model on a toroidal grid.

    One round = settlement -> interaction -> reproduction -> death.
    Left byte-identical apart from comments: restructuring would change
    the order of np.random calls and therefore the simulated runs.
    """
    def __init__(self, size):
        self.size = size
        self.world = World(size)
        self.colors = ['c', 'm', 'y', 'r']
        self.mutation_rate = 0.005
        self.death_probability = 0.10
    def __randomize_features(self): # generate random values for each trait of a new agent
        return np.random.choice(self.colors), np.random.choice([True, False]), np.random.choice([True, False])
    @staticmethod
    def __prisoners_dilemma(a, b): # interaction logic for in-group and between-group encounters
        # Same group: cooperation is governed by the `inner` trait.
        if a.color == b.color:
            if a.inner and b.inner:
                a.give()
                a.receive()
                b.give()
                b.receive()
            elif a.inner and not b.inner:
                a.give()
                b.receive()
            elif not a.inner and b.inner:
                b.give()
                a.receive()
            elif not a.inner and not b.inner:
                pass
        # Different groups: cooperation is governed by the `outer` trait.
        if a.color != b.color:
            if a.outer and b.outer:
                a.give()
                b.receive()
                b.give()
                a.receive()
            elif a.outer and not b.outer:
                a.give()
                b.receive()
            elif not a.outer and b.outer:
                b.give()
                a.receive()
            elif not a.outer and not b.outer:
                pass
    def settlement(self): # settle one random free cell of the grid with a new random agent
        g = self.world.grid
        for i in np.random.permutation(self.size):
            for j in np.random.permutation(self.size):
                if g[i][j] is None:
                    g[i][j] = Agent(*self.__randomize_features())
                    return
    def interaction(self): # pair up interacting agents (right neighbor, then lower neighbor; torus wrap)
        g = self.world.grid
        for i in range(self.size):
            for j in range(self.size):
                a = g[i][j]
                b = g[i][(j + 1) % self.size]
                if a is not None and b is not None:
                    self.__prisoners_dilemma(a, b)
        for j in range(self.size):
            for i in range(self.size):
                a = g[i][j]
                b = g[(i + 1) % self.size][j]
                if a is not None and b is not None:
                    self.__prisoners_dilemma(a, b)
    def reproduction(self): # reproduce agents according to their PTR coefficient
        g = self.world.grid
        for i in np.random.permutation(self.size):
            for j in np.random.permutation(self.size):
                if g[i][j] is not None:
                    if np.random.rand() < g[i][j].ptr:
                        # Candidate cells: the four von Neumann neighbors (torus wrap).
                        up = ((i - 1) % self.size, j)
                        down = ((i + 1) % self.size, j)
                        left = (i, (j - 1) % self.size)
                        right = (i, (j + 1) % self.size)
                        options = [up, down, left, right]
                        order = np.random.permutation(4)
                        for k in order:
                            if g[options[k][0]][options[k][1]] is None:
                                g[options[k][0]][options[k][1]] = copy.deepcopy(g[i][j])
                                # offspring trait mutation
                                if np.random.rand() < self.mutation_rate:
                                    g[options[k][0]][options[k][1]].color = np.random.choice(self.colors)
                                if np.random.rand() < self.mutation_rate:
                                    g[options[k][0]][options[k][1]].inner = np.random.choice([True, False])
                                if np.random.rand() < self.mutation_rate:
                                    g[options[k][0]][options[k][1]].outer = np.random.choice([True, False])
                                break
        # restore the reproduction-potential coefficient (PTR) to its base value
        for i in range(self.size):
            for j in range(self.size):
                if g[i][j] is not None:
                    g[i][j].reset()
    # randomly kill agents with the configured death probability
    def death(self):
        g = self.world.grid
        for i in range(self.size):
            for j in range(self.size):
                if np.random.rand() < self.death_probability:
                    g[i][j] = None
    def statistics(self):
        """Count agents per strategy: [ethnocentric, samaritan, traitor, selfish]."""
        total = 0
        ethnocentric = 0
        samaritan = 0
        traitor = 0
        selfish = 0
        g = self.world.grid
        for i in range(self.size):
            for j in range(self.size):
                if g[i][j] is not None:
                    total += 1
                    if g[i][j].inner and not g[i][j].outer:
                        ethnocentric += 1
                    elif g[i][j].inner and g[i][j].outer:
                        samaritan += 1
                    elif not g[i][j].inner and g[i][j].outer:
                        traitor += 1
                    elif not g[i][j].inner and not g[i][j].outer:
                        selfish += 1
        return [ethnocentric, samaritan, traitor, selfish]
###################################################################################################
# Module-level plot/animation setup: point matplotlib at a local ffmpeg build
# (used by FFMpegFileWriter in main) and create the figure reused by init()/animate().
plt.rcParams['animation.ffmpeg_path'] = 'D:\\ffmpeg-20180202-caaa40d-win64-static\\bin\\ffmpeg.exe'
size = 50
simulation = Simulation(size)
fig, ax = plt.subplots()
fig.set_size_inches(6.5, 6.5, True)
scat = ax.scatter([], [])
def perform_simulation(iterations=1000):
    """Run a fresh 50x50 simulation for `iterations` rounds and save a
    per-strategy population plot to axelrod-hammond.png."""
    sim = Simulation(50)
    history = [[], [], [], []]
    for _ in range(iterations):
        sim.settlement()
        sim.interaction()
        sim.reproduction()
        sim.death()
        counts = sim.statistics()
        for series, value in zip(history, counts):
            series.append(value)
    data = {'Etnocentryzm': pd.Series(history[0]),
            'Samarytanizm': pd.Series(history[1]),
            'Zdrada': pd.Series(history[2]),
            'Egoizm': pd.Series(history[3])}
    pd.DataFrame(data).plot()
    plt.title('Model Axelroda-Hammonda')
    plt.ylabel('Liczba agentów')
    plt.xlabel('Iteracje')
    plt.tight_layout()
    plt.savefig('axelrod-hammond.png')
    plt.close()
def init():
    """FuncAnimation init hook: frame the axes with a small margin."""
    bounds = (-4, size + 4)
    ax.set(xlim=bounds, ylim=bounds)
    ax.set_facecolor('0.85')
    return scat,
def animate(frame):
    """FuncAnimation step: advance the model one round and redraw the grid."""
    for step in (simulation.settlement, simulation.interaction,
                 simulation.reproduction, simulation.death):
        step()
    ax.clear()
    ax.set(xlim=(-4, size + 4), ylim=(-4, size + 4))
    data = simulation.world.visualization()
    ax.scatter(data['x'], data['y'], c=data['c'], edgecolors=data['edgecolors'], marker='s')
    return scat,
def main():
    """Render a 1000-frame animation of the model to axelrod_anim.mp4, then show it."""
    ani = animation.FuncAnimation(fig, animate, init_func=init, frames=1000, blit=True)
    ani.save('axelrod_anim.mp4', writer=animation.FFMpegFileWriter(), dpi=150)
    plt.show()
# NOTE(review): this runs the full 1000-iteration batch simulation at import
# time, outside the __main__ guard below -- presumably intentional, but verify.
perform_simulation(1000)
if __name__ == "__main__":
    main()
|
993,740 | 6d1f3940a128dbf29070817f5e29f564381ac0a8 | import datetime
import datetime

# Project Euler 19: count the Sundays that fell on the first of the month
# during the twentieth century (1 Jan 1901 - 31 Dec 2000).
# 1901-01-06 is the first Sunday of the century, so stepping one week at a
# time visits every Sunday exactly once.
startDate = datetime.date(1901, 1, 6)
endDate = datetime.date(2000, 12, 31)
currentDate = startDate
numSundays = 0
while currentDate <= endDate:
    if currentDate.day == 1:
        numSundays += 1
    currentDate += datetime.timedelta(7)
# Bug fix: the Python 2 print statement is a SyntaxError on Python 3.
print(numSundays)
993,741 | 71046c4caec51a4994c9ccbf987535b0039fefdb | import datetime as dt
from pytz import timezone
from learning_record.settings import TIME_ZONE
from django.db import models
class Item(models.Model):
    """ learning item: a subject/topic that learning Records point at. """
    # Explicit surrogate key (Django would add one implicitly anyway).
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50, verbose_name='Name')
    # Optional free-text description of the item.
    description = models.CharField(max_length=1000, verbose_name='Description',
                                   blank=True, null=True)
    def __str__(self):
        return self.name
class Record(models.Model):
    """ learning record: one study session of an Item, measured in minutes. """
    id = models.AutoField(primary_key=True)
    time = models.DateTimeField() # time of learning
    item = models.ForeignKey(Item, on_delete=models.CASCADE)
    duration = models.PositiveSmallIntegerField(default=10) # unit: minutes
    # brief intro of what you learned
    key_content = models.CharField(max_length=200, verbose_name='Key Content')
    description = models.CharField(max_length=1000, verbose_name='Description',
                                   blank=True, null=True)
    @classmethod
    def get_specified_days_stat(cls, days):
        """ get stat of last specified days.

        Returns [{'name': item name, 'sum': total minutes}, ...] for every
        item with a non-zero total since `days` days ago.
        NOTE(review): `time` is a DateTimeField compared with a plain date;
        Django coerces this, but confirm the intended midnight boundary.
        """
        specified_days_ago = dt.date.today() - dt.timedelta(days=days)
        stat = []
        for item in Item.objects.all():
            duration_sum = sum(cls.objects.filter(item=item).filter(
                time__gte=specified_days_ago).values_list('duration', flat=True))
            # drop items whose duration_sum is 0
            if duration_sum > 0:
                stat.append({
                    'name': item.name,
                    'sum': duration_sum,
                })
        return stat
    @classmethod
    def get_today_stat(cls):
        """ get all records data of today. """
        return cls.get_specified_days_stat(0)
    @classmethod
    def get_seven_days_stat(cls):
        """ get all records data of last seven days. """
        return cls.get_specified_days_stat(7)
    @classmethod
    def get_this_year_stat(cls):
        """ get all records data of this year (same shape as get_specified_days_stat)."""
        year = dt.date.today().year
        stat = []
        for item in Item.objects.all():
            duration_sum = sum(cls.objects.filter(item=item).filter(
                time__year=year).values_list('duration', flat=True))
            # drop items whose duration_sum is 0
            if duration_sum > 0:
                stat.append({
                    'name': item.name,
                    'sum': duration_sum,
                })
        return stat
    def __str__(self):
        # Render in the project's configured timezone for readability.
        time_timezone = self.time.astimezone(timezone(TIME_ZONE))
        return '%s: %d, %s, %s' % (time_timezone.strftime('%m-%d %H:%M'),
                                   self.duration, self.item.name, self.key_content)
993,742 | 27a2af43f3c815b5815aa785f74b90f84d816b7b | """
Prepare is a script to remove the generated files, run wikifier, and finally zip the package.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import archive
from fabmetheus_utilities.fabmetheus_tools import wikifier
import os
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def prepareWikify():
	"""Delete generated artifacts, regenerate the wiki pages, then rebuild the zip."""
	removeGeneratedFiles()
	wikifier.main()
	removeZip()
def removeCSVFile(csvFilePath):
	"""Delete a generated csv file; only alterations csv that are not example_ files qualify."""
	isAlteration = 'alterations' in csvFilePath
	isExample = 'example_' in csvFilePath
	if isAlteration and not isExample:
		os.remove(csvFilePath)
		print('removeGeneratedFiles deleted ' + csvFilePath)
def removeGcodeFile(gcodeFilePath):
	"""Delete a generated gcode file; alterations gcode is kept when it is an example_ file."""
	# Equivalent to the original two-branch logic: only an alterations
	# example_ file survives; everything else is removed.
	isProtectedExample = 'alterations' in gcodeFilePath and 'example_' in gcodeFilePath
	if isProtectedExample:
		return
	os.remove(gcodeFilePath)
	print('removeGeneratedFiles deleted ' + gcodeFilePath)
def removeGeneratedFiles():
	"""Walk the tree and delete every generated csv/gcode/svg/xml artifact, then the backups."""
	removersByType = [('csv', removeCSVFile), ('gcode', removeGcodeFile),
		('svg', removeSVGFile), ('xml', removeXMLFile)]
	for fileType, remover in removersByType:
		for filePath in archive.getFilesWithFileTypesWithoutWordsRecursively([fileType]):
			remover(filePath)
	archive.removeBackupFilesByTypes(['gcode', 'svg', 'xml'])
def removeSVGFile(svgFilePath):
	"""Delete a generated svg file (carve/chop/cleave/etc. outputs only)."""
	generatedSuffixes = ['_bottom.svg', '_carve.svg', '_chop.svg', '_cleave.svg', '_scale.svg', '_vectorwrite.svg']
	if archive.getEndsWithList(svgFilePath, generatedSuffixes):
		os.remove(svgFilePath)
		print('removeGeneratedFiles deleted ' + svgFilePath)
def removeXMLFile(xmlFilePath):
	"""Delete a generated xml file (interpret output only)."""
	generatedSuffixes = ['_interpret.xml']
	if archive.getEndsWithList(xmlFilePath, generatedSuffixes):
		os.remove(xmlFilePath)
		print('removeGeneratedFiles deleted ' + xmlFilePath)
def removeZip():
	"""Delete any stale package zip, then rebuild it via: zip -r reprap_python_beanshell * -x \*.pyc \*~"""
	zipName = 'reprap_python_beanshell'
	zipNameExtension = zipName + '.zip'
	if zipNameExtension in os.listdir(os.getcwd()):
		os.remove(zipNameExtension)
	shellCommand = 'zip -r %s * -x \*.pyc \*~' % zipName
	if os.system(shellCommand) != 0:
		print('Failed to execute the following command in removeZip in prepare.')
		print(shellCommand)
def main():
	"""Command-line entry point."""
	prepareWikify()
if __name__ == "__main__":
	main()
|
993,743 | 57c800a786aadae38daa44505aa0b7fcca37f2e3 | from django.shortcuts import render
from rest_framework.generics import (
ListAPIView, RetrieveAPIView,
UpdateAPIView, CreateAPIView,
DestroyAPIView)
from .models import Post
# Create your views here.
from .serializer import PostSerializer
class PostListAPIView(ListAPIView):
    """GET: list all Post rows."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostDetailAPIView(RetrieveAPIView):
    """GET: retrieve a single Post by pk."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostUpdateAPIView(UpdateAPIView):
    """PUT/PATCH: update a single Post by pk."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostDeleteAPIView(DestroyAPIView):
    """DELETE: remove a single Post by pk."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostCreateAPIView(CreateAPIView):
    """POST: create a new Post."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
993,744 | 400ec7b13da5b35fc87f330e09ded735b1a2353d |
import socket
import os
def cls():
    """Clear the console ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
bytesToSend= str.encode("Hello UDP Server")
bufferSize = 1024
ACK_MESSAGE='THIS IS SERIAL2TCP/UDP SERVER'
def getServerIP():
    """Probe the local network and return the SERIAL2TCP/UDP server's IP.

    Sends a short UDP message to localhost and every 192.168.2.x host;
    the one that answers with ACK_MESSAGE on port 20001 is the server.

    Returns:
        str: the server's IP address, or the int 0 when no host answered
        (the 0 sentinel is kept for backward compatibility with callers).

    Bug fix: the UDP socket used to leak on every call (and on early
    return); it is now always closed via try/finally.
    """
    # Create a UDP socket at client side
    UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    UDPClientSocket.settimeout(0.15)
    try:
        # 127.0.0.1 (localhost) is tried first, then the 192.168.2.0/24 range.
        for i in ['127.0.0.1'] + list(range(0, 256)):
            try:
                candidate = "192.168.2." + str(i) if i != '127.0.0.1' else i
                print(candidate, end=" ")
                UDPClientSocket.sendto(bytesToSend, (candidate, 20001))  # send probe
                msg, addr = UDPClientSocket.recvfrom(bufferSize)  # get response
                if msg == str.encode(ACK_MESSAGE):
                    print()  # the printed progress line will not clear without this
                    cls()  # server found: wipe the console output
                    return addr[0]
            except Exception as e:
                # Typically socket.timeout: the host did not answer within 0.15 s.
                print(e)
        return 0
    finally:
        UDPClientSocket.close()
if(__name__=='__main__'):
    IP=getServerIP()
    # NOTE(review): getServerIP() returns the int 0 when no server answers,
    # which makes this concatenation raise TypeError -- confirm intended.
    print('Found Server at IP:'+IP)
993,745 | b7ba9a0410a46a0e4a02752e01da773e34103a93 | def distinctPowers(start,end):
results = []
for a in range(start,end+1):
for b in range(start,end+1):
n = pow(a,b)
if n not in results:
results.append(n)
results.sort()
return results
# Bug fix: Python 2 print statement -> Python 3 print() call.
print(len(distinctPowers(2, 100)))
993,746 | 11cfc1f10ab998b06f700a15ec55f205db78b8a4 | class PassWord():
def __init__(self, driver):
self.driver = driver
driver.password_textbox_xpath = "//input[@name='password']"
driver.password_login_next_btn_xpath = "//span/span[text()='Next']"
def enter_password(self, password):
self.driver.find_element_by_xpath(self.driver.password_textbox_xpath).clear()
self.driver.find_element_by_xpath(self.driver.password_textbox_xpath).send_keys(password)
def password_next_btn(self):
self.driver.find_element_by_xpath(self.driver.password_login_next_btn_xpath).click()
|
993,747 | ebfd1fc04cbeed868abce1e57048d644c26d4da0 | import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.nn.functional import softmax
def get_resnet18(num_classes):
    # NOTE(review): despite the name, this builds a *resnet50* backbone
    # (models.resnet50 below) -- confirm whether resnet18 was intended.
    # Head: maps the backbone's 1000-dim ImageNet logits down to
    # num_classes through two hidden Linear layers (no activations between).
    new_layers = nn.Sequential(
        nn.Linear(1000, 256),
        nn.Linear(256, 128),
        nn.Linear(128, num_classes)
    )
    backbone = models.resnet50(pretrained=True)
    net = nn.Sequential(backbone, new_layers)
    return net
def get_squeezenet(num_classes):
    """Build a SqueezeNet 1.1 classifier with a replacement head.

    The stock classifier is swapped for dropout -> 1x1 conv emitting
    `num_classes` channels -> ReLU -> 13x13 average pool, and forward()
    is rebound to flatten that pooled map to (batch, num_classes).

    Bug fix: the flatten used to hard-code 7 outputs, so any other
    num_classes raised a shape error; it now uses num_classes.
    """
    backbone = models.squeezenet1_1(pretrained=True)
    backbone.num_classes = num_classes
    backbone.classifier = nn.Sequential(
        nn.Dropout(p=0.5),
        nn.Conv2d(512, num_classes, kernel_size=1),
        nn.ReLU(inplace=True),
        nn.AvgPool2d(13)
    )
    backbone.forward = lambda x: backbone.classifier(backbone.features(x)).view(x.size(0), num_classes)
    return backbone
def get_prediction(network, input_data, device):
    """Run `network` on one 512x512 RGB image and return class probabilities.

    input_data: HxWx3 image (ndarray/PIL) accepted by torchvision's
    ToTensor; it is normalized with ImageNet statistics and reshaped to
    (1, 3, 512, 512) before inference under torch.no_grad().
    Returns a (1, num_classes) tensor of softmax probabilities on the CPU.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    )
    face_tensor = transform(input_data)
    face_tensor = face_tensor.view(1, 3, 512, 512).float().to(device)
    with torch.no_grad():
        result = network(face_tensor).float()
    # Bug fix: Tensor.to() returns a new tensor (it is not in-place), so the
    # old bare `result.to('cpu')` was a no-op; keep the returned tensor.
    result = result.to('cpu')
    return softmax(result, dim=1)
993,748 | c27e4d77008dd2c2e0dd154f2ca72d2526a54eda | # Create an area plot showing the minimum and maximum precipitation observed in each month.
#
import pyvista
import numpy as np
# Monthly precipitation extremes (mm) to shade between.
x = np.arange(12)
p_min = [11, 0, 16, 2, 23, 18, 25, 17, 9, 12, 14, 21]
p_max = [87, 64, 92, 73, 91, 94, 107, 101, 84, 88, 95, 103]
chart = pyvista.Chart2D()
# Shade the band between the minimum and maximum series.
_ = chart.area(x, p_min, p_max)
# One tick per month, labelled with the month's abbreviation.
chart.x_axis.tick_locations = x
chart.x_axis.tick_labels = ["Jan", "Feb", "Mar", "Apr", "May",
                            "Jun", "Jul", "Aug", "Sep", "Oct",
                            "Nov", "Dec"]
chart.x_axis.label = "Month"
chart.y_axis.label = "Precipitation [mm]"
chart.show()
|
993,749 | a6d2707d433cd37fd7bfb167481ccde2ffff954a | #!/bin/env python3
# -*- coding: utf-8 -*-
# KEEP THAT! ^^^^^
from textwrap import wrap
from pushing_outshoot_unfold import Term
import calendar as cal
term = Term()
def progress(percentage):
    """Draw a full-terminal-width quarantine progress bar for `percentage` (0-100)."""
    start = "Quarantine: "
    end = f" {str(round(percentage)).rjust(2)}% completed"
    percentage = percentage / 100
    # The bar fills whatever width is left between the label and the suffix.
    progress_width = term.width - len(start) - len(end)
    fill_amount = round(percentage * progress_width)
    bar = ("█" * fill_amount)
    # No-op when fill_amount > 0; at 0 it forces a single leading block --
    # presumably so the bar never renders completely empty (verify intent).
    bar = bar[:-1] + "█"
    print(start + bar.ljust(progress_width, '_') + end)
def notification(title="", text="This message has no content", appname="TheLocal",
                 date="0s ago", emoji="X"):
    """Print a boxed, desktop-style notification to the terminal."""
    width = 50  # interior width of the notification box
    header = emoji + " " + appname
    print('╔' + ('═' * width) + '╗')
    # App name on the left, date flushed to the right edge of the box.
    gap = " " * (width - len(header) - len(date))
    print('║' + term.grey(header + gap + date) + '║')
    print('║' + (" " * width) + '║')
    # Compute the padding from the raw title first: the ANSI underline
    # escape codes would otherwise inflate len().
    padding = " " * (width - len(title))
    print('║' + term.underline(title) + padding + '║')
    for line in wrap(text, width):
        print('║' + line.ljust(width) + '║')
    print('╚' + ("═" * width) + '╝')
def calendar(month, year, replace = []):
    """Print a centered month calendar, highlighting each date in `replace` in cyan."""
    lines = cal.month(year, month).split('\n')
    # Pad every line to the widest one so centering is uniform.
    widest = max(*map(len, lines))
    for line in lines:
        rendered = term.center(line.ljust(widest))
        for date in replace:
            rendered = rendered.replace(f' {date} ', term.cyan(f' {date} '))
        print(rendered)
# I just commented this out because it's not done yet --- cole
# def alarmclock(time = 'AM'):
# .-.-.
|
993,750 | dff16826e737839d7774008a7a7a28eef75359ef | words = []
# Shared buffer holding every word create_words() generates, in dictionary order.
words = []

def create_words(lev, s):
    """Append s, then (depth-first, so in dictionary order) every extension
    of s by one vowel, to the global `words` list, up to length 5."""
    global words
    VOWELS = ['A', 'E', 'I', 'O', 'U']
    words.append(s)
    for i in range(0, 5):
        if lev < 5:
            create_words(lev + 1, s + VOWELS[i])

def solution(word):
    """Return the 1-based dictionary rank of `word` among all words of
    length 1..5 over the vowels A, E, I, O, U.

    The empty string sits at index 0 of the generated list, so a word's
    rank is simply its list position.  Improvement: list.index replaces
    the original manual enumerate-and-break scan; the original's
    fallback of 0 for an absent word is kept explicit.
    """
    global words
    words = []
    create_words(0, '')
    try:
        return words.index(word)
    except ValueError:
        return 0
# Ad-hoc smoke tests (the printed Korean strings read: "the return value
# of the solution function is ... ").
word1 = "AAAAE"
ret1 = solution(word1)
print("solution 함수의 반환 값은", ret1, "입니다.")
word2 = "AAAE"
ret2 = solution(word2)
print("solution 함수의 반환 값은", ret2, "입니다.")
993,751 | b1057a938e989117d615b5fc6165c97edc5a6641 | import json
import concurrent.futures
import os
import sys
def create_schema(file_name):
    """Derive a JSON column schema from a SQL Server CREATE TABLE dump.

    Each column line ("[Name] [sqltype] ... NULL/NOT NULL,") becomes a
    {"name": ..., "type": ...} entry, with the SQL type collapsed into
    the generic string/integer/float/boolean buckets; lines whose SQL
    type has no bucket are skipped (matching the original behavior).
    The schema is written to ../P53_Database/P53_data_schema/<basename>.json.

    Bug fix: a multi-word column name used to be collapsed from its first
    two *characters* (f'{name[0]}_{name[1]}'); it is now joined from its
    first two *words*, e.g. "Gene Symbol" -> "Gene_Symbol".
    """
    file_name_and_ext = os.path.basename(file_name)
    basename = os.path.splitext(file_name_and_ext)[0]
    # SQL type -> generic schema type (same membership as the old lists).
    type_buckets = {
        'nvarchar': 'string', 'char': 'string', 'nchar': 'string',
        'varchar': 'string', 'ntext': 'string',
        'mediumint': 'integer', 'smallint': 'integer',
        'tinyint': 'integer', 'int': 'integer',
        'decimal': 'float', 'float': 'float', 'real': 'float',
        'bit': 'boolean',
    }
    json_schema = []
    with open(file_name, 'r') as sql_file:
        for line in sql_file:
            # Only column definition lines carry NULL/NOT NULL; skip SET lines.
            if 'NULL' not in line or 'SET' in line:
                continue
            line_split = line.strip().split(']')
            name = line_split[0]
            if '[' in name:
                name = name.strip('[')
            if ']' in name:
                name = name.strip(']')
            if '(%)' in name:
                name = name.strip('%')
            words = name.split()
            if len(words) > 1:
                name = f'{words[0]}_{words[1]}'
            data_type = line_split[1].split('[')[1]
            if data_type in type_buckets:
                json_schema.append({'name': name, 'type': type_buckets[data_type]})
    out_path = os.path.abspath(f'../P53_Database/P53_data_schema/{basename}.json')
    with open(out_path, 'w') as out_file:
        json.dump(json_schema,
                  out_file,
                  indent=4
                  )
def main():
    """Interactively choose serial vs. process-pool execution, then run
    create_schema over the listed SQL dumps."""
    arg = input('Would you like to run this in parallel? (Y/n): ')
    file_names = [
        '../P53_Database/P53/dbo.FUNCTION_ISHIOKA.Table.sql',
        # '../P53_Database/P53/dbo.AA_change.Table.sql',
        # '../P53_Database/P53/dbo.AA_codes.Table.sql',
        # '../P53_Database/P53/dbo.GermlineRefView.Table.sql',
        # '../P53_Database/P53/dbo.GermlineView.Table.sql',
        # '../P53_Database/P53/dbo.SomaticView.Table.sql'
    ]
    abs_path = [os.path.abspath(a_file) for a_file in file_names]
    # Synchronous path (answer n/no)
    if arg.lower() == 'n' or arg.lower() == 'no':
        for a_file in abs_path:
            create_schema(a_file)
    # Parallelized path (answer y/yes); any other answer does nothing.
    if arg.lower() == 'y' or arg.lower() == 'yes':
        with concurrent.futures.ProcessPoolExecutor() as executor:
            executor.map(create_schema, abs_path)
if __name__ == '__main__':
    main()
993,752 | 4a6360bdba5c89152ce5ad0c7e7b9f2c1aea2fd5 | from django.db import models
# Create your models here.
class Question(models.Model):
    """A question that currently carries only a title (defaults to the empty string)."""
    title = models.CharField(max_length=255, default="", blank = True)
993,753 | 647bd30703cec023ed48de4d6bc2ea6f8fa5ea87 | import requests
import time
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
times = []
url = "http://127.0.0.1:5000/"
N = 1000
# Hit the local API N times and record each response time in seconds.
for i in range(N):
    # Measure the API's response time in seconds
    response = requests.post(url, timeout=1000)
    elapsed_time = response.elapsed.total_seconds()
    times.append(elapsed_time)
    print(elapsed_time)
# Plot response time against request index.
fig = plt.figure()
ax = plt.axes()
x = list(range(N))
ax.plot(x, times, color='blue')
plt.show()
993,754 | b33791a81836ba957e386662c6822d5ee4b9cf09 | import Telstra_Messaging
from Telstra_Messaging.rest import ApiException
import requests
from flask_restful import Resource, Api
from flask import Flask, request, send_from_directory, render_template, send_file
def generate_text(number, message):
    """Send `message` as an SMS to `number` via the Telstra Messaging API.

    Authenticates with the OAuth2 client-credentials flow, then posts the
    SMS payload.  API failures are printed rather than raised, and the
    function always returns "text sent" regardless of the outcome.
    """
    # create an instance of the API class
    api_instance = Telstra_Messaging.AuthenticationApi()
    # SECURITY(review): live API credentials are hard-coded in source --
    # move them to environment variables / a secrets store and rotate them.
    client_id = "Agl2rsjQ0fbLC1xqPGDNve2Oianci7wK" # str |
    client_secret = "eWJoCzsYcTk2ITRl" # str |
    grant_type = 'client_credentials' # str | (default to client_credentials)
    try:
        # Generate OAuth2 token
        api_response = api_instance.auth_token(client_id, client_secret, grant_type)
        access_token = api_response.__getattribute__('access_token')
        configuration = Telstra_Messaging.Configuration()
        configuration.access_token = access_token
        api_instance = Telstra_Messaging.MessagingApi(Telstra_Messaging.ApiClient(configuration))
        payload = {
            "to": number,
            "validity":"60",
            "body": message
        }
        try:
            # Send SMS
            api_response = api_instance.send_sms(payload)
        except ApiException as e:
            print("Exception when calling MessagingApi->send_sms: %s\n" % e)
    except ApiException as e:
        print("Exception when calling AuthenticationApi->auth_token: %s\n" % e)
    return "text sent"
class Text(Resource):
    """GET ?number=<msisdn>&text=<body> -- fire an SMS and acknowledge."""
    def get(self):
        # NOTE(review): a GET with side effects (sending an SMS) -- confirm
        # this endpoint should not be a POST instead.
        number = request.args['number']
        text = request.args['text']
        generate_text(number, text)
        return "text sent"
|
993,755 | 1360a02703e48c7cee0c53073e3dd650f78c742c | #!/usr/bin/python
"""
Purpose: Boolean Operations
"""
# True, False
# True, False
# Bug fix / Python 3 port: the original used Python 2 print statements,
# which are a SyntaxError on Python 3.
choice = True
print('choice = ', choice)
true = 'Udhay Prakash'  # NB: lowercase 'true' is just a variable, not a keyword
choice = true
print('choice = ', choice)
choice = False
print('choice = ', choice)
print("True = ", True)
print("True * 30 = ", True * 30) # True has a value of one
print("False = ", False)
print("False * 30 = ", False * 30) # False has a value of zero
993,756 | 0311088cc07c2c6982c475c9289d91b1b2deb2d6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from Entity import *
from methods.WebParseHelper import *
from sqlalchemy import Column, String, Integer, create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.ext.declarative import declarative_base
class EntityMapper(object):
"""docstring for EntityMapper"""
def __init__(self, db_session):
self.db_session = db_session
def get_selected_gene_ids(self, gene_symbol):
gene_info = {}
try:
gene = self.db_session.query(Geneinfo).filter(Geneinfo.Gene_Symbol == gene_symbol).one()
gene_info = {
'entrez': gene.entrez_id,
'HGNC': gene.HGNC_id,
'synonyms': gene.synonyms,
'chr':gene.Chr,
'chromosome_band': gene.chromosome_band,
'type':gene.type_of_gene,
'description': gene.description
}
except Exception as ex:
raise ex
return gene_info
def get_selected_gene_summary(self, entrez_id):
summary_info = ''
try:
result_proxy = self.db_session.execute('SELECT * FROM gene_summary_ncbi where entrez_id = :entrez_id;', {
'entrez_id': entrez_id
}).fetchall()
if len(result_proxy) == 0:
summary_info = ''
else:
summary_info = result_proxy[0][1]
except Exception as ex:
summary_info = ''
raise ex
return summary_info
def get_selected_gene_ensembl_id(self, entrez_id):
ensembl_id = ''
try:
ensembl_info = self.db_session.query(Ensemblinfo).filter(Ensemblinfo.entrez_id == entrez_id).first()
ensembl_id = ensembl_info.gene_id
except Exception as ex:
raise ex
return ensembl_id
def get_selected_gene_uniport(self, ensembl_id):
uniprot_id = ''
try:
uniprot = self.db_session.query(Uniprotinfo).filter(Uniprotinfo.gene_id == ensembl_id).one()
uniprot_id = uniport.Uniprot_id
except Exception as ex:
raise ex
return uniprot_id
def get_selected_relate_drug(self, entrez_id):
result_proxy = {}
try:
result = self.db_session.execute('SELECT * FROM drugtarge_pharmagkbttd where entrez_id = :entrez_id;', {
'entrez_id': entrez_id
}).fetchall()
for item in result:
if item[1] not in result_proxy.keys():
result_proxy[item[1]] = item[-1]
else:
continue
except Exception as ex:
raise ex
return result_proxy
def get_selected_relate_drug_id(self, entrez_id):
result_proxy = {}
try:
result = self.db_session.execute('SELECT * FROM drugtarge_pharmagkbttd where entrez_id = :entrez_id;', {
'entrez_id': entrez_id
}).fetchall()
for item in result:
if item[0] not in result_proxy.keys():
result_proxy[item[0]] = item[-1]
else:
continue
except Exception as ex:
raise ex
return result_proxy
def get_selected_relate_disease(self, drugbank_id):
result_proxy = {}
try:
result_proxy = self.db_session.execute('SELECT drug_commonname, indication, sources FROM drugtodisease_all WHERE drugtodisease_all.DrugBank_id = :drugbank_id;', {'drugbank_id': drugbank_id}).fetchall()
except Exception as e:
raise e
return result_proxy
def get_seleted_relate_all_diseases(self, drug_ids):
diseases = []
result_proxy = {}
for drug in drug_ids:
diseases.extend(self.get_selected_relate_disease(drug))
for index in range(0, len(diseases)):
disease = {}
disease['name'] = diseases[index][1]
disease['drug'] = diseases[index][0]
disease['source'] = diseases[index][-1]
result_proxy[index] = disease
return result_proxy
def get_seleted_relate_pubmedids(self, entrez_id):
pubmedids = ''
try:
result_proxy = self.db_session.execute('SELECT pubmed_id FROM gene_pumedid_ncbi where entrez_id = :entrez_id;', {'entrez_id': entrez_id}).fetchone()
if result_proxy == None:
result_proxy = ''
else:
result_proxy = result_proxy[0]
except Exception as e:
raise e
return result_proxy
def get_seleted_relate_superpathway(self, gene_symbol):
    """Fetch super-pathway information for *gene_symbol* via pathcards_parser."""
    try:
        return pathcards_parser(gene_symbol)
    except Exception as ex:
        raise ex
def get_seleted_relate_omim_hgmd(self, gene_symbol):
    """Map disease/phenotype name -> source database ('dbSNP' or 'OMIM').

    HGMD rows are inserted first, OMIM rows second, so a name present in
    both ends up as 'OMIM' — same precedence as the original code.
    OMIM rows whose phenotype is the literal string 'null' are skipped.
    """
    result_hgmd = self.db_session.execute(
        'SELECT dbSNP_ID, disease_name FROM variation_hgmd where gene_symbol = :gene_symbol;',
        {'gene_symbol': gene_symbol}).fetchall()
    result_omim = self.db_session.execute(
        'SELECT omim_id, Phenotype_Combind FROM variation_omim WHERE gene_symbol = :gene_symbol;',
        {'gene_symbol': gene_symbol}).fetchall()
    result_proxy = {}
    # The original if/else branches both assigned the same value (the
    # intermediate '= {}' was immediately overwritten) and kept an unused
    # 'key' counter; a plain assignment is equivalent.
    for row in result_hgmd:
        result_proxy[row[1]] = 'dbSNP'
    for row in result_omim:
        if row[1] == 'null':
            continue
        result_proxy[row[1]] = 'OMIM'
    return result_proxy
def get_net(self, gene_symbol):
    """Return interaction records touching *gene_symbol* (chemical edges excluded).

    Each record: Entity1/Entity2, Interaction (normalised to 'other' for
    reference-only or blank types), fixed PathID 'ipa', PathName,
    Manuscripts and resource.
    """
    rows = self.db_session.execute(
        "SELECT * from pathwaycommons9allhgnc WHERE (PARTICIPANT_A = :gene_symbol or PARTICIPANT_B= :gene_symbol) and `INTERACTION_TYPE` != :edge",
        {'gene_symbol': gene_symbol, 'edge': "chemical-affects"}).fetchall()
    network = []
    for row in rows:
        interaction = row[1]
        if 'Reference' in interaction or interaction == ' ':
            interaction = 'other'
        network.append({
            'Entity1': row[0],
            'Entity2': row[2],
            'Interaction': interaction,
            'PathID': 'ipa',
            'PathName': row[5],
            'Manuscripts': row[4],
            'resource': row[3],
        })
    return network
993,757 | 6c436da2f46d1316c011f35f3eff08a6a4a37237 | import rocks.commands
class Plugin(rocks.commands.Plugin):
    """Rocks config plugin emitting shell commands that customise httpd's ssl.conf
    for OSG (grid-security certificates, optional alternate SSL port)."""

    def provides(self):
        # Name under which this plugin's output is registered.
        return 'ssl'

    # [sorting to] run after 'ManagedFork' plugin
    def requires(self):
        return ['ManagedFork', 'Pbs', 'SGE', 'Condor']

    def run(self, argv):
        # 1. Get the hostname and the config file to store
        host, addOutput, configs = argv
        configssl = configs['ssl']
        # NOTE(review): 'port' is read but never used below — confirm intent.
        port = self.db.getHostAttr(host, 'OSG_RSV_Port')
        sport = self.db.getHostAttr(host, 'OSG_RSV_SPort')
        addOutput(host, '#begin config %s' % (configssl))
        # Start from the shipped template every time.
        addOutput(host, '/bin/cp -f /etc/httpd/conf.d/ssl.conf.template %s' % (configssl))
        if sport > 0:
            # Rewrite listen/virtual-host port when a custom SSL port is set.
            addOutput(host, 'sed -i -e "s#Listen 443#Listen %s#" %s' % (sport, configssl))
            addOutput(host, 'sed -i -e "s#VirtualHost _default_:443#VirtualHost _default_:%s#" %s' % (sport, configssl))
        # Point Apache at the grid-security host certificate/key.
        addOutput(host, 'sed -i -e "s#SSLCertificateFile /etc/pki/tls/certs/localhost.crt#SSLCertificateFile /etc/grid-security/http/httpcert2.pem#" %s' % (configssl))
        addOutput(host, 'sed -i -e "s#SSLCertificateKeyFile /etc/pki/tls/private/localhost.key#SSLCertificateKeyFile /etc/grid-security/http/httpkey2.pem#" %s' % (configssl))
        addOutput(host, '#end config %s' % (configssl))
        addOutput(host, '')
|
993,758 | d5801beb1da97521ca00607495ded5656a12727f | coureses_set = ("Math", "Physics", 1)
print("Math" in coureses_set) |
993,759 | a0b6a5a8543ed4c4126f782e239670ffc6276ad3 | strs = list(input())
import itertools

nums = list(map(int, strs))


def _find_expression(digits, target=7):
    """Return the first 'a±b±…' expression string equal to *target*, else None.

    Generalises the original hard-coded four-digit / three-operator search to
    any number of digits; operator patterns are tried in the same
    lexicographic ('+' before '-') order as the original nested loops, and the
    value is computed directly instead of via eval().
    """
    for ops in itertools.product('+-', repeat=len(digits) - 1):
        total = digits[0]
        parts = [str(digits[0])]
        for op, digit in zip(ops, digits[1:]):
            total = total + digit if op == '+' else total - digit
            parts.append(op)
            parts.append(str(digit))
        if total == target:
            return ''.join(parts)
    return None


expression = _find_expression(nums)
if expression is not None:
    print(expression + '=7')
    exit()
993,760 | 82da2a2aae2525958805d2ce4bae04c4273c9750 | from Chef_understand_inheritance import Chef
class ChineseChef(Chef):
    """Specialised chef: inherits all Chef behaviour and adds fried rice."""

    def make_fried_rice(self):
        print("The chef is able to make the fried rice")
    # you can override any method of the parent class if you want different output, i.e Override them
993,761 | eff5025690bdf9996c3035da45b5a4acb85e4c2c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0301
# pylint: disable=C0111
"""
Created on Tue Oct 6 00:15:14 2015
Updated and improved by x86dev Dec 2017.
@author: Leo; Eduardo; x86dev
"""
import getopt
import json
import logging
import os
import signal
import sys
import time
import urllib.parse
from datetime import datetime
from platform import python_version_tuple
from random import randint
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium_stealth import stealth
# Module-wide logger: everything at DEBUG, duplicated to console and file.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
fh = logging.FileHandler('kleinanzeigen.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# NOTE(review): 'formatter' is created but never attached to a handler.
formatter = logging.Formatter('%(asctime)s %(message)s')
log.addHandler(ch)
log.addHandler(fh)
# Terminal bell to signal script start.
print(end='\a')
def profile_read(profile, config):
    """Merge JSON settings from *profile* into *config*; no-op if file missing."""
    if not os.path.isfile(profile):
        return
    with open(profile, encoding="utf-8") as fh_profile:
        config.update(json.load(fh_profile))
def profile_write(profile, config):
    """Serialise *config* as pretty-printed JSON to *profile* (overwritten)."""
    serialised = json.dumps(config, sort_keys=True, indent=4, ensure_ascii=False)
    with open(profile, "w+", encoding='utf8') as fh_config:
        fh_config.write(serialised)
def login(config):
    """Log into ebay-kleinanzeigen.de with credentials from *config*.

    Uses the module-level selenium *driver*; short randomised waits between
    keystrokes make the interaction look less automated.
    """
    input_email = config['glob_username']
    input_pw = config['glob_password']
    log.info("Login with account email: " + input_email)
    driver.get('https://www.ebay-kleinanzeigen.de/m-einloggen.html')
    # wait for the 'accept cookie' banner to appear
    WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.ID, 'gdpr-banner-accept'))).click()
    text_area = WebDriverWait(driver, 1).until(EC.presence_of_element_located((By.ID, 'login-email')))
    text_area.send_keys(input_email)
    fake_wait(120)
    text_area = driver.find_element_by_id('login-password')
    text_area.send_keys(input_pw)
    fake_wait(91)
    submit_button = driver.find_element_by_id('login-submit')
    submit_button.click()
def fake_wait(ms_sleep=None):
    """Sleep *ms_sleep* milliseconds to mimic a human pause.

    Without an argument a random 600-2000 ms delay is used; requests below
    100 ms are raised to a random 70-105 ms floor.
    """
    duration = randint(600, 2000) if ms_sleep is None else ms_sleep
    if duration < 100:
        duration = randint(70, 105)
    log.debug("Waiting %d ms ..." % duration)
    time.sleep(duration / 1000)
def delete_ad(driver, ad):
    """Delete *ad* from the 'my ads' page; return True when a deletion happened.

    Lookup order: by the stored ad id first, then by title text.  When the ad
    no longer exists online, its stale 'id' key is dropped from *ad*.
    """
    log.info("\tDeleting ad ...")
    driver.get("https://www.ebay-kleinanzeigen.de/m-meine-anzeigen.html")
    fake_wait()
    ad_id_elem = None
    if "id" in ad:
        try:
            ad_id_elem = driver.find_element_by_xpath("//a[@data-adid='%s']" % ad["id"])
        except NoSuchElementException:
            log.info("\tNot found by ID")
    if ad_id_elem is None:
        # Fall back to locating the list item by the ad's title link.
        try:
            ad_id_elem = driver.find_element_by_xpath("//a[contains(text(), '%s')]/../../../../.." % ad["title"])
        except NoSuchElementException:
            log.info("\tNot found by title")
    if ad_id_elem is not None:
        try:
            btn_del = ad_id_elem.find_elements_by_class_name("managead-listitem-action-delete")[1]
            btn_del.click()
            fake_wait()
            btn_confirm_del = driver.find_element_by_id("modal-bulk-delete-ad-sbmt")
            btn_confirm_del.click()
            log.info("\tAd deleted")
            fake_wait(randint(2000, 3000))
            # Dismiss any leftover modal dialog.
            webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
            return True
        except NoSuchElementException:
            log.info("\tDelete button not found")
    else:
        log.info("\tAd does not exist (anymore)")
        ad.pop("id", None)
    return False
# From: https://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
def wait_key():
    """ Wait for a key press on the console and return it. """
    result = None
    if os.name == 'nt':
        # Windows: a plain input() prompt is sufficient.
        result = input("Press Enter to continue...")
    else:
        # POSIX: temporarily disable canonical mode and echo so a single
        # keystroke (no Enter required) is read from stdin.
        import termios
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        try:
            result = sys.stdin.read(1)
        except IOError:
            pass
        finally:
            # Always restore the previous terminal settings.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
    return result
def post_ad_has_captcha(driver):
    """Return True when the ad form displays the reCAPTCHA element."""
    try:
        found = bool(driver.find_element_by_xpath('//*[@id="postAd-recaptcha"]'))
    except NoSuchElementException:
        found = False
    log.info(f"Captcha: {found}")
    return found
def post_ad_is_allowed(driver):
    """Return False when the form shows the monthly free-ad limit banner, else True."""
    is_allowed = True
    # Try checking for the monthly limit per account first.
    try:
        shopping_cart = driver.find_elements_by_xpath('/html/body/div[1]/form/fieldset[6]/div[1]/header')
        if shopping_cart:
            log.info("\t*** Monthly limit of free ads per account reached! Skipping ... ***")
            is_allowed = False
    except Exception:
        # Was a bare 'except:', which would also swallow KeyboardInterrupt /
        # SystemExit; best-effort behaviour is otherwise kept.
        pass
    log.info(f"Ad posting allowed: {is_allowed}")
    return is_allowed
def post_ad(driver, ad, interactive):
    """Fill in and submit the 'post ad' form for *ad*.

    Returns True on success, False when blocked (monthly ad limit, or a
    captcha in non-interactive mode).  On success *ad* is mutated in place:
    'id' and 'date_published' are set for a new ad, 'date_updated' always.
    Reads the module-level 'config' dict and 'log' logger.
    """
    log.info("\tPublishing ad ...")
    if config['glob_phone_number'] is None:
        config['glob_phone_number'] = ''
    # Normalise the price type to one of the values the form accepts.
    if ad["price_type"] not in ['FIXED', 'NEGOTIABLE', 'GIVE_AWAY']:
        ad["price_type"] = 'NEGOTIABLE'
    # Navigate to page
    driver.get('https://www.ebay-kleinanzeigen.de/p-anzeige-aufgeben.html')
    fake_wait(randint(2000, 3500))
    category_selected = False
    try:
        driver.find_element_by_id('pstad-lnk-chngeCtgry')
        log.info("Using new layout")
    except:
        log.info("Using old layout")
        # legacy handling for old page layout where you have to first select the category (currently old and new layout are served randomly)
        driver.get(ad["caturl"].replace('p-kategorie-aendern', 'p-anzeige-aufgeben'))
        fake_wait(300)
        driver.find_element_by_css_selector("#postad-step1-sbmt button").click()
        fake_wait(300)
        category_selected = True
    # Check if posting an ad is allowed / possible
    fRc = post_ad_is_allowed(driver)
    if fRc is False:
        return fRc
    # Fill form
    if "type" in ad and ad["type"] == "WANTED":
        driver.find_element_by_id('adType2').click()
    title_input = driver.find_element_by_id('postad-title')
    title_input.click()
    title_input.send_keys(ad["title"])
    driver.find_element_by_id('pstad-descrptn').click()  # click description textarea to lose focus from title field which will trigger category auto detection
    if not category_selected:
        # wait for category auto detection
        try:
            WebDriverWait(driver, 3).until(lambda driver: driver.find_element_by_id('postad-category-path').text.strip() != '')
            category_selected = True
        except:
            pass
    # Change category if present in config (otherwise keep auto detected category from eBay Kleinanzeigen)
    cat_override = ad["caturl"]
    if cat_override:
        cat_override = cat_override.replace('p-anzeige-aufgeben', 'p-kategorie-aendern')  # replace old links for backwards compatibility
        driver.find_element_by_id('pstad-lnk-chngeCtgry').click()
        WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, 'postad-step1-sbmt')))
        driver.get(cat_override)
        fake_wait()
        driver.find_element_by_id('postad-step1-sbmt').submit()
        fake_wait()
        category_selected = True
    if not category_selected:
        raise Exception('No category configured for this ad and auto detection failed, cannot publish')
    # add additional category fields
    additional_category_options = ad.get("additional_category_options", {})
    for element_id, value in additional_category_options.items():
        # Try a <select> first, fall back to a plain <input> field.
        try:
            select_element = driver.find_element_by_css_selector(
                'select[id$="{}"]'.format(element_id)
            )
            Select(select_element).select_by_visible_text(value)
        except NoSuchElementException:
            try:
                driver.find_element_by_xpath("//input[@id='%s']" % element_id).send_keys(value)
            except NoSuchElementException:
                pass
    # Description: either read from a file or split the inline 'desc' text.
    text_area = driver.find_element_by_id("pstad-descrptn")
    ad_suffix = config.get("glob_ad_suffix", "")
    ad_prefix = config.get("glob_ad_prefix", "")
    if ad.get("description_file", None) is not None:
        description_file = ad.get("description_file")
        with open(description_file, "r", encoding="utf-8") as f:
            description_lines = f.readlines()
    else:
        desc = ad.get("desc")
        description_lines = desc.split("\\n")
    # NOTE(review): strip("\\n") strips literal backslash/'n' characters, not
    # real newlines — confirm this is intended for the file-read branch.
    description_lines = [x.strip("\\n") for x in description_lines]
    description_lines.append(ad_suffix)
    description_lines.insert(0, ad_prefix)
    for p in description_lines:
        text_area.send_keys(p)
    fake_wait()
    # Shipping options (German UI labels).
    if (ad['shipping_type']) != 'NONE':
        try:
            select_element = driver.find_element_by_css_selector('select[id$=".versand_s"]')
            shipment_select = Select(select_element)
            log.debug("\t shipping select found with id: %s" % select_element.get_attribute('id'))
            if (ad['shipping_type']) == 'PICKUP':
                shipment_select.select_by_visible_text("Nur Abholung")
            if (ad['shipping_type']) == 'SHIPPING':
                shipment_select.select_by_visible_text("Versand möglich")
            fake_wait()
        except NoSuchElementException:
            pass
    # Price and price type.
    text_area = driver.find_element_by_id('pstad-price')
    if ad["price_type"] != 'GIVE_AWAY':
        text_area.send_keys(ad["price"])
    price = driver.find_element_by_xpath("//input[@name='priceType' and @value='%s']" % ad["price_type"])
    price.click()
    fake_wait()
    # Location / contact details, per-ad zip overriding the global one.
    text_area = driver.find_element_by_id('pstad-zip')
    text_area.clear()
    if ad.get("zip", None) is not None:
        text_area.send_keys(ad["zip"])
    else:
        text_area.send_keys(config["glob_zip"])
    fake_wait()
    if config["glob_phone_number"]:
        text_area = driver.find_element_by_id('postad-phonenumber')
        text_area.clear()
        text_area.send_keys(config["glob_phone_number"])
        fake_wait()
    text_area = driver.find_element_by_id('postad-contactname')
    text_area.clear()
    text_area.send_keys(config["glob_contact_name"])
    fake_wait()
    if config["glob_street"]:
        text_area = driver.find_element_by_id('pstad-street')
        text_area.clear()
        text_area.send_keys(config["glob_street"])
        fake_wait()
    # Upload images from photofiles
    if "photofiles" in ad:
        try:
            fileup = driver.find_element_by_xpath("//input[@type='file']")
            for path in ad["photofiles"]:
                path_abs = config["glob_photo_path"] + path
                uploaded_count = len(driver.find_elements_by_class_name("imagebox-new-thumbnail"))
                log.debug("\tUploading image: %s" % path_abs)
                fileup.send_keys(os.path.abspath(path_abs))
                # Poll until the thumbnail count grows or 30 s elapse.
                total_upload_time = 0
                while uploaded_count == len(driver.find_elements_by_class_name("imagebox-new-thumbnail")) and \
                        total_upload_time < 30:
                    fake_wait(500)
                    total_upload_time += 0.5
                if uploaded_count == len(driver.find_elements_by_class_name("imagebox-new-thumbnail")):
                    log.warning("\tCould not upload image: %s within %s seconds" % (path_abs, total_upload_time))
                else:
                    log.debug("\tUploaded file in %s seconds" % total_upload_time)
        except NoSuchElementException:
            pass
    # Upload images from directory
    if "photo_dir" in ad:
        try:
            fileup = driver.find_element_by_xpath("//input[@type='file']")
            path = ad["photo_dir"]
            path_abs = os.path.join(config["glob_photo_path"], path)
            if not path_abs.endswith("/"):
                path_abs += "/"
            for filename in sorted(os.listdir(path_abs)):
                if not filename.lower().endswith((".jpg", ".jpeg", ".png", ".gif")):
                    continue
                file_path_abs = path_abs + filename
                uploaded_count = len(driver.find_elements_by_class_name("imagebox-new-thumbnail"))
                log.debug("\tUploading image: %s" % file_path_abs)
                fileup.send_keys(os.path.abspath(file_path_abs))
                # Poll until the thumbnail count grows or 60 s elapse.
                total_upload_time = 0
                while uploaded_count == len(driver.find_elements_by_class_name("imagebox-new-thumbnail")) and \
                        total_upload_time < 60:
                    fake_wait(1000)
                    total_upload_time += 1
                if uploaded_count == len(driver.find_elements_by_class_name("imagebox-new-thumbnail")):
                    log.warning("\tCould not upload image: %s within %s seconds" % (file_path_abs, total_upload_time))
                else:
                    log.debug("\tUploaded file in %s seconds" % total_upload_time)
        except NoSuchElementException as e:
            log.error(e)
            os.system('Say ' + str(e))
    fake_wait()
    # Preview, captcha gate, then final submit.
    submit_button = driver.find_element_by_id('pstad-frmprview')
    if submit_button:
        submit_button.click()
    fake_wait()
    has_captcha = post_ad_has_captcha(driver)
    if has_captcha:
        if interactive:
            log.info("\t*** Manual captcha input needed! ***")
            log.info("\tFill out captcha and submit, after that press Enter here to continue ...")
            wait_key()
        else:
            log.info("\tCaptcha input needed, but running in non-interactive mode! Skipping ...")
            fRc = False
    if fRc:
        try:
            submit_button = driver.find_element_by_id('prview-btn-post')
            if submit_button:
                submit_button.click()
        except NoSuchElementException:
            pass
        # The resulting URL carries the new ad id as the 'adId' query param.
        try:
            parsed_q = urllib.parse.parse_qs(urllib.parse.urlparse(driver.current_url).query)
            add_id = parsed_q.get('adId', None)[0]
            log.info(f"\tPosted as: {driver.current_url}")
            if "id" not in ad:
                log.info(f"\tNew ad ID: {add_id}")
                ad["date_published"] = datetime.utcnow().isoformat()
                ad["id"] = add_id
            ad["date_updated"] = datetime.utcnow().isoformat()
        except:
            pass
    if fRc is False:
        log.info("\tError publishing ad")
        os.system('Say ' + 'Error publishing ad')
    return fRc
def session_create(config):
    """Create a Chrome webdriver session with stealth fingerprint masking.

    Honours config['headless'] and prefers a locally bundled Chrome binary
    at ./chrome-win/chrome.exe when present.
    """
    log.info("Creating session")
    options = webdriver.ChromeOptions()
    if config.get('headless', False) is True:
        log.info("Headless mode")
        options.add_argument("--headless")
    if os.path.isfile("./chrome-win/chrome.exe"):
        log.info("Found ./chrome-win/chrome.exe")
        options.binary_location = "./chrome-win/chrome.exe"
    driver = webdriver.Chrome(options=options)
    # selenium-stealth: mask common automation fingerprints.
    stealth(driver,
            languages=["en-US", "en"],
            vendor="Google Inc.",
            platform="Win32",
            webgl_vendor="Intel Inc.",
            renderer="Intel Iris OpenGL Engine",
            fix_hairline=True,
            )
    log.info("New session is: %s %s" % (driver.session_id, driver.command_executor._url))
    return driver
def signal_handler(sig, frame):
    """SIGINT handler: announce shutdown and terminate with exit status 0."""
    print('Exiting script')
    raise SystemExit(0)
if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    try:
        aOpts, aArgs = getopt.gnu_getopt(sys.argv[1:], "ph", ["profile=", "help"])
    except getopt.error as msg:
        os.system('Say ' + str(msg))
        print(msg)
        print("For help use --help")
        sys.exit(2)
    sProfile = ""
    for o, a in aOpts:
        # NOTE(review): substring test — both '-p' and '--profile' match
        # because '-p' is contained in the string "--profile".
        if o in "--profile":
            sProfile = a
    if not sProfile:
        print("No profile specified")
        sys.exit(2)
    log.info('Script started')
    log.info("Using profile: %s" % sProfile)
    config = {}
    profile_read(sProfile, config)
    if config.get("headless") is None:
        config["headless"] = False
    # Days after which a published ad is deleted and re-posted.
    updateInterval = config.get("update_interval", 4)
    fForceUpdate = False
    # NOTE(review): fDoLogin is set but never consulted below.
    fDoLogin = True
    dtNow = datetime.utcnow()
    driver = session_create(config)
    profile_write(sProfile, config)
    login(config)
    fake_wait(randint(1000, 4000))
    # Validate all titles up front before touching any ad.
    for ad in config['ads']:
        assert len(ad["title"]) > 9, "eBay restriction: Title must be at least 10 chars long"
    for ad in config["ads"]:
        fNeedsUpdate = False
        log.info("Handling '%s'" % ad["title"])
        if "date_updated" in ad:
            # python < 3.7 do not support datetime.datetime_fromisoformat()
            # https://stackoverflow.com/a/60852111/256002
            if int(python_version_tuple()[1]) < 7:
                from backports.datetime_fromisoformat import MonkeyPatch
                MonkeyPatch.patch_fromisoformat()
            dtLastUpdated = datetime.fromisoformat(ad["date_updated"])
        else:
            dtLastUpdated = dtNow
        dtDiff = dtNow - dtLastUpdated
        if "enabled" in ad and ad["enabled"] == "1":
            if "date_published" in ad:
                log.info("\tAlready published (%d days ago)" % dtDiff.days)
                if dtDiff.days > updateInterval:
                    fNeedsUpdate = True
            else:
                log.info("\tNot published yet")
                fNeedsUpdate = True
        else:
            log.info("\tDisabled, skipping")
        if fNeedsUpdate or fForceUpdate:
            # delete ad if it was published already
            if "id" in ad or "date_published" in ad:
                delete_ad(driver, ad)
            fPosted = post_ad(driver, ad, True)
            if not fPosted:
                break
            log.info("Waiting for handling next ad ...")
            fake_wait(randint(2000, 6000))
        # Persist any id/date changes after each ad.
        profile_write(sProfile, config)
    driver.close()
    log.info("Script done")
|
993,762 | 3200d2b201ef1178d497ce192a3de245f66018d9 | import pandas as pd
import numpy as np
import os
# Build a random training subset.
# Input: data_num = number of samples per class.
def random_select(df, out_npy_name, data_num, class_num):
    """Sample *data_num* image names per class from *df* and save the combined
    list as ./npy_files/<out_npy_name> (.npy)."""
    sampled = []
    for label in range(class_num):
        class_rows = df[df.label == int(label)]
        sampled.extend(class_rows.sample(n=data_num).img.values)
    np.save(os.path.join("./npy_files", out_npy_name), sampled)
def remove_df(df, in_npy):
    """Return *df* without the rows whose img name appears in the saved .npy list
    at ./npy_files/<in_npy>."""
    img_names = np.load(os.path.join("./npy_files", in_npy))
    # First matching index per image name, as in the original lookup.
    drop_idx = [df[df.img == name].index[0] for name in img_names]
    return df.drop(index=drop_idx)
# Input: one class's dataframe and the number of queries.
# Output: list of image names.
def random_center(df_i, center_num):
    """Pick *center_num* random query image names from a single-class dataframe."""
    return df_i.sample(n=center_num).img.values
# Compute the distance between two arrays.
def euclid(center, data):
    """Euclidean (L2) distance between two equally-shaped array-likes.

    Equivalent to sqrt(sum((center - data)**2)); np.asarray also accepts
    plain Python sequences, generalising the original array-only version.
    """
    return np.linalg.norm(np.asarray(center) - np.asarray(data))
# Input: dataframe without 'dist', number of classes class_num.
# Output: one dataframe per class.
def df_by_class(df, class_num):
    """Split *df* into a list of per-class dataframes, ordered by label 0..class_num-1."""
    return [df[df.label == int(label)] for label in range(class_num)]
# Input: dataframe without 'dist', query image name query_img.
# Output: dataframe with a 'dist' column added.
def df_add_distance(df, query_img):
    """Return a copy of *df* with a 'dist' column holding the distance of each
    row's feature to *query_img*'s feature, sorted ascending by distance.
    """
    # The query feature is loop-invariant; the original re-scanned the
    # dataframe for it on every iteration (accidental O(n^2)).  The unused
    # 'count' variable was dropped.
    query_feature = df[df.img == query_img].feature.values[0]
    distances = [np.array(euclid(query_feature, feature), dtype="float32")
                 for feature in df.feature.values]
    df_distance = df.copy()
    df_distance["dist"] = distances
    return df_distance.sort_values("dist")
# Input: dataframe containing 'dist', number of samples num.
# Output: list of image names img_list, radius.
def select_data(df, num):
    """Return the *num* nearest image names (skipping row 0, the query itself)
    and the distance at position *num* as the covering radius."""
    nearest_imgs = df.img[1:num + 1].values
    radius = df.dist.values[num]
    return nearest_imgs, radius
# Append img_list to the training data.
def add_data(img_list, in_npy_name, out_npy_name):
    """Load the training list from ./npy_files/<in_npy_name>, append *img_list*
    and save the combined list to ./npy_files/<out_npy_name>."""
    in_path = os.path.join("./npy_files", in_npy_name)
    out_path = os.path.join("./npy_files", out_npy_name)
    combined = list(np.load(in_path))
    combined.extend(img_list)
    np.save(out_path, combined)
|
993,763 | 2e7f916bb251f82ffa762d4c13fb2fa46d47f779 | ../3.0.0/_downloads/custom_scale.py |
993,764 | 301207e5a6a2ad9e684f204e4175b5f978130a57 | # coding = utf-8
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import tensorflow as tf
#from prepare_data import get_data
from database.readdb import get_data
from lstm import build_rnn
def train(reload=False):
    """Train the char-RNN poetry model (TensorFlow 1.x graph mode).

    Relies on module-level globals set in __main__: batch_size, vocab_size,
    words, X_data, Y_data.  Checkpoints are written to ./peotry_bigdb/save_model.

    reload: when True, resume from the latest checkpoint in save_dir.
    """
    file_name = "save_model"
    save_dir = "peotry_bigdb"
    model_save_path = os.path.join(os.getcwd(), save_dir, file_name)
    # build rnn
    input_sequences = tf.placeholder(tf.int32, shape=[batch_size, None])
    output_sequences = tf.placeholder(tf.int32, shape=[batch_size, None])
    logits, probs, _, _, _ = build_rnn(batch_size=batch_size, vocab_size=vocab_size,
                                       input_sequences=input_sequences)
    targets = tf.reshape(output_sequences, [-1])
    # Per-timestep cross-entropy with uniform weights.
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits], [targets], [tf.ones_like(targets, dtype=tf.float32)], len(words))
    cost = tf.reduce_mean(loss)
    learning_rate = tf.Variable(0.002, trainable=False)
    tvars = tf.trainable_variables()
    # Clip gradients to a global norm of 5 to stabilise RNN training.
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(grads, tvars))
    global_step = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)
        if reload:
            # Checkpoint files end in '-<epoch>'; resume from that epoch.
            module_file = tf.train.latest_checkpoint(save_dir)
            start_epoch = int(module_file.split('-')[-1])
            saver.restore(sess, module_file)
            print("reload sess from file successfully!")
        else:
            start_epoch = 0
        for epoch in range(start_epoch, 50):
            print("one more epoch, learning_rate decrease")
            if global_step % 80 == 0:
                # Exponential decay of the learning rate per epoch.
                sess.run(tf.assign(learning_rate, 0.002 * (0.9 ** epoch)))
            epoch_steps = len(list(zip(X_data, Y_data)))
            for step, (x, y) in enumerate(zip(X_data, Y_data)):
                global_step = epoch * epoch_steps + step
                _, los = sess.run([train_op, cost], feed_dict={
                    input_sequences: x,
                    output_sequences: y,
                })
                if global_step % 100 == 0:
                    print("epoch:%d steps:%d/%d loss:%3f" % (epoch, step, epoch_steps, los))
                if global_step % 1000 == 0:
                    print(" ====== save model in " + model_save_path + " ====== ")
                    saver.save(sess, model_save_path, global_step=epoch)
if __name__ == '__main__':
    batch_size = 5
    #X_data, Y_data, words, word2idfunc = get_data(poetry_file='data/poetry.txt', batch_size=batch_size)
    X_data, Y_data, words, word2idfunc = get_data(poetry_file=os.path.join(os.getcwd(), "database", "json"),
                                                  batch_size=batch_size, poet_index=2)
    # NOTE(review): vocab is len(words)+1 — presumably one extra id for
    # padding/unknown; confirm against word2idfunc.
    vocab_size = len(words) + 1
    # input_size:(batch_size, feature_length)
    train(reload=False)
993,765 | 42282440e912f6862a481ef3912316e5e99b25ce | # coding: utf-8
import os.path
import ConfigParser
import logging
from utils import UnifiTLV
from utils import mac_string_2_array, ip_string_2_array,getuptime,get_ipv4addr,get_macaddr,_byteify
from pfsense_utils import pfsense_const, get_temp
from struct import pack, unpack
import socket
import binascii
import time
import psutil
import cryptoutils
import urllib2
import json
import basecommand
import stun
import psutil
import pfsense_config
import basecommand
# Device state codes used in inform messages (see send_discover/send_inform).
DS_UNKNOWN = 1   # used by discovery responses
DS_ADOPTING = 0  # used while a notify/adoption is in flight
DS_READY = 2     # normal operating state
class BaseDevice:
    """Base class emulating a UniFi-style device on pfSense (Python 2 code:
    uses dict.has_key, urllib2 and ex.message).

    Handles config/map persistence, broadcast discovery packets, encrypted
    inform messages to the controller and STUN-based external address lookup.
    """

    def __init__(self, device="", type="", configfile=""):
        self.configfile = configfile
        # The map file lives next to the config file, with a .map extension.
        self.mapfile = configfile.replace(".conf", ".map")
        #pfsense_const['cf_conf_path']='conf'
        self.pfsenseConfig = pfsense_config.PfsenseConfig(pfsense_const['cf_conf_path'] + '/config.xml')
        # Seed config/map files on first run.
        if (not os.path.exists(configfile)):
            self.createEmptyConfig()
        if (not os.path.exists(self.mapfile)):
            self.createEmptyMap()
        self.reload_config()
        self.reload_map()
        self.lastError = "None"
        self.firmware = self.config['gateway']['firmware']
        self.device = device
        self.type = type
        self.state = DS_READY
        self.broadcast_index = 0
        self.interval = 10 * 1000
        self.nextCommand = None
        # Backdate so the first send happens immediately.
        self.delayStart = int(round(time.time() * 1000)) - self.interval
        # Derive mac/ip/netmask from the configured LAN interface.
        if (self.config.has_key('gateway') and self.config['gateway'].has_key('lan_if')):
            lan_if = self.config['gateway']['lan_if']
            if_addrs = psutil.net_if_addrs()
            macaddr = get_macaddr(if_addrs, lan_if)
            ipv4 = get_ipv4addr(if_addrs, lan_if)
            if macaddr is not None:
                self.mac = macaddr.address.replace('-', ':').lower()
            if ipv4 is not None:
                self.ip = ipv4.address
                self.netmask = ipv4.netmask

    def createEmptyConfig(self):
        # Minimal default config; LAN interface comes from pfSense.
        self.config = {
            'global': {
                'pid_file': 'unifi-gateway.pid'
            },
            'gateway': {
                'is_adopted': False,
                'lan_if': self.pfsenseConfig.getDefaultLan()["if"],
                'firmware': '4.4.44.5213871',
                'showhosts': False
            }
        }
        self.save_config()

    def getDefaultMap(self, lan, wan):
        # Subclasses supply the device-specific default map.
        pass

    def createEmptyMap(self):
        self.mapConfig = self.getDefaultMap(self.pfsenseConfig.getDefaultLan()["if"], self.pfsenseConfig.getDefaultWan()["if"])
        self.save_map()

    def getCurrentMessageType(self):
        # -1 selects broadcast in sendinfo(); subclasses may override.
        return -1

    def append_last_error(self, message):
        # Attach and clear the last recorded error, if any.
        if self.lastError is not None:
            message['last_error'] = self.lastError
            self.lastError = None

    def sendinfo(self):
        """Send one message cycle: queued command if present, otherwise the
        message chosen by getCurrentMessageType(); always follow with STUN."""
        logging.debug("sendinfo")
        if self.nextCommand is not None:
            if self.nextCommand.type == basecommand.CMD_DISCOVER:
                self.send_discover()
            if self.nextCommand.type == basecommand.CMD_NOTIFY:
                self.parseResponse(self._send_inform(self.nextCommand.data, False))
            if self.nextCommand.type == basecommand.CMD_INFORM:
                self.parseResponse(self._send_inform(self.nextCommand.data, False))
            self.nextCommand = None
        else:
            currentMessage = self.getCurrentMessageType()
            if currentMessage == -1:  # brodcast
                self.send_broadcast()
            elif currentMessage == 0:  # notify
                # NOTE(review): send_notify() is declared with a required
                # 'wasAdopted' argument but called here without one — this
                # path would raise TypeError if taken. Confirm.
                self.send_notify()
            elif currentMessage == 1:  # discover
                self.send_discover()
            else:
                self.send_inform()
        self._send_stun()

    def _send_inform(self, data, usecbc):
        """POST an encrypted inform payload; return the decoded response or
        None on failure (error kept in self.lastError)."""
        data = json.dumps(data)
        headers = {
            'Content-Type': 'application/x-binary',
            'User-Agent': 'AirControl Agent v1.0'
        }
        url = self.getInformUrl()
        logging.debug('Send inform request to {} : {}'.format(url, data))
        try:
            request = urllib2.Request(url, cryptoutils.encode_inform(self.getKey(), data, usecbc, self.mac), headers)
            response = urllib2.urlopen(request)
            result = cryptoutils.decode_inform(self.getKey(), response.read())
            return result
        except Exception as ex:
            logging.warn(ex)
            self.lastError = ex.message
            return None

    def send_broadcast(self):
        """Emit a UBNT discovery datagram to the multicast address, port 10001."""
        logging.debug('Send broadcast message #{} from gateway {}'.format(self.broadcast_index, self.ip))
        self.broadcast_index += 1
        if self.broadcast_index > 20:
            self.broadcast_index = 0
        addrinfo = socket.getaddrinfo('233.89.188.1', None)[0]  # 233.89.188.1 wireshark show normal broadcast
        sock = socket.socket(addrinfo[0], socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
        sock.bind((self.ip, 0))
        # NOTE(review): broadcast_index is passed as the 'version' positional
        # parameter of create_broadcast_message — confirm that is intended.
        message = self.create_broadcast_message(self.broadcast_index)
        sock.sendto(message, (addrinfo[4][0], 10001))
        logging.debug('Send broadcast message #{} from gateway {}'.format(self.broadcast_index, self.ip))

    def send_discover(self):
        # Discovery responses use CBC mode and state DS_UNKNOWN.
        base = self.cerateInformMessage()
        base['discovery_response'] = True
        base['state'] = DS_UNKNOWN
        self.parseResponse(self._send_inform(base, True))

    def send_inform(self):
        base = self.cerateInformMessage()
        base['discovery_response'] = False
        base['state'] = DS_READY
        self.parseResponse(self._send_inform(base, False))

    def send_notify(self, wasAdopted):
        # NOTE(review): createNotify() takes (reason, payload) but is called
        # with a single argument — this raises TypeError as written. Confirm.
        self.parseResponse(self._send_inform(self.createNotify(wasAdopted), True))

    def parseResponse(self, data):
        # Hook for subclasses: handle the decoded controller response.
        pass

    def cfgversion(self):
        return ""

    def getKey(self):
        # Encryption key for inform payloads; subclasses override.
        return ""

    def version(self):
        return ""

    def getInformUrl(self):
        return "http://ubuntu-utils.digiteum.com:8080/inform"

    def getInformIp(self):
        return "192.168.99.11"

    def getHostname(self):
        return "UBNT"

    def appendExtraBaseInfo(self, data):
        # Hook for subclasses to extend the base inform message.
        pass

    def appendExtraInformMessage(self, data):
        # Hook for subclasses to extend the full inform message.
        pass

    def cerateInformMessage(self):
        # (sic) Build the full inform message: base info plus stats.
        base = self.createBaseInform()
        base['sys_stats'] = self.get_sys_stats()
        base['system-stats'] = self.get_system_stats()
        self.appendExtraInformMessage(base)
        return base

    def createBaseInform(self):
        """Assemble the common inform fields (identity, addressing, timing)."""
        ctime = time.time()
        msg = {
            "fingerprint": "b2:5b:e2:98:c3:b1:2e:2e:38:fd:f9:34:b7:72:9e:67",
            "board_rev": 33,
            "bootid": 1,
            "bootrom_version": "unifi-enlarge-buf.-1-g63fe9b5d-dirty",
            "cfgversion": self.cfgversion(),
            "default": False,
            "dualboot": True,
            "hash_id": self.mac.replace(':', ''),
            "hostname": self.getHostname(),
            "inform_ip": self.getInformIp(),
            "inform_url": self.getInformUrl(),
            "ip": self.ip,
            "isolated": False,
            "kernel_version": "4.4.153",
            "locating": False,
            "mac": self.mac,
            "manufacturer_id": 4,
            "model": self.device,
            "model_display": self.type,
            "netmask": self.netmask,
            "required_version": "3.4.1",
            "selfrun_beacon": True,
            "serial": self.mac.replace(':', ''),
            "state": self.state,
            "time": int(ctime),
            "time_ms": int((ctime - int(ctime)) * 1000),
            "uptime": getuptime(),
            "version": self.firmware,
            "connect_request_ip": self.ip,
            "connect_request_port": 57201
        }
        # Attach and clear the last error (same pattern as append_last_error).
        if self.lastError is not None:
            msg['last_error'] = self.lastError
            self.lastError = None
        self.appendExtraBaseInfo(msg)
        return msg

    def createNotify(self, reason, payload):
        # Notify is an inform flagged as a notification, in ADOPTING state.
        base = self.createBaseInform()
        base['inform_as_notif'] = True
        base['notif_reason'] = reason
        base['notif_payload'] = payload
        base['state'] = DS_ADOPTING
        return base

    def create_broadcast_message(self, version=2, command=6):
        """Build the TLV-encoded UBNT discovery packet."""
        tlv = UnifiTLV()
        tlv.add(1, bytearray(mac_string_2_array(self.mac)))
        tlv.add(2, bytearray(mac_string_2_array(self.mac) + ip_string_2_array(self.ip)))
        tlv.add(3, bytearray('{}.v{}'.format(self.device, self.firmware)))
        tlv.add(10, bytearray([ord(c) for c in pack('!I', getuptime())]))
        tlv.add(11, bytearray('UBNT'))
        tlv.add(12, bytearray(self.device))
        tlv.add(19, bytearray(mac_string_2_array(self.mac)))
        tlv.add(18, bytearray([ord(c) for c in pack('!I', self.broadcast_index)]))
        tlv.add(21, bytearray(self.device))
        tlv.add(27, bytearray(self.firmware))
        tlv.add(22, bytearray(self.firmware))
        return tlv.get(version=version, command=command)

    def get_sys_stats(self):
        loadavg = psutil.getloadavg()
        mem = psutil.virtual_memory()
        # NOTE(review): psutil.getloadavg() returns (1min, 5min, 15min);
        # loadavg_15 reads index 1 and loadavg_5 index 2, and small constants
        # are added to each — confirm both are intentional.
        return {
            "loadavg_1": loadavg[0] + 0.2,
            "loadavg_15": loadavg[1] + 0.3,
            "loadavg_5": loadavg[2] + 0.1,
            "mem_buffer": 0,
            "mem_total": mem.total,
            "mem_used": mem.used
        }

    def _send_stun(self):
        """Query the configured STUN server and store the mapped (external)
        address/port into the gateway config; best-effort."""
        try:
            if self.config.has_key('mgmt_cfg') and self.config['mgmt_cfg'].has_key('stun_url'):
                client = stun.StunClient()
                client.send_request(self.config['mgmt_cfg']['stun_url'])
                result = client.receive_response()
                client.close()
                for item in result:
                    if 'MAPPED-ADDRESS' == item['name']:
                        self.config['gateway']['lan_ip'] = item['ip']
                        self.config['gateway']['lan_port'] = item['port']
                        self.save_config()
        except Exception as ex:
            logging.warn(ex)
            self.lastError = ex.message
            return None

    def get_system_stats(self):
        mem = psutil.virtual_memory()
        # All four temperature fields reuse the same pfSense sensor reading.
        return {
            "cpu": psutil.cpu_percent(),
            "mem": mem.percent,
            "uptime": getuptime(),
            "temps": {
                "Board (CPU)": get_temp(),
                "Board (PHY)": get_temp(),
                "CPU": get_temp(),
                "PHY": get_temp()
            }
        }

    def reload_config(self):
        # _byteify converts unicode back to str (Python 2 JSON quirk).
        with open(self.configfile) as config_file:
            self.config = json.load(config_file, object_hook=_byteify)

    def save_config(self):
        with open(self.configfile, 'w') as config_file:
            json.dump(self.config, config_file, indent=True, sort_keys=True)

    def reload_map(self):
        with open(self.mapfile) as config_file:
            self.mapConfig = json.load(config_file, object_hook=_byteify)

    def save_map(self):
        with open(self.mapfile, 'w') as config_file:
            json.dump(self.mapConfig, config_file, indent=True, sort_keys=True)
|
993,766 | 7393d9c31d04d9543f43556687b25be936185a5b | #!/usr/bin/python3 -u
# -*- coding: utf-8 -*-
from basepath import basepath
from conf import opts
import calendarlist
import calendars
import login
import logs
import untis
def args():
    """Parse command-line arguments (on top of oauth2client's flag parser).

    Adds the two positional Untis IDs and returns the parsed namespace.
    """
    import argparse

    from oauth2client import tools

    tools.argparser.add_argument(
        'untis_id_from',
        help='Untis ID to change from. Class (CL_xxx), Room (CL_xxx) or Teacher (TR_xxx).',
    )
    tools.argparser.add_argument(
        'untis_id_to',
        help='Untis ID to change to. Class (CL_xxx), Room (CL_xxx) or Teacher (TR_xxx).',
    )
    return argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
def main():
    """Rename one Untis calendar ID to another on the Google account.

    Looks up the active calendar mapped to ``untis_id_from`` and patches
    its (encoded) description to ``untis_id_to``. Exits with status 1
    when no such calendar exists.
    """
    import sys

    flags = args()
    logs.logs('h1', 'Logging in')
    service = login.login(flags, opts)
    logs.logs('h1', 'Fetching data')
    active = calendarlist.list(service)
    activemap = calendarlist.id2map(active)
    idfrom = flags.untis_id_from
    if idfrom not in activemap:
        logs.logs('err', 'No such calendar: ' + idfrom)
        sys.exit(1)
    calendar = activemap[idfrom]
    idto = flags.untis_id_to
    logs.logs('h1', 'Renaming: ' + idfrom + '->' + idto)
    calendars.patch(service, {
        'id': calendar['id'],
        'description': calendars.encode(idto),
    })


# Guard the entry point so importing this module does not run the rename.
if __name__ == '__main__':
    main()
|
993,767 | b383a151f78bed2ad22a959f2ddb37e820c75d59 | #!/usr/bin/env python
# coding: utf-8
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#But-de-ce-notebook" data-toc-modified-id="But-de-ce-notebook-1"><span class="toc-item-num">1 </span>But de ce notebook</a></div><div class="lev1 toc-item"><a href="#Règles-du-Jap-Jap" data-toc-modified-id="Règles-du-Jap-Jap-2"><span class="toc-item-num">2 </span>Règles du <em>Jap Jap</em></a></div><div class="lev2 toc-item"><a href="#But-du-jeu" data-toc-modified-id="But-du-jeu-21"><span class="toc-item-num">2.1 </span>But du jeu</a></div><div class="lev2 toc-item"><a href="#Début-du-jeu" data-toc-modified-id="Début-du-jeu-22"><span class="toc-item-num">2.2 </span>Début du jeu</a></div><div class="lev2 toc-item"><a href="#Tour-de-jeu" data-toc-modified-id="Tour-de-jeu-23"><span class="toc-item-num">2.3 </span>Tour de jeu</a></div><div class="lev2 toc-item"><a href="#Fin-du-jeu" data-toc-modified-id="Fin-du-jeu-24"><span class="toc-item-num">2.4 </span>Fin du jeu</a></div><div class="lev1 toc-item"><a href="#Code-du-jeu" data-toc-modified-id="Code-du-jeu-3"><span class="toc-item-num">3 </span>Code du jeu</a></div><div class="lev2 toc-item"><a href="#Code-pour-représenter-une-carte-à-jouer" data-toc-modified-id="Code-pour-représenter-une-carte-à-jouer-31"><span class="toc-item-num">3.1 </span>Code pour représenter une carte à jouer</a></div><div class="lev2 toc-item"><a href="#Fin-du-jeu" data-toc-modified-id="Fin-du-jeu-32"><span class="toc-item-num">3.2 </span>Fin du jeu</a></div><div class="lev2 toc-item"><a href="#Actions" data-toc-modified-id="Actions-33"><span class="toc-item-num">3.3 </span>Actions</a></div><div class="lev2 toc-item"><a href="#Valider-un-coup" data-toc-modified-id="Valider-un-coup-34"><span class="toc-item-num">3.4 </span>Valider un coup</a></div><div class="lev2 toc-item"><a href="#Jeu-interactif" data-toc-modified-id="Jeu-interactif-35"><span class="toc-item-num">3.5 </span>Jeu interactif</a></div><div class="lev2 toc-item"><a href="#Etat-du-jeu" data-toc-modified-id="Etat-du-jeu-36"><span 
class="toc-item-num">3.6 </span>Etat du jeu</a></div><div class="lev2 toc-item"><a href="#Lancement-du-jeu-intéractif" data-toc-modified-id="Lancement-du-jeu-intéractif-37"><span class="toc-item-num">3.7 </span>Lancement du jeu intéractif</a></div><div class="lev1 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-4"><span class="toc-item-num">4 </span>Conclusion</a></div>
# ----
# # But de ce notebook
#
# - Je vais expliquer les règles d'un jeu de carte, le "Jap Jap", qu'on m'a appris pendant l'été,
# - Je veux simuler ce jeu, en Python, afin de calculer quelques statistiques sur le jeu,
# - J'aimerai essayer d'écrire une petite intelligence artificielle permettant de jouer contre l'ordinateur,
# - Le but est de faire un prototype d'une application web ou mobile qui permettrait de jouer contre son téléphone !
# ----
# # Règles du *Jap Jap*
#
# ## But du jeu
# - Le *Jap Jap* se joue à $n \geq 2$ joueur-euse-s (désignées par le mot neutre "personne"), avec un jeu de $52$ cartes classiques (4 couleurs, 1 à 10 + vallet/dame/roi).
# - Chaque partie du *Jap Jap* jeu se joue en plusieurs manches. A la fin de chaque manche, une personne gagne et les autres marquent des points. Le but est d'avoir le moins de point possible, et la première personne a atteindre $90$ points a perdu !
# - On peut rendre le jeu plus long en comptant la première personne à perdre $x \geq 1$ parties.
#
# ## Début du jeu
# - Chaque personne reçoit 5 cartes,
# - et on révèle la première carte de la pioche.
#
# ## Tour de jeu
# - Chaque personne joue l'une après l'autre, dans le sens horaire (anti trigonométrique),
# - A son tour, la personne a le choix entre jouer normalement, ou déclencher la fin de jeu si elle possède une main valant $v \leq 5$ points (voir "Fin du jeu" plus bas),
# - Jouer normalement consiste à jeter *une ou plusieurs* ($x \in \{1,\dots,5\}$) cartes de sa main dans la défausse, et prendre *une* carte et la remettre dans sa main. Elle peut choisir la carte du sommet de la pioche (qui est face cachée), ou *une* des $x' \in \{1,\dots,5\}$ cartes ayant été jetées par la personne précédente, ou bien la première carte de la défausse si c'est le début de la partie.
#
# ## Fin du jeu
# - Dès qu'une personne possède une main valant $v \leq 5$ points, elle peut dire *Jap Jap !* au lieu de jouer à son tour.
# + Si elle est la seule personne à avoir une telle main de moins de $5$ points, elle gagne !
# + Si une autre personne a une main de moins de $5$ points, elle peut dire *Contre Jap Jap !*, à condition d'avoir *strictement* moins de points que le *Jap Jap !* ou le *Contre Jap Jap !* précédent. La personne qui remporte la manche est celle qui a eu le *Contre Jap Jap !* de plus petite valeur.
# - La personne qui a gagné ne marque aucun point, et les autres ajoutent à leur total actuel de point
# - Si quelqu'un atteint $90$ points, elle perd la partie.
# ----
# # Code du jeu
# ## Code pour représenter une carte à jouer
# In[1]:
# Unicode suit symbols used throughout the game.
coeur = "♥"
treffle = "♣"
pique = "♠"
carreau = "♦"
couleurs = [coeur, treffle, pique, carreau]


class Carte():
    """One playing card: a value in 1..13 and a suit from `couleurs`."""

    def __init__(self, valeur, couleur):
        assert 1 <= valeur <= 13, "Erreur : valeur doit etre entre 1 et 13."
        assert couleur in couleurs, "Erreur : couleur doit etre dans la liste {}.".format(couleurs)
        self.valeur = valeur
        self.couleur = couleur

    def __str__(self):
        # Court cards are displayed as V/Q/K (valet, queen, king).
        figures = {11: "V", 12: "Q", 13: "K"}
        affichage = figures.get(self.valeur, str(self.valeur))
        return "{:>2}{}".format(affichage, self.couleur)

    __repr__ = __str__

    def val(self):
        """Point value of the card (face value; figures count 11-13)."""
        return self.valeur
# In[3]:
def valeur_main(liste_carte):
    """Total point value of a hand: the sum of each card's val()."""
    total = 0
    for carte in liste_carte:
        total += carte.val()
    return total
# In[4]:
import random


def nouveau_jeu():
    """Return a freshly shuffled 52-card deck (13 values x 4 suits)."""
    jeu = []
    # Build in the same value-major order as before, then shuffle.
    for valeur in range(1, 13 + 1):
        for couleur in couleurs:
            jeu.append(Carte(valeur, couleur))
    random.shuffle(jeu)
    return jeu
# In[5]:
nouveau_jeu()[:5]
valeur_main(_)
# ## Fin du jeu
# Pour représenter la fin du jeu :
# In[6]:
class FinDuneManche(Exception):
    """Raised to signal the end of a single round (manche)."""
    pass
# In[7]:
class FinDunePartie(Exception):
    """Raised to signal the end of a whole game (partie)."""
    pass
# ## Actions
# Pour représenter une action choisie par une personne :
# In[8]:
class action():
    """One move chosen by a player: draw, pick a visible card, or call Jap Jap."""

    def __init__(self, typeAction="piocher", choix=None):
        assert typeAction in ["piocher", "choisir", "Jap Jap !"]
        assert choix is None or choix in [0, 1, 2, 3, 4]
        self.typeAction = typeAction
        self.choix = choix

    def __str__(self):
        if self.est_piocher():
            return "Piocher"
        if self.est_japjap():
            return "Jap Jap !"
        if self.est_choisir():
            return "Choisir #{}".format(self.choix)

    def est_piocher(self):
        """True when the move is drawing from the face-down pile."""
        return self.typeAction == "piocher"

    def est_choisir(self):
        """True when the move is picking up a visible discard."""
        return self.typeAction == "choisir"

    def est_japjap(self):
        """True when the move ends the round with a Jap Jap call."""
        return self.typeAction == "Jap Jap !"


# Reusable singleton moves shared by the game loop.
action_piocher = action("piocher")
action_japjap = action("Jap Jap !")
action_choisir0 = action("choisir", 0)
action_choisir1 = action("choisir", 1)
action_choisir2 = action("choisir", 2)
action_choisir3 = action("choisir", 3)
action_choisir4 = action("choisir", 4)
# ## Valider un coup
# Pour savoir si une suite de valeurs est bien continue :
# In[9]:
def suite_valeurs_est_continue(valeurs):
    """True when the sorted values form a run of consecutive integers."""
    tri = sorted(valeurs)
    return all(apres - avant == 1 for avant, apres in zip(tri, tri[1:]))


# Quick sanity checks (notebook demo cells).
suite_valeurs_est_continue([5, 6, 7])
suite_valeurs_est_continue([5, 7, 8])


def valide_le_coup(jetees):
    """Check whether a set of 1..5 discarded cards is a legal move.

    Legal: a single card; several cards of one suit with consecutive
    values; or several cards of equal value, all of different suits.
    """
    assert 1 <= len(jetees) <= 5
    # A single card is always a valid discard.
    if len(jetees) == 1:
        return True
    suits = [carte.couleur for carte in jetees]
    values = sorted(carte.valeur for carte in jetees)
    if len(set(suits)) == 1:
        # One suit: the values must be a consecutive run.
        return suite_valeurs_est_continue(values)
    if len(set(values)) == 1:
        # One value: every suit must be distinct.
        return len(set(suits)) == len(suits)
    return False
# Exemples de coups valides :
# In[12]:
valide_le_coup([Carte(4, coeur)])
# In[13]:
valide_le_coup([Carte(4, coeur), Carte(5, coeur)])
# In[14]:
valide_le_coup([Carte(4, coeur), Carte(5, coeur), Carte(3, coeur)])
# In[15]:
valide_le_coup([Carte(4, coeur), Carte(5, coeur), Carte(3, coeur), Carte(2, coeur), Carte(6, coeur)])
# In[16]:
valide_le_coup([Carte(4, coeur), Carte(4, carreau)])
# In[17]:
valide_le_coup([Carte(4, coeur), Carte(4, carreau), Carte(4, pique)])
# In[18]:
valide_le_coup([Carte(4, coeur), Carte(4, carreau), Carte(4, pique), Carte(4, treffle)])
# Exemples de coups pas valides :
# In[19]:
valide_le_coup([Carte(4, coeur), Carte(9, coeur)])
# In[20]:
valide_le_coup([Carte(4, coeur), Carte(4, coeur), Carte(3, coeur)])
# In[21]:
valide_le_coup([Carte(4, coeur), Carte(12, carreau)])
# In[22]:
valide_le_coup([Carte(4, coeur), Carte(4, carreau), Carte(4, pique)])
# In[23]:
valide_le_coup([Carte(4, coeur), Carte(4, carreau), Carte(4, pique), Carte(4, treffle)])
# ## Jeu interactif
# On va utiliser les widgets ipython pour construire le jeu interactif !
# In[24]:
# Voir https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Asynchronous.html#Waiting-for-user-interaction
get_ipython().run_line_magic('gui', 'asyncio')
# In[25]:
import asyncio


def wait_for_change(widget, value):
    """Return a Future resolved with the widget's next change to `value`.

    Bug fix: the original called set_result BEFORE unobserve, so a second
    notification arriving before detachment would call set_result on a
    finished Future and raise InvalidStateError. We now detach first and
    guard with future.done().
    """
    future = asyncio.Future()

    def getvalue(change):
        # Detach first so a re-entrant notification cannot fire us twice.
        widget.unobserve(getvalue, value)
        if not future.done():
            # make the new value available
            future.set_result(change.new)

    widget.observe(getvalue, value)
    return future
# In[26]:
import ipywidgets as widgets
from IPython.display import display

# Widget layout presets. 'description_width': 'initial' lets long labels
# render fully; 'button_width' splits the viewport evenly among the
# 2/3/4/5 buttons that a given prompt displays.
style = {
    'description_width': 'initial',
}
style2boutons = {
    'description_width': 'initial',
    'button_width': '50vw',
}
style3boutons = {
    'description_width': 'initial',
    'button_width': '33vw',
}
style4boutons = {
    'description_width': 'initial',
    'button_width': '25vw',
}
style5boutons = {
    'description_width': 'initial',
    'button_width': '20vw',
}
# Pour savoir quoi jouer :
# In[27]:
def piocher_ou_choisir_une_carte_visible():
    """Toggle buttons asking whether to draw blind or take a visible discard."""
    options_affichees = ["Une carte dans la pioche ", "Une carte du sommet de la défausse "]
    return widgets.ToggleButtons(
        description="Action ?",
        options=options_affichees,
        index=0,
        icons=["question", "list-ol"],
        tooltips=["invisible", "visibles"],
        style=style4boutons,
    )
# In[28]:
bouton = piocher_ou_choisir_une_carte_visible()
display(bouton)
print("Choix :", bouton.index)
# Pour savoir quoi jeter :
# In[29]:
exemple_de_main = [Carte(10, coeur), Carte(11, coeur), Carte(11, pique)]
exemple_de_main
# In[30]:
def faire_japjap(main):
    """Toggle button offering to end the round with a Jap Jap call."""
    libelle = "Jap Jap ? ({})".format(valeur_main(main))
    return widgets.ToggleButton(
        value=False,
        description=libelle,
        button_style="success",
        tooltip="Votre main vaut moins de 5 points, donc vous pouvez terminer la partie !",
        icon="check",
        style=style,
    )
# In[31]:
b = faire_japjap(exemple_de_main)
display(b)
print("Choix :", b.value)
# In[32]:
def quoi_jeter(main):
    """Multi-select over the hand, asking which card(s) to discard."""
    return widgets.SelectMultiple(
        description="Quoi jeter ?",
        options=main,
        index=[0],
        style=style,
    )
# In[33]:
b = quoi_jeter(exemple_de_main)
display(b)
print("Choix :", b.index)
# In[34]:
from IPython.display import display
# In[35]:
def valider_action():
    # Simple confirmation toggle displayed next to every choice widget;
    # the game loop awaits its 'value' change to proceed.
    return widgets.ToggleButton(description="Valider l'action ?")
# Pour savoir quoi piocher :
# In[36]:
exemple_de_visibles = [Carte(11, pique), Carte(10, treffle)]
exemple_de_visibles
# In[37]:
def quoi_prendre(visibles):
    """Toggle buttons asking which visible discard to pick up."""
    return widgets.ToggleButtons(
        description="Prendre quelle carte du sommet ?",
        options=visibles,
        style=style,
    )
# In[38]:
quoi_prendre(exemple_de_visibles)
# On va tricher et afficher les cartes avec `display(Markdown(...))` plutôt que `print`, pour les avoir en couleurs.
# In[39]:
from IPython.display import Markdown
print("[Alice] Cartes en main : [ 6♦, 5♠, V♣, V♠, 1♣]")
# avec de la couleurs
display(Markdown("[Alice] Cartes en main : [ 6♦, 5♠, V♣, V♠, 1♣]"))
# Maintenant on peut tout combiner.
# In[40]:
async def demander_action(visibles=None, main=None, stockResultat=None):
    """Interactively ask the player for their move via ipywidgets.

    Shows the hand and the visible discards, then walks through: an
    optional "Jap Jap !" call, the draw-vs-pick choice, which visible
    card to take, and which cards to throw away.

    Returns (action, jetees) where `jetees` are *indices* into `main`
    (or None for a Jap Jap). Results are also copied into the
    `stockResultat` dict when one is supplied.
    """
    display(Markdown("- Main : {} (valeur = {})".format(main, valeur_main(main))))
    display(Markdown("- Sommet de la défausse {}".format(visibles)))
    fait_japjap = False
    # 1.a. If a Jap Jap is possible, ask whether to call it.
    if valeur_main(main) <= 5:
        print("Vous pouvez faire Jap Jap !")
        bouton3 = faire_japjap(main)
        validation = valider_action()
        display(widgets.VBox([bouton3, validation]))
        await wait_for_change(validation, 'value')
        # print(" ==> Choix :", bouton3.value)
        if bouton3.value:
            fait_japjap = True
            typeAction = "Jap Jap !"
            jetees = None
    # 1.b. Otherwise, what to play.
    # FIXME(review): on the Jap Jap path, `choix` is never assigned, so
    # the `action(...)` construction at the bottom raises NameError.
    if not fait_japjap:
        bouton1 = piocher_ou_choisir_une_carte_visible()
        validation = valider_action()
        display(widgets.VBox([bouton1, validation]))
        await wait_for_change(validation, 'value')
        piocher = bouton1.index == 0
        # print(" ==> Choix :", bouton1.value)
        # 2.a. Drawing blind needs no further choice.
        if piocher:
            print("Okay, vous piochez.")
            typeAction = "piocher"
            choix = None
        # 2.b. Picking a visible card: ask which one if several.
        else:
            typeAction = "choisir"
            print("Okay, vous choisissez dans le sommet de la défausse.")
            if len(visibles) > 1:
                bouton2 = quoi_prendre(visibles)
                validation = valider_action()
                display(widgets.VBox([bouton2, validation]))
                await wait_for_change(validation, 'value')
                # print(" ==> Choix :", bouton2.index)
                choix = bouton2.index
            else:
                choix = 0
    # 3. Choose what to discard.
    if typeAction != "Jap Jap !":
        if len(main) > 1:
            pas_encore_de_coup = True
            jetees = None
            # FIXME(review): this condition looks inverted — it keeps
            # looping while the chosen coup IS valid and exits on an
            # invalid one; `not valide_le_coup(jetees)` was likely meant.
            # Also, `jetees` is a tuple of SelectMultiple *indices*
            # (ints), while valide_le_coup() expects Carte objects with
            # .valeur/.couleur — multi-card selections will crash there.
            while pas_encore_de_coup or valide_le_coup(jetees):
                bouton4 = quoi_jeter(main)
                validation = valider_action()
                display(widgets.VBox([bouton4, validation]))
                await wait_for_change(validation, 'value')
                # print(" ==> Choix :", bouton4.index)
                jetees = bouton4.index
                pas_encore_de_coup = False
                if not valide_le_coup(jetees):
                    print("ERREUR ce coup n'est pas valide, on ne peut pas se débarasser de cet ensemble de cartes {}.".format(jetees))
        else:
            # NOTE(review): single-card hands yield the int 0 here, an
            # inconsistent type vs the tuple produced above.
            jetees = 0
    action_choisie = action(typeAction=typeAction, choix=choix)
    if stockResultat is not None:
        stockResultat["action_choisie"] = action_choisie
        stockResultat["jetees"] = jetees
    return action_choisie, jetees
# In[41]:
if False:
stockResultat = {"action_choisie": None, "jetees": None}
asyncio.ensure_future(
demander_action(
visibles=exemple_de_visibles,
main=exemple_de_main,
stockResultat=stockResultat,
)
)
stockResultat
# In[42]:
def demander_action_et_donne_resultat(visibles=None, main=None):
    """Fire-and-forget wrapper around demander_action().

    FIXME(review): this returns immediately after *scheduling* the
    coroutine, before the user has answered, so both returned values are
    still None. Callers must instead await demander_action directly (as
    the game loop does) or poll the stockResultat dict later.
    """
    stockResultat = {"action_choisie": None, "jetees": None}
    asyncio.ensure_future(
        demander_action(
            visibles=visibles,
            main=main,
            stockResultat=stockResultat,
        )
    )
    return stockResultat["action_choisie"], stockResultat["jetees"]
# ## Etat du jeu
# Maintenant on peut représenter un état du jeu.
# In[43]:
# Losing threshold; lower it to play shorter games.
scoreMax = 90
# NOTE(review): this immediately overrides the 90 above — left over from
# notebook testing, so games actually end at 10 points.
scoreMax = 10
# In[44]:
class EtatJeu():
    """Full game state: players, scores, hands, discard pile and deck.

    Drives the interactive notebook game: dealing, the per-turn widget
    prompts, scoring, and end-of-round / end-of-game bookkeeping.
    Several latent NameError bugs are flagged inline with FIXME(review).
    """

    def __init__(self, nbPersonnes=2, nomsPersonnes=None,
                 scoreMax=scoreMax, malusContreJapJap=25, nbCartesMax=5):
        # The scoreMax default is captured from the module-level constant
        # (currently 10 because of the override above).
        assert 2 <= nbPersonnes <= 5, "Le nombre de personnes pouvant jouer doit etre entre 2 et 5."
        self.nbPersonnes = nbPersonnes
        self.nomsPersonnes = nomsPersonnes
        self.scoreMax = scoreMax
        self.malusContreJapJap = malusContreJapJap
        self.nbCartesMax = nbCartesMax
        # Initialize the internal storage.
        self.personnes = [personne for personne in range(nbPersonnes)]
        self.scores = [
            0 for personne in self.personnes
        ]
        self.mains = [
            [ ] for personne in self.personnes
        ]
        self.visibles = []
        self.jeu = nouveau_jeu()

    def montrer_information_visibles(self):
        """Print the public info: pile size, discards, per-player summary."""
        print("- Nombre de carte dans la pioche :", len(self.jeu))
        print("- Cartes visibles au sommet de la défausse :", len(self.visibles))
        for personne in self.personnes:
            nom = self.nomsPersonnes[personne] if self.nomsPersonnes is not None else personne
            main = self.mains[personne]
            score = self.scores[personne]
            print(" + Personne {} a {} carte{} en main, et un score de {}.".format(
                nom, len(main), "s" if len(main) > 1 else "", score)
            )

    def montrer_information_privee(self, personne=0):
        """Show one player's hand (colored via Markdown display)."""
        main = self.mains[personne]
        nom = self.nomsPersonnes[personne] if self.nomsPersonnes is not None else personne
        display(Markdown("[{}] Carte{} en main : {}".format(nom, "s" if len(main) > 1 else "", main)))

    # --- Drawing mechanics and the initial deal

    def prendre_une_carte_pioche(self):
        """Pop and return the top card; raise FinDuneManche on an empty deck."""
        if len(self.jeu) <= 0:
            raise FinDuneManche
        premiere_carte = self.jeu.pop(0)
        return premiere_carte

    def debut_jeu(self):
        """Deal the opening hands and reveal the first discard."""
        self.distribuer_mains()
        premiere_carte = self.prendre_une_carte_pioche()
        self.visibles = [premiere_carte]

    def donner_une_carte(self, personne=0):
        # Deal one card from the deck into this player's hand.
        premiere_carte = self.prendre_une_carte_pioche()
        self.mains[personne].append(premiere_carte)

    def distribuer_mains(self):
        """Reset hands, pick a random first player, deal nbCartesMax cards each."""
        self.mains = [
            [ ] for personne in self.personnes
        ]
        # Rotate the seating order so a random player starts.
        premiere_personne = random.choice(self.personnes)
        self.personnes = self.personnes[premiere_personne:] + self.personnes[:premiere_personne]
        for nb_carte in range(self.nbCartesMax):
            for personne in self.personnes:
                self.donner_une_carte(personne)

    # --- End of a round

    def fin_dune_manche(self):
        # Start the next round with a fresh shuffled deck; scores carry
        # over, and debut_jeu re-deals hands and the first discard.
        self.jeu = nouveau_jeu()
        self.debut_jeu()

    # --- Chain the turns together

    async def enchainer_les_tours(self):
        """Main interactive loop: prompt each player in turn until the round ends."""
        try:
            indice_actuel = 0
            while len(self.jeu) > 0:
                # Within the same round, play one turn for the current player.
                personne_actuelle = self.personnes[indice_actuel]
                # 1. Show the public info, then the player's private hand.
                self.montrer_information_visibles()
                self.montrer_information_privee(personne_actuelle)
                # 2. Ask the player for their move.
                # action_choisie, jetees = demander_action_et_donne_resultat(
                action_choisie, jetees = await demander_action(
                    visibles = self.visibles,
                    main = self.mains[personne_actuelle],
                )
                # 3. Apply the move.
                self.jouer(
                    personne = personne_actuelle,
                    action = action_choisie,
                    indices = jetees,
                )
                # Next player.
                indice_actuel = (indice_actuel + 1) % self.nbPersonnes
            if len(self.jeu) <= 0:
                print("\nIl n'y a plus de cartes dans la pioche, fin de la manche sans personne qui gagne.")
                raise FinDuneManche
        except FinDuneManche:
            print("\nFin d'une manche.")
            # FIXME(review): fin_dune_manche is a method — this bare call
            # raises NameError; it should be self.fin_dune_manche().
            fin_dune_manche()
        except FinDunePartie:
            print("\n\nFin d'une partie.")

    # --- One turn of play

    def jouer(self, personne=0, action=action_piocher, indices=None):
        """Apply one move: discard `indices`, then draw / pick / Jap Jap.

        FIXME(review): several references below use the undefined global
        `personnes` instead of `self.personnes` and will raise NameError
        the first time a Jap Jap is played; flagged inline.
        """
        print(" ? Personne {} joue l'action {} avec les indices {} ...".format(personne, action, indices)) # DEBUG
        if indices is not None:
            jetees = [ self.mains[personne][indice] for indice in indices ]
            assert valide_le_coup(jetees)
        # ... and the player takes a new card:
        if action.est_piocher():
            # either the face-down top of the deck ...
            premiere_carte = self.prendre_une_carte_pioche()
            display(Markdown("=> Vous piochez la carte {}.".format(premiere_carte)))
            self.mains[personne].append(premiere_carte)
        if action.est_choisir():
            # ... or one of the previously visible discards.
            choix = action.choix
            carte_choisie = self.visibles.pop(choix)
            display(Markdown("=> Vous récupérez la carte {}.".format(carte_choisie)))
            self.mains[personne].append(carte_choisie)
        if action.est_japjap():
            # Check this player really holds a Jap Jap (hand worth 1..5).
            valeur_du_premier_japjap = valeur_main(self.mains[personne])
            assert 1 <= valeur_du_premier_japjap <= 5
            gagnante = personne
            display(Markdown("=> Vous faites un Jap Jap, valant {} point{}.".format(valeur_du_premier_japjap, "s" if valeur_du_premier_japjap > 1 else "")))
            contre_JapJap = False
            # Compare against every other hand for a possible counter.
            valeurs_mains = [valeur_main(main) for main in self.mains]
            # FIXME(review): `personnes` is undefined — should be self.personnes.
            plus_petite_valeur = min([valeurs_mains[autre_personne] for autre_personne in [ p for p in personnes if p != gagnante ]])
            if plus_petite_valeur < valeur_du_premier_japjap:
                print("CONTRE JAP JAP !")
                # Someone holds a smaller hand: the caller does not win.
                contre_JapJap = True
                # The winner is the first (in play order) with the minimal
                # hand value; ties break on the fewest cards in hand.
                # FIXME(review): `personnes` is undefined — should be self.personnes.
                gagnantes = [ p for p in personnes if valeurs_mains[p] == plus_petite_valeur ]
                print("Les autres personnes ayant un Jap Jap de plus petite valeur sont {} !".format(gagnantes))
                nombre_min_carte = min([len(self.mains[p]) for p in gagnantes])
                gagnante = min([p for p in gagnantes if len(self.mains[p]) == nombre_min_carte])
                print("La personne gagnant la manche est {}.".format(gagnante))
            # Record the scores.
            print("\nOn marque les scores !")
            # NOTE(review): with no counter, plus_petite_valeur is the
            # smallest OTHER hand, not the winner's own Jap Jap value.
            print("==> La personne {} a gagné ! Avec un Jap Jap de valeur {} !".format(gagnante, plus_petite_valeur))
            # FIXME(review): `personnes` is undefined — should be self.personnes.
            for autre_personne in [ p for p in personnes if p != gagnante ]:
                marque_point = valeur_main(self.mains[autre_personne])
                print("- La personne {} n'a pas gagné, et marque {} points".format(autre_personne, marque_point))
                self.scores[autre_personne] += marque_point
            # A countered caller scores the +25 malus instead of their hand.
            # FIXME(review): the print below is missing its .format(...)
            # argument (it shows a literal "{}"), and it runs even when
            # contre_JapJap is False.
            print("- La personne {} n'a pas gagné et a subi un CONTRE JAP JAP ! Elle marque +25 points.")
            if contre_JapJap:
                self.scores[personne] -= valeur_main(self.mains[personne])
                self.scores[personne] += self.malusContreJapJap
            print("\nA la fin de cette manche :")
            for personne in self.personnes:
                nom = self.nomsPersonnes[personne] if self.nomsPersonnes is not None else personne
                score = self.scores[personne]
                print(" + Personne {} a un score de {}.".format(nom, score))
            # If someone reached the losing threshold ...
            if max(self.scores) >= self.scoreMax:
                # ... they lose the whole game.
                # FIXME(review): `personnes` is undefined — should be self.personnes.
                for personne in personnes:
                    score = self.scores[personne]
                    if score == max(self.scores):
                        nom = self.nomsPersonnes[personne] if self.nomsPersonnes is not None else personne
                        print("\n==> La personne {} a perdu, avec un score de {}.".format(nom, score))
                raise FinDunePartie
            raise FinDuneManche
        # Put the discarded cards on display ...
        # FIXME(review): when indices is None and the action was not a
        # Jap Jap, `jetees` is unbound here and this raises NameError.
        self.visibles = jetees
        # ... and remove them from the hand.
        nouvelle_main = self.mains[personne]
        for carte_jetee in jetees:
            nouvelle_main.remove(carte_jetee)
        # And play continues.
# et ça continue
# ## Lancement du jeu intéractif
# In[45]:
# Create a two-player interactive game ...
jeu = EtatJeu(nomsPersonnes=["Alice", "Bob"])
# ... deal the opening hands and reveal the first discard ...
jeu.debut_jeu()
import asyncio
# ... then schedule the interactive turn loop on the notebook event loop.
asyncio.ensure_future(jeu.enchainer_les_tours())
# Notebook cells inspecting the state after playing.
jeu.mains
jeu.scores
# # Conclusion
# In[ ]:
|
993,768 | 4049482575730ad4bfa473e2e7ac4363ec5cfa75 | from django.shortcuts import render
from .models import compania
from django.shortcuts import render, get_object_or_404
from django.views import generic
# Create your views here.
def compania_list(request):
    """Render the full list of compania records."""
    todas = compania.objects.all()
    return render(request, 'inventario/compania_list.html', {'Compania': todas})
class detail(generic.DetailView):
    # Class-based counterpart of the compania_detail function view below;
    # both render the same template.
    model = compania
    template_name = 'inventario/compania_detail.html'
def compania_detail(request, compania_id):
    """Render one compania by primary key; 404 when it does not exist."""
    objeto = get_object_or_404(compania, pk=compania_id)
    return render(request, 'inventario/compania_detail.html', {'Compania': objeto})
|
993,769 | 52d2642631ddf4090e6cc333333c36928c19c25f | #coding:utf-8
import requests
import re
import urllib
from urllib.request import urlopen
import chardet
# Column buffers for the six scraped fields; kept as six named lists so
# anything referencing table1..table6 keeps working.
table1 = []
table2 = []
table3 = []
table4 = []
table5 = []
table6 = []
tables = [table1, table2, table3, table4, table5, table6]

url = 'http://u9service.ufida.com.cn/servicehome/kmindex.aspx'

# Compile the extraction patterns once instead of on every page, and
# decode each response body once instead of once per pattern.
patterns = [
    re.compile(r'<td width="40">(.*?)</td>'),
    re.compile(r'<td width="140">(.*?)</a>'),
    re.compile(r'<td width="30">(.*?)</td>'),
    re.compile(r'问题现象:</font>(.*?)<br>'),   # problem symptom
    re.compile(r'解决方案:</font>(.*?)</td>'),  # solution text
    re.compile(r'<td width="70">(.*?)</td>'),
]

for i in range(110):
    paramsload = {'ver': '0', 'mmdul': '0', 'mdul': '0', 'qnum': '',
                  'qtitle': '', 'qcontent': '', 'page': str(i + 1)}
    r = requests.get(url, params=paramsload)
    html = r.content.decode()
    for table, pattern in zip(tables, patterns):
        table.extend(pattern.findall(html))

# Write each column to its own file (test1.txt .. test6.txt), using
# context managers so handles are closed even if a write fails.
for idx, table in enumerate(tables, start=1):
    with open('test{}.txt'.format(idx), 'w') as outfile:
        for row in table:
            outfile.write(row + '\n')
|
993,770 | b24f56069a2949fb1fe307769d9ff1820c19741c |
# Given two numbers, the program performs an arithmetic operation (+ - / *).
# We also supply the operation; an invalid operation raises an error.
def pobranie_danych():
    """Ask the user for two numbers and an operation; return all three."""
    pierwsza = float(input("Podaj liczbe A"))
    druga = float(input("Podaj liczbe B"))
    dzialanie = input("Podaj jaką operację chcesz wykonać: mnożenie, dzlenia, dodawanie, odejmowanie ")
    return pierwsza, druga, dzialanie
def obsluga_dzialan(liczba1, liczba2, operacja):
    """Perform the requested arithmetic operation and print the result.

    Raises ValueError on division by zero or an unknown operation symbol.

    Fixes two defects of the original: (1) the division branch raised
    ValueError unconditionally, even after printing a correct quotient —
    the raise now fires only in the zero case; (2) every result message
    claimed the operation was addition ("dodawania") — each branch now
    names its own operation.
    """
    if operacja == "+":
        print(f"wynik dodawania liczb {liczba1} i {liczba2} wynosi {liczba1 + liczba2}")
    elif operacja == "/":
        if liczba2 == 0:
            print("liczba 2 nie może być zerem podczas dzielenia")
            raise ValueError("NIE WOLNO DIZELIC przez ZERO2")
        print(f"wynik dzielenia liczb {liczba1} i {liczba2} wynosi {liczba1 / liczba2}")
    elif operacja == "-":
        print(f"wynik odejmowania liczb {liczba1} i {liczba2} wynosi {liczba1 - liczba2}")
    elif operacja == "*":
        print(f"wynik mnożenia liczb {liczba1} i {liczba2} wynosi {liczba1 * liczba2}")
    elif operacja == "l":
        pass  # reserved for a future operation
    else:
        print("podaleś niewłaściwe działanie proszę użyć działań w formacie +, - * , /")
        raise ValueError("nieprawidłowa wartość dla parametru typ operacja")
# Run the interactive prompts only when executed as a script, not on import.
if __name__ == "__main__":
    dane = pobranie_danych()
    obsluga_dzialan(*dane)
993,771 | 5f1823ba297d3bfb7cb2d8b116bbac8a356adef3 | # SJTU EE208
import os
import re
import string
import sys
# import urllib.error
# import urllib.parse
# import urllib.request
import requests
from urllib.parse import urljoin
import time
from bs4 import BeautifulSoup
def valid_filename(s):
    """Strip every character unsafe in a filename.

    Keeps ASCII letters, digits, space, and the characters '-_.()'.
    """
    allowed = '-_.() %s%s' % (string.ascii_letters, string.digits)
    return ''.join(ch for ch in s if ch in allowed)
class mycount:
    """A tiny stateful counter: calling it adds `i` and returns the total."""

    def __init__(self, n=0):
        # Running total, starting at n.
        self.n = n

    def __call__(self, i):
        self.n += i
        return self.n


# Global counters shared by the crawler: fetch failures and loop passes.
get_page_count = mycount(0)
crawl_count = mycount(0)
def get_page(page):
    """Fetch `page` over HTTPS; return the Response, or '' on any error.

    Side effects: prints progress, and bumps the global failure counter
    get_page_count when the request raises.
    """
    # Browser-like UA plus a captured CSDN session cookie. The cookie is
    # session data and will expire — TODO confirm it is still required.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
        'Cookie': 'uuid_tt_dd=10_9949675910-1600261491577-371068; dc_session_id=10_1600261491577.490733; Hm_up_6bcd52f51e9b3dce32bec4a3997715ac=%7B%22islogin%22%3A%7B%22value%22%3A%220%22%2C%22scope%22%3A1%7D%2C%22isonline%22%3A%7B%22value%22%3A%220%22%2C%22scope%22%3A1%7D%2C%22isvip%22%3A%7B%22value%22%3A%220%22%2C%22scope%22%3A1%7D%7D; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_9949675910-1600261491577-371068; __gads=ID=ca4796933d209972:T=1600261492:S=ALNI_MbScM9-rzuqwKvRVhABQjkvI2SFjg; c-login-auto-interval=1601095475563; TY_SESSION_ID=943aef78-0594-47b3-8e20-77b1576e878f; c_segment=12; dc_sid=1a99b40cd7fb2b7ab4654c23c87ed1b1; c_first_ref=www.baidu.com; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Flive.csdn.net%252Froom%252Fyzkskaka%252F5n5O4pRs%253Futm_source%253D1598583200%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D; SESSION=fae7bc16-f4a8-488d-a8eb-6faabdf92f3b; log_Id_click=4; c_utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-2.channel_param; c_ref=https%3A//www.baidu.com/link; c_first_page=https%3A//blog.csdn.net/qq_32506963/article/details/78498157; c_page_id=default; log_Id_pv=75; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1601128198,1601128422,1601129147,1601129163; log_Id_view=126; dc_tos=qh9r71; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1601129198; c-login-auto=10',
    }
    print('正在爬取' + page)  # "crawling <page>"
    try:
        # NOTE(review): urljoin('https://', page) only passes an absolute
        # `page` through unchanged; relative paths are NOT resolved here.
        content = requests.get(urljoin('https://', page), headers=headers, timeout=2)
        content.encoding = content.apparent_encoding
        return content
    except requests.exceptions.RequestException as e:
        print(e, end=' ')
        print(get_page_count(1))  # cumulative error count
        return ''
def get_all_links(content, page):
    """Extract every <a href> target from `content` as an absolute URL.

    Query strings are stripped. Bug fix: the original appended the raw
    href and ignored the `page` parameter, so relative links could never
    be fetched later; they are now resolved against the containing page
    with urljoin, and empty hrefs are skipped.
    """
    links = []
    for anchor in BeautifulSoup(content.text, features='lxml').findAll('a'):
        url_with_get = anchor.get('href', '')
        url = url_with_get.split('?')[0]
        if url:
            links.append(urljoin(page, url))
    return links
def union_dfs(a, b):
    """Append to list `a` every element of `b` not already present.

    Preserves `a`'s order and `b`'s first-seen order exactly as before,
    but tracks membership in a set so the merge is O(len(a) + len(b))
    instead of O(len(a) * len(b)). Assumes elements are hashable
    (they are URL strings here).
    """
    seen = set(a)
    for e in b:
        if e not in seen:
            a.append(e)
            seen.add(e)
def add_page_to_folder(page, content, folder1, index_filename1):
    """Save a page's HTML under `folder1` and log the url->file mapping.

    Appends one 'url<TAB>filename' line to the index file, creates the
    folder on first use, then writes the page body into folder/filename.
    """
    filename = valid_filename(page)  # legal file name derived from the URL
    with open(index_filename1, 'a', encoding='utf-8') as index:
        index.write(page + '\t' + filename + '\n')
    if not os.path.exists(folder1):
        os.mkdir(folder1)
    with open(os.path.join(folder1, filename), 'w', encoding='utf-8') as f:
        f.write(content)
def crawl(seed, max_page):
    """Crawl from `seed` until `max_page` pages have been saved.

    Saves each fetched page via add_page_to_folder (folder 'htmlw',
    index file 'index.txt') and returns the list of crawled URLs.
    """
    tocrawl = [seed]
    crawled = []
    count = 0
    while tocrawl:
        print('crawl' + str(crawl_count(1)))
        # NOTE(review): pop() takes the LAST element, so this frontier is
        # explored depth-first despite union_dfs's "dfs" naming hint.
        page = tocrawl.pop()
        # Skip binary downloads (apk/pdf/jpg).
        if (page).endswith('.apk') or (page).endswith('.pdf') or (page).endswith('.jpg'):
            print('爬取到非法后缀名:'+page)
            continue
        if page not in crawled:
            # print(page)
            content = get_page(page)  # Response object, or '' on failure
            if not content:
                continue
            add_page_to_folder(page, content.text, 'htmlw', 'index.txt')
            outlinks = get_all_links(content, page)
            union_dfs(tocrawl, outlinks)
            crawled.append(page)
            count += 1
            ...
        else:
            print('重复网页'+page+'已经在crawled内')
        print('crawled的长度为'+str(len(crawled)))
        if len(crawled) >= max_page:
            break
    # print('crawl =' + str(count))
    return crawled
if __name__ == '__main__':
    # Time the whole crawl for the final report line.
    start = time.time()
    seed = 'https://baike.baidu.com/'
    max_page = 100
    '''
    seed = sys.argv[1]
    max_page = sys.argv[2]
    '''
    crawled = crawl(seed, max_page)
    stop = time.time()
    print('运行时间' + str(stop - start))  # total elapsed seconds
|
993,772 | 9cf4de5807cd3c5639242719b54bcb8e7cc2add4 | import uiautomator2
devices = uiautomator2.connect("D3H7N17B11010237")
devices.app_start("guoyuan.szkingdom.android.phone", "kds.szkingdom.modemain.android.phone.UserMainActivity")
# devices.xpath(
# '//*[@resource-id="guoyuan.szkingdom.android.phone:id/main_page_bottomBar_view"]/android.widget.LinearLayout[5]').click()
#
# devices(resourceId="guoyuan.szkingdom.android.phone:id/zjQieHuan").click_exists(10)
#
# devices(resourceId="khbz").send_keys("15240029081")
# devices(text="请输入您的交易密码").send_keys("123456")
# devices.xpath('//*[@resource-id="loginForm"]/android.view.View[10]').click()
devices(text="点击登录 >").click()
devices(text="请输入您的手机号码").send_keys("15240029081")
devices(text="获取验证码").click()
devices(text="请输入验证码").send_keys("11111")
devices(resourceId="guoyuan.szkingdom.android.phone:id/btn_register").click_exists(10)
|
993,773 | 3bfb46884c5d9d3a176ba41ab75d523d674270d6 | # Generated by Django 3.2.2 on 2021-05-29 10:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: repoint the Operation.idD and
    Operation.idR foreign keys at account.use with CASCADE deletes."""

    dependencies = [
        ('account', '0026_reclamation'),
        ('operation', '0008_auto_20210529_1119'),
    ]

    operations = [
        migrations.AlterField(
            model_name='operation',
            name='idD',
            # CASCADE: deleting the referenced account row deletes the operation.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='idD', to='account.use'),
        ),
        migrations.AlterField(
            model_name='operation',
            name='idR',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='idR', to='account.use'),
        ),
    ]
|
993,774 | 5d1851336f359588396c416efde7348a37e5c93b | from collections import deque
import heapq as hq
class MazeRunner(object):
    '''
    A base class for the maze runners, with no search algorithm implemented.

    Subclasses implement search_maze() as a generator that yields each
    visited tile and sets self.path when the end is reached.
    '''

    def __init__(self):
        self.reset()

    def __len__(self):
        '''Return the number of tiles in the last solved path.'''
        return len(self.path)

    def reset(self):
        ''' Resets the node list and frontier.'''
        self.came_from = {}       # maps each visited tile to its predecessor
        self.frontier = deque()   # tiles waiting to be explored
        self.path = []            # last solved path (empty until solved)

    def search_maze(self, maze):
        '''Abstract search generator; subclasses must override.

        Bug fix: the original did `raise NotImplemented(...)`, which fails
        with a TypeError because NotImplemented is a constant, not an
        exception type. NotImplementedError is the correct exception.
        '''
        raise NotImplementedError("subclass and implement this!")

    def construct_path(self, loc):
        ''' Returns a list of all tiles visited to given loc, if possible.
        Return None otherwise. '''
        path = [loc]
        if loc not in self.came_from:
            return None
        # Walk predecessors until the start (whose predecessor is falsy).
        while self.came_from[loc]:
            loc = self.came_from[loc]
            path.append(loc)
        return list(reversed(path))

    def solve(self, maze):
        ''' Return a path through a solved maze.'''
        # Drain the search generator; the subclass fills self.path.
        for _ in self.search_maze(maze):
            pass
        return self.path
class BreathRunner(MazeRunner):
    '''
    A maze runner for the maze class using the breath-first algorithm.
    '''

    def search_maze(self, maze):
        '''Generator yielding each visited tile in breadth-first order
        until the maze end is reached; records the path in self.path.'''
        self.reset()
        self.maze = maze
        self.came_from[maze.start] = None
        self.frontier.append(maze.start)
        while self.frontier:
            tile = self.frontier.popleft()
            yield tile
            if tile == maze.end:
                # Reached the goal: record the path and stop searching.
                self.path = self.construct_path(tile)
                return
            # Queue every unvisited neighbour for later exploration.
            for nxt in maze.get_neighbours(tile):
                if nxt in self.came_from:
                    continue
                self.came_from[nxt] = tile
                self.frontier.append(nxt)
class RecursiveRunner(MazeRunner):
    '''
    A maze runner for the maze class using a procedural
    recursive algorithm (depth-first exploration via a recursive generator).
    '''

    def search_maze(self, maze):
        ''' A generator the yields the next step in the maze
        until getting to the end, using recursive algorithm. '''

        def search_maze_helper(loc):
            ''' A recursive function to search through the maze. '''
            if self.path:
                # Return if a solution was already found
                return
            yield loc
            if loc == maze.end:
                # Build a path
                self.path = self.construct_path(loc)
                return
            neighbours = maze.get_neighbours(loc)
            # Deterministic exploration order: sort by row + 10*column.
            # NOTE(review): the 10x column weight biases the DFS direction;
            # confirm the weighting is intentional.
            neighbours = sorted(neighbours, key=lambda n: n[0] * 1 + n[1] * 10)
            for n in neighbours:
                if n not in self.came_from:
                    self.came_from[n] = loc
                    # Delegate deeper exploration to the recursive generator.
                    yield from search_maze_helper(n)

        self.reset()
        self.maze = maze
        self.came_from[maze.start] = None
        yield from search_maze_helper(maze.start)
class GreedyFirstRunner(MazeRunner):
    '''A maze runner using a greedy best-first search: the frontier is a
    heap ordered by a Manhattan-distance heuristic to the end point.'''

    def reset(self):
        '''Resets the runner; the frontier becomes a priority heap.'''
        super().reset()
        self.frontier = []

    def push_to_frontier(self, loc, priority=0):
        '''Add *loc* to the frontier heap with the given priority.'''
        hq.heappush(self.frontier, (priority, loc))

    def get_from_frontier(self):
        '''Pop and return the lowest-priority location from the heap.'''
        _, loc = hq.heappop(self.frontier)
        return loc

    def distance_heuristic(self, loc):
        '''Manhattan distance from *loc* to the end point, or 0 when the
        maze has no end point.'''
        if not self.end:
            return 0
        dy = abs(self.end[0] - loc[0])
        dx = abs(self.end[1] - loc[1])
        return dy + dx

    def search_maze(self, maze):
        '''Generator yielding each visited tile until the end is reached,
        always expanding the tile that looks closest to the goal.
        The maze must have an end point for the search to work.'''
        self.reset()
        self.maze = maze
        self.end = maze.end
        self.came_from[maze.start] = None
        self.push_to_frontier(maze.start)
        while self.frontier:
            tile = self.get_from_frontier()
            yield tile
            if tile == maze.end:
                # Goal reached: record the path and stop.
                self.path = self.construct_path(tile)
                return
            # Push unvisited neighbours ranked by the heuristic.
            for nxt in maze.get_neighbours(tile):
                if nxt in self.came_from:
                    continue
                self.came_from[nxt] = tile
                self.push_to_frontier(nxt, self.distance_heuristic(nxt))
class AStarRunner(GreedyFirstRunner):
    ''' A Maze runner using the A* algorithm to find the shortest path.'''

    def search_maze(self, maze):
        ''' A generator that yields the next step in the maze
        until getting to the end, using a greedy Best-First algorithm.
        Maze must have an endpoint for the search to work.'''
        self.reset()
        self.maze = maze
        self.end = maze.end
        self.start = maze.start
        self.came_from[maze.start] = None
        self.push_to_frontier(maze.start)
        while self.frontier:
            current = self.get_from_frontier()
            yield current
            # If we found the end
            if current == maze.end:
                # Return the path to the end
                self.path = self.construct_path(current)
                return
            # g-cost of a neighbour: length of the path to `current` plus one.
            # NOTE(review): construct_path walks the whole predecessor chain,
            # making this O(path length) per expansion — consider caching costs.
            new_cost = len(self.construct_path(current)) + 1
            for neighbour in maze.get_neighbours(current):
                # Re-parent the neighbour if it is new or this route is shorter.
                old_path = self.construct_path(neighbour)
                if not old_path or new_cost < len(old_path):
                    self.came_from[neighbour] = current
                    # f = g + h: path cost so far plus heuristic to the goal.
                    priority = new_cost + self.distance_heuristic(neighbour)
                    self.push_to_frontier(neighbour, priority)
class AStarTiebreakRunner(AStarRunner):
    ''' AStarRunner with a tiebreak inside the heuristic to prefer
    nodes closer to the goal.'''

    def distance_heuristic(self, loc):
        '''Manhattan distance plus a tiny cross-product tiebreaker that
        prefers nodes lying near the straight start->end line.'''
        dist = super().distance_heuristic(loc)
        if dist == 0:
            return dist
        # Get the cross product of the vectors:
        # (start, end), (loc, end)
        # Which is also the area of the parallelogram formed by the vectors.
        dy1 = loc[0] - self.end[0]
        dx1 = loc[1] - self.end[1]
        dy2 = self.start[0] - self.end[0]
        dx2 = self.start[1] - self.end[1]
        cross = abs(dx1*dy2 - dx2*dy1)
        # Add the cross product as a small fraction to the calculation
        # to break ties by picking the node closer to a straight line
        # to the goal.
        # Note that this makes the heuristic inadmissable!
        # Though this will come into effect only in rare cases and where the expected
        # Path length is bigger than 1000.
        # This can be optimized by changing 0.001 to a smaller number that fits the
        # maze size.
        return dist + cross * 0.001
|
993,775 | d25692817a84c8370de6e32a6e7532ae97dcd82a | import math
# Sample input: a shuffled sequence of colour codes to sort.
test = ['G', 'B', 'R', 'R', 'B', 'R', 'G']

# Sort precedence for each colour code: R < G < B.
order = {
    'R': 1,
    'G': 2,
    'B': 3
}
def batteries_included_sort(input_list):
    """Return a new list sorted by colour precedence using built-in sorted()."""
    # sorted() already returns a fresh list, so no explicit copy is needed;
    # order.__getitem__ raises KeyError for unknown codes, like the original.
    return sorted(input_list, key=order.__getitem__)
def bubble_sort(input_list):
    """Bubble-sort a copy of input_list by colour precedence."""
    items = input_list[:]
    n = len(items)
    for done in range(n):
        # Each pass bubbles the largest remaining element to the end.
        for idx in range(n - done - 1):
            if order[items[idx]] > order[items[idx + 1]]:
                items[idx], items[idx + 1] = items[idx + 1], items[idx]
    return items
def selection_sort(input_list):
    """Selection-sort a copy of input_list by colour precedence.

    Bug fix: `smallest` was initialised once *before* the outer loop, so
    the minimum index found in an earlier pass leaked into later passes
    and could swap already-sorted elements out of place (e.g.
    ['R', 'B', 'G'] sorted to ['B', 'R', 'G']). It must restart at `i`
    on every pass.
    """
    l = input_list[:]
    for i in range(len(l)):
        smallest = i  # candidate index of the minimum in l[i:]
        for j in range(i, len(l)):
            if order[l[j]] < order[l[smallest]]:
                smallest = j
        l[i], l[smallest] = l[smallest], l[i]
    return l
# Merge sort: unlike the swap-based sorts above, this builds new lists.
def merge_sort(input_list):
    """Merge-sort a copy of input_list by colour precedence.

    Fixes vs. the original:
      - the merge step accumulated with ``l += element``, which iterates
        the element's *characters* and therefore only worked by accident
        for single-character strings; ``append`` handles any element.
      - an empty input recursed forever (``len == 1`` base case only);
        the base case is now ``len <= 1``.
    """
    l = input_list[:]
    if len(l) <= 1:
        return l
    mid = len(l) // 2
    left = merge_sort(l[:mid])
    right = merge_sort(l[mid:])
    # Merge the two sorted halves, preserving stability (<=).
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if order[left[i]] <= order[right[j]]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
if __name__ == '__main__':
    # Demonstrate that every implementation yields the same ordering.
    print(test)
    print(bubble_sort(test))
    print(selection_sort(test))
    print(merge_sort(test))
    print(batteries_included_sort(test))
|
993,776 | 44182e651aeed5bdc9c1099671299140ff08d099 | #!/usr/bin/env python
# Embedded sample input: first line is the case count, then one
# "R C W" triple per case.
TEST="""3
1 4 2
1 7 7
2 5 1"""
# Replace raw_input with an iterator over the embedded sample.
# NOTE(review): `.next` is Python 2 only; Python 3 needs `.__next__`.
raw_input = iter(TEST.splitlines()).next
def solve(C, W):
    """Return the minimum number of guesses for a single-row board.

    Args:
        C: number of columns in the (single) row.
        W: length of the hidden word.

    The count is one guess per full word-width chunk (C // W), plus one
    extra guess if a partial chunk remains, plus W - 1 guesses to pin
    down the word's exact position.

    Fix: the original used Python-2 `/`, whose integer-floor behaviour
    breaks under Python 3; `//` is explicit and identical in both.
    """
    full_chunks = C // W
    partial = 0 if C % W == 0 else 1
    narrowing = W - 1
    return full_chunks + partial + narrowing
# Read the case count, then solve each "R C W" case (only R == 1 supported).
T = int(raw_input())
for case in range(1,T+1):
    R,C,W = map(int, raw_input().strip().split())
    assert(R==1)
    print("Case #%s: %s" % (case, solve(C,W)))
|
993,777 | 501860942c6c0c5b291873ae18ddb379680efef5 | import logging
import numpy as np
import json
import requests
from pprint import pprint
from influxdb import InfluxDBClient, DataFrameClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
import influxdb
from datadog import statsd
from cleanflux.utils.influx.date_manipulation import pd_timestamp_to_timestamp
from cleanflux.utils.influx.query_sqlparsing import sqlparse_query, get_cq_schema, get_cq_interval, get_cq_from, get_cq_into, parse_measurement_path
# ------------------------------------------------------------------------
# LIB PATCHING
def robustify_influxdb_client():
    """Monkey-patch influxdb.InfluxDBClient.request with a variant that
    recreates the HTTP session and retries on connection and
    chunked-encoding errors."""

    def custom_request(self, url, method='GET', params=None, data=None,
                       expected_response_code=200, headers=None):
        """Make a HTTP request to the InfluxDB API.
        :param url: the path of the HTTP request, e.g. write, query, etc.
        :type url: str
        :param method: the HTTP method for the request, defaults to GET
        :type method: str
        :param params: additional parameters for the request, defaults to None
        :type params: dict
        :param data: the data of the request, defaults to None
        :type data: str
        :param expected_response_code: the expected response code of
            the request, defaults to 200
        :type expected_response_code: int
        :param headers: headers to add to the request
        :type headers: dict
        :returns: the response from the request
        :rtype: :class:`requests.Response`
        :raises InfluxDBServerError: if the response code is any server error
            code (5xx)
        :raises InfluxDBClientError: if the response code is not the
            same as `expected_response_code` and is not a server error code
        """
        url = "{0}/{1}".format(self._baseurl, url)

        if headers is None:
            headers = self._headers

        if params is None:
            params = {}

        if isinstance(data, (dict, list)):
            data = json.dumps(data)

        # Try to send the request more than once by default (see #103)
        retry = True
        _try = 0
        while retry:
            try:
                response = self._session.request(
                    method=method,
                    url=url,
                    auth=(self._username, self._password),
                    params=params,
                    data=data,
                    headers=headers,
                    proxies=self._proxies,
                    verify=self._verify_ssl,
                    timeout=self._timeout
                )
                break
            except requests.exceptions.ConnectionError as e:
                # Broken connection: start a fresh session and retry.
                self._session = requests.Session()
                _try += 1
                if self._retries != 0:
                    retry = _try < self._retries
                # NOTE(review): unlike the branch below, this branch never
                # re-raises once retries are exhausted, so `response` can be
                # unbound after the loop (UnboundLocalError) — confirm intent.
            except requests.exceptions.ChunkedEncodingError as e:
                # NOTE(review): logging.warn is a deprecated alias of
                # logging.warning.
                logging.warn("Case of broken HTTP session, retring w/ new session")
                self._session = requests.Session()
                _try += 1
                if self._retries != 0:
                    retry = _try < self._retries
                else:
                    raise requests.exceptions.ConnectionError
        if 500 <= response.status_code < 600:
            raise InfluxDBServerError(response.content)
        elif response.status_code == expected_response_code:
            return response
        else:
            raise InfluxDBClientError(response.content, response.status_code)

    # Install the patched method on the client class.
    setattr(influxdb.InfluxDBClient, 'request', custom_request)
# ------------------------------------------------------------------------
# NUMPY DATA ENCODER
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to the
    equivalent native Python types."""

    def default(self, obj):
        # Guard clauses instead of an if/elif chain; behaviour unchanged.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else falls back to the base class (raises TypeError).
        return super(NpEncoder, self).default(obj)
# ------------------------------------------------------------------------
# QUERYING: pandas FORMAT
# Datadog timer reports query latency in milliseconds.
@statsd.timed('timer_pd_query_influxdb', use_ms=True)
def pd_query(backend_host, backend_port, user, password, schema, query):
    """Run `query` against `schema` and return the result as a dict
    mapping measurement name -> pandas DataFrame."""
    pd_influx_client = DataFrameClient(backend_host, backend_port, user, password, schema)
    result_df_dict = pd_influx_client.query(query)  # returns a dict, "<measurement>" => DataFrame
    return result_df_dict
@statsd.timed('timer_rp_auto_detect', use_ms=True)
def get_rp_list(backend_host, backend_port, user, password, schema_list=None):
    """Map each schema to the retention policies that are fed by a CQ.

    Args:
        backend_host, backend_port, user, password: InfluxDB connection info.
        schema_list: optional list of database names; when None or empty,
            all databases are discovered via SHOW DATABASES.

    Returns:
        dict: schema name -> list of retention-policy dicts, each enriched
        with an 'interval' key taken from the continuous query feeding it.

    Fixes vs. the original:
      - `schema_list` no longer uses a mutable default argument.
      - the inner `next(...)` now has a None default, so a CQ whose INTO
        retention policy is not found is skipped instead of crashing with
        an unhandled StopIteration (the `if not rp_conf_raw` guard was
        previously unreachable).
    """
    pd_influx_client = DataFrameClient(backend_host, backend_port, user, password)
    if not schema_list:
        schema_list_raw = pd_influx_client.query('SHOW DATABASES')
        schema_list = [e['name'] for e in list(schema_list_raw.get_points(measurement='databases'))]
    cq_list_raw = pd_influx_client.query('SHOW CONTINUOUS QUERIES')
    rp_dict = {}
    for schema in schema_list:
        result_df_dict = pd_influx_client.query('SHOW RETENTION POLICIES ON "' + schema + '"')
        rp_list = list(result_df_dict.get_points(measurement='results'))
        cq_list = list(cq_list_raw.get_points(measurement=schema))

        # Enrich RPs with intervals gotten from the CQs that feed them.
        cq_into_rp_set = set()
        for cq in cq_list:
            parsed_cq = sqlparse_query(cq['query'])
            from_m = parse_measurement_path(schema, get_cq_from(parsed_cq))
            into = parse_measurement_path(schema, get_cq_into(parsed_cq))
            if into['measurement'] != ':MEASUREMENT' \
               and into['measurement'] != from_m['measurement']:
                # NB: if insertion is into another measurement, skip
                continue
            into_rp = into['rp']
            cq_into_rp_set.add(from_m['rp'])
            cq_into_rp_set.add(into_rp)
            # None default prevents StopIteration when no RP matches.
            rp_conf_raw = next(([rp, i] for i, rp in enumerate(rp_list) if rp['name'] == into_rp), None)
            if not rp_conf_raw:
                continue
            rp_conf, rp_conf_i = rp_conf_raw
            if 'interval' in rp_conf:
                # NB: we assume all CQs for all measurements with the same INTO
                # RP use the same GROUP BY time interval — a strong and
                # limitative assumption.
                continue
            rp_list[rp_conf_i]['interval'] = get_cq_interval(parsed_cq)

        # Keep only RPs that are actually referenced by a CQ.
        active_rp_list = [rp for rp in rp_list if rp['name'] in cq_into_rp_set]
        if active_rp_list:
            rp_dict[schema] = active_rp_list
    return rp_dict
def get_nb_series_in_pd_result(resultset_list):
    """Return the total number of series across all result sets."""
    return sum(len(resultset) for resultset in resultset_list)
def pd_result_to_influx_result(resultset_list, precision):
    """Serialize a list of pandas result sets into the InfluxDB JSON
    response shape: {'results': [{'series': [...]}]}.

    Args:
        resultset_list: list of dict-like result sets, each mapping either a
            measurement name or a (measurement, tag-pairs) tuple to a DataFrame.
        precision: timestamp precision forwarded to pd_timestamp_to_timestamp.
    """
    output_dict = {
        'results': []
    }
    for resultset in resultset_list:
        query_dict = {
            'series': []
        }
        for series in resultset:
            tags = {}
            # Keys are plain measurement names or (name, tag-pairs) tuples.
            if isinstance(series, tuple):
                measurement = series[0]
                for raw_tag in series[1]:
                    tags[raw_tag[0]] = raw_tag[1]
            else:
                measurement = series
            df = resultset[series]
            columns = df.columns.values.tolist()
            all_columns = ['time'] + columns
            series_dict = {
                'name': measurement,
                'columns': all_columns,
                'values': []
            }
            if tags:
                series_dict['tags'] = tags
            for index, row in df.iterrows():
                # TODO: should change precision according to param epoch (ns, ms ...)
                row_values = [pd_timestamp_to_timestamp(index, precision)]
                for column in columns:
                    value = row[column]
                    # Influx clients expect null, not NaN, for missing points.
                    if np.isnan(value):
                        value = None
                    row_values.append(value)
                # logging.debug("dump {0} -> {1}".format(index.value, row[column]))
                series_dict['values'].append(row_values)
            query_dict['series'].append(series_dict)
        output_dict['results'].append(query_dict)
    # NpEncoder converts any remaining numpy scalars/arrays to native types.
    return json.dumps(output_dict, cls=NpEncoder)
|
993,778 | 0f2e13a9de8bb601be5f62098f5e71d10267cfe4 | class Node:
    def __init__(self, val: int, left: 'Node' = None, right: 'Node' = None):
        """Store the node's value and its optional left/right children."""
        self.val = val
        self.left = left
        self.right = right
def greater(root: 'Node', val: int) -> Optional[int]:
    """Return the smallest value in the BST strictly greater than `val`,
    or None when no such value exists.

    Walks down from the root: a node larger than `val` is a candidate
    (remember it and go left looking for something smaller but still
    greater); otherwise go right.

    Fixes vs. the original: the return annotation claimed `bool` while the
    function returns a value or None, and the bare `Node` annotation was
    evaluated eagerly (now a forward-reference string).
    """
    best = None
    node = root
    while node is not None:
        if node.val > val:
            best = node.val
            node = node.left
        else:
            node = node.right
    return best
# Sample binary search tree; greater(root, v) should return the smallest
# stored value strictly greater than v (the in-order successor's value).
root = Node(19,
            Node(7,
                 Node(3,
                      Node(2),
                      Node(5),
                      ),
                 Node(11,
                      None,
                      Node(17,
                           Node(13),
                           None
                           )
                      )
                 ),
            Node(43,
                 Node(23,
                      None,
                      Node(37,
                           Node(29,
                                None,
                                Node(31)
                                ),
                           Node(41)
                           )
                      ),
                 Node(47,
                      None,
                      Node(53)
                      )
                 )
            )
# 23's in-order successor in this tree is 29.
assert greater(root, 23) == 29
|
993,779 | f9a193064884c015c05d8628830a2e5d17191d2b | __pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x07\x00\x42\x0d\x0d\x0a\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\x35\x0c\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe3\x50\x8c\x64\x26\x42\xd6\x01\xc1\x57\xe0\xf8\x31\x41\xaf\xc1\x30\x25\xfc\x16\xb4\xae\xb5\x10\x78\x81\x82\x6f\x7e\xc0\x3b\xae\x9a\xbd\x10\x77\x95\x8a\x54\x4c\x7d\x24\x42\x68\x7e\x29\xcd\x3d\x77\xc2\xe7\xe7\x1a\x20\x6f\x03\xf4\x29\x49\x01\x6f\xbd\xe7\xb7\x08\x2e\x73\x26\xa0\xc7\x1f\xfd\xf9\x8f\x03\xb8\x82\x56\x67\x72\xfa\x7b\xc7\xa0\x74\x1c\x86\x38\x24\x3e\xb5\xb9\x2b\xa7\xd7\x5e\x21\xe8\x22\x82\xed\xfd\x61\xa1\x66\x48\x46\xb4\x8d\x11\xfb\xd5\xe3\x9f\x07\x09\xf2\x79\x42\x1d\x0a\x08\x71\x03\x46\x50\xdf\x11\x67\x97\x21\xda\x81\x9d\xde\x35\x63\x05\x90\xc7\xe9\x1f\xff\x1b\x9c\x95\x9b\xff\x5d\x4b\xaa\x47\x1f\x6e\x3c\x46\x4e\x83\x49\x9f\xc1\x7a\x79\x41\x4d\x97\xb6\x1e\x88\xc3\xbe\x9a\xc4\x69\xda\xbc\x69\x50\x5d\x88\x5e\xc2\x3d\x5b\x12\x7a\x2a\x22\xda\xab\x89\xd9\xca\x21\xe5\xa1\x41\xb6\xad\x59\xd7\xa0\x25\x4a\x44\xad\xf7\x67\xf5\x66\x39\xe0\xa0\xcf\xde\xb7\xcd\xed\x2a\x43\x9b\x55\x4d\x6c\x30\x07\x9c\x20\x5a\x0a\x71\xa5\xde\x12\x9c\xe6\xd3\x6b\x75\x1f\x68\x14\x96\x05\xec\x94\x14\x4c\x49\x07\xf9\x84\xf8\x66\xea\xb0\x3c\xbc\xe4\x05\x17\x43\xeb\xc2\x47\x92\x0d\xb0\x65\x33\xeb\xbf\x02\x3f\x95\xd6\xbb\x7f\xd0\xdf\x50\x56\x0c\x2c\xfd\x78\x21\xf3\xf0\xb0\x2f\x15\x49\x94\x83\x57\xa6\xbe\xda\x16\xd6\x91\x2b\x2f\x8a\x08\x94\xa2\xe1\x30\x3b\xdc\xb8\xf9\x72\x70\xda\xa2\x82\x64\x7c\x78\xae\xeb\xe4\x46\x61\xd3\x99\xe7\x81\x17\x9b\x57\xda\x6e\x59\xd8\x99\x27\x98\xac\xa7\x99\x9c\x09\xa3\xdd\x46\x27\xcb\xe0\x53\x59\xe1\xeb\xa7\xc2\xb7\xbf\x5b\x02\x65\xae\x15\xde\x23\x63\x43\x20\x85\xa2\xd8\x08\x60\x25\x39\x80\xd9\x94\x71\xe7\x13\x51\x75\xb4\xdf\x1f\xe1\x86\x78\x26\x4c\x2d\x8c\x74\x0c\xfa\xb8\x1e\x68\x42\x17\xa7\xa2\xeb\x3d\xbb\xf5\x0a\x12\xbd\xb6\x5f\
xa6\xd0\xb6\xcf\x58\xfb\x52\xfc\x18\x9a\xb7\x61\xf2\xbd\x14\xee\x50\xd3\xa6\x20\x83\x95\xec\xc8\x27\xce\xfd\x2e\x39\x97\xca\x92\x89\x4b\xf6\x15\x75\x5a\xb3\xd1\x3a\xc2\x9c\x48\x24\x3d\x2c\x59\x25\x2d\x48\x71\xc4\x21\x9b\x7b\x3a\x36\xed\x88\x11\x60\x72\x48\x28\x86\xab\xf5\x25\x89\x11\x8f\x35\xef\x4b\x0e\x44\xb1\x04\x85\xae\x58\x87\x3e\xcb\xd2\x37\xc0\xd7\x36\xbd\x01\xe2\x48\xba\xab\xb8\xb0\x71\x9b\x07\xfa\xf0\xca\x54\xf9\xab\x9d\x00\x28\xee\x26\x3a\x2d\x82\x33\xe8\x29\xbb\xdd\x33\x47\x12\x2e\x34\x36\xe6\x86\xf0\xd7\x28\xe5\x3d\xf1\xa0\xbf\x4c\xf1\xc0\x2d\xc4\xbb\x91\x09\x63\x34\x62\x58\xcf\x84\x14\x2f\xd2\xe2\xca\x90\x59\xd7\xac\x8e\x96\x87\xe2\xd9\x30\x9a\xcd\x23\x73\x43\xd9\xda\xcf\x20\x29\x0b\x61\x8c\x5c\x99\xe9\x0f\xa7\x97\x77\x6b\x77\x12\x6a\xeb\x17\xfb\x99\xa6\x33\x73\x82\x8d\xd0\x7c\x06\x78\x60\xd4\xd4\x4e\x88\xf9\x80\x68\xa3\x6b\xb1\x0b\x4c\xe2\xd1\x40\x17\x05\xbf\x17\xe8\x50\x37\x63\x53\xb3\x59\xfc\x78\xc7\x83\x38\x15\x2f\xe1\xeb\x1f\x51\xb2\x02\xc2\x54\xd2\x0c\x20\xd5\x0d\x7a\x11\x50\x3c\x94\xab\xa8\x46\xb6\xc6\x9d\xce\xd8\xd1\x6b\x80\x1f\xc2\x61\x6c\xd9\x0c\x35\x53\x3f\x94\x74\xd9\x7d\x85\x66\x26\x8d\x45\x92\x7f\x7c\x99\x26\x5b\x2e\x56\xf8\x4b\x33\x4a\x4a\x2b\xa5\x51\x02\x26\x61\x46\x37\xcc\x30\x97\x63\xe0\x74\x66\x18\x6f\x81\xc1\x7b\x4d\x0d\x04\xe7\x04\x29\x9b\x71\x29\xe1\x95\x99\x20\xf7\xf5\x3e\x4a\x92\x49\x7b\x2e\x72\x1a\x89\xf4\xfb\x7e\x05\xea\xea\x47\xac\x73\x5e\x62\xbb\x9c\x10\x32\xe6\x46\x12\x5f\x70\xab\x79\x1e\x0d\xfc\x76\xca\xc8\xd0\xe9\xa8\xa4\x07\x4f\xf0\xd4\xb4\x22\x1c\x25\xc0\xda\x91\x29\xe0\x5e\x2c\xb4\x82\x27\x48\x49\x8a\xe6\xc8\x8a\x33\x47\xc6\xdb\x90\x2f\x82\xfb\x7b\x08\x9e\xf5\x5c\x52\xb1\x59\x3a\xad\x1c\xd6\xad\x50\xbc\x33\x25\xa0\x1f\xca\x21\x5f\xdf\x4b\x39\xa0\x92\xd5\x13\xa8\xb2\x9f\x57\x37\x0e\x0c\xd0\xea\x28\x06\x9d\xa2\xa4\xe9\x32\x72\x22\x25\xe4\x91\x74\xac\x7a\xe0\x20\x6f\x48\x84\xcf\x4d\x57\x41\xac\xa0\x8a\xfa\x71\xc7\xfe\xa1\xfc\xf0\xa0\x65\xaa\xbb\xc7\x93\x73\x14\xdf\xa7\xb8\xf9\xfe\xa9\x63\xc4\xad\x9d\xa1\x92\x95\xb5\x83\x49\
xa2\x85\x90\xec\x2d\xe8\x7d\x6c\xce\xe5\xc1\x7b\x80\x15\xa2\xf2\xad\xf2\x61\x49\xea\x44\x2e\xd8\x20\x60\x88\x58\x21\x78\x86\x90\x43\x32\x42\xd6\xa8\xd8\xa3\x6a\xac\xa3\x02\x5f\x5e\xb7\xfc\x90\x0b\xca\x88\xb5\x54\x16\xbc\x2f\x22\xf6\x98\x3a\x19\x94\x2c\x29\x62\x6e\x8f\x24\xa4\x5e\x87\x83\x80\x55\xb5\x4b\x26\x16\xbd\xb2\xef\x2e\x02\x64\x27\x1c\x92\xab\x24\xc8\xef\x9c\x02\x6b\x64\xa5\xa7\x51\x92\x32\x94\xa5\xb6\xb1\x0d\x27\x9f\xdc\xe4\x6f\xfc\x10\x34\x3b\xb3\xe9\x15\x14\xa1\x49\x35\x12\x6d\x7f\x7e\x5a\x26\x3a\xc3\x6c\xac\x86\xd5\x29\x1a\xe9\x9a\xa2\xb3\xcc\xd5\x13\xe9\x09\x70\x5d\x33\xcf\x01\xac\x05\xda\x29\xc1\x14\x82\xb4\x43\x2c\x98\xcb\xaa\x9e\xce\x6d\x53\x36\x69\x5f\xdb\x7d\x7c\x39\x36\xc1\x2a\xdf\x1b\x4f\x33\x35\xd9\xa8\xd0\xb4\x28\x80\xbe\x7c\x63\x00\x71\x1b\x04\x6a\xae\x50\x89\xf1\xb4\xdc\xb8\x0f\x14\x34\x7f\x28\xce\xb9\xb8\xb4\x50\x7f\x44\xf6\x36\x58\x7f\x95\xb0\x4c\xb9\x26\xdc\x08\xfb\x7c\xc8\x04\xc2\x7c\xcf\xe2\xf6\x47\x0f\x25\x8a\x99\x58\x13\x15\x4f\x53\x42\x72\x80\x9e\x1c\x46\x9c\xfe\x67\x9f\x05\xc1\x19\x2c\x95\x7d\x8c\x97\x0e\xc3\xd3\x07\xd7\x18\x6f\xef\x2d\xe0\x70\x08\x22\x11\x1f\xfd\x67\x71\xda\x81\x8b\xc5\x2b\x5e\xe5\xe4\x6b\x73\xb2\xe2\x8b\xee\xf8\xa3\x8b\xc7\xbb\x52\x7a\x5e\xfe\x1f\x47\xb3\x5e\x3b\x45\x7a\x41\x63\x14\xe1\xaf\x81\xd4\x55\xf6\x6f\x99\x4a\x33\x1c\x42\x10\xe3\x51\x84\x75\x78\xce\x93\xae\x5b\x1d\x85\x5e\x9e\x59\xdd\x32\x52\x27\xac\x0f\x06\xad\xd2\xd5\x78\xf4\x29\xd1\xf0\x04\x42\x79\x0f\x9e\xbf\x40\xab\xaf\xd8\x00\x39\x6a\x74\xe9\xdf\xf4\x0f\xdb\x81\x3e\x65\xb2\xd4\x42\xbf\xf2\xcb\xfd\x3e\x2d\xfc\x8c\xdb\x77\xdd\xa9\xb0\xb5\xab\xa5\x18\x49\x7c\xd6\xae\x6c\x69\x50\x77\xdf\x0e\x1b\x5b\x40\x32\xc7\x39\x42\x17\xd6\x6d\xb2\x91\xd7\x9f\xfd\x0c\xba\xd6\xb3\x97\xa6\x7a\x1c\xac\x89\xd5\x7c\x91\xf3\x11\xd8\x25\x40\x29\x39\xa7\x7b\x2d\xa3\xbc\xe5\x87\x38\x11\x4d\x4c\xf8\x07\xae\x54\xd1\x7e\xca\xbd\x47\xb0\xf8\xef\x8c\x08\x10\xf7\x4b\xca\x96\x50\x36\xa5\x1d\x68\x77\x65\xac\x0d\x6e\x68\x9d\x4e\x19\xa0\x89\xe0\x2d\x51\x2f\x83\x4b\x8e\xe5\x56\xba\x39\xcf\
x67\x93\x5c\x5a\xed\x79\xab\x52\x01\xe0\xdf\x22\xdd\x91\x58\x02\x2e\x10\x66\x0e\x6b\x9d\x57\x9c\x02\xda\x86\xb1\x2e\xf6\xaa\xf1\x28\x8c\x94\x07\x56\xd3\xcd\xb9\xd4\xe4\xde\x3a\x2b\x1c\xb7\x61\xee\x1d\xe3\x3f\x28\x2b\xa7\x57\x11\xcb\x75\x85\x0e\x91\x0c\xac\x22\xe1\x62\x60\x78\x1c\x81\xa9\x23\xf8\xf6\x47\x0f\xfb\x75\xc6\x79\xfd\x6e\xbf\xea\xf8\x36\x7d\xa3\xaf\x10\xfc\x8b\x6f\x2f\x2b\x15\xa6\x0a\x0f\xa7\xa6\xf9\x5f\x9c\x2c\x48\x72\x71\x77\x84\xe4\x4b\x8d\x25\x15\x21\x87\x41\x8b\x80\x1d\x20\x32\xf6\x37\x94\xde\x19\x8c\xff\xaa\x92\x82\x4d\x7d\xc4\x34\xac\x3c\xca\x86\x7d\x8b\xf4\xde\x0a\x6b\x50\xad\x74\x14\x52\x99\xae\xae\x6a\xd2\x8c\x4c\x4f\xcd\xa4\x93\x36\xb7\x2f\x44\x4b\x19\xe8\x4f\x4e\x03\x03\x9a\x7a\x5d\x17\xeb\xa6\xab\xa0\x6f\xbb\x12\xb2\x3e\xfc\x9e\x1d\x26\xb6\x98\xee\x6b\x29\x1b\x4c\xc3\x46\x59\x5b\x1b\x6c\x11\xb0\xc5\x31\xa4\xb0\x1f\xe9\x2d\x64\xed\xa8\xf7\xb9\x5b\x7e\xa0\x50\xe9\x9c\xe2\xa7\xd0\x8d\xfa\x9c\x32\x7d\x77\x01\xc3\x14\x89\xab\x7a\x6d\x37\x6c\x22\x3f\x90\xeb\xe3\x15\x81\x9e\x1b\xc7\x4e\x9f\xc7\x1c\x3d\x6d\x50\x05\x52\x25\x0e\xaf\x87\x45\xed\xf4\x05\x51\x50\x3d\x8e\x12\x8b\x08\x7d\x13\x84\x4d\xc8\xdd\x61\xd0\xc4\xaf\xbc\x76\x8a\xf3\x56\xc8\x76\x15\x8c\x0e\x52\xb2\xc1\x80\xb0\x40\x81\x16\x3e\xdf\x46\x0a\x43\x15\x33\x00\x71\xf8\x56\x90\x4b\x3f\xd0\x59\x52\xfc\xa3\x12\x86\x0a\x5a\x7a\xbd\xa4\xa6\xaf\x3e\xdf\xc8\xb2\x56\x6b\xde\x5a\x6f\x08\x59\x38\xe5\xe5\xcb\xea\x78\xa6\x5a\xd5\x39\x11\x5f\x52\xcb\xbf\xa1\x2f\xce\x91\x1c\xb7\x1f\x5e\x30\x09\x44\xf7\xc9\x62\xd9\x78\x71\x7d\x1d\x48\x84\x37\x59\x20\xba\x6c\xa3\x16\x63\x68\x0b\xb1\x94\x1b\xdc\x6a\x07\x63\x99\xe1\x66\x97\xfb\x84\x65\x59\xf1\x97\xe2\x20\x21\x7a\x17\x3f\x52\xeb\x2e\x22\xdd\x3a\x1b\x80\x40\x70\xaa\x6b\xb9\x60\xa6\x5e\xbe\xcf\xed\x11\x98\x0c\x24\xc6\xb3\xc5\xa9\x19\xaf\x66\x5b\x47\x2a\x52\xec\x03\x49\x11\x06\x8f\x82\x2f\xb8\x71\xee\x83\xc9\xde\xf0\x34\x8d\x30\xe5\x40\x25\x2c\xb6\x82\xc1\xd8\xaa\x5d\x1d\xf0\x48\xef\xb0\xec\x07\x3a\x04\x53\x9c\xd9\xb1\x50\xbf\x95\xef\xbe\x2d\xe8\x86\x24\xa8\x08\x80\
x18\x69\x00\x74\x3b\xca\x8e\xd3\x64\xf5\x12\x15\x23\x40\x4f\x1e\x77\x41\x31\x18\x48\x31\xab\x29\x24\x60\xb5\x9e\x48\xc4\x04\xb1\x7f\x4c\xdb\x51\x21\xf7\x63\x59\xa1\xee\xc1\x07\x70\x35\x77\x2e\x2f\x72\xbf\x38\xe5\x49\x0f\x50\x08\xfd\x2d\xf3\x0f\x51\x23\xd1\xd7\x15\x45\xfd\xaa\x4e\x5b\xde\xaa\x23\x56\xab\xbd\x27\x98\xf1\x5d\x29\xee\x49\x9e\xc8\xd5\x94\xc6\x43\x29\xed\x1d\x4e\x40\x4b\x72\x63\x17\x55\x68\x32\x2a\x59\x89\xde\x3b\x77\x9d\x5e\x59\xdb\x39\xaa\xa7\xf4\x8d\xce\xc5\x7e\x7b\xc3\x6b\x45\x8b\xa6\xf8\xbd\x92\xfb\x6c\x2c\xb3\x7e\x8b\x38\xe6\x52\xbc\x70\xe0\x8f\x0b\xaf\xd8\x80\xed\xe0\x93\x0a\xac\x52\xa0\x01\x73\xac\xc3\xde\x21\x86\x68\xa9\x44\xc0\xdc\x76\x89\x18\x19\x3d\x83\xcf\x65\x60\x80\xbb\x99\x1b\xe7\x7c\x27\xdc\x59\x86\xef\xf9\xe3\xed\x40\x6b\x1a\x0d\xec\x01\x2d\x41\x69\x1b\x15\x86\x32\x9b\x46\x34\x8e\xf0\x05\x8a\xc1\x8f\xad\xc6\x0e\x24\x7e\x16\xc9\x7b\xe9\x30\x12\xb0\x10\xa0\xe3\xc5\x4b\x5c\x7f\x52\x7d\xeb\x90\xf6\x7a\x9c\x01\x2b\x34\x87\x7f\xb4\xf9\x30\x0d\x60\x6b\xaa\xb7\x3e\x96\x02\xc1\x46\xe2\x52\x4a\xb6\x94\xcf\x9a\x07\x07\xb0\xff\x3a\x4b\xd4\x96\x56\x52\x1c\xc9\x28\x53\x32\x8e\x0f\x35\xb1\x4f\x56\x23\xe5\x1a\x23\x63\x68\x08\x9c\x9a\x14\x20\x79\x66\x55\xaa\x30\x95\xd2\x2f\x4b\xb1\x67\xb7\xd9\x4f\x71\xb9\x59\xc1\x99\x58\x08\xb3\x05\x50\x4d\xfd\xc1\xda\xb5\xde\x40\x3d\xbf\xfb\x82\xeb\xb4\x7e\x4c\x30\x10\x8d\xc7\x61\xc2\xf3\x5a\xd3\x72\x79\xf6\x8d\x7d\x0c\x35\x1d\x44\x31\x35\x57\x51\x28\xec\x96\x0d\xc5\xcc\x0c\x8a\xc9\x37\x05\xa7\xa0\x25\x3c\x8d\xab\xf4\xfd\x51\x13\x7f\x89\xb7\xb4\x39\xbe\x38\xa2\x56\x61\x60\x69\x3c\x8a\x67\xf1\x13\xf6\xf3\x77\xe5\x65\x4b\xa0\xbd\x40\xe0\xd7\xe2\x7e\xbd\x8d\x42\x8a\x29\x3e\x48\x01\xcb\x1f\x3b\x26\xa2\x8f\x67\xb0\x58\x2c\x7c\xc7\x25\xa0\x30\x48\x30\x16\x87\x98\xe9\x0b\x54\x16\x38\x0d\xd6\x73\x00\xca\x9c\xb9\xf9\xf4\xa8\x1c\x42\xe7\xfb\x9b\xcd\x9d\x89\x91\x24\x5a\x88\x19\xa3\x11\x96\x76\x73\x43\xb3\x28\xf8\xb9\x8b\x70\x44\x78\x2c\xf4\x2e\x13\xa3\x81\xa0\x1f\x1f\x6a\x82\xaf\x29\x79\xa1\xe5\x7b\x5d\x8a\x80\x85\x3c\xa4\x99\x80\
xe5\x11\x2f\x3c\xdc\x21\x14\xab\x72\xbd\x66\x77\x61\xa0\xb9\xe7\x2e\x28\x27\xc0\x33\x80\xac\x4c\x3c\xd5\xc2\xc2\x52\x2f\x2a\x80\x0e\x40\xf4\x22\x51\xe3\x0a\x2e\x43\x29\x1a\x98\x68\x4d\x27\x74\x91\x1c\x02\xa3\x63\x5f\x91\xcd\x8b\x61\xcc\x4a\x6e\x04\xfa\x67\xca\xfa\xa4\x21\x4d\xc7\xf4\x59\xe5\xd3\x6f\x80\x5f\x55\x9c\xdc\x3f\xb5\x36\x67\x03\x80\x52\x41\x7b\x96\x88\xaa\xe4\x5f\xe5\x0d\x65\x3a\xe7\x03\xfc\x50\xe0\x2c\x8a\x46\x15\x21\x3c\x28\x4d\x53\xf8\xdd\x57\x60\x45\x4b\xfe\x92\x80\x2e\xd2\xc9\xf3\xc7\xa6\x8c\xdf\x96\xb1\x77\x4b\xac\x6f\xbf\x93\x0e\xf8\xce\x94\xc6\x79\xa5\x36\x32\x55\x9c\x94\xc7\xdd\x9b\x71\xe9\x37\x7c\x22\xe7\x28\x35\x3c\xd3\x6f\x67\x26\x1c\xc9\x58\x29\x46\xca\xa8\x31\x62\xb7\x07\x6a\x04\x81\x4e\x32\x2c\xad\x89\xc0\xcf\x0e\x31\x03\x38\x4c\xdc\xa8\xeb\xb2\xf6\xd5\x1f\x3d\x45\x8d\x90\x81\xcf\x67\x98\x28\xb2\x2b\x9b\x7d\xe7\x0a\x1e\x18\x31\xbe\x64\x2e\x74\x74\x85\x39\xd0\xfc\x87\xa8\x6b\x33\x9a\x75\xba\x42\xbb\xc4\xd3\xe1\x0b\x3c\x57\x1e\x8d\x9c\xc6\xfd\xce\x4a\x15\x93\xc9\x4b\xf7\x82\xa8\x75\x04\x28\xed\xa9\x50\xcb\x05\x80\xe3\x7f\x61\x70\x29\x8f\x99\xc9\x99\x5e\x43\x66\x76\x39\x5a\xb4\x3c\x85\xa8\xf5\xfc\x2a\xc5\xcd\x8b\x29\x37\x2b\xe0\x0f\xc6\x96\x6d\x6e\xed\xd2\x16\x9e\xc6\xd9\x7f\x59\xa2\xc5\xb3\xa4\x4a\xb0\x92\x57\x1f\xc1\x4e\x38\xcf\xe7\x4f\x8d\x49\x63\x2d\x72\x9c\xc9\x6a\xb6\x63\xbf\x67\x1c\xc5\x7f\x5a\x20\x53\xdc\xb7\xae\x67\x63\x07\x1c\x7b\xd2\x99\x95\xd2\xa1\x33\xd5\xb1\xed\x1b\xa4\xb0\xbe\x46\x85\x2d\x4e\xb7\x53\xf2\x9a\x00\xd5\xbb\x87\x7f\x8f\xbc\xa2\x3f\x69\xfc\xba\x86\x90\x9c\x82\x65\xb5\xdc\x3e\x82\xf5\xd8\x4d\x61\xd6\x90\xeb\x8a\x04\x37\xc6\x5a\x6d\x25\xc1\xdf\x32\x49\xd7\xb3\x7b\x59\x4c\x4e\x52\xf5\x20\xa0\x75\xf6\xcd\x5a\xf2\xbc\x7c\xe6\xcf\xcd\xd7\x35\xd6\x58\x10\x8b\x2a\xaf\x56\x08\xf1\x97\x23\xac\x3a\xad\xf7\x3e\x74\xf2\xeb\x95\x6b\xa1\xdd\xe4\x88\xab\x4a\x31\xe8\x4c\xc8\xeb\xce\x06\xec\x78\xfb\xb9\x29\xcc\xde\xfc\xd0\xd0\xc5\xaf\xf7\x7d\xc3\x4f\xe0\x3f\x2f\x95\x0b\xa7\xbb\xc5\x41\x3b\x75\xef\x2c\xef\x65\xf0\xf8\x2f\x71\x81\x0d\xe9\
x6e\xff\x3a\xd2\x6a\x9a\x44\x60\x8f\x0b\xcd\xae\xd4\x58\xe7\xa9\xf2\xb7\xb3\x7a\x55\x51\xdb\x3e\x1c\x18\xe8\x75\x6e\x48\x73\x51\xa1\x0b\x2c\x1f\x3a\x41\xce\x82\xcc\x6e\xd9\x3f\xdc\x63\xda\x38\xcb\xc7\x65\xff\xee\xd8\x36\x64\x71\xa8\x78\x26\xb9\x98\xef\x35\xbb\x61\x78\xf2\x61\xdb\xbe\x5f\x59\x2d\xf0\x28\xd2\x7f\x8e\xc4\x9c\x4e\x8f\x2a\xfe\xab\xaf\x94\xfe\x53\x75\x84\x70\x3d\x94\x9b\xad\xaa\xfd\x2a\xd8\xe6\xb8\x1a\xc8\x4a\xcd\x44\x8b\xec\xd4\x04\xf8\xd5\x0d\x85\x14\xf4\x7e\x0c\xa0\xdd\x8d\x3b\x16\x84\xc9\x5f\x6f\x9e\xb3\xd3\x34\x71\xce\xa7\x11\x08\x28\x59\xde\xf2\x3a\xbf\x5c\xff\x15\x50\xdd\xe1\x50\x28\x06\x44\xeb\x90\xd9\x22\x17\x68\xce\xae\x99\x98\x0f\x3b\xa1\x50\x58\x66\x8f\x28\xb0\x71\x49\xed\xa2\x6e\x8e\x0e\xfc\x13\x70\x43\xc4\xa9\x1a\xf3\x94\x5b\xbf\x17\xda\x97\x2b\x86\x54\x33\x18\x3b\x5b\x96\x2a\x38\x5b\xd3\xbc\xaf\xe0\xdd\x92', 1) |
993,780 | df853a59e8af10adb40586ff572d68162a477e06 | #!/usr/bin/env python
"""
List all repos with specified webhook payload URL.
"""
import click
from openedx_webhooks.lib.github.utils import get_repos_with_webhook, repo_name
click.disable_unicode_literals_warning = True
@click.command()
@click.argument('payload-url')
@click.option(
    '--exclude-inactive',
    is_flag=True,
    help="Include webhooks which aren't active"
)
def cli(payload_url, exclude_inactive):
    """
    List all repos with specified webhook payload URL.

    Note that you must set the environment variable $GITHUB_PERSONAL_TOKEN
    to a valid token that you create in GitHub.
    """
    # NOTE(review): the --exclude-inactive help text reads like the opposite
    # of the flag name — confirm against get_repos_with_webhook's semantics.
    repos = get_repos_with_webhook(
        payload_url, exclude_inactive=exclude_inactive
    )
    # Print one repository name per line.
    for repo in repos:
        click.echo(repo_name(repo))


if __name__ == '__main__':
    cli()
|
993,781 | 9153c51f06cd371c76248956cffa594bb5069528 | #!/usr/bin/python3
"""
an matrix dividing function: matrix_divided():
>>> matrix_divided(m, 1)
m
"""
def matrix_divided(matrix, div):
    """Divide every element of `matrix` by `div`, rounded to 2 decimals.

    Args:
        matrix: non-empty list of equal-length lists of ints/floats.
        div: non-zero int or float divisor.

    Returns:
        A new matrix of floats, each element divided by div and rounded
        to 2 decimal places.

    Raises:
        TypeError: if matrix is malformed, rows differ in size, or div
            is not a number.
        ZeroDivisionError: if div equals 0.

    Fixes vs. the original:
      - error messages used backslash line continuations *inside* the
        string literals, embedding large whitespace runs in the message;
        they are now single clean strings.
      - each row's type is checked before calling len() on it, so a
        non-list row raises the intended message instead of a generic
        "object has no len()" TypeError.
    """
    type_msg = 'matrix must be a matrix (list of lists) of integers/floats'
    size_msg = 'Each row of the matrix must have the same size'
    if type(matrix) is not list or not matrix:
        raise TypeError(type_msg)
    if type(div) is not int and type(div) is not float:
        raise TypeError('div must be a number')
    if div == 0:
        raise ZeroDivisionError('division by zero')
    row_len = len(matrix[0])
    for row in matrix:
        # Type check first: len() on a non-list would raise the wrong error.
        if type(row) is not list:
            raise TypeError(type_msg)
        if not row or len(row) != row_len:
            raise TypeError(size_msg)
        for item in row:
            if type(item) is not int and type(item) is not float:
                raise TypeError(type_msg)
    # '%.2f' formatting kept for the original's rounding behaviour.
    return [[float('%.2f' % (y / div)) for y in x] for x in matrix]
|
993,782 | 8c64f32b827a0d0af0b22842f435cc9ee05b976a | #!/usr/bin/env python3
"""Define measure_runtime"""
import asyncio
import time
wait_n = __import__('1-concurrent_coroutines').wait_n
def measure_time(n: int, max_delay: int) -> float:
    """Run wait_n(n, max_delay) and return the average seconds per task."""
    started_at = time.time()
    asyncio.run(wait_n(n, max_delay))
    elapsed = time.time() - started_at
    return elapsed / n
|
993,783 | 819929bc191398a28b9d1c4ee93af3f3d00f9b47 | # standar libs
import cv2
import numpy as np
import serial
import time
from copy import deepcopy
# Roborregos libs
import sys
sys.path.insert(0, '../lib/')
import Larc_vision_2017 as rb
# CODE
# Shared frames: the raw capture and its filtered/cleared version.
mainFrame = []
clearedMainFrame = []
cap = cv2.VideoCapture(0)
# let camara calibrate light
for i in range(10):
    cap.read()
def takePicture():
    # Grab a fresh frame into the module-level mainFrame/clearedMainFrame.
    # Returns True when the camera delivered a valid frame.
    global mainFrame
    global clearedMainFrame
    # clear internal buffer
    for i in range(4):
        cap.grab()
    # get new image
    goodFrm, mainFrame = cap.read()
    print "I took a pic"
    if goodFrm:
        clearedMainFrame = rb.clearImage(mainFrame)
    else:
        print ("There is an error with the camera")
    return goodFrm
def getCowXcenter(left,right):
    # Midpoint between the cow's left/right limits; also draws a yellow
    # marker line on the shared frame.
    # NOTE(review): Python 2 integer division — the midpoint is truncated.
    center = (left+right)/2
    cv2.line(mainFrame,(center,0),(center,480),(0,255,255),1)
    return center
def getXCenterFrame():
    # Horizontal center of the captured frame; draws a cyan marker line.
    center = (mainFrame.shape[1])/2
    cv2.line(mainFrame,(center,0),(center,480),(255,255,0),1)
    return center
def getLimits(maxLenT):
    # Compute the cow's bounding limits from the tissue squares, draw them,
    # and return (left, right, up).
    left,right,up=rb.calcCowLimits(maxLenT)
    drawLimits(left,right,up)
    return left,right,up
def drawLimits(left, right, y):
    """Overlay the cow's bounding limits on mainFrame: vertical lines at
    x=left and x=right, and a horizontal line at y, all drawn in blue.

    Removed: an unused local `font` left over from commented-out debug
    cv2.putText calls (dead code).
    """
    global mainFrame
    cv2.line(mainFrame, (left, 0), (left, 480), (255, 0, 0), 3)
    cv2.line(mainFrame, (right, 0), (right, 480), (255, 0, 0), 3)
    cv2.line(mainFrame, (0, y), (640, y), (255, 0, 0), 3)
'''
MAIN
'''
# Interactive test harness: on user request, grab a frame, run both cow
# detectors (Haar cascade and the custom algorithm), annotate the frame,
# and display it.  Press ESC in the image window to exit.
if __name__ == "__main__":
    while True:
        analyse = raw_input("process photo? ")
        if analyse == '1':
            takePicture()
            filteredImage = rb.clearImage(mainFrame)
            this_time = time.time()
            # Haar-cascade detection vs. the custom square-based algorithm
            validation2, filtered = rb.detectCow(filteredImage)
            validation,maxLenT,_ = rb.isThereACow(mainFrame,filtered)
            print "HAAR: ",validation2
            print "ALGORITHM: ",validation, len(maxLenT)
            if validation:
                print "COW FOUND"
                # annotate: detected squares, fitted slope, bounding limits
                tLevel = rb.getTissueTopLevel(maxLenT)
                rb.drawCowSquares(mainFrame,200,50,200,tLevel)
                A,B,theta = rb.ajusteDeCurvas(tLevel)
                rb.drawSlope(mainFrame,A,B)
                left,right,up=rb.calcCowLimits(maxLenT)
                rb.drawLimits(mainFrame,left,right,up)
                for sqr in maxLenT:
                    print sqr.getX(), sqr.getY()
            cv2.imshow('im',mainFrame)
            k = cv2.waitKey(0)
            # 27 == ESC key code
            if k ==27:
                break
    cv2.destroyAllWindows()
    cap.release()
    cv2.destroyAllWindows()
|
993,784 | c91ed778783da515960a86b2a9b5dcf375cf26a3 | from enum import Enum
from .exceptions import SynsetError
class Synset(object):
    """A synset (synonym set): an id plus literals with their senses and
    assorted metadata (POS, definition, domain, SUMO mapping, SentiWordNet
    scores, stamp, non-lexicalized flag).

    Fixes over the previous revision:
      * pos.setter raised a TypeError whose message said "expected str"
        while actually checking for Pos;
      * the sentiwn setter used an exact ``sum(value) == 1`` test, which
        rejects valid float triples such as [0.1, 0.2, 0.7] (their float
        sum is 0.9999999999999999) -- now uses a small tolerance;
      * __eq__ returns NotImplemented for non-Synset operands so Python
        can try the reflected comparison (observable results of == / !=
        with foreign types are unchanged).
    """

    class Pos(Enum):
        """Part of speech of a synset."""
        NOUN = 0
        VERB = 1
        ADVERB = 2
        ADJECTIVE = 3

        def __str__(self):
            # Single-character POS tags, WordNet style.
            dic_pos2chr = {'NOUN': 'n', 'VERB': 'v', 'ADVERB': 'r', 'ADJECTIVE': 'a'}
            return dic_pos2chr[self.name]

        def __repr__(self):
            return self.name

    class SumoType(Enum):
        """Kind of SUMO mapping attached to a synset."""
        HYPERNYM = 0
        EQUIVALENT = 1
        INSTANCE = 2
        BRACKET = 3
        POINTS = 4

        def __str__(self):
            # Single-character markers used in SUMO mapping notation.
            dic_stp2chr = {'HYPERNYM': '+', 'EQUIVALENT': '=', 'INSTANCE': '@', 'BRACKET': '[', 'POINTS': ':'}
            return dic_stp2chr[self.name]

        def __repr__(self):
            return self.name

    def __init__(self, id: str, pos=None, nonlexicalized=None, definition=None, stamp=None, sentiwn=None, domain=None,
                 sumo=None, sumotype=None, literals=None, literals_senses=None):
        """
        Initialize a synset object.

        Args:
            id (str): The id of the synset.
            pos (Pos, optional): The part of speech of the synset.
            nonlexicalized (bool, optional): Whether the synset is
                non-lexicalized (the setter enforces bool; __init__ stores
                whatever is passed, matching previous behavior).
            definition (str, optional): The definition of the synset.
            stamp (str, optional): The stamp of the synset.
            sentiwn (list of ints/floats, optional): SentiWordNet scores
                [positive, negative, objective].
            domain (str, optional): The domain of the synset.
            sumo (str, optional): The SUMO concept of the synset.
            sumotype (SumoType, optional): The type of the SUMO mapping.
            literals (list, optional): The literals of the synset.
            literals_senses (list, optional): The sense of each literal,
                index-aligned with `literals`.

        Raises:
            TypeError: If 'id' is not a str.
        """
        if not isinstance(id, str):
            raise TypeError("Argument 'id' has incorrect type, expected str, got {}".format(type(id).__name__))
        self._id = id
        self._literals = [] if literals is None else literals
        self._literals_senses = [] if literals_senses is None else literals_senses
        self._pos = pos
        self._definition = definition
        self._stamp = stamp
        self._domain = domain
        self._sumo = sumo
        self._sumotype = sumotype
        self._sentiwn = sentiwn
        self._nonlexicalized = nonlexicalized

    @property
    def id(self):
        """
        Get the id (str) of this synset. Read-only: no setter is defined.
        """
        return self._id

    @property
    def literals(self):
        """
        Get/set the literals of this synset.
        Getter returns the literals of this synset.
        Setter receives the literals as a list of str and resets every
        literal's sense to the empty string.
        """
        return self._literals

    @literals.setter
    def literals(self, value: list):
        if not isinstance(value, list):
            raise TypeError("Argument 'value' has incorrect type, expected list, got {}".format(type(value).__name__))
        for literal in value:
            if not isinstance(literal, str):
                raise TypeError("Argument 'literal-value' has incorrect type, expected str, got {}"
                                .format(type(literal).__name__))
        # Senses are index-aligned with literals; start each one empty.
        self._literals_senses = [""] * len(value)
        self._literals = value

    @property
    def literals_senses(self):
        """
        Get/set the senses for each literal of this synset. Senses's indexes correspond to literals's.
        Getter returns the senses of each literal of this synset.
        Setter receives the senses as a list of str.
        """
        return self._literals_senses

    @literals_senses.setter
    def literals_senses(self, value: list):
        if not isinstance(value, list):
            raise TypeError("Argument 'value' has incorrect type, expected list, got {}".format(type(value).__name__))
        for sense in value:
            if not isinstance(sense, str):
                raise TypeError("Argument 'sense-value' has incorrect type, expected str, got {}"
                                .format(type(sense).__name__))
        self._literals_senses = value

    @property
    def sentiwn(self):
        """
        Get/set the SentiWordNet values (list of 3 floats/ints) of this synset.
        Getter returns a list of 3 values for Positive, Negative, Objective.
        Setter receives a list of 3 floats/ints in [0, 1] summing to 1.
        """
        return self._sentiwn

    @sentiwn.setter
    def sentiwn(self, value: list):
        if not isinstance(value, list):
            raise TypeError("Argument 'value' has incorrect type, expected list, got {}".format(type(value).__name__))
        if len(value) != 3:
            raise ValueError("Argument 'value' expected a list of size 3, but got a list of size {} instead"
                             .format(len(value)))
        if not all(isinstance(element, (int, float)) for element in value):
            raise ValueError("Argument's 'value' values must be of type float/int")
        if not all(0 <= element <= 1 for element in value):
            raise ValueError("Argument's 'value' values must have values between 0 and 1")
        # Tolerance check: exact '== 1' rejected valid float triples such
        # as [0.1, 0.2, 0.7] due to binary floating-point rounding.
        if abs(sum(value) - 1.0) > 1e-9:
            raise ValueError("Argument's 'value' values must add up to 1")
        self._sentiwn = value

    @property
    def definition(self):
        """
        Get/set the definition (str) of this synset.
        Getter returns the definition of this synset.
        Setter receives a string containing the definition.
        """
        return self._definition

    @definition.setter
    def definition(self, value: str):
        if not isinstance(value, str):
            raise TypeError("Argument 'value' has incorrect type, expected str, got {}".format(type(value).__name__))
        self._definition = value

    @property
    def pos(self):
        """
        Get/set the pos (Pos) of this synset.
        Getter returns the pos of this synset.
        Setter receives a Pos member.
        """
        return self._pos

    @pos.setter
    def pos(self, value: Pos):
        if not isinstance(value, self.Pos):
            # Previous revision misreported the expected type as 'str' here.
            raise TypeError("Argument 'value' has incorrect type, expected Pos, got {}".format(type(value).__name__))
        self._pos = value

    @property
    def domain(self):
        """
        Get/set the domain (str) of this synset.
        Getter returns the domain of this synset.
        Setter receives a string containing the domain.
        """
        return self._domain

    @domain.setter
    def domain(self, value: str):
        if not isinstance(value, str):
            raise TypeError("Argument 'value' has incorrect type, expected str, got {}".format(type(value).__name__))
        self._domain = value

    @property
    def sumo(self):
        """
        Get/set the SUMO concept (str) of this synset.
        Getter returns the sumo of this synset.
        Setter receives a string containing the sumo.
        """
        return self._sumo

    @sumo.setter
    def sumo(self, value: str):
        if not isinstance(value, str):
            raise TypeError("Argument 'value' has incorrect type, expected str, got {}".format(type(value).__name__))
        self._sumo = value

    @property
    def sumotype(self):
        """
        Get/set the sumotype (SumoType) of this synset.
        Getter returns the sumotype of this synset.
        Setter receives a SumoType member (HYPERNYM, EQUIVALENT, INSTANCE, ...).
        """
        return self._sumotype

    @sumotype.setter
    def sumotype(self, value: SumoType):
        if not isinstance(value, self.SumoType):
            raise TypeError("Argument 'value' has incorrect type, expected SumoType, got {}"
                            .format(type(value).__name__))
        self._sumotype = value

    @property
    def nonlexicalized(self):
        """
        Get/set the nonlexicalized flag of this synset.
        Getter returns the nonlexicalized attribute.
        Setter receives a bool.
        """
        return self._nonlexicalized

    @nonlexicalized.setter
    def nonlexicalized(self, value: bool):
        if not isinstance(value, bool):
            raise TypeError("Argument 'value' has incorrect type, expected bool, got {}".format(type(value).__name__))
        self._nonlexicalized = value

    @property
    def stamp(self):
        """
        Get/set the stamp of this synset.
        Getter returns the stamp of this synset.
        Setter receives a string containing the stamp, or None.
        """
        return self._stamp

    @stamp.setter
    def stamp(self, value: str):
        if not isinstance(value, str) and value is not None:
            raise TypeError("Argument 'value' has incorrect type, expected str, got {}".format(type(value).__name__))
        self._stamp = value

    def add_literal(self, literal: str, sense: str=""):
        """
        Add a literal (with an optional sense) to the synset.

        Args:
            literal (str): The value of the literal.
            sense (str): Sense of the literal.

        Raises:
            SynsetError: If the literal is already in the synset.
        """
        if literal in self._literals:
            raise SynsetError("Literal '{}' is already in the synset".format(literal))
        self._literals.append(literal)
        self._literals_senses.append(sense)

    def remove_literal(self, literal: str):
        """
        Remove a literal (and its index-aligned sense) from the synset.

        Args:
            literal (str): Literal of the synset.

        Raises:
            SynsetError: If there's no literal with this value in the synset.
        """
        if literal not in self._literals:
            raise SynsetError("literal '{}' is not in the synset".format(literal))
        index = self._literals.index(literal)
        self._literals.remove(literal)
        del self._literals_senses[index]

    def __repr__(self):
        return "Synset(id={!r}, literals={!r}, definition={!r})".format(self._id, self._literals, self._definition)

    def __eq__(self, other):
        # Compare all public attributes; for non-Synset operands defer to
        # the other type's reflected __eq__ (Python then falls back to
        # identity, so == / != results are unchanged for foreign types).
        if not isinstance(other, Synset):
            return NotImplemented
        return (self.id == other.id and
                self.literals == other.literals and
                self.literals_senses == other.literals_senses and
                self.pos == other.pos and
                self.definition == other.definition and
                self.stamp == other.stamp and
                self.domain == other.domain and
                self.sumo == other.sumo and
                self.sumotype == other.sumotype and
                self.sentiwn == other.sentiwn and
                self.nonlexicalized == other.nonlexicalized)
993,785 | daf73300884086608a41f52a3e05ec4620b7d6b8 | from . import views
from django.urls import path, include
from src.views import Tambah_Posisi, Simpan_posisi, Editposisi
# Route table for the worker ("pekerja") app:
#  - worker list / create / update / delete
#  - position ("posisi") list / create / save / edit / delete
#  - built-in auth URLs (login, logout, password management) and signup
urlpatterns = [
    path ('', views.pekerja_list, name='pekerja_list'),
    path ('pekerja_form/', views.pekerja_form, name='pekerja_form'),
    # Same view as pekerja_form; the id kwarg switches it to update mode.
    path ('<int:id>/', views.pekerja_form, name='pekerja_update'),
    # NOTE(review): this route name contains a space ('pekerja hapus'),
    # unlike the underscore style used elsewhere -- confirm templates /
    # reverse() callers before renaming, since lookups use the exact string.
    path ('hapus/<int:id>/', views.pekerja_hapus, name='pekerja hapus'),
    path ('posisi_list/', views.posisi_list, name='posisi_list'),
    path ('tambah/', Tambah_Posisi.as_view(), name='tambah_posisi'),
    path ('simpan/', Simpan_posisi.as_view(), name='simpan_posisi'),
    path ('hapus_posisi/<int:id>/', views.hapus_posisi, name='hapus_posisi'),
    # path ('edit_posisi/<int:id>/', views.edit_posisi, name='edit_posisi'),
    path ('edit/<int:id>/',Editposisi.as_view(), name='edit'),
    path ('accounts/', include('django.contrib.auth.urls')),
    path ('signup/', views.signup, name='signup')
]
993,786 | 7e757c2a5ada706dc528e8ca5a8b0443b35ccfe4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----
# Базовые параметры классов. Используются при генерации персонажей.
# Base ability priorities per class, used when generating characters.
dict_class_abilityes = {
    # Highest abilities come first:
    'Bard':['charisma','dexterity','constitution'],
    'Barbarian':['strength','constitution','dexterity','charisma'],
    'Cleric':['wisdom','strength','constitution','dexterity'],
    'Druid':['wisdom','intelligence','constitution'],
    'Fighter':['strength','constitution','dexterity','charisma'],
    'Eldritch_Knight':['strength','constitution','intelligence','dexterity'],
    'Battlemaster':['strength','constitution','dexterity','charisma'],
    'Monk':['dexterity','wisdom','constitution','strength'],
    'Paladin':['strength','charisma','constitution'],
    'Ranger':['dexterity','wisdom','constitution','strength'],
    'Rogue':['dexterity','charisma','constitution','intelligence'],
    'Arcane_Tricker':['dexterity','intelligence','constitution','charisma'],
    'Sorcerer':['charisma','dexterity','constitution'],
    'Warlock':['charisma','dexterity','constitution'],
    'Wizard':['intelligence','dexterity','constitution'],
    # Regular soldier classes.
    'Warrior':['strength','dexterity','constitution'],
    'Warrior-heavy':['strength','constitution','charisma'],
    'Warrior-bowman':['dexterity','strength','constitution'],
    'Warrior-pirate':['dexterity','charisma','strength','constitution'],
    'Warrior-officer':['charisma','strength','dexterity','constitution'],
    # Commoner class: all abilities are random.
    'Commoner':[],
    # Monsters: abilities are predefined elsewhere.
    'Empyrean':[],
    }
# Saving-throw proficiencies per class (two abilities each, mostly).
dict_class_saves = {
    'Bard':['dexterity','charisma'],
    'Barbarian':['strength','constitution'],
    'Cleric':['wisdom','charisma'],
    'Druid':['intelligence','wisdom'],
    'Fighter':['strength','constitution'],
    'Eldritch_Knight':['strength','constitution'],
    'Battlemaster':['strength','constitution'],
    'Monk':['strength','dexterity'],
    'Paladin':['wisdom','charisma'],
    'Ranger':['strength','dexterity'],
    'Rogue':['dexterity','intelligence'],
    'Arcane_Tricker':['dexterity','intelligence'],
    'Sorcerer':['constitution','charisma'],
    'Warlock':['wisdom','charisma'],
    'Wizard':['intelligence','wisdom'],
    # Regular soldier classes.
    'Warrior':['strength','dexterity'],
    'Warrior-heavy':['strength','constitution'],
    'Warrior-bowman':['strength','dexterity'],
    'Warrior-pirate':['dexterity','charisma'],
    'Warrior-officer':['strength','charisma'],
    # Commoners: no saving-throw proficiencies.
    'Commoner':[],
    # Monsters: proficiencies are predefined.
    'Empyrean':['strength','intelligence','wisdom','charisma'],
    }
#----
# Бонус мастерства и прочие растущие бонусы к способностям класса:
metadict_class_proficiency = {
# Ключ словаря -- кортеж (класс, уровень).
('Any',1):{'proficiency_bonus':2},
('Any',2):{'proficiency_bonus':2},
('Any',3):{'proficiency_bonus':2},
('Any',4):{'proficiency_bonus':2},
('Any',5):{'proficiency_bonus':3},
('Any',6):{'proficiency_bonus':3},
('Any',7):{'proficiency_bonus':3},
('Any',8):{'proficiency_bonus':3},
('Any',9):{'proficiency_bonus':4},
('Any',10):{'proficiency_bonus':4},
('Any',11):{'proficiency_bonus':4},
('Any',12):{'proficiency_bonus':4},
('Any',13):{'proficiency_bonus':5},
('Any',14):{'proficiency_bonus':5},
('Any',15):{'proficiency_bonus':5},
('Any',16):{'proficiency_bonus':5},
('Any',17):{'proficiency_bonus':6},
('Any',18):{'proficiency_bonus':6},
('Any',19):{'proficiency_bonus':6},
('Any',20):{'proficiency_bonus':6},
('Bard',1):{'bardic_inspiration':'1d6'},
('Bard',2):{'bardic_inspiration':'1d6','song_of_rest':'1d6'},
('Bard',3):{'bardic_inspiration':'1d6','song_of_rest':'1d6'},
('Bard',4):{'bardic_inspiration':'1d6','song_of_rest':'1d6'},
('Bard',5):{'bardic_inspiration':'1d8','song_of_rest':'1d6'},
('Bard',6):{'bardic_inspiration':'1d8','song_of_rest':'1d6'},
('Bard',7):{'bardic_inspiration':'1d8','song_of_rest':'1d6'},
('Bard',8):{'bardic_inspiration':'1d8','song_of_rest':'1d6'},
('Bard',9):{'bardic_inspiration':'1d8','song_of_rest':'1d8'},
('Bard',10):{'bardic_inspiration':'1d10','song_of_rest':'1d8'},
('Bard',11):{'bardic_inspiration':'1d10','song_of_rest':'1d8'},
('Bard',12):{'bardic_inspiration':'1d10','song_of_rest':'1d8'},
('Bard',13):{'bardic_inspiration':'1d10','song_of_rest':'1d10'},
('Bard',14):{'bardic_inspiration':'1d10','song_of_rest':'1d10'},
('Bard',15):{'bardic_inspiration':'1d12','song_of_rest':'1d10'},
('Bard',16):{'bardic_inspiration':'1d12','song_of_rest':'1d10'},
('Bard',17):{'bardic_inspiration':'1d12','song_of_rest':'1d12'},
('Bard',18):{'bardic_inspiration':'1d12','song_of_rest':'1d12'},
('Bard',19):{'bardic_inspiration':'1d12','song_of_rest':'1d12'},
('Bard',20):{'bardic_inspiration':'1d12','song_of_rest':'1d12'},
('Sorcerer',1):{'sorcery_points':0},
('Sorcerer',2):{'sorcery_points':2},
('Sorcerer',3):{'sorcery_points':3},
('Sorcerer',4):{'sorcery_points':4},
('Sorcerer',5):{'sorcery_points':5},
('Sorcerer',6):{'sorcery_points':6},
('Sorcerer',7):{'sorcery_points':7},
('Sorcerer',8):{'sorcery_points':8},
('Sorcerer',9):{'sorcery_points':9},
('Sorcerer',10):{'sorcery_points':10},
('Sorcerer',11):{'sorcery_points':11},
('Sorcerer',12):{'sorcery_points':12},
('Sorcerer',13):{'sorcery_points':13},
('Sorcerer',14):{'sorcery_points':14},
('Sorcerer',15):{'sorcery_points':15},
('Sorcerer',16):{'sorcery_points':16},
('Sorcerer',17):{'sorcery_points':17},
('Sorcerer',18):{'sorcery_points':18},
('Sorcerer',19):{'sorcery_points':19},
('Sorcerer',20):{'sorcery_points':20},
('Ranger',1):{},
('Ranger',2):{},
('Ranger',3):{},
('Ranger',4):{},
('Ranger',5):{},
('Ranger',6):{},
('Ranger',7):{},
('Ranger',8):{},
('Ranger',9):{},
('Ranger',10):{},
('Ranger',11):{},
('Ranger',12):{},
('Ranger',13):{},
('Ranger',14):{},
('Ranger',15):{},
('Ranger',16):{},
('Ranger',17):{},
('Ranger',18):{},
('Ranger',19):{},
('Ranger',20):{},
# На 20 уровне wild_shape не ограничен:
('Druid',1):{'wild_shape':2},
('Druid',2):{'wild_shape':2},
('Druid',3):{'wild_shape':2},
('Druid',4):{'wild_shape':2},
('Druid',5):{'wild_shape':2},
('Druid',6):{'wild_shape':2},
('Druid',7):{'wild_shape':2},
('Druid',8):{'wild_shape':2},
('Druid',9):{'wild_shape':2},
('Druid',10):{'wild_shape':2},
('Druid',11):{'wild_shape':2},
('Druid',12):{'wild_shape':2},
('Druid',13):{'wild_shape':2},
('Druid',14):{'wild_shape':2},
('Druid',15):{'wild_shape':2},
('Druid',16):{'wild_shape':2},
('Druid',17):{'wild_shape':2},
('Druid',18):{'wild_shape':2},
('Druid',19):{'wild_shape':2},
('Druid',20):{'wild_shape':2000},
('Cleric',1):{},
('Cleric',2):{'channel_divinity':1},
('Cleric',3):{'channel_divinity':1},
('Cleric',4):{'channel_divinity':1},
('Cleric',5):{'channel_divinity':1},
('Cleric',6):{'channel_divinity':2},
('Cleric',7):{'channel_divinity':2},
('Cleric',8):{'channel_divinity':2},
('Cleric',9):{'channel_divinity':2},
('Cleric',10):{'channel_divinity':2},
('Cleric',11):{'channel_divinity':2},
('Cleric',12):{'channel_divinity':2},
('Cleric',13):{'channel_divinity':2},
('Cleric',14):{'channel_divinity':2},
('Cleric',15):{'channel_divinity':2},
('Cleric',16):{'channel_divinity':2},
('Cleric',17):{'channel_divinity':2},
('Cleric',18):{'channel_divinity':3},
('Cleric',19):{'channel_divinity':3},
('Cleric',20):{'channel_divinity':3},
('Paladin',1):{},
('Paladin',2):{'channel_divinity':1},
('Paladin',3):{'channel_divinity':1},
('Paladin',4):{'channel_divinity':1},
('Paladin',5):{'channel_divinity':1},
('Paladin',6):{'channel_divinity':1},
('Paladin',7):{'channel_divinity':1},
('Paladin',8):{'channel_divinity':1},
('Paladin',9):{'channel_divinity':1},
('Paladin',10):{'channel_divinity':1},
('Paladin',11):{'channel_divinity':1},
('Paladin',12):{'channel_divinity':1},
('Paladin',13):{'channel_divinity':1},
('Paladin',14):{'channel_divinity':1},
('Paladin',15):{'channel_divinity':1},
('Paladin',16):{'channel_divinity':1},
('Paladin',17):{'channel_divinity':1},
('Paladin',18):{'channel_divinity':1},
('Paladin',19):{'channel_divinity':1},
('Paladin',20):{'channel_divinity':1},
('Warlock',1):{},
('Warlock',2):{},
('Warlock',3):{},
('Warlock',4):{},
('Warlock',5):{},
('Warlock',6):{},
('Warlock',7):{},
('Warlock',8):{},
('Warlock',9):{},
('Warlock',10):{},
('Warlock',11):{},
('Warlock',12):{},
('Warlock',13):{},
('Warlock',14):{},
('Warlock',15):{},
('Warlock',16):{},
('Warlock',17):{},
('Warlock',18):{},
('Warlock',19):{},
('Warlock',20):{},
# Arcane_Recovery только раз в день на коротком отдыхе:
('Wizard',1):{'Arcane_Recovery':1,},
('Wizard',2):{'Arcane_Recovery':1,},
('Wizard',3):{'Arcane_Recovery':1,},
('Wizard',4):{'Arcane_Recovery':1,},
('Wizard',5):{'Arcane_Recovery':1,},
('Wizard',6):{'Arcane_Recovery':1,},
('Wizard',7):{'Arcane_Recovery':1,},
('Wizard',8):{'Arcane_Recovery':1,},
('Wizard',9):{'Arcane_Recovery':1,},
('Wizard',10):{'Arcane_Recovery':1,},
('Wizard',11):{'Arcane_Recovery':1,},
('Wizard',12):{'Arcane_Recovery':1,},
('Wizard',13):{'Arcane_Recovery':1,},
('Wizard',14):{'Arcane_Recovery':1,},
('Wizard',15):{'Arcane_Recovery':1,},
('Wizard',16):{'Arcane_Recovery':1,},
('Wizard',17):{'Arcane_Recovery':1,},
('Wizard',18):{'Arcane_Recovery':1,},
('Wizard',19):{'Arcane_Recovery':1,},
('Wizard',20):{'Arcane_Recovery':1,},
# https://www.dandwiki.com/wiki/5e_SRD:Fighter
('Fighter',1):{'Second_Wind':1,},
('Fighter',2):{'Second_Wind':1,'Action_Surge':1,},
('Fighter',3):{'Second_Wind':1,'Action_Surge':1,},
('Fighter',4):{'Second_Wind':1,'Action_Surge':1,},
('Fighter',5):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Fighter',6):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Fighter',7):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Fighter',8):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Fighter',9):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,},
('Fighter',10):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,},
('Fighter',11):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,},
('Fighter',12):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,},
('Fighter',13):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Fighter',14):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Fighter',15):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Fighter',16):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Fighter',17):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Fighter',18):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Fighter',19):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Fighter',20):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':3,'Indomitable':3,},
# Для Мистического рыцаря дальше добавляются слоты заклинаний:
('Eldritch_Knight',1):{'Second_Wind':1,},
('Eldritch_Knight',2):{'Second_Wind':1,'Action_Surge':1,},
('Eldritch_Knight',3):{'Second_Wind':1,'Action_Surge':1,},
('Eldritch_Knight',4):{'Second_Wind':1,'Action_Surge':1,},
('Eldritch_Knight',5):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Eldritch_Knight',6):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Eldritch_Knight',7):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Eldritch_Knight',8):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,},
('Eldritch_Knight',9):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,},
('Eldritch_Knight',10):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,},
('Eldritch_Knight',11):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,},
('Eldritch_Knight',12):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,},
('Eldritch_Knight',13):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Eldritch_Knight',14):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Eldritch_Knight',15):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Eldritch_Knight',16):{'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,},
('Eldritch_Knight',17):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Eldritch_Knight',18):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Eldritch_Knight',19):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,},
('Eldritch_Knight',20):{'Second_Wind':1,'Action_Surge':2,'Extra_Attack':3,'Indomitable':3,},
('Battlemaster',1):{'Second_Wind':1,},
('Battlemaster',2):{'Second_Wind':1,'Action_Surge':1,},
('Battlemaster',3):{
'Second_Wind':1,'Action_Surge':1,
'superiority_dices':4,'superiority_dice':'1d8',},
('Battlemaster',4):{
'Second_Wind':1,'Action_Surge':1,
'superiority_dices':4,'superiority_dice':'1d8',},
('Battlemaster',5):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,
'superiority_dices':4,'superiority_dice':'1d8',},
('Battlemaster',6):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,
'superiority_dices':4,'superiority_dice':'1d8',},
('Battlemaster',7):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,
'superiority_dices':5,'superiority_dice':'1d8',},
('Battlemaster',8):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,
'superiority_dices':5,'superiority_dice':'1d8',},
('Battlemaster',9):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,
'superiority_dices':5,'superiority_dice':'1d8',},
('Battlemaster',10):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':1,'Indomitable':1,
'superiority_dices':5,'superiority_dice':'1d10',},
('Battlemaster',11):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,
'superiority_dices':5,'superiority_dice':'1d10',},
('Battlemaster',12):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':1,
'superiority_dices':5,'superiority_dice':'1d10',},
('Battlemaster',13):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,
'superiority_dices':5,'superiority_dice':'1d10',},
('Battlemaster',14):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,
'superiority_dices':5,'superiority_dice':'1d10',},
('Battlemaster',15):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,
'superiority_dices':6,'superiority_dice':'1d10',},
('Battlemaster',16):{
'Second_Wind':1,'Action_Surge':1,'Extra_Attack':2,'Indomitable':2,
'superiority_dices':6,'superiority_dice':'1d10',},
('Battlemaster',17):{
'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,
'superiority_dices':6,'superiority_dice':'1d10',},
('Battlemaster',18):{
'Second_Wind':1,'Action_Surge':2,'Extra_Attack':2,'Indomitable':3,
'superiority_dices':6,'superiority_dice':'1d12',},
('Battlemaster',19):{
'Second_Wind':1,'Action_Surge':2,'Extra_Attack':3,'Indomitable':3,
'superiority_dices':6,'superiority_dice':'1d12',},
('Battlemaster',20):{
'Second_Wind':1,'Action_Surge':2,'Extra_Attack':3,'Indomitable':3,
'superiority_dices':6,'superiority_dice':'1d12',},
('Barbarian',1):{'rages_max':2,'rage_damage':+2},
('Barbarian',2):{'rages_max':2,'rage_damage':+2},
('Barbarian',3):{'rages_max':3,'rage_damage':+2},
('Barbarian',4):{'rages_max':3,'rage_damage':+2},
('Barbarian',5):{'rages_max':3,'rage_damage':+2,'Extra_Attack':1,'unarmored_movement':+10,},
('Barbarian',6):{'rages_max':4,'rage_damage':+2,'Extra_Attack':1,'unarmored_movement':+10,},
('Barbarian',7):{'rages_max':4,'rage_damage':+2,'Extra_Attack':1,'unarmored_movement':+10,},
('Barbarian',8):{'rages_max':4,'rage_damage':+2,'Extra_Attack':1,'unarmored_movement':+10,},
('Barbarian',9):{'rages_max':4,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+1},
('Barbarian',10):{'rages_max':4,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+1},
('Barbarian',11):{'rages_max':4,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+1},
('Barbarian',12):{'rages_max':5,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+1},
('Barbarian',13):{'rages_max':5,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+2},
('Barbarian',14):{'rages_max':5,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+2},
('Barbarian',15):{'rages_max':5,'rage_damage':+3,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+2},
('Barbarian',16):{'rages_max':5,'rage_damage':+4,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+2},
('Barbarian',17):{'rages_max':6,'rage_damage':+4,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+3},
('Barbarian',18):{'rages_max':6,'rage_damage':+4,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+3},
('Barbarian',19):{'rages_max':6,'rage_damage':+4,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+3},
('Barbarian',20):{'rages_max':100,'rage_damage':+4,'Extra_Attack':1,'unarmored_movement':+10,
'brutal_critical':+3},
('Rogue',1):{'sneak_attack_dice':'1d6'},
('Rogue',2):{'sneak_attack_dice':'1d6'},
('Rogue',3):{'sneak_attack_dice':'2d6'},
('Rogue',4):{'sneak_attack_dice':'2d6'},
('Rogue',5):{'sneak_attack_dice':'3d6'},
('Rogue',6):{'sneak_attack_dice':'3d6'},
('Rogue',7):{'sneak_attack_dice':'4d6'},
('Rogue',8):{'sneak_attack_dice':'4d6'},
('Rogue',9):{'sneak_attack_dice':'5d6'},
('Rogue',10):{'sneak_attack_dice':'5d6'},
('Rogue',11):{'sneak_attack_dice':'6d6'},
('Rogue',12):{'sneak_attack_dice':'6d6'},
('Rogue',13):{'sneak_attack_dice':'7d6'},
('Rogue',14):{'sneak_attack_dice':'7d6'},
('Rogue',15):{'sneak_attack_dice':'8d6'},
('Rogue',16):{'sneak_attack_dice':'8d6'},
('Rogue',17):{'sneak_attack_dice':'9d6'},
('Rogue',18):{'sneak_attack_dice':'9d6'},
('Rogue',19):{'sneak_attack_dice':'10d6'},
('Rogue',20):{'sneak_attack_dice':'10d6'},
('Arcane_Tricker',1):{'sneak_attack_dice':'1d6'},
('Arcane_Tricker',2):{'sneak_attack_dice':'1d6'},
('Arcane_Tricker',3):{'sneak_attack_dice':'2d6'},
('Arcane_Tricker',4):{'sneak_attack_dice':'2d6'},
('Arcane_Tricker',5):{'sneak_attack_dice':'3d6'},
('Arcane_Tricker',6):{'sneak_attack_dice':'3d6'},
('Arcane_Tricker',7):{'sneak_attack_dice':'4d6'},
('Arcane_Tricker',8):{'sneak_attack_dice':'4d6'},
('Arcane_Tricker',9):{'sneak_attack_dice':'5d6'},
('Arcane_Tricker',10):{'sneak_attack_dice':'5d6'},
('Arcane_Tricker',11):{'sneak_attack_dice':'6d6'},
('Arcane_Tricker',12):{'sneak_attack_dice':'6d6'},
('Arcane_Tricker',13):{'sneak_attack_dice':'7d6'},
('Arcane_Tricker',14):{'sneak_attack_dice':'7d6'},
('Arcane_Tricker',15):{'sneak_attack_dice':'8d6'},
('Arcane_Tricker',16):{'sneak_attack_dice':'8d6'},
('Arcane_Tricker',17):{'sneak_attack_dice':'9d6'},
('Arcane_Tricker',18):{'sneak_attack_dice':'9d6'},
('Arcane_Tricker',19):{'sneak_attack_dice':'10d6'},
('Arcane_Tricker',20):{'sneak_attack_dice':'10d6'},
('Monk',1):{'martial_arts_dice':'1d4','ki_points_max':0,'unarmored_movement':0},
('Monk',2):{'martial_arts_dice':'1d4','ki_points_max':2,'unarmored_movement':+10},
('Monk',3):{'martial_arts_dice':'1d4','ki_points_max':3,'unarmored_movement':+10},
('Monk',4):{'martial_arts_dice':'1d4','ki_points_max':4,'unarmored_movement':+10},
('Monk',5):{'martial_arts_dice':'1d6','ki_points_max':5,'unarmored_movement':+10,
'Extra_Attack':1,},
('Monk',6):{'martial_arts_dice':'1d6','ki_points_max':6,'unarmored_movement':+15,
'Extra_Attack':1,},
('Monk',7):{'martial_arts_dice':'1d6','ki_points_max':7,'unarmored_movement':+15,
'Extra_Attack':1,},
('Monk',8):{'martial_arts_dice':'1d6','ki_points_max':8,'unarmored_movement':+15,
'Extra_Attack':1,},
('Monk',9):{'martial_arts_dice':'1d6','ki_points_max':9,'unarmored_movement':+15,
'Extra_Attack':1,},
('Monk',10):{'martial_arts_dice':'1d6','ki_points_max':10,'unarmored_movement':+20,
'Extra_Attack':1,},
('Monk',11):{'martial_arts_dice':'1d8','ki_points_max':11,'unarmored_movement':+20,
'Extra_Attack':1,},
('Monk',12):{'martial_arts_dice':'1d8','ki_points_max':12,'unarmored_movement':+20,
'Extra_Attack':1,},
('Monk',13):{'martial_arts_dice':'1d8','ki_points_max':13,'unarmored_movement':+20,
'Extra_Attack':1,},
('Monk',14):{'martial_arts_dice':'1d8','ki_points_max':14,'unarmored_movement':+25,
'Extra_Attack':1,},
('Monk',15):{'martial_arts_dice':'1d8','ki_points_max':15,'unarmored_movement':+25,
'Extra_Attack':1,},
('Monk',16):{'martial_arts_dice':'1d8','ki_points_max':16,'unarmored_movement':+25,
'Extra_Attack':1,},
('Monk',17):{'martial_arts_dice':'1d10','ki_points_max':17,'unarmored_movement':+25,
'Extra_Attack':1,},
('Monk',18):{'martial_arts_dice':'1d10','ki_points_max':18,'unarmored_movement':+30,
'Extra_Attack':1,},
('Monk',19):{'martial_arts_dice':'1d10','ki_points_max':19,'unarmored_movement':+30,
'Extra_Attack':1,},
('Monk',20):{'martial_arts_dice':'1d10','ki_points_max':20,'unarmored_movement':+30,
'Extra_Attack':1,},
('Warrior',1):{},
('Warrior',2):{},
('Warrior',3):{},
('Warrior',4):{},
('Warrior',5):{'Extra_Attack':1,},
('Warrior',6):{'Extra_Attack':1,},
('Warrior',7):{'Extra_Attack':1,},
('Warrior',8):{'Extra_Attack':1,},
('Warrior',9):{'Extra_Attack':1,},
('Warrior',10):{'Extra_Attack':1,},
('Warrior',11):{'Extra_Attack':2,},
('Warrior',12):{'Extra_Attack':2,},
('Warrior',13):{'Extra_Attack':2,},
('Warrior',14):{'Extra_Attack':2,},
('Warrior',15):{'Extra_Attack':2,},
('Warrior',16):{'Extra_Attack':2,},
('Warrior',17):{'Extra_Attack':2,},
('Warrior',18):{'Extra_Attack':2,},
('Warrior',19):{'Extra_Attack':2,},
('Warrior',20):{'Extra_Attack':2,},
('Warrior-heavy',1):{},
('Warrior-heavy',2):{},
('Warrior-heavy',3):{},
('Warrior-heavy',4):{},
('Warrior-heavy',5):{'Extra_Attack':1,},
('Warrior-heavy',6):{'Extra_Attack':1,},
('Warrior-heavy',7):{'Extra_Attack':1,},
('Warrior-heavy',8):{'Extra_Attack':1,},
('Warrior-heavy',9):{'Extra_Attack':1,},
('Warrior-heavy',10):{'Extra_Attack':1,},
('Warrior-heavy',11):{'Extra_Attack':2,},
('Warrior-heavy',12):{'Extra_Attack':2,},
('Warrior-heavy',13):{'Extra_Attack':2,},
('Warrior-heavy',14):{'Extra_Attack':2,},
('Warrior-heavy',15):{'Extra_Attack':2,},
('Warrior-heavy',16):{'Extra_Attack':2,},
('Warrior-heavy',17):{'Extra_Attack':2,},
('Warrior-heavy',18):{'Extra_Attack':2,},
('Warrior-heavy',19):{'Extra_Attack':2,},
('Warrior-heavy',20):{'Extra_Attack':2,},
('Warrior-pirate',1):{},
('Warrior-pirate',2):{},
('Warrior-pirate',3):{},
('Warrior-pirate',4):{},
('Warrior-pirate',5):{'Extra_Attack':1,},
('Warrior-pirate',6):{'Extra_Attack':1,},
('Warrior-pirate',7):{'Extra_Attack':1,},
('Warrior-pirate',8):{'Extra_Attack':1,},
('Warrior-pirate',9):{'Extra_Attack':1,},
('Warrior-pirate',10):{'Extra_Attack':1,},
('Warrior-pirate',11):{'Extra_Attack':2,},
('Warrior-pirate',12):{'Extra_Attack':2,},
('Warrior-pirate',13):{'Extra_Attack':2,},
('Warrior-pirate',14):{'Extra_Attack':2,},
('Warrior-pirate',15):{'Extra_Attack':2,},
('Warrior-pirate',16):{'Extra_Attack':2,},
('Warrior-pirate',17):{'Extra_Attack':2,},
('Warrior-pirate',18):{'Extra_Attack':2,},
('Warrior-pirate',19):{'Extra_Attack':2,},
('Warrior-pirate',20):{'Extra_Attack':2,},
('Warrior-bowman',1):{},
('Warrior-bowman',2):{},
('Warrior-bowman',3):{},
('Warrior-bowman',4):{},
('Warrior-bowman',5):{'Extra_Attack':1,},
('Warrior-bowman',6):{'Extra_Attack':1,},
('Warrior-bowman',7):{'Extra_Attack':1,},
('Warrior-bowman',8):{'Extra_Attack':1,},
('Warrior-bowman',9):{'Extra_Attack':1,},
('Warrior-bowman',10):{'Extra_Attack':1,},
('Warrior-bowman',11):{'Extra_Attack':2,},
('Warrior-bowman',12):{'Extra_Attack':2,},
('Warrior-bowman',13):{'Extra_Attack':2,},
('Warrior-bowman',14):{'Extra_Attack':2,},
('Warrior-bowman',15):{'Extra_Attack':2,},
('Warrior-bowman',16):{'Extra_Attack':2,},
('Warrior-bowman',17):{'Extra_Attack':2,},
('Warrior-bowman',18):{'Extra_Attack':2,},
('Warrior-bowman',19):{'Extra_Attack':2,},
('Warrior-bowman',20):{'Extra_Attack':2,},
('Warrior-officer',1):{},
('Warrior-officer',2):{},
('Warrior-officer',3):{},
('Warrior-officer',4):{},
('Warrior-officer',5):{'Extra_Attack':1,},
('Warrior-officer',6):{'Extra_Attack':1,},
('Warrior-officer',7):{'Extra_Attack':1,},
('Warrior-officer',8):{'Extra_Attack':1,},
('Warrior-officer',9):{'Extra_Attack':1,},
('Warrior-officer',10):{'Extra_Attack':1,},
('Warrior-officer',11):{'Extra_Attack':2,},
('Warrior-officer',12):{'Extra_Attack':2,},
('Warrior-officer',13):{'Extra_Attack':2,},
('Warrior-officer',14):{'Extra_Attack':2,},
('Warrior-officer',15):{'Extra_Attack':2,},
('Warrior-officer',16):{'Extra_Attack':2,},
('Warrior-officer',17):{'Extra_Attack':2,},
('Warrior-officer',18):{'Extra_Attack':2,},
('Warrior-officer',19):{'Extra_Attack':2,},
('Warrior-officer',20):{'Extra_Attack':2,},
('Commoner',1):{},
('Commoner',2):{},
('Commoner',3):{},
('Commoner',4):{},
('Commoner',5):{},
('Commoner',6):{},
('Commoner',7):{},
('Commoner',8):{},
('Commoner',9):{},
('Commoner',10):{},
('Commoner',11):{},
('Commoner',12):{},
('Commoner',13):{},
('Commoner',14):{},
('Commoner',15):{},
('Commoner',16):{},
('Commoner',17):{},
('Commoner',18):{},
('Commoner',19):{},
('Commoner',20):{},
# Всякие монстры:
('Empyrean',1):{},
('Empyrean',2):{},
('Empyrean',3):{},
('Empyrean',4):{},
('Empyrean',5):{},
('Empyrean',6):{},
('Empyrean',7):{},
('Empyrean',8):{},
('Empyrean',9):{},
('Empyrean',10):{},
('Empyrean',11):{},
('Empyrean',12):{},
('Empyrean',13):{},
('Empyrean',14):{},
('Empyrean',15):{},
('Empyrean',16):{},
('Empyrean',17):{},
('Empyrean',18):{},
('Empyrean',19):{},
('Empyrean',20):{},
}
#----
# Слоты заклинаний по уровням:
# Spell slots per class level:
# https://www.dandwiki.com/wiki/5e_SRD:Bard#Table:_The_bard
# Bards, clerics, druids, wizards and sorcerers share the same slot
# progression (as the original hand-written table showed), so the whole
# mapping is generated from a handful of archetype templates instead of
# being spelled out once per class.
# TODO: fall back to "any" when there is no matching class entry
# (original note kept).

def _build_class_spells():
    """Return the {(class_name, level): {'1_lvl': n, ...}} slot table.

    Every value is a freshly built dict (no sharing between entries),
    so callers may mutate one entry without affecting the others --
    matching the behaviour of the original literal table.
    """
    # One row per class level 1..20; index i of a row is the number of
    # slots of spell level i+1 (emitted as key '<i+1>_lvl').
    # Full casters: Bard, Druid, Cleric, Wizard, Sorcerer (and Empyrean).
    full_caster = [
        (2, 0, 0, 0, 0, 0, 0, 0, 0),
        (3, 0, 0, 0, 0, 0, 0, 0, 0),
        (4, 2, 0, 0, 0, 0, 0, 0, 0),
        (4, 3, 0, 0, 0, 0, 0, 0, 0),
        (4, 3, 2, 0, 0, 0, 0, 0, 0),
        (4, 3, 3, 0, 0, 0, 0, 0, 0),
        (4, 3, 3, 1, 0, 0, 0, 0, 0),
        (4, 3, 3, 2, 0, 0, 0, 0, 0),
        (4, 3, 3, 3, 1, 0, 0, 0, 0),
        (4, 3, 3, 3, 2, 0, 0, 0, 0),
        (4, 3, 3, 3, 2, 1, 0, 0, 0),
        (4, 3, 3, 3, 2, 1, 0, 0, 0),
        (4, 3, 3, 3, 2, 1, 1, 0, 0),
        (4, 3, 3, 3, 2, 1, 1, 0, 0),
        (4, 3, 3, 3, 2, 1, 1, 1, 0),
        (4, 3, 3, 3, 2, 1, 1, 1, 0),
        (4, 3, 3, 3, 2, 1, 1, 1, 1),
        (4, 3, 3, 3, 3, 1, 1, 1, 1),
        (4, 3, 3, 3, 3, 2, 1, 1, 1),
        (4, 3, 3, 3, 3, 2, 2, 1, 1),
    ]
    # Half casters: Paladin and Ranger (slots up to 5th spell level only).
    half_caster = [
        (0, 0, 0, 0, 0),
        (2, 0, 0, 0, 0),
        (3, 0, 0, 0, 0),
        (3, 0, 0, 0, 0),
        (4, 2, 0, 0, 0),
        (4, 2, 0, 0, 0),
        (4, 3, 0, 0, 0),
        (4, 3, 0, 0, 0),
        (4, 3, 2, 0, 0),
        (4, 3, 2, 0, 0),
        (4, 3, 3, 0, 0),
        (4, 3, 3, 0, 0),
        (4, 3, 3, 1, 0),
        (4, 3, 3, 1, 0),
        (4, 3, 3, 2, 0),
        (4, 3, 3, 2, 0),
        (4, 3, 3, 3, 1),
        (4, 3, 3, 3, 1),
        (4, 3, 3, 3, 2),
        (4, 3, 3, 3, 2),
    ]
    # One-third casters: the Eldritch Knight and Arcane Trickster
    # archetypes (slots up to 4th spell level only).
    third_caster = [
        (0, 0, 0, 0),
        (0, 0, 0, 0),
        (2, 0, 0, 0),
        (3, 0, 0, 0),
        (3, 0, 0, 0),
        (3, 0, 0, 0),
        (4, 2, 0, 0),
        (4, 2, 0, 0),
        (4, 2, 0, 0),
        (4, 3, 0, 0),
        (4, 3, 0, 0),
        (4, 3, 0, 0),
        (4, 3, 2, 0),
        (4, 3, 2, 0),
        (4, 3, 2, 0),
        (4, 3, 3, 0),
        (4, 3, 3, 0),
        (4, 3, 3, 0),
        (4, 3, 3, 1),
        (4, 3, 3, 1),
    ]
    # Warlock is the odd one out: the level of his slots grows with him.
    warlock = [
        (1, 0, 0, 0, 0),
        (2, 0, 0, 0, 0),
        (0, 2, 0, 0, 0),
        (0, 2, 0, 0, 0),
        (0, 0, 2, 0, 0),
        (0, 0, 2, 0, 0),
        (0, 0, 0, 2, 0),
        (0, 0, 0, 2, 0),
        (0, 0, 0, 0, 2),
        (0, 0, 0, 0, 2),
        (0, 0, 0, 0, 3, 1),
        (0, 0, 0, 0, 3, 1),
        (0, 0, 0, 0, 3, 1, 1),
        (0, 0, 0, 0, 3, 1, 1),
        (0, 0, 0, 0, 3, 1, 1, 1),
        (0, 0, 0, 0, 3, 1, 1, 1),
        (0, 0, 0, 0, 4, 1, 1, 1, 1),
        (0, 0, 0, 0, 4, 1, 1, 1, 1),
        (0, 0, 0, 0, 4, 1, 1, 1, 1),
        (0, 0, 0, 0, 4, 1, 1, 1, 1),
    ]
    # Non-casters get an empty dict at every level, exactly like the
    # original table did.
    non_caster = [()] * 20
    # A list of pairs (not a dict) so insertion order matches the
    # original literal exactly, even on interpreters with unordered dicts.
    # NOTE: the original spell table had no 'Warrior-pirate' entries
    # (unlike the features table above) -- kept faithful to that.
    class_tables = [
        ('Any', non_caster),
        ('Bard', full_caster),
        ('Druid', full_caster),
        ('Cleric', full_caster),
        ('Wizard', full_caster),
        ('Sorcerer', full_caster),
        ('Paladin', half_caster),
        ('Ranger', half_caster),
        ('Warlock', warlock),
        ('Eldritch_Knight', third_caster),
        ('Arcane_Tricker', third_caster),
        ('Rogue', non_caster),
        ('Monk', non_caster),
        ('Fighter', non_caster),
        ('Battlemaster', non_caster),
        ('Barbarian', non_caster),
        ('Warrior', non_caster),
        ('Warrior-heavy', non_caster),
        ('Warrior-bowman', non_caster),
        ('Warrior-officer', non_caster),
        ('Commoner', non_caster),
        # Empyreans cast like clerics (full casters).
        ('Empyrean', full_caster),
    ]
    table = {}
    for class_name, rows in class_tables:
        for level, slots in enumerate(rows, start=1):
            table[(class_name, level)] = dict(
                ('{0}_lvl'.format(spell_lvl + 1), count)
                for spell_lvl, count in enumerate(slots))
    return table

metadict_class_spells = _build_class_spells()
|
993,787 | 2b1d7a7f8b634f56528e2bbadc99b0a6770c83c0 | print("======= Keliling dan Luas Jajar Genjang =======")
# Menu-driven parallelogram (jajar genjang) calculator:
# option 1 computes the perimeter, option 2 the area.
print("\n1. Keliling Jajar Genjang\n2. Luas Jajar Genjang")
menu = int(input("\nInputkan Nomor Menu = "))
if menu == 1:
    # Perimeter: twice the sum of the two side lengths.
    side_a = int(input("\nInputkan sisi pertama = "))
    side_b = int(input("Inputkan sisi kedua = "))
    print("\nKeliling jajar genjang adalah ", 2 * (side_a + side_b))
elif menu == 2:
    # Area: base times height.
    base = int(input("\nInputkan alas = "))
    height = int(input("Inputkan tinggi = "))
    print("\nLuas jajar genjang adalah ", base * height)
else:
    # Anything other than 1 or 2 is rejected.
    print("Nomor Menu Tidak Tersedia")
993,788 | 3faea38989c89a7c1c7925abe6f0cfdc76a00ded | from profiler.models import SimpleType, DetailedType
def run(*args):
    """Export every DetailedType row to a CSV file.

    NOTE(review): the free ``*args`` signature and the module imports
    suggest this is a django-extensions ``runscript`` script, with args
    arriving from the command line -- confirm against how it is invoked.

    args[0] must be the output file path; exactly one non-empty
    argument is required, otherwise an Exception is raised.
    """
    print 'Received args=', args
    # Validate: exactly one non-empty argument (the output path).
    if len(args) != 1 or not args[0] or args[0] == '':
        raise Exception('[ERROR] Output file must be passed as param. Only this param should be used.')
    output_csv = ''
    # Walk SimpleTypes in global display order, then each one's
    # DetailedTypes in per-type order; each DetailedType is one CSV row.
    for stype in SimpleType.objects.extra(order_by = ['global_order']).all():
        for dtype in stype.detailedtype_set.extra(order_by = ['order_in_type']).all():
            output_csv += stype.name + ', '
            output_csv += dtype.name + ', '
            # Optional columns are emitted quoted when present, as a bare
            # ',' when absent (the spacing differs between the two
            # branches -- preserved as-is).
            if dtype.values_regex:
                output_csv += '"{0}", '.format(dtype.values_regex)
            else:
                output_csv += ','
            if dtype.values_dictionary:
                # Escape embedded double quotes so the quoted field stays
                # well-formed.
                output_csv += '"{0}", '.format(dtype.values_dictionary.replace('"', '\\"'))
            else:
                output_csv += ','
            output_csv += '{0}, {1}'.format(dtype.dictionary_is_file, dtype.accept_missing_values)
            output_csv += ', "{0}"'.format(dtype.dictionary_type)
            output_csv += '\n'
    print 'CSV:', output_csv
    # Write the accumulated CSV in one shot.
    with open(args[0], "w") as text_file:
        text_file.write(output_csv)
    print 'File ', args[0], ' saved.'
|
993,789 | 5b1759c254a3fdc7e024d06414272d0cf0962196 | from vt_manager.communication.sfa.rspecs.elements.element import Element
class HardwareType(Element):
    """RSpec element for a node's hardware type.

    Carries a single field, ``name``; structure and behaviour come from
    the ``Element`` base class.
    """
    fields = [
        'name'
    ]
|
993,790 | bfefb6ace50e01c20a35cd564815620f05a41f20 | import sys
class OnPath:
    """
    Test utility to put a pytest path on the Python search path.

    The path is prepended on ``__enter__`` (so it shadows any installed
    package of the same name) and removed again on ``__exit__``::

        with OnPath(tmp_path):
            import module_living_under_tmp_path
    """
    def __init__(self, path):
        # Coerce path-like objects (e.g. pytest's tmp_path) to str,
        # since sys.path entries must be strings.
        self.path = str(path)
    def __enter__(self):
        sys.path.insert(0, self.path)
        # Fix: return self so ``with OnPath(p) as mgr:`` binds the
        # manager instead of None (the original returned nothing).
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Removes the first occurrence, i.e. the one inserted above.
        # Implicit None return propagates any in-flight exception.
        sys.path.remove(self.path)
|
993,791 | 24f49c907f0625e94e47b68b586fa9519e439769 | from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from ui.models import Location
from pysnmp.entity.rfc3413.oneliner import cmdgen
class Command(BaseCommand):
    # Shown by ``manage.py help``.
    help = '''Capture network status of all the hosts within the specified
    network and map hostname with ip address for registered locations'''
    args = '<network_prefix network_prefix...>'
    def handle(self, *args, **options):
        """SNMP-scan each given /24 and sync Location IP addresses.

        For every address <prefix>.0 .. <prefix>.255 an SNMP walk is
        attempted; the hostname reported by the device is matched
        case-insensitively against Location rows, whose ip_address is
        updated when it differs from the scanned address.
        """
        if len(args) < 1:
            raise CommandError("Please pass atleast one network prefix to " +
                               "perform the SNMP walk for mapping network " +
                               "hostnames with ip addresses.")
        cmdGen = cmdgen.CommandGenerator()
        for network_prefix in args:
            # Brute-force the whole last octet of the /24.
            for i in range(256):
                ip_address = str(network_prefix) + '.' + str(i)
                try:
                    # NOTE(review): enterprise OIDs under 1.3.6.1.4.1.25071
                    # -- presumably the device's hostname tables; confirm
                    # against the vendor MIB.
                    errorIndication, errorStatus, errorIndex, varBindTable = cmdGen.nextCmd(
                        cmdgen.CommunityData(settings.SNMP_COMMUNITY_STRING),
                        cmdgen.UdpTransportTarget((ip_address, 161)),
                        '1.3.6.1.4.1.25071.1.2.6.1.1.2',
                        '1.3.6.1.4.1.25071.1.1.2.1.1.3',)
                    if errorIndication:
                        # Transport-level failure (e.g. timeout) for this host.
                        print(str(ip_address) + ': ' + str(errorIndication))
                    else:
                        if errorStatus:
                            # SNMP protocol-level error from the agent.
                            print('%s at %s' % (errorStatus.prettyPrint(),
                                errorIndex and varBindTable[-1][int(errorIndex)-1] or '?'))
                        else:
                            # First varbind's value is taken as the hostname.
                            hostname = str(varBindTable[0][0][1])
                            try:
                                location = Location.objects.get(hostname__iexact=hostname)
                                if location.ip_address != ip_address:
                                    location.ip_address = ip_address
                                    location.save()
                                    print 'Updated IP address for host - %s' % location.hostname
                            except ObjectDoesNotExist:
                                print 'Location with hostname <' + hostname + '> does not exist.'
                except Exception as e:
                    # Best-effort scan: log and continue with the next host.
                    print 'Error while querying - ' + ip_address
                    print 'Error message - %s' % str(e.message)
|
993,792 | 71f62c026381a82010cd14d7fc73c6795216bcec |
import cx_Freeze

# cx_Freeze build script: `python <this file> build` bundles the detector
# script together with the Haar-cascade XML files it loads at runtime.
build_options = {
    "build_exe": {
        "packages": ["numpy", "cv2"],
        "include_files": ["eye.xml", "smile.xml", "frontalface_default.xml"],
    }
}

cx_Freeze.setup(
    name="Face, Eyes & Smile Detection",
    version="0.1",
    description="Must have a web-cam to see your face!!!",
    options=build_options,
    # name of your game script
    executables=[cx_Freeze.Executable("FACE_EYES_SMILE_DETECTION.py")],
)
|
993,793 | a0a5aac603db932cab9d1461c31dcd6bc3e95e76 | from Xlib import display
import time
from matplotlib import pyplot as plt
import numpy as np
from pynput import keyboard
import threading, time
from datetime import datetime
import csv
import math
def on_press(key):
    """Key-press callback for the pynput listener.

    Intentionally does nothing: all state handling happens on release
    (see on_release).
    """
    global pressed_key
def on_release(key):
    """Key-release callback driving the recording state machine.

    `pressed_key` counts releases and acts as the shared state:
    1 = start recording, 2 = process/show, 4 = wait for a y/n answer,
    5 = the released key is captured as the label and the state moves
    to 6 (consumed by the main loop, which writes the CSV row).
    """
    global pressed_key
    global label
    pressed_key = pressed_key + 1
    print('{0} released {1}'.format( key,pressed_key))
    if pressed_key == 1:
        print(">>>>>>>record")
    if pressed_key == 2:
        print("-show")
        # listener.stop()
    if pressed_key == 4:
        try:
            # key.char raises AttributeError for special (non-character) keys
            if(key.char == 'y'):
                print("-Ask to recorded : which label ?")
            else :
                if(key.char == 'n'):
                    # discard: reset the state machine
                    print("-Ask to not recorded")
                    pressed_key = 0
                else:
                    # neither 'y' nor 'n': stay in state 4
                    print("Accepted caratere y/n")
                    pressed_key = pressed_key - 1
        except AttributeError:
            # special key: stay in state 4
            print('Bad caractere')
            pressed_key = pressed_key - 1
    if pressed_key == 5:
        label = key.char
        print("-Corresponding label {0} {1}".format(key,label))
        pressed_key = pressed_key + 1
    # if key == keyboard.Key.esc:
    # # Stop listener
    # pressed_key = 0
    # print(">> stopped :record")
    # return False
    # listener = null
# listener = null
def thread1():
    """Run the pynput keyboard listener; blocks until the listener stops."""
    # Collect events until released
    global listener
    with keyboard.Listener(
        on_press=on_press,
        on_release=on_release) as listener:
        listener.join()
def process_data(XTab, YTab):
    """Reduce a mouse trajectory to exactly 17 points.

    Repeatedly finds the pair of consecutive points that are closest
    together and replaces them by their midpoint until 17 points remain.
    Shorter inputs are returned unchanged (after printing a warning).

    Returns:
        (xs, ys): the reduced coordinate lists.

    Fixes over the previous version:
      * the inputs are copied first -- the old code aliased the caller's
        lists (`XTabCopy = XTab`), so the caller's "raw" and "reduced"
        trajectories ended up being the same mutated list;
      * the minimum search uses +inf instead of a 10000 cap and scans
        every consecutive pair, including the last one.
    """
    xs = list(XTab)  # copy: never mutate the caller's data
    ys = list(YTab)
    if len(xs) < 17:
        print("Not enough data")
        return xs, ys
    while len(xs) > 17:
        # locate the consecutive pair with the smallest distance
        d_min = float('inf')
        p_min = 0
        for i in range(1, len(xs)):
            d = math.hypot(xs[i - 1] - xs[i], ys[i - 1] - ys[i])
            if d < d_min:
                d_min = d
                p_min = i - 1
        # merge points p_min and p_min+1 into their midpoint, drop one
        xs[p_min + 1] = (xs[p_min] + xs[p_min + 1]) / 2
        ys[p_min + 1] = (ys[p_min] + ys[p_min + 1]) / 2
        xs.pop(p_min)
        ys.pop(p_min)
    return xs, ys
def process_speed(XTab, YTab):
    """Return per-step deltas (vx, vy) between consecutive trajectory points."""
    vx = [nxt - cur for cur, nxt in zip(XTab, XTab[1:])]
    vy = [nxt - cur for cur, nxt in zip(YTab, YTab[1:])]
    return vx, vy
# --- shared state and main driver loop ---------------------------------
# `pressed_key` is advanced by the keyboard-listener thread (thread1 ->
# on_release); this loop polls it:
#   1 -> sample the X11 pointer and live-plot the trajectory
#   2 -> reduce the trajectory, derive speeds and show both figures
#   6 -> append the labelled (vx, vy, label) row to record.csv
pressed_key = 0
threading.Thread(target = thread1).start()
XTab = []
YTab = []
vx = [0]
vy = [0]
plt.ion()
# start data collection
record = 0  # NOTE(review): never read afterwards
start = datetime.now()
with open('record.csv', 'w') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',',
    quotechar='|', quoting=csv.QUOTE_MINIMAL)
    # listener.stop()
    spamwriter.writerow(['vx','vy','label'])
    while True:
        if pressed_key == 1:
            d = datetime.now() - start;
            start = datetime.now()
            # sliding window: keep at most the last 150 samples
            if len(YTab) >150 :
                del XTab[0]
                del YTab[0]
            # query the X11 pointer position; y is flipped (screen is 1080 high)
            data = display.Display().screen().root.query_pointer()._data
            XTab.append(int(data["root_x"]))
            YTab.append(int(1080-data["root_y"]))
            plt.figure(1)
            plt.cla()
            plt.plot(XTab, YTab,'b-')
            plt.axis([0, 1920, 0, 1080])
            plt.pause(0.0005)
        if pressed_key == 2:
            # reduce the trajectory and compute per-step speeds
            XTab_copy,YTab_copy =process_data(XTab,YTab)
            vx,vy = process_speed(XTab_copy,YTab_copy)
            print('vx = {}'.format((vx)))
            plt.figure(1)
            plt.cla()
            plt.plot(XTab, YTab,'b-')
            plt.plot(XTab_copy, YTab_copy,'r*')
            plt.axis([0, 1920, 0, 1080])
            plt.figure(2)
            plt.cla()
            plt.plot(vx,'r')
            plt.plot(vy,'b')
            plt.show()
            plt.pause(0.0005)
            XTab = []
            YTab = []
            # hand over to the listener: 3 -> next release -> 4 (y/n prompt)
            pressed_key = 3
            print('-Keep the data?')
        if pressed_key == 6:
            # state 6 is set by on_release once a label key was captured
            print("label {}".format(label))
            spamwriter.writerow( vx + vy +[label] )
            time.sleep(0.1)
            pressed_key = 0
|
993,794 | 4488a8f4d5ae8ed42eb2f5c0511017248c43f5a8 | import torch
# vector: a 1-D tensor (the single float literal promotes all elements)
t2 = torch.tensor([1.,2,3,4])
print(t2)
# matrix: a 2-D tensor
t3 = torch.tensor([[5., 6], [7, 8], [9, 10]])
print(t3)
# 3d array: two stacked 2x3 matrices
t4 = torch.tensor([
    [[11, 12, 13],
     [13, 14, 15]],
    [[15, 16, 17],
     [17, 18, 19.]]])
print(t4)
print(t2.shape, t3.shape, t4.shape)
# tensor operations and gradients or derivatives
x = torch.tensor(3.)
w = torch.tensor(4., requires_grad=True)
b = torch.tensor(5., requires_grad=True)
y = w * x + b
# FIX: size is a method -- the old code printed the bound method object
print(y, y.size())
# FIX: backward() returns None, so call it instead of printing its result;
# it populates .grad on the leaves that require gradients.
y.backward()
print('dy/dw', w.grad)
# numpy integrated with torch
import numpy as np
x = np.array([[1, 2], [3, 4]])
# array into tensor (from_numpy shares the underlying buffer, no copy)
y = torch.from_numpy(x)
print(x.dtype, y.dtype)
z = y.numpy()
print(z)
|
993,795 | 0a6a448dca3d53de8710e6f2a341795881a40df7 | def main():
    # Create a dict and populate using a for loop.
    # my_dict={}
    # for i in range(1, 101):
    # dictionary[i]=i**3
    """
    Dictionary comprehensions
    { key:value for value in iterable if condition }
    { [part 1] [part 2] [part 3] }
    part 1: represent every key and value to put in the new dictionary.
    part 2: Loop to extract the elements of any iterable.
    part 3: Condition to filter the elements of the loop.
    # """
    # Equivalent comprehension for the loop above, skipping multiples of 3:
    # my_dict = {i: i**3 for i in range(1, 101) if i % 3 != 0}
    # print(my_dict)
    # challenge: Create a dictionary comprehension which keys are the first
    # NOTE(review): the challenge comment above appears truncated in the source.
    if __name__ == "__main__":
        main()
993,796 | a1a05c629660ed1956d986cddc99869e2da59fa8 | alphabet = "абвгде"
letters_list = list(alphabet) # list built from the string (one element per character)
letters_tuple = tuple(alphabet) # tuple built from the string
print("letters_list = ", letters_list)
print("letters_tuple = ", letters_tuple)
print(str(letters_list)) # but this way it does not work
print('/'.join(letters_list))
# dict built from a sequence of (key, value) pairs
my_dict = dict((('key1', 'value1'), ('key2', 'value2'), ('key3', 'value3')))
print("my_dict = ", my_dict)
fruits = ['orange', 'apple', 'pea', 'orange', 'apple', 'apple']
# set() drops duplicates; note the resulting order is arbitrary
unique_fruits = list(set(fruits))
print("---", unique_fruits)
people = {'name': "Василий", 'middle_name': 'Васильевич', 'surname': 'Чапаев', 'hobbies': ['лыжи', 'чтение']}
print(list(people.keys()))
print(people.values())
|
993,797 | ff89af627c981e755e361ee0996eaf01ec69af9b | class PrirodaOptimizer():
    def __init__(self, relative=False, basis='L1', memory=200, tmp_dir='/tmp', tmp_ram=0): # L1 is an analogue of CCPVZ
        """Store the settings for a calculation run.

        Arguments configure the calculation: basis set (relativistic and
        non-relativistic variants), memory and scratch-space settings.
        Units of `memory`/`tmp_ram` are not specified here -- TODO confirm.
        """
        self.relative = relative # arguments for configuring the calculation: basis (relativistic and non-relativistic)
        self.basis = basis
        self.memory = memory
        self.tmp_dir = tmp_dir
        self.tmp_ram = tmp_ram
    def fit(self, x, y=None):
        """Stub, present only because the scikit-learn API requires it."""
        return self # the object itself (as if it had been fitted)
    def transform(self, x, y=None):
        # Not implemented yet: the body is a bare Ellipsis placeholder.
        ...
|
993,798 | a5dde2903700b2781e9b419a397132c47880e96d | import cv2
import numpy as np
from goprocam import GoProCamera
from goprocam import constants

# Haar cascade for frontal-face detection (path of the system OpenCV data).
cascPath="/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# Connect to the GoPro; the video is relayed over UDP on port 10000.
gpCam = GoProCamera.GoPro()
#gpCam.gpControlSet(constants.Stream.BIT_RATE, constants.Stream.BitRate.B2_4Mbps)
#gpCam.gpControlSet(constants.Stream.WINDOW_SIZE, constants.Stream.WindowSize.W480)
cap = cv2.VideoCapture("udp://127.0.0.1:10000")

while True:
    # Capture frame-by-frame.  UDP streams drop frames occasionally; the
    # old code passed the resulting None straight into cvtColor and
    # crashed -- skip failed reads instead.
    ret, frame = cap.read()
    if not ret or frame is None:
        continue
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around each detected face.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame; press 'q' to quit.
    cv2.imshow("GoPro OpenCV", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture.
cap.release()
cv2.destroyAllWindows()
993,799 | f6a26746a0be9f2c4248cadc2fbfcb7cdd006551 | import autolamella.acquire
import autolamella.add_samples
import autolamella.align
import autolamella.autoscript
import autolamella.conversions
import autolamella.data
import autolamella.display
import autolamella.fiducial
import autolamella.interactive
import autolamella.milling
import autolamella.user_input
import autolamella.validate
from autolamella.main import main
from autolamella._version import __version__
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.