text stringlengths 38 1.54M |
|---|
from tkinter import *
import os
import numpy as np
from PIL import Image, ImageTk
from datetime import datetime

now = datetime.now()

# Colour palette (hex) used to tint the fractal; alternates kept below.
col_hex = ["#03071e", "#370617", "#6a040f", "#9d0208", "#d00000", "#dc2f02", "#e85d04", "#f48c06", "#faa307", "#ffba08"]
#col_hex = ["#386641", "#6a994e", "#a7c957", "#f2e8cf", "#bc4749"]
#col_hex = ["#0081a7", "#00afb9", "#fdfcdc", "#fed9b7", "#f07167"]
#col_hex = ["#7400b8", "#6930c3", "#5e60ce", "#5390d9", "#4ea8de", "#48bfe3", "#56cfe1", "#64dfdf", "#72efdd", "#80ffdb"]
#col_hex = ["#10451d", "#155d27", "#1a7431", "#208b3a", "#25a244", "#2dc653", "#4ad66d", "#6ede8a", "#92e6a7", "#b7efc5"]
#col_hex = ["#af4d98", "#d66ba0", "#e5a9a9", "#f4e4ba", "#9df7e5"]

colors = np.zeros((len(col_hex), 3), dtype=np.uint8)


def hex_to_rgb(value):
    """Convert a '#rrggbb' (or any 3-component) hex string to an (r, g, b) int tuple."""
    value = value.lstrip('#')
    lv = len(value)
    return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))


for i in range(len(col_hex)):
    colors[i, :] = hex_to_rgb(col_hex[i])

# Constants: viewport of the complex plane and render parameters.
x_top = 1        # imaginary coordinate of the top edge
y_left = -2      # real coordinate of the left edge
x_height = 2     # vertical span (imaginary axis)
y_width = 3      # horizontal span (real axis)
sizeX = 1080     # image height in pixels
sizeY = 1920     # image width in pixels
degP = 3         # polynomial degree
iters = 150      # iteration count
degP = min(degP, len(col_hex))  # one colour per root at most

# Display setup.
fen = Tk()
fen.geometry(str(sizeY + 10) + "x" + str(sizeX + 10))
can = Canvas(fen, width=sizeY, height=sizeX, bg='black')
can.pack()

# Map every pixel (i, j) to a point of the complex plane.
print("Initialisation des points...")
# NOTE: np.complex_ / np.float_ were removed in NumPy 2.0; use the explicit dtypes.
XY = np.zeros((sizeX, sizeY), dtype=np.complex128)
Z = np.zeros((sizeX, sizeY), dtype=np.complex128)
for i in range(sizeX):
    for j in range(sizeY):
        XY[i, j] = complex(y_left + y_width * (j / sizeY), x_top - x_height * (i / sizeX))

# Polynomial coefficients (low degree first): here P(z) = z^2.
print("Calcul du polynôme à partir des racines...")
P = np.array([0, 0, 1])

# Distance travelled by each point at the last iteration (for contrast).
S = np.zeros(XY.shape, dtype=np.complex128)

# Escape-time buffer: fraction of iterations survived before diverging.
print("Préparation de l'image...")
R = np.zeros((sizeX, sizeY), dtype=np.float64)

# Iterate z <- P(z) + c, zeroing points once |z| escapes the radius 4.
print("Récursions...")
for i in range(iters):
    Z_tmp = np.zeros(Z.shape, dtype=np.complex128)
    for j in range(degP):
        Z_tmp += P[j] * np.power(Z, j)
    Z_tmp += XY
    Z = (np.abs(Z_tmp) < 4) * Z_tmp            # keep only non-escaped points
    R = (R == 0) * (Z == 0) * (i / iters) + (R != 0) * R  # record escape time once
    XY = (Z != 0) * XY                          # stop feeding escaped points

# Render: gamma-corrected escape time into the green channel only.
print("Calcul de l'image...")
S = np.abs(Z)
imageExport = np.zeros((sizeX, sizeY, 3), dtype=np.uint8)
for j in range(1, 2):
    imageExport[:, :, j] += np.uint8(255 * np.power(R, 0.5))

# Save the image (create the output directory if missing — previously crashed).
print("Sauvegarde de l'image...")
dt_string = now.strftime("%d%m%Y%H%M%S")
PIL_image = Image.fromarray(np.uint8(imageExport))
os.makedirs("./Images", exist_ok=True)
PIL_image.save("./Images/" + dt_string + ".png")
print("Terminé!")

# Show the result in the Tk window.
img = ImageTk.PhotoImage(Image.fromarray(np.uint8(imageExport)))
can.create_image(0, 0, anchor=NW, image=img)
fen.mainloop()
|
from speedysvc.serialisation.RawSerialisation import \
RawSerialisation
class ServerProviderBase:
    """Base class for server providers.

    Holds the service description object and guards each instance
    against being started more than once.
    """

    # Start guard; name-mangled (>=2 leading underscores) to this class.
    ___init = False

    def __init__(self, server_methods):
        """
        TODO!!!! ===========================================================
        :param server_methods: service object exposing ``port``, ``name``
            and the RPC callables dispatched by :meth:`handle_fn`.
        """
        # No abstract base class here: the "serve" logic is
        # implementation-specific in each subclass.
        self.server_methods = server_methods
        self.port = server_methods.port
        self.name = server_methods.name
        assert not self.___init, \
            f"{self.__class__} has already been started!"
        self.___init = True

    def handle_fn(self, cmd, args):
        """Dispatch one RPC: decode *args*, call the method named *cmd*, encode the result."""
        handler = getattr(self.server_methods, cmd.decode('ascii'))
        # The method's serialiser decodes the incoming arguments and
        # encodes the return value of the RPC call.
        if handler.serialiser == RawSerialisation:
            # Special case: raw bytes are a single parameter,
            # not a serialised parameter list.
            call_args = (args,)
        else:
            call_args = handler.serialiser.loads(args)
        return handler.serialiser.dumps(handler(*call_args))
|
from newsplease import NewsPlease
def get_news_attributes(news_url):
    """Download and parse the article at *news_url* with news-please.

    Returns the NewsPlease article object (title, text, authors, ...).
    """
    return NewsPlease.from_url(news_url)
"""
LeetCode 211
"""
class Node:
    """One trie node: an end-of-word flag plus child links keyed by character."""

    def __init__(self, is_word=False):
        self.is_word = is_word
        self.next = dict()


class WordDictionary:
    """LeetCode 211: trie supporting addWord and wildcard ('.') search."""

    def __init__(self):
        self.root = Node()

    def addWord(self, word: str):
        """Insert *word*, creating trie nodes along its path as needed."""
        node = self.root
        for ch in word:
            if not node.next.get(ch):
                node.next[ch] = Node(False)
            node = node.next[ch]
        if not node.is_word:
            node.is_word = True

    def search(self, word: str) -> bool:
        """Return True if *word* (where '.' matches any single char) was added."""
        return self.match(self.root, word, 0)

    def match(self, node: 'Node', word: str, index: int) -> bool:
        """Recursively match word[index:] starting from *node*."""
        if index == len(word):
            return node.is_word
        ch = word[index]
        if ch != '.':
            child = node.next.get(ch)
            if not child:
                return False
            return self.match(child, word, index + 1)
        # Wildcard: try every child branch.
        for child in node.next.values():
            if self.match(child, word, index + 1):
                return True
        return False
|
from django.contrib import admin
from .models import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
    """Django admin configuration for Contact records."""

    # Columns shown in the change list; the tuple order is the column order.
    list_display = (
        'position_title',
        'full_name',
        'show_delegates',
        'show_partners',
        'show_judges',
    )
    # Default row ordering in the change list.
    ordering = ['position_title']
import argparse
from src import ProblemState
from src.input_parser import InputParser
class AutonomousTheoremProver(object):
    """Skeleton of a resolution-based automated theorem prover."""

    def __init__(self, _problem_state: ProblemState):
        # Problem state parsed from the input file (knowledge base + goal).
        self.problem_state = _problem_state

    def prove(self):
        """
        Autonomous Theorem Prover
        =========================

        Pseudo code of the resolution procedure.

        Symbols:
            1) KB: knowledge base
            2) :math:`{\\alpha}`: predicates to be proved

        Aim: prove KB -> :math:`{\\alpha}`

        Code:
            * CLAUSES <- CONVERT_TO_CNF( KB + :math:`{\\alpha}` )
            * while EMPTY_RESOLVENT not in CLAUSES do
            *     select two distinct clauses {c_i} and {c_j} in CLAUSES
            *     compute a resolvent {r_ij} of {c_i} and {c_j}
            *     CLAUSES <- CLAUSES + {r_ij}
            * end while
            * return satisfaction
        """
        # Not implemented yet; see the pseudo code above.
        pass
if __name__ == '__main__':
    # Command line: -f/--file is the problem description to load.
    cli = argparse.ArgumentParser()
    cli.add_argument('-f', '--file', help='File name to parse and create problem base',
                     type=argparse.FileType('r'), required=True)
    parsed = cli.parse_args()
    # Build the problem state from the opened file and run the prover.
    problem_state = InputParser.parse(parsed.file)
    AutonomousTheoremProver(problem_state).prove()
|
# This webscraping code spiders reservoir-level data from ca.gov.
# The user inputs a reservoir code from https://cdec.water.ca.gov/misc/daily_res.html
# along with an end date and a number of months before that date.
# The program reads the HTML with BS4, cleans the data, extracts the monthly
# storage values and averages them.

# Import necessary libraries
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
import re

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Get input to build the query URL
code = input('Enter Reservoir Code: (CAPS) ')
end = input('Enter End Date: (YYYY-MM-DD) ')
months = input('Enter number of months: ')
url = ("https://cdec.water.ca.gov/dynamicapp/QueryMonthly?s=" + code
       + "&end=" + end + "&span=" + months + "months")

# Read the constructed URL
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')

# Extract the relevant <font> tags (data cells carry colour 000000)
stringlist = [str(tag) for tag in soup('font') if '000000' in str(tag)]

# Clean data: slice off the tag markup; keep only comma-formatted numbers
cleanlist = []
for raw in stringlist:
    trimmed = raw[22:-7]
    if "," in trimmed:
        cleanlist.append(trimmed)

# Storage numbers sit at the even indices; take at most `months` of them.
# Bug fix: the original walked the index past len(cleanlist) and could raise
# IndexError; bounding the range to the list length avoids that while keeping
# the same selected values.
limit = (float(months) * 2) - 1
storagelist = [cleanlist[i] for i in range(0, len(cleanlist), 2)
               if i == 0 or i < limit]

# Format and sum the numbers for the final calculation
denominator = 0
total = 0
for value in storagelist:
    value = float(re.sub(',', '', value))
    print(value)
    total = value + total
    denominator = denominator + 1

# Make the final calculation (bug fix: guard against an empty result set,
# which previously raised ZeroDivisionError)
if denominator:
    average = total / denominator
    print("Average storage for specified period: ", average, "Acre Feet")
else:
    print("No storage data found for the requested period.")
|
# -*- coding: utf-8 -*-
""" wrapper functions for DensOp """
import ctypes
from ctypes.util import find_library
import pathlib
import math
import numpy as np
import qlazy.config as cfg
from qlazy.util import get_lib_ext, densop_check_args
from qlazy.QState import QState
from qlazy.DensOp import DensOp
# Native qlazy shared library (libqlz.<platform extension>) located next to this module.
lib = ctypes.CDLL(str(pathlib.Path(__file__).with_name('libqlz.'+get_lib_ext())))
# C runtime, loaded globally so wrappers can free() buffers allocated by the library.
libc = ctypes.CDLL(find_library("c"), mode=ctypes.RTLD_GLOBAL)
def densop_init(qstate=None, prob=None):
    """Create a density operator from a statistical mixture.

    :param qstate: list of QState objects making up the mixture.
    :param prob: matching list of probabilities.
    :return: ctypes void pointer to the newly allocated C-side DensOp.
    :raises ValueError: when the C call reports failure.
    """
    qstate = [] if qstate is None else qstate
    prob = [] if prob is None else prob
    num = len(qstate)
    c_densop = ctypes.c_void_p(None)
    prob_array = (ctypes.c_double * num)(*prob)
    qstate_array = (QState * num)(*qstate)
    lib.densop_init.restype = ctypes.c_bool
    lib.densop_init.argtypes = [ctypes.POINTER(QState),
                                ctypes.POINTER(ctypes.c_double),
                                ctypes.c_int,
                                ctypes.POINTER(ctypes.c_void_p)]
    ok = lib.densop_init(qstate_array, prob_array, ctypes.c_int(num), c_densop)
    if ok is False:
        raise ValueError("can't initialize DensOp object.")
    return c_densop
def densop_init_with_matrix(matrix=None):
    """Create a density operator directly from a square complex matrix.

    :param matrix: numpy array of shape (row, row).
    :return: ctypes void pointer to the newly allocated C-side DensOp.
    :raises ValueError: when the C call reports failure.
    """
    c_densop = ctypes.c_void_p(None)
    row = len(matrix)
    col = row  # density matrices are square
    size = row * col
    # Split the flattened complex entries into separate real/imag C arrays.
    flat = list(matrix.flatten())
    DoubleArray = ctypes.c_double * size
    c_mat_real = DoubleArray(*[z.real for z in flat])
    c_mat_imag = DoubleArray(*[z.imag for z in flat])
    lib.densop_init_with_matrix.restype = ctypes.c_bool
    lib.densop_init_with_matrix.argtypes = [DoubleArray, DoubleArray,
                                            ctypes.c_int, ctypes.c_int,
                                            ctypes.POINTER(ctypes.c_void_p)]
    ok = lib.densop_init_with_matrix(c_mat_real, c_mat_imag,
                                     ctypes.c_int(row), ctypes.c_int(col),
                                     c_densop)
    if ok is False:
        raise ValueError("can't initialize densop with matrix.")
    return c_densop
def densop_get_elm(de):
    """Return the density-operator elements as a (row, col) complex ndarray.

    The C library fills a flat interleaved buffer [re0, im0, re1, im1, ...];
    it is copied into Python complex numbers (rounded to 8 digits) and the
    C buffer is freed before returning.

    :raises ValueError: when the elements cannot be retrieved.
    """
    try:
        c_elm = ctypes.c_void_p(None)
        lib.densop_get_elm.restype = ctypes.c_bool
        lib.densop_get_elm.argtypes = [ctypes.POINTER(DensOp),
                                       ctypes.POINTER(ctypes.c_void_p)]
        ret = lib.densop_get_elm(ctypes.byref(de), c_elm)
        if ret is False:
            raise ValueError("can't get densop elements.")
        o = ctypes.cast(c_elm.value, ctypes.POINTER(ctypes.c_double))
        size = de.row * de.col
        out = [complex(round(o[2 * i], 8), round(o[2 * i + 1], 8))
               for i in range(size)]
        # Free the C-side buffer now that the values are copied out.
        libc.free.argtypes = [ctypes.POINTER(ctypes.c_double)]
        libc.free(o)
        return np.array(out).reshape([de.row, de.col])
    except Exception as e:
        # Bug fix: chain the original exception so ctypes/type failures stay
        # diagnosable instead of being silently replaced.
        raise ValueError("can't get densop elements.") from e
def densop_reset(de, qid=None):
    """Reset the qubits listed in *qid* (all qubits when omitted) of *de*.

    :raises ValueError: when the reset fails.
    """
    if qid is None or qid == []:
        # Default: reset every qubit of the operator.
        qid = list(range(int(math.log2(de.row))))
    try:
        qubit_num = len(qid)
        IntArray = ctypes.c_int * qubit_num
        qid_array = IntArray(*list(qid))
        lib.densop_reset.restype = ctypes.c_bool
        lib.densop_reset.argtypes = [ctypes.POINTER(DensOp), ctypes.c_int, IntArray]
        ok = lib.densop_reset(ctypes.byref(de), ctypes.c_int(qubit_num), qid_array)
        if ok is False:
            raise ValueError("can't reset.")
    except Exception:
        raise ValueError("can't reset.")
def densop_print(de, nonzero=False):
    """Print the density operator via the C library.

    :param nonzero: when True, print only the non-zero elements.
    :raises ValueError: when printing fails.
    """
    try:
        lib.densop_print.restype = ctypes.c_bool
        lib.densop_print.argtypes = [ctypes.POINTER(DensOp), ctypes.c_bool]
        ok = lib.densop_print(ctypes.byref(de), ctypes.c_bool(nonzero))
        if ok is False:
            raise ValueError("can't print densop.")
    except Exception:
        raise ValueError("can't print densop.")
def densop_copy(de):
    """Return a handle (c_void_p) to a fresh C-side copy of *de*.

    :raises ValueError: when the copy fails.
    """
    try:
        duplicate = ctypes.c_void_p(None)
        lib.densop_copy.restype = ctypes.c_bool
        lib.densop_copy.argtypes = [ctypes.POINTER(DensOp),
                                    ctypes.POINTER(ctypes.c_void_p)]
        ok = lib.densop_copy(ctypes.byref(de), duplicate)
        if ok is False:
            raise ValueError("can't copy densop.")
        return duplicate
    except Exception:
        raise ValueError("can't copy densop.")
def densop_add(de, densop=None):
    """Add *densop* onto *de* in place (C-side matrix addition).

    :raises ValueError: when the addition fails.
    """
    try:
        lib.densop_add.restype = ctypes.c_bool
        lib.densop_add.argtypes = [ctypes.POINTER(DensOp), ctypes.POINTER(DensOp)]
        ok = lib.densop_add(ctypes.byref(de), ctypes.byref(densop))
        if ok is False:
            raise ValueError("can't add densop.")
    except Exception:
        raise ValueError("can't add densop.")
def densop_mul(de, factor=0.0):
    """Multiply *de* by the scalar *factor* in place.

    :raises ValueError: when the multiplication fails.
    """
    try:
        lib.densop_mul.restype = ctypes.c_bool
        lib.densop_mul.argtypes = [ctypes.POINTER(DensOp), ctypes.c_double]
        ok = lib.densop_mul(ctypes.byref(de), ctypes.c_double(factor))
        if ok is False:
            raise ValueError("can't mul densop.")
    except Exception:
        raise ValueError("can't mul densop.")
def densop_trace(de):
    """Return the trace of *de* as a real number (rounded to 8 digits).

    :raises ValueError: when the C call fails or the imaginary part of the
        trace exceeds cfg.EPS (a physical trace must be real).
    """
    try:
        c_real = ctypes.c_double(0.0)
        c_imag = ctypes.c_double(0.0)
        lib.densop_trace.restype = ctypes.c_bool
        lib.densop_trace.argtypes = [ctypes.POINTER(DensOp),
                                     ctypes.POINTER(ctypes.c_double),
                                     ctypes.POINTER(ctypes.c_double)]
        ok = lib.densop_trace(ctypes.byref(de), ctypes.byref(c_real),
                              ctypes.byref(c_imag))
        if ok is False:
            raise ValueError("can't get trace of densop.")
        real_part = round(c_real.value, 8)
        imag_part = round(c_imag.value, 8)
        if abs(imag_part) > cfg.EPS:
            raise ValueError("can't get trace of densop.")
        return real_part
    except Exception:
        raise ValueError("can't get trace of densop.")
def densop_sqtrace(de):
    """Return the square trace Tr(rho^2) of *de* (rounded to 8 digits).

    :raises ValueError: when the C call fails or the imaginary part exceeds
        cfg.EPS (the square trace must be real).
    """
    try:
        c_real = ctypes.c_double(0.0)
        c_imag = ctypes.c_double(0.0)
        lib.densop_sqtrace.restype = ctypes.c_bool
        lib.densop_sqtrace.argtypes = [ctypes.POINTER(DensOp),
                                       ctypes.POINTER(ctypes.c_double),
                                       ctypes.POINTER(ctypes.c_double)]
        ok = lib.densop_sqtrace(ctypes.byref(de), ctypes.byref(c_real),
                                ctypes.byref(c_imag))
        if ok is False:
            raise ValueError("can't get square trace of densop.")
        real_part = round(c_real.value, 8)
        imag_part = round(c_imag.value, 8)
        if abs(imag_part) > cfg.EPS:
            raise ValueError("can't get square trace of densop.")
        return real_part
    except Exception:
        raise ValueError("can't get square trace of densop.")
def densop_patrace(de, qid=None):
    """Trace out the qubits listed in *qid* and return the reduced operator.

    :param qid: qubit ids to trace out (required).
    :return: ctypes void pointer to the reduced C-side DensOp.
    :raises ValueError: when *qid* is missing or the C call fails.
    """
    try:
        if qid is None:
            raise ValueError("qid must be set.")
        reduced = ctypes.c_void_p(None)
        qubit_num = len(qid)
        IntArray = ctypes.c_int * qubit_num
        qid_array = IntArray(*list(qid))
        lib.densop_patrace.restype = ctypes.c_bool
        lib.densop_patrace.argtypes = [ctypes.POINTER(DensOp),
                                       ctypes.c_int, IntArray,
                                       ctypes.POINTER(ctypes.c_void_p)]
        ok = lib.densop_patrace(ctypes.byref(de), ctypes.c_int(qubit_num),
                                qid_array, reduced)
        if ok is False:
            raise ValueError("can't get partial trace of densop.")
        return reduced
    except Exception:
        raise ValueError("can't get partial trace of densop.")
def densop_tensor_product(de_0, de_1):
    """Return a handle to the tensor product of *de_0* and *de_1*.

    :raises ValueError: when the C call fails.
    """
    try:
        product = ctypes.c_void_p(None)
        lib.densop_tensor_product.restype = ctypes.c_bool
        lib.densop_tensor_product.argtypes = [ctypes.POINTER(DensOp),
                                              ctypes.POINTER(DensOp),
                                              ctypes.POINTER(ctypes.c_void_p)]
        ok = lib.densop_tensor_product(ctypes.byref(de_0), ctypes.byref(de_1),
                                       product)
        if ok is False:
            raise ValueError("can't get tensor product.")
        return product
    except Exception:
        raise ValueError("can't get tensor product.")
def densop_apply_matrix(de, matrix=None, qid=None, dire='both'):
    """Apply *matrix* to the qubits *qid* of the density operator *de*.

    :param matrix: square matrix (nested sequence of complex values).
    :param qid: target qubit ids; defaults to all qubits.
    :param dire: 'left', 'right' or 'both' — which side(s) to multiply on.
    :raises ValueError: on bad arguments or when the C call fails.
    """
    if matrix is None:
        raise ValueError("matrix must be set.")
    if qid is None or qid == []:
        qid = list(range(int(math.log2(de.row))))
    direction_map = {'left': cfg.LEFT, 'right': cfg.RIGHT, 'both': cfg.BOTH}
    if dire not in direction_map:
        raise ValueError("unknown dire string (set 'left', 'right' or 'both').")
    adire = direction_map[dire]
    try:
        qubit_num = len(qid)
        IntArray = ctypes.c_int * qubit_num
        qid_array = IntArray(*list(qid))
        row = len(matrix)  # dimension of the (square) unitary matrix
        col = row
        size = row * col
        # Flatten row-major, then split into separate real/imag C arrays.
        flat = [elm for mat_row in matrix for elm in mat_row]
        DoubleArray = ctypes.c_double * size
        c_mat_real = DoubleArray(*[z.real for z in flat])
        c_mat_imag = DoubleArray(*[z.imag for z in flat])
        lib.densop_apply_matrix.restype = ctypes.c_bool
        lib.densop_apply_matrix.argtypes = [ctypes.POINTER(DensOp),
                                            ctypes.c_int, IntArray,
                                            ctypes.c_int,
                                            DoubleArray, DoubleArray,
                                            ctypes.c_int, ctypes.c_int]
        ok = lib.densop_apply_matrix(ctypes.byref(de),
                                     ctypes.c_int(qubit_num), qid_array,
                                     ctypes.c_int(adire), c_mat_real, c_mat_imag,
                                     ctypes.c_int(row), ctypes.c_int(col))
        if ok is False:
            raise ValueError("can't apply matrix.")
    except Exception:
        raise ValueError("can't apply matrix.")
def densop_probability(de, matrix=None, qid=None, matrix_type=None):
    """Probability of the observable *matrix* for the density operator *de*.

    :param matrix: numpy matrix representing the observable.
    :param qid: target qubit ids; defaults to all qubits.
    :param matrix_type: 'kraus' or 'povm'.
    :return: probability rounded to 8 digits.
    :raises ValueError: on bad arguments or when the C call fails.
    """
    if matrix is None:
        raise ValueError("matrix must be set.")
    if matrix.shape[0] > de.row or matrix.shape[1] > de.col:
        raise ValueError("matrix size is too large.")
    if qid is None or qid == []:
        qid = list(range(int(math.log2(de.row))))
    if matrix_type == 'kraus':
        mtype = cfg.KRAUS
    elif matrix_type == 'povm':
        mtype = cfg.POVM
    else:
        raise ValueError("matrix_type is unknown (set 'kraus' or 'povm')")
    try:
        qubit_num = len(qid)
        IntArray = ctypes.c_int * qubit_num
        qid_array = IntArray(*list(qid))
        row = len(matrix)  # dimension of the (square) matrix
        col = row
        size = row * col
        # Split the flattened complex entries into real/imag C arrays.
        flat = list(matrix.flatten())
        DoubleArray = ctypes.c_double * size
        c_mat_real = DoubleArray(*[z.real for z in flat])
        c_mat_imag = DoubleArray(*[z.imag for z in flat])
        c_prob = ctypes.c_double(0.0)
        lib.densop_probability.restype = ctypes.c_bool
        lib.densop_probability.argtypes = [ctypes.POINTER(DensOp),
                                           ctypes.c_int, IntArray,
                                           ctypes.c_int, DoubleArray, DoubleArray,
                                           ctypes.c_int, ctypes.c_int,
                                           ctypes.POINTER(ctypes.c_double)]
        ok = lib.densop_probability(ctypes.byref(de),
                                    ctypes.c_int(qubit_num), qid_array,
                                    ctypes.c_int(mtype), c_mat_real, c_mat_imag,
                                    ctypes.c_int(row), ctypes.c_int(col),
                                    ctypes.byref(c_prob))
        if ok is False:
            raise ValueError("can't calculate probability.")
        return round(c_prob.value, 8)
    except Exception:
        raise ValueError("can't calculate probability.")
def densop_operate_qgate(de, kind=None, qid=None, phase=cfg.DEF_PHASE,
                         gphase=cfg.DEF_GPHASE, factor=cfg.DEF_FACTOR):
    """Apply the quantum gate *kind* (with the given phases) to qubits *qid* of *de*.

    :raises ValueError: when arguments are invalid or the C call fails.
    """
    # Validate arguments up front (raises on bad kind/qid).
    densop_check_args(de, kind=kind, qid=qid)
    # Gates address at most two qubits; unused slots stay 0.
    qubit_id = [0, 0]
    for i, q in enumerate(qid):
        qubit_id[i] = q
    IntArray = ctypes.c_int * 2
    qid_array = IntArray(*qubit_id)
    lib.densop_operate_qgate.restype = ctypes.c_bool
    lib.densop_operate_qgate.argtypes = [ctypes.POINTER(DensOp), ctypes.c_int,
                                         ctypes.c_double, ctypes.c_double,
                                         ctypes.c_double, IntArray]
    ok = lib.densop_operate_qgate(ctypes.byref(de), ctypes.c_int(kind),
                                  ctypes.c_double(phase), ctypes.c_double(gphase),
                                  ctypes.c_double(factor), qid_array)
    if ok is False:
        raise ValueError("can't operate the quantum gate.")
def densop_free(de):
    """Release the C-side memory backing the density operator *de*."""
    lib.densop_free.argtypes = [ctypes.POINTER(DensOp)]
    lib.densop_free(ctypes.byref(de))
|
#@+leo-ver=4-thin
#@+node:zorcanda!.20051208141646:@thin Phaser.py
import javax.swing as swing
import java.awt.event as aevent
import java.awt as awt
import java
class Phaser( swing.JPanel, aevent.ActionListener ):
    """Gradually phases a component into the GUI when added.
    Can also phase it back out on request (phaseRemove)."""

    def __init__(self, component):
        swing.JPanel.__init__(self, awt.GridLayout(1, 1))
        self.add(component)
        self.component = component
        self.increments = 20                      # animation steps remaining
        self.waitperiod = 1000 / self.increments  # ms between timer ticks
        self.timer = swing.Timer(self.waitperiod, self)
        self.timer.start()
        self.image = None          # offscreen snapshot used for alpha blending
        self.cmp_to_focus = None   # widget to focus once the fade-in completes
        self.phasingIn = True
        self.setOpaque(True)

    def setComponentToFocus(self, widget):
        """Remember a widget to focus once the fade-in has finished."""
        self.cmp_to_focus = widget

    def phaseRemove(self):
        """Begin fading the component out; runs on the Swing event thread."""
        myself = self

        class runner(java.lang.Runnable):
            def run(self):
                myself.timer = swing.Timer(myself.waitperiod, myself)
                myself.takePictureOfSelf()
                myself.setOpaque(False)
                myself.remove(myself.component)
                myself.increments = 20
                myself.timer.start()

        swing.SwingUtilities.invokeLater(runner())

    def takePictureOfSelf(self):
        """Render this panel into an offscreen image for alpha compositing."""
        psize = self.getSize()
        snapshot = awt.image.BufferedImage(psize.width, psize.height,
                                           awt.image.BufferedImage.TYPE_INT_RGB)
        g = snapshot.createGraphics()
        self.super__paint(g)
        g.dispose()
        self.image = snapshot

    def actionPerformed(self, event):
        """Timer tick: schedule the next animation frame."""
        self.repaint()

    def paint(self, g):
        if self.image is None:
            # First paint: snapshot ourselves, then hide the real component
            # so the animation can blend the snapshot in.
            self.takePictureOfSelf()
            self.setOpaque(False)
            self.remove(self.component)
            self.timer.start()
        if self.increments != 0:
            if self.phasingIn:
                self.phaseIn(g)
            else:
                self.phaseOut(g)
            return
        # Animation finished: restore the live component and hand focus over.
        if self.component.getParent() is None:
            self.add(self.component)
            if self.cmp_to_focus:
                self.cmp_to_focus.requestFocus()
        self.super__paint(g)

    def phaseIn(self, g):
        """Draw the snapshot with increasing opacity until fully phased in."""
        alpha = 1.0 / self.increments
        self.increments -= 1
        if self.increments == 0:
            self.timer.stop()
            self.phasingIn = False
            self.setOpaque(True)
            self.repaint()
        blend = awt.AlphaComposite.getInstance(awt.AlphaComposite.SRC_OVER, alpha)
        previous = g.getComposite()
        g.setComposite(blend)
        g.drawImage(self.image, 0, 0, None)
        g.setComposite(previous)
        return

    def phaseOut(self, g):
        """Draw the snapshot with decreasing opacity, then detach from parent."""
        alpha = self.increments * .05
        self.increments -= 1
        if self.increments == 0:
            self.timer.stop()
            self.phasingIn = False
            self.getParent().remove(self)
            return
        blend = awt.AlphaComposite.getInstance(awt.AlphaComposite.SRC_OVER, alpha)
        previous = g.getComposite()
        g.setComposite(blend)
        g.drawImage(self.image, 0, 0, None)
        g.setComposite(previous)
        return
#@-node:zorcanda!.20051208141646:@thin Phaser.py
#@-leo
|
"""
Group_4 Capstone Project
Copyright (c) 2019
Licensed Northeastern University
"""
import numpy as np
# Create a function to make column values(string) to consistent
def flag_brands(col_name, file_name, dest_col, Dict):
    """Write a label into *dest_col* wherever *col_name* matches a pattern.

    :param col_name: text column (pandas Series) searched for each pattern
    :param file_name: data frame to modify
    :param dest_col: column of *file_name* receiving the labels
    :param Dict: mapping of search pattern -> replacement label
    :return: None — *file_name* is mutated in place
    """
    for pattern, label in Dict.items():
        matched = col_name.str.contains(pattern)
        file_name[dest_col] = np.where(matched, label, file_name[dest_col])
    return
# Create function to replce values in column by using dictionary values
def Change_values(col_name, Dict):
    """Replace values in *col_name* in place using the *Dict* mapping.

    :param col_name: pandas column (Series) to modify
    :param Dict: mapping of old value -> new value
    :return: None — replace() is called with inplace=True, which mutates
        the column and yields None (pandas inplace convention)
    """
    col_name.replace(Dict, inplace=True)
    return None
import scrapy
import pickle
import zlib
import time
import re
from lxml import html
from bson.binary import Binary
from NeoScrapy.items import BitCoinTalkLink, BitCoinTalkComment, BitCoinTalkUserProfile, BitCoinTalkUserStat, \
BitCoinTalkUserHistory
class BttSpider(scrapy.Spider):
name = 'bitcointalk'
allowed_domains = ['bitcointalk.org']
def __init__(self, func, **kwargs):
    """
    :param func: required; selects what this spider instance does.
        ANNLINK : fetch every thread under the announcement board
        COMMENT : fetch the comments of threads; required param: ids | array
        USER    : fetch user profile, all comments, started threads and
                  statistics; required param: ids | array
    """
    super(BttSpider, self).__init__(func, **kwargs)
    self.ANN_LINK_URL = 'https://bitcointalk.org/index.php?board=159.'
    self.COMMENT_URL = 'https://bitcointalk.org/index.php?topic={0}'
    self.USER_PROFILE_URL = 'https://bitcointalk.org/index.php?action=profile;u={0}'
    self.USER_POST_URL = 'https://bitcointalk.org/index.php?action=profile;u={0};sa=showPosts'
    self.USER_START_URL = 'https://bitcointalk.org/index.php?action=profile;threads;u={0};sa=showPosts'
    self.USER_STAT_URL = 'https://bitcointalk.org/index.php?action=profile;u={0};sa=statPanel'
    if func not in ('ANNLINK', 'COMMENT', 'USER_PROFILE', 'USER_HISTORY'):
        # NOTE(review): silently returning here leaves self.func unset, so
        # start_requests later raises AttributeError — confirm intended.
        return
    self.func = func
    if 'ids' in kwargs:
        self.ids = kwargs['ids']
def start_requests(self):
    """Emit the initial request(s) for the configured func.

    Bug fixes: the original constructed scrapy.Request objects but never
    yielded them, so the spider issued no requests at all; the USER_* URLs
    were also missing their ``.format(user_id)`` substitution.
    """
    if self.func == 'ANNLINK':
        yield scrapy.Request(self.ANN_LINK_URL, callback=self.ann_link_parse,
                             meta={'index': 0})
    if self.func == 'COMMENT':
        if isinstance(self.ids, list):
            for link_id in self.ids:
                yield scrapy.Request(self.COMMENT_URL.format(link_id),
                                     callback=self.comment_parse,
                                     meta={'index': 0, 'link_id': link_id})
    if self.func == 'USER_PROFILE':
        if isinstance(self.ids, list):
            for user_id in self.ids:
                yield scrapy.Request(self.USER_PROFILE_URL.format(user_id),
                                     callback=self.user_profile_parse,
                                     meta={'user_id': user_id})
                yield scrapy.Request(self.USER_STAT_URL.format(user_id),
                                     callback=self.user_stat_parse,
                                     meta={'user_id': user_id})
    if self.func == 'USER_HISTORY':
        if isinstance(self.ids, list):
            for user_id in self.ids:
                yield scrapy.Request(self.USER_START_URL.format(user_id),
                                     callback=self.user_history_parse,
                                     meta={'user_id': user_id, 'index': 0, 'start': True})
                yield scrapy.Request(self.USER_POST_URL.format(user_id),
                                     callback=self.user_history_parse,
                                     meta={'user_id': user_id, 'index': 0, 'start': False})
def ann_link_parse(self, response):
    """Parse one page of the announcement board into BitCoinTalkLink items."""
    tree = html.fromstring(response.text)
    # index == 0 marks the first page: read the page count and queue the rest.
    if response.meta['index'] == 0:
        # The first page lays its table out differently from later pages,
        # hence the different xpath.
        xpath_str = '//*[@id="bodyarea"]/div[3]/table/tr'
        page = int(tree.xpath('//*[@id="toppages"]/a[last()]')[0].text)
        for i in range(1, page):
            # Board pagination offset is i * 40 threads.
            url = self.ANN_LINK_URL + str(i * 4) + '0'
            yield scrapy.Request(url, callback=self.ann_link_parse, meta={'index': i})
    else:
        xpath_str = '//*[@id="bodyarea"]/div[2]/table/tr'
    for tr in tree.xpath(xpath_str)[1:]:
        link = BitCoinTalkLink()
        try:
            cells = tr.findall('td')
            link['title'] = self.text_format(cells[2].find('*a').text)
            link['link_url'] = self.text_format(cells[2].find('*a').attrib['href'])
            link['id'] = link['link_url'][link['link_url'].index('=') + 1:link['link_url'].rfind('.')]
            link['started_by'] = self.text_format(cells[3].find('a').text)
            link['profile_url'] = self.text_format(cells[3].find('a').attrib['href'])
            link['user_id'] = link['profile_url'][link['profile_url'].rfind('=') + 1:]
            link['replies'] = int(self.text_format(cells[4].text))
            link['views'] = int(self.text_format(cells[5].text))
        except Exception as e:
            self.logger.error('parse an single link from page {0} {1}'.format(response.url, str(e)))
        # NOTE(review): the item is yielded even when parsing failed part-way,
        # matching the original behaviour.
        yield link
def comment_parse(self, response):
    """Parse one page of a thread into BitCoinTalkComment items."""
    tree = html.fromstring(response.text)
    link_id = response.meta['link_id']
    # index == 0 marks the first page: work out the page count, queue the rest.
    if response.meta['index'] == 0:
        # Three pager layouts:
        # - single page: the td holds no <a> tags at all
        # - few pages (roughly < 25): the last <a> reads 'All' and the one
        #   before it is the page count
        # - many pages: the last <a> itself is the page count
        pagers = tree.xpath('//*[@id="bodyarea"]/table[1]/tr/td/a')
        if len(pagers) == 0:
            page = 1
        elif tree.xpath('//*[@id="bodyarea"]/table[1]/tr/td/a[last()]')[0].text == 'All':
            page = int(tree.xpath('//*[@id="bodyarea"]/table[1]/tr/td/a[last()-1]')[0].text)
        else:
            page = int(tree.xpath('//*[@id="bodyarea"]/table[1]/tr/td/a[last()]')[0].text)
        for i in range(1, page):
            # Thread pagination offset is i * 20 posts.
            url = self.COMMENT_URL.format(link_id + '.' + str(i * 2) + '0')
            yield scrapy.Request(url, callback=self.comment_parse, meta={'index': i, 'link_id': link_id})
    for tr in tree.xpath('//*[@id="quickModForm"]/table[1]/tr'):
        comment = BitCoinTalkComment()
        # For every tr under quickModForm we look for <td class='poster_info'>,
        # <td class='td_headerandpost'> and <a class='message_number'>:
        # 1. if any is missing, the tr is not a post row — skip it;
        # 2. if all are present but the post was deleted, its message url looks
        #    like ...topic=<msg>.msg<msg>#msg<msg> — skip it too;
        # 3. otherwise the row is valid data.
        try:
            poster_cells = tr.xpath('descendant::*[@class="poster_info"]')
            header_cells = tr.xpath('descendant::*[@class="td_headerandpost"]')
            number_cells = tr.xpath('descendant::*[@class="message_number"]')
            if len(poster_cells) == 0 or len(header_cells) == 0 or len(number_cells) == 0:
                continue
            message_url = number_cells[0].attrib['href']
            topic = message_url[message_url.index('=') + 1:message_url.rfind('.')]
            message_id = message_url[message_url.index('#') + 1:]
            if 'msg' + topic == message_id:
                continue
            # Comment identity.
            comment['message_id'] = message_id
            comment['link_id'] = link_id
            # Left column: author information.
            poster_info = poster_cells[0]
            comment['author'] = poster_info.find('b/a').text
            user_profile_url = poster_info.find('b/a').attrib['href']
            comment['user_profile_url'] = user_profile_url
            comment['user_id'] = user_profile_url[user_profile_url.rfind('=') + 1:]
            comment['grade'] = self.text_format(poster_info.find('div').text)
            for text in poster_info.itertext():
                f_text = self.text_format(text)
                if f_text.startswith('Activity'):
                    try:
                        comment['activity'] = int(f_text[f_text.index(':') + 2:])
                    except ValueError as e:
                        self.logger.error('parse user activity num error {0} {1}'.format(response.url, str(e)))
            # Right column: the post itself. Post #1 is the opening post whose
            # DOM differs and whose displayed time is the last-edit time.
            hearder_post = header_cells[0]
            if number_cells[0].text == '#1':
                comment['original'] = True
            # NOTE(review): assumed time/title/content apply to every post, not
            # only #1 — the pasted source's indentation was ambiguous here.
            time_str = hearder_post.xpath('descendant::*[@class="smalltext"]')[0].text_content()
            comment['time'] = self.time_format(time_str)
            comment['title'] = hearder_post.xpath('descendant::*[@class="subject"]/a/text()')[0]
            html_string = html.tostring(hearder_post.xpath('descendant::*[@class="post"]')[0])
            comment['content'] = Binary(zlib.compress(pickle.dumps(html_string)))
        except Exception as e:
            self.logger.error('comment parse error {0} {1}'.format(response.url, str(e)))
        yield comment
def user_profile_parse(self, response):
    """Parse a user profile page into a BitCoinTalkUserProfile item.

    Bug fix: the original indexed ``user['data']['id']`` before the 'data'
    field had ever been assigned, which raises KeyError on scrapy Items;
    the dict is now built first and assigned to the field once complete.
    """
    tree = html.fromstring(response.text)
    user = BitCoinTalkUserProfile()
    data = {"id": response.meta['user_id']}
    for tr in tree.xpath('//*[@class="windowbg"]/table/tr'):
        try:
            if len(tr.findall('td')) < 2:
                continue
            key = self.text_format(tr.findall('td/b')[0].text)
            value = self.text_format(tr.findall('td')[1].text)
            if key in ('Date Registered:', 'Last Active:', 'Local Time:'):
                value = self.time_format(value)
            if key in ('Activity:', 'Posts:', 'Age:'):
                try:
                    value = int(value)
                except Exception as e:
                    self.logger.error('transfer string to int error {0} {1}'.format(response.url, str(e)))
            data[key] = value
        except Exception as e:
            self.logger.error('parse user profile error {0} {1}'.format(response.url, str(e)))
    user['data'] = data
    yield user
    def user_stat_parse(self, response):
        """Parse a user's statistics page into a BitCoinTalkUserStat item.

        Three stat panels are read by position: panels[0] = general totals,
        panels[2] = posts per board, panels[3] = activity per board.
        Expects ``response.meta['user_id']``.
        """
        tree = html.fromstring(response.text)
        user_id = response.meta['user_id']
        stat = BitCoinTalkUserStat()
        panels = tree.xpath('//*[@id="bodyarea"]//*[@class="windowbg2"]')
        # Panel 0: general counters ("Total Posts", "Total Time Spent Online", ...).
        general_stat = {}
        for tr in panels[0].xpath('table/tr'):
            try:
                key = self.text_format(tr.findall('td')[0].text)
                value = self.text_format(tr.findall('td')[1].text)
                if key == 'Total Time Spent Online:':
                    # Convert the "X days, Y hours and Z minutes." phrase to minutes.
                    value = self.calc_total_time(value)
                else:
                    # Strip thousands separators / stray text, keep digits only.
                    value = int(re.sub(r'[^0-9]', '', value))
                general_stat[key] = value
            except Exception as e:
                self.logger.error('parse user general stat error {0} {1}'.format(response.url, str(e)))
        # Panel 2: post counts per board (board name -> int).
        post_stat = {}
        for tr in panels[2].xpath('table/tr'):
            try:
                key = self.text_format(tr.findall('td')[0].find('a').text)
                value = self.text_format(tr.findall('td')[2].text)
                post_stat[key] = int(value)
            except Exception as e:
                self.logger.error('parse user post stat error {0} {1}'.format(response.url, str(e)))
        # Panel 3: activity percentage per board (kept as a string, '%' removed).
        activity_stat = {}
        for tr in panels[3].xpath('table/tr'):
            try:
                key = self.text_format(tr.findall('td')[0].find('a').text)
                value = self.text_format(tr.findall('td')[2].text).replace('%', '')
                activity_stat[key] = value
            except Exception as e:
                self.logger.error('parse user activity stat error {0} {1}'.format(response.url, str(e)))
        stat['data'] = {
            'user_id': user_id,
            'general_stat': general_stat,
            'post_stat': post_stat,
            'activity_stat': activity_stat,
            'type': 'origin_user_stat'
        }
        yield stat
    def user_history_parse(self, response):
        """Parse one page of a user's topic/post history.

        On the first page (meta['index'] == 0) it discovers the pagination
        depth and schedules requests for the remaining pages (20 entries per
        page). Each entry table yields one BitCoinTalkUserHistory item.
        meta['start'] selects started-topics vs. posts (URL base).
        """
        tree = html.fromstring(response.text)
        user_id = response.meta['user_id']
        if response.meta['start']:
            url_base = self.USER_START_URL
        else:
            url_base = self.USER_POST_URL
        # Only the first page fans out requests for the other pages.
        if response.meta['index'] == 0:
            if len(tree.xpath('//*[@class="catbg3"]/a[last()]')) == 0:
                page = 1
            elif tree.xpath('//*[@class="catbg3"]/a[last()]')[0].text == 'All':
                # When the last pagination link is "All", the page count is the
                # link before it.
                page = int(tree.xpath('//*[@class="catbg3"]/a[last()-1]')[0].text)
            else:
                page = int(tree.xpath('//*[@class="catbg3"]/a[last()]')[0].text)
            for i in range(1, page):
                yield scrapy.Request(url_base.format(user_id) + ';start={0}'.format(i * 20),
                                     meta={'user_id': user_id, 'index': i, 'start': response.meta['start']})
        tables = tree.xpath('//*[@id="bodyarea"]/table/tr/td/table')
        # First and last tables are the page header/footer, not entries.
        del tables[0]
        del tables[-1]
        for table in tables:
            post = BitCoinTalkUserHistory()
            post['start'] = response.meta['start']
            try:
                # NOTE(review): assumes post['data'] is pre-initialised to a dict
                # by the item definition — confirm.
                post['data']['user_id'] = response.meta['user_id']
                # Breadcrumb links: board / sub-board / topic.
                post['data']['module1'] = table.xpath('descendant::*[@class="titlebg2"]//a')[0].text
                post['data']['module2'] = table.xpath('descendant::*[@class="titlebg2"]//a')[1].text
                post['data']['module3'] = table.xpath('descendant::*[@class="titlebg2"]//a')[2].text
                href = table.xpath('descendant::*[@class="titlebg2"]//a')[2].attrib['href']
                post['data']['href'] = href
                # The topic link looks like "...topic=<topic_id>.<offset>#msg<id>".
                post['data']['topic_id'] = href[href.index('=') + 1:href.rfind('.')]
                post['data']['msg_id'] = href[href.index('#') + 1:]
                # Post body is stored compressed (pickled HTML string).
                html_string = html.tostring(table.xpath('descendant::*[@class="post"]')[0])
                post['data']['content'] = Binary(zlib.compress(pickle.dumps(html_string)))
            except Exception as e:
                self.logger.error('parse user history stat error {0} {1}'.format(response.url, str(e)))
            yield post
@staticmethod
def time_format(time_str):
if time_str is None:
return None
if time_str.startswith('Today'):
time_str = time_str.replace('Today at', time.strftime("%B %d, %Y,", time.localtime()))
return time.mktime(time.strptime(time_str, '%B %d, %Y, %I:%M:%S %p'))
@staticmethod
def text_format(text):
if text == 'N/A':
return None
if text is None:
return None
return text.replace('\n', '').replace('\t', '').strip()
def calc_total_time(self, time_str):
try:
if 'days' in time_str:
pattern = r'(\d*) days, (\d*) hours and (\d*) minutes.'
match_obj = re.match(pattern, time_str)
day = int(match_obj.group(1))
hour = int(match_obj.group(2))
minute = int(match_obj.group(3))
elif 'hours' in time_str:
pattern = r'(\d*) hours and (\d*) minutes.'
match_obj = re.match(pattern, time_str)
day = 0
hour = int(match_obj.group(1))
minute = int(match_obj.group(2))
else:
pattern = r'(\d*) minutes.'
match_obj = re.match(pattern, time_str)
day = 0
hour = 0
minute = int(match_obj.group(1))
except Exception as e:
self.logger.error('calc total time error ' + time_str + ' ' + str(e))
return None
return day * 60 * 24 + hour * 60 + minute
|
from odoo import models, fields, tools, api, _
from odoo.modules.module import get_module_resource
import base64
import re
from odoo.exceptions import ValidationError
from lxml import etree
from odoo.tools.misc import DEFAULT_SERVER_DATE_FORMAT
class ResUsersStudent(models.Model):
    """Extend res.users with links and flags for faculty students/professors."""
    _inherit = "res.users"
    _description = 'Adds a student field to the user and professor'

    student_ids = fields.One2many('faculty.student', 'user_id', string='Estudiante')
    professor_ids = fields.One2many('faculty.professor', 'user_id', string='Profesor')

    student_id_computed = fields.Many2one(comodel_name='faculty.student', compute='_compute_student_id_computed', string='Estudiante')

    @api.depends('student_ids')
    def _compute_student_id_computed(self):
        # Expose the first linked student record. Fix: the compute must assign
        # a value for EVERY record — the old code skipped records with no
        # student, which makes Odoo raise at compute time.
        for rec in self:
            rec.student_id_computed = rec.student_ids[0] if rec.student_ids else False

    professor_id_computed = fields.Many2one(comodel_name='faculty.professor', compute='_compute_professor_id_computed', string='Profesor')

    @api.depends('professor_ids')
    def _compute_professor_id_computed(self):
        # Same pattern as the student compute above.
        for rec in self:
            rec.professor_id_computed = rec.professor_ids[0] if rec.professor_ids else False

    is_faculty_student = fields.Boolean(compute='_compute_is_faculty_student', search="_search_is_faculty_student", string='Es estudiante?')

    def _compute_is_faculty_student(self):
        # A "student" user belongs to the student group and to neither
        # the professor nor the admin group.
        for rec in self:
            rec.is_faculty_student = rec.has_group("faculty.group_faculty_student") and not rec.has_group("faculty.group_faculty_professor") and not rec.has_group("faculty.group_faculty_admin")

    def _search_is_faculty_student(self, operator, value):
        # NOTE: operator/value are ignored — the domain always selects
        # student-only users (group membership is not directly searchable).
        users = self.env['res.users'].sudo().search([])
        list_users = []
        for rec in users:
            if rec.has_group("faculty.group_faculty_student") and not rec.has_group("faculty.group_faculty_professor") and not rec.has_group("faculty.group_faculty_admin"):
                list_users.append(rec.id)
        return [('id', 'in', list_users)]

    # BUG FIX: compute= previously pointed at '_compute_is_faculty_student',
    # which never assigns is_faculty_professor, so the flag was never computed.
    is_faculty_professor = fields.Boolean(compute='_compute_is_faculty_professor', search="_search_is_faculty_professor", string='Es profesor?')

    def _compute_is_faculty_professor(self):
        for rec in self:
            rec.is_faculty_professor = rec.has_group("faculty.group_faculty_professor") and not rec.has_group("faculty.group_faculty_student") and not rec.has_group("faculty.group_faculty_admin")

    def _search_is_faculty_professor(self, operator, value):
        # Same note as _search_is_faculty_student: operator/value are ignored.
        users = self.env['res.users'].sudo().search([])
        list_users = []
        for rec in users:
            if rec.has_group("faculty.group_faculty_professor") and not rec.has_group("faculty.group_faculty_student") and not rec.has_group("faculty.group_faculty_admin"):
                list_users.append(rec.id)
        return [('id', 'in', list_users)]
# Module: get_matrix.py
# Description: build matrices that combine proteomics and RNA data.
import pandas as pd
import hashlib
import os

# Input/output file locations.
data_dir = "/home/amber/Documents/Final-project/data/"
train_pro = data_dir + "a_total_pro.csv"
train_rna = data_dir + "a_total_rna.csv"
tab_file = data_dir + "a_sum_tab.csv"
outputfile3 = data_dir + "a_total_matrix.csv"
outputfile2 = data_dir + "a_rna_matrix.csv"
outputfile1 = data_dir + "a_pro_matrix.csv"

# Load the sample mapping table and the two feature matrices.
d_tab = pd.read_csv(tab_file, sep=",")
d_tra_pro = pd.read_csv(train_pro, sep=",")
d_tra_rna = pd.read_csv(train_rna, sep=',')

# Sample table: sample id plus the join keys into each feature matrix.
tab_df = d_tab[['sample', 'Proteomics', 'RNAseq']]

pro_headers = list(d_tra_pro.columns.values)
print("Proteomicas dimensions: %d" % (len(pro_headers)-1))
train_pro_df = d_tra_pro[pro_headers]

rna_headers = list(d_tra_rna.columns.values)
print("RNA-seq dimensions: %d" % (len(rna_headers)-1))
train_rna_df = d_tra_rna[rna_headers]

# Attach proteomics features to the sample table, dropping the RNA key.
result1 = pd.merge(tab_df, train_pro_df, on='Proteomics', how="left")
result1 = result1.drop(['RNAseq'], axis=1)

# Attach RNA-seq features to the sample table, dropping the proteomics key.
result2 = pd.merge(tab_df, train_rna_df, on='RNAseq', how="left")
result2 = result2.drop(['Proteomics'], axis=1)

# Join both matrices on the sample id to get the combined matrix.
result3 = pd.merge(result1, result2, on='sample', how="left")

result1.to_csv(outputfile1, index=False)
result2.to_csv(outputfile2, index=False)
result3.to_csv(outputfile3, index=False)
print("Matrix combined with proteomics and RNA data completed!")
print("Total dimensions: %d" % (len(list(result3.columns.values))-3))
# Dependencies
import pandas as pd
import numpy as np
from adtk.detector import ThresholdAD
from adtk.visualization import plot
from adtk.data import validate_series

# E-commerce data source; `date` is the datetime column.
DATE_COL = 'date'
csv_data = 'ecommerce_data.csv'


def load_data():
    """Load the e-commerce CSV, parse the date column, and publish the
    frame as the module-level ``data`` (kept for backwards compatibility)."""
    global data
    data = pd.read_csv(csv_data)
    data[DATE_COL] = pd.to_datetime(data[DATE_COL])
    return data


load_data()

# ADTK works on a datetime-indexed series, so reload with the date column
# as a parsed DatetimeIndex (the manual reindexing that used to live here
# is unnecessary).
data = pd.read_csv(csv_data, index_col=DATE_COL, parse_dates=True)
s = data['sessions']
s = validate_series(s)

# Threshold analysis: flag sessions outside [60000, 100000].
threshold_ad = ThresholdAD(high=100000, low=60000)
anomalies = threshold_ad.detect(s)

# Visualise the detected anomalies on the series.
plot(s, anomaly=anomalies, ts_linewidth=1, ts_markersize=3, anomaly_markersize=5, anomaly_color='red', anomaly_tag="marker")
def solve(N, Ma, Mb, T):
    """Minimum cost to mix packages so total (a, b) is in the ratio Ma:Mb.

    Knapsack DP: dp[i][j] = minimum cost to obtain exactly i grams of A and
    j grams of B (totals are bounded by 400 as in the original table size).

    :param N: number of packages.
    :param Ma: target ratio numerator for substance A.
    :param Mb: target ratio numerator for substance B.
    :param T: list of [a, b, cost] per package.
    :return: minimum cost, or -1 when no non-empty combination matches.
    """
    INF = float('inf')
    dp = [[INF] * 401 for _ in range(401)]
    dp[0][0] = 0
    sum_a = 0
    sum_b = 0
    for a, b, c in T:
        sum_a += a
        sum_b += b
        # Iterate downwards so each package is used at most once (0/1 knapsack).
        for i in range(sum_a, a - 1, -1):
            for j in range(sum_b, b - 1, -1):
                dp[i][j] = min(dp[i][j], dp[i - a][j - b] + c)
    best = INF
    for i in range(1, sum_a + 1):
        for j in range(1, sum_b + 1):
            # i : j == Ma : Mb  <=>  i * Mb == j * Ma (avoids division).
            if i * Mb == j * Ma:
                best = min(best, dp[i][j])
    return best if best != INF else -1


if __name__ == '__main__':
    N, Ma, Mb = [int(_) for _ in input().split()]
    T = [[int(_) for _ in input().split()] for i in range(N)]
    print(solve(N, Ma, Mb, T))
# JHUGen input-card template; {placeholders} are filled from the map built in makecard().
card = "Process={spin} PChannel={PChannel} VegasNc0=1000000 MReso={mass} GaReso={width} DecayMode1={DecayMode1} DecayMode2={DecayMode2} {couplings} LHAPDF=NNPDF30_lo_as_0130/NNPDF30_lo_as_0130.info ReadCSmax"
# Output filename template for each generated card.
cardname = "{spin_forname}{prod}To{decaymode}To{decaymode_final}_{width_forname}_M-{mass}_13TeV-JHUgenV6.input"
# Sample grid: resonance masses [GeV] and width/mass fractions per final state.
masses = [300, 350, 400, 500, 600, 750, 900, 1200, 1500, 2000, 3000, 4000, 5000]
widthfractions_leptonic = [0.00014, 0.056]
widthfractions_hadronic = [0.00014, 0.10]
spins = [0, 2]
# Production channels; makecard labels PChannel==1 as "qqbar" for spin 2.
PChannels = [0, 1]
# Z decay modes: 1 -> hadronic ("2QG"), 8 -> leptonic ("2LG") per makecard.
DecayMode1s = 1, 8
couplingsspin0 = "ghz1=0,0 ghzgs2=1,0"
couplingsspin0gammastargamma = "ghz1=0,0 ghgsgs2=1,0"
couplingsspin2 = "a1=1,0 b1=1,0"
def makecard(mass, widthfraction, spin, PChannel, DecayMode1, DecayMode2, couplings):
    """Write one JHUGen input card for the given parameter point.

    Raises ValueError for any combination outside the sample plan; the
    driver loop below relies on that to skip invalid grid points.
    """
    themap = {
        "mass": mass,
        "width": widthfraction * mass,
        "width_forname": "width" + str(widthfraction).replace(".", "p"),
        "spin": spin,
        "PChannel": PChannel,
        "DecayMode1": DecayMode1,
        "DecayMode2": DecayMode2,
        "couplings": couplings,
    }

    if mass not in masses:
        raise ValueError("Invalid mass {}".format(mass))
    if PChannel not in PChannels:
        raise ValueError("Invalid PChannel {}".format(PChannel))
    if DecayMode2 != 7:
        raise ValueError("Invalid DecayMode2 {}".format(DecayMode2))

    # The first Z decay mode fixes the final state and the allowed widths.
    if DecayMode1 == 1:
        themap["decaymode_final"] = "2QG"
        if widthfraction not in widthfractions_hadronic:
            raise ValueError("Invalid widthfraction {} for hadronic Z".format(widthfraction))
    elif DecayMode1 == 8:
        themap["decaymode_final"] = "2LG"
        if widthfraction not in widthfractions_leptonic:
            raise ValueError("Invalid widthfraction {} for leptonic Z".format(widthfraction))
    else:
        raise ValueError("Invalid DecayMode1 {}".format(DecayMode1))

    # Spin selects the naming, production label, and allowed couplings.
    if spin == 0:
        if PChannel != 0:
            raise ValueError("Invalid PChannel {} for spin 0".format(PChannel))
        themap["spin_forname"] = "Higgs0PM"
        themap["prod"] = ""
        if couplings == couplingsspin0:
            themap["decaymode"] = "ZG"
        elif couplings == couplingsspin0gammastargamma and mass == 750:
            # Gamma* gamma final state is only produced at 750 GeV.
            themap["decaymode"] = "GG"
        else:
            raise ValueError("Invalid couplings {}".format(couplings))
    elif spin == 2:
        themap["spin_forname"] = "Graviton2PM"
        themap["decaymode"] = "ZG"
        if PChannel == 0:
            themap["prod"] = ""
        elif PChannel == 1:
            themap["prod"] = "qqbar"
        else:
            raise ValueError("Invalid PChannel {}".format(PChannel))
        if couplings != couplingsspin2:
            raise ValueError("Invalid couplings {}".format(couplings))
    else:
        raise ValueError("Invalid spin {}".format(spin))

    with open(cardname.format(**themap), "w") as f:
        f.write(card.format(**themap))
# The second Z decay mode is fixed for all samples.
DecayMode2 = 7
# Brute-force the full parameter grid; makecard raises ValueError for
# combinations outside the sample plan, and those are simply skipped.
for mass in masses:
    for spin in spins:
        for PChannel in PChannels:
            for DecayMode1 in DecayMode1s:
                for widthfraction in set(widthfractions_hadronic+widthfractions_leptonic):
                    for couplings in couplingsspin0, couplingsspin0gammastargamma, couplingsspin2:
                        try:
                            makecard(mass, widthfraction, spin, PChannel, DecayMode1, DecayMode2, couplings)
                        except ValueError:
                            pass
|
# oop
# Classes should be singular
class PlayerCharacter:
    """OOP demo: a player with a shared class attribute and per-instance state."""

    # Class object attribute: shared (static) across all instances.
    membership = True

    def __init__(self, name, age):
        """Store the player's name and age — but only while membership is on."""
        if PlayerCharacter.membership:
            # `self` refers to the instance being constructed (e.g. player1).
            self.name = name  # attribute/property
            self.age = age

    def shout(self):
        """Print a greeting with the player's name (returns None)."""
        print(f'My name is {self.name}!')
player1 = PlayerCharacter('Cindy', 40)
# shout() prints the greeting itself and returns None, so this line prints
# the greeting followed by "None".
print(player1.shout())
# shows blueprint
# help(player1)
|
""" Unit tests for the xlrx module
@author: David Megginson
@organization: UN Centre for Humanitarian Data
@license: Public Domain
@date: Started 2020-03-20
"""
import unittest
import xlsxr
from . import resolve_path
class TestWorkbook(unittest.TestCase):
    """Tests for opening xlsx workbooks and reading top-level metadata."""

    def setUp(self):
        # Fresh workbook from the local fixture for each test.
        self.workbook = xlsxr.Workbook(filename=resolve_path("simple.xlsx"))

    def test_open_workbook_from_stream(self):
        # A workbook can also be constructed from an open binary stream.
        with open(resolve_path("simple.xlsx"), "rb") as input:
            xlsxr.Workbook(stream=input)

    def test_open_workbook_from_url(self):
        # NOTE(review): hits the network — requires connectivity to GitHub.
        xlsxr.Workbook(url="https://github.com/davidmegginson/xlsx-reader/blob/main/tests/files/simple.xlsx?raw=true")

    def test_open_non_excel_archive(self):
        # A zip archive that is not an xlsx should raise TypeError.
        with self.assertRaises(TypeError):
            xlsxr.Workbook(filename=resolve_path("not-excel.zip"))

    def test_sheet_count(self):
        self.assertEqual(1, len(self.workbook.sheets))

    def test_shared_strings(self):
        # Shared strings from the fixture should be loaded.
        self.assertTrue('UNICEF' in self.workbook.shared_strings)
        self.assertTrue('Sector/Cluster' in self.workbook.shared_strings)

    def test_relations(self):
        self.assertTrue('rId2' in self.workbook.relations)

    def test_get_sheet(self):
        self.assertIsNotNone(self.workbook.sheets[0])
|
# 크루스칼에 대해 들어가기 전 '서로소 집합'이란 것에 대해 알아야 합니다
# 서로소집합이란 겹치는 원소가 없는 집합들입니다
# 즉 정점이 5개 있는 그래프는 초기에 정점 하나 씩, 5개의 집합이 있는 서로소 집합이 생깁니다
# 서로소 집합 자료구조를 이용해서 Union-find 알고리즘을 이용하면 두 개의 원소가 같은 집합인지 판단할 수 있습니다
# 크루스칼이란 프림처럼 확장하며 탐색하는 게 아닌 거리에 따라 최솟값부터 연결하는 구조입니다
# 크루스칼은 프림과 다르게 양방향인것을 의식해서 start, end를 각각 추가안해주고 한번만 연결해줘도 괜찮습니다(union)
# MST (minimum spanning tree)가 되기 위해선 cycle(순환 구조)가 생기면 안됩니다
import sys
def find(x):
    """Return the root representative of x's set, compressing the path
    (every visited node is re-pointed directly at the root in Vroot)."""
    if Vroot[x] != x:
        Vroot[x] = find(Vroot[x])
    return Vroot[x]
def union(x, y):
    """Merge the sets containing x and y; the smaller vertex index wins
    as the root. No-op when they are already in the same set."""
    root_x = find(x)
    root_y = find(y)
    if root_x == root_y:
        return
    if root_y >= root_x:
        Vroot[root_y] = root_x
    else:
        Vroot[root_x] = root_y
# Kruskal's MST: sort edges by weight and greedily take every edge that does
# not close a cycle (checked with union-find on Vroot).
V, E = map(int, sys.stdin.readline().split())
Vroot = list(range(V + 1))  # root (representative) of each vertex
Elist = []                  # edges as [start, end, weight]
for _ in range(E):
    Elist.append(list(map(int, input().split())))
Elist.sort(key=lambda edge: edge[2])  # cheapest edges first

ans = 0  # accumulated MST weight
cnt = 0  # number of edges accepted so far
for start, end, weight in Elist:
    # Only join vertices from different components (no cycles in an MST).
    if find(start) != find(end):
        union(start, end)
        cnt += 1
        ans += weight
        if cnt == V - 1:  # V-1 edges connect all vertices: done
            break
print(ans)
|
from itertools import islice
import os
import pandas as pd
#Extracting Proteins in a file
def ExtractingProteins(m):
    """Extract the protein ID (first 6 characters) from every line of file
    *m* and write one ID per line to 'proteins.txt'.

    Each ID is also echoed to stdout. Fix: the original leaked both file
    handles (the input file was never closed and the output only closed on
    the happy path); both are now managed with a ``with`` block.

    :param m: path of the input text file (one protein record per line).
    """
    with open(m, 'r') as infile, open('proteins.txt', 'w') as f:
        for j in infile:
            print(j[0:6])
            f.write(j[0:6])
            f.write('\n')
ExtractingProteins("Bakers.txt")
# Read the extracted protein IDs back into a list for use when parsing
# sequences out of YEAST.fasta below.
lines = [line.rstrip('\n') for line in open('proteins.txt')]
#Parsing Sequences from YEAST.fasta
def ExtractingSequences(m):
    """Scan the FASTA file *m* for headers whose ID (characters 4..10)
    matches one of the protein IDs in the module-level ``lines``, and write
    'ID,sequence' rows to 'sequences.txt'. Prints the number of matches.
    """
    count=0   # number of matching headers found
    pp=0      # 1-based index of the current line in the file
    ind=0     # index of the first sequence line after a matching header
    f = open('sequences.txt','w')
    infile = open(m, 'r')
    text = infile.readlines()
    for j in (text):
        pp=pp+1
        # Header lines carry the protein ID at character positions 4..10.
        if any(word in j[4:10] for word in lines):
            f.write(j[4:10])
            f.write(',')
            count=count+1
            ind=pp  # pp is 1-based, so text[ind] is the line AFTER the header
            # Concatenate sequence lines until the next '>' header.
            # NOTE(review): assumes a record spans fewer than 100 lines and
            # that another header (or line) follows — the last record in the
            # file would raise IndexError on text[ind]; confirm inputs.
            for i in range(100):
                if (text[ind][0]==">"):
                    break
                else:
                    f.write(text[ind].rstrip('\n'))
                    ind=ind+1
            f.write('\n')
    f.close()
    print (count)
ExtractingSequences("YEAST.fasta")
# Replace ' ; ' separators with commas in Bakers.txt, rewriting it in place.
with open('Bakers.txt', 'r+') as f:
    text = f.read()
    f.seek(0)
    f.truncate()
    f.write(text.replace(' ; ', ','))
# Renaming to a csv file
os.rename('Bakers.txt', 'Bakers.csv')
# Split a CSV line into stripped fields.
m = lambda x: map(str.strip, x.split(','))
# Read Bakers.csv into a (ProteinID, Class) DataFrame — one row per
# (protein, class) pair, empty fields dropped.
with open('Bakers.csv') as f:
    df = pd.DataFrame(
        [[x, y] for x, *ys in map(m, f.readlines()) for y in ys if y],
        columns=['ProteinID', 'Class']
    )
# Renaming to a csv file
os.rename('sequences.txt', 'sequences.csv')
# Read sequences.csv into a (ProteinID, Sequence) DataFrame the same way.
with open('sequences.csv') as f:
    df2 = pd.DataFrame(
        [[x, y] for x, *ys in map(m, f.readlines()) for y in ys if y],
        columns=['ProteinID', 'Sequence']
    )
# Merge both DataFrames into a single table keyed by protein ID.
df_new=pd.merge(df, df2, on='ProteinID')
columnsTitles=["ProteinID","Sequence","Class"]
df_new=df_new.reindex(columns=columnsTitles)
# Class frequencies (top ~20 most frequent classes — 21 in this dataset).
countt=df_new['Class'].value_counts()
# Keep only classes appearing more than 54 times.
df_neww=df_new.groupby('Class')['ProteinID','Sequence','Class'].filter(lambda x: len(x) > 54)
df_neww = df_neww.reset_index(drop=True)
# Save the full table.
df_neww.to_csv("Dataset.csv", encoding='utf-8', index=False,header=False)
# Drop the first 3 characters of Class (presumably a fixed prefix — confirm)
# and save Class+Sequence as train.csv for use in the ANN.
df_neww['Class'] = df_neww['Class'].str[3:]
df_neww.to_csv("train.csv", encoding='utf-8', index=False,columns=['Class','Sequence'],header=False)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#2016 root <root@VM-17-202-debian>
from math import sqrt

# Set comprehension: int(sqrt(x)) for x in 0..29 collapses to {0, 1, 2, 3, 4, 5}.
nums = {int(sqrt(x)) for x in range(30)}
# Fix: the Python-2-only `print nums` statement is replaced with the call
# form, which prints identically on Python 2 (single argument) and Python 3.
print(nums)
# prints > set([0, 1, 2, 3, 4, 5]) on Python 2; {0, 1, 2, 3, 4, 5} on Python 3
|
from flask import Blueprint

# Blueprint for the registration pages. The views module is imported AFTER
# the blueprint exists so that it can import `register` without causing a
# circular-import error (standard Flask blueprint pattern).
register = Blueprint('register', __name__)

from app.register import views
def resp_success_status(msg: str, **data) -> dict:
    """Build a success (code 200) response payload.

    :param msg: action description; ' 成功' ("success") is appended to it.
    :param data: optional payload, included under 'data' only when non-empty.
    :return: the response dict.
    """
    # Fix: `data and data != {}` was redundant — an empty dict is already
    # falsy, so plain truthiness expresses the same check.
    if data:
        return {
            'code': 200,
            'message': f'{msg} 成功',
            'data': data,
        }
    return {
        'code': 200,
        'message': f'{msg} 成功',
    }
def resp_error_status(msg: str) -> dict:
    """Build a failure (code 300) response payload.

    :param msg: error description, returned verbatim as the message.
    :return: the response dict.
    """
    return {'code': 300, 'message': msg}
def jwt_response_payload_handler(token, user=None, request=None, role=None):
    """Build the payload returned after a successful JWT authentication.

    :param token: the issued JWT string.
    :param user: the authenticated user object (must not be None).
    :param request: the client's request (unused).
    :param role: the user's role (unused).
    :return: identity fields plus the token.
    """
    # Prefer the display name, falling back to the login name.
    if user.first_name:
        name = user.first_name
    else:
        name = user.username
    # Fix: the payload dict used to be written twice — once inside a leftover
    # debug print ("AAA= ") and once in the return — risking the two copies
    # drifting apart. Build it once; the debug print is removed.
    return {
        "authenticated": 'true',
        'id': user.id,
        'name': name,
        'username': user.username,
        'email': user.email,
        'token': token,
    }
|
def main():
    """Prompt for monthly usage and print the total cost.

    Example session:
        Dame el número de mensajes: 38
        Dame el número de megas: 3.1
        Dame el número de minutos: 78
        El costo mensual es: 95.28
    """
    num_mensajes = int(input("Dame el número de mensajes: "))
    num_megas = float(input("Dame el número de megas: "))
    num_minutos = int(input("Dame el número de minutos: "))
    # Each message, mega, and minute costs 0.8 (kept as three separate terms
    # so the floating-point result matches the documented example exactly).
    costo_mensual = ((num_mensajes*0.8) + (num_megas*0.8) + (num_minutos*0.8))
    print("El costo mensual es:", costo_mensual)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.mime import base
from email.mime import multipart
import os
class EmailLibrary(object):
    """Robot Framework helper library for sending mail through Gmail SMTP.

    Python 2 code (uses the print statement).
    """
    # Single shared instance across the whole Robot Framework run.
    ROBOT_LIBRARY_SCOPE = 'Global'

    def __init__(self):
        print 'send email utility'

    def send_mime_mail (self,from_user,from_password,to, subject, text, attach):
        # NOTE(review): this method builds an attachment part but never sends
        # anything, and `multipart`/`base` are the email.mime MODULES (calling
        # them would raise TypeError) — it looks unfinished; confirm before use.
        msg = multipart()
        part = base('application', 'octet-stream')
        part.set_payload(open(attach, 'rb').read())  # attach the file contents
        part.add_header('Content-Disposition', 'attachment; filename="%s"' %os.path.basename(attach))
        msg.attach(part)  # add the attachment part to the message

    def send_mail_smtp(self,from_addr,from_password,to_addr, subject, text):
        # Server connection: Gmail with STARTTLS on port 587.
        SMTPServer = smtplib.SMTP('smtp.gmail.com', 587)
        SMTPServer.ehlo()
        SMTPServer.starttls()
        SMTPServer.login(from_addr,from_password)
        # Message body and headers.
        SMTPMessage = MIMEText(text)
        SMTPMessage ['Subject'] = subject
        SMTPMessage ['To'] = to_addr
        # Send the mail.
        SMTPServer.sendmail(from_addr, to_addr, SMTPMessage.as_string())
        # Close the server connection.
        SMTPServer.quit()
|
import sys
if '../..' not in sys.path:
sys.path.append('../..')
import numpy as np
import matplotlib.pyplot as plt
import sec_emission_model_furman_pivi as fp
import mystyle as ms
from scipy.constants import e as qe
plt.close('all')
ms.mystyle(12)
linewid = 2                 # line width used for every curve below
me = 9.10938356e-31         # electron mass [kg]
def del_elas_ECLOUD(energy, R_0=0.7, E_max=332., E_0=150.):
    """ECLOUD model: SEY of elastically backscattered electrons vs. impact energy.

    NOTE(review): `E_max` is accepted but unused here; it is kept only so the
    signature stays compatible with the companion curve functions.
    """
    sqrt_e = np.sqrt(energy)
    sqrt_shifted = np.sqrt(energy + E_0)
    ratio = (sqrt_e - sqrt_shifted) / (sqrt_e + sqrt_shifted)
    return R_0 * ratio ** 2
def del_true_ECLOUD(energy, del_max, s=1.35, E_max=332., costheta=1.):
    """ECLOUD model: SEY of true secondaries with angular dependence.

    The peak position and yield are both scaled with the impact angle
    (costheta = 1 means normal incidence, which leaves them unchanged).
    """
    angular_factor = np.exp(0.5 * (1. - costheta))
    E_max_tilde = E_max * (1. + 0.7 * (1. - costheta))
    x = energy / E_max_tilde
    return angular_factor * (del_max * s * x / (s - 1 + x ** s))
# Furman-Pivi surface parameters, tweaked to mimic ECLOUD behaviour:
# ECLOUD theta0 dependence enabled and the rediffused component excluded.
furman_pivi_surface_tweak = {
    'use_modified_sigmaE': False,
    'use_ECLOUD_theta0_dependence': True,
    'use_ECLOUD_energy': False,
    'conserve_energy': False,
    'exclude_rediffused': True,
    'choice': 'poisson',
    'M_cut': 10,
    'p_n': np.array([1.21963859, 1.66070543, 1.21935223, 1.09987752, 4.28158656, 1.02052557, 1.0247471, 1.02307995, 29.93491271, 1.02045612]),
    'eps_n': np.array([7.44033631e+00, 2.47339424e+00, 7.45004962e+00, 1.63618903e+01, 4.97986255e-01, 7.96170380e+01, 6.60354258e+01, 7.08053955e+01, 5.64779654e-02, 7.98873331e+01]),
    # Parameters for backscattered electrons
    'p1EInf': 0.002158,  # Changed this
    'p1Ehat': 0.709633,  # Changed this
    'eEHat': 0.,
    'w': 46.028959,  # Changed this
    'p': 0.468907,  # Changed this
    'e1': 0.,  # Changed this
    'e2': 2.,
    'sigmaE': 2.,
    # Parameters for rediffused electrons
    'p1RInf': 0.2,
    'eR': 0.041,
    'r': 0.104,
    'q': 0.5,
    'r1': 0.26,
    'r2': 2.,
    # Parameters for true secondaries
    'deltaTSHat': 1.8,
    'eHat0': 332.,
    's': 1.35,
    't1': 0.5,  # 0.706340, # Changed this
    't2': 1.,  # 0.715223, # Changed this
    't3': 0.7,
    't4': 1.}
# Furman-Pivi surface parameters for the LHC case (all components included).
furman_pivi_surface_LHC = {
    'use_modified_sigmaE': False,
    'use_ECLOUD_theta0_dependence': False,
    'use_ECLOUD_energy': False,
    'conserve_energy': False,
    'exclude_rediffused': False,
    'choice': 'poisson',
    'M_cut': 10,
    'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),
    'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),
    # Parameters for backscattered electrons
    'p1EInf': 0.02,
    'p1Ehat': 0.496,
    'eEHat': 0.,
    'w': 60.86,
    'p': 1.,
    'e1': 0.26,
    'e2': 2.,
    'sigmaE': 2.,
    # Parameters for rediffused electrons
    'p1RInf': 0.2,
    'eR': 0.041,
    'r': 0.104,
    'q': 0.5,
    'r1': 0.26,
    'r2': 2.,
    # Parameters for true secondaries
    'deltaTSHat': 1.8848,
    'eHat0': 332.,
    's': 1.35,
    't1': 0.5,  # t1 and t2 based on taylor expansion
    't2': 1.,  # of PyECLOUD formula for E_max(theta)
    't3': 0.7,
    't4': 1.}
# Furman-Pivi surface parameters actually passed to the model below.
furman_pivi_surface = {
    'use_modified_sigmaE': False,
    'use_ECLOUD_theta0_dependence': False,
    'use_ECLOUD_energy': False,
    'conserve_energy': False,
    'exclude_rediffused': False,
    'choice': 'poisson',
    'M_cut': 10,
    'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),
    'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),
    # Parameters for backscattered electrons
    'p1EInf': 0.02,
    'p1Ehat': 0.496,
    'eEHat': 0.,
    'w': 60.86,
    'p': 1.,
    'e1': 0.26,
    'e2': 2.,
    'sigmaE': 2.,
    # Parameters for rediffused electrons
    'p1RInf': 0.2,
    'eR': 0.041,
    'r': 0.104,
    'q': 0.5,
    'r1': 0.26,
    'r2': 2.,
    # Parameters for true secondaries
    'deltaTSHat': 1.6 - 0.22,  #1.8848,
    'eHat0': 276.8,
    's': 1.54,
    't1': 0.66,
    't2': 0.8,
    't3': 0.7,
    't4': 1.}
# POSINST parameters scaled so that the total SEY maximum is 1.6.
furman_pivi_surface_scaled = {
    'use_modified_sigmaE': False,
    'use_ECLOUD_theta0_dependence': False,
    'use_ECLOUD_energy': False,
    'conserve_energy': False,
    'exclude_rediffused': False,
    'choice': 'poisson',
    'M_cut': 10,
    'p_n': np.array([2.5, 3.3, 2.5, 2.5, 2.8, 1.3, 1.5, 1.5, 1.5, 1.5]),
    'eps_n': np.array([1.5, 1.75, 1., 3.75, 8.5, 11.5, 2.5, 3., 2.5, 3.]),
    # Parameters for backscattered electrons
    'p1EInf': 0.015294,  # Changed this
    'p1Ehat': 0.382362,  # Changed this
    'eEHat': 0.,
    'w': 60.86,
    'p': 1.,
    'e1': 0.26,
    'e2': 2.,
    'sigmaE': 2.,
    # Parameters for rediffused electrons
    'p1RInf': 0.152945,  # Changed this
    'eR': 0.041,
    'r': 0.104,
    'q': 0.5,
    'r1': 0.26,
    'r2': 2.,
    # Parameters for true secondaries
    'deltaTSHat': 1.441353,  # Changed this
    'eHat0': 276.8,
    's': 1.54,
    't1': 0.66,
    't2': 0.8,
    't3': 0.7,
    't4': 1.}
# Enable the angular scalings: SEY scaling and E_max shift with cos(theta).
flag_costheta_delta_scale = True
flag_costheta_Emax_shift = True
# Instantiate the Furman-Pivi SEY model under test with the surface above.
sey_mod = fp.SEY_model_furman_pivi(E_th=35., sigmafit=1.0828, mufit=1.6636, secondary_angle_distribution='cosine_3D',
                                   switch_no_increase_energy=0, thresh_low_energy=-1,
                                   furman_pivi_surface=furman_pivi_surface, flag_costheta_delta_scale=flag_costheta_delta_scale,
                                   flag_costheta_Emax_shift=flag_costheta_Emax_shift)
def extract_sey_curves(n_rep, E_impact_eV_test, cos_theta_test, charge, mass):
    """Monte-Carlo extraction of SEY curves from the module-level `sey_mod`.

    For each (cos(theta), impact energy) pair, fires `n_rep` macroparticles
    at the surface and records, per event type, the ratio of emitted to
    impacting charge.

    Returns a dict mapping event-type name to an array of shape
    (len(cos_theta_test), len(E_impact_eV_test)).

    NOTE(review): the `charge` parameter is unused — the module-level `qe`
    is used in the velocity computation instead; confirm that is intended.
    """
    deltas = {}
    for etype in list(sey_mod.event_types.keys()):
        etype_name = sey_mod.event_types[etype]
        deltas[etype_name] = np.zeros((len(cos_theta_test), len(E_impact_eV_test)))
    print('Extracting SEY curves...')
    for i_ct, ct in enumerate(cos_theta_test):
        print(('%d/%d' % (i_ct + 1, len(cos_theta_test))))
        for i_ene, Ene in enumerate(E_impact_eV_test):
            nel_impact = np.ones(n_rep)
            # Assuming normal is along x
            v_mod = np.sqrt(2 * Ene * qe / mass) * np.ones_like(nel_impact)
            vx = v_mod * ct
            vy = v_mod * np.sqrt(1 - ct * ct)
            nel_emit_tot_events, event_type, event_info,\
                nel_replace, x_replace, y_replace, z_replace, vx_replace, vy_replace, vz_replace, i_seg_replace,\
                nel_new_MPs, x_new_MPs, y_new_MPs, z_new_MPs, vx_new_MPs, vy_new_MPs, vz_new_MPs, i_seg_new_MPs =\
                sey_mod.impacts_on_surface(
                    mass=mass, nel_impact=nel_impact, x_impact=nel_impact * 0, y_impact=nel_impact * 0, z_impact=nel_impact * 0,
                    vx_impact=vx * np.ones_like(nel_impact),
                    vy_impact=vy * np.ones_like(nel_impact),
                    vz_impact=nel_impact * 0,
                    Norm_x=np.ones_like(nel_impact), Norm_y=np.zeros_like(nel_impact),
                    i_found=np.int_(np.ones_like(nel_impact)),
                    v_impact_n=vx * np.ones_like(nel_impact),
                    E_impact_eV=Ene * np.ones_like(nel_impact),
                    costheta_impact=ct * np.ones_like(nel_impact),
                    nel_mp_th=1,
                    flag_seg=True)
            # SEY component per event type = emitted charge / impacting charge.
            for etype in list(sey_mod.event_types.keys()):
                etype_name = sey_mod.event_types[etype]
                thisdelta = deltas[etype_name]
                thisdelta[i_ct, i_ene] = np.sum(
                    nel_emit_tot_events[event_type == etype]) / np.sum(nel_impact)
                deltas[etype_name] = thisdelta
    print('Done extracting SEY curves.')
    return deltas
# Scan grid: 10 impact angles, energies up to 2 keV (finer below 500 eV).
cos_theta_test = np.linspace(0, 1., 10)
E_impact_eV_test = np.array(list(np.arange(0, 499., 5.)) + list(np.arange(500., 2000, 25.)))
n_rep = int(1e3)  # macroparticles per grid point
deltas = extract_sey_curves(n_rep, E_impact_eV_test, cos_theta_test, charge=qe, mass=me)
del_true_mat = deltas['true']
del_elast_mat = deltas['elast']
del_rediff_mat = deltas['rediff']
del_absorb_mat = deltas['absorb']
plt.close('all')
ms.mystyle_arial()
# Six panels: one per SEY component plus two totals.
fig1 = plt.figure(1, figsize=(3 * 8, 2 * 8))
fig1.set_facecolor('w')
sp1 = fig1.add_subplot(2, 3, 1)
sp2 = fig1.add_subplot(2, 3, 2, sharex=sp1)
sp3 = fig1.add_subplot(2, 3, 3, sharex=sp1)
sp4 = fig1.add_subplot(2, 3, 4, sharex=sp1)
sp5 = fig1.add_subplot(2, 3, 5, sharex=sp1)
sp6 = fig1.add_subplot(2, 3, 6, sharex=sp1)
# Monte-Carlo curves: one colour per impact angle.
for i_ct, ct in enumerate(cos_theta_test):
    thiscol = ms.colorprog(i_ct, len(cos_theta_test))
    label = 'costheta=%.2f' % ct
    sp1.plot(E_impact_eV_test, del_true_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
    sp2.plot(E_impact_eV_test, del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
    sp3.plot(E_impact_eV_test, del_rediff_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
    sp4.plot(E_impact_eV_test, del_absorb_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
    sp5.plot(E_impact_eV_test, del_true_mat[i_ct, :] + del_rediff_mat[i_ct, :] + del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
    sp6.plot(E_impact_eV_test, del_true_mat[i_ct, :] + del_elast_mat[i_ct, :], color=thiscol, label=label, linewidth=linewid)
sp3.plot(0, 0, 'white', label='Model')
sp1.set_ylabel('Delta true')
sp2.set_ylabel('Delta elast')
sp3.set_ylabel('Delta rediff')
sp4.set_ylabel('Delta absorb')
sp5.set_ylabel('Delta total')
sp6.set_ylabel(r'$\delta_{ts} + \delta_{e}$')
for sp in [sp1, sp2, sp3, sp4, sp5, sp6]:
    sp.grid(True)
    sp.set_xlabel('Electron energy [eV]')
plt.subplots_adjust(right=0.99, left=.05)
# Overlay the model's analytic curves (black) for each angle.
test_obj = sey_mod
energy = np.linspace(0., 2000, num=int(1e5))
for costheta in np.linspace(0, 1., 10):
    delta_ts_vec = test_obj.delta_ts(energy, costheta)
    delta_e_vec = test_obj.delta_e(energy, costheta)
    delta_r_vec = test_obj.delta_r(energy, costheta)
    sp2.plot(energy, delta_e_vec, color='k', linewidth=linewid)
    sp3.plot(energy, delta_r_vec, color='k', linewidth=linewid)
    sp1.plot(energy, delta_ts_vec, color='k', linewidth=linewid)
    sp5.plot(energy, delta_r_vec + delta_ts_vec + delta_e_vec, color='k', linewidth=linewid)
    sp6.plot(energy, delta_ts_vec + delta_e_vec, color='k', linewidth=linewid)
# Reference ECLOUD-model curves (dashed red) for comparison.
sp2.plot(energy, del_elas_ECLOUD(energy), '--', color='r', linewidth=linewid, label='ECLOUD model')
for ct in cos_theta_test:
    sp1.plot(energy, del_true_ECLOUD(energy, del_max=test_obj.deltaTSHat, costheta=ct), '--', color='r', linewidth=linewid, label='ECLOUD model')
sp2.legend(loc='best', prop={'size': 14})
plt.suptitle('SEY extraction tests: Furman-Pivi model \nexclude_rediffused=%s' % str(sey_mod.exclude_rediffused), fontsize=30)
plt.show()
|
#
# MLDB-832-select_star.py
# mldb.ai inc, 2015
# this file is part of mldb. copyright 2015 mldb.ai inc. all rights reserved.
#
from mldb import mldb
"""
This test checks that different select statements return the right columns
"""
def check_res(res, value):
    """Assert that `res` carries HTTP status `value` and return its JSON body.

    The response text is attached to the assertion for easier debugging.
    """
    assert res.status_code == value, res.text
    return res.json()
# Mutable sparse dataset 'd1' with a single row of six columns (a, b, c,
# x1, x2, x3), all recorded at timestamp 0 — used by every query below.
ds1 = mldb.create_dataset({
    'type': 'sparse.mutable',
    'id': 'd1'})
ds1.record_row('row_0', [['a', 0, 0],
                         ['b', 1, 0],
                         ['c', 2, 0],
                         ['x1', 3, 0],
                         ['x2', 4, 0],
                         ['x3', 5, 0]])
ds1.commit()
def run_query(select, expected):
    """ run a query using `select` on dataset d1 and compare the (sorted)
    column names with the `expected` (space-separated) column names
    ex:
    select = 'a, c, b'
    expected = 'a b c'

    Logs the mismatch and asserts on failure; returns the raw query output.
    """
    expected = expected.split()
    out = mldb.query('SELECT {} FROM {}'.format(select, 'd1'))
    # First output row holds the column names; element 0 is the row-name column.
    cols = sorted(out[0][1:])
    if cols != expected:
        mldb.log('{} != {}'.format(cols, expected))
        mldb.log('output was')
        mldb.log(out)
        assert False
    return out
# Simple queries like in the doc: '*' and its equivalent spellings all
# expand to every column of the row.
run_query('*',
          'a b c x1 x2 x3')
run_query('{*} as *',
          'a b c x1 x2 x3')
run_query('{*\n} as *',
          'a b c x1 x2 x3')
run_query('{{*} as *} as *',
          'a b c x1 x2 x3')
# following test case shows the bug from MLDB-1205
run_query('{{*} as *\n} as *',
          'a b c x1 x2 x3')
run_query('a,b,c,x1',
          'a b c x1')
# EXCLUDING removes named columns and wildcard matches from the expansion.
run_query('* EXCLUDING (a)',
          'b c x1 x2 x3')
run_query('* EXCLUDING (a,c)',
          'b x1 x2 x3')
run_query('* EXCLUDING (x*)',
          'a b c')
run_query('* EXCLUDING (a, x*)',
          'b c')
# Wildcard renaming: prefix x is rewritten to y.
run_query('x* AS y*',
          'y1 y2 y3')
run_query('x* EXCLUDING(x3) AS y*',
          'y1 y2')
# not sure that's how this one should behave
run_query('a, a', 'a')
# simple using object(select ...): naming a row expression prefixes columns.
run_query('{*} as z',
          'z.a z.b z.c z.x1 z.x2 z.x3')
run_query('{a,b} as z',
          'z.a z.b')
run_query('{x*} as z',
          'z.x1 z.x2 z.x3')
run_query('a,b,c, {x*} as z',
          'a b c z.x1 z.x2 z.x3')
# Now a few with functions: xs_as_z wraps its x* input columns under z.
conf = {'type': 'sql.expression',
        'params': {
            'expression': '{x*} as z'
        }}
res = mldb.put('/v1/functions/xs_as_z', conf)
check_res(res, 201)
run_query('a,b,c, xs_as_z({x*}) as *',
          'a b c z.x1 z.x2 z.x3')
run_query('xs_as_z({*}) as *',
          'z.x1 z.x2 z.x3')
# [z] extracts the z sub-object; 'as *' flattens, 'as z' re-prefixes.
run_query('xs_as_z({*})[z] as *',
          'x1 x2 x3')
run_query('xs_as_z({*})[z] as z',
          'z.x1 z.x2 z.x3')
#run_query('xs_as_z({*})[z as *] as *',
#          'x1 x2 x3')
run_query('xs_as_z({*})[{z.x1, z.x2}] as *',
          'z.x1 z.x2')
run_query('xs_as_z({*}) as *', 'z.x1 z.x2 z.x3')
# here I'm "calling" the function twice in another function
res = mldb.put('/v1/functions/twice', {
    'type' : 'sql.expression',
    'params': {
        'expression': 'xs_as_z({x*})[z] as w,'
                      'xs_as_z({x*})[z] as z'
    }})
check_res(res, 201)
run_query('twice({*}) as *',
          'w.x1 w.x2 w.x3 z.x1 z.x2 z.x3')
# same thing but once
res = mldb.put('/v1/functions/once', {
    'type' : 'sql.expression',
    'params': {
        'expression': 'xs_as_z({x*})[z] as w,'
    }})
check_res(res, 201)
run_query('once({*}) as *', 'w.x1 w.x2 w.x3')
# `request` is injected by the MLDB test harness; this marks the test passed.
request.set_return('success')
|
import re, collections, html5lib
from selenium import webdriver
from bs4 import BeautifulSoup
from enchant import DictWithPWL
from enchant.checker import SpellChecker

# ---- Inicia selenium ----
# Render the page with a real browser so JS-generated content is included.
url = "file:///C:/Users/QUALITY/Desktop/tst.html"
driver = webdriver.Firefox()
try:
    driver.get(url)
    rawHTML = driver.page_source
finally:
    # Always shut the browser down, even if loading the page fails
    # (the original leaked the browser process on error).
    driver.quit()
# ---- Termina selenium (codigo html declarado en rawHTML) ----

# ---- Inicia extraccion de texto ----
soup = BeautifulSoup(rawHTML, "html5lib")
# Script/style contents are code, not prose: drop them before spell-checking.
for script in soup(["script", "style"]):
    script.extract()
text = soup.get_text().encode('utf-8')
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split("\n"))
text = "\n".join(chunk for chunk in chunks if chunk)
# ---- Termina extraccion de texto (declarada en text) ----

# ---- Reporte de errores ortograficos ----
dic = DictWithPWL("es_MX")
chkr = SpellChecker(dic)
chkr.set_text(text)
# 'with' guarantees the report file is closed even if a write fails
# (the original used a bare open()/close() pair).
with open("c:/1/test.txt", "w") as f:
    f.write("Reporte de errores ortograficos.\n--------------------------------------\nObtenido de: ")
    f.write(url)
    f.write("\n--------------------------------------\n\n")
    for error in chkr:
        f.write("*")
        f.write(error.word)
        f.write("\n")
# ---- Termina "impresion de errores ortograficos" ----
|
from BallGame import *
from Wall import *
from Obstacle import *
##from RedObstacle import *
##from PurpleObstacle import *
from GreenObstacle import *
# Create the game window and populate it with walls and obstacles.
game = BallGame(width=800, height=650, maxSpeed=250,
                moveDir='mouse dir')

# A vertical column of ten short wall segments, spaced 50 px apart.
for row in range(10):
    game.addWall(Wall(400, 325 + row * 50, 400, 5))

# Ten ordinary obstacles, then one slower, larger green one.
for _ in range(10):
    game.addObstacle(Obstacle(maxSpeed=150))
game.addObstacle(GreenObstacle(maxSpeed=50, ballRadius=20))

# Enter the game loop.
game.run()
|
from flask_wtf import FlaskForm
from wtforms import Form, BooleanField, StringField, TextAreaField, validators
from wtforms.validators import DataRequired
class MusicLibrary(FlaskForm):
    """Form for adding a song to the music library.

    All three fields are required free-text inputs.
    """
    title = StringField('Song Title', validators=[DataRequired()])
    band = StringField('Band Name', validators=[DataRequired()])
    genre = StringField('Music Type', validators=[DataRequired()])
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
from flask import Blueprint
import handlers
coolness_api = Blueprint('coolness_api', __name__)
@coolness_api.route('/coolness', methods=['POST'])
def CreateSomethingCool():
    """
    Handler for POST /coolness.

    Delegates all work to handlers.CreateSomethingCoolHandler().
    """
    return handlers.CreateSomethingCoolHandler()
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Tanmya
#
# Created: 04/08/2019
# Copyright: (c) Tanmya 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
a=int(input())
b=int(input())
if a>b:
print(a-b)
else:
print(a+b)
|
#!/usr/bin/env python
###############################################################################
#
# Main Application for 1D Image
#
###############################################################################
import sys
import numpy
from math import *
from PyQt4 import QtCore, QtGui
# PyQt4's QString.fromUtf8 exists only under API v1; when QString is
# absent (API v2 uses plain Python strings) fall back to the identity
# function so _fromUtf8(s) is always callable.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
from MplFigureCanvas import Qt4MplPlotView
# from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as NavigationToolbar
#
# from matplotlib.figure import Figure
# from mpl_toolkits.mplot3d import Axes3D
#class MainApp1D(QtGui.QMainWindow): QMainWindow has a self-defined layout. No more layout can be declared on it
class MainApp1Dextent(QtGui.QWidget):
    """ Main application for 1-D (x-y) plot.

    Left side: the matplotlib canvas, an "Add Line" button and a line edit
    where the user types ``y=f(x), xmin, dx, xmax`` expressions (several may
    be separated by ';').  Right side: controls to delete or restyle the
    most recently added line.
    """
    def __init__(self, parent=None):
        """ Initialization: build all widgets, lay them out, and wire the
        Qt signals and matplotlib mouse events.
        """
        # call constructor
        #QtGui.QMainWindow.__init__(self, parent)
        QtGui.QWidget.__init__(self, parent)
        # set up GUI
        self.resize(800,600)
        # - functional widget
        self.ui_canvas = Qt4MplPlotView(self)
        self.ui_lineEdit1 = QtGui.QLineEdit(self)
        self.ui_button1 = QtGui.QPushButton(self)
        self.ui_button1.setText("Add Line")
        self.ui_button2 = QtGui.QPushButton(self)
        self.ui_button2.setObjectName(_fromUtf8("ui_button2"))
        self.ui_button2.setText('Delete Last Line')
        self.ui_button3 = QtGui.QPushButton(self)
        self.ui_button3.setObjectName(_fromUtf8("ui_button3"))
        self.ui_button3.setText('Change Last Line Style')
        # combo boxes are populated from the canvas' supported options
        self.ui_combobox_lineStyle = QtGui.QComboBox(self)
        self.ui_combobox_lineStyle.addItems(self.ui_canvas.getLineStyleList())
        self.ui_combobox_color = QtGui.QComboBox(self)
        self.ui_combobox_color.addItems(self.ui_canvas.getLineBasicColorList())
        self.ui_combobox_marker = QtGui.QComboBox(self)
        self.ui_combobox_marker.addItems(self.ui_canvas.getLineMarkerList())
        self.ui_checkbox1 = QtGui.QCheckBox(self)
        self.ui_checkbox1.setObjectName(_fromUtf8("ui_checkbox1"))
        self.ui_checkbox1.setText('Enable position tracker')
        # - layout: horizontal box holding two vertical columns
        #   (plot column on the left, control column on the right)
        self.ui_hbox = QtGui.QHBoxLayout(self)
        self.ui_vbox = QtGui.QVBoxLayout()
        self.ui_hbox.addLayout(self.ui_vbox)
        self.ui_vbox_ctrl= QtGui.QVBoxLayout()
        self.ui_hbox.addLayout(self.ui_vbox_ctrl)
        self.ui_vbox.addWidget(self.ui_canvas)
        self.ui_vbox.addWidget(self.ui_button1)
        self.ui_vbox.addWidget(self.ui_lineEdit1)
        self.ui_vbox_ctrl.addWidget(self.ui_button2)
        self.ui_vbox_ctrl.addWidget(self.ui_button3)
        self.ui_vbox_ctrl.addWidget(self.ui_combobox_lineStyle)
        self.ui_vbox_ctrl.addWidget(self.ui_combobox_color)
        self.ui_vbox_ctrl.addWidget(self.ui_combobox_marker)
        self.ui_vbox_ctrl.addWidget(self.ui_checkbox1)
        # define event handlers
        self.connect(self.ui_button1, QtCore.SIGNAL('clicked()'),
                     self.doPlot)
        self.connect(self.ui_button2, QtCore.SIGNAL('clicked()'),
                     self.doDeleteLine)
        self.connect(self.ui_button3, QtCore.SIGNAL('clicked()'),
                     self.doChangeLine)
        # define event handlers for matplotlib canvas
        self.ui_canvas.canvas.mpl_connect('button_press_event', self.on_mouseDownEvent)
        self.ui_canvas.canvas.mpl_connect('motion_notify_event', self.on_mouseMotion)
        return

    def doDeleteLine(self):
        """ Delete last added line and redraw the canvas.
        """
        ikey = self.ui_canvas.getLastPlotIndexKey()
        self.ui_canvas.removePlot(ikey)
        self.ui_canvas.draw()
        return

    def doChangeLine(self):
        """ Change the color, style, marker and (optionally) the values of
        the last line.  New values are parsed from the line edit in the
        same ``y=f(x), xmin, dx, xmax`` format as doPlot(); only the first
        non-empty expression is used (note the ``break``).
        """
        # Line value
        vecx = None
        vecy = None
        linelist = str(self.ui_lineEdit1.text()).split(";")
        for line in linelist:
            line = line.strip()
            if len(line) == 0:
                continue
            print "Parse: ", line
            terms = line.split(",")
            if len(terms) != 4:
                print "Unsupported.. must be : y=f(x), xmin, dx, xmax"
            equation = terms[0].strip()
            # strip a leading "y=" if present
            if equation.count('=') > 0:
                equation = equation.split('=')[1]
            xmin = float(terms[1])
            dx = float(terms[2])
            xmax = float(terms[3])
            # initialize vecx and vecy
            vecx = numpy.arange(xmin, xmax, dx)
            vecy = numpy.empty(len(vecx))
            # NOTE(review): eval() on user-typed text is unsafe for any
            # untrusted input; acceptable only for a local tool like this.
            for i in xrange(len(vecx)):
                x = vecx[i]
                vecy[i] = eval(equation)
                print x, vecy[i]
            break
        # ENDFOR
        # Get last line
        ikey = self.ui_canvas.getLastPlotIndexKey()
        # Do something
        linestyle = str(self.ui_combobox_lineStyle.currentText())
        color = str(self.ui_combobox_color.currentText())
        linemarker = str(self.ui_combobox_marker.currentText())
        # extra process on color: drop a trailing " (...)" qualifier
        if linemarker.count(' (') > 0:
            linemarker = linemarker.split(' (')[0]
        self.ui_canvas.updateLine(ikey, vecx, vecy, linecolor=color, linestyle=linestyle, marker=linemarker, markercolor=color)
        # Commit
        self.ui_canvas.draw()
        return

    def doPlot(self):
        """ Plot every ``y=f(x), xmin, dx, xmax`` expression typed in the
        line edit (';'-separated) as a new 1-D curve.
        """
        # parse input
        linelist = str(self.ui_lineEdit1.text()).split(";")
        for line in linelist:
            line = line.strip()
            if len(line) == 0:
                continue
            print "Parse: ", line
            terms = line.split(",")
            if len(terms) != 4:
                print "Unsupported.. must be : y=f(x), xmin, dx, xmax"
            equation = terms[0].strip()
            if equation.count('=') > 0:
                equation = equation.split('=')[1]
            xmin = float(terms[1])
            dx = float(terms[2])
            xmax = float(terms[3])
            # initialize vecx and vecy
            vecx = numpy.arange(xmin, xmax, dx)
            vecy = numpy.empty(len(vecx))
            #print "vecx: ", vecx
            #print "vecy: ", vecy
            print "Size of X = %d" % (len(vecx))
            # NOTE(review): eval() on user-typed text -- see doChangeLine().
            for i in xrange(len(vecx)):
                x = vecx[i]
                vecy[i] = eval(equation)
                print x, vecy[i]
            self.ui_canvas.add_plot_1d(vecx, vecy, label="y=%s"%(equation))
        # ENDFOR
        # Draw !
        self.ui_canvas.draw()

    def on_mouseDownEvent(self, event):
        """ Respond to pick up a value with mouse down event: pop up a
        message box with the clicked data coordinates (ignored when the
        click falls outside the axes, where xdata/ydata are None).
        """
        x = event.xdata
        y = event.ydata
        #doshow = self.ui_checkbox1.isChecked()
        if x is not None and y is not None:
            msg = "You've clicked on a bar with coords:\n %f, %f" % (x, y)
            QtGui.QMessageBox.information(self, "Click!", msg)
        return

    def on_mouseMotion(self, event):
        """ Print the cursor's data coordinates while the position tracker
        checkbox is enabled.
        """
        doshow = self.ui_checkbox1.isChecked()
        if doshow is True:
            print "Mouse is moving to ", event.xdata, event.ydata
if __name__ == "__main__":
    # Create the Qt application, show the main widget, and enter the event
    # loop; exec_() blocks until the window is closed.
    mainApp = QtGui.QApplication(sys.argv)
    myapp = MainApp1Dextent()
    myapp.show()
    sys.exit(mainApp.exec_())
|
import os

# Package metadata for EmoTFIDF.
__version__ = '1.0.6'
__license__ = 'MIT'
__author__ = 'mmsa12'
name = "EmoTFIDF"
# Absolute path of the installed package directory -- presumably used to
# locate bundled resource files; verify against the package's other modules.
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
# Read one value and report which character classes it satisfies.
n = input('Introduza um qualquer valor: ')
checks = (
    ('Introduziu uma letra maiúscula', n.isupper()),
    ('Introduziu um número ou uma letra', n.isalnum()),
    ('Introduziu um número decimal', n.isdecimal()),
    ('Introduziu uma letra minúscula', n.islower()),
)
for label, result in checks:
    print(label, result)
# Generated by Django 3.1.2 on 2020-10-22 13:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make details.Password and details.Username
    optional (blank=True), keeping max_length=100."""

    dependencies = [
        ('MyFacebook', '0008_auto_20201022_1338'),
    ]

    operations = [
        migrations.AlterField(
            model_name='details',
            name='Password',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AlterField(
            model_name='details',
            name='Username',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
|
###########################################################
# Author: Orion Crocker
# Filename: __init__.py
# Date: 05/18/20
#
# client init
# self explanatory
###########################################################
import sys
from client.irc_client import IRCClient

# Fail fast on Python 2 at import time.
# NOTE(review): assert statements are stripped when Python runs with -O;
# an explicit version check that raises would be more robust -- confirm
# before changing, since importers may rely on AssertionError here.
assert sys.version_info[0] == 3, "Requires python3"
# If Conditions
is_male = False
is_tall = True

# First report: does at least one attribute apply?
if is_male or is_tall:
    print("You are a male or tall or both")
else:
    print("You are neither male nor tall")

# Second report: the exact combination, via nested conditionals.
if is_male:
    if is_tall:
        print("You are a tall male")
    else:
        print("You are a short male")
elif is_tall:
    print("You are not a male but tall")
else:
    print("You are either not male or not tall or not both")
# If statements and comparisons
# If statements and comparisons
def max_num(num1, num2, num3):
    """Return the largest of the three numbers.

    Delegates to the built-in max() instead of a hand-rolled comparison
    chain; ties resolve to the same value either way.
    """
    return max(num1, num2, num3)


# Demo: prints 400.
print(max_num(3, 400, 5))
|
from django import forms
from django.shortcuts import get_object_or_404
from django.forms import ModelForm, ChoiceField
from .models import League
class LeagueCreateForm(forms.ModelForm):
    """ModelForm for creating a League.

    Exposes the league's name and password plus the roster-size settings
    declared on the League model.
    """
    class Meta:
        model = League
        fields = ['name', 'password', 'owner_amount', 'skater_amount', 'goalie_amount', 'draft_goalies']
|
from django.contrib import admin
from league.models import Owner, Player, Team, League, Transaction, trade, team_trade, add, drop, add_drop, Roster
# Register every league model with the default admin site in one pass.
for model in (Owner, Player, Team, League, Transaction, trade, team_trade,
              add, drop, add_drop, Roster):
    admin.site.register(model)
from flix.core2.markerList import MarkerList
from flix.web.serverSession import ServerSession
import flix
import flix.core2.mode
import flix.fileServices
import flix.fileServices.fileLocal
import flix.fileServices.repathDefault
import flix.web.serverSession
import flix.plugins.pluginDecorator
from flix.core2.markerList import MarkerList
from flix.plugins.toShotgun import ToShotgun
from flix.plugins.toMov import ToMov
import flix.remote
import os
import sys
import inspect
_UPPER_LINE_Y_OFFSET = 150
_LOWER_LINE_Y = 50
_UPPER_LINE_H = 100
_LOWER_LINE_H = 100
_SEQUENCE_KEY = 'Sequence'
_VERSION_KEY = 'Version'
_START_FRAME_KEY = 'StartFrame'
_END_FRAME_KEY = 'EndFrame'
_PRERENDER_KEY = 'Prerendered'
_FONT_COLOR_KEY = 'FontColor'
_FONT_SIZE_KEY = 'FontSize'
_SHOTS_KEY = 'Shots'
_SCRIPT_NAME_KEY = 'ScriptName'
_PANEL_PATH_KEY = 'PanelPath'
## Shamelessly stolen from flix.rendering.__init__
# Lazily-constructed process-wide instance returned by FlixNuke().
_flixNuke = None
def FlixNuke():
    """Return the shared FlixNuke instance, creating it on first call.

    The concrete class is looked up from the dotted path stored in the
    FLIX_NUKE environment variable (raises KeyError if it is unset).
    """
    global _flixNuke
    if _flixNuke is None:
        config = os.environ['FLIX_NUKE']
        configClass = config.split('.')[-1]
        configModule = '.'.join(config.split('.')[0:-1])
        # level=-1 (implicit relative import) is Python 2 only.
        tmp = __import__(configModule, globals(), locals(), [configClass], -1)
        _flixNuke = getattr(tmp, configClass)()
    return _flixNuke
def validateUnicodeChars(text):
    """Replace non-ASCII characters in *text* with their decimal code points.

    Python 2 only (relies on the ``unicode`` builtin).  Iterates the
    original string while rebinding *text* to the replaced copy, which is
    safe because strings are immutable.
    NOTE(review): the condition is ``> 128``, so code point 128 itself is
    left untouched -- confirm whether ``>= 128`` was intended.
    """
    for char in text:
        if ord(unicode(char)) > 128:
            text = text.replace(char, str(ord(unicode(char))))
    return text
## ToDo - Give this class a proper name
class NukeTestEA(ToShotgun):
    """
    A Flix plugin for compositing slate information and uploading it to Shotgun

    Attributes:
        fileService: flix.fileServices.FileService instance (set in __init__)
        fileServiceLocal: flix.fileServices.fileLocal.FileLocal instance
        serverSession: flix.web.serverSession.ServerSession instance
        repath: flix.fileServices.repathDefault.RepathDefault instance
        panelList: list, initialized empty in __init__
        shotList: the ShotCutList to operate on (set in execute())
        kargs: dict of show/sequence/branch/version (set in execute())
        mode: the ShotCutList's mode (set in execute())
        sg: Shotgun connection (set in execute() and getPrerenderedStatus())
        sgShow: validated Shotgun show (set in getPrerenderedStatus())
        sgSeq: validated Shotgun sequence (set in getPrerenderedStatus())
        seqUrl: sequence URL -- presumably used for Shotgun comments
        markIn: running in-frame counter used while exporting
        markOut: running out-frame counter used while exporting
    """
    ## Note - markIn and markOut probably shouldn't be attributes, but that's
    ## how the Foundry initially wrote them and I'm not sure how they're used
    ## elsewhere in the class inheritance hierarchy, so they stay for now.
    def __init__(self):
        """
        Initialize the NukeTestEA plugin

        It should be noted that this does not necessarily initialize all of
        the class' attribute variables, as horrifying as that sounds.
        """
        self.fileService = flix.fileServices.FileService()
        self.fileServiceLocal = flix.fileServices.fileLocal.FileLocal()
        self.serverSession = flix.web.serverSession.ServerSession()
        self.repath = flix.fileServices.repathDefault.RepathDefault()
        self.panelList = []
        # load the icon
        iconPath = flix.core2.mode.Mode().get('[FLIX_CONFIG_FOLDER]')+'/plugins/icons/custom_icon.png'
        icon = self.fileService.loadByteArray(iconPath)
        ## Flix follows what seems to be a very bizarre design pattern.
        ## The top level parent for this class does not implement __init__() so
        ## we cannot call super() to initialize common attributes.
        ## Instead the top level parent implements an init() function.
        ## My only guess is that this is to pretend the top level parent
        ## is an abstract class and prevent direct instantiation.
        ## But why not use the abc module to make a real abstract base class?
        self.init(label='Nuke Test EA',
                  icon=icon,
                  tooltip='This Button Is Only A Test',
                  group='Export',
                  pluginPath='flixConfig.plugins.nukeTestEA.NukeTestEA')

    ## ToDo - Remove this when we have things working and are willing to enter
    ## production. The following code is a partially commented out version of
    ## the ToShotgun.execute() function.
    ## In the end, that should be the code that runs, not this.
    ##
    ## I messed up and edited the code. I don't think the above approach would
    ## actually work, anyways. There doesn't seem to be a way to access the
    ## Marker information from within createMov(), and we need the marker in
    ## order to pull the shot label. It seems like that should be accessible
    ## via the Shot object itself, since the UI displays it when selecting a
    ## panel in Flix, but that doesn't seem to be the case.
    def execute(self, shotCutList, selection, additionalData=None):
        """
        Plugin entry point: composite slate info for each marker segment.

        Args:
            shotCutList: the current ShotCutList
            selection: the current panel selection (unused here)
            additionalData: optional dict; 'reference' and 'comment' keys read
        """
        if not additionalData is None:
            referenceCutList = additionalData.get('reference', None)
            comment = additionalData.get('comment', "")
        else:
            comment = ''
        flix.logger.log(comment)
        ## ToDo - Do we even need this comment stuff? It never gets used.
        # comment = unicode(urllib.unquote_plus(comment))
        self.shotList = flix.core2.shotCutList.ShotCutList.fromFile(shotCutList.defaultPath())
        self.kargs = {'show' :self.shotList.show,
                      'sequence':self.shotList.sequence,
                      'branch' :self.shotList.branch,
                      'version' :self.shotList.version}
        self.mode = self.shotList.mode
        self.addProgress(4)
        ## ToDo - Move this stuff to its own function
        ## It may be possible to just delete this - it also gets set when
        ## calling getPrerenderedStatus(), which is also when it gets used.
        self.sg = self._connectToSG(self.mode)
        self.removeProgress()
        self.seqUrl = self.getSequenceUrl()
        ## Question for Foundry - These lines are effectively duplicated in
        ## getPrerenderedStatus(). Should only one or the other be used?
        ## Should they be declared in __init__ and have this and the calls
        ## in getPrerenderedStatus() moved to a setter instead?
        # validate that all information for Flix has an equivalent in shotgun
        #self.sgShow = self._validateShow(self.shotList.show)
        #self.sgSeq = self._validateSeqs(self.shotList.sequence)
        self.removeProgress()
        ## Let it be known that getMarkerList() does not return a MarkerList.
        ## It instead returns a list of ShotCutLists.
        ## Because I don't know.
        shotCutLists = self.getMarkerList(self.shotList)
        self.removeProgress()
        ## ToDo - If shotCutLists is None, bad things happen
        ## shotCutLists should never be None - if getMarkerList fails to create
        ## a ShotCutList, it returns an empty list.
        ## I don't think the empty list can break this function, but it does
        ## have disturbing implications for uses elsewhere.
        if not shotCutLists:
            shotCutLists.append(self.shotList)
        ## This is bad. I'm just hoping that markerList is the same length and
        ## matches self.shotList index-for-index. Ideally this would be handled
        ## inside toMov() so that the shot and marker are definitely matched.
        ## Calling fromShotCutList on the ShotCutList that gets passed returns
        ## an empty list, however.
        markerList = MarkerList.fromShotCutList(self.shotList)
        self.addProgress(len(shotCutLists))
        self.markIn = 1
        self.markOut = 0
        ## In the original Flix code this is adapted from, scl and shotCutLists
        ## were named 'markerShot' and 'markerShotLists'. Those names were lies
        ## and have been changed here to reflect the actual object types.
        for i, scl in enumerate(shotCutLists):
            ## Question for Foundry - When is a ShotCutList not a Marker?
            ## What does it mean for a ShotCutList to be a Marker?
            ## All tests so far have never entered the else clause of the
            ## following statement. Is that expected?
            if scl.isMarker:
                # accumulate the segment's duration into the running out-frame
                for shot in scl:
                    self.markOut += int(round(shot.duration))
                ## ToDo - Uncomment this once we're willing to check things
                ## against Shotgun again
                #self.sgShot = self._validateShot(scl)
                #self._validateVersion(scl)
                flix.logger.log('CALLING NUKETESTEA.CREATEMOV')
                movie = self.createMov(scl, markerList[i])
                ## The += was taken from regular Flix code, but I think it's
                ## a bug, since it very quickly results in markIn being
                ## thousands of frames ahead of markOut.
                # self.markIn += self.markOut
                self.markIn = self.markOut
                ## ToDo - Uncomment this once we're willing to
                ## check against/upload to Shotgun again
                #if not self.sgShot:
                #    self.sgShot = self.createSgShot(scl)
                #sgVersion = self.uploadSeqVersion(movie, self.sgShot, comment=comment)
            else:
                pass # ToDo - Debug. Remove later.
                #self._validateVersion(scl)
                #self.sgTask = self._validateTask()
                #movie = self.createMov(scl)
                #sgVersion = self.uploadSeqVersion(movie, self.sgSeq,
                #                                  self.sgTask, comment=comment)
            ## Why does this time out after compositing the first sequence?
            ## Why does it composite the first sequence multiple times?
            ## ToDo - Uncomment this stuff once we're compositing
            # self.playlist = self._validatePlaylist()
            # self.updatePlaylist(self.playlist, sgVersion)
            self.removeProgress()
        ## ToDo - Uncomment this once we're composing and willing to upload
        ## to Shotgun
        # ToShotgun.toFile(self, **self.kargs)
        #
        # autoMarkOmittedShots = self.mode['[sgAutoMarkOmittedShots]'] == "1"
        #
        # if autoMarkOmittedShots:
        #     self.autoMarkOmittedShots()
        self.removeProgress()

    def createMov(self, shotCutList, marker):
        """
        Composite the slate information onto a ShotCutList

        Args:
            shotCutList: The ShotCutList to composite
            marker: The Marker corresponding to shotCutList

        Returns:
            The path to the composited movie
        """
        ## ToDo - I'm not sure if the description of the return value above is
        ## correct, as I have not yet been able to test that far.
        ## Get information to composite
        fontColor = self.mode['[burnInTopFontColor]']
        fontSize = self.mode['[burnInTopFontSize]']
        prerendered = self.getPrerenderedString(shotCutList)
        version = shotCutList.version
        ## ToDo - Remove the target folder override when done testing
        nukeFolder = self.mode['[localFolder]']
        # nukeFolder = 'G:/flix/temp/'
        # collect one {path, in-frame, out-frame} record per shot
        shots = []
        for shot in shotCutList:
            kargs = {'beat': shot.beat,
                     'setup': shot.setup,
                     'version': shot.version,
                     'frame': '%04d'}
            path = shotCutList.mode.get('[recipeCompedFile]', kargs)
            shotInfo = {_PANEL_PATH_KEY: path,
                        _START_FRAME_KEY: shot.markInFrame,
                        _END_FRAME_KEY: shot.markOutFrame}
            shots.append(shotInfo)
        # ToDo - Consider turning info_dict into a class to make behavior
        # data-driven and more customizable
        ## Can probably delete most of this. Keeping for now for debug purposes.
        info_dict = {}
        info_dict[_SEQUENCE_KEY] = marker.name
        info_dict[_VERSION_KEY] = str(version)
        info_dict[_START_FRAME_KEY] = self.markIn # This number is seriously messed up
        info_dict[_END_FRAME_KEY] = self.markOut
        info_dict[_PRERENDER_KEY] = prerendered
        info_dict[_FONT_COLOR_KEY] = fontColor
        info_dict[_FONT_SIZE_KEY] = fontSize
        info_dict[_SHOTS_KEY] = shots
        mov = ToMovEA()
        flix.logger.log('\n\n>>>>>>>>>> CALLING TOMOVEA>COMPBUILD() <<<<<<<<<<\n')
        mov.compBuild(shotCutList, info_dict)
        ## ToDo - Uncomment this when we are ready to test compositing.
        movPath = self.repath.localize(mov.movieName)
        # movPath = 'G:/You/Might/Want/To/Look/At/nukeTestEA/ToDos'
        flix.logger.log('movPath: {}'.format(movPath))
        return movPath

    def getPrerenderedString(self, shotCutList):
        """
        Query Shotgun if a sequence should be prerendered and format as str

        Args:
            shotCutList: the ShotCutList to pull information for

        Returns:
            'Prerendered' if the sequence should be prerendered
            'Realtime' otherwise
        """
        prerendered = self.getPrerenderedStatus(shotCutList)
        if prerendered:
            return 'Prerendered'
        return 'Realtime'

    def getPrerenderedStatus(self, shotCutList):
        """
        Query Shotgun for if a sequence should be prerendered or realtime

        Args:
            shotCutList: the ShotCutList to pull information for

        Returns:
            True if the sequence should be prerendered
            False otherwise
        """
        self.sg = self._connectToSG(shotCutList.mode)
        self.sgShow = self._validateShow(shotCutList.show)
        self.sgSeq = self._validateSeqs(shotCutList.sequence)
        pFilters = [['id', 'is', self.sgSeq['id']]]
        pFields = ['sg_pre_rendered']
        shot = self.sg.find_one('Sequence', pFilters, pFields)
        # NOTE(review): currently True whenever the Sequence entity exists at
        # all; the commented lines below are the intended field check.
        prerendered = False
        if shot:
            prerendered = True
        # if shot['sg_pre_rendered']:
        #     prerendered = True
        return prerendered
## This class should probably be moved to its own module, but I don't want to
## deal with sys.path not containing the plugins folder at the moment.
class ToMovEA(ToMov):
    """ToMov subclass that composites slate text via a Nuke node network."""
    def __init__(self):
        super(ToMovEA, self).__init__()

    def compBuild(self, shotList, info_dict):
        """
        Build the Nuke comp for *shotList* and render it to an image sequence.

        Args:
            shotList: a ShotCutList, or a (ShotCutList, indices) tuple
            info_dict: slate information keyed by the module's _*_KEY constants
        """
        if isinstance(shotList, tuple):
            self.indices = shotList[1]
            shotCutList = shotList[0]
        else:
            shotCutList = shotList
        mode = shotCutList.mode
        with mode.using({'branch':shotCutList.branch}):
            self.setKargs(shotCutList, mode)
            self.movieName = self._getMovName(mode)
            self.fileService.copyToLocal(self.movieName)
            # if self.fileServiceLocal.exists(self.movieName):
            #     flix.logger.log('Movie already exists. Returning early.')
            #     return
            jpgFilename = '{0}.%04d.jpg'.format(os.path.basename(self.movieName))
            # render target: <movie>.tmp/<movie>.%04d.jpg, movie itself gets a .tmp infix
            self.imgSeqOutput = '/'.join([self.movieName.replace(os.path.splitext(self.movieName)[-1], '.tmp'), jpgFilename])
            self.movieName = self.movieName.replace(os.path.splitext(self.movieName)[-1], '.tmp{0}'.format(os.path.splitext(self.movieName)[-1]))
            self.createCompDir(mode)
            self.updateNukeComp(shotCutList, mode)
            ## Try to comp everything directly to a movie instead of comping and then merging an image sequence
            ## Note: this loses the audio and all of that
            # test_destination, ext = os.path.splitext(self.nukeComp['destination'])
            # test_destination, ext = os.path.splitext(test_destination)
            # self.nukeComp['destination'] = test_destination
            destination = self.repath.localize(self.nukeComp['destination'])
            flix.logger.log('destination localized: {}'.format(destination))
            ## Hopefully all of this destination modification stuff is not needed when the code is run on the server
            # destination = os.path.basename(self.nukeComp['destination'])
            # basepath = os.path.join('G:/flix/temp/test/', info_dict[_SEQUENCE_KEY]) + '/';
            # try:
            #     os.makedirs(basepath)
            # except OSError:
            #     if not os.path.isdir(basepath):
            #         raise
            # destination = os.path.join(basepath, destination)
            self.imgSeqOutput = destination
            self.nukeComp['destination'] = destination
            flix.logger.log('\n\nCreating image sequence\n')
            flix.logger.log('self.imgSeqOutput: {}'.format(self.imgSeqOutput))
            flix.logger.log('self.nukeComp["destination"]: {}'.format(self.nukeComp['destination']))
            self.build_nuke_network(info_dict,
                                    validateUnicodeChars(self.nukeComp['destination']),
                                    validateUnicodeChars(self.nukeComp['scriptName']))
            flix.logger.log('\n\nFinished creating image sequence\n')
            ## ToDo - uncomment things below this line when ready to test them
            # self._getCompedSoundFile(shotCutList)
            # self.createMovie(shotCutList.show,
            #                  shotCutList.sequence,
            #                  self.movieName,
            #                  self.imgSeqOutput,
            #                  self.nukeComp['lastFrame'],
            #                  self.soundFile)
            ## Fun fact: None of the information in self.nukeComp makes it through
            ## the self.build_nuke_network() call. The dictionary is just empty.
            # self.fileServiceLocal.forceRemoveFolder(os.path.dirname(self.imgSeqOutput))
            # self.fileServiceLocal.rename(self.movieName, self.movieName.replace('.tmp', '')
            # self.movieName = self.movieName.replace()

    ## Flix is unable to import the nuke module locally. This decorator
    ## should fix that problem by pushing the function to the Nuke server.
    ## Note that this means any logged information from this scope will appear
    ## in the remote log and not the local one.
    @flix.remote.autoRemote(flix.remote.kProcGroupNuke, lockProc=False)
    def build_nuke_network(self, info_dict, destination, script_name):
        """
        Construct a node network in Nuke for compositing slate information.

        Args:
            info_dict: the dictionary containing the slate information
            destination: output path for the Write node (pre-localization)
            script_name: path to save the generated Nuke script as

        Returns:
            False if errors were encountered in trying to construct the network
            True otherwise
        """
        ## Nuke doesn't like to be imported locally.
        ## It only works when run on the server.
        try:
            import nuke
        except:
            return False

        def _make_text_node(node_name, text, x=0, y=0, w=200, h=100):
            """
            Create a text node in Nuke with the given text and dimensions

            Args:
                node_name: the name to give the node
                text: the message text to assign the node

            Kwargs:
                x: The x coordinate to give the left side of the text's bounding box
                y: The y coordinate to give the bottom of the text's bounding box
                w: The width to give the bounding box
                h: The height to give the bounding box

            Returns:
                A Nuke Text node
            """
            ## Create a Text node rather than the newer Text2 to avoid the
            ## Text2's 'freetype' nonsense moving the text arbitrarily
            node = nuke.createNode('Text')
            node['message'].setText(text)
            node['box'].setX(x)
            node['box'].setY(y)
            node['box'].setR(x+w)
            node['box'].setT(y+h)
            return node

        flix.logger.log('\n>>>>> COMPOSITING {} <<<<<\n\n'.format(info_dict[_SEQUENCE_KEY]))
        flixNuke = FlixNuke()
        flixNuke.clearScript()
        # frame range comes straight from the slate info
        nuke.root().knob('first_frame').setValue(info_dict[_START_FRAME_KEY])
        nuke.root().knob('last_frame').setValue(info_dict[_END_FRAME_KEY])
        nuke.root().knob('frame').setValue(info_dict[_START_FRAME_KEY])
        # one Read node per shot's comped image sequence
        read_nodes = []
        for shot in info_dict[_SHOTS_KEY]:
            read_node = nuke.nodes.Read(name='FileInput')
            read_node.setXpos(1)
            read_node.setYpos(1)
            ## ToDo - Switch which version of envized is used once we have
            ## a dev server up to test composition on - the environment variable
            ## isn't set on my local machine
            ## The envize functions normally strip out the garbage folder flix
            ## automatically adds to the start of paths for reasons unknown
            panelPath = shot[_PANEL_PATH_KEY]
            panelPath = panelPath[len('/shots'):]
            # append the "first-last" frame range Nuke expects from fromUserText()
            panelPath = panelPath + ' {0}-{1}'.format(shot[_START_FRAME_KEY],
                                                      shot[_END_FRAME_KEY])
            # envized = 'G:/flix/flixProjects' + panelPath
            envized = '/Applications/Flix/flixProjects' + panelPath
            envized = self.repath.envizeForNuke(envized)
            flix.logger.log('envized: {}'.format(envized))
            read_node['file'].fromUserText(envized)
            read_nodes.append(read_node)
        # place the slate text relative to the first input's resolution
        image_width = read_nodes[0].format().width()
        image_height = read_nodes[0].format().height()
        upper_y = image_height - _UPPER_LINE_Y_OFFSET
        sequence_node = _make_text_node('SequenceText',
                                        info_dict[_SEQUENCE_KEY],
                                        x=50,
                                        y=upper_y,
                                        w=1150,
                                        h=_UPPER_LINE_H)
        prerender_node = _make_text_node('PrerenderText',
                                         info_dict[_PRERENDER_KEY],
                                         x=1200,
                                         y=upper_y,
                                         w=600,
                                         h=_UPPER_LINE_H)
        date_node = _make_text_node('DateText',
                                    '[date %D]',
                                    x=50,
                                    y=_LOWER_LINE_Y,
                                    w=450,
                                    h=_LOWER_LINE_H)
        version_node = _make_text_node('VersionText',
                                       'Version {}'.format(info_dict[_VERSION_KEY]),
                                       x=350,
                                       y=_LOWER_LINE_Y,
                                       w=400,
                                       h=_LOWER_LINE_H)
        write_node = nuke.createNode('Write')
        destination = self.repath.localize(destination)
        ## Ensure that there is a folder to save composited images to
        dirname = os.path.dirname(destination)
        try:
            os.makedirs(dirname)
        except OSError:
            # only re-raise if the directory genuinely doesn't exist
            if not os.path.isdir(dirname):
                raise
        write_node['file'].fromUserText(destination)
        # chain: read -> sequence -> prerender -> date -> version -> write
        sequence_node.setInput(0, read_nodes[0])
        prerender_node.setInput(0, sequence_node)
        date_node.setInput(0, prerender_node)
        version_node.setInput(0, date_node)
        write_node.setInput(0, version_node)
        script_name = self.repath.localize(script_name)
        flix.logger.log('Saving nuke recipe file as {}'.format(script_name))
        nuke.scriptSave(script_name)
        nuke.execute(write_node['name'].getValue())
        FlixNuke().clearScript()
        flix.logger.log('\n>>>>> FINISHED COMPOSITING {} <<<<<\n\n'.format(info_dict[_SEQUENCE_KEY]))
        return True
## May not need this stuff. Stolen from elsewhere in Flix.
# def ffmpegExtractCalculateDeadline(self, show, sequence, movieName, imgSequence, duration, audioFile=""):
# totalDuration = len(self.fileServiceLocal.listFolder(os.path.dirname(imgSequence)))
# flix.logger.log('ffmpeg deadline %s' % totalDuration)
# return max(100, totalDuration * 5)
# @flix.remote.autoRemote(flix.remote.kProcGroupMov, calculateDeadlineFunction=ffmpegExtractCalculateDeadline)
# def createMovie(self, show, sequence, movieName, imgSequence, duration, audioFile=""):
# mode = flix.core2.mode.Mode(show, sequence)
# ffmpeg = os.environ.get('FLIX_FFMPEG')
# imgSequence = self.repath.localize(imgSequence)
# self.fileService.copyToLocal(movieName)
# movieName = self.repath.localize(movieName)
# if self.fileServiceLocal.exists(movieName):
# return
# frameRate = float(self._getFrameRate(mode))
# if audioFile:
# audioFile = "-i '%s' -c:a copy"%self.repath.localize(audioFile)
# self.setLibraryPaths()
# command = '"%(ffmpeg)s" -f image2 -framerate %(frameRate)s -i "%(imgSequence)s" %(audioFile)s -vf fps=fps=%(frameRate)s -frames:v %(duration)s "%(movieName)s"'%locals()
# flix.logger.log('>>>>>ffmpeg command: {}'.format(command))
# result = flix.utilities.osUtils.OSUtils.quickCall(command, cwd=os.path.dirname(ffmpeg)) |
from flask import Flask, abort, render_template
from flask_bootstrap import Bootstrap

from modules import convert_to_dict
app = Flask(__name__)
Bootstrap(app)  # registers Flask-Bootstrap assets on the app
# Load the whole periodic table once at import time; one dict per element.
# NOTE(review): assumes Periodic_Table.csv is in the working directory — confirm.
elements_list = convert_to_dict("Periodic_Table.csv")
@app.route('/')
def index():
    """Render the element list page with (atomic_number, name) pairs."""
    # Build the pairs directly instead of filling two parallel lists and
    # zipping them afterwards.
    pairs_list = [(element['atomic_number'], element['name'])
                  for element in elements_list]
    return render_template('index.html', pairs=pairs_list, the_title="List of Elements")
@app.route('/element/<num>')
def detail(num):
    """Render the detail page for the element whose atomic number is *num*.

    Responds with 404 when no element matches; the original left
    `elem_dict` unbound and raised NameError for unknown numbers.
    """
    for element in elements_list:
        if element['atomic_number'] == num:
            return render_template('element.html', elem=element, the_title=element['name'])
    abort(404)
# Run the Flask development server when executed directly (not under WSGI).
if __name__ == '__main__':
    app.run(debug=True)
|
from regression_tests import *
# Little endian 64-bit dynamic library
class TestMachoDynamicLE(Test):
    """fileinfo JSON checks for a little-endian x86-64 Mach-O dylib."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_x86_64.dylib',
        args='--json --verbose'
    )

    def setUp(self):
        # Every check below assumes the fileinfo run itself succeeded.
        assert self.fileinfo.succeeded

    def test_analyze_basic_info(self):
        self.assertEqual(self.fileinfo.output['fileFormat'], 'Mach-O')
        self.assertEqual(self.fileinfo.output['fileClass'], '64-bit')
        self.assertEqual(self.fileinfo.output['fileType'], 'Dynamic library')
        self.assertEqual(self.fileinfo.output['architecture'], 'x86-64')
        self.assertEqual(self.fileinfo.output['endianness'], 'Little endian')

    def test_analyze_segments_sections(self):
        self.assertEqual(self.fileinfo.output['declaredNumberOfSegments'], '2')
        self.assertEqual(self.fileinfo.output['declaredNumberOfSections'], '3')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['name'], '__text')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['address'], '0xf50')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['offset'], '0xf50')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][1]['name'], '__unwind_info')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][2]['name'], '__eh_frame')

    def test_analyze_exports(self):
        self.assertEqual(self.fileinfo.output['exportTable']['numberOfExports'], '1')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['address'], '0xf50')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['name'], '__Z9factoriali')
# Big endian 64-bit relocatable binary
class TestMachoObjBE(Test):
    """fileinfo JSON checks for a big-endian PowerPC64 Mach-O object file."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_ppc64.o',
        args='--json --verbose'
    )

    def setUp(self):
        # Every check below assumes the fileinfo run itself succeeded.
        assert self.fileinfo.succeeded

    def test_analyze_basic_info(self):
        self.assertEqual(self.fileinfo.output['fileFormat'], 'Mach-O')
        self.assertEqual(self.fileinfo.output['fileClass'], '64-bit')
        self.assertEqual(self.fileinfo.output['fileType'], 'Relocatable file')
        self.assertEqual(self.fileinfo.output['architecture'], 'PowerPC (big endian, 64-bit mode)')
        self.assertEqual(self.fileinfo.output['endianness'], 'Big endian')

    def test_analyze_segments(self):
        self.assertEqual(self.fileinfo.output['declaredNumberOfSegments'], '1')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['offset'], '0x260')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['sizeInFile'], '0x74')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['sizeInMemory'], '0x74')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['virtualAddress'], '0')

    def test_analyze_sections(self):
        self.assertEqual(self.fileinfo.output['declaredNumberOfSections'], '5')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['name'], '__text')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['address'], '0')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['alignmentInMemory'], '0x4')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['numberOfRelocationEntries'], '5')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['relocationEntriesOffset'], '0x2d4')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['offset'], '0x260')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['sizeInFile'], '0x3c')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['sizeInMemory'], '0x3c')

    def test_analyze_symbols(self):
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['address'], '0x60')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['associatedSectionIndex'], '2')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['name'], '_b')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][1]['name'], '_f')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][2]['name'], '_a')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][3]['name'], '_c')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][4]['name'], '_g')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][5]['associatedSize'], '4')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][5]['name'], 'dyld_stub_binding_helper')

    def test_analyze_exports(self):
        self.assertEqual(self.fileinfo.output['exportTable']['numberOfExports'], '2')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['address'], '0x60')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['name'], '_b')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][1]['name'], '_f')

    def test_analyze_imports(self):
        self.assertEqual(self.fileinfo.output['importTable']['numberOfImports'], '2')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['address'], '0x6c')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['name'], '_g')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['name'], '_c')

    def test_analyze_relocation_tables(self):
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['numberOfRelocations'], '3')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['offset'], '0x24')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['type'], '14')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['index'], '1')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['offset'], '0x1c')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['type'], '12')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['index'], '2')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['offset'], '0x18')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['type'], '3')
# Binary with old style entry point command (LC_UNIXTHREAD) and dynamic symbol table (LC_DYSYMTAB)
class TestAckMachoComplete(Test):
    """fileinfo JSON checks for a 32-bit x86 executable using LC_UNIXTHREAD/LC_DYSYMTAB."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_ack',
        args='--json --verbose'
    )

    def setUp(self):
        # Every check below assumes the fileinfo run itself succeeded.
        assert self.fileinfo.succeeded

    def test_analyze_basic_info(self):
        self.assertEqual(self.fileinfo.output['fileFormat'], 'Mach-O')
        self.assertEqual(self.fileinfo.output['fileClass'], '32-bit')
        self.assertEqual(self.fileinfo.output['fileType'], 'Executable file')
        self.assertEqual(self.fileinfo.output['architecture'], 'x86')
        self.assertEqual(self.fileinfo.output['endianness'], 'Little endian')

    # LC_UNIXTHREAD test
    def test_analyze_entry_point_info(self):
        self.assertEqual(self.fileinfo.output['entryPoint']['address'], '0x1e10')
        self.assertEqual(self.fileinfo.output['entryPoint']['offset'], '0xe10')
        self.assertEqual(self.fileinfo.output['entryPoint']['sectionIndex'], '0')
        self.assertEqual(self.fileinfo.output['entryPoint']['sectionName'], '__text')

    def test_analyze_segments(self):
        self.assertEqual(self.fileinfo.output['declaredNumberOfSegments'], '4')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['offset'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['sizeInFile'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['sizeInMemory'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][0]['virtualAddress'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][1]['offset'], '0')
        self.assertEqual(self.fileinfo.output['segmentTable'][1]['sizeInFile'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][1]['sizeInMemory'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][1]['virtualAddress'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][2]['offset'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][2]['sizeInFile'], '0x1000')
        self.assertEqual(self.fileinfo.output['segmentTable'][3]['offset'], '0x2000')
        self.assertEqual(self.fileinfo.output['segmentTable'][3]['sizeInFile'], '0x168')

    def test_analyze_sections(self):
        self.assertEqual(self.fileinfo.output['declaredNumberOfSections'], '9')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['name'], '__text')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['address'], '0x1e10')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['alignmentInMemory'], '0x10')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['numberOfRelocationEntries'], '0')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['relocationEntriesOffset'], '0')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['offset'], '0xe10')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['sizeInFile'], '0x147')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][0]['sizeInMemory'], '0x147')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][3]['name'], '__cstring')
        self.assertEqual(self.fileinfo.output['sectionTable']['sections'][6]['name'], '__la_symbol_ptr')

    def test_analyze_symbols(self):
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['address'], '0x1e50')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['associatedSectionIndex'], '0')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][0]['name'], 'dyld_stub_binding_helper')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][1]['name'], '__dyld_func_lookup')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][2]['name'], 'dyld__mach_header')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][3]['name'], '_NXArgc')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][4]['name'], '_NXArgv')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][5]['name'], '___progname')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][6]['name'], '__mh_execute_header')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][7]['name'], '_ack')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][8]['name'], '_environ')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][9]['name'], '_main')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][10]['name'], 'start')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][11]['name'], '_exit')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][12]['name'], '_printf')
        self.assertEqual(self.fileinfo.output['symbolTables'][0]['symbols'][13]['name'], '_scanf')

    # LC_DYSYMTAB (imports)
    def test_analyze_exports(self):
        self.assertEqual(self.fileinfo.output['exportTable']['numberOfExports'], '8')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['address'], '0x202c')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['name'], '_NXArgc')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][1]['name'], '_NXArgv')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][2]['name'], '___progname')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][3]['name'], '__mh_execute_header')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][4]['name'], '_ack')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][5]['name'], '_environ')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][6]['name'], '_main')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][7]['name'], 'start')

    # LC_DYSYMTAB test (imports)
    def test_analyze_imports(self):
        self.assertEqual(self.fileinfo.output['importTable']['numberOfImports'], '3')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['address'], '0x201c')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['libraryName'], 'libSystem')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['name'], '_exit')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['name'], '_printf')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][2]['name'], '_scanf')
# Binary file with new style entry point command (LC_LOAD) and dynamic symbol table (LC_DYLD_INFO)
class TestMachoNewStyle(Test):
    """fileinfo JSON checks for a 32-bit x86 executable using LC_MAIN/LC_DYLD_INFO."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_x86',
        args='--json --verbose'
    )

    def setUp(self):
        # Every check below assumes the fileinfo run itself succeeded.
        assert self.fileinfo.succeeded

    def test_analyze_basic_info(self):
        self.assertEqual(self.fileinfo.output['fileFormat'], 'Mach-O')
        self.assertEqual(self.fileinfo.output['fileClass'], '32-bit')
        self.assertEqual(self.fileinfo.output['fileType'], 'Executable file')
        self.assertEqual(self.fileinfo.output['architecture'], 'x86')
        self.assertEqual(self.fileinfo.output['endianness'], 'Little endian')

    # LC_MAIN test
    def test_analyze_entry_point_info(self):
        self.assertEqual(self.fileinfo.output['entryPoint']['address'], '0x1f60')
        self.assertEqual(self.fileinfo.output['entryPoint']['offset'], '0xf60')
        self.assertEqual(self.fileinfo.output['entryPoint']['sectionIndex'], '0')
        self.assertEqual(self.fileinfo.output['entryPoint']['sectionName'], '__text')

    # LC_DYLD_INFO test (imports)
    def test_analyze_imports(self):
        self.assertEqual(self.fileinfo.output['importTable']['numberOfImports'], '2')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['address'], '0x2000')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['libraryName'], 'libSystem')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][0]['name'], 'dyld_stub_binder')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['address'], '0x2008')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['index'], '1')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['libraryName'], 'libSystem')
        self.assertEqual(self.fileinfo.output['importTable']['imports'][1]['name'], '_printf')

    # LC_DYLD_INFO test (exports)
    def test_analyze_exports(self):
        self.assertEqual(self.fileinfo.output['exportTable']['numberOfExports'], '2')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['address'], '0x1000')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][0]['name'], '__mh_execute_header')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][1]['address'], '0x1f60')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][1]['index'], '1')
        self.assertEqual(self.fileinfo.output['exportTable']['exports'][1]['name'], '_main')
class TestMachoARMRelocs(Test):
    """fileinfo relocation-table checks for a 32-bit ARM Mach-O object file."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_reloc_arm32.o',
        args='--json --verbose'
    )

    def test_analyze_relocation_tables(self):
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['numberOfRelocations'], '2')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['offset'], '0x28')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['symbolName'], '_test_function')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['type'], '6')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['index'], '1')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['offset'], '0x1e')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['type'], '6')
class TestMachoARMScattRelocs(Test):
    """fileinfo scattered-relocation checks for a 32-bit ARM Mach-O object file."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_scatt_reloc_arm32.o',
        args='--json --verbose'
    )

    def test_analyze_relocation_tables(self):
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['numberOfRelocations'], '3')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['offset'], '0xe')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['symbolName'], '_NSLog')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['type'], '6')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['index'], '1')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['offset'], '0x8')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][1]['type'], '9')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['index'], '2')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['offset'], '0x2')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][2]['type'], '9')
class TestMachox86Relocs(Test):
    """fileinfo relocation-table checks for a 32-bit x86 Mach-O object file."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_reloc_x86.o',
        args='--json --verbose'
    )

    def test_analyze_relocation_tables(self):
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['numberOfRelocations'], '1')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['index'], '0')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['offset'], '0x40')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['symbolName'], '_test_function')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][0]['type'], '0')
class TestMachox64Relocs(Test):
    """fileinfo relocation-table checks for an x86-64 Mach-O object file."""
    settings = TestSettings(
        tool='fileinfo',
        input='macho_reloc_x86_64.o',
        args='--json --verbose'
    )

    def test_analyze_relocation_tables(self):
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['numberOfRelocations'], '20')
        # Spot-check one representative entry out of the twenty.
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][18]['index'], '18')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][18]['offset'], '0x115')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][18]['symbolName'], '___dso_handle')
        self.assertEqual(self.fileinfo.output['relocationTables'][0]['relocations'][18]['type'], '1')
|
from django import forms
class HashtagForm(forms.Form):
    """Single-field form for entering a hashtag; the text input autofocuses."""
    hashtag = forms.CharField(widget=forms.TextInput(attrs={'autofocus': 'autofocus', 'id': 'hashtag'}))
|
# Python tuple demo.
# NOTE: renamed the tuple from `list`, which shadowed the builtin of the
# same name, and the original crashed on the final item assignment.
sample_tuple = ("chenweilong", 1, 3.1415926, "longbaba")
tlist = ("tlist", 123)
print(sample_tuple)       # the complete tuple
print(sample_tuple[0])    # first element
print(sample_tuple[0:3])  # first through third element
print(sample_tuple[1:])   # second element to the end
print(tlist * 2)          # repetition builds a new tuple
print(sample_tuple + tlist)  # concatenation builds a new tuple
# Tuples are immutable (read-only): item assignment raises TypeError.
# Demonstrate the error without crashing the script.
try:
    sample_tuple[0] = "longlong"
except TypeError as exc:
    print("tuples are read-only:", exc)
from random import randrange
def shuffle(myList):
    """Return a new list with the items of *myList* in random order.

    The input list is not modified. Uses a Fisher-Yates shuffle, which is
    O(n), instead of the original rejection-sampling loop (expected
    O(n log n) random draws, unbounded worst case) and drops the leftover
    debug prints.
    """
    newList = myList[:]
    for i in range(len(newList) - 1, 0, -1):
        # Pick a partner from the untouched prefix (inclusive of i).
        j = randrange(0, i + 1)
        newList[i], newList[j] = newList[j], newList[i]
    return newList
# Demo: shuffle a small list and print the permutation.
myList = [1,2,3,4]
print(shuffle(myList))
import torch as t
import torch.nn as nn
class FocalLoss(nn.Module):
    """Binary focal loss; alpha weights positives, gamma down-weights easy examples.

    The summed loss is normalised by the number of positive targets
    (at least 1). Removes the debug prints the original left in forward().
    """

    def __init__(self, alpha=0.25, gamma=2.0):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, preds, targets):
        # preds are probabilities; targets are 1.0 exactly at positives.
        pos_mask = (targets == 1).float()
        # Normalise by at least one positive to avoid division by zero.
        pos_num = t.clamp(pos_mask.sum(), min=1.0)
        # Clamp keeps log() finite at the extremes.
        preds = t.clamp(preds, min=1e-8, max=1 - 1e-8)
        pos_weights = t.pow(1 - preds, self.gamma) * self.alpha
        pos_loss = -t.log(preds) * pos_weights
        neg_weights = t.pow(preds, self.gamma) * (1 - self.alpha)
        neg_loss = -t.log(1 - preds) * neg_weights
        loss = t.where(pos_mask > 0, pos_loss, neg_loss)
        return loss.sum() / pos_num
"""
class BinaryCrossEntropy(nn.Module):
def __init__(self):
super(BinaryCrossEntropy, self).__init__()
def forward(self, preds, targets, pos_mask):
print("cnt_max:", preds.max())
print("cnt_min:", preds.min())
print("\n")
pos_preds = t.clamp(preds[pos_mask], min=1e-8, max=1-1e-8)
pos_num = len(pos_preds)
if pos_num < 1:
pos_num = 1
#print("num", pos_num)
pos_targets = targets[pos_mask]
loss = - t.log(pos_preds) * pos_targets
loss = loss.sum() / pos_num
return loss
"""
#regLoss use IouLoss and CenternessLoss use BCE in paper
"""
class GiouLoss(nn.Module):
def __init__(self):
super(GiouLoss, self).__init__()
def forward(self, preds, targets):
pass
"""
class SmoothL1(nn.Module):
    """Smooth L1 (Huber, beta=1) loss over the locations selected by pos_mask.

    The sum is normalised by the number of selected elements (at least 1).
    Removes the debug prints the original left in forward(); those prints
    also crashed (`max()` of an empty tensor) when no location was positive.
    """

    def __init__(self):
        super(SmoothL1, self).__init__()

    def forward(self, preds, targets, pos_mask):
        preds = preds[pos_mask]
        targets = targets[pos_mask]
        diff = t.abs(preds - targets)
        # Quadratic near zero, linear in the tails.
        loss = t.where(diff <= 1, 0.5 * t.pow(diff, 2.0), diff - 0.5)
        pos_num = max(len(preds), 1)
        return loss.sum() / pos_num
class loss(nn.Module):
    """Combined detection loss: focal classification + centerness + regression.

    NOTE(review): the paper uses BCE for centerness and IoU/GIoU for
    regression; this implementation substitutes SmoothL1 for both.
    """

    def __init__(self, giou=False):
        super(loss, self).__init__()
        self.giou = giou
        if giou:
            # GiouLoss is commented out above; the original raised a
            # NameError here. Fail with a clear message instead.
            raise NotImplementedError("GIoU regression loss is not implemented")
        self.regLoss = SmoothL1()
        self.clsLoss = FocalLoss()
        self.cntLoss = SmoothL1()

    def forward(self, preds, targets):
        cls, cnt, reg = preds
        cls_gt, cnt_gt, reg_gt = targets
        # A location is positive when any class bit is set in its one-hot target.
        pos_mask = cls_gt.sum(-1) > 0
        cls_loss = self.clsLoss(cls, cls_gt)
        cnt_loss = self.cntLoss(cnt, cnt_gt, pos_mask)
        reg_loss = self.regLoss(reg, reg_gt, pos_mask)
        return cls_loss + cnt_loss + 0.01 * reg_loss
|
# Installed Package Python(3.7) ...!
#
# import numpy
# import matplotlib
# import cv2
# import scipy
# import skimage |
from tree import *
class Node:
    """Binary-tree node holding a key and two (initially empty) child links."""

    def __init__(self, key):
        self.data = key
        self.left = self.right = None
def childrenSum(root):
    """Return 1 if every internal node's key equals the sum of its children's
    keys, else 0. Empty trees and leaves satisfy the property trivially."""
    # Leaves and empty subtrees satisfy the property by definition.
    if root is None or (root.left is None and root.right is None):
        return 1
    total = 0
    if root.left is not None:
        total += root.left.data
    if root.right is not None:
        total += root.right.data
    # Node must match the sum and both subtrees must satisfy it recursively.
    if root.data == total and childrenSum(root.left) and childrenSum(root.right):
        return 1
    return 0
# Build the sample tree and check the children-sum property:
#        10
#       /  \
#      8    2
#     / \  /
#    3  5 2
root = Node(10)
root.left = Node(8)
root.right = Node(2)
root.left.left = Node(3)
root.left.right = Node(5)
root.right.left = Node(2)
print(childrenSum(root))  # expected: 1 (every node equals its children's sum)
|
class DNode:
    """Doubly linked list node with a value and next/prev links."""

    def __init__(self, val):
        self.val = val
        self.next = None
        self.prev = None

    def traverse(self):
        """Return the list of values from this node through the tail."""
        vals = []
        node = self
        while node is not None:
            vals.append(node.val)
            node = node.next
        return vals

    def delete(self):
        """Unlink this node from its neighbours and clear its fields.

        BUG FIX: the original only unlinked when BOTH prev and next were
        non-None, so deleting the head or tail never detached the node and
        left a neighbour pointing at the cleared node; the inner
        `prev is None` / `next is None` branches were unreachable.
        """
        if self.prev is not None:
            self.prev.next = self.next
        if self.next is not None:
            self.next.prev = self.prev
        self.val = None
        self.next = None
        self.prev = None
class Node:
    """Singly linked list node; list operations are methods on the head node."""

    def __init__(self, val):
        self.val = val
        self.next = None

    def traverse(self):
        """Print each value from this node through the tail, one per line."""
        node = self
        while node is not None:
            print(node.val)
            node = node.next

    def insert(self, val):
        """Prepend *val* and return the new head node."""
        new_node = Node(val)
        new_node.next = self
        return new_node

    def delete(self, node):
        """Unlink *node* from the list headed at self and clear its fields.

        NOTE(review): deleting the head itself only clears its fields —
        the caller's head reference cannot be rebound from here, matching
        the original behaviour.
        """
        if node is None:
            return
        if node is not self:
            prev = self
            # Walk to the node just before the one being removed.
            while prev.next is not node:
                prev = prev.next
            prev.next = node.next
        node.next = None
        node.val = None

    def reverse(self):
        """Reverse the list in place and return the new head.

        Rewritten as the standard iterative pointer reversal; the original
        duplicated the swap logic before and inside the loop.
        """
        prev = None
        curr = self
        while curr is not None:
            nxt = curr.next
            curr.next = prev
            prev = curr
            curr = nxt
        return prev
def remove_duplicates(head):
    """Remove later duplicate values from a singly linked list, in place.

    Keeps the first occurrence of each value and returns the head.
    BUG FIX: the original called an undefined `remove_node` helper
    (NameError on the first duplicate) and crashed on an empty list.
    """
    if head is None:
        return head
    seen = {head.val}
    prev = head
    curr = head.next
    while curr is not None:
        if curr.val in seen:
            # Unlink the duplicate; prev stays put.
            prev.next = curr.next
        else:
            seen.add(curr.val)
            prev = curr
        curr = prev.next
    return head
def get_kth(head, k):
    """Return the value stored in the k-th node (1-indexed) of the list."""
    node = head
    for _ in range(k - 1):
        node = node.next
    return node.val
def get_nodes(head, val):
    """Return every node in the list whose value equals *val*, in order."""
    matches = []
    node = head
    while node is not None:
        if node.val == val:
            matches.append(node)
        node = node.next
    return matches
def list_length(head):
    """Return the number of nodes in the list (0 for an empty list).

    The original started the count at 1 and dereferenced head.next, so it
    raised AttributeError for an empty (None) list.
    """
    length = 0
    node = head
    while node is not None:
        length += 1
        node = node.next
    return length
|
import time
from multiprocessing import Process
import os
import config
from messagebroker import MessageBroker
from worker import Worker
CICLE_SLEEP_TIME = config.GENERAL['cycle_sleep_time']
MAX_PROCESS = config.GENERAL['max_process']
MAX_GENERAL_ERRORS = config.ERROR_HANDLER['max_general_errors']
GENERAL_ERROR_TIMEOUT = config.ERROR_HANDLER['general_errors_retry_timeout']
EXTRACTED_PATH = config.GENERAL["extracted_path"]
TRANSFORMED_PATH = config.GENERAL["transformed_path"]
def transformer_sum_def(x, y):
    """Return the sum of *x* and *y* (sample transform function)."""
    result = x + y
    return result
# pre execute tasks function
def pre_execute():
    """Ensure the data directories exist before the workers start.

    Uses os.makedirs(..., exist_ok=True) for both paths: the original mixed
    os.mkdir (fails when parents are missing) with os.makedirs, and its
    isdir-then-create pattern raced when several processes started at once.
    """
    os.makedirs(EXTRACTED_PATH, exist_ok=True)
    os.makedirs(TRANSFORMED_PATH, exist_ok=True)
# Transformer worker
# runs the transformer flow:
# 1 - processed files extracted
# 2 - warn progress to loader service
def worker(ch, method, properties, body, total_errors=0):
    """Consume one queue message: ack it, notify the loader, retry on failure.

    Retries up to MAX_GENERAL_ERRORS times, sleeping GENERAL_ERROR_TIMEOUT
    seconds between attempts. After the limit, the message is acked and dropped.
    """
    message_broker = MessageBroker()
    # BUG FIX: the original named this local `worker`, shadowing the function
    # itself, so the recursive retry below would have called the Worker
    # instance instead of this function.
    task_worker = Worker()
    # extract file name from the message queue
    file = body.decode("utf-8").strip()
    print(' ==========> [%s] Cycle start -' % os.getpid())
    if total_errors >= MAX_GENERAL_ERRORS:
        print('max error reached')
        ch.basic_ack(delivery_tag=method.delivery_tag)
        return
    # flow
    try:
        # task_worker.transform(file)
        ch.basic_ack(delivery_tag=method.delivery_tag)
        message_broker.warn_loader(file)
    # Fail Safe, in case of Exceptions try again with
    # the MAX_GENERAL_ERRORS limit
    except Exception as e:
        total_errors += 1
        print('A Exception occured[%s]: Waiting to recover %s seconds' % (
            e, GENERAL_ERROR_TIMEOUT))
        time.sleep(GENERAL_ERROR_TIMEOUT)
        worker(ch, method, properties, body, total_errors)
    print(' ==========> [%s] Cycle end -' % os.getpid())
# worker
def main():
    """Entry point for one consumer process: attach to the broker and consume."""
    broker = MessageBroker()
    # Block on the extractor queue, dispatching messages to worker().
    print('[*] Worker [%s] consuming queue... ' % os.getpid())
    broker.consume(callback=worker)
def daemon():
    """Spawn MAX_PROCESS consumer processes and wait for all of them."""
    process_list = []
    for _ in range(MAX_PROCESS):
        proc = Process(target=main)
        proc.start()
        process_list.append(proc)
    for proc in process_list:
        proc.join()
    print('== ENDED ==')
    print(process_list)
# main service entry point
if __name__ == "__main__":
pre_execute()
daemon()
|
def rho(decay_str):
    """Rewrite space-delimited ' rho ' / ' rho0 ' tokens to ' rho(770) '.

    Repeats until neither token remains: adjacent tokens share a delimiter
    space, so a single str.replace pass can miss every other occurrence.
    """
    while ' rho ' in decay_str or ' rho0 ' in decay_str:
        decay_str = decay_str.replace(" rho ", " rho(770) ")
        decay_str = decay_str.replace(" rho0 ", " rho(770) ")
    return decay_str
"""
jsonrpc11base tests
"""
import json
import jsonrpc11base
from jsonrpc11base.service_description import ServiceDescription
from jsonrpc11base.errors import APIError
import pytest
import os
class MyError(APIError):
    """Example APIError subclass (code 123) used by the service fixture.

    NOTE(review): does not call super().__init__; relies on the class-level
    `code`/`message` attributes — confirm against APIError's contract.
    """
    code = 123
    message = "My error"

    def __init__(self, id):
        # Structured detail carried alongside code/message in responses.
        self.error = {
            'id': id
        }
# Directory holding the JSON schemas used to validate params and results.
SCHEMA_DIR = os.path.join(os.path.dirname(__file__), '../data/schema/api')


@pytest.fixture(scope='module')
def service():
    """Build the JSON-RPC 1.1 service under test.

    method1 echoes its params back; method2 always raises MyError(456).
    Param and result validation are enabled against SCHEMA_DIR.
    """
    service_description = ServiceDescription(
        'Test Service',
        'https://github.com/kbase/kbase-jsonrpc11base/test',
        summary='An test JSON-RPC 1.1 service',
        version='1.0'
    )
    # Our service instance
    service = jsonrpc11base.JSONRPCService(
        description=service_description,
        schema_dir=SCHEMA_DIR,
        validate_params=True,
        validate_result=True
    )

    # Add testing methods to the service.
    # Note that each method needs param schema in data/schema
    def method1(params, options):
        # Echo the params back unchanged.
        return params

    def method2(params, options):
        # Always fail with the custom API error.
        raise MyError(456)

    service.add(method1)
    service.add(method2)
    return service
# -------------------------------
# Ensure acceptable forms all work
# Happy path testing
# Note id omitted from all requests, to
# keep them simpler
# -------------------------------
# Test direct usage of call which deals with the
# actual payload, a JSON string.
#
# In most cases we test using call_py, which consumes the
# parsed request.
# Test all forms of params.
# Params may be any valid JSON, so we have a method to
# handle each one!
def test_array_param(service):
    """
    A valid call through the raw JSON payload interface round-trips its
    params: method1 echoes them back in the result.
    """
    params = {
        'version': '1.1',
        'method': 'method1',
        'params': {
            'param1': 'hello'
        }
    }
    res_str = service.call(json.dumps(params))
    result = json.loads(res_str)
    assert result['version'] == "1.1"
    # Removed a leftover debug print of the full result payload.
    assert result['result']['param1'] == 'hello'
def test_api_error(service):
    """
    A method that raises an APIError subclass (method2 raises MyError(456))
    should surface it in the response's `error` field with its name,
    message, code, and structured detail.
    """
    res = service.call_py({
        "version": "1.1",
        "method": "method2",
        "params": {
            "param1": "hi"
        }
    })
    assert res['error']['name'] == 'APIError'
    assert res['error']['message'] == 'My error'
    assert res['error']['code'] == 123
    assert res['error']['error']['id'] == 456
# API Errors
def test_own_api_error():
    """A raised MyError serialises its code/message/detail via to_json()."""
    with pytest.raises(MyError) as me:
        raise MyError(456)
    err = me.value.to_json()
    assert err['name'] == 'APIError'
    assert err['code'] == 123
    assert err['message'] == 'My error'
    assert err['error']['id'] == 456
def test_base_api_error():
    """ Although not recommended, one may use the APIError directly"""
    with pytest.raises(APIError) as apie:
        raise APIError()
    err = apie.value.to_json()
    # Base-class defaults: code 1, generic message.
    assert err['name'] == 'APIError'
    assert err['code'] == 1
    assert err['message'] == 'API error'
|
import unittest
from ray.rllib.algorithms.registry import (
POLICIES,
get_policy_class,
get_policy_class_name,
ALGORITHMS_CLASS_TO_NAME,
ALGORITHMS,
)
class TestPolicies(unittest.TestCase):
    """Consistency checks for RLlib's policy and algorithm registries."""

    def test_load_policies(self):
        # Every registered policy name must resolve to a class.
        for name in POLICIES.keys():
            self.assertIsNotNone(get_policy_class(name))

    def test_get_eager_traced_class_name(self):
        # Tracing wraps the policy class but must keep its registry name.
        from ray.rllib.algorithms.ppo.ppo_tf_policy import PPOTF2Policy

        traced = PPOTF2Policy.with_tracing()
        self.assertEqual(get_policy_class_name(traced), "PPOTF2Policy")

    def test_registered_algorithm_names(self):
        """All RLlib registered algorithms should have their name listed in the
        registry dictionary."""
        for class_name in ALGORITHMS_CLASS_TO_NAME.keys():
            registered_name = ALGORITHMS_CLASS_TO_NAME[class_name]
            algo_class, _ = ALGORITHMS[registered_name]()
            self.assertEqual(class_name, algo_class.__name__)
if __name__ == "__main__":
    # Allow running this unittest module directly through pytest.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
# Fixed: this import was repeated three times; once is enough.
from collections import defaultdict
class UCTNode:
    def __init__(self, state, parent=None):
        """
        - n_visits is the number of visits in this node
        - n_a is a dictionary {key, value} where key is the action taken from
          this node and value is the number of times this action was chosen.
        - q_a is a dictionary {key, value} where key is the action taken from
          this node and value is the mean reward of the simulation that passed
          through this node and used action 'a'.
        - p_a is a dictionary {key, value} where key is the action taken from
          this node and value is the probability given by the trained network
          to choose this action. Value updated in the expand_children() method
          from the MCTS class.
        - parent is a Node object. 'None' if this node is the root of the tree.
        - children is a dictionary {key, value} where key is the action
          taken from this node and value is the resulting node after applying
          the action.
        - action_taken is the action taken from parent to get to this node.
        """
        self.state = state
        self.n_visits = 0
        # defaultdicts return 0 for any action key not seen yet; the
        # original code first built plain dicts and then rewrapped them,
        # and seeded p_a from q_a (copy-paste typo) -- both fixed here.
        self.n_a = defaultdict(int)
        self.q_a = defaultdict(int)
        self.p_a = defaultdict(int)
        self.parent = parent
        self.children = {}
        self.action_taken = None

    def is_expanded(self):
        """Return True when at least one child has been attached."""
        return len(self.children) > 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican site configuration for the San Antonio Developers site.
AUTHOR = 'Omar Quimbaya'
SITENAME = 'San Antonio Developers'
# Left empty while developing; document-relative URLs are used instead.
SITEURL = ''
THEME = 'themes/brutalist'
# Directory containing the site's source content.
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (
    ('Slack', 'https://join.slack.com/t/sanantoniodevs/shared_invite/MjE2ODI0NDQ4NjkwLTE1MDA5MDgwODctMzcxMmZhNjE0Zg'),
)
# Social widget
SOCIAL = (('Twitter', 'https://twitter.com/SA_Devs'),
          )
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Markdown rendering options: syntax highlighting, extras, metadata.
MARKDOWN = {
    'extension_configs': {
        'markdown.extensions.codehilite': {'css_class': 'highlight'},
        'markdown.extensions.extra': {},
        'markdown.extensions.meta': {},
    },
    'output_format': 'html5',
}
|
from colorama import init, Back, Fore, Style
class ColourfulPrint:
    """Colour formatting/printing helpers built on colorama.

    `format_*` methods return a styled string; `print_*` methods print
    directly. Styling is reset automatically after each print.
    """

    def __init__(self):
        # autoreset restores the terminal's default style after each print.
        init(autoreset=True)

    @staticmethod
    def format_p1_name(text):
        return "".join((Fore.CYAN, Style.BRIGHT, text))

    @staticmethod
    def format_p2_name(text):
        return "".join((Fore.RED, Style.BRIGHT, text))

    @staticmethod
    def format_standard_text(text):
        return "".join((Fore.WHITE, Style.BRIGHT, text))

    @staticmethod
    def format_move_rock(text):
        return "".join((Back.GREEN, Fore.YELLOW, text))

    @staticmethod
    def format_move_paper(text):
        return "".join((Back.WHITE, Fore.BLACK, Style.DIM, text))

    @staticmethod
    def format_move_scissors(text):
        return "".join((Back.MAGENTA, Style.BRIGHT, text))

    @staticmethod
    def print_draw(text):
        print("".join((Back.YELLOW, Fore.BLACK, text)))

    @staticmethod
    def print_p1_wins(text):
        print("".join((Back.BLUE, Style.BRIGHT, text)))

    @staticmethod
    def print_p2_wins(text):
        print("".join((Back.RED, Style.BRIGHT, text)))

    @staticmethod
    def print_game_text(text):
        print("".join((Back.GREEN, Fore.WHITE, Style.BRIGHT, text)))

    @staticmethod
    def print_warning(text):
        print("".join((Back.WHITE, Fore.RED, text)))
|
'''
Given a non-negative number - N. Print N!
Input Format
Input contains a number - N.
Constraints
0 <= N <= 10
Output Format
Print Factorial of N.
Sample Input 0
5
Sample Output 0
120
'''
# Read N from stdin and print N! (constraints guarantee 0 <= N <= 10).
number = int(input())
if number < 0:
    # Out-of-spec input: mirror the original fallback output.
    print("0")
else:
    result = 1
    term = number
    # Multiply downward: N * (N-1) * ... * 2; empty/one-term cases stay 1.
    while term > 1:
        result *= term
        term -= 1
    print(result)
import numpy
import elect
import bound_reader
# Smoke test: exercise each function exported by the compiled `elect`
# extension module (plus `bound_reader`), printing results to stdout
# with blank lines between them for readability.
print()
print(elect.add(1, 2))
print()
print(elect.magic_array())
print()
print(elect.magic_decade())
print()
print(elect.magic_matrix())
print()
# Same cube shape with a float fill value vs an int fill value.
print(elect.random_cube(3,3,3,3.45))
print()
print(elect.random_cube(3,3,3,3))
print()
print(elect.subtract(3.34, 7.98))
print()
print(elect.subtract(3, 2))
print()
print(elect.binary_array())
print()
print('Get ready for this: they\'re moving it.', bound_reader.add(3, 6))
print()
|
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page."""
    return render_template('./index.html')
@app.route('/index.html')
def home2():
    """Alias route so /index.html renders the same landing page as /."""
    return home()
@app.route('/<file_name>')
def page_loader(file_name):
    """Render an arbitrary template by its filename.

    NOTE(review): ``file_name`` is user-controlled. Jinja's loader confines
    lookups to the template folder, but unknown names will raise
    TemplateNotFound (a 500) -- consider whitelisting the servable pages.
    """
    return render_template(f'./{file_name}')
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
    """Persist a contact-form POST and redirect; reject other methods."""
    if request.method != 'POST':
        return 'try again'
    form_data = request.form.to_dict()
    persist_contact(form_data)
    return redirect('thankyou.html')
def persist_contact(data):
    """Append one contact-form submission to the flat-file database.

    Fields are pipe-separated, one record per line.
    """
    fields = (data.get('email'), data.get('subject'), data.get('message'))
    with open('./database.txt', 'a') as db:
        db.write('|'.join(str(field) for field in fields) + '\n')
|
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages, setup
PROJECT_URL = 'https://github.com/melexis/warnings-plugin'

# Runtime dependencies.
requires = [
    'junitparser>=1.0.0,<2.0',
    'ruamel.yaml>=0.17.21',
]

setup(
    name='mlx.warnings',
    url=PROJECT_URL,
    # Version is derived from git tags by setuptools_scm and written into
    # the package so it is importable at runtime.
    use_scm_version={
        'write_to': 'src/mlx/__warnings_version__.py'
    },
    setup_requires=['setuptools_scm'],
    author='Bavo Van Achte',
    author_email='bavo.van.achte@gmail.com',
    description='Command-line alternative for https://github.com/jenkinsci/warnings-plugin. '
                'Useable with plugin-less CI systems.',
    long_description=open("README.rst").read(),
    long_description_content_type='text/x-rst',
    project_urls={
        'Documentation': 'https://melexis.github.io/warnings-plugin',
        'Source': 'https://github.com/melexis/warnings-plugin',
        'Tracker': 'https://github.com/melexis/warnings-plugin/issues',
    },
    zip_safe=False,
    license='Apache License, Version 2.0',
    platforms='any',
    # Source lives under src/ (the "src layout").
    packages=find_packages('src'),
    package_dir={'': 'src'},
    entry_points={'console_scripts': ['mlx-warnings = mlx.warnings:main']},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    install_requires=requires,
    namespace_packages=['mlx'],
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        # 'Programming Language :: Python :: Implementation :: CPython',
        # 'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    keywords=['Gitlab CI', 'warnings', 'CI'],
)
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from apps.courses.models import Evaluation
__all__ = (
"notify_user_about_new_evaluation",
)
# pylint: disable=unused-argument
# `sender` and **kwargs are required by Django's signal API even though
# this handler does not use them.
@receiver(post_save, sender=Evaluation)
def notify_user_about_new_evaluation(
    sender: Evaluation,
    instance: Evaluation,
    **kwargs,
) -> None:
    """Notify the solution's owner after an Evaluation is saved.

    NOTE(review): post_save also fires on updates -- there is no
    ``created`` check here, so owners are notified on every save;
    confirm that is intended.

    Args:
        sender: Evaluation model class.
        instance: the saved Evaluation instance.
        **kwargs: extra signal keyword arguments (unused).
    """
    channel_layer = get_channel_layer()
    solution = instance.solution
    evaluator = str(instance.owner)
    user_pk = solution.owner.pk
    # Push the event onto the user's personal channel-layer group; a
    # consumer handler named "notification_message" delivers it.
    async_to_sync(channel_layer.group_send)(
        f"user_{user_pk}",
        {
            "type": "notification_message",
            "task_title": solution.task.title,
            "evaluator": evaluator,
            "user_id": user_pk,
        }
    )
|
import numpy as np
import cvxpy as cp
import sys
import os
import json
# Module-level setup for the game MDP: state space, actions, rewards,
# transition success probabilities, and an initial random policy.
MINM = -100000000
#Hyperparameters
DELTA = 0.001
GAMMA = 0.999
#Define all pos (IJ's position: Center, North, South, East, West)
all_pos=[]
all_pos.append('C')
all_pos.append('N')
all_pos.append('S')
all_pos.append('E')
all_pos.append('W')
#Define all MM states (Dormant / Ready)
all_states=[]
all_states.append('D')
all_states.append('R')
max_mat = 3
max_arrow = 4
max_health = 5
STEP_COST = -20
# 5 positions * 3 materials * 4 arrows * 2 MM states * 5 health levels
total_states = 600
#Reward (step cost) for each action
action_reward = {
    'UP': STEP_COST,
    'LEFT': STEP_COST,
    'DOWN': STEP_COST,
    'RIGHT': STEP_COST,
    'STAY': STEP_COST,
    'SHOOT': STEP_COST,
    'HIT': STEP_COST,
    'CRAFT': STEP_COST,
    'GATHER': STEP_COST,
    'NONE': STEP_COST
}
#Define rewards for all states
rewards = {}
for s in all_pos:
    rewards[s] = 0
#Dictionary of possible actions per position.
actions = {
    'W':('STAY', 'RIGHT', 'SHOOT'),
    'N':('STAY', 'DOWN', 'CRAFT'),
    'E':('STAY', 'LEFT', 'SHOOT', 'HIT'),
    'S':('UP', 'STAY', 'GATHER'),
    'C':('STAY', 'UP', 'DOWN', 'RIGHT', 'LEFT', 'SHOOT', 'HIT')
}
#Dictionary for integer mapping of pos of IJ
pos_map = {
    'C' : 0,
    'N' : 1,
    'S' : 2,
    'E' : 3,
    'W' : 4
}
#Dictionary for integer mapping of states of MM
state_map = {
    'D' : 0,
    'R' : 1
}
#Dictionary for integer mapping of actions
action_map = {
    'UP': 0,
    'LEFT': 1,
    'DOWN': 2,
    'RIGHT': 3,
    'STAY': 4,
    'SHOOT': 5,
    'HIT': 6,
    'CRAFT': 7,
    'GATHER': 8,
    'NONE': 9
}
# Placeholder container for the LP solver's outputs.
final_dict = {
    "a" : "Empty",
    "r" : "Empty",
    "alpha" : "Empty",
    "x" : "Empty",
    "policy" : "Empty",
    "objective" : "Empty",
}
#Define an initial (random) policy per position
policy={}
for s in actions.keys():
    policy[s] = np.random.choice(actions[s])
print(policy)
#Define movement success probabilities per position
# (E and W are deterministic; elsewhere moves succeed with p=0.85)
probability = {}
for s in all_pos:
    if s == 'E' or s == 'W':
        probability[s] = 1
    else:
        probability[s] = 0.85
print(probability)
#Define fail action (destination on a failed move) for states
fail = {}
for s in all_pos:
    if s == 'E' or s == 'W':
        fail[s] = s
    else:
        fail[s] = 'E'
print(fail)
# convert tuple to number
def tupletonum(p,m,a,s,h):
    """Flatten a (position, materials, arrows, MM state, MM health) tuple
    into a single state number via mixed-radix (3,4,2,5) encoding."""
    return (((p * 3 + m) * 4 + a) * 2 + s) * 5 + h
# function to convert tuple to state values
def numtotuple(num):
    """Invert tupletonum: decode a non-negative state number into its
    (p, m, a, s, h) components by peeling off each radix in turn."""
    rest, h = divmod(num, 5)
    rest, s = divmod(rest, 2)
    rest, a = divmod(rest, 4)
    rest, m = divmod(rest, 3)
    p = rest % 5
    return (p,m,a,s,h)
def prob(a,p,m,arr,s,h,p1,m1,arr1,s1,h1):
    """Transition probability P(s' | s, a) for the game MDP.

    Parameters:
        a  -- action name ('UP','DOWN','LEFT','RIGHT','STAY','SHOOT',
              'HIT','CRAFT','GATHER','NONE')
        p, m, arr, s, h      -- current position, materials, arrows,
                                MM state ('D'/'R'), MM health level
        p1, m1, arr1, s1, h1 -- the same components for the candidate
                                successor state

    Returns the probability of the transition, or 0 when it is
    impossible. Internally `var` tracks the special MM-attack case:
    var == 1 means MM went Ready -> Dormant (an attack), and var == 2
    means IJ is in the blast zone ('C'/'E') and the attack resolves
    (arrows reset to 0, MM heals), in which case IJ's own action has
    no effect on position/arrows/health.
    """
    p_MM = 0
    var = 0
    # MM state transition probabilities.
    if s == 'D' and s1 == 'R':
        p_MM = 0.2
    elif s == 'D' and s1 == 'D':
        p_MM = 0.8
    elif s == 'R' and s1 == 'R':
        p_MM = 0.5
    elif s == 'R' and s1 == 'D':
        p_MM = 0.5
        var = 1  # Ready -> Dormant: MM attacks.
    # MM attack hits IJ when IJ stands at Center or East.
    if var == 1 and (p == 'C' or p == 'E'):
        if arr1!=0 or m!=m1:
            return 0
        elif (h+1)!=h1 and (h!=4 or h1!=4):
            return 0
        # (h+1)==h1 or (h==4 and h1==4)
        else:
            var = 2
    if a == 'UP':
        if m!=m1 or (var!=2 and arr!=arr1) or (var!=2 and h!=h1):
            return 0
        if p == 'S':
            if p1 == 'C':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        elif p == 'C':
            # In the attack-resolved case IJ is pushed back / stays put.
            if var == 2:
                if p1 == p:
                    return p_MM
                else:
                    return 0
            if p1 == 'N':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'DOWN':
        if m!=m1 or (var!=2 and arr!=arr1) or (var!=2 and h!=h1):
            return 0
        if p == 'N':
            if p1 == 'C':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        elif p == 'C':
            if var == 2:
                if p1 == p:
                    return p_MM
                else:
                    return 0
            if p1 == 'S':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'LEFT':
        if m!=m1 or (var!=2 and arr!=arr1) or (var!=2 and h!=h1):
            return 0
        if p == 'E':
            if var == 2:
                if p1 == p:
                    return p_MM
                else:
                    return 0
            if (p1 == 'C'):
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        elif p == 'C':
            if var == 2:
                if p1 == p:
                    return p_MM
                else:
                    return 0
            if p1 == 'W':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'RIGHT':
        if m!=m1 or (var!=2 and arr!=arr1) or (var!=2 and h!=h1):
            return 0
        if p == 'W':
            if p1 == 'C':
                return probability[p]*p_MM
            elif p1 == fail[p]:
                return (1-probability[p])*p_MM
            else:
                return 0
        elif p == 'C':
            if var == 2:
                if p1 == p:
                    return p_MM
                else:
                    return 0
            # Moving right from Center is deterministic (lands on East).
            if p1 == 'E':
                return 1*p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'STAY':
        if m!=m1 or (var!=2 and arr!=arr1) or (var!=2 and h!=h1):
            return 0
        if var == 2 and (p == 'C' or p == 'E'):
            if p1 == p:
                return p_MM
            else:
                return 0
        if p1 == p:
            # print('reached 1')
            return probability[p]*p_MM
        elif p1 == fail[p]:
            # print('reached 2')
            return (1-probability[p])*p_MM
        else:
            return 0
    elif a == 'SHOOT':
        if p!=p1 or m!=m1:
            return 0
        if var == 2 and (p == 'C' or p == 'E'):
            if p == 'C':
                return p_MM
            elif p == 'E':
                return p_MM
        # Shooting consumes one arrow; hit chance depends on position.
        if arr == arr1+1 and (p == 'C' or p == 'E' or p == 'W'):
            if p == 'C':
                if h == h1+1:
                    return 0.5*p_MM
                elif h == h1:
                    return 0.5*p_MM
                else:
                    return 0
            elif p == 'E':
                if h == h1+1:
                    return 0.9*p_MM
                elif h == h1:
                    return 0.1*p_MM
                else:
                    return 0
            elif p == 'W':
                if h == h1+1:
                    return 0.25*p_MM
                elif h == h1:
                    return 0.75*p_MM
                else:
                    return 0
        else:
            return 0
    elif a == 'HIT':
        if p!=p1 or m!=m1 or (var!=2 and arr!=arr1):
            return 0
        if var == 2 and (p == 'C' or p == 'E'):
            if p == 'C':
                return p_MM
            elif p == 'E':
                return p_MM
        # Blade hit: removes 2 health levels on success (clamped at 0).
        if p == 'C':
            if h == h1+2 or (h == h1+1 and h1 == 0):
                return 0.1*p_MM
            elif h == h1:
                return 0.9*p_MM
            else:
                return 0
        elif p == 'E':
            if h == h1+2 or (h == h1+1 and h1 == 0):
                return 0.2*p_MM
            elif h == h1:
                return 0.8*p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'CRAFT':
        if p!=p1 or h!=h1 or m==0:
            return 0
        # Crafting consumes one material and yields 1-3 arrows (capped at 3).
        if p == 'N' and m>=1 and m == m1+1:
            if arr == 0:
                if arr+1 == arr1:
                    return 0.5*p_MM
                elif arr+2 == arr1:
                    return 0.35*p_MM
                elif arr+3 == arr1:
                    return 0.15*p_MM
                else:
                    return 0
            elif arr == 1:
                if arr+1 == arr1:
                    return 0.5*p_MM
                elif arr+2 == arr1:
                    return 0.5*p_MM
                else:
                    return 0
            elif arr == 2:
                if arr+1 == arr1:
                    return p_MM
                else:
                    return 0
            elif arr == 3:
                if arr == arr1:
                    return p_MM
                else:
                    return 0
        else:
            return 0
    elif a == 'GATHER':
        if p!=p1 or h!=h1 or arr!=arr1:
            return 0
        # Gathering succeeds with p=0.75 and is a no-op at the cap (m==2).
        if p == 'S':
            if m+1 == m1 and m<=1:
                return 0.75*p_MM
            elif m == m1 and m<=1:
                return 0.25*p_MM
            elif m == m1 and m==2:
                return p_MM
            else:
                return 0
        else:
            return 0
    elif a == 'NONE':
        return 0
    # NOTE(review): an unknown action name falls through and returns None
    # implicitly -- callers presumably only pass names from action_map.
# Spot-check one CRAFT transition probability.
print(prob('CRAFT','N',1,3,'D',2,'N',1,3,'R',1))
# NOTE(review): this is an exact duplicate of numtotuple() defined
# earlier in this file; it shadows the first definition and should be
# removed.
def numtotuple(num):
    h = num%5
    s = (int(num/5))%2
    a = (int((int(num/5))/2))%4
    m = (int((int((int(num/5))/2))/4))%3
    p = (int((int((int((int(num/5))/2))/4))/3))%5
    return (p,m,a,s,h)
# print(numtotuple(444))
print(numtotuple(193))
infinity = 1e8
# Positions of IJ
W, N, E, S, C = (0, 1, 2, 3, 4)
direction = ("West", "North", "East", "South", "Center")
name_pos = ["W", "N", "E", "S", "C"]
NUM_POS = 5
# Material count can be 0,1,2
MATERIAL_MAX = 3
# Arrow count can be 0,1,2,3
ARROW_MAX = 4
# MM state can be DORMANT(D), or READY(R)
Dormant, Ready = (0, 1)
mm_states = ("D", "R")
MM_STATES = 2
# MM health can be {0,25,50,75,100}
mm_health = [0, 25, 50, 75, 100]
MM_HLTH_NUM = 5
DISCOUNT = 0.999
BELLMAN_ERROR = 0.001
# NOTE(review): this re-assigns STEP_COST, which was set to -20 earlier
# in this file; everything below sees -5.
STEP_COST = -5
MMHIT_REWARD = -40
BOSS_REWARD = 50
def get_index(pos, m, a, s, h):
    """Flatten (position, material, arrow, MM state, MM health) into a
    single 0-based state index, row-major over the five axes."""
    index = pos
    # Fold each coordinate in with its radix: ((pos*3+m)*4+a)*2+s)*5+h.
    for coord, radix in ((m, MATERIAL_MAX), (a, ARROW_MAX),
                         (s, MM_STATES), (h, MM_HLTH_NUM)):
        index = index * radix + coord
    return index
print(get_index(3,2,0,0,4))
import theano
import theano.tensor as T
import numpy
import cPickle
# Similarity functions ----------------------------
def L1sim(left,right):
    # Negative L1 distance per row (sqrt(x^2) == |x| elementwise).
    return -T.sum(T.sqrt(T.sqr(left-right)),axis=1)
def L2sim(left,right):
    # Negative Euclidean (L2) distance per row.
    return -T.sqrt(T.sum(T.sqr(left-right),axis=1))
def dotsim(left,right):
    # Row-wise dot-product similarity.
    return T.sum(left*right,axis=1)
# -------------------------------------------------
# Costs -------------------------------------------
def margincost(pos,neg,marge=1.0):
    # Margin ranking cost: penalise negatives scoring within `marge` of
    # the positive. Also returns the violation mask (out > 0).
    out = neg - pos + marge
    return T.sum(out * (out>0)),out>0
def validcost(pos,neg):
    # Corresponds to the error without marge.
    # Returns (violation sum, violation mask, sum of satisfied slack).
    out = neg - pos
    return T.sum(out * (out>0)),out>0, T.sum(out * (out<0))
# -------------------------------------------------
# Activation functions ----------------------------
def htanh(x):
    # Hard tanh: clamp x into [-1, 1] using arithmetic on boolean masks.
    return -1. * (x<-1.) + x * (x<1.) * (x>=-1.) + 1. * (x>=1)
def hsigm(x):
    # Hard sigmoid: clamp x into [0, 1].
    return x * (x<1) * (x>0) + 1. * (x>=1)
def rect(x):
    # Rectified linear unit (ReLU).
    return x*(x>0)
def sigm(x):
    # Logistic sigmoid.
    return T.nnet.sigmoid(x)
def tanh(x):
    return T.tanh(x)
def lin(x):
    # Identity (linear) activation.
    return x
# -------------------------------------------------
# Layers ------------------------------------------
class Layer(object):
    """A single linear layer y = act(x . W), with no bias term.

    :param rng: numpy RandomState used for weight initialisation.
    :param act: name of an activation function defined in this module
        (resolved with eval -- only pass trusted strings).
    :param n_inp: input dimensionality.
    :param n_out: output dimensionality.
    :param Winit: optional pre-trained weight matrix (numpy array).
    :param tag: suffix appended to the shared variable's name.
    """
    def __init__(self, rng, act, n_inp, n_out, Winit = None, tag=''):
        self.act = eval(act)
        self.actstr = act
        self.n_inp = n_inp
        self.n_out = n_out
        # init param
        # FIX: 'Winit == None' misbehaves when Winit is a numpy array
        # (elementwise comparison); identity test is the correct check.
        if Winit is None:
            # Glorot-style uniform initialisation.
            wbound = numpy.sqrt(6./(n_inp+n_out))
            W_values = numpy.asarray(rng.uniform(low = -wbound, high = wbound,
                    size = (n_inp, n_out)), dtype = theano.config.floatX)
            self.W = theano.shared(value = W_values, name = 'W'+tag)
        else:
            self.W = theano.shared(value = Winit, name = 'W'+tag)
        self.params = [self.W]
    def __call__(self,x):
        return self.act(T.dot(x, self.W))
    def save(self,path):
        # FIX: pickle protocol -1 is binary; open the file in binary mode.
        f = open(path,'wb')
        cPickle.dump(self,f,-1)
        f.close()
class Layercomb(object):
    """Combine two inputs with a shared bias: act(W1.x + W2.y + b).

    Built from two bias-free linear Layers plus a single bias vector.
    """
    def __init__(self, rng, act, n_inp1, n_inp2 , n_out, W1init = None, W2init = None, binit = None):
        self.act = eval(act)
        self.actstr = act
        self.n_inp1 = n_inp1
        self.n_inp2 = n_inp2
        self.n_out = n_out
        self.layer1 = Layer(rng, 'lin', n_inp1, n_out, Winit = W1init, tag = '1')
        self.layer2 = Layer(rng, 'lin', n_inp2, n_out, Winit = W2init, tag = '2')
        # FIX: 'binit == None' misbehaves when binit is a numpy array.
        if binit is None:
            b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
            self.b = theano.shared(value= b_values, name = 'b')
        else:
            self.b = theano.shared(value = binit, name = 'b')
        self.params = self.layer1.params + self.layer2.params + [self.b]
    def __call__(self,x,y):
        return self.act(self.layer1(x) + self.layer2(y) + self.b)
    def save(self,path):
        # FIX: binary mode for the binary pickle protocol.
        f = open(path,'wb')
        cPickle.dump(self,f,-1)
        f.close()
class MLP(object):
    """One-hidden-layer MLP over a pair of inputs.

    Output is linear: W3 . act(W1.x + W2.y + b12) + b3.
    """
    def __init__(self, rng, act, n_inp1, n_inp2, n_hid, n_out, W1init = None, W2init = None, b12init = None, W3init = None, b3init = None):
        self.act = eval(act)
        self.actstr = act
        self.n_inp1 = n_inp1
        self.n_inp2 = n_inp2
        self.n_hid = n_hid
        self.n_out = n_out
        self.layer12 = Layercomb(rng, act, n_inp1, n_inp2, n_hid, W1init = W1init, W2init = W2init, binit = b12init)
        self.layer3 = Layer(rng, 'lin', n_hid, n_out, Winit = W3init, tag = '3')
        # FIX: 'b3init == None' misbehaves when b3init is a numpy array.
        if b3init is None:
            b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
            self.b = theano.shared(value= b_values, name = 'b')
        else:
            self.b = theano.shared(value = b3init, name = 'b')
        self.params = self.layer12.params + self.layer3.params + [self.b]
    def __call__(self,x,y):
        return self.layer3(self.layer12(x,y)) + self.b
    def save(self,path):
        # FIX: binary mode for the binary pickle protocol.
        f = open(path,'wb')
        cPickle.dump(self,f,-1)
        f.close()
class Quadlayer(object):
    """Bilinear ("quad") layer: W3 . ((W1.x + b1) * (W2.y + b2)) + b3.

    The two projected inputs are combined with an elementwise product
    before the output projection.
    """
    def __init__(self, rng, n_inp1, n_inp2, n_hid, n_out, W1init = None, b1init = None, W2init = None, b2init = None, W3init = None, b3init = None):
        self.n_inp1 = n_inp1
        self.n_inp2 = n_inp2
        self.n_hid = n_hid
        self.n_out = n_out
        # FIX (throughout): '== None' misbehaves when an init value is a
        # numpy array (elementwise comparison); use identity tests.
        if W1init is None:
            wbound = numpy.sqrt(6./(n_inp1+n_hid))
            W_values = numpy.asarray(rng.uniform(low = -wbound, high = wbound,
                    size = (n_inp1, n_hid)), dtype = theano.config.floatX)
            self.W1 = theano.shared(value = W_values, name = 'W1')
        else:
            self.W1 = theano.shared(value = W1init, name = 'W1')
        if b1init is None:
            b_values = numpy.zeros((n_hid,), dtype= theano.config.floatX)
            self.b1 = theano.shared(value= b_values, name = 'b1')
        else:
            self.b1 = theano.shared(value = b1init, name = 'b1')
        if W2init is None:
            wbound = numpy.sqrt(6./(n_inp2+n_hid))
            W_values = numpy.asarray(rng.uniform(low = -wbound, high = wbound,
                    size = (n_inp2, n_hid)), dtype = theano.config.floatX)
            self.W2 = theano.shared(value = W_values, name = 'W2')
        else:
            self.W2 = theano.shared(value = W2init, name = 'W2')
        if b2init is None:
            b_values = numpy.zeros((n_hid,), dtype= theano.config.floatX)
            self.b2 = theano.shared(value= b_values, name = 'b2')
        else:
            self.b2 = theano.shared(value = b2init, name = 'b2')
        if W3init is None:
            wbound = numpy.sqrt(6./(n_hid+n_out))
            W_values = numpy.asarray(rng.uniform(low = -wbound, high = wbound,
                    size = (n_hid, n_out)), dtype = theano.config.floatX)
            self.W3 = theano.shared(value = W_values, name = 'W3')
        else:
            self.W3 = theano.shared(value = W3init, name = 'W3')
        if b3init is None:
            b_values = numpy.zeros((n_out,), dtype= theano.config.floatX)
            self.b3 = theano.shared(value= b_values, name = 'b3')
        else:
            self.b3 = theano.shared(value = b3init, name = 'b3')
        self.params = [self.W1,self.b1,self.W2,self.b2,self.W3,self.b3]
    def __call__(self,x,y):
        return T.dot((T.dot(x,self.W1) + self.b1) * (T.dot(y,self.W2) + self.b2), self.W3 ) + self.b3
    def save(self,path):
        # FIX: binary mode for the binary pickle protocol.
        f = open(path,'wb')
        cPickle.dump(self,f,-1)
        f.close()
class Id(object):
    """Identity operator: returns its first argument and ignores the second.

    A drop-in 'leftop'/'rightop' with no trainable parameters and
    nothing to persist.
    """
    def __init__(self):
        self.params = list()

    def __call__(self, x, y):
        return x

    def save(self, path):
        # Nothing to persist.
        pass
class Embedd(object):
    """Embedding matrix E of shape (D, N): one D-dim column per symbol.

    Columns can be renormalised to unit L2 norm via normalize().
    """
    def __init__(self,rng,N,D,Einit = None):
        self.N = N
        self.D = D
        # FIX: the original compared 'Einit == None' (misbehaves for numpy
        # arrays) and never assigned self.E when an initial matrix was
        # supplied; handle both branches explicitly.
        if Einit is None:
            wbound = numpy.sqrt(6)
            W_values = numpy.asarray(rng.uniform(low = -wbound, high = wbound,
                    size = (D, N)), dtype = theano.config.floatX)
            # Columns are normalised to unit norm at initialisation.
            self.E = theano.shared(value = W_values/numpy.sqrt(numpy.sum(W_values * W_values,axis=0)), name = 'E')
        else:
            self.E = theano.shared(value = Einit, name = 'E')
        # Compiled update that renormalises each embedding column.
        self.updates = {self.E:self.E/T.sqrt(T.sum(self.E * self.E,axis=0))}
        self.norma = theano.function([],[],updates = self.updates)
    def normalize(self):
        self.norma()
# ---------------------------------------
def SimilarityFunctionl(fnsim,embeddings,leftop,rightop):
    # Creation of scoring function on sparse matrices lhs,rel,rhs.
    # Each input is a one-hot (or mean-pooled) sparse column selecting
    # embeddings; returns a compiled theano function of the three inputs.
    idxrel = theano.sparse.csr_matrix('idxrel')
    idxright = theano.sparse.csr_matrix('idxright')
    idxleft = theano.sparse.csr_matrix('idxleft')
    lhs = (theano.sparse.dot(embeddings.E,idxleft).T).reshape((1,embeddings.D))
    rhs = (theano.sparse.dot(embeddings.E,idxright).T).reshape((1,embeddings.D))
    rel = (theano.sparse.dot(embeddings.E,idxrel).T).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    return theano.function([idxleft,idxright,idxrel],[simi])
# Creation of scoring function with respect to the complete list of embeddings (or a subtensor of it defined by subtensorspec)
# if adding = True the scoring function has 2 more arguments: idxadd which contains the indexes to add, and sc the scaling:
# example: you want to ask ( [__us_NN_1,__army_NN_1] , [__attack_VB_1], [???,__village_NN_1] ) idxadd represents __village_NN_1
# and sc represent the values of ??? (here 1/2) so that the sum of each member is 1 (to do a mean pool).
# Ask for the right member
def SimilarityFunctionrightl(fnsim,embeddings,leftop,rightop,subtensorspec = None, adding = False):
    # Scoring function with respect to the complete list of embeddings
    # (or a subtensor of it defined by subtensorspec); the right member
    # is the unknown being ranked.
    # if adding = True the scoring function has 2 more arguments
    # (idxadd: sparse indexes to add, sc: mean-pool scaling factor).
    idxrel = theano.sparse.csr_matrix('idxrel')
    idxleft = theano.sparse.csr_matrix('idxleft')
    lhs = (theano.sparse.dot(embeddings.E,idxleft).T).reshape((1,embeddings.D))
    if not adding:
        if subtensorspec == None:
            rhs = embeddings.E.T
        else:
            # Rank only the first `subtensorspec` embeddings.
            rhs = embeddings.E[:,:subtensorspec].T
    else:
        idxadd = theano.sparse.csr_matrix('idxadd')
        sc = T.scalar('sc')
        if subtensorspec == None:
            rhs = embeddings.E.T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
        else:
            rhs = embeddings.E[:,:subtensorspec].T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
    rel = (theano.sparse.dot(embeddings.E,idxrel).T).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    if not adding:
        return theano.function([idxleft,idxrel],[simi])
    else:
        return theano.function([idxleft,idxrel,idxadd,sc],[simi])
# Ask for the left member
def SimilarityFunctionleftl(fnsim,embeddings,leftop,rightop,subtensorspec = None, adding = False):
    # Like SimilarityFunctionrightl, but ranking the LEFT member against
    # all candidate embeddings (optionally restricted / mean-pooled).
    idxrel = theano.sparse.csr_matrix('idxrel')
    idxright = theano.sparse.csr_matrix('idxright')
    rhs = (theano.sparse.dot(embeddings.E,idxright).T).reshape((1,embeddings.D))
    if not adding:
        if subtensorspec == None:
            lhs = embeddings.E.T
        else:
            lhs = embeddings.E[:,:subtensorspec].T
    else:
        idxadd = theano.sparse.csr_matrix('idxadd')
        sc = T.scalar('sc')
        if subtensorspec == None:
            lhs = embeddings.E.T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
        else:
            lhs = embeddings.E[:,:subtensorspec].T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
    rel = (theano.sparse.dot(embeddings.E,idxrel).T).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    if not adding:
        return theano.function([idxright,idxrel],[simi])
    else:
        return theano.function([idxright,idxrel,idxadd,sc],[simi])
# Ask for the relation member
def SimilarityFunctionrell(fnsim,embeddings,leftop,rightop,subtensorspec = None, adding = False):
    # Like SimilarityFunctionrightl, but ranking the RELATION member
    # against all candidate embeddings (optionally restricted).
    idxright = theano.sparse.csr_matrix('idxright')
    idxleft = theano.sparse.csr_matrix('idxleft')
    lhs = (theano.sparse.dot(embeddings.E,idxleft).T).reshape((1,embeddings.D))
    if not adding:
        if subtensorspec == None:
            rel = embeddings.E.T
        else:
            rel = embeddings.E[:,:subtensorspec].T
    else:
        idxadd = theano.sparse.csr_matrix('idxadd')
        sc = T.scalar('sc')
        if subtensorspec == None:
            rel = embeddings.E.T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
        else:
            rel = embeddings.E[:,:subtensorspec].T * sc + (theano.sparse.dot(embeddings.E,idxadd).T).reshape((1,embeddings.D))
    rhs = (theano.sparse.dot(embeddings.E,idxright).T).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    if not adding:
        return theano.function([idxleft,idxright],[simi])
    else:
        return theano.function([idxleft,idxright,idxadd,sc],[simi])
# Creation of scoring function on indexes (not on sparse matrices)
def SimilarityFunction(fnsim,embeddings,leftop,rightop):
    # Scoring function on integer indexes (not on sparse matrices):
    # each index selects one embedding column directly.
    idxrel = T.iscalar('idxrel')
    idxright = T.iscalar('idxright')
    idxleft = T.iscalar('idxleft')
    lhs = (embeddings.E[:,idxleft]).reshape((1,embeddings.D))
    rhs = (embeddings.E[:,idxright]).reshape((1,embeddings.D))
    rel = (embeddings.E[:,idxrel]).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    return theano.function([idxleft,idxright,idxrel],[simi])
# Ask for the right member
def SimilarityFunctionright(fnsim,embeddings,leftop,rightop,subtensorspec = None):
    # Index-based variant ranking the right member against all embeddings
    # (or the first `subtensorspec` of them).
    idxrel = T.iscalar('idxrel')
    idxleft = T.iscalar('idxleft')
    lhs = (embeddings.E[:,idxleft]).reshape((1,embeddings.D))
    if subtensorspec != None:
        rhs = (embeddings.E[:,:subtensorspec]).T
    else:
        rhs = embeddings.E.T
    rel = (embeddings.E[:,idxrel]).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    return theano.function([idxleft,idxrel],[simi])
# Ask for the left member
def SimilarityFunctionleft(fnsim,embeddings,leftop,rightop,subtensorspec = None):
    # Index-based variant ranking the left member against all embeddings
    # (or the first `subtensorspec` of them).
    idxrel = T.iscalar('idxrel')
    idxright = T.iscalar('idxright')
    rhs = (embeddings.E[:,idxright]).reshape((1,embeddings.D))
    if subtensorspec != None:
        lhs = (embeddings.E[:,:subtensorspec]).T
    else:
        lhs = embeddings.E.T
    rel = (embeddings.E[:,idxrel]).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    return theano.function([idxright,idxrel],[simi])
# Ask for the relation member
def SimilarityFunctionrel(fnsim,embeddings,leftop,rightop,subtensorspec = None):
    """Index-based variant ranking the relation member against all
    embeddings (or the first `subtensorspec` of them)."""
    # FIX: the debug name was 'idxrel' (copy-paste from the sibling
    # functions); this scalar is the right-member index.
    idxright = T.iscalar('idxright')
    idxleft = T.iscalar('idxleft')
    lhs = (embeddings.E[:,idxleft]).reshape((1,embeddings.D))
    # (A dead initial assignment of `rel` was removed here -- it was
    # unconditionally overwritten by the branch below.)
    if subtensorspec is not None:
        rel = (embeddings.E[:,:subtensorspec]).T
    else:
        rel = embeddings.E.T
    rhs = (embeddings.E[:,idxright]).reshape((1,embeddings.D))
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    return theano.function([idxleft,idxright],[simi])
# get the N most probable words given by a scoring function simfn with part1 and part2 its 2 input members (in a list of synset or lemme or concept).
# typ = 1 : ??? part2 part1 (simfn built with SimilarityFunctionleftl)
# typ = 2 : part1 part2 ??? (simfn built with SimilarityFunctionrightl)
# typ = 3 : part1 ??? part2 (simfn built with SimilarityFunctionrell)
# emb = True : distance(part1,???) (you have to build a model with Id() layers with a L2 scoring function, then simfn built from SimilarityFunctionrightl)
def getnclosest(N, idx2lemme, lemme2idx, idx2synset, synset2idx, synset2def, synset2concept, concept2synset, simfn, part1, part2, typ = 1, emb = False):
    """Return a text report of the N best-scoring candidates for a query.

    part1/part2 are lists of lemmas, synset ids, or concept names; each
    part is turned into a mean-pooled sparse indicator vector and fed to
    `simfn` (built by one of the SimilarityFunction*l constructors).
    `typ` selects which slot is unknown (1: left, 2: right, 3: relation);
    `emb` switches the report header to plain nearest-neighbour wording.
    NOTE(review): relies on `scipy.sparse`, imported further down this
    module -- fine at call time, but fragile ordering.
    """
    idx1 = []
    str1 = []
    idx2 = []
    str2 = []
    # Sparse column vector over the full vocabulary for part1.
    vec1 = scipy.sparse.lil_matrix((numpy.max(lemme2idx.values())+1,1),dtype=theano.config.floatX)
    for i in part1:
        # Resolve each token: lemma first, then synset id, then concept.
        if i in lemme2idx.keys():
            idx1 += [lemme2idx[i]]
            vec1[idx1[-1],0] += 1/float(len(part1))
            str1 += ['-'+i]
        elif i in synset2idx.keys():
            idx1 += [synset2idx[i]]
            vec1[idx1[-1],0] += 1/float(len(part1))
            str1 += ['-'+synset2concept[i]]
        else:
            idx1 += [synset2idx[concept2synset[i]]]
            vec1[idx1[-1],0] += 1/float(len(part1))
            str1 += ['-'+i]
    vec2=scipy.sparse.lil_matrix((numpy.max(lemme2idx.values())+1,1),dtype=theano.config.floatX)
    for i in part2:
        if i in lemme2idx.keys():
            idx2 += [lemme2idx[i]]
            vec2[idx2[-1],0] += 1/float(len(part2))
            str2 += ['-'+i]
        elif i in synset2idx.keys():
            idx2 += [synset2idx[i]]
            vec2[idx2[-1],0] += 1/float(len(part2))
            str2 += ['-'+synset2concept[i]]
        else:
            idx2 += [synset2idx[concept2synset[i]]]
            vec2[idx2[-1],0] += 1/float(len(part2))
            str2 += ['-'+i]
    # Score every candidate and sort in decreasing order.
    ll = (simfn(vec1,vec2)[0]).flatten()
    llo = numpy.argsort(ll)[::-1]
    llt = ll[llo]
    tt = ''
    txt1 =''
    for i in str1:
        txt1 += i
    txt2 = ''
    for i in str2:
        txt2 += i
    # Header line showing which slot is being filled.
    if emb:
        tt += 'Similar to: %s\n'%( txt1 )
    else:
        if typ == 1:
            tt += '???? %s %s\n'%( txt2, txt1 )
        elif typ == 2:
            tt += '%s %s ????\n'%( txt1, txt2 )
        elif typ == 3:
            tt += '%s ???? %s\n'%( txt1, txt2 )
    # One line per ranked candidate with its score and description.
    for i in range(N):
        if llo[i] in idx2lemme.keys():
            stro = idx2lemme[llo[i]]
        elif idx2synset[llo[i]][0] == '_':
            stro = llo[i]
        else:
            stro = synset2concept[idx2synset[llo[i]]] + ' : ' + synset2def[idx2synset[llo[i]]]
        tt += 'Rank %s %s %s\n'%(i+1,llt[i],stro)
    return tt
import theano.sparse
import scipy.sparse
# The training function creation:
# relb = true, negative sample for the relation member.
# lrparams = learning rate for all the parameters of the model.
# lrembeddings = learning rate for the embeddings.
# inpposl = sparse matrix of the lhs.
# inposr = sparse matrix of the rhs
# inposo = sparse matrix of the relation
# inpposln = sparse matrix of the negative samples for the lhs
# inpposrn = sparse matrix of the negative samples for the rhs
# inpposon = sparse matrix of the negative samples for the relation
def TrainFunction(fnsim,embeddings, leftop, rightop, marge = 1.0, relb = True):
    """Compile the SGD training function for the margin ranking cost.

    Positive triples (lhs, rel, rhs) are contrasted against negatives
    where one member is corrupted (lhsn / rhsn / reln). When relb is
    False the relation-corruption term is excluded from the cost.
    Returns a theano function taking the two learning rates plus the six
    sparse index matrices, and returning costs, violation counts,
    intermediate embeddings and scores.
    """
    # inputs
    inpposr = theano.sparse.csr_matrix()
    inpposl = theano.sparse.csr_matrix()
    inpposo = theano.sparse.csr_matrix()
    inpposln = theano.sparse.csr_matrix()
    inpposrn = theano.sparse.csr_matrix()
    inpposon = theano.sparse.csr_matrix()
    lrparams = T.scalar('lrparams')
    lrembeddings = T.scalar('lrembeddings')
    # graph
    lhs = theano.sparse.dot(embeddings.E,inpposl).T
    rhs = theano.sparse.dot(embeddings.E,inpposr).T
    rel = theano.sparse.dot(embeddings.E,inpposo).T
    lhsn = theano.sparse.dot(embeddings.E,inpposln).T
    rhsn = theano.sparse.dot(embeddings.E,inpposrn).T
    reln = theano.sparse.dot(embeddings.E,inpposon).T
    # Positive score and the three corrupted scores.
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    siminl = fnsim(leftop(lhsn,rel),rightop(rhs,rel))
    siminr = fnsim(leftop(lhs,rel),rightop(rhsn,rel))
    simino = fnsim(leftop(lhs,reln),rightop(rhs,reln))
    costl,outl = margincost(simi,siminl,marge)
    costr,outr = margincost(simi,siminr,marge)
    costo,outo = margincost(simi,simino,marge)
    if relb:
        cost = costl + costr + costo
    else:
        cost = costl + costr
    out = T.concatenate([outl,outr,outo])
    # Plain SGD updates for the operator (and optional fnsim) parameters.
    if hasattr(fnsim,'params'):
        gradientsparams = T.grad(cost, leftop.params + rightop.params + fnsim.params)
        updates = dict((i,i-lrparams*j) for i,j in zip(leftop.params + rightop.params + fnsim.params, gradientsparams))
    else:
        gradientsparams = T.grad(cost, leftop.params + rightop.params)
        updates = dict((i,i-lrparams*j) for i,j in zip(leftop.params + rightop.params, gradientsparams))
    # Embeddings get their own learning rate.
    gradientsembeddings = T.grad(cost, embeddings.E)
    newE = embeddings.E - lrembeddings * gradientsembeddings
    ############### scaling variants
    #updates = dict((i,i-lrparams/(1+T.cast(T.sum(out),dtype=theano.config.floatX))*j) for i,j in zip(leftop.params + rightop.params, gradientsparams))
    #maskE = T.vector('maskE')
    #newE = (embeddings.E - lrembeddings/(1+maskE*T.cast(T.sum(out),dtype=theano.config.floatX)) * gradientsembeddings)
    ###############
    #newEnorm = newE / T.sqrt(T.sum(newE*newE,axis=0))
    updates.update({embeddings.E:newE})
    return theano.function([lrparams,lrembeddings,inpposl, inpposr, inpposo, inpposln, inpposrn,inpposon], [cost,costl,costr,costo,T.sum(out),T.sum(outl),T.sum(outr),T.sum(outo),lhs,rhs,rel,simi,siminl,siminr,simino],updates=updates)
# Function returning the binary vector representing: cost>0
def ForwardFunction(fnsim,embeddings, leftop, rightop, marge = 1.0):
    """Compile a Theano function returning the binary margin-violation
    vectors (outl, outr, outo) for a batch, without updating any parameter.
    Inputs are the six sparse one-hot matrices: positive lhs/rhs/rel and
    their negatively-sampled counterparts."""
    # One sparse input per slot.
    in_l = theano.sparse.csr_matrix()
    in_r = theano.sparse.csr_matrix()
    in_o = theano.sparse.csr_matrix()
    in_ln = theano.sparse.csr_matrix()
    in_rn = theano.sparse.csr_matrix()
    in_on = theano.sparse.csr_matrix()
    E = embeddings.E

    def embed(sparse_idx):
        # Embed a batch of sparse one-hot columns -> (batch, dim)
        return theano.sparse.dot(E, sparse_idx).T

    lhs, rhs, rel = embed(in_l), embed(in_r), embed(in_o)
    lhs_neg, rhs_neg, rel_neg = embed(in_ln), embed(in_rn), embed(in_on)
    # Positive score vs. the three corrupted scores.
    pos = fnsim(leftop(lhs, rel), rightop(rhs, rel))
    neg_l = fnsim(leftop(lhs_neg, rel), rightop(rhs, rel))
    neg_r = fnsim(leftop(lhs, rel), rightop(rhs_neg, rel))
    neg_o = fnsim(leftop(lhs, rel_neg), rightop(rhs, rel_neg))
    _, outl = margincost(pos, neg_l, marge)
    _, outr = margincost(pos, neg_r, marge)
    _, outo = margincost(pos, neg_o, marge)
    return theano.function([in_l, in_r, in_o, in_ln, in_rn, in_on], [outl, outr, outo])
# Function returning the score over lhs,rhs and rel sparse matrices
def BatchSimilarityFunction(fnsim,embeddings, leftop, rightop):
    """Compile a Theano function scoring a batch of (lhs, rhs, rel) triples
    given as three sparse one-hot matrices; returns [similarity]."""
    in_lhs = theano.sparse.csr_matrix()
    in_rhs = theano.sparse.csr_matrix()
    in_rel = theano.sparse.csr_matrix()
    E = embeddings.E
    # Embed each sparse column batch and transpose to (batch, dim).
    lhs = theano.sparse.dot(E, in_lhs).T
    rhs = theano.sparse.dot(E, in_rhs).T
    rel = theano.sparse.dot(E, in_rel).T
    score = fnsim(leftop(lhs, rel), rightop(rhs, rel))
    return theano.function([in_lhs, in_rhs, in_rel], [score])
# Function doing the forward on a batch and returning all information (without updating):
def BatchValidFunction(fnsim,embeddings, leftop, rightop):
    """Compile the validation forward pass: like the training function but
    without parameter updates and without a margin (uses validcost, which
    also returns the raw margins).  Returns costs, violation counts, margins
    and all intermediate tensors for a batch of six sparse input matrices.
    """
    # inputs
    inpposr = theano.sparse.csr_matrix()
    inpposl = theano.sparse.csr_matrix()
    inpposo = theano.sparse.csr_matrix()
    inpposln = theano.sparse.csr_matrix()
    inpposrn = theano.sparse.csr_matrix()
    inpposon = theano.sparse.csr_matrix()
    # graph: embed each sparse column batch and transpose to (batch, dim)
    lhs = theano.sparse.dot(embeddings.E,inpposl).T
    rhs = theano.sparse.dot(embeddings.E,inpposr).T
    rel = theano.sparse.dot(embeddings.E,inpposo).T
    lhsn = theano.sparse.dot(embeddings.E,inpposln).T
    rhsn = theano.sparse.dot(embeddings.E,inpposrn).T
    reln = theano.sparse.dot(embeddings.E,inpposon).T
    # positive score and the three corrupted (negative) scores
    simi = fnsim(leftop(lhs,rel),rightop(rhs,rel))
    siminl = fnsim(leftop(lhsn,rel),rightop(rhs,rel))
    siminr = fnsim(leftop(lhs,rel),rightop(rhsn,rel))
    simino = fnsim(leftop(lhs,reln),rightop(rhs,reln))
    # validcost returns (cost, violation indicator, margin values)
    costl,outl,margel = validcost(simi,siminl)
    costr,outr,marger = validcost(simi,siminr)
    costo,outo,margeo = validcost(simi,simino)
    cost = costl + costr + costo
    out = T.concatenate([outl,outr,outo])
    return theano.function([inpposl, inpposr, inpposo, inpposln, inpposrn,inpposon], [cost,costl,costr,costo,T.sum(out),T.sum(outl),T.sum(outr),T.sum(outo),margel,marger,margeo,lhs,rhs,rel,simi,siminl,siminr,simino])
# Compute the mean rank of the lhs and rhs, over a list of lhs, rhs and rel indexes.
# Only works when there is one word per member (WordNet)
# sl build with SimilarityFunctionleft
# sr build with SimilarityFunctionright
def calctestval(sl,sr,idxtl,idxtr,idxto):
    """Mean/std of the ranks of the correct lhs and rhs over index triples.

    sl(r, o) / sr(l, o) return a one-element list holding the score vector
    over all candidate entities.  The rank of an index is its 0-based
    position when candidates are sorted by decreasing score.
    Returns (mean(all), std(all), mean(errl), std(errl), mean(errr), std(errr)).
    """
    def rank_of(scores, target):
        # 0-based position of `target` when scores are sorted high-to-low.
        descending = numpy.argsort(scores.flatten())[::-1]
        return numpy.argsort(descending).flatten()[target]

    errl = [rank_of(sl(r, o)[0], l) for l, o, r in zip(idxtl, idxto, idxtr)]
    errr = [rank_of(sr(l, o)[0], r) for l, o, r in zip(idxtl, idxto, idxtr)]
    both = errl + errr
    return (numpy.mean(both), numpy.std(both),
            numpy.mean(errl), numpy.std(errl),
            numpy.mean(errr), numpy.std(errr))
# The same but returns the ranking lists instead of their mean and std.
def calctestval2(sl,sr,idxtl,idxtr,idxto):
    """Same ranking computation as calctestval, but returns the two raw
    rank lists (errl, errr) instead of their mean and std."""
    left_ranks = []
    right_ranks = []
    for l, o, r in zip(idxtl, idxto, idxtr):
        desc_l = numpy.argsort((sl(r, o)[0]).flatten())[::-1]
        left_ranks.append(numpy.argsort(desc_l).flatten()[l])
        desc_r = numpy.argsort((sr(l, o)[0]).flatten())[::-1]
        right_ranks.append(numpy.argsort(desc_r).flatten()[r])
    return left_ranks, right_ranks
# Similar but works with sparse index matrices (posl,posr,poso) = (lhs,rhs,rel)
# replace the whole member by one word.
# sl build with SimilarityFunctionleftl
# sr build with SimilarityFunctionrightl
# so build with SimilarityFunctionrell
def calctestscore(sl,sr,so,posl,posr,poso):
    """Mean/std of the ranks of the correct lhs, rhs and rel entries, for
    sparse index matrices (posl, posr, poso) with one column per triple.

    sl/sr/so take the two other members' columns and return a one-element
    list holding the score vector; ranks are 0-based positions in the
    decreasing-score ordering.
    Returns (mean(all), std(all), mean(errl), std(errl),
             mean(errr), std(errr), mean(erro), std(erro)).
    """
    errl = []
    errr = []
    erro = []
    for i in range(posl.shape[1]):
        rankl = numpy.argsort((sl(posr[:,i],poso[:,i])[0]).flatten())
        for l in posl[:,i].nonzero()[0]:
            errl += [numpy.argsort(rankl[::-1]).flatten()[l]]
        rankr = numpy.argsort((sr(posl[:,i],poso[:,i])[0]).flatten())
        for r in posr[:,i].nonzero()[0]:
            errr += [numpy.argsort(rankr[::-1]).flatten()[r]]
        ranko = numpy.argsort((so(posl[:,i],posr[:,i])[0]).flatten())
        for o in poso[:,i].nonzero()[0]:
            # Bug fix: was `flatten()[0]`, which always took the rank of
            # candidate 0 instead of the actual relation index `o`
            # (cf. the [l] and [r] lookups above).
            erro += [numpy.argsort(ranko[::-1]).flatten()[o]]
    return numpy.mean(errl+errr+erro),numpy.std(errl+errr+erro),numpy.mean(errl),numpy.std(errl),numpy.mean(errr),numpy.std(errr),numpy.mean(erro),numpy.std(erro)
import copy
# Similar but works with sparse index matrices (posl,posr,poso) = (lhs,rhs,rel)
# AND replace only ONE word per member (does ALL combinations)
# sl build with SimilarityFunctionleftl (with the adding argument = True)
# sr build with SimilarityFunctionrightl (with the adding argument = True)
# so build with SimilarityFunctionrell (with the adding argument = True)
def calctestscore2(sl,sr,so,posl,posr,poso):
    """Rank statistics where ONE word of a member is held out at a time.

    For every nonzero entry j of a member's column, that entry is zeroed in
    a copy (tmpadd) and passed, with its original value, to the scoring
    function (built with adding=True); the rank of j in the decreasing-score
    ordering is recorded.  Returns the same 8-tuple layout as calctestscore.
    """
    def heldout_ranks(score_fn, member, other_a, other_b, col):
        # Ranks for each held-out word of `member`'s column `col`.
        ranks = []
        for j in member[:, col].nonzero()[0]:
            held_val = member[j, col]
            remainder = copy.deepcopy(member[:, col])
            remainder[j, 0] = 0.0
            scores = (score_fn(other_a[:, col], other_b[:, col], remainder, held_val)[0]).flatten()
            ascending = numpy.argsort(scores)
            ranks.append(numpy.argsort(ascending[::-1]).flatten()[j])
        return ranks

    errl = []
    errr = []
    erro = []
    for i in range(posl.shape[1]):
        errl += heldout_ranks(sl, posl, posr, poso, i)
        errr += heldout_ranks(sr, posr, posl, poso, i)
        erro += heldout_ranks(so, poso, posl, posr, i)
    allerr = errl + errr + erro
    return (numpy.mean(allerr), numpy.std(allerr),
            numpy.mean(errl), numpy.std(errl),
            numpy.mean(errr), numpy.std(errr),
            numpy.mean(erro), numpy.std(erro))
# The same :
# Similar but works with sparse index matrices (posl,posr,poso) = (lhs,rhs,rel)
# AND replace only ONE word per member (does ALL combinations)
# sl build with SimilarityFunctionleftl (with the adding argument = True)
# sr build with SimilarityFunctionrightl (with the adding argument = True)
# so build with SimilarityFunctionrell (with the adding argument = True)
# But compares with the index correspondence sparse matrices: (poslc,posrc,posoc)
# (you give lemmas in input and find the ranking of synsets).
def calctestscore3(sl,sr,so,posl,posr,poso,poslc,posrc,posoc):
    """Like calctestscore2 (hold out one word per member at a time), but the
    rank is read at the corresponding index from poslc/posrc/posoc instead
    of at the held-out index itself: the input matrices hold lemmas while
    the correspondence matrices map them to the synset whose rank we want.
    Returns (mean(all), std(all), mean(errl), std(errl),
             mean(errr), std(errr), mean(erro), std(erro)).
    """
    errl = []
    errr = []
    erro = []
    # One column per triple.
    for i in range(posl.shape[1]):
        lnz = posl[:,i].nonzero()[0]
        for j in lnz:
            # Hold out word j: zero it in a copy and pass its value aside.
            val = posl[j,i]
            tmpadd = copy.deepcopy(posl[:,i])
            tmpadd[j,0] = 0.0
            rankl = numpy.argsort((sl(posr[:,i],poso[:,i],tmpadd,val)[0]).flatten())
            # Rank of the corresponding synset, not of the lemma index j.
            errl += [numpy.argsort(rankl[::-1]).flatten()[poslc[j,i]]]
        rnz = posr[:,i].nonzero()[0]
        for j in rnz:
            val = posr[j,i]
            tmpadd = copy.deepcopy(posr[:,i])
            tmpadd[j,0] = 0.0
            rankr = numpy.argsort((sr(posl[:,i],poso[:,i],tmpadd,val)[0]).flatten())
            errr += [numpy.argsort(rankr[::-1]).flatten()[posrc[j,i]]]
        onz = poso[:,i].nonzero()[0]
        for j in onz:
            val = poso[j,i]
            tmpadd = copy.deepcopy(poso[:,i])
            tmpadd[j,0] = 0.0
            ranko = numpy.argsort((so(posl[:,i],posr[:,i],tmpadd,val)[0]).flatten())
            erro += [numpy.argsort(ranko[::-1]).flatten()[posoc[j,i]]]
    return numpy.mean(errl+errr+erro),numpy.std(errl+errr+erro),numpy.mean(errl),numpy.std(errl),numpy.mean(errr),numpy.std(errr),numpy.mean(erro),numpy.std(erro)
# The same but return ranking lists instead of their mean and std.
def calctestscore4(sl,sr,so,posl,posr,poso,poslc,posrc,posoc):
    """Identical computation to calctestscore3, but returns the three raw
    rank lists (errl, errr, erro) instead of their mean and std."""
    errl = []
    errr = []
    erro = []
    # One column per triple.
    for i in range(posl.shape[1]):
        lnz = posl[:,i].nonzero()[0]
        for j in lnz:
            # Hold out word j: zero it in a copy and pass its value aside.
            val = posl[j,i]
            tmpadd = copy.deepcopy(posl[:,i])
            tmpadd[j,0] = 0.0
            rankl = numpy.argsort((sl(posr[:,i],poso[:,i],tmpadd,val)[0]).flatten())
            # Rank of the corresponding synset, not of the lemma index j.
            errl += [numpy.argsort(rankl[::-1]).flatten()[poslc[j,i]]]
        rnz = posr[:,i].nonzero()[0]
        for j in rnz:
            val = posr[j,i]
            tmpadd = copy.deepcopy(posr[:,i])
            tmpadd[j,0] = 0.0
            rankr = numpy.argsort((sr(posl[:,i],poso[:,i],tmpadd,val)[0]).flatten())
            errr += [numpy.argsort(rankr[::-1]).flatten()[posrc[j,i]]]
        onz = poso[:,i].nonzero()[0]
        for j in onz:
            val = poso[j,i]
            tmpadd = copy.deepcopy(poso[:,i])
            tmpadd[j,0] = 0.0
            ranko = numpy.argsort((so(posl[:,i],posr[:,i],tmpadd,val)[0]).flatten())
            erro += [numpy.argsort(ranko[::-1]).flatten()[posoc[j,i]]]
    return errl,errr,erro
|
import json
class TreeNode(object):
    """A node of a part hierarchy tree (PartNet result.json style)."""

    def __init__(self, text, id, depth, parent=None, children=None):
        self.text = text          # human-readable part label
        self.id = id              # integer node id
        self.depth = depth        # 0 for the root
        self.children = children  # list of TreeNode, or None for a leaf
        self.parent = parent      # parent TreeNode, or None for the root
        self.objs = None          # .obj mesh names, set on leaf parts only

    @staticmethod
    def from_json(path):
        """Load a hierarchy file and return the root node."""
        with open(path, 'r') as fp:
            hier = json.load(fp)
        return construct_tree(hier[0], depth=0)

    def __repr__(self):
        par_id = self.parent.id if self.parent is not None else None
        child_id = [x.id for x in self.children] if self.children is not None else None
        return "text: {}, ID: {}, depth: {}, parent: {}, children: {}".format(self.text, self.id, self.depth, par_id, child_id)

    def print_hier(self, depth=0):
        """Print the subtree with simple ASCII indentation."""
        prefix = " " * (depth - 1) + "|---" * min(depth, 1)
        line = "text: {} ID: {}".format(self.text, self.id)
        if self.objs is not None:
            line += " objs: {}".format(self.objs)
        print(prefix + line)
        for child in (self.children or []):
            child.print_hier(depth + 1)

    def query_node_by_id(self, id):
        """Depth-first search for the node with the given id (None if absent)."""
        if self.id == id:
            return self
        for child in (self.children or []):
            found = child.query_node_by_id(id)
            if found is not None:
                return found
        return None

    def query_parent_id(self, depth=1):
        """Walk up to the ancestor at the given depth and return its id."""
        if self.depth == depth:
            return self.id
        return None if self.parent is None else self.parent.query_parent_id(depth)

    def query_id_by_depth(self, depth):
        """Collect the ids of every descendant (or self) at the given depth."""
        if self.depth == depth:
            return [self.id]
        if self.depth > depth:
            return []
        ids = []
        for child in (self.children or []):
            ids.extend(child.query_id_by_depth(depth))
        return ids

    def collect_objs(self):
        """Gather the .obj lists of all leaves under this node."""
        if self.objs is not None:
            return self.objs
        gathered = []
        for child in (self.children or []):
            gathered.extend(child.collect_objs())
        return gathered
def construct_tree(item, depth=0):
    """Recursively build a TreeNode hierarchy from a parsed JSON dict.

    item  -- dict with 'text' and 'id', and optionally 'objs' (leaf meshes)
             and 'children' (list of child dicts).
    depth -- depth assigned to this node (children get depth + 1).
    Returns the constructed TreeNode.
    """
    node = TreeNode(item['text'], item['id'], depth)
    # Idiom fix: membership test directly on the dict instead of .keys().
    if 'objs' in item:
        node.objs = item['objs']
    if 'children' in item:
        children = []
        for child in item['children']:
            child_node = construct_tree(child, depth + 1)
            child_node.parent = node
            children.append(child_node)
        node.children = children
    else:
        # Leaves keep children = None (print_hier/collect_objs rely on it).
        node.children = None
    return node
if __name__ == '__main__':
    # Smoke test against one local PartNet annotation (hard-coded dev path).
    path = "/home/megaBeast/Desktop/partnet_data/PartNet/38037/result.json"
    root = TreeNode.from_json(path)
    root.print_hier()
    # Exercise the query helpers: node lookup, leaf meshes, depth queries.
    node = root.query_node_by_id(5)
    print(node)
    print(node.collect_objs())
    print(root.query_id_by_depth(depth=1))
    par_d1 = node.query_parent_id(depth=1)
    print(par_d1)
|
from django.urls import path
from ec import views
# URL routes of the ec (e-commerce) app.
# NOTE(review): the bare `<slug>` converters capture a string kwarg named
# `slug` with the default (str-like) converter — confirm `<slug:slug>` was
# not intended for stricter matching.
urlpatterns = [
    # Catalogue: item list and item detail by slug.
    path('item/', views.ItemListView.as_view(), name='item_list'),
    path('item/<slug>', views.ItemDetailView.as_view(), name='item_detail'),
    # Cart mutations: add one, remove all, remove a single unit.
    path('additem/<slug>', views.addItem, name='additem'),
    path('order/', views.OrderView.as_view(), name='order'),
    path('removeitem/<slug>', views.removeItem, name='removeitem'),
    path('removesingleitem/<slug>', views.removeSingleItem, name='removesingleitem'),
    # Checkout flow.
    path('payment/', views.PaymentView.as_view(), name='payment'),
    path('thanks/', views.ThanksView.as_view(), name='thanks'),
]
#!/usr/bin/env python3
#
# Copyright 2020 by Philip N. Garner
#
# See the file COPYING for the licence associated with this software.
#
# Author(s):
# Phil Garner, October 2020
#
# Oracle: https://bescherelle.com/conjugueur.php
# List of 'verb' forms of verbs with auxiliary être.
# First two are venir and aller
# 'verb' (long-stem) forms of the verbs that take auxiliary être, matched
# against Verb.verb in Verb.aux().  Regular -er verbs store the full
# infinitive there; irregular classes store their future stem.
auxEtre = [
    'viendr', 'ir',          # Come, go
    'entrer', 'sortir',      # Enter, go out
    'arriver', 'partir',     # Arrive, leave
    # Fix: was 'decendre' — misspelt, and RegularRE stores stem+'r', so the
    # matching form for descendre is 'descendr'.
    'monter', 'descendr',    # Climb, descend
    # Fix: was 'naître', which never matched Naître.verb ('naîtr'), so
    # naître wrongly got auxiliary avoir.
    'naîtr', 'mourr',        # Be born, die
    'rester', 'passer',      # Stay, pass
    'tomber', 'retourner'    # Fall, return
]
# Prepend a string onto each element of an array
def prepend(pre, arr):
    """Return a new list with *pre* prefixed onto every element of *arr*."""
    return [pre + item for item in arr]
# Base class for all verbs
class Verb:
    """Base class for all verbs: holds the two stems and picks the auxiliary."""

    def __init__(self, stem, verb=None):
        self.stem = stem  # short stem, used by most tenses
        self.verb = verb  # long stem, used by future and conditional

    def aux(self):
        """Auxiliary for the perfect &c: être for verbs in auxEtre, else avoir."""
        return 'être' if self.verb in auxEtre else 'avoir'

    def sconj(self, suff):
        """Conjugate the given endings on the short stem."""
        return prepend(self.stem, suff)

    def vconj(self, suff):
        """Conjugate the given endings on the long stem."""
        return prepend(self.verb, suff)
# The regular conjugations, based on either the stem or the verb itself
class Regular(Verb):
    """Regular (-er, group 1) conjugations built from the stem or the long
    stem.  Class attributes hold the singular (S) and plural (P) ending
    triples (je/tu/elle and nous/vous/elles) for each tense."""
    # Indicative present
    preS = ['e', 'es', 'e']
    preP = ['ons', 'ez', 'ent']
    # Indicative imperfect
    impS = ['ais', 'ais', 'ait']
    impP = ['ions', 'iez', 'aient']
    # Indicative simple past
    pasS = ['ai', 'as', 'a']
    pasP = ['âmes', 'âtes', 'èrent']
    # Indicative simple future (singular endings coincide with the past ones)
    futS = pasS
    futP = ['ons', 'ez', 'ont']
    # Subjunctive present (singular endings coincide with the present ones)
    subPreS = preS
    subPreP = ['ions', 'iez', 'ent']
    # Subjunctive imperfect
    subImpS = ['asse', 'asses', 'ât']
    subImpP = ['assions', 'assiez', 'assent']
    # Participles (present / past)
    parPre = ['ant']
    parPas = ['é']
    def indPresentS(self):
        return self.sconj(self.preS)
    def indPresentP(self):
        return self.sconj(self.preP)
    def indPresent(self):
        return self.indPresentS() + self.indPresentP()
    def indImperfect(self):
        return self.sconj(self.impS + self.impP)
    def indSimplePast(self):
        return self.sconj(self.pasS + self.pasP)
    def indSimpleFuture(self):
        # Future builds on the longer stem
        return self.vconj(self.futS + self.futP)
    def conditional(self):
        # As indicative imperfect, but with the longer stem
        return self.vconj(self.impS + self.impP)
    def subPresent(self):
        return self.sconj(self.subPreS + self.subPreP)
    def subImperfect(self):
        return self.sconj(self.subImpS + self.subImpP)
    def partPresent(self):
        return self.sconj(self.parPre)
    def partPast(self):
        return self.sconj(self.parPas)
    def participles(self):
        return self.partPresent() + self.partPast()
# A regular base where the Indicative simple Past and Subjunctive Imperfect
# change conjugation with a leading i-
# This is the case for group 2 and many group 3 verbs
# Also set the past participle to i since it seems to work for several verbs
# (group 2, sortir, partir)
class BaseIPSI(Regular):
    """Regular base whose Indicative simple Past and Subjunctive Imperfect
    take leading i- endings (group 2 and many group 3 verbs); the past
    participle is likewise 'i' (works for group 2, sortir, partir)."""
    pasS = ['is', 'is', 'it']
    pasP = ['îmes', 'îtes', 'irent']
    subImpS = ['isse', 'isses', 'ît']
    subImpP = ['issions', 'issiez', 'issent']
    parPas = ['i']
# Another regular base similar to BaseIPSI, but based on a leading u-
# Again, the past participle is also set to u
class BaseUPSI(Regular):
    """Like BaseIPSI but with leading u- endings for the simple past and
    subjunctive imperfect; past participle 'u'."""
    pasS = ['us', 'us', 'ut']
    pasP = ['ûmes', 'ûtes', 'urent']
    subImpS = ['usse', 'usses', 'ût']
    subImpP = ['ussions', 'ussiez', 'ussent']
    parPas = ['u']
# The regular group 2, ending in -ir, e.g., finir
# Characterised by lots of -iss- in the conjugations
class RegularIR(BaseIPSI):
    """The regular group 2, ending in -ir, e.g. finir; characterised by the
    -iss- infix in several tenses."""
    preS = ['is', 'is', 'it']
    preP = ['issons', 'issez', 'issent']
    parPre = ['issant']
    def indImperfect(self):
        # Delegate on an -iss- augmented stem (verb=None is safe: this
        # tense only uses sconj).
        return Regular(self.stem+'iss').indImperfect()
    def subPresent(self):
        return Regular(self.stem+'iss').subPresent()
# A regular group 3 verb class ending in -re, e.g., repondre, descendre
class RegularRE(BaseIPSI):
    """A regular group 3 verb class ending in -re, e.g. repondre, descendre;
    present singular drops the vowel and the past participle is 'u'."""
    preS = ['s', 's', '']
    parPas = ['u']
class Être(Regular):
    """être (to be): highly irregular; future/conditional on 'ser-',
    u- simple past on the f- stem (fus, fus, fut, ...)."""
    def __init__(self):
        self.stem = 'ét'
        self.verb = 'ser'
    def indPresent(self):
        return ['suis', 'es', 'est', 'sommes', 'êtes', 'sont']
    def indSimplePast(self):
        return BaseUPSI('f').indSimplePast()
    def subPresent(self):
        return ['sois', 'sois', 'soit', 'soyons', 'soyez', 'soient']
    def subImperfect(self):
        return BaseUPSI('f').subImperfect()
class Avoir(Regular):
    """avoir (to have): irregular; u- simple past on the e- stem
    (eus, eus, eut, ...)."""
    def __init__(self):
        self.stem = 'av'
        self.verb = 'aur'
    def indPresent(self):
        return ['ai', 'as', 'a', 'avons', 'avez', 'ont']
    def indSimplePast(self):
        return BaseUPSI('e').indSimplePast()
    def subPresent(self):
        return ['aie', 'aies', 'ait', 'ayons', 'ayez', 'aient']
    def subImperfect(self):
        return BaseUPSI('e').subImperfect()
    def partPresent(self):
        return ['ayant']
    def partPast(self):
        return ['eu']
class Faire(Regular):
    """faire (to do/make): irregular present, future on 'fer-'."""
    def __init__(self):
        self.stem = 'fais'
        self.verb = 'fer'
    def indPresent(self):
        return ['fais', 'fais', 'fait', 'faisons', 'faites', 'font']
    def indSimplePast(self):
        # Fix: the nous/vous forms take a circumflex — fîmes, fîtes
        # (cf. Voir's vîmes/vîtes); the originals lacked the accent.
        return ['fis', 'fis', 'fit', 'fîmes', 'fîtes', 'firent']
    def partPast(self):
        return ['fait']
class Voir(Regular):
    """voir (to see): irregular; future on 'verr-', i- past on the v- stem."""
    def __init__(self):
        self.stem = 'voy'
        self.verb = 'verr'
    def indPresent(self):
        return ['vois', 'vois', 'voit', 'voyons', 'voyez', 'voient']
    def indSimplePast(self):
        return ['vis', 'vis', 'vit', 'vîmes', 'vîtes', 'virent']
    def subPresent(self):
        return ['voie', 'voies', 'voie', 'voyions', 'voyiez', 'voient']
    def subImperfect(self):
        return ['visse', 'visses', 'vît', 'vissions', 'vissiez', 'vissent']
    def partPast(self):
        return ['vu']
class Pouvoir(Regular):
    """pouvoir (to be able): irregular; u- past on the p- stem (pus, ...)."""
    def __init__(self):
        self.stem = 'pouv'
        self.verb = 'pourr'
    def indPresent(self):
        return ['peux', 'peux', 'peut', 'pouvons', 'pouvez', 'peuvent']
    def indSimplePast(self):
        return BaseUPSI('p').indSimplePast()
    def subPresent(self):
        return ['puisse', 'puisses', 'puisse',
                'puissions', 'puissiez', 'puissent']
    def subImperfect(self):
        return BaseUPSI('p').subImperfect()
    def partPast(self):
        return ['pu']
class Vouloir(BaseUPSI):
    """vouloir (to want): irregular present/subjunctive; u- past and 'u'
    past participle inherited from BaseUPSI."""
    def __init__(self):
        self.stem = 'voul'
        self.verb = 'voudr'
    def indPresent(self):
        return ['veux', 'veux', 'veut', 'voulons', 'voulez', 'veulent']
    def subPresent(self):
        return ['veuille', 'veuilles', 'veuille',
                'voulions', 'vouliez', 'veuillent']
class Savoir(Regular):
    """savoir (to know): irregular; subjunctive on 'sach-', u- past on s-."""
    def __init__(self):
        self.stem = 'sav'
        self.verb = 'saur'
    def indPresentS(self):
        return ['sais', 'sais', 'sait']
    def indSimplePast(self):
        return BaseUPSI('s').indSimplePast()
    def subPresent(self):
        return Regular('sach').subPresent()
    def subImperfect(self):
        return BaseUPSI('s').subImperfect()
    def partPresent(self):
        return ['sachant']
    def partPast(self):
        return ['su']
class Aller(Regular):
    """aller (to go): suppletive present; future on 'ir-' (auxiliary être)."""
    def __init__(self):
        self.stem = 'all'
        self.verb = 'ir'
    def indPresent(self):
        return ['vais', 'vas', 'va', 'allons', 'allez', 'vont']
    def subPresent(self):
        return ['aille', 'ailles', 'aille', 'allions', 'alliez', 'aillent']
class Sortir(BaseIPSI):
    """sortir (to go out): i- past base; shortened present singular."""
    def __init__(self):
        self.stem = 'sort'
        self.verb = 'sortir'
    def indPresentS(self):
        return ['sors', 'sors', 'sort']
class Partir(BaseIPSI):
    """partir (to leave): i- past base; shortened present singular."""
    def __init__(self):
        self.stem = 'part'
        self.verb = 'partir'
    def indPresentS(self):
        return ['pars', 'pars', 'part']
class Naître(BaseIPSI):
    """naître (to be born): present stem 'naiss-', simple past on 'naqu-'."""
    def __init__(self):
        self.stem = 'naiss'
        self.verb = 'naîtr'
    def indPresentS(self):
        return ['nais', 'nais', 'naît']
    def indSimplePast(self):
        return BaseIPSI('naqu').indSimplePast()
    def subImperfect(self):
        return BaseIPSI('naqu').subImperfect()
    def partPast(self):
        return ['né']
class Mourir(BaseUPSI):
    """mourir (to die): u- past base; 'meur-' forms in the present and
    subjunctive; past participle 'mort'."""
    def __init__(self):
        self.stem = 'mour'
        self.verb = 'mourr'
    def indPresentS(self):
        return ['meurs', 'meurs', 'meurt']
    def subPresent(self):
        return ['meure', 'meures', 'meure',
                'mourions', 'mouriez', 'meurent']
    def partPast(self):
        return ['mort']
class Venir(Regular):
    """venir (to come): irregular throughout; future on 'viendr-',
    nasal simple past (vins, ...)."""
    def __init__(self):
        self.stem = 'ven'
        self.verb = 'viendr'
    def indPresent(self):
        return ['viens', 'viens', 'vient', 'venons', 'venez', 'viennent']
    def indSimplePast(self):
        return ['vins', 'vins', 'vint', 'vînmes', 'vîntes', 'vinrent']
    def subPresent(self):
        return ['vienne', 'viennes', 'vienne',
                'venions', 'veniez', 'viennent']
    def subImperfect(self):
        return ['vinsse', 'vinsses', 'vînt',
                'vinssions', 'vinssiez', 'vinssent']
    def partPast(self):
        return ['venu']
class Conduire(BaseIPSI):
    """conduire (to drive): i- past base on 'conduis-'; past participle
    'conduit'."""
    def __init__(self):
        self.stem = 'conduis'
        self.verb = 'conduir'
    def indPresentS(self):
        return ['conduis', 'conduis', 'conduit']
    def partPast(self):
        return ['conduit']
def split_stem(verb):
    """Split an infinitive into [stem, two-letter suffix] (e.g. 'parl','er')."""
    return [verb[:-2], verb[-2:]]
def toClass(verb):
    """Map an infinitive to its conjugation object.

    Irregular verbs are looked up as module-level classes named after the
    capitalised infinitive; otherwise the suffix selects the regular class.
    Unknown suffixes abort the program.
    """
    # First check if the (irregular) verb exists as an explicit class
    cverb = verb.capitalize()
    if cverb in globals():
        return globals()[cverb]()
    # Otherwise, try a (regular) solution via the suffix
    [stem, suff] = split_stem(verb)
    if suff == 'er':
        return Regular(stem, verb)
    elif suff == 'ir':
        return RegularIR(stem, verb)
    elif suff == 're':
        return RegularRE(stem, stem+'r')
    # Fix: report the error on stderr and exit with a non-zero status;
    # the original bare exit() wrote to stdout and exited with status 0.
    print('Unknown suffix: %s (%s)' % (verb, suff), file=sys.stderr)
    sys.exit(1)
def format2(conj):
    """Pretty-print a six-element conjugation as two aligned lines
    (je/tu/elle, then nous/vous/elles)."""
    singular, plural = conj[:3], conj[3:]
    print(' je {0:22}tu {1:22}elle {2}'.format(*singular))
    print(' nous {0:20}vous {1:20}elles {2}'.format(*plural))
def conjSimple(verb):
    """Print the full simple-tense conjugation table for an infinitive:
    participles with the conjugated auxiliary, then the indicative,
    conditional and subjunctive tenses."""
    v = toClass(verb)
    print(verb.capitalize())
    # "en <present participle>   on <aux 3rd sing> <past participle>"
    print(' en {0:22}on {1} {2}'.format(
        v.partPresent()[0], toClass(v.aux()).indPresent()[2], v.partPast()[0]))
    print('Ind. Present'); format2(v.indPresent())
    print('Ind. Imperfect'); format2(v.indImperfect())
    print('Ind. Simple Past'); format2(v.indSimplePast())
    print('Ind. Simple Future'); format2(v.indSimpleFuture())
    print('Conditional'); format2(v.conditional())
    print('Sub. Present'); format2(v.subPresent())
    print('Sub. Imperfect'); format2(v.subImperfect())
# Tests
import unittest
import filecmp
import sys
class TestSimple(unittest.TestCase):
    """Conjugate a fixed verb list into a file and diff it against a
    reference transcript (testSimple-ref.txt)."""
    verbs = ['rester', 'passer', 'finir', 'descendre',
             'être', 'avoir', 'faire', 'voir', 'pouvoir', 'vouloir',
             'savoir', 'aller', 'sortir', 'partir', 'naître', 'mourir',
             'venir', 'conduire']
    def testSimple(self):
        out = 'testSimple-out.txt'
        ref = 'testSimple-ref.txt'
        saved_stdout = sys.stdout
        try:
            with open(out, 'w') as f:
                sys.stdout = f
                for verb in self.verbs:
                    conjSimple(verb)
        finally:
            # Fix: the original never restored sys.stdout, leaving it
            # pointing at the closed file and breaking every later print
            # (including unittest's own reporting).
            sys.stdout = saved_stdout
        self.assertTrue(filecmp.cmp(out, ref))
# This is the main program: conjugate the verbs given on the command line,
# and optionally run the regression tests with -t.
import argparse

parser = argparse.ArgumentParser("conjugate")
parser.add_argument('verb', type=str, nargs='*', help='verbs to conjugate')
parser.add_argument('-t', action='store_true', help='run the tests')
args = parser.parse_args()

for requested in args.verb:
    conjSimple(requested)

if args.t:
    runner = unittest.TextTestRunner()
    runner.run(unittest.TestLoader().loadTestsFromTestCase(TestSimple))
|
from unittest import mock
import pytest
from movies.exceptions import MovieDoesNotExist, MultipleMoviesExist
from movies.services import (
generate_list_movies_with_people,
generate_movie_data_with_people,
)
class TestListMovieWithPeopleService:
    """Unit tests for the movie/people aggregation services.  The Ghibli API
    client methods are mocked, so no network access happens; movie/people
    fixtures (static_*) are provided by conftest — TODO confirm."""
    @pytest.fixture(autouse=True)
    def init_fixtures(self):
        # Autouse placeholder: no shared setup is currently required.
        pass
    # Decorators apply bottom-up, so get_people is the first mock argument.
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_movies",
        autospec=True,
    )
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_people",
        autospec=True,
    )
    def test_generate_list_movies_with_people_success(
        self,
        mock_people_api_call,
        mock_movies_api_call,
        static_movies_data,
        static_people_data,
    ):
        mock_movies_api_call.return_value = static_movies_data
        mock_people_api_call.return_value = static_people_data
        service_response_data = generate_list_movies_with_people()
        # Fixture contains exactly one movie; response is a list of dicts.
        assert len(service_response_data) == 1
        assert isinstance(service_response_data, list)
        response_data_element_keys = set(service_response_data[0].keys())
        ideal_response_keys = {
            "title",
            "release_date",
            "vehicles",
            "description",
            "producer",
            "locations",
            "rt_score",
            "species",
            "id",
            "people",
            "url",
            "director",
        }
        assert response_data_element_keys == ideal_response_keys
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_movies",
        autospec=True,
    )
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_people",
        autospec=True,
    )
    def test_generate_movie_data_with_people_success(
        self,
        mock_people_api_call,
        mock_movies_api_call,
        static_movies_data,
        static_people_data,
        static_data_movie_uuid,
    ):
        mock_movies_api_call.return_value = static_movies_data
        mock_people_api_call.return_value = static_people_data
        service_response = generate_movie_data_with_people(
            static_data_movie_uuid
        )
        # Single-movie lookup returns one dict with the full key set.
        assert isinstance(service_response, dict)
        ideal_response_keys = {
            "id",
            "title",
            "release_date",
            "url",
            "producer",
            "species",
            "locations",
            "vehicles",
            "people",
            "rt_score",
            "director",
            "description",
        }
        assert set(service_response.keys()) == ideal_response_keys
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_movies",
        autospec=True,
    )
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_people",
        autospec=True,
    )
    def test_generate_movie_data_with_people_fails_for_duplicate_uuid(
        self,
        mock_people_api_call,
        mock_movies_api_call,
        static_movies_data_with_duplicates,
        static_data_movie_uuid,
    ):
        # Two movies sharing one UUID must raise MultipleMoviesExist.
        mock_movies_api_call.return_value = static_movies_data_with_duplicates
        mock_people_api_call.return_value = []
        with pytest.raises(MultipleMoviesExist) as exc_info:
            generate_movie_data_with_people(static_data_movie_uuid)
        assert (
            str(exc_info.value)
            == "External API returned more than one movies for UUID 2baf70d1-42bb-4437-b551-e5fed5a87abe!"
        )
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_movies",
        autospec=True,
    )
    @mock.patch(
        "external_services.api_clients.ghibli_api.GhibliAPIClient.get_people",
        autospec=True,
    )
    def test_generate_movie_data_with_people_fails_for_movie_uuid_does_exist(
        self,
        mock_people_api_call,
        mock_movies_api_call,
        static_data_movie_uuid,
    ):
        # An empty movie list must raise MovieDoesNotExist for the UUID.
        mock_movies_api_call.return_value = []
        mock_people_api_call.return_value = []
        with pytest.raises(MovieDoesNotExist) as exc_info:
            generate_movie_data_with_people(static_data_movie_uuid)
        assert (
            str(exc_info.value)
            == "Movie ID 2baf70d1-42bb-4437-b551-e5fed5a87abe not present in external API!"
        )
|
# Personal-facts printing exercise (f-string practice).
name = "Tam H. Nguyen"
age = 32  # Not a lie, not very young
height = 175  # cm
height_in_inches = height / 2.54  # = 68.897...
weight = 65  # kg
weight_in_pound = weight * 2.20462  # = 143.3 lb
eyes = "Black"
teeth = "Yellow"
hair = "Black"
print(f"Let's talk about {name}.")
# Bug fix: the sentences say inches and pounds, so print the converted
# values (the original printed the raw cm/kg numbers instead, even though
# the conversions were computed and then unused).
print(f"He's {height_in_inches:.1f} inches tall.")
print(f"He's {weight_in_pound:.1f} pounds heavy.")
print(f"Actually that's not heavy.")
print(f"He's got {eyes} eyes and {hair} hair.")
print(f"His teeth are usually {teeth} depending on the coffee.")
# This line is tricky, try to get it exactly right
total = age + height + weight
print(f"If I add {age}, {height}, {weight} I get {total}.")
|
from flask import Flask
from flask_restful import Api
import logging
from resources.parking_spot import ParkingSpot
from resources.reservation import Reservation, AddReservation
app = Flask(__name__)
api = Api(app)
# REST endpoints: fetch a reservation by confirmation number, create one,
# and search parking spots near (latitude, longitude) within a radius
# (coordinates passed as string path segments).
api.add_resource(Reservation, '/reservation/<int:confirmation_num>')
api.add_resource(AddReservation, '/reservation')
api.add_resource(ParkingSpot, '/parking_spots/<string:latitude>/<string:longitude>/<string:radius>')
if __name__ == '__main__':
    # File logging is only configured when run directly (not under WSGI).
    logging.basicConfig(filename='myapp.log', level=logging.INFO)
    logging.info('started parking reservation app')
    # NOTE(review): debug=True is a development setting — confirm it is not
    # deployed as-is.
    app.run(port=5000, debug=True)
# How about a for loop that crosses two dicts? Let's see.
creditos_inscritos = {'Juan': 18, 'Maria': 17, 'Estiben': 15, 'Carolina': 20,
                      'Kelly': 12, 'Emmanuel': 20, 'Luis': 18, 'Patricia': 17}
programa = {'Juan': 'Ing Ambiental', 'Maria': 'Fisica Pura',
            'Estiben': 'Administracion', 'Carolina': 'Contaduria',
            'Kelly': 'Enfermeria', 'Emmanuel': 'ing Sistemas',
            'Luis': 'Ing Sistemas', 'Patricia': 'Periodismo'}

# One pass over the credits dict, looking the student up in the other dict.
for estudiante, creditos in creditos_inscritos.items():
    print(estudiante, '\n',
          'inscribio:', creditos, 'creditos',
          '\n En el programa de:', programa[estudiante])
# Note: dicts iterate in insertion order on modern Python.
print('\n \n')

# ------------------- Another example with inventories --------------------
precios = {
    "banano": 4,
    "manzana": 2,
    "naranja": 1.5,
    "pera": 3
}
inventario = {
    "banano": 6,
    "manzana": 0,
    "naranja": 32,
    "pera": 15
}

cuenta = 0
for producto, precio in precios.items():
    print(producto)
    valor = precio * inventario[producto]
    print(' valor total de inventario:', valor)
    cuenta += valor
print('En total podemos ganar:', cuenta, 'si vendemos todo')
|
import sys
sys.path.append('../../')
from challenge import Challenge
from generate import generate_input
# Challenge(
# filename: str,
# code: str,
# challenge_input: str, <-- 默认不带input
# host_path=None, <-- 指定存放代码的路径,默认为当前路径
# timeout=None, <-- 单例超时时间,默认为5s
# cpu=None, <-- CPU个数,默认为1核
# memory=None, <-- 内存限制, 默认30M
# memory_swap=None)) <-- 交换空间, 默认100M
# Build one Challenge: a tiny adder program run inside the sandbox against
# generated input (see the constructor notes above for the defaults).
challenge_input = generate_input()
helloWorld = Challenge(
    'helloworld.py',
    '''while True:
    try:
        s=input().split() # error code
        print(int(s[0])+int(s[1]))
    except:
        break''',
    challenge_input
)
# Create the sandbox, run the submission, then clean up its files.
helloWorld.initBox()
result = helloWorld.box.run()
print('result:', result)
helloWorld.box.clear_file()
#!/usr/bin/python
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.link import TCLink
from mininet.log import setLogLevel
class StarTopo(Topo):
    '''
    A simple star topology: one switch with n hosts attached.
    (Python 2 code: uses xrange.)
    '''
    def build(self, n=3):
        # Hosts are named h1..hn, all linked to the single switch s1.
        switch = self.addSwitch('s1')
        for h in xrange(n):
            host = self.addHost('h%s' % (h + 1))
            self.addLink(host, switch)
def dumpConfigs(net):
    '''
    Run ifconfig on each host and print the output.
    net -- a started Mininet network; assumes net.items() yields
           (name, node) pairs — TODO confirm for the Mininet version used.
    '''
    for hostName, host in net.items():
        ifcfgOut = host.cmd('ifconfig')
        print hostName + ':'
        print ifcfgOut
def testStar():
    '''
    Test dumpConfigs() on a 5-host star topology, then ping all pairs.
    '''
    topo = StarTopo(n=5)
    net = Mininet(topo)
    net.start()
    print 'Here we go...'
    dumpConfigs(net)
    print 'Alright, done.'
    net.pingAll()
    net.stop()
if __name__ == '__main__':
    # Enable Mininet's info-level logging before running the test.
    setLogLevel('info')
    testStar()
    pass  # no-op; kept from the original
|
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QWidget, QFileDialog, QMessageBox
from PyQt5 import uic
from qtpy import QtWidgets
from semantic_similarity.graph_creator import build_graph
from semantic_similarity.main import compute_similarity
class FileManager(QWidget):
    """Qt widget that lets the user pick a word-pair file and writes a
    semantic-similarity report ('report.txt') for each pair."""

    def __init__(self, parent, wv_from_bin):
        # parent      -- owning widget (stored but the Qt parent is not set)
        # wv_from_bin -- preloaded word-vector model used by compute_similarity
        super(FileManager, self).__init__()
        self.ui = uic.loadUi('files.ui', self)
        self.parent = parent
        self.actions_file_name = None
        self.wv_from_bin = wv_from_bin
        # Wire up the two buttons and the read-only path display from the .ui.
        self.calculate_button = self.findChild(QtWidgets.QPushButton, 'calculate_button')
        self.calculate_button.clicked.connect(self.calculate)
        self.choose_file_button = self.findChild(QtWidgets.QPushButton, 'choose_file_button')
        self.choose_file_button.clicked.connect(self.choose_file)
        self.selected_file_text = self.findChild(QtWidgets.QTextBrowser, 'selected_file_text')

    @pyqtSlot()
    def calculate(self):
        """Read 'word1 word2' lines from the selected file and write one
        report line per pair: word1 word2 alpha_coef 10*similarity."""
        with open(self.selected_file_text.toPlainText()) as file:
            with open('report.txt', 'w') as report:
                for line in file.readlines():
                    # NOTE(review): assumes every line is "word1 word2\n";
                    # [:-1] strips the newline and would truncate the last
                    # line if it lacks one — confirm input format.
                    words = line.split(" ")
                    word1 = words[0]
                    word2 = words[1][:-1]
                    g, max_depth, root, dist1, dist2, lch_concept, max_lch_path_length = build_graph(word1, word2)
                    # Guard against division by zero when no LCH path exists.
                    if max_lch_path_length != 0:
                        alpha_coef = (dist1 - dist2) / max_lch_path_length
                    else:
                        alpha_coef = 0
                    sim = compute_similarity(self.wv_from_bin, word1, word2)
                    report.write(word1 + ' ' + word2 + ' ' + str(alpha_coef) + ' ' + str(10*sim))
                    report.write('\n')
        self.show_success_popup()

    @pyqtSlot()
    def choose_file(self):
        """Open a file dialog and display the chosen path."""
        self.actions_file_name, _ = QFileDialog.getOpenFileName()
        if self.actions_file_name:
            self.selected_file_text.setText(self.actions_file_name)

    @staticmethod
    def show_success_popup():
        """Modal message box confirming the report was written."""
        msg = QMessageBox()
        msg.setWindowTitle("Success")
        msg.setText("The report has been successfully created!")
        _ = msg.exec_()
|
# pr3_3_2
# MFCC 参数比较
from scipy.signal import *
import matplotlib.pylab as plt
from Universal import *
from MFCC import *
import numpy as np
import librosa
import math
def mel_dist(x1, x2, fs, num, wlen, inc):
    """
    Compute the first *num* MFCC coefficients of two signals and the
    per-frame Euclidean distance between them.

    :param x1: signal 1
    :param x2: signal 2
    :param fs: sample frequency
    :param num: number of MFCC coefficients to keep
    :param wlen: frame length (samples)
    :param inc: frame shift (samples)
    :return Dcep: per-frame Euclidean distance between the two MFCC sets
    :return Ccep1, Ccep2: num-coefficient MFCC matrices of x1 and x2
    """
    M = MFCC()
    # Bug fix: use the ``fs`` parameter.  The original referenced the
    # module-level ``Fs`` defined under __main__, which breaks as soon as
    # this function is imported or called with a different sample rate.
    ccc1 = M.mfcc(x1, fs, num, wlen, inc)
    ccc2 = M.mfcc(x2, fs, num, wlen, inc)
    fn1 = np.shape(ccc1)[0]  # frame count of the first signal
    Ccep1 = ccc1[:, 0:num]
    Ccep2 = ccc2[:, 0:num]
    # Vectorized per-frame Euclidean distance over the first fn1 frames
    # (replaces the original O(frames * num) Python double loop).
    Dcep = np.sqrt(np.sum((Ccep1 - Ccep2[:fn1, :]) ** 2, axis=1))
    return Dcep, Ccep1, Ccep2
if __name__ == '__main__':
    # Load the three test utterances at a common 8 kHz rate.
    x1, Fs = librosa.load('s1.wav', sr = 8000)
    x2, _ = librosa.load('s2.wav', sr = 8000)
    x3, _ = librosa.load('a1.wav', sr=8000)
    wlen = 200 # frame length
    inc = 80 # frame shift
    num = 16  # number of MFCC coefficients kept
    # NOTE(review): peak normalization uses np.max(x), not np.max(np.abs(x));
    # a signal whose largest excursion is negative is not scaled into [-1, 1]
    # -- confirm this is intended.
    x1 = x1 / np.max(x1) # normalized
    x2 = x2 / np.max(x2)
    x3 = x3 / np.max(x3)
    # Scatter MFCCs of x1 against x2 (same word, different files) for four
    # selected frames.
    Dcep, Ccep1, Ccep2 = mel_dist(x1, x2, Fs, 16, wlen, inc)
    plt.figure(1, dpi = 600)
    plt.plot(Ccep1[2, :], Ccep2[2, :], 'k+')
    plt.plot(Ccep1[6, :], Ccep2[6, :], 'rx')
    plt.plot(Ccep1[11, :], Ccep2[11, :], 'b^')
    plt.plot(Ccep1[15, :], Ccep2[15, :], 'gh')
    plt.legend(['3rd frame', '7th frame', '12th frame', '16th frame'], loc='best')
    plt.xlabel('Signal x1')
    plt.ylabel('Signal x2')
    plt.axis([-12, 12, -12, 12])
    # Diagonal reference: identical MFCC vectors would fall on this line.
    plt.plot([-12, 12], [-12, 12], 'k--')
    plt.title('The Distance of x1 & x2')
    plt.savefig('images/mel_dist_12.png')
    plt.show()
    # Same comparison, but x1 against a different utterance x3.
    Dcep, Ccep1, Ccep2 = mel_dist(x1, x3, Fs, 16, wlen, inc)
    plt.figure(2, dpi = 600)
    plt.plot(Ccep1[2, :], Ccep2[2, :], 'k+')
    plt.plot(Ccep1[6, :], Ccep2[6, :], 'rx')
    plt.plot(Ccep1[11, :], Ccep2[11, :], 'b^')
    plt.plot(Ccep1[15, :], Ccep2[15, :], 'gh')
    plt.legend(['3rd frame', '7th frame', '12th frame', '16th frame'], loc='best')
    plt.xlabel('Signal x1')
    plt.ylabel('Signal x3')
    plt.axis([-12, 12, -12, 12])
    plt.plot([-12, 12], [-12, 12], 'k--')
    plt.title('The Distance of x1 & x3')
    plt.savefig('images/mel_dist_13.png')
    plt.show()
|
from xml.etree import ElementTree as ET
import wolframalpha
import urllib
# Consts
app_id = ''  # WolframAlpha developer AppID -- must be filled in before use
# Variables
client = wolframalpha.Client(app_id)  # shared client for every query below
def get_current_time(place):
    """Return WolframAlpha's textual answer for the current time in *place*."""
    response = client.query('time in ' + place)
    return response.pods[1].text
def get_location_icon_url(place):
    """Return the URL of the flag image WolframAlpha reports for *place*.

    Propagates IndexError when the response has no usable flag pod.
    """
    response = client.query('Flag of ' + place)
    flag_pod = response.pods[1]
    return flag_pod.main.node._children[1].get('src')
def make_temp_location_file(flagUrl, temp_flag_file_name):
    """Download *flagUrl* into the local file *temp_flag_file_name*.

    Bug fix: ``urllib.urlretrieve`` only exists on Python 2; import the
    function in a 2/3-compatible way so the script also runs on Python 3.
    """
    try:
        from urllib.request import urlretrieve  # Python 3
    except ImportError:
        from urllib import urlretrieve  # Python 2
    urlretrieve(flagUrl, temp_flag_file_name)
def get_location_item(place):
    """Build an Alfred-style XML ``<items>`` document for *place*.

    The single item shows "<place> - <current time>" and uses the place's
    flag image as its icon, falling back to the bundled 'icon.png' when no
    flag can be retrieved.
    """
    temp_flag_file_name = 'temp_flag.gif'
    currentTime = get_current_time(place)
    try:
        flag_url = get_location_icon_url(place)
        make_temp_location_file(flag_url, temp_flag_file_name)
    except IndexError:
        # No flag pod in the response -- use the generic default icon.
        temp_flag_file_name = 'icon.png'
    root = ET.Element('items')
    item = ET.SubElement(root, 'item')
    item.set('uid', 'convert')
    item.set('arg', place)
    ET.SubElement(item, 'title').text = place + ' - ' + currentTime
    ET.SubElement(item, 'icon').text = temp_flag_file_name
    return ET.tostring(root)
#########################################################
# #
# Chirs Weir #
# Tondiggidy Simonutti #
# T0ng Liu #
# Codigail Doyle #
# #
# RPI Software Design and Documentation Fall 2014 #
# #
# Yggdrasil.py #
# run with ~$ python Yggdrasil.py #
# #
#########################################################
import socket
import time
import os
import signal
import sys
import stat
from conf import conf
#for catching ctrl-c and ensuring children exit
def signal_handler(signal, frame):
    # Runs in every process (parent and forked children); exiting here
    # guarantees no orphaned child keeps serving sockets.
    print "Caught ctrl-c, killing children..."
    print "Goodbye",os.getpid()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
#main class
class Yggdrasil:
def __init__(self):
self.mname = conf.identity #identity string from conf.py
self.myggdrasilIp = '' #default IP (your own)
self.myggdrasilPort = conf.selfPort #server port from conf.py
self.mpluginInfo = [] #array for storing plugin info
#sets up plugins
def setUpPlugins(self):
#ip and port for bifrost (default)
feedHandlerInfo = ('bifrost',conf.bifrostIP, conf.bifrostPort, socket.socket(socket.AF_INET, socket.SOCK_STREAM), socket.socket(socket.AF_INET, socket.SOCK_STREAM))
self.mpluginInfo.append(feedHandlerInfo) #adds to plugin info (new plugin added to list)
#print "Config data for: ",self.mpluginInfo[0] #debug
#below is how you would add another plugin if odin worked the same way
#odinInfo = ('odin',conf.odinIP, conf.odinPort, socket.socket(socket.AF_INET, socket.SOCK_STREAM), socket.socket(socket.AF_INET, socket.SOCK_STREAM))
#self.mpluginInfo.append(odinInfo) #etc etc\
#print "Config data for: ",self.mpluginInfo[1]
#these two methods are provided for compatibility with the plugin class
def method(data): #yggdrasil provides the same methods as a plugin class, but doesn't need this one
return "This plugin's method has not been implemented yet"
def handShake(self): #yggdrasil initiates the handshake, so this method isn't needed either
return None
def start(self): #start server
self.setUpPlugins() #get plugin configs
BUFFER_SIZE = 102400 #buffer for xmldata
for plugin in self.mpluginInfo: #for every plugin we have...
rc=os.fork() #create a new thread
if rc==0: #if child
pluginFound=0 #look for plugin at the specified IP (in the config)
#for the handshake
prompt = "I am " + self.mname #ident to send to plugin
#attempt to connect to plugin
while pluginFound==0:
try:
plugin[3].connect((plugin[1],plugin[2]))
pluginFound=1
except:
#if we can't connect, try again
print "Looking for", plugin[0], "..."
time.sleep(5) #don't spam people
#now we're connected, send handshake
plugin[3].send(prompt)
#receive response
data = plugin[3].recv(BUFFER_SIZE)
print "response: " + data
#verify it's the right plugin
if(data == "I am ", plugin[0]):
print "sending connection information for", plugin[0]
#tell the plugin the available port
plugin[3].send(str(plugin[2]+1))
else:
# Connection is undefined, loop around and check next connection
print "Not", plugin[0]
recData = "I DON'T KNOW YOU!"
conn.send(recData)
#boot up yggdrasil server application
print "ip=",self.myggdrasilIp," port=",plugin[2]+1
plugin[4].bind((self.myggdrasilIp, plugin[2]+1))
plugin[4].listen(1)
#close down client connection to the plugin, indicating we are ready for it to connect to us now
plugin[3].close()
conn, addr = plugin[4].accept()
#accept client connection from plugin
print 'Connection from:',addr
while 1:
#send plugin the xml
if plugin[0]=="bifrost": #since plugins are handled differently, we take care of that here
xmlfile = open("odin") #open the xml dumped from odin
xmldat = xmlfile.read()
print "sending bifrost XML: ",xmldat,"ENDDATA"
conn.send(xmldat)
if plugin[0]=="odin": #support for a possible odin plugin
xmlfile = open("bifrost")
xmldat = xmlfile.read()
try:
conn.send(xmldat)
except:
print "odin has left"
#receive data back from bifrost
print "receving data"
data = conn.recv(BUFFER_SIZE)
f = open(plugin[0],'w')
f.write(data) #dump data to odin (or whatever plugins are listening)
f.close()
#if not data: break
#print data for debug purposes
#print "received data from ",plugin[0],":", data
print "got data: ",data
time.sleep(5) #avoid spam!
#quit on close
conn.close()
else:
print "Spawing child process with pid ",rc
while 1:
#print "I'm a babysitter. I kill the children when I leave. Probably get terrible reviews on craigslist..."
time.sleep(1)
#this is the parent process, it watches the children and catches ctrl-c to kill them
# main start
if __name__ == '__main__':
    # Build the hub and run it until interrupted.
    hub = Yggdrasil()
    hub.start()
|
import csv
from pprint import pprint
# Get the source data
data = list(csv.DictReader(open("./statestyle/data.csv", "r")))
# Create the normalizer
crosswalk = {}
for row in data:
for key, value in row.items():
if value and key not in ['type', 'stateface']:
crosswalk[value] = row
crosswalk[value.lower()] = row
try:
crosswalk[int(value)] = row
except:
pass
print "CROSSWALK = ",
pprint(crosswalk)
|
#!/usr/bin/python
#########################################################################
# acceptor_test.py
#########################################################################
import server
import os
import unittest
import pickle
import socket
import time
import message
from message import MESSAGE_TYPE
class acceptor_test(unittest.TestCase):
    """End-to-end tests for a PAXOS acceptor process over real sockets.

    setUp boots a two-server configuration, stands in for the second
    server with a dummy listening socket on 9003, and opens a direct
    socket to 9001 for injecting pickled message objects into the
    acceptor under test.

    NOTE(review): Python 2 code (``except Exception, e`` syntax, print
    statements); ports 9001-9004 must be free for the tests to run.
    """

    def setUp(self):
        """
        Bring up just enough infrastructure to set up a test
        acceptor and send it messages
        """
        # Instantiate a server instance
        self.server_list = [
            {
                "host": "localhost",
                "internal_port": 9001,
                "client_port": 9002
            },
            {
                "host": "localhost",
                "internal_port": 9003,
                "client_port": 9004
            }
        ]
        self.paxos_server = server.PAXOS_member(0, self.server_list)
        # Insert a wait for the server to come online
        time.sleep(1)
        # create a dummy server of test
        self.dummy_server_id = 1
        # start a test remote inter-server socket on 9003
        try:
            self.dummy_server_socket = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM)
            self.dummy_server_socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.dummy_server_socket.bind(('localhost', 9003))
            self.dummy_server_socket.listen(5)
        except Exception, e:
            # Dump open sockets and processes to help diagnose bind failures.
            os.system("lsof -n -i")
            os.system("ps -a")
            self.dummy_server_socket.close()
            emsg = "[Info] Failed to bind socket for dummy server: " + str(e)
            raise ValueError(emsg)
        # initialize the acceptor which should initiate a connection to 9003
        self.acceptor_process = self.paxos_server.launch_acceptor_process()
        # create a test socket to inject messages to the acceptor
        self.message_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.message_socket.setsockopt(
            socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.message_socket.connect(('localhost', 9001))
        # accept the incoming connection that should have been made
        # from 9001 to 9003
        self.acceptor_connection, acceptor_address = (
            self.dummy_server_socket.accept())

    def send_prepare(self, prop, ins, rmsg_type, rmsg_prop, rmsg_ins):
        """
        Helper function, send prepare message to server, check
        returned message
        - prop: proposal number to be sent
        - ins: instance number to be sent
        - rmsg_type: expected returned message type
        - rmsg_prop: expected returned message proposal number
        - rmsg_ins: expected returned message instane number
        """
        msg = message.message(
            MESSAGE_TYPE.PREPARE, prop, ins, None, self.dummy_server_id)
        self.message_socket.send(pickle.dumps(msg))
        print "[Info] Sent a proposal to acceptor..."
        rmsgs = self.acceptor_connection.recv(1000)
        rmsg = pickle.loads(rmsgs)
        assert isinstance(rmsg, message.message)
        print "[Info] Received a response from server..."
        assert rmsg.msg_type == rmsg_type
        # Print the mismatching pair before the assert fires, for debugging.
        if rmsg.proposal != rmsg_prop:
            print (rmsg.proposal, rmsg_prop)
        assert rmsg.proposal == rmsg_prop
        assert rmsg.instance == rmsg_ins
        return rmsg

    def send_accept(self, prop, ins, cid, value,
                    rmsg_type, rmsg_prop, rmsg_ins, rmsg_cid):
        """
        Helper function, send accept message to server, check
        returned message
        - prop: proposal number to be sent
        - ins: instance number to be sent
        - cid: client id to be sent
        - value: value to be sent
        - rmsg_type: expected returned message type
        - rmsg_prop: expected returned message proposal number
        - rmsg_ins: expected returned message instane number
        - rmsg_cid: expected returned message client id
        """
        msg = message.message(MESSAGE_TYPE.ACCEPT, prop, ins,
                              value, self.dummy_server_id, cid)
        self.message_socket.send(pickle.dumps(msg))
        rmsgs = self.acceptor_connection.recv(1000)
        rmsg = pickle.loads(rmsgs)
        assert isinstance(rmsg, message.message)
        assert rmsg.msg_type == rmsg_type
        assert rmsg.proposal == rmsg_prop
        assert rmsg.instance == rmsg_ins
        assert rmsg.client_id == rmsg_cid
        return rmsg

    def test_bring_up(self):
        """
        Test acceptor for graceful bring up and exit
        """
        # setUp/tearDown do all the work; passing means clean start/stop.
        print "\n\n[Info] ##########[BRING UP TEST]##########\n"

    def test_single_proposal_prepare(self):
        """
        Issues a single proposal and tests if response
        is received
        """
        print "\n\n[Info] ##########[SINGLE PROPOSAL TEST]##########\n"
        # craft the message, proposal = 0, instance = 1
        self.send_prepare(0, 1, MESSAGE_TYPE.PREPARE_ACK, 0, 1)

    def test_multiple_proposal_prepare(self):
        """
        Issues multiple proposals for the same instance and tests
        if correct responses are received
        """
        print "\n\n[Info] ##########[MULTIPLE PROPOSAL TEST]##########\n"
        # send and receive a valid proposal, proposal = 1, instance = 0
        self.send_prepare(1, 0, MESSAGE_TYPE.PREPARE_ACK, 1, 0)
        print "[Info] First prepare request successful..."
        # send and receive another valid proposal, proposal = 3, instance = 0
        self.send_prepare(3, 0, MESSAGE_TYPE.PREPARE_ACK, 3, 0)
        print "[Info] Second prepare request successful..."
        # send an not receive a lower numbered proposal
        proposal, instance = 2, 0
        msg = message.message(MESSAGE_TYPE.PREPARE,
                              proposal, instance, None, self.dummy_server_id)
        self.message_socket.send(pickle.dumps(msg))
        # a lower-numbered proposal must be ignored, so the recv must
        # time out rather than return a response
        try:
            self.acceptor_connection.settimeout(1.0)
            self.acceptor_connection.recv(1000)
            assert False # time out should happen
        except Exception, e:
            print e
            pass
        print "[Info] Fourth prepare request test successful..."
        # send a higher number proposal just to make sure
        # the proposer didn't die, proposal = 11, instance = 0
        self.send_prepare(11, 0, MESSAGE_TYPE.PREPARE_ACK, 11, 0)
        print "[Info] Fifth prepare request successful..."

    def test_multiple_instance_prepare(self):
        """
        Test multiple instances
        """
        print "\n\n[Info] ##########[MULTIPLE INSTANCE PREAPRE TEST]########\n"
        # send an initial instance number, proposal = 0, instance = 0
        self.send_prepare(0, 0, MESSAGE_TYPE.PREPARE_ACK, 0, 0)
        # send a higher number proposal, proposal = 5, instance = 0
        self.send_prepare(5, 0, MESSAGE_TYPE.PREPARE_ACK, 5, 0)
        # send a different instance, proposal = 1, instance = 2
        self.send_prepare(1, 2, MESSAGE_TYPE.PREPARE_ACK, 1, 2)
        # send original instance with lower proposal number
        proposal, instance = 3, 0
        msg = message.message(MESSAGE_TYPE.PREPARE, proposal, instance,
                              None, self.dummy_server_id)
        self.message_socket.send(pickle.dumps(msg))
        self.acceptor_connection.settimeout(1.0)
        # again, the stale proposal must be ignored (recv times out)
        try:
            pickle.loads(self.acceptor_connection.recv(1000))
            assert(False)
        except Exception, e:
            print e
            pass
        # send to new instance with higher proposal number
        # proposal = 7, instance = 2
        self.send_prepare(7, 2, MESSAGE_TYPE.PREPARE_ACK, 7, 2)

    def test_multiple_prepare_accept(self):
        """
        Attempt prepare and then accept of same proposal number
        """
        # send a prepare request, proposal = 5, instance = 1
        self.send_prepare(5, 1, MESSAGE_TYPE.PREPARE_ACK, 5, 1)
        # send a prepare request, proposal = 9, instance = 3
        self.send_prepare(9, 3, MESSAGE_TYPE.PREPARE_ACK, 9, 3)
        # send an accept request, proposal = 5, instance = 1, client_id = 9
        self.send_accept(5, 1, 9, 5, MESSAGE_TYPE.ACCEPT_ACK, 5, 1, 9)
        # send a prepare request, proposal = 0, instance = 2
        self.send_prepare(0, 2, MESSAGE_TYPE.PREPARE_ACK, 0, 2)
        # send a accept request, proposal = 9, instance = 3, client_id = 9
        self.send_accept(9, 3, 9, 5, MESSAGE_TYPE.ACCEPT_ACK, 9, 3, 9)
        # send a accept request, proposal = 0, instance = 2, client_id = 9
        self.send_accept(0, 2, 9, 5, MESSAGE_TYPE.ACCEPT_ACK, 0, 2, 9)

    def test_reject_accept(self):
        """
        Test case where prepare goes through but another proposal
        fires a high proposal before accept comes through
        """
        print "\n\n[Info] ##########[SINGLE PREPARE ACCEPT TEST]##########\n"
        # send a prepare message, proposal = 6, instance = 0
        self.send_prepare(6, 0, MESSAGE_TYPE.PREPARE_ACK, 6, 0)
        # send another prepare message, proposal = 8; instance = 0
        self.send_prepare(8, 0, MESSAGE_TYPE.PREPARE_ACK, 8, 0)
        # send an accept message which should get rejected
        proposal, instance, client_id = 6, 0, 9
        msg = message.message(MESSAGE_TYPE.ACCEPT, proposal, instance,
                              None, self.dummy_server_id, client_id)
        self.message_socket.send(pickle.dumps(msg))
        self.acceptor_connection.settimeout(1.0)
        # the superseded accept must be dropped silently (recv times out)
        try:
            pickle.loads(self.acceptor_connection.recv(1000))
            assert(False)
        except Exception, e:
            print e
            pass
        print "[Info] send actual valid accept req"
        # send an actual accept message which should get accepted
        # proposal = 8, instance = 0, client_id = 9
        self.send_accept(8, 0, 9, 5, MESSAGE_TYPE.ACCEPT_ACK, 8, 0, 9)

    def test_return_prepare_nack(self):
        """
        Test that acceptor will return highest proposal accepted and its
        value correctly.
        """
        # send a prepare message, proposal = 1, instance = 0
        self.send_prepare(1, 0, MESSAGE_TYPE.PREPARE_ACK, 1, 0)
        # send an accept message, proposal = 1, instance = 0, value = 5
        self.send_accept(1, 0, 0, 5, MESSAGE_TYPE.ACCEPT_ACK, 1, 0, 0)
        # send a prepare message, proposal = 3, instance = 0,
        rmsg = self.send_prepare(3, 0, MESSAGE_TYPE.PREPARE_NACK, 3, 0)
        # the NACK must carry the previously accepted proposal and value
        assert rmsg.r_proposal == 1
        print "rmsg.value : {}".format(rmsg.value)
        assert rmsg.value == 5

    def tearDown(self):
        """
        Tear down infrastructure and exit
        """
        # shut down the acceptor by sending an exit message from 9003 to 9001
        msg = message.message(message.MESSAGE_TYPE.EXIT,
                              None, None, None, None, None)
        self.message_socket.send(pickle.dumps(msg))
        print "[Info] Issued a shutdown message..."
        # clean up the sockets
        self.message_socket.close()
        self.dummy_server_socket.close()
        self.acceptor_connection.close()
        # attempt to join the processes
        try:
            self.acceptor_process.join(1)
        except Exception, e:
            print e
            assert(False)
        # terminate the connection process
        self.paxos_server.listening_process.terminate()
if __name__ == '__main__':
    unittest.main()  # discover and run every test_* method in acceptor_test
|
##---------------------------------------------------------
##
## Goods and Services Tax
##
## Define a function with one parameter, a number representing
## a wholesale price, that returns the GST component to be added
## to that wholesale price (where GST is 10%).
##
## The tests below tell us how your function is expected to
## behave when called. The main program of this file runs
## the tests, so after you've developed your function you
## can just "run" this file to see if you've succeeded.
##
## Observation: Computer-based floating point arithmetic doesn't always
## produce mathematically-precise results (see Appendix B of the Python
## Tutorial). Therefore, we've rounded off the answers to two decimal
## places in the tests.
#---------------------------------------------------------
# These are the tests your function must pass.
#
"""
>>> round(gst(100), 2) # Test 1 - normal case
10.0
>>> round(gst(123.56), 2) # Test 2 - normal case
12.36
>>> round(gst(100 + 50), 2) # Test 3 - wholesale price as an expression
15.0
>>> round(gst(0), 2) # Test 4 - boundary case (price is zero)
0.0
>>> round(gst(123456789), 2) # Test 5 - price is large number
12345678.9
"""
#---------------------------------------------------------
# Your solution
#
GST_RATE = 0.1  # Goods and Services Tax rate (10%)

def gst(wholesale_price):
    """Return the GST component to be added to *wholesale_price*.

    The exercise's doctests above exercise this function; it was missing
    from the template (only a placeholder comment existed), so running the
    file failed every test with NameError.
    """
    return wholesale_price * GST_RATE
#---------------------------------------------------------
# This main program executes the tests above when this
# file is run.
#
# The print call is parenthesized so the line is valid (and prints the
# same single value) on both Python 2 and Python 3.
from doctest import testmod, REPORT_ONLY_FIRST_FAILURE
print(testmod(verbose = False,
              optionflags = REPORT_ONLY_FIRST_FAILURE))
|
# Q1, A
def reverse_word(word):
    """
    Return *word* with its characters in reverse order.

    >>> reverse_word("junyiacademy")
    'ymedacaiynuj'
    """
    return word[::-1]
# Q1, B
def reverse_sentence(sentence):
    """
    Reverse each individual word of *sentence* while keeping word order.

    >>> reverse_sentence("flipped class room is important")
    'deppilf ssalc moor si tnatropmi'
    """
    return " ".join(word[::-1] for word in sentence.split(" "))
# Q2
def count_numbers(end_number):
    """
    Count the integers from 1 to *end_number* after removing every
    multiple of 3 and every multiple of 5, except that numbers divisible
    by both (i.e. by 15) are kept.

    >>> count_numbers(15)
    9
    """
    return sum(
        1
        for n in range(1, end_number + 1)
        if n % 15 == 0 or (n % 3 != 0 and n % 5 != 0)
    )
# Q3
"""
3.房間裡有三個袋子,一個只裝鉛筆,一個只裝原子筆,第三個有鉛筆也有原子筆,袋子是不透明的,單從
袋子的外表上看不出任何差異,你不知道哪一個袋子裝了什麼。除了袋子上各貼了一個標示("鉛筆"、"原子
筆"、"混和"),而且標示都是錯的(ex. 標有鉛筆的袋子一定不是只裝鉛筆)。
你只能選一個袋子,然後拿出裡面一支筆看是鉛筆還是原子筆,然後你要推論出這三個袋子分別的情況。請
列出您的作法,以及解釋為什麼這樣可以找到答案。
A:
1. 先從貼"混和"的袋子拿出一枝筆
2. 如果那枝筆是鉛筆,代表貼"混合"的袋子其實是鉛筆(因為這袋不是鉛筆就是原子筆)
3. 因為混合袋是鉛筆,代表貼"原子筆"的袋子一定是混合(因為只能是混合或鉛筆,但已經不可能是鉛筆)
4. 最後,貼"鉛筆"的袋子就一定是原子筆
如果最開始從"混和"的袋子拿出來的是原子筆,那也是用同樣方法推論。
"""
# Q4
"""
4.有三個人一起到迪士尼遊玩,中午肚子餓了,去餐廳點了一份現在最夯的冰雪奇緣雙人組,要價 900 元,
付錢後,服務生發現今天套餐大特價,只要 750 元,因此服務生應該退還 150 元給這三個人,但是這位服務
生一時鬼迷心翹,決定按扛 60 元,只退了 90 元給這三個遊客。
那麼: 三人各出 300 元 - 服務生還給他們一人 30 元 = 三人各出 270 元 270元 × 3人 + 服務生私吞的 60 元
= 810 + 60 = 870 !? 怎麼不是 900 元呢?還有 30元去哪了呢? 請用敘述的方式,儘量清楚解釋問題出在哪
裡。
A:
這個等式列錯了,應該是這樣
三個人最後共出 810 元 == 服務生私吞 60 元 + 迪士尼收了 750 元
810 = 60 + 750
所以 810 + 60 邏輯上沒道理
不是三個人和服務生付錢給迪士尼
而是三個人付錢給服務生(私吞)和迪士尼
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import numpy as np
# Adjacency matrix of a 5-symbol transition graph (A[i][j] == 1 means
# symbol i+1 can be followed by symbol j+1).
A = [[0, 0, 1, 0, 1],
     [1, 1, 0, 1, 0],
     [1, 0, 0, 0, 1],
     [0, 0, 0, 1, 0],
     [0, 1, 0, 1, 0]]
B = np.linalg.matrix_power(A,6)  # entry (i, j): number of length-6 paths i -> j
C = np.linalg.matrix_power(A,3)  # entry (i, j): number of length-3 paths i -> j
print B
i = 3
print B[i-1]
for i in range (1,6):
    # row-sum of B: length-6 paths starting at symbol i;
    # column-sum of C: length-3 paths ending at symbol i
    print "Number of combinations of symbol ", i, " = ", np.sum(B[i-1]) * np.sum(C[:, i-1])
|
import json
import os
import pickle
import pandas as pd
from util.classifier import load_candidates
from util.get_keys import get_keys
from util.get_one import get_one
from util.preprocessor import labels_to_lowercase
class Level2Module:
    """Dictionary-based level-2 classifier layered on a level-1 module.

    During training it maps each level_1 label to the set of level_2
    labels that co-occurred with it; at classification time it matches
    MetaMap candidate strings (longest word-prefix first) against that
    set for the row's predicted level_1 label.
    """

    def __init__(self, l1_module):
        """
        Returns a new, untrained Level2Module.
        :param l1_module: a trained Level1MLModule or Level1SymbolicModule that
        this Level2Module refers to when classifying new data
        """
        self.l1_module = l1_module
        # Mapping: level_1 label -> set of co-occurring level_2 labels.
        # None until retrain() or load_from_file() is called.
        self.dictionary = None

    def retrain(self, raw_df):
        """
        Retrains this Level2Module on the given data. Raises a ValueError if the
        given DataFrame is empty.
        - Converts all labels in the given DataFrame to lowercase
        - Populates this Level2Module's dictionary's keys with all level_1
          labels in the given DataFrame, excluding "*not found"
        - Maps each level_1 label in this Level2Module's dictionary to a List of
          level_2 labels that have appeared in the given DataFrame along with
          the level_1 label
        :param raw_df: a DataFrame containing the raw training data extracted
        from the database
        - required columns: {"level_1", "level_2"}
        :return: None
        """
        if raw_df.empty:
            raise ValueError("Cannot retrain Level2Module on empty set.")
        print("Level2Module: Started retraining")
        df = labels_to_lowercase(raw_df)
        self.dictionary = {}
        for index, row in df.iterrows():
            raw_l1_label = row["level_1"]
            # Compound labels like "a or b" are indexed three ways: under
            # the raw string and under each alternative individually.
            l1_labels = raw_l1_label.split(" or ")
            l2_label = row["level_2"]
            if raw_l1_label not in self.dictionary:
                self.dictionary[raw_l1_label] = set()
            self.dictionary[raw_l1_label].add(l2_label)
            for l1_label in l1_labels:
                if l1_label not in self.dictionary:
                    self.dictionary[l1_label] = set()
                self.dictionary[l1_label].add(l2_label)
        print("Level2Module: Finished retraining")

    def classify(self, raw_df, observations=False, return_all=False):
        """
        Classifies the given data. Raises a ValueError if this Level2Module has
        not been trained.
        :param raw_df: a DataFrame containing the raw test data extracted from
        the database
        - required columns: {"test_key", "result_key", "obs_seq_nbr" (if
          observations is True), "candidates"}
        :param observations: True if the data is given at the observation level,
        False if the data is given at the test level
        :param return_all: True to return all candidate organisms tagged by
        MetaMap, False to return only the most likely candidate organism
        :return: a DataFrame containing the classification results
        - columns: {"test_key", "result_key", "obs_seq_nbr" (if observations is
          True), "level_2_pred"}
        """
        if not self._is_trained():
            raise ValueError("Level2Module is not trained.")
        keys = get_keys(observations)
        # Delegate level-1 prediction, then normalize the column name so
        # both symbolic and ML level-1 modules look the same downstream.
        l1_results = self.l1_module.classify(raw_df, observations)
        if "level_1_symbolic_pred" in l1_results:
            l1_results.rename(
                columns={"level_1_symbolic_pred": "level_1_pred"}, inplace=True)
        elif "level_1_ml_pred" in l1_results:
            l1_results.rename(
                columns={"level_1_ml_pred": "level_1_pred"}, inplace=True)
        df = pd.merge(raw_df, l1_results, how="inner", on=keys)
        df["level_2_pred"] = df.apply(
            lambda row: self._classify_row(row, return_all),
            axis=1
        )
        result = df.loc[:, keys + ["level_2_pred"]]
        return result

    def _classify_row(self, row, return_all):
        """
        Classifies the given row.
        Precondition: this Level2Module has been trained.
        :param row: the data row to classify
        - required columns: {"level_1_pred", "candidates"}
        :param return_all: True to return all candidate organisms tagged by
        MetaMap, False to return only the most likely candidate organism
        :return: the classification (the most likely organism if return_all is
        False, or a string representation of the List of all candidate organisms
        tagged by MetaMap)
        """
        # check level 1
        level_1 = row["level_1_pred"]
        if level_1 == "*not found":
            return "*not found"
        elif level_1 not in self.dictionary:
            return "*no further diff"
        # preprocess candidates
        candidates = load_candidates(row["candidates"])
        candidates = [candidate.lower() for candidate in candidates]
        # ----------------------------------------------------------------------
        # predict
        if return_all:
            return json.dumps(candidates)
        if not candidates:
            # NOTE(review): "*not further diff" differs from the
            # "*no further diff" sentinel returned above -- confirm which
            # spelling downstream consumers expect.
            return "*not further diff"
        for candidate in candidates:
            words = candidate.split()
            # Try progressively shorter word prefixes, longest first
            # (range stop is 1 - 1 == 0, i.e. prefix lengths len(words)..1).
            for i in range(len(words), 1 - 1, -1):
                level_2 = " ".join(words[:i])
                if level_2 in self.dictionary[level_1]:
                    return level_2
        return get_one(candidates)

    def _is_trained(self):
        """
        Returns True iff the retrain method has been called on this Level2Module
        instance at least once.
        :return: whether this Level2Module has been trained
        """
        return self.dictionary is not None

    def load_from_file(self, filepath):
        """
        Loads the dictionary stored in the pickle file at the given path into
        this Level2Module, overwriting this Level2Module's current dictionary.
        :param filepath: the absolute path to the pickle file to load the
        dictionary from
        :return: this Level2Module
        """
        with open(filepath, "rb") as file:
            self.dictionary = pickle.load(file)
        return self

    def save_to_file(self, filepath):
        """
        Saves this Level2Module's dictionary to the pickle file at the given
        path, overwriting the file if it already exists.
        :param filepath: the absolute path to the pickle file to save the
        dictionary to
        :return: None
        """
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        with open(filepath, "wb") as file:
            pickle.dump(self.dictionary, file)
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import os
import shutil
import sys
import tempfile
import unittest
from io import BytesIO
import PIL
import mxnet as mx
import numpy as np
import pytest
from helper.pixel2pixel_service import UnetGenerator
from mms.model_service.mxnet_model_service import MXNetBaseService, GluonImperativeBaseService
curr_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(curr_path + '/../..')
def empty_file(path):
    """Create *path* as an empty file (or just touch it if it exists).

    Uses a context manager so the handle is always closed, instead of the
    original ``open(path, 'a').close()`` one-liner.
    """
    with open(path, 'a'):
        pass
def module_dir(tmpdir):
    """Create a skeleton model directory under *tmpdir* and return its path.

    Lays down empty symbol/params/synset placeholder files plus a two-input
    signature.json, mimicking the layout of an exported MXNet model archive.
    """
    path = '{}/test'.format(tmpdir)
    os.mkdir(path)
    for placeholder in ('test-symbol.json', 'test-0000.params', 'synset.txt'):
        empty_file('{}/{}'.format(path, placeholder))
    signature = {
        "input_type": "image/jpeg",
        "inputs": [
            {'data_name': 'data1', 'data_shape': [1, 3, 64, 64]},
            {'data_name': 'data2', 'data_shape': [1, 3, 32, 32]},
        ],
        "output_type": "application/json",
        "outputs": [
            {'data_name': 'softmax', 'data_shape': [1, 10]},
        ],
    }
    with open('{}/signature.json'.format(path), 'w') as sig:
        json.dump(signature, sig)
    return path
def create_symbolic_manifest(path):
    """Write a MANIFEST.json describing a symbolic (MXNet-Symbolic) model."""
    manifest = {
        "Engine": {"MXNet": 0.12},
        "Model-Archive-Description": "test",
        "License": "Apache 2.0",
        "Model-Archive-Version": 0.1,
        "Model-Server": 0.1,
        "Model": {
            "Description": "test",
            "Service": "test",
            "Symbol": "",
            "Parameters": "test-0000.params",
            "Signature": "signature.json",
            "Model-Name": "test",
            "Model-Format": "MXNet-Symbolic",
        },
    }
    with open('{}/MANIFEST.json'.format(path), 'w') as man:
        json.dump(manifest, man)
def create_imperative_manifest(path):
    """Write a MANIFEST.json describing a Gluon imperative model."""
    manifest = {
        "Engine": {"MXNet": 0.12},
        "Model-Archive-Description": "test",
        "License": "Apache 2.0",
        "Model-Archive-Version": 0.1,
        "Model-Server": 0.1,
        "Model": {
            "Description": "test",
            "Service": "test",
            "Symbol": "",
            "Parameters": "",
            "Signature": "signature.json",
            "Model-Name": "test",
            "Model-Format": "Gluon-Imperative",
        },
    }
    with open('{}/MANIFEST.json'.format(path), 'w') as man:
        json.dump(manifest, man)
class TestService(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def _train_and_export(self, path):
model_path = curr_path + '/' + path
if not os.path.isdir(model_path):
os.mkdir(model_path)
num_class = 10
data1 = mx.sym.Variable('data1')
data2 = mx.sym.Variable('data2')
conv1 = mx.sym.Convolution(data=data1, kernel=(2, 2), num_filter=2, stride=(2, 2))
conv2 = mx.sym.Convolution(data=data2, kernel=(3, 3), num_filter=3, stride=(1, 1))
pooling1 = mx.sym.Pooling(data=conv1, kernel=(2, 2), stride=(1, 1), pool_type="avg")
pooling2 = mx.sym.Pooling(data=conv2, kernel=(2, 2), stride=(1, 1), pool_type="max")
flatten1 = mx.sym.flatten(data=pooling1)
flatten2 = mx.sym.flatten(data=pooling2)
summary = mx.sym.sum(data=flatten1, axis=1) + mx.sym.sum(data=flatten2, axis=1)
fc = mx.sym.FullyConnected(data=summary, num_hidden=num_class)
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
dshape1 = (10, 3, 64, 64)
dshape2 = (10, 3, 32, 32)
lshape = (10,)
mod = mx.mod.Module(symbol=sym, data_names=('data1', 'data2'),
label_names=('softmax_label',))
mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
label_shapes=[('softmax_label', lshape)])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate': 0.01})
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
mod.backward()
mod.update()
with open('%s/synset.txt' % model_path, 'w') as synset:
for i in range(10):
synset.write('test label %d\n' % i)
def _write_image(self, img_arr):
img_arr = mx.nd.transpose(img_arr, (1, 2, 0)).astype(np.uint8).asnumpy()
mode = 'RGB'
image = PIL.Image.fromarray(img_arr, mode)
output = BytesIO()
image.save(output, format='jpeg')
return output.getvalue()
def test_vision_init(self):
path = 'test'
self._train_and_export(path)
model_path = curr_path + '/' + path
os.system('rm -rf %s' % model_path)
def test_vision_inference(self):
path = 'test'
self._train_and_export(path)
os.system('rm -rf %s/test' % curr_path)
def test_gluon_inference(self):
path = 'gluon'
model_name = 'gluon1'
model_path = curr_path + '/' + path
os.mkdir(model_path)
ctx = mx.cpu()
net_g = UnetGenerator(in_channels=3, num_downs=8)
data = mx.nd.random_uniform(0, 255, shape=(1, 3, 256, 256))
net_g.initialize(mx.init.Normal(0.02), ctx=ctx)
net_g(data)
net_g.save_params('%s/%s.params' % (model_path, model_name))
with open('%s/signature.json' % model_path, 'w') as sig:
signature = {
"input_type": "image/jpeg",
"inputs": [
{
'data_name': 'data',
'data_shape': [1, 3, 256, 256]
},
],
"output_type": "image/jpeg",
"outputs": [
{
'data_name': 'output',
'data_shape': [1, 3, 256, 256]
}
]
}
json.dump(signature, sig)
cmd = 'python %s/../../export_model.py --model-name %s --model-path %s' \
% (curr_path, model_name, model_path)
os.system(cmd)
os.system('rm -rf %s %s/%s.model %s/%s' % (model_path, os.getcwd(),
model_name, os.getcwd(), model_name))
def test_mxnet_model_service(self):
mod_dir = module_dir(self.test_dir)
if mod_dir.startswith('~'):
model_path = os.path.expanduser(mod_dir)
else:
model_path = mod_dir
create_symbolic_manifest(model_path)
manifest = json.load(open(os.path.join(model_path, 'MANIFEST.json')))
with pytest.raises(Exception):
MXNetBaseService('test', model_path, manifest)
os.system('rm -rf %s' % model_path)
def test_gluon_model_service(self):
    """An imperative manifest loads cleanly into GluonImperativeBaseService."""
    # expanduser is a no-op unless the path starts with '~'
    model_path = os.path.expanduser(module_dir(self.test_dir))
    create_imperative_manifest(model_path)
    manifest = json.load(open(os.path.join(model_path, 'MANIFEST.json')))
    GluonImperativeBaseService('test', model_path, manifest,
                               mx.gluon.model_zoo.vision.alexnet(pretrained=True))
    os.system('rm -rf %s' % model_path)
def runTest(self):
    """Run the full suite in a fixed order."""
    for case in (self.test_vision_init,
                 self.test_vision_inference,
                 self.test_gluon_inference,
                 self.test_mxnet_model_service,
                 self.test_gluon_model_service,
                 self.test_incorrect_service):
        case()
|
import turtle
def make_window(colr, ttle):
    """Create and return a turtle screen with the given background color and title."""
    screen = turtle.Screen()
    screen.bgcolor(colr)
    screen.title(ttle)
    return screen
def make_turtle(colr, sz):
    """Create and return a turtle with the given pen color and pen size."""
    pen = turtle.Turtle()
    pen.color(colr)
    pen.pensize(sz)
    return pen
def cesaro_torn_line(t, order, size):
    """Recursively draw a Cesaro torn line of the given order and length."""
    if order == 0:
        t.forward(size)
        return
    # each segment is replaced by four sub-segments with turns between them
    for angle in (85, -170, 85, 0):
        cesaro_torn_line(t, order - 1, size / 3)
        t.right(angle)
def cesaro_square(t, order, size):
    """Draw a square whose four sides are Cesaro torn lines."""
    for _side in range(4):
        cesaro_torn_line(t, order, size)
        t.right(90)
# Interactive driver: ask for the recursion depth, then draw one torn line
# followed by a full Cesaro square of side 300.
depth = int(input('Your order: '))
wn = make_window('lightgreen', 'Cesaro Recursive')
tess = make_turtle('blue', 1)
tess.speed(10)
cesaro_torn_line(tess, depth, 300)
cesaro_square(tess, depth, 300)
wn.mainloop()
|
import numpy as np
class Doctor:
    """A doctor with an id, an availability status, and a half-hour
    appointment schedule covering 8.0 through 16.5."""

    def __init__(self, identifier):
        self.id = identifier
        self.status = "available"
        # one slot every half hour from 8.0 to 16.5 inclusive, all open
        self.schedule = {}
        for slot in np.arange(8.0, 17.0, .5):
            self.schedule[slot] = 'open'

    def __str__(self):
        # str() so a non-string identifier (e.g. an int) doesn't crash the concat
        return "Hello I am " + str(self.id)

    def assign_schedule(self, patient, timeslot):
        """Book `patient` into `timeslot`; return True on success, False otherwise."""
        if self.status == 'notavailable':
            print("This doctor is not available.")
            return False
        if timeslot not in self.schedule:
            # previously an unknown timeslot raised KeyError; report it instead
            print('This appointment time is not available.')
            return False
        if self.schedule[timeslot] != 'open':
            print('This appointment time is not available.')
            return False
        self.schedule[timeslot] = patient
        return True

    def change_doctor_availability(self, status):
        # status is a free-form string; 'notavailable' blocks bookings
        self.status = status

    def show_open_appointments(self):
        """Print every slot, flagging whether it is still open."""
        for slot, booking in self.schedule.items():
            if booking == 'open':
                print("Appointment at time: " + str(slot) + " is " + booking)
            else:
                print("Appointment at time: " + str(slot) + " is not open")

    def show_doctor_schedule(self):
        """Print every slot with the booked patient where applicable."""
        for slot, booking in self.schedule.items():
            if booking == 'open':
                print("Appointment at time: " + str(slot) + " is " + booking)
            else:
                print("Appointment at time: " + str(slot) + " with patient: ")
                print(booking)
|
import random
# Monte Carlo pi-estimation tallies, updated every frame by draw().
inside = 0   # samples that fell inside the inscribed circle
outside = 0  # samples that fell outside it
pointlist = []  # NOTE(review): appears unused in this sketch — confirm before removing
def setup():
    # Processing.py sketch setup (runs once). fullScreen/background/colorMode
    # etc. are Processing built-ins injected by the sketch runtime, not
    # ordinary Python names.
    #size(500, 500)
    fullScreen()
    background(51)
    colorMode(HSB, 100)
    # draw the target circle centered on the screen, diameter = screen height
    translate(width/2, height/2)
    fill(color(40, 50, 50))
    circle(0, 0, height)
def draw():
    # Runs every frame: scatter 1000 random points over the circle's bounding
    # square and update the inside/outside tallies for the pi estimate.
    translate(width/2, height/2)
    stroke(color(0, 100, 100))
    global inside
    global outside
    for i in range(1000):
        rand_x = random.uniform(-height/2, height/2)
        rand_y = random.uniform(-height/2, height/2)
        point(rand_x, rand_y)
        # squared-distance test against the circle of radius height/2
        if rand_x*rand_x + rand_y*rand_y >= (height/2)*(height/2):
            outside += 1
        else:
            inside += 1
    # area ratio circle/square is pi/4, so pi ~= 4 * inside / total
    estimate = 4 * (float(inside)/(float(inside) + float(outside)))
    # print("Pi estimate is: " + str(estimate))
    # overlay the running estimate in a box at the center of the screen
    push()
    rectMode(CENTER)
    fill(color(0, 0, 100))
    rect(0, -10, 320, 50)
    textAlign(CENTER)
    fill(color(0, 0, 0))
    textSize(32)
    text(str("{:8}".format(estimate)), 0, 0)
    pop()
|
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String, DateTime
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Module-level database wiring: a local SQLite file, the declarative base for
# models, and a session factory bound to the engine.
engine = create_engine('sqlite:///messagedb.sqlite3')
Base = declarative_base()
Session = sessionmaker(bind=engine)
class Message(Base):
    """A short (max 141 characters) message posted by a user."""
    __tablename__ = 'message'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # poster's id; no FK constraint here
    message = Column(String(length=141))
    date_posted = Column(DateTime)

    def to_dict(self):
        # date_posted must be set before calling — isoformat() on None raises
        return {'id': self.id, 'user_id': self.user_id, 'message': self.message,
                'date_posted': self.date_posted.isoformat()}
def post_message(user_id, message):
    """Persist a new message for `user_id` and return it as a dict.

    The session is now always closed, even when add/commit raises — the
    original leaked the session on any exception.
    """
    s = Session()
    try:
        m = Message()
        m.user_id = user_id
        m.message = message
        m.date_posted = datetime.now()
        s.add(m)
        s.commit()
        # build the dict while the instance is still bound to the session
        message_dict = m.to_dict()
    finally:
        s.close()
    return message_dict
def search_messages(search):
    """Return all messages containing `search` (case-insensitive), newest first."""
    s = Session()
    pattern = '%{}%'.format(search)
    # in real life, don't allow wildcard search injection like this
    query = (s.query(Message)
             .filter(Message.message.ilike(pattern))
             .order_by(Message.date_posted.desc()))
    results = [msg.to_dict() for msg in query]
    s.close()
    return results
def list_messages():
    """Return every message as a dict, newest first."""
    s = Session()
    query = s.query(Message).order_by(Message.date_posted.desc())
    results = [msg.to_dict() for msg in query]
    s.close()
    return results
def list_user_messages(user_id):
    """Return all messages posted by `user_id`, newest first."""
    s = Session()
    query = (s.query(Message)
             .filter(Message.user_id == user_id)
             .order_by(Message.date_posted.desc()))
    results = [msg.to_dict() for msg in query]
    s.close()
    return results
|
"""
This file solves the following problem: given a tree and a subtree find the sequence of operations m_{\alpha \beta},
such that m_{\alpha \beta}(subtree) = tree.
"""
from basic import *
from tree_polynomial import TreePolynomial
class MOperation:
    # This is the class of operations m_{\alpha, \beta}
    """A single grafting operation m_{alpha, beta}: attach a corolla of
    `generator` above or below a tree, followed by a leaf shuffle."""

    def __init__(self, type_, generator, position, shuffle=Permutation()):
        # NOTE(review): `Permutation()` is a default shared across calls; safe
        # only if Permutation instances are immutable — confirm in `basic`.
        self.type = type_  # type is a string, can be 'above' meaning the generator grafted from above or 'below'
        self.generator = generator  # object of type TypeOfVertex
        self.position = position  # leaf label ('above') or input-slot number ('below')
        self.shuffle = shuffle  # permutation applied when grafting

    def __str__(self):
        if self.type == 'above':
            return 'operation: graft ' + self.generator.label + ' ' + self.type + ' on position ' + str(self.position) + '; shuffle: ' + str(self.shuffle)
        else:
            return 'operation: graft the tree on ' + self.generator.label + ' in slot number ' + str(self.position) + '; shuffle: ' + str(self.shuffle)

    def apply_to_tree(self, tree):
        """Graft a fresh corolla of self.generator onto `tree` and return the result.

        Returns None implicitly for any type other than 'above'/'below'.
        """
        new_corolla = ColoredTree(self.generator.create_vertex())
        if self.type == 'above':
            return graft(tree, new_corolla, self.position, self.shuffle)
        if self.type == 'below':
            return graft(new_corolla, tree, self.position, self.shuffle)

    def can_be_applied_to(self, tree):
        """Return True when the operation is colour-compatible with `tree`
        and the resulting tree is a shuffle tree."""
        if self.type == 'above':
            # find target leaf:
            target_leaf = None
            for leaf in tree.leaves:
                if leaf.label == self.position:
                    target_leaf = leaf
            # check colour compatibility:
            # NOTE(review): if no leaf matches self.position, target_leaf stays
            # None and the next line raises AttributeError — callers presumably
            # guarantee the position exists; confirm.
            if self.generator.output_color != target_leaf.color:
                return False
            # check shuffle property:
            candidate = self.apply_to_tree(tree)
            if not candidate.is_shuffle_tree:
                return False
        if self.type == 'below':
            # check colour compatibility:
            bottom_color = self.generator.input_colors[self.position - 1]
            top_color = tree.root.output_color
            if bottom_color != top_color:
                return False
            # check shuffle property:
            candidate = self.apply_to_tree(tree)
            if not candidate.is_shuffle_tree:
                return False
        return True
class SeqMOperation:
    """An ordered sequence of MOperation objects, applied left to right."""

    def __init__(self):
        self.operations = []

    def append_operation(self, m_operation):
        """Add one operation to the end of the sequence."""
        self.operations.append(m_operation)

    def extend(self, other):
        """Append all operations of another SeqMOperation."""
        self.operations.extend(other.operations)

    def __str__(self):
        lines = ['Seq of operations:']
        lines.extend(str(op) for op in self.operations)
        return '\n'.join(lines)

    def apply_to_tree(self, tree):
        """Apply every operation in order, feeding each result into the next."""
        result = tree
        for op in self.operations:
            result = op.apply_to_tree(result)
        return result

    def can_be_applied_to(self, tree):
        """Return False as soon as any operation in the chain is inapplicable."""
        current = tree
        for op in self.operations:
            if not op.can_be_applied_to(current):
                return False
            current = op.apply_to_tree(current)
        return True

    def apply_to_poly(self, poly):
        """Apply the sequence term-wise to a TreePolynomial."""
        result = TreePolynomial()
        for tree, cf in zip(poly.trees, poly.coeffs):
            result = result + TreePolynomial([self.apply_to_tree(tree)], [cf])
        return result
def compute_permutation(tree_start, tree_goal):
    """Return the permutation mapping the leaf labelling of `tree_start`
    to the leaf labelling of `tree_goal`."""
    labels_start = [leaf.label for leaf in tree_start.leaves]
    labels_goal = [leaf.label for leaf in tree_goal.leaves]
    return Permutation(*labels_goal) * Permutation(*labels_start).inverse()
def process_one_vertex_above(tree, subtree, border_vertex, position_of_target_vertex, operations_log):
    """Grow `subtree` by grafting one child of `border_vertex` from above.

    Logs the corresponding 'above' MOperation into `operations_log` and
    returns the enlarged subtree.
    """
    target_vertex = border_vertex.children[position_of_target_vertex]
    # leaf index of the target edge in the ambient tree, then translated into
    # the subtree's own labelling via its relabelling dictionary
    index_of_target_edge_in_tree = border_vertex.min_descendant_list[position_of_target_vertex]
    index_of_target_edge_in_subtree = subtree.relabelling_dict[index_of_target_edge_in_tree]
    # getting new subtree:
    new_subtree_set = subtree.subtree_set + [target_vertex]
    new_subtree = tree.find_subtree_by_set(new_subtree_set)
    # computing permutation: graft a fresh corolla naively, then compare leaf
    # orders against the actual new subtree to recover the shuffle
    tree_start = graft(subtree.normal_subtree, ColoredTree(target_vertex.type.create_vertex()), index_of_target_edge_in_subtree)
    shuffle = compute_permutation(tree_start, new_subtree.normal_subtree)
    # setting operation:
    operation = MOperation('above', target_vertex.type, index_of_target_edge_in_subtree, shuffle)
    operations_log.append_operation(operation)
    return new_subtree
def process_border(tree, subtree, operations_log):
    """Absorb all non-leaf children of border vertices into `subtree`,
    logging one 'above' operation per absorbed vertex; return the result.

    NOTE(review): assumes `subtree.border` shrinks as vertices are absorbed
    (each process_one_vertex_above returns a new subtree object); otherwise
    the while loop would not terminate — confirm in the subtree class.
    """
    while subtree.border:
        border_vertex = subtree.border[0]
        for child_position in range(len(border_vertex.children)):
            child = border_vertex.children[child_position]
            # only internal vertices not yet in the subtree get grafted
            if child not in subtree.subtree_set and not isinstance(child, Leaf):
                subtree = process_one_vertex_above(tree, subtree, border_vertex, child_position, operations_log)
    return subtree
def process_one_vertex_below(tree, subtree, operations_log):
    """Grow `subtree` downward by absorbing its root's parent, log the
    'below' operation, then re-process the border; return the new subtree.

    Assumes the subtree's root has a parent (checked by the caller).
    """
    subtree_root = subtree.subtree_set[0]
    # getting new subtree:
    new_subtree_set = [subtree_root.parent] + subtree.subtree_set
    new_subtree = tree.find_subtree_by_set(new_subtree_set)
    # computing the permutation: graft naively below, compare leaf orders
    tree_start = graft(ColoredTree(subtree_root.parent.type.create_vertex()), subtree.normal_subtree, subtree_root.index_in_parent)
    shuffle = compute_permutation(tree_start, new_subtree.normal_subtree)
    operation = MOperation('below', subtree_root.parent.type, subtree_root.index_in_parent, shuffle)
    operations_log.append_operation(operation)
    # absorbing the parent may expose new border vertices above it
    new_subtree = process_border(tree, new_subtree, operations_log)
    return new_subtree
def get_seq_of_m(tree, subtree):
    """Compute the sequence of m-operations sending `subtree` to `tree`.

    First absorbs everything above via the border, then walks down toward
    the root one parent at a time.
    """
    log = SeqMOperation()
    current = process_border(tree, subtree, log)
    while current.subtree_set[0].parent:
        current = process_one_vertex_below(tree, current, log)
    return log
|
class Solution:
def createSortedArray(self, instructions: List[int]) -> int:
m = max(instructions)
bit = [0] * (m + 1)
def u(x):
while x <= m:
bit[x] += 1
x += x & -x
def g(x):
s = 0
while x > 0:
s += bit[x]
x -= x & -x
return s
ans = 0
for i, v in enumerate(instructions):
ans += min(g(v - 1), i - g(v))
u(v)
return ans % (10 ** 9 + 7) |
#!/usr/bin/env python
"""This script queries The Hive for SentinelOne generated cases older
than seven days, then checks if the resolved status is True in the
SentinelOne console. Finally it closes the associated case in TheHive
"""
import time
import re
import sys
import yaml
import datetime
import requests
from thehive4py.api import TheHiveApi
from thehive4py.query import And, Eq
# environment variables from config
with open("config.yml", "r") as f:
    cfg = yaml.load(f, Loader=yaml.FullLoader)
S1API = cfg["sentinelone"]["apikey"]  # SentinelOne API token
S1WEB = cfg["sentinelone"]["serverurl"]  # SentinelOne console base URL
API = TheHiveApi((cfg["hive"]["serverurl"]), (cfg["hive"]["apikey"]))
def check_status(query):
    """Checks status of Hive Cases

    For each SentinelOne-generated case older than seven days, counts its
    pending tasks and looks up the threat's resolved flag in the SentinelOne
    console, then hands the collected data to update_sirp().

    Fixes: the per-case task counters (`inc`/`cnt`) were never reset between
    cases, so every case after the first reported the first case's pending
    count; query.json() was also re-parsed on every access.
    """
    if query.status_code != 200:
        print("fubard")
        return
    cases = query.json()  # parse the response body once
    data = {}
    check_date = datetime.date.today() - datetime.timedelta(days=7)
    cutoff = time.mktime(check_date.timetuple())
    for i, case in enumerate(cases):
        # createdAt is epoch milliseconds
        if (case["createdAt"] / 1000) >= cutoff:
            continue
        tasks = API.get_case_tasks(case["id"]).json()
        # count pending tasks for THIS case only
        cnt = sum(1 for t in tasks if t["status"] in ("Waiting", "InProgress"))
        # the SentinelOne threat id is embedded in the case description
        match = re.search(r"\*\*id\*\*\s+(\S+)", case["description"])
        threat_status = requests.get(
            str(S1WEB) + "/web/api/v2.0/threats/" + str(match.group(1)) + "/forensics?apiToken=" + str(S1API)
        )
        data[i] = {
            "sirpId": case["id"],
            "owner": case["owner"],
            "createdAt": (
                time.strftime("%m/%d/%Y %H:%M:%S", time.gmtime(case["createdAt"] / 1000.0))
            ),
            "totalTasks": len(tasks),
            "pendingTasks": cnt,
            "sentinelId": match.group(1),
            "SentinelResolved": threat_status.json()["data"]["result"]["resolved"],
        }
    update_sirp(data)
def update_sirp(data):
    """Auto Closes The Hive cases that meet criteria

    Posts a case closure for every entry whose SentinelOne threat is resolved.

    Fix: the original index-based `while i < len(data)` loop assumed the dict
    keys were contiguous integers starting at 0 and raised KeyError whenever
    check_status skipped a case; iterating the values directly is robust to
    any key scheme.
    """
    for entry in data.values():
        if entry["SentinelResolved"] is True:
            API.case.update(
                entry["sirpId"],
                status="Resolved",
                resolutionStatus="Other",
                summary="Resolved at Sentinel One Console, autoclosed",
                tags=["SentinelOne API"],
            )
# Fetch every open case owned by the SentinelOne integration user and process it.
RESPONSE = API.find_cases(query=And(Eq("status", "Open"), Eq("owner", "sentinelone")), range="all", sort=[])
check_status(RESPONSE)
sys.exit()
|
import html
import os
import sys
from mastodon import Mastodon
import path
import ruamel.yaml
import twitter
def read_config():
    """Load the bot configuration from ~/.config/bm_bot.yml (round-trip loader
    preserves comments and key order)."""
    cfg_text = path.Path(os.path.expanduser("~/.config/bm_bot.yml")).text()
    return ruamel.yaml.load(cfg_text, ruamel.yaml.RoundTripLoader)
def write_config(config):
    """Serialize `config` back to ~/.config/bm_bot.yml, keeping comments and
    key order via the round-trip dumper."""
    dumped = ruamel.yaml.dump(config, Dumper=ruamel.yaml.RoundTripDumper)
    path.Path(os.path.expanduser("~/.config/bm_bot.yml")).write_text(dumped)
class TwitterClient:
    """Thin wrapper over the Twitter API for reading one account's timeline."""

    def __init__(self, config):
        auth_dict = config["twitter"]["auth"]
        auth = twitter.OAuth(auth_dict["token"], auth_dict["token_secret"],
                             auth_dict["api_key"], auth_dict["api_secret"])
        self.api = twitter.Twitter(auth=auth)

    def get_tweets_since(self, since_id):
        """Yield raw tweet dicts newer than `since_id`."""
        # Note: two upper-case i
        timeline = self.api.statuses.user_timeline(
            screen_name="BiIIMurray", since_id=since_id)
        yield from timeline
class MastodonClient:
    """Wrapper around a Mastodon instance, used only for posting toots."""

    def __init__(self, config):
        conf = config["mastodon"]
        auth = conf["auth"]
        self.mastodon = Mastodon(
            client_id=auth["client_id"],
            client_secret=auth["client_secret"],
            access_token=auth["token"],
            api_base_url=conf["instance_url"]
        )

    def toot(self, text):
        """Post `text`; abort the process if the API reports an error."""
        result = self.mastodon.toot(text)
        if "error" in result:
            sys.exit(result)
class Bot:
    """Mirrors new tweets from the tracked account to Mastodon."""

    def __init__(self):
        self.config = read_config()
        self.twitter_client = TwitterClient(self.config)
        self.mastodon_client = MastodonClient(self.config)

    def execute(self):
        """Fetch tweets newer than the stored high-water mark, toot the
        interesting ones, then persist the new last_tweet_id."""
        def has_url(tweet):
            # tweets containing links are skipped
            return bool(tweet["entities"].get("urls"))

        def is_reply(tweet):
            # replies (by screen name or status id) are skipped
            return tweet.get("in_reply_to_screen_name") or \
                tweet.get("in_reply_to_status_id")

        last_tweet_id = self.config["last_tweet_id"]
        tweets = list(self.twitter_client.get_tweets_since(last_tweet_id))
        if not tweets:
            print("No new tweet found")
            return
        print("Processing", len(tweets), "new tweets")
        interesting_tweets = [x for x in tweets if not (has_url(x) or is_reply(x))]
        n = len(interesting_tweets)
        for i, tweet in enumerate(interesting_tweets):
            print("Tooting", "%d/%d" % (i+1, n))
            # undo HTML entity escaping (&amp; etc.) before posting
            unescaped_text = html.unescape(tweet["text"])
            self.mastodon_client.toot(unescaped_text)
        # tweets[0] is taken as the newest id — presumably the timeline is
        # returned newest-first; confirm against the Twitter client
        self.config["last_tweet_id"] = tweets[0]['id']
        write_config(self.config)
def main():
    """Entry point: mirror new tweets to Mastodon once and exit."""
    Bot().execute()


if __name__ == "__main__":
    main()
|
# Generated by Django 3.1.4 on 2021-01-05 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen polls.Comments.id_social to BigIntegerField(default=0)."""
    dependencies = [
        ('polls', '0012_comments_likescount'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comments',
            name='id_social',
            field=models.BigIntegerField(default=0),
        ),
    ]
|
# 12x12 multiplication tables; each row is one table, printed right-aligned
# in 4-character columns (leading spaces of the first column stripped).
tables = [[row * col for col in range(1, 13)] for row in range(1, 13)]
for table in tables:
    line = ''.join(str(entry).rjust(4) for entry in table)
    print(line.lstrip())
|
import os

# Naver SMTP settings for Django's email backend.
EMAIL_HOST = 'smtp.naver.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'allieuslee@naver.com'
# SECURITY: the account password was previously hard-coded here and is now
# exposed in version control — it must be rotated. Read it from the
# environment instead of committing it.
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = True
|
import unittest
from credential import Credential
class TestCredential(unittest.TestCase):
    """Unit tests for the Credential class."""

    def setUp(self):
        # fresh credential object before every test
        self.new_credential = Credential("steve", "king", "123456", "email@gmail.com")

    def tearDown(self):
        # reset the shared class-level list so tests stay independent
        Credential.credential_list = []

    def test_init(self):
        """Attributes are stored exactly as given to the constructor."""
        cred = self.new_credential
        self.assertEqual(cred.credential_name, "steve")
        self.assertEqual(cred.usr_name, "king")
        self.assertEqual(cred.password, "123456")
        self.assertEqual(cred.email, "email@gmail.com")

    def test_save_credential(self):
        """Saving appends the credential to the class list."""
        self.new_credential.save_credential()
        self.assertEqual(len(Credential.credential_list), 1)

    def test_save_multiple_credential(self):
        """Several credentials can be saved."""
        self.new_credential.save_credential()
        extra = Credential("Test", "user", "9000", "test@user.com")
        extra.save_credential()
        self.assertEqual(len(Credential.credential_list), 2)

    def test_delete_credential(self):
        """Deleting removes exactly one credential."""
        self.new_credential.save_credential()
        extra = Credential("Test", "user", "9000", "test@user.com")
        extra.save_credential()
        self.new_credential.delete_credential()
        self.assertEqual(len(Credential.credential_list), 1)

    def test_find_credential_by_credential_name(self):
        """find_by_name returns the credential with the matching name."""
        self.new_credential.save_credential()
        extra = Credential("Test", "user", "9000", "test@user.com")
        extra.save_credential()
        found = Credential.find_by_name("Test")
        self.assertEqual(found.email, extra.email)

    def test_credential_exists(self):
        """credential_exist is truthy for a saved password."""
        self.new_credential.save_credential()
        extra = Credential("Test", "user", "9000", "test@user.com")
        extra.save_credential()
        self.assertTrue(Credential.credential_exist("9000"))


if __name__ == '__main__':
    unittest.main()
|
from Stepper import stepper
# Drive a stepper motor through the project's Stepper wrapper.
testStepper = stepper([2, 3, 4]) #[stepPin, directionPin, enablePin]
# NOTE(review): the trailing comment lists four arguments (steps, dir, speed,
# stayOn) but only three are passed — presumably stayOn has a default value;
# confirm in the Stepper module.
testStepper.step(360*19*4, "left",0.0005); #steps, dir, speed, stayOn
|
class MinHeap:
    """A 1-indexed binary min-heap; heap_list[0] is an unused sentinel."""

    def __init__(self):
        self.heap_list = [None]
        self.count = 0

    # HEAP HELPER METHODS
    # DO NOT CHANGE!
    def parent_idx(self, idx):
        return idx // 2

    def left_child_idx(self, idx):
        return idx * 2

    def right_child_idx(self, idx):
        return idx * 2 + 1
    # END OF HEAP HELPER METHODS

    def add(self, element):
        """Append `element` and restore the min-heap property."""
        self.count += 1
        print("Adding: {0} to {1}".format(element, self.heap_list))
        self.heap_list.append(element)
        self.heapify_up()

    def heapify_up(self):
        """Bubble the last element up until its parent is not larger.

        Fix: the original body only printed a message, so add() never
        actually maintained the heap invariant.
        """
        print("Heapifying up...")
        idx = self.count
        while self.parent_idx(idx) > 0:
            parent = self.parent_idx(idx)
            if self.heap_list[parent] > self.heap_list[idx]:
                self.heap_list[parent], self.heap_list[idx] = (
                    self.heap_list[idx], self.heap_list[parent])
            idx = parent
|
""" Created by Shahen Kosyan 2/14/17"""
if __name__ == '__main__':
    # Read n flight legs "A->B"; if the home airport appears an odd number
    # of times across all endpoints, the last trip has not returned home.
    n = int(input())
    home_airport = input()
    flights = []
    for _ in range(n):
        origin, destination = input().split('->')
        flights.append(origin)
        flights.append(destination)
    if flights.count(home_airport) % 2:
        print('contest')
    else:
        print('home')
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2016
# --------------------------------------------------------------------------
# gendoc: ignore
def as_df(what, **kwargs):
    """Return a `pandas.DataFrame` representation of `what`, or None.

    Delegates to the object's `__as_df__` hook with `**kwargs`; an
    AttributeError (including one raised inside the hook) means no
    representation could be found, and None is returned.
    """
    try:
        frame = what.__as_df__(**kwargs)
    except AttributeError:
        frame = None
    return frame
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.