text stringlengths 38 1.54M |
|---|
#-*- coding:utf-8 -*-
"""
Packaging script for the easylearn project.

Created on 2020/03/03
------
@author: Chao Li; Mengshi Dong; Shaoqiang Han; Lili Tang; Ning Yang; Peng Zhang; Weixiang Liu
Email: lichao19870617@gmail.com; dongmengshi1990@163.com; 867727390@qq.com;
lilyseyo@gmail.com; 1157663200@qq.com; 1597403028@qq.com; wxliu@szu.edu.cn.
"""
from setuptools import setup, find_packages

# The PyPI long description is taken verbatim from the repository README.
with open("README.md", "r") as readme_file:
    long_description = readme_file.read()

# Package metadata gathered in one mapping so it is easy to scan and edit.
package_metadata = dict(
    name='easylearn',
    version='0.1.12.alpha',
    description=(
        'This project is designed for machine learning in resting-state fMRI field'
    ),
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Chao Li',
    author_email='lichao19870617@gmail.com',
    maintainer='Chao Li; Mengshi Dong; Shaoqiang Han; Lili Tang; Ning Yang; Peng Zhang',
    maintainer_email='lichao19870617@gmail.com',
    license='MIT License',
    packages=find_packages(),
    platforms=["all"],
    url='https://github.com/easylearn-fmri/',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Education',
        'Natural Language :: English',
        'Natural Language :: Chinese (Simplified)',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
    ],
    python_requires='>=3.5',
    install_requires=[
        'joblib',
        'numpy',
        'pandas',
        'python-dateutil',
        'pytz',
        'scikit-learn',
        'scipy',
        'six',
        'nibabel',
        'imbalanced-learn',
        'skrebate',
        'matplotlib',
    ],
)

setup(**package_metadata)
# -*- coding: utf-8 -*-
import re
from nltk.corpus import stopwords
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import os
import math
import pandas as pd
from pandas import ExcelWriter
def stopword_filtered(words):
    """Return *words* with English stopwords removed, preserving order."""
    stop_set = set(stopwords.words('english'))
    return [word for word in words if word not in stop_set]
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=50):
    """Return the top-*n* bigrams of *words* ranked by *score_fn* (chi-squared by default)."""
    finder = BigramCollocationFinder.from_words(words)
    return finder.nbest(score_fn, n)
# NOTE(review): exact duplicate of stopword_filtered defined above — this
# redefinition silently shadows the earlier one; consider deleting one copy.
def stopword_filtered(words):
    """Filter English stopwords out of *words*, preserving order."""
    filtered_list = []
    stop_set = set(stopwords.words('english'))
    # print(stop_set)
    for w in words:
        if w not in stop_set:
            filtered_list.append(w)
    return filtered_list
# NOTE(review): exact duplicate of bigram_word_feats defined above — this
# redefinition silently shadows the earlier one; consider deleting one copy.
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=50):
    """Return the top-*n* bigrams of *words* ranked by *score_fn*."""
    bigram_finder = BigramCollocationFinder.from_words(words)
    bigrams = bigram_finder.nbest(score_fn, n)
    return bigrams
def term_freq(word_list):
    """Return TF(t, d): each term in *word_list* mapped to its relative frequency.

    Returns an empty dict for an empty input.
    """
    counts = {}
    for term in word_list:
        counts[term] = counts.get(term, 0) + 1
    total = sum(counts.values())
    return {term: count / total for term, count in counts.items()}
def inv_doc_freq(term_set, doc_name2word_list):
    """Return IDF(t, D) = log(N / df) for every term in *term_set*.

    *doc_name2word_list* maps document name -> its term list. The document
    count starts at 1 (add-one smoothing, matching the original), so a term
    present in every document gets log(N / (N + 1)) < 0.
    """
    doc_num = len(doc_name2word_list)
    idf_dict = {}
    for term in term_set:
        # 1 + number of documents that contain the term (smoothed).
        containing = 1 + sum(
            1 for words in doc_name2word_list.values() if term in words)
        idf_dict[term] = math.log(doc_num / containing)
    return idf_dict
if __name__ == '__main__':
    # Compute a tf-idf matrix over all .txt files next to this script and
    # write it to tfidf_result.xlsx. "Terms" are top-50 chi-squared bigrams.
    doc_name2word_list = {}
    doc_name2tf_dict = {}
    term_set = set()
    rootdir = os.path.dirname(os.path.abspath(__file__))
    doc_name_list = [item for item in os.listdir(rootdir) if item.endswith('.txt')]
    for doc_name in doc_name_list:
        with open(os.path.join(rootdir, doc_name), 'r',errors='ignore') as f:
            content = ''
            for line in f.readlines():
                content += line
            content = content.strip()
            # Keep letters and whitespace only.
            # NOTE(review): pattern should be a raw string r'[^A-Za-z\s]'.
            content = re.sub('[^A-Za-z\s]', ' ', content)
            content = content.lower()
            word_list1 = content.split()
            #doc_name2word_list[doc_name] = word_list
            filtered_list = stopword_filtered(word_list1)
            # From here on a "word" is a bigram tuple.
            word_list = bigram_word_feats(filtered_list)
            doc_name2tf_dict[doc_name] = term_freq(word_list)
            doc_name2word_list[doc_name] = word_list
            term_set = term_set | set(word_list)
    #word_list = bigram_word_feats(doc_name2word_list)
    #print(b)
    #print(idf_dict)
    idf_dict = inv_doc_freq(term_set, doc_name2word_list)
    term_list = list(term_set)
    tf_idf = pd.DataFrame(columns=doc_name_list, index=term_list)
    # NOTE(review): prints the last iteration's word_list — debug leftover.
    print(word_list)
    for (doc_name, word_list) in doc_name2word_list.items():
        for w in term_set:
            if w in word_list:  # multiply TF by IDF to get the tf-idf value
                tf_idf.loc[w, doc_name] = doc_name2tf_dict[doc_name][w] * idf_dict[w]
            else:
                tf_idf.loc[w, doc_name] = 0
    # output
    # NOTE(review): ExcelWriter.save() is deprecated in modern pandas; prefer
    # `with ExcelWriter(...) as writer:`.
    writer = ExcelWriter('tfidf_result.xlsx')
    tf_idf.to_excel(writer, 'tfidf')
    writer.save()
    print('File Output Success')
|
"""
Jason Slocum
5.28.2014
DPW
Final API
"""
import webapp2
from pages import Page
import urllib2 #open url, request receive and open info obtained there
from xml.dom import minidom#parse xml
class MainHandler(webapp2.RequestHandler):
    """Controller: wires the form page, the Zillow model and the estate view."""
    def get(self):
        # Build the page and its form fields: [name, type, optional placeholder].
        page = FormPage()
        page.inputs = [['city', 'text', 'City'], ['state', 'text', 'State'], ['Submit', 'submit']]
        if self.request.GET:
            # A query was submitted: feed city/state to the model, fetch the
            # API data, then render it through the view into the page body.
            model = EstateModel()
            model.city = self.request.GET['city']
            model.state = self.request.GET['state']
            model.callApi()
            view = EstateView()
            view.edos = model.dos
            page._body = view.content
        self.response.write(page.print_out())
class EstateView(object):
    """Renders the EstateData objects handed to `edos` as an HTML fragment."""
    def __init__(self):
        self.__edos = []
        self.__content = '<br />'

    def update(self):
        """Append one HTML section per data object to the accumulated content."""
        for do in self.__edos:
            sections = [
                "<div id='maincontent'>",
                "<h2 id='cta'>Enter a location to find out where you belong!</h2>",
                "<div class='box'>",
                "<h2>Affordability of Homes in " + do.location + "</h2>",
                "<h3>Single Family Homes</h3>",
                "<p>The average single family home is valued at: $" + do.value + "<br />",
                "While the national average for a comparable home is: $" + do.value2 + "</p>",
                "<h3>Two Bedroom Homes</h3>",
                "<p>The average two bedroom home is valued at: $" + do.value5 + "<br />",
                "While the national average for a comparable home is: $" + do.value6 + "</p>",
                "<h3>Three Bedroom Homes</h3>",
                "<p>The average three bedroom home is valued at: $" + do.value7 + "<br />",
                "While the national average for a comparable home is: $" + do.value8 + "</p>",
                "<h3>Four Bedroom Homes</h3>",
                "<p>The average four bedroom home is valued at: $" + do.value9 + "<br />",
                "While the national average for a comparable home is: $" + do.value10 + "</p>",
                "<h3>Condos</h3>",
                "<p>The average condo is valued at: $" + do.value3 + "<br />",
                "While the national average for a comparable condo is: $" + do.value4 + "</p>",
                "<a href='" + do.forSale + "'>Check Out Current Home Listings!</a></div>",
                "</div>",
            ]
            self.__content += "".join(sections)

    @property
    def content(self):
        return self.__content

    @property
    def edos(self):
        pass

    @edos.setter
    def edos(self, arr):
        # Storing the data objects immediately re-renders the content.
        self.__edos = arr
        self.update()
class EstateModel(object):
    """Fetches, parses and sorts demographic data from the Zillow API."""
    def __init__(self):
        # Base URL with the API key; state and city are appended in callApi().
        self.__url = "http://www.zillow.com/webservice/GetDemographics.htm?zws-id=X1-ZWz1dtxmglnsi3_4aijl&state="
        self.__city = ""
        self.__state = ""
        self.__xmldoc = ""

    def callApi(self):
        """Query the API for the stored city/state and build EstateData objects."""
        request = urllib2.Request(self.__url + self.__state + "&city=" + self.__city)
        opener = urllib2.build_opener()
        result = opener.open(request)
        self.__xmldoc = minidom.parse(result)
        responses = self.__xmldoc.getElementsByTagName('response')
        self._dos = []  # data objects collected from the API
        for tag in responses:
            do = EstateData()
            # The <value> nodes arrive in a fixed order; the indices below
            # match the labels EstateView renders for each field.
            values = tag.getElementsByTagName('value')
            do.value = values[2].firstChild.nodeValue
            do.value2 = values[3].firstChild.nodeValue
            do.value3 = values[4].firstChild.nodeValue
            do.value4 = values[5].firstChild.nodeValue
            do.value5 = values[6].firstChild.nodeValue
            do.value6 = values[7].firstChild.nodeValue
            do.value7 = values[8].firstChild.nodeValue
            do.value8 = values[9].firstChild.nodeValue
            do.value9 = values[10].firstChild.nodeValue
            do.value10 = values[11].firstChild.nodeValue
            do.forSale = tag.getElementsByTagName('forSale')[0].firstChild.nodeValue
            do.location = tag.getElementsByTagName('city')[0].firstChild.nodeValue
            self._dos.append(do)

    @property
    def dos(self):
        return self._dos

    @property
    def state(self):
        pass

    @state.setter
    def state(self, s):
        self.__state = s

    @property
    def city(self):
        pass

    @city.setter
    def city(self, c):
        self.__city = c
class EstateData(object):
    """Plain data holder for one location's valuation figures.

    All fields start as empty strings; EstateModel.callApi() fills them in.
    """
    def __init__(self):
        for attr in ('value', 'value2', 'value3', 'value4', 'forSale',
                     'value5', 'value6', 'value7', 'value8', 'value9',
                     'value10', 'location'):
            setattr(self, attr, '')
class FormPage(Page):
    """Page subclass that prepends an HTML form to the page body."""
    def __init__(self):
        super(FormPage, self).__init__()
        self._form_open = '<form method="GET">'
        self._form_close = '</form>'
        self.__inputs = []
        self._form_inputs = ''

    @property
    def inputs(self):
        pass

    @inputs.setter
    def inputs(self, arr):
        # Each item is [name, type, optional placeholder]; the bare except
        # mirrors the original and falls back to a plain closing tag when the
        # placeholder is absent or unusable.
        self.__inputs = arr
        for item in arr:
            self._form_inputs += '<input type="' + item[1] + '" name="' + item[0]
            try:
                tail = '" placeholder="' + item[2] + '" />'
            except:
                tail = '" />'
            self._form_inputs += tail

    def print_out(self):
        """Assemble the full page HTML: head, form, body, closing markup."""
        pieces = (self._head, self._form_open, self._form_inputs,
                  self._form_close, self._body, self._close)
        return ''.join(pieces)
# WSGI entry point: route "/" to MainHandler; debug=True shows stack traces.
app = webapp2.WSGIApplication([
    ('/', MainHandler)
], debug=True)
|
import socket
import tkinter
from tkinter import *
from threading import Thread
import time
def receive():
    """Background-thread loop: read socket messages into the list box.

    Exits (after printing a notice) on any socket or widget error — e.g.
    when the connection closes or the window is destroyed.
    """
    while True:
        try:
            incoming = s.recv(1024).decode("utf8")
            msg_list.insert(tkinter.END, incoming)
        except:
            print("There is an error receiving the message")
            break
# Root Tk window for the chat client.
window = Tk()
window.title("Chat Room Application")
window.config()  # called with no options — effectively a no-op
def send():
    """Send the typed message to the server; '/quit' also closes the app."""
    outgoing = my_msg.get()
    my_msg.set("")  # clear the entry field
    s.send(bytes(outgoing, "utf8"))
    if outgoing == "/quit":
        s.close()
        window.quit()
def on_closing():
    """Window-close handler: push the '/quit' sentinel through send()."""
    my_msg.set("/quit")
    send()
# --- GUI layout -------------------------------------------------------------
message_frame = Frame(window, height=100, width=100)
message_frame.pack()
my_msg = StringVar()  # text currently typed in the entry field
my_msg.set("")
scroll_bar = Scrollbar(message_frame)
msg_list = Listbox(message_frame, height=15, width=100, yscrollcommand=scroll_bar.set)
# NOTE(review): scroll_bar's command is never wired back to msg_list.yview,
# so dragging the scrollbar will not scroll the list.
scroll_bar.pack(side=RIGHT, fill=Y)
msg_list.pack(side=LEFT, fill=BOTH)
# NOTE(review): font "Aeria" looks like a typo for "Arial" (runtime string —
# left unchanged here).
label = Label(window, text="Enter the message: ",font="Aeria")
label.pack()
entry_field = Entry(window, textvariable=my_msg, fg="red", width=50)
entry_field.pack()
send_button = Button(window, text="Send", font="Arial", fg="white", command=send)
send_button.pack()
quit_button = Button(window, text="Quit", font="Arial", fg="white", command=on_closing)
quit_button.pack()
window.protocol("WM_DELETE_WINDOW", on_closing)
# --- Networking -------------------------------------------------------------
host = "localhost"
port = 8080
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
# NOTE(review): not a daemon thread — it can keep the process alive if the
# socket stays open after the window closes.
receive_thread = Thread(target=receive)
receive_thread.start()
mainloop()
#import mhat
# Shared numeric constants.
LARGE = 1e12  # generic "very large" sentinel value
POSITIVE_INFINITY = float("inf")
NEGATIVE_INFINITY = float("-inf")
# Pi written to more digits than a float can hold (rounds to math.pi).
PI = 3.141592653589793238462643383279502884197169399375
PI2 = PI * 2.0  # full circle in radians (tau)
RADIANS_TO_DEGREES = 180.0 / PI
DEGREES_TO_RADIANS = PI / 180.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import copy
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def predict(image):
    """Return the predicted digit class(es) for *image* using the saved model.

    NOTE(review): uses the TF1 graph/session API (tf.placeholder, tf.Session);
    this will not run under TF2 without tf.compat.v1.
    """
    # Placeholder for externally computed logits; softmax turns them into
    # class probabilities. 11 classes — presumably digits plus an "empty
    # cell" class; TODO confirm against the training code.
    y_logit = tf.placeholder(tf.float32, [None,11])
    y_pred = tf.nn.softmax(y_logit)
    with tf.Session() as sess:
        # Rebuild the graph from checkpoint metadata and restore the weights.
        model = tf.train.import_meta_graph('training/model.meta')
        model.restore(sess, tf.train.latest_checkpoint('training/./'))
        graph = tf.get_default_graph()
        logit_pred = tf.get_collection('logit_predict')
        X = graph.get_tensor_by_name('input/image:0')
        # The model consumes NHWC batches of 28x28 single-channel images.
        image = np.reshape(image, (-1,28,28,1))
        feed_dict = {X: image}
        # Two-step inference: run the logits, then softmax + argmax them.
        scores = sess.run(logit_pred, feed_dict)
        result = sess.run(y_pred, feed_dict= {y_logit: scores[0]})
        number_pred = np.argmax(result, axis= 1)
        return number_pred
def clean_image(cell):
    """Extract the digit from a binarized cell and center it on a 28x28 canvas.

    Returns None when the cell looks empty (no contour, or the largest
    contour covers less than 4% of the cell area).

    NOTE(review): the 3-value unpacking of cv2.findContours is the OpenCV 3.x
    API; OpenCV 4 returns only (contours, hierarchy).
    """
    #~ mask_im = np.zeros(cell.shape, dtype = 'uint8')
    out_im = np.zeros((28,28), dtype = 'uint8')
    _, contours, hierarchy = cv2.findContours(cell.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return None
    largest_contour = max(contours, key= cv2.contourArea)
    row, col = cell.shape
    # Anything under 4% of the cell area is treated as noise / empty cell.
    if ( cv2.contourArea(largest_contour) < .04 *row * col ):
        return None
    x,y,w,h = cv2.boundingRect(largest_contour)
    mask_im = cell[y:y+h,x:x+w]
    if h > 28 or w > 28:
        # Oversized glyph: squash the crop down to 28x28.
        return cv2.resize(mask_im, (28, 28))
    else:
        # Paste the crop centered on the blank 28x28 canvas.
        out_im[14-h//2:14-h//2+h, 14-w//2:14-w//2+w] = mask_im
        return out_im
def l2_dist(pt1, pt2):
    """Euclidean (L2) distance between two 2-D points."""
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    return np.sqrt(dx * dx + dy * dy)
def pipeline(in_image):
    """Locate the sudoku grid in a grayscale image, rectify it, strip the grid
    lines, and return (numbers_im, cell_height, cell_width).

    NOTE(review): calls cv2.imshow/cv2.waitKey(0), so it blocks until a key
    press — a side effect worth removing for non-interactive use.
    """
    rows, cols = in_image.shape  # NOTE(review): rows is unused
    # Smooth, then adaptive-threshold (inverted: grid/digits become white).
    filtered_im = cv2.GaussianBlur(in_image, (5, 5), 0)
    binarized_im = cv2.adaptiveThreshold(filtered_im,255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,3)
    connectivity = 4
    cc_info = cv2.connectedComponentsWithStats(binarized_im,
        connectivity, cv2.CV_32S)
    labels_im = cc_info[1]
    stats = cc_info[2]
    stats_area = stats[:, cv2.CC_STAT_AREA]
    # Label of largest area excluding the background
    largest_label = np.argmax(stats_area[1:])+1
    # Keep only the largest connected component (assumed to be the grid).
    binarized_im[labels_im == largest_label] = 255
    binarized_im[labels_im != largest_label] = 0
    # Coordinates of all grid pixels, converted from (row, col) to (x, y).
    positions = np.where(binarized_im == 255)
    positions = np.asarray(positions).T
    positions[:,0], positions[:,1] = positions[:,1], positions[:,0].copy()
    positions = positions.tolist()
    # Mirror x so that sorting by x+y also exposes the TR and BL corners.
    width_adj = lambda pos: cols - 1 - pos[0]
    tl_positions = positions
    tr_positions = [[width_adj(pos), pos[1]] for pos in tl_positions]
    tr_positions.sort(key= lambda pt: pt[0]+pt[1])
    tl_positions.sort(key= lambda pt: pt[0]+pt[1])
    tr = [width_adj(tr_positions[0]), tr_positions[0][1]]
    bl = [width_adj(tr_positions[-1]), tr_positions[-1][1]]
    tl = tl_positions[0]
    br = tl_positions[-1]
    rect = np.array([tl, tr, br, bl], dtype = "float32")
    pts = rect.astype(int)  # NOTE(review): unused
    width1 = l2_dist(br, bl)
    width2 = l2_dist(tr, tl)
    height1 = l2_dist(br, tr)
    height2 = l2_dist(tl, bl)
    max_width = max(int(width1), int(width2))
    max_height = max(int(height1), int(height2))
    # Destination quad for the perspective warp.
    # NOTE(review): the x-coordinates use max_height and the warp size is
    # (max_height, max_width) — width/height look transposed; verify on a
    # non-square input.
    dst = np.array([[0, 0],[max_height - 1, 0],[max_height - 1, max_width - 1], [0, max_width - 1]], dtype = "float32")
    transform_mat = cv2.getPerspectiveTransform(rect, dst)
    sudoku_ext_im = cv2.warpPerspective(in_image, transform_mat, (max_height,max_width))
    sudoku_bin_im = cv2.adaptiveThreshold(sudoku_ext_im, 255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,3)
    cv2.imshow('sudoku', sudoku_bin_im )
    cv2.waitKey(0)
    # Morphology: extract long horizontal then vertical strokes (grid lines).
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(int(max_width/9),1))
    horizontal = cv2.erode(sudoku_bin_im,kernel,iterations = 1)
    horizontal = cv2.dilate(horizontal,kernel,iterations = 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(1,int(max_height/9)))
    vertical = cv2.erode(sudoku_bin_im,kernel,iterations = 1)
    vertical = cv2.dilate(vertical,kernel,iterations = 2)
    intersection_lines = cv2.bitwise_or(vertical, horizontal)
    # Subtract the grid lines, leaving only digits; despeckle with a median.
    numbers_im = sudoku_bin_im - intersection_lines
    numbers_im = cv2.medianBlur(numbers_im,3)
    cell_height = int(max_height/9)
    cell_width = int(max_width/9)
    return numbers_im,cell_height,cell_width
def main():
    """Run the grid-extraction pipeline on the sample sudoku image.

    Removed the unused locals of the original (`rows, cols`, `numbers`,
    `cell_resize`) — they were assigned and never read.
    """
    sudoku_im = cv2.imread('sudoku-0.jpg', cv2.IMREAD_GRAYSCALE)
    numbers_im, cell_height, cell_width = pipeline(sudoku_im)
    # NOTE(review): the extracted cells are not consumed yet — per-cell digit
    # prediction appears unfinished (see test() below).
    cv2.destroyAllWindows()
    return
def test():
    """Smoke-test predict() on a single grayscale image file."""
    image = cv2.imread('t.png', cv2.IMREAD_GRAYSCALE)
    # NOTE(review): predict() performs this same reshape again; the
    # pre-reshape here is redundant but harmless.
    image = np.reshape(image, (-1,28,28,1))
    print(predict(image))
if __name__ == '__main__':
    # Entry point: run the grid-extraction demo (prediction demo commented out).
    main()
    #~ test()
|
# Module-level result; overwritten by the demo call at the bottom of the file.
result = False
def isAnagram(s, t):
    """
    Return True when *t* is an anagram of *s*.

    :type s: str
    :type t: str
    :rtype: bool

    Fixes over the original: characters outside 'a'-'z' were silently
    ignored (so isAnagram("1", "2") returned True); now every character
    counts. Also O(n) instead of scanning a 26-key dict per character.
    """
    from collections import Counter
    if len(s) != len(t):
        return False
    return Counter(s) == Counter(t)
# Smoke test: "anagram" vs "nagaram" should print True.
result = isAnagram("anagram", "nagaram")
print(result)
|
import time
from optparse import OptionParser
from math import ceil, log
CROSS_OVER = 1
#hello christi
# standard matrix multiplication
# standard matrix multiplication
def matrixProduct(X, Y):
    """Naive O(n^3) product of two n-by-n matrices (lists of lists)."""
    n = len(X)
    C = [[0] * n for _ in range(n)]
    # i-k-j order lets X[i][k] and the row references be hoisted out of the
    # innermost loop; results are identical to the classic triple loop.
    for i in range(n):
        out_row = C[i]
        for k in range(n):
            x_ik = X[i][k]
            y_row = Y[k]
            for j in range(n):
                out_row[j] += x_ik * y_row[j]
    return C
# Helper functions
def add(X, Y):
    """Element-wise sum of two equally-shaped matrices."""
    return [[a + b for a, b in zip(row_x, row_y)]
            for row_x, row_y in zip(X, Y)]
def subtract(X, Y):
    """Element-wise difference of two equally-shaped matrices."""
    return [[a - b for a, b in zip(row_x, row_y)]
            for row_x, row_y in zip(X, Y)]
# Strassen Algorithm
# Strassen divide-and-conquer multiplication: 7 recursive sub-products
# instead of 8, falling back to the cubic algorithm at CROSS_OVER.
# NOTE(review): Python 2 code — relies on xrange and on `n/2` being integer
# division; under Python 3 half_n would be a float and indexing would break.
def strassenAlg(X, Y):
    n = len(X)
    if n <= CROSS_OVER:
        # Small matrices: the cubic algorithm is faster in practice.
        return matrixProduct(X, Y)
    elif n % 2 == 0:
        # dimension of submatrices
        half_n = n/2
        # initialize sub-matrices of X
        A = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        B = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        C = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        D = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        # initialize sub-matrices of Y
        E = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        F = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        G = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        H = [[0 for j in xrange(0, half_n)] for i in xrange(0, half_n)]
        # dividing the matrices in 4 sub-matrices:
        for i in xrange(0, half_n):
            for j in xrange(0, half_n):
                A[i][j] = X[i][j] # top left
                B[i][j] = X[i][j + half_n] # top right
                C[i][j] = X[i + half_n][j] # bottom left
                D[i][j] = X[i + half_n][j + half_n] # bottom right
                E[i][j] = Y[i][j] # top left
                F[i][j] = Y[i][j + half_n] # top right
                G[i][j] = Y[i + half_n][j] # bottom left
                H[i][j] = Y[i + half_n][j + half_n] # bottom right
        # Calculating p1 to p7:
        p1 = strassenAlg(A, subtract(F, H)) # p1 = A(F - H)
        p2 = strassenAlg(add(A, B), H) # p2 = (A + B)H
        p3 = strassenAlg(add(C, D), E) # p3 = (C + D)E
        p4 = strassenAlg(D, subtract(G, E)) # p4 = D(G-E)
        p5 = strassenAlg(add(A, D), add(E, H)) # p5 = (A + D)(E + H)
        p6 = strassenAlg(subtract(B, D), add(G, H)) # p6 = (B - D)(G + H)
        p7 = strassenAlg(subtract(C, A), add(E, F)) # p7 = (C - A)(E + F)  [comment fixed: code computes C - A]
        # calculating submatrices of C
        AE_plus_BG = subtract(add(add(p5, p4), p6), p2)
        AF_plus_BH = add(p1, p2)
        CE_plus_DG = add(p3, p4)
        CF_plus_DH = subtract(add(add(p5, p1), p7), p3)
        # Grouping the results obtained in a single matrix:
        C = [[0 for j in xrange(0, n)] for i in xrange(0, n)]
        for i in xrange(0, half_n):
            for j in xrange(0, half_n):
                C[i][j] = AE_plus_BG[i][j]
                C[i][j + half_n] = AF_plus_BH[i][j]
                C[i + half_n][j] = CE_plus_DG[i][j]
                C[i + half_n][j + half_n] = CF_plus_DH[i][j]
        return C
    else:
        # Odd n: pad with a zero row/column so the size is even, recurse,
        # then crop the result back to n x n.
        EvenX = [[0 for i in xrange(n+1)] for j in xrange(n+1)]
        EvenY = [[0 for i in xrange(n+1)] for j in xrange(n+1)]
        for i in xrange(n):
            for j in xrange(n):
                EvenX[i][j] = X[i][j]
                EvenY[i][j] = Y[i][j]
        EvenC = strassenAlg(EvenX, EvenY)
        C = [[0 for i in xrange(n)] for j in xrange(n)]
        for i in xrange(n):
            for j in xrange(n):
                C[i][j] = EvenC[i][j]
        return C
# Pretty-print a matrix, one tab-separated row per line.
# NOTE: Python 2 print statement — this file is Python 2 throughout.
def printMatrix(matrix):
    for line in matrix:
        print "\t".join(map(str,line))
# Benchmark: time Strassen vs naive multiplication for n = 600..699 and
# report the first size where Strassen wins (Python 2 print statements).
variable = []
count = 600
flag = 0
print "Hello"
while (count < 700):
    # All-ones matrices are sufficient for timing.
    A = [[1 for j in xrange(0, count)] for i in xrange(0, count)]
    B = [[1 for j in xrange(0, count)] for i in xrange(0, count)]
    start = time.time()
    C = strassenAlg(A, B)
    end = time.time()
    t1 = (end - start)
    start = time.time()
    D = matrixProduct(A,B)
    end = time.time()
    t2 = (end - start)
    variable.append((t1, t2, count))
    count = count + 1
    print t1
    print t2
    # flag jumps straight to 2 so the crossover message prints only once.
    if t1 < t2 and flag < 1:
        flag = 2
        print "cross:%d" % count
|
# Generated by Django 2.2.1 on 2019-09-11 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Ticket.quantity_2 optional (allow blank and NULL)."""

    dependencies = [
        ('tickets', '0007_auto_20190911_1136'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='quantity_2',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
#!usr/bin/env python
#coding:utf-8
import os
import time
from base.relay08 import Relayed_device
from base.baseflash import Command
from base.baseflash import check_adb_device
from base.dumbase import base
# Directory holding the extracted flash images on the buildbot host.
path = r'/home/buildbot/flashfile/flash'
# Fastboot command sequence, executed in order: boot the loader, unlock,
# (re)write partitions, then `continue` to boot the flashed image.
cmd_list = [r'sudo fastboot boot loader.efi',r'sudo fastboot oem unlock',r'sudo fastboot flash gpt gpt.bin',r'sudo fastboot erase misc',r'sudo fastboot erase persistent',r'sudo fastboot erase metadata',
    r'sudo fastboot format config',r'sudo fastboot format cache',r'sudo fastboot flash boot boot.img',r'sudo fastboot flash system system.img',r'sudo fastboot flash bootloader bootloader',
    r'sudo fastboot oem verified',r'sudo fastboot format data',r'sudo fastboot continue']
def set_up():
    """Post-flash device prep: unlock the screen, run setup, return to home."""
    base.unlock()
    base.setup()
    base.back()
def main():
    """Power-cycle the device via the relay card, enter DnX mode, flash it,
    then run the post-flash setup."""
    # Relay card device node needs rw access for the current user.
    Command('sudo chmod 777 /dev/ttyRelayCard').start()
    #close devices
    # NOTE(review): '65'/'6E'/'69'/'6f'/'73' are presumably relay-card channel
    # command codes — confirm against base.relay08.Relayed_device.
    relay_devices=Relayed_device('/dev/ttyRelayCard','65')
    relay_devices.power_off()
    relay_devices.write('6E')
    relay_devices.close()
    time.sleep(20)
    #start relycard
    relay_devices=Relayed_device('/dev/ttyRelayCard','65','69')
    #enter in dnx model
    relay_devices.enter_dnx()
    relay_devices.close()
    #close relay ports
    relay_devices=Relayed_device('/dev/ttyRelayCard','6f','73')
    relay_devices.close_dnx()
    relay_devices.close()
    #copy flash file
    # flashfile = Flashfile(path)
    # flashfile.extract_flash_file(os.path.join(path,'flash.zip'))
    # Run each fastboot command from the flash directory (80 s timeout each).
    os.chdir(path)
    for i in cmd_list:
        print(i)
        Command(i).start(80)
    # Give the device time to boot after `fastboot continue`.
    time.sleep(120)
    base.setting_afterflash()
    time.sleep(2)
    set_up()
# NOTE(review): SUCCESS and FAILURE are not defined in this file — presumably
# injected by the test harness that executes this script; if they are not,
# the NameError on SUCCESS is caught below but `VERDICT = FAILURE` then
# raises a second, uncaught NameError. Verify the harness contract.
try:
    main()
    VERDICT = SUCCESS
except Exception as msg:
    VERDICT = FAILURE
|
# search through the titles first
# if books returned list them
# then tags
# dict tuple: [{title, [pages]}]
# list pages the tags show up on
import sys, json, os, pprint
# Shared pretty-printer for debug output.
pp = pprint.PrettyPrinter(indent=4)
# json_data = []
# for f in sys.argv[1:]:
#     with open(f) as data_file:
#         json_data.append(json.load(data_file))
def search_titles(json_data, query):
    """Return the books whose title contains any non-stopword token of *query*.

    Matching stays case-insensitive substring matching, as in the original.
    Fixes: (1) str.replace() returns a new string, so the original discarded
    its result and never actually removed stopwords; (2) a book matching
    several query tokens was appended once per token — each book now appears
    at most once, in input order.
    """
    stopwords = ['the', 'a', 'of', 'and', 'or', 'an']
    tokens = [w.lower() for w in query.split() if w not in stopwords]
    book_matches = []
    for bk in json_data:
        title = bk['title'].lower()
        if any(tok in title for tok in tokens):
            book_matches.append(bk)
    return book_matches
def search_tags(json_data, query):
    """Return {'title', 'nid'} entries for every page tagged with *query*.

    The nid is expanded to the full abcbooks node URL. Fix: the original
    guarded with `tag['term'] not in tag_results`, but tag_results holds
    dicts, so the test was always True and a page carrying the matching tag
    more than once was listed more than once. Pages are now deduplicated by
    their raw nid.
    """
    tag_results = []
    seen_nids = set()
    q = query.lower()
    for book in json_data:
        for pg in book['pages']:
            if pg['tags'] is None:
                continue
            for tag in pg['tags']:
                if str(tag['term']).lower() == q and pg['nid'] not in seen_nids:
                    seen_nids.add(pg['nid'])
                    tag_results.append({
                        'title': pg['title'],
                        'nid': "https://etc.princeton.edu/abcbooks/node/" + pg['nid'],
                    })
    return tag_results
def search_text(json_data, query):
    """Return {'title', 'nid'} entries for pages whose text contains *query*
    as a whole (case-insensitive, whitespace-delimited) word.

    Fix: the original guarded with `wrd not in text_results`, but
    text_results holds dicts, so the test was always True and a page was
    appended once per occurrence of the word. Pages are now deduplicated by
    their raw nid.
    """
    text_results = []
    seen_nids = set()
    q = query.lower()
    for book in json_data:
        for pg in book['pages']:
            if pg['text'] is None:
                continue
            has_word = any(w.lower() == q for w in pg['text'].split())
            if has_word and pg['nid'] not in seen_nids:
                seen_nids.add(pg['nid'])
                text_results.append({
                    'title': pg['title'],
                    'nid': "https://etc.princeton.edu/abcbooks/node/" + pg['nid'],
                })
    return text_results
def main():
    """Load the JSON book files named on the command line, run demo searches."""
    json_data = []
    results=[]
    # NOTE(review): argv[1:-1] skips the LAST argument — presumably the final
    # CLI argument is meant to be the query rather than a data file; confirm
    # the intended invocation.
    for f in sys.argv[1:-1]:
        with open(f) as data_file:
            # print f
            json_data.append(json.load(data_file))
    # pp.pprint(json_data)
    # Hard-coded demo queries; results are computed but never printed.
    results = search_tags(json_data, "racism")
    results2 = search_titles(json_data, "aBc")
    results3 = search_text(json_data, 'pie')
    # pp.pprint("There's something going on.")
    # pp.pprint(results)
    # pp.pprint(results3)
main()
|
import threading
import time
import pygame
from color_constants import GREEN, BLUE, CRIMSON, CORNSILK3, FORESTGREEN, WHITE
class Visualization(object):
    """Pygame window mirroring the state of an 8-channel relay board.

    Each relay channel is one bit of an 8-bit mask; a set bit draws its
    shape filled (ON = draw width 0), a clear bit draws only the outline
    (OFF = width 2). `update()` may be called from another thread — access
    to the mask is guarded by `self.lock`.
    """
    CH_1 = 1 # Broken relay
    CH_2 = 2 # Red box
    CH_4 = 4 # Blue box
    CH_8 = 8 # Green box
    CH_16 = 16 # Santa
    CH_32 = 32 # Dog
    CH_64 = 64 # 1 tree
    CH_128 = 128 # 2 trees
    CHANNELS = (CH_1, CH_2, CH_4, CH_8, CH_16, CH_32, CH_64, CH_128)
    WIDTH = 50   # per-shape cell width in pixels
    HEIGHT = 50  # per-shape cell height in pixels
    MIN_WIDTH = 400
    # pygame draw widths: 0 = filled, 2 = 2px outline
    ON = 0
    OFF = 2
    GREEN_BOX = CH_8
    SANTA = CH_16
    BLUE_BOX = CH_4
    RED_BOX = CH_2
    DOG = CH_32
    ONE_TREE = CH_64
    TWO_TREES = CH_128
    POS_TWO_TREES = 0
    POS_ONE_TREE = 1
    POS_DOG = 2
    POS_RED_BOX = 3
    POS_BLUE_BOX = 4
    POS_SANTA = 5
    POS_GREEN_BOX = 6
    POS_CH_1 = 7
    # The display is big-endian. 128 bit is on left, 1 bit on right.
    # Rect shapes use [x, y, w, h]; trees use polygon vertex lists.
    locations = {
        CH_1: [POS_CH_1 * WIDTH, 0, WIDTH, HEIGHT],
        GREEN_BOX: [POS_GREEN_BOX * WIDTH, 0, WIDTH, HEIGHT],
        SANTA: [POS_SANTA * WIDTH, 0, WIDTH, HEIGHT],
        BLUE_BOX: [POS_BLUE_BOX * WIDTH, 0, WIDTH, HEIGHT],
        RED_BOX: [POS_RED_BOX * WIDTH, 0, WIDTH, HEIGHT],
        DOG: [POS_DOG * WIDTH, 0, WIDTH, HEIGHT],
        ONE_TREE: [[POS_ONE_TREE*WIDTH + WIDTH/2, 0], [POS_ONE_TREE*WIDTH, HEIGHT],
                   [POS_ONE_TREE*WIDTH + WIDTH-1, HEIGHT]],
        TWO_TREES: [[POS_TWO_TREES*WIDTH + WIDTH/2, 0], [POS_TWO_TREES*WIDTH, HEIGHT],
                    [POS_TWO_TREES*WIDTH + WIDTH-1, HEIGHT]],
    }
    # Channel -> draw callable(surface, width); CH_1 (broken relay) draws nothing.
    shapes = {
        CH_1: lambda s, w: None,
        GREEN_BOX: lambda s, w: pygame.draw.rect(s, GREEN, Visualization.locations[Visualization.GREEN_BOX], w),
        SANTA: lambda s, w: pygame.draw.ellipse(s, CRIMSON, Visualization.locations[Visualization.SANTA], w),
        BLUE_BOX: lambda s, w: pygame.draw.rect(s, BLUE, Visualization.locations[Visualization.BLUE_BOX], w),
        RED_BOX: lambda s, w: pygame.draw.rect(s, CRIMSON, Visualization.locations[Visualization.RED_BOX], w),
        DOG: lambda s, w: pygame.draw.ellipse(s, CORNSILK3, Visualization.locations[Visualization.DOG], w),
        ONE_TREE: lambda s, w: pygame.draw.polygon(s, FORESTGREEN, Visualization.locations[Visualization.ONE_TREE], w),
        TWO_TREES: lambda s, w: pygame.draw.polygon(s, FORESTGREEN, Visualization.locations[Visualization.TWO_TREES], w),
    }

    def __init__(self):
        self.screen = None   # created by start()
        self.relays = 0      # current 8-bit relay mask
        self.done = False    # set by stop() or by closing the window
        self.lock = threading.Lock()

    def update(self, relays):
        """Thread-safely replace the relay bitmask shown by the window.

        Fix: the original acquire/release pair re-released the lock in a bare
        `except` — releasing twice on a failure after release, or releasing
        an unheld lock when acquire itself raised. A `with` block releases
        exactly once on every path.
        """
        with self.lock:
            self.relays = relays

    def start(self):
        """Open the window and run the render loop until stop()/window close."""
        pygame.init()
        size = (max(self.WIDTH * len(self.locations), Visualization.MIN_WIDTH), self.HEIGHT)
        self.screen = pygame.display.set_mode(size)
        pygame.display.set_caption("Test Window")
        clock = pygame.time.Clock()
        while not self.done:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.done = True
            self._draw()
            clock.tick(60)   # cap at 60 fps
            time.sleep(0)    # yield to other threads
        pygame.quit()

    def stop(self):
        """Ask the render loop to exit after the current frame."""
        self.done = True

    def _draw(self):
        """Render one frame: each channel filled (ON) or outlined (OFF)."""
        self.screen.fill(WHITE)
        for ch in self.CHANNELS:
            try:
                self.lock.acquire(True)
                if self.relays & ch == 0:
                    width = Visualization.OFF
                else:
                    width = Visualization.ON
            finally:
                self.lock.release()
            self.shapes[ch](self.screen, width)
        pygame.display.flip()
if __name__ == '__main__':
    # Run the visualization standalone with all relays initially off.
    Visualization().start()
|
# (c) 2013-2014 mPlane Consortium (http://www.ict-mplane.eu)
# Author: Danilo Cicalese
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Implements anycast geolocation for integration into
the mPlane reference implementation.
"""
from datetime import datetime
import mplane.model
import mplane.scheduler
from mplane.components.anycast_mod.anycast import Anycast
import os
# CSV of known anycast /24 prefixes, resolved relative to the working dir.
anycast_data=os.getcwd()+"/mplane/components/anycast_mod/anycast_census.csv"

def services(ip4addr = None):
    """Build the three anycast capability services for *ip4addr*.

    Returns an empty list when no source address is configured.
    """
    if ip4addr is None:
        return []
    capabilities = (anycast_detection(ip4addr),
                    anycast_enumeration(ip4addr),
                    anycast_geolocation(ip4addr))
    return [AnycastService(cap) for cap in capabilities]
def anycast_detection(ipaddr):
    """Capability: detect whether a destination /24 is anycast."""
    capability = mplane.model.Capability(label="anycast-detection-ip4", when="now")
    capability.add_parameter("source.ip4",ipaddr)
    capability.add_parameter("destination.ip4") #listIp
    capability.add_result_column("anycast")
    return capability
def anycast_enumeration(ipaddr):
    """Capability: enumerate the anycast replicas of a destination /24."""
    capability = mplane.model.Capability(label="anycast-enumeration-ip4", when="now")
    capability.add_parameter("source.ip4",ipaddr)
    capability.add_parameter("destination.ip4") #listIp
    capability.add_result_column("anycast")
    capability.add_result_column("anycast_enumeration")
    return capability
def anycast_geolocation(ipaddr):
    """Capability: geolocate the anycast replicas of a destination /24."""
    capability = mplane.model.Capability(label="anycast-geolocation-ip4", when="now")
    capability.add_parameter("source.ip4",ipaddr)
    capability.add_parameter("destination.ip4") #listIp
    capability.add_result_column("anycast")
    capability.add_result_column("anycast_geolocation")
    return capability
class AnycastService(mplane.scheduler.Service):
    """mPlane Service wrapping the anycast detection / enumeration /
    geolocation capabilities defined above."""
    def __init__(self, cap):
        # Every capability built in this module carries source.ip4.
        if not cap.has_parameter("source.ip4"):
            raise ValueError("capability not acceptable")
        super(AnycastService, self).__init__(cap)
    def run(self, spec, check_interrupt):
        """
        Execute this Service
        """
        dipaddr = spec.get_parameter_value("destination.ip4")
        # The census is keyed by /24 prefix: mask the target to x.y.z.0.
        ip24=".".join(str(dipaddr).split(".")[:3])+".0"
        anycast=Anycast(anycast_data)
        start_time = str(datetime.utcnow())
        # Dispatch on the capability label.
        # NOTE(review): `result` stays unbound if the label matches none of
        # the three branches; the labels built in this module always match
        # exactly one, but an explicit else/raise would be safer.
        if "anycast-detection-ip4" in spec.get_label():
            result=anycast.detection(ip24)
        elif "anycast-enumeration-ip4" in spec.get_label():
            result=anycast.enumeration(ip24)
        elif "anycast-geolocation-ip4" in spec.get_label():
            result=anycast.geolocation(ip24)
        end_time = str(datetime.utcnow())
        # derive a result from the specification
        res = mplane.model.Result(specification=spec)
        # put actual start and end time into result
        res.set_when(mplane.model.When(start_time, end_time))
        if result:
            res.set_result_value("anycast",True)
            # Only the enumeration/geolocation capabilities declare these
            # extra columns; fill whichever one is present.
            if res.has_result_column("anycast_enumeration"):
                res.set_result_value("anycast_enumeration", result)
            if res.has_result_column("anycast_geolocation"):
                res.set_result_value("anycast_geolocation", result)
        else:
            res.set_result_value("anycast",False)
        return res
|
from cs50 import SQL
import csv
import sys
db = SQL("sqlite:///students.db")
def main():
    """Import students from the CSV file given as argv[1] into students.db.

    Each CSV row holds (name, house, birth); the name is split into
    first / optional middle / last before insertion.
    """
    # Check command-line arguments for valid syntax
    if len(sys.argv) != 2:
        print('Error: Incorrect usage at command line. Format is import.py database.csv')
        sys.exit(1)
    # Open CSV file given by command-line argument
    with open(sys.argv[1], 'r', newline='') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for row in reader:
            # Split the full name: two tokens mean no middle name.
            # (Previously the variables were pre-set to the STRING 'None',
            # which was misleading and never actually used.)
            name = row[0].split(" ")
            first = name[0]
            last = name[-1]
            middle = None if len(name) == 2 else name[1]
            house = row[1]
            birth = row[2]
            # Insert each student into the students table of students.db,
            # omitting the middle column when there is no middle name.
            if middle is None:
                db.execute("INSERT INTO students (first, last, house, birth) VALUES (?, ?, ?, ?)", first, last, house, birth)
            else:
                db.execute("INSERT INTO students (first, middle, last, house, birth) VALUES (?, ?, ?, ?, ?)", first, middle, last, house, birth)


# Guard the entry point so importing this module does not trigger the import.
if __name__ == '__main__':
    main()
from django.contrib.auth import authenticate
from django.http import Http404
from rest_framework import status
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import AllowAny
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from company.models import Company
from company.serializers import CompanySerializer
from core.serializers import FileSerializer
from form.models import Form
from form.serializers import FormSerializer
from .serializers import *
from .utils import get_json_user, get_profile_info, get_access_token_and_email
class SignUpView(APIView):
    """Create a new user account and return its auth token."""
    permission_classes = (AllowAny,)

    def post(self, request):
        serializer = UserProfileSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        # Authenticate with the just-registered credentials to mint a token.
        user = authenticate(email=request.data.get('email'),
                            password=request.data.get('password'))
        if user is None:
            raise Http404
        token, _ = Token.objects.get_or_create(user=user)
        return Response({'token': token.key}, status=status.HTTP_201_CREATED)
class LoginView(APIView):
    """Exchange email/password credentials for an auth token."""
    permission_classes = (AllowAny,)

    def post(self, request):
        serializer = AuthCredentialsSerializers(data=request.data)
        serializer.is_valid(raise_exception=True)
        credentials = serializer.validated_data
        user = authenticate(email=credentials['email'],
                            password=credentials['password'])
        if user is None:
            # 404 on bad credentials (matches SignUpView's behaviour).
            raise Http404
        token, _ = Token.objects.get_or_create(user=user)
        return Response({'token': token.key}, status=status.HTTP_200_OK)
class MyProfileView(APIView):
    """Read and partially update the authenticated user's profile."""
    permission_classes = (IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    def get(self, request):
        profile_data = UserProfileSerializer(self.request.user).data
        # Attach the most recent form, if the user submitted any.
        forms = Form.objects.filter(profile=self.request.user).order_by('-id')
        profile_data['form'] = FormSerializer(forms[0]).data if forms else {}
        # Attach every company linked to this profile.
        companies = Company.objects.filter(profile=self.request.user)
        profile_data['companies'] = (
            CompanySerializer(instance=companies, many=True).data if companies else []
        )
        return Response(profile_data, status=status.HTTP_200_OK)

    def put(self, request):
        serializer = UserProfileSerializer(instance=self.request.user,
                                           data=self.request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UploadPhotoView(APIView):
    """Attach an uploaded file as the photo of the user profile `pk`."""
    permission_classes = (IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    @staticmethod
    def get_object(pk):
        # 404 rather than a 500 when the profile does not exist.
        try:
            return UserProfile.objects.get(pk=pk)
        except UserProfile.DoesNotExist:
            raise Http404

    def post(self, request, pk):
        serializer = FileSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        uploaded = serializer.save()
        profile = self.get_object(pk)
        profile.photo = uploaded.file
        profile.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class OAuthVKView(APIView):
    """Log a user in via VK OAuth: exchange the authorization code for an
    access token, fetch the VK profile, create a local account if needed,
    and return a DRF auth token."""
    permission_classes = (AllowAny,)

    @staticmethod
    def get_token(user):
        # 404 when no user could be resolved; otherwise (create and) return
        # the user's persistent token key.
        if not user:
            raise Http404
        token, _ = Token.objects.get_or_create(user=user)
        return token.key

    def post(self, request):
        serializer = VKCodeSerializers(data=request.data)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        code = validated_data['code']
        # Exchange the one-time OAuth code for an access token + email.
        access_token, email, user_id = get_access_token_and_email(code)
        if access_token is None:
            return Response({'error': 'code is expired or invalid'}, status=status.HTTP_401_UNAUTHORIZED)
        # Ask VK for the profile payload of the authenticated user.
        response = get_profile_info(access_token, user_id)
        user_json = None
        if response.get('response') is not None:
            # VK wraps the profile in a one-element list under 'response'.
            user_json = get_json_user(response.get('response')[0], email)
        elif response.get('error') is not None:
            return Response({'error': response.get('error').get('error_msg')}, status=status.HTTP_401_UNAUTHORIZED)
        # First VK login creates a local profile; later logins reuse it.
        user = UserProfile.objects.filter(email=email)
        if not user:
            serializer = UserProfileSerializer(data=user_json)
            if serializer.is_valid():
                user = serializer.save()
            else:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        else:
            user = user[0]
        return Response({'token': self.get_token(user)}, status=status.HTTP_201_CREATED)
|
#! coding: utf-8
from __future__ import absolute_import, unicode_literals
from os.path import dirname
import environ
from django_datajsonar import strings
from .api.api import *
from .api.metadata import *
from elasticsearch_dsl.connections import connections
# Filesystem anchors resolved relative to this settings module.
SETTINGS_DIR = environ.Path(__file__) - 1
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path(dirname(dirname(dirname(__file__))))
# Read environment overrides from the .env file next to this module.
env = environ.Env()
environ.Env.read_env(SETTINGS_DIR('.env'))
# Global elasticsearch-dsl connection. ES_URLS may be a comma-separated
# host list; DEFAULT_ES_URL comes from the star-imported api settings.
connections.create_connection(hosts=env("ES_URLS", default=DEFAULT_ES_URL).split(","),
                              timeout=100)
# Core Django flags. NOTE(review): DEBUG defaults to True here; production
# settings presumably override it — confirm in the environment-specific files.
DEBUG = True
ADMINS = ()
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Argentina/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es-ar'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = str(APPS_DIR('media'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/series/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/series/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    str(ROOT_DIR.path('series_tiempo_ar_api/static')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret key is committed to source control; production
# must override it (e.g. from the environment).
SECRET_KEY = '9!n$10$pksr3j5dv*4bc21ke$%0$zs18+vse=al8dpfzi_9w4y'
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name —
# presumably this project runs a Django version that still honors it.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'admin_reorder.middleware.ModelAdminReorder',
)
ANONYMOUS_USER_ID = -1
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'series_tiempo_ar_api.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'conf.wsgi.application'
def export_vars(_):
    """Template context processor exposing the running API version.

    The unused argument is the request object Django passes to every
    context processor.
    """
    return {
        'API_VERSION': env('API_VERSION', default='local'),
    }
# Django template engine configuration (explicit loaders, APP_DIRS off).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                'conf.settings.base.export_vars',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            'debug': True
        },
    },
]
# Installed apps, split by origin: Django core, third-party, project apps.
DJANGO_BASE_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'admin_shortcuts',
    'django.contrib.admin',
)
VENDOR_APPS = (
    "django_rq",
    'sendfile',
    'des',
    'scheduler',
    'django_datajsonar',
    'solo',
    'minio_storage',
    'admin_reorder',
    'corsheaders',
)
APPS = (
    'series_tiempo_ar_api.apps.api.apps.ApiConfig',
    'series_tiempo_ar_api.apps.analytics',
    'series_tiempo_ar_api.apps.management.apps.ManagementConfig',
    'series_tiempo_ar_api.apps.metadata.apps.MetadataConfig',
    'series_tiempo_ar_api.apps.dump',
    'series_tiempo_ar_api.libs.indexing',
    'series_tiempo_ar_api.libs.custom_admins',
)
INSTALLED_APPS = DJANGO_BASE_APPS + VENDOR_APPS + APPS
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    "formatters": {
        "rq_console": {
            "format": "%(asctime)s %(message)s",
            "datefmt": "%H:%M:%S",
        },
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue'
        }
    },
    'handlers': {
        "rq_console": {
            "level": "DEBUG",
            "class": "rq.utils.ColorizingStreamHandler",
            "formatter": "rq_console",
            "exclude": ["%(asctime)s"],
        },
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
        },
        # 'apps' logs in development, 'production' when DEBUG is False.
        'apps': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'filters': ['require_debug_true'],
            "formatter": "simple",
        },
        'production': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'filters': ['require_debug_false'],
            "formatter": "verbose",
        },
    },
    'loggers': {
        'django.security.DisallowedHost': {
            'handlers': ['null'],
            'propagate': False,
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'series_tiempo_ar_api': {
            'handlers': ['apps', 'production'],
            'level': 'INFO',
            'propagate': False,
        },
        "rq.worker": {
            "handlers": ["rq_console", ],
            "level": "DEBUG"
        },
    }
}
# EMAILS
EMAIL_BACKEND = 'des.backends.ConfiguredEmailBackend'
# Redis connection settings shared by every RQ queue below.
DEFAULT_REDIS_HOST = env("DEFAULT_REDIS_HOST", default="localhost")
DEFAULT_REDIS_PORT = env("DEFAULT_REDIS_PORT", default="6379")
DEFAULT_REDIS_DB = env("DEFAULT_REDIS_DB", default="0")
REDIS_SETTINGS = {
    'HOST': DEFAULT_REDIS_HOST,
    'PORT': DEFAULT_REDIS_PORT,
    'DB': DEFAULT_REDIS_DB,
}
# Redis queues: one per asynchronous task to execute.
# FIX: the 'analytics' entry appeared twice; the duplicate was harmless only
# because RQ_QUEUES is built with a dict comprehension, but it was misleading.
RQ_QUEUE_NAMES = [
    'default',
    'upkeep',
    'dj_indexing',
    'indexing',
    'meta_indexing',
    'csv_dump',
    'xlsx_dump',
    'sql_dump',
    'dta_dump',
    'analytics',
    'integration_test',
    'api_index',
    'api_report',
    'hits_indicators',
]
# Every queue shares the same Redis connection settings.
RQ_QUEUES = {name: REDIS_SETTINGS for name in RQ_QUEUE_NAMES}
ENV_TYPE = env('ENV_TYPE', default='')
# Scripts to be cron-scheduled for indexing. Defaults are for local use;
# in production these variables must be set!
IMPORT_ANALYTICS_SCRIPT_PATH = env('IMPORT_ANALYTICS_CMD_PATH', default='/bin/true import_analytics')
INDEX_METADATA_SCRIPT_PATH = env('INDEX_METADATA_CMD_PATH', default='/bin/true index_metadata')
INTEGRATION_TEST_SCRIPT_PATH = env('INTEGRATION_TEST_CMD_PATH', default='/bin/true integration_test')
# django-datajsonar configuration
DATAJSON_AR_TIME_SERIES_ONLY = True
DATAJSON_AR_DISTRIBUTION_STORAGE = 'minio_storage.storage.MinioMediaStorage'
# Minio: distributed file system, used to run tasks that generate files in
# one environment and read them back from the web server.
MINIO_STORAGE_ENDPOINT = env('MINIO_STORAGE_ENDPOINT', default="localhost:9000")
MINIO_STORAGE_USE_HTTPS = False
MINIO_STORAGE_MEDIA_BUCKET_NAME = env('MINIO_STORAGE_BUCKET_NAME', default='tsapi.dev.media.bucket')
MINIO_STORAGE_AUTO_CREATE_MEDIA_BUCKET = True
# Human-readable (Spanish) titles shown in the admin for each stage.
STAGES_TITLES = {
    'READ_DATAJSON_COMPLETE': 'Read Datajson (corrida completa)',
    'READ_DATAJSON_METADATA': 'Read Datajson (sólo metadatos)',
    'API_INDEX': 'Indexación de datos (sólo actualizados)',
    'API_INDEX_FORCE': 'Indexación de datos (forzar indexación)',
    'DUMPS_CSV': 'Generación de dumps CSV',
    'DUMPS_XLSX': 'Generación de dumps XLSX',
    'DUMPS_SQL': 'Generación de dumps SQL',
    'DUMPS_DTA': 'Generación de dumps DTA',
    'METADATA_INDEX': 'Indexación de metadatos',
    'INTEGRATION_TEST': 'Test de integración',
    'INDEXATION_REPORT': 'Reporte de indexación',
    'IMPORT_ANALYTICS': 'Importado de analytics',
    'HITS_INDICATORS': 'Cálculo de indicadores de popularidad',
}
# Asynchronous stages to run with django-datajsonar's Synchronizer.
# Each entry maps a stage title to the callable that enqueues it, the RQ
# queue it runs on, and (optionally) the task model tracking its progress.
DATAJSONAR_STAGES = {
    STAGES_TITLES['READ_DATAJSON_COMPLETE']: {
        'callable_str': 'django_datajsonar.tasks.schedule_full_read_task',
        'queue': 'indexing',
        'task': 'django_datajsonar.models.ReadDataJsonTask',
    },
    STAGES_TITLES['READ_DATAJSON_METADATA']: {
        'callable_str': 'django_datajsonar.tasks.schedule_metadata_read_task',
        'queue': 'indexing',
        'task': 'django_datajsonar.models.ReadDataJsonTask',
    },
    STAGES_TITLES['API_INDEX']: {
        'callable_str': 'series_tiempo_ar_api.apps.management.tasks.indexation.schedule_api_indexing',
        'queue': 'api_index',
        'task': 'series_tiempo_ar_api.apps.management.models.IndexDataTask',
    },
    STAGES_TITLES['API_INDEX_FORCE']: {
        'callable_str': 'series_tiempo_ar_api.apps.management.tasks.indexation.schedule_force_api_indexing',
        'queue': 'api_index',
        'task': 'series_tiempo_ar_api.apps.management.models.IndexDataTask',
    },
    STAGES_TITLES['DUMPS_CSV']: {
        'callable_str': 'series_tiempo_ar_api.apps.dump.tasks.enqueue_write_csv_task',
        'queue': 'csv_dump',
        'task': 'series_tiempo_ar_api.apps.dump.models.GenerateDumpTask',
    },
    STAGES_TITLES['DUMPS_XLSX']: {
        'callable_str': 'series_tiempo_ar_api.apps.dump.tasks.enqueue_write_xlsx_task',
        'queue': 'xlsx_dump',
        'task': 'series_tiempo_ar_api.apps.dump.models.GenerateDumpTask',
    },
    STAGES_TITLES['DUMPS_SQL']: {
        'callable_str': 'series_tiempo_ar_api.apps.dump.tasks.enqueue_write_sql_task',
        'queue': 'sql_dump',
        'task': 'series_tiempo_ar_api.apps.dump.models.GenerateDumpTask',
    },
    STAGES_TITLES['DUMPS_DTA']: {
        'callable_str': 'series_tiempo_ar_api.apps.dump.tasks.enqueue_write_dta_task',
        'queue': 'dta_dump',
        'task': 'series_tiempo_ar_api.apps.dump.models.GenerateDumpTask',
    },
    STAGES_TITLES['METADATA_INDEX']: {
        'callable_str': 'series_tiempo_ar_api.apps.metadata.indexer.metadata_indexer.enqueue_new_index_metadata_task',
        'queue': 'meta_indexing',
        'task': 'series_tiempo_ar_api.apps.metadata.models.IndexMetadataTask',
    },
    STAGES_TITLES['INTEGRATION_TEST']: {
        'callable_str': 'series_tiempo_ar_api.apps.management.tasks.integration_test.enqueue_new_integration_test',
        'queue': 'integration_test',
        'task': 'series_tiempo_ar_api.apps.management.models.IntegrationTestTask',
    },
    STAGES_TITLES['INDEXATION_REPORT']: {
        'callable_str': 'series_tiempo_ar_api.libs.indexing.tasks.send_indexation_report_email',
        'queue': 'api_report',
        'task': 'series_tiempo_ar_api.apps.management.models.IndexDataTask',
    },
    STAGES_TITLES['IMPORT_ANALYTICS']: {
        'callable_str': 'series_tiempo_ar_api.apps.analytics.tasks.enqueue_new_import_analytics_task',
        'queue': 'analytics',
    },
    STAGES_TITLES['HITS_INDICATORS']: {
        'callable_str': 'series_tiempo_ar_api.apps.analytics.tasks.enqueue_new_calculate_hits_indicators_task',
        'queue': 'hits_indicators'
    },
}
# Default synchronizer schedule: one full forced run at night plus several
# lighter intraday refreshes on weekdays, and a daily analytics import.
SYNCHRO_DEFAULT_CONF = [
    {
        'title': 'Corrida completa (lunes a viernes - 00 - forzar)',
        'stages': [STAGES_TITLES['READ_DATAJSON_COMPLETE'], STAGES_TITLES['API_INDEX_FORCE'],
                   STAGES_TITLES['METADATA_INDEX'],
                   STAGES_TITLES['DUMPS_CSV'], STAGES_TITLES['DUMPS_XLSX'], STAGES_TITLES['DUMPS_SQL'],
                   STAGES_TITLES['DUMPS_DTA'], STAGES_TITLES['INTEGRATION_TEST'], STAGES_TITLES['INDEXATION_REPORT']],
        'scheduled_time': '00:15',
        'week_days': strings.WEEK_DAYS
    },
    {
        'title': 'Importado de Analytics',
        'stages': [STAGES_TITLES['IMPORT_ANALYTICS'], STAGES_TITLES['HITS_INDICATORS']],
        'scheduled_time': '00:05'
    },
    {
        'title': 'Corrida intermedia (lunes a viernes - 08)',
        'stages': [STAGES_TITLES['READ_DATAJSON_COMPLETE'], STAGES_TITLES['API_INDEX'], STAGES_TITLES['METADATA_INDEX']],
        'scheduled_time': '08:00',
        'week_days': strings.WEEK_DAYS
    },
    {
        'title': 'Corrida intermedia (lunes a viernes - 11)',
        'stages': [STAGES_TITLES['READ_DATAJSON_COMPLETE'], STAGES_TITLES['API_INDEX'], STAGES_TITLES['METADATA_INDEX']],
        'scheduled_time': '11:00',
        'week_days': strings.WEEK_DAYS
    },
    {
        'title': 'Corrida intermedia (lunes a viernes - 14)',
        'stages': [STAGES_TITLES['READ_DATAJSON_COMPLETE'], STAGES_TITLES['API_INDEX'], STAGES_TITLES['METADATA_INDEX']],
        'scheduled_time': '14:00',
        'week_days': strings.WEEK_DAYS
    },
    {
        'title': 'Corrida intermedia (lunes a viernes - 18)',
        'stages': [STAGES_TITLES['READ_DATAJSON_COMPLETE'], STAGES_TITLES['API_INDEX'], STAGES_TITLES['METADATA_INDEX']],
        'scheduled_time': '18:00',
        'week_days': strings.WEEK_DAYS
    },
]
# Ordering of apps in the admin sidebar (admin_reorder middleware).
ADMIN_REORDER = (
    'auth',
    'django_datajsonar',
    'management',
    'metadata',
    'dump',
    'analytics',
    {'app': 'des', 'label': 'Configuración correo'},
    'scheduler',
    'sites',
)
LOGIN_URL = 'admin:login'
# NOTE(review): no default value — env lookup fails if DEFAULT_FROM_EMAIL
# is unset, so it is presumably required in every environment; confirm.
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL')
# Quick-access tiles shown on the admin index (admin_shortcuts app).
ADMIN_SHORTCUTS = [
    {
        'shortcuts': [
            {
                'title': 'Home',
                'url_name': 'admin:index',
            },
            {
                'title': 'Nodos',
                'url_name': 'admin:django_datajsonar_node_changelist',
                'icon': 'university',
            },
            {
                'title': 'Usuarios',
                'url_name': 'admin:auth_user_changelist',
            },
            {
                'title': 'Datasets',
                'url_name': 'admin:django_datajsonar_dataset_changelist',
                'icon': 'database',
            },
            {
                'title': 'Distribuciones',
                'url_name': 'admin:django_datajsonar_distribution_changelist',
                'icon': 'file-alt',
            },
            {
                'title': 'Series',
                'url_name': 'admin:django_datajsonar_field_changelist',
                'icon': 'list'
            }
        ]
    },
    {
        'title': 'Rutinas',
        'shortcuts': [
            {
                'title': 'Lectura de nodos',
                'url_name': 'admin:django_datajsonar_readdatajsontask_changelist',
                'icon': 'search',
            },
            {
                'title': 'Indexación de datos',
                'url_name': 'admin:management_indexdatatask_changelist',
                'icon': 'lightbulb',
            },
            {
                'title': 'Tareas programadas',
                'url_name': 'admin:django_datajsonar_synchronizer_changelist',
                'icon': 'cogs',
            },
        ]
    },
]
ADMIN_SHORTCUTS_SETTINGS = {
    'show_on_all_pages': True,
    'hide_app_list': False,
    'open_new_window': False,
}
|
# Generated by Django 2.2.24 on 2021-08-06 21:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Site.description and relaxes several
    Profile/Site fields to blank/null with updated help texts.

    NOTE(review): historical migrations should not be edited by hand. The
    French help_text strings are runtime data and are left untouched here,
    including the 'avpir' typo (presumably 'avoir') in Profile.phone —
    fix it in the model and generate a new migration instead.
    """

    dependencies = [
        ('remplissages', '0002_auto_20210805_1048'),
    ]

    operations = [
        migrations.AddField(
            model_name='site',
            name='description',
            field=models.TextField(blank=True, help_text='ajouter une description du site', null=True, verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(blank=True, help_text='ajouter une photo', null=True, upload_to='images/profile/%Y/%m/%d/'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='phone',
            field=models.PositiveIntegerField(blank=True, default=0, help_text='le numéro de télephone doit avpir 9 chiffres', null=True),
        ),
        migrations.AlterField(
            model_name='site',
            name='image',
            field=models.ImageField(blank=True, help_text='ajouter une image', null=True, upload_to='images/site/%Y/%m/%d/'),
        ),
        migrations.AlterField(
            model_name='site',
            name='nom_site_evangelisation',
            field=models.CharField(help_text='le nom du site doit avoir au moins 03 caractères', max_length=200, verbose_name="site d'évangélisation"),
        ),
    ]
|
import logging
import os
import sys
import threading
import time
import unittest
from aorta.const import CHANNEL
from aorta.const import HOST
from aorta.const import PORT
from aorta.message import Message
from aorta.listener import Listener
import aorta.backends
# Route all log output (DEBUG and up) to stdout so test runs are verbose.
logger = logging.getLogger()
logger.level = logging.DEBUG
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# True when the optional qpid-proton backend module was importable.
PROTON_INSTALLED = hasattr(aorta.backends, 'qpid_proton')
@unittest.skipIf(not PROTON_INSTALLED, "Skipping Apache Proton tests (not installed).")
class AortaTestCase(unittest.TestCase):
    """Integration tests for the qpid-proton messaging backend.

    NOTE(review): these tests appear to require a reachable AMQP broker at
    HOST:PORT (from aorta.const) — confirm before running in CI.
    """
    channel = CHANNEL

    def setUp(self):
        # Fresh backend and channel URL for each test.
        self.backend = aorta.backends.qpid_proton.MessagingBackend()
        self.url = "{0}:{1}/{2}".format(HOST, PORT, CHANNEL)

    def test_destroy(self):
        # Backend must shut down cleanly while messages are in flight.
        self.backend.listen(self.url)
        self.backend.start()
        for i in range(10):
            self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        self.backend.destroy()

    def test_send(self):
        # Four blocking sends must yield four recorded deliveries.
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        self.assertEqual(self.backend.deliveries, 4)

    def test_recv(self):
        # A sent message must round-trip back through get().
        self.backend.listen(self.url)
        self.backend.send_message(self.url, Message(body="Hello world!"))
        receiver_id, msg = self.backend.get()
        self.assertEqual(msg.body, "Hello world!")

    def test_orphaned_received(self):
        # A message received with no consumer draining it must not wedge the backend.
        self.backend.listen(self.url)
        self.backend.start()
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)

    def test_listener(self):
        # Listener.wait() must return once a message has been dispatched.
        listener = Listener(self.url, backend=self.backend)
        listener.start()
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        listener.wait()

    def test_backend_dispatch_catches_fatal_exception(self):
        # An exception raised from dispatch() must not kill the backend.
        listener = ExceptionOnDispatchRaisingListener(
            self.url, backend=self.backend)
        listener.start()
        self.backend.send_message(self.url, Message(body="Hello world!"), block=True)
        listener.wait()
class ExceptionOnDispatchRaisingListener(Listener):
    """Listener whose dispatch() always fails; used to verify that the
    backend survives exceptions raised from message dispatch."""

    def dispatch(self, *args, **kwargs):
        # Set the parent's name-mangled Event first so wait() in the test
        # does not block indefinitely after the exception is raised.
        self._Listener__event.set()
        raise Exception
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
# coding: utf-8
# In[1]:
import sys
if sys.version_info[0] == 2:
import Tkinter as tk
import tkFont as font
else:
import tkinter as tk
from tkinter import font
import PIL.Image, PIL.ImageTk
import cv2
import numpy as np
from keras.models import load_model
from scipy.misc import imresize
from skimage.transform import resize, rotate
import math
import face
import h5py
import os, signal
from threading import Thread, Event
from time import sleep
from scipy.io import wavfile
from scipy.ndimage.filters import maximum_filter1d, gaussian_filter
# Input resolution expected by the face-attribute network.
IMAGE_SIZE = (128, 128)
# Target inter-ocular distance in pixels used when normalising faces.
IOD = 40.0
# In[2]:
class MyVideoCapture:
    def __init__(self, video_source=0):
        """Open the webcam, load the face models and bring RoboFace to a
        neutral pose. Raises ValueError when the video source cannot open."""
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened(): raise ValueError("Unable to open video source", video_source)
        self.width, self.height = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH), self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT) # Get video source width and height
        # Indicator flags (0/1) toggled by the UI: demo mode, face-box
        # overlay, tracking, caption overlay.
        self.ind_run_demo, self.ind_recog_face, self.ind_track_face, self.ind_caption = 0, 0, 0, 0
        self.caption = ""
        self.roboFace = face.Face(x_weight=0.8, y_weight=0.2)
        #################################################################
        # Set up tracker
        self.tracker = cv2.TrackerMedianFlow_create()
        self.Tracking_Period=5 # set tracking period before re-initialisation in seconds
        # Load Neural Net model and meanFace
        self.model = load_model('../face_detection/trained/pretrained_CelebA_normalised0203-05.h5')
        self.meanFace = np.load('../face_detection/mean_face_normalised.npy')
        # Load Face Cascade and Eye Cascade classifiers
        self.face_cascade = cv2.CascadeClassifier('../face_detection/haarcascade_frontalface_alt.xml')
        self.eye_cascade = cv2.CascadeClassifier('../face_detection/haarcascade_eye.xml')
        #################################################################
        # Set Speed for smoother movement
        self.roboFace.setSpeedAll(100)
        self.roboFace.setSpeedHead(80)
        # Event set while the robot is speaking (coordinates lip movement).
        self.flag = Event()
        self.flag.clear()
        #################################################################
        self.roboFace.neutral()
        self.probStream = None
        self.saidNothing = 0  # frames elapsed since the robot last spoke
        self.t1 = cv2.getTickCount()
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.waiting_phrases=["Hi! Is anybody here?","Greetings human! Nice to meet you! ","My name is roboface! I am a friendly robot!","Hello! It's a pleasure to meet you!","I feel so lonely!"]
    def __del__(self): # Release the video source when the object is destroyed
        if self.vid.isOpened(): self.vid.release()
def get_frame(self):
if self.vid.isOpened():
ret, frame = self.vid.read()
if self.ind_run_demo == 1:
self.ind_track_face = 0 # turn off independent tracker if demo is on
if self.flag.isSet() == False:
self.caption=""
self.run_demo(frame)
if self.flag.isSet(): _, _, _ = self.detectFace(frame);
if self.ind_recog_face == 1: self.recog_faces(frame);
if self.ind_track_face == 1: _, _, _ = self.detectFace(frame);
if self.ind_caption == 1: self.show_caption(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# frame = resize(frame, (self.canvas_width,600), mode='edge')
# Return a boolean success flag and the current frame converted to BGR
# if ret: return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if ret: return (ret, frame)
else: return (ret, None)
else: return (ret, None)
def imgCrop(self, image, cropBox, boxScale=1):
'''
Crop an area around the detected face (by OpenCV) in order to feed it into the prediction algorithm (NN).
'''
off = 90
y = max(cropBox[1] - 3 * off, 0)
x = max(cropBox[0] - 2 * off, 0)
off = 50
y = max(cropBox[1] - 3 * off, y)
x = max(cropBox[0] - 2 * off, x)
off = 20
y = max(cropBox[1] - 3 * off, y)
x = max(cropBox[0] - 2 * off, x)
cropped = image[y:cropBox[1] + cropBox[3] + 90, x:cropBox[0] + cropBox[2] + 30]
dims = cropped.shape
return cropped, x, y
    # Normalize faces using inter-ocular distance (i.o.d.)
    def normaliseImage(self, image, eyes, xcrop, ycrop):
        """Scale, rotate and crop a face image so the eyes sit at a fixed
        inter-ocular distance; returns an IMAGE_SIZE image or None on failure."""
        # resize, such that i.o.d is always same
        left_eye, right_eye = eyes[0] + np.array([xcrop, ycrop, 0, 0]), eyes[1] + np.array([xcrop, ycrop, 0, 0])
        scale = IOD / np.linalg.norm(left_eye - right_eye)
        left_eye, right_eye = scale * left_eye, scale * right_eye
        im = resize(image, (int(scale * image.shape[0]), int(scale * image.shape[1])), mode='edge')
        # rotate to keep inter ocular line horizontal
        # NOTE(review): math.atan2 returns RADIANS while skimage's rotate()
        # takes its angle in DEGREES — this looks like a unit mismatch
        # (math.degrees(angle) was presumably intended); confirm.
        diff = np.subtract(left_eye, right_eye)
        angle = math.atan2(diff[0], diff[1])
        im = rotate(im, -angle, center=(left_eye[0], left_eye[1]), preserve_range=True, mode='edge')
        # new resizing for making the image compatible with the trained NN.
        iod = np.linalg.norm(left_eye - right_eye)
        xmin, xmax = int(left_eye[0] - 1.6 * iod), int(left_eye[0] + 2 * iod)
        ymin, ymax = int(left_eye[1] - 1.3 * iod), int(right_eye[1] + 1.3 * iod)
        xmin, xmax = max(0, xmin), min(im.shape[0], xmax)
        ymin, ymax = max(0, ymin), min(im.shape[1], ymax)
        im = im[xmin:xmax, ymin:ymax, :]
        try:
            im = resize(im, IMAGE_SIZE, mode='edge')
        except:
            # NOTE(review): bare except deliberately treats any resize
            # failure (e.g. empty crop) as "no usable face" — best effort.
            return None
        return im
#######################################################################
# Definition of Track_face:
# Tracks only the face of the active user with MEDIANFLOW at opencv
# and returns the bounding box of the face
#######################################################################
def Track_face(self, frame):
ok, bbox = self.tracker.update(frame)
if ok==False:
# Tracking failure
cv2.putText(frame, "Tracking failure detected", (100,20), self.font, 0.75,(0,0,255),2)
ok, bbox, faces = self.init_tracker(frame)
print("Track_Face - Tracker Re-Initialisation")
print("Number of Detected faces: ",len(faces))
return ok, bbox
def find_faces(self, frame):
bbox = None
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
#calculate the rectangle with the biggest area
#biggest area means that its the closest face to the robot
max_area=0
for face in faces:
area=face[2]*face[3]
if area>max_area:
max_area=area
bbox=tuple(face)
return bbox, faces # return biggest box and all faces
# Draw bounding box on all faces
def recog_faces(self, frame):
_, faces = self.find_faces(frame)
for face in faces:
(x, y, w, h) = face
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
# initialize tracker
def init_tracker(self, frame):
bbox, faces = self.find_faces(frame)
self.tracker = cv2.TrackerMedianFlow_create()
# re-initialise the tracker in case of lost target
ok = self.tracker.init(frame, bbox)
if len(faces)==0: ok=False
return ok, bbox, faces
# Re-initialization of Tracker after given period of time
def Re_Init_Tracker(self, frame):
cv2.putText(frame, "Re-Initialising Tracking", (100,50), self.font, 0.75,(255,0,0),2)
ok, _, faces = self.init_tracker(frame)
print("Tracker Re-Initialisation")
print("Number of Detected faces: ",len(faces))
t1 = cv2.getTickCount()
return ok, t1
    def detectFace(self, image):
        """Track the active face, detect its eyes, steer the robot head and
        produce a normalised face crop for the attribute network.

        Returns (normalised_image_or_None, annotated_image, tracking_ok).
        """
        # http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        unaltered_image = image.copy()  # pristine copy for the NN crop
        eyes = None
        normalised_image = None
        # Track face in current frame
        ok, bbox = self.Track_face(image)
        #print(ok,bbox)
        if ok==True and bbox is not None:
            (x, y, w, h) = bbox
            # Tracker output is float; indexing needs ints.
            x=int(x)
            y=int(y)
            w=int(w)
            h=int(h)
            bbox=(x, y, w, h)
            # show face bounding box on Webcam Preview
            cv2.rectangle(image, (int(x), int(y)), (int(x) + int(w), int(y) + int(h)), (0, 0, 255), 3)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = image[y:y + h, x:x + w]
            # normalise image in order to predict on it
            # croppedImage = imgCrop(image, face, boxScale=1)
            # detect eyes for Inter Ocular Distance
            eyes = self.eye_cascade.detectMultiScale(roi_gray)
            if len(eyes) == 2:
                # NOTE(review): `eyes[1][0:2] + y` adds the VERTICAL offset
                # to both right-eye coordinates while the left eye uses x —
                # this looks like it should be `+ x` as well; confirm.
                left_eye = eyes[0][0:2] + x
                right_eye = eyes[1][0:2] + y
                eyex = int((left_eye[0] + right_eye[0]) * .5)
                eyey = int((left_eye[1] + right_eye[1]) * .5)
                self.roboFace.moveHead(eyex, eyey)
                # suggestion: skip this frame as prediction, so return None, image
                for (ex, ey, ew, eh) in eyes:
                    cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # Only normalise when both eyes sit on a near-horizontal line.
            if len(eyes) == 2 and np.abs(eyes[0, 1] - eyes[1, 1]) < 10:
                offset1 = np.sqrt((eyes[0, 2] ** 2 + eyes[0, 3] ** 2)) * 0.5
                offset2 = np.sqrt((eyes[1, 2] ** 2 + eyes[1, 3] ** 2)) * 0.5
                real_eyes = eyes + np.array([[x + offset1, y + offset1, 0, 0], [x + offset2, y + offset2, 0, 0]])
                real_eyes = np.sort(real_eyes, axis=0)
                cropped_image, xcrop, ycrop = self.imgCrop(unaltered_image, bbox)
                normalised_image = self.normaliseImage(cropped_image, real_eyes, -xcrop, -ycrop)
        return normalised_image, image, ok
def mapAttributes(self, classes):
    """Map boolean prediction flags to their attribute names.

    Reads the ordered, space-separated attribute list from
    wanted_attributes_normalised.txt and returns the names whose
    corresponding flag in `classes` is truthy.

    Parameters
    ----------
    classes : sequence of bool
        One flag per attribute, in file order (e.g. ``(proba > 0.6)[0]``).

    Returns
    -------
    list of str
        Names of the attributes predicted present, in file order.
    """
    with open('../face_detection/wanted_attributes_normalised.txt', 'r') as f:
        attributes = f.read().strip('\n').split(' ')
    # Pair each flag with its attribute name and keep the truthy ones.
    # (The original compared `cl == True`; plain truthiness is equivalent
    # for the bool/0/1 flags produced by the thresholded probabilities.)
    return [name for name, flag in zip(attributes, classes) if flag]
################################################################################
# Declaration of: say - Talk - MoveLips
################################################################################
def Undersampled_Lip_Tragectory(self, phrase, Sleep_Time):
    """Synthesise `phrase` to test.wav and derive a lip-amplitude envelope.

    Runs espeak to render the phrase to a WAV file, smooths the waveform
    into an amplitude envelope scaled to [0, max_Amplitude], and resamples
    it once every `Sleep_Time` seconds.

    Returns (Amp, T): numpy arrays of lip amplitudes and their timestamps.

    NOTE(review): `phrase` is interpolated into a shell command for
    os.system; say() strips single quotes first, but this remains
    shell-injection-prone for other metacharacters.
    """
    A = "espeak -z -s 80 -v female5 -w test.wav "
    A = A + "'" + phrase + "'"
    os.system(A)
    samplerate, data = wavfile.read('test.wav')
    dt = 1 / float(samplerate)  # sample period in seconds (not used below)
    times = np.arange(len(data)) / float(samplerate)
    N = len(times)
    # Envelope: running max over ~1000 samples, then Gaussian smoothing.
    max_data = maximum_filter1d(data, size=1000)
    max_data = gaussian_filter(max_data, sigma=100)
    max_Amplitude = 10  # top of the lip-servo amplitude range
    Amplitude = max_Amplitude * (max_data / float(np.max(max_data)))
    # Take one envelope sample per Sleep_Time interval.
    n = Sleep_Time * samplerate
    Amp = []
    T = []
    i = 0
    while (i * n < N):
        Amp.append(Amplitude[int(i * n)])
        T.append(times[int(i * n)])
        i = i + 1
    Amp = np.array(Amp)
    T = np.array(T)
    return Amp, T
def MoveLips(self, Sleep_Time, Amplitude, flag):
    """Animate the robot's lips while speech plays (runs on its own thread).

    Steps through the precomputed amplitude envelope every `Sleep_Time`
    seconds for as long as `flag` (the shared "speaking" event) stays set;
    Talk() clears the flag when the audio ends.
    """
    self.roboFace.setSpeedLips(127)
    sleep(0.5)  # let the servo speed setting take effect before moving
    i = 0
    while flag.isSet() and i < len(Amplitude):
        self.roboFace.moveLips(int(Amplitude[i]))
        sleep(Sleep_Time)
        i = i + 1
    # Bug fix: the original tested `~flag.isSet()`. `~` is bitwise NOT, and
    # both ~True (-2) and ~False (-1) are truthy, so the mouth was forced
    # shut unconditionally. Use boolean `not` so we only close the mouth
    # when speech has actually stopped (flag cleared).
    if not flag.isSet():
        self.roboFace.moveLips(0)
        sleep(0.05)
def Talk(self, phrase, flag):
    """Speak `phrase` via espeak (blocking) and clear `flag` when done.

    `flag` is the shared "currently speaking" event polled by MoveLips;
    clearing it tells the lip-animation thread to stop.
    NOTE(review): the phrase goes through os.system; the quote-stripping
    done in say() is the only sanitisation.
    """
    A = "espeak -z -s 80 -v female5 "
    A = A + "'" + phrase + "'"
    os.system(A)
    flag.clear()
def say(self, phrase, flag):
    """Speak `phrase` with synchronised lip movement, without blocking.

    Sets `flag` (the shared "speaking" event), precomputes the lip
    trajectory, then launches speech and lip animation on two threads;
    Talk() clears the flag when the audio finishes.
    """
    phrase = phrase.replace("'", " ")  # keep the shell quoting in the espeak commands intact
    self.caption=phrase  # remembered so show_caption() can overlay the spoken text
    flag.set()
    Sleep_Time = 0.05  # lip update period in seconds
    Amplitude, Time = self.Undersampled_Lip_Tragectory(phrase, Sleep_Time)
    thread_movement = Thread(target=self.MoveLips, args=(Sleep_Time, Amplitude, flag))
    thread_talk = Thread(target=self.Talk, args=(phrase, flag))
    thread_talk.start()
    thread_movement.start()
################################################################################
# End of Declaration: say - Talk - MoveLips
################################################################################
def sayDoSomething(self, pred_attr):
    """React to the predicted attributes: set an expression and say a remark.

    Picks a facial expression by priority (smiling > black hair >
    eyeglasses > neutral) and speaks one randomly chosen phrase matching
    one of the predicted attributes.

    `pred_attr` must be a non-empty list of keys of `talk`; an empty list
    makes np.random.randint(0, 0) raise ValueError, and an attribute
    missing from `talk` raises KeyError.
    """
    talk = {'Smiling': 'I like it when people smile at me!',
            'Female': 'You are a female, am I right?',
            'Male': 'You are a male, am I right?',
            'Wearing_Earrings': 'You are wearing beautiful earrings today!',
            'Wearing_Lipstick': 'I see you are wearing lipstick today. Pretty!',
            'Blond_Hair': 'Nice blond hair!',
            'Eyeglasses': 'You are wearing eyeglasses!',
            'Brown_Hair': 'You have nice brown hair!',
            'Black_Hair': 'You have nice black hair!',
            'Gray_Hair': 'Gray Hair! You must be a wise man!',
            'Wavy_Hair': 'You have nice wavy hair!',
            'Straight_Hair': 'You have nice straight hair.'
            }
    if 'Smiling' in pred_attr:
        self.roboFace.happy(moveHead=False,movelips=False)
    elif 'Black_Hair' in pred_attr:
        self.roboFace.angry(moveHead=False,movelips=False)
    elif 'Eyeglasses' in pred_attr:
        self.roboFace.unsure(moveHead=False,movelips=False)
    else:
        self.roboFace.neutral(moveHead=False,movelips=False)
    index = np.random.randint(0, len(pred_attr))
    self.say(talk[pred_attr[index]], self.flag)
def getProbaStream(self, probStream, probs):
    """Append the latest prediction probabilities to the running stream.

    Parameters
    ----------
    probStream : numpy.ndarray or None
        Accumulated per-frame probability rows (None before the first frame).
    probs : numpy.ndarray
        Probability row for the current frame.

    Returns
    -------
    numpy.ndarray
        `probs` itself on the first call, otherwise `probStream` with
        `probs` stacked on as a new row.
    """
    # Bug fix: the original used `probStream == None`, which numpy turns
    # into an element-wise comparison once probStream is an ndarray, so
    # the `if` raised "truth value of an array is ambiguous" on the second
    # call. Identity comparison with `is None` is the correct test.
    if probStream is None:
        probStream = probs
    else:
        probStream = np.vstack((probStream, probs))
    return probStream
def run_demo(self, frame):
    """One demo iteration: track/normalise a face, accumulate predictions,
    and react (speak + expression) once enough frames have been gathered.

    NOTE(review): this block's branch nesting was reconstructed from an
    indentation-less source; confirm against the original file.
    """
    normalised_image, frame, ok = self.detectFace(frame)
    # if a face is detected and the normalisation was successful, predict on it
    if normalised_image is not None:
        normalised_image = normalised_image[:, :, ::-1]  # BGR -> RGB channel order for the model
        # subtract mean face
        X_test = np.expand_dims(normalised_image, axis=0)
        X_test -= self.meanFace
        classes = self.model.predict_classes(X_test, batch_size=32, verbose=0)
        proba = self.model.predict_proba(X_test, batch_size=32, verbose=0)
        # pred_attr = mapAttributes((proba > 0.6)[0])
        # print( proba)
        # print(pred_attr)
        self.probStream = self.getProbaStream(self.probStream, proba)
        if self.saidNothing == 0 and self.probStream.shape[0] < 10:
            # Not enough frames accumulated yet: count and fetch the next frame.
            self.saidNothing += 1
            ret, frame = self.vid.read()
        elif self.probStream.shape[0] > 10 and len(self.probStream.shape) >= 2:
            # Average the accumulated probabilities and apply hand-tuned
            # per-attribute thresholds. Index meanings are assumed from the
            # surrounding code (0 black / 1 blond / 2 brown hair, 3 glasses,
            # 5 male, 8 smiling, 9/10 straight/wavy, 11 earrings,
            # 12 lipstick) — TODO confirm against the attributes file.
            meanProbs = np.mean(self.probStream, axis=0)
            pred_attr = self.mapAttributes(meanProbs > 0.6)
            best = []
            if meanProbs[0] > meanProbs[1] and meanProbs[0] > meanProbs[4] and meanProbs[0] > meanProbs[2]:
                best.append('Black_Hair')
            elif meanProbs[1] > meanProbs[0] and meanProbs[1] > meanProbs[4] and meanProbs[1] > meanProbs[2]:
                best.append('Blond_Hair')
            elif meanProbs[2] > meanProbs[0] and meanProbs[2] > meanProbs[1]:
                best.append('Brown_Hair')
            if meanProbs[9] < meanProbs[10]:
                best.append('Straight_Hair')
            else:
                best.append('Wavy_Hair')
            if meanProbs[3] > 0.6:
                best.append('Eyeglasses')
            if meanProbs[8] > 0.6:
                best.append('Smiling')
            if meanProbs[11] > 0.2:
                best.append('Wearing_Earrings')
            if meanProbs[12] > 0.2:
                best.append('Wearing_Lipstick')
            if meanProbs[5] < 0.25:
                best.append('Female')
            elif meanProbs[12] < 0.11 and meanProbs[11] < 0.11 and meanProbs[5] > 0.85:
                best.append('Male')
            print(meanProbs)
            print("BEST", best)
            # end NN stuff
            # postprocessing and reaction step
            self.sayDoSomething(best)
            self.saidNothing = 0
            #while self.flag.isSet():
            #    _, frame, ok = self.detectFace(frame)
            # self.probStream = None
            # ret, frame = self.vid.read()
    elif self.saidNothing > 150:
        # No usable face for a long stretch: act sad and prompt the user.
        self.saidNothing = 0
        self.roboFace.sad()
        if ok==False:
            # Tracker lost the face entirely: pick a random waiting phrase.
            index = np.random.randint(0, len(self.waiting_phrases))
            self.say(self.waiting_phrases[index], self.flag)
            #say("Hi! Is anybody here?", flag)
        elif ok==True:
            # Face tracked but the eyes were not usable for normalisation.
            # self.say("I cannot detect your eyes.", self.flag)
            self.say("Could you please open your eyes?", self.flag)
            # while self.flag.isSet():
            #     _, frame, ok = self.detectFace(frame)
            # self.probStream = None
            # ret, frame = self.vid.read()
        #if process == None:
        #    process = subprocess.Popen(['rhythmbox', 'creepyMusic.mp3'])
    else:
        self.saidNothing += 1
    # Re-Initialise Tracker after predetermined Tracking period 'Tracking_Period' (seconds)
    t2 = cv2.getTickCount()
    if (t2 - self.t1) / cv2.getTickFrequency() > self.Tracking_Period:
        ok, self.t1 = self.Re_Init_Tracker(frame)
def recog_faces(self, frame):
    """Detect all faces in `frame` with the Haar cascade and box them in blue.

    Draws directly on `frame` (modified in place); returns nothing.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
    for face in faces:
        (x, y, w, h) = face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
def show_caption(self, frame):
    """Overlay the last spoken phrase (self.caption) centred near the
    bottom edge of `frame`."""
    # Measure the rendered text width so it can be horizontally centred.
    textlen = cv2.getTextSize(self.caption, self.font, 1, 2)[0][0]
    posx, posy = int((self.width-textlen)/2), int(self.height-20)
    cv2.putText(frame, self.caption, (posx,posy), self.font, 0.75,(0,0,255),2)
class App:
    """Tkinter control panel around MyVideoCapture.

    Shows the camera feed on a canvas and a right-hand column of menu
    buttons that toggle the demo, face detection, face tracking, captions,
    fullscreen mode (PIN-protected) and quitting (PIN-protected). Toggle
    buttons exist in a red ("is off") and a green ("is on") variant that
    are shown/hidden as the state flips.
    """
    def __init__(self, window, window_title):
        self.window = window
        self.window.title(window_title)
        # self.vid = MyVideoCapture('testvideo.avi') # open video source
        self.vid = MyVideoCapture()
        # Create a canvas that can fit the above video source size
        self.canvas_width, self.canvas_height = 1024, 552 # reduced by the 2 pixels of menu bar
        # self.canvas_width, self.canvas_height = 1024, 640
        self.canvas = tk.Canvas(self.window, width = self.canvas_width, height = self.canvas_height)
        # self.canvas = tk.Canvas(self.window, width = self.vid.width, height = self.vid.height)
        self.canvas.pack()
        # values to accommodate for the title label
        self.lblxlenadd, self.lblylen = 10, 31 # 21 corresponds to height=1 in show_title_label
        # standard button configuration
        self.btnwidth, self.btnheight = 18, 3
        self.btnxlen, self.btnylen = 192, 60
        self.btnypos_list = [] # menu button y positions
        for i in range(7):
            self.btnypos_list.append(self.lblylen+self.btnylen*i)
        self.fonttype = font.Font(family="Helvetica", size=12, weight=font.BOLD)
        # Initialize ALL menu items here
        self.submenu_btnlist = None
        self.btn_demo_r, self.btn_demo_g = None, None # red and green buttons
        self.btn_recog_r, self.btn_recog_g = None, None
        self.btn_tracker_r, self.btn_tracker_g = None, None
        self.btn_face = None # make a roboface has a submenu, do not need red or green buttons
        self.btn_face_neutral, self.btn_face_happy, self.btn_face_unsure, self.btn_face_sad = None, None, None, None
        self.btn_face_angry, self.btn_face_angry2 = None, None
        self.btn_caption_r, self.btn_caption_g = None, None
        self.btn_mode_r, self.btn_mode_g, self.btn_mode_value = None, None, False
        self.btn_mode_pin, self.btn_mode_user = 1234, 0 # PIN number to toggle to fullscreen mode
        self.btn_mode_1, self.btn_mode_2, self.btn_mode_3 = None, None, None
        self.btn_mode_4, self.btn_mode_5, self.btn_mode_6, self.btn_mode_enter = None, None, None, None
        self.btn_quit = None
        self.btn_quit_pin, self.btn_quit_user = 1234, 0 # PIN number to quit
        self.btn_quit_1, self.btn_quit_2, self.btn_quit_3 = None, None, None
        self.btn_quit_4, self.btn_quit_5, self.btn_quit_6, self.btn_quit_enter = None, None, None, None
        self.btn_back = None
        # List main menu items here AFTER individual submenu items are declared above
        # red button object green button object button text method activated by btn
        self.mmenu = [[self.btn_demo_r, self.btn_demo_g, "Demo", self.set_demo_value],
                      [self.btn_recog_r, self.btn_recog_g, "Face Detection", self.set_recog_value],
                      [self.btn_tracker_r,self.btn_tracker_g,"Face Tracker", self.set_track_value],
                      [self.btn_face, None, "Make a RoboFace", self.roboface_submenu],
                      [self.btn_caption_r,self.btn_caption_g,"Caption", self.set_caption_value],
                      [self.btn_mode_r, self.btn_mode_g, "Fullscreen Mode", self.mode_submenu],
                      [self.btn_quit, None, "Quit", self.quit_submenu]]
        self.mainmenu()
        self.mmenu_btn_r, self.mmenu_btn_g, self.mmenu_txt, self.mmenu_cmd = self.build_list(self.mmenu)
        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 1
        self.update()
        self.window.mainloop()

    def build_list(self, menulist):
        """Split a menu spec ([red_btn, green_btn, text, command] rows) into
        four parallel lists: red buttons, green buttons, texts, commands."""
        btnlist_r, btnlist_g, txtlist, cmdlist = [], [], [], []
        for i in menulist:
            btnlist_r.append(i[0])
            btnlist_g.append(i[1])
            txtlist.append(i[2])
            cmdlist.append(i[3])
        return btnlist_r, btnlist_g, txtlist, cmdlist

    # This is the looping method that updates the canvas with image obtained from the
    # video class via self.vid.get_frame
    def update(self):
        ret, frame = self.vid.get_frame()
        if ret:
            self.photo = PIL.ImageTk.PhotoImage(PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image = self.photo, anchor = tk.NW)
        # Reschedule unconditionally so a dropped frame does not stop the loop.
        self.window.after(self.delay, self.update)

    # A title label is added at the top of the button list to distinguish btw main- and sub-menu
    def show_title_label(self, title):
        self.lbl_title = tk.Label(text=title, width=25, height=2, bg="yellow", anchor="w", font=self.fonttype)
        self.lbl_title.place(x=self.canvas_width-self.btnxlen, y=0)

    # "Back" button is only added when there is a submenu
    # it is added at the end of all submenu buttons, and is smaller, to distinguish it from others
    def add_back_button(self,submenu_btnlist):
        if self.btn_back is None:
            self.btn_back = tk.Button(self.window, text="Back", width=16, height=2,
                                      font=self.fonttype, command=self.back_roboface_submenu)
        self.btn_back.place(x=self.canvas_width-170, y=self.lblylen+self.btnylen*len(submenu_btnlist)+5)
        self.submenu_btnlist = submenu_btnlist

    # When toggling between main and submenu, we use this method to hide the other buttons
    def hide_buttons(self,btnlist):
        for i in btnlist:
            if i is not None:
                i.place_forget()

    # depending on the val parameter, we set the color and whether we want the red/green button
    # num1 or num2 = 0 means we want the red button (corresponding to the self.mmenu element)
    # num1 or num2 = 1 means green button
    # num1 is the button we want to show, num2 is the button we want to hide
    def button_val(self, val):
        # NOTE(review): a val outside {-1, 0, 1} leaves num1/num2/color
        # unbound and the return raises UnboundLocalError.
        if val == -1:
            num1 = 0; num2 = 0; color = "gray";
        elif val == 1:
            num1 = 1; num2 = 0; color = "green";
        elif val == 0:
            num1 = 0; num2 = 1; color = "red";
        return num1, num2, color

    # create a button
    def button(self, menu_list, i, val, txt=None):
        num1, num2, color = self.button_val(val)
        if txt is None: txt = menu_list[i][2]
        if val == 1: txt = txt + "\nis on";
        elif val == 0: txt = txt + "\nis off";
        # Lazily create the button variant the first time it is needed.
        if menu_list[i][num1] is None:
            menu_list[i][num1] = tk.Button(self.window, width=self.btnwidth, height=self.btnheight, #wraplength=150,
                                           fg="white", bg=color, activebackground=color, font=self.fonttype,
                                           text=txt, command=menu_list[i][3])
        menu_list[i][num1].place(x=self.canvas_width-self.btnxlen, y=self.btnypos_list[i])
        # For toggle buttons, hide the other-coloured variant.
        if color == "green" or color == "red":
            menu_list[i][num2].place_forget()

    # create all buttons in the menu list
    def menu_buttons(self, menu_list, submenu):
        for i in range(len(menu_list)):
            val = -1
            if submenu != 1:
                # Main menu: read the current on/off state for toggle rows.
                if i == 0: val = self.vid.ind_run_demo;
                elif i == 1: val = self.vid.ind_recog_face;
                elif i == 2: val = self.vid.ind_track_face;
                elif i == 4: val = self.vid.ind_caption;
                elif i == 5: val = self.btn_mode_value;
            num, _, color = self.button_val(val)
            if submenu == 1: color = "gray";
            txt = menu_list[i][2]
            if val == 1: txt = txt + "\nis on";
            elif val == 0: txt = txt + "\nis off";
            if menu_list[i][num] is None:
                menu_list[i][num] = tk.Button(self.window, width=self.btnwidth, height=self.btnheight, #wraplength=80,
                                              fg="white", bg=color, activebackground=color, font=self.fonttype,
                                              text=txt, command=menu_list[i][3])
            menu_list[i][num].place(x=self.canvas_width-self.btnxlen, y=self.btnypos_list[i])

    # create main menu buttons
    def mainmenu(self):
        self.show_title_label("Main Menu")
        if self.submenu_btnlist is not None:
            # Coming back from a submenu: remove its buttons first.
            self.btn_back.place_forget()
            self.hide_buttons(self.submenu_btnlist)
            self.submenu_btnlist = None
        self.menu_buttons(self.mmenu,0)

    # create "Make a Roboface" submenu
    def submenu(self, title_label, menu_list):
        self.show_title_label(title_label)
        self.hide_buttons(self.mmenu_btn_r)
        self.hide_buttons(self.mmenu_btn_g)
        self.menu_buttons(menu_list,1)
        btn_list, _, _, _ = self.build_list(menu_list)
        self.add_back_button(btn_list)

    def set_demo_value(self): # method tied to the "Demo" button
        self.vid.ind_run_demo = not self.vid.ind_run_demo
        if self.vid.ind_run_demo == 1:
            if self.vid.ind_track_face == 1: self.set_track_value() # if tracker running, turn it off
        elif self.vid.ind_run_demo == 0:
            self.vid.roboFace.neutral()
        self.button(self.mmenu, 0, self.vid.ind_run_demo)

    def set_recog_value(self): # method tied to the "Face Detection" button
        self.vid.ind_recog_face = not self.vid.ind_recog_face
        self.button(self.mmenu, 1, self.vid.ind_recog_face)

    def set_track_value(self): # method tied to the "Face Tracker" button
        self.vid.ind_track_face = not self.vid.ind_track_face
        if self.vid.ind_track_face == 1:
            if self.vid.ind_run_demo == 1: self.set_demo_value() # if demo running, turn it off
        self.button(self.mmenu, 2, self.vid.ind_track_face)

    def roboface_submenu(self): # method tied to the "Make a RoboFace" button
        # Expressions conflict with the automatic modes, so switch them all off.
        if self.vid.ind_run_demo == 1: self.set_demo_value() # if demo is on, turn it off
        if self.vid.ind_recog_face == 1: self.set_recog_value() # if recog is on, turn it off
        if self.vid.ind_track_face == 1: self.set_track_value() # if tracker running, turn it off
        if self.vid.ind_caption == 1: self.set_caption_value() # if caption is on, turn it off
        self.submenu("Make a RoboFace",
                     [[self.btn_face_neutral, None, "Neutral", self.vid.roboFace.neutral],
                      [self.btn_face_happy, None, "Happy", self.vid.roboFace.happy],
                      [self.btn_face_unsure, None, "Unsure", self.vid.roboFace.unsure],
                      [self.btn_face_sad, None, "Sad", self.vid.roboFace.sad],
                      [self.btn_face_angry2, None, "Angry (No Lips)", self.angry_nolips],
                      [self.btn_face_angry, None, "Angry", self.vid.roboFace.angry]])

    def angry_nolips(self):
        self.vid.roboFace.angry(movelips = False)

    def back_roboface_submenu(self): # method tied to the "Back" button in RoboFace submenu
        self.vid.roboFace.neutral()
        self.mainmenu()

    def set_caption_value(self): # method tied to the "Caption" button
        self.vid.ind_caption = not self.vid.ind_caption
        self.button(self.mmenu, 4, self.vid.ind_caption)

    def mode_submenu(self):
        # PIN pad for toggling fullscreen mode.
        self.submenu("Fullscreen PIN",
                     [[self.btn_mode_1, None, "1", self.set_mode_1],
                      [self.btn_mode_2, None, "2", self.set_mode_2],
                      [self.btn_mode_3, None, "3", self.set_mode_3],
                      [self.btn_mode_4, None, "4", self.set_mode_4],
                      [self.btn_mode_5, None, "5", self.set_mode_5],
                      [self.btn_mode_6, None, "6", self.set_mode_6],
                      [self.btn_mode_enter, None, "Enter", self.set_mode_enter]])

    # Each digit button adds a fixed amount; pressing 1-2-3-4 in order sums
    # to exactly 1234 (the PIN). NOTE(review): buttons 5 and 6 both add
    # 10000 and so can never be part of a valid entry — presumably decoys;
    # confirm intent.
    def set_mode_1(self): self.btn_mode_user += 1000
    def set_mode_2(self): self.btn_mode_user += 100
    def set_mode_3(self): self.btn_mode_user += 10
    def set_mode_4(self): self.btn_mode_user += 1
    def set_mode_5(self): self.btn_mode_user += 10000
    def set_mode_6(self): self.btn_mode_user += 10000

    def set_mode_enter(self):
        if self.btn_mode_pin == self.btn_mode_user:
            self.set_mode_value()
        self.btn_mode_user = 0 # reset user-entered pin
        self.mainmenu()

    def set_mode_value(self): # method tied to the "Fullscreen Mode' button
        self.btn_mode_value = not self.btn_mode_value
        self.window.attributes("-fullscreen", self.btn_mode_value)
        self.button(self.mmenu, 5, self.btn_mode_value)

    def quit_command(self): # method tied to "Quit" button
        self.vid.roboFace.neutral()
        self.window.destroy()

    def quit_submenu(self):
        # PIN pad for quitting; same scheme as the fullscreen PIN pad.
        self.submenu("Quit PIN",
                     [[self.btn_quit_1, None, "1", self.set_quit_1],
                      [self.btn_quit_2, None, "2", self.set_quit_2],
                      [self.btn_quit_3, None, "3", self.set_quit_3],
                      [self.btn_quit_4, None, "4", self.set_quit_4],
                      [self.btn_quit_5, None, "5", self.set_quit_5],
                      [self.btn_quit_6, None, "6", self.set_quit_6],
                      [self.btn_quit_enter, None, "Enter", self.set_quit_enter]])

    def set_quit_1(self): self.btn_quit_user += 1000
    def set_quit_2(self): self.btn_quit_user += 100
    def set_quit_3(self): self.btn_quit_user += 10
    def set_quit_4(self): self.btn_quit_user += 1
    def set_quit_5(self): self.btn_quit_user += 10000
    def set_quit_6(self): self.btn_quit_user += 10000

    def set_quit_enter(self):
        if self.btn_quit_pin == self.btn_quit_user:
            self.quit_command()
        else:
            self.btn_quit_user = 0 # reset user-entered pin
            self.mainmenu()
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":` guard.
App(tk.Tk(), "Webcam") # Create a window and pass it to the Application object
|
import datetime
import scrapy
from config import BASE_URL
# Snapshot of "now", taken once at import time; supplies default
# year/month/day values for the spider's date arguments.
TODAY = datetime.datetime.now()
class TGHeadlinesSpider(scrapy.Spider):
    """Scrape Guardian environment headlines.

    With no date arguments it crawls BASE_URL; otherwise it crawls the
    archive page for the requested day.
    """
    name = 'tg-headlines'
    allowed_domains = ['theguardian.com']

    def __init__(self, *args, **kwargs):
        super(TGHeadlinesSpider, self).__init__(*args, **kwargs)
        # scrapy stores -a arguments as spider attributes.
        date = getattr(self, 'date', False)
        if len(kwargs) <= 1 and not date:
            # No date-related arguments: scrape the default page.
            self.start_urls = [BASE_URL]
        else:
            if date:
                # Run with : scrapy runspider headlines.py -a date=2020-03-27
                # Run with : scrapy runspider headlines.py -a date=YYYY-MM-DD
                date = datetime.datetime.strptime(date, "%Y-%m-%d")
            else:
                # Run with : scrapy runspider headlines.py -a year=2020 -a month=2 -a day=2
                # Run with : scrapy runspider headlines.py -a year=2020 -a month=2 -a day=16
                # Missing parts default to today's year/month/day.
                date = datetime.datetime(
                    int(getattr(self, 'year', TODAY.year)),
                    int(getattr(self, 'month', TODAY.month)),
                    int(getattr(self, 'day', TODAY.day))
                )
            # Archive URLs use a lowercase 3-letter month, e.g. .../2020/mar/27/all
            self.start_urls = [f'https://www.theguardian.com/environment/{date.strftime("%Y")}/{date.strftime("%b").lower()}/{date.strftime("%d")}/all']

    def parse(self, response):
        """Yield one item per headline card: title, link, tag and image."""
        articles = response.css(".fc-item__container") # Extract the articles
        for article in articles: # Loop over the articles
            title = article.xpath('a/text()').extract()
            link = article.xpath('a/@href').extract()
            tag = article.xpath('.//span[@class="fc-item__kicker"]/text()').extract()
            image = article.xpath('.//source/@srcset').extract_first()
            yield {
                "title":title,
                "link":link,
                "tag":tag,
                "image":image
            }
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Profile(models.Model):
    """Preference profile used when matching people to seats."""
    username = models.CharField(max_length=200, default="username")
    # Yes/no/unknown preference flags (NULL = not answered).
    # NOTE(review): NullBooleanField is deprecated in modern Django in
    # favour of BooleanField(null=True); switching requires a migration.
    talk = models.NullBooleanField()
    sleep = models.NullBooleanField()
    drink = models.NullBooleanField()
    child = models.NullBooleanField()
class Seat(models.Model):
    """A numbered seat that may be assigned to at most one Profile."""
    number = models.IntegerField(default=0)
    profile = models.ForeignKey(
        'Profile',
        on_delete=models.SET_NULL,  # keep the seat when its profile is deleted
        null=True,
        blank=True
    )

    def getnum(self):
        """Return this seat's number.

        Bug fix: the original defined ``getnum()`` with no ``self``
        parameter, so ``seat.getnum()`` raised TypeError and the body's
        ``self.number`` referenced an undefined name.
        """
        return self.number
|
#!/bin/env python3
import datetime
from jobconfig import *
import raconfig
# First planning day and how many consecutive days to plan for.
DATA_INICIO = datetime.datetime(2018, 9, 19)
QUANTIDADE_DIAS_PLANEJAR = 7
class Bandeira:
    """Static MRP configuration for one retail banner (bandeira).

    Holds the banner code, its supplying plant codes, the MRP planner
    codes, and whether scheduling step 10 applies to it.
    """

    def __init__(self, bandeira, centros_fornecedores, planejadores_mrp, aplicaPasso10=True):
        # Plain read-only configuration bag: store everything verbatim.
        self.bandeira, self.centros_fornecedores = bandeira, centros_fornecedores
        self.planejadores_mrp, self.aplicaPasso10 = planejadores_mrp, aplicaPasso10
# Per-banner MRP configuration, keyed by SAP banner code.
# NOTE(review): the 4002 planner list contains a duplicated '50F' and the
# entry 'F01:F99' uses ':' where every other separator is ';' — confirm
# whether these are intentional.
BANDEIRAS = {
    '4002': Bandeira('4002',
                     centros_fornecedores='B607;B552'.split(';'),
                     planejadores_mrp='A02;01F;02F;05F;06F;07F;08F;09F;10F;11F;14F;17F;18F;20F;24F;25F;26F;29F;30F;31F;32F;35F;37F;40F;43F;44F;45F;48F;50F;50F;57F;61F;62F;67F;68F;80F;82F;83F;84F;F01:F99'.split(';')),
    '4000': Bandeira('4000',
                     centros_fornecedores='B001;B098;B184;B191;B289'.split(';'),
                     planejadores_mrp=['A02']),
    '4003': Bandeira('4003',
                     centros_fornecedores=['B703'],
                     planejadores_mrp=[],
                     aplicaPasso10=False),
}
def create_loja_from_line(line):
    """Parse one tab-separated store line into a Loja.

    Expected fields: banner code, store id, start time ("HH:MM:SS").
    The start time is applied to DATA_INICIO to form the store's first
    execution datetime.
    """
    fields = [part.strip() for part in line.split('\t')]
    bandeira_code, loja_id, horario = fields[0], fields[1], fields[2]
    hora, minuto, segundo = (int(piece) for piece in horario.split(':'))
    inicio = DATA_INICIO.replace(hour=hora, minute=minuto, second=segundo)
    return Loja(loja_id, BANDEIRAS[bandeira_code], inicio)
# Lazy map: Loja/BANDEIRAS are only resolved when this is consumed (in
# RAConfig.__init__), which makes the forward reference to the Loja class
# safe. NOTE(review): a map object can be iterated only once.
LOJAS = map(create_loja_from_line, raconfig.LOJAS.splitlines());
class Loja:
    """One store (loja): builds the SAP batch-job steps for its RA run."""
    # Period indicators used on the RMPROG00 forecast screens.
    SEMANAL = 'W'        # weekly
    DIARIO = 'T'         # daily
    PERIODO_ATUAL = 'A'  # current period

    def __init__(self, id, bandeira, data_inicio):
        self.id = id
        self.bandeira = bandeira
        self.data_inicio = data_inicio
        # Execution date as YYYYMMDD, used on every date screen field.
        self.DATA_EXECUCAO_FROM = data_inicio.strftime('%Y%m%d')

    def get_id(self):
        return self.id

    def get_filial(self):
        # Branch/plant category indicator passed to the RMPROG00 screens.
        return 'A'

    def get_tipos_mrp(self):
        return raconfig.TIPOS_MRP

    def criar_paso_1(self):
        # Step 1: cancel open purchase-requisition items for this store.
        step = Step('ZVPRG_ANULA_POS_SOL_PEDIDO', RAConfig.USUARIO)
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'SO_BADAT', self.DATA_EXECUCAO_FROM, 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('SO_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('PA_BSART', 'NB'))
        return step

    def criar_paso_2(self):
        # Step 2: RMPROG00 forecast run with the daily period indicator.
        step = Step('RMPROG00', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('WERK', self.get_id(), 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('VLFKZ', self.get_filial()))
        step.add_screen_item(ScreenItem('PERKZ', Loja.DIARIO))
        step.add_screen_item(ScreenItem('PRDUR', Loja.PERIODO_ATUAL))
        step.add_screen_item(ScreenItem('UPDTE', 'X'))
        step.add_screen_item(ScreenItem('PROTO', ' '))
        return step

    def criar_paso_3(self):
        # Step 3: same forecast run as step 2, but weekly.
        step = Step('RMPROG00', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('WERK', self.get_id(), 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('VLFKZ', self.get_filial()))
        step.add_screen_item(ScreenItem('PERKZ', Loja.SEMANAL))
        step.add_screen_item(ScreenItem('PRDUR', Loja.PERIODO_ATUAL))
        step.add_screen_item(ScreenItem('UPDTE', 'X'))
        step.add_screen_item(ScreenItem('PROTO', ' '))
        return step

    def criar_paso_4(self):
        # Step 4: MRP planning run (RMMRP000) for the store.
        step = Step('RMMRP000', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('WERKS', self.get_id()))
        step.add_screen_item(ScreenItem('VERSL', 'NETCH'))
        step.add_screen_item(ScreenItem('BANER', '1'))
        step.add_screen_item(ScreenItem('LIFKZ', '1'))
        step.add_screen_item(ScreenItem('DISER', '1'))
        step.add_screen_item(ScreenItem('PLMOD', '3'))
        step.add_screen_item(ScreenItem('TRMPL', '1'))
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'DISPD', self.DATA_EXECUCAO_FROM))
        step.add_screen_item(ScreenItem('PARAL', 'X'))
        return step

    def criar_paso_5(self):
        # Step 5: replenishment planning (RWRPLPRO).
        step = Step('RWRPLPRO', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('CUSTRNGE', self.get_id(), 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('G_ATP', 'X'))
        step.add_screen_item(ScreenItem('G_FCAST', 'X'))
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'G_DAY', self.DATA_EXECUCAO_FROM))
        step.add_screen_item(ScreenItem('G_PLHOR', '5'))
        step.add_screen_item(ScreenItem('G_NETCH', ' '))
        step.add_screen_item(ScreenItem('G_PKT', '10000'))
        step.add_screen_item(ScreenItem('G_TOL', '5'))
        return step

    def criar_paso_6(self):
        # Step 6: generate the ZRIS cache for this store/date.
        step = Step('ZPRG_CAPI_GERA_CACHE_ZRIS', RAConfig.USUARIO)
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'S_BADAT', self.DATA_EXECUCAO_FROM, 'S','I','EQ'))
        step.add_screen_item(ScreenItem('S_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        return step

    def criar_paso_7(self):
        # Step 7: LSMW conversion, filtered by the store's MRP types.
        step = Step('/1CADMC/SAP_LSMW_CONV_00000794', RAConfig.USUARIO)
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'P_FECHA', self.DATA_EXECUCAO_FROM))
        step.add_screen_item(ScreenItem('S_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        for tipo_mrp in self.get_tipos_mrp():
            step.add_screen_item(ScreenItem('S_DISMM', tipo_mrp, 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('P_FILEPT', 'X'))
        step.add_screen_item(ScreenItem('P_TRFCPT', ' '))
        step.add_screen_item(ScreenItem('P_PACKGE', '50'))
        return step

    def criar_paso_8(self):
        # Step 8: order validation (ZVPRG_VALPED).
        # NOTE(review): built here but never added in criar_job_ra — confirm
        # whether skipping it is intentional.
        step = Step('ZVPRG_VALPED', RAConfig.USUARIO)
        step.add_screen_item(ScreenItemDeltaDate(datetime.timedelta(days=1), 'S_BADAT', self.DATA_EXECUCAO_FROM, 'S','I','EQ'))
        step.add_screen_item(ScreenItem('S_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        return step

    def criar_paso_9(self):
        # Step 9: convert requisitions to purchase orders (RM06BB30),
        # restricted to the banner's supplying plants and MRP planners.
        step = Step('RM06BB30', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('S_EKORG', '4005', 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('S_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        for centro_fornecedor in self.bandeira.centros_fornecedores:
            step.add_screen_item(ScreenItem('S_RESWK', centro_fornecedor, 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('P_GEKGRP', 'X'))
        step.add_screen_item(ScreenItem('P_GLFDAT', 'X'))
        step.add_screen_item(ScreenItem('P_GWERKS', 'X'))
        step.add_screen_item(ScreenItem('P_GLTSNR', 'X'))
        step.add_screen_item(ScreenItem('P_GLGORT', 'X'))
        step.add_screen_item(ScreenItem('P_GBUKRS', 'X'))
        step.add_screen_item(ScreenItem('P_GKONNR', 'X'))
        step.add_screen_item(ScreenItem('P_DETPRO', '2'))
        step.add_screen_item(ScreenItem('P_POSERR', 'X'))
        step.add_screen_item(ScreenItem('P_SEBAKZ', '2'))
        for planejador_mrp in self.bandeira.planejadores_mrp:
            step.add_screen_item(ScreenItem('S_DISPO', planejador_mrp, 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('P_VRTYPK', 'X'))
        return step

    def criar_paso_10(self):
        # Step 10: same RM06BB30 run as step 9 but WITHOUT the MRP-planner
        # restriction; only applied for banners with aplicaPasso10 set.
        step = Step('RM06BB30', RAConfig.USUARIO)
        step.add_screen_item(ScreenItem('S_EKORG', '4005', 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('S_WERKS', self.get_id(), 'S', 'I', 'EQ'))
        for centro_fornecedor in self.bandeira.centros_fornecedores:
            step.add_screen_item(ScreenItem('S_RESWK', centro_fornecedor, 'S', 'I', 'EQ'))
        step.add_screen_item(ScreenItem('P_GEKGRP', 'X'))
        step.add_screen_item(ScreenItem('P_GLFDAT', 'X'))
        step.add_screen_item(ScreenItem('P_GWERKS', 'X'))
        step.add_screen_item(ScreenItem('P_GLTSNR', 'X'))
        step.add_screen_item(ScreenItem('P_GLGORT', 'X'))
        step.add_screen_item(ScreenItem('P_GBUKRS', 'X'))
        step.add_screen_item(ScreenItem('P_GKONNR', 'X'))
        step.add_screen_item(ScreenItem('P_DETPRO', '2'))
        step.add_screen_item(ScreenItem('P_POSERR', 'X'))
        step.add_screen_item(ScreenItem('P_SEBAKZ', '2'))
        step.add_screen_item(ScreenItem('P_VRTYPK', 'X'))
        return step

    def aplicaPasso10(self):
        # Whether step 10 runs is configured per banner.
        return self.bandeira.aplicaPasso10

    def criar_job_ra(self):
        """Assemble the full RA job for this store.

        NOTE(review): the sequence jumps from paso 7 to paso 9; paso 8 is
        defined above but never scheduled — confirm intent.
        """
        job = Job('CORRIDA_RA_%s' % self.get_id(), start_datetime=self.data_inicio)
        job.add_step(self.criar_paso_1())
        job.add_step(self.criar_paso_2())
        job.add_step(self.criar_paso_3())
        job.add_step(self.criar_paso_4())
        job.add_step(self.criar_paso_5())
        job.add_step(self.criar_paso_6())
        job.add_step(self.criar_paso_7())
        job.add_step(self.criar_paso_9())
        if self.aplicaPasso10():
            job.add_step(self.criar_paso_10())
        return job
class RAConfig(Config):
    """Job schedule: one RA job per store per day, repeated daily until the
    planning horizon (QUANTIDADE_DIAS_PLANEJAR days) is covered."""
    USUARIO = 'BDC_RETAIL'  # batch user every SAP step runs under

    def __init__(self):
        super(RAConfig, self).__init__()
        EXECUTION_INTERVAL = 24*60*60  # one day, in seconds
        # Last planned day as YYYYMMDD (string comparison works for this format).
        DATA_EXECUCAO_TO = (DATA_INICIO + datetime.timedelta(days=(QUANTIDADE_DIAS_PLANEJAR-1))).strftime('%Y%m%d')
        jobs = []
        for loja in LOJAS:  # consumes the single-use lazy LOJAS map
            job = loja.criar_job_ra()
            jobs.append(job)
        # Register each day's batch, then shift every job forward one day,
        # until the first job's SO_BADAT date passes the planning horizon.
        while (jobs[0].steps[0].get_screen_item('SO_BADAT').low <= DATA_EXECUCAO_TO):
            for job in jobs:
                self.add_job(job)
            jobs = [ job.next(ExecutionInterval(datetime.timedelta(seconds=(EXECUTION_INTERVAL)))) for job in jobs ]
# Build the full schedule and write it out as an Excel file.
c = RAConfig()
c.save('./ra.xls')
|
from wcontrol.conf.config import BMI, BFP, MUSCLE, VISCERAL
class results(object):
def __init__(self, control, gender):
self.weight = ''
self.bmi = self.get_bmi(control.bmi)
self.fat = self.get_fat(control.fat, gender)
self.muscle = self.get_muscle(control.muscle, gender)
self.rmr = ''
self.visceral = self.get_visceral(control.visceral)
self.bodyage = ''
def get_bmi(self, bmi):
if not bmi:
return '-'
for limit, msg in BMI:
if bmi <= limit:
return msg
def get_fat(self, fat, gender):
if not fat:
return '-'
for limit_w, limit_m, msg in BFP:
if gender == 'Female' and fat <= limit_w:
return msg
if gender == 'Male' and fat <= limit_m:
return msg
def get_muscle(self, muscle, gender):
if not muscle:
return '-'
for limit_w, limit_m, msg in MUSCLE:
if gender == 'Female' and muscle <= limit_w:
return msg
if gender == 'Male' and muscle <= limit_m:
return msg
def get_visceral(self, visceral):
if not visceral:
return '-'
for limit, msg in VISCERAL:
if visceral <= limit:
return msg
def __getitem__(self, index):
if index == 0:
return self.weight
if index == 1:
return self.bmi
if index == 2:
return self.fat
if index == 3:
return self.muscle
if index == 4:
return self.visceral
if index == 5:
return self.rmr
if index == 6:
return self.bodyage
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Userpune(models.Manager):
    """Manager that narrows Userprofile queries to city='Apune'."""
    def get_queryset(self):
        # NOTE(review): 'Apune' looks like it may be a typo for 'Pune';
        # confirm against the stored data before changing the filter.
        return super(Userpune, self).get_queryset().filter(city='Apune')
class Userprofile(models.Model):
    """Extra per-user data attached 1:1 to Django's auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    address = models.CharField(max_length=100)
    city = models.CharField(max_length=100)
    # NOTE(review): IntegerField cannot keep leading zeros or '+' prefixes;
    # CharField is the usual choice for phone numbers (schema change needed).
    phone = models.IntegerField()
    image = models.ImageField(upload_to="profile_image", blank=True)

    def __str__(self):
        # Shown in the admin: delegate to the linked auth username.
        return self.user.username
|
# Ask the user how many days they worked, then print the classic
# doubling-penny salary: start at half a cent and double it every day.
# Bug fix: input() returns a str in Python 3, so the original
# range(1, ndays + 1) raised TypeError — convert to int first.
ndays = int(input("Enter the number of days worked: "))
pay = 0.005
for day in range(1, ndays + 1):
    pay *= 2.0
    print("Day", day, "Salary", pay)
print("Your salary on day", ndays, "would be $%.2f" % pay)
|
from django.conf.urls import url
from django.urls import path, include
from apps.scoreboard import views
app_name = "scoreboard"  # URL namespace, e.g. {% url "scoreboard:scoreboard" %}
urlpatterns = [
    # /<event_id>/ -> scoreboard for one event; / -> the default scoreboard.
    url(r'^(?P<event_id>\d+)/$', views.scoreboard, name='scoreboard'),
    url(r'^$', views.scoreboard, name='scoreboard'),
]
|
# -*- coding: utf-8 -*-
import json
import urllib
import base64
import logging
import hashlib
import words
from Crypto.Signature import PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Hash import MD5
from tornado.httpclient import HTTPRequest
from util_tools import gen_order_id
from model.order_model import ThirdPayOrders
from model.table_base import Wanted
from functools import partial
from pay.ipay import PayInterface
from pay.sun_pay import SunPay
from pay.fanwei_pay import FanWeiPay
from pay.zhuoyue_pay import ZhuoYuePay
from pay.yuechang_pay import YueChangPay
from pay.weiyun import WeiYunPay
from pay.zhongzhi import ZhongZhiPay
from pay.beiai import BeiAiPay
from pay.maiguang import MaiGuangPay
from pay.duoyile import DuoYiLePay
class FaFaPay(object):
    """Signature helper for the FaFa payment channel.

    Configured from the 'FF' section of the pay-config JSON; get_sign
    reproduces the gateway's MD5 notify signature.
    """

    def __init__(self, obj):
        # The FaFa settings live under the top-level 'FF' key.
        conf = obj['FF']
        self.mch_no = conf['mch_no']
        self.key = conf['key']

    def get_sign(self, err, msg, mch_id, order_no, trans_no, attach, price, ts):
        """Return the MD5 hex digest over the notify fields plus the secret key.

        `msg` and `attach` may be None and are treated as empty strings.
        """
        payload = ''.join((err, msg or '', mch_id, order_no, trans_no,
                           attach or '', price, ts, self.key))
        return hashlib.md5(payload.encode('utf-8')).hexdigest()
class WQBPay(PayInterface):
    """WeiQianBao payment channel.

    Builds signed prepay redirect URLs and verifies asynchronous notify
    callbacks. NOTE(review): build_prepay_request hashes a plain str
    (no .encode), which only works on Python 2 — consistent with the
    urllib.urlencode usage.
    """

    def __init__(self, obj, notify_url, dd_conf):
        # obj is this channel's section of the pay-config JSON.
        self.mch_no = obj['mch_no']
        self.key = obj['key']
        self.prepay_url = obj['prepay_url']
        super(WQBPay, self).__init__(notify_url, dd_conf)
        self.notify_url = '{}/{}'.format(self.notify_url, words.WeiQianBao)
        self.success_text = 'ok'  # body the gateway expects when a notify is accepted

    def process_prepay(self, uid, cid, price, pkg, pid, ext_params, callback=None):
        """Create a local order and return the prepay payload (via callback if given)."""
        oid = gen_order_id(words.WeiQianBao)
        self.save_order(uid, oid, cid, pkg, price, words.WeiQianBao, product=pid)
        # NOTE(review): ext_params is forwarded as build_prepay_request's
        # pay_type argument — confirm that is what callers pass.
        out = self.make_prepay_data(oid, '', self.build_prepay_request(price, oid, ext_params))
        if callback:
            callback(out)
        else:
            return out

    def build_prepay_request(self, price, order_no, pay_type):
        """Build the signed GET URL for the gateway's prepay endpoint.

        `price` arrives in cents and is formatted as a yuan string; the
        sign covers only mchno|money|key.
        """
        price = '{:.2f}'.format(price / 100.0)
        sign = hashlib.md5('{}|{}|{}'.format(self.mch_no, price, self.key)).hexdigest()
        return '{}?{}'.format(self.prepay_url, urllib.urlencode({
            'mchno': self.mch_no,
            'money': price,
            'orderno': order_no,
            'payType': pay_type,
            'notifyUrl': self.notify_url,
            'sign': sign
        }))

    def process_notify(self, handler, callback):
        """Handle the gateway's async notify: verify the sign, then mark the order paid."""
        result = handler.get_argument('result')
        price = handler.get_argument('money')
        pay_mch_no = handler.get_argument('paymentno')
        order_no = handler.get_argument('orderno')
        paytime = handler.get_argument('paytime')
        pay_type = handler.get_argument('payType')
        sign = handler.get_argument('sign2')
        if result != '1':
            # Not a successful payment: acknowledge so the gateway stops retrying.
            callback(self.success_text)
        elif sign != self.get_sign(price, pay_mch_no):
            logging.info('invalid sign, expect:[{}], actual[{}]'.format(self.get_sign(price, pay_mch_no), sign))
            callback('fail')
        else:
            # Mark the matching order as paid, then finish via on_update_over.
            order = ThirdPayOrders()
            order.order_id = order_no
            order.success = Wanted
            order.channel = Wanted
            order.update_from_db(callback=partial(
                self.on_update_over, order, (None, None, '{{"time":"{}"}}'.format(paytime)), callback))

    def get_sign(self, money, pay_mch_no):
        """MD5 over money|our mch_no|gateway payment no|key."""
        return hashlib.md5('|'.join((money, self.mch_no, pay_mch_no, self.key)).encode('utf-8')).hexdigest()
class PayFactory(object):
    """Registry and dispatcher for all third-party payment channels.

    Loads per-channel credentials from a JSON config file, instantiates each
    channel handler, and routes prepay/notify calls by channel tag.
    SunPay's RSA signing helpers live here as well.
    """

    def __init__(self, conf_file, domain, dd_conf):
        with open(conf_file) as f:
            obj = json.load(f)
        self._init_sun_pay_(obj)
        # FaFaPay is kept outside inst_map; it only provides signature helpers.
        self.fafa = FaFaPay(obj)
        self.inst_map = {
            words.WeiQianBao: WQBPay(obj[words.WeiQianBao], domain, dd_conf),
            words.SunPayTag: SunPay(obj[words.SunPayTag], domain, dd_conf),
            words.FanWeiPayTag: FanWeiPay(obj[words.FanWeiPayTag], domain, dd_conf),
            words.ZhuoYuePayTag: ZhuoYuePay(obj[words.ZhuoYuePayTag], domain, dd_conf),
            words.YueChangPayTag: YueChangPay(obj[words.YueChangPayTag], domain, dd_conf),
            words.WeiYunPayTag: WeiYunPay(obj[words.WeiYunPayTag], domain, dd_conf),
            words.ZhongZhiPayTag: ZhongZhiPay(obj[words.ZhongZhiPayTag], domain, dd_conf),
            words.BeiAiPayTag: BeiAiPay(obj[words.BeiAiPayTag], domain, dd_conf),
            words.MaiGuangTag: MaiGuangPay(obj[words.MaiGuangTag], domain, dd_conf),
            words.DuoYiLeTag: DuoYiLePay(obj[words.DuoYiLeTag], domain, dd_conf)
        }

    def process_prepay(self, sdk, uid, cid, price, pkg, pid, ext_params, callback):
        """Dispatch a prepay request to channel ``sdk``.

        Returns False when the channel is unknown, True otherwise.
        """
        if sdk not in self.inst_map:
            return False
        self.inst_map[sdk].process_prepay(uid, cid, price, pkg, pid, ext_params, callback)
        return True

    def process_notify(self, handler, sdk, callback):
        """Dispatch an asynchronous notification to channel ``sdk``;
        answers 'fail' for unknown channels."""
        if sdk in self.inst_map:
            self.inst_map[sdk].process_notify(handler, callback)
        else:
            logging.warning('API [{}] not exist.'.format(sdk))
            callback('fail')

    def _init_sun_pay_(self, obj):
        # RSA keypair for signing requests / verifying gateway responses.
        self.sun_signer = PKCS1_v1_5.new(RSA.importKey(obj['SUN']['private_key']))
        self.sun_verifier = PKCS1_v1_5.new(RSA.importKey(obj['SUN']['public_key']))
        self.sun_mch_no = obj['SUN']['mch_no']
        self.sun_df_pay = obj['SUN']['default_pay_type']
        self.sun_prepay = obj['SUN']['prepay_url']
        self.sun_query = obj['SUN']['query_url']

    def build_sun_prepay_request(self, order_id, product, product_name, price, pay_type, notify_url, ext_data=''):
        """Build the signed POST prepay request for SunPay.

        ``price`` is in cents and sent as a two-decimal yuan string; the sign
        is RSA-over-MD5 of the '|'-joined values.
        NOTE(review): ``vals[i].encode('utf-8')`` assumes Python 2 str
        semantics (the appended b64 sign is bytes on Python 3) — confirm
        before porting.
        """
        keys = (
            'mchNo', 'mchOrderNo', 'productId', 'productName', 'price', 'payType',
            'returnUrl', 'notifyUrl', 'mark', 'sign')
        vals = [
            self.sun_mch_no, order_id, product, product_name, '{:.2f}'.format(price / 100.0),
            pay_type or self.sun_df_pay, "", notify_url, ext_data
        ]
        str_to_sign = '|'.join(vals)
        sign = base64.b64encode(self.sun_signer.sign(MD5.new(str_to_sign.encode('utf-8'))))
        vals.append(sign)
        query = {}
        for i, key in enumerate(keys):
            query[key] = vals[i].encode('utf-8')
        logging.info('curl -X POST "{}" -d \'{}\''.format(self.sun_prepay, urllib.urlencode(query)))
        return HTTPRequest(self.sun_prepay, method='POST', body=urllib.urlencode(query))

    def check_sun_sign(self, pay_type, order_id, trans_id, price, end_time, mark, result, src_sign):
        """Verify the RSA-MD5 signature on a SunPay notification."""
        str_2_sign = '|'.join((pay_type, order_id, trans_id, price, end_time, mark, result))
        # BUGFIX: encode before hashing, consistent with build_sun_prepay_request
        # (MD5.new requires bytes on Python 3).
        return self.sun_verifier.verify(MD5.new(str_2_sign.encode('utf-8')), base64.b64decode(src_sign))

    def build_sun_query_request(self, order_id):
        """Build the signed POST order-status query for SunPay."""
        s = '{}|{}'.format(self.sun_mch_no, order_id)
        # BUGFIX: encode before hashing, consistent with build_sun_prepay_request.
        sign = base64.b64encode(self.sun_signer.sign(MD5.new(s.encode('utf-8'))))
        param_str = urllib.urlencode({
            'mchNo': self.sun_mch_no,
            'mchOrderNo': order_id,
            'sign': sign
        })
        logging.info('query to SunServer:[curl -X POST "{}" -d \'{}\']'.format(self.sun_query, param_str))
        return HTTPRequest(self.sun_query, method='POST', body=param_str)
|
from acp_times import open_time
from acp_times import close_time
import arrow
# Shared start timestamp used by every control-time test below.
start = arrow.get("2017-01-01T00:00")
# invalid input
# def test_open_bad_time():
assert (open_time(0, 1200, start) is None)
# def test_close_bad_time():
# NOTE(review): calls open_time although the commented-out name says close_time — confirm intent.
assert (open_time(0, 1500, start) is None)
# def test_open_control_greater():
assert (open_time(500, 200, start) is None)
# def test_open_control_nagative():
assert (open_time(-100, 200, start) is None)
# def test_close_control_greater():
# NOTE(review): also calls open_time despite the close-named comment — confirm intent.
assert (open_time(250, 200, start) is None)
# NOTE(review): the bare asserts above run at import time; the original test
# function wrappers were commented out — confirm that is deliberate.
def test_close_nagative():
    assert (close_time(-100, 200, start) is None)
# NOTE(review): the check_* functions below are not collected by pytest
# (names do not start with "test_"), so they never run under the test runner.
# NOTE(review): arrow removed plural-unit replace() (hours=...) in newer
# releases in favour of shift() — these calls assume an old arrow version.
def check_200():
    assert (close_time(200, 200, start) == start.replace(hours=+ 13, minutes=+30))
def check_300():
    assert (close_time(300, 300, start) == start.replace(hours=+ 20, ))
def check_400():
    assert (close_time(400, 400, start) == start.replace(hours=+ 27))
def check_600():
    assert (close_time(600, 600, start) == start.replace(hours=+ 40))
def check_1000():
    assert (close_time(1000, 1000, start) == start.replace(hours=+ 75))
def check_0():
    assert (close_time(0, 400, start) == start.replace(hours=+ 1))
# A control up to 10% past the nominal distance opens/closes like the nominal distance.
def test_open_ten():
    assert (open_time(220, 200, start) == open_time(200, 200, start))
def test_close_ten():
    assert (close_time(220, 200, start) == close_time(200, 200, start))
|
import pandas as pd
import numpy as np
import re
# Fixed parameters
# Base stake
base = 1
# Fan (doubling) value of each scoring pattern
fan_amt = {'门清': 1, '自摸': 1, '庄': 1, '七对': 1, '一条龙': 1, '没混': 1, '海底捞': 1, '海捞': 1,
           '杠开花': 1, '杠开': 1, '杠上开花': 1, '捉五魁': 1, '捉五': 1, '混一色': 1, '混清': 1,
           '豪华七对': 2, '豪七': 2, '本混龙': 2, '十三幺': 3, '双豪七': 3, '清一色': 3, '风一色': 4, '三豪七': 4}
# Out-of-pot payouts (kept for reference)
# money_amt_out = {'四混': 40, '天胡': 80, '地胡': 80, '炸胡': -30, '跟一轮': 10}
# In-pot payouts (kept for reference)
# money_amt_in = {'明杠': 2, '暗杠': 4}
# Settlement function
def Mahjong_settle(hu_type, base = 2):
    """Settle a winning mahjong hand among four players.

    Parameters
    ----------
    hu_type : iterable of str
        Attributes of the winning hand. Entries found in ``fan_amt`` count
        toward the fan total; '庄' marks a dealer win and '楼上' doubles the
        whole settlement. Anything else is ignored.
    base : int, optional
        Base stake (signature default of 2 kept for compatibility).

    Returns
    -------
    list of dict
        Winner gain, dealer payout, non-dealer payout, and the matched
        pattern string (role/limit pairs, preserved from the original API).
    """
    fan_types = list(set(fan_amt).intersection(set(hu_type)))
    total_fan = np.sum([name and fan_amt[name] for name in fan_types])
    is_zhuang = list(set(['庄']).intersection(set(hu_type)))
    is_upstairs = list(set(['楼上']).intersection(set(hu_type)))
    upstairs_mult = 2 ** len(is_upstairs)
    other_out = zhuang_out = 0
    if len(is_zhuang) != 0:
        # Dealer wins: each of the three other players pays the same share.
        other_out = -2 ** total_fan / 2 * upstairs_mult * base
        win = 2 ** total_fan * 3 / 2 * upstairs_mult * base
    else:
        # Non-dealer wins: the dealer pays double the ordinary share.
        other_out = -2 ** total_fan / 2 * upstairs_mult * base
        zhuang_out = -2 * 2 ** total_fan / 2 * upstairs_mult * base
        win = 2 * 2 ** total_fan * upstairs_mult * base
    if is_upstairs:
        # BUGFIX: only append when present — the original appended '' and
        # produced a trailing comma in the pattern string.
        fan_types.append(",".join(is_upstairs))
    pattern = ",".join(fan_types)
    return [{"role": '赢家', "limit": win},
            {"role": '庄家', "limit": zhuang_out},
            {"role": '非庄家', "limit": other_out},
            {"role": '牌型', "limit": pattern}]
if __name__ == "__main__":
    # Input parameters (manual smoke tests; results are discarded — no print).
    # Whether the dealer won the hand
    is_zhuang = False
    # Whether the "upstairs" doubling applies
    is_upstairs = False
    # Winning-hand attributes
    # NOTE(review): the string forms below are immediately overwritten and the
    # final calls all use lists; passing a plain string would intersect
    # per-character, matching only single-character keys like '庄' — confirm
    # the string variants were abandoned deliberately.
    hu_type = "明杠,本混龙, 明杠,暗杠, 123 ,楼上,庄"
    hu_type = "本混龙"
    hu_type = "门清,没混,捉五魁,一条龙"
    Mahjong_settle(hu_type)
    hu_type = "门清,没混,捉五魁,一条龙,庄"
    hu_type = ["门清","没混","捉五魁","一条龙","庄"]
    Mahjong_settle(hu_type)
    hu_type = ["门清","没混","捉五魁","一条龙","庄","楼上"]
    Mahjong_settle(hu_type, base = 2)
    hu_type = ["门清","没混","捉五魁","一条龙","庄"]
    Mahjong_settle(hu_type)
    hu_type = ["门清","没混","捉五魁","一条龙"]
    Mahjong_settle(hu_type)
    hu_type = ["门清","没混","捉五魁","一条龙","楼上"]
    Mahjong_settle(hu_type)
|
def get_f1(prec, recall):
    """Return the F1 score (harmonic mean of precision and recall).

    Defined as 0.0 when both inputs are zero, avoiding the
    ZeroDivisionError the original raised in that case.
    """
    if prec + recall == 0:
        return 0.0
    return 2.0 * prec * recall / (prec + recall)
# Read a k×k confusion matrix from stdin and print two aggregate F1 scores.
k = int(input())
cm = [[int(x) for x in input().split()] for _ in range(k)]
sums = [sum(line) for line in cm]  # row sums: per-class support (true counts)
all_sum = sum(sums)
micro_f, precW, recallW = 0, 0, 0
for idx, line in enumerate(cm):
    TP = line[idx]
    FN = sums[idx] - TP
    # FP = column sum minus the diagonal entry.
    FP = -TP
    for line2 in cm:
        FP += line2[idx]
    if TP == 0:
        continue  # class never predicted correctly contributes 0 to the averages
    prec = 1.0 * TP / (TP + FP)
    recall = 1.0 * TP / (TP + FN)
    f = get_f1(prec, recall)
    micro_f += sums[idx] * f / all_sum              # support-weighted mean of per-class F1
    precW += TP * sums[idx] / ((TP + FP) * all_sum)  # support-weighted precision
    recallW += TP / all_sum                          # equals overall accuracy here
# F1 of the weighted precision/recall.
# NOTE(review): the micro/macro naming looks swapped relative to the usual
# convention — confirm against the task statement before renaming anything.
macro_f = get_f1(precW, recallW)
print(macro_f, micro_f, sep='\n')
|
import settings
import game
def main():
    """Build the configuration, construct the game, and run its main loop."""
    cfg = settings.Settings()
    g = game.Game(cfg)
    g.setup()
    g.main_loop()


if __name__ == "__main__":
    # Run only when executed as a script, not when imported.
    main()
|
"""A simple web server that accepts POSTS containing a list of feed urls,
and returns the titles of those feeds.
"""
import eventlet
feedparser = eventlet.import_patched('feedparser')
# the pool provides a safety limit on our concurrency
pool = eventlet.GreenPool()
def fetch_title(url):
    """Fetch and parse the feed at *url*; return its title ('' when absent)."""
    parsed = feedparser.parse(url)
    return parsed.feed.get('title', '')
def app(environ, start_response):
    """WSGI app: read feed URLs (one per line) from a POST body and respond
    with the corresponding feed titles, one per line."""
    if environ['REQUEST_METHOD'] != 'POST':
        start_response('403 Forbidden', [])
        return []
    # A GreenPile bounded by the shared pool fans the fetches out concurrently;
    # iterating it yields each result as it completes.
    pile = eventlet.GreenPile(pool)
    urls = [line.strip() for line in environ['wsgi.input'].readlines()]
    for url in urls:
        if url:
            pile.spawn(fetch_title, url)
    body = '\n'.join(pile)
    start_response('200 OK', [('Content-type', 'text/plain')])
    return [body]
if __name__ == '__main__':
    from eventlet import wsgi
    # Serve forever on localhost:9010 when run as a script.
    wsgi.server(eventlet.listen(('localhost', 9010)), app)
|
from os import lstat
import sqlite3
import json
import codecs
# Export hospital coordinates from SQLite to a JavaScript data file.
conn = sqlite3.connect('hospitals.sqlite')
cur = conn.cursor()
cur.execute('SELECT * FROM Locations')
fhand = codecs.open('location.js', 'w', "utf-8")
fhand.write("const myData = [\n")
count = 0
for row in cur:
    # Column 0 is the name; column 1 holds "lat,lng" as text.
    data = str(row[1])
    loc = data.split(",")
    lat = float(loc[0])
    lng = float(loc[1])
    if lat == 0 or lng == 0:
        continue  # skip rows with no real geocode
    Name = row[0]
    try:
        print(Name, lat, lng)
        # BUGFIX: json.dumps escapes quotes/backslashes in the name so the
        # emitted JavaScript stays syntactically valid.
        output = ("{'Name': " + json.dumps(Name) + ",\n" +
                  "'Latitude': " + str(lat) + ",\n" +
                  "'Longitude': " + str(lng) + "}")
        # BUGFIX: write the separator and count only once the record is known
        # to serialize — the original bumped count before a write that could
        # fail, miscounting and leaving a dangling comma.
        if count > 0:
            fhand.write(",\n")
        fhand.write(output)
        count += 1
    except Exception:
        # Best effort: a row that cannot be printed/serialized is skipped.
        continue
fhand.write("\n];\n")
cur.close()
conn.close()  # BUGFIX: the connection was never closed
fhand.close()
print(count, "records written to location.js")
|
class Node:
    """Singly-linked-list node holding a payload and a next pointer."""
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list with O(1) head/tail insertion.

    Index-based methods accept 0..listLen; an index equal to listLen aliases
    the tail position for removal (the original API, kept for compatibility).
    All mutators return True on success and False on failure.
    """
    def __init__(self):
        self.head = None    # first node, or None when empty
        self.tail = None    # last node, or None when empty
        self.listLen = 0    # number of nodes

    def addToTail(self, data):
        """Append *data* at the end. O(1)."""
        self.listLen += 1
        if self.head is None:
            self.head = Node(data)
            self.tail = self.head
        else:
            self.tail.next = Node(data)
            self.tail = self.tail.next
        return True

    def addToHead(self, data):
        """Prepend *data* at the front. O(1)."""
        self.listLen += 1
        node = Node(data)
        if self.head is not None:
            node.next = self.head
        else:
            self.tail = node
        self.head = node
        return True

    def addToIndex(self, data, index):
        """Insert *data* so it ends up at position *index* (0..listLen)."""
        if index < 0 or index > self.listLen:
            return False
        if index == 0:
            return self.addToHead(data)
        if index == self.listLen:
            return self.addToTail(data)
        self.listLen += 1
        node = Node(data)
        currentNode = self.head
        for i in range(index-1):
            currentNode = currentNode.next
        node.next = currentNode.next
        currentNode.next = node
        return True

    def removeTail(self):
        """Remove the last node. O(n): must walk to the predecessor."""
        node = self.head
        if node is None:
            return False
        self.listLen -= 1
        if node.next is None:
            self.head = None
            self.tail = None
            return True
        while node.next.next is not None:
            node = node.next
        self.tail = node
        node.next = None
        return True

    def removeHead(self):
        """Remove the first node. O(1)."""
        if self.head is None:
            return False
        self.listLen -= 1
        self.head = self.head.next
        if self.head is None:
            self.tail = None
        return True

    def removeIndex(self, index):
        """Remove the node at *index* (index == listLen aliases the tail)."""
        if index < 0 or index > self.listLen:
            return False
        if index == 0:
            return self.removeHead()
        if index == self.listLen:
            return self.removeTail()
        node = self.head
        for i in range(index-1):
            node = node.next
        node.next = node.next.next
        # BUGFIX: the original never decremented listLen on this path and left
        # self.tail pointing at the removed node when it was the last one.
        if node.next is None:
            self.tail = node
        self.listLen -= 1
        return True

    def find(self, data):
        """Return the first node whose data equals *data*, else None."""
        node = self.head
        while node is not None:
            if node.data == data:
                return node
            node = node.next
        return None
# Exercise the LinkedList implementation defined above.
# BUGFIX: renamed the variable from `list`, which shadowed the builtin.
linked = LinkedList()
linked.addToTail('1')
linked.addToTail('2')
linked.addToTail('3')
linked.addToHead('4')
linked.addToIndex('5', 3)
linked.removeTail()
linked.removeHead()
linked.removeIndex(3)
|
# Tuple tutorial: indexing, slicing, membership, and (im)mutability.
thistuple = ("apple", "banana", "cherry")
print(thistuple[1])
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[2:5])
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[:4])
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[2:])
thistuple = ("apple", "banana", "cherry", "orange", "kiwi", "melon", "mango")
print(thistuple[-4:-1])
thistuple = ("apple", "banana", "cherry")
if "apple" in thistuple:
    print("Yes, 'apple' is in the fruits tuple")
# Tuples are immutable; to "change" one, convert to a list and back.
x = ("apple", "banana", "cherry")
y = list(x)
y[1] = "kiwi"
x = tuple(y)
print(x)
thistuple = ("apple", "banana", "cherry")
# BUGFIX: the unguarded append() raised AttributeError and aborted the script,
# so none of the remaining examples ever ran. Demonstrate the error safely.
try:
    thistuple.append("orange")  # tuples have no append(): this raises
except AttributeError as err:
    print("Error:", err)
print(thistuple)
#YOU ONLY CAN DO THIS THING:
thistuple = ("apple", "banana", "cherry")
y = list(thistuple)
y.append("orange")
thistuple = tuple(y)
thistuple = ("apple", "banana", "cherry")
y = list(thistuple)
y.remove("apple")
thistuple = tuple(y)
'''
thistuple = ("apple", "banana", "cherry")
del thistuple
print(thistuple) #this will raise an error because the tuple no longer exists
'''
# Adjacency-set graph used by the demos below (each edge appears in both directions).
graph = {'A': set(['B', 'C']),'B': set(['A', 'D', 'E']),'C': set(['A', 'F']),'D': set(['B']),'E': set(['B', 'F']),'F': set(['C', 'E'])}
# first implementation
def dfs(graph, start):
    """Iterative depth-first traversal; returns the set of reachable vertices."""
    seen = set()
    pending = [start]
    while pending:
        node = pending.pop()
        if node in seen:
            continue
        seen.add(node)
        # Only push neighbours not yet visited.
        pending.extend(graph[node] - seen)
    return seen
# Demo: traverse the module-level graph from 'A'.
print("First Implementation: ")
print(dfs(graph, 'A')) # should output {'E', 'D', 'F', 'A', 'C', 'B'}, but letters don't have to be in that order
print()
# second implementation
def dfs(graph, start, visited=None):
    """Recursive depth-first traversal.

    Returns the set of vertices reachable from *start*. *visited* is the
    shared accumulator for the recursion; callers should omit it.
    """
    if visited is None:
        visited = set()
    visited.add(start)
    # BUGFIX(idiom): loop variable renamed from `next`, which shadowed the builtin.
    for neighbor in graph[start] - visited:
        dfs(graph, neighbor, visited)
    return visited
# Demo: traverse the module-level graph from 'C'.
print("Second Implementation: ")
print(dfs(graph, 'C')) # should output {'E', 'D', 'F', 'A', 'C', 'B'}, but not necessarily in that order
print()
# Each implementation prints every node because this particular graph is connected.
# actually stating all the possible paths between two given points
def dfs_paths(graph, start, goal):
    """Yield every simple path (no repeated vertices) from *start* to *goal*."""
    stack = [(start, [start])]
    while stack:
        (vertex, path) = stack.pop()
        # BUGFIX(idiom): renamed from `next`, which shadowed the builtin.
        for nxt in graph[vertex] - set(path):
            if nxt == goal:
                yield path + [nxt]
            else:
                stack.append((nxt, path + [nxt]))
# Demo: enumerate all simple paths from 'A' to 'F' in the module-level graph.
print("Stating paths:")
print(list(dfs_paths(graph, 'A', 'F'))) # [['A', 'C', 'F'], ['A', 'B', 'E', 'F']], though not necessarily in that order
print()
|
from numpy import *
from physics import *
from scipy.interpolate import interp1d
def sigma1DNorm(chan, rs):
    """Velocity-dependence normalization of the annihilation cross-section.

    Parameters
    ----------
    chan : str
        'sWave' (velocity independent: returns 1) or 'pWave'
        (scales linearly with the redshift factor *rs*).
    rs : float or ndarray
        Redshift factor(s), 1 + z.

    Raises
    ------
    ValueError
        For an unrecognized channel — the original silently returned None.
    """
    if chan == 'sWave':
        return 1.
    elif chan == 'pWave':
        return rs*1e-11/sqrt(100.)
    raise ValueError("unknown channel: {!r}".format(chan))
def getstructform(chan,structFormType):
    """Return an effective-density function rho(rs) for structure formation.

    Loads tabulated effective densities for the requested channel
    ('sWave' or 'pWave') and halo-profile variant *structFormType*, and
    returns a closure that interpolates the table for rs <= 52 and uses the
    analytic homogeneous scaling rhoDM * rs**3 (times the p-wave velocity
    factor where applicable) above it.
    NOTE(review): `rhoDM` comes from the star-import of `physics`; its units
    are presumably eV-based given the 1e9 conversion below — confirm there.
    """
    if chan == 'sWave':
        structFormFName = (
            {'rho_eff_Einasto_subs':'structFormData/rho_eff_Einasto_subs.txt',
            'rho_eff_Einasto_no_subs':'structFormData/rho_eff_Einasto_no_subs.txt',
            'rho_eff_NFW_subs':'structFormData/rho_eff_NFW_subs_corrected.txt',
            'rho_eff_NFW_no_subs':'structFormData/rho_eff_NFW_no_subs.txt'}
        )
    elif chan == 'pWave':
        # p-wave tables exist only for the "subs" variants.
        structFormFName = (
            {'rho_eff_Einasto_subs':'structFormData/rho_eff_Einasto_subs_pwave.txt',
            'rho_eff_NFW_subs':'structFormData/rho_eff_NFW_subs_pwave.txt'
            }
        )
    a = loadtxt(structFormFName[structFormType])
    # Convert to 1 + z
    rhoEff = vstack((a[:,0],a[:,2]))
    rhoEff[0,:] += 1
    # Convert the density to eV, and also the fact that the densities are normalized to rhoM instead of rhoDM
    rhoEff[1,:] *= 1e9*rhoDM/1.50389e3
    interpRhoEff = interp1d(rhoEff[0,:],rhoEff[1,:])
    # Above this 1+z the table ends; fall back to the analytic scaling.
    maxrsInterp = 52
    def structform(rs):
        # rs must be a numpy array (uses .size and fancy indexing).
        rho = zeros(rs.size)
        interpRhoInd = where(rs <= maxrsInterp)
        anaRhoInd = where(rs > maxrsInterp)
        if chan == 'sWave':
            rho[interpRhoInd] = interpRhoEff(rs[interpRhoInd])
            rho[anaRhoInd] = rhoDM*rs[anaRhoInd]**3
        elif chan == 'pWave':
            # Tabulated p-wave values carry an extra factor of rs.
            rho[interpRhoInd] = interpRhoEff(rs[interpRhoInd])*rs[interpRhoInd]
            rho[anaRhoInd] = rhoDM*rs[anaRhoInd]**3*sigma1DNorm(chan,rs[anaRhoInd])
        return rho
    return structform
|
from ZeroScenarioHelper import *
def main():
    """Entry point for the t4020 map scenario script (decompiled scena bytecode).

    Registers the scena file header, the display-string table, the NPC chip
    and placement, and the function table; behaviour lives in the top-level
    Function_* bodies below.
    """
    SetCodePage("ms932")  # Shift-JIS, for the Japanese strings below
    CreateScenaFile(
        "t4020.bin", # FileName
        "t4020", # MapName
        "t4020", # Location
        0x005D, # MapIndex
        "ed7124",
        0x00002000, # Flags
        ("", "", "", "", "", ""), # include
        0x00, # PlaceNameNumber
        0x00, # PreInitFunctionIndex
        b'\x00\xff\xff', # Unknown_51
        # Information
        [0, 0, -1000, 0, 0, 2500, 34000, 262, 30, 45, 0, 360, 260000, 0, 0, 0, 0, 1, 93, 0, 2, 0, 3],
    )
    BuildStringList((
        "t4020", # 0
        "クイント老人", # 1
    ))
    AddCharChip((
        "chr/ch20000.itc", # 00
    ))
    # NPC 0x8 (the old caretaker), placed near x=260260.
    DeclNpc(260260, 0, 250, 0, 257, 0x0, 0, 0, 0, 0, 0, 0, 4, 255, 0)
    ScpFunction((
        "Function_0_E8", # 00, 0
        "Function_1_1A0", # 01, 1
        "Function_2_1CB", # 02, 2
        "Function_3_31C", # 03, 3
        "Function_4_32F", # 04, 4
        "Function_5_CCC", # 05, 5
        "Function_6_FB1", # 06, 6
        "Function_7_10A9", # 07, 7
        "Function_8_194E", # 08, 8
        "Function_9_1F31", # 09, 9
        "Function_10_2171", # 0A, 10
        "Function_11_21A4", # 0B, 11
        "Function_12_21D4", # 0C, 12
    ))
def Function_0_E8(): pass
label("Function_0_E8")
RunExpression(0x2, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Switch(
(scpexpr(EXPR_GET_RESULT, 0x2), scpexpr(EXPR_END)),
(0, "loc_128"),
(1, "loc_134"),
(2, "loc_140"),
(3, "loc_14C"),
(4, "loc_158"),
(5, "loc_164"),
(6, "loc_170"),
(SWITCH_DEFAULT, "loc_17C"),
)
label("loc_128")
OP_A0(0xFE, 1450, 0x0, 0xFB)
Jump("loc_188")
label("loc_134")
OP_A0(0xFE, 1550, 0x0, 0xFB)
Jump("loc_188")
label("loc_140")
OP_A0(0xFE, 1600, 0x0, 0xFB)
Jump("loc_188")
label("loc_14C")
OP_A0(0xFE, 1400, 0x0, 0xFB)
Jump("loc_188")
label("loc_158")
OP_A0(0xFE, 1650, 0x0, 0xFB)
Jump("loc_188")
label("loc_164")
OP_A0(0xFE, 1350, 0x0, 0xFB)
Jump("loc_188")
label("loc_170")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_188")
label("loc_17C")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_188")
label("loc_188")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_19F")
OP_A0(0xFE, 1500, 0x0, 0xFB)
Jump("loc_188")
label("loc_19F")
Return()
# Function_0_E8 end
def Function_1_1A0(): pass
label("Function_1_1A0")
# NPC wander thread: OP_94 moves the character within the given bounding
# box, sleeping 600ms per step; the constant-true Jc plus Jump form a loop.
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_1CA")
OP_94(0xFE, 0x3F516, 0xFFFFFA38, 0x3FD0E, 0x8B6, 0x3E8)
Sleep(600)
Jump("Function_1_1A0")
label("loc_1CA")
Return()
# Function_1_1A0 end
def Function_2_1CB(): pass
label("Function_2_1CB")
# Map-init function: starts the NPC idle thread, may launch the quest intro
# event, then walks story-progress scenario flags from newest to oldest to
# decide whether NPC 0x8 is hidden (SetChrFlags 0x80) or repositioned.
BeginChrThread(0x8, 0, 0, 1)
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x7)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_1E9")
Event(0, 9)
label("loc_1E9")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xE0, 0)), scpexpr(EXPR_END)), "loc_209")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_204")
SetChrFlags(0x8, 0x80)
label("loc_204")
Jump("loc_31B")
label("loc_209")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_242")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_23D")
SetChrPos(0x8, 260000, 0, 2000, 270)
BeginChrThread(0x8, 0, 0, 0)
label("loc_23D")
Jump("loc_31B")
label("loc_242")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 2)), scpexpr(EXPR_END)), "loc_27B")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_276")
SetChrPos(0x8, 260000, 0, 2000, 270)
BeginChrThread(0x8, 0, 0, 0)
label("loc_276")
Jump("loc_31B")
label("loc_27B")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 0)), scpexpr(EXPR_END)), "loc_28E")
SetChrFlags(0x8, 0x80)
Jump("loc_31B")
label("loc_28E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA8, 2)), scpexpr(EXPR_END)), "loc_2A1")
SetChrFlags(0x8, 0x80)
Jump("loc_31B")
label("loc_2A1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 6)), scpexpr(EXPR_END)), "loc_2B4")
SetChrFlags(0x8, 0x80)
Jump("loc_31B")
label("loc_2B4")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 3)), scpexpr(EXPR_END)), "loc_2C2")
Jump("loc_31B")
label("loc_2C2")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA1, 4)), scpexpr(EXPR_END)), "loc_2D5")
SetChrFlags(0x8, 0x80)
Jump("loc_31B")
label("loc_2D5")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_2E3")
Jump("loc_31B")
label("loc_2E3")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 0)), scpexpr(EXPR_END)), "loc_2F1")
Jump("loc_31B")
label("loc_2F1")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x82, 0)), scpexpr(EXPR_END)), "loc_304")
SetChrFlags(0x8, 0x80)
Jump("loc_31B")
label("loc_304")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x80, 0)), scpexpr(EXPR_END)), "loc_312")
Jump("loc_31B")
label("loc_312")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 1)), scpexpr(EXPR_END)), "loc_31B")
label("loc_31B")
Return()
# Function_2_1CB end
def Function_3_31C(): pass
label("Function_3_31C")
# Applies the OP_7D screen tint (evening palette, presumably — confirm
# against the opcode docs) only when scenario flag (0xE0, 0) is set.
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xE0, 0)), scpexpr(EXPR_END)), "loc_32E")
OP_7D(0xFF, 0xD2, 0xC8, 0x0, 0x0)
label("loc_32E")
Return()
# Function_3_31C end
def Function_4_32F(): pass
label("Function_4_32F")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 1)), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_3C6")
TalkBegin(0xFE)
#C0001
ChrTalk(
0xFE,
(
"お前たちのおかげで\x01",
"大事な友人たちに墓参りもできた。\x01",
"本当にありがとうな。\x02",
)
)
CloseMessageWindow()
#C0002
ChrTalk(
0xFE,
(
"また何かあったら\x01",
"連絡させていただこう。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
label("loc_3C6")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x4)"), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x5)"), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_3F4")
Call(0, 8)
Return()
label("loc_3F4")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x1)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_4E8")
TalkBegin(0xFE)
#C0003
ChrTalk(
0xFE,
(
"何度も依頼を確認するのは\x01",
"未熟を曝すようで感心しないぞ。\x02",
)
)
CloseMessageWindow()
#C0004
ChrTalk(
0xFE,
(
"花の場所は手帳に\x01",
"メモをとっていたのだろう?\x01",
"そちらを確認するといい。\x02",
)
)
CloseMessageWindow()
#C0005
ChrTalk(
0xFE,
(
"3つの花を全て集めたら、\x01",
"私の所に持ってきてほしい。\x01",
"よろしく頼んだぞ。\x02",
)
)
CloseMessageWindow()
TalkEnd(0xFE)
Return()
label("loc_4E8")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x2)"), scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x1)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_NEQUZ_I64), scpexpr(EXPR_END)), "loc_501")
Call(0, 5)
Return()
label("loc_501")
TalkBegin(0xFE)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xE0, 0)), scpexpr(EXPR_END)), "loc_70C")
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x0, 0x10)"), scpexpr(EXPR_END)), "loc_694")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xEA, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_5FD")
#C0006
ChrTalk(
0xFE,
(
"こんな夕方は、\x01",
"ガイのことを思い出すな。\x02",
)
)
CloseMessageWindow()
#C0007
ChrTalk(
0xFE,
(
"ロイド、お前がいつか\x01",
"一人前になったら……\x01",
"そのときは一緒に酒を飲もう。\x02",
)
)
CloseMessageWindow()
#C0008
ChrTalk(
0xFE,
(
"私がガイの好きだった酒を\x01",
"おごろうじゃないか。\x02",
)
)
CloseMessageWindow()
#C0009
ChrTalk(
0x101,
"#0004Fええ、楽しみにしています。\x02",
)
CloseMessageWindow()
SetScenarioFlags(0xEA, 0)
Jump("loc_68F")
label("loc_5FD")
#C0010
ChrTalk(
0xFE,
(
"ガイが休みの日は、\x01",
"よく酒を持ち込んで\x01",
"ここで飲み明かしたものだ。\x02",
)
)
CloseMessageWindow()
#C0011
ChrTalk(
0xFE,
(
"ロイド、お前がいつか\x01",
"一人前になったら……\x01",
"そのときは一緒に酒を飲もう。\x02",
)
)
CloseMessageWindow()
label("loc_68F")
Jump("loc_707")
label("loc_694")
#C0012
ChrTalk(
0xFE,
(
"こんな夕方は、\x01",
"奴のことを思い出すな……\x02",
)
)
CloseMessageWindow()
#C0013
ChrTalk(
0xFE,
(
"……ほっほっほ、\x01",
"ムダ話をしてしまったようだ。\x01",
"忘れてくれたまえ。\x02",
)
)
CloseMessageWindow()
label("loc_707")
Jump("loc_CC8")
label("loc_70C")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC3, 6)), scpexpr(EXPR_END)), "loc_781")
#C0014
ChrTalk(
0xFE,
"さて……今日も一日が始まるな。\x02",
)
CloseMessageWindow()
#C0015
ChrTalk(
0xFE,
(
"随分昔に家族を亡くした私には\x01",
"この墓守の仕事がすべてなのだよ。\x02",
)
)
CloseMessageWindow()
Jump("loc_CC8")
label("loc_781")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC2, 2)), scpexpr(EXPR_END)), "loc_7F6")
#C0016
ChrTalk(
0xFE,
(
"この国には口には出さなくとも、\x01",
"大きな傷を抱えた者がごまんといる。\x02",
)
)
CloseMessageWindow()
#C0017
ChrTalk(
0xFE,
"……心に留めておくといい。\x02",
)
CloseMessageWindow()
Jump("loc_CC8")
label("loc_7F6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xC0, 0)), scpexpr(EXPR_END)), "loc_804")
Jump("loc_CC8")
label("loc_804")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA8, 2)), scpexpr(EXPR_END)), "loc_812")
Jump("loc_CC8")
label("loc_812")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA7, 6)), scpexpr(EXPR_END)), "loc_820")
Jump("loc_CC8")
label("loc_820")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA3, 3)), scpexpr(EXPR_END)), "loc_8C8")
#C0018
ChrTalk(
0xFE,
(
"さっき街へ買出しに行ったら\x01",
"港湾区に人だかりができていてな。\x02",
)
)
CloseMessageWindow()
#C0019
ChrTalk(
0xFE,
(
"ミシュラム保養地で\x01",
"遊んで回るつもりなのだろう。\x02",
)
)
CloseMessageWindow()
#C0020
ChrTalk(
0xFE,
"……私ももう少し若ければな……\x02",
)
CloseMessageWindow()
Jump("loc_CC8")
label("loc_8C8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA1, 4)), scpexpr(EXPR_END)), "loc_8D6")
Jump("loc_CC8")
label("loc_8D6")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 2)), scpexpr(EXPR_END)), "loc_967")
#C0021
ChrTalk(
0xFE,
(
"墓参りに来ている夫人に\x01",
"記念祭中くらいは\x01",
"楽しんではどうかと言ってみたが……\x02",
)
)
CloseMessageWindow()
#C0022
ChrTalk(
0xFE,
(
"……差し出がましい真似を\x01",
"してしまったかな……\x02",
)
)
CloseMessageWindow()
Jump("loc_CC8")
label("loc_967")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0xA0, 0)), scpexpr(EXPR_END)), "loc_A80")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_A35")
#C0023
ChrTalk(
0xFE,
(
"昨日は思った以上に\x01",
"賑やかなミサになった。\x02",
)
)
CloseMessageWindow()
#C0024
ChrTalk(
0xFE,
(
"参拝客の殆どは、\x01",
"ミサの後にこの墓地に\x01",
"祈りに来ていてな。\x02",
)
)
CloseMessageWindow()
#C0025
ChrTalk(
0xFE,
(
"創立70年……\x01",
"やはり今年は誰にとっても、\x01",
"特別な年なのだろうな。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_A7B")
label("loc_A35")
#C0026
ChrTalk(
0xFE,
(
"創立70年……\x01",
"やはり今年は誰にとっても、\x01",
"特別な年なのだろうな。\x02",
)
)
CloseMessageWindow()
label("loc_A7B")
Jump("loc_CC8")
label("loc_A80")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x82, 0)), scpexpr(EXPR_END)), "loc_A8E")
Jump("loc_CC8")
label("loc_A8E")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x80, 0)), scpexpr(EXPR_END)), "loc_B85")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_B27")
#C0027
ChrTalk(
0xFE,
(
"そういえば、\x01",
"来月はもう創立記念祭か。\x02",
)
)
CloseMessageWindow()
#C0028
ChrTalk(
0xFE,
"……時が経つのは早いもんだ。\x02",
)
CloseMessageWindow()
#C0029
ChrTalk(
0xFE,
(
"こんな所で過ごしているから\x01",
"かもしれんがな。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_B80")
label("loc_B27")
#C0030
ChrTalk(
0xFE,
"来月はもう創立記念祭か……\x02",
)
CloseMessageWindow()
#C0031
ChrTalk(
0xFE,
(
"去年の記念祭が\x01",
"まるで昨日のことのように思えるわい。\x02",
)
)
CloseMessageWindow()
label("loc_B80")
Jump("loc_CC8")
label("loc_B85")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x64, 1)), scpexpr(EXPR_END)), "loc_CC8")
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_C58")
#C0032
ChrTalk(
0xFE,
(
"クロスベル自治州では\x01",
"帝国と共和国の狭間で\x01",
"数々の争いに巻き込まれてきた……\x02",
)
)
CloseMessageWindow()
#C0033
ChrTalk(
0xFE,
(
"時代の奔流に飲み込まれ\x01",
"潰えてしまった命も多い。\x02",
)
)
CloseMessageWindow()
#C0034
ChrTalk(
0xFE,
(
"どうか彼らのために\x01",
"祈りを捧げてやって欲しい。\x02",
)
)
CloseMessageWindow()
SetScenarioFlags(0x0, 0)
Jump("loc_CC8")
label("loc_C58")
#C0035
ChrTalk(
0xFE,
(
"クロスベル市は発展において\x01",
"多くの命を犠牲にしてきた……\x02",
)
)
CloseMessageWindow()
#C0036
ChrTalk(
0xFE,
(
"どうか彼らのために\x01",
"祈りを捧げてやって欲しい。\x02",
)
)
CloseMessageWindow()
label("loc_CC8")
TalkEnd(0xFE)
Return()
# Function_4_32F end
def Function_5_CCC(): pass
label("Function_5_CCC")
EventBegin(0x0)
Fade(500)
OP_4B(0x8, 0xFF)
OP_68(259100, 1700, -800, 0)
MoveCamera(45, 30, 0, 0)
OP_6E(260, 0)
SetCameraDistance(33530, 0)
SetChrPos(0x101, 259000, 0, -300, 0)
SetChrPos(0x102, 260200, 0, -300, 0)
SetChrPos(0x103, 259000, 0, -1500, 0)
SetChrPos(0x104, 260200, 0, -1500, 0)
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_D74")
SetChrPos(0x10A, 257760, 0, -580, 45)
ClearChrFlags(0x4, 0x80)
ClearChrBattleFlags(0x4, 0x8000)
label("loc_D74")
OP_93(0x8, 0xB4, 0x0)
SetChrSubChip(0x8, 0x0)
OP_0D()
Jc((scpexpr(EXPR_EXEC_OP, "OP_2A(0x2E, 0x1, 0x0)"), scpexpr(EXPR_EQUZ), scpexpr(EXPR_END)), "loc_ECB")
#C0037
ChrTalk(
0x101,
(
"#12P#0000Fすみません。\x01",
"クイントさん、でしょうか。\x02",
)
)
CloseMessageWindow()
OP_63(0x8, 0x0, 2000, 0x26, 0x26, 0xFA, 0x1)
Sleep(1000)
#C0038
ChrTalk(
0x8,
"#5P……警察かな?\x02",
)
CloseMessageWindow()
#C0039
ChrTalk(
0x101,
(
"#12P#0000Fあ、はい。\x01",
"支援要請を受けてきました\x01",
"特務支援課の者です。\x02",
)
)
CloseMessageWindow()
#C0040
ChrTalk(
0x8,
"#5P……ふむ。\x02",
)
CloseMessageWindow()
#C0041
ChrTalk(
0x8,
(
"#5P君たちに任せたい仕事は、\x01",
"お供えをするための\x01",
"3種類の花を集める事だ。\x02",
)
)
CloseMessageWindow()
#C0042
ChrTalk(
0x8,
"#5P早速、引き受けてもらえるかね?\x02",
)
CloseMessageWindow()
OP_29(0x2E, 0x1, 0x0)
Jump("loc_F63")
label("loc_ECB")
#C0043
ChrTalk(
0x8,
(
"#5Pふむ、他の用事は\x01",
"片付いたのかな?\x02",
)
)
CloseMessageWindow()
#C0044
ChrTalk(
0x8,
(
"#5P君たちに任せたい仕事は、\x01",
"お供えをするための\x01",
"3種類の花を集める事だ。\x02",
)
)
CloseMessageWindow()
#C0045
ChrTalk(
0x8,
"#5P引き受けてもらえるかね?\x02",
)
CloseMessageWindow()
label("loc_F63")
Call(0, 6)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_F78")
Call(0, 7)
label("loc_F78")
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_F92")
SetChrFlags(0x4, 0x80)
SetChrBattleFlags(0x4, 0x8000)
label("loc_F92")
SetChrPos(0x0, 259600, 0, -300, 0)
OP_93(0x8, 0x10E, 0x0)
OP_4C(0x8, 0xFF)
EventEnd(0x5)
Return()
# Function_5_CCC end
def Function_6_FB1(): pass
label("Function_6_FB1")
# Accept/decline menu for the quest. The player's choice is left in
# expression register 0x0 (0 = accept, nonzero = decline); when declining,
# the NPC responds and the caller skips the briefing.
FadeToDark(300, 0, 100)
Menu(
    0,
    -1,
    -1,
    0,
    (
        "【引き受ける】\x01", # 0
        "【やめる】\x01", # 1
    )
)
MenuEnd(0x0)
OP_60(0x0)
FadeToBright(300, 0)
Jc((scpexpr(EXPR_GET_RESULT, 0x0), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_NEQ), scpexpr(EXPR_END)), "loc_10A8")
#C0046
ChrTalk(
    0x101,
    (
        "#12P#0006F申し訳ないんですが……\x01",
        "今、別件の用事があって\x01",
        "すぐには……\x02",
    )
)
CloseMessageWindow()
#C0047
ChrTalk(
    0x8,
    (
        "#5Pふむ、そうか。\x01",
        "しかたあるまい。\x02",
    )
)
CloseMessageWindow()
#C0048
ChrTalk(
    0x8,
    (
        "#5P引き受けられるようになったら\x01",
        "再び訪ねてきてくれ。\x02",
    )
)
CloseMessageWindow()
label("loc_10A8")
Return()
# Function_6_FB1 end
# Function_7: runs when the quest is accepted.  The client lists the three
# flowers to collect and their locations, the party discusses the local
# three-colour "repose" flower tradition, and the quest start banner is
# shown before quest 0x2E is flagged via OP_29.  Dialogue text is verbatim
# decompiler output and must not be altered.
def Function_7_10A9(): pass
label("Function_7_10A9")
#C0049
ChrTalk(
    0x101,
    (
        "#12P#0000F了解しました、\x01",
        "お引き受けします。\x02",
    )
)
CloseMessageWindow()
#C0050
ChrTalk(
    0x8,
    "#5Pうむ、よろしい。\x02",
)
CloseMessageWindow()
#C0051
ChrTalk(
    0x8,
    (
        "#5P早速だが、\x01",
        "君たちに集めてほしい\x01",
        "3種類の花を教えよう。\x02",
    )
)
CloseMessageWindow()
#C0052
ChrTalk(
    0x8,
    "#5Pメモの準備はよろしいかな?\x02",
)
CloseMessageWindow()
#C0053
ChrTalk(
    0x101,
    "#12P#0005Fあ、はい。\x02",
)
CloseMessageWindow()
OP_93(0x101, 0xB4, 0x1F4)
#C0054
ChrTalk(
    0x101,
    "#12P#0000Fティオ、よろしく頼むよ。\x02",
)
CloseMessageWindow()
#C0055
ChrTalk(
    0x103,
    "#12P#0203F了解です。\x02",
)
CloseMessageWindow()
OP_93(0x101, 0x0, 0x1F4)
#C0056
ChrTalk(
    0x8,
    (
        "#5P1つは『レヴァスの花』。\x01",
        "西クロスベル街道の\x01",
        "警察学校付近に咲いている。\x02",
    )
)
CloseMessageWindow()
#C0057
ChrTalk(
    0x8,
    (
        "#5P2つめは『リクエムの花』。\x01",
        "こちらはクロスベル市西通りの\x01",
        "タリーズ商店で取り扱っている。\x02",
    )
)
CloseMessageWindow()
#C0058
ChrTalk(
    0x8,
    (
        "#5P最後に『フィネルの花』。\x01",
        "東クロスベル街道に出てすぐの\x01",
        "見張り台の足元に咲いている。\x02",
    )
)
CloseMessageWindow()
#C0059
ChrTalk(
    0x8,
    (
        "#5P付近には似た花が咲いているかもしれん。\x01",
        "見落とさぬよう、気を付けることだ。\x02",
    )
)
CloseMessageWindow()
#C0060
ChrTalk(
    0x8,
    (
        "#5P……これらを全て集めたら、\x01",
        "私の所に持ってきてほしい。\x02",
    )
)
CloseMessageWindow()
#C0061
ChrTalk(
    0x103,
    (
        "#12P#0203F……場所はメモしました。\x02\x03",
        "#0200Fところで……\x01",
        "なぜこの3種類の花なのですか?\x02",
    )
)
CloseMessageWindow()
TurnDirection(0x102, 0x103, 500)
#C0062
ChrTalk(
    0x102,
    (
        "#0100Fこのクロスベル自治州では\x01",
        "黄、青、白の3色は\x01",
        "『鎮魂』を表すとされているの。\x02\x03",
        "葬儀の際にはその3色の花で\x01",
        "花束を作って、死者に手向けるのが\x01",
        "クロスベルの伝統なのよ。\x02",
    )
)
CloseMessageWindow()
#C0063
ChrTalk(
    0x104,
    "#12P#0305Fへぇ、花言葉ってやつか?\x02",
)
CloseMessageWindow()
#C0064
ChrTalk(
    0x103,
    (
        "#12P#0203F微妙に違うと\x01",
        "思いますけど……\x02\x03",
        "#0200Fどちらかというと\x01",
        "クロスベルの土地柄に\x01",
        "よるものかと。\x02",
    )
)
CloseMessageWindow()
#C0065
ChrTalk(
    0x101,
    (
        "#12P#0003F(そういえば、兄貴の葬式でも\x01",
        " 3色の花束が手向けられたっけ……)\x02",
    )
)
CloseMessageWindow()
#C0066
ChrTalk(
    0x8,
    "#5P…………………………\x02",
)
CloseMessageWindow()
OP_93(0x102, 0x0, 0x1F4)
OP_63(0x101, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x102, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x103, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x104, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
Sleep(1000)
#C0067
ChrTalk(
    0x101,
    "#12P#0005Fあ、あの……\x02",
)
CloseMessageWindow()
#C0068
ChrTalk(
    0x104,
    "#12P#0305Fどうしたんッスか、黙って。\x02",
)
CloseMessageWindow()
#C0069
ChrTalk(
    0x8,
    (
        "#5Pいや、なに。\x01",
        "物を知らない若者たちに\x01",
        "少しばかり落胆していたのだよ。\x02",
    )
)
CloseMessageWindow()
#C0070
ChrTalk(
    0x8,
    (
        "#5Pそんなことでは\x01",
        "先が思いやられるぞ。\x02",
    )
)
CloseMessageWindow()
#C0071
ChrTalk(
    0x101,
    "#12P#0005Fえ、えっと……\x02",
)
CloseMessageWindow()
#C0072
ChrTalk(
    0x104,
    (
        "#12P#0300Fはは、すいませんねぇ。\x01",
        "俺なんかはクロスベル出身じゃ\x01",
        "ないもんで。\x02",
    )
)
CloseMessageWindow()
#C0073
ChrTalk(
    0x8,
    "#5Pふむ、まぁいいわい。\x02",
)
CloseMessageWindow()
#C0074
ChrTalk(
    0x8,
    (
        "#5P用件は伝えた。\x01",
        "早速取り掛かってくれ。\x02",
    )
)
CloseMessageWindow()
#C0075
ChrTalk(
    0x101,
    "#12P#0005Fりょ、了解しました。\x02",
)
CloseMessageWindow()
# Optional-member branch: only runs when party slot 0x9 is present.
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_18EA")
#C0076
ChrTalk(
    0x10A,
    "#6P#0603F………………………………\x02",
)
CloseMessageWindow()
def lambda_17A5():
    OP_93(0xFE, 0x10E, 0x1F4)
    ExitThread()
QueueWorkItem(0x101, 1, lambda_17A5)
Sleep(50)
def lambda_17B5():
    OP_93(0xFE, 0x10E, 0x1F4)
    ExitThread()
QueueWorkItem(0x102, 1, lambda_17B5)
Sleep(50)
def lambda_17C5():
    OP_93(0xFE, 0x10E, 0x1F4)
    ExitThread()
QueueWorkItem(0x104, 1, lambda_17C5)
Sleep(50)
def lambda_17D5():
    OP_93(0xFE, 0x10E, 0x1F4)
    ExitThread()
QueueWorkItem(0x103, 1, lambda_17D5)
#C0077
ChrTalk(
    0x101,
    (
        "#11P#0012Fえっと……\x01",
        "そういうわけ、なんですが……\x02",
    )
)
CloseMessageWindow()
#C0078
ChrTalk(
    0x10A,
    (
        "#6P#0600F……フン。\x01",
        "なにがそういうわけ、だ。\x02\x03",
        "#0606Fまぁいい、ここまで聞いて\x01",
        "依頼を受けないわけにもいくまい。\x02\x03",
        "#0600Fやるならやるで、完璧に片付けて\x01",
        "さっさと捜査を再開するぞ。\x02",
    )
)
CloseMessageWindow()
#C0079
ChrTalk(
    0x104,
    "#12P#0309Fあいよ、了解ッス!\x02",
)
CloseMessageWindow()
label("loc_18EA")
FadeToDark(300, 0, 100)
Sound(80, 0, 100, 0)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
#A0080
AnonymousTalk(
    0xFF,
    (
        scpstr(SCPSTR_CODE_COLOR, 0x2),
        "クエスト【鎮魂の花集め】\x07\x00",
        "を開始した!\x02",
    )
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(14, 280, 60, 3)
OP_29(0x2E, 0x1, 0x1)
Return()
# Function_7_10A9 end
# Function_8: quest turn-in.  The party hands over the three flower items
# (0x349-0x34B are shown and then removed from the inventory), the client
# invites them along to see the flowers offered, quest 0x2E advances via
# OP_29, and the scene changes to map "t4100".
def Function_8_194E(): pass
label("Function_8_194E")
EventBegin(0x0)
FadeToDark(1000, 0, -1)
OP_0D()
Sleep(500)
OP_4B(0x8, 0xFF)
OP_68(259100, 1700, -800, 0)
MoveCamera(45, 30, 0, 0)
OP_6E(260, 0)
SetCameraDistance(33530, 0)
SetChrPos(0x101, 259000, 0, -300, 0)
SetChrPos(0x102, 260200, 0, -300, 0)
SetChrPos(0x103, 259000, 0, -1500, 0)
SetChrPos(0x104, 260200, 0, -1500, 0)
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_19FF")
SetChrPos(0x10A, 257760, 0, -580, 45)
ClearChrFlags(0x4, 0x80)
ClearChrBattleFlags(0x4, 0x8000)
label("loc_19FF")
OP_93(0x8, 0xB4, 0x0)
SetChrSubChip(0x8, 0x0)
SetChrFlags(0x8, 0x40)
FadeToBright(500, 0)
OP_0D()
#C0081
ChrTalk(
    0x101,
    (
        "#12P#0000Fクイントさん、\x01",
        "ただいま戻りました。\x02",
    )
)
CloseMessageWindow()
#C0082
ChrTalk(
    0x8,
    (
        "#5Pうむ。\x01",
        "なかなか早かったじゃないか。\x02",
    )
)
CloseMessageWindow()
#C0083
ChrTalk(
    0x8,
    "#5P花は全て集まったのかな?\x02",
)
CloseMessageWindow()
#C0084
ChrTalk(
    0x101,
    "#12P#0000Fええ、確認をお願いします。\x02",
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
Sound(17, 0, 100, 0)
#A0085
AnonymousTalk(
    0xFF,
    (
        scpstr(SCPSTR_CODE_ITEM, 0x349),
        scpstr(SCPSTR_CODE_COLOR, 0x0),
        "を渡した。\x02\x03",
        scpstr(SCPSTR_CODE_ITEM, 0x34A),
        scpstr(SCPSTR_CODE_COLOR, 0x0),
        "を渡した。\x02\x03",
        scpstr(SCPSTR_CODE_ITEM, 0x34B),
        scpstr(SCPSTR_CODE_COLOR, 0x0),
        "を渡した。\x02",
    )
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(14, 280, 60, 3)
OP_5A()
SubItemNumber(0x349, 1)
SubItemNumber(0x34A, 1)
SubItemNumber(0x34B, 1)
#C0086
ChrTalk(
    0x8,
    (
        "#5P……うむ、確かに。\x01",
        "3種類すべて集まっているようだ。\x02",
    )
)
CloseMessageWindow()
#C0087
ChrTalk(
    0x8,
    (
        "#5Pご苦労だったな、\x01",
        "特務支援課の諸君。\x02",
    )
)
CloseMessageWindow()
#C0088
ChrTalk(
    0x101,
    (
        "#12P#0000Fいえいえ、\x01",
        "依頼は依頼ですから。\x02",
    )
)
CloseMessageWindow()
#C0089
ChrTalk(
    0x104,
    (
        "#12P#0306Fは~、意外と大変だったな。\x02\x03",
        "雑貨屋に花がねぇなんて\x01",
        "ハプニングもあったしよ。\x02",
    )
)
CloseMessageWindow()
#C0090
ChrTalk(
    0x103,
    "#12P#0200Fまぁ、なんとかなりましたけどね。\x02",
)
CloseMessageWindow()
#C0091
ChrTalk(
    0x102,
    "#0100Fお爺さん、一つ聞いてもいいですか?\x02",
)
CloseMessageWindow()
#C0092
ChrTalk(
    0x8,
    "#5P……なんだね?\x02",
)
CloseMessageWindow()
#C0093
ChrTalk(
    0x102,
    (
        "#0103F最近の風習では、\x01",
        "葬儀でもないのに3色の花を揃える\x01",
        "ということは珍しいことです。\x02\x03",
        "#0100F今回は、なぜ3色の花束を?\x02",
    )
)
CloseMessageWindow()
#C0094
ChrTalk(
    0x8,
    (
        "#5Pふむ、確かに今回は\x01",
        "葬儀があるわけでもなんでもないが……\x02",
    )
)
CloseMessageWindow()
#C0095
ChrTalk(
    0x8,
    (
        "#5P……折角だ、今からこの花を供えるから\x01",
        "君たちもついてきたまえ。\x02",
    )
)
CloseMessageWindow()
#C0096
ChrTalk(
    0x8,
    "#5P理由はそのときに話そう。\x02",
)
CloseMessageWindow()
# Movement choreography: thread 10 walks the client off while threads
# 11/12 shuffle the party aside (see Function_10..Function_12 below).
BeginChrThread(0x8, 3, 0, 10)
Sleep(700)
BeginChrThread(0x101, 3, 0, 11)
BeginChrThread(0x102, 3, 0, 12)
Sleep(500)
BeginChrThread(0x103, 3, 0, 11)
BeginChrThread(0x104, 3, 0, 12)
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_1E07")
Sleep(200)
def lambda_1DFF():
    OP_93(0xFE, 0x87, 0x1F4)
    ExitThread()
QueueWorkItem(0x10A, 1, lambda_1DFF)
label("loc_1E07")
WaitChrThread(0x8, 3)
OP_63(0x101, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x104, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x103, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
OP_63(0x102, 0x0, 2000, 0x0, 0x1, 0xFA, 0x2)
Sound(29, 0, 100, 0)
Sleep(1000)
#C0097
ChrTalk(
    0x103,
    "#6P#0205F……どういうことですか?\x02",
)
CloseMessageWindow()
#C0098
ChrTalk(
    0x101,
    (
        "#5P#0005Fさ、さあ……\x02\x03",
        "#0003Fよく分からないけど、\x01",
        "とにかく行ってみようか。\x02",
    )
)
CloseMessageWindow()
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_1F16")
#C0099
ChrTalk(
    0x10A,
    "#6P#0603F(……まさか……)\x02",
)
CloseMessageWindow()
label("loc_1F16")
FadeToDark(1000, 0, -1)
OP_0D()
OP_29(0x2E, 0x1, 0x7)
NewScene("t4100", 0, 0, 0)
IdleLoop()
Return()
# Function_8_194E end
# Function_9: quest epilogue after the graveside scene.  The client thanks
# the party, the completion banner is shown, quest 0x2E is finalised via
# OP_29(0x2E, 0x4, 0x10) plus ScenarioFlags(0x0, 1), and the scene is torn
# down (NPC released, party leader repositioned).
def Function_9_1F31(): pass
label("Function_9_1F31")
EventBegin(0x0)
FadeToDark(0, 0, -1)
OP_68(259100, 1700, -800, 0)
MoveCamera(45, 30, 0, 0)
OP_6E(260, 0)
SetCameraDistance(33530, 0)
SetChrPos(0x101, 259000, 0, -300, 0)
SetChrPos(0x102, 260200, 0, -300, 0)
SetChrPos(0x103, 259000, 0, -1500, 0)
SetChrPos(0x104, 260200, 0, -1500, 0)
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_1FDA")
SetChrPos(0x10A, 257760, 0, -580, 45)
ClearChrFlags(0x4, 0x80)
ClearChrBattleFlags(0x4, 0x8000)
label("loc_1FDA")
OP_93(0x8, 0xB4, 0x0)
OP_4B(0x8, 0xFF)
SetChrSubChip(0x8, 0x0)
SetCameraDistance(30720, 3000)
FadeToBright(500, 0)
OP_0D()
#C0100
ChrTalk(
    0x8,
    (
        "#5Pお前たちのおかげで\x01",
        "大事な友人たちに墓参りもできた。\x01",
        "本当にありがとうな。\x02",
    )
)
CloseMessageWindow()
#C0101
ChrTalk(
    0x8,
    (
        "#5Pまた何かあったら\x01",
        "連絡させていただこう。\x02",
    )
)
CloseMessageWindow()
#C0102
ChrTalk(
    0x101,
    (
        "#12P#0000Fええ、お待ちしています。\x01",
        "……それでは。\x02",
    )
)
CloseMessageWindow()
FadeToDark(300, 0, 100)
Sound(9, 0, 100, 0)
SetMessageWindowPos(-1, -1, -1, -1)
SetChrName("")
#A0103
AnonymousTalk(
    0xFF,
    (
        scpstr(SCPSTR_CODE_COLOR, 0x2),
        "クエスト【鎮魂の花集め】\x07\x00",
        "を達成した!\x02",
    )
)
CloseMessageWindow()
OP_57(0x0)
FadeToBright(300, 0)
SetMessageWindowPos(14, 280, 60, 3)
FadeToDark(1000, 0, -1)
OP_0D()
SetChrPos(0x0, -350, 0, 12250, 0)
OP_29(0x2E, 0x4, 0x10)
SetScenarioFlags(0x0, 1)
Sleep(500)
SetChrPos(0x0, 259600, 0, -300, 0)
OP_93(0x8, 0x10E, 0x0)
OP_4C(0x8, 0xFF)
Jc((scpexpr(EXPR_EXEC_OP, "GetPartyIndex(0x9)"), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_NEG), scpexpr(EXPR_GTR), scpexpr(EXPR_END)), "loc_216E")
SetChrFlags(0x4, 0x80)
SetChrBattleFlags(0x4, 0x8000)
label("loc_216E")
EventEnd(0x5)
Return()
# Function_9_1F31 end
# Function_10: movement thread for the departing character -- applies a
# fixed OP_98 positional offset over 2.5 s, then queues an OP_A7 colour/
# alpha fade before waiting for the move to finish.
def Function_10_2171(): pass
label("Function_10_2171")
def lambda_2176():
    OP_98(0xFE, 0x0, 0x0, 0xFFFFE4A8, 0x7D0, 0x0)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_2176)
Sleep(2500)
def lambda_2193():
    OP_A7(0xFE, 0xFF, 0xFF, 0xFF, 0x0, 0x1F4)
    ExitThread()
QueueWorkItem(0xFE, 2, lambda_2193)
WaitChrThread(0xFE, 1)
Return()
# Function_10_2171 end
# Function_11: sidestep thread -- offsets the character by a small negative
# OP_98 delta, then turns them (OP_93 to angle 0x87) once the move ends.
def Function_11_21A4(): pass
label("Function_11_21A4")
def lambda_21A9():
    OP_98(0xFE, 0xFFFFFED4, 0x0, 0x0, 0x7D0, 0x0)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_21A9)
WaitChrThread(0xFE, 1)
def lambda_21C7():
    OP_93(0xFE, 0x87, 0x1F4)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_21C7)
WaitChrThread(0xFE, 1)
Return()
# Function_11_21A4 end
# Function_12: mirror of Function_11 -- positive OP_98 offset, then a turn
# to angle 0xE1.  The SaveToFile()/Try(main) trailer is emitted by the
# decompiler to rebuild and exercise the scenario binary.
def Function_12_21D4(): pass
label("Function_12_21D4")
def lambda_21D9():
    OP_98(0xFE, 0x12C, 0x0, 0x0, 0x7D0, 0x0)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_21D9)
WaitChrThread(0xFE, 1)
def lambda_21F7():
    OP_93(0xFE, 0xE1, 0x1F4)
    ExitThread()
QueueWorkItem(0xFE, 1, lambda_21F7)
WaitChrThread(0xFE, 1)
Return()
# Function_12_21D4 end
SaveToFile()
Try(main)
|
#!/usr/local/python
#author: george karystianis
# Merges dose-number values extracted from an annotation dump (argv[1]) with
# the source texts (argv[2]) and writes an aligned report table to argv[3].
import sys
import re

# Compiled once up front (the original recompiled both patterns on every
# input line).  BUGFIX: the decimal dots are now escaped -- the original
# '[0-9]+.[0-9]+' let '.' match ANY character, so tokens such as '2a5'
# were captured as "numbers" and crashed the float() conversions below.
range_re = re.compile(r'([0-9]+(?:\.[0-9]+)?)(?:\s+)?(?:or|-|to)(?:\s+)?([0-9]+(?:\.[0-9]+)?)')
ratio_re = re.compile(r'([0-9]+(?:\.[0-9]+)?)(?:\s+)?/(?:\s+)?([0-9]+(?:\.[0-9]+)?)')

# One column layout shared by the header and every data row.
row_fmt = "{0:7}{1:2}{2:70}{3:3}{4:16}{5:2}{6:}"

new_file = open(sys.argv[3], 'w')
mega = {}    # text id -> minimum dose number (kept as a string)
mega2 = {}   # text id -> maximum dose number (kept as a string)
for line in open(sys.argv[1], 'r'):
    if "dose_number" in line:       # header row of the annotation section
        continue
    if "amount\n" == line:          # start of the next (amount) section
        break
    # The text id sits between 'pr.' and '.txt' in the file-name column.
    boundary = line.find('pr.')
    boundary2 = line.find('.txt')
    line_id = line[boundary + 3:boundary2]
    # Default: a single annotated value, so min == max.
    dose_number_min = line[boundary2 + 5:].strip()
    dose_number_max = dose_number_min
    range_match = range_re.search(line)
    ratio_match = ratio_re.search(line)
    if range_match:
        dose_number_min = range_match.group(1)
        dose_number_max = range_match.group(2)
        # Keep min <= max regardless of the order they were written in.
        if float(dose_number_min) > float(dose_number_max):
            dose_number_min, dose_number_max = dose_number_max, dose_number_min
    if ratio_match:
        # 'a/b' is read as a single value: the quotient, rounded to 1 decimal.
        quotient = float(ratio_match.group(1)) / float(ratio_match.group(2))
        dose_number_min = str(round(quotient, 1))
        dose_number_max = dose_number_min
    if ".0" in dose_number_min:
        # Normalise '2.0' -> '2' so comparisons against integer gold values
        # do not lose true positives.
        dose_number_min = str(int(round(float(dose_number_min))))
        dose_number_max = dose_number_min
    mega[line_id] = dose_number_min
    mega2[line_id] = dose_number_max
new_file.write(row_fmt.format('text_id', ' | ', 'text', ' | ', 'DN_min', '|', 'DN_max') + "\n")
for line in open(sys.argv[2], 'r'):
    info = line.split('\t')
    line_id2 = info[0]
    text_info = info[1].strip('"').strip('"\n')
    print(text_info)
    if 'textid' in line:            # header row of the text file
        continue
    if line_id2 not in mega:
        # No dose annotation for this text: emit placeholders.
        new_file.write(row_fmt.format(line_id2, ' | ', text_info, ' | ', '?', '|', '?') + "\n")
    else:
        new_file.write(row_fmt.format(line_id2, ' | ', text_info, ' | ', mega[line_id2], '|', mega2[line_id2]) + "\n")
new_file.close()
|
import numpy as np
import matplotlib.pyplot as plt
# 200x300 RGB canvas (rows, cols, channels), initialised to white.
A_shape = 200, 300, 3
A = np.zeros(A_shape, dtype=np.uint8)
A[:, :, :] = 255
# Circle radius in pixels (60).  NOTE(review): relies on Python 3 true
# division -- under Python 2, 3/5 would be 0 and the circle would vanish.
r = 3/5 * 100
# Paint every pixel within r of the image centre a golden yellow
# (RGB 255, 204, 0).
for i in range(A.shape[0]):
    for j in range(A.shape[1]):
        if (i-A.shape[0]/2)**2 + (j-A.shape[1]/2)**2 <= r**2:
            A[i, j, 0] = 255
            A[i, j, 1] = 204
            A[i, j, 2] = 0
plt.imshow(A)
plt.show()
import vrep, math, time
# Connect to V-REP, disable synchronous stepping and start the simulation.
clientID = vrep.simxStart('127.0.0.1', 19997 , True, True, 5000, 5)
vrep.simxSynchronous(clientID, False)
vrep.simxStartSimulation(clientID, vrep.simx_opmode_oneshot_wait)
# Handles for the scene objects (right and left wheel joints).
ret, roda_d = vrep.simxGetObjectHandle(clientID, "roda_d_joint", vrep.simx_opmode_oneshot_wait)
ret, roda_e = vrep.simxGetObjectHandle(clientID, "roda_e_joint", vrep.simx_opmode_oneshot_wait)
# Axle length (wheel-to-wheel distance) used by the arc formulas below.
lado = 8.2
def mover(vel):
    # Drive straight: same target velocity (given in deg/s) on both wheels.
    vrep.simxSetJointTargetVelocity(clientID, roda_d, math.radians(vel), vrep.simx_opmode_oneshot_wait)
    vrep.simxSetJointTargetVelocity(clientID, roda_e, math.radians(vel), vrep.simx_opmode_oneshot_wait)
def parar():
    # Stop both wheels.
    vrep.simxSetJointTargetVelocity(clientID, roda_d, 0, vrep.simx_opmode_oneshot_wait)
    vrep.simxSetJointTargetVelocity(clientID, roda_e, 0, vrep.simx_opmode_oneshot_wait)
def arco_d(vd, raio):
    # Arc using the right wheel speed vd and turn radius raio: the left wheel
    # speed follows from differential-drive kinematics with axle length lado.
    ve = ((2 * raio * vd) - (lado * vd) )/(lado + (2 * raio))
    vrep.simxSetJointTargetVelocity(clientID, roda_d, math.radians(vd), vrep.simx_opmode_oneshot_wait)
    vrep.simxSetJointTargetVelocity(clientID, roda_e, math.radians(ve), vrep.simx_opmode_oneshot_wait)
def arco_e(ve, raio):
    # Mirror of arco_d: left wheel speed given, right wheel derived.
    vd = ((2 * raio * ve) - (lado * ve) )/(lado + (2 * raio))
    vrep.simxSetJointTargetVelocity(clientID, roda_d, math.radians(vd), vrep.simx_opmode_oneshot_wait)
    vrep.simxSetJointTargetVelocity(clientID, roda_e, math.radians(ve), vrep.simx_opmode_oneshot_wait)
# Routine: back up 3 s, stop, forward 3 s, stop, then arc left.
mover(-150)
time.sleep(3)
parar()
time.sleep(1)
mover(150)
time.sleep(3)
parar()
time.sleep(1)
arco_e(-150, 10)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from time import gmtime, strftime
from array import array
import re
import shutil
import json
import time
def log(txt):
    # Minimal logger: echo to stdout (Python 2 print statement).
    print txt
# Load the persisted status/command state at startup; abort with exit code 1
# if either JSON file is missing, unreadable or unparsable.  NOTE(review):
# the bare 'except:' clauses predate this review and also mask interrupts.
try:
    file_stat = open('status.json','r')
    file_cmd = open('cmd.json','r')
except:
    log("No files json. Script is close")
    raise SystemExit(1)
try:
    status_json = file_stat.read()
    cmd_json = file_cmd.read()
except:
    log("Read files json error. Script is close")
    raise SystemExit(1)
try:
    # 'status' mirrors the daemon's state; 'cmd' is the outgoing command set.
    status = json.loads(status_json)
    cmd = json.loads(cmd_json)
except:
    log("Error decode json files. Script is close")
    raise SystemExit(1)
def socket_quest(cmd1, status1, rw):
    """Exchange state with the local control daemon on 127.0.0.1:9090.

    When rw is truthy the pending command dict cmd1 is serialised and sent;
    otherwise the literal "none" is sent as a pure status poll.  The JSON
    reply is copied field-by-field into status1 in place.  Returns 1 on
    success, 0 when the daemon is unreachable or the exchange fails.
    """
    try:
        sock = socket.socket()
        sock.connect(('127.0.0.1', 9090))
        if rw:
            sock.send(json.dumps(cmd1))
        else:
            sock.send("none")
        data = sock.recv(1024)
        status2 = json.loads(data)
        # 26 relay/GPIO channels, mirrored into the caller's dict.
        for i in range(26):
            status1['rpi'][i] = status2['rpi'][i]
            status1['relay'][i] = status2['relay'][i]
        status1['time'] = status2['time']
        status1['arduino'] = status2['arduino']
        status1['start'] = status2['start']
        sock.close()
        return 1
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.  'except Exception' keeps the intended
        # best-effort "daemon missing" behaviour without masking exits.
        log("No socket server to quest script")
        return 0
socket_connect = 0
class HttpProcessor(BaseHTTPRequestHandler):
    """HTTP front-end for the quest control panel.

    GET paths:
      /start, /reset, /relay_<n> -- queue a one-shot command for the daemon,
                                    then redirect back to the status page;
      /favicon.ico               -- serve the panel icon;
      /                          -- render the full status page.
    """
    def do_GET(self):
        address = self.path
        self.send_response(200)
        self.send_header('content-type','text/html')
        self.end_headers()
        readwrite = True
        if len(address) > 1:
            # Command request: set the matching flag in the shared cmd dict.
            if address.find("start") == 1:
                log("CMD START")
                cmd["start"] = 1
            elif address.find("reset") == 1:
                log("CMD RESET")
                cmd["reset"] = 1
            elif address.find("relay_") == 1:
                log("CMD "+ address)
                find = re.findall('(\d+)', address)
                number = int(find[0]) - 1
                if number < 26:
                    cmd["relay"][0] = number
                    cmd["relay"][1] = 1
            elif address.find("favicon.ico") != -1:
                # BUGFIX: the original tested the raw find() result, which is
                # -1 (truthy) when absent, so EVERY unknown path was treated
                # as a favicon request.  It also misspelled 'readwrite' (the
                # typo silently created a dead local), and opened the PNG in
                # text mode; 'rb' is required for binary data.
                readwrite = False
                try:
                    favicon = open('sinners.png','rb')
                    self.wfile.write(favicon.read())
                except Exception:
                    log("No favicon.ico file")
            # Bounce the browser back to the status page, then deliver the
            # command (favicon requests poll only, readwrite == False).
            self.wfile.write('<script language="JavaScript">window.location.href = "/"</script>')
            socket_quest(cmd, status, readwrite)
            # Commands are one-shot: clear them once delivered.
            cmd["start"] = 0
            cmd["reset"] = 0
            cmd["relay"][0] = 0
            cmd["relay"][1] = 0
        else:
            # Status page: poll the daemon, then render the panel.
            socket_connect = socket_quest(cmd, status, False)
            self.wfile.write('<!DOCTYPE "html"><html><head><title>Quest Sinners</title><meta http-equiv="Refresh" content="20" />')
            self.wfile.write("</head><body>")
            self.wfile.write(strftime('%d %b %Y %H:%M:%S', gmtime()))
            self.wfile.write("</body>")
            self.wfile.write('<form method="get" action="/reset"><button type="submit">Reset Quest</button></form>')
            # Run indicator: green while a quest run is active.
            if status['start']:
                color = 'green'
            else:
                color = 'red'
            time_temp = "00:00"
            if status['start'] > 0:
                # Elapsed time since the run started, rendered MM:SS.
                timer = time.localtime(time.time() - status['time'])
                time_temp = time.strftime('%M:%S', timer)
            ReleTable = '<form method="get" action="/start"><button type="submit">Start Quest</button><span style="background:' + color
            ReleTable += '"> State</span> '+ time_temp +'</form>'
            if status["arduino"]:
                ReleTable +='Arduino: <span style="background: green">Connect</span>'
            else:
                ReleTable += 'Arduino: <span style="background: red">Disconnect</span>'
            if socket_connect:
                ReleTable +=' Socket: <span style="background: green">Connect</span>'
            else:
                ReleTable += ' Socket: <span style="background: red">Disconnect</span>'
            ReleTable += '<TABLE BORDER="1" CELLSPACING="0"><CAPTION>Quest state</CAPTION>'
            ReleTable += "<TR>"
            ReleTable += '<TH>#</TH><TH>In/Out</TH><TH>Val</TH><TH>Name</TH>'
            ReleTable += '<TH>#</TH><TH>In/Out</TH><TH>Val</TH><TH>Name</TH>'
            ReleTable += '<TH>#</TH><TH>In/Out</TH><TH>Val</TH><TH>Name</TH>'
            ReleTable += '<TH>#</TH><TH>In/Out</TH><TH>Val</TH><TH>Name</TH>'
            ReleTable += "</TR>"
            # One cell group per relay: index, toggle button, current value.
            RelayString = []
            for i in range(26):
                if status["relay"][i]:
                    color = 'green'
                else:
                    color = "red"
                temp = "<TD>"+ str(i+1)+'</TD><TD><form method="get" action="/relay_'+str(i + 1)+'"><button type="submit">Relay '
                temp += str(i + 1) + '</button></form>' + "</TD>"
                temp += '<TD><span style="background: ' + color + '">' + str(status["relay"][i]) + "</span></TD><TD>" + "none" + "</TD>"
                RelayString.append(temp)
            # One cell group per GPIO input: index, label, current value.
            InputString = []
            for i in range(26):
                if status["rpi"][i]:
                    color = 'green'
                else:
                    color = "red"
                temp = "<TD>"+ str(i+1)+'</TD><TD>Input'+str(i+1)+'</TD><TD><span style="background:' + color + '">' + str(status["rpi"][i])
                temp += "</span></TD><TD>" + 'none' + "</TD>"
                InputString.append(temp)
            # 13 rows x 4 groups: relays 1-13 | relays 14-26 | inputs 1-13 | inputs 14-26.
            for i in range(13):
                ReleTable += "<TR>"
                ReleTable += RelayString[i] + RelayString[i + 13] + InputString[i] + InputString[i + 13]
                ReleTable += "</TR>"
            ReleTable += "</TABLE>"
            self.wfile.write(ReleTable)
            self.wfile.write("</body>")
            self.wfile.write("</html>")
# Bind the control panel on the LAN address, port 80 (requires root) and
# serve until interrupted.  NOTE(review): the bare except also swallows the
# KeyboardInterrupt used to stop the server -- only a log line is emitted.
try:
    serv = HTTPServer(("192.168.100.31",80),HttpProcessor)
    serv.serve_forever()
except:
    #serv.shutdown()
    log("Shutdown")
|
from tkinter import *
root =Tk()
root.title("CALCULATOR")
"""inputs"""
# Single Entry widget acting as the calculator display.
num = Entry(root, text="Enter A Number", width=40,borderwidth=5)
num.grid(row = 1, column = 0, columnspan =3)
"""NUMERIC"""
def numb(num_pass):
    # Append the pressed digit to whatever is already in the display.
    temp = num.get()
    num.delete(0, END)
    num.insert(0, str(temp) + str(num_pass))
"""ARITHMETIC OPERATOR"""
def plus():
    # Stash the first operand in the global 'all_new' and clear the display.
    first_plus = num.get()
    global all_new
    all_new = int(first_plus)
    num.delete(0, END)
def minus():
    # NOTE(review): subtraction is not implemented -- the button is a no-op;
    # the abandoned implementation survives below as an unreachable string.
    return
    """ first_sub = num.get()
    global f_num
    sub = int(first_sub)
    num.delete(0,END)
    """
def multi():
    # NOTE(review): multiplication is likewise an unimplemented no-op stub.
    return
    """ first_number = num.get()
    global mul
    mul = int(first_number)
    num.delete(0,END)
    """
"""RESULT"""
def equal():
    # '=' always adds: result = stored operand + current display value.
    # NOTE(review): pressing '=' before '+' raises NameError, since
    # 'all_new' is only defined by plus().
    second_number = num.get()
    something_new = (all_new) + int(second_number)
    num.delete(0,END)
    num.insert(0, str(something_new) )
"""ARITH_Buttons"""
plus_b = Button(root, text="+" , width=8 , height = 3 , command=plus ,bg="Grey" , fg="red")
minus_b = Button(root, text="-" , width=8 , height = 3 , command=minus ,bg="Grey" , fg="red")
multi_b = Button(root, text="*" , width=8 , height = 3 , command=multi ,bg="Grey" , fg="red")
equal_b = Button(root, text="=" , width=20 , height = 3 , command=equal ,bg="Blue" , fg="red")
"""NUM_BUTTON"""
button1 = Button(root, text="1" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(1) )
button2 = Button(root, text="2" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(2) )
button3 = Button(root, text="3" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(3) )
button4 = Button(root, text="4" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(4) )
button5 = Button(root, text="5" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(5) )
button6 = Button(root, text="6" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(6) )
button7 = Button(root, text="7" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(7) )
button8 = Button(root, text="8" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(8) )
button9 = Button(root, text="9" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(9) )
button0 = Button(root, text="0" , width = 8 , height = 3 , bg="lightgrey", command=lambda: numb(0) )
"""PACKING"""
# 3-wide grid: digits rows 2-4, 0/equals row 5, operators row 6.
button1.grid(row = 2 , column = 0 )
button2.grid(row = 2 , column = 1 )
button3.grid(row = 2 , column = 2 )
button4.grid(row = 3 , column = 0 )
button5.grid(row = 3 , column = 1 )
button6.grid(row = 3 , column = 2 )
button7.grid(row = 4 , column = 0 )
button8.grid(row = 4 , column = 1 )
button9.grid(row = 4 , column = 2 )
button0.grid(row = 5 , column = 0, columnspan=1)
equal_b.grid(row = 5 , column = 1, columnspan=2)
plus_b.grid (row = 6 , column = 0)
minus_b.grid(row = 6 , column = 1)
multi_b.grid(row = 6 , column = 2)
root.mainloop()
"""Demonstrates how to use the StatViewAssist class
This sample requires an already loaded configuration with at least 2 connected vports.
"""
from ixnetwork_restpy import SessionAssistant, StatViewAssistant
session_assistant = SessionAssistant(
IpAddress="127.0.0.1",
UserName="admin",
Password="admin",
LogLevel=SessionAssistant.LOGLEVEL_INFO,
ClearConfig=False,
)
ixnetwork = session_assistant.Ixnetwork
ixnetwork.info("negative test")
try:
session_assistant.StatViewAssistant("my test view", Timeout=5)
except Exception as e:
ixnetwork.info(e)
# get a list of all current statistic views that can be used in the StatViewAssistant
print(StatViewAssistant.GetViewNames(ixnetwork))
# create a stat view assistant for a statistics view
port_statistics = session_assistant.StatViewAssistant("Port Statistics")
# print all the rows for a statistics view
print(port_statistics)
# add a filter so that only a single row is retrieved
port_statistics.AddRowFilter("Port Name", StatViewAssistant.REGEX, "Port 1$")
print(port_statistics)
# demonstrate cell access
port_statistics.ClearRowFilters()
rows = port_statistics.Rows
# get the cell value at row 0, column 'Port Name'
print(rows[0]["Port Name"])
# get the cell value at row 1, column 'Stat Name'
print(rows[1]["Stat Name"])
# get the cell value at the first row that matches a regex of 'case insensitive endswith port 1', column 'Frames Tx.'
print(rows["(?i)port 1$"]["Frames Tx."])
ixnetwork.info("check that all ipv4 protocols are up")
protocols_summary = session_assistant.StatViewAssistant("Protocols Summary")
protocols_summary.AddRowFilter("Protocol Type", StatViewAssistant.REGEX, "(?i)^ipv4?")
protocols_summary.CheckCondition("Sessions Not Started", StatViewAssistant.EQUAL, 0)
protocols_summary.CheckCondition("Sessions Down", StatViewAssistant.EQUAL, 0)
ixnetwork.info("traffic stat check")
traffic_statistics = session_assistant.StatViewAssistant("Traffic Item Statistics")
tx_frames = traffic_statistics.Rows[0]["Tx Frames"]
ixnetwork.info("tx frames: %s" % tx_frames)
ixnetwork.info("drilldown sample")
ixnetwork.info(traffic_statistics.DrillDownOptions())
ixnetwork.info(traffic_statistics.TargetRowFilters())
drilldown = traffic_statistics.Drilldown(
0,
traffic_statistics.DrillDownOptions()[0],
traffic_statistics.TargetRowFilters()[0],
)
print(drilldown)
|
from .attacker import Attacker
# Total hit points for each hydra tier.
HYDRA_HEALTH = {
    "hydra1": 5946906,
    "hydra2": 17699730,
    "hydra3": 54533157,
    "hydra4": 106981939,
    "hydra5": 220606896
}
# The six elemental heads every hydra tier spawns.
HYDRA_ELEMENTS = ['light', 'darkness', 'earth', 'water', 'fire', 'wind']


class Hydra(object):
    """One elemental head of a raid hydra, tracking its remaining health
    and the ordered roster of attackers assigned to it."""

    def __init__(self, hydra_type: str, element: str):
        self.hydra_type = hydra_type
        self.element = element
        # Starts at the tier's full health pool.
        self.health_remaining = HYDRA_HEALTH[hydra_type]
        self.attackers = []

    def add_attacker(self, attacker: "Attacker"):
        """Register attacker once (duplicates ignored) and keep the roster
        sorted via the attackers' own ordering."""
        if attacker not in self.attackers:
            self.attackers.append(attacker)
            self.attackers.sort()

    def remove_attacker(self, order):
        """Drop every attacker whose .order matches.

        BUGFIX: the original deleted from the live list while indexing
        range(len(...)), which raises IndexError as soon as an element is
        removed (the indices outrun the shrunken list).  Rebuilding the
        list filters safely and preserves the remaining order.
        """
        self.attackers = [a for a in self.attackers if a.order != order]

    def use_attack(self, attacker: "Attacker"):
        """Mark attacker as having used their attack; unknown attackers are
        silently ignored (ValueError from index() means 'not registered')."""
        try:
            index = self.attackers.index(attacker)
        except ValueError:
            return
        self.attackers[index].used = True

    def set_health_remaining(self, health):
        """Overwrite the tracked health (e.g. after syncing with the game)."""
        self.health_remaining = health

    def _attackers_as_string(self):
        # One attacker per line, bracketed, trailing newline included.
        string = 'attackers: [\n'
        for attacker in self.attackers:
            string += str(attacker) + '\n'
        string += ']\n'
        return string

    def __str__(self):
        return 'element: %s\n' \
               'health_remaining: %s\n' \
               '%s' % (
            self.element, str(self.health_remaining), self._attackers_as_string())


def generate_base_hydras():
    """Return {hydra_type: [Hydra for each element]} for every known tier."""
    base_hydras = dict()
    for key in HYDRA_HEALTH.keys():
        base_hydras[key] = generate_base_hydra(key)
    return base_hydras


def generate_base_hydra(key):
    """Return one fresh Hydra per element for the given tier key."""
    hydras = []
    for element in HYDRA_ELEMENTS:
        hydra = Hydra(key, element)
        hydras.append(hydra)
    return hydras
|
# -*- coding: UTF-8 -*-
import unittest
from romme import conversion as conv
# Years of the French Republican calendar that carry a sixth
# complementary day.
republican_leap_years = {3, 7, 11}

# The calendar only spans years 1-14, so exhaustive enumeration is cheap.
def all_dates():
    """Yield every representable (year, month, day) triple.

    Months 1-12 have 30 days each; "month" 13 holds the complementary
    days: six in a leap year, five otherwise.
    """
    for year in range(1, 15):
        for month in range(1, 14):
            if month == 13:
                day_count = 6 if year in republican_leap_years else 5
            else:
                day_count = 30
            for day in range(1, day_count + 1):
                yield (year, month, day)
class TestConvertion(unittest.TestCase):
    """Exhaustive round-trips and known-date spot checks for the French
    Republican calendar conversions in romme.conversion."""
    def test_all_julian_republican(self):
        # The private julian-day helpers must be exact inverses over every
        # representable republican date.
        for y, m, d in all_dates():
            jd = conv._republican_ymd_to_julian_day(y, m, d)
            self.assertEqual((y, m, d),
                             conv._julian_day_to_republican_ymd(jd))
    def test_gregorian_to_republican(self):
        # see https://fr.wikipedia.org/wiki/An_I_du_calendrier_r%C3%A9publicain
        # for examples
        self.assertEqual((1, 1, 1),
                         conv.gregorian_to_republican(1792, 9, 22))
        self.assertEqual((1, 12, 30),
                         conv.gregorian_to_republican(1793, 9, 16))
        self.assertEqual((4, 1, 1),
                         conv.gregorian_to_republican(1795, 9, 23))
    def test_republican_to_gregorian(self):
        # Inverse direction of the known-date checks above.
        self.assertEqual((1792, 9, 22),
                         conv.republican_to_gregorian(1, 1, 1))
        self.assertEqual((1793, 9, 16),
                         conv.republican_to_gregorian(1, 12, 30))
        self.assertEqual((1795, 9, 23),
                         conv.republican_to_gregorian(4, 1, 1))
    def test_all_republican_gregorian(self):
        # Round-trip through the public API; the failure message pins down
        # the offending gregorian date.
        for rep in all_dates():
            greg = conv.republican_to_gregorian(*rep)
            rep2 = conv.gregorian_to_republican(*greg)
            self.assertEqual(rep, rep2, "%d-%d-%d" % greg)
|
from telebot.types import InlineKeyboardMarkup,InlineKeyboardButton, ReplyKeyboardMarkup
# Yes/No inline keyboard; callback_data 'yes'/'no' identifies the pressed
# button inside the bot's callback-query handler.
inline_key = InlineKeyboardMarkup()
btn = InlineKeyboardButton('Да',callback_data='yes')
btn1 = InlineKeyboardButton('Нет',callback_data='no')
inline_key.add(btn,btn1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 9 07:16:48 2017
SOLUTIONS TECHNICAL INTERVIEW PRACTICE
@author: jpinzon
"""
'''
QUESTION 1:
Given two strings s and t, determine whether some anagram of t is a
substring of s. For example: if s = "udacity" and t = "ad", then the
function returns True. Your function definition should look like:
question1(s, t) and return a boolean True or False.
'''
def question1(s, t):
    """Return True if some anagram of t is a substring of s (case-insensitive).

    Uses a sliding character-count window over s, O(len(s) * alphabet).
    The original enumerated all permutations of t, which is O(len(t)!)
    and unusable for t longer than a handful of characters.
    """
    from collections import Counter
    s = s.lower()
    t = t.lower()
    window = len(t)
    if window == 0:
        return True   # the empty string is a substring of any string
    if window > len(s):
        return False
    target = Counter(t)
    seen = Counter(s[:window])
    if seen == target:
        return True
    # Slide the window one character at a time, dropping zero counts so the
    # Counter comparison stays exact on every Python version.
    for i in range(window, len(s)):
        seen[s[i]] += 1
        left = s[i - window]
        seen[left] -= 1
        if seen[left] == 0:
            del seen[left]
        if seen == target:
            return True
    return False
# TESTS
# NOTE(review): the variable names are swapped relative to the signature --
# question1(t, s) passes 'udacity' as the haystack s and 'da' as the
# needle t, which is what the expected outputs assume.
s = 'da'
t = 'udacity'
print('Question 1 Test 1: \n', question1(t, s))# True
print('Question 1 Test 1: \n', question1('competition', 'attiino')) # False
print('Question 1 Test 1: \n', question1("millos", 'Mil')) # True
'''
QUESTION 2
Given a string a, find the longest palindromic substring contained in a.
Your function definition should look like question2(a), and return a string.
'''
def question2(a):
    """Return the longest palindromic substring of a, or None.

    The input is first normalised: punctuation and spaces are stripped and
    the text lowercased.  Only palindromes longer than one character count;
    ties go to the leftmost occurrence (matching the original behaviour).
    When no palindrome exists, a message is printed and None is returned.

    Uses expand-around-center, O(n^2) time / O(1) extra space; the original
    materialised every substring, O(n^3) time and O(n^2) memory.
    """
    import string
    text = ''.join(l for l in a if l not in string.punctuation).replace(' ', '').lower()

    def _expand(lo, hi):
        # Grow outward while the characters mirror; return the palindrome.
        while lo >= 0 and hi < len(text) and text[lo] == text[hi]:
            lo -= 1
            hi += 1
        return text[lo + 1:hi]

    best = ''
    for center in range(len(text)):
        # Odd-length centre at `center`, even-length centre between
        # `center` and `center + 1`.  Strict '>' keeps the leftmost winner.
        for candidate in (_expand(center, center), _expand(center, center + 1)):
            if len(candidate) > len(best):
                best = candidate
    if len(best) > 1:
        return best
    print(' No palimdromes found in the string')
    return
# TESTS
# Expected outputs are recorded in the trailing comments of each call.
print('Question 2 Test 1: \n', question2('ada is redivider') )
#redivider
print('Question 2 Test 2: \n', question2('redivider'))
# redivider
print('Question 2 Test 3: \n', question2('A new order began, a more Roman age bred Rowena'))
#aneworderbeganamoreromanagebredrowena
print('Question 2 Test 4:')
print(question2('House'))
#No palimdromes found in the string
'''
QUESTION 3
Given an undirected graph G, find the minimum spanning tree within G.
A minimum spanning tree connects all vertices in a graph with the smallest
possible total weight of edges.
Your function should take in and return an
adjacency list structured like this:
{'A': [('B', 2)],
'B': [('A', 2), ('C', 5)],
'C': [('B', 5)]}
Vertices are represented as unique strings.
The function definition should be question3(G)
'''
def question3(G):
    """Greedy nearest-neighbour walk over adjacency list G.

    NOTE(review): despite the prompt, this is not a full Prim/Kruskal MST:
    each step only considers edges from the current vertex to unvisited
    vertices, the shrinking 'nodes' list is printed as a side effect, and
    the function only returns once the current vertex has no unvisited
    neighbours left (otherwise it falls through and returns None).  The
    expected outputs in the test comments reflect this behaviour -- verify
    before reusing as an MST.
    """
    nodes = list(G.keys())
    mst={}
    main_node = (nodes[0])
    GM = G[main_node]
    node_ele = []    # vertices already visited, in visit order
    while main_node in nodes:
        node_ele.append(main_node)
        mst[main_node]=[]
        # Swap to (distance, vertex) pairs so min() picks the shortest edge.
        distances = [(dist, value) for (value, dist) in GM]
        min_dist, node = min(distances)
        min_node = (node, min_dist)
        mst[main_node] = min_node
        nodes.remove(main_node)
        main_node = node
        GM = G[main_node]
        # Drop edges leading back to already-visited vertices.
        GM = [i for i in GM if i[0] not in node_ele]
        print((nodes))
        if len(GM) ==0:
            return(mst)
# Test graphs: adjacency lists mapping vertex -> [(neighbour, weight), ...].
# NOTE(review): g3 is not symmetric (e.g. 'A' lists 'B' but not vice versa),
# so it is effectively directed despite the "undirected" prompt.
g1 = {'A': [('B', 2), ('C',1)],
'B': [('A', 2), ('C', 5)],
'C': [('B', 5)]}
g2 = {'A': [('B', 3), ('E', 1), ('D',4)],
'B': [('A', 3), ('C', 9), ('D', 2), ('E', 2)],
'C': [('B', 9), ('D', 3), ('E', 7)],
'D': [('B', 2), ('C', 3)],
'E': [('A', 1), ('B', 2), ('C', 7)]}
g3 = {'A': [('B',1), ('C',2)],
'B': [('C',1),('D',3)],
'C': [('D',4)],
'D': [('C',4)],
'E': [('F',1)],
'F': [('C',2)]}
# TESTS -- expected results are recorded in the trailing comments.
print('Question 3 Test 1: \n', question3(g1))
#{'A': ('C', 1), 'B': ('A', 2), 'C': ('B', 5)}
print('Question 3 Test 2: \n', question3(g2))
# {'A': ('E', 1), 'E': ('B', 2), 'B': ('D', 2), 'D': ('C', 3)}
print('Question 3 Test 3: \n', question3(g3))
#{'A': ('B', 1), 'B': ('C', 1), 'C': ('D', 4)}
'''
QUESTION 4
Find the least common ancestor between two nodes on a binary search tree.
The least common ancestor is the farthest node from the root that is an ancestor
of both nodes. For example, the root is a common ancestor of all nodes on the
tree, but if both nodes are descendents of the root's left child, then that
left child might be the lowest common ancestor. You can assume that both nodes
are in the tree, and the tree itself adheres to all BST properties.
The function definition should look like question4(T, r, n1, n2), where T is
the tree represented as a matrix, where the index of the list is equal to the
integer stored in that node and a 1 represents a child node, r is a non-negative
integer representing the root, and n1 and n2 are non-negative integers
representing the two nodes in no particular order. For example, one test case
might be
question4([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 0, 1],
[0, 0, 0, 0, 0]],
3,
1,
4)
and the answer would be 3.
'''
def question4(T, r, n1, n2):
    """Return the lowest common ancestor of n1 and n2 in a BST.

    T is an adjacency matrix where T[p][c] == 1 means node c is a child of
    node p; node values equal their matrix indices. r is the root value.

    Bug fixes: the original guard `if r < i |r > d` used bitwise OR with
    broken precedence, and the walk used `T.left`/`T.right`, which do not
    exist on a list-of-lists matrix.
    """
    if r in (None, n1, n2):
        return r
    lo, hi = sorted([n1, n2])
    # The LCA is the first node (walking down from the root) whose value
    # lies between the two targets, by the BST ordering property.
    while not lo <= r <= hi:
        children = [c for c, is_child in enumerate(T[r]) if is_child]
        if r > hi:
            r = next(c for c in children if c < r)  # descend into left subtree
        else:
            r = next(c for c in children if c > r)  # descend into right subtree
    return r
# TESTS — run question4 on one shared tree over several (root, n1, n2) cases.
_Q4_TREE = [[0, 1, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [1, 0, 0, 0, 1],
            [0, 0, 0, 0, 0]]
_Q4_CASES = [(2, 4, 1),   # Result = 2
             (3, 5, 2),   # Result = 3
             (3, 3, 1),   # Result = 3
             (3, 5, 4)]   # Result = 4
for _test_no, (_root, _n1, _n2) in enumerate(_Q4_CASES, 1):
    print('Question 4 Test {}: \n'.format(_test_no),
          question4(_Q4_TREE, _root, _n1, _n2))
'''
QUESTION 5
Find the element in a singly linked list that's m elements from the end.
For example, if a linked list has 5 elements, the 3rd element from the end is
the 3rd element. The function definition should look like question5(ll, m),
where ll is the first node of a linked list and m is the "mth number from
the end". You should copy/paste the Node class below to use as a
representation of a node in the linked list. Return the value of the node at
that position.
class Node(object):
def __init__(self, data):
self.data = data
self.next = None
'''
class node:
    """One element of a singly linked list."""

    def __init__(self, data=None):
        self.data = data  # payload value (None for the dummy head)
        self.next = None  # reference to the following node, if any
# The following functions taken from:
# https://github.com/bfaure/Python_Data_Structures/blob/master/Linked_List/main.py
import numpy as np
class linked_list:
    """Singly linked list built around a dummy head node."""

    def __init__(self):
        self.head = node()

    def append(self, data):
        """Attach a new node holding *data* at the tail (O(n) walk)."""
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = node(data)

    def length(self):
        """Return the number of real (non-dummy) nodes."""
        count = 0
        walker = self.head
        while walker.next is not None:
            count += 1
            walker = walker.next
        return count

    def get(self, index):
        """Return the data at 0-based *index*, or None after printing an error."""
        if index >= self.length():
            print ("ERROR: 'Get' Index out of range!")
            return None
        walker = self.head.next  # skip the dummy head
        for _ in range(index):
            walker = walker.next
        return walker.data
################################
# I present two solutions for this question: question5 and question5a
def create_llist(i_value, n_nodes=10):
    """Build a linked_list whose first value is *i_value*.

    The remaining n_nodes - 1 values are random ints in [0, 100).
    """
    result = linked_list()
    result.append(i_value)
    for _ in range(n_nodes - 1):
        result.append(np.random.randint(0, 100))
    return result
def question5(ll, m):
    """Return the value m elements from the end of a fresh 10-node list.

    *ll* becomes the first node's value; the other 9 values are random.
    Prints an error and returns None when m exceeds the list length.

    Improvements: drops the intermediate dict that called get() for every
    index (O(n^2)); fixes the 'lenght' typo in the error message.
    """
    linked = create_llist(ll, 10)
    size = linked.length()
    if m > size:
        print('m is larger than the length of the list')
        return None
    # The m-th element from the end sits at 0-based index size - m.
    return linked.get(size - m)
def question5a(ll, m):
    """Alternative to question5: walk the nodes directly.

    Bug/security fix: the original built a "l.head.next.next..." string and
    ran it through eval(); a plain pointer walk is equivalent and safe.
    Also fixes the 'lenght' typo in the error message.
    """
    linked = create_llist(ll, 10)
    size = linked.length()
    if m > size:
        print('m is larger than the length of the list')
        return None
    cur = linked.head.next  # first real node
    for _ in range(size - m):
        cur = cur.next
    return cur.data
# TESTS - results cannot be pinned down: each call builds a list of random
# numbers. Both implementations run over the same (ll, m) cases; case 3 is
# deliberately out of range and case 2 returns the first value (== ll).
_Q5_CASES = [(5, 1), (8, 10), (5, 11), (3, 2), (4, 7)]
for _label, _impl in (('Question 5', question5), ('Question 5a', question5a)):
    for _test_no, (_first, _m) in enumerate(_Q5_CASES, 1):
        print('{} Test {}: \n'.format(_label, _test_no), _impl(_first, _m))
import pandas as pd
import os
from requests import get
from bs4 import BeautifulSoup
class MovieTitlesExtractor:
    """Scrapes the top-grossing Spanish films table and persists it as CSV."""

    __URL = 'https://www.taquillaespana.es/estadisticas/peliculas-espanolas-mas-taquilleras-de-todos-los-tiempos/'
    __DIR = './files'
    __FILENAME = 'movie_titles.csv'
    __COLUMNS = ['Movie', 'Year']

    # Public methods
    def generate_csv(self):
        """Download the page, scrape (movie, year) pairs and write the CSV."""
        self.__html_content()
        self.__extract_data()
        self.__create_data_frame()
        self.__write_csv()

    def read_csv(self):
        """Load the previously generated CSV as a DataFrame of strings."""
        return pd.read_csv(f'{self.__DIR}/{self.__FILENAME}', header = 0, dtype = str)

    # Private methods
    def __html_content(self):
        # Fetch and parse the statistics page.
        self.html = BeautifulSoup(get(self.__URL).content, 'html.parser')

    def __extract_data(self):
        # column-2 cells hold titles, column-3 cells hold years; pair them up.
        data = {
            'movies': self.html.findAll('td', { 'class': 'column-2' }),
            'years': self.html.findAll('td', { 'class': 'column-3' })
        }
        self.data = list(map(list, zip(data['movies'], data['years'])))

    def __create_data_frame(self):
        # Build the frame in one shot from a list of row dicts.
        # Fix: DataFrame.append was removed in pandas 2.0 and was also
        # O(n^2) because it copied the whole frame on every row.
        rows = [
            { self.__COLUMNS[0]: movie.text, self.__COLUMNS[1]: year.text }
            for movie, year in self.data
        ]
        self.movies = pd.DataFrame(rows, columns = self.__COLUMNS)

    def __write_csv(self):
        if not os.path.exists(self.__DIR):
            os.makedirs(self.__DIR)
        self.movies.to_csv(f'{self.__DIR}/{self.__FILENAME}', index = False)
|
from idaapi import *
from idautils import *
from idc import *
from ugo.structs import add_struct, add_struct_member
# ADD PCLNENTRY STRUCT
class myplugin_t(idaapi.plugin_t):
    """Minimal IDA plugin skeleton; PLUGIN_UNL unloads it right after run()."""
    flags = idaapi.PLUGIN_UNL
    comment = "This is a comment"
    help = "This is help"
    wanted_name = "My Python plugin"
    wanted_hotkey = "Alt-F8"

    def init(self):
        # Called once when IDA loads the plugin; PLUGIN_OK keeps it loaded.
        idaapi.msg("init() called!\n")
        return idaapi.PLUGIN_OK

    def run(self, arg):
        # Called when the user invokes the plugin (hotkey or menu).
        idaapi.msg("run() called with %d!\n" % arg)

    def term(self):
        # Called when the plugin is unloaded.
        idaapi.msg("term() called!\n")
def PLUGIN_ENTRY():
    """IDA plugin entry point: define the pclnentry struct over
    runtime.pclntab, apply it across the table, and return the plugin.

    Bug fix: the file defined PLUGIN_ENTRY twice; the second definition
    shadowed the first, so the plugin object was never returned to IDA.
    The two bodies are merged here.
    """
    runtime_pclntab = LocByName("runtime.pclntab")
    sid = add_struct("pclnentry")
    err = add_struct_member(sid, "function", flags=(FF_0OFF | FF_QWORD))  # this is how to do pointers
    err = add_struct_member(sid, "dataOff", flags=(FF_0OFF | FF_QWORD), metadata=runtime_pclntab)
    # Entries start 0x20 past the pclntab header; each entry is 0x10 bytes.
    curr = runtime_pclntab + 0x20
    pclntab_size = Dword(runtime_pclntab + 0x8)
    print(pclntab_size)
    for _ in range(pclntab_size):
        MakeStructEx(curr, -1, "pclnentry")
        curr += 0x10
    return myplugin_t()
"""
def MakeStructEx(ea, size, strname):
\"""
Convert the current item to a structure instance
@param ea: linear address
@param size: structure size in bytes. -1 means that the size
will be calculated automatically
@param strname: name of a structure type
@return: 1-ok, 0-failure
\"""
strid = idaapi.get_struc_id(strname)
if size == -1:
size = idaapi.get_struc_size(strid)
return idaapi.doStruct(ea, size, strid)
""" |
from ..q import get_q
from .s import get_s
def get_letters():
    """Return the q-letters, then the literal "r", then the s-letters."""
    middle = ["r"]
    return get_q() + middle + get_s()
|
import requests
import json
# Load the JSON payload from disk and POST it to the local
# coordinate-detection service, then print the raw text response.
with open('my.json') as fs:
    data=json.load(fs)
f = requests.post('http://127.0.0.1:9999/web/getCoord?width=100&height=100&offset=11',data=json.dumps(data))
result = f.text
print(result)
"""
2021-3-28
"""
import os
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import os
from tqdm import tqdm
# from local_ernie_model import Local_Bert_score
# from global_gcn_model import Global_Fai_score
from global_score_model_3 import Global_Fai_score
from local_cnn_model_11 import Local_Fai_score
# from local_cnn_att_model import Local_Fai_score
# from local_esim_model import Local_Fai_score
from utils import extract_data_from_dataloader, Fmeasure, eval
# from data_sememe_loader_f import *
# from data_glove_loader_f_2 import *
from data_global_glove_loader import *
# from data_hands_loader_f import *
# from data_test_loader_f import get_test_loader
import datetime
import math
import argparse
from torch.nn.init import kaiming_normal, uniform
import gensim
import warnings
import logging
# from ESIM import ESIM
# import paddle.fluid.dygraph as D
# from ernie.tokenizing_ernie import ErnieTokenizer
# from ernie.modeling_ernie import ErnieModel
from transformers import AlbertTokenizer, AlbertModel
from transformers import BertTokenizer
# D.guard().__enter__()
# Keep logged tensors short: print at most 3 elements per dimension edge.
torch.set_printoptions(threshold=3)
# model initialization
def weight_init(m):
    """Module initializer for `model.apply(weight_init)`.

    Conv2d weights get Xavier-normal init; Linear weights get Kaiming-normal
    init with their biases zeroed. Other module types are left untouched.

    Fix: `torch.nn.init.xavier_normal` and `kaiming_normal` (without the
    trailing underscore) are deprecated and removed in current PyTorch;
    the in-place `_` variants are the supported equivalents.
    """
    if isinstance(m, nn.Conv2d):
        torch.nn.init.xavier_normal_(m.weight.data)
    elif isinstance(m, nn.Linear):
        torch.nn.init.kaiming_normal_(m.weight)
        m.bias.data.zero_()
# def get_save_path(local_path):
# list_dir = os.listdir(local_path)
# # 返回最新保存的文件
# return list_dir[-1]
if __name__ == '__main__':
    # --- Environment & logging setup ---
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    warnings.filterwarnings('ignore')
    logging.basicConfig(level=logging.INFO)
    # --- Command-line hyperparameters ---
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--epoch', default=20)
    arg_parser.add_argument('--LR', default=0.01)
    arg_parser.add_argument('--window_context', default=100)  # context window length
    # arg_parser.add_argument('--window_doc', default=100)
    arg_parser.add_argument('--window_doc', default=300)  # document window length
    arg_parser.add_argument('--window_title', default=10)  # mention/title window length
    arg_parser.add_argument('--window_body', default=512)  # entity-body window length
    arg_parser.add_argument('--filter_num', default=128)
    arg_parser.add_argument('--filter_window', default=5)
    arg_parser.add_argument('--embedding', default=300)
    arg_parser.add_argument('--lamda', default=0.01)
    # arg_parser.add_argument('--cuda_device', required=True, default='0')
    arg_parser.add_argument('--cuda_device', default=0)
    # arg_parser.add_argument('--nohup', required=True, default="")
    arg_parser.add_argument('--nohup', default="")
    arg_parser.add_argument('--batch', default=1000)
    # arg_parser.add_argument('--weight_decay', required=True, default=1e-5)
    arg_parser.add_argument('--weight_decay', default=1e-5)
    arg_parser.add_argument('--embedding_finetune', default=1)
    # arg_parser.add_argument('--local_model_loc', required=True, default='./model_save')
    # arg_parser.add_argument('--local_model_loc', default='./model_save/')
    arg_parser.add_argument('--data_root', default="./data")
    arg_parser.add_argument('--local_model_loc', default='./model_save/aquaint_combine_att_entity/0.896.pkl')
    arg_parser.add_argument('--global_model_loc', default='./global_model_save/aquaint_gcn/')
    args = arg_parser.parse_args()
    torch.manual_seed(1)  # fixed seed for reproducibility
    # Coerce argparse values explicitly: defaults are int/float but values
    # supplied on the command line arrive as strings.
    EPOCH = int(args.epoch)
    LR = float(args.LR)
    WEIGHT_DECAY = float(args.weight_decay)
    WINDOW_CONTEXT = int(args.window_context)
    WINDOW_DOC = int(args.window_doc)
    WINDOW_BODY = int(args.window_body)
    WINDOW_TITLE = int(args.window_title)
    FILTER_NUM = int(args.filter_num)
    FILTER_WINDOW = int(args.filter_window)
    EMBEDDING = int(args.embedding)
    LAMDA = float(args.lamda)
    BATCH = int(args.batch)
    FINETUNE = bool(int(args.embedding_finetune))
    LOCAL_MODEL_LOC = str(args.local_model_loc)
    ROOT = str(args.data_root)
    # torch.cuda.set_device(int(args.cuda_device))
    # np.set_printoptions(threshold=np.NaN)
    print('Epoch num: ' + str(EPOCH))
    print('Learning rate: ' + str(LR))
    print('Weight decay: ' + str(WEIGHT_DECAY))
    print('Context window: ' + str(WINDOW_CONTEXT))
    print('Document window: ' + str(WINDOW_DOC))
    print('Title window: ' + str(WINDOW_TITLE))
    print('Body window: ' + str(WINDOW_BODY))
    print('Filter number: ' + str(FILTER_NUM))
    print('Filter window: ' + str(FILTER_WINDOW))
    print('Embedding dim: ' + str(EMBEDDING))
    print('Lambda: ' + str(LAMDA))
    print('Is finetune embedding: ' + str(FINETUNE))
    print('Data root: ' + str(ROOT))
    # Global-model hyperparameters. NOTE(review): `config` is never read in
    # the visible part of this script — confirm whether Global_Fai_score
    # consumes it or it is dead.
    config = {'df':0.5,
              'dr':0.3,
              'n_loops':10,
              'n_rels': 5,
              'emb_dims':768,
              'ent_ent_comp':'bilinear',
              'ctx_comp':'bow',
              'mulrel_type':'rel-norm',
              'first_head_uniform':False,
              'use_pad_ent':False,
              'use_stargmax':False,
              'use_local':True,
              'use_local_only':False,
              'freeze_local':False}
print("#######Data loading#######")
# tokenizer = BertTokenizer.from_pretrained('bert-base-chinese', cache_dir='./transformers/')
tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2", cache_dir="./transformers/")
# tokenizer.model_max_length = 1024
model = AlbertModel.from_pretrained("albert-base-v2", cache_dir="transformers/")
# esim_model = ESIM()
data_loader_train = get_loader(ROOT, WINDOW_TITLE, WINDOW_CONTEXT, WINDOW_DOC, WINDOW_BODY, val=False,
test=False, shuffle=True, num_workers=0, tokenizer=tokenizer, dataset="aida_train")
data_loader_val = get_loader(ROOT, WINDOW_TITLE, WINDOW_CONTEXT, WINDOW_DOC, WINDOW_BODY, val=True,
test=False, shuffle=False, num_workers=0, tokenizer=tokenizer, dataset="aquaint")
# data_loader_test = get_test_loader(ROOT, WINDOW_TITLE, WINDOW_CONTEXT, WINDOW_DOC, WINDOW_BODY, val=False,
# test=True, shuffle=True, num_workers=0, tokenizer=tokenizer)
TrainFileNum = len(data_loader_train)
print('Train data size: ', len(data_loader_train))
print('Dev data size: ', len(data_loader_val))
# print('Test data size: ', len(data_loader_test))
# doc_men = get_mentionNum(path='./output/doc_mentionNum.pkl')
# doc_men = pd.read_pickle('./data/dev_menNum.pkl')
# weight_numpy = np.load(file='./data/tecent_word_embedding.npy')
# weight_numpy[0] = np.zeros(shape=200, dtype=weight_numpy.dtype)
# embed = nn.Embedding.from_pretrained(torch.FloatTensor(weight_numpy)).cpu()
print("#######Model Initialization#######")
local_model = Local_Fai_score().cuda()
cnn_score = Global_Fai_score()
cnn_score = cnn_score.cuda()
# cnn_score_dict = cnn_score.state_dict()
# cnn_score = torch.nn.DataParallel(cnn_score)
# pretrained_model_state = torch.load(LOCAL_MODEL_LOC)['model_state_dict']
# pretrained_dict = {k: v for k, v in pretrained_model_state.items() if k in cnn_score_dict.keys() and k != 'embed.weight'}
# pretrained_dict = {k: v for k, v in pretrained_model_state.items() if k in cnn_score_dict.keys()}
# cnn_score_dict.update(pretrained_dict)
# cnn_score.load_state_dict(cnn_score_dict)
local_model.load_state_dict(torch.load(LOCAL_MODEL_LOC)['model_state_dict'])
# cnn_score.load_state_dict(torch.load(LOCAL_MODEL_LOC)['model_state_dict'])
# cnn_score.apply(weight_init)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn_score.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
print(cnn_score)
print("current_device(): ", torch.cuda.current_device())
print("#######Training...#######")
train_unk = 0
dev_unk = 0
epoch_count = 0
starttime = datetime.datetime.now()
last_acc = 0
# last_acc = torch.load(LOCAL_MODEL_LOC)['last_acc']
    word = []  # NOTE(review): never read afterwards in the visible code
    for epoch in range(epoch_count, EPOCH):
        epoch_count += 1
        print("****************epoch " + str(epoch_count) + "...****************")
        file_count = 0
        loss_sum = 0
        true_train = 0  # correctly linked mentions since the last eval window
        men_train = 0   # mentions seen since the last eval window
        for k in tqdm(data_loader_train):
            file_count += 1
            # One item = one document: m mentions, n candidate entities.
            y_label, mention_entity, entity_entity, m, n, mention, mention_vec, context_vec, doc_vec, \
                title_vec, body_vec, filename, sfeats, m2c_prior, entity_sr, mentions2entity, new_context, hand_features = extract_data_from_dataloader(k, finetune=False)
            if m == 0:
                # Document without usable mentions: count and skip.
                train_unk += 1
                continue
            cnn_score.train()
            y_true = torch.Tensor(y_label.numpy())
            # The local scorer's softmax output feeds the global model.
            fai_local_score_temp, fai_local_score_softmax_temp, fai_local_score_uniform_temp = local_model(
                mention_entity, m, n, mention_vec, context_vec, doc_vec, title_vec,
                body_vec, sfeats, m2c_prior, mentions2entity, new_context, hand_features)
            s, gcn_output, local_score_norm, global_score = cnn_score(
                mention_entity, entity_entity, m, n, mention_vec, context_vec,
                doc_vec, title_vec, body_vec, sfeats, m2c_prior, entity_sr,
                mentions2entity, new_context, hand_features, fai_local_score_softmax_temp)
            # Gold candidate index per mention (column of the 1 in each row).
            y_true_index = []
            for y_t_i in range(m):
                for y_t_j in range(n):
                    if int(y_true[y_t_i][y_t_j]) == 1:
                        y_true_index.append(y_t_j)
            y_train = []
            men_train += m
            for i in range(m):
                y_train.append(np.argmax(global_score[i].detach().cpu().numpy()))
            for i in range(m):
                if int(y_label[i][int(y_train[i])]) == 1:
                    true_train += 1
            # print(len(y_true_index))
            y_true_index = Variable(torch.LongTensor(y_true_index)).cuda()
            loss = loss_function(global_score, y_true_index)
            loss_sum += loss.cpu().data
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Periodic dev-set evaluation: every BATCH documents and at epoch end.
            if file_count % BATCH == 0 or file_count == TrainFileNum:
                print("*****-----Train Acc-----*****")
                print(float(true_train) / men_train)
                true_train = 0  # reset running train-accuracy counters per window
                men_train = 0
                print("*****Eval*****")
                cnn_score.eval()
                count_true = 0
                count_label = 0
                total_mentions = []
                actual_mentions = []
                actual_correct = []
                flag = False
                endtime = datetime.datetime.now()
                print("time: " + str((endtime - starttime).total_seconds()))
                print("#######Computing score...#######")
                test_file_c = 0
                for k_test in data_loader_val:
                    correct_temp = 0
                    y_label, mention_entity, entity_entity, m, n, mention, mention_vec, context_vec, \
                        doc_vec, title_vec, body_vec, filename, sfeats, m2c_prior, entity_sr, mentions2entity, new_context, hand_features = extract_data_from_dataloader(k_test, finetune=False)
                    if m == 0:
                        dev_unk += 1
                        continue
                    test_file_c += 1
                    y_true = torch.Tensor(y_label.detach().numpy())
                    # if m != int(doc_men[filename]):
                    #     print(str(m)+"|||"+str(doc_men[filename]))
                    #     print('erooooooooor!!')
                    fai_local_score_temp, fai_local_score_softmax_temp, fai_local_score_uniform_temp = local_model(
                        mention_entity, m, n, mention_vec, context_vec, doc_vec, title_vec,
                        body_vec, sfeats, m2c_prior, mentions2entity, new_context, hand_features)
                    fai_s, fai_gcn_output, fai_local_score_norm, fai_global_score = cnn_score(
                        mention_entity, entity_entity, m, n, mention_vec, context_vec,
                        doc_vec, title_vec, body_vec, sfeats, m2c_prior, entity_sr,
                        mentions2entity, new_context, hand_features, fai_local_score_softmax_temp)
                    fai_score = fai_global_score.cpu().data
                    y_forecase = []
                    y_local = []
                    count_label += m
                    for i in range(m):
                        y_forecase.append(np.argmax(fai_score[i].numpy()))
                    for i in range(m):
                        if int(y_label[i][int(y_forecase[i])]) == 1:
                            count_true += 1
                            correct_temp += 1
                    y_true = []
                    # total_mentions.append(int(doc_totalmen[filename]))
                    # actual_mentions.append(int(doc_men[filename]))
                    total_mentions.append(int(m))
                    # total_mentions.append(int(doc_men[filename]))
                    actual_mentions.append(int(m))
                    actual_correct.append(correct_temp)
                    # print("total_men:" + str(doc_totalmen[filename]) + "|||actual_men:" + str(
                    #     doc_men[filename]) + "|||correct:" + str(correct_temp))
                    print(str(filename)+"|||"+"total_men: " + str(m) + "|||actual_mention: " + str(len(fai_score)) +
                          "|||correct mention: " + str(correct_temp))
                    for i in range(m):
                        y_true_temp = []
                        for j in range(n):
                            if (int(y_label[i][j]) == 1):
                                y_true_temp.append(j)
                        y_true.append(y_true_temp)
                acc, eval_mi_prec, eval_ma_prec, eval_mi_rec, eval_ma_rec, eval_mi_f1, eval_ma_f1 = Fmeasure(count_true,
                                                                                                            count_label,
                                                                                                            actual_mentions,
                                                                                                            total_mentions,
                                                                                                            actual_correct)
                # Checkpoint whenever dev micro-F1 improves on the best so far.
                if eval_mi_f1 > last_acc:
                    model_f = str(eval_mi_f1)
                    model_f = model_f[:model_f.index(".")+4]  # 3 decimals in the filename
                    # model_f = os.path.join(LOCAL_MODEL_LOC, str(model_f)+'.pkl')
                    print("model_f: ", model_f)
                    print("***** Save Model *****")
                    PATH = './global_model_save/aquaint_combine_att_entity/' + str(model_f) + '.pkl'
                    # PATH = '/home/baoxin/CCKS2020/model_save/'+str(model_f)+'.pkl'
                    # print("PATH: ", PATH)
                    checkpoint_dict = {"epoch_count":epoch_count,
                                       "model_state_dict":cnn_score.state_dict(),
                                       "optimizer_state_dict":optimizer.state_dict(),
                                       "last_acc": last_acc
                                       }
                    # torch.save(cnn_score.state_dict(), PATH)
                    torch.save(checkpoint_dict, PATH)
                    last_acc = eval_mi_f1
                    flag = True
                # Append this window's metrics to the run log.
                with open('./global_metrics/aquaint_combine_att_entity.txt', 'a', encoding='utf-8') as writer:
                    acc_text = "epoch: " + str(epoch_count)+"|||step: " + str(file_count) + "|||loss: " + str(float(loss_sum)) + "|||acc: " + str(acc)
                    eval_mi_text = "eval_mi_prec: " + str(eval_mi_prec) + "|||eval_mi_rec: " + str(eval_mi_rec) + "|||eval_mi_f1: " + str(eval_mi_f1)
                    eval_ma_text = "eval_ma_prec: " + str(eval_ma_prec) + "|||eval_ma_rec: " + str(eval_ma_rec) + "|||eval_ma_f1: " + str(eval_ma_f1)
                    writer_text = acc_text + '\r\n' + eval_mi_text + '\r\n' + eval_ma_text
                    writer.write(writer_text)
                endtime = datetime.datetime.now()
                print("time:" + str((endtime - starttime).total_seconds()) + "|||epoch: " + str(epoch_count) +
                      "|||step: " + str(file_count) + "|||loss: " + str(float(loss_sum)) + "|||acc: " + str(acc))
                print("eval_mi_prec: " + str(eval_mi_prec) + "|||eval_mi_rec: " + str(eval_mi_rec) + "|||eval_mi_f1: " +
                      str(eval_mi_f1))
                print("eval_ma_prec: " + str(eval_ma_prec) + "|||eval_ma_rec: " + str(eval_ma_rec) + "|||eval_ma_f1: " +
                      str(eval_ma_f1))
                count_true = 0
                count_label = 0
                total_mentions = []
                actual_mentions = []
                endtime = datetime.datetime.now()
                print("time: " + str((endtime - starttime).total_seconds()))
                # predict on test data set
                # print("*****Test*****")
                #if flag:
                #    eval(cnn_score=cnn_score, data_loader_test=data_loader_test, eval_mi_f1=eval_mi_f1)
                # for i in range(2, 8):
                #     eval(ROOT, bert_score, data_loader_test, i, True, None, 0, 0, 0)
    print("train_unk: ", train_unk)
    print("dev_unk: ", dev_unk)
    print("***** Finish Training The Model *****")
|
seq = "CTTGAACGCGTCCCGGCTTG"
length = len(seq)
def mutate(base):
    """Return the three bases that differ from *base*, in A/T/C/G order."""
    alphabet = ['A', 'T', 'C', 'G']
    assert base in alphabet
    return [other for other in alphabet if other != base]
# Enumerate every sequence that differs from `seq` at exactly three
# positions a < b < c, printing each variant tab-separated per base.
for a in range(length - 2):
    for b in range(a + 1, length - 1):
        for c in range(b + 1, length):
            for sub_c in mutate(seq[c]):
                for sub_b in mutate(seq[b]):
                    for sub_a in mutate(seq[a]):
                        variant = (seq[:a] + sub_a + seq[a+1:b] + sub_b
                                   + seq[b+1:c] + sub_c + seq[c+1:])
                        print('\t'.join(list(variant)))
class GlobalKeyword:
    """Demonstrates the `global` keyword used inside a class body."""

    # Class attribute — distinct from the module-level `scores` that
    # update_scores writes below.
    scores = 0

    # NOTE(review): defined without `self`, so this only works when called
    # on the class itself (GlobalKeyword.update_scores(5)); calling it on an
    # instance would pass the instance as `new_score`. `global scores`
    # creates/updates a MODULE global, not GlobalKeyword.scores.
    def update_scores(new_score):
        global scores
        scores = new_score
        print(scores)
if __name__ == '__main__':
    # Only constructs an instance; update_scores is never exercised here.
    GlobalKeyword()
|
__author__ = 'Michael Redmond'
import vtk
class ModelBaseData(object):
    """Unstructured grid plus the per-cell bookkeeping arrays it carries.

    All arrays are registered on the grid's cell data so downstream filters
    can look them up by name.
    """

    def __init__(self):
        super(ModelBaseData, self).__init__()
        self.data = vtk.vtkUnstructuredGrid()
        self.points = None
        self.global_ids = vtk.vtkIdTypeArray()
        self.global_ids.SetName("global_ids")
        self.visible = vtk.vtkIntArray()
        self.visible.SetName("visible")
        self.original_ids = vtk.vtkIntArray()
        self.original_ids.SetName("original_ids")
        self.basic_types = vtk.vtkIntArray()
        self.basic_types.SetName("basic_types")
        self.basic_shapes = vtk.vtkIntArray()
        self.basic_shapes.SetName("basic_shapes")
        self._register_arrays()

    def _register_arrays(self):
        # Attach every bookkeeping array to the grid's cell data; shared by
        # __init__ and reset so the two can never drift apart.
        cell_data = self.data.GetCellData()
        cell_data.SetGlobalIds(self.global_ids)
        cell_data.AddArray(self.visible)
        cell_data.AddArray(self.original_ids)
        cell_data.AddArray(self.global_ids)
        cell_data.AddArray(self.basic_types)
        cell_data.AddArray(self.basic_shapes)

    def set_points(self, points):
        """Replace the grid's point set."""
        self.points = points
        self.data.SetPoints(points)

    def reset(self):
        """Clear the grid and every array, then re-register the arrays.

        Bug fix: basic_types and basic_shapes were previously neither reset
        nor re-registered, so stale values survived a reset (the sibling
        ModelDataHelper.reset shows the intended pattern).
        """
        self.data.Reset()
        self.global_ids.Reset()
        self.visible.Reset()
        self.original_ids.Reset()
        self.basic_types.Reset()
        self.basic_shapes.Reset()
        self._register_arrays()
        self.points = None

    def squeeze(self):
        """Reclaim unused memory in the grid and all arrays."""
        self.data.Squeeze()
        self.global_ids.Squeeze()
        self.visible.Squeeze()
        self.original_ids.Squeeze()
        self.basic_types.Squeeze()
        self.basic_shapes.Squeeze()  # bug fix: was omitted

    def update(self):
        """Mark the grid modified so the VTK pipeline re-executes."""
        self.data.Modified()
class ModelDataHelper(object):
    """Wraps an existing grid with a point set and per-cell bookkeeping arrays.

    global_ids1 (vtkIdTypeArray) is registered as the grid's GlobalIds
    attribute; global_ids2 (vtkIntArray) is the named "global_ids" payload.
    """

    def __init__(self, data):
        super(ModelDataHelper, self).__init__()
        self.data = data
        self.points = vtk.vtkPoints()
        self.data.SetPoints(self.points)
        self.global_ids1 = vtk.vtkIdTypeArray()
        self.global_ids2 = vtk.vtkIntArray()
        self.global_ids2.SetName("global_ids")
        self.visible = vtk.vtkIntArray()
        self.visible.SetName("visible")
        self.original_ids = vtk.vtkIntArray()
        self.original_ids.SetName("original_ids")
        self.basic_types = vtk.vtkIntArray()
        self.basic_types.SetName("basic_types")
        self.basic_shapes = vtk.vtkIntArray()
        self.basic_shapes.SetName("basic_shapes")
        self._register_arrays()

    def _register_arrays(self):
        # Attach every bookkeeping array to the grid's cell data; shared by
        # __init__ and reset so the two can never drift apart.
        cell_data = self.data.GetCellData()
        cell_data.SetGlobalIds(self.global_ids1)
        cell_data.AddArray(self.visible)
        cell_data.AddArray(self.original_ids)
        cell_data.AddArray(self.global_ids2)
        cell_data.AddArray(self.basic_types)
        cell_data.AddArray(self.basic_shapes)

    def set_points(self, points):
        """Replace the grid's point set."""
        self.points = points
        self.data.SetPoints(points)

    def reset(self):
        """Clear the grid and all arrays, then re-register the arrays.

        Bug fix: the old reset re-added global_ids1 in place of global_ids2
        (diverging from __init__) and never cleared global_ids2.
        """
        self.data.Reset()
        self.global_ids1.Reset()
        self.global_ids2.Reset()
        self.visible.Reset()
        self.original_ids.Reset()
        self.basic_types.Reset()
        self.basic_shapes.Reset()
        self._register_arrays()
        self.points = None

    def squeeze(self):
        """Reclaim unused memory in the grid and all arrays."""
        self.data.Squeeze()
        self.global_ids1.Squeeze()
        self.global_ids2.Squeeze()  # bug fix: was omitted
        self.visible.Squeeze()
        self.original_ids.Squeeze()
        self.basic_types.Squeeze()
        self.basic_shapes.Squeeze()

    def update(self):
        """Mark the grid modified so the VTK pipeline re-executes."""
        self.data.Modified()
|
import agent
import environment
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
env = environment.Environment()
# Q-learning hyperparameters.
learning_rate = 0.01
gamma = 0.99  # discount factor
MAX_EPISODES = 50000
MAX_STEPS = 10
ACTION_SIZE = 200  # per step 2 dollars
u1 = 100  # presumably the learned agent's utility — verify against env.reset
# Q table: rows are states (time step x current bid), columns are actions.
Q = np.zeros((MAX_STEPS * ACTION_SIZE, ACTION_SIZE))
def state_index(time, cur_bid):
    """Flatten a (time step, current bid) pair into a Q-table row index."""
    return cur_bid + ACTION_SIZE * time
def epsilon_exploration(state_idx, cur_bid, epsilon):
    """Epsilon-greedy policy: random raise of +2 or +4, else the Q-greedy bid."""
    explore = random.uniform(0, 1) < epsilon
    if explore:
        raise_amount = 2 if random.uniform(0, 1) < 0.5 else 4
        return cur_bid + raise_amount
    # Exploit: best action index from the Q table, scaled to dollars.
    return 2 * np.argmax(Q[state_idx, :])
def update_q_table(state_idx, action, reward, next_state_idx):
    """One Q-learning backup for (state, action) given the observed reward.

    Bug fix: the lookup previously read the global `new_state_idx` (leaked
    from the training loop) instead of the `next_state_idx` parameter.
    """
    next_val = np.max(Q[next_state_idx, :])
    action_idx = int(action / 2)  # actions are even dollar amounts
    curr_val = Q[state_idx, action_idx]
    Q[state_idx, action_idx] += learning_rate * (reward + gamma * next_val - curr_val)
def moving_average(a, n=3):
    """Simple moving average of window *n* over sequence *a* (len(a)-n+1 values)."""
    cumulative = np.cumsum(a, dtype=float)
    cumulative[n:] -= cumulative[:-n]
    return cumulative[n - 1:] / n
# Bug fix: epsilon was never initialized, so the first call to
# epsilon_exploration raised NameError. Start near-greedy exploration high.
epsilon = 0.9
for _ep in range(MAX_EPISODES):
    print ('EPISODE :- ' + str(_ep))
    random_agent = agent.Agent()
    u2 = random_agent.utility
    observation, cur_bid = env.reset(u1, u2)
    bid1 = 0
    bid2 = 0
    action = 0
    total_reward = 0
    for _ in range(MAX_STEPS):
        time, old_bid = observation
        state_idx = state_index(time, old_bid)
        # Random opponent moves first each round.
        bid2 = random_agent.action(cur_bid)
        new_observation, reward, done, cur_bid = env.step(bid1, bid2, idx=2)
        total_reward += reward
        new_time, new_bid = new_observation
        new_state_idx = state_index(new_time, new_bid)
        # Backup the learned agent's previous (state, action) pair.
        update_q_table(state_idx, action, reward, new_state_idx)
        if done:
            break
        observation = new_observation
        action = epsilon_exploration(new_state_idx, new_bid, epsilon)
        # NOTE(review): squaring epsilon decays it extremely fast
        # (0.9 -> 0.81 -> 0.66 ...); confirm a fixed decay factor was not
        # intended. Kept as written.
        epsilon *= epsilon
        bid1 = action
        _, _, done, cur_bid = env.step(bid1, bid2, idx=1)
# Persist the learned Q table.
df = pd.DataFrame(Q)
df.to_csv("q_table.csv")  # bug fix: was pd.to_csv, which does not exist
#!/usr/bin/python3
"""This module creates a class named Square"""
class Square:
    """Represents a square.

    Attributes:
        __size: the private size of the square.
    """

    def __init__(self, size):
        """Initialize the square.

        Args:
            size: value stored in the private __size attribute.
        """
        self.__size = size
|
#!/usr/bin/env python3
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Import generated translations from Google's internal Translation Console."""
from __future__ import print_function
import glob
import logging
import os
import sys
import nassh
import libdot
import filter_translations
def get_parser():
    """Get a command line parser."""
    parser = libdot.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--skip-git', dest='run_git', action='store_false', default=True,
        help='Skip creating a new git commit.')
    parser.add_argument(
        'builddir',
        help='Input path to the compiled locale messages.')
    parser.add_argument(
        'sourcedir', nargs='?', default=os.path.join(nassh.DIR, '_locales'),
        help='Output path to nassh/_locales/ directory.')
    return parser
def main(argv):
    """Import generated translations into the source tree.

    Clears the existing nassh/_locales files, copies in each locale's
    messages.json from builddir (reformatted), and optionally records the
    change as a git commit.
    """
    parser = get_parser()
    opts = parser.parse_args(argv)
    opts.builddir = os.path.abspath(opts.builddir)
    opts.sourcedir = os.path.abspath(opts.sourcedir)
    # Find new translations. Do it here for sanity checking before we
    # delete anything.
    new_locales = glob.glob(os.path.join(opts.builddir, '*', 'messages.json'))
    if not new_locales:
        parser.error("builddir doesn't seem to contain any translations")
    # Clear existing translation files.
    logging.info('Clearing existing translation files')
    os.makedirs(opts.sourcedir, exist_ok=True)
    for locale in os.listdir(opts.sourcedir):
        locale_dir = os.path.join(opts.sourcedir, locale)
        path = os.path.join(locale_dir, 'messages.json')
        libdot.unlink(path)
        # Prune empty dirs.
        try:
            os.rmdir(locale_dir)
        except OSError:
            # Directory not empty (or already gone); leave it alone.
            pass
    # Copy over the new translations.
    logging.info('Importing new translation files')
    for in_locale in new_locales:
        locale = os.path.basename(os.path.dirname(in_locale))
        out_locale = os.path.join(opts.sourcedir, locale, 'messages.json')
        os.makedirs(os.path.dirname(out_locale), exist_ok=True)
        filter_translations.reformat(in_locale, output=out_locale)
    # Generate git commits automatically.
    if opts.run_git:
        libdot.run(['git', 'checkout', '-f', 'en/'], cwd=opts.sourcedir)
        libdot.run(['git', 'add', '.'], cwd=opts.sourcedir)
        libdot.run(['git', 'commit', '-m', 'nassh: update translations', '.'],
                   cwd=opts.sourcedir)
if __name__ == '__main__':
    # Script entry point: exit with main's return code.
    sys.exit(main(sys.argv[1:]))
|
import numpy as np
import cv2
import random
import math
# running virtual environment on windows
###############################################
# virtualenv env
# .\env\Scripts\activate.bat
# python app.py
# read the image file
img = cv2.imread('maps/atl-2.png', 2)
# convert to binary image (pixels > 254 become white)
ret, bw_img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
# Estimate a typical feature width: sample 100 random rows and measure the
# span between the first and last white pixel in each.
width = 0
for i in range(100):
    index = random.randint(0,len(bw_img)-1)
    first = np.where(bw_img[index]==255)
    if (first[0].size != 0):
        width += first[0][-1] - first[0][0]
# NOTE(review): divides by 100 even though all-black rows were skipped, so
# the average is biased low when many sampled rows had no white pixels.
width = math.floor(width / 100)
# Morphological close (dilate then erode) scaled by the estimated width.
# NOTE(review): the comment above said "(width/2) times" but the code uses
# width*2 iterations — confirm which is intended.
kernel = np.ones((3,3), np.uint8)
img_dilation = cv2.dilate(bw_img, kernel, iterations=width*2)
img_erosion = cv2.erode(img_dilation, kernel, iterations=math.floor(width*2))
cv2.imshow('Dilation', img_erosion)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import os
# Resolve the sample_htdocs directory relative to this file and preload the
# fixture files as raw bytes (used as expected responses in tests).
base = os.path.join(os.path.dirname(__file__), '../sample_htdocs')
with open(base + "/index.html", "rb") as f:
    root_index = f.read()
with open(base + "/subdir1/index.html", "rb") as f:
    subdir1_index = f.read()
with open(base + "/subdir1/subdir11/maoyo.giaogiao", "rb") as f:
    maoyo_giaogiao = f.read()
with open(base + "/kitten.jpg", "rb") as f:
    kitten = f.read()
|
# Generated by Django 3.1.8 on 2021-04-16 07:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the PersonalQR model (uuid + user recipient)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('lockers', '0002_qr'),
    ]

    operations = [
        migrations.CreateModel(
            name='PersonalQR',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(default='', max_length=50)),
                # PROTECT: deleting a user is blocked while a PersonalQR references them.
                ('recipient', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
from django.urls import path
from tasks import views
# Routes for the tasks app: list, create, and per-task read/update/delete.
urlpatterns = [
    path('', views.TasksListView.as_view(), name='tasks_list'),
    path('create/', views.TaskCreateView.as_view(), name='task_create'),
    path('<int:pk>/update/', views.TaskUpdateView.as_view(), name='task_update'),
    path('<int:pk>/delete/', views.TaskDeleteView.as_view(), name='task_delete'),
    path('<int:pk>/', views.TaskReadView.as_view(), name='task_read'),
]
|
from flask_babel import lazy_gettext
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms import ValidationError
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from ..models import User
class LoginForm(FlaskForm):
    """Sign-in form: email and password, with an optional remember-me flag."""
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField(lazy_gettext('Password'), validators=[DataRequired()])
    remember_me = BooleanField(lazy_gettext('Keep me logged in'))
    submit = SubmitField(lazy_gettext('Log in'))
class RegistrationForm(FlaskForm):
    """New-account form; validate_<field> hooks run automatically in WTForms
    validation and must raise ValidationError to reject input."""
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
    username = StringField(lazy_gettext('Username'), validators=[
        DataRequired(), Length(1, 32), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, lazy_gettext(
            'Usernames must have only letters, numbers, dots or underscores'))])
    password = PasswordField(lazy_gettext('Password'), validators=[
        DataRequired(), EqualTo('password2', message=lazy_gettext('Passwords must match.'))])
    password2 = PasswordField(lazy_gettext('Confirm password'), validators=[DataRequired()])
    submit = SubmitField(lazy_gettext('Register'))
    def validate_email(self, field):
        # Emails are stored/compared lower-cased.
        if User.query.filter_by(email=field.data.lower()).first():
            raise ValidationError(lazy_gettext('Email already registered.'))
    def validate_username(self, field):
        # Case-insensitive uniqueness via the normalized column.
        if User.query.filter_by(username_normalized=field.data.lower()).first():
            raise ValidationError(lazy_gettext('Username already in use.'))
class ChangePasswordForm(FlaskForm):
    """Password change for the logged-in user; verifies the old password and
    rejects reusing it as the new one."""
    old_password = PasswordField(lazy_gettext('Old password'), validators=[DataRequired()])
    new_password = PasswordField(lazy_gettext('New password'), validators=[
        DataRequired(), EqualTo('new_password2', message=lazy_gettext('Passwords must match.'))])
    new_password2 = PasswordField(lazy_gettext('Confirm new password'), validators=[DataRequired()])
    submit = SubmitField(lazy_gettext('Change password'))
    def validate_old_password(self, field):
        if not current_user.verify_password(field.data):
            raise ValidationError(lazy_gettext('Invalid old password.'))
    def validate_new_password(self, field):
        if self.old_password.data == field.data:
            raise ValidationError(lazy_gettext('New password is the same as old password.'))
class ChangeEmailForm(FlaskForm):
    """Email change for the logged-in user; requires the current password and
    rejects the current or an already-registered address."""
    new_email = StringField(lazy_gettext('New email'), validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField(lazy_gettext('Password'), validators=[DataRequired()])
    submit = SubmitField(lazy_gettext('Change email'))
    def validate_new_email(self, field):
        if current_user.email == field.data.lower():
            raise ValidationError(lazy_gettext('This is your current email.'))
        if User.query.filter_by(email=field.data.lower()).first():
            raise ValidationError(lazy_gettext('Email already registered.'))
    def validate_password(self, field):
        if not current_user.verify_password(field.data):
            raise ValidationError(lazy_gettext('Invalid password.'))
class ResetPasswordRequestForm(FlaskForm):
    """Step 1 of password reset: ask for a known account email."""
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
    submit = SubmitField(lazy_gettext('Reset password'))
    def validate_email(self, field):
        if User.query.filter_by(email=field.data.lower()).first() is None:
            raise ValidationError(lazy_gettext('Unknown email address.'))
class ResetPasswordForm(FlaskForm):
    """Step 2 of password reset: set a new password for a known email."""
    email = StringField(lazy_gettext('Email'), validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField(lazy_gettext('New password'), validators=[
        DataRequired(), EqualTo('password2', message=lazy_gettext('Passwords must match.'))])
    password2 = PasswordField(lazy_gettext('Confirm password'), validators=[DataRequired()])
    submit = SubmitField(lazy_gettext('Reset password'))
    def validate_email(self, field):
        if User.query.filter_by(email=field.data.lower()).first() is None:
            raise ValidationError(lazy_gettext('Unknown email address.'))
|
# Read a comma-separated pair "X,Y" and print an X-by-Y grid where cell (i, j)
# holds i * j.
num = input("Enter the X and Y? ")
X, Y = num.split(',')
result = [[row * col for col in range(int(Y))] for row in range(int(X))]
print(result)
# Generate Documents
# O(n+m)
# n = len(characters) | m = len(document)
def generateDocument(characters, document):
    """Return True iff `document` can be spelled using the multiset of
    characters in `characters` (each character consumed at most once).

    O(n + m) time, n = len(characters), m = len(document); O(k) space for the
    k distinct characters.
    """
    from collections import Counter
    available = Counter(characters)
    for char in document:
        # Counter returns 0 for missing keys, covering the "not in" case.
        if available[char] <= 0:
            return False
        available[char] -= 1
    return True
|
import pytest
from src.bot import *
# Module-level bot client and polling updater, shared by the handlers below.
bot = telegram.Bot(token=config.BOT_TOKEN)
updater = Updater(token=config.BOT_TOKEN)
def test_main():
    """Smoke test: wiring up the bot via main() is expected to return 0."""
    result = main()
    assert result == 0
def test_main_menu():
    """NOTE(review): main_menu is not defined in this module; presumably it
    comes from `from src.bot import *` -- verify it exists there."""
    global bot
    res = main_menu()
    assert res == 0
def start(bot, update):
    """/start handler: send the greeting and show the main menu keyboard."""
    bot.send_message(chat_id=update.message.chat_id, text=main_menu_message())
    update.message.reply_text('Choose the option:', reply_markup=main_menu_keyboard())
# Handler for the 'login' callback: load stored credentials for this user, or
# prompt for them when none exist yet.
def login(bot, update):
    query = update.callback_query
    details = read_dao.get_user(query.message.from_user.id)
    if details is None:
        # bot.send_chat_action(chat_id=query.effective_user.id, action=telegram.ChatAction.TYPING)
        bot.send_message(chat_id=query.message.chat_id, text='Enter your details in the following format : '
                                                            'username, password, name')
    else:
        # NOTE(review): `update.message` is typically None for callback
        # queries; query.message.chat_id may be intended here -- verify.
        bot.send_message(chat_id=update.message.chat_id, text='We already have your credentials, let\'s move on!')
        # Fix: the original assigned config.USERNAME/PASSWORD from `details`
        # unconditionally, raising TypeError whenever no user was stored, and
        # logged the values *before* assigning them.
        config.USERNAME = details[0]
        config.PASSWORD = details[1]
        logging.debug("Login is " + config.USERNAME)
        logging.debug("Password is " + config.PASSWORD)
def saveuserDetails(bot, update):
    """Persist 'username, password, name' typed by the user, keyed by their
    Telegram user id."""
    user, pwd, fullname = update.message.text.split(',')
    telegram_id = update.message.from_user.id
    write_dao.save_user([user, pwd, fullname, telegram_id])
def one_by_one(bot, update):
    """'Show one by one' button: delegate to next_apt to display the current
    apartment."""
    # The original bound update.callback_query to an unused local; next_apt
    # re-reads it from `update`, so the binding was dead code.
    next_apt(bot, update)
def create_rental_table(apts):
    """Render the apartments as a Markdown table and return it as a string.

    Each apt exposes list-valued attributes (address, rent, msize, rooms,
    link); element [0] of each is rendered.
    """
    header = """| Address | Rent | Size | Number of rooms | Link |\n| --- | --- | --- | --- | --- |\n"""
    # Join once instead of repeated string concatenation; the original also
    # returned `text + ""`, a no-op.
    rows = [
        "| {} | {} sek | {} | {} | {} |\n".format(
            apt.address[0], apt.rent[0], apt.msize[0], apt.rooms[0], apt.link[0])
        for apt in apts
    ]
    text = header + "".join(rows)
    print(text)  # debug output preserved from the original
    return text
def show_all(bot, update):
    """Replace the menu message with a Markdown table of all stored apartments."""
    query = update.callback_query
    apts = read_dao.get_all_objects()
    bot.edit_message_text(chat_id=query.message.chat_id,
                          message_id=query.message.message_id,
                          text=create_rental_table(apts),
                          reply_markup=main_menu_keyboard(), parse_mode=telegram.ParseMode.MARKDOWN)
def apply_filters(bot, update):
    """Swap the current menu message for the filters menu."""
    query = update.callback_query
    bot.edit_message_text(chat_id=query.message.chat_id,
                          message_id=query.message.message_id,
                          text=filters_menu_message(),
                          reply_markup=filters_menu_keyboard())
def filters_menu_message():
    """Prompt shown above the filters keyboard."""
    prompt = "Select filters you want to apply"
    return prompt
def main_menu_message():
    """Greeting shown in response to /start."""
    greeting = "Hey! This is Uppsala Housing bot.\n\n We will help you find your new home"
    return greeting
def build_menu(buttons,
               n_cols,
               header_buttons=None,
               footer_buttons=None):
    """Arrange `buttons` into rows of at most `n_cols`, optionally prepending
    a header row and appending a footer row."""
    rows = []
    for start in range(0, len(buttons), n_cols):
        rows.append(buttons[start:start + n_cols])
    if header_buttons:
        rows.insert(0, header_buttons)
    if footer_buttons:
        rows.append(footer_buttons)
    return rows
def next_apt(bot, update):
    """Show the apartment at the current cursor and advance to the next one.

    Uses the module-level `apts` list and `current` index (shared with
    previous_apt).
    """
    query = update.callback_query
    global apts, current
    print(current)  # debug output preserved from the original
    apt = apts[current]
    # Fix: the original did `current -= current`, which always reset the
    # cursor to 0, so "Next" could never advance.  Advance by one and wrap
    # around to avoid an IndexError at the end of the list.
    current = (current + 1) % len(apts)
    print(current)
    bot.send_photo(chat_id=query.message.chat_id, photo=str(apt.imagelink[0]))
    message = "Adress is " + str(apt.address[0]) + "\n" + "Number of rooms is " + str(
        apt.rooms[0]) + "\n" + "Size is is " + str(apt.msize[0]) + "\n" + "Rent is " + str(
        apt.rent[0]) + "\n" + "View details here " + str(apt.link[0])
    bot.send_message(chat_id=query.message.chat_id,
                     message_id=query.message.message_id,
                     text=message,
                     reply_markup=one_by_one_keyboard())
def previous_apt(bot, update):
    """Show the apartment at the current cursor and step back to the previous
    one (shares `apts`/`current` with next_apt)."""
    query = update.callback_query
    global apts, current
    print(current)  # debug output preserved from the original
    apt = apts[current]
    # Fix: the original did `current += current`, which doubles the index
    # (0, 0, 2, 4, ...) instead of stepping back; step one back with
    # wrap-around so the index stays valid.
    current = (current - 1) % len(apts)
    print(current)
    bot.send_photo(chat_id=query.message.chat_id, photo=str(apt.imagelink[0]))
    message = "Adress is " + str(apt.address[0]) + "\n" + "Number of rooms is " + str(
        apt.rooms[0]) + "\n" + "Size is is " + str(apt.msize[0]) + "\n" + "Rent is " + str(
        apt.rent[0]) + "\n" + "View details here " + str(apt.link[0])
    bot.send_message(chat_id=query.message.chat_id,
                     message_id=query.message.message_id,
                     text=message,
                     reply_markup=one_by_one_keyboard())
def rent_filter(bot, update):
    """Ask for and store the min/max rent bounds.

    NOTE(review): Bot.send_message requires a chat_id as first argument, and
    this handler is registered for callback queries, where update.message is
    typically None -- both lines look broken; verify against the bot API.
    """
    bot.send_message("Type in min and max rent price separated by comma")
    config.MIN_RENT, config.MAX_RENT = update.message.text.split(',')
def room_filter(bot, update):
    """Ask for and store the desired number of rooms.

    NOTE(review): same issues as rent_filter -- send_message lacks a chat_id
    and update.message is likely None for callback queries; verify.
    """
    bot.send_message("Type in number of rooms")
    config.ROOMS = update.message.text
def main_menu_keyboard():
    """Inline keyboard for the main menu; the callback_data values are matched
    by the CallbackQueryHandler patterns registered in main()."""
    keyboard = [[InlineKeyboardButton('Set login details', callback_data='login')],
                [InlineKeyboardButton('Apply filters', callback_data='choose filters')],
                [InlineKeyboardButton('Show one by one', callback_data='one_by_one')],
                [InlineKeyboardButton('Show all', callback_data='show_all')]]
    return InlineKeyboardMarkup(keyboard)
def filters_menu_keyboard():
    """Inline keyboard offering the rent and room-count filters."""
    keyboard = [[InlineKeyboardButton('Rent', callback_data='rent_filter')],
                [InlineKeyboardButton('Number of Rooms', callback_data='room_filter')]]
    return InlineKeyboardMarkup(keyboard)
def one_by_one_keyboard():
    """Previous/Next navigation keyboard for the one-by-one apartment view."""
    keyboard = [[InlineKeyboardButton('Previous', callback_data='previous_apt')],
                [InlineKeyboardButton('Next', callback_data='next_apt')]]
    return InlineKeyboardMarkup(keyboard)
def main():
    """Register all handlers on the shared updater and start long-polling.

    Returns 0 on successful startup (the contract test_main asserts).
    """
    updater.dispatcher.add_handler(CommandHandler('start', start))
    updater.dispatcher.add_handler(CallbackQueryHandler(login, pattern='login'))
    updater.dispatcher.add_handler(CallbackQueryHandler(show_all, pattern='show_all'))
    updater.dispatcher.add_handler(CallbackQueryHandler(one_by_one, pattern='one_by_one'))
    # Fix: the main-menu button sends callback_data 'choose filters' (see
    # main_menu_keyboard), but this handler was registered with pattern
    # 'apply_filters' and therefore never fired.
    updater.dispatcher.add_handler(CallbackQueryHandler(apply_filters, pattern='choose filters'))
    updater.dispatcher.add_handler(CallbackQueryHandler(rent_filter, pattern='rent_filter'))
    updater.dispatcher.add_handler(CallbackQueryHandler(room_filter, pattern='room_filter'))
    updater.dispatcher.add_handler(CallbackQueryHandler(next_apt, pattern='next_apt'))
    updater.dispatcher.add_handler(CallbackQueryHandler(previous_apt, pattern='previous_apt'))
    updater.dispatcher.add_handler(MessageHandler(Filters.text, saveuserDetails), group=0)
    updater.start_polling()
    # Fix: the original implicitly returned None, failing test_main's
    # `result == 0` assertion.
    return 0
main()
|
import pandas as pd
import numpy as np
import re, nltk
from nltk.stem.porter import PorterStemmer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import *
from sklearn.metrics import classification_report
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
# Walmart trip-type classification script (Python 2 era).
# NOTE(review): sklearn.cross_validation and sklearn.preprocessing.Imputer were
# removed in modern scikit-learn -- confirm the pinned environment before
# running.
# Both CSVs are headerless; column names are assigned explicitly below.
train_data_df = pd.read_csv('train.csv',delimiter=',',header = None)
test_data_df = pd.read_csv('test.csv',header = None ,delimiter=",")
train_data_df.columns = ["TripType","VisitNumber","Weekday","Upc","ScanCount","DepartmentDescription","FinelineNumber"]
test_data_df.columns = ["VisitNumber","Weekday","Upc","ScanCount","DepartmentDescription","FinelineNumber"]
# Missing values are replaced by 0 rather than imputed.
train_data_df = train_data_df.fillna(0)
test_data_df = test_data_df.fillna(0)
# Feature matrices: drop the label and the non-numeric columns.
train_data_df1 = train_data_df.drop('TripType', 1)
train_data_df1 = train_data_df1.drop('Weekday', 1)
train_data_df1 = train_data_df1.drop('DepartmentDescription', 1)
train_data_df1 = np.array(train_data_df1)
test_data_df1 = test_data_df.drop('Weekday',1)
test_data_df1 = test_data_df1.drop('DepartmentDescription',1)
"""
X_train, X_test, y_train, y_test = train_test_split(train_data_df1, train_data_df.TripType, random_state=2)
#my_model = LinearSVC(penalty = 'l2',dual = True,C=0.7,loss='hinge')
my_model = LogisticRegression(penalty = 'l1')
my_model = my_model.fit(X=X_train, y=y_train)
test_pred = my_model.predict(X_test)
print classification_report(test_pred,y_test)
"""
# Fit L1-regularised logistic regression on the full training set and predict
# a TripType for every test row.
my_model = LogisticRegression(penalty = 'l1')
my_model = my_model.fit(X=train_data_df1, y=train_data_df.TripType)
test_pred = my_model.predict(test_data_df1)
spl = []
for i in range(len(test_pred)) :
    spl.append(i)
# One output row per VisitNumber (up to the last visit number seen);
# 45 columns, one vote-count per possible TripType class index.
rows = (((test_data_df1.VisitNumber).tolist())[-1]) + 1
results = np.zeros((rows,45))
for Vnum, Trtype in zip(test_data_df.VisitNumber[spl], test_pred[spl]) :
    #print Vnum,"-->",Trtype,"\n"
    results[Vnum][Trtype] += float(1)
# Write one line per visit: visit number followed by the 45 class counts.
# NOTE(review): fw is never closed or flushed explicitly -- consider a with
# block.
fw = open("TotalResult.txt","w")
for i in range(rows) :
    res = str(i)
    for j in range(45) :
        res += "," + str(int(results[i][j]))
    fw.write(res+"\n")
|
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import sys
from selenium import webdriver
# Launch a private-browsing Firefox session and open the URL given as argv[1];
# the watchdog handler below refreshes this page whenever a file changes.
driver_profile = webdriver.FirefoxProfile()
driver_profile.set_preference('browser.privatebrowsing.autostart', True)
driver = webdriver.Firefox(firefox_profile=driver_profile)
driver.get(sys.argv[1])
class MyHandler(FileSystemEventHandler):
    """Watchdog handler that refreshes the browser on any filesystem event.

    Overriding dispatch() (rather than on_modified etc.) means every event
    type triggers a reload.
    """
    def dispatch(self, event):
        driver.refresh()
        # Fix: the original Python 2 `print event.src_path` statement is a
        # syntax error under Python 3; print() behaves identically for a
        # single argument on both versions.
        print(event.src_path)
# Watch the directory given as argv[2]; any change triggers a browser refresh
# via MyHandler.  Ctrl-C stops the observer cleanly.
if __name__ == "__main__":
    path = sys.argv[2]
    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
|
import argparse
import configparser
import json
import os
import requests
import urllib
from jira import JIRA
# Credentials and the ini path come from the command line.
jira_user = ''
jira_password = ''
ini_file = ''
parser = argparse.ArgumentParser()
parser.add_argument('--user_name', required=True, help='Enter JIRA user name')
parser.add_argument('--password', required=True, help='Enter JIRA password')
parser.add_argument('--ini_file', required=True, help='Enter script configuration/ini file name with extension. Provide full path if file is not in the same directory as this script. Example: config.ini or /Users/user/Desktop/config.ini')
args = parser.parse_args()
jira_user = args.user_name
jira_password = args.password
ini_file = args.ini_file
# Pivotal/JIRA endpoints, tokens and directories are read from the ini file.
config = configparser.ConfigParser()
config.read(ini_file)
pivotal_url = config['PIVOTAL']['url']
pivotal_project = config['PIVOTAL']['project']
pivotal_xtracker_token = config['PIVOTAL']['xtracker_token']
jira_url = config['JIRA']['url']
jira_project = config['JIRA']['project']
jira_bearer_token= config['JIRA']['bearer_token']
jira_pivotalid_field = config['JIRA']['pivotalid_field']
jira_pivotalid_hiden_field = config['JIRA']['pivotalid_hiden_field']
log_dir = config['DEFAULT']['log_dir']
global_dir = config['DEFAULT']['global_dir']
pivotal_api_url = pivotal_url + '/services/v5/projects/' + pivotal_project + '/stories/'
jira_api_url = jira_url + '/rest/api/latest'
# NOTE(review): requests expects `headers` to be a dict; several calls below
# pass pivotal_xtracker_token / jira_bearer_token directly -- confirm these
# config values hold header dicts, not bare token strings.
pivotal_attachments = (('fields', 'comments(attachments)'),)
application_json = {'Content-Type': 'application/json',}
jira = JIRA(jira_url, basic_auth=(jira_user, jira_password))
# Initial search only to learn the total issue count for pagination.
project_jira_issues = jira.search_issues('project='+jira_project,
                                         startAt=0,
                                         maxResults=0,
                                         json_result=True)
project_jira_issues_count = project_jira_issues["total"]
starting_point = 0
all_cases_with_no_piv_id = []
log_file = open(log_dir + jira_project + '_cases_with_no_pivotal_ids.txt', 'w')
# Process the project's issues page by page (JIRA caps search results, so we
# paginate in chunks of 1000).
while project_jira_issues_count > 0:
    #----------------------------
    # Pagination control settings
    #----------------------------
    pagination = (
        ('startAt', starting_point),
        ('maxResults', 1000),
    )
    project_jira_issues_count = project_jira_issues_count - 1000
    starting_point = starting_point + 1000
    #----------------------------------------------------------------
    # Get all issues in project include hiden Pivotal ID custom field
    # (JSON formated chunks based on pagination max restriction)
    #----------------------------------------------------------------
    all_issues_obj = requests.get(jira_api_url + '/search?jql=project=%22' + jira_project + '%22&fields=attachment&fields=' + jira_pivotalid_hiden_field, headers=application_json, params=pagination, auth=(jira_user, jira_password))
    all_issues = json.loads(all_issues_obj.content)
    issue_index = len(all_issues["issues"]) - 1
    a = -1
    pivotal_jira_issue_ids = []
    #------------------------------------------------------
    # Create list of Pivotal to Jira ID collection mappings
    #------------------------------------------------------
    while issue_index > a:
        issue_map = {}
        if all_issues["issues"][issue_index]["fields"]["attachment"]:
            attachments_dir = global_dir + all_issues["issues"][issue_index]["fields"][jira_pivotalid_hiden_field]
            issue_map['piv_id'] = all_issues["issues"][issue_index]["fields"][jira_pivotalid_hiden_field]
            issue_map['jira_id'] = all_issues["issues"][issue_index]["key"]
            pivotal_jira_issue_ids.append(issue_map)
            if not os.path.exists(attachments_dir):
                os.makedirs(attachments_dir)
        # Fix: the original decremented an undefined name `length` (NameError),
        # and `issue_index` was never advanced, so this loop could not
        # terminate.
        issue_index = issue_index - 1
    cases_with_no_piv_id = []
    #-----------------------------------------------
    # Upload attachments from Pivotal to Jira issues
    # (delete attachment links created by importer)
    #-----------------------------------------------
    for issue_id in pivotal_jira_issue_ids:
        attachments_dir = global_dir + issue_id['piv_id']
        pivotal_issue_info = json.loads(requests.get(pivotal_api_url + issue_id['piv_id'], headers=pivotal_xtracker_token, params=pivotal_attachments).content)
        jira_issue_info = requests.get(jira_api_url + '/issue/' + issue_id['jira_id'], headers=application_json, auth=(jira_user, jira_password))
        jira_issue_json = json.loads(jira_issue_info.content)
        jira_attachments_info = jira_issue_json["fields"]["attachment"]
        for pivotal_comment in pivotal_issue_info["comments"]:
            if pivotal_comment["attachments"]:
                count = 0
                for pivotal_attachment in pivotal_comment["attachments"]:
                    # Retry the download (NOTE(review): the bare except retries
                    # exactly once inside the except clause; consider narrowing
                    # to requests.RequestException).
                    for attempt in range(3):
                        try:
                            download_response = requests.get(pivotal_url + pivotal_attachment["download_url"], headers=pivotal_xtracker_token, allow_redirects=True, timeout=10)
                        except:
                            download_response = requests.get(pivotal_url + pivotal_attachment["download_url"], headers=pivotal_xtracker_token, allow_redirects=True, timeout=10)
                        else:
                            break
                    # Fix: the original read `r.url` with `r` undefined; the
                    # final URL after redirects lives on the response object.
                    attachment_url = download_response.url
                    with open(attachments_dir+'/'+pivotal_attachment["filename"], 'wb') as attachment_file:
                        attachment_file.write(download_response.content)
                    #-------------------------------------------
                    # Delete incorrect attachment link if exists
                    #-------------------------------------------
                    for jira_attachment in jira_attachments_info:
                        if jira_attachment["filename"] == pivotal_attachment["filename"]:
                            to_be_deleted = jira_attachment["self"]
                            print("DELETING")
                            print(to_be_deleted)
                            delete_old_attachment = requests.delete(to_be_deleted, headers=jira_bearer_token)
                    count = count+1
                    print(count)
                    print("ADDING")
                    # Fix: the original printed `d["filename"]` with `d`
                    # undefined; the current attachment's name is intended.
                    print(pivotal_attachment["filename"])
                    jira.add_attachment(issue=jira_issue_json["key"], attachment=attachments_dir+'/'+pivotal_attachment["filename"])
#---------------------------------------------------------
# Write all JIRA issue IDs whith no Pivotal ID to log file
#---------------------------------------------------------
for i in all_cases_with_no_piv_id:
    # Fix: "%s%s\n" % (i) raises TypeError for a single value; one %s per ID.
    log_file.write("%s\n" % (i,))
# Fix: the original referenced .close without calling it, leaving the file
# unflushed.
log_file.close()
|
from optparse import OptionParser
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.chrome.service as service
import inspect
import time
from postman_tests import PostmanTests
class PostmanTestsHistory(PostmanTests):
    """Selenium UI tests for Postman's request-history sidebar.

    Each test_* method returns True/False rather than asserting; the inherited
    run() presumably collects these results -- see PostmanTests.
    """
    def test_1_save_request_to_history(self):
        """Sending a request should add it to the top of the history list."""
        self.set_url_field(self.browser, "http://localhost:5000/get?val=1")
        method_select = self.browser.find_element_by_id("request-method-selector")
        Select(method_select).select_by_value("GET")
        send_button = self.browser.find_element_by_id("submit-request")
        send_button.click()
        code_data_value = self.get_codemirror_value(self.browser)
        if code_data_value.find("get") > 0:
            first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
            value = self.browser.execute_script("return arguments[0].innerHTML", first_history_item)
            if value.find("http://localhost:5000/get?val=1") > 0:
                return True
            else:
                return False
        else:
            return False
    def test_2_load_request_from_history(self):
        """Clicking a history entry should repopulate the URL field."""
        self.set_url_field(self.browser, "")
        first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request")
        first_history_item.click()
        try:
            w = WebDriverWait(self.browser, 10)
            w.until(lambda browser: self.browser.find_element_by_id("url").get_attribute("value") == "http://localhost:5000/get?val=1")
            return True
        except:
            return False
    def test_3_delete_request_from_history(self):
        """Deleting the only history entry should leave the list empty."""
        first_history_item = self.browser.find_element_by_css_selector("#history-items li:nth-of-type(1) .request-actions .request-actions-delete")
        first_history_item.click()
        history_items = self.browser.find_elements_by_css_selector("#history-items li")
        if len(history_items) == 0:
            return True
        else:
            return False
    def test_4_clear_history(self):
        """'Clear all' should empty the history after two requests are made."""
        self.set_url_field(self.browser, "http://localhost:5000/html?val=1")
        method_select = self.browser.find_element_by_id("request-method-selector")
        Select(method_select).select_by_value("GET")
        send_button = self.browser.find_element_by_id("submit-request")
        send_button.click()
        # Waits for the response
        self.get_codemirror_value(self.browser)
        self.set_url_field(self.browser, "http://localhost:5000/html?val=2")
        method_select = self.browser.find_element_by_id("request-method-selector")
        Select(method_select).select_by_value("GET")
        send_button = self.browser.find_element_by_id("submit-request")
        send_button.click()
        # Waits for the response
        self.get_codemirror_value(self.browser)
        clear_all_button = self.browser.find_element_by_css_selector("#history-options .history-actions-delete")
        clear_all_button.click()
        history_items = self.browser.find_elements_by_css_selector("#history-items li")
        if len(history_items) == 0:
            return True
        else:
            return False
PostmanTestsHistory().run()
|
#This script generates hindi/telugu subtitle file from parallel translated files using eng srt file
from argparse import ArgumentParser
import re
import os
import sys
import pysrt
import nltk
import logger as log
# Command-line interface for the subtitle aligner.
parser = ArgumentParser(description='This script will align Subtitle translation files\n\r'+
                        "How to Run?\n" +
                        "python3 " + sys.argv[0] + " -i=input.srt" + " -s=srctext.txt -t=target.txt"
                        )
parser.add_argument("-i", "--input", dest="inputfile",
                    help="provide .srt file name",required=True)
parser.add_argument("-s", "--source", dest="sourcefile",
                    help="provide sentence aligned source file",required=True)
parser.add_argument("-t", "--target", dest="targetfile",
                    help="provide sentence aligned target file",required=True)
parser.add_argument("-l", "--lang", dest="lang",
                    help="provide 3 letter language code", required=False)
# NOTE(review): the option string "--method heuristic" contains a space and
# cannot actually be typed on a command line; only "-m" is usable -- confirm.
parser.add_argument("-m", "--method heuristic", dest="h",
                    help="Use heuristic approach -h=y",required=False)
log.logging.info("Parsing command line arguments")
args = parser.parse_args()
inputfile = args.inputfile
sourcefile = args.sourcefile
targetfile = args.targetfile
lang = args.lang
h = args.h
log.logging.info("Received following arguments: inputfile=%s, source file=%s, target file=%s, lang=%s" %(inputfile, sourcefile, targetfile, lang))
# Heuristic alignment is the default unless a method flag was given.
if(h is None):
    h = 'y'
else:
    h = h.lower()
# Module-level state shared by the functions below.
new_line = []
new_line2 = []
timeline = []
timeline_hash = {}
src_tgt_hash = {}
final_out = []
outfp = open("tmp_hash.txt","w")
outfp1 = open("not_substituted.txt", "w")
def srctgthash(s,t):
    """Populate src_tgt_hash, mapping each normalised English source line to
    its translated target line.

    s, t: paths to line-aligned source/target text files.  Exits the process
    when the two files have different line counts.
    """
    with open(s) as fp1:
        slines = fp1.read().split("\n")
    with open(t) as fp2:
        tlines = fp2.read().split("\n")
    #print(len(slines),len(tlines))
    l1 = len(slines)
    l2 = len(tlines)
    if(l1 != l2):
        log.logging.info("Exiting because source file and target file line numbers mismatched.")
        print("Source file and target file line numbers mismatch!")
        exit()
    for s, t in zip(slines, tlines):
        # Protect decimal points and '!' from the punctuation stripping below.
        s = re.sub(r'(\d+)\.(\d+)', r'\1#DOT\2', s)
        s = re.sub(r'!', r'#FACT', s)
        #print(s,t)
        s = re.sub(r'[\-,\.\'\"\-]', " ", s)
        s = s.strip()
        t = t.strip()
        s = re.sub(r'\s+',' ', s)
        t = re.sub(r' ?\| ?', '|', t, flags = re.MULTILINE)
        # Keep multi-word ((...)) annotations as single '####'-joined tokens.
        t = re.sub(r'\(\(.+?\)\)', lambda x:x.group().replace(" ","####"), t)
        s = re.sub(r'^ ', '', s)
        s = re.sub(r' $', '', s)
        src_tgt_hash[s.lower()] = t
def extractTextFromSRT(i):
    """Read subtitle file `i`, appending each cue's timestamps and text to
    new_line, and a '##<n>' placeholder plus text to new_line2; timeline_hash
    maps each placeholder back to its 'start --> end' timestamp string."""
    srtfilename = i
    subs = pysrt.open(srtfilename)
    ts_start = []
    ts_end = []
    remaining_text = ''
    front_text = ''
    #print(subs)
    count = 1
    for sub in subs:
        timeline_start = str(sub.start)
        timeline_end = str(sub.end)
        cur_text = sub.text
        cur_text = re.sub(r'\n', ' ' ,cur_text)
        # Protect decimals and '!' (same #DOT/#FACT scheme as srctgthash).
        cur_text = re.sub(r'(\d+)\.(\d+)', r'\1#DOT\2', cur_text)
        cur_text = re.sub(r'!', r'#FACT', cur_text)
        cur_text = re.sub(r'([\-,\.\'\"\-])', r"\1 ", cur_text)
        sub_placeholder2 = "##" + str(count) + ""
        sub_placeholder = str(sub.start) + " --> " + str(sub.end)
        timeline_hash[sub_placeholder2] = str(sub.start) + " --> " + str(sub.end)
        #new_line.append("[" + str(sub.start) + " --> " + str(sub.end) + "]")
        new_line.append(sub_placeholder)
        new_line.append(cur_text)
        new_line2.append(sub_placeholder2)
        new_line2.append(cur_text)
        #new_line[-1] = new_line[-1].strip() + cur_text
        count = count + 1
def alignSRT():
    """Non-heuristic alignment: for each tokenized sentence print its
    translation when found in src_tgt_hash, otherwise echo the English."""
    count = 1
    #print(count)
    for line in new_line:
        if(re.search(r'-->',line)):
            count = count + 1
        else:
            sentences = nltk.tokenize.sent_tokenize(line)
            for sentence in sentences:
                # Normalise the sentence before the hash lookup.
                st = sentence.lower()
                st = re.sub(r'[\-,\.\'\"\-]', "", st)
                st = st.strip()
                st = re.sub(r'\s+',' ', st)
                st = re.sub(r'^ ', '', st)
                st = re.sub(r' $', '', st)
                if(st in src_tgt_hash):
                    t = src_tgt_hash[st]
                    t = re.sub(r'####', ' ', t)
                    print(t, end='')
                else:
                    print(sentence, end='')
            print("\n")
def alignSRT2():
    """Heuristic alignment: join all cue text (with '##<n>' placeholders),
    re-tokenize into sentences, translate each via src_tgt_hash, and re-insert
    the cue timestamps near the placeholder's word position.  Results are
    appended to final_out; unmatched sentences also go to not_substituted.txt.
    """
    count = 1
    #print(new_line)
    eng_sub = ' '.join(new_line2)
    eng_sub = re.sub(r'\[?MUSIC\]?', 'MUSIC.', eng_sub, flags=re.IGNORECASE)
    #eng_sub = re.sub(r'([ \-\+])([a-z])\.', r'\1\2.\n', eng_sub)
    #print(eng_sub)
    sentences = nltk.tokenize.sent_tokenize(eng_sub)
    log.logging.info("After tokenization of english sentences, sent=%s" %('\n'.join(sentences)))
    #sentences = eng_sub.split("]")
    count = 1
    # For Telugu, drop common English function words before word-position math.
    if(lang == "tel"):
        words = ['the', 'in', 'a', 'that', 'to', 'as', 'into', 'at']
    else:
        words = []
    #print(sentences)
    for s in sentences:
        s = s.lower()
        log.logging.info("Current sentence after lower case=%s" %(s))
        #print(s)
        if(1):
            s_original = s
            # Same #DOT/#FACT protection and normalisation as srctgthash, so
            # lookups line up with the hash keys.
            s = re.sub(r'(\d+)\.(\d+)', r'\1#DOT\2', s)
            s = re.sub(r'!', r'#FACT', s)
            indices = re.finditer(r'##\d+', s)
            s = re.sub(r'##\d+', '', s)
            s_tmp = s
            s = re.sub(r'[\-,\.\'\"\-]', " ", s)
            s = s.strip()
            s = re.sub(r'\s+',' ', s)
            s = re.sub(r'^ ', '', s)
            s = re.sub(r' $', '', s)
            #print(s)
            if(s == "music."):
                s = "music"
            #print("hello"+s)
            if(s in src_tgt_hash):
                #print("Im ahre")
                s_trans = src_tgt_hash[s]
            else:
                # No translation found: log to not_substituted.txt and fall
                # back to the (placeholder-stripped) English text.
                write_out = re.sub(r' +', ' ', s_tmp)
                outfp1.write(write_out + "\n")
                outfp1.write(s_original + "\n")
                s_trans = s_tmp
            log.logging.info("After finding in hash target text=%s" %(s_trans))
            #print(s_original)
            for w in words:
                s_original = re.sub(r' '+w+' ', ' ', s_original)
            #print(s_original)
            space_split = s_original.split(" ")
            space_split_trans = s_trans.split(" ")
            #print("Iam ", s_original)
            if(re.search(r'##\d+', s_original)):
                # Re-insert each cue timestamp just before the word position
                # where its '##<n>' placeholder sat in the English sentence.
                for i in indices:
                    #print(i, s_original)
                    if(i is None):
                        final_trans = ' '.join(space_split_trans)
                        break
                    #print(space_split_trans, "ii")
                    insert_ph = i.group()
                    char_index = i.start()
                    #print(insert_ph, char_index)
                    #s_trans = s_trans[:char_index] + insert_ph + s_trans[char_index:]
                    word_index = space_split.index(insert_ph)
                    insert_ph = timeline_hash[insert_ph]
                    target_index = word_index-1
                    if(target_index < 0):
                        target_index = 0
                    space_split_trans.insert(target_index, insert_ph)
                    #final_trans = '\n' + str(count) + '\n' + ' '.join(space_split_trans)
                    final_trans = ' '.join(space_split_trans)
                    count = count + 1
                    final_trans = re.sub(r' +', ' ', final_trans)
                    #print("1",final_trans)
                    #print(s, i.start(), i.group())
                    #final_trans = re.sub(r'(\n)+', '\n', final_trans)
                final_out.append(final_trans)
            else:
                #print("")
                #print(s_trans +"\n")
                final_out.append(s_trans)
        else:
            print(s+"\n")
def printhash():
    """Dump the source-to-target mapping to tmp_hash.txt (one TAB-separated
    pair per line) for debugging."""
    for source_key, target_value in src_tgt_hash.items():
        outfp.write(source_key + "\t" + target_value + "\n")
log.logging.info("Going to making hash from source file and target file")
srctgthash(sourcefile, targetfile)
log.logging.info("Going to extract text from srt file")
extractTextFromSRT(inputfile)
log.logging.info("After text extraction from srt, text=%s" %("\n".join(new_line2)))
# Choose heuristic alignment (default) or plain per-sentence substitution.
if(h == 'y'):
    log.logging.info("Going into align function")
    alignSRT2()
else:
    alignSRT()
#print(final_out)
count = 1
# Re-split the aligned text on 'HH:MM:SS,mmm --> HH:MM:SS,mmm' timestamps and
# emit numbered SRT blocks to stdout.
for p in final_out:
    p = re.sub(r'####', ' ', p)
    #p = re.sub(r'[^>] (\d)', r'\n\n\1', p)
    #p = re.sub(r'--> (\d\d:\d\d:\d\d,\d\d\d)', r'--> \1\n', p)
    mall = re.split(r'(\d\d:\d\d:\d\d,\d\d\d --> \d\d:\d\d:\d\d,\d\d\d)', p)
    for m in mall:
        m = re.sub(r' +', ' ', m)
        m = re.sub(r'^ ', '', m)
        m = re.sub(r' $', '', m)
        if(m):
            if(re.search(r'\d\d:\d\d:\d\d,\d\d\d --> \d\d:\d\d:\d\d,\d\d\d', m)):
                if(count == 1):
                    print(count)
                else:
                    print("\n", count, sep='')
                count = count + 1
            print(m)
    #print(p + "\n")
printhash()
#print(timeline_hash)
#exit(0)
outfp.close()
outfp1.close()
def pay(hours, baserate):
    """Compute gross pay: straight time up to 40 hours, then time-and-a-half
    for every hour beyond 40."""
    if hours > 40:
        overtime = (hours - 40) * (baserate * 1.5)
        return (40 * baserate) + overtime
    return hours * baserate
def main():
    """Prompt for "hours, rate" and print the total pay."""
    # Security fix: eval() on raw user input executes arbitrary code.
    # ast.literal_eval parses the same "h, r" literal tuple but evaluates
    # only literals.
    import ast
    hours, baserate = ast.literal_eval(input("Please input hours and hourly rate: "))
    print ("Total pay: " + str(pay(hours, baserate)))
main()
# Generated by Django 3.1.7 on 2021-03-03 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional receiver_name/sender_name text fields to the messages
    model."""
    dependencies = [
        ('core', '0019_auto_20210301_2027'),
    ]
    operations = [
        migrations.AddField(
            model_name='messages',
            name='receiver_name',
            field=models.TextField(blank=True, max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='messages',
            name='sender_name',
            field=models.TextField(blank=True, max_length=100, null=True),
        ),
    ]
|
# file: image.py
# author: Muhammad Mushfiqur Rahman <mushfiq.rahman@tum.de>
# date: 05-21-2019
# This is a image model file.
import sys, os
import uuid
from database.dbutility import time_iso8601
from database.models.project import Project
class Image(object):
    """In-memory model for an image record; attribute names mirror the
    database column names."""
    def __init__(self):
        #All object parameters have the same name as on the database
        #self.project = Project()
        self.Image_Id = uuid.uuid4().hex  # random hex id for this image
        self.Ext_Image = ""               # file extension
        self.Dir_Image = ""               # storage directory
        self.Web_Url = ""                 # public URL of the image
        self.Project_Id = ""              # id of the project this image belongs to
        self.Labeled = 0                  # labeling state -- TODO confirm count vs flag
        self.Critical = 0
        self.Labeling_Running = False     # True while a labeling job is in progress
        self.Quality_Labeling = 0
        self.Is_Deleted = False           # soft-delete flag
        self.Date_Image = time_iso8601()  # creation timestamp (ISO-8601)
        self.Orig_Image_Name = ""         # original upload filename
|
import pandas as pd
import os
# Input CSV locations, resolved relative to this module (<repo>/input).
ratings_small_input_file = os.path.join(os.path.dirname(__file__), "../../", "input", "ratings_small.csv")
ratings_input_file = os.path.join(os.path.dirname(__file__), "../../", "input", "ratings.csv")
movies_input_file = os.path.join(os.path.dirname(__file__), "../../", "input", "movies_metadata.csv")
def get_ratings_small():
    """Load the small ratings CSV into a DataFrame."""
    return pd.read_csv(ratings_small_input_file)
def get_ratings(size=-1):
    """Load ratings.csv; when size > 0, return only the first `size` rows."""
    frame = pd.read_csv(ratings_input_file)
    return frame[:size] if size > 0 else frame
def _drop_odd_ids(movies):
# Delete odd ids (e.g.: dates)
odd_ids = [id for id in movies['id'] if len(id) > 6]
movies = movies.drop(movies.loc[movies['id'].isin(odd_ids)].index, axis=0)
return movies
def get_movies_ids():
    """Return the cleaned movie IDs from movies_metadata.csv as a Series of
    ints."""
    metadata = _drop_odd_ids(pd.read_csv(movies_input_file))
    return metadata['id'].apply(int)
def get_movies_from_ids(movies_ids):
    """Return the metadata rows whose integer 'id' appears in movies_ids."""
    metadata = _drop_odd_ids(pd.read_csv(movies_input_file))
    metadata['id'] = metadata['id'].apply(int)
    wanted = metadata['id'].isin(movies_ids)
    return metadata.loc[wanted]
|
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
# Plot a simple zig-zag sequence against its indices -- a minimal sanity check
# that matplotlib renders.
plt.plot([1,2,3,2,1,2,3,4,5,6,5,4,3,2,1])
plt.show()
|
#!/usr/bin/python
import argparse
import sys
import os
import re
def getReadFromFasta(fin):
    """Yield one fasta record at a time from the line iterable `fin`.

    A record is the '>' header line plus all following sequence lines,
    concatenated with their newlines.  Blank lines are skipped.
    """
    output = ""
    for line in fin:
        if line.strip("\n").strip() != "":
            if line[0] != ">":
                output += line
            else:
                if output != "":
                    yield output
                output = line
    # Fix: the original yielded unconditionally here, emitting a spurious
    # empty string for an empty (or all-blank) input.
    if output != "":
        yield output
## =================================================================
## Given a fasta file, find the sequence for the specific region of
## a sequence with given ID.
## Return -1 if the sequence does not exist or its sequence does not
## include the specified region.
## =================================================================
def get_contig(fastaFile, seqID, start, end, limit):
    ''' Given a fasta file, find the sequence for the specific region of
        a sequence with given ID.
        Return -1 if the sequence does not exist or its sequence does not
        include the specified region.
        Input:  fastaFile - file that includes all the sequences
                seqID - ID of the sequence of interest (substring match)
                start - 1-based starting position
                end - 1-based ending position (-1 means "to the end")
                limit - stop after this many matches (-1 means no limit)
        Output: writes matches to stdout; returns -1 on error, None on success
    '''
    if not os.path.exists(fastaFile):
        sys.stderr.write("fastaFile {} does not exist!\n".format(fastaFile))
        return -1
    if end != -1 and start > end:
        sys.stderr.write("Please check input for starting and ending positions.\n")
        return -1
    fasta = open(fastaFile,'r')
    line = fasta.readline()
    count = 0
    nextline = ''
    # Fix: seq must exist even when no header matches seqID; the original only
    # assigned it inside the match branch, so the final `seq == ""` test
    # raised UnboundLocalError for absent IDs.
    seq = ""
    while line != "" and (limit==-1 or count < limit):
        if line[0] == '>':
            thisID = line.strip().split()[0][1:] # sequence ID until the first space
            if seqID in thisID :
                count += 1
                seq = ""
                sys.stdout.write('>{}\n'.format(thisID))
                nextline = fasta.readline().strip("\n")
                # Accumulate sequence lines until the next header, or until we
                # have enough characters to slice the requested region.
                while nextline!='' and nextline[0] != '>' and (end==-1 or len(seq)<=end):
                    seq += nextline
                    nextline = fasta.readline().strip("\n")
                if start <= end and end <= len(seq):
                    sys.stdout.write('{}\n'.format(seq[(start - 1):end]))
                elif end == -1:
                    sys.stdout.write('{}\n'.format(seq[(start - 1):]))
                else:
                    sys.stderr.write("end position passed end of the sequence!\n")
                    sys.stdout.write('{}\n'.format(seq[(start - 1):]))
                    fasta.close()  # fix: the handle leaked on this early return
                    return -1
        if nextline != '' and nextline[0] == '>':
            line = nextline # look into header line that was read just now
            nextline = ''
        else:
            line = fasta.readline()
    if seq == "":
        sys.stderr.write("sequence {} was not found in file {}!\n".format(seqID, fastaFile))
        fasta.close()  # fix: the handle leaked on this early return
        return -1
    fasta.close()
def get_IDs(id_file):
    """Read sequence IDs (first whitespace-separated field of each line).

    Returns the IDs deduplicated and numerically sorted, as strings.
    """
    seqIDs = []
    with open(id_file, 'r') as ids:
        for line in ids:
            fields = line.strip("\n").split()
            if fields:  # skip blank lines instead of raising IndexError
                seqIDs.append(fields[0])
    # sorted() over a set replaces the Python-2-only ``map(...).sort()``:
    # on Python 3, map() returns an iterator with no .sort() and no len().
    seqIDs = [str(n) for n in sorted(set(int(s) for s in seqIDs))]
    sys.stderr.write("Number of IDs to extract: {}\n".format(len(seqIDs)))
    return seqIDs
def get_contigs(fastaFile, seqIDs):
    """Print each record of *fastaFile* that matches the next entry of *seqIDs*.

    Both the file and the ID list are assumed to be in the same sorted order;
    reading stops once every requested ID has been emitted.
    """
    wanted = len(seqIDs)
    cursor = 0
    with open(fastaFile, 'r') as fin:
        for record in getReadFromFasta(fin):
            record_id = record.split()[0][1:]  # header token without the '>'
            if record_id != seqIDs[cursor]:
                continue
            sys.stdout.write(record)
            cursor += 1
            if cursor == wanted:
                break
## =================================================================
## argument parser
## =================================================================
parser = argparse.ArgumentParser(
    description="fetch sequence from fasta file given an ID and the starting, ending positions",
    prog='get_contig',                                       # program name
    prefix_chars='-',                                        # prefix for options
    fromfile_prefix_chars='@',                               # options may be read from a file, '@args.txt'
    conflict_handler='resolve',                              # how conflicting options are handled
    add_help=True,                                           # include help in the options
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,  # show default values in the help message
)
# Input file and sequence-selection options.
parser.add_argument("-i", "--in", dest='fastaFile', required=True,
                    help="input fasta file")
parser.add_argument("-f", "--id_file", dest='id_file',
                    help="file with reads/contigs ids to extract")
parser.add_argument("-n", "--name", dest='seqID',
                    help="ID of the sequence to find")
parser.add_argument("-s", "--start", dest='start', default=1, type=int,
                    help="1-based starting position of the sequence")
parser.add_argument("-e", "--end", dest='end', default=-1, type=int,
                    help="1-based ending position of the sequence")
parser.add_argument("-l", "--limit", dest='limit', default=-1, type=int,
                    help="maximum number of sequences to return")
## output directory
#parser.add_argument("-o","--out",help="output directory",dest='outputDir',required=True)
## =================================================================
## main function
## =================================================================
def main(argv=None):
    """CLI entry point: fetch one sequence by ID, or many from an ID file."""
    if argv is None:
        args = parser.parse_args()
    # A target is mandatory: either a single ID or a file of IDs.
    if args.seqID is None and args.id_file is None:
        sys.exit("At least a sequence ID or a file of IDs is needed.\n")
    if args.seqID is not None:
        get_contig(args.fastaFile, args.seqID, args.start, args.end, args.limit)
    elif args.id_file is not None:
        get_contigs(args.fastaFile, get_IDs(args.id_file))
##==============================================================
## call from command line (instead of interactively)
##==============================================================
if __name__ == '__main__':
    # main() returns None on success, so the process exit status is 0 unless
    # sys.exit was already invoked with a message inside main().
    sys.exit(main())
|
import sys
# Fail fast on Python 2. NOTE(review): assert statements are stripped when
# running with ``python -O``; an explicit check + sys.exit would be sturdier.
assert sys.version_info[0] >= 3, "Python 3 required."
# NOTE(review): OrderedDict is not used anywhere in this file — likely a
# leftover from when insertion-ordered dicts were not guaranteed (pre-3.7).
from collections import OrderedDict
def label_to_nl(label_file, nl_file, range_min, range_max):
    """Convert ca65-style debug labels to an FCEUX-style .nl symbol file.

    Reads *label_file*, keeps ``al <hexaddr> <symbol>`` entries whose address
    falls within [range_min, range_max], and writes one ``$ADDR#syms#`` line
    per address to *nl_file*. Compiler-internal symbols (leading '__') are
    skipped; multiple symbols at one address are space-joined.
    """
    try:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(label_file, "rt") as of:
            labels = of.readlines()
    except IOError:
        print("skipped: " + label_file)
        return
    labs = {}
    for line in labels:
        words = line.split()
        # Guard against blank/short lines, which used to raise IndexError.
        if len(words) < 3 or words[0] != "al":
            continue
        adr = int(words[1], 16)
        sym = words[2].lstrip('.')
        # startswith avoids the IndexError the old sym[0]/sym[1] pair hit
        # on symbols shorter than two characters (e.g. a bare "_").
        if sym.startswith('__'):
            continue  # skip compiler internals
        if range_min <= adr <= range_max:
            if adr in labs:
                # Multiple symbols at the same address: append only if new.
                if sym not in labs[adr].split():
                    labs[adr] = labs[adr] + " " + sym
            else:
                labs[adr] = sym
    sout = ""
    for (adr, sym) in labs.items():
        sout += ("$%04X#%s#\n" % (adr, sym))
    with open(nl_file, "wt") as out:
        out.write(sout)
    print("debug symbols: " + nl_file)
if __name__ == "__main__":
    # Emit .nl symbol files for three address windows of the same label set;
    # the 0x8000/0xC000 split matches the two 16 KB halves above RAM.
    label_to_nl("temp\\palette_labels.txt", "temp\\palette.nes.ram.nl", 0x0000, 0x7FF)
    label_to_nl("temp\\palette_labels.txt", "temp\\palette.nes.0.nl", 0x8000, 0xBFFF)
    label_to_nl("temp\\palette_labels.txt", "temp\\palette.nes.1.nl", 0xC000, 0xFFFF)
|
import pandas as pd
import mido
def midi2nmat(path):
    """Return a note matrix (DataFrame) from the MIDI file at *path*.

    Each row is one note-on event with columns Pitch, Time (beats), Velocity,
    Voice (channel) and Duration (beats). Note-offs are assumed to be encoded
    as note-on events with velocity 0, paired with note-ons by order.
    (Earlier revisions also leaked all-NaN Type/Ticks columns into the result
    via DataFrame.append; those junk columns are no longer emitted.)
    """
    assert (type(path) is str), "Filepath must be a string: %r" % path
    mid = mido.MidiFile(path)
    tpb = mid.ticks_per_beat  # ticks per beat: converts tick deltas to beats
    columns = ["Type", "Voice", "Pitch", "Velocity", "Ticks"]
    per_track = []
    for track in mid.tracks:
        rows = []
        for msg in track:
            if msg.type == "note_on":
                rows.append({"Type": msg.type, "Voice": msg.channel,
                             "Pitch": msg.note, "Velocity": msg.velocity,
                             "Ticks": msg.time})
            else:
                # Non-note messages still carry delta times that must be
                # accumulated to keep absolute timing correct.
                rows.append({"Type": msg.type, "Ticks": msg.time})
        midiframe = pd.DataFrame(rows, columns=columns)
        # Absolute time in beats from the cumulative delta ticks.
        midiframe["Time"] = midiframe["Ticks"].cumsum() / tpb
        note_on = midiframe.loc[(midiframe["Velocity"] != 0) & (midiframe["Type"] == "note_on")]
        note_off = midiframe.loc[(midiframe["Velocity"] == 0) & (midiframe["Type"] == "note_on")]
        note_on = note_on.reset_index(drop=True)
        note_off = note_off.reset_index(drop=True)
        # Pair the i-th note-on with the i-th zero-velocity note-on.
        note_on["Duration"] = note_off["Time"] - note_on["Time"]
        per_track.append(note_on[["Pitch", "Time", "Velocity", "Voice", "Duration"]])
    # DataFrame.append was removed in pandas 2.0; pd.concat is the supported API.
    if per_track:
        output = pd.concat(per_track, ignore_index=True)
    else:
        output = pd.DataFrame(columns=["Pitch", "Time", "Velocity", "Voice", "Duration"])
    return output
|
from django.urls import path
from . import views
# URL routes for this app.
# NOTE(review): all three live routes point at views.index AND share the name
# 'index'; reverse('index') will resolve to only one of them — confirm this is
# intentional before wiring up the commented-out class-based views below.
urlpatterns = [
    path('', views.index, name='index'),
    # path('upload', NewAudio.as_view())),
    path('uploads', views.index, name='index'),
    path('subscriptions', views.index, name='index'),
    # path('<int:id>', AudioListen.as_view()),
    # path('comment', CommentView.as_view()),
    # path('get_audio/<file_name>', AudioFileView.as_view()),
]
class Product:
    """A hard-coded sample product used for attribute-access demos."""

    def __init__(self):
        # Fixed demo values; the constructor takes no arguments.
        self.name = 'Iphone'
        self.description = 'High Price'
        self.price = 1000
pl = Product()
# Print each attribute on its own line, in the original order.
for attr in ('name', 'description', 'price'):
    print(getattr(pl, attr))
|
#!/usr/bin/env python3
# Lazily parse one stdin line of whitespace-separated integers.
nums = map(int, input().split())
def metasum(nums):
    """Sum all metadata entries of the tree encoded in the iterator *nums*.

    Each node is laid out as: child count, metadata count, the child nodes
    (recursively), then the metadata entries. The iterator is consumed in
    exactly that order.
    """
    child_count = next(nums)
    meta_count = next(nums)
    total = 0
    for _ in range(child_count):
        total += metasum(nums)
    for _ in range(meta_count):
        total += next(nums)
    return total
print(metasum(nums))
|
import numpy as np
from utils import im2col
class ConvolutionalNet():
    """
    Description:
        The definition of our naive CNN, it is made of
        One convolution layer
        One ReLU layer
        One pooling layer
        One fully connected layer
        One softmax layer / output layer
        It is loosely based on the ninth chapter of the excellent deep learning book
        (http://www.deeplearningbook.org/contents/convnets.html)
    Good read on the subject:
        Deep Learning Book (http://www.deeplearningbook.org/)
        Understanding the difficulty of training deep feedforward neural neural network (http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf)
        Delving Deep into Rectifiers (https://arxiv.org/pdf/1502.01852.pdf)
    """
    def __init__(self, kernel_count, kernel_size, input_size, output_size):
        """
        Description: The ConvNet contructor.
        Parameters:
            kernel_count -> The number of kernel to use
            kernel_size -> The size of the kernel to use (always a square)
            output_size -> The number of classes
            input_size -> A tuple that defines the size of the input
        """
        self.kernel_count = kernel_count
        self.kernel_size = kernel_size
        self.input_size = input_size
        self.output_size = output_size
        # Random initialization of our kernels based on a gaussian distribution with a std of 1.0
        # See https://arxiv.org/pdf/1502.01852.pdf Page 3
        self.kernels = [
            np.random.normal(size=(kernel_size, kernel_size)) for x in range(kernel_count)
        ]
    def __convolution(self, inputs, stride=1, padding=0):
        """
        Description: Convolution layer
        """
        # NOTE(review): '/' gives a float here; a reshape dimension must be an
        # int, so this should presumably be '//' — confirm.
        new_size = (np.shape(inputs)[1] - self.kernel_size + 2 * padding) / stride + 1
        tile_col = im2col(inputs, self.kernel_size, stride, padding)
        # NOTE(review): this reshapes the *count* (an int), not the kernels —
        # presumably np.reshape(self.kernels, ...) was intended; as written
        # 'result' below is computed from the wrong operand.
        kernel_col = np.reshape(self.kernel_count, -1)
        result = np.dot(tile_col, kernel_col)
        # NOTE(review): 'result' is unused, the count is reshaped again, and
        # np.reshape takes the shape as a single tuple — this return cannot be
        # correct as written.
        return np.reshape(self.kernel_count, new_size, new_size)
    def __max_pool(self, inputs, size, stride, padding):
        """
        Description: Max pool layer
        Parameters:
            inputs -> The input of size [batch_size] x [filter] x [shape_x] x [shape_y]
            size -> The size of the tiling
            stride -> The applied translation at each step
            padding -> The padding (padding with 0 so the last column isn't left out)
        """
        inp_sp = np.shape(inputs)
        # We reshape it so every filter is considered an image.
        # NOTE(review): 'reshaped' is never defined — the reshape described by
        # the comment above is missing; this raises NameError when called.
        tile_col = im2col(reshaped, size, stride=stride, padding=padding)
        # We take the max of each column
        max_ids = np.argmax(tile_col, axis=0)
        # We get the resulting 1 x 10240 vector
        result = tile_col[max_ids, range(max_ids.size)]
        # NOTE(review): float division again — reshape below needs ints.
        new_size = (inp_sp[2] - size + 2 * padding) / stride + 1
        result = np.reshape(result, (new_size, new_size, inp_sp[0]))
        # Make it from 16 x 16 x 10 to 10 x 16 x 16
        return np.transpose(result, (2, 0, 1))
    def __avg_pool(self, inputs, size, stride, padding):
        """
        (Copy & paste of the max pool code with np.mean instead of np.argmax)
        Description: Average pool layer
        Parameters:
            inputs -> The input of size [batch_size] x [filter] x [shape_x] x [shape_y]
            size -> The size of the tiling
            stride -> The applied translation at each step
            padding -> The padding (padding with 0 so the last column isn't left out)
        """
        inp_sp = np.shape(inputs)
        # NOTE(review): same undefined 'reshaped' as in __max_pool.
        tile_col = im2col(reshaped, size, stride=stride, padding=padding)
        # NOTE(review): np.mean returns float *values*, not indices; indexing
        # tile_col with them below cannot work — the mean values themselves
        # are presumably the intended result.
        max_ids = np.mean(tile_col, axis=0)
        result = tile_col[max_ids, range(max_ids.size)]
        new_size = (inp_sp[2] - size + 2 * padding) / stride + 1
        result = np.reshape(result, (new_size, new_size, inp_sp[0]))
        return np.transpose(result, (2, 0, 1))
    def __rectified_linear(self, inputs):
        """
        Description: Rectified Linear Unit layer (ReLU)
        """
        # The third argument is the 'out' buffer, so this clips *in place*,
        # mutating the caller's array as well as returning it.
        return np.maximum(inputs, 0, inputs)
    def __fully_connected(self, inputs, weights):
        """
        Description: Fully connected layer
        Parameters:
            unit_count -> The number of units in the layer
        """
        # NOTE(review): the documented 'unit_count' is not a parameter, and
        # self.unit_count is never assigned anywhere in this class; also
        # np.shape(...) returns a tuple, not a scalar dimension.
        return np.dot(inputs, np.reshape(weights, (np.shape(inputs), np.shape(self.unit_count))))
    def __softmax(self, inputs):
        """
        Description: Softmax function for the output layer
        """
        # NOTE(review): 'x' is undefined — the parameter is 'inputs'; this
        # raises NameError when called.
        return np.exp(x) / np.sum(np.exp(x), axis=0)
    def __forwardpropagation(self, inputs):
        """
        Description: Gives a response based on input
        """
        # My goal was to do something like this, but it's unreadable
        # return _fully_connected(_max_pooling(_rectified_linear(_convolution(inputs, kernels))))
        res_conv = self.__convolution(inputs)
        res_relu = self.__rectified_linear(res_conv)
        # NOTE(review): __avg_pool requires size/stride/padding arguments —
        # this call raises TypeError; self.full_connected_weights is also
        # never initialized.
        res_pool = self.__avg_pool(res_relu)
        res_full = self.__fully_connected(res_pool, self.full_connected_weights)
        return self.__softmax(res_full)
    def __backpropagation(self, mean_squared_error):
        """
        Description: Weight adjusting algorithm
        """
        # NOTE(review): unimplemented stub — the docstring is the whole body,
        # so training currently adjusts nothing.
    def train(self, data, labels, batch_size, iteration_count, alpha):
        """
        Description: Train the ConvNet
        Parameters:
            data -> The data to be used for training
            labels -> The labels for the data
            batch_size -> The size of a batch used for one iteration
            iteration_count -> The number of iterations for a full training
            alpha -> The learning rate alpha
        """
        # For the sake of simplicity we use Mean Squared Error
        for x in range(iteration_count):
            print('Iteration #{}'.format(x))
            errors = np.zeros((batch_size, self.output_size))
            for y in range(batch_size):
                errors[y, :] = (self.__forwardpropagation(data[x * batch_size + y]) - labels[x * batch_size + y])**2
            # NOTE(review): axis=1 averages across classes per sample, so a
            # length-batch_size vector is passed to backpropagation — confirm
            # that is the intended reduction.
            self.__backpropagation(np.mean(errors, axis=1))
    def test(self, data, labels):
        """
        Description: Test the ConvNet
        Parameters:
            data -> The data to be used for testing
            labels -> The labels to be used for testing
        """
        good = 0
        for x in range(np.shape(data)[0]):
            # NOTE(review): 'feedforward' is undefined — presumably
            # self.__forwardpropagation was meant; this raises NameError.
            if np.argmax(feedforward(data[x, :])) == np.argmax(labels[x, :]):
                good += 1
        print('The network successfully identified {} / {} examples.'.format(good, np.shape(data)[0]))
|
"""
======================COPYRIGHT/LICENSE START==========================
EditPeakAliasing.py: Part of the CcpNmr Analysis program
Copyright (C) 2003-2010 Wayne Boucher and Tim Stevens (University of Cambridge)
=======================================================================
The CCPN license can be found in ../../../../license/CCPN.license.
======================COPYRIGHT/LICENSE END============================
for further information, please contact :
- CCPN website (http://www.ccpn.ac.uk/)
- email: ccpn@bioc.cam.ac.uk
- contact the authors: wb104@bioc.cam.ac.uk, tjs23@cam.ac.uk
=======================================================================
If you are using this software for academic purposes, we suggest
quoting the following references:
===========================REFERENCE START=============================
R. Fogh, J. Ionides, E. Ulrich, W. Boucher, W. Vranken, J.P. Linge, M.
Habeck, W. Rieping, T.N. Bhat, J. Westbrook, K. Henrick, G. Gilliland,
H. Berman, J. Thornton, M. Nilges, J. Markley and E. Laue (2002). The
CCPN project: An interim report on a data model for the NMR community
(Progress report). Nature Struct. Biol. 9, 416-418.
Wim F. Vranken, Wayne Boucher, Tim J. Stevens, Rasmus
H. Fogh, Anne Pajon, Miguel Llinas, Eldon L. Ulrich, John L. Markley, John
Ionides and Ernest D. Laue (2005). The CCPN Data Model for NMR Spectroscopy:
Development of a Software Pipeline. Proteins 59, 687 - 696.
===========================REFERENCE END===============================
"""
from memops.general import Implementation
from memops.gui.ButtonList import UtilityButtonList
from memops.gui.IntEntry import IntEntry
from memops.gui.LabelDivider import LabelDivider
from memops.gui.ScrolledMatrix import ScrolledMatrix
from ccpnmr.analysis.core.AssignmentBasic import aliasedPeakDimPosition
from ccpnmr.analysis.popups.BasePopup import BasePopup
from ccpnmr.analysis.core.PeakBasic import setPeakDimNumAliasing
from ccpnmr.analysis.core.UnitConverter import unit_converter
class EditPeakAliasingPopup(BasePopup):
    """
    **Move Aliased Peaks to Their Underlying Resonance Position**
    This popup window is used to move aliased 'ghost' peaks to their real
    underlying resonance locations by adding or subtracting a whole number of
    spectrum widths to the position in one or more peak dimensions. This is used
    when a resonance, that causes a peak, lies outside the normal recorded bounds
    of the spectrum but the peak nonetheless still appears within the spectrum, as
    an aliased signal that re-appears as if wrapped back onto the opposite side of
    the spectrum.
    The minimum and maximum aliased frequency values for a spectrum dimension, as
    edited in the "Referencing" table of the main Spectra_ popup, may be set
    extend the contour display beyond the normal sweep width (ppm range) of the
    spectrum and thus cover the real ppm position of peaks that have been
    unaliased. In such instances the contours are extended by tiling; one or more
    copies of the contours (not mirror image) are made and placed sequentially
    next to the normal, fundamental region. If a peak is unaliased to a position
    outside the displayed spectrum limits then the contour display will naturally
    be extended to cover the new peak position; all peaks will be visible within
    the spectrum by default. However, the user may at any time reset the minimum
    and maximum aliased frequency for a spectrum display (see the Spectra_ popup);
    deleting all values will reset bounds to the original sweep with, but any
    values may be chosen, within reason.
    A peak can be moved to the unaliased position of its underlying resonances by
    editing the "Num. Aliasing" column of this popup; double-clicking and typing in
    the appropriate number. When this number is changed for a peak dimension the peak
    will be instantly moved to its new location. An aliasing value of zero means that
    the peak lies within the sweep width of the spectrum. For a ppm scale having a
    positive aliasing value will *reduce* the ppm value, placing the peak a number of
    spectrum widths above or to the right of the spectrum bounds. Likewise a negative
    aliasing value will place a peak at a higher ppm value; below or to the left.
    Often aliasing values will be 1 or -1, where a peak has just fallen off the edge
    of a spectrum. For example a glycine amide nitrogen really at 100 ppm may be just
    outside the top of a 15N HSQC and be wrapped back into the bottom to appear as a
    peak at around 135 ppm, which means that the aliasing value should be set to 1,
    moving the peaks position up by a sweep with of 35 ppm, from 135 ppm to 100 ppm.
    The sign of the aliasing number may seem to be backwards, but it is perhaps the
    ppm scale that is 'backwards'. More complex aliasing is often seen in 3D 13C
    spectra where the 13C axis can be folded (wrapped) to reduce the sweep width that
    needs to be recorded, but still avoiding peak overlap because of the way shifts
    correlate. For example a 13C HSQC NOESY may be recorded with a sweep with that
    covers the CA, CB range 25-75 ppm but aliased methyl carbons below 25 ppm and
    aromatic carbons between 110 ppm and 140 ppm will be present; the methyls will
    have aliasing values of 1, and the aromatics -1 or -2.
    It should be noted that picking peaks in the tiled copies of a contour display,
    i.e. outside the sweep width, will automatically set the aliasing value for the
    peak to reflect the displayed chemical shift value. Thus, the user does not need
    to explicitly unalias the peak position. Any peaks that are moved by virtue of
    being unaliased will have their contribution to the chemical shifts, of any
    assigned resonances, adjusted automatically. Chemical shift values are always
    calculated using the underlying resonance positions, not the apparent peak
    position. Also, if it is known that many peaks share the same aliasing values,
    i.e. are in the same sweep width tile, then the user can propagate the aliasing
    value from one peak to many others in a single step via the right-click window
    menu; "Peak::Unaliasing propagate".
    .. _Spectra: EditSpectrumPopup.html
    """
    def __init__(self, parent, peak=None, *args, **kw):
        # Remember the peak being edited and the currently selected row
        # (a peakDim); selection happens via selectCell below.
        self.peak = peak
        self.peakDim = None
        self.guiParent = parent
        BasePopup.__init__(self, parent=parent, title="Edit Peak Aliasing", **kw)
    def body(self, guiFrame):
        # Build the popup: a divider, the close/help utility buttons and the
        # editable table. Only the "Num. Aliasing" column is editable, via
        # the IntEntry widget created here.
        self.geometry("500x250")
        self.numAliasingEntry = IntEntry(self,text='', returnCallback = self.setNumAliasing, width=4)
        guiFrame.expandGrid(1,0)
        div = LabelDivider(guiFrame, text='Peak Dimension Positions', grid=(0,0))
        utilButtons = UtilityButtonList(guiFrame, doClone=False,
                                        closeCmd=self.close,
                                        helpUrl=self.help_url, grid=(0,1))
        tipTexts = ['The peak/spectrum dimension number',
                    'The kind of isotope measured in the dimension',
                    'The position of the peak in this dimension, in units of ppm',
                    'The frequency position of the peak in this dimension, in units of Hz',
                    'The data point position (in the spectrum matrix) of the peak in this dimension',
                    'Sets the number of spectrum sweep withs to add to the peak dimension position to locate it at its real ppm value. Note an aliasing of "1" moves a peak to a lower ppm',
                    'The assignment annotation for the peak dimension']
        headingList = ['Dimension','Isotope','ppm','Hz','Points','Num.\nAliasing','Annotation']
        editWidgets      = [None, None, None, None, None, self.numAliasingEntry, None]
        editGetCallbacks = [None, None, None, None, None, self.getNumAliasing,   None]
        editSetCallbacks = [None, None, None, None, None, self.setNumAliasing,   None]
        self.scrolledMatrix = ScrolledMatrix(guiFrame, tipTexts=tipTexts,
                                             editSetCallbacks=editSetCallbacks,
                                             editGetCallbacks=editGetCallbacks,
                                             editWidgets=editWidgets,
                                             initialCols=5,
                                             initialRows=3,
                                             headingList=headingList,
                                             callback=self.selectCell,
                                             grid=(1,0), gridSpan=(1,2))
        # Refresh the table whenever any PeakDim is created, deleted,
        # re-annotated, re-aliased or moved anywhere in the project.
        for func in ('__init__','delete','setAnnotation',
                     'setNumAliasing','setPosition'):
            self.registerNotify(self.updateAfter, 'ccp.nmr.Nmr.PeakDim', func)
        self.waiting = False
        self.updateAfter(self.peak)
    def open(self):
        # Re-sync the table contents every time the popup is (re)opened.
        self.updateAfter()
        BasePopup.open(self)
    def setNumAliasing(self, event):
        # Commit the value typed into the IntEntry to the selected peakDim.
        value = self.numAliasingEntry.get()
        if (value is not None) and self.peakDim:
            setPeakDimNumAliasing(self.peakDim, value)
            self.updateAfter()
    def getNumAliasing(self, peakDim):
        # Seed the edit widget with the row's current aliasing count.
        if peakDim:
            self.numAliasingEntry.set(peakDim.numAliasing)
    def selectCell(self, object, row, col):
        # ScrolledMatrix row-selection callback: track the chosen peakDim.
        self.peakDim = object
    def updateAfter(self, object=None):
        # Coalesce refreshes: may be called with a Peak (switch target peak),
        # with a PeakDim (from a notifier), or with nothing.
        if object:
            if object.className == 'Peak':
                self.peak = object
            elif object.peak is not self.peak:
                # object is peakDim & function was called by notifier
                # return if not my peak
                return
        if self.waiting:
            return
        else:
            # Defer the actual rebuild to idle time so bursts of notifier
            # calls trigger only one update.
            self.waiting = True
            self.after_idle(self.update)
    def update(self):
        # Rebuild the table rows from the current peak's sorted dimensions.
        objectList = []
        textMatrix = []
        colorMatrix = []
        colors = [None] * 7
        colors[5] = '#B0FFB0'  # highlight the one editable column (Num. Aliasing)
        if self.peak:
            for peakDim in self.peak.sortedPeakDims():
                dataDimRef = peakDim.dataDimRef
                if dataDimRef:
                    objectList.append(peakDim)
                else:
                    # NOTE(review): dims without a dataDimRef add a blank row to
                    # textMatrix but no entry in objectList/colorMatrix, so the
                    # three lists can end up different lengths — confirm
                    # ScrolledMatrix tolerates that.
                    textMatrix.append([])
        for peakDim in objectList:
            dataDimRef = peakDim.dataDimRef
            expDimRef  = dataDimRef.expDimRef
            position   = aliasedPeakDimPosition(peakDim)
            datum = [peakDim.dim,
                     '/'.join(expDimRef.isotopeCodes),
                     unit_converter[('point','ppm')](position,dataDimRef),
                     unit_converter[('point','Hz') ](position,dataDimRef),
                     position,
                     peakDim.numAliasing,
                     peakDim.annotation]
            textMatrix.append(datum)
            colorMatrix.append(colors)
        self.scrolledMatrix.update(objectList=objectList,
                                   textMatrix=textMatrix,
                                   colorMatrix=colorMatrix)
        self.waiting = False
    def destroy(self):
        # Mirror of the registration in body(): detach all notifiers before
        # the widget is torn down.
        for func in ('__init__','delete','setAnnotation',
                     'setNumAliasing','setPosition'):
            self.unregisterNotify(self.updateAfter, 'ccp.nmr.Nmr.PeakDim', func)
        BasePopup.destroy(self)
|
#This example talks about all the basic comparison operators
# With x = 2 and y = 3 the prints below show each operator's result.
x = 2
y = 3
print(x == y)    # False
print(x+1 == y)  # True
print(x < y)     # True
print(x > y)     # False
print(x >= y)    # False
print(x <= y)    # True
print(x != y)    # True
def compare_integors(x, y):
    """Return a message stating how *x* compares to *y*."""
    # (x == y or x < y) is simply x <= y.
    if x <= y:
        return 'x is less than or equal to y'
    return 'y is always greater than x'
print(compare_integors(2, 3))
|
#!/usr/bin/env python
# -*- coding: CP1252 -*-
#
# generated by wxGlade 0.6.8 (standalone edition) on Tue Oct 07 12:53:43 2014
#
import wx,wx.grid,win32com.client,json,os,subprocess,win32clipboard,time,win32con
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
    """Poll lines from a blocking stream without blocking the caller.

    A daemon thread drains the stream into a queue; readline() pops from
    that queue with an optional timeout instead of blocking on the stream.
    """
    def __init__(self, stream):
        '''
        stream: the stream to read from.
        Usually a process' stdout or stderr.
        '''
        self._s = stream
        self._q = Queue()
        def _pump(source, sink):
            '''Copy lines from *source* into *sink* until EOF.'''
            while True:
                text = source.readline()
                if not text:
                    return  # empty read means the stream is exhausted
                sink.put(text)
        self._t = Thread(target=_pump, args=(self._s, self._q))
        self._t.daemon = True
        self._t.start()  # start collecting lines from the stream
    def readline(self, timeout=None):
        """Return the next buffered line, or None if none arrives in *timeout*."""
        try:
            # Block only when a timeout was requested, mirroring Queue.get.
            return self._q.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
# wxGlade placeholders left behind for classes it could no longer locate;
# the bare ``pass`` statements are harmless no-ops at module level.
# Content of this block not found. Did you rename this class?
pass
# Content of this block not found. Did you rename this class?
pass
class Macro(wx.Frame):
    """Main window for building, saving, loading and running SendKeys macros.

    One command per grid row. Special row syntaxes handled in onRun:
    'switch N' / 'end N' activate the N-th window from the Window List,
    'sleep X' pauses, 'clipboard' types out the clipboard contents, and
    'keys |*| N' repeats a keystroke N times. The macro is compiled into a
    ``macro.py`` file and executed through an external interpreter
    subprocess so a CTRL+F12 hotkey can kill it mid-run. (Python 2 /
    classic-wxPython / Windows-only code: Queue import, PySimpleApp,
    win32com, win32con.)
    """
    def __init__(self, *args, **kwds):
        # begin wxGlade: Macro.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        # NOTE(review): the trailing "macro\Use ALT+F12" is missing the 'n'
        # of "\nUse"; under Python 3 "\U" would even be a syntax error.
        self.label_1 = wx.StaticText(self, wx.ID_ANY, "Create your own macro\nenter key combinations to send as though you had typed them\nUse ^ for ctrl\nUse % for alt\nUse + for shift\nUse {} for keys eg {ESC} or {TAB}\nUse |*| # to repeat a command\nUse CTRL+F12 to stop a running macro\Use ALT+F12 to start a macro")
        self.MACRO_NAME = wx.StaticText(self, wx.ID_ANY, "Macro Name")
        self.button_1 = wx.Button(self, wx.ID_ANY, "Load")
        self.button_2 = wx.Button(self, wx.ID_ANY, "Save")
        self.button_3 = wx.Button(self, wx.ID_ANY, "Test Windows")
        self.button_4 = wx.Button(self, wx.ID_ANY, "Run")
        self.label_2 = wx.StaticText(self, wx.ID_ANY, "Repeat #")
        self.REPEAT_NUMBER = wx.TextCtrl(self, wx.ID_ANY, "1")
        self.label_4 = wx.StaticText(self, wx.ID_ANY, "Time Delay")
        self.TIME_DELAY = wx.TextCtrl(self, wx.ID_ANY, ".1")
        self.label_3 = wx.StaticText(self, wx.ID_ANY, "Window List")
        self.WINDOW_NAMES = wx.TextCtrl(self, wx.ID_ANY, "", style=wx.TE_MULTILINE)
        self.button_5 = wx.Button(self, wx.ID_ANY, "Insert Row")
        self.button_6 = wx.Button(self, wx.ID_ANY, "Clear Row")
        self.VIEWER = wx.grid.Grid(self, wx.ID_ANY, size=(1, 1))
        self.__set_properties()
        self.__do_layout()
        self.Bind(wx.EVT_BUTTON, self.onLoad, self.button_1)
        self.Bind(wx.EVT_BUTTON, self.onSave, self.button_2)
        self.Bind(wx.EVT_BUTTON, self.testWindows, self.button_3)
        self.Bind(wx.EVT_BUTTON, self.onRun, self.button_4)
        self.Bind(wx.EVT_TEXT, self.saveRepeat, self.REPEAT_NUMBER)
        self.Bind(wx.EVT_TEXT, self.saveTimeDelay, self.TIME_DELAY)
        self.Bind(wx.EVT_TEXT, self.saveWindows, self.WINDOW_NAMES)
        self.Bind(wx.EVT_BUTTON, self.onInsert, self.button_5)
        self.Bind(wx.EVT_BUTTON, self.onClearRow, self.button_6)
        self.Bind(wx.grid.EVT_GRID_CMD_CELL_CHANGE, self.onCellChange, self.VIEWER)
        # end wxGlade
        # Global hotkeys: CTRL+F12 kills a running macro, ALT+F12 starts one.
        self.hotKeyId=150
        self.RegisterHotKey(self.hotKeyId,win32con.MOD_CONTROL, win32con.VK_F12)
        self.Bind(wx.EVT_HOTKEY, self.onKey, id=self.hotKeyId)
        self.RegisterHotKey(self.hotKeyId+1,win32con.MOD_ALT, win32con.VK_F12)
        self.Bind(wx.EVT_HOTKEY, self.onRun, id=self.hotKeyId+1)
        # settings mirrors the UI fields and is what gets serialized to disk;
        # p is the interpreter subprocess while a macro is running.
        self.settings = {}
        self.recur = False
        self.p=None
    def __set_properties(self):
        # begin wxGlade: Macro.__set_properties
        self.SetTitle("Macro")
        self.REPEAT_NUMBER.SetToolTipString("Enter how many times you want the macro to run")
        self.VIEWER.CreateGrid(1, 1)
        self.VIEWER.SetRowLabelSize(20)
        self.VIEWER.SetColLabelValue(0, "Command")
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: Macro.__do_layout
        # wxGlade-generated sizer layout: header label, button row, window
        # list row, insert/clear row, then the command grid.
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_4 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(self.label_1, 0, wx.EXPAND, 0)
        sizer_1.Add(self.MACRO_NAME, 0, 0, 0)
        sizer_2.Add(self.button_1, 0, 0, 0)
        sizer_2.Add(self.button_2, 0, 0, 0)
        sizer_2.Add(self.button_3, 0, 0, 0)
        sizer_2.Add(self.button_4, 0, 0, 0)
        sizer_2.Add(self.label_2, 0, 0, 0)
        sizer_2.Add(self.REPEAT_NUMBER, 0, 0, 0)
        sizer_2.Add(self.label_4, 0, 0, 0)
        sizer_2.Add(self.TIME_DELAY, 0, 0, 0)
        sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)
        sizer_3.Add(self.label_3, 0, 0, 0)
        sizer_3.Add(self.WINDOW_NAMES, 1, wx.EXPAND, 0)
        sizer_1.Add(sizer_3, 1, wx.EXPAND, 0)
        sizer_4.Add(self.button_5, 0, 0, 0)
        sizer_4.Add(self.button_6, 0, 0, 0)
        sizer_1.Add(sizer_4, 0, wx.EXPAND, 0)
        sizer_1.Add(self.VIEWER, 4, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
        # end wxGlade
    def onKey(self, event):
        # CTRL+F12 handler: kill the macro subprocess if one is running.
        if self.p:
            self.p.kill()
            self.retval='Killed'
        else:
            wx.MessageBox('No macro running','Results',wx.ICON_INFORMATION)
        event.Skip()
    def onLoad(self, event): # wxGlade: Macro.<event_handler>
        # Load a previously saved .macro (JSON) file into the UI fields.
        dia = wx.FileDialog(None,message = "Select File", wildcard = 'macro (*.macro)|*.macro|All files (*.*)|*.*', style = wx.OPEN)
        if dia.ShowModal() == wx.ID_OK:
            filepath = dia.GetPaths()[0]
            i = open(filepath)
            self.settings = json.loads(i.read())
            i.close()
        if self.settings:
            self.MACRO_NAME.Label = self.settings['MACRO_NAME']
            self.WINDOW_NAMES.Value = self.settings['WINDOW_NAMES']
            self.REPEAT_NUMBER.Value = self.settings['REPEAT_NUMBER']
            self.TIME_DELAY.Value = self.settings['TIME_DELAY']
            self.loadViewer(self.settings['VIEWER'])
        event.Skip()
    def loadViewer(self,viewer_list):
        # Replace the grid contents with viewer_list plus one trailing empty
        # row; self.recur suppresses onCellChange while we mutate the grid.
        self.recur = True
        for i in range(self.VIEWER.GetNumberRows()-1,-1,-1):
            self.VIEWER.DeleteRows()
        for i in range(0,len(viewer_list)):
            self.VIEWER.AppendRows()
            self.VIEWER.SetCellValue(i,0,viewer_list[i])
        self.VIEWER.AppendRows()
        self.recur = False
    def onSave(self, event): # wxGlade: Macro.<event_handler>
        # Serialize the current UI state to a .macro (JSON) file.
        dia = wx.FileDialog(None,message = "Select Output File", wildcard = 'macro (*.macro)|*.macro|All files (*.*)|*.*', style = wx.SAVE|wx.OVERWRITE_PROMPT)
        if dia.ShowModal() == wx.ID_OK:
            filepath = dia.GetPaths()[0]
            self.MACRO_NAME.Label = filepath
            self.settings['MACRO_NAME'] = self.MACRO_NAME.Label
            # Only non-empty grid rows are persisted.
            self.settings['VIEWER'] = [self.VIEWER.GetCellValue(i,0) for i in range(0,self.VIEWER.GetNumberRows()) if self.VIEWER.GetCellValue(i,0)]
            self.settings['REPEAT_NUMBER'] = self.REPEAT_NUMBER.Value
            self.settings['TIME_DELAY']=self.TIME_DELAY.Value
            o=open(filepath,'w')
            o.write(json.dumps(self.settings))
            o.close()
        event.Skip()
    def testWindows(self, event): # wxGlade: Macro.<event_handler>
        # Try to activate each window named in the Window List and report
        # which ones could be found.
        shell = win32com.client.Dispatch("WScript.Shell")
        window_list = self.settings['WINDOW_NAMES'].split('\n')
        message_list = ["Window %s test %s\n"%(i+1,{False:"Failed",True:"Worked"}[shell.AppActivate(window_list[i])]) for i in range(0,len(window_list)) if window_list[i]]
        wx.MessageBox(''.join(message_list),"Test Results",wx.ICON_INFORMATION)
        event.Skip()
    def onRun(self, event): # wxGlade: Macro.<event_handler>
        # Compile the grid rows into a SendKeys script (macro.py) and feed it
        # to the external interpreter subprocess, polling its stdout through
        # NonBlockingStreamReader until the macro reports a result.
        os.system('espeak "start"')
        command_list = [self.VIEWER.GetCellValue(i,0) for i in range(0,self.VIEWER.GetNumberRows()) if self.VIEWER.GetCellValue(i,0)]
        window_list = self.settings['WINDOW_NAMES'].split('\n')
        self.p = subprocess.Popen(['basicinterpreter/basicinterpreter.exe'],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        self.pout=NonBlockingStreamReader(self.p.stdout)
        command = '''sys.path.append(os.getcwd())\nshell=client.Dispatch("WScript.Shell")\n'''
        self.p.stdin.write(command)
        self.p.stdin.flush()
        # Prologue of the generated script: a clipboard-typing helper and the
        # myrun() entry point whose body is assembled row by row below.
        command = '''def clipboard(shell,time,win32clipboard):\n    win32clipboard.OpenClipboard()\n    commands = win32clipboard.GetClipboardData()\n    win32clipboard.CloseClipboard()\n    for a in commands.strip():\n        shell.SendKeys(a)\n\ndef myrun(shell,time,win32clipboard):\n'''
        for z in range(0,int(self.REPEAT_NUMBER.Value)):
            for i in command_list:
                if i:
                    if ' |*| ' in i:
                        # 'keys |*| N' repeats the keystroke N times.
                        key,repeat = i.split(' |*| ')
                        for a in range(0,int(repeat)):
                            command += "    shell.SendKeys('%s')\n    time.sleep(%s)\n"%(key,float(self.TIME_DELAY.Value))
                    elif 'switch ' in i:
                        # 'switch N' activates the N-th listed window, aborting
                        # the macro if activation fails.
                        command +='%s'%("    if not shell.AppActivate('%s'):\n        return 'window failure'\n"%window_list[int(i.replace('switch ',''))-1],)
                    elif 'sleep ' in i:
                        command +='%s'%("    time.sleep(%s)\n"%float('%s'%i.replace('sleep ','')),)
                    elif 'clipboard' in i:
                        command += "    clipboard(shell,time,win32clipboard)\n"
                    elif 'end ' in i:
                        command += "    shell.AppActivate('%s')\n"%window_list[int(i.replace('end ',''))-1]
                    else:
                        command += "    shell.SendKeys('%s')\n"%i
                        command += "    time.sleep(%s)\n"%float(self.TIME_DELAY.Value)
        command += '    return "Done"\n\n'
        o = open('macro.py','w')
        o.write(command)
        o.close()
        command = 'from macro import *\nmyrun(shell,time,win32clipboard)\nsys.stdout.flush()\n'
        self.p.stdin.write(command)
        self.retval = None
        # Busy-wait for the subprocess result, keeping the UI responsive.
        while not self.retval:
            self.retval = self.pout.readline()
            wx.Yield()
        self.p.stdin.close()
        os.system('espeak "%s"'%self.retval.replace('... ','').replace('>>> ','').replace("'",''))
        self.pout=None
        self.p.wait()
        event.Skip()
    def onInsert(self, event): # wxGlade: Macro.<event_handler>
        # Insert an empty command row above the cursor.
        self.VIEWER.InsertRows(self.VIEWER.GetGridCursorRow())
        self.VIEWER.SetFocus()
        event.Skip()
    def onClearRow(self, event): # wxGlade: Macro.<event_handler>
        # Blank the command cell at the cursor row.
        self.VIEWER.SetCellValue(self.VIEWER.GetGridCursorRow(),0,'')
        self.VIEWER.SetFocus()
        event.Skip()
    def onCellChange(self, event): # wxGlade: Macro.<event_handler>
        # Keep one trailing empty row available for new commands.
        if self.VIEWER.GetCellValue(self.VIEWER.GetNumberRows()-1,0):
            self.VIEWER.AppendRows()
        event.Skip()
    def saveWindows(self, event): # wxGlade: Macro.<event_handler>
        # Mirror the Window List text into the serializable settings dict.
        self.settings['WINDOW_NAMES']=self.WINDOW_NAMES.Value
        event.Skip()
    def saveTimeDelay(self, event): # wxGlade: Macro.<event_handler>
        self.settings['TIME_DELAY'] = self.TIME_DELAY.Value
        event.Skip()
    def saveRepeat(self, event): # wxGlade: Macro.<event_handler>
        self.settings['REPEAT_NUMBER']=self.REPEAT_NUMBER.Value
        event.Skip()
# end of class Macro
if __name__ == "__main__":
    # wx.PySimpleApp and wx.InitAllImageHandlers are legacy classic-wxPython
    # APIs (removed in wxPython Phoenix).
    app = wx.PySimpleApp(0)
    wx.InitAllImageHandlers()
    # NOTE(review): rebinding ``Macro`` to the instance shadows the class name.
    Macro = Macro(None, wx.ID_ANY, "")
    app.SetTopWindow(Macro)
    Macro.Show()
    app.MainLoop()
|
def answer(x):
    """Count the strings in *x*, treating a string and its reverse as one."""
    kept = []
    for word in x:
        # Keep the word only if neither it nor its mirror image is kept already.
        if word in kept or word[::-1] in kept:
            continue
        kept.append(word)
    return len(kept)
#test = ["abc", "cba", "bac"]
#test = ["foo", "bar", "oof", "bar"]
test = ["x", "y", "xy", "yy", "", "yx"]
print answer(test)
|
def flatten(l):
    """Recursively flatten an arbitrarily nested list.

    A non-list argument is wrapped in a one-element list, so the return
    value is always a list.

    Args:
        l: a (possibly nested) list, or any scalar value.

    Returns:
        A flat list of all the leaf values, in order.
    """
    if not isinstance(l, list):
        return [l]
    if l == []:
        return l
    # head + recursively flattened tail
    return flatten(l[0]) + flatten(l[1:])
assert flatten([1, [2]]) == [1, 2]
print("hi2")
assert flatten([1, 2, [3, [4, 5], 6], 7]) == [1, 2, 3, 4, 5, 6, 7]
print("hi3")
assert flatten(['wow', [2, [[]]], [True]]) == ['wow', 2, True]
assert flatten([]) == []
assert flatten([[]]) == []
# flatten always returns a list, so a scalar comes back wrapped; the
# original `flatten(3) == 3` assertion could never hold.
assert flatten(3) == [3]
|
#Project Euler problem 9
#pythagorean triple that sums to 1000
'''
SOME MATH
Squaring an imaginary (Gaussian) integer u + vi yields an integer
Pythagorean triple, giving the classical generating formula
    (u^2 - v^2, 2uv, u^2 + v^2).
Setting the sum of those three terms equal to 1000 leaves only 24
candidate (u, v) pairs to search -- a ~43.66-million-fold reduction
over brute-forcing the 1000^3 space.  The search gives
    u = -20, v = -5
i.e. the triple (375, 200, 425).
'''
u = -20
v = -5
# Name the three legs once instead of recomputing the powers inline.
even_leg = 2 * u * v
odd_leg = u ** 2 - v ** 2
hypotenuse = u ** 2 + v ** 2
print("(", even_leg, ",", odd_leg, ",", hypotenuse, ")")
print("Sum = ", odd_leg + hypotenuse + even_leg)
print("Product = Answer = ", odd_leg * hypotenuse * even_leg)
|
# Contributors: Aure, Kowther
from flask import url_for
from app.models import User
from tests.test_main import BaseTestCase
class TestPosts(BaseTestCase):
    """Integration tests for the login and signup flows.

    Relies on BaseTestCase to provide `self.client` and a `login()`
    helper, plus pre-seeded users (e.g. luna@gmail.com) -- TODO confirm
    the fixtures live in BaseTestCase.
    """
    def test_login_fails_with_invalid_details(self):
        """Logging in with an unknown account shows the error message."""
        response = self.login(email='atlas@gmail.com', password='password')
        self.assertIn(b'Invalid username or password', response.data)
    def test_signup_page_valid(self):
        """The signup page is reachable."""
        response = self.client.get('/signup')
        self.assertEqual(response.status_code, 200)
    def test_login_succeeds_with_valid_details(self):
        """A seeded user can log in successfully."""
        response = self.login(email='luna@gmail.com', password='5678')
        self.assertIn(b'Login successful!', response.data)
    def test_registration_valid_details(self):
        """POST to auth.signup and assert no new user row is created.

        NOTE(review): despite the name, this asserts the user count does
        NOT change and that the 'Sign Up' page is re-rendered -- i.e. the
        registration is expected to fail.  The lookups below use keys
        like 'Luna', 'rent', 'er' and 'role' which are NOT keys of
        renter_data (its keys are username/first_name/.../roles), so
        those fields resolve to None -- presumably deliberate to make
        the form invalid; verify.
        """
        count = User.query.count()
        response = self.client.post(url_for('auth.signup'), data=dict(
            username=self.renter_data.get('Luna'),
            first_name=self.renter_data.get('rent'),
            last_name=self.renter_data.get('er'),
            email=self.renter_data.get('email'),
            password=self.renter_data.get('password'),
            roles=self.renter_data.get('role')
        ), follow_redirects=True)
        count2 = User.query.count()
        self.assertEqual(count2 - count, 0)
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Sign Up', response.data)
    # Class-level registration payload shared by the tests above.
    renter_data = dict(username='Luna', first_name="rent", last_name="er", email="luna@gmail.com",
                       roles='renter', password='5678')
|
import os
import re
import os.path
from iptcinfo import IPTCInfo
from galleryitem import JpegPicture, JpegDirectory, directory_name_to_html_file_name
from ..utils.inject import assign_injectables
def is_jpeg_file(file_name):
    """
    Determine if a file is labeled as a JPEG.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .jpg.
    """
    jpeg_extension = 'jpg'
    return file_is_of_type(file_name, jpeg_extension)
def is_css_file(file_name):
    """
    Determine if a file is labeled as CSS.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .css.
    """
    css_extension = 'css'
    return file_is_of_type(file_name, css_extension)
def is_js_file(file_name):
    """
    Determine if a file is labeled as JavaScript.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .js.
    """
    js_extension = 'js'
    return file_is_of_type(file_name, js_extension)
def file_is_of_type(file_name, extension):
    """
    Return whether a file is of a certain type.

    Args:
        file_name the name of the file to test.
        extension the extension (without the leading dot) to check for.

    Returns:
        True if file_name ends with a dot followed by extension.
    """
    # str.endswith anchors the check at the END of the name, as the
    # docstring promises.  The original unanchored regex search also
    # matched names like 'a.jpg.bak', and would have treated regex
    # metacharacters in `extension` specially.
    return file_name.endswith('.%s' % extension)
class GalleryItemFactory(object):
    """
    Class to bootstrap the application by reading the disk and
    creating GalleryItems from the existing JPEGs and subdirectories.
    """
    def __init__(self, lookup_table, should_prompt,
                 iptc_info_constructor=IPTCInfo,
                 list_directory=os.listdir, is_directory=os.path.isdir):
        """
        Constructor for GalleryItemFactory

        Args:
            lookup_table the lookup_table that the files use to search IPTCInfo.data.
            should_prompt whether the program should prompt the user for directory
                names.
            iptc_info_constructor the constructor for IPTCInfo objects that the files
                will use to lookup metadata (defaults to IPTCInfo).
            list_directory the function that takes a path and lists the files in it
                (defaults to os.listdir).
            is_directory a function that takes a file name and returns true if it
                is a directory (defaults to os.path.isdir).
        """
        assign_injectables(self, locals())

    def create_directory(self, path, parent_path=None):
        """
        Creates a JpegDirectory object with the appropriate GalleryItems.

        Args:
            path the path to the directory that the JPEGs are stored in.
            parent_path the directory one level up of path; if we are creating
                a subdirectory this will be used to populate back_href.
                It can be None if we are creating the top-most directory.

        Returns:
            A JpegDirectory containing GalleryItems wrapped around all the
            appropriate contents of the directory referred to by path.

        Raises:
            Any exception thrown when trying to extract IPTC information from
            a JPEG file. See try_create_jpeg_picture for details.
        """
        file_names = self.list_directory(path)
        # List comprehensions instead of filter(): identical result and
        # works the same under Python 2 and 3.
        jpeg_names = [name for name in file_names if is_jpeg_file(name)]
        path_contents = []
        for name in jpeg_names:
            maybe_jpeg_picture = self.try_create_jpeg_picture(path, name)
            if maybe_jpeg_picture is not None:
                path_contents.append(maybe_jpeg_picture)
        path_contents.extend(self.create_subdirectories(file_names, path))
        back_href = self.maybe_get_back_href(parent_path)
        return JpegDirectory(path, path_contents, self.should_prompt,
                             back_href=back_href)

    def try_create_jpeg_picture(self, path, name):
        """
        Given a path and the name of a file ending in .jpg, tries to create
        a JpegPicture object out of it.

        Args:
            path the path to the directory the file is in.
            name the name of the file.

        Returns:
            A JpegPicture object, if creating it was successful. None if
            creating the JpegPicture failed for some reason that does not
            warrant crashing the program (unreadable file or missing IPTC
            data); in those cases a message is printed and the file skipped.

        Raises:
            Any exception raised while extracting IPTC information that is
            not an IOError or the 'No IPTC data found.' condition.
        """
        full_jpeg_name = os.path.join(path, name)
        try:
            return JpegPicture(name,
                               directory_name_to_html_file_name(path),
                               self.iptc_info_constructor(full_jpeg_name),
                               self.lookup_table)
        except IOError:
            # print() calls (not Py2 print statements) keep this module
            # importable under Python 3.
            print("I was unable to open the file %s for some reason" % name)
            print("Maybe it's corrupted?")
            print("Skipping it...")
            return None
        except Exception as possible_iptc_exception:
            if str(possible_iptc_exception) == 'No IPTC data found.':
                print("I was unable to get IPTC data from the file %s" % name)
                print("Skipping it...")
                return None
            else:
                # Bare raise preserves the original traceback.
                raise

    def maybe_get_back_href(self, path):
        """
        Given a nullable path name, turns it into a href that can be used
        to write an anchor tag pointing to a HTML file. If path
        is None, propagates the None by returning it.

        Args:
            path the path name, or None if it is not applicable.
        """
        if path is None:
            return None
        return directory_name_to_html_file_name(path)

    def create_subdirectories(self, file_names, path):
        """
        Helper method to find the subdirectories of path and create
        JpegDirectories for them, fully initializing their contents too.

        Args:
            file_names the names of the files in path.
            path the root directory path to process.
        """
        full_file_names = [os.path.join(path, name) for name in file_names]
        directory_names = [name for name in full_file_names
                           if self.is_directory(name)]
        return [self.create_directory(directory_name, parent_path=path)
                for directory_name in directory_names]
|
class Solution:
    def totalMoney(self, n: int) -> int:
        """Total money saved over n days: day k (0-based) deposits
        1 + week_index + weekday_index, i.e. each Monday starts one
        dollar higher than the previous Monday and deposits grow by
        one dollar each day within a week.
        """
        total = 0
        for day in range(n):
            week, weekday = divmod(day, 7)
            total += 1 + week + weekday
        return total
|
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.template import loader
from core.decoratos import login_required, admin_required
from order.forms import OrderCreateForm
# External Architecture
from order.repository.repository_django_orm import Repository
from order.usecases.usecase import UseCase
# This will allow users non admin to create menu orders
@csrf_exempt
@login_required
def placeOrder(request, menuId):
    """Create an order for menu `menuId` on behalf of the session user.

    Renders order_new.html; on a valid form the order is created and the
    save result is included in the context, otherwise a fresh empty form
    is shown.
    """
    template = loader.get_template('order_new.html')
    form = OrderCreateForm(request.POST)
    # .get() instead of [] for consistency with getById/getAllById,
    # which read the session the same way.
    userId = request.session.get('userId')
    repository = Repository()
    use_case = UseCase(repository)
    menu = use_case.getMenuNameById(menuId)
    if form.is_valid():
        result = use_case.createOrder(userId,
                                      menuId,
                                      form.cleaned_data.get('customization'),
                                      )
        context = {
            'saved': result,
            'menu': menu,
            'form': form,
        }
    else:
        context = {
            'form': OrderCreateForm(),
            'menu': menu,
        }
    # Single render path instead of two duplicated return statements.
    return HttpResponse(template.render(context, request))
# this will get all users in general, for Nora to
# see how to build and deliver food
@login_required
@admin_required
def getAll(request):
    """Admin-only view listing every order in the system."""
    use_case = UseCase(Repository())
    orders = use_case.getAll()
    template = loader.get_template('orders.html')
    context = {'order_list': orders}
    return HttpResponse(template.render(context, request))
# this will return single order information by orderId
@login_required
def getById(request, orderId):
    """Render a single order, scoped to the session's user."""
    current_user = request.session.get('userId')
    use_case = UseCase(Repository())
    order = use_case.getById(orderId, current_user)
    template = loader.get_template('order_view.html')
    return HttpResponse(template.render({'order': order}, request))
@login_required
def getAllById(request):
    """List every order belonging to the session's user."""
    current_user = request.session.get('userId')
    use_case = UseCase(Repository())
    orders = use_case.getAllById(current_user)
    template = loader.get_template('orders.html')
    context = {
        'order_list': orders,
        'user': current_user,
    }
    return HttpResponse(template.render(context, request))
|
import MySQLdb
# conn=MySQLdb.connect(host='localhost',port=3306,user='root',passwd='ws940113ZYF',db='test',charset='utf8')
# cur=conn.cursor()
#
# cur.execute("""
# create table if not EXISTS account
# (
# accid int(10) PRIMARY KEY ,
# money int(10)
# )
# """)
#
# cur.execute('insert into account(accid,money) VALUES (1,110)')
# cur.execute('insert into account(accid,money) VALUES (2,10)')
#
# conn.commit()
# cur.close()
# conn.close()
import sys
class transfermoney(object):
    """Transfer money between two accounts inside a single transaction.

    All statements run on one connection; transfer() commits on success
    and rolls back on any failure so the two balances stay consistent.
    """
    def __init__(self, conn):
        self.conn = conn

    def check(self, accid):
        """Raise if account `accid` does not exist."""
        cursor = self.conn.cursor()
        try:
            # Parameterized query: the original built SQL with Python
            # string interpolation from command-line input, which is an
            # SQL-injection vector.
            sql = 'select * from account where accid=%s'
            cursor.execute(sql, (accid,))
            print('check_acct_available' + sql)
            rs = cursor.fetchall()
            if len(rs) != 1:
                raise Exception('账号%s 不存在' % accid)
        finally:
            cursor.close()

    def enoughmoney(self, accid, money):
        """Raise unless account `accid` has a balance of at least `money`."""
        cursor = self.conn.cursor()
        try:
            sql = 'select * from account where accid=%s and money>=%s'
            cursor.execute(sql, (accid, money))
            print('reduce money' + sql)
            rs = cursor.fetchall()
            if len(rs) != 1:
                raise Exception('账号%s 减款失败' % accid)
        finally:
            cursor.close()

    def reducemoney(self, accid, money):
        """Subtract `money` from account `accid`; raise if no row changed."""
        cursor = self.conn.cursor()
        try:
            sql = 'update account set money=money-%s where accid=%s'
            cursor.execute(sql, (money, accid))
            print('reduce money' + sql)
            # rowcount (not fetchall) tells whether the UPDATE hit a row.
            if cursor.rowcount != 1:
                raise Exception('账号%s 减款失败' % accid)
        finally:
            cursor.close()

    def addmoney(self, accid, money):
        """Add `money` to account `accid`; raise if no row changed."""
        cursor = self.conn.cursor()
        try:
            sql = 'update account set money=money+%s where accid=%s'
            cursor.execute(sql, (money, accid))
            # The original logged 'reduce money' here by copy-paste.
            print('add money' + sql)
            if cursor.rowcount != 1:
                raise Exception('账号%s 加款失败' % accid)
        finally:
            cursor.close()

    def transfer(self, source_accid, target_accid, money):
        """Move `money` from source to target atomically."""
        try:
            self.check(source_accid)
            self.check(target_accid)
            self.enoughmoney(source_accid, money)
            self.reducemoney(source_accid, money)
            self.addmoney(target_accid, money)
            self.conn.commit()
        except Exception:
            self.conn.rollback()  # on any error the data is left unchanged
            raise  # bare raise preserves the original traceback
if __name__ == '__main__':
    source_accid = sys.argv[1]
    target_accid = sys.argv[2]
    # Validate the amount up front: argv is a string, and the transfer
    # arithmetic expects a number.
    money = int(sys.argv[3])
    conn = MySQLdb.connect(host='localhost', port=3306, user='root',
                           passwd='ws940113ZYF', db='test', charset='utf8')
    tr = transfermoney(conn)
    try:
        tr.transfer(source_accid, target_accid, money)
    except Exception as e:
        print('出现问题' + str(e))
    finally:
        conn.close()
|
# Create your tasks here
from __future__ import absolute_import, unicode_literals
from mobility_5g_rest_api.models import Event, RadarEvent
import json
from datetime import datetime, timedelta
from celery import shared_task
from celery.utils.log import get_task_logger
from mobility_5g_rest_api.utils import process_daily_inflow
logger = get_task_logger(__name__)
@shared_task(track_started=True)
def sensor_fusion(json_data):
    """Celery task fusing camera detections with stored radar events.

    `json_data` is a JSON array of detection dicts with at least the keys
    'date', 'radarId', 'class', 'inside_road', 'is_stopped' and 'speed'.
    Non-vehicle detections become Event rows immediately; vehicle
    detections are matched against RadarEvent rows (same timestamp and
    sign of velocity, falling back to the previous 5 seconds) to produce
    'RT' events carrying the radar velocity.  Matched RadarEvents are
    deleted.  Returns a human-readable completion string.

    NOTE(review): event_type/event_class codes ('RD', 'BL', 'RT', 'PE',
    'AN', 'BC', 'SC', 'CA', 'TR', 'MC') come from the Event model's
    choices -- confirm their meanings there.
    """
    # Do the sensor fusion
    json_obj = json.loads(json_data)
    print(json_obj)
    location = None
    if json_obj:
        # All detections in one payload share the first item's date/radarId.
        timestamp_event = datetime.strptime(json_obj[0]['date'], "%Y-%m-%d %H:%M:%S")
        # radarId -> site code mapping; any other radarId leaves
        # location as None -- presumably only radars 5 and 7 feed this
        # task; verify.
        if json_obj[0]['radarId'] == 5:
            location = 'PT'
        elif json_obj[0]['radarId'] == 7:
            location = 'RA'
        vehicles_to_process = []
        for event in json_obj:
            if event['inside_road']:
                # Vehicles on the road are matched with radar data below.
                # NOTE(review): 'motocycle' (sic) must match the spelling
                # emitted by the detector, otherwise motorcycles are
                # silently dropped -- confirm upstream spelling.
                if event['class'] in ['car', 'truck', 'motocycle']:
                    vehicles_to_process.append(event)
                elif event['class'] == 'person':
                    Event.objects.create(location=location,
                                         event_type="RD",
                                         event_class="PE",
                                         timestamp=timestamp_event)
                elif event['class'] in ['cat', 'dog', 'horse', 'sheep', 'cow', 'bear']:
                    Event.objects.create(location=location,
                                         event_type="RD",
                                         event_class="AN",
                                         timestamp=timestamp_event)
            else:
                # Detections outside the road ("BL" events).
                if event['class'] in ['cat', 'dog', 'horse', 'sheep', 'cow', 'bear']:
                    Event.objects.create(location=location,
                                         event_type="BL",
                                         event_class="AN",
                                         timestamp=timestamp_event)
                elif event['class'] == 'person':
                    Event.objects.create(location=location,
                                         event_type="BL",
                                         event_class="PE",
                                         timestamp=timestamp_event)
                elif event['class'] == 'bicycle':
                    Event.objects.create(location=location,
                                         event_type="BL",
                                         event_class="BC",
                                         timestamp=timestamp_event)
        if vehicles_to_process:
            # Candidate radar events, split by direction (sign of velocity):
            # exact-timestamp matches first, then a 5-second lookback window.
            neg_velocity_radar_events = list(RadarEvent.objects.filter(timestamp=timestamp_event,
                                                                       radar_id=json_obj[0]['radarId'],
                                                                       velocity__lt=0))
            pos_velocity_radar_events = list(RadarEvent.objects.filter(timestamp=timestamp_event,
                                                                       radar_id=json_obj[0]['radarId'],
                                                                       velocity__gt=0))
            last_5_seconds_radar_events_gt = list(RadarEvent.objects.filter(
                timestamp__gte=(timestamp_event - timedelta(seconds=5)),
                radar_id=json_obj[0]['radarId'], velocity__gt=0))
            last_5_seconds_radar_events_lt = list(RadarEvent.objects.filter(
                timestamp__gte=(timestamp_event - timedelta(seconds=5)),
                radar_id=json_obj[0]['radarId'], velocity__lt=0))
            for event in reversed(vehicles_to_process):
                if event['is_stopped']:
                    # Stopped vehicle on the road.
                    Event.objects.create(location=location,
                                         event_type="RD",
                                         event_class="SC",
                                         timestamp=timestamp_event)
                else:
                    radar_event = None
                    event_class = 'CA' if event['class'] == 'car' else ('TR' if event['class'] == 'truck' else 'MC')
                    if event['speed'] > 0:
                        # Moving in the positive direction: consume one
                        # matching radar event (exact timestamp first).
                        if pos_velocity_radar_events:
                            radar_event = pos_velocity_radar_events[0]
                            Event.objects.create(location=location,
                                                 event_type="RT",
                                                 event_class=event_class,
                                                 timestamp=radar_event.timestamp,
                                                 velocity=abs(radar_event.velocity))
                            radar_event.delete()
                            pos_velocity_radar_events.remove(radar_event)
                        else:
                            if last_5_seconds_radar_events_gt:
                                radar_event = last_5_seconds_radar_events_gt[0]
                                Event.objects.create(location=location,
                                                     event_type="RT",
                                                     event_class=event_class,
                                                     timestamp=radar_event.timestamp,
                                                     velocity=abs(radar_event.velocity))
                                radar_event.delete()
                                last_5_seconds_radar_events_gt.remove(radar_event)
                    elif event['speed'] < 0:
                        # Symmetric handling for the negative direction.
                        if neg_velocity_radar_events:
                            radar_event = neg_velocity_radar_events[0]
                            Event.objects.create(location=location,
                                                 event_type="RT",
                                                 event_class=event_class,
                                                 timestamp=radar_event.timestamp,
                                                 velocity=abs(radar_event.velocity))
                            radar_event.delete()
                            neg_velocity_radar_events.remove(radar_event)
                        else:
                            if last_5_seconds_radar_events_lt:
                                radar_event = last_5_seconds_radar_events_lt[0]
                                Event.objects.create(location=location,
                                                     event_type="RT",
                                                     event_class=event_class,
                                                     timestamp=radar_event.timestamp,
                                                     velocity=abs(radar_event.velocity))
                                radar_event.delete()
                                last_5_seconds_radar_events_lt.remove(radar_event)
                    if radar_event:
                        # A matched radar event also feeds the daily
                        # inflow statistics.
                        process_daily_inflow(radar_event, location)
    return "Processed the JSON {}".format(json_data)
|
import math
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
# NOTE(review): Record (and math) appear unused in this script.
Record = namedtuple("Record", ['freq', 'delta', 'length'])
baseFreq = 440.0
# Each entry: [[length readings in cm], [frequency readings in Hz]].
data = [
    # 1
    [[30.0 - 2.95], [495.0, 499.0]],
    [[30.0 + 0.05], [456.0, 469.5]],
    [[30.0 + 2.95], [420.0, 426.0]],
    [[30.0 + 6.00], [397.0, 400.0]],
    [[30.0 + 8.15], [378.0, 381.0]],
    # 6
    [[30.0 + 10.90], [356.0, 364.0]],
    [[30.0 + 13.45], [340.0, 344.5]],
    [[30.0 + 15.45], [329.0, 330.5]],
    [[30.0 + 17.50], [315.0, 318.5]],
    [[30.0 + 19.30], [303.0, 308.0]],
    # 11
    [[50.0 + 1.35], [296.0, 297.0]],
    [[50.0 + 3.45], [286.0, 287.0]],
    [[50.0 + 5.80], [275.0, 277.5]],
    [[50.0 + 8.35], [256.0, 258.0]],
]
# data[_][0]: length, in cm
# data[_][1]: pitch (frequency), in Hz
# dividing by 100 converts cm to m
l = np.average([ x[0] for x in data ], axis = 1) / 100
print('len array \n %s' % (l))
f = np.average([ x[1] for x in data ], axis = 1)
print('freq array \n %s' % (f))
# Fit the model
#   F = A + B / L
# by minimizing the L2 residual between measured and predicted frequency.
params = [0.0, 220.0]
def loss(params):
    return np.sqrt(np.sum(np.square(
        f - (params[0] + params[1] / l)
    )))
mm = minimize(loss, x0 = params)
print("::: F = %f + %f / L (in m.)" % (mm.x[0], mm.x[1]))
# Plot measured points (back in cm) against the fitted curve.
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(np.array(l) * 100.0, f)
dr = range(25, 60)
ax.plot(dr, [mm.x[0] + mm.x[1] / (length / 100.0) for length in dr])
plt.ylim(0)
plt.show()
# -*- coding:utf-8 -*-#
# --------------------------------------------------------------
# NAME: 08
# Description: Using dicts: key-value storage, analogous to Java's Map
# Author: xuezy
# Date: 2020/7/6 17:48
# --------------------------------------------------------------
def main():
    """Demonstrate basic dict operations: lookup, iteration, update and deletion."""
    scores = {'张三': 11, '李四': 12, '王五': 13}
    # A key retrieves its value
    # 11
    print(scores['张三'])
    # Iterating a dict yields its keys
    # 张三 ---> 11
    # 李四 ---> 12
    # 王五 ---> 13
    for elem in scores:
        # \t inserts a tab character
        # format specifiers: %s string, %d decimal integer
        print('%s\t--->\t%d' % (elem, scores[elem]))
    # Update an existing entry
    scores['李四'] = 15
    # Add entries
    scores.update(赵六=18,崔七=19)
    # {'张三': 11, '李四': 15, '王五': 13, '赵六': 18, '崔七': 19}
    print(scores)
    # Get an entry, falling back to a default when the key is missing
    print(scores.get('一', 60))
    # popitem removes and returns the last-inserted pair ('崔七', 19)
    print(scores.popitem())
    # pop by key returns the removed value, or the default when missing: 33
    print(scores.pop('二', 33))
    # 11
    print(scores.pop('张三', 34))
    # {'李四': 15, '王五': 13, '赵六': 18}
    print(scores)
    # Empty the dict
    scores.clear()
    # {}
    print(scores)
if __name__ == '__main__':
    main()
|
from .logistic_regression import LogisticRegression
from .linear_regression import LinearRegression
from .dnn import DNN
|
# Classify x by its number of decimal digits.
# print() calls replace the Python-2-only print statements, which are a
# SyntaxError under Python 3.
x = 42
if x < 10:
    print('one digit number')
elif x < 100:
    print('two digit number')
else:
    print('a big number')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.