index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
25,100 | 8335ff41e87ac6338ffe08f31137e7f4ce28e944 | # Generated by Django 2.2 on 2019-09-18 17:42
from django.db import migrations, models
import pretix.base.models.fields
class Migration(migrations.Migration):
    """Auto-generated Django 2.2 schema migration.

    Adds `Checkin.auto_checked_in` (boolean flag) and
    `CheckinList.auto_checkin_sales_channels` (multi-string field).
    """

    dependencies = [
        ('pretixbase', '0135_auto_20191007_0803'),
    ]

    operations = [
        migrations.AddField(
            model_name='checkin',
            name='auto_checked_in',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='checkinlist',
            name='auto_checkin_sales_channels',
            field=pretix.base.models.fields.MultiStringField(default=[]),
        )
    ]
|
25,101 | 2d48e87b235b4d11851e14114b765f1984fd1818 | #coding:utf-8
# Print the products i*n for every i in 1..4 and n in 2..3.
for multiplicand in range(1, 5):
    for multiplier in (2, 3):
        print(multiplicand * multiplier)
|
25,102 | 48cdf018a9f6027352f4c9245e39b55c8feb1dc5 | class TreeNode(object):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def find_path(root, sums):
    """Print every root-to-leaf path whose node values add up to `sums`.

    Returns an empty list for an empty tree; otherwise delegates to
    find_path_core (which does the printing) and returns None.
    """
    if not root:
        return []
    find_path_core(root, sums, [], 0)
def find_path_core(root, sums, path, curSum):
    """Recursive DFS helper: extend `path` with this node, print it when a
    leaf completes the target sum `sums`, then backtrack."""
    curSum = curSum + root.value
    path.append(root.value)
    if root.left or root.right:
        # Internal node: descend into each existing subtree (left first).
        for child in (root.left, root.right):
            if child:
                find_path_core(child, sums, path, curSum)
    elif curSum == sums:
        # Leaf that completes the target sum: report the current path.
        print(path)
    path.pop()
if __name__ == "__main__":
node5 = TreeNode(7)
node4 = TreeNode(4)
node3 = TreeNode(12)
node2 = TreeNode(5, node4, node5)
node1 = TreeNode(10, node2, node3)
root = node1
find_path(root, 22) |
25,103 | dcaa50cb9c7f56801bd8c6d40a6807e604017554 | import cv2
import dlib
import base64
import numpy as np
from app import config
from appfly.app import app
from imutils import face_utils
predictor = dlib.shape_predictor(config['LANDMARKS68'])
def base64_to_cv2(uri):
    """Decode a base64 data-URI ('data:image/...;base64,<payload>') to a BGR image.

    Bug fix: the original computed `encoded_data` from *uri* but then
    ignored it and decoded a *different* item pulled from `app.images`,
    consuming an extra queue entry per call. Also replaces the deprecated
    np.fromstring with np.frombuffer.
    """
    encoded_data = uri.split(',')[1]
    nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
    return cv2.imdecode(nparr, cv2.IMREAD_COLOR)
# def get_face_points(shape):
# return np.array([
# shape[33], # Nose tip
# shape[8], # Chin
# shape[36], # Left eye left corner
# shape[45], # Right eye right corner
# shape[48], # Left Mouth corner
# shape[54] # Right mouth corner
# ], dtype="double")
def _dog():
    """Return the raw bytes of the fallback "funny dogs" JPEG frame.

    Uses a context manager so the file handle is closed even if read()
    raises (the original leaked the handle on error).
    NOTE(review): the absolute path is machine-specific — consider moving
    it into config.
    """
    with open("/home/italojs/dev/python/api-flask-noalvo-demo/app/domain/live_stream/static/funny-dogs.jpg", "rb") as fh:
        return fh.read()
def gen_livestream():
    """Multipart-JPEG video streaming generator.

    Alternates between the most recent annotated camera frame from the
    app's image queue and a static "dog" frame, yielding each chunk in
    multipart/x-mixed-replace format.
    """
    import time  # fix: `time` is used here but was never imported in this module

    flag = True
    frame = _dog()
    # Build the detector once — the original constructed it on every frame,
    # which is expensive, and it keeps no per-frame state.
    detector = dlib.get_frontal_face_detector()
    while True:
        time.sleep(0.02)
        if app.images.qsize():
            image = app.images.get()
            if flag:
                image = base64_to_cv2(image)
                gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                rects = detector(gray, 0)
                for rect in rects:
                    # Draw the detected facial landmarks onto the frame.
                    shape = face_utils.shape_to_np(predictor(gray, rect))
                    for (x, y) in shape:
                        cv2.circle(image, (x, y), 2, (0, 255, 0), -1)
                # imencode returns an ndarray; convert to bytes so it can be
                # concatenated with the multipart header below (bytes +
                # ndarray raises TypeError).
                _, encoded = cv2.imencode('.jpg', image)
                frame = encoded.tobytes()
            else:
                frame = _dog()
            flag = not flag
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
|
25,104 | 5afd3f3b5da49f98598430582e6e300b25bb7928 | class Employee:
numberOfWorkingHours = 40
employeeOne = Employee()
employeeTwo = Employee()
# Both instances read the shared class attribute (40).
print(employeeOne.numberOfWorkingHours)
print(employeeTwo.numberOfWorkingHours)
# Class attributes are common to the class; rebinding on the class changes
# what every instance sees.
Employee.numberOfWorkingHours = 45 # overwriting the class attribute value
print(employeeOne.numberOfWorkingHours) # prints 45
print(employeeTwo.numberOfWorkingHours) # prints 45
# Instance attributes are specific to that particular instantiated object.
employeeOne.name = "John" # A new attribute is created without having that in class Employee
print(employeeOne.name)
employeeTwo.name = "Mary"
print(employeeTwo.name) |
25,105 | c0014d5b096ba7b188fbe5e487fc93c9aa09232e | # Generated by Django 3.0.5 on 2020-05-07 15:02
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django 3.0.5 migration.

    Renames `Programmer.languages` to `Programmer.lang`.
    """

    dependencies = [
        ('languages', '0002_auto_20200507_2008'),
    ]

    operations = [
        migrations.RenameField(
            model_name='programmer',
            old_name='languages',
            new_name='lang',
        ),
    ]
|
25,106 | cd1752969ad057ab22880196eba2068c1a14ef19 |
import a04readAM as AM
import a02conDatabase as DTB
import a01conectWifi as WF
import time
import machine
import am2320
from machine import I2C, Pin
# Connect to WiFi once, then sample the AM2320 temperature/humidity sensor
# roughly every 5 minutes and push each reading to the database.
WF.connect()
while True:
    print("New")
    time.sleep(1)  # single settle delay (the original slept twice in a row)
    try:
        print("e")
        i2c = I2C(scl=Pin(12), sda=Pin(14))
        sensor = am2320.AM2320(i2c)
        sensor.measure()
        print(sensor.temperature())
        print(sensor.humidity())
        print("eee")
        DTB.AMTeplotaVlhkost(sensor.temperature(), sensor.humidity())
    except Exception as exc:
        # Narrowed from a bare `except:` and surface the actual error so
        # sensor/database failures are diagnosable from the console.
        print("An exception occurred")
        print(exc)
    print("Sl")
    time.sleep(300)
    print("eep")
# NOTE(review): unreachable — the loop above never terminates, so this
# reset can never run; add a break/iteration cap if a periodic reset is wanted.
print("Reset")
machine.reset()
|
25,107 | be12212f81a41b533f487cf7087d47f99191532f | import numpy as np
from flask import Flask, redirect, url_for, jsonify, render_template, url_for, request
import pickle
# import sqlalchemy
# from sqlalchemy.ext.automap import automap_base
# from sqlalchemy.orm import Session
# from sqlalchemy import create_engine, func
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# app = Flask(__name__, static_url_path='')
#################################################
# Flask Routes
#################################################
# Static page routes: each handler simply renders its template.
@app.route('/')
def root():
    """Landing page."""
    return render_template('index.html')

@app.route('/index')
def index():
    """Alias for the landing page."""
    return render_template('index.html')

@app.route('/about_the_data')
def about_the_data():
    """Data-description page."""
    return render_template('about_the_data.html')

@app.route('/symptom_checker')
def symptom_checker():
    """Symptom input form."""
    return render_template('symptom_checker.html')

@app.route('/graphs')
def graphs():
    """Visualisations page."""
    return render_template('graphs.html')
# NOTE(review): the '/results' decorator below had no function of its own
# (its handler was commented out), so it stacks onto action_page() — POSTs
# to both '/results' and '/action_page' reach the same handler. Preserved
# for backward compatibility.
@app.route('/results', methods=['POST'])
@app.route('/action_page', methods=["post"])
def action_page():
    """Read the symptom form, build the model's feature vector and render the result.

    'Age' is required (raises if missing or non-numeric, as before); every
    other field defaults to 0 when absent or unparsable — unchecked
    checkboxes are simply absent from the posted form.
    """
    print(request.form)
    Age = float(request.form['Age'])
    inputs = [Age]
    print(Age)
    # Order must match the feature order the classifier was trained with.
    feature_fields = [
        'Gender', 'Polyuria', 'Polydipsia', 'weight loss', 'weakness',
        'Polyphagia', 'Genital thrush', 'visual blurring', 'Itching',
        'Irritability', 'delayed healing', 'partial paresis',
        'muscle stiffness', 'Alopecia', 'Obesity',
    ]
    for field in feature_fields:
        try:
            value = float(request.form[field])
        except (KeyError, ValueError):
            # Missing or malformed field -> 0 (same fallback as the
            # original 15 copy-pasted try/except blocks, but with the
            # bare `except:` narrowed).
            value = 0
        print(value)
        inputs.append(value)
    print(inputs)
    # `with` guarantees the model file handle is closed after loading.
    with open('./static/rfc.sav', 'rb') as fh:
        model = pickle.load(fh)
    prediction = model.predict([inputs])
    print(prediction)
    outcome = 'Negative' if prediction[0] == 0 else 'Positive'
    return render_template('symptom_checker.html', outcome=outcome)
# @app.route('/predicted_positive')
# def predicted_positive():
# return render_template('predicted_positive.html')
# @app.route('/predicted_negative')
# def predicted_negative():
# return render_template('predicted_negative.html')
# Start Flask's development server when the script is executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
25,108 | 660196d5f52c102bee6d38fab6723d755f06e2b1 | import testlib, os
print('?') |
25,109 | 6dceb6534af8fb4f190ac479cedcb457af7d0a03 | from tkinter import messagebox
from tkinter import *
import sqlite3
class Atm:
    """Tkinter + SQLite toy ATM with an admin console.

    NOTE(review): user-supplied values are interpolated into several SQL
    strings via str.format (withdraw/edit/delete) — SQL-injection risk;
    menu_ops() shows the parameterized style the others should use.
    """

    def __init__(self):
        # Open (or create) the local database and immediately start the UI
        # main loop — the constructor blocks until the window closes.
        self.db = sqlite3.connect('atm.db')
        self.cursor = self.db.cursor()
        self.cursor.execute("CREATE TABLE IF NOT EXISTS users(username TEXT, password TEXT, cardtype TEXT, amount TEXT, acc_no TEXT)")
        self.run()

    def withdraw(self):
        """Validate the PIN and debit the requested amount from the account."""
        data = self.cursor.execute("SELECT * FROM users WHERE acc_no='{}'".format(self.atmaccountno.get())).fetchall()
        totamt = int(data[0][3])
        wamt = int(self.atmamount.get())
        if self.atmpin.get() == data[0][1]:
            if totamt < wamt:
                messagebox.showerror("Error","Insufficient Balance")
            else:
                query = "UPDATE users SET amount='{}' WHERE acc_no='{}'".format(totamt-wamt,self.atmaccountno.get())
                self.cursor.execute(query)
                self.db.commit()
                messagebox.showinfo("Success","Withdrawal Successful")
        else:
            messagebox.showerror("Error","Incorrect PIN")
        self.atmaccountno.delete(0,END)
        self.atmpin.delete(0,END)
        self.atmamount.delete(0,END)
        # NOTE(review): atm() rebinds self.atm to a Toplevel window,
        # shadowing the atm() method after the window is first opened.
        self.atm.focus()

    def add(self):
        """Insert a new account from the admin form (parameterized query)."""
        self.cursor.execute("INSERT INTO users(username,password,cardtype,amount,acc_no) VALUES(?,?,?,?,?)",
        (self.name.get(),self.pin.get(),self.cardtype.get(),self.amount.get(),self.acc_no.get()))
        self.db.commit()
        messagebox.showinfo("Success","Account Created")
        self.admin.destroy()
        self.adminaccess()

    def edit(self):
        """Update the selected account with the values from the edit form."""
        query = "UPDATE users SET username='{}', password='{}', cardtype='{}', amount='{}' WHERE acc_no='{}'".format(self.editname.get(),self.editpin.get(),self.editcardtype.get(),self.editamount.get(),self.id)
        self.cursor.execute(query)
        self.db.commit()
        messagebox.showinfo("Success","Account Updated")
        self.admin.focus()

    def delete(self):
        """Delete the account currently selected in the admin dropdown.

        NOTE(review): there is no self.db.commit() here, so the deletion is
        only persisted if some later operation commits.
        """
        try:
            self.cursor.execute("DELETE FROM users WHERE acc_no ='{}'".format(self.id))
            messagebox.showinfo("Success","Account Deleted")
            self.admin.destroy()
            self.root.wm_state('normal')
        except:
            messagebox.showerror("Error","Please select an account to delete")

    def menu_ops(self,args):
        """OptionMenu callback: load the chosen account into the edit fields."""
        data = self.cursor.execute("SELECT * FROM users WHERE acc_no = ?",(args[0],)).fetchall()
        try:
            print(self.id)
        except:
            # First selection ever: self.id does not exist yet.
            self.id = args[0]
        if self.id == args[0]:
            if len(self.editname.get()) == 0:
                self.id = self.id
                self.editname.insert(0,data[0][0])
                self.editpin.insert(0,data[0][1])
                self.editcardtype.insert(0,data[0][2])
                self.editamount.insert(0,data[0][3])
        else:
            # A different account was picked: clear and repopulate the form.
            self.editname.delete(0,END)
            self.editpin.delete(0,END)
            self.editcardtype.delete(0,END)
            self.editamount.delete(0,END)
            self.editname.insert(0,data[0][0])
            self.editpin.insert(0,data[0][1])
            self.editcardtype.insert(0,data[0][2])
            self.editamount.insert(0,data[0][3])
            self.id = args[0]

    def allusers(self):
        """Open a window listing every row of the users table."""
        # NOTE(review): rebinding self.allusers to a Toplevel shadows this
        # method, so the "All Users" button only works once per session.
        self.allusers = Toplevel(self.root)
        self.allusers.resizable(False,False)
        self.allusers.title("All Users")
        self.table = Label(self.allusers, text="Username")
        self.table.grid(row=1, column=0)
        self.table = Label(self.allusers, text="Password")
        self.table.grid(row=1, column=1)
        self.table = Label(self.allusers, text="Card Type")
        self.table.grid(row=1, column=2)
        self.table = Label(self.allusers, text="Amount")
        self.table.grid(row=1, column=3)
        self.table = Label(self.allusers, text="Account Number")
        self.table.grid(row=1, column=4)
        self.cursor.execute("SELECT * FROM users")
        self.data = self.cursor.fetchall()
        for i in range(len(self.data)):
            self.table = Label(self.allusers, text=self.data[i][0])
            self.table.grid(row=i+2, column=0)
            self.table = Label(self.allusers, text=self.data[i][1])
            self.table.grid(row=i+2, column=1)
            self.table = Label(self.allusers, text=self.data[i][2])
            self.table.grid(row=i+2, column=2)
            self.table = Label(self.allusers, text=self.data[i][3])
            self.table.grid(row=i+2, column=3)
            self.table = Label(self.allusers, text=self.data[i][4])
            self.table.grid(row=i+2, column=4)
        self.allusers.mainloop()

    def adminaccess(self):
        """Admin login handler: on 'admin'/'admin' open the management window."""
        if self.username.get() == 'admin' and self.password.get() == 'admin':
            self.admin = Toplevel(self.root)
            self.admin.title("Admin | Logged In")
            self.root.wm_state('iconic')
            self.admin.resizable(False,False)
            self.admin.geometry("800x400")
            namelabel = Label(self.admin,text = "Name")
            namelabel.place(relx = 0.1, rely = 0.1)
            self.name = Entry(self.admin, width = 40)
            self.name.place(relx = 0.1, rely = 0.16)
            pinlabel = Label(self.admin,text = "PIN")
            pinlabel.place(relx = 0.1, rely = 0.22)
            self.pin = Entry(self.admin, width = 40)
            self.pin.place(relx = 0.1, rely = 0.28)
            cardtype = Label(self.admin,text = "Card Type")
            cardtype.place(relx = 0.1, rely = 0.34)
            self.cardtype = Entry(self.admin, width = 40)
            self.cardtype.place(relx = 0.1, rely = 0.40)
            amountlabel = Label(self.admin,text = "Amount")
            amountlabel.place(relx = 0.1, rely = 0.46)
            self.amount = Entry(self.admin, width = 40)
            self.amount.place(relx = 0.1, rely = 0.52)
            acc_nolabel = Label(self.admin,text="Account Number")
            acc_nolabel.place(relx = 0.1, rely = 0.58)
            self.acc_no = Entry(self.admin, width = 40)
            self.acc_no.place(relx = 0.1, rely = 0.64)
            addbtn = Button(self.admin,text = "Add",command = self.add)
            addbtn.place(relx = 0.3, rely = 0.70)
            try:
                # With no accounts, unpacking `*data` into the OptionMenu
                # raises, landing in the except arm below.
                acclabel = Label(self.admin,text = "Select Account Number")
                acclabel.place(relx = 0.55, rely = 0.1)
                clicked = StringVar()
                clicked.set("SELCECT ACCOUNT NUMBER")
                data = self.cursor.execute("SELECT acc_no FROM users").fetchall()
                self.acclist = OptionMenu(self.admin,clicked, *data , command = self.menu_ops)
                self.acclist.place(relx = 0.55, rely = 0.16)
                editnamelabel = Label(self.admin , text="Name")
                editnamelabel.place(relx = 0.55, rely = 0.30)
                self.editname = Entry(self.admin, width = 40)
                self.editname.place(relx = 0.55, rely = 0.36)
                editpinlabel = Label(self.admin, text="PIN")
                editpinlabel.place(relx = 0.55, rely = 0.42)
                self.editpin = Entry(self.admin, width = 40)
                self.editpin.place(relx = 0.55, rely = 0.48)
                editcardtype = Label(self.admin, text="Card Type")
                editcardtype.place(relx = 0.55, rely = 0.54)
                self.editcardtype = Entry(self.admin, width = 40)
                self.editcardtype.place(relx = 0.55, rely = 0.60)
                editamountlabel = Label(self.admin, text="Amount")
                editamountlabel.place(relx = 0.55, rely = 0.66)
                self.editamount = Entry(self.admin, width = 40)
                self.editamount.place(relx = 0.55, rely = 0.72)
                editbtn = Button(self.admin, text="Update", command = self.edit)
                editbtn.place(relx = 0.55, rely = 0.78)
                deletebtn = Button(self.admin, text="Delete", command = self.delete)
                deletebtn.place(relx = 0.65, rely = 0.78)
                allusersbtn = Button(self.admin, text="All Users", command = self.allusers)
                allusersbtn.place(relx = 0.75, rely = 0.78)
            except:
                noaccountlabel = Label(self.admin, text="No Accounts Available")
                noaccountlabel.place(relx = 0.55, rely = 0.1)
            self.admin.mainloop()

    def atm(self):
        """Open the customer-facing ATM window."""
        self.atm = Toplevel(self.root)
        self.atm.geometry("300x300")
        self.atm.resizable(False,False)
        self.atm.title("ATM")
        acclabel = Label(self.atm,text = "Account Number")
        acclabel.place(relx = 0.1 , rely = 0.1)
        self.atmaccountno = Entry(self.atm, width = 40)
        self.atmaccountno.place(relx = 0.1 , rely = 0.16)
        pinlabel = Label(self.atm,text = "PIN Number")
        pinlabel.place(relx = 0.1 , rely = 0.22)
        self.atmpin = Entry(self.atm , width = 40 , show='*')
        self.atmpin.place(relx=0.1,rely = 0.28)
        amountlabel = Label(self.atm,text = "Amount")
        amountlabel.place(relx = 0.1 , rely = 0.34)
        self.atmamount = Entry(self.atm , width = 40)
        self.atmamount.place(relx = 0.1, rely = 0.40)
        withdraw = Button(self.atm,text = "Withdraw" ,command= self.withdraw )
        withdraw.place(relx = 0.4,rely=0.46)
        self.atm.mainloop()

    def run(self):
        """Build the admin-login root window and enter the Tk main loop."""
        self.root = Tk()
        self.root.title("ADMIN LOGIN")
        self.root.geometry("400x400")
        self.root.resizable(False,False)
        usernamelabel = Label(self.root, text = "Username:")
        usernamelabel.place(relx = 0.1,rely = 0.1)
        self.username = Entry(self.root , width = 50)
        self.username.place(relx = 0.1,rely=0.16)
        passwordlabel = Label(self.root , text = "Password:")
        passwordlabel.place(relx = 0.1, rely = 0.22)
        self.password = Entry(self.root, width = 50, show='*')
        self.password.place(relx = 0.1, rely = 0.28)
        submit = Button(self.root,text = "Submit",command = self.adminaccess)
        submit.place(relx = 0.4 , rely = 0.34)
        atmbtn = Button(self.root,width = 40,text = "ATM", command = self.atm)
        atmbtn.place(relx = 0.1,rely = 0.45)
        self.root.mainloop()

    def __enter__(self):
        # Context-manager support; NOTE(review): the module instantiates
        # Atm() without `with`, so __exit__/db.close() never actually runs.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.db.close()
Atm() |
25,110 | e175deaedc1ad45d06ebe4196238bf8c0b581001 | ##=============================================================================================
##=============================================================================================
# IMPLEMENTATION OF A CONVOLUTIONAL NEURAL NETWORK TO CLASSIFY JET IMAGES AT LHC
##=============================================================================================
##=============================================================================================
# This script loads the (image arrays,true_values) tuples, creates the train, cross-validation and test sets and runs a convolutional neural network to classify signal vs background images. We then get the statistics and analyze the output. We plot histograms with the probability of signal and background to be tagged as signal, ROC curves and get the output of the intermediate layers and weights.
# Last updated: October 30, 2017. Sebastian Macaluso
##---------------------------------------------------------------------------------------
##---------------------------------------------------------------------------------------
# This code is ready to use on the jet_array1/test_large_sample dir. (The "expand image" function is currently commented). This version is for gray scale images.
# To run:
# Previous runs:
# python cnn_keras_jets.py input_sample_signal input_sample_background number_of_epochs fraction_to_use mode(train or notrain) weights_filename
# python convnet_keras.py test_large_sample 20 0.1 train &> test2
##=============================================================================================
##=============================================================================================
##=============================================================================================
############ LOAD LIBRARIES
##=============================================================================================
from __future__ import print_function
import numpy as np
np.random.seed(1560) # for reproducibility
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras import backend as K # We are using TensorFlow as Keras backend
# from keras import optimizers
from keras.utils import np_utils
import pickle
import gzip
import sys
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import h5py
import time
start_time = time.time()
import data_loader as dl
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
#config.gpu_options.visible_device_list = "0"
set_session(tf.Session(config=config))
##=============================================================================================
############ GLOBAL VARIABLES
##=============================================================================================
# local_dir='/Users/sebastian/Documents/Deep-Learning/jet_images/'
local_dir=''
os.system('mkdir -p jet_array_3')
image_array_dir_in=local_dir+'jet_array_1/' #Input dir to load the array of images
# image_array_dir_in='../David/jet_array_1/'
in_arrays_dir= sys.argv[1]
# large_set_dir=image_array_dir_in+in_arrays_dir+'/'
large_set_dir=image_array_dir_in+in_arrays_dir+'/'
in_std_label='no_std' #std label of input arrays
std_label='bg_std' #std label for the standardization of the images with probabilities between prob_min and prob_max
bias=2e-02
npoints = 38 #npoint=(Number of pixels+1) of the image
N_pixels=np.power(npoints-1,2)
myMethod='std'
my_bins=20
# npoints=38
# extra_label='batch_norm'
extra_label='_early_stop'
min_prob=0.2
max_prob=0.8
my_batch_size = 128
num_classes = 2
epochs =int(sys.argv[2])
#Run over different sets sizes
sample_relative_size=float(sys.argv[3])
mode=sys.argv[4]
# mode='train'
# mode='notrain'
# input image dimensions
img_rows, img_cols = 37, 37
learning_rate=[np.sqrt(8.0)]# The default value for the learning rate (lr) Adadelta is 1.0. We divide the learning rate by sqrt(2) when the loss does not improve, we should start with lr=sqrt(8), so that the starting value is 2 (This is because we defined self.losses = [1,1] as the starting point).
# learning_rate=[1.0]
##=============================================================================================
############ FUNCTIONS TO LOAD AND CREATE THE TRAINING, CROSS-VAL AND TEST SETS
##=============================================================================================
#1) We load the .npy file with the image arrays
def load_array(Array):
    """Load one saved .npy image array from the global input directory."""
    print('Loading signal and background arrays ...')
    print('-----------'*10)
    return np.load(large_set_dir + Array)
##---------------------------------------------------------------------------------------------
#2) We expand the images (adding zeros when necessary)
def expand_array(images):
    """Scatter sparse pixel entries into dense (N, img_rows, img_cols) arrays.

    Each image is a sequence of ([iimage, ipixel, jpixel], value) entries;
    the index triple addresses the destination cell directly.
    """
    n_images = len(images)
    print('Number of images ', n_images)
    dense = np.zeros((n_images, img_rows, img_cols))
    for sparse_image in images:
        for (index_triple, value) in sparse_image:
            dense[index_triple[0], index_triple[1], index_triple[2]] = value
    return dense
##---------------------------------------------------------------------------------------------
#3) We create a tuple of (image array, true value) joining signal and background, and we shuffle it.
def add_sig_bg_true_value(Signal,Background):
    """Label and merge signal/background images, then shuffle.

    Signal images get label 0 and background images label 1. Returns a
    NumPy-shuffled sequence of (image, label) pairs — presumably an
    object-dtype array, since image and label have different shapes
    (TODO confirm against the NumPy version in use).
    """
    print('Creating tuple (data,true value) ...')
    print('-----------'*10)
    Signal=np.asarray(Signal)
    Background=np.asarray(Background)
    input_array=[]
    true_value=[]
    # Signal jets are tagged 0, background jets 1 (note the inverted-looking
    # convention; downstream code relies on it).
    for ijet in range(0,len(Signal)):
        input_array.append(Signal[ijet].astype('float32'))
        true_value.append(np.array([0]).astype('float32'))
    for ijet in range(0,len(Background)):
        input_array.append(Background[ijet].astype('float32'))
        true_value.append(np.array([1]).astype('float32'))
    output=list(zip(input_array,true_value))
    print('Shuffling tuple (data, true value) ...')
    print('-----------'*10)
    # np.random.permutation converts the pair list into an array and
    # shuffles along the first axis.
    shuffle_output=np.random.permutation(output)
    return shuffle_output
##---------------------------------------------------------------------------------------------
#4) This function loads the zipped tuple of image arrays and true values. It divides the data into train and validation sets. Then we create new arrays with$
def sets(Data):
    """Add a trailing channel axis to every image and re-pair with labels.

    Takes (image, label) pairs with 2-D gray-scale images and returns the
    same pairs with images reshaped to (rows, cols, 1) for Keras.
    """
    print('Generating arrays with the correct input format for Keras ...')
    print('-----------'*10)
    images = np.asarray([pair[0] for pair in Data])
    labels = [pair[1] for pair in Data]
    print('Shape X = {}'.format(images.shape))
    # Gray-scale: append a single channel dimension.
    reshaped = images.reshape(images.shape[0], images.shape[1], images.shape[2], 1)
    print('-----------'*10)
    print('Shape X out after adding [] to each element= {}'.format(reshaped.shape))
    print('-----------'*10)
    return list(zip(reshaped, labels))
##---------------------------------------------------------------------------------------------
#5) Get the list with the input images file names
def get_input_array_list(input_array_dir):
    """Return (signal, background) filename lists from *input_array_dir*.

    Signal files start with 'tt_', background files with 'QCD_'; both
    lists are alphabetically sorted.
    """
    filenames = np.sort(os.listdir(input_array_dir))
    sg_imagelist = [name for name in filenames if name.startswith('tt_')]
    bg_imagelist = [name for name in filenames if name.startswith('QCD_')]
    return sg_imagelist, bg_imagelist
##---------------------------------------------------------------------------------------------
#6) Define a dictionary to identify the training, cross-val and test sets
def load_all_files(array_list):
    """Load every array file in *array_list* into an index-keyed dictionary.

    Fixes: the original shadowed the builtin `dict` with a local variable
    and iterated with range(len(...)) instead of enumerate.
    """
    arrays = {}
    for index, filename in enumerate(array_list):
        arrays[index] = load_array(filename)
        print('Dict {} lenght = {}'.format(index, len(arrays[index])))
    return arrays
##---------------------------------------------------------------------------------------------
#7) Cut the number of images in the sample when necessary
def cut_sample(data_tuple, sample_relative_size):
    """Keep only the leading `sample_relative_size` fraction of the sample."""
    print('-----------'*10)
    print(data_tuple.shape, 'Input array sample shape before cut')
    print('-----------'*10)
    keep = int(sample_relative_size * len(data_tuple))
    trimmed = data_tuple[:keep]
    print(trimmed.shape, 'Input array sample shape after cut')
    print('-----------'*10)
    return trimmed
##---------------------------------------------------------------------------------------------
#8) Split the sample into train, cross-validation and test
def split_sample(data_tuple, train_frac_rel, val_frac_rel, test_frac_rel):
    """Split (image, label) pairs into train/val/test arrays for Keras.

    The fractions are relative and turned into cumulative cut points:
    val ends at train+val, test at train+val+test. Labels are one-hot
    encoded with the module-level `num_classes`.
    """
    # Convert relative fractions into cumulative boundaries.
    val_frac_rel=train_frac_rel+val_frac_rel
    test_frac_rel =(val_frac_rel+test_frac_rel)
    train_frac=train_frac_rel
    val_frac=val_frac_rel
    test_frac=test_frac_rel
    N_train=int(train_frac*len(data_tuple))
    Nval=int(val_frac*len(data_tuple))
    Ntest=int(test_frac*len(data_tuple))
    x_train=[x for (x,y) in data_tuple[0:N_train]]
    Y_train=[y for (x,y) in data_tuple[0:N_train]]
    x_val=[x for (x,y) in data_tuple[N_train:Nval]]
    Y_val=[y for (x,y) in data_tuple[N_train:Nval]]
    x_test=[x for (x,y) in data_tuple[Nval:Ntest]]
    Y_test=[y for (x,y) in data_tuple[Nval:Ntest]]
    # Convert class vectors to binary (one-hot) class matrices.
    y_train = keras.utils.to_categorical(Y_train, num_classes)
    y_val = keras.utils.to_categorical(Y_val, num_classes)
    y_test = keras.utils.to_categorical(Y_test, num_classes)
    # Define input data format as NumPy arrays.
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_val = np.array(x_val)
    y_val = np.array(y_val)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    print('-----------'*10)
    print(len(x_train), 'train samples ('+str(train_frac*100)+'% of the set)')
    print(len(x_val), 'validation samples ('+str((val_frac-train_frac)*100)+'% of the set)')
    print(len(x_test), 'test samples ('+str(100-(val_frac)*100)+'% of the set)')
    print('-----------'*10)
    # x_train.shape is (n_samples, rows, cols, channels); channels is 1 here.
    print(x_train.shape, 'train sample shape')
    print('-----------'*10)
    print('-----------'*10)
    return x_train, y_train, x_val, y_val, x_test, y_test
##---------------------------------------------------------------------------------------------
#9) Concatenate arrays into a single set (i.e. cross-val or test) when multiple files are loaded
def concatenate_arrays(array_list, label_sg_bg, label):
    """Concatenate the pre-loaded validation/test arrays for one class.

    Looks arrays up in the module-level caches (my_dict_val_sg,
    my_dict_val_bg, my_dict_test_sg, my_dict_test_bg) selected by
    *label_sg_bg* ('sg'/'bg') and *label* ('val'/'test'); each piece is
    trimmed by the global sample_relative_size before concatenation.
    """
    if label_sg_bg=='sg' and label=='val':
        temp_array=my_dict_val_sg[0]
    elif label_sg_bg=='bg' and label=='val':
        temp_array=my_dict_val_bg[0]
    elif label_sg_bg=='sg' and label=='test':
        temp_array=my_dict_test_sg[0]
    elif label_sg_bg=='bg' and label=='test':
        temp_array=my_dict_test_bg[0]
    else:
        # NOTE(review): falls through with temp_array unbound -> NameError below.
        print('Please specify the right labels')
    temp_array = cut_sample(temp_array, sample_relative_size)
    # Remaining files: append each trimmed array to the running concatenation.
    for index in range(len(array_list[1::])):
        new_index=index+1
        if label_sg_bg=='sg' and label=='val':
            single_array=my_dict_val_sg[new_index]
        elif label_sg_bg=='bg' and label=='val':
            single_array=my_dict_val_bg[new_index]
        elif label_sg_bg=='sg' and label=='test':
            single_array=my_dict_test_sg[new_index]
        elif label_sg_bg=='bg' and label=='test':
            single_array=my_dict_test_bg[new_index]
        else:
            print('Please specify the right labels')
        single_array = cut_sample(single_array, sample_relative_size)
        elapsed=time.time()-start_time
        print('images expanded')
        print('elapsed time',elapsed)
        temp_array=np.concatenate((temp_array,single_array), axis=0)
    return temp_array
##---------------------------------------------------------------------------------------------
#10) Create the validation and test sets
def generate_input_sets(sg_files, bg_files,train_frac_rel, in_val_frac_rel,in_test_frac_rel, set_label):
    """Build one full (x, y) dataset from the signal/background file lists.

    Concatenates the cached arrays for both classes, labels and shuffles
    them, splits into train/val/test, and returns the slice named by
    *set_label* ('train', 'val' or 'test').

    NOTE(review): returns None implicitly for any other *set_label*.
    """
    print('Generates batches of samples for {}'.format(sg_files))
    print('len(sg_files)=',len(sg_files))
    indexes = np.arange(len(sg_files))
    print('-----------'*10)
    print( 'indexes =',indexes)
    print('-----------'*10)
    signal = concatenate_arrays(sg_files,'sg',set_label)
    background = concatenate_arrays(bg_files,'bg',set_label)
    # Shapes are (n_samples, rows, cols[, channels]).
    print(signal.shape, 'signal sample shape')
    print(background.shape, 'background sample shape')
    data_in= add_sig_bg_true_value(signal,background)
    data_tuple = sets(data_in)
    x_train1, y_train1, x_val1, y_val1, x_test1, y_test1 = split_sample(data_tuple, train_frac_rel, in_val_frac_rel,in_test_frac_rel)
    if set_label=='train':
        print('-----------'*10)
        print('Using training dataset')
        print('-----------'*10)
        return x_train1, y_train1
    elif set_label == 'val':
        print('-----------'*10)
        print('Using validation dataset')
        print('-----------'*10)
        return x_val1, y_val1
    elif set_label=='test':
        return x_test1, y_test1
##=============================================================================================
############ DATA GENERATOR CLASS TO GENERATE THE BATCHES FOR TRAINING
##=============================================================================================
# (We create this class because the sample size is larger than the memory of the system)
# The data generator is just that a generator that has no idea how the data it generates is going to be used and at what epoch. It should just keep generating batches of data forever as needed.
class DataGenerator(object):
    """Yields (x, y) batches indefinitely for Keras' fit_generator.

    Reads pre-loaded arrays from the module-level dictionaries
    my_dict_train_sg / my_dict_train_bg, so it only works after those have
    been populated by load_all_files() below.
    """
    # NOTE: this print runs once, at class-definition time (class-body code).
    print('Generates data for Keras')
    def __init__(self, dim_x = img_rows, dim_y = img_cols, batch_size = my_batch_size, shuffle = False):
        # 'Initialization'
        self.dim_x = dim_x
        self.dim_y = dim_y
        self.batch_size = batch_size
        self.shuffle = shuffle
    def generate(self, sg_files, bg_files,train_frac_rel, in_val_frac_rel,in_test_frac_rel, set_label):
        """Infinite generator: for every input file pair, cut/label/split the
        sample and yield consecutive batch_size-sized slices of the split
        selected by ``set_label``."""
        print('Generates batches of samples for {}'.format(sg_files))
        # Infinite loop
        # print('len(sg_files)=',len(sg_files))
        while True:
            indexes = np.arange(len(sg_files))
            print('len(sg_files)= ',len(sg_files))
            for index in indexes:
                # Reconstruct the sample name from the file names (kept for
                # bookkeeping; not used further in this method).
                name_sg=str('_'.join(sg_files[index].split('_')[:2]))
                name_bg=str('_'.join(bg_files[index].split('_')[:-1]))
                in_tuple=name_sg+'_'+name_bg
                # print('Name signal ={}'.format(name_sg))
                # print('Name background={}'.format(name_bg))
                # print('-----------'*10)
                # NOTE(review): the pre-loaded *training* dictionaries are used
                # regardless of set_label -- confirm this is intended when the
                # generator is asked for 'val'/'test' batches.
                signal= my_dict_train_sg[index]
                background = my_dict_train_bg[index]
                # signal= load_array(sg_files[index])
                # background = load_array(bg_files[index])
                signal = cut_sample(signal, sample_relative_size)
                background = cut_sample(background, sample_relative_size)
                # signal=expand_array(signal)
                # background=expand_array(background)
                elapsed=time.time()-start_time
                print('images expanded')
                print('elapsed time',elapsed)
                data_in= add_sig_bg_true_value(signal,background)
                data_tuple = sets(data_in)
                x_train1, y_train1, x_val1, y_val1, x_test1, y_test1 = split_sample(data_tuple, train_frac_rel, in_val_frac_rel,in_test_frac_rel)
                subindex= np.arange(len(x_train1))
                print('len(x_train1[{}])= {}'.format(index,len(x_train1)))
                # Number of complete batches available from this file.
                imax = int(len(subindex)/self.batch_size)
                print('imax =',imax)
                print('\n'+'-----------'*10)
                print('////////////'*10)
                for i in range(imax):
                    if set_label=='train':
                        # x_train_temp = [x_train1[k] for k in subindex[i*self.batch_size:(i+1)*self.batch_size]]
                        # y_train_temp = [y_train1[k] for k in subindex[i*self.batch_size:(i+1)*self.batch_size]]
                        x_train_temp = x_train1[i*self.batch_size:(i+1)*self.batch_size]
                        y_train_temp = y_train1[i*self.batch_size:(i+1)*self.batch_size]
                        # print(x_train_temp.shape, 'x_train_temp sample shape') # train_x.shape should be (batch or number of samples, height, width, channels), where channels is 1 for gray scale and 3 for RGB pictures
                        # print('-----------'*10)
                        # print('Using training dataset')
                        # print('-----------'*10)
                        yield x_train_temp, y_train_temp
                    elif set_label == 'val':
                        # x_val_temp = [x_val1[k] for k in subindex[i*self.batch_size:(i+1)*self.batch_size]]
                        # y_val_temp = [y_val1[k] for k in subindex[i*self.batch_size:(i+1)*self.batch_size]]
                        x_val_temp = x_val1[i*self.batch_size:(i+1)*self.batch_size]
                        y_val_temp = y_val1[i*self.batch_size:(i+1)*self.batch_size]
                        print('-----------'*10)
                        print('Using validation dataset')
                        print('-----------'*10)
                        yield x_val_temp, y_val_temp
                    elif set_label=='test':
                        yield x_test1, y_test1
##=============================================================================================
############ LOAD AND CREATE THE TRAINING, CROSS-VAL AND TEST SETS
##=============================================================================================
# The last 4 files of each class are held out: 2 for validation, 2 for test.
signal_array_list,background_array_list = get_input_array_list(large_set_dir)
#----------------------------------------------------------------------------------------------------
train_signal_array_list = signal_array_list[0:-4]
train_background_array_list = background_array_list[0:-4]
print('-----------'*10)
print('-----------'*10)
print('train_signal_array_list=',train_signal_array_list)
print('-----------'*10)
print('train_bg_array_list=',train_background_array_list)
print('-----------'*10)
# Count the signal images over all training files; the factor 2 in Ntrain
# accounts for the equally sized background sample.
total_images=0
for i_file in range(len(train_signal_array_list)):
    steps_file=load_array(train_signal_array_list[i_file])
    total_images+=len(steps_file)
# If the fraction from my input files for training is different from (train_frac, val_frac, test_frac)=(1,0,0), then also multiply Ntrain*train_frac
Ntrain=2*total_images*sample_relative_size
print('Ntrain',Ntrain)
val_signal_array_list = signal_array_list[-4:-2]
val_background_array_list = background_array_list[-4:-2]
test_signal_array_list = signal_array_list[-2::]
test_background_array_list = background_array_list[-2::]
print('-----------'*10)
print('val_signal_array_list=',val_signal_array_list)
print('-----------'*10)
print('val_bg_array_list=',val_background_array_list)
print('-----------'*10)
print('-----------'*10)
print('test_signal_array_list=',test_signal_array_list)
print('-----------'*10)
print('test_bg_array_list=',test_background_array_list)
print('-----------'*10)
##---------------------------------------------------------------------------------------------
# Load all the files to the dictionary
# (kept in memory so DataGenerator can index into them by file number)
my_dict_train_sg=load_all_files(train_signal_array_list)
my_dict_train_bg=load_all_files(train_background_array_list)
my_dict_val_sg=load_all_files(val_signal_array_list)
my_dict_val_bg=load_all_files(val_background_array_list)
my_dict_test_sg=load_all_files(test_signal_array_list)
my_dict_test_bg=load_all_files(test_background_array_list)
##=============================================================================================
############ DEFINE THE NEURAL NETWORK ARCHITECTURE AND IMPLEMETATION
##=============================================================================================
# CNN: three conv/max-pool stages, then two 256-unit dense layers with
# dropout, and a softmax over num_classes outputs. Inputs are single-channel.
input_shape = (img_rows, img_cols,1)
model = Sequential()
convin1=Conv2D(32, kernel_size=(4, 4),
                 activation='relu',
                 input_shape=input_shape)
model.add(convin1)
# convout1/2/3 keep references to the pooling layers so their outputs can be
# visualized in the analysis section below.
convout1 = MaxPooling2D(pool_size=(2, 2))
model.add(convout1)
model.add(Conv2D(64, (4, 4), activation='relu'))
convout2=MaxPooling2D(pool_size=(2, 2))
model.add(convout2)
model.add(Dropout(0.25))
model.add(Conv2D(64, (2, 2), activation='relu'))
convout3=MaxPooling2D(pool_size=(2, 2))
model.add(convout3)
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
##---------------------------------------------------------------------------------------------
# LOSS FUNCTION - OPTIMIZER
# We define the loss/cost function and the optimizer to reach the minimum (e.g. gradient descent, adadelta, etc).
#a) For loss=keras.losses.categorical_crossentropy, we need to get the true values in the form of vectors of 0 and 1: y_train = keras.utils.to_categorical(y_train, num_classes)
#b) Use metrics=['accuracy'] for classification problems
#1) Adadelta
Adadelta=keras.optimizers.Adadelta(lr=learning_rate[0], rho=0.95, epsilon=1e-08, decay=0.0)
#2) Adam
Adam=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#3) Sigmoid gradient descent: the convergence is much slower than with Adadelta
sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Only Adadelta is actually used; Adam and sgd are kept as alternatives.
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=Adadelta,
              metrics=['categorical_accuracy'])
##---------------------------------------------------------------------------------------------
# FUNCTIONS TO ADJUST THE LEARNING RATE
# We write functons to divide by 2 the learning rate when the validation loss (val_loss) does not improve within some treshold
# Get the validation losses after each epoch
sd=[]
class LossHistory(keras.callbacks.Callback):
    """Records val_loss after every epoch and feeds it to step_decay() to
    decide whether the learning rate should shrink."""
    def on_train_begin(self, logs={}):
        self.losses = [1,1] #Initial value of the val loss function
    def on_epoch_end(self, epoch, logs={}):
        self.losses.append(logs.get('val_loss')) # We append the val loss of the last epoch to losses
        sd.append(step_decay(len(self.losses))) # We run step_decay to determine if we update the learning rate
        # print('lr:', step_decay(len(self.losses)))
##-----------------------------
# Take the difference between the last 2 val_loss and divide the learning rate by sqrt(2) when it does not improve. Both requirements should be satisfied:
#1) loss[-2]-loss[-1]<0.0005
#2) loss[-2]-loss[-1]< loss[-1]/3
def step_decay(losses):
    # NOTE(review): the `losses` argument is never used -- the function reads
    # the global `history.losses` and `learning_rate` instead; it only acts
    # as a scheduler hook for LearningRateScheduler.
    # if float(np.array(history.losses[-2])-np.array(history.losses[-1]))<0.0005 and
    if float(np.array(history.losses[-2])-np.array(history.losses[-1]))<0.0001 and float(np.array(history.losses[-2])-np.array(history.losses[-1]))< np.array(history.losses[-1])/3:
        print('\n loss[-2] = ',np.array(history.losses[-2]))
        print('\n loss[-1] = ',np.array(history.losses[-1]))
        print('\n loss[-2] - loss[-1] = ',float(np.array(history.losses[-2])-np.array(history.losses[-1])))
        # No significant improvement: shrink the rate by sqrt(2) and log it.
        lrate=learning_rate[-1]/np.sqrt(2)
        learning_rate.append(lrate)
    else:
        lrate=learning_rate[-1]
    print('\n Learning rate =',lrate)
    print('------------'*10)
    return lrate
##-----------------------------
history=LossHistory() #We define the class history that will have the val loss values
# Get val_loss for each epoch. This is called at the end of each epoch and it will append the new value of the val_loss to the list 'losses'.
lrate=keras.callbacks.LearningRateScheduler(step_decay) # Get new learning rate
early_stop=keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.002, patience=4, verbose=0, mode='auto')
# patience=4 means that if there is no improvement in the cross-validation accuracy greater that 0.002 within the following 3 epochs, then it stops
##=============================================================================================
############ TRAIN THE MODEL (OR LOAD TRAINED WEIGHTS)
##=============================================================================================
# mode == 'notrain': load weights given in sys.argv[6] and skip training.
# mode == 'train'  : (optionally resume from sys.argv[6] and) train, then save.
#Make folder to save weights
weights_dir = 'weights/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+weights_dir)
if mode=='notrain':
    my_weights=sys.argv[6]
    WEIGHTS_FNAME=weights_dir+my_weights
    if True and os.path.exists(WEIGHTS_FNAME):
        # Just change the True to false to force re-training
        print('Loading existing weights')
        print('------------'*10)
        model.load_weights(WEIGHTS_FNAME)
    else:
        print('Please specify a weights file to upload')
elif mode=='train':
    # Callbacks: `history` records val_loss, `lrate` adapts the learning rate,
    # `early_stop` halts training when val_loss stops improving.
    # my_weights_name='cnn_weights_epochs_'+str(epochs)+'_Ntrain_'+str(Ntrain)+'_'+in_tuple+extra_label+'.hdf'
    my_weights_name='cnn_weights_epochs_'+str(epochs)+'_Ntrain_'+str(Ntrain)+'_'+extra_label+'.hdf'
    # Optionally load existing weights and continue training from them.
    if len(sys.argv) > 6:
        my_weights=sys.argv[6]
        WEIGHTS_FNAME=weights_dir+my_weights
        if True and os.path.exists(WEIGHTS_FNAME):
            # Just change the True to false to force re-training
            print('Loading existing weights')
            print('------------'*10)
            model.load_weights(WEIGHTS_FNAME)
            # BUGFIX: the epoch count is the 4th '_'-separated token of the
            # weights file name. The previous
            #     int('_'.join(my_weights.split('_')[3]))
            # joined the *characters* of that token with underscores
            # (e.g. '10' -> '1_0'), so int() raised ValueError for any
            # multi-digit epoch count.
            previous_epoch=int(my_weights.split('_')[3])
            # my_weights_name='cnn_weights_epochs_'+str(epochs+previous_epoch)+'_Ntrain_'+str(Ntrain)+'_'+in_tuple+extra_label+'.hdf'
            my_weights_name='cnn_weights_epochs_'+str(epochs+previous_epoch)+'_Ntrain_'+str(Ntrain)+'_'+extra_label+'.hdf'
    ##-----------------------------
    # Create training and cross-validation sets
    train_x_train_y = DataGenerator().generate(train_signal_array_list, train_background_array_list, 1.0,0.0,0.0, 'train')
    # val_x_val_y = DataGenerator().generate(val_signal_array_list, val_background_array_list, 0.0,1.0,0.0, 'val')
    val_x, val_y = generate_input_sets(val_signal_array_list, val_background_array_list, 0.0,1.0,0.0, 'val')
    print('total_images =',total_images)
    # Batches per epoch: signal+background images (hence the factor 2) scaled
    # by the sample fraction, divided by the batch size.
    my_steps_per_epoch= int(2*total_images*sample_relative_size/my_batch_size)
    print('my_steps_per_epoch =',my_steps_per_epoch)
    my_max_q_size=my_steps_per_epoch/6
    ##-----------------------------
    # Run Keras training routine
    model.fit_generator(generator = train_x_train_y,
                steps_per_epoch = my_steps_per_epoch, #This is the number of files that we use to train in each epoch
                epochs=epochs,
                verbose=2,
                validation_data =(val_x, val_y)
                ,max_q_size=my_max_q_size # defaults to 10
                ,callbacks=[history,lrate,early_stop]
                )
    WEIGHTS_FNAME = weights_dir+my_weights_name
    print('------------'*10)
    print('Weights filename =',WEIGHTS_FNAME)
    print('------------'*10)
    # We save the trained weights
    model.save_weights(WEIGHTS_FNAME, overwrite=True)
else:
    print('Please specify a valid mode')
    print('------------'*10)
##-----------------------------
# Create the test set and evaluate the model
test_x, test_y = generate_input_sets(test_signal_array_list, test_background_array_list, 0.0,0.0,1.0, 'test')
score = model.evaluate(test_x, test_y, verbose=0)
print('Test loss = ', score[0])
print('Test accuracy = ', score[1])
print('------------'*10)
print('All learning rates = ',learning_rate)
print('------------'*10)
# sys.exit()
##=============================================================================================
##=============================================================================================
########################### ANALYZE RESULTS ####################################
##=============================================================================================
##=============================================================================================
##=============================================================================================
############ LOAD LIBRARIES
##=============================================================================================
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
##=============================================================================================
############ GLOBAL VARIABLES
##=============================================================================================
# Number of feature maps whose weight mosaics are plotted per conv layer.
N_out_layer0=1
N_out_layer1=32
N_out_layer2=64
##-------------------------------
# Build an identifier for this signal/background pair from the file names;
# it is reused in every output file name below.
name_sg=str('_'.join(signal_array_list[0].split('_')[:2]))
name_bg=str('_'.join(background_array_list[0].split('_')[:-1]))
in_tuple=name_sg+'_'+name_bg
print('------------'*10)
print('------------'*10)
print('in_tuple = ',in_tuple)
print('------------'*10)
name='_'.join(in_tuple.split('_')[:4])+'_pTj_'+'_'.join(in_tuple.split('_')[-3:-1])
# print(in_tuple.split('_'))
print('Name of dir with weights and output layer images=',name)
##-------------------------------
# Create directorires
os.system('mkdir -p analysis/')
os.system('mkdir -p analysis/outlayer_plots/')
os.system('mkdir -p analysis/weight_plots/')
##=============================================================================================
############ PREDICT OUTPUT PROBABILITIES
##=============================================================================================
# Predict output probability for each class (signal or background) for the image
Y_Pred_prob = model.predict(test_x)
print('y_Test (categorical). This is a vector of zeros with a one in the position of the image class =\n ',test_y[0:15])
# Convert vector of 1 and 0 to index (class 0 = signal, class 1 = background)
y_Pred = np.argmax(Y_Pred_prob, axis=1)
y_Test = np.argmax(test_y, axis=1)
print('Predicted output from the CNN (0 is signal and 1 is background) = \n',y_Pred[0:15])
print('y_Test (True value) =\n ',y_Test[0:15])
print('y_Test lenght', len(y_Test))
print('------------'*10)
#Print classification report
print(classification_report(y_Test, y_Pred))
print('------------'*10)
##---------------------------------------------------------------------------------------------
# We calculate a single probability of tagging the image as signal:
# p_signal = (p0 - p1 + 1) / 2, mapping the two softmax outputs onto [0, 1].
out_prob=[]
for i_prob in range(len(Y_Pred_prob)):
    out_prob.append((Y_Pred_prob[i_prob][0]-Y_Pred_prob[i_prob][1]+1)/2)
print('Predicted probability of each output neuron = \n',Y_Pred_prob[0:15])
print('------------'*10)
print('Output of tagging image as signal = \n',np.array(out_prob)[0:15])
print('------------'*10)
##----------------------------------------------------
#Make folder to save output probability and true values
outprob_dir = 'analysis/out_prob/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+outprob_dir)
## SAVE OUTPUT PROBABILITIES AND TRUE VALUES (currently disabled)
# np.save(outprob_dir+'out_prob_'+in_tuple,out_prob)
# np.save(outprob_dir+'true_value_'+in_tuple,y_Test)
print('Output probabilitiy filename = {}'.format(outprob_dir+'out_prob_'+in_tuple))
print('True value filename = {}'.format(outprob_dir+'true_value_'+in_tuple))
##=============================================================================================
############ Analysis over the images in the "mistag range"
# (images with prob between min_prob and max_prob)
##=============================================================================================
##------------------------------------------------------------------------------------------
# 1) Get probability for signal and background sets to be tagged as signal
# 2) Get index of signal and bg images with a prob of being signal in some specific range
# y_Test is the true value and out_prob the predicted probability of the image to be signal
sig_prob=[] #Values of the precicted probability that are labeled as signal in the true value array
bg_prob=[] #Values of the precicted probability that are labeled as bg in the true value array
sig_idx=[]
bg_idx=[]
for i_label in range(len(y_Test)):
    if y_Test[i_label]==0: #signal label
        sig_prob.append(out_prob[i_label])
        if min_prob<out_prob[i_label]<max_prob:
            sig_idx.append(i_label)
    elif y_Test[i_label]==1: #bg label
        bg_prob.append(out_prob[i_label])
        if min_prob<out_prob[i_label]<max_prob:
            bg_idx.append(i_label)
print('-----------'*10)
print('-----------'*10)
print('Predicted probability (images labeled as signal) = \n',sig_prob[0:15])
print('-----------'*10)
print('Predicted probability (images labeled as background) =\n ',bg_prob[0:15])
print('-----------'*10)
##--------------------------
# Get the array of bg and signal images with a prob of being signal within some specific range
sig_images=[]
bg_images=[]
sig_label=[]
bg_label=[]
for index in sig_idx:
    sig_images.append(test_x[index])
    sig_label.append(y_Test[index])
for index in bg_idx:
    bg_images.append(test_x[index])
    bg_label.append(y_Test[index])
sig_images=np.asarray(sig_images)
bg_images=np.asarray(bg_images)
print('-----------'*10)
print('Number of signal images in the slice between %s and %s = %i' %(str(min_prob), str(max_prob),len(sig_images)))
print('-----------'*10)
print('Number of background images in the slice between %s and %s = %i' %(str(min_prob), str(max_prob),len(bg_images)))
print('-----------'*10)
print('-----------'*10)
print('Signal images with a prob between %s and %s label (1st 10 values) = \n %a' % (str(min_prob), str(max_prob),sig_label[0:10]))
print('-----------'*10)
print('Background images with a prob between %s and %s label (1st 10 values) = \n %a' % (str(min_prob), str(max_prob),bg_label[0:10]))
print('-----------'*10)
##=============================================================================================
############ PLOT HISTOGRAM OF SIG AND BG EVENTS DEPENDING ON THEIR PROBABILITY OF BEING TAGGED AS SIGNAL
##=============================================================================================
#Make folder to save plots
outprob_dir = 'analysis/out_prob/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+outprob_dir)
# Histogram function
def make_hist(in_sig_prob,in_bg_prob,name):
    """Overlay histograms of the CNN output for true-signal (red) and
    true-background (blue) jets and save the figure under outprob_dir."""
    # the histogram of the data
    # n, bins, patches = plt.hist(sig_prob, my_bins, facecolor='red')
    # n, bins, patches = plt.hist(bg_prob, my_bins, facecolor='blue')
    plt.hist(in_sig_prob, my_bins, alpha=0.5, facecolor='red')
    plt.hist(in_bg_prob, my_bins, alpha=0.5, facecolor='blue')
    red_patch = mpatches.Patch(color='red', label='True value = top jet')
    blue_patch = mpatches.Patch(color='blue', label='True value = qcd jet')
    plt.legend(handles=[red_patch,blue_patch],bbox_to_anchor=(1, 1),
               bbox_transform=plt.gcf().transFigure)
    # plt.legend(handles=[red_patch,blue_patch])
    # plt.legend(handles=[blue_patch])
    # add a 'best fit' line
    # y = mlab.normpdf( bins, mu, sigma)
    # l = plt.plot(bins, y, 'r--', linewidth=1)
    plt.xlabel('CNN output probability')
    plt.ylabel('Number of jets')
    # plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
    # plt.axis([40, 160, 0, 0.03])
    plt.grid(True)
    # plt.show()
    fig = plt.gcf()
    plot_FNAME = 'Hist_'+name+in_tuple+'.png'
    print('------------'*10)
    print('Hist plot name = ',plot_FNAME)
    print('------------'*10)
    plt.savefig(outprob_dir+plot_FNAME)
##-------------------------
# Plot the histogram (currently disabled)
# make_hist(sig_prob,bg_prob, '_all_set')
# sys.exit()
##=============================================================================================
############ PLOT ROC CURVE
##=============================================================================================
#Make folder to save plots
ROC_plots_dir = 'analysis/ROC/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+ROC_plots_dir)
ROC_plots_dir2 = 'analysis/ROC/'+str(in_tuple)+'/'
os.system("mkdir -p "+ROC_plots_dir2)
# Make ROC with area under the curve plot
def generate_results(y_test, y_score):
    """Plot the ROC curve (log-x), print its AUC and save the fpr/tpr arrays.

    y_test: true class indices (0 = signal); y_score: probability of being
    tagged as signal.
    """
    #I modified from pos_label=1 to pos_label=0 because I found out that in my code signal is labeled as 0 and bg as 1
    fpr, tpr, thresholds = roc_curve(y_test, y_score,pos_label=0, drop_intermediate=False)
    print('Thresholds[0:6] = \n',thresholds[:6])
    print('Thresholds lenght = \n',len(thresholds))
    print('fpr lenght',len(fpr))
    print('tpr lenght',len(tpr))
    print('------------'*10)
    roc_auc = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, color='red',label='Train epochs = '+str(epochs)+'\n ROC curve (area = %0.2f)' % roc_auc)
    #plt.plot(fpr[2], tpr[2], color='red',
    #     lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
    #plt.plot([0, 1], [0, 1], 'k--')
    plt.xscale('log')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Mistag Rate (False Positive Rate)')
    plt.ylabel('Signal Tag Efficiency (True Positive Rate)')
    plt.legend(loc="lower right")
    #plt.title('Receiver operating characteristic curve')
    # plt.show()
    plt.grid(True)
    fig = plt.gcf()
    label=''
    plot_FNAME = 'ROC_'+str(epochs)+'_'+in_tuple+label+'.png'
    plt.savefig(ROC_plots_dir2+plot_FNAME)
    # Save the raw fpr/tpr arrays so different trainings can be compared later.
    ROC_FNAME = 'ROC_'+str(epochs)+'_'+in_tuple+label+'_Ntrain_'+str(Ntrain)+'.npy'
    np.save(ROC_plots_dir2+'fpr_'+str(sample_relative_size)+'_'+ROC_FNAME,fpr)
    np.save(ROC_plots_dir2+'tpr_'+str(sample_relative_size)+'_'+ROC_FNAME,tpr)
    print('ROC filename = {}'.format(ROC_plots_dir2+plot_FNAME))
    print('AUC =', np.float128(roc_auc))
    print('------------'*10)
generate_results(y_Test, out_prob)
# sys.exit()
##=============================================================================================
############ VISUALIZE CONVOLUTION RESULT
##=============================================================================================
##---------------------------------------------------------------------------------------------
# Utility functions
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy.ma as ma
def nice_imshow(ax, data, vmin=None, vmax=None, cmap=None):
    """Show *data* on *ax* with a colorbar attached on the right.

    A missing vmin/vmax defaults to the data range and a missing cmap
    defaults to cm.jet.
    """
    cmap = cm.jet if cmap is None else cmap
    vmin = data.min() if vmin is None else vmin
    vmax = data.max() if vmax is None else vmax
    # Reserve a thin strip to the right of the axes for the colorbar.
    colorbar_axes = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    image = ax.imshow(data, vmin=vmin, vmax=vmax, interpolation='nearest', cmap=cmap)
    pl.colorbar(image, cax=colorbar_axes)
def make_mosaic(imgs, nrows, ncols, border=1):
    """Tile *imgs* (a stack of same-shaped images) into an nrows x ncols
    masked mosaic, leaving a masked strip of ``border`` pixels between tiles.

    Cells that no image fills (borders and unused grid slots) stay masked.
    """
    count = imgs.shape[0]
    tile_h, tile_w = imgs.shape[1], imgs.shape[2]
    # Tile pitch: tile size plus the border that separates neighbours.
    step_h, step_w = tile_h + border, tile_w + border
    mosaic = ma.masked_all(
        (nrows * tile_h + (nrows - 1) * border,
         ncols * tile_w + (ncols - 1) * border),
        dtype=np.float32)
    for idx in range(count):
        row, col = divmod(idx, ncols)
        top, left = row * step_h, col * step_w
        mosaic[top:top + tile_h, left:left + tile_w] = imgs[idx]
    return mosaic
#pl.imshow(make_mosaic(np.random.random((9, 10, 10)), 3, 3, border=1))
##---------------------------------------------------------------------------------------------
# Get and plot the average of each intermediate convolutional layer
##---------------------------------------------------------------------------------------------
#Split input image arrays into signal and background ones
x_test_sig=[]
x_test_bg=[]
# y_Test is the true value
n_sig=0
n_bg=0
for i_image in range(len(y_Test)):
    if y_Test[i_image]==0:
        x_test_sig.append(test_x[i_image])
        n_sig+=1
    elif y_Test[i_image]==1:
        x_test_bg.append(test_x[i_image])
        n_bg+=1
print('Lenght x_test signal = {} and number of signal samples = {}'.format(len(x_test_sig),n_sig))
print('Lenght x_test background {} and number of background samples = {}'.format(len(x_test_bg),n_bg))
# K.learning_phase() is a flag that indicates if the network is in training or
# predict phase. It allows layer (e.g. Dropout) to only be applied during training
inputs = [K.learning_phase()] + model.inputs
# Backend functions mapping an input image to each pooling layer's output.
_convout1_f = K.function(inputs, [convout1.output])
_convout2_f = K.function(inputs, [convout2.output])
_convout3_f = K.function(inputs, [convout3.output])
# def convout1_f(X):
#     # The [0] is to disable the training phase flag
#     return _convout1_f([0] + [X])
# i = 3000
# Visualize the first layer of convolutions on an input image
# X = x_test[i:i+1]
# outlayer_plots_dir = 'analysis/outlayer_plots/'+name+'_epochs_'+str(epochs)+'/'
outlayer_plots_dir = 'analysis/outlayer_plots/'+'_'.join(in_tuple.split('_')[:-1])+'/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+outlayer_plots_dir)
def get_output_layer_avg(x_test,func,layer):
    """Sum a pooling layer's activation maps over every image in x_test.

    ``layer`` selects the expected (maps, h, w) accumulator shape:
    1 -> (32,17,17), 2 -> (64,7,7), 3 -> (64,3,3). Despite the name, the
    returned array is the element-wise sum, not divided by the image count.
    """
    if layer==1:
        avg_conv=np.zeros((32,17,17))
    elif layer==2:
        avg_conv=np.zeros((64,7,7))
    elif layer==3:
        avg_conv=np.zeros((64,3,3))
    # print("avg image shape = ", np.shape(avg_conv)) #create an array of zeros for the image
    for i_layer in range(len(x_test)):
        X = [x_test[i_layer]]
        # The [0] is to disable the training phase flag
        Conv = func([0] + [X])
        # print('Conv_array type = ',type(Conv))
        Conv=np.asarray(Conv)
        # print('New type Conv_array type = ',type(Conv))
        # print('Conv = \n',Conv[0:2])
        # print("First convolutional output layer shape before swapaxes = ", np.shape(Conv))
        Conv = np.squeeze(Conv) #Remove single-dimensional entries from the shape of an array.
        # Move the channel axis first so Conv is (maps, h, w).
        Conv=np.swapaxes(Conv,0,2) #Interchange two axes of an array.
        Conv=np.swapaxes(Conv,1,2)
        # print('Con[0]= \n',Conv[0])
        # print("First convolutional output layer shape after swap axes = ", np.shape(Conv))
        # print('-----------'*10)
        # print('avg_conv=\n',avg_conv[0:2])
        avg_conv=avg_conv+Conv
    print("avg image shape after adding all images = ", np.shape(avg_conv)) #create an array of zeros for the image
    return avg_conv
def plot_output_layer_avg(func,n_im,layer,type):
    """Save a mosaic image of the summed activation maps.

    func: array from get_output_layer_avg(); n_im: number of mosaic rows;
    layer: conv layer index used in the file name; type: sample tag
    (e.g. 'tt' or 'QCD').
    """
    pl.figure(figsize=(15, 15))
    plt.axis('off')
    # pl.suptitle('convout1b')
    nice_imshow(pl.gca(), make_mosaic(func, n_im,8), cmap=cm.gnuplot)
    # return Conv
    # plt.show()
    fig = plt.gcf()
    plot_FNAME = 'avg_image_layer_'+str(layer)+'_'+str(type)+'_'+'_'.join(in_tuple.split('_')[4:-1])+'.png'
    # plot_FNAME = 'layer_'+str(layer)+'_img_'+str(i_im)+'_epochs_'+str(epochs)+'_'+in_tuple[:-4]+'.png'
    print('Saving average image for layer {} ...'.format(layer))
    print('-----------'*10)
    plt.savefig(outlayer_plots_dir+plot_FNAME)
    print('Output layer filename = {}'.format(outlayer_plots_dir+plot_FNAME))
# print('Name sig','_'.join(in_tuple.split('_')[4:-1]))
# avg_conv_array_sig=get_output_layer_avg(_convout2_f,2)
# avg_conv_array_sig1=get_output_layer_avg(x_test_sig,_convout1_f,1)
# avg_conv_array_bg1=get_output_layer_avg(x_test_bg,_convout1_f,1)
# avg_conv_array_sig2=get_output_layer_avg(x_test_sig,_convout2_f,2)
# avg_conv_array_bg2=get_output_layer_avg(x_test_bg,_convout2_f,2)
# Only the third (deepest) pooling layer is visualized.
avg_conv_array_sig3=get_output_layer_avg(x_test_sig,_convout3_f,3)
avg_conv_array_bg3=get_output_layer_avg(x_test_bg,_convout3_f,3)
# plot_output_layer_avg(avg_conv_array_sig1,4,1,'tt')
# plot_output_layer_avg(avg_conv_array_bg1,4,1,'QCD')
# plot_output_layer_avg(avg_conv_array_sig2,8,2,'tt')
# plot_output_layer_avg(avg_conv_array_bg2,8,2,'QCD')
plot_output_layer_avg(avg_conv_array_sig3,8,3,'tt')
plot_output_layer_avg(avg_conv_array_bg3,8,3,'QCD')
# sys.exit()
##=============================================================================================
############ VISUALIZE WEIGHTS
##=============================================================================================
# Kernel tensors of the three convolutional layers (indices 0, 2 and 5 of the
# Sequential model).
W1 = model.layers[0].kernel
W2 = model.layers[2].kernel
W3 = model.layers[5].kernel
# all_W=[]
# for i_weight in range(3):
#     all_W.append(model.layers[i_weight].kernel)
# W is a tensorflow variable: type W = <class 'tensorflow.python.ops.variables.Variable'>. We want to transform it to a numpy array to plot the weights
# print('type W1 = ',type(W1))
print('------------'*10)
import tensorflow as tf
# sess = tf.Session()
# # from keras import backend as K
# K.set_session(sess)
# weightmodel = tf.global_variables_initializer()
##---------------------------------------------------------------------------------------------
# Transform tensorflow Variable to a numpy array to plot the weights
def tf_to_np(weight):
    """Evaluate a TF kernel Variable and return it as a numpy array with
    axes 0 and 2 swapped (then 1 and 2) for plotting.

    NOTE(review): a fresh session is opened, all variables re-initialized
    and the session closed on every call -- confirm this does not reset the
    trained weights being plotted.
    """
    print('Type weight_array before opening a tensorflow session = ',type(weight))
    sess = tf.Session()
    # from keras import backend as K
    K.set_session(sess)
    weightmodel = tf.global_variables_initializer()
    with sess:
        sess.run(weightmodel)
        weight_array=sess.run(weight)
    print('Type weight_array = ',type(weight_array))
    print('Shape weight_array before swapaxes = ',np.shape(weight_array))
    # weight_array=np.squeeze(weight_array)
    weight_array=np.swapaxes(weight_array,0,2)
    weight_array=np.swapaxes(weight_array,1,2)
    weight_array=np.asarray(weight_array)
    print('Shape weight_array after swapaxes = ',np.shape(weight_array))
    print('Shape weight_array after swapaxes[0] = ',np.shape(weight_array)[0])
    # print('Weight_aray = ',weight_array)
    return weight_array
# all_W_np=[]
# all_W_np.append(tf_to_np(W1))
# all_W_np.append(tf_to_np(W2))
# all_W_np.append(tf_to_np(W3))
##---------------------------------------------------------------------------------------------
# Plot the weights
weight_plots_dir = 'analysis/weight_plots/'+name+'_epochs_'+str(epochs)+'/'
#os.system("rm -rf "+executedir)
os.system("mkdir -p "+weight_plots_dir)
# N_map=0
def plot_2nd_3d_layer(ww,N_out_layer,n_weight,n_row):
    """Build a mosaic of the kernels feeding output map N_out_layer of weight
    tensor ww and report the would-be file name.

    NOTE(review): plt.savefig is commented out below, so the figure is built
    and named but never written to disk.
    """
    wout=tf_to_np(ww)
    # if n_weight==2 or n_weight==3:
    wout=wout[N_out_layer]
    wout=np.swapaxes(wout,0,2)
    wout=np.swapaxes(wout,1,2)
    pl.figure(figsize=(15, 15))
    plt.axis('off')
    nice_imshow(pl.gca(), make_mosaic(wout, n_row, 8), cmap=cm.gnuplot)
    fig = plt.gcf()
    plot_FNAME = 'weights_'+str(n_weight)+'_epochs_'+str(epochs)+'_N_out_layer_'+str(N_out_layer)+'_'+in_tuple[:-4]+'.png'
    # plt.savefig(weight_plots_dir+plot_FNAME)
    print('Weights filename = {}'.format(weight_plots_dir+plot_FNAME))
    print('------------'*10)
# One mosaic per output map of each conv layer.
for N_map in range(N_out_layer0):
    plot_2nd_3d_layer(W1,N_map,1,4)
# sys.exit()
for N_map in range(N_out_layer1):
    plot_2nd_3d_layer(W2,N_map,2,8)
for N_map in range(N_out_layer2):
    plot_2nd_3d_layer(W3,N_map,3,8)
##=============================================================================================
##=============================================================================================
##=============================================================================================
# Code execution time
print('-----------'*10)
print("Code execution time = %s minutes" % ((time.time() - start_time)/60))
print('-----------'*10)
|
25,111 | bb93daa79b6ae4f0c67b77590f334d669b59ba8a | from abc import ABCMeta
from abc import abstractmethod
# Simple Abstract Base Class demo (not exactly what I want....
# anyway......
class Foo(object):
    """Plain class used below to demonstrate ABC virtual-subclass
    registration via LayerMeta.register()."""

    def oop_trash(self):
        """Return a fixed acknowledgement string."""
        return "agreement from Foo"
class LayerMeta(object):
    """Abstract layer interface (Python 2 style ABC).

    NOTE: ``__metaclass__`` only takes effect on Python 2; under Python 3
    this is a plain class and the abstract-method enforcement is inert.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self):
        raise NotImplementedError

    @abstractmethod
    def __getitem__(self, index):
        # BUGFIX: the abstract stub previously took no `index` argument even
        # though getitem() below calls self.__getitem__(index).
        raise NotImplementedError

    def getitem(self, index):
        """Convenience alias for ``self[index]``."""
        return self.__getitem__(index)

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typing hook: any class that defines oop_trash() anywhere in
        # its MRO counts as a virtual subclass of LayerMeta.
        if cls is LayerMeta:
            if any("oop_trash" in base.__dict__ for base in C.__mro__):
                return True
        # BUGFIX: must return the NotImplemented singleton, not the
        # NotImplementedError exception class -- the class object is truthy,
        # which made issubclass() accept *every* class.
        return NotImplemented
LayerMeta.register(Foo) # Now the LayerMeta has attribute
class LayerFoo(LayerMeta):
def __init__(self):
print "OOP sucks"
def __getitem__(self, index):
return "Foo is Here {0}".format(index)
def oop_trash(self):
return False
if __name__ == "__main__":
layer, n_vis, n_hidden = [None] * 3
print dir(LayerMeta)
assert issubclass(Foo, LayerMeta)
m = LayerFoo()
print m.oop_trash()
# m = LayerMeta([19, 12])
# print LayerMeta()(10, 10)
# print dir(LayerMeta())
# print tuple.__mro__
# print dir(iter(range(10)))
# print dir(tuple)
|
25,112 | 973fa848bd5cb724c8e180be788cc118a10a7d8d | """This script is for training a learning model using the spectrograms of clips
from the dataset.
Authors: Anna Buchele, Ariana Olson
Usage
-----
* Create spectrograms using spectrogram_maker.py
python spectrogram_maker.py
Remember to change the source and destination directories for each speaker in
the dataset as per the instructions in spectrogram_maker.
* Train the network
python learning_model.py
"""
import tensorflow.keras as keras
import numpy as np
from data_preprocess import partition_data, get_label, plot_spectrogram
from pyAudioAnalysis import audioTrainTest as aT
def train_nn(randomize_labels = False):
    """Train a CNN on spectrogram arrays and report validation accuracy.

    Args:
        randomize_labels: when True, shuffle labels independently of the
            data as a sanity check that the model beats pure chance.

    Returns:
        float: fraction of validation examples classified correctly.
    """
    train, valid = partition_data('../../Spectrograms', 0.9)

    # Training data.  Fix: the original shuffled train_data in place while
    # training_labels stayed in file order, silently destroying the
    # data/label pairing.  Shuffle one index permutation and apply it to
    # both arrays so they stay aligned.
    train_data = np.expand_dims(np.array([np.load(t) for t in train]), axis=3)
    training_labels = np.array([get_label(f) for f in train])
    train_perm = np.random.permutation(len(train_data))
    train_data = train_data[train_perm]
    training_labels = training_labels[train_perm]

    # Validation data, shuffled the same (paired) way.
    valid_data = np.expand_dims(np.array([np.load(v) for v in valid]), axis=3)
    validation_labels = np.array([get_label(f) for f in valid])
    valid_perm = np.random.permutation(len(valid_data))
    valid_data = valid_data[valid_perm]
    validation_labels = validation_labels[valid_perm]

    if randomize_labels:
        # To test if the nn is doing any better than purely random.
        np.random.shuffle(training_labels)
        np.random.shuffle(validation_labels)

    # Four conv/pool/dropout stages followed by a 2-way softmax.
    model = keras.models.Sequential()
    model.add(keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu', input_shape=train_data[0].shape))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(keras.layers.MaxPooling2D((2, 2)))
    model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.summary()
    model.fit(train_data, training_labels, validation_data=(valid_data, validation_labels), epochs=5)
    model.save('cnn.h5')

    valid_predicted = model.predict(valid_data)
    # for i in range(15):
    #     print(valid_predicted[i])
    #     print(validation_labels[i])
    #     plot_spectrogram(valid_data[i])

    # Reduce both one-hot rows to the winning class (True when class 1 wins).
    hist_pred = [(i[0] < i[1]) for i in valid_predicted]
    hist_valid = [(i[0] < i[1]) for i in validation_labels]
    num_correct = 0
    num_wrong = 0
    for i in range(len(valid_predicted)):
        if hist_pred[i] == hist_valid[i]:
            num_correct += 1
        else:
            num_wrong += 1
    print("Number of correct answers: %d \n Number of wrong answers: %d \n Percent accuracy: %f" % (num_correct, num_wrong, num_correct/float(len(valid_predicted))))
    return num_correct/float(len(valid_predicted))
def nonlearning():
    """
    Using pyAudioAnalysis package to train parameters for classification of emotion. Sorted chunked data into positive and negative categories,
    then followed the documentation here: https://github.com/tyiannak/pyAudioAnalysis/wiki/4.-Classification-and-Regression
    """
    # Trains an SVM on 1s windows and writes the "emotion_classifier" model.
    aT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'],
                       1.0, 1.0, aT.shortTermWindow, aT.shortTermStep,
                       "svm", "emotion_classifier", True)


if __name__ == '__main__':
    # Runs only the non-learning (SVM) baseline, not the CNN.
    nonlearning()
|
25,113 | f0ca3f420fa676d4f80bc314db550a9cbac1d979 | import sys
# Code Jam style driver: argv[1] = input file, argv[2] = output file.
# NOTE(review): `inp.next()` is the Python 2 file-iterator API; Python 3
# would need `next(inp)` -- confirm target interpreter.
inp = open(sys.argv[1]);
outp = open(sys.argv[2], 'w');
count = int(inp.next())
i = 1
for line in inp:
    line = line.split()
    N = int(line[0])
    S = int(line[1])
    p = int(line[2])
    s = 0   # count of "surprising" qualifiers
    m = 0   # count of definite qualifiers
    scores = [int(x) for x in line[3:]]
    if p == 0:
        # Threshold 0 is met by everyone.
        m = len(scores)
    else:
        for score in scores:
            if score < max(p * 3 - 4, 1):
                # Cannot reach p even with a surprising split.
                pass
            elif score < p * 3 - 2:
                # Reaches p only with a surprising split.
                s = s + 1
            else:
                m = m + 1
        # At most S surprising cases may be counted.
        m = m + min(s, S)
    outp.write("Case #%d: %d\n" % (i, m))
    i = i + 1
|
25,114 | 5fd2983484540dfe4d784ea9ef1cda10a08faa0f | class person(object):
    population = 50  # class-level attribute shared by all instances

    def __init__(self,name,age):
        self.name = name
        self.age = age

    @classmethod
    def getpopulation(cls,x):  # no instance needed; receives the class, not self
        return cls.population,x

    @staticmethod
    def isadult(age):
        # No access to class or instance state.
        return age >=18

    def display(self):
        # Prints only; returns None, so print(obj.display()) also prints None.
        print(self.name, 'is' , self.age, 'years old')


newperson = person('tim',18)
print(person.getpopulation(5))
print(person.isadult(5))
# Main difference: a classmethod can read class variables, a staticmethod
# cannot; a staticmethod takes no implicit first argument at all.
print(newperson.display())
person('tim',25)
#print(person.display()) we cant do this bc its not static or classic
|
25,115 | ff44e17ba5e002c5162673572aeacbaff033654f | from __future__ import absolute_import, unicode_literals
import json
import pkg_resources
from celery.utils.log import get_task_logger
from celery.worker.control import Panel
from reviewbot.celery import celery
from rbtools.api.client import RBClient
from reviewbot.processing.review import Review
from reviewbot.repositories import repositories
from reviewbot.utils.filesystem import cleanup_tempfiles
# TODO: Make the cookie file configurable.
COOKIE_FILE = 'reviewbot-cookies.txt'
# TODO: Include version information in the agent.
AGENT = 'ReviewBot'
# Status Update states
PENDING = 'pending'
DONE_SUCCESS = 'done-success'
DONE_FAILURE = 'done-failure'
ERROR = 'error'
logger = get_task_logger(__name__)
@celery.task(ignore_result=True)
def RunTool(server_url='',
            session='',
            username='',
            review_request_id=-1,
            diff_revision=-1,
            status_update_id=-1,
            review_settings=None,
            tool_options=None,
            repository_name='',
            base_commit_id='',
            *args, **kwargs):
    """Execute an automated review on a review request.

    Args:
        server_url (unicode):
            The URL of the Review Board server.

        session (unicode):
            The encoded session identifier.

        username (unicode):
            The name of the user who owns the ``session``.

        review_request_id (int):
            The ID of the review request being reviewed (ID for use in the
            API, which is the "display_id" field).

        diff_revision (int):
            The ID of the diff revision being reviewed.

        status_update_id (int):
            The ID of the status update for this invocation of the tool.

        review_settings (dict, optional):
            Settings for how the review should be created. Defaults to an
            empty dict.

        tool_options (dict, optional):
            The tool-specific settings. Defaults to an empty dict.

        repository_name (unicode):
            The name of the repository to clone to run the tool, if the tool
            requires full working directory access.

        base_commit_id (unicode):
            The ID of the commit that the patch should be applied to.

        args (tuple):
            Any additional positional arguments (perhaps used by a newer
            version of the Review Bot extension).

        kwargs (dict):
            Any additional keyword arguments (perhaps used by a newer version
            of the Review Bot extension).

    Returns:
        bool:
            Whether the task completed successfully.
    """
    # Fix: avoid mutable default arguments ({} is created once and shared
    # across every task invocation); normalize the None sentinels here.
    if review_settings is None:
        review_settings = {}

    if tool_options is None:
        tool_options = {}

    try:
        # The tool to run is encoded in the Celery routing key
        # ("<tool>.<suffix>").
        routing_key = RunTool.request.delivery_info['routing_key']
        route_parts = routing_key.partition('.')
        tool_name = route_parts[0]

        log_detail = ('(server=%s, review_request_id=%s, diff_revision=%s)'
                      % (server_url, review_request_id, diff_revision))

        logger.info('Running tool "%s" %s', tool_name, log_detail)

        try:
            logger.info('Initializing RB API %s', log_detail)
            api_client = RBClient(server_url,
                                  cookie_file=COOKIE_FILE,
                                  agent=AGENT,
                                  session=session)
            api_root = api_client.get_root()
        except Exception as e:
            logger.error('Could not contact Review Board server: %s %s',
                         e, log_detail)
            return False

        logger.info('Loading requested tool "%s" %s', tool_name, log_detail)

        tools = [
            entrypoint.load()
            for entrypoint in pkg_resources.iter_entry_points(
                group='reviewbot.tools', name=tool_name)
        ]

        if len(tools) == 0:
            logger.error('Tool "%s" not found %s', tool_name, log_detail)
            return False
        elif len(tools) > 1:
            logger.error('Tool "%s" is ambiguous (found %s) %s',
                         tool_name, ', '.join(tool.name for tool in tools),
                         log_detail)
            return False
        else:
            tool = tools[0]

        repository = None

        try:
            logger.info('Creating status update %s', log_detail)
            status_update = api_root.get_status_update(
                review_request_id=review_request_id,
                status_update_id=status_update_id)
        except Exception as e:
            logger.exception('Unable to create status update: %s %s',
                             e, log_detail)
            return False

        if tool.working_directory_required:
            if not base_commit_id:
                logger.error('Working directory is required but the diffset '
                             'has no base_commit_id %s', log_detail)
                status_update.update(
                    state=ERROR,
                    description='Diff does not include parent commit '
                                'information.')
                return False

            try:
                repository = repositories[repository_name]
            except KeyError:
                logger.error('Unable to find configured repository "%s" %s',
                             repository_name, log_detail)
                return False

        try:
            logger.info('Initializing review %s', log_detail)
            review = Review(api_root, review_request_id, diff_revision,
                            review_settings)
            status_update.update(description='running...')
        except Exception as e:
            logger.exception('Failed to initialize review: %s %s', e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        try:
            logger.info('Initializing tool "%s %s" %s',
                        tool.name, tool.version, log_detail)
            t = tool()
        except Exception as e:
            logger.exception('Error initializing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        try:
            logger.info('Executing tool "%s" %s', tool.name, log_detail)
            t.execute(review, settings=tool_options, repository=repository,
                      base_commit_id=base_commit_id)
            logger.info('Tool "%s" completed successfully %s',
                        tool.name, log_detail)
        except Exception as e:
            logger.exception('Error executing tool "%s": %s %s',
                             tool.name, e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        if t.output:
            # Attach the tool's console output to the status update.
            file_attachments = \
                api_root.get_user_file_attachments(username=username)
            attachment = \
                file_attachments.upload_attachment('tool-output', t.output)

            status_update.update(url=attachment.absolute_url,
                                 url_text='Tool console output')

        try:
            if len(review.comments) == 0:
                status_update.update(state=DONE_SUCCESS,
                                     description='passed.')
            else:
                logger.info('Publishing review %s', log_detail)
                review_id = review.publish().id
                status_update.update(state=DONE_FAILURE,
                                     description='failed.',
                                     review_id=review_id)
        except Exception as e:
            logger.exception('Error when publishing review: %s %s', e, log_detail)
            status_update.update(state=ERROR, description='internal error.')
            return False

        logger.info('Review completed successfully %s', log_detail)
        return True
    finally:
        cleanup_tempfiles()
@Panel.register
def update_tools_list(panel, payload):
    """Update the list of installed tools.

    This will detect the installed analysis tool plugins
    and inform Review Board of them.

    Args:
        panel (celery.worker.control.Panel):
            The worker control panel.

        payload (dict):
            The payload as assembled by the extension.

    Returns:
        bool:
            Whether the task completed successfully.
    """
    logger.info('Request to refresh installed tools from "%s"',
                payload['url'])

    logger.info('Iterating Tools')
    tools = []

    # Collect metadata for every installed tool whose dependencies resolve.
    for ep in pkg_resources.iter_entry_points(group='reviewbot.tools'):
        entry_point = ep.name
        tool_class = ep.load()
        tool = tool_class()
        logger.info('Tool: %s' % entry_point)

        if tool.check_dependencies():
            tools.append({
                'name': tool_class.name,
                'entry_point': entry_point,
                'version': tool_class.version,
                'description': tool_class.description,
                'tool_options': json.dumps(tool_class.options),
                'timeout': tool_class.timeout,
                'working_directory_required':
                    tool_class.working_directory_required,
            })
        else:
            logger.warning('%s dependency check failed.', ep.name)

    logger.info('Done iterating Tools')
    hostname = panel.hostname

    try:
        api_client = RBClient(
            payload['url'],
            cookie_file=COOKIE_FILE,
            agent=AGENT,
            session=payload['session'])
        api_root = api_client.get_root()
    except Exception as e:
        logger.exception('Could not reach RB server: %s', e)
        return {
            'status': 'error',
            'error': 'Could not reach Review Board server: %s' % e,
        }

    try:
        # POST the detected tool list for this worker host.
        api_tools = _get_extension_resource(api_root).get_tools()
        api_tools.create(hostname=hostname, tools=json.dumps(tools))
    except Exception as e:
        logger.exception('Problem POSTing tools: %s', e)
        return {
            'status': 'error',
            'error': 'Problem uploading tools: %s' % e,
        }

    return {
        'status': 'ok',
        'tools': tools,
    }


def _get_extension_resource(api_root):
    """Return the Review Bot extension resource.

    Args:
        api_root (rbtools.api.resource.Resource):
            The server API root.

    Returns:
        rbtools.api.resource.Resource:
            The extension's API resource.
    """
    # TODO: Cache this. We only use this resource as a link to sub-resources.
    return api_root.get_extension(
        extension_name='reviewbotext.extension.ReviewBotExtension')
|
25,116 | 9e1b6a8d098c02ca4b3905bf22e15132f75bff06 | def str_correction(var,e=0):
def str_correction(var, e=0):
    """Collapse runs of spaces in *var* and insert a space after each '.'.

    Mirrors the original behaviour exactly, including trimming the first
    character of the accumulated result (and the following one when it
    is a space).  ``e`` is accepted for call compatibility but unused.
    """
    # Sentinel character so the accumulator is never empty when we peek
    # at its last character.
    out = "r"
    for ch in var:
        if out[-1] == ".":
            out += " "
        if ch == " " and out[-1] == " ":
            # Collapse consecutive spaces.
            continue
        out += ch
    # Drop the sentinel, plus the separator that may directly follow it.
    return out[2:] if out[1] == " " else out[1:]
# Demo: clean a messy sentence, then print its last character and the result.
z= str_correction(" my . lkl gdj .sdf.ssfd")
print(z[-1])
print (z)
|
25,117 | 4411df23094f20b8af85104af42845a5e3aa0b74 | def create_stopword():
def create_stopword(path='important_data/stopword.txt'):
    """Load stopwords from *path*, one word per line.

    Generalized: the previously hard-coded file location is now a
    parameter (with the original path as default), so the loader is
    reusable and testable.

    Args:
        path: text file containing one stopword per line.

    Returns:
        list[str]: the words with newlines removed (same cleanup as the
        original ``replace('\\n', '')``).
    """
    with open(path, 'r') as words:
        return [word.replace('\n', '') for word in words]
25,118 | 4f5ee152ad605334520dbe786d3c0b507d12b838 | import ctypes
from ctypes.wintypes import *

# Pointer-sized unsigned integer (ULONG_PTR in the Windows headers):
# 64-bit on 64-bit Python, 32-bit otherwise.
if ctypes.sizeof(ctypes.c_void_p) == 8:
    ULONG_PTR = ctypes.c_ulonglong
else:
    ULONG_PTR = ctypes.c_ulong

NTSTATUS = ULONG
# Fix: SIZE_T is defined as ULONG_PTR (pointer-sized) in the Windows
# headers; aliasing it to the always-32-bit ULONG truncates sizes and
# corrupts struct layouts on 64-bit processes.
SIZE_T = ULONG_PTR
PSIZE_T = ctypes.POINTER(SIZE_T)
FARPROC = LPCVOID
KAFFINITY = ULONG_PTR
KPRIORITY = DWORD
THREADINFOCLASS = DWORD
25,119 | d56acabda4831f700093c29af7afb1947ef5165f | # coding=utf-8
'''
pi@raspberrypi ~ $ echo $LANG
zh_TW.UTF-8
https://github.com/ashtons/picam
url http://host:port/s/foo_webapp.html
'''
import settings
import picam
import logging,threading
import datetime,time
import Image
import httplib, urllib
import collections,array
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
# False when test
lastEvtTime = 0
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler that broadcasts pixel-diff readings to clients."""

    # Open connections, shared across all handler instances and guarded
    # by `lock` because wsSend may run from another thread.
    connections = set()
    lock = threading.Lock()

    def open(self):
        print 'New connection was opened'
        #self.write_message("Welcome to my websocket!")
        self.lock.acquire()
        try:
            self.connections.add(self)
        finally:
            self.lock.release()

    def on_message(self, message):
        print 'Incoming message:', message
        #self.write_message("You said: " + message)

    def on_close(self):
        print 'Connection was closed...'
        self.lock.acquire()
        try:
            self.connections.remove(self)
        finally:
            self.lock.release()

    @classmethod
    def wsSend(cls,msg):
        # Broadcast msg to every open connection; failures are logged
        # per-connection so one bad client does not stop the others.
        #logging.debug("sending message %s" %msg)
        cls.lock.acquire()
        try:
            for conn in cls.connections:
                try:
                    conn.write_message(msg)
                except:
                    logging.error("Error sending message",exc_info=True)
        finally:
            cls.lock.release()


class MainHandler(tornado.web.RequestHandler):
    """Plain-HTTP health/landing page."""

    def get(self):
        self.write("Hello Test")


# Routes: websocket feed, landing page, and static files under /s/.
application = tornado.web.Application([
    (r'/ws', WSHandler),(r'/',MainHandler),
    (r'/s/(.*)', tornado.web.StaticFileHandler, {'path': settings.WWW}),
])


def pushoverPost(msg):
    # Send a Pushover notification; when PUSHOVER_ENABLE is off, only log.
    if not settings.PUSHOVER_ENABLE :
        logging.info('[TestPrintOnly]Send pushover event')
        return
    conn = httplib.HTTPSConnection("api.pushover.net:443")
    conn.request("POST", "/1/messages.json",
        urllib.urlencode({
            "token": settings.PUSHOVER_APPTOKEN,
            "user": settings.PUSHOVER_USERKEY,
            "message": msg,
        }), { "Content-type": "application/x-www-form-urlencoded" })
    logging.info('HTTP POST Send %s' % msg)
    r = conn.getresponse()
    logging.info("HTTP POST status=%d , reason=%s",r.status,r.reason)
    logging.info(r.read())
    conn.close()


def found(q):
    # Motion event confirmed: record the event time and notify via
    # Pushover on a background thread so the capture loop is not blocked.
    global lastEvtTime
    lastEvtTime = time.time()
    logging.info("EVENT FOUND")
    m = '我家F門 Event px=%d'%q
    t = threading.Thread(target=pushoverPost, args=(m,))
    t.start()


def initLog():
    # Log to a timestamped file and mirror everything to the console.
    dateTag = datetime.datetime.now().strftime("%Y%b%d_%H%M%S")
    logging.basicConfig(filename="mt_%s.log"%dateTag,level=logging.DEBUG,
        format='%(asctime)s - %(levelname)s - %(message)s')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(asctime)s : %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    logging.info('Started')
def isMotion4(kl):
    """Return True when the last four pixel-diff samples look like motion.

    Requires exactly four samples: a jump of more than 777 between the
    first two, and the last two both above 1000.
    """
    if len(kl) != 4:
        return False
    first, second, third, fourth = kl
    return second - first > 777 and third > 1000 and fourth > 1000
def handleMotion(k,q):
    # Fire a motion event at most once every 300 seconds.
    if isMotion4(k):
        ediff = time.time() - lastEvtTime
        logging.debug("EvtTimeDiff=%d" % ediff)
        if ediff > 300:
            found(q)


def startTornado():
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(settings.PORT)
    tornado.ioloop.IOLoop.instance().start()


def stopTornado():
    tornado.ioloop.IOLoop.instance().stop()


def main():
    initLog()
    # NOTE(review): Thread(...).start() returns None, so `t` is always
    # None here -- confirm the handle is really unneeded.
    t = threading.Thread(target=startTornado).start()
    try:
        runDiffCheck()
    except (KeyboardInterrupt, SystemExit):
        stopTornado()
        raise


def runDiffCheck():
    """Capture loop: diff consecutive low-res frames and react to motion."""
    k = collections.deque(maxlen=4)  # sliding window of the last 4 readings
    width = 100
    height = 100
    THRESHOLD = 15
    QUANITY_MIN = 50
    f1 = picam.takeRGBPhotoWithDetails(width,height)
    while True:
        f2 = picam.takeRGBPhotoWithDetails(width,height)
        # q = number of pixels whose change exceeds THRESHOLD.
        (_,q) = picam.difference(f1,f2,THRESHOLD)
        if q > 10 : logging.debug("px=%d", q)
        k.append(q)
        #print 'px %d' %q
        # Push the live reading to every websocket client.
        WSHandler.wsSend(str(q))
        picam.LEDOn() if q > QUANITY_MIN else picam.LEDOff()
        handleMotion(k,q)
        f1 = f2


if __name__ == '__main__':
    main()
|
25,120 | 19309c19a3a6433df690d8d43a3cb29b06e86f8f | '''
Created on Jun 23, 2014
@author: Danielle Mowery
'''
import re, sys, os, random, glob, shutil, sqlite3
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
def createSqliteDB(qDB):
    # Create the `reports` table in the SQLite database at path qDB.
    conn=sqlite3.connect(qDB)
    curs=conn.cursor()
    curs.execute('''
    CREATE TABLE reports( id text not null,
    reportid text,
    report text)''');
    # NOTE(review): field_count/markers/query are built but never used here.
    field_count=3
    markers = ', '.join(['?']*field_count)
    query = 'INSERT INTO reports VALUES (%s)'%markers
    print "Table is created"
    conn.commit()
    conn.close()


def queryDB(qDB, fileRefDict):
    # Create the table, then insert one row per {filename: text} entry.
    conn=sqlite3.connect(qDB)
    curs=conn.cursor();ids=0
    createSqliteDB(qDB)
    for key in fileRefDict.keys():
        ids+=1; print "DB", ids
        txt=fileRefDict[key]
        curs.execute("""INSERT INTO reports VALUES (?,?,?)""", (unicode(ids), unicode(key), unicode(txt)) )
    conn.commit()
    conn.close()


if __name__ == "__main__":
    # NOTE(review): `path` is only defined in the commented-out line below,
    # so running this as-is raises NameError -- the user must fill it in.
    #path="<point to your directory containing your individual text files e.g.,/USER/YOU/DESKTOP/DIRECTORY_OF_FILES/*.txt>"
    radTxts=glob.glob(path)
    print radTxts
    fileRefDict={};ids=0
    for txtPath in radTxts:
        txt=open(txtPath).read()
        txt=txt.decode("ISO-8859-1").strip()
        txtName=os.path.split(txtPath)[-1]
        ids+=1
        fileRefDict[txtName]=txt
    queryDB(os.getcwd()+"/pyConTextNLP-0.5.1.3/src/patient_corpus.db", fileRefDict)
|
25,121 | 10cbc2b8ffb5cf136797e4080463465ac0273202 | #No es mutable
# Tuples are immutable.
tupla = ("Hola",7,7.928289,True, ("Tupla",2))
# Accessing elements of a tuple:
#print(tupla[4][1])
# Lists are mutable.
lista = ["Hola",7,7.928289,True, ["Tupla",2]]
# Accessing elements of a list:
#print(lista[4][1])
alumnos = ("Alejandro", "Ana Abigail", "Andrea", "Karime", "Eric", "Josue")
for alumno in alumnos:
    # Rebinding the loop variable does not modify the tuple.
    alumno = alumno + " IEST"
    print(alumno)
for i in range(10,1001,4):
    print (i)
|
25,122 | 23042332589896211a3656f87c38ba60a1d458e5 | class AddrBookEntry(object):
    'address book entry class'

    def __init__(self, nm, ph):
        self.name = nm
        self.phone = ph
        print 'Created instance for: ', self.name

    def updatePhone(self, newph):
        self.phone = newph
        print 'Updated phone# for:', self.name


class EmplAddrBookEntry(AddrBookEntry):
    'employee address book entry class'

    def __init__(self, nm, ph, id, em):
        # Delegate the common fields to the base class (Python 2 style).
        AddrBookEntry.__init__(self, nm, ph)
        self.empid = id
        self.email = em

    def updateEmail(self, newem):
        self.email = newem
        print 'Updated email for:', self.name


# Demo of construction, docstring access, and the two updaters.
john = EmplAddrBookEntry('John Doe', '2342134', '1', 'abcdefg@qq.com')
print john
print john.name
print john.phone
print john.email
print 'Class Doc:', EmplAddrBookEntry.__doc__
john.updatePhone('66666666')
john.updateEmail('123456@gmail.com')
print john.email
|
25,123 | 4f7feefb1a484a37730e07299b45034749ab238f | import base64
import unittest
import os
from moviepy.editor import AudioFileClip
from syncit.converter import Converter
from syncit.constants import Constants
from werkzeug.datastructures import FileStorage
# Setup Constants
SAMPLE_AUDIO = os.path.join(Constants.SAMPLES_FOLDER, 'audio.m4a')
LANGUAGE = 'en'
# test_language_conversion Constants
LANGUAGE_CODE = 'en-US'
# test_convert_audio_to_text Constants
WORD = 'elsa'
START = 57
END = 60
class TestConverter(unittest.TestCase):
"""
Class to test the Converter class.
"""
def setUp(self):
"""
Setup to run before each test.
"""
audio = open(SAMPLE_AUDIO, 'rb')
self.converter = Converter(FileStorage(audio), 'en')
audio.close()
def test_language_conversion(self):
"""
Make sure the language is correct.
"""
self.assertEqual(self.converter.language, LANGUAGE_CODE,
'Check the language conversion.')
def test_repair_file(self):
"""
Make sure the output file is wav format that can be used in moviepy.
"""
audio_path = self.converter.audio
self.assertTrue(audio_path.endswith('.wav'))
# Make sure it can be loaded in moviepy
clip = AudioFileClip(audio_path)
def test_convert_audio_to_text(self):
"""
Check the convert_audio_to_text method.
"""
text = self.converter.convert_audio_to_text(START, END, [WORD], lambda: False)
text = text.strip()
self.assertEqual(text, WORD)
|
25,124 | 773abd1fd5b3b72fa4d3f2812dfac5ab84e1e363 | #
# [236] Lowest Common Ancestor of a Binary Tree
#
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/description/
#
# algorithms
# Medium (29.98%)
# Total Accepted: 152.3K
# Total Submissions: 508.2K
# Testcase Example: '[1,2]\nnode with value 1\nnode with value 2'
#
#
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes
# in the tree.
#
#
#
# According to the definition of LCA on Wikipedia: “The lowest common ancestor
# is defined between two nodes v and w as the lowest node in T that has both v
# and w as descendants (where we allow a node to be a descendant of
# itself).”
#
#
#
# _______3______
# / \
# ___5__ ___1__
# / \ / \
# 6 _2 0 8
# / \
# 7 4
#
#
#
# For example, the lowest common ancestor (LCA) of nodes 5 and 1 is 3. Another
# example is LCA of nodes 5 and 4 is 5, since a node can be a descendant of
# itself according to the LCA definition.
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """Return the lowest common ancestor of nodes p and q.

        Post-order recursion: a node is the LCA when p and q turn up in
        different subtrees; otherwise the answer bubbles up from
        whichever side found one of them (a node counts as its own
        descendant).

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        if not root:
            return root
        if root in (p, q):
            # Reached one of the targets; it is its own ancestor.
            return root
        left = self.lowestCommonAncestor(root.left, p, q)
        right = self.lowestCommonAncestor(root.right, p, q)
        if left and right:
            # p and q sit in different subtrees, so root splits them.
            return root
        return left or right
|
25,125 | cd38fc4c622b94d838e24e48f18140a8ca229c3d |
# Read an integer from stdin and print its square.
k=lambda a: a*a
n=int(input("enter the number : "))
result=k(n)
print(result)
25,126 | ede73d32088a3cee25bc1a1d6b323f812bc0714e | # -*- coding: utf-8 -*-
from .interface import Multimeter
from .keithley_2000 import Keithley2000
from .agilent_34410a import Agilent_34410A
|
25,127 | 3e52b7f775fd7e37d3397544d7881e6712d4344f | import requests
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
# Scrape "xp" awards from a Slack channel's history, total them per user,
# render a bar chart, and e-mail the chart as an attachment.
# NOTE(review): the Slack tokens and the Gmail password below are
# hard-coded placeholders/secrets -- move them to environment variables.
print("Getting xps from messages")
url = "https://slack.com/api/channels.history"
querystring = {"token":"{your token}","channel":"CJWDYLS2Z","pretty":"1"}
headers = {
    'User-Agent': "PostmanRuntime/7.17.1",
    'Accept': "*/*",
    'Cache-Control': "no-cache",
    'Postman-Token': "{postman-token}",
    'Host': "slack.com",
    'Accept-Encoding': "gzip, deflate",
    'Connection': "keep-alive",
    'cache-control': "no-cache"
    }
# xp: one row per (user id, xp value) mention; name: id -> real name.
xp = pd.DataFrame(columns = ["id", "xp_value"])
name = pd.DataFrame(columns = ["id", "real_name"])
response = requests.request("GET", url, headers=headers, params=querystring)
ress = response.json()
for i in range(len(ress["messages"])):
    text = ress["messages"][i]["text"]
    #print(text)
    #find for xp
    index = text.find('xp ')
    #conditions if not found
    if(index == -1):
        index = text.find('XP ')
    if(index == -1):
        continue
    #get xp value: last two characters before the 'xp' marker
    try:
        xp_val = int(text[: index].strip(' ')[-2:])
    except:
        continue
    # Every <@USERID> mention in the message gets credited.
    for t in text.split("@")[1:]:
        id = t.split(">")[:-1]
        for i in id:
            # NOTE(review): DataFrame.append was removed in pandas 2.x;
            # pd.concat is the replacement -- verify installed pandas.
            xp = xp.append(pd.Series([i, xp_val], index = xp.columns), ignore_index = True)
print("Getting Names of users")
url = "https://slack.com/api/users.list"
querystring = {"token":"{your-token}","pretty":"1"}
headers = {
    'User-Agent': "PostmanRuntime/7.17.1",
    'Accept': "*/*",
    'Cache-Control': "no-cache",
    'Postman-Token': "{postman token}",
    'Host': "slack.com",
    'Accept-Encoding': "gzip, deflate",
    'Connection': "keep-alive",
    'cache-control': "no-cache"
    }
response1 = requests.request("GET", url, headers=headers, params=querystring)
res = response1.json()
for i in range(len(res["members"])):
    info = res["members"][i]
    n = info["profile"]["real_name"]
    id = info["id"]
    name = name.append(pd.Series([id, n], index = name.columns), ignore_index = True)
result = pd.DataFrame(columns=['id', 'real_name', 'xp'])
name.sort_values(by='id', ascending=[1], inplace=True)
xp.sort_values(by='id', ascending=[1], inplace=True)
print("Mapping xps to users")
# Join each xp row to its user's real name.
for index, rowx in xp.iterrows():
    for index, rown in name[rowx['id']==name['id']].iterrows():
        result = result.append(pd.Series([rown['id'], rown['real_name'], rowx['xp_value']], index=result.columns), ignore_index=True)
print("Calculating total xps of each students")
result = result.groupby(['id','real_name']).sum().reset_index()
result = result.sort_values(by = 'xp', ascending = False).reset_index(drop = True)
x = result['xp']
y = result['real_name']
# Horizontal bar chart colored by xp, highest at the top.
colors = cm.hsv(x / float(max(x)))
plot = plt.scatter(x, x, c = x, cmap = 'hsv')
plt.clf()
fig= plt.figure(figsize=(10,4))
axes= fig.add_axes([0.1,0.1,0.8,4])
axes.set_ylim([len(y), 0])
plt.xticks(np.arange(0, 100, 10))
axes.barh(y, x, color = colors)
#axes.barh(result['real_name'], result['xp'], align = 'center')
plt.xlabel("Total xps")
plt.ylabel("Name")
plt.colorbar(plot)
rects = axes.patches
# For each bar: Place a label
for rect in rects:
    # Get X and Y placement of label from rect.
    x_value = rect.get_width()
    y_value = rect.get_y() + rect.get_height() / 2
    # Number of points between bar and label. Change to your liking.
    space = 5
    # Vertical alignment for positive values
    ha = 'left'
    # If value of bar is negative: Place label left of bar
    if x_value < 0:
        # Invert space to place label to the left
        space *= -1
        # Horizontally align label at right
        ha = 'right'
    # Use X value as label and format number with one decimal place
    label = "{:.1f}".format(x_value)
    # Create annotation
    plt.annotate(
        label,                      # Use `label` as label
        (x_value, y_value),         # Place label at end of the bar
        xytext=(space, 0),          # Horizontally shift label by `space`
        textcoords="offset points", # Interpret `xytext` as offset in points
        va='center',                # Vertically center label
        ha=ha)                      # Horizontally align label differently for
                                    # positive and negative values.
#plt.show()
print("Saving graph to image")
fig.savefig('xpreport.jpg', bbox_inches = 'tight')
print("Mailing")
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
fromaddr = "cee300asu@gmail.com"
toaddr = ["Fall_20.2wm5iqal96jxq5ht@u.box.com", "thakkarsamip@gmail.com"]
# Send the chart to each recipient as a separate message.
for i in toaddr:
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = i
    msg['Subject'] = "Mail with attachment"
    body = "PFA"
    msg.attach(MIMEText(body, 'plain'))
    filename = "xpreport.jpg"
    attachment = open("xpreport.jpg", "rb")
    p = MIMEBase('application', 'octet-stream')
    p.set_payload((attachment).read())
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(p)
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.starttls()
    # NOTE(review): plaintext Gmail password committed to source.
    s.login(fromaddr, "thomasseager")
    text = msg.as_string()
    s.sendmail(fromaddr, i, text)
    s.quit()
print("Check your mails!!!!")
|
25,128 | 8aa33b2fc0e4a60e5f98c506ca9b08aacca19f57 | class ListNode:
    def __init__(self, val, next = None):
        # Singly linked list node: payload plus pointer to the next node.
        self.val = val
        self.next = next
class Solution:
    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of the list in one pass.

        Two pointers kept exactly n links apart: when the leading
        pointer runs off the end, the trailing pointer sits just before
        the node to delete.  Returns the (possibly new) head.
        """
        lead = head
        for _ in range(n):
            lead = lead.next
        if lead is None:
            # The gap equals the list length: the head itself goes.
            return head.next
        trail = head
        while lead.next is not None:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return head
# Build the list 1->2->3->4->5, remove the 5th node from the end (the
# head), and print the remaining values on one line.
nums = [1, 2, 3, 4, 5]
lst = [ListNode(n) for n in nums]
for i in range(len(lst) - 1):
    lst[i].next = lst[i + 1]
head = lst[0]
inst = Solution()
head = inst.removeNthFromEnd(head, 5)
while head:
    print(head.val, end = ' ')
    head = head.next
print()
|
25,129 | 57c1df012e1d96617481ceb8f456c03e5f4c988b | # 作者 : "刘军"
# Created: 2018/8/23 (header translated from Chinese)
from vnpy.trader.gateway.fxcmGateway.fxcmGateway import FxcmGateway
from vnpy.event.eventEngine import EventEngine

if __name__ == '__main__':
    # Smoke test: connect the FXCM gateway on a fresh event engine.
    ee = EventEngine()
    fm = FxcmGateway(ee)
    fm.connect()
25,130 | 076571c8f1717fedc1ef682142e415913e9be89b | import re
import requests
from bs4 import BeautifulSoup as BS
# Parse a saved gallery page and print name,email,phone as CSV lines.
with open('symbols.html', encoding='utf-8') as f:
    soup = BS(f, 'html.parser')

sections = soup.find_all('li', attrs={
    'id': re.compile(r'huge_it_gallery_pupup_element_\d+')
})

# NOTE(review): [A-z] also matches the punctuation between 'Z' and 'a'
# in ASCII -- [A-Za-z] is probably what was meant; confirm before changing.
email_re = r'\S+@[A-z0-9\-]+\.\w+'
phone_re = r'\(?\d\d\d[\) -]*\d\d\d[\- ]*\d\d\d\d'

for section in sections:
    name_text = section.find('h3', attrs={'class': 'title'})
    email_text = section.find(string=re.compile(email_re))
    phone_number_text = section.find(string=re.compile(phone_re))
    if not name_text:
        continue
    # Collapse whitespace and drop commas so the CSV stays well-formed.
    name = ' '.join(name_text.get_text().strip().split()).replace(',', '')
    if not email_text:
        email_text = ''
    if not phone_number_text:
        phone_number_text = ''
    email = re.search(email_re, email_text)
    phone_number = re.search(phone_re, phone_number_text)
    print(f'{name},{email.group(0) if email else ""},{phone_number.group(0) if phone_number else ""}')
|
25,131 | 96f962693141c40ec750b1888731be2130ad24d1 | """
22. Generate Parentheses
Medium
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
Example 1:
Input: n = 3
Output: ["((()))","(()())","(())()","()(())","()()()"]
Example 2:
Input: n = 1
Output: ["()"]
Constraints:
1 <= n <= 8
https://leetcode.com/submissions/detail/406795598/
"""
class Solution:
    def generateParenthesis(self, n: int) -> list[str]:
        """Return every well-formed combination of n parenthesis pairs.

        Improvement over the original generate-then-filter version: track
        the open/close counts and prune invalid prefixes, so only valid
        strings are ever built (Catalan(n) results) instead of scanning
        all 2**(2n) candidate strings.  Output order is unchanged because
        the '(' branch is still explored before the ')' branch.
        """
        valid = []

        def btrack(s, opened, closed):
            # A full-length prefix built under the invariants below
            # (opened <= n, closed <= opened) is guaranteed valid.
            if len(s) == 2 * n:
                valid.append(s)
                return
            if opened < n:
                btrack(s + "(", opened + 1, closed)
            if closed < opened:
                btrack(s + ")", opened, closed + 1)

        btrack("", 0, 0)
        return valid
|
25,132 | e2d73a244cf8a897fb0cdf4bd5f68d6a5086b11b | import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import scipy
import tensorflow.keras as keras
# Split by subject: users 1-30 train, the rest test, so no subject
# appears in both sets.
df = pd.read_csv('WISDM_clean.csv')
df_train = df[df['user_id'] <= 30]
df_test = df[df['user_id'] > 30]

# Normalize the accelerometer axes; the scaler is fit on training data
# only and then applied to both splits.
scale_columns = ['x_axis', 'y_axis', 'z_axis']
scaler = sklearn.preprocessing.RobustScaler()
scaler = scaler.fit(df_train[scale_columns])
df_train.loc[:, scale_columns] = scaler.transform(
    df_train[scale_columns].to_numpy()
)
df_test.loc[:, scale_columns] = scaler.transform(
    df_test[scale_columns].to_numpy()
)
def create_dataset(X, y, time_steps=1, step=1):
    """Slice a time-ordered DataFrame into overlapping fixed-length windows.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature columns, one row per sample (here: x/y/z accelerometer axes).
    y : pandas.Series
        Per-sample labels aligned with X (here: the activity name).
    time_steps : int
        Window length in samples.
    step : int
        Stride between consecutive window starts.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Windows of shape (n_windows, time_steps, n_features) and the
        per-window majority label with shape (n_windows, 1).

    NOTE(review): the range end is exclusive, so a final full window starting
    at len(X) - time_steps is dropped — preserved from the original.
    """
    Xs, ys = [], []
    for i in range(0, len(X) - time_steps, step):
        Xs.append(X.iloc[i:(i + time_steps)].values)
        labels = y.iloc[i: i + time_steps]
        # Majority label for the window.  pandas' Series.mode() replaces
        # scipy.stats.mode: the file only does `import scipy`, which does not
        # reliably expose scipy.stats, and scipy's mode() return shape changed
        # across versions.  Ties resolve to the smallest label, matching
        # scipy.stats.mode's behaviour.
        ys.append(labels.mode().iloc[0])
    return np.array(Xs), np.array(ys).reshape(-1, 1)
# Window length and stride in samples.  NOTE(review): at WISDM's 20 Hz these
# are presumably 10 s windows with 2 s hops — confirm the sampling rate.
TIME_STEPS = 200
STEP = 40
X_train, y_train = create_dataset(
    df_train[['x_axis', 'y_axis', 'z_axis']],
    df_train.activity,
    TIME_STEPS,
    STEP
)
X_test, y_test = create_dataset(
    df_test[['x_axis', 'y_axis', 'z_axis']],
    df_test.activity,
    TIME_STEPS,
    STEP
)
print(X_train.shape, y_train.shape)
# One-hot encode the activity labels; fitted on the training labels only so
# unseen test labels become all-zero rows (handle_unknown='ignore').
# NOTE(review): `sparse=` was renamed `sparse_output=` in scikit-learn 1.2+.
enc = sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
enc = enc.fit(y_train)
y_train = enc.transform(y_train)
y_test = enc.transform(y_test)
# Bidirectional LSTM classifier over raw (time_steps, 3) windows.
model = keras.Sequential()
model.add(
    keras.layers.Bidirectional(
        keras.layers.LSTM(
            units=128,
            # Per-sample shape: (time_steps, n_axes) == (200, 3).
            input_shape=[X_train.shape[1], X_train.shape[2]]
        )
    )
)
model.add(keras.layers.Dropout(rate=0.5))
model.add(keras.layers.Dense(units=128, activation='relu'))
# Softmax over the one-hot label columns produced by the encoder above.
model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
print(X_train.shape) # (22454, 200, 3)
history = model.fit(
    X_train, y_train,
    epochs=20,
    batch_size=64,
    validation_split=0.1,
    shuffle=True
)
model.summary()
model.save("lstm.h5")
|
25,133 | 2d0012b410f8204cabddd22ed9d0fb3ca09e8011 | import dask
import numpy as np
import pytest
import xarray as xr
from .xfilter import bandpass, highpass, lowpass
def assert_allclose(a, b, **kwargs):
    """assert_allclose plus xarray's internal-invariant checks on both operands.

    NOTE(review): _assert_internal_invariants is a private xarray testing
    helper and may change signature/behaviour across xarray versions.
    """
    xr.testing.assert_allclose(a, b, **kwargs)
    xr.testing._assert_internal_invariants(a)
    xr.testing._assert_internal_invariants(b)
@pytest.fixture
def test_data():
    """Synthetic signal for the filter tests: three sinusoids with periods
    5/100/1000 days sampled every 4 days, plus their sum in data['total']."""
    π = np.pi
    t = np.arange(0, 20001, 4)  # in days
    freqs = dict(zip(["high", "mid", "low"], [5, 100, 1000]))
    data = xr.Dataset()
    for name, f in freqs.items():
        data[name] = xr.DataArray(
            np.sin(2 * π / f * t), dims=["time"], coords={"time": t}
        )
    data["total"] = data.low + data.mid + data.high
    # NOTE(review): this stores a live dict view, not a list; fine for these
    # tests, but serialising the dataset's attrs would choke on it.
    data.attrs["freqs"] = freqs.values()
    return data
@pytest.mark.parametrize(
    "filt, freq, expect",
    [
        (lowpass, 1 / 250, "low"),
        (highpass, 1 / 40, "high"),
        (bandpass, (1 / 40, 1 / 250), "mid"),
    ],
)
def test_filters(test_data, filt, freq, expect):
    """Each filter should isolate the matching sinusoid from the sum."""
    actual = filt(test_data.total, coord="time", freq=freq, order=4)
    # Compare only where the filter produced values (window edges are NaN).
    expected = test_data[expect].where(~np.isnan(actual))
    assert_allclose(actual, expected, atol=1e-2)
@pytest.mark.xfail(reason="use_overlap needs to be fixed.")
@pytest.mark.parametrize(
"filt, freq", [(lowpass, 1 / 50), (highpass, 1 / 50), (bandpass, (1 / 40, 1 / 250))]
)
def test_map_overlap(test_data, filt, freq):
actual = filt(
test_data.total.chunk({"time": 1001}), coord="time", freq=freq
).compute()
expected = filt(test_data.total, coord="time", freq=freq)
assert (np.isnan(actual) == np.isnan(expected)).all()
assert_allclose(actual, expected)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
"filt, freq", [(lowpass, 1 / 50), (highpass, 1 / 50), (bandpass, (1 / 40, 1 / 250))]
)
def test_gappy_filter(test_data, filt, freq):
da = test_data.total.copy()
da[500:1000] = np.nan
da[3000:3100] = np.nan
da = da.expand_dims(x=10)
chunked = da.chunk({"time": -1, "x": 1})
kwargs = dict(coord="time", freq=freq)
numpy_ans = filt(da, coord="time", freq=freq, gappy=False)
numpy_ans_1d = filt(da, coord="time", freq=freq, gappy=True)
dask_ans = filt(chunked, coord="time", freq=freq, gappy=False)
dask_ans_1d = filt(chunked, coord="time", freq=freq, gappy=True)
assert isinstance(dask_ans.data, dask.array.Array)
xr.testing.assert_allclose(numpy_ans, numpy_ans_1d)
xr.testing.assert_equal(numpy_ans, dask_ans.compute())
xr.testing.assert_equal(numpy_ans_1d, dask_ans_1d.compute())
xr.testing.assert_allclose(dask_ans.compute(), dask_ans_1d.compute())
|
25,134 | 67abc7cb4ba9eeb922ad73eb1ed35bdcbd4a161c | #Sarwar Hussain
from tabulate import tabulate
from Event import Event
from Contact import Contact
class Meeting (Event):
    """An Event with a participant list; registers itself with each contact."""

    def __init__(self, title='default event', date='01-01-2020', start_time='00:00', end_time='24:00', list_of_contacts=None):
        """Create the meeting and add it to every participant's schedule.

        list_of_contacts defaults to None (not []) to avoid the shared
        mutable-default pitfall; the original compared with `== None`, which
        invokes __eq__ — identity (`is None`) is the correct check.
        """
        super().__init__(title, date, start_time, end_time)
        if list_of_contacts is None:
            list_of_contacts = []
        self.__list_of_contacts = list_of_contacts
        for participant in list_of_contacts:
            # Back-reference: each Contact keeps a list of its meetings.
            participant.add_meeting(self)

    def print_event(self):
        """ Print a meeting in the format specified in tabulate """
        joined_names = ', '.join(str(c) for c in self.__list_of_contacts)
        table = [[str(self._title)],
                 ["Date: " + str(self._date)],
                 ["Time: " + str(self._start) + " - " + str(self._end)],
                 ["Participants: " + str(joined_names)]]
        print(tabulate(table, tablefmt='grid'))

    def write_to_file(self, file):
        """Append this meeting as one CSV row: title,date,start,end,p1,...,pN\\n

        Preserves the original quirk that a meeting with no participants
        writes a trailing comma and no newline.
        """
        file.write(str(self._title) + ",")
        file.write(str(self._date) + ",")
        file.write(str(self._start) + ",")
        file.write(str(self._end) + ",")
        contacts = self.__list_of_contacts
        if contacts:
            file.write(",".join(str(p) for p in contacts) + "\n")
|
25,135 | 1a0c59fb5befe716bf1b2f135ff6e749b714708a | import unittest
from bson.objectid import ObjectId
from pymongo import mongo_client
from core.data_checks.implementation.company_checks.competition.missing_cci_data_check import MissingCCIDataCheck
__author__ = 'vgold'
class TestMissingCCIDataCheck(unittest.TestCase):
conn = None
mds = None
    @classmethod
    def setUpClass(cls):
        # One MongoDB connection shared by the whole class; needs a local mongod.
        cls.conn = mongo_client.MongoClient("localhost", 27017)
        cls.mds = cls.conn["itest_mds"]
    @classmethod
    def tearDownClass(cls):
        # Close the shared connection opened in setUpClass.
        cls.conn.close()
def setUp(self):
self.ind1 = ObjectId()
self.ind2 = ObjectId()
self.ind3 = ObjectId()
self.co10 = ObjectId()
self.co11 = ObjectId()
self.co20 = ObjectId()
self.co21 = ObjectId()
self.co30 = ObjectId()
self.mds.industry.insert([
{
"_id": self.ind1,
"links": {
"industry": {
"industry_competition": [
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": self.ind1
},
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": self.ind2
}
]
}
}
},
{
"_id": self.ind2,
"links": {
"industry": {
"industry_competition": [
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": self.ind1
},
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": self.ind2
}
]
}
}
},
{
"_id": self.ind3,
"links": {
"industry": {
"industry_competition": [
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": self.ind3
}
]
}
}
}
])
self.mds.company.insert([
{
"_id": self.co10,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind1
}
]
}
}
},
{
"_id": self.co11,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind1
}
]
}
}
},
{
"_id": self.co20,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind2
}
]
}
}
},
{
"_id": self.co21,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind2
}
]
}
}
},
{
"_id": self.co30,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind3
}
]
}
}
}
])
self.mds.company_competition_instance.insert([
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co10,
"entity_id_to": self.co10
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co10,
"entity_id_to": self.co11
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co10,
"entity_id_to": self.co20
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co10,
"entity_id_to": self.co21
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co11,
"entity_id_to": self.co10
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co11,
"entity_id_to": self.co11
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co11,
"entity_id_to": self.co20
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co11,
"entity_id_to": self.co21
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co20,
"entity_id_to": self.co10
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co20,
"entity_id_to": self.co11
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co20,
"entity_id_to": self.co20
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co20,
"entity_id_to": self.co21
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co21,
"entity_id_to": self.co10
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co21,
"entity_id_to": self.co11
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co21,
"entity_id_to": self.co20
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co21,
"entity_id_to": self.co21
}
}
},
{
"_id": ObjectId(),
"data": {
"pair": {
"entity_id_from": self.co30,
"entity_id_to": self.co30
}
}
}
])
    def tearDown(self):
        # Drop everything setUp inserted so each test starts from a clean db.
        self.mds.industry.drop()
        self.mds.company.drop()
        self.mds.company_competition_instance.drop()
    def test_missing_cci_data_check__good(self):
        # The setUp fixtures contain a complete company-competition matrix for
        # every published company, so the check must pass with no failures.
        published_company_set = {
            str(co["_id"])
            for co in self.mds.company.find({"data.workflow.current.status": "published"}, {"_id": 1})
        }
        checker = MissingCCIDataCheck(self.mds, self.mds.company.find_one({"_id": self.co10}), published_company_set)
        result = checker.check()
        self.assertTrue(result)
        self.assertEqual(len(checker.failures), 0)
def test_missing_cci_data_check__bad(self):
ind4 = ObjectId()
self.mds.industry.insert({
"_id": ind4,
"links": {
"industry": {
"industry_competition": [
{
"entity_role_from": "competitor",
"entity_role_to": "competitor",
"entity_id_to": ind4
}
]
}
}
})
co22 = ObjectId()
co40 = ObjectId()
self.mds.company.insert([
{
"_id": co22,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": self.ind2
}
]
}
}
},
{
"_id": co40,
"data": {"type": "retail_banner", "workflow": {"current": {"status": "published"}}},
"links": {
"industry": {
"industry_classification": [
{
"entity_role_from": "primary_industry_classification",
"entity_role_to": "primary_industry",
"entity_id_to": ind4
}
]
}
}
}
])
published_company_set = {
str(co["_id"])
for co in self.mds.company.find({"data.workflow.current.status": "published"}, {"_id": 1})
}
checker = MissingCCIDataCheck(self.mds, self.mds.company.find_one({"_id": self.co10}), published_company_set)
result = checker.check()
self.assertFalse(result)
self.assertEqual(
checker.failures,
{self.ind2: ({co22}, 3, 2)}
)
checker = MissingCCIDataCheck(self.mds, self.mds.company.find_one({"_id": co22}), published_company_set)
result = checker.check()
self.assertFalse(result)
self.assertEqual(
sorted(checker.failures),
[self.ind1, self.ind2]
)
checker = MissingCCIDataCheck(self.mds, self.mds.company.find_one({"_id": co40}), published_company_set)
result = checker.check()
self.assertFalse(result)
self.assertEqual(
checker.failures,
{ind4: ({co40}, 1, 0)}
)
if __name__ == '__main__':
unittest.main()
|
25,136 | 1017473cbace6f8740b0fd633dd3b2d3a027db3d | '''Kasaparov_BC_Player.py
The beginnings of an agent that might someday play Baroque Chess.
'''
import time
import BC_state_etc as BC
#for reference
BLACK = 0
WHITE = 1
BLACK_PINCER = 2
BLACK_COORDINATOR = 4
BLACK_LEAPER = 6
BLACK_IMITATOR = 8
BLACK_WITHDRAWER = 10
BLACK_KING = 12
BLACK_FREEZER = 14
WHITE_PINCER = 3
WHITE_COORDINATOR = 5
WHITE_LEAPER = 7
WHITE_IMITATOR = 9
WHITE_WITHDRAWER = 11
WHITE_KING = 13
WHITE_FREEZER = 15
def makeMove(currentState, currentRemark, timelimit): #time limit in miliseconds
# Compute the new state for a move.
# This is a placeholder that just copies the current state.
newState = BC.BC_state(currentState.board)
# Fix up whose turn it will be.
newState.whose_move = 1 - currentState.whose_move
whoseMove = 1 - newState.whose_move
board = newState.board
startAt = time.time()
bestrating = 0
bestMove = []
allStates = [[board]] #list of list of states
while True:
if time.time() - startAt> timelimit*0.97:
break
nextStates = []
depthXrating = 100000 #reset rating to something big
for listOfStates in allStates:
lastboard = listOfStates[len(listOfStates)-1] #look at the last state in a list of states
allMoves = getAllMoves(lastboard, whoseMove) #get all possible moves from that last state
newlist = []
#print(allMoves)
#print(lastboard)
if time.time() - startAt> timelimit*0.97:
break
for move in allMoves:
#print(move)
newlist.append(listOfStates + [getState(move, lastboard)])
if time.time() - startAt> timelimit*0.97:
break
#print(newlist)
for listState in newlist:
#print(listState)
rating = ((-1)**newState.whose_move)*staticEval2(listState, len(allMoves), startAt, timelimit)
#rating => the smaller, the better it is for US, THIS player
#if we are white, whosemove=1, good move = big => -1**whosemove good move = small
#if we are black, whosemove=0, good move = small => -1**whosemove good move = small
#nextStates.append(newlist) #for all the generated moves, generate the new state, and append the new state to the previous list of states.
if rating < depthXrating:
#print(rating)
depthXrating = rating
bestrating = rating
bestMove = listState
whoseMove = 1 - whoseMove
if time.time() - startAt> timelimit*0.97:
break
allStates = nextStates
#assumes time is close to up.
#print(bestMove)
move = getMoveBeforeAfter(bestMove[0],bestMove[1])
newState.board = bestMove[1]
newState.whose_move = 1- currentState.whose_move
# Construct a representation of the move that goes from the
# currentState to the newState.
# Here is a placeholder in the right format but with made-up
# numbers:
#move = ((6, 4), (3, 4))
# Make up a new remark
newRemark = getRemark(bestrating)
return [[move, newState], newRemark]
# takes a move and returns a new state of the complete board after the move was made
def getState(move, state):
resultstate = [[0,0,0,0,0,0,0,0] for i in range(8)]
for i in range(8):
for j in range(8):
resultstate[i][j] = state[i][j]
#print(resultstate)
intlLoc = move[0]
goalLoc = move[1]
piece = state[intlLoc[1]][intlLoc[0]]
# assumes the initial location won't be 0 in state
color = state[intlLoc[1]][intlLoc[0]] % 2
opoColor = 1 - color
# pincer
if piece - color == 2:
if len(kingHelper(goalLoc, state, opoColor)) != 0:
# +x east
x = min(goalLoc[0] + 1, 7)
if state[goalLoc[1]][x] % 2 != color:
x1 = state[goalLoc[1]][min(x + 1, 7)]
if x1 % 2 == color and x1 != 0:
resultstate[goalLoc[1]][x] = 0
# -x west
x = max(goalLoc[0] - 1, 0)
if state[goalLoc[1]][x] % 2 != color:
x1 = state[goalLoc[1]][max(x - 1, 0)]
if x1 % 2 == color and x1 != 0:
resultstate[goalLoc[1]][x] = 0
# +y north
x = min(goalLoc[1] + 1, 7)
if state[x][goalLoc[0]] % 2 == opoColor:
x1 = state[min(x + 1, 7)][goalLoc[0]]
if x1 % 2 == color and x1 != 0:
resultstate[x][goalLoc[0]] = 0
# -y south
x = max(goalLoc[0] - 1, 0)
if state[x][goalLoc[0]] % 2 != color:
x1 = state[max(x - 1, 0)][goalLoc[1]]
if x1 % 2 == color and x1 != 0:
resultstate[x][goalLoc[0]] = 0
# Leaper
elif piece - color == 6:
diffX = goalLoc[0] - intlLoc[0]
diffY = goalLoc[1] - intlLoc[1]
if diffX != 0 :
x = int(diffX/abs(diffX))
else:
x = 0
if diffY != 0:
y = int(diffY/abs(diffY))
else:
y=0
for i in range(abs(diffX)):
resultstate[intlLoc[1]+(i*y)][intlLoc[0]+(i*x)] = 0
# coordinator
elif piece - color == 4:
kingsLoc = [-1, -1]
for i in range(8):
for j in range(8):
if state[i][j] == 12 + color:
kingsLoc = [j,i]
break
if kingsLoc[0] != -1:
break
if state[goalLoc[1]][kingsLoc[0]] % 2 == opoColor:
resultstate[goalLoc[1]][kingsLoc[0]] = 0
if state[kingsLoc[1]][goalLoc[0]] % 2 == opoColor:
resultstate[kingsLoc[1]][goalLoc[0]] = 0
# withdrawer
elif piece - color == 10:
if len(kingHelper(intlLoc, state, opoColor)) != 0:
dirY = intlLoc[0]-goalLoc[0]
dirX = intlLoc[1]-goalLoc[1]
if dirY != 0:
dirY = dirY/abs(dirY)
if dirX != 0:
dirX = dirX/abs(dirX)
opoY = int(intlLoc[0]-dirY)
opoX = int(intlLoc[1]-dirX)
if opoY in range(8) and opoX in range(8) and resultstate[opoY][opoX]%2 == opoColor:
resultstate[opoY][opoX] = 0
# imitator
elif piece - color == 8:
# knight
resultstate[intlLoc[1]][intlLoc[0]] = 6 + color
resultstate = getState(move, resultstate)
# cordinator
resultstate[intlLoc[1]][intlLoc[0]] = 4 + color
resultstate = getState(move, resultstate)
# pincer
resultstate[intlLoc[1]][intlLoc[0]] = 2 + color
resultstate = getState(move, resultstate)
# withdrawer
resultstate[intlLoc[1]][intlLoc[0]] = 10 + color
resultstate = getState(move, resultstate)
resultstate[intlLoc[1]][intlLoc[0]] = 0
resultstate[goalLoc[1]][goalLoc[0]] = piece
return resultstate
def getRemark(score):
    """Map the evaluation score to a canned remark.

    Larger scores are worse for this player; the remarks run from alarm
    (score > 500) down to gloating (score <= -100).
    """
    remarks = (
        (500, "uh oh."),
        (100, "This is real bad."),
        (75, "I'm getting a little worried here."),
        (50, "This isn't over yet!"),
        (30, "You got me."),
        (20, "Nice move."),
        (10, "Finally we're getting somewhere."),
        (-5, "This is going nowhere."),
        (-15, "Finally we're getting somewhere."),
        (-25, "Take that!"),
        (-35, "Having a little trouble there?"),
        (-55, "You're free to give up."),
        (-75, "I'd like to see how you would get out of that."),
        (-100, "Victory is in sight!"),
    )
    # First threshold the score exceeds wins, mirroring the original if-chain.
    for threshold, remark in remarks:
        if score > threshold:
            return remark
    return "Good game."
def getMoveBeforeAfter(oldstate, newstate):
    """Diff two 8x8 boards and recover the move as [[from_x, from_y], [to_x, to_y]].

    The origin is a square that became empty; the destination is a square
    that was empty and is now occupied.  Missing squares stay [-1, -1].
    """
    origin = [-1, -1]
    destination = [-1, -1]
    for row in range(8):
        for col in range(8):
            before = oldstate[row][col]
            after = newstate[row][col]
            if before == after:
                continue
            if before == 0:
                destination = [col, row]
            elif after == 0:
                origin = [col, row]
    return [origin, destination]
def getAllMoves(currentState, whose_move):
    """Enumerate every move for the side to move.

    Returns a list of [[x1, y1], [x2, y2]] (from, to) pairs where x is the
    column and y is the row.

    Fix: the imitator branch passed its location as [i, j] (row, col) while
    every other handler receives [j, i] (col, row); coordinates are now
    passed consistently.
    """
    moves = []
    for i in range(8):
        for j in range(8):
            current = currentState[i][j]
            if current != 0 and current % 2 == whose_move:
                current = current - current % 2  # strip the colour bit
                if current == 2:      # pincer
                    moves += [[[j, i], [x, y]] for [x, y] in pincer([j, i], currentState)]
                elif current == 6:    # leaper
                    moves += [[[j, i], [x, y]] for [x, y] in knight([j, i], currentState)]
                elif current == 8:    # imitator (was called with [i, j])
                    moves += [[[j, i], [x, y]] for [x, y] in imitator([j, i], currentState)]
                elif current == 12:   # king
                    moves += [[[j, i], [x, y]] for [x, y] in king([j, i], currentState)]
                else:                 # coordinator / withdrawer / freezer
                    moves += [[[j, i], [x, y]] for [x, y] in other([j, i], currentState)]
    return moves # moves is a list of elements in the form of ((x,y),(x2,y2))
# These functions should take a position as a parameter and return a list of all possible positions of that piece
def king(loc, currentState):
    """Return the king's destination squares (one step in any direction).

    A destination qualifies when it is empty or holds an opposing piece; a
    frozen king (see checkFreezer) may not move at all.  The original
    min/max clamping at the board edge is preserved exactly, including the
    duplicate candidate squares it can produce on edges.
    """
    moves = []
    if checkFreezer(loc, currentState):
        return moves
    myColor = currentState[loc[1]][loc[0]] % 2
    enemy = 1 - myColor
    # Candidate squares in the same order the original generated them.
    candidates = (
        [min(loc[0] + 1, 7), loc[1]],
        [min(loc[0] + 1, 7), min(loc[1] + 1, 7)],
        [min(loc[0] + 1, 7), max(loc[1] - 1, 0)],
        [max(loc[0] - 1, 0), max(loc[1] - 1, 0)],
        [max(loc[0] - 1, 0), min(loc[1] + 1, 7)],
        [max(loc[0] - 1, 0), loc[1]],
        [loc[0], min(loc[1] + 1, 7)],
        [loc[0], max(loc[1] - 1, 0)],
    )
    for square in candidates:
        occupant = currentState[square[1]][square[0]]
        if occupant == 0 or occupant % 2 == enemy:
            moves.append(square)
    return moves
# returns a list of opposition pieces around a loc
def kingHelper(x, currentState, opoCol):
    """Return the opposing pieces on the (up to 8) squares adjacent to x.

    x is a [col, row] pair; opoCol is the opponent colour (0 black, 1 white).

    Fixes two defects in the original:
    * every adjacency test guarded non-emptiness with the same square
      (currentState[y1][x1]) instead of the square actually being examined
      (copy/paste bug), and
    * coordinates were clamped with min()/max() instead of skipped, so edge
      squares could be inspected — and counted — more than once.
    """
    col, row = x
    result = []
    for dx, dy in ((1, 1), (1, -1), (-1, 1), (-1, -1),
                   (0, 1), (0, -1), (1, 0), (-1, 0)):
        nx, ny = col + dx, row + dy
        if 0 <= nx <= 7 and 0 <= ny <= 7:
            piece = currentState[ny][nx]
            if piece != 0 and piece % 2 == opoCol:
                result.append(piece)
    return result
def pincer(loc, currentState):
    """Return the squares a pincer at loc=[col,row] can slide to.

    Pincers move like rooks: any number of empty squares along a rank or
    file, stopping before the first occupied square.  A frozen pincer
    cannot move.

    Fixes an off-by-one in the original: each direction was scanned with
    range(1, 7) (at most 6 steps), so a pincer on one edge could never
    reach the opposite edge of an empty rank/file — a rook line on an 8x8
    board is up to 7 squares long.  The four near-identical per-direction
    loops were also collapsed into one.
    """
    result = []
    if checkFreezer(loc, currentState):
        return result
    col, row = loc
    # Same direction order as the original: +x, -x, +y, -y.
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        for step in range(1, 8):
            x, y = col + dx * step, row + dy * step
            if not (0 <= x <= 7 and 0 <= y <= 7):
                break
            if currentState[y][x] != 0:
                break  # blocked; pincers do not capture by displacement
            result.append([x, y])
    return result
# The leaper
def knight(loc, currentState):
    """Return the leaper's moves from loc=[col,row].

    A leaper slides like a queen through empty squares and may additionally
    jump over exactly one enemy piece per line, continuing onto the empty
    squares beyond it (the capture itself happens in getState, not here).
    A frozen leaper cannot move.

    Rewrite of eight copy/pasted per-direction loops (which also carried
    stray C-style semicolons) into one loop over direction vectors; the
    direction order (W, E, S, N, SW, NW, NE, SE) and the emitted (x, y)
    tuples are unchanged.
    """
    moves = []
    if checkFreezer(loc, currentState):
        return moves
    color = currentState[loc[1]][loc[0]] % 2
    for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1),
                   (-1, 1), (-1, -1), (1, -1), (1, 1)):
        jumped = False  # already leapt over an enemy on this line?
        for step in range(1, 8):
            x = loc[0] + dx * step
            y = loc[1] + dy * step
            if not (0 <= x <= 7 and 0 <= y <= 7):
                break
            square = currentState[y][x]
            if square != 0:
                if square % 2 == color or jumped:
                    break  # own piece, or a second piece on the line
                jumped = True  # leap over this enemy; its square is not a destination
            else:
                moves.append((x, y))
    return moves
# We tried
def imitator(loc, currentState):
    """Imitator move generation — intentionally unimplemented; returns [].

    The original body contained ~110 lines of unreachable code after an
    unconditional `return moves` (introduce() admits this agent never
    learned the imitator).  That dead code — which also indexed the board
    as [x][y] instead of [y][x] — has been removed.
    """
    return []
def other(loc, currentState):
    """Queen-style sliding moves for the remaining pieces (coordinator,
    withdrawer, freezer): any number of empty squares in the 8 compass
    directions, stopping before the first occupied square.  A frozen piece
    cannot move.

    The eight copy/pasted per-direction loops were collapsed into one, and
    the unused color/opponCol locals were removed.
    """
    moves = []
    if checkFreezer(loc, currentState):
        return moves
    # Same direction order as the original: W, E, S, N, SW, NW, NE, SE.
    for dx, dy in ((-1, 0), (1, 0), (0, 1), (0, -1),
                   (-1, 1), (-1, -1), (1, -1), (1, 1)):
        for step in range(1, 8):
            x = loc[0] + dx * step
            y = loc[1] + dy * step
            if not (0 <= x <= 7 and 0 <= y <= 7):
                break
            if currentState[y][x] != 0:
                break  # blocked: these pieces do not capture by displacement
            moves.append((x, y))
    return moves
#returns true if there is an opponent's freezer in any of the 8 directions, else returns false
def checkFreezer(loc, currentState):
    """Return True if the piece at loc=[col,row] is frozen.

    Per the rule stated in the original comment, a piece may not move when
    an enemy freezer occupies an adjacent square.  Freezers are encoded as
    14 + colour (14 black, 15 white).

    Fixes two defects: the original set `color` to the raw piece code
    (e.g. 14), so `1 - color` was a nonsense opponent colour and kingHelper
    could never match anything (the function always returned False); and
    the freezer test `x - color == 14` matched a same-coloured freezer
    rather than the opponent's.
    """
    color = currentState[loc[1]][loc[0]] % 2
    opoCol = 1 - color
    for piece in kingHelper(loc, currentState, opoCol):
        if piece - opoCol == 14:
            return True
    return False
def nickname():
    """Return this agent's short display name."""
    return "Kasper"
def introduce():
    """Return a one-line self-introduction for the match transcript."""
    return "I'm Kasparov a Russian Baroque Chess player who doesn't know how to play the imitator."
def prepare(player2Nickname):
    """Hook called before the game starts; this agent needs no setup."""
    pass
# Mathematical sum of all the pieces we have based on a point system.
# the position index in comparision to opponents
# pieces and our pieces in the kings diagonals, verticals and horizontals.
def staticEval(state):
# print(state)
sum = 0
for i in range(8):
for j in range(8):
x = state[i][j]
if x != 0:
if i < 2 and x % 2 == 1:
sum += 1
if i > 5 and x % 2 == 0:
sum -= 1
# checks the number of opponents the freezer has trapped
if x == 14:
sum -= (len(kingHelper([j,i], state, 1)) * 3)
if x == 15:
sum += (len(kingHelper([j, i], state, 0)) * 3)
# when piece is king checks surrounding places for opponents
if x == 12:
sum -= 1000
sum += len(kingHelper([j,i], state, 1))
elif x == 13:
sum += 1000
sum -= len(kingHelper([j, i], state, 0))
# pincers
elif x == 2:
sum -= 2
elif x == 3:
sum += 2
# all others
elif x % 2 == 0:
sum -= 4
elif x % 2 == 1:
sum += 4
# Check the middle squares
if i == 3 or i == 4:
if j == 3 or j == 4:
blackP = kingHelper([j,i], state, 1)
whiteP = kingHelper([j,i], state, 0)
sum += len(blackP)
sum -= len(whiteP)
return sum
def staticEval2(states, nMoves, startTime, timelimit): #accepts a list of states and also the number of moves
    """Average staticEval over a line of play (a list of board states).

    nMoves is accepted but unused.  When ~97% of the time budget is spent
    the loop bails out early, so the result is then a partial sum still
    divided by the full len(states).
    """
    total = 0
    for state in states:
        # Bail out when nearly out of time.
        if time.time() - startTime> timelimit*0.97:
            break
        total += staticEval(state)
    return total / len(states)
|
25,137 | e5e4345ab96b246ffde449a1bc262090f559e2bd | # 根据一棵树的前序遍历与中序遍历构造二叉树。
# 注意:
# 你可以假设树中没有重复的元素。
# 前序:根-左-右
# 中序: 左-根-右
def bildTree(preorder, inorder):
    """Build a binary tree from its preorder + inorder traversals
    (LeetCode 105) by delegating to build() with inclusive index bounds.

    NOTE(review): the name is misspelled ("bild" vs "build") but is kept —
    it is this module's public entry point.
    """
    return build(preorder, 0, len(preorder)-1,
                 inorder, 0, len(inorder)-1)
def build(preorder, preStart, preEnd, inorder, inStart, inEnd):
    """Recursively build the subtree covering preorder[preStart:preEnd+1] /
    inorder[inStart:inEnd+1] (all bounds inclusive) and return its root.

    Fixes three defects in the original:
    * the root search used range(inStart, inEnd), which excludes inEnd, so a
      root sitting at the end of its inorder window left `index` unbound;
    * the recursive calls computed the preorder bounds as `index`/`index+1`,
      which is only correct for the outermost call (preStart == inStart == 0);
      they must be offset by the left subtree's size;
    * the right-subtree call invoked the misspelled name `bulid`, a
      guaranteed NameError.
    """
    if preStart > preEnd:
        return None
    value = preorder[preStart]  # preorder: the root comes first
    # Locate the root inside the inorder window (inclusive of inEnd).
    index = inorder.index(value, inStart, inEnd + 1)
    leftSize = index - inStart
    root = TreeNode(value)
    root.left = build(preorder, preStart + 1, preStart + leftSize,
                      inorder, inStart, index - 1)
    root.right = build(preorder, preStart + leftSize + 1, preEnd,
                       inorder, index + 1, inEnd)
    return root
|
25,138 | 5f34880289156a6edf154e82d4477c2fb9349cf5 | #Valida si la cadena tiene @, letras, números, guión
id = "rasarag@3-1"
acumulador = 0
for i in id:
if(i.isalpha()):
contA=1
elif (i.isdigit()):
contD = 1
elif (i=="@"):
contE = 1
elif (i=="-"):
contG=1
if contD==1 and contA == 1 and contD==1 and contG == 1:
print("El usuario tiene letras, números, arroba y guión") |
25,139 | cf51d64d0267886649d7e219b5e0af6377d77a32 | #!/usr/bin/env python
import sys
# Hadoop streaming reducer (Python 2).  Input arrives sorted by key, so only
# the FIRST record seen for each composite key is emitted -- effectively a
# dedup that keeps the first score per key.
current_key = None
# input comes from STDIN
for line in sys.stdin:
    # remove leading and trailing whitespace
    line = line.strip()
    # parse the input we got from mapper.py
    (key, score) = line.split('\t', 1)
    # this IF-switch only works because Hadoop sorts map output
    # by key (here: word) before it is passed to the reducer
    if current_key == key:
        continue
    else:
        # The key is a composite joined with "$$$$"; split it back apart.
        (key1, key2) = key.split("$$$$", 1)
        print '%s\t%s\t%s' % (key1, key2, score)
        current_key = key
exit(0)
|
25,140 | e2de47d0733d2404e69e26eae9ced9fb1e7921c9 | from module_prac1 import mul # module_prac1 에 있는 mul 함수 불러와서 계산
# Demo: multiply 2 by 3 via the imported helper; prints 6.
print(mul(2,3))
25,141 | 52b264e3a571a92c0d6e2ab050dcb3294b314ed9 | class Adjacent_Elements_Products(object):
    def __init__(self):
        """No per-instance state is needed; the class only groups the method."""
        pass
def adjacentElementsProduct(self, inputArray):
# Almacenaremos los Productos del Array.
array_Product = []
for i in range(0, len(inputArray) - 1): # El -1 es para NO Exceder el Lenght Original del Array.
array_Product.append(inputArray[i] * inputArray[i+1])
print(array_Product)
# Inicializamos el Elemento Maximo como el Primer Elemento del Array de Productos.
int_Max = array_Product[0]
# buscamos el Producto MAS Grande.
for i in range(0, len(array_Product)):
# Si el producto del Array es Mayor a MAX, este tomara su Valor; sino MAX continua siendo el MISMO.
if array_Product[i] > int_Max:
int_Max = array_Product[i]
return int_Max
if __name__ == '__main__':
    # Smoke test: adjacent products are [-92, -12, -24, -96], so this prints -12.
    aep = Adjacent_Elements_Products()
    print(aep.adjacentElementsProduct([-23,4,-3,8,-12]))
25,142 | f0a543f7daae5ef8b1c53288c4d85a522f39f894 | #! /usr/local/bin/python3.6
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.interpreter import RasaNLUInterpreter
import warnings
# ignore warnings
warnings.filterwarnings(action='ignore', category=DeprecationWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)
logger = logging.getLogger(__name__)
def run_online(input_channel,
               interpreter,
               domain_def_file='domain/domain.yml',
               training_data_file='data/stories.md',
               ):
    """Interactively (online) train a Rasa Core agent and return it.

    input_channel: channel used to collect the interactive training input.
    interpreter: NLU interpreter used to parse user messages.
    domain_def_file / training_data_file: YAML domain and story files.
    """
    dialogue_policies = [MemoizationPolicy(max_history=3), KerasPolicy()]
    agent = Agent(domain_def_file,
                  policies=dialogue_policies,
                  interpreter=interpreter)
    stories = agent.load_data(training_data_file)
    agent.train_online(stories,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)
    return agent
if __name__ == '__main__':
    try:
        print("[INFO] Running interactive training..\n")
        logging.basicConfig(level='INFO')
        # Pre-trained NLU model used to interpret user input during training.
        nlu_interpreter = RasaNLUInterpreter('models/zimsec_bot/default/nlu')
        run_online(ConsoleInputChannel(), nlu_interpreter)
    except Exception as err:
        # Broad catch is deliberate here: surface any startup failure to the console.
        print("[ERROR] There was a problem: ", err)
|
25,143 | aae5ad08178d4b5a73b3ce24e62c8910f1ed136f | from django.core.urlresolvers import reverse_lazy
from django.views.generic import CreateView, TemplateView
from application.apps.edr.forms import OptOutForm
from application.apps.edr.models import OptOut
class EdrOptoutView(CreateView):
    # Renders and processes the EDR opt-out form; a valid submission creates
    # an OptOut row and redirects to the named success URL.
    model = OptOut
    form_class = OptOutForm
    success_url = reverse_lazy('edr:success')
class EdrOptOutSuccess(TemplateView):
    # Static confirmation page shown after a successful opt-out.
    template_name = 'edr/optout_success.html'
|
25,144 | f9d64a1de8833397f85acc606affa413c3d28de8 | def get_parent_id(task_dict: dict) -> int | None:
"""
Get a task's parent id from a JSON task dictionary
Parameters
----------
task_dict: Dict
Dictionary with task information
Returns
-------
parent_id: int | None
Parent_id of the task
"""
return task_dict['parent']['id'] \
if task_dict['parent'] and 'id' in task_dict['parent'] else None
|
25,145 | 5bc271c25993408ed6cbf5428ad9650ed2c4b29e | from flask import Blueprint
from flask import Flask
from flask import abort
from flask import render_template
from flask import request
from lib.constants import SUCCESSFUL_PATCH
from lib.p2p import P2P
from lib.transaction import build_transaction
from lib.wallet import Wallet
from nacl.signing import SigningKey
from pathlib import Path
import jsonpickle
import logging
import os
# Application singletons: the Flask app, a /api blueprint for all routes,
# the peer-to-peer client, and the local key wallet.
app = Flask(__name__)
api = Blueprint("api",
                __name__,
                template_folder="templates",
                url_prefix="/api")
p2p = P2P()
wallet = Wallet()
@api.route("/transaction", methods=["POST"])
def transaction():
    """Broadcast transaction to peers."""
    # Request body is a jsonpickle payload: destination address, amount,
    # and the local keyname whose funds are spent.
    data = jsonpickle.decode(request.get_data())
    address = data["address"]
    amount = int(data["amount"])
    keyname = data["keyname"]
    pkplus, pkminus = wallet.keys(keyname)
    my_balance = p2p.query("/balance", address=pkplus)["balance"]
    if my_balance < amount:
        # NOTE(review): 404 is an odd status for insufficient funds -- confirm.
        abort(404, description="Not enough funds.")
    my_utxo = p2p.query("/find-utxos", address=pkplus, amount=amount)["utxos"]
    # Change returned to our own address when the UTXOs overshoot the amount.
    rem = sum(utxo.amount for utxo in my_utxo) - amount
    address_amount = [(address, amount)]
    # NOTE(review): assert is stripped under `python -O`; raise instead if
    # this invariant must hold in production.
    assert rem >= 0
    if rem > 0:
        address_amount.append((pkplus, rem))
    tx = build_transaction(my_utxo, address_amount, pkminus)
    try:
        p2p.broadcast("/transaction-pool", transaction=tx)
        return SUCCESSFUL_PATCH
    # NOTE(review): UnsuccessfulPatch is never imported in this module, so
    # reaching this handler raises NameError -- confirm the intended import.
    except UnsuccessfulPatch:
        payload = jsonpickle.encode(
            {"message": "Transaction wasn't accepted by the network."})
        return payload, 420, {"ContentType": "application/json"}
@api.route("/keys", methods=["GET", "POST"])
def keys():
    """List existing wallet keys (GET) or create a new keypair (POST)."""
    if request.method == "GET":
        # Expose every keyname together with its public address.
        data = [{
            "keyname": keyname,
            "address": wallet.pkplus(keyname)
        } for keyname in wallet.keynames()]
        payload = jsonpickle.encode({"keys": data})
        return payload, 200, {"Content-Type": "application/json"}
    elif request.method == "POST":
        data = request.get_data()
        keyname = jsonpickle.decode(data)["keyname"]
        keypath = wallet.create_keys(keyname)
        # TODO: handle duplicated keys
        payload = jsonpickle.encode({"keypath": str(keypath)})
        return payload, 200, {"Content-Type": "application/json"}
@api.route("/balance", methods=["GET"])
def balance():
    """Ask peers for balance."""
    # Relay the balance lookup for ?address=... to the peer network.
    address = request.args.get("address")
    balance = p2p.query("/balance", address=address)["balance"]
    payload = jsonpickle.encode({"balance": balance})
    return payload, 200, {"Content-Type": "application/json"}
app.register_blueprint(api)
if __name__ == "__main__":
    # NOTE(review): debug=True exposes the Werkzeug debugger -- confirm this
    # never runs in production.  Port comes from the PORT env variable.
    app.run(host="0.0.0.0", port=os.getenv("PORT"), debug=True)
|
25,146 | e66bf14ef2ba58269644274f274b7d38fd4968a1 | import numpy as np
import matplotlib.pyplot as plt
import h5py
import time
import copy
from random import randint
#load MNIST data
MNIST_data = h5py.File('MNISTdata.hdf5', 'r')
# assumes x_* are flattened 28x28 images and y_* column 0 holds labels -- TODO confirm
x_train = np.float32(MNIST_data['x_train'][:] )
y_train = np.int32(np.array(MNIST_data['y_train'][:,0]))
x_test = np.float32( MNIST_data['x_test'][:] )
y_test = np.int32( np.array( MNIST_data['y_test'][:,0] ) )
MNIST_data.close()
#Implementation of stochastic gradient descent algorithm
#number of inputs
num_inputs = 28*28
#number of hidden layer units
num_hiddens = 50
#number of outputs
num_outputs = 10
# Weights use 1/sqrt(fan-in) scaled Gaussian init; biases start at zero.
model = {}
model['W1'] = np.random.randn(num_hiddens,num_inputs) / np.sqrt(num_inputs)
model['b1'] = np.zeros((num_hiddens,1))
model['C'] = np.random.randn(num_outputs,num_hiddens) / np.sqrt(num_hiddens)
model['b2'] = np.zeros((num_outputs,1))
def indicator_function(y, num_output):
    """Return a one-hot column vector of length num_output with 1.0 at row y."""
    return (np.arange(num_output) == y).astype(np.float64).reshape(-1, 1)
def softmax_function(z):
    """Numerically stable softmax over axis 0 (column-wise).

    The max is subtracted before exponentiation so large logits cannot
    overflow; the shift cancels out after normalization.
    """
    exps = np.exp(z - np.max(z))
    return exps / np.sum(exps, axis=0, keepdims=True)
def forward(x, model):
    """Single forward pass: input -> tanh hidden layer -> softmax output.

    x is flattened into a column vector; model supplies 'W1', 'b1', 'C',
    'b2'.  Returns a cache dict with keys "Z1", "H1", "U", "p" that
    backward() consumes.
    """
    column = x.reshape(-1, 1)
    pre_hidden = np.dot(model['W1'], column) + model['b1']
    hidden = np.tanh(pre_hidden)
    logits = np.dot(model['C'], hidden) + model['b2']
    probs = softmax_function(logits)
    # Cache every intermediate so backprop can reuse them.
    return {"Z1": pre_hidden,
            "H1": hidden,
            "U": logits,
            "p": probs}
def backward(x, y, cache, model):
    """One backprop step for the 1-hidden-layer tanh/softmax network.

    x: input sample; y: integer class label; cache: dict from forward();
    model: parameter dict.  Returns gradients keyed "dW1","db1","dC","db2".
    """
    # Retrieve some model parameter values (W1 is fetched but unused below).
    W1 = model['W1']
    C = model['C']
    # Retrieve some cache values
    H1 = cache["H1"]
    p = cache["p"]
    # Backward prop calculations
    # Softmax + cross-entropy gradient w.r.t. logits: p - one_hot(y).
    dU = p - indicator_function(y, num_outputs)
    dC = np.dot(dU, H1.T)
    db2 = dU
    # tanh'(z) = 1 - tanh(z)^2, expressed via the cached activation H1.
    dZ1 = np.dot(C.T, dU) * (1 - np.power(H1, 2))
    dW1 = np.dot(dZ1, x.reshape(1, -1))
    db1 = dZ1
    model_grads = {"dW1": dW1,
                   "db1": db1,
                   "dC": dC,
                   "db2": db2}
    return model_grads
import time
time1 = time.time()
LR = .01
num_epochs = 20
prev_train_acc = 0.0
list_train_acc = []
list_test_acc = []
epoch_sizes = []
for epochs in range(num_epochs):
    #Learning rate schedule: step the LR down at epochs 6, 11, 16.
    if (epochs > 5):
        LR = 0.001
    if (epochs > 10):
        LR = 0.0001
    if (epochs > 15):
        LR = 0.00001
    total_correct = 0
    # Plain SGD: one random sample per update, len(x_train) updates per epoch.
    for n in range( len(x_train)):
        n_random = randint(0,len(x_train)-1 )
        y = y_train[n_random]
        x = x_train[n_random][:]
        cache = forward(x, model)
        prediction = np.argmax(cache["p"])
        if (prediction == y):
            total_correct += 1
        model_grads = backward(x, y, cache, model)
        model['W1'] = model['W1'] - LR*model_grads["dW1"]
        model['b1'] = model['b1'] - LR*model_grads["db1"]
        model['C'] = model['C'] - LR*model_grads["dC"]
        model['b2'] = model['b2'] - LR*model_grads["db2"]
    # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    curr_train_acc = total_correct/float(len(x_train))
    print(curr_train_acc)
    # Early stop when training accuracy plateaus between epochs.
    if (abs(curr_train_acc - prev_train_acc) < 0.0001):
        print("Two consecutive loss is too close(< 0.01%), thus terminate the SGD")
        break
    prev_train_acc = curr_train_acc
    # for learning curve plot
    epoch_sizes.append(epochs + 1)
    # after each epoch, save the accuracy of training data
    list_train_acc.append(curr_train_acc)
    # after each epoch, save the accuracy of testing data
    total_curr_correct = 0
    for n in range( len(x_test)):
        y = y_test[n]
        x = x_test[n][:]
        cache = forward(x, model)
        prediction = np.argmax(cache["p"])
        if (prediction == y):
            total_curr_correct += 1
    list_test_acc.append(total_curr_correct/float(len(x_test)))
#time for training
time2 = time.time()
print(time2-time1)
#test data: final accuracy over the full test set
total_correct = 0
for n in range( len(x_test)):
    y = y_test[n]
    x = x_test[n][:]
    cache = forward(x, model)
    prediction = np.argmax(cache["p"])
    if (prediction == y):
        total_correct += 1
print(total_correct/float(len(x_test)))
#plot the learning curve
plt.plot(epoch_sizes, list_train_acc, '-', color='b', label="Training Acc")
plt.plot(epoch_sizes, list_test_acc, '--', color='r', label="Testing Acc")
# Create plot
plt.title("Learning Curve")
plt.xlabel("Epoch"), plt.ylabel("Accuracy for Training and Testing along different Epochs"), plt.legend(loc="best")
plt.tight_layout()
plt.show()
25,147 | d7aebe4f5102d6e863929a5d64e602529cb6f45e | #Section 1
from urllib.request import urlopen
from bs4 import BeautifulSoup
url ="https://www.apple.com/itunes/charts/songs"
conn = urlopen(url)
raw_data = conn.read()
html_page = raw_data.decode("utf-8")
soup = BeautifulSoup(html_page, "html.parser")
section = soup.find('section', 'section chart-grid')
li_list = section.div.ul.find_all('li')
# print(li_list[0])
songs_list =[]
for li in li_list:
title = li.h3.string
link = li.a['href']
artist = li.h4.string
song = {
"Title": title,
"Link": link,
"Artist": artist,
}
songs_list.append(song)
import pyexcel
pyexcel.save_as(records = songs_list, dest_file_name = 'itunes.xlsx')
#Section 2
from youtube_dl import YoutubeDL
options = {
'default_search': 'ytsearch', # tell downloader to search instead of directly downloading
'max_downloads': 1 # Tell downloader to download only the first entry (video)
}
dl = YoutubeDL(options)
#Concentrate Title and Artist into 1 string
songs_list_new = []
for song in songs_list:
song_new = song['Title'] + ' ' + song['Artist']
songs_list_new.append(song_new)
dl.download(songs_list_new)
|
25,148 | aa9949d64e030028ae7eb978c47d3fcb0794e420 | a=1
b=2
c=5
d=5
x=1
z=1000
# `and` binds tighter than `or`, so this reads: (a==b and c==d) or (x!=z).
# x != z is true, so "true" prints regardless of the first two comparisons.
if a==b and c==d or x!=z:
    print("true")
##+ - / *
##
##== != < > <= >=
##
##and or not in
|
25,149 | 5d2a36a611bfaaf18e1273d818aa9f459cd39438 | #!/usr/bin/python3
#
# Copyright (c) 2019 - 2020, Khang Hua, email: khanghua1505@gmail.com
# All right reserved.
#
# This file is written and modified by Khang Hua.
#
# This model is free software; you can redistribute it and/or modify it under the terms
# of the GNU Lesser General Public License; either version 2.1 of the License, or (at your option)
# any later version. See the GNU Lesser General Public License for more details,
#
# This model is distributed in the hope that it will be useful.
import os
import sys
import math
import errno
import getopt
import socket
import scapy.config
import scapy.layers.l2
import scapy.route
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-5s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
def _long2net(arg):
if arg <= 0 or arg >= 0xFFFFFFFF:
raise ValueError("illegal netmask value", hex(arg))
return 32 - int(round(math.log(0xFFFFFFFF - arg, 2)))
def _to_CIDR_notation(bytes_network, bytes_netmask):
    """Render a (network, netmask) long pair as 'a.b.c.d/prefix'.

    Returns None (after a warning) for prefixes shorter than /16, which are
    considered too large to arp-scan.
    """
    prefix_len = _long2net(bytes_netmask)
    net = "%s/%s" % (scapy.utils.ltoa(bytes_network), prefix_len)
    if prefix_len < 16:
        logger.warning("%s is too big. skipping" % net)
        return None
    return net
def scan_ip_host(net, interface, timeout=5):
    """ARP-scan one CIDR network on one interface, yielding a dict per reply.

    Each yielded dict has 'src' (MAC), 'psrc' (IP) and 'host_name'.
    Requires root because arping crafts raw packets.
    """
    if os.geteuid() != 0:
        print('You need to be root to run this script', file=sys.stderr)
        return
    logger.info("arping %s on %s" % (net, interface))
    try:
        ans, unans = scapy.layers.l2.arping(net, iface=interface,
                                            timeout=timeout, verbose=True)
        # ans.res pairs each sent probe (s) with its reply (r).
        for s, r in ans.res:
            host_info = {}
            line = r.sprintf("%Ether.src%  %ARP.psrc%")
            host_info["src"] = r.src
            host_info["psrc"] = r.psrc
            try:
                # Best-effort reverse DNS; failures fall back to "Unknown".
                hostname = socket.gethostbyaddr(r.psrc)
                line += " " + hostname[0]
                # NOTE(review): stores the full gethostbyaddr tuple here but a
                # plain string below -- consumers see inconsistent types.
                host_info["host_name"] = hostname
            except socket.herror:
                host_info["host_name"] = "Unknown"
                pass
            logger.info(line)
            yield host_info
    except socket.error as e:
        if e.errno == errno.EPERM: # Operation not permitted
            logger.error("%s. Did you run as root?", e.strerror)
        else:
            raise
def scan_all_host(interface_to_scan=None, timeout_per_net=5):
    """Yield host-info dicts for every scannable network in the routing table.

    interface_to_scan: restrict the scan to one interface (None = all).
    timeout_per_net: per-network arping timeout in seconds.

    Fix: the docker/br- skip used `a and b or c`; because `and` binds
    tighter than `or`, br-* interfaces were skipped even when explicitly
    requested.  The intended grouping is now parenthesized.
    """
    if os.geteuid() != 0:
        print('You need to be root to run this script', file=sys.stderr)
        return
    for network, netmask, _, interface, address, _ in scapy.config.conf.route.routes:
        # Honor an explicit interface filter.
        if interface_to_scan and interface_to_scan != interface:
            continue
        # Skip loopback and unconfigured routes.
        if network == 0 or interface == 'lo' or address == '127.0.0.1' or address == '0.0.0.0':
            continue
        # Skip degenerate netmasks (default route, /32 hosts).
        if netmask <= 0 or netmask == 0xFFFFFFFF:
            continue
        # Skip container bridges unless the caller asked for them explicitly.
        if interface != interface_to_scan and (interface.startswith('docker') or interface.startswith('br-')):
            logger.warning("Skipping interface '%s'" % interface)
            continue
        net = _to_CIDR_notation(network, netmask)
        if net:
            for info in scan_ip_host(net, interface, timeout_per_net):
                yield info
if __name__ == '__main__':
    # Fix: scan_all_host() is a generator function, so calling it without
    # iterating it runs nothing.  Drive it to exhaustion to actually scan.
    for _ in scan_all_host():
        pass
|
25,150 | 33fa996ca9db38773afcec89afd74e2f9f00d72e |
from naiback.indicators.ema import EMA
def _calc_rsi(g, l):
if g is None or l is None:
return None
if l == 0:
return 100
return 100 - 100 / (1 + g / l)
def RSI(data, period):
    """Relative Strength Index of `data` using EMA smoothing (alpha=1/period).

    The first delta is defined as 0 so the output has the same length as
    the input series.
    """
    deltas = [0] + [curr - prev for prev, curr in zip(data, data[1:])]
    avg_gain = EMA([max(d, 0) for d in deltas], period, 1. / period)
    avg_loss = EMA([-min(d, 0) for d in deltas], period, 1. / period)
    return [_calc_rsi(g, l) for g, l in zip(avg_gain, avg_loss)]
|
25,151 | a1df3a3b52d4b4e57c822c323cb0f2ab34db384a | import os
from csv import writer
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import cross_validation
import six
import utilities
# Decide read/write mode based on python version
read_mode, write_mode = ('r', 'w') if six.PY2 else ('rt', 'wt')
# Set the path to your consolidated files
path = '/Users/chrysovalantis/Documents/UCY/EPL451/Project'
os.chdir(path)
# File names
ftrain = 'train_consolidation.txt'
ftest = 'test_consolidation.txt'
flabel = 'trainLabels.csv'
fsubmission = 'submission.csv'
labels = utilities.read_labels(flabel)
# Dimensions for train set
ntrain = 10868
nfeature = 16 ** 2 + 1 + 1 # For two_byte_codes, no_que_marks, label
train = utilities.read_train(ntrain, nfeature, labels, ftrain)
X = train[:, :-1]
y = train[:, -1]
del labels
del train
# Parameters for trees
random_state = 5342
n_jobs = 8
verbose = 1
n_estimators = 89
# ExtraTreesClassifier - feature selection
# NOTE(review): estimator.transform() was removed from scikit-learn long ago;
# modern code wraps the fitted estimator in SelectFromModel -- confirm the
# pinned sklearn version before running.
clf1 = ExtraTreesClassifier(criterion='gini', random_state=random_state, n_jobs=n_jobs, verbose=verbose, n_estimators=n_estimators, max_features=None)
clf1.fit(X, y)
X_new = clf1.transform(X, '0.5*median')
X = X_new
# Initialize classifier
clf = KNeighborsClassifier(n_neighbors=20, p=1)
# Start training
print('training started')
############################
# test log loss
# NOTE(review): sklearn.cross_validation was removed in 0.20; the modern
# module is sklearn.model_selection.
print('computing log loss')
kf = cross_validation.KFold(ntrain, n_folds=4)
_logloss = 0.0
for trainIndex, testIndex in kf:
    print("TRAIN:", trainIndex, "TEST:", testIndex)
    X_train, X_test = X[trainIndex], X[testIndex]
    y_train, y_test = y[trainIndex], y[testIndex]
    clf.fit(X_train, y_train)
    pred = clf.predict_proba(X_test)
    _logloss += utilities.log_loss(pred, y_test)
print('log loss = ', _logloss/len(kf))
############################
clf.fit(X, y)
print('training completed')
del X
del y
# Dimensions for train set
ntest = 10873
nfeature = 16 ** 2 + 1 # For two_byte_codes, no_que_marks
test, Ids = utilities.read_test(ntest, nfeature, ftest)
# NOTE(review): train used threshold '0.5*median' but test uses
# '1.25*median'; mismatched thresholds select different feature sets and
# would break predict_proba -- confirm this is intentional.
test = clf1.transform(test, '1.25*median')
# Predict for whole test set
final_pred = clf.predict_proba(test)
del test
# Writing results to file
with open(fsubmission, write_mode) as f:
    fw = writer(f)
    # Header preparation
    header = ['Id'] + ['Prediction' + str(i) for i in range(1, 10)]
    fw.writerow(header)
    for t, (Id, pred) in enumerate(zip(Ids, final_pred.tolist())):
        fw.writerow([Id] + pred)
        if (t + 1) % 1000 == 0:
            print(t + 1, 'prediction written')
print('all done!')
25,152 | a4f3e937e26c7d82d10f8c00331b06374be45728 |
import math
import random
import time
d = 10000  # size of the sorted benchmark array
array = [i for i in range(d)]
# NOTE(review): randint's upper bound is inclusive, so item may be d itself,
# which is NOT present in array -- this exercises each search's missing-item path.
item = random.randint(0, d)
def binary_insert(array, item):
    """Binary-search-style probe for `item` in sorted `array`.

    Returns the index of `item` when a probe lands on it, otherwise the last
    probed (clamped) position once the index stops moving or walks off the
    end.  For an empty array this returns 0.
    """
    previous = None
    # Start at the midpoint with a half-length step.
    search_length = math.floor(len(array) / 2)
    index = search_length
    # Loop until the probe position stops changing between iterations.
    while previous != index:
        # print(item, index, array[index])
        if index == len(array):
            break
        previous = index
        # Halve the step each round, but never below 1 so the probe keeps moving.
        search_length = max(math.floor(search_length / 2), 1)
        down = item < array[index]
        if down:
            index = index - search_length
        elif item == array[index]:
            return index
        else:
            index = index + search_length
        # boundary conditions
        index = max(index, 0)
        index = min(index, len(array))
    return index
def linear_insert(array, item):
    """Linear scan for the insertion position of `item` in sorted `array`.

    Returns i-1 where array[i] is the first element greater than `item`
    (so -1 when item precedes everything, matching the original).

    Fix: the original fell off the end and implicitly returned None when
    `item` was >= every element; len(array) is returned instead, which is
    also correct for an empty array (0).
    """
    for i in range(len(array)):
        if item < array[i]:
            return i-1
    return len(array)
def bilinear_insert(array, item):
    """Linear probe from both ends of sorted `array` simultaneously.

    Returns i-1 for the first front-scan element exceeding `item`, or l-i
    for the first back-scan element below it, halving the expected scan
    length versus a one-sided pass.  Returns None for an empty array.
    """
    l = len(array)
    for i in range(l):
        # Front scan: first element strictly greater than item.
        if item < array[i]:
            return i-1
        # Back scan: first element strictly smaller than item.
        elif item > array[l - i - 1]:
            return l - i
# Time each search strategy on the same (array, item) and print "index seconds".
binary_start = time.time()
binary_index = binary_insert(array, item)
binary_time = time.time() - binary_start
print(binary_index, binary_time)
linear_start = time.time()
linear_index = linear_insert(array, item)
linear_time = time.time() - linear_start
print(linear_index, linear_time)
bilinear_start = time.time()
bilinear_index = bilinear_insert(array, item)
bilinear_time = time.time() - bilinear_start
print(bilinear_index, bilinear_time)
|
25,153 | ed1d5e15e86c2ed3d3927f2c6ebf3174f876dab6 | #coding:utf-8
import torch
from torch import nn
import torch.nn.functional as F
from modules.embedding.embedding import TokenEmbedding
class ModelTrace(nn.Module):
    # Abstract base for torch.jit.trace-able models: optional token embedding,
    # a mock-input helper for tracing, and a checkpoint loader that strips the
    # "model." prefix produced by an outer training wrapper.
    def __init__(self, input_dim=None, vocab_size=None, **kwargs):
        """
        Abstract base class for traceable models.

        input_dim / vocab_size: when both are given, a TokenEmbedding is
        created; kwargs may carry pretrain=True plus `vectors` to load
        pretrained word vectors into it.
        """
        super(ModelTrace, self).__init__()
        if input_dim is not None and vocab_size is not None:
            self.embedding = TokenEmbedding(input_dim, vocab_size)
            # Load pretrained word vectors when requested.
            if "pretrain" in kwargs:
                if kwargs["pretrain"]:
                    self.embedding.from_pretrained(kwargs['vectors'])
    def forward(self, *arg, **kwarg):
        """
        Forward pass of the model; subclasses must override.
        """
        raise Exception("Not implemented!!")
    def mock_input_data(self):
        """
        Mock input used when tracing: two (1, 128) long tensors
        (presumably token ids and a mask -- TODO confirm in subclasses).
        """
        return torch.ones((1, 128), dtype=torch.long), torch.ones((1, 128), dtype=torch.long)
    def load_state_dict_trace(self, state_dict, strict=True):
        """Load a checkpoint whose keys may be prefixed with "model."."""
        true_state_dict = {}
        for k,v in state_dict.items():
            if k.startswith("model."):
                k = k.split(".", 1)[1] # strip the leading "model." segment
            true_state_dict[k] = v
        self.load_state_dict(true_state_dict, strict)
    def get_parameter_names(self):
        """Return the names of all registered parameters."""
        return [name for name, _ in self.named_parameters()]
|
25,154 | c69eb514b33c0e0080b469d1d5ad031783970e60 | """
Created by Florian Fricke.
"""
import csv
def preprocess_one_million_posts_corpus(csvfile, tsvfile):
    """Convert the One Million Posts sentiment CSV into a two-column TSV.

    Rows whose first column is not one of the three known sentiment labels
    are dropped; kept labels are normalized to "neutral"/"negative"/
    "positive" and embedded CR/LF characters are stripped from the text.
    """
    label_map = {
        "SentimentNeutral": "neutral",
        "SentimentNegative": "negative",
        "SentimentPositive": "positive",
    }
    with open(csvfile, 'r', encoding="utf-8") as csvin, open(tsvfile, 'w', encoding="utf-8", newline='') as tsvout:
        reader = csv.reader(csvin)
        writer = csv.writer(tsvout, delimiter='\t',)
        for row in reader:
            normalized = label_map.get(row[0])
            if normalized is None:
                # Unknown label: drop the row entirely.
                continue
            row[0] = normalized
            row[1] = row[1].replace('\r', '').replace('\n', '')
            writer.writerow(row)
    return
# Convert the raw corpus shipped with the repo into the TSV consumed downstream.
preprocess_one_million_posts_corpus(
    "data/labeled_sentiment_data/million_pos_corpus.csv", "data/labeled_sentiment_data/million_pos_corpus.tsv")
|
25,155 | c6a04d7a79ebc7011328b5e03170ab17ce83b885 | #!/usr/bin/env python
import zmq
import sys, os, time, random, signal, json
sys.dont_write_bytecode = True
import logging, labstatslogger, argparse
from daemon import Daemon
from datetime import datetime, timedelta, date
from time import mktime, sleep
import cPickle
directory = "/var/run/labstats/"
timeformat = '%Y-%m-%dT%H:%M:%S'
logger = labstatslogger.logger
'''
Utility functions used by the rest further below
'''
###############################################################################
# Outputs to stdout if --verbose enabled
def verbose_print(message):
    if options.verbose:
        print message
# Outputs to both logging and stdout (if --verbose enabled)
def error_output(message):
    logger.warning(message)
    verbose_print(message)
# Exits script. Will delete daemon's pidfile if --daemon was specified
def clean_quit():
    if options.daemon:
        daemon.delpid()
    # Exit status 1: the subscriber never terminates "successfully".
    exit(1)
# If collector is killed manually, clean up and quit
# NOTE(review): output_checkins() is defined below with two required
# parameters (last_check, check_ins); this zero-arg call would raise
# TypeError when SIGTERM fires -- confirm and fix upstream.
def sigterm_handler(signal, frame):
    error_output("Subscriber killed via SIGTERM")
    output_checkins()
    clean_quit()
# If SIGHUP received, do "soft restart" of sockets and files
# No need to re-input checkins
# NOTE(review): `context` is created as a local inside main(), not at module
# level, so this handler likely raises NameError -- verify before relying
# on SIGHUP restarts.  Also note the `signal` parameter shadows the module.
def sighup_handler(signal, frame):
    error_output("Collector received a SIGHUP")
    context.destroy()
    time.sleep(5)
    main(options.retries, 2000, options.tlimit)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGHUP, sighup_handler)
'''
Reaper functions: check timestamps, read in/out checked-in machines.
By default, the reaper will write out its state every recv()
and will check that all checked-in machines are no older than 20 minutes
(by default) every recv()
'''
###############################################################################
# Verbose prints out check_ins: hostname::timestamp format
def print_checkins(last_check, check_ins):
    verbose_print("Last check was at "+last_check.strftime(timeformat))
    verbose_print("Checked-in machines: ")
    for hostname, timestamp in check_ins.iteritems():
        verbose_print(hostname+"::"+timestamp.strftime(timeformat))
# Outputs pickled (last_check, check_ins) tuple.
# Overwrites existing checked_in file
def output_checkins(last_check, check_ins):
    # Persistence can be disabled via --output.
    if options.output is False:
        return
    try:
        checkinfile = open('checked_in', 'w')
    except Exception as e:
        error_output("Warning: unable to open checked_in logfile. "+str(e))
        return
    try:
        tup = (last_check, check_ins)
        cPickle.dump(tup, checkinfile)
        checkinfile.close()
    # NOTE(review): if dump() raises, the file is left open and truncated --
    # a `with` block would be safer here.
    except Exception as e:
        error_output("Error: could not dump pickled check_in data. "+str(e))
# Read from outputted checked_in file, return last_check and check_ins
def read_checkins():
    if not os.path.isfile('checked_in'): # No checkins.log found
        logger.warning("No checked_in found")
        return (None, {})
    try:
        infile = open('checked_in', 'r')
        last_check, check_ins = cPickle.load(infile)
        infile.close()
        print_checkins(last_check, check_ins) # verbose prints what was stored
        return last_check, check_ins
    except Exception as e:
        # Any corruption falls back to an empty state rather than crashing.
        error_output("Error: could not get last_check and check_ins. "+str(e))
        return (None, {})
# Checks timestamp is within <interval> minutes' time.
# Returns True if timestamp is outdated
def outdated(curtime, timestamp): # pass in type datetime, datetime
    verbose_print("Checking timestamp "+timestamp.strftime(timeformat)+" against current time")
    # Round-trip through timetuple drops sub-second precision before comparing.
    timeobj = datetime.fromtimestamp(mktime(timestamp.timetuple()))
    diff = curtime - timeobj # type timedelta
    return diff >= timedelta(minutes = options.interval)
# Checks timestamps are all <interval> minutes within current time
# Removes machines/timestamps that are outdated
# Set last_check to current GMT (4-5 hour offset)
# NOTE(review): on the very first message last_check may still be None (see
# read_checkins), making `last_check - last_recv` a TypeError -- confirm.
def reap(last_check, last_recv, check_ins):
    # if last check and last recv are eg. >90 mins from each other,
    # stop/skip reaper (because it could be throttling error)
    if last_check - last_recv > timedelta(minutes = options.faulttime):
        error_output("Too much time between now and last_recv, skipping reaping")
        return (last_check, check_ins)
    # converting directly from gmtime to datetime loses DST data
    cur_string = time.strftime(timeformat, time.gmtime())
    last_check = datetime.strptime(cur_string, timeformat)
    new_dict = {}
    deleted = 0
    for hostname, timestamp in check_ins.iteritems():
        if outdated(last_check, timestamp) is True:
            verbose_print(hostname+" is outdated")
            deleted += 1
        else: # not outdated; add back to new_dict
            new_dict[hostname] = timestamp
    verbose_print("Reaped "+str(deleted)+" items from check-ins")
    output_checkins(last_check, new_dict)
    return (last_check, new_dict)
###############################################################################
# Output the json into a log file in /var/log/labstats
# NOTE(review): callers pass a JSON *string*, so `for line in to_write`
# iterates characters (works, but one write per char); mode 'w' overwrites
# the previous log on every call.
def output_log(to_write):
    if not os.path.exists('/var/log/labstats/'):
        try:
            os.mkdir('/var/log/labstats/')
        except OSError as e:
            error_output("Error: could not make /var/log/labstats/. Not sudo/root.")
            return
    try:
        logout = open('/var/log/labstats/subscriber.log', 'w')
        for line in to_write:
            logout.write(line)
        logout.close()
    except OSError as e:
        error_output("Error: could not write to subscriber.log. No root access.")
    except Exception as e:
        error_output("Error: could not write to subscriber.log. "+str(e).capitalize())
def main(ntries, ntime, tlimit):
    """Core receive loop: SUB on :5556 for host JSON, PUSH on :5557 to relay.

    ntries: restarts left (negative = unlimited); ntime: current backoff in
    ms, doubled plus jitter after each ZMQ error; tlimit: backoff ceiling in
    ms (negative = no ceiling).
    NOTE(review): the backoff recurses into main() from the except block, so
    repeated errors grow the call stack -- confirm acceptable.
    """
    last_check, check_ins = read_checkins()
    # Set up ZMQ sockets and connections
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    subscriber.setsockopt(zmq.SUBSCRIBE,'')
    pushsocket = context.socket(zmq.PUSH)
    try:
        subscriber.connect('tcp://%s:5556' % options.server) # Allows multiple connections
    except zmq.ZMQError as e:
        error_output('Error: could not connect to port 5556. '+str(e).capitalize())
        clean_quit()
    try:
        pushsocket.connect('tcp://%s:5557' % options.server)
    except zmq.ZMQError as e:
        error_output('Error: could not connect to port 5557. '+str(e).capitalize())
    # Done initializing sockets, begin listening for messages
    while ntries != 0 and (tlimit < 0 or ntime <= tlimit):
        try:
            # Wait for and receive JSON file
            verbose_print("Waiting for message...")
            message = subscriber.recv_json() # possible source of delay
            recv_str = time.strftime(timeformat, time.gmtime())
            last_recv = datetime.strptime(recv_str, timeformat)
            verbose_print("Received: ")
            verbose_print(message)
            logger.warning("Subscriber received JSON")
            # Send it over to port 5557 to hostinfo-client
            try:
                pushsocket.send_json(message)
                print 'Sent message'
            except zmq.ZMQError:
                error_output("Warning: could not send data to hostinfo service.")
                # skips over without quitting/backoff here
            # Output log if daemonized. Will overwrite.
            if options.daemon and message['success'] is True:
                logger.warning("Dumping JSON into logfile")
                output_log(json.dumps(message))
            # fault protection if socket/subscriber stalls, don't check and delete all checkins
            # Takes timestamp, splits it at '+' (UTC offset unable to convert), converts to datetime
            check_ins[message['hostname']] = datetime.strptime(message['clientTimestamp'].split('+')[0], timeformat)
            print_checkins(last_check, check_ins) # verbose prints only
            last_check, check_ins = reap(last_check, last_recv, check_ins) # will not reap if too far apart
        except zmq.ZMQError as e:
            error_output("Warning: ZMQ error. "+str(e).capitalize()+
                         ". Restarting with "+str(ntries)+" tries left...")
            # Exponential backoff is done here
            context.destroy()
            time.sleep(ntime / 1000)
            ntime = (2 * ntime) + random.randint(0, 1000)
            main(ntries - 1, ntime, tlimit)
        except (KeyboardInterrupt, SystemExit):
            verbose_print('\nQuitting subscriber...')
            clean_quit()
        except OSError as e:
            error_output('Error: '+str(e)+'. Quitting...')
            clean_quit()
        except Exception as e:
            # Catch-all keeps the loop alive; the offending line number is logged.
            verbose_print("Warning: "+str(e)+". Line "+str(sys.exc_info()[-1].tb_lineno))
            logger.warning("Warning: "+str(e)+".")
    # Quits when all restart tries used up
    error_output("Warning: used up restart tries. Quitting...")
    clean_quit()
class subscriberDaemon(Daemon):
    # Daemon wrapper: run() is invoked after daemonization and simply enters
    # the main receive loop with the parsed CLI options.
    def run(self):
        main(options.retries, 2000, options.tlimit)
###############################################################################
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--server", "-s", action = "store", default = 'localhost',
                        dest = "server", help = "Set server to connect to")
    parser.add_argument("--verbose", "-v", action = "store_true", default = False,
                        dest = "verbose", help = "Turns on verbosity flag")
    parser.add_argument("--daemon", "-d", action = "store_true", default = False,
                        dest = "daemon", help = "Turns subscriber into daemon")
    parser.add_argument("--pidfile", "-p", action = "store", default = directory,
                        dest = "directory", help = "Sets location of daemon's pidfile")
    parser.add_argument("--interval", "-i", action = "store", type = int, default = 20,
                        dest = "interval",
                        help = "Sets max time in minutes a system can be dormant before reaping (20 by default)")
    parser.add_argument("--faulttime", "-fault", action = "store", type = int, default = 90,
                        dest = "faulttime",
                        help = "Set minimum difference in minutes of last check and last recv to skip reaping (90 by default)")
    parser.add_argument("--tlimit", "-t", action = "store", type = int, default = -1,
                        dest = "tlimit",
                        help = "Sets maximum restart sleep time in ms (-1 or infinite by default)")
    parser.add_argument("--retries", "-r", action = "store", type = int, default = 3,
                        dest = "retries",
                        help = "Sets maximum number of retries when restarting (3 by default)")
    parser.add_argument("--output", "-o", action = "store_true", default = True,
                        dest = "output",
                        help = "Sets whether or not check-in data will be outputted (true by default)")
    options = parser.parse_args()
    # ntries specified and negative, but no tlimit provided
    if options.retries < 0 and options.tlimit < 0:
        parser.error("must specify --tlimit if --retries is negative")
    verbose_print("Verbosity on")
    if options.daemon:
        if not os.path.exists(options.directory):
            try:
                os.mkdir(options.directory)
            except OSError as e: # bad directory, or no permissions
                error_output("Encountered error while trying to create " + options.directory + ". "
                             + e.args[1].capitalize() + ".")
                exit(1)
        # NOTE(review): the pidfile path uses the module-level `directory`
        # constant, so a custom --pidfile value is created above but then
        # ignored here -- confirm intended.
        daemon = subscriberDaemon(directory+'subscriber.pid')
        daemon.start()
    else:
        main(options.retries, 2000, options.tlimit)
|
25,156 | affaa8f2e5aab2daaaf1898d7249db9a6e37b41b | from dependents import dependents
import getpass
import os
import sys
import platform
from colorama import Fore
import pip
class Setup(object):
    # Installs system modules (via apt or yum) and pip packages listed in
    # the `dependents` mapping.  Python 2 code (print statements,
    # `except Exception, arg`, raw_input).
    curr_os = os.name
    curr_platform = platform.system()
    access_packages_install = True
    # NOTE(review): class-level mutable -- shared by every instance.  It is a
    # LIST, but run_install_module assigns self.error_modules[module] = arg
    # (TypeError on a list) and later unpacks `for module, message in ...`;
    # this container was probably meant to be a dict -- confirm.
    error_modules = []
    def _get_all_modules(self):
        modules = dependents['sys_modules']
        return modules
    def _get_all_packages(self):
        packages = dependents['packages']
        return packages
    def is_linux_posix(self):
        # Returns True on posix Linux, otherwise implicitly None.
        if 'Linux' in self.curr_platform and 'posix' in self.curr_os:
            return True
    def is_linux_fedora(self):
        if 'Linux' in self.curr_platform and 'fedora' in self.curr_platform:
            return True
    def is_root(self):
        if os.geteuid() == 0:
            return True
    def run_install_module(self, cache, modules, it_imptnt):
        # Install each apt module through the shared cache; it_imptnt marks
        # failures as fatal-worthy by recording them in error_modules.
        pkg = lambda module: cache[module]
        for module in modules:
            try:
                curr_pkg = pkg(module)
                if curr_pkg.is_installed:
                    print Fore.BLUE + "%s - already installed" % module
                else:
                    print Fore.GREEN + "\n %s installing..." % module
                    curr_pkg.mark_install()
                    try:
                        cache.commit()
                    except Exception, arg:
                        print Fore.RED + "ERROR, %s module failed install" % module
                        if it_imptnt:
                            self.error_modules[module] = arg
            # NOTE(review): bare except hides real errors (including typos above).
            except:
                print Fore.RED + "Unable to locate package %s " % module
        for module, message in self.error_modules:
            sys.stdout.write(Fore.RED + "Package %s not installed!\n" % module)
    def _install_modules_l_posix(self, it_imptnt):
        import apt
        modules = self._get_all_modules()
        cache = apt.cache.Cache()
        # NOTE(review): this binds the method without calling it -- cache.update()
        # was probably intended.
        update = cache.update
        self.run_install_module(cache=cache, modules=modules, it_imptnt=it_imptnt)
        if self.error_modules:
            self.access_packages_install = False
        return True
    def _install_modules_f_posix(self):
        import yum
        modules = self._get_all_modules()
        yumex = yum.YumBase()
        yumex.conf.assumeyes = True
        for module in modules:
            if yumex.rpmdb.searchNevra(name=module.strip()):
                # NOTE(review): "%s" placeholder is never filled in these two writes.
                sys.stdout.write("%s - already installed")
            else:
                sys.stdout.write("%s installing ...")
                yumex.install(name=module.strip())
    # NOTE(review): mutable default argument packages=[] is shared between
    # calls -- prefer packages=None with a guard.
    def run_install_packeges(self, packages=[]):
        for package in packages:
            try:
                sys.stdout.write("%s searching...\n" % package)
                pip.main(['install', package])
            except Exception, arg:
                print arg
        return True
    def _install_packages(self):
        packages = self._get_all_packages()
        # sys.real_prefix only exists inside a (legacy) virtualenv.
        if hasattr(sys, 'real_prefix'):
            self.run_install_packeges(packages=packages)
        else:
            qust = raw_input("Virtualenv did not activated!Would you like install packages to global?[y/n]")
            if qust.lower() == 'y':
                self.run_install_packeges(packages=packages)
            else:
                sys.stdout.write("Packages not installed!\n")
25,157 | 7332a39db44ecf29a05b436459933238e2dcd429 | """
这是bar模块的文档说明
"""
def bar1():
    """
    Docstring for function bar1 (demonstrates function documentation).
    """
    print("bar1函数执行")
def bar2():
    """
    Docstring for function bar2 (demonstrates function documentation).
    """
    print("bar2函数执行")
|
25,158 | 6ec5ff5c0bf30681c32f5b885b626d95cf93220e | def unflatten(adict, separator='.'):
result = {}
for key, value in adict.items():
_unflatten(key, value, result, separator)
return result
def _unflatten(key, value, out, separator):
key, *rest = key.split(separator, 1)
if rest:
_unflatten(rest[0], value, out.setdefault(key, {}), separator)
else:
out[key] = value |
25,159 | 9886f4381d0a461388c071b8301aa7f4491a2a58 | import requests
import json
import pymongo
import time
import datetime
def get_recent_news_links():
    """Scrape paginated cnstock flash-news items into a timestamped MongoDB
    collection on localhost.

    Walks the ``szkx`` column page by page until the API responds with a
    non-zero ``error`` field, inserting each item document unchanged.
    Network + database side effects only; returns None.
    """
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = client["bluefire"]
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    col = db["cnstock_{}".format(now)]
    # NOTE: 'colunm' looks misspelled but is presumably what the remote API
    # expects — do not "fix" without checking the endpoint.
    endpoint = "http://app.cnstock.com/api/xcx/kx?&colunm=szkx&page={}&num=15"
    pagenum = 1
    while True:
        resp = requests.get(endpoint.format(pagenum))
        resp_json = json.loads(resp.text)
        print(resp_json)
        if resp_json["error"] != 0:
            break
        for item in resp_json["item"]:
            # BUG FIX: Collection.insert() was deprecated and removed in
            # PyMongo 4; insert_one() is the supported equivalent.
            col.insert_one(item)
        pagenum += 1
        time.sleep(1)  # be polite to the API
if __name__ == "__main__":
get_recent_news_links() |
25,160 | 51810642bdb61778950383f0c064e0599ca9c364 | from config import Config
from dotenv import load_dotenv
import os
APP_ROOT = os.path.join(os.path.dirname(__file__), '..')
dotenv_path = os.path.join(__file__, '.env')
load_dotenv(dotenv_path)
from flask import Flask
app = Flask(__name__)
app.config.from_object(Config)
from app import routes
|
25,161 | 2ac8c902a4f0589f54cd74ef14b052b5f8648f6d | """
Write down the function, which reads input line-by-line, and
find maximum and minimum values.
Function should return a tuple with the min and max values.
For example for [1, 2, 3, 4, 5], function should return (1, 5)
We guarantee, that file exists and contains line-delimited integers.
To read file line-by-line you can use this snippet:
with open("some_file.txt") as fi:
for line in fi:
...
"""
import math
from typing import Tuple
def find_maxi_and_min(file_name: str) -> Tuple[int, int]:
    """Return ``(minimum, maximum)`` of the line-delimited integers in *file_name*.

    Each line is parsed exactly once (the original converted up to twice per
    line).  For an empty file the ``(inf, -inf)`` sentinels are returned
    unchanged, matching the original behaviour.
    """
    minint = math.inf
    maxint = -math.inf
    with open(file_name) as fi:
        for line in fi:
            value = int(line)
            if value < minint:
                minint = value
            if value > maxint:
                maxint = value
    return minint, maxint
|
25,162 | b0aa76819efaf459e25379c2af5269b1aa766bec | '''
https://leetcode.com/problems/search-in-rotated-sorted-array
'''
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Search *target* in a rotated sorted array of distinct values.

        Returns the index of target, or -1.  Strategy: first locate the
        rotation pivot (index of the maximum) with a binary search, then
        run a modified binary search on whichever sorted half can contain
        target.  Overall O(log n).
        """
        if len(nums) == 0:
            return -1
        if len(nums) == 1:
            if target == nums[0]:
                return 0
            else:
                return -1
        def _search_max_idx(nums, first_id, last_id):
            # Binary search for the index of the maximum element (the
            # element just before the rotation point).
            if first_id == last_id:
                return first_id
            if last_id - first_id == 1:
                if nums[first_id] > nums[last_id]:
                    return first_id
                else:
                    return last_id
            mid_id = ((last_id - first_id)//2) + first_id
            # Window already sorted ascending -> max is its last element.
            if nums[first_id] < nums[last_id]:
                return last_id
            # Otherwise recurse into the half that straddles the drop.
            if nums[first_id] < nums[mid_id]:
                return _search_max_idx(nums, mid_id, last_id)
            else:
                return _search_max_idx(nums, first_id, mid_id)
        def _search(nums, first_id, last_id, target):
            # Plain binary search on a sorted (non-rotated) window.  The
            # endpoints and midpoint are tested explicitly, which is why the
            # recursive calls may exclude them (first_id+1 / mid_id-1).
            if first_id == last_id:
                if nums[first_id] == target:
                    return first_id
                return -1
            if first_id > last_id:
                return -1
            mid_id = first_id + int((last_id - first_id)/2)
            first = nums[first_id]
            last = nums[last_id]
            mid = nums[mid_id]
            if target == mid:
                return mid_id
            if target == first:
                return first_id
            if target == last:
                return last_id
            if first < target < mid:
                return _search(nums, first_id+1, mid_id-1, target)
            if mid < target < last:
                return _search(nums, mid_id+1, last_id-1, target)
            return -1
        max_id = _search_max_idx(nums, 0, len(nums)-1)
        # max_id == len-1 means the array is not rotated at all; _mid_id is
        # the first index of the second sorted run otherwise.
        if max_id == len(nums)-1:
            _mid_id = len(nums)//2
        else:
            _mid_id = max_id + 1
        # Pick the sorted half whose value range can contain target.
        if nums[0] <= target <= nums[_mid_id-1]:
            return _search(nums, 0, _mid_id-1, target)
        else:
            return _search(nums, _mid_id, len(nums)-1, target)
|
25,163 | a75f4ffe1c82d7d903c69910607fc93a46c6d0bf | ############
## 10001st Prime #
############
import math
def find_a_prime(which_prime):
    """Print and return the *which_prime*-th prime number (1-indexed).

    The first six primes are looked up directly (matching the original's
    seeded dictionary); beyond that, odd candidates are trial-divided by odd
    divisors up to sqrt(candidate) — the original scanned every divisor down
    from candidate/2 and even reset the divisor to candidate-1, making it
    quadratically slower.
    """
    known = {1: 2, 2: 3, 3: 5, 4: 7, 5: 11, 6: 13}
    if which_prime <= len(known):
        prime = known[which_prime]
    else:
        prime_counter = len(known)
        candidate = 13
        while prime_counter < which_prime:
            candidate += 2  # only odd candidates can be prime past 2
            limit = int(math.sqrt(candidate))
            if all(candidate % d for d in range(3, limit + 1, 2)):
                prime_counter += 1
                prime = candidate
    print("{} is the prime you are looking for.".format(prime))
    return prime
find_a_prime(1000)
# NOTE(review): this prints the 1000th prime (7919).  The file header says
# "10001st Prime" and 104743 is the 10001st prime, so the intended call was
# probably find_a_prime(10001).
|
25,164 | a6e6568aab910431774e54ccdcdc45aa4385ce4f | from graph import graph_t, path_t, INVALID_NODE
from pq import DPQ_t
# import sp_algorithms
class sssp_result_t:
    """Result of a single-source shortest-path run.

    Holds the predecessor and distance maps plus bookkeeping needed to
    extract a per-destination sp_result_t.
    """
    def __init__(self, N, src, dst, ncyc, p, d, c):
        self.N = N                      # number of nodes
        self.src = src                  # source node
        self.dst = dst                  # requested destination, or INVALID_NODE
        self.pred = p                   # predecessor map
        self.has_negative_cycle = ncyc
        self.dist = d                   # distance map (weight objects)
        self.relax_count = c            # number of relaxations performed

    def sssp_to_sp_result(self, dst):
        """Extract the single-destination result for *dst*.

        The run must either have been for all destinations (self.dst ==
        INVALID_NODE) or for exactly this dst.
        """
        p = None
        assert (self.dst == INVALID_NODE or self.dst == dst)
        if not self.pred[dst] == INVALID_NODE:
            p = path_t(self.pred, dst)
        r = sp_result_t(self.src, dst, p, self.dist[dst], self.relax_count)
        return r

    def print_sssp_result(self, f):
        """Print the first (up to 10) distances, marking truncation with '...'."""
        N = self.N
        if (N < 10):
            NN = N
        else:
            NN = 10
        f.write("Distmap:")
        for i in range(0, NN):
            f.write(" ")
            self.dist[i].print_weight(f)
        # BUG FIX: the original tested `N < NN`, which can never be true
        # (NN == min(N, 10)), so the "..." truncation marker was never
        # printed.  Truncation happens exactly when N > NN.
        if (N > NN):
            f.write("...\n")
        else:
            f.write("\n")
class sp_result_t:
    """Single-destination shortest-path result; *p* may be None when no
    path to *dst* exists."""

    def __init__(self, src, dst, p, d, c):
        self.src = src
        self.dst = dst
        self.path = p
        self.dist = d
        self.relax_count = c

    def print_sp_result(self, f):
        """Write distance, path and relaxation count to stream *f*."""
        f.write("Distance: ")
        self.dist.print_weight(f)
        f.write("\nPath: ")
        print_path(f, self.path)
        f.write("\n# Relaxed nodes: %d\n" % self.relax_count)
def print_path(f, p):
    """Render path *p* to stream *f*: 'NULL' for None, 'EMP' for an empty
    path, otherwise the node ids concatenated (no separator, no newline)."""
    if p is None:
        f.write("NULL")
        return
    length = p.path_len()
    if length == 0:
        f.write("EMP")
        return
    for idx in range(length):
        f.write("%d" % (p.path_get(idx)))
|
25,165 | dd0a2bcfce8ab2a498abe4f1863bdb042445794c | import unittest
from SharedParkingPlace.tools.fileutil import FileUtil
from HTMLTestRunner import HTMLTestRunner
class Start:
    """Test-suite bootstrapper: loads the test names listed in a config file
    and runs them, writing an HTML report."""
    def start(self, path):
        """Run the tests named in *path* and write report.html to the CWD.

        :param path: config file whose text content names the test
                     class/module to load (read via FileUtil.get_txt).
        """
        suite = unittest.TestSuite()
        loader = unittest.TestLoader()
        test_class_info = FileUtil.get_txt(path)
        tests = loader.loadTestsFromName(test_class_info)
        suite.addTests(tests)
        with open('report.html', 'w') as wf:
            runner = HTMLTestRunner(stream=wf, verbosity=2)
            runner.run(suite)
if __name__ == '__main__':
    Start().start('../conf/case_class_path.conf')
|
25,166 | b2853ea8a912583f0ad76f6619552de0e0535ae2 | from distutils.core import setup
# Packaging metadata for the toggl-cli command-line tool.
setup(name='toggl-cli',
      version='0.0.1',
      description='A simple command-line interface for toggl.com',
      author='D. Robert Adams',
      author_email='d.robert.adams@gmail.com',
      url='http://github.com/drobertadams/toggl-cli/',
      # BUG FIX: ' dateutil' had a stray leading space in the requirement
      # name, which makes the metadata entry unusable.
      requires=['iso8601', 'pytz', 'dateutil', 'requests'],
      scripts=['toggl.py', 'toggl.sh'],
      )
|
25,167 | 6e29cb05376ff384e9fd70ddacf87afb7995d9b7 | from django.shortcuts import render
from django.http import HttpResponseNotFound
from .models import Artist, Song
def artists(request):
    """List artists, optionally filtered by GET parameters.

    ``first_name``: keep artists whose first_name contains the pattern.
    ``popularity``: keep artists with popularity >= the given value.
    Renders 'artists.html' with the queryset as ``artists``.
    """
    artists_qs = Artist.objects.all()
    first_name = request.GET.get('first_name')
    if first_name:
        artists_qs = artists_qs.filter(first_name__contains=first_name)
    popularity = request.GET.get('popularity')
    if popularity:
        artists_qs = artists_qs.filter(popularity__gte=popularity)
    return render(request, 'artists.html', {'artists': artists_qs})
def artist(request, artist_id):
    """Render the detail page for the Artist with primary key *artist_id*.

    Renders 'artist.html' with the object as ``artist``.
    """
    artist_obj = Artist.objects.get(id=artist_id)
    return render(request, 'artist.html', {'artist': artist_obj})
def songs(request, artist_id=None):
    """List songs, each annotated with its Artist object.

    ``artist_id`` (URL, optional): restrict to that artist's songs.
    ``title`` (GET, optional): keep songs whose title contains the pattern.
    Renders 'songs.html' with the list as ``songs``; each song gets a
    ``song.artist`` attribute bound for template use.
    """
    songs_qs = Song.objects.all()
    if artist_id is not None:
        songs_qs = songs_qs.filter(artist_id=artist_id)
    title = request.GET.get('title')
    if title:
        songs_qs = songs_qs.filter(title__contains=title)
    songs_list = list(songs_qs)
    for song in songs_list:
        # Attach the matching Artist so templates can read song.artist.
        song.artist = Artist.objects.get(id=song.artist_id)
    return render(request, 'songs.html', {'songs': songs_list})
|
25,168 | 3df19b767fc5b006c8fb698c9be50c04ed167ce8 | """
Copyright (C) 2015-2016, Juniper Networks, Inc.
All rights reserved.
Authors:
jpzhao, bphillips, ajaykv
Description:
Toby Network Event Engine.
"""
# pylint: disable=locally-disabled,undefined-variable,invalid-name
import re
#import copy
import os
import sys
#import types
#import pprint
import time
import importlib
import inspect
from robot.libraries.BuiltIn import BuiltIn as RobotBuiltIn
from jnpr.toby.utils.Vars import Vars
from jnpr.toby.engines.events.event_engine_utils import elog
import jnpr.toby.engines.events.event_engine_utils as ee_utils
import jnpr.toby.engines.config.config_utils as config_utils
class eventEngine(object):
    """
    Class of Toby Event Engine
    - A network event is a disruption to normal network operational environment('steady states'),
      which usually causes topology/routing information changes, that requires the network
      to react to the changes in order to maintain network connectivity.
    - triggering an event in a testbed will test the DUTs capability to recover from the event(s).
    - Generic Event engine handles all event actions in a consistent way
      . standard logging for easy debugging
      . arg handling
      . iteration/duration/timeout/exception handling
    - Extensible to add new events in a consistent style
    """
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
    def __init__(self):
        # get the event/check methods/functions from a register file
        #self.events_registered = self.register_event()
        self.events_registered = {}
        self.response = ''
        self.status = ''
        #self.error_msg = ''
        self.time_spent = None
        self.src_path = None
        # Suite directory when running under Robot, else the CWD.
        if Vars().get_global_variable('${SUITE_SOURCE}'):
            self.src_path = os.path.dirname(Vars().get_global_variable('${SUITE_SOURCE}'))
        else:
            self.src_path = os.getcwd()
        # the built-in event yaml file are in the same location of eventEngine.
        self.ee_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) #
        if self.ee_path not in sys.path:
            sys.path.append(self.ee_path)
    def _call_keyword(self, *args, **kwargs):
        '''
        call Robot keyword inside event engine

        The keyword name arrives in kwargs['ROBOT_keyword']; all other
        kwargs are forwarded as "key=value" strings.  Returns the keyword's
        pass/fail status, or False when no keyword was given.
        '''
        # TBD: if it is Toby keyword, call them directly with Python code?
        my_args = []
        keyword = None
        if kwargs:
            for key, val in kwargs.items():
                if key == 'ROBOT_keyword':
                    keyword = val
                else:
                    my_args.append('{}={}'.format(key, val))
        if keyword is None:
            elog('_call_keyword(): no keyword passed in via ROBOT_keyword')
            return False
        # run user picked Robot keyword
        elog('debug', '====== Robot keyword {} with args: {}'.format(keyword, my_args))
        res = RobotBuiltIn().run_keyword_and_return_status(keyword, *my_args)
        return res
    def _update_events(self, events):
        '''
        update events into ee's attribute 'events_registered'

        For each event/action the registered 'method' string is resolved to
        a callable: "pkg.Class().method" -> bound class method,
        "ROBOT:<keyword>" -> self._call_keyword, anything else -> a plain
        function looked up in its module (default module 'triggers').
        '''
        registered_events = {}
        for event in events:
            registered_events[event] = {}
            for action in events[event]: # trigger or check
                if events[event][action].get('method'):
                    method_name_with_path = events[event][action]['method'].strip('\'\"')
                    func_name = method_name_with_path
                    #module_name = 'jnpr.toby.engines.events.triggers'
                    module_name = 'triggers'
                    if '.' in method_name_with_path:
                        module_name, func_name = method_name_with_path.rsplit('.', 1)
                    if module_name.endswith(')'):
                        # dealing with a class method here
                        class_pkg, class_name = module_name.rsplit('.', 1)
                        class_name = class_name.rstrip(r'()')
                        class_obj = getattr(importlib.import_module(class_pkg), class_name)()
                        method = getattr(class_obj, func_name)
                        config_utils.nested_set(registered_events[event],
                                                [action, 'type', 'class_obj'], class_obj)
                    elif re.match(r'ROBOT:', func_name):
                        # A Robot keyword
                        # 1. any Robot keyword user defined : done
                        # 2. Todo: Toby keywords, pre-imported?( verify, execute_cli_.., )
                        # any benefit of doing that?
                        method = self._call_keyword
                        keyword = re.sub(r'ROBOT:', '', func_name).strip()
                        config_utils.nested_set(registered_events[event],
                                                [action, 'type', 'ROBOT_keyword'], keyword)
                    else:
                        # a function
                        method = getattr(importlib.import_module(module_name), func_name)
                        config_utils.nested_set(registered_events[event],
                                                [action, 'type', 'function'], func_name)
                    config_utils.nested_set(registered_events[event], [action, 'method'], method)
                if events[event][action].get('args'):
                    # tbd: processing tv/cv in args?
                    # proc_args =
                    config_utils.nested_set(registered_events[event], [action, 'args'],
                                            events[event][action]['args'])
        #update registered events
        config_utils.nested_update(self.events_registered, registered_events)
        return registered_events
    def register_event(self, *args, **kwargs):
        '''
        register events

        With no prior registrations the builtin YAML is loaded first.  A
        'file' kwarg merges another YAML file; otherwise the first positional
        arg is treated as an event name (normalized to lower_snake_case) and
        trigger/check method+args kwargs are attached to it.
        '''
        events = {}
        if not self.events_registered:
            # import Event Engine BuiltIn events file
            print('+++++++++++++++ builtin event file path', self.ee_path)
            events = config_utils.read_yaml(\
                file=self.ee_path + '/Builtin_Events.yaml')
            self._update_events(events)
        if kwargs.get('file'):
            events.update(config_utils.read_yaml(file=kwargs['file']))
            self._update_events(events)
        elif args:
            # expecting one arg as event name
            the_event = args[0].lower().strip('\'\" ')
            if ' ' in the_event:
                the_event = '_'.join(the_event.split())
            if not events.get(the_event):
                # a new event
                t.log('\n=== adding a new event: {}'.format(the_event))
                events[the_event] = {}
            else:
                t.log('debug', 'updating existing event: ' + the_event)
            event_args = ('trigger_method', 'trigger_args', 'check_method', 'check_args')
            for arg_key in kwargs:
                if arg_key in event_args:
                    # 'trigger_method' -> nested key ['trigger', 'method'], etc.
                    key_list = arg_key.split('_')
                    config_utils.nested_set(events[the_event], key_list, kwargs[arg_key])
            self._update_events(events)
        return self.events_registered
    def _get_event_functions(self, event):
        '''
        only 'registered events with methods, and CLI/VTY commands are accepted
        so that no user defined config can 'sneak in' via event for example

        Raises when the (normalized) event name is unknown.
        '''
        nevent = re.sub(r'\s+', '_', event.strip()).lower()
        if self.events_registered.get(nevent):
            return self.events_registered[nevent]
        else:
            raise Exception('cannot find this event: ' + event)
            # maybe just return None
    def _process_method_args(self, event, trigger_method, **kwargs):
        '''
        process args and find missing args of a trigger method

        Builds the kwargs dict actually passed to the method: caller kwargs
        win, registered "name=default" entries fill gaps, and a missing
        mandatory argument raises.
        '''
        trg_kwargs = {}
        if trigger_method.get('args'):
            if '**kwargs' in trigger_method['args']:
                trg_kwargs.update(kwargs)
            for default_targ in trigger_method.get('args'):
                targ = default_targ.strip(' \'\"')
                if re.match(r'\*args|\*\*kwargs', targ):
                    continue
                tval = None
                if '=' in default_targ:
                    matched = re.match(r'([^=]+)=([^=]+)$', targ)
                    targ = matched.group(1)
                    tval = matched.group(2)
                if targ in kwargs:
                    trg_kwargs.update({targ: kwargs[targ]})
                elif tval is not None:
                    trg_kwargs.update({targ: tval}) # take registered default value
                else:
                    raise Exception('missing mandatory argument "{}" in event "{}"'.\
                        format(default_targ, event))
        ## adjust args depending on the type of method
        if trigger_method['type'].get('ROBOT_keyword'):
            trg_kwargs['ROBOT_keyword'] = trigger_method['type']['ROBOT_keyword']
        return trg_kwargs
    ##### exposed keyword and high level functions
    def run_event(self, event, *args, **kwargs):
        """
        This is the exposed Event keyword to toby/Robot
        - Note: Only take the trigger args and check args as named args

        Runs the event's trigger (and optional check, when 'enable_check')
        for 'iteration' times, or until 'duration' seconds elapse; returns
        False if any iteration failed.
        """
        if not self.events_registered:
            # get the BuiltIn list of events
            self.events_registered = self.register_event()
        iteration = int(kwargs.get('iteration', 1))
        device = kwargs.get('device', None)
        #interface = kwargs.get('interface', None)
        kwargs['me_object'] = ee_utils.me_object()
        dev_name = ''
        dev_tag = ''
        if device:
            dh = ee_utils.device_handle_parser(device=device)
            #if dh.__dict__.get('TE') is None:
            # dh.TE = {}
            kwargs['dh'] = dh
            dev_name = ee_utils.get_dh_name(dh)
            dev_tag = ee_utils.get_dh_tag(dh)
        # get all the functions related to this event
        func_list = self._get_event_functions(event)
        trg_kwargs = {}
        if func_list['trigger'].get('args'):
            trg_kwargs = self._process_method_args(event, func_list['trigger'], **kwargs)
        chk_kwargs = {}
        if kwargs.get('enable_check'):
            if func_list.get('check') and func_list['check'].get('args'):
                chk_kwargs = self._process_method_args(event, func_list['check'], **kwargs)
        start_time = time.time()
        elog('==== event <{}> starts:'.format(event))
        # find duration/iteration.
        interval = float(kwargs.get('interval', 5)) # unit second. 0.01 also works( msec)
        # up+down considered one iteration
        duration = kwargs.get('duration', None)
        if duration is not None:
            duration = float(duration)
            iteration = 99999999 # duration takes control
        # execute
        # todo: running in parallel, (noise at back ground)
        # todo: multiple events
        # todo: as a seperate tool, or multi-thread, or async?
        error = 0
        for itr in range(iteration):
            elog('== BEGIN: Event {} # {}: {}({})'.format(event, str(itr+1), dev_tag, dev_name), \
                annotate=True, **kwargs)
            #elog('== BEGIN: Event {} #{}: {}({})/{}'.format(event, str(itr+1), dh.tag, \
                #dh.name, ifd), annotate=True, **kwargs)
            #look for function first
            kwargs['event_iteration'] = itr + 1
            res = func_list['trigger']['method'](**trg_kwargs)
            t.log('debug', 'run_event trigger returned {}'.format(str(res)))
            if res is False:
                error += 1
            elif not self._confirm_event_state(event, check_kwargs=chk_kwargs, **kwargs):
                error += 1
            if iteration > 1 and itr < iteration - 1:
                t.log('debug', 'wait for {} seconds before next iteration'.format(str(interval)))
                time.sleep(interval)
            if duration and time.time() - start_time > duration:
                print('Event duration is up')
                break
            #if time.time() - start_time > timeout
            # break
        end_time = time.time()
        self.time_spent = end_time - start_time
        elog('==== END: Event <{0}>, took {1:.2f} seconds'.format(event, self.time_spent), \
            annotate=True, **kwargs)
        # return True/false or raise exception when failed??
        #ret = False if error > 0 else True
        if error > 0:
            # Todo: an eventException to standardize error msg
            #raise Exception('event failed with error: ' + str(error))
            elog('error', 'event failed with error: ' + str(error))
            return False
        return True
    def _confirm_event_state(self, event, **kwargs):
        '''
        check to confirm event status

        Polls the event's check method every 'check_interval' seconds for up
        to 'timeout' seconds.  Returns the elapsed time (truthy) on success,
        False on timeout, True when the event has no check or checking is
        disabled.
        '''
        if not kwargs.get('enable_check'):
            return True
        self.status = True
        func_list = self._get_event_functions(event)
        st_check = False
        if func_list.get('check'):
            check_kwargs = kwargs.get('check_kwargs', {})
            # time in float means it can take millisecond
            timeout = float(kwargs.get('timeout', 30))
            check_interval = float(kwargs.get('check_interval', 1))
            start = time.time()
            while time.time() - start < timeout:
                res = func_list['check']['method'](**check_kwargs)
                if res:
                    t.log('debug', 'state confirmed')
                    duration = time.time() - start
                    st_check = duration
                    t.log('takes {} for {} to finish'.format(duration, event))
                    break
                time.sleep(check_interval)
            else:
                # while/else: only runs when the loop timed out (no break).
                elog('error', '== Check event {} status failed'.format(event))
                st_check = False
        else:
            t.log('warn', 'No check function for {}, skip'.format(event))
            st_check = True
        return st_check
|
25,169 | f98520e1e529e598d33fad72f964191c8d4f87bc | from sys import argv
#import os,math,ast,itertools
file_name = argv[1]
fp = open(file_name,'r+')
contents = [list(map(int,line.strip('\n').split(','))) for line in fp]
#print (contents)
def nearest_pow_of_2(lst):
    """Given lst == [x, n], return the smallest multiple of n that is >= x,
    without using division or modulo (per the exercise constraints)."""
    target, base = lst[0], lst[1]
    factor = 1
    multiple = base
    while target > multiple:
        factor += 1
        multiple = base * factor
    return multiple
if __name__=='__main__':
for item in contents:
num = nearest_pow_of_2(item)
print (num)
'''
Given numbers x and n, where n is a power of 2, print out the smallest multiple of n which is greater than or equal to x. Do not use division or modulo operator.
Input sample:
The first argument will be a path to a filename containing a comma separated list of two integers, one list per line. E.g.
13,8
17,16
Output sample:
Print to stdout, the smallest multiple of n which is greater than or equal to x, one per line. E.g.
16
32'''
|
25,170 | 0cbc6636fc68bc39e0feb518df9b2c3e8ad45b08 | """Base Schemas and classes for Kaltura AT Classes"""
from collections import OrderedDict
from Products.Archetypes import atapi
from AccessControl import ClassSecurityInfo
from rfa.kaltura.content import vocabularies
from rfa.kaltura.credentials import getCredentials
from rfa.kaltura.kutils import kSetStatus
# Base Archetypes schema shared by Kaltura content types: the server-assigned
# entry id (read-only), the player to render with, and the partner id.
KalturaBaseSchema = atapi.Schema(
    (atapi.StringField('entryId',
                       searchable=0,
                       mode='r',
                       # BUG FIX: was misspelled "accesssor", so Archetypes
                       # silently ignored it and the field never used
                       # getEntryId as its accessor.
                       accessor="getEntryId",
                       widget=atapi.ComputedWidget(label="Entry Id",
                                                   description="Entry Id set by Kaltura after upload (read only)",
                                                   visible = { 'edit' :'visible', 'view' : 'visible' },
                                                   i18n_domain="kaltura_video"),
                       ),
     #sub-classes that use this schema may alter this field
     # to use a selection widget and a vocabulary
     # if not, it's a simple string field where you type in the playerId (aka ui_conf) manually.
     atapi.StringField('playerId',
                       searchable=0,
                       accessor="getPlayer",
                       mutator="setPlayer",
                       mode='rw',
                       default_method="getDefaultPlayerId",
                       vocabulary_factory="rfa.kaltura.video_players",
                       widget=atapi.SelectionWidget(label="Player",
                                                    label_msgid="label_kplayerid_msgid",
                                                    description="Choose the player to use",
                                                    description_msgid="desc_kplayerid_msgid",
                                                    i18n_domain="kaltura_video"),
                       ),
     atapi.IntegerField('partnerId',
                        searchable=0,
                        mode='rw',
                        default_method="getDefaultPartnerId",
                        widget=atapi.IntegerWidget(label="Partner Id",
                                                   label_msgid="label_kpartnerid_msgid",
                                                   description="Kaltura Partner Id (use default if unsure)",
                                                   description_msgid="desc_kpartnerid_msgid",
                                                   i18n_domain="kaltura_video"),
                        ),
     )
    )
#this seems misnamed
# Archetypes schema for Kaltura categorization metadata: the server-side
# category selection and free-form keyword tags.
KalturaMetadataSchema = atapi.Schema(
    (atapi.LinesField('categories',
                      multiValued = True,
                      searchable=0,
                      required=False,
                      vocabulary="getCategoryVocabulary",
                      accessor="getCategories",
                      mutator="setCategories",
                      widget=atapi.MultiSelectionWidget(label="Categories",
                                                        label_msgid="label_kvideofile_categories",
                                                        description="Select video category(ies) this playlist will provide",
                                                        description_msgid="desc_kplaylist_categories",
                                                        i18n_domain="kaltura_video"),
                      ),
     atapi.LinesField('tags',
                      multiValued = True,
                      searchable=0,
                      required=False,
                      accessor="getTags",
                      mutator="setTags",
                      widget=atapi.LinesWidget(label="Tags",
                                               label_msgid="label_kvideofile_tags",
                                               description="keyword tag(s) this playlist will provide (one per line)",
                                               description_msgid="desc_kplaylist_tags",
                                               i18n_domain="kaltura_video"),
                      ),
     )
    )
###XXX Todo: create base class ExternalMediaEntry
##based off of http://www.kaltura.com/api_v3/testmeDoc/index.php?object=KalturaExternalMediaEntry
class KalturaContentMixin(object):
    """Mixin giving Plone content types access to their Kaltura media entry
    (entry id, categories, tags, moderation status)."""
    security = ClassSecurityInfo()
    # NOTE(review): categories/tags are mutable class attributes — shared
    # across instances until shadowed.  Confirm Archetypes storage actually
    # shadows them per instance before relying on that.
    KalturaObject = None
    categories = {}
    tags = []
    # NOTE(review): never read anywhere — instances use
    # self._categoryVocabulary (camelCase, set in __init__) instead.
    # One of the two spellings is presumably dead.
    _category_vocabulary = None
    def __init__(self, oid, **kwargs):
        super(KalturaContentMixin, self).__init__(oid, **kwargs) #xxx go away
        self.KalturaObject = None
        self._categoryVocabulary = None #Cached vocabulary - should not be persistent.
    security.declarePrivate("setKalturaObject")
    def setKalturaObject(self, obj):
        # Store the Kaltura media entry and stamp it with our Plone UID so
        # the two sides can be correlated.
        self.KalturaObject = obj
        self.KalturaObject.referenceId = self.UID()
    security.declarePublic("getEntryId")
    def getEntryId(self):
        # Kaltura entry id, or None before upload.
        if self.KalturaObject is not None:
            return self.KalturaObject.getId()
        else:
            return None
    entryId = property(getEntryId)
    security.declarePrivate('getDefaultPartnerId')
    def getDefaultPartnerId(self):
        return getCredentials()['PARTNER_ID']
    def getTags(self):
        return self.tags
    def setTags(self, tags):
        self.tags = tags
    def getCategories(self):
        """ Returns a list of the category id's for Kaltura Queries"""
        return self.categories.keys()
    def getCategoriesDict(self):
        # Full id -> name mapping (ordered).
        return self.categories
    def setCategories(self, categories):
        """Sets the selected categories for this object in plone
        'categories' is internally stored as a dictionary:
        keys are id's, values are names
        """
        vocabulary = dict(self.getCategoryVocabulary())
        self.categories = OrderedDict()
        for catId in categories:
            name = vocabulary.get(catId, None)
            if name is not None:
                self.categories[catId] = vocabulary[catId]
            else:
                # Sliently ignore that category id, it doesn't have a matching category name.
                # I apologize if you found this comment after hours of digging around code.
                pass
    def getTagVocabulary(self):
        return vocabularies.getTagVocabulary()
    def getCategoryVocabulary(self, parent=None):
        """This gets the entire list of avaiable categories from the Kaltura server"""
        self._categoryVocabulary = vocabularies.getCategoryVocabulary(parent)
        return self._categoryVocabulary
    def setModerationStatus(self, status):
        """given a kaltura video object, set the status on the media entry
        and update the server
        See KalturaClient.Core.KalturaEntryModerationStatus for enum definitions
        """
        kSetStatus(self, status)
    def getModerationStatus(self):
        # NOTE(review): indexes the status with ['value'] — assumes the
        # Kaltura client returns a mapping here; confirm against the client
        # version in use.
        status = self.KalturaObject.getModerationStatus()
        return status['value']
25,171 | 4cee1102a1ef1dd60bc0d501d6a45695aacc36b3 | N=int(input())
A=[0]+list(map(int,input().split()))+[0]
x=[0]
x[0]=A[0]
for i in range(2,N+3):
x.append(x[i-2]+abs(A[i-1]-A[i-2]))
for i in range(1,N+1):
ans=0
ans+=x[i-1]+abs(A[i-1]-A[i+1])+x[-1]-x[i+1]
print(ans) |
25,172 | 83c0dcb088f0f8df1eab1a146a9d9976383e959a | from typing import List
import dill
import pickle
from tspec.search.generic import GenericSearch
from tspec.search.helpers import *
from tspec.reporter import GenericReporter
from tspec.loader import TNode
class ExhaustiveSearch(GenericSearch):
    """Grid search over every parameter combination of a tspec graph,
    checkpointing interpreter/report state per script segment so shared
    prefixes are executed only once."""
    def __init__(self, spec: str, reporter: GenericReporter, obj, cont=False):
        # cont: resume from the reporter's last recorded result for a path.
        super().__init__(spec, reporter, obj)
        self.cont = cont
    def dfs(self, nodes: List[TNode], path: str, pdims: List[int]):
        """Walk the spec tree; at each leaf, enumerate (odometer-style) every
        parameter selection along the root-to-leaf chain and run it.

        nodes: current root-to-leaf chain; path: hash of that chain;
        pdims: per-parameter dimension sizes accumulated so far.
        """
        node = nodes[-1]
        path += node.hash()
        # Copy dimensions
        pdims = pdims[:]
        pdims += node.get_dims()
        # Leaf node
        if len(node.children) == 0:
            # build script
            psel = [0] * len(pdims)
            # state[i]/reports[i]: serialized interpreter globals/locals and
            # reporter metrics after running segment i-1 — lets a parameter
            # change at position `pos` restart execution mid-chain.
            state = [0] * (len(nodes) + 1)
            state[0] = dill.dumps({'global': dict(), 'local': dict()})
            reports = [0] * (len(nodes) + 1)
            reports[0] = pickle.dumps(dict())
            # start from last result
            if self.cont:
                last = self.reporter.last_in_path(path)
            else:
                last = None
            if last is not None:
                # Map each previously-reported parameter value back to its
                # index so psel resumes at that combination.
                lpos = 0
                for n in range(len(nodes)):
                    pl = nodes[n].get_dims()
                    for p, p_cnt in enumerate(pl):
                        last_param = last[lpos + p]
                        last_pick = None
                        for param in range(p_cnt):
                            if nodes[n].get_pval_at(p, param) == last_param:
                                last_pick = param
                        psel[lpos + p] = last_pick
                    lpos += len(pl)
                # cnt is messed up here
                # Will rerun the last one
            c = 0
            cnt = 0
            TOT = 1
            for d in pdims:
                TOT *= d
            pos = -1
            # c becomes 1 when the odometer wraps past the first digit
            # (i.e. every combination has been tried).
            while c == 0 and not self._stop:
                print("{} {} : {:.2%}".format(
                    self.best['obj'] if self.best else "None", path, cnt / TOT))
                b = 0
                val = list()
                cur = 0
                # Skip segments entirely before the changed position `pos`;
                # their cached state/reports are reused below.
                while b + len(nodes[cur].get_dims()) <= pos:
                    pl = len(nodes[cur].get_dims())
                    val += nodes[cur].get_pval(psel[b:(b + pl)])
                    b += pl
                    cur += 1
                pos = len(pdims) - 1
                # setup program state
                pstate = dill.loads(state[cur])
                # setup report state
                self.reporter.metrics = pickle.loads(reports[cur])
                try:
                    for n in range(cur, len(nodes)):
                        pl = len(nodes[n].get_dims())
                        val += nodes[n].get_pval(psel[b:(b + pl)])
                        scr = nodes[n].compile(psel[b:(b + pl)])
                        b += pl
                        if runseg(self.reporter, scr, pstate):
                            state[n + 1] = dill.dumps(pstate)
                            reports[n + 1] = pickle.dumps(self.reporter.metrics)
                        else:
                            # Segment bailed out: prune — advance the last
                            # parameter of this segment instead of deeper ones.
                            pos = b - 1
                            raise ScriptExit()
                    self.update(nodes, val)
                    self.reporter.finalize(path, val)
                except ScriptExit:
                    pass
                # Count how many combinations the (possibly pruned) step
                # covered: the product of all dimensions after `pos`.
                step = 1
                for n in pdims[(pos + 1):]:
                    step *= n
                cnt = cnt + step
                self.reporter.clear()
                # prepare for next
                c = 1
                while c > 0 and pos >= 0:
                    psel[pos] += c
                    if psel[pos] == pdims[pos]:
                        psel[pos] = 0
                        c = 1
                    else:
                        c = 0
                    pos -= 1
                pos += 1
            self.reporter.flush()
        else:
            # Internal node continue dfs
            for c in node.children:
                nodes.append(c)
                self.dfs(nodes, path, pdims)
                nodes.pop()
    def run(self):
        """Run the exhaustive search over the whole spec graph."""
        super().run()
        # Path explore
        self.dfs([self.graph.root], "", list())
        self.reporter.flush()
        super().stop()
|
25,173 | c56f7c3f663bcb2b229f3b414850c6d7c7b8780d | #printing values
# Demo of the four basic arithmetic operations on fixed operands.
a=20
b=30
c=10
y=a+b+c
x=a-b-c
z=a*b
r=b/a  # true division: 1.5
print("\naddition=",y, "\nsubtraction=",x, "\nmultiplication=",z, "\ndivision=",r)
25,174 | 3d9e04deaeacf22ebe4f715700f92a8ca62894d0 | from app.Extension.Encryption.TeaAlgo import tea
"""
关键的key
"""
class UniqueKey:
    """Wraps the TEA cipher with this application's fixed key."""

    _key = [123, 456, 789, 150]

    def encode(self, args):
        """Encrypt *args* with TEA in place and return the resulting
        values joined as 'v1-v2-...-' (note the trailing dash)."""
        values = list(args)
        tea.encode(values, self._key)
        return "".join(str(v) + "-" for v in values)

    def decode(self, code):
        """Not implemented yet."""
        pass


# singleton instance
uniqueKey = UniqueKey()
25,175 | 07df83ad0575dfdb5874975a646f94bb8dc28cc2 | from math import cos,sin
import rhinoscriptsyntax as rs
rs.DeleteObjects(rs.AllObjects('select'))#delete all existing objects
def spiro(R,r,d,angle,moveMe):
    """Advance the spirograph: rotate the inner gear/pen group by *angle*
    and record the pen's current point.

    R/r: outer/inner gear radii, d: pen offset from the inner gear center.
    """
    # Hypotrochoid coordinates for this angle (classic spirograph formula).
    x = (R-r) * cos(angle) - d * cos(((R-r)/r)*angle)
    y = (R-r) * sin(angle) - d * sin(((R-r)/r)*angle)
    # NOTE(review): x and y are computed but never used — the pen position is
    # taken from the rotated Rhino object instead.  Confirm whether they were
    # meant to drive a MoveObject call (see the commented-out vector code in
    # the caller).
    rs.RotateObject(moveMe,basePointInner,angle)
    pointsList.append(pen)
    #print('spiro')
"""define gears and pen sizes and locations"""
outerGearRadius = 10
innerGearRadius = 4
pointDistance = 3 #how far the pen is placed from the center of the innerGear
baseX = 0
baseY = 0
baseZ = 0
basePointOuter = (baseX,baseY,baseZ)
basePointInner = ((baseX + (outerGearRadius - innerGearRadius)),baseY,baseZ)
basePointPen = ((baseX + (outerGearRadius - innerGearRadius)+ pointDistance),baseY, baseZ)
numRotates = 5
angle = 15
pointsList = []
"""Insure proportions are correct"""
if outerGearRadius <= innerGearRadius or innerGearRadius <= pointDistance:
print ('error')
#stop/exit program somehow
"""draw the gears and pen and create an easily movable group"""
outerGear = rs.AddCircle(basePointOuter, outerGearRadius)
innerGear = rs.AddCircle(basePointInner,innerGearRadius)
pen = rs.AddPoint(basePointPen)
moveMe = [innerGear, pen]
"""Rotate the inner gear and pen"""
for i in range(0,numRotates):
spiro(outerGearRadius,innerGearRadius,pointDistance,angle,moveMe)
#rs.MoveObject(moveMe, vector?)
#rotation = rs.VectorCreate(startpoint, endpoint)
for x in range(0,len(pointsList)-1):
if x == len(pointsList)-1:
rs.AddLine(pointsList[x],pointsList[0])
else: rs.AddLine(pointsList[x],pointsList[x+1]) |
25,176 | 3f21332ead5380b1e55cbe0725de86e8c25dc40a | # coding: utf-8
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def __repr__(self):
return "<val={}>".format(self.val)
__str__ = __repr__
class Solution:
def preorder_traversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
stack = list()
rtn = list()
stack.append(root)
while stack:
cur = stack.pop()
rtn.append(cur.val)
if cur.right:
stack.append(cur.right)
if cur.left:
stack.append(cur.left)
return rtn
def preorder_traversal_recursive(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
rtn = list()
def helper(node):
rtn.append(node.val)
if node.left:
helper(node.left)
if node.right:
helper(node.right)
helper(root)
return rtn
def inorder_traversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
rtn = list()
stack = list()
stack.append(root)
while stack:
if stack[-1].left:
stack.append(stack[-1].left)
else:
cur = stack.pop()
while not cur.right and stack:
rtn.append(cur.val)
cur = stack.pop()
rtn.append(cur.val)
if cur.right:
stack.append(cur.right)
return rtn
def inorder_traversal_recursive(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if not root:
return []
rtn = []
# Note:
# helper_1和helper_2的逻辑本质上是一样的, 递归的处理left, root, right,区别在于
# 判定有效节点的位置,helper_1在每次进入helper_1后判断,导致无效None节点
# 多调用一次helper_1,使得其解的时间效率降低,而helper_2则在调用helper_2之前
# 判断,通过预判减少了无效递归的次数,从而提高了效率。
def helper_1(node):
if node:
helper_1(node.left)
rtn.append(node.val)
helper_1(node.right)
def helper_2(node):
if node.left:
helper_2(node.left)
rtn.append(node.val)
if node.right:
helper_2(node.right)
# helper_1(root) # 低效
helper_2(root) # 高效
return rtn
def postorder_traversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
rtn = list()
if not root:
return rtn
stack = list()
stack.append(root)
while stack:
cur = stack.pop()
rtn.append(cur.val)
left, right = cur.left, cur.right
if left:
stack.append(left)
if right:
stack.append(right)
return rtn[::-1]
def postorder_traversal_recursive(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
rtn = []
if not root:
return rtn
def helper(node):
if node.left:
helper(node.left)
if node.right:
helper(node.right)
rtn.append(node.val)
helper(root)
return rtn
def level_order_traversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
rtn = []
if not root:
return rtn
queue = list()
queue.append(root)
while queue:
cur_val = list()
cur_level = queue.copy()
queue.clear()
for x in cur_level:
cur_val.append(x.val)
if x.left:
queue.append(x.left)
if x.right:
queue.append(x.right)
rtn.append(cur_val)
return rtn
def max_depth_top_down(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.depth = 0
# def helper(root, d):
# self.depth = max(self.depth, d+1)
# if root.left:
# helper(root.left, d+1)
# if root.right:
# helper(root.right, d+1)
# helper(root, 0)
def helper(root, d):
if not root:
return
if not (root.left or root.right):
self.depth = max(self.depth, d)
helper(root.left, d+1)
helper(root.right, d+1)
helper(root, 1)
return self.depth
def max_depth_bottom_up(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def helper(node):
if not node:
return 0
left_depth = helper(node.left)
right_depth = helper(node.right)
return max(left_depth, right_depth) + 1
return helper(root)
if __name__ == '__main__':
    # Ad-hoc demo of all traversal methods on three small trees.
    # Tree 1: 1 -> right 2 -> left 3 (no other children).
    node_3 = TreeNode(3)
    node_2 = TreeNode(2)
    node_2.left = node_3
    node_1 = TreeNode(1)
    node_1.right = node_2
    print(node_1)
    solution = Solution()
    print(solution.preorder_traversal(node_1))
    print(solution.preorder_traversal_recursive(node_1))
    print(solution.inorder_traversal(node_1))
    print(solution.inorder_traversal_recursive(node_1))
    print(solution.postorder_traversal_recursive(node_1))
    print(solution.postorder_traversal(node_1))
    print(solution.level_order_traversal(node_1))
    print(solution.max_depth_top_down(node_1))
    print(solution.max_depth_bottom_up(node_1))
    # Degenerate case: a single node.
    only_node = TreeNode(1)
    print(solution.preorder_traversal(only_node))
    print(solution.preorder_traversal_recursive(only_node))
    print(solution.inorder_traversal(only_node))
    print(solution.inorder_traversal_recursive(only_node))
    print(solution.postorder_traversal(only_node))
    print(solution.level_order_traversal(only_node))
    print(solution.max_depth_top_down(only_node))
    print(solution.max_depth_bottom_up(only_node))
    # Left-leaning chain: 2 -> left 3 -> left 1.
    node_a = TreeNode(1)
    node_b = TreeNode(3)
    node_b.left = node_a
    node_c = TreeNode(2)
    node_c.left = node_b
    print(solution.inorder_traversal(node_c))
    print(solution.inorder_traversal_recursive(node_c))
    print(solution.postorder_traversal_recursive(node_c))
    print(solution.postorder_traversal(node_c))
    print(solution.level_order_traversal(node_c))
    print(solution.max_depth_top_down(node_c))
    print(solution.max_depth_bottom_up(node_c))
|
25,177 | b684d60be9f6e92c2181528f078dd4480b8105ca | # a few low-level functions that are used throughout
from __future__ import absolute_import, division, print_function # python2 compatibility
import numpy as np
import os
from scipy import interpolate
from .read_spectrum import read_carpy_fits
from . import spectral_model
#=======================================================================================================================
def vac2air(lamvac):
    """
    Convert vacuum wavelengths to air wavelengths (Morton 2000).
    http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
    """
    sigma2 = (1e4 / lamvac) ** 2
    # Refractive index of air as a function of (wavenumber in um^-1)^2.
    refraction = (
        1
        + 0.0000834254
        + 0.02406147 / (130 - sigma2)
        + 0.00015998 / (38.9 - sigma2)
    )
    return lamvac / refraction
def read_in_neural_network():
    '''
    Read in the weights and biases parameterizing a particular neural network.

    Returns (NN_coeffs, wavelength_payne): NN_coeffs packs the three layers'
    weights and biases plus the label-normalization bounds (x_min, x_max);
    the wavelength grid is converted from vacuum to air before returning.
    '''
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                        'other_data/NN_normalized_spectra_float16.npz')
    # NpzFile is a context manager; closing it here replaces the explicit
    # tmp.close() of the hand-rolled version.
    with np.load(path) as tmp:
        NN_coeffs = (tmp["w_array_0"], tmp["w_array_1"], tmp["w_array_2"],
                     tmp["b_array_0"], tmp["b_array_1"], tmp["b_array_2"],
                     tmp["x_min"], tmp["x_max"])
        wavelength_payne = vac2air(tmp["wavelength_payne"])
    return NN_coeffs, wavelength_payne
def read_default_model_mask():
    """Build the default per-pixel error mask for the model spectrum.

    Every pixel whose wavelength falls inside any (wmin, wmax) window listed
    in other_data/theory_mask.txt gets an effectively infinite error (999.)
    so fits ignore it; all other pixels keep an error of zero.
    """
    NN_coeffs, wavelength_payne = read_in_neural_network()
    errors_payne = np.zeros_like(wavelength_payne)
    mask_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'other_data/theory_mask.txt')
    for wmin, wmax in np.loadtxt(mask_path):
        assert wmin < wmax, (wmin, wmax)
        in_window = (wavelength_payne >= wmin) & (wavelength_payne <= wmax)
        errors_payne[in_window] = 999.
    return errors_payne
#--------------------------------------------------------------------------------------------------------------------------
def read_in_example():
    '''
    Read in the bundled default spectrum to be fitted.
    Returns (wavelength, spectrum, spectrum_err) as produced by read_carpy_fits.
    '''
    spec_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'other_data/2M06062375-0639010_red_multi.fits')
    return read_carpy_fits(spec_path)
#--------------------------------------------------------------------------------------------------------------------------
def read_in_blaze_spectrum():
    '''
    Read in the bundled hot-star spectrum used to determine telluric
    features and the blaze function.
    Returns (wavelength_blaze, spectrum_blaze, spectrum_err_blaze).
    '''
    blaze_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              'other_data/Hot_Star_HR718.fits')
    return read_carpy_fits(blaze_path)
#--------------------------------------------------------------------------------------------------------------------------
def doppler_shift(wavelength, flux, dv):
    '''
    Shift a spectrum by a radial velocity dv (km/s).
    Positive dv means the object is moving away.
    '''
    c = 2.99792458e5  # speed of light in km/s
    beta = dv / c
    doppler_factor = np.sqrt((1 - beta) / (1 + beta))
    shifted_wavelength = wavelength * doppler_factor
    # Resample the flux back onto the original wavelength grid.
    return np.interp(shifted_wavelength, wavelength, flux)
#--------------------------------------------------------------------------------------------------------------------------
def match_blaze_to_spectrum(wavelength, spectrum, wavelength_blaze, spectrum_blaze):
    '''
    Resample the blaze spectrum onto the wavelength grid of the science
    spectrum, order by order.

    Note: the blaze wavelength endpoints are clipped IN PLACE so every blaze
    order fully covers its science order (interp1d would otherwise raise for
    out-of-bounds points).
    '''
    n_orders = wavelength.shape[0]

    for k in range(n_orders):
        if wavelength_blaze[k, 0] > wavelength[k, 0]:
            wavelength_blaze[k, 0] = wavelength[k, 0]
        if wavelength_blaze[k, -1] < wavelength[k, -1]:
            wavelength_blaze[k, -1] = wavelength[k, -1]

    spectrum_interp = np.zeros(wavelength.shape)
    for k in range(n_orders):
        f_blaze = interpolate.interp1d(wavelength_blaze[k, :], spectrum_blaze[k, :])
        spectrum_interp[k, :] = f_blaze(wavelength[k, :])
    return spectrum_interp, wavelength
#------------------------------------------------------------------------------------------
def mask_telluric_region(spectrum_err, spectrum_blaze,
                         smooth_length=30, threshold=0.9):
    '''
    Flag telluric-contaminated pixels with an effectively infinite error.

    A window of smooth_length pixels slides across each order of the
    hot-star (blaze) spectrum; wherever the window's minimum dips below
    threshold times its maximum, the corresponding pixels of spectrum_err
    are set (in place) to 999.
    '''
    n_orders, n_pix = spectrum_blaze.shape
    for order in range(n_orders):
        for start in range(n_pix - smooth_length):
            window = spectrum_blaze[order, start:start + smooth_length]
            if np.min(window) < threshold * np.max(window):
                spectrum_err[order, start:start + smooth_length] = 999.
    return spectrum_err
#------------------------------------------------------------------------------------------
def cut_wavelength(wavelength, spectrum, spectrum_err, wavelength_min = 3500, wavelength_max = 10000):
    '''
    Drop orders that are not fully contained in (wavelength_min, wavelength_max).
    Returns the filtered (wavelength, spectrum, spectrum_err) arrays.
    '''
    inside = (wavelength > wavelength_min) & (wavelength < wavelength_max)
    # An order survives only when every one of its pixels is in range.
    ii_good = np.sum(inside, axis=1) == wavelength.shape[1]
    print("Keeping {}/{} orders between {}-{}".format(ii_good.sum(), len(ii_good), wavelength_min, wavelength_max))
    return wavelength[ii_good, :], spectrum[ii_good, :], spectrum_err[ii_good, :]
#------------------------------------------------------------------------------------------
def mask_wavelength_regions(wavelength, spectrum_err, mask_list):
    '''
    Give every pixel inside any (wmin, wmax) window of mask_list an
    effectively infinite error (999., set in place) so fits ignore it.
    '''
    assert wavelength.shape == spectrum_err.shape
    for wmin, wmax in mask_list:
        assert wmin < wmax
        inside = (wavelength > wmin) & (wavelength < wmax)
        spectrum_err[inside] = 999.
    return spectrum_err
#------------------------------------------------------------------------------------------
def scale_spectrum_by_median(spectrum, spectrum_err):
    '''
    Normalize each order (in place) by its median flux; the errors are
    scaled with the same factor so the signal-to-noise is preserved.
    '''
    for order in range(spectrum.shape[0]):
        factor = 1. / np.median(spectrum[order, :])
        spectrum[order, :] = spectrum[order, :] * factor
        spectrum_err[order, :] = spectrum_err[order, :] * factor
    return spectrum, spectrum_err
#---------------------------------------------------------------------
def whitten_wavelength(wavelength):
    '''
    Whiten the wavelength of each order (subtract the order mean and divide
    by it) to condition the polynomial continuum fit.
    '''
    wavelength_normalized = np.zeros(wavelength.shape)
    for order in range(wavelength.shape[0]):
        mean_wave = np.mean(wavelength[order, :])
        wavelength_normalized[order, :] = (wavelength[order, :] - mean_wave) / mean_wave
    return wavelength_normalized
#---------------------------------------------------------------------
def transform_coefficients(popt, NN_coeffs=None):
    '''
    Transform normalized fit coefficients into human-readable values.

    The first four entries are un-normalized using the network's label
    bounds; the first entry is then multiplied by 1000 (Teff back to Kelvin)
    and the last by 100 (presumably a velocity-like label — confirm against
    the fit setup).
    '''
    if NN_coeffs is None:
        NN_coeffs, dummy = read_in_neural_network()
    x_min, x_max = NN_coeffs[6], NN_coeffs[7]
    popt_new = popt.copy()
    popt_new[:4] = (popt_new[:4] + 0.5) * (x_max - x_min) + x_min
    popt_new[0] = popt_new[0] * 1000.
    popt_new[-1] = popt_new[-1] * 100.
    return popt_new
def normalize_stellar_parameter_labels(labels, NN_coeffs=None):
    '''
    Turn physical stellar parameter values into normalized values.
    Teff (K), logg (dex), FeH (solar), aFe (solar)

    Parameters
    ----------
    labels : array-like of 4 values [Teff, logg, FeH, aFe]
    NN_coeffs : optional tuple as returned by read_in_neural_network();
        loaded on demand when omitted.

    Returns
    -------
    ndarray of 4 floats in [-0.5, 0.5] (AssertionError when a label falls
    outside the network's training bounds).
    '''
    assert len(labels)==4, "Input Teff, logg, FeH, aFe"
    # Fixed: work on a float COPY. np.ravel can return a view, so the
    # original overwrote the caller's Teff with Teff/1000 in place (and
    # silently truncated it on integer input arrays).
    labels = np.array(labels, dtype=float).ravel()
    # Teff is stored as Teff/1000 in label space (see transform_coefficients).
    labels[0] = labels[0]/1000.
    if NN_coeffs is None:
        NN_coeffs, dummy = read_in_neural_network()
    x_min, x_max = NN_coeffs[6], NN_coeffs[7]
    new_labels = (labels - x_min) / (x_max - x_min) - 0.5
    assert np.all(new_labels >= -0.5), new_labels
    assert np.all(new_labels <= 0.5), new_labels
    return new_labels
|
25,178 | 6c2f1bc32bc1e267ad69f61b9c6f22c5522e3b0c | x = int(input())
# Bottom-up DP: dp[i] = minimum number of operations needed to reduce i to 1,
# where one operation is "subtract 1" or "divide by 2, 3 or 5 (when divisible)".
dp = [0] * 30001

for i in range(2, x + 1):
    # Baseline: step down from i-1.
    best = dp[i - 1] + 1
    # A division replaces the chain of subtractions whenever it applies.
    for divisor in (2, 3, 5):
        if i % divisor == 0:
            best = min(best, dp[i // divisor] + 1)
    dp[i] = best

print(dp[x])
|
25,179 | a3b8c329fcdb78445a57000cfae09f67889fe991 | """
装饰器
练习:exercise04-装饰器.py
"""
# def print_func_name(func):
# def wrapper():
# print(func.__name__)# 打印函数名称
# func()# 调用函数
# return wrapper
# @print_func_name #相当于say_hello = print_func_name(say_hello)
def say_hello():
    """Print a plain greeting (the undecorated version)."""
    print("hello")
# 拦截:新功能 + 旧功能
# say_hello = print_func_name(say_hello)
#
# def say_goodbye():
# print("goodbye")
#
# say_hello()
#>>wrapper
# say_hello
# hello
# say_goodbye()
# Requirement: add a new feature (printing the function name) without changing the original function or how it is called.
def print_func_name(func):
    """Decorator: print the wrapped function's name, then run it.

    The wrapper forwards any positional/keyword arguments and returns the
    wrapped function's return value, so decorated callers behave exactly as
    before — the only addition is the printed name.
    """
    def wrapper(*args, **kwargs):
        print(func.__name__)
        result = func(*args, **kwargs)
        return result
    return wrapper
@print_func_name  # equivalent to: say_hello = print_func_name(say_hello)
def say_hello():
    """Print "hello"; the decorator prints the function name first. Returns 1."""
    print("hello")
    return 1
@print_func_name  # prints "say_goodbye" before the body runs
def say_goodbye(name):
    """Print a personalised goodbye; returns 2 (shows return passthrough)."""
    print(name,"---goodbye")
    return 2
# print(say_hello())#1
# Calling the decorated function prints its name, then "hello", then
# the returned value 1 is captured and printed.
a = say_hello()
print(a)
# print(say_goodbye("qtx"))#2
|
25,180 | 3beea61df8776f46055809acbb8b4754ae6ac820 | import pymysql
import settings
import docker_interface
def get_db(config):
    """Open a pymysql connection from a config dict with the keys
    host, port, user, password and dbname."""
    return pymysql.connect(
        host=config['host'],
        port=config['port'],
        user=config['user'],
        password=config['password'],
        db=config['dbname'],
    )
def update_task_status(status, task_id):
    """Set task.task_status for the given task id, rolling back on failure."""
    conn = get_db(settings.db)
    cur = conn.cursor()
    query = "UPDATE task SET task_status=%s WHERE id=%s"
    try:
        cur.execute(query, (status, task_id))
        conn.commit()
    except Exception as err:
        # Undo the partial transaction so the connection stays consistent.
        conn.rollback()
        print(err)
    finally:
        conn.close()
def check_task_status(task_id):
    """Return the task_status of the given task, or None when the task does
    not exist (or the query fails).

    Fixes vs. the original:
    - parameters are passed as a proper 1-tuple (``(task_id,)``); a bare
      ``(task_id)`` is just the value, which only works by accident in pymysql
      and is invalid for other DB-API drivers
    - a missing row no longer raises a ``TypeError`` on ``None[0]`` that was
      silently swallowed by the broad except
    """
    db = get_db(settings.db)
    cursor = db.cursor()
    sql = "SELECT task_status FROM task WHERE id=%s"
    try:
        cursor.execute(sql, (task_id,))
        row = cursor.fetchone()
        if row is None:
            print('No task found with id ' + str(task_id))
            return None
        print('The task_status of task ' + str(task_id) + ' is ' + str(row[0]))
        return row[0]
    except Exception as e:
        print(e)
    finally:
        db.close()
def get_all_not_finished_tasks():
    """Return the ids of every task whose task_status is not 0 (not done).

    Fix vs. the original: the connection is now closed (it leaked — every
    other helper in this module closes its connection in a finally block).
    """
    db = get_db(settings.db)
    cursor = db.cursor()
    sql = 'SELECT id FROM task WHERE task_status<>0'
    try:
        cursor.execute(sql)
        tasks = [row[0] for row in cursor.fetchall()]
    finally:
        db.close()
    print('all_not_finished_tasks: ' + str(tasks))
    return tasks
def get_all_finished_tasks():
    """Return the ids of every task whose task_status is 0 (done).

    Fix vs. the original: the connection is now closed (it leaked — every
    other helper in this module closes its connection in a finally block).
    """
    db = get_db(settings.db)
    cursor = db.cursor()
    sql = 'SELECT id FROM task WHERE task_status=0'
    try:
        cursor.execute(sql)
        tasks = [row[0] for row in cursor.fetchall()]
    finally:
        db.close()
    print('all_finished_tasks: ' + str(tasks))
    return tasks
def change_executor_status():
    """Mark this executor (identified by its own configured ip) as free
    (status 0).

    Fixes vs. the original:
    - rolls back on failure, matching update_task_status, so a failed update
      does not leave an open transaction behind
    - the parameter is passed as a proper 1-tuple instead of a bare value
    """
    db = get_db(settings.db)
    cursor = db.cursor()
    sql = 'update executor set status = 0 where exec_ip = %s'
    try:
        cursor.execute(sql, (docker_interface.config['self']['ip'],))
        db.commit()
    except Exception as e:
        db.rollback()
        print(e)
    finally:
        db.close()
if __name__ == '__main__':
    # Ad-hoc smoke test against the configured database.
    # update_task_status(0, 1)
    check_task_status(2)
    get_all_not_finished_tasks()
    get_all_finished_tasks()
|
25,181 | e165ccbaee3f9673a1c7db65835657b095c2ff90 | # Generated by Django 3.0.8 on 2020-08-06 21:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen product.name to max_length=300."""
    dependencies = [
        ('product', '0003_product_nutrition_grade'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(max_length=300),
        ),
    ]
|
25,182 | feacec1089e02cf6d9f5d1da7b027f63f2123933 | from django.db import models
class ButtonResponse(models.Model):
    """Stores one button-triggered response: HTTP-style status code, the
    answer text, and when it was recorded."""
    status_code = models.CharField(max_length=100)
    response_answer = models.TextField()
    response_date = models.DateTimeField()
|
25,183 | 6110287b9dbe88340059e59aa7d11964787d62da | from cassandra.cluster import Cluster
# Connection settings for the local Cassandra cluster.
hostname = '127.0.0.1'
keyspace = 'db1'
column_family = 'userInfo'  # NOTE(review): defined but unused — the insert below hard-codes the table name
nodes = []
nodes.append(hostname)
# Module-level session shared by the helpers in this file (created at import time).
cluster = Cluster(nodes)
session = cluster.connect(keyspace)
def insertInUserInfoCF(domainName, userinfo_map):
    '''
    Inserts a row into User_Info ColumnFamily with key as domainName
    :param domainName: row key
    :param userinfo_map: dict providing usr_With_Most_Comment, best5_Users
        and top3Loc_With_Most_Users
    :return: None
    '''
    prepared = session.prepare(
        "insert into userInfo (domain, usr_With_Most_Comment, best5_Users, top3Loc_With_Most_Users) values (?, ?, ?, ?);")
    bound_values = (
        domainName,
        userinfo_map['usr_With_Most_Comment'],
        userinfo_map['best5_Users'],
        userinfo_map['top3Loc_With_Most_Users'],
    )
    session.execute(prepared, bound_values)
|
25,184 | e30792f932a31a3cbff8400dbff2356cea3350e5 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re , urllib.parse , urllib.request , http.cookiejar , base64 , binascii , rsa
from bs4 import BeautifulSoup as bs
import sqlite3
import time,sys
import socks
import socket
import os
from instapush import Instapush, App
class LoginStatus:
    """Logs into Sina Weibo over a local SOCKS5 proxy and tracks a user's
    online/offline chat status, persisting changes to a local SQLite table
    and pushing notifications via Instapush."""
    def __init__(self,nick,passwd):
        '''create table status (id integer primary key AUTOINCREMENT,uid varchar(32), username varchar(512),time varchar(32),action varchar(64));'''
        # nick/passwd are the Weibo login credentials.
        self.nick = nick
        self.passwd = passwd
        # Route ALL sockets through a local SOCKS5 proxy on port 7070
        # (monkey-patches the global socket class).
        socks.set_default_proxy(socks.SOCKS5, "localhost", 7070)
        socket.socket = socks.socksocket
        # Install a cookie-aware opener globally so the login session cookies
        # are reused by every later urlopen call.
        self.cj = http.cookiejar.LWPCookieJar()
        self.cookie_support = urllib.request.HTTPCookieProcessor(self.cj)
        self.opener = urllib.request.build_opener(self.cookie_support , urllib.request.HTTPHandler)
        urllib.request.install_opener(self.opener)
        # Print the externally visible IP (sanity check that the proxy works).
        a = urllib.request.urlopen("http://1212.ip138.com/ic.asp").read()
        print(a.decode('latin-1').split('[')[1].split(']')[0])
    def getData(self , url):
        """GET a url and return the body as text (utf-8, falling back to latin-1)."""
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        text = response.read()
        try:
            text = text.decode('utf-8')
        except Exception as e:
            text = text.decode('latin-1')
        return text
    def postData(self, url , data):
        """POST form data (url-encoded) with a browser User-Agent; the
        response is decoded as gbk (Sina's login endpoint encoding)."""
        headers = {'User-Agent' : 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)'}
        data = urllib.parse.urlencode(data).encode('utf-8')
        request = urllib.request.Request(url , data , headers)
        response = urllib.request.urlopen(request)
        text = response.read().decode('gbk')
        return text
    def login_weibo(self):
        """Perform the SSO login handshake (prelogin -> RSA-encrypt the
        password -> post the login form -> follow the redirect)."""
        #========================== get servertime , pcid , pubkey , rsakv===========================
        # pre login
        prelogin_url = 'http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=%s&rsakt=mod&checkpin=1&client=ssologin.js(v1.4.15)&_=1400822309846' % self.nick
        preLogin = self.getData(prelogin_url)
        servertime = re.findall('"servertime":(.*?),' , preLogin)[0]
        pubkey = re.findall('"pubkey":"(.*?)",' , preLogin)[0]
        rsakv = re.findall('"rsakv":"(.*?)",' , preLogin)[0]
        nonce = re.findall('"nonce":"(.*?)",' , preLogin)[0]
        #===============encode username & password================
        # Username: url-quote then base64, as the login form expects.
        su = base64.b64encode(bytes(urllib.request.quote(self.nick) , encoding = 'utf-8'))
        # Password: RSA-encrypt "servertime\tnonce\npasswd" with the server's
        # public key, hex-encoded.
        rsaPublickey = int(pubkey , 16)
        key = rsa.PublicKey(rsaPublickey , 65537)
        message = bytes(str(servertime) + '\t' + str(nonce) + '\n' + str(self.passwd) , encoding = 'utf-8')
        sp = binascii.b2a_hex(rsa.encrypt(message , key))
        #=======================login =======================
        param = {'entry' : 'weibo' , 'gateway' : 1 , 'from' : '' , 'savestate' : 7 , 'useticket' : 1 , 'pagerefer' : 'http://login.sina.com.cn/sso/logout.php?entry=miniblog&r=http%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl%3D' , 'vsnf' : 1 , 'su' : su , 'service' : 'miniblog' , 'servertime' : servertime , 'nonce' : nonce , 'pwencode' : 'rsa2' , 'rsakv' : rsakv , 'sp' : sp , 'sr' : '1680*1050' ,
        'encoding' : 'UTF-8' , 'prelt' : 961 , 'url' : 'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack'}
        s = self.postData('http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)' , param)
        try:
            urll = re.findall("location.replace\(\'(.*?)\'\);" , s)[0]
        except Exception as e:
            pass
        # NOTE(review): if the findall above fails, urll is never assigned and
        # this line raises NameError — the except/pass hides the real cause.
        self.getData(urll)
    def getStatus(self,uid):
        """Log in, fetch the user's hover-card and return a row tuple
        (None, uid, username, timestamp, status-css-class)."""
        self.login_weibo()
        now = time.strftime('%Y-%m-%d %H:%M',time.localtime(time.time()))
        text = self.getData('http://www.weibo.com/aj/user/newcard?id='+str(uid))
        # Unescape the JSON-embedded HTML before parsing it.
        text = text.replace('\\"','"').replace('\\/','/')
        soup = bs(text)
        # The second CSS class of the W_chat_stat icon encodes online/offline.
        inner = soup.find('i',{'class':'W_chat_stat'})
        status = inner['class'][1]
        # The follow button's action-data is a query string: uid=...&username=...
        u = [x.split('=')[1] for x in soup.find('a',{'class':'W_btn_c'})['action-data'].split('&')]
        print(None,u[0],u[1],now,status)
        return (None,u[0],u[1],now,status)
    def refresh(self,uid):
        """Compare the user's current status with the last one stored in
        SQLite; on change, insert a new row and send a push notification."""
        uid = str(uid)
        cx = sqlite3.connect("weibo.db")
        cu=cx.cursor()
        # NOTE(review): SQL built by string concatenation — uid comes from a
        # local list here, but this is injection-prone; prefer a ? parameter.
        last = cu.execute('select max(rowid),action from status where uid=="'+uid+'" ').fetchone()[1]
        data = self.getStatus(uid)
        iid,uid,username,t,status = data
        # The page delivers the name latin-1/escaped; decode to real unicode.
        username = username.encode('latin-1').decode('unicode_escape')
        print(username,status)
        if status != last:
            cu.execute('insert into status values (?,?,?,?,?)',data)
            self.instapush(username,t,status)
        cx.commit()
    def instapush(self,username,time,status):
        """Send an Instapush notification mapping the status CSS class to a
        human-readable action. NOTE(review): appid/secret are placeholders."""
        action = {'W_chat_stat_online':'上线了','W_chat_stat_offline':'下线了'}
        app = App(appid='appid',secret='secret')
        app.notify(event_name='WeiboStat',trackers={"username":username,"time":time,"status":action[status]})
if __name__ == '__main__':
    # Credentials are placeholders; fill in before running.
    weibo = LoginStatus('username','password')
    uids = [ 1223920903 ]
    for i in uids:
        try:
            weibo.refresh(i)
        except Exception as e:
            # Keep polling the remaining uids even if one fails.
            print(e)
            continue
|
25,185 | 02bea4a8d96d919e4f2a53bbb1934abe797b72a6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def upFirst(x):
    """Capitalize x: first character upper-cased, the rest lower-cased.

    Raises TypeError for non-string input or for strings shorter than two
    characters (same contract as before).
    """
    if not isinstance(x, str):
        raise TypeError("please enter a str")
    if len(x) < 2:
        raise TypeError("str is too short")
    head, tail = x[0], x[1:]
    return head.upper() + tail.lower()
if __name__ == '__main__':
    # Python 2 script: map() returns a list here and print is a statement.
    result = map(upFirst,['adam', 'LISA', 'barT'])
    print result
|
25,186 | 8b4cf93a25976beca67fffd97db5a2f36dd27acd | #!/usr/bin/env python3
from q_model import Policy
def _get_reward(game):
''' Score for the current game state '''
if game.won():
return 1
if game.lost():
return -1
# Negative reward values incentivizes finding the fastest solution
return -0.2
def solve(game, attempts: int = 0):
    '''
    Try to solve the game in a predefined number of steps.
    If attempts is 0, game.attempts is used instead.

    The Policy object is created once and updated after every action, so
    learning accumulates across the reset episodes.
    '''
    actions = game.get_actions()
    # assumes Policy(len(actions)) maps states to per-action values — see q_model
    policy = Policy(len(actions))
    attempts = attempts or game.attempts
    for _ in range(attempts):
        # Start a fresh episode from the initial state.
        game.reset()
        game.draw()
        steps = 0
        while not (game.won() or game.lost()):
            steps += 1
            # Take the index of the state with highest reward
            state = game.get_state()
            idx = policy.get_action_id(state)
            # Execute the highest rewarding action
            game.do_action(actions[idx])
            reward = _get_reward(game)
            # Update the policy of the last action performed
            new_state = game.get_state()
            policy.update(state, new_state, idx, reward)
        game.draw()
        if game.won():
            print('Won in ', steps)
        else:
            print('Lost in ', steps)
if __name__ == '__main__':
    # Demo: run the solver on the bundled Caprecavoly puzzle.
    from problems.caprecavoly import Game
    solve(Game())
|
25,187 | 149541eccd62b74e7b720a91086f215da5167c11 | import pytest
from magma import *
from magma.uniquification import MultipleDefinitionException
@pytest.mark.skip("Multiple Definitions no longer supported because we cache on names")
def test_multiple_definitions_are_same():
    """Two distinct class-style circuits sharing the name "same" and the same
    definition should make compile() raise MultipleDefinitionException."""
    class Circ1(Circuit):
        name = "same"
        IO = ['I', In(Bit), 'O', Out(Bit)]
        @classmethod
        def definition(io):
            wire(io.I, io.O)
    class Circ2(Circuit):
        name = "same"
        IO = ['I', In(Bit), 'O', Out(Bit)]
        @classmethod
        def definition(io):
            wire(io.I, io.O)
    # Instantiate both inside one wrapper circuit so they end up in the same
    # compiled design.
    test = DefineCircuit('test', 'I', In(Bit), 'O1', Out(Bit), 'O2', Out(Bit))
    circ1 = Circ1()
    wire(test.I, circ1.I)
    wire(test.O1, circ1.O)
    circ2 = Circ2()
    wire(test.I, circ2.I)
    wire(test.O2, circ2.O)
    EndDefine()
    try:
        compile('build/shouldnotmatter', test)
        assert False, "Should throw MultipleDefinitionException"
    except MultipleDefinitionException:
        pass
@pytest.mark.skip("Multiple Definitions no longer supported because we cache on names")
def test_multiple_definitions_are_same_older_def_approach():
    """Same scenario as above but built with the older DefineCircuit API;
    expects either deduplication (Circ1 is Circ2) or the exception."""
    IO = ['I', In(Bit), 'O', Out(Bit)]
    Circ1 = DefineCircuit("same", *IO)
    wire(Circ1.I, Circ1.O)
    EndDefine()
    Circ2 = DefineCircuit("same", *IO)
    wire(Circ2.I, Circ2.O)
    EndDefine()
    test = DefineCircuit('test', 'I', In(Bit), 'O1', Out(Bit), 'O2', Out(Bit))
    circ1 = Circ1()
    wire(test.I, circ1.I)
    wire(test.O1, circ1.O)
    circ2 = Circ2()
    wire(test.I, circ2.I)
    wire(test.O2, circ2.O)
    EndDefine()
    try:
        compile('shouldnotmatter', test)
        assert Circ1 is Circ2
    except MultipleDefinitionException:
        pass
@pytest.mark.skip("Multiple Definitions no longer supported because we cache on names")
def test_same_definitions():
    """Two instances of the SAME circuit class must compile cleanly — no
    MultipleDefinitionException should be raised."""
    class Circ1(Circuit):
        name = "same"
        IO = ['I', In(Bit), 'O', Out(Bit)]
        @classmethod
        def definition(io):
            wire(io.I, io.O)
    test = DefineCircuit('test', 'I', In(Bit), 'O1', Out(Bit), 'O2', Out(Bit))
    circ1 = Circ1()
    wire(test.I, circ1.I)
    wire(test.O1, circ1.O)
    circ2 = Circ1()
    wire(test.I, circ2.I)
    wire(test.O2, circ2.O)
    EndDefine()
    try:
        compile("build/test_same_definition", test)
    except MultipleDefinitionException:
        assert False, "Should not throw MultipleDefinitionException"
|
25,188 | 3b142d4187cc956f245623d08c1005fd44efee53 | __author__ = 'cmotevasselani'
from time import sleep
from TokenBucket import TokenBucket
class TokenBuckets:
    """Registry of named TokenBucket rate limiters (Python 2 module)."""
    def __init__(self):
        # Maps bucket_name -> TokenBucket.
        self.buckets = {}
    def create_bucket(self, bucket_name, rate, max_count, start_filled=False):
        # Returns True when a new bucket was created, False when the name is
        # already taken (the existing bucket is left untouched).
        if bucket_name not in self.buckets:
            self.buckets[bucket_name] = TokenBucket(rate, max_count, start_filled)
            return True
        else:
            return False
    def get_from_bucket(self, bucket_name, tokens):
        # Delegates to the named bucket; raises KeyError for unknown names.
        print "bucket has: " + str(self.buckets[bucket_name].token_count)
        return self.buckets[bucket_name].get_tokens(tokens)
if __name__ == "__main__":
tokenBuckets = TokenBuckets()
print "creating buckets"
tokenBuckets.create_bucket('first', 10, 10)
tokenBuckets.create_bucket('second', 5, 10)
print "sleeping for 1 second"
sleep(1)
print "(True) getting 10 from first: " + str(tokenBuckets.get_from_bucket('first', 10)) + "\n"
print "(False) getting 10 from second: " + str(tokenBuckets.get_from_bucket('second', 10)) + "\n"
sleep(50 / 1000)
print "(False) getting 10 from first: " + str(tokenBuckets.get_from_bucket('first', 10)) + "\n"
print "(False) getting 10 from second: " + str(tokenBuckets.get_from_bucket('second', 10)) + "\n"
|
25,189 | 0213153cefb818d8e559cba4f930a84a7cf7eb73 | import asyncio
from typing import List
from bs4 import BeautifulSoup
from fake_headers import Headers
from pyppeteer import launch
from pyppeteer.network_manager import Response
from pyppeteer.page import Page
from kekmonitors.base_scraper import BaseScraper
from kekmonitors.config import Config
from kekmonitors.shoe_stuff import Shoe
from kekmonitors.utils.tools import make_default_executable
class Footdistrict(BaseScraper):
    """Headless-browser monitor for footdistrict.com: loads configured
    category pages with pyppeteer and registers the first few product links
    it finds."""
    def init(self):
        """Synchronous setup of the scraper's configuration."""
        # website infos
        self.base_url = "https://footdistrict.com"
        self.endpoints = ["/zapatillas/f/b/converse/"]
        # create a random headers generator, configured to generate random windows headers
        self.headers_gen = Headers(os="win", headers=True)
        # max links to be monitored
        self.max_links = 5
        # NOTE(review): found_links is checked in loop() but never appended to
        # in this class — confirm whether it is filled elsewhere, otherwise
        # shoe_check runs for the same links on every iteration.
        self.found_links = []  # type: List[str]
    async def async_init(self):
        """Launch the shared headless browser used by every loop iteration."""
        self.browser = await launch()
        # self.context = await self.browser.createIncognitoBrowserContext()
        self.context = self.browser
    async def on_async_shutdown(self):
        """Close the browser cleanly on monitor shutdown."""
        self.general_logger.debug("Shutting down browser...")
        await self.browser.close()
        self.general_logger.info("Browser has shut down...")
    async def get_fd_page(self, link: str, page: Page):
        """Navigate `page` to `link` with fresh random headers and return the
        final Response, following the site's 307 anti-bot redirect dance."""
        await page.setExtraHTTPHeaders(self.headers_gen.generate())
        await page.setJavaScriptEnabled(True)
        self.network_logger.debug(f"{link}: getting...")
        response = await page.goto(link)
        if response.status == 307:
            self.network_logger.debug(
                f"{link}: got 307, waiting to redirect and stuff..."
            )
            try:
                # Wait (up to 10s) for the browser to come back to the same
                # url with a 200 after the JS challenge.
                response = await page.waitForResponse(
                    lambda res: res.url == link and res.status == 200,
                    {"timeout": 10000},
                )
                if response.ok:
                    self.network_logger.debug(
                        f"{link}: got redirection, waiting for it to finish loading..."
                    )
                    await page.waitForNavigation()
                    await asyncio.sleep(1)
                    self.network_logger.debug(f"{link}: has loaded")
                    # await page.setJavaScriptEnabled(False)
                    # await page.keyboard.press("Escape")
                else:
                    self.network_logger.warning(
                        f"{link}: failed to get: {response.status}"
                    )
            # NOTE(review): bare except — also swallows CancelledError etc.;
            # the exception is at least logged below.
            except:
                self.general_logger.exception(
                    f"{link} has failed to redirect. Current url: {response.url}, status: {response.status}, page url: {page.url}"
                )
        elif response.ok:
            self.network_logger.debug(f"{link}: got it with code {response.status}")
        return response
    async def loop(self):
        """One monitoring pass: fetch every endpoint concurrently, then parse
        each result and register up to max_links product links."""
        # tasks will contain asynchronous tasks to be executed at once, asynchronously
        # in this case, they will contain the requests to the endpoints
        pages = []  # type: List[Page]
        tasks = []  # List[Coroutine]
        # create a task for each endpoint
        for ep in self.endpoints:
            page = await self.context.newPage()
            pages.append(page)
            tasks.append(self.get_fd_page(self.base_url + ep, page))
        # gather, execute all tasks
        responses = await asyncio.gather(*tasks)  # type: List[Response]
        for link, page, response in zip(self.endpoints, pages, responses):
            if not response.ok:
                self.general_logger.debug(
                    f"{link}: skipping parsing on code {response.status}"
                )
                continue
            self.general_logger.debug("Getting content...")
            text = await response.text()
            self.general_logger.debug("Parsing content...")
            # BeautifulSoup can be used to parse html pages in a very convenient way
            soup = BeautifulSoup(text, "lxml")
            self.general_logger.debug("Content parsed...")
            # parsing example. in this case we simply add the first self.max_links products.
            grid = soup.find("ol", {"class": "product-items"})
            count = 0
            for prod in grid.find_all("li"):
                count += 1
                if count <= self.max_links:
                    link = prod.a.get("href")
                    if link not in self.found_links:
                        shoe = Shoe()
                        shoe.link = link
                        self.general_logger.info(f"Found {link}")
                        self.shoe_check(
                            shoe
                        )  # inserts/updates the shoe in the database, updating last_seen
                else:
                    break
            await page.close()
if __name__ == "__main__":
custom_config = Config()
custom_config["WebhookConfig"]["crash_webhook"] = "your-crash-webhook-here"
make_default_executable(Footdistrict)
|
25,190 | d6c95b9ae0403978a490530f4cb3a732fdb81dca | # Generated by Django 3.0.7 on 2020-09-25 12:33
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Auto-generated initial migration for the crm app: creates the Address,
    Currency, customer, location1, Order and Invoice tables.

    NOTE(review): the Order.date default below is a datetime literal frozen
    at generation time (a common artifact of using datetime.now() instead of
    a callable in the model), and Customer_Zipcode is an IntegerField with an
    empty-string default — both look like model-level mistakes carried into
    the migration.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contact_name', models.CharField(max_length=23)),
                ('town', models.CharField(max_length=34)),
                ('postcode', models.CharField(default='43701', max_length=5, verbose_name='zip code')),
                ('state', models.CharField(max_length=34)),
            ],
        ),
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=3, unique=True)),
                ('pre_symbol', models.CharField(blank=True, max_length=1)),
                ('post_symbol', models.CharField(blank=True, max_length=1)),
            ],
        ),
        migrations.CreateModel(
            name='customer',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('Customer_gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('TS', 'Transgender')], max_length=50)),
                ('Customer_profilepic', models.FileField(upload_to='')),
                ('Customer_Email', models.EmailField(max_length=111)),
                ('Customer_created_at', models.DateTimeField(auto_now_add=True)),
                ('Customer_PhoneNo1', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region=None)),
                ('Customer_Street', models.CharField(default='', max_length=250)),
                ('Customer_Landmark', models.CharField(default='', max_length=100)),
                ('Customer_Zipcode', models.IntegerField(default='')),
                ('Customer_State', models.CharField(default='', max_length=100)),
                ('Customer_Country', models.CharField(default='', max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='location1',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country', models.CharField(max_length=30)),
                ('state', models.CharField(max_length=23)),
                ('city', models.CharField(max_length=23)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('order_id', models.AutoField(primary_key=True, serialize=False)),
                ('items_json', models.CharField(max_length=5000)),
                ('name', models.CharField(max_length=90)),
                ('email', models.CharField(max_length=111)),
                ('amount', models.IntegerField(default=0)),
                ('phone', models.CharField(default='', max_length=111)),
                ('coursename', models.CharField(max_length=5000)),
                ('prices', models.CharField(max_length=500)),
                ('qty', models.CharField(max_length=400)),
                ('date', models.DateTimeField(blank=True, default=datetime.datetime(2020, 9, 25, 18, 3, 13, 630139))),
            ],
        ),
        migrations.CreateModel(
            name='Invoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('address', models.CharField(max_length=34)),
                ('invoice_id', models.CharField(blank=True, editable=False, max_length=6, null=True, unique=True)),
                ('invoice_date', models.DateField(default=datetime.date.today)),
                ('invoiced', models.BooleanField(default=False)),
                ('draft', models.BooleanField(default=False)),
                ('paid_date', models.DateField(blank=True, null=True)),
                ('currency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='crm.Currency')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='crm.customer')),
            ],
            options={
                'ordering': ('-invoice_date', 'id'),
            },
        ),
    ]
|
25,191 | a59141d7458495978d15f6e15ab879a2d623fc83 | while True :
    # Prompt for a string; an empty input ends the loop.
    word = input("문자열을 입력하시오 :")
    worldlength = len(word)
    if worldlength == 0 :
        break
    elif 5 <= worldlength <= 8 :
        # Lengths 5-8 are skipped without printing anything.
        continue
    elif worldlength < 5 :
        # Short words are wrapped in asterisks.
        result = '*' + word + '*'
    elif worldlength > 8 :
        # Long words are wrapped in dollar signs.
        result = "$" + word + "$"
    print("유효한 입력 결과 : ", result)
25,192 | e7183558e7a74e3f10da4cd4b9523eb106c1505a |
factorial = lambda x: 1 if not x else x * factorial(x-1)


def filter_factorials(n):
    """Return the elements of *n* that are factorial numbers (1, 2, 6, 24, ...).

    Fixes two defects in the original:
    - off-by-one: ``range(1, max(n))`` omitted ``max(n)`` itself and produced
      too few candidates, so e.g. 1 and 2 were never recognised;
    - cost: it computed factorial(x) for EVERY x below max(n); now factorials
      are generated incrementally only until they exceed max(n).
    Also handles an empty input (the original crashed on ``max([])``).
    """
    if not n:
        return []
    limit = max(n)
    # Generate 1!, 2!, 3!, ... up to limit; a set gives O(1) membership tests.
    fs = set()
    value, k = 1, 1
    while value <= limit:
        fs.add(value)
        k += 1
        value *= k
    return [e for e in n if e in fs]
|
25,193 | 454dd305056b3931a93f4e6868e7ece46fe13168 | #!/usr/bin/env python
# coding: utf-8
# In[26]:
import pandas as pd
import numpy as np
import requests
from pathlib import Path
import librosa
import librosa.display
import matplotlib.pyplot as plt
import IPython.display as ipd
from pydub import AudioSegment
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout,Activation,Flatten
from tensorflow.keras.optimizers import Adam
from sklearn import metrics
import os
import gc
import soundfile as sf
from scipy.io.wavfile import write
import itertools
import tensorflow
import keras
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras import applications
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math
import datetime
import time
from time import sleep
# In[15]:
# Mel-spectrogram settings (re-declared locally inside predict_bird_sound).
N_FFT = 1024          # FFT window length in samples
HOP_SIZE = 1024       # hop between successive analysis frames
N_MELS = 128          # number of mel bands
WIN_SIZE = 1024       # NOTE(review): never passed to librosa below
WINDOW_TYPE = 'hann'  # NOTE(review): never passed to librosa below
FEATURE = 'mel'       # NOTE(review): unused
FMIN = 1400           # lowest frequency (Hz) kept in the spectrogram
# In[20]:
#Loading vgg16 model
# Convolutional base used as a frozen feature extractor (no dense head).
vgg16 = applications.VGG16(include_top=False, weights='imagenet')
# # Load saved model
# In[2]:
# Trained classifier head that consumes VGG16 bottleneck features.
model = keras.models.load_model('D:/C Drive Documents/Bird_Sound_Recognition/My_Model')
# # Testing on new images
# In[10]:
def removeSilence(signal):
    """Trim leading and trailing silence from a 1-D audio signal.

    ``librosa.effects.split`` returns an array of (start, end) sample
    indices of the non-silent intervals; keep everything from the start of
    the first interval to the end of the last one.

    Fix: the original sliced up to ``split(signal)[0][-1]`` — the end of the
    *first* non-silent interval — silently discarding all audio after the
    first pause.  It also called split() twice; now it is computed once.
    """
    intervals = librosa.effects.split(signal)
    return signal[intervals[0][0] : intervals[-1][-1]]
# In[9]:
def mel_spectogram_generator(audio_name,signal,sample_rate,augmentation,target_path):
    """Render *signal* as a mel-spectrogram PNG under *target_path*.

    The output file is named ``augmentation + audio_name`` with the
    4-character audio extension replaced by '.png'.  Uses the module-level
    N_FFT / HOP_SIZE / N_MELS / FMIN settings.
    """
    S = librosa.feature.melspectrogram(y=signal,sr=sample_rate,
                                       n_fft=N_FFT,
                                       hop_length=HOP_SIZE,
                                       n_mels=N_MELS,
                                       htk=True,
                                       fmin=FMIN,
                                       fmax=sample_rate/2)
    plt.figure(figsize=(10, 4))
    # Power spectrogram in dB; axes are hidden so only pixels are saved.
    librosa.display.specshow(librosa.power_to_db(S**2,ref=np.max), fmin=FMIN,y_axis='linear')
    plt.axis('off')
    # audio_name[:-4] drops the extension (assumes a 4-char suffix, e.g. '.wav').
    plt.savefig(target_path + augmentation + audio_name[:-4] + '.png',bbox_inches='tight',transparent=True, pad_inches=0)
    # Release matplotlib state; repeated renders leak memory otherwise.
    plt.clf()
    plt.close("all")
    gc.collect()
# In[11]:
def read_image(file_path):
    """Load an image file as a (1, 558, 217, 3) float batch scaled to [0, 1].

    The target size must match what the classifier head was trained on.
    """
    print("[INFO] loading and preprocessing image...")
    image = load_img(file_path, target_size=(558, 217))
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)  # add the batch dimension
    image /= 255.
    return image
# In[23]:
def test_single_image(path):
    """Print class probabilities and the final label for one spectrogram PNG.

    Runs the image through the frozen VGG16 base (global ``vgg16``) and
    then through the trained classifier head (global ``model``).
    Returns the loaded image (for notebook display).
    """
    # Class labels, ordered to match the model's output indices.
    birds = ['AshyPrinia',
             'AsianKoel',
             'BlackDrongo',
             'CommonMyna',
             'CommonTailorbird',
             'GreaterCoucal',
             'GreenBee-eater',
             'IndianRobin',
             'LaughingDove',
             'White-throatedKingfisher']
    images = read_image(path)
    time.sleep(.5)  # cosmetic pause for console pacing
    # Bottleneck features from the convolutional base.
    bt_prediction = vgg16.predict(images)
    # NOTE(review): Sequential.predict_proba / predict_classes were removed
    # in newer tf.keras releases — confirm the installed TF version.
    preds = model.predict_proba(bt_prediction)
    for idx, bird, x in zip(range(0,10), birds , preds[0]):
        print("ID: {}, Label: {} {}%".format(idx, bird, round(x*100,2) ))
    print('Final Decision:')
    time.sleep(.5)
    # Animated ellipsis while "deciding".
    for x in range(3):
        print('.'*(x+1))
        time.sleep(.2)
    class_predicted = model.predict_classes(bt_prediction)
    # Report only the winning class.
    for idx, bird, x in zip(range(0,10), birds , preds[0]):
        if idx == class_predicted[0]:
            print("ID: {}, Label: {}".format(class_predicted[0], bird))
    return load_img(path)
# In[29]:
def predict_bird_sound(source_path,file_name, target_path = 'D:/'):
    """Classify one audio file end-to-end.

    Loads the audio, trims silence, renders a mel-spectrogram PNG into
    *target_path*, then classifies that image with the saved model.

    Parameters:
        source_path: directory containing the audio file (must end with '/').
        file_name: audio file name including its extension (e.g. '.wav').
        target_path: directory the intermediate PNG is written to.
    """
    # Local copies of the global spectrogram settings (shadow the globals).
    N_FFT = 1024
    HOP_SIZE = 1024
    N_MELS = 128
    WIN_SIZE = 1024
    WINDOW_TYPE = 'hann'
    FEATURE = 'mel'
    FMIN = 1400
    augmentation = ''
    # sr=None keeps the file's native sampling rate.
    signal, sample_rate = librosa.load(source_path + file_name,sr = None)
    DNsignal = removeSilence(signal)
    mel_spectogram_generator(file_name,DNsignal,sample_rate,'',target_path)
    # mel_spectogram_generator names the PNG after the audio file.
    path = target_path + augmentation + file_name[:-4] + '.png'
    test_single_image(path)
# In[37]:
print("BIRD SOUND RECOGNITION APP - By Karthik Mandapaka")
sleep(1)
print("Welcome")
sleep(2)
# Interactive loop: ask for an audio file, classify it, repeat until the
# user answers 0.
while(1):
    source_path = input("Please enter Source path: ")
    sleep(2)
    file_name = input("Please enter the audio file name: ")
    sleep(2)
    print("Recognizing bird sound")
    sleep(0.5)
    print('.')
    sleep(0.5)
    print('..')
    sleep(0.5)
    print('...')
    # Spectrogram goes to the default target path ('D:/') before classification.
    predict_bird_sound(source_path,file_name)
    cont = input("Do you want to identify another bird sound?(Enter 1 for Yes or 0 for No)")
    if (cont == '0'): break
# In[25]:
# predict_bird_sound('D:/C Drive Documents/Bird_Sound_Recognition/Data for each bird/data/xeno-canto-dataset/LaughingDove/','Spilopelia280683.wav','D:/')
|
25,194 | f6d3f0f786b5fd14580f3850a657dcc714103cf3 | #Prisila Michelle - 13516129
import sys,os
def openfile(filename): #fungsi untuk membaca file
    """Read a contour description file and return (size, matrix).

    File format: the first line holds the grid size N, followed by N rows
    of characters.  Spaces inside a row are discarded; each row becomes a
    list of single characters.

    Fix: the original stripped the last character of every row with
    ``string[:-1]``, which corrupts the final row whenever the file has no
    trailing newline; ``rstrip('\n')`` removes only the newline.
    """
    with open(os.path.join(sys.path[0], filename), 'r') as file:
        size = int(file.readline())
        matrix = []
        for _ in range(size):
            row = file.readline().rstrip('\n')
            # Drop any spaces used as visual separators in the row.
            matrix.append([ch for ch in row if ch != ' '])
        return size, matrix
def findsize(contour, size):
    """Return the side length of the largest square of identical characters.

    Classic DP: dp[i][j] is the side of the largest uniform square whose
    bottom-right corner is (i, j); it grows when the current cell and its
    left / top / top-left neighbours all hold the same character.

    Fix: the original never updated max_size for cells in the first row or
    column, so a 1x1 grid returned 0 instead of 1.  Also returns 0 cleanly
    for an empty grid.
    """
    if size <= 0:
        return 0
    # Every cell is at least a 1x1 square on its own.
    dp = [[1] * size for _ in range(size)]
    best = 1
    for i in range(1, size):
        for j in range(1, size):
            if contour[i][j] == contour[i-1][j] == contour[i][j-1] == contour[i-1][j-1]:
                dp[i][j] = min(dp[i][j-1], dp[i-1][j], dp[i-1][j-1]) + 1
                best = max(best, dp[i][j])
    return best
size,contour = openfile("contour.txt")  # grid size and character matrix
n = findsize(contour,size)  # side of the largest uniform square
print("file loaded!")
# Output (Indonesian): "the maximum buildable area of the Krusty Krab is".
print("Luas maksimum Krasti Krab yang dapat dibangun adalah", n*n)
|
25,195 | bbc5a54d240bc636d46404a509ffb98a8dd52e4f | S = input()
K = int(input())
# Collect all distinct substrings of S with length <= K.  The K-th
# lexicographically smallest substring must itself have length <= K,
# because every proper prefix of a substring sorts strictly before it,
# so longer substrings can never appear among the first K.
substrings = set()
for length in range(1, K + 1):
    for i in range(len(S) - length + 1):
        substrings.add(S[i:i + length])
# Sort lexicographically and report the K-th (1-based) smallest.
substrings = sorted(substrings)
print(substrings[K - 1])
|
25,196 | 955eae817d37394324d65d7feacb694b858bf77d | # Generated by Django 2.1.7 on 2019-07-27 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.7): alter ``Data.jid`` to a CharField."""

    dependencies = [
        # Must run after the migration that added ``Data.jid``.
        ('send', '0004_data_jid'),
    ]

    operations = [
        migrations.AlterField(
            model_name='data',
            name='jid',
            # NOTE(review): default=0 is an int on a CharField; Django will
            # coerce it through str() — confirm '0' (string) was intended.
            field=models.CharField(default=0, max_length=20),
        ),
    ]
|
25,197 | d46a8088ef16089d10abcf0f92fa0c40d3686b10 | customer_num=5
invoice_num=1212
print("invoice No(s):")
# Emit one invoice number per customer; numbers advance by 3 from 1212.
while customer_num>0:
    print("INV-",invoice_num)
    invoice_num=invoice_num+3
    customer_num=customer_num-1 |
25,198 | 76be41943d04f80d7485e012ce9fa7055887896f | import torch
from deprecated.autograd import BoostGrad
class ImgBase(torch.nn.Module):
    """Learnable RGB image: a raw pixel tensor combined with a trainable
    3x3 colour-mixing matrix, squashed into [0, 1] on the way out."""

    def __init__(self, size: int = 224, k: float = 5.0, weight_init: float = 0.05):
        super().__init__()
        self.size = size
        self.k = k  # clamp half-range used when mapping to [0, 1]
        # Initial colour-mixing matrix (values kept verbatim).
        mixing = torch.tensor(
            [
                [-0.1409, 0.0855, -0.7620],
                [0.2596, -0.5239, 0.0996],
                [0.1653, -0.0719, 0.0889],
            ]
        )
        self.color = torch.nn.Parameter(mixing)
        # Pixel parameter initialised from scaled white noise.
        noise = torch.randn(1, 3, size, size, requires_grad=True) * weight_init
        self.w = torch.nn.Parameter(noise)

    def forward(self) -> torch.Tensor:
        """Return the colour-mixed image, mapped into [0, 1]."""
        unit_color = self.color / self.color.norm(p=2)
        # Apply the 3x3 mix per pixel: move channels last, mix, move back.
        channels_last = self.w.permute(0, 2, 3, 1)
        mixed = torch.nn.functional.linear(channels_last, unit_color)
        return self.to_rgb(mixed.permute(0, 3, 1, 2), self.k)

    def to_rgb(self, input: torch.Tensor, k: float) -> torch.Tensor:
        """Affinely map values clamped to [-k, k] onto [0, 1]."""
        return (input.clamp(-k, k) + k) / (2 * k)
class ImgBaseOld(torch.nn.Module):
"""X"""
def __init__(self, size=224, weight_init=0.05, decolorize=0.0, darken=0.0):
super().__init__()
self.decolorize = decolorize
self.darken = darken
self.w = torch.ones(1, 3, size, size, requires_grad=True) * weight_init
self.w = torch.nn.Parameter(self.w.half())
def forward(self):
return self.w
def post_process(self):
with torch.no_grad():
self.w.clamp_(0.0, 1.0)
if self.decolorize > 0.0:
self.w += self.decolorize * (
-self.w + self.w.mean(dim=1, keepdim=True).repeat(1, 3, 1, 1)
)
if self.darken > 0.0:
self.w *= 1.0 - self.darken
class ImgBaseFFT(torch.nn.Module):
    """Image parameterised in the 2-D Fourier domain.

    The learnable tensor ``w`` holds rfft2 coefficients; ``forward``
    transforms back to pixel space, routes gradients through ``BoostGrad``,
    channel-normalises, and applies a learnable 3x3 colour mix.
    NOTE(review): ``forward`` returns unclamped values while ``get_img``
    maps them to [0, 1] via ``k`` — confirm callers expect this asymmetry.
    """

    def __init__(self, size=224, k=15.0, weight_init=0.05):
        super().__init__()
        self.size = size
        self.k = k  # clamp half-range used by get_img
        self.color = torch.nn.Linear(3, 3, bias=False)
        # Initialise spectral weights from scaled white noise in pixel space.
        w = torch.fft.rfft2(
            torch.randn(1, 3, size, size, requires_grad=True) * weight_init
        )
        self.w = torch.nn.Parameter(w)
        self.act = torch.sin  # NOTE(review): assigned but never used in this class
        self.bg = BoostGrad()  # custom autograd op from deprecated.autograd
        self.norm = ChanNorm(dim=3)

    def forward(self):
        """Return the colour-mixed, channel-normalised pixel image."""
        img = torch.fft.irfft2(self.w)
        img = self.bg.apply(img)
        img = self.norm(img)
        # Per-pixel 3x3 colour mix: channels last for Linear, then back.
        img = self.color(img.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return img

    def get_img(self, size=None):
        """Like forward() but optionally resized and mapped into [0, 1].

        NOTE(review): unlike forward(), this path skips BoostGrad.
        """
        size = size if size is not None else self.size
        img = torch.fft.irfft2(self.w)
        if size != self.size:
            img = torch.nn.functional.interpolate(img, (size, size), mode="area")
        img = self.norm(img)
        img = self.color(img.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return (img.clamp(-self.k, self.k) + self.k) / (2 * self.k)
# From: https://github.com/lucidrains/stylegan2-pytorch
class ChanNorm(torch.nn.Module):
    """Per-position channel normalisation with learnable scale and bias.

    Normalises each spatial position across the channel dimension to zero
    mean / unit (biased) standard deviation, then applies ``g`` and ``b``.
    """

    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.eps = eps  # guards against division by zero
        self.g = torch.nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = torch.nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        mu = torch.mean(x, dim=1, keepdim=True)
        # unbiased=False matches the population (biased) variance.
        sigma = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt()
        normed = (x - mu) / (sigma + self.eps)
        return normed * self.g + self.b
|
25,199 | ce56969ad93b0b6ef18c2d8a304cb54375ec8422 | # Generated by Django 3.1.1 on 2020-11-17 22:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.1): repoint ``cartveiw.productID`` at
    ``products.products`` with DO_NOTHING on delete and reverse accessor
    ``p_info``."""

    dependencies = [
        ('products', '0007_auto_20201117_1701'),
        ('cartview', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cartveiw',  # sic: model name is misspelled in the app
            name='productID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='p_info', to='products.products'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.