text stringlengths 38 1.54M |
|---|
#!/usr/bin/python3
import os
import sys
# Directory every log file created by this module lives under.
FILE_LOCATION = "/var/log/hymera/"


class csvWriter():
    """Minimal line-oriented writer rooted at FILE_LOCATION."""

    def __init__(self, fileName):
        # Remember the destination path, publish it through the environment
        # for other processes, and truncate any existing file.
        self.file = FILE_LOCATION + fileName
        os.environ["FILENAME"] = FILE_LOCATION + fileName
        open(self.file, "w").close()

    def writeToFile(self, dataToWrite):
        """Append one line to the file; always returns True."""
        with open(self.file, "a") as handle:
            handle.write(dataToWrite + "\n")
        return True
if __name__ == "__main__":
    # Smoke test: create test.txt under FILE_LOCATION and append three lines.
    a = csvWriter("test.txt")
    a.writeToFile("a")
    a.writeToFile("b")
    a.writeToFile("c")
# Program: monkey_problem.py
# Class: cs131
# Date: 10/8/14
# Author: Joel Ristvedt
# Description: The Monkey Problem program solves for the number of coconuts
# in the problem in the instructions function over a range designated by the
# user. The user specifies what range to search over, how many sailors they
# want to watch suffer on the island, and whether or not they want to see
# all of the results found.
# Imports
import time
# Functions
def Instructions():
    """Prints the name of the program, story, and instructions.

    Requires none
    Returns none
    """
    # The story and usage text are user-visible output; the wording (and the
    # example range format "1337-9001") is relied on by Get_Input's parsing.
    print()
    print('Monkey Problem')
    print('Five sailors are shipwrecked on an island. Fearing a long stay')
    print('before rescue, they gather together a large pile of coconuts as')
    print('a source of nourishment. Night falls before they are able to')
    print('divide the coconuts among themselves. So the sailors agree to go')
    print('to sleep and divide the pile of coconuts in the morning. During')
    print('the night, one of the sailors wakes up and decides to make sure')
    print('that he gets his fair share of coconuts. He divides the pile into')
    print('five equal piles, one for each sailor, with one coconut left')
    print('over. he hides his pile. pushes the other four piles together and')
    print('tosses the extra coconut to a monkey that is watching him. A')
    print('little while later, a second sailor wakes up and does the same')
    print('dividing and hiding. He ends up with two extra coconuts, which he')
    print('tosses to the monkey. As the night goes by, the other sailors')
    print('wake up in turn to divide the pile of coconuts left over, the')
    print('fourth sailor has four coconuts left over but the fifth sailor')
    print('has no coconuts left over. This works out well, since by the time')
    print('the fifth sailor awakens to divide the pile, it is fairly late')
    print('and the monkey has gone to bed. In the light of day, the pile of')
    print('coconuts is much smaller, but no one points this out since each')
    print('thinks he is responsible. The sailors do one last official')
    print('division in which each sailor receives the same number of')
    print('coconuts with one coconut left over. The sailors agree to give')
    print('the extra coconut to the monkey for breakfast. Each sailor then')
    print('takes his official pile and settles in to wait for rescue.')
    print()
    print('This program will ask for a range of coconuts to search for the')
    print('possible number of coconuts in the pile at the beginning. The')
    print('program will also ask for how many sailors there are on the')
    print('island, as it can solve for a variable number of sailors. Next')
    print('the program will ask if you want to see all of the possible')
    print('number of coconuts in the range entered for the number of sailors')
    print('entered (Enter range like 1337-9001).')
    print()
def Get_Input():
    """Prompt the user for the search range, sailor count, and verbosity.

    returns coconut_test, coconut_test_limit, sailors, print_results
    coconut_test (int)       -- low end of the range to search
    coconut_test_limit (int) -- high end of the range to search
    sailors (int)            -- number of sailors on the island
    print_results (bool)     -- True when every solution should be shown
    """
    # The range is typed as "low-high" and split on the dash.
    low_text, high_text = input(
        'What is the range of coconuts to test? ').split('-')
    coconut_test = int(low_text)
    coconut_test_limit = int(high_text)
    sailors = int(input('How many sailors are on the island? '))
    answer = input('Do you want to see the successful numbers (y/n)? ')
    print_results = answer in ('y', 'Y')
    print()
    return coconut_test, coconut_test_limit, sailors, print_results
def Calculations(coconut_test, coconut_test_limit, sailors, print_results):
    """Count (and optionally print) pile sizes that satisfy the puzzle.

    requires: (int) coconut_test, (int) coconut_test_limit, (int) sailors,
              (boolean) print_results
    returns:  (int) number of working solutions in
              [coconut_test, coconut_test_limit)
    """
    results = 0
    # Try every candidate pile size in the half-open range.
    for candidate in range(coconut_test, coconut_test_limit):
        pile = candidate
        share, leftover = divmod(pile, sailors)
        feasible = True
        # Sailor k must find exactly k coconuts left over when he divides.
        for sailor in range(1, sailors):
            if leftover != sailor:
                feasible = False
                break
            pile -= leftover + share
            share, leftover = divmod(pile, sailors)
        # The final secret division also removes its share and remainder
        # (this happens unconditionally, matching the original flow).
        pile -= leftover + share
        # Last sailor must see no remainder, and the morning split must
        # leave exactly one coconut for the monkey.
        if feasible and leftover == 0 and pile % sailors == 1:
            if print_results:
                print(candidate)
            results += 1
    return results
def Print_Output(results, coconut_test, coconut_test_limit, cpu_secs,
                 wall_secs):
    """Report the solution count for the range plus timing information.

    requires: (int)results, (int)coconut_test, (int)coconut_test_limit,
              (float)cpu_secs, (float)wall_secs
    returns none
    """
    # Conjugate/pluralize the sentence to match the count; zero reads "no".
    if results == 0:
        results = 'no'
    s, are = 's', 'are'
    if results == 1:
        s, are = '', 'is'
    print()
    print('There ', are, ' ', results, ' result', s, ' within the range ',
          coconut_test, '-', coconut_test_limit, '.', sep='')
    print('CPU secs: ', '{0:.3f}'.format(cpu_secs))
    print('Elapsed secs: ', '{0:.3f}'.format(wall_secs))
    print()
def Main():
    """Drive the program: show the story, then repeatedly read input, time
    the search, and report results until the user declines to continue.

    Requires none
    Returns none
    """
    Instructions()
    done = False
    while not done:
        coconut_test, coconut_test_limit, sailors, print_results = Get_Input()
        # BUG FIX: time.clock() was deprecated in 3.3 and removed in Python
        # 3.8; time.process_time() is the supported CPU-time counter.
        wall_start = time.time()
        cpu_start = time.process_time()
        results = Calculations(coconut_test, coconut_test_limit, sailors,
                               print_results)
        wall_stop = time.time()
        cpu_stop = time.process_time()
        wall_secs = wall_stop - wall_start
        cpu_secs = cpu_stop - cpu_start
        Print_Output(results, coconut_test, coconut_test_limit, cpu_secs,
                     wall_secs)
        answer = input('Would you like to enter numbers again (y/n)? ')
        done = answer in ('n', 'N')
        print()
# Main Function
# Entry point: runs unconditionally when the script is executed.
Main()
|
# -*- coding: UTF-8 -*-
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
#from django.contrib.auth.forms import
from django.shortcuts import redirect,render,render_to_response
# from django.core.context_processors import csrf
from django import forms
import os,imghdr
import urllib.request as urllib2
from PIL import Image, ImageFont, ImageDraw
import codecs
# HTML/JS snippet appended to error responses so the browser can navigate
# back to the previous page.
BACK = """</br><script>
function back()
{
window.history.back()
}
</script>
<body>
<button onclick="back()">Go Back</button>
</body>"""
def user_exists(username):
    """Return True if a Django auth user with this username exists."""
    # .exists() lets the database answer directly instead of computing a
    # full COUNT and truth-testing it.
    return User.objects.filter(username=username).exists()
def get_next_file(username):
    """Choose the path for the user's next meme file.

    With fewer than 10 files present the next numeric filename is used;
    otherwise the oldest existing file (by ctime) is recycled.
    """
    base = "/tmp/memes/" + username + "/"
    entries = os.listdir(base)
    if len(entries) > 9:
        oldest = min(entries, key=lambda name: os.path.getctime(base + name))
        return base + oldest
    return base + str(len(entries))
def add_text(fn, fmt, text):
    """Draw white caption text at the top-left of the image and resave it."""
    image = Image.open(fn)
    caption_font = ImageFont.truetype("font.ttf", 30)
    ImageDraw.Draw(image).text((0, 0), text, (255, 255, 255),
                               font=caption_font)
    image.save(fn, format=fmt)
def logmein(request):
    """Log a user in from POSTed credentials; otherwise show the login form."""
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        # SECURITY FIX: never write the raw password to stdout/logs.
        print(username)
        user = authenticate(username=username, password=password)
        print(user)
        if user is not None:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect('/index/')
        return HttpResponse("Error: login failed"+BACK)
    return render(request, "login.html", {'auth': True})
def logmeout(request):
    """End the current session and redirect to the index page."""
    logout(request)
    return redirect('/index')
def register(request):
    """Create a new account plus its meme directory, then log the user in."""
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        # Validate the username BEFORE touching the database or filesystem:
        # path components would let a crafted name escape /tmp/memes/.
        if (".." in username) or ("/" in username):
            return HttpResponse("Error: invalid username"+BACK)
        if user_exists(username):
            return HttpResponse("Error: user exists"+BACK)
        try:
            os.mkdir("/tmp/memes/"+username)
        except OSError:  # was a bare except; only filesystem errors expected
            return HttpResponse("Error: failed to create user"+BACK)
        User.objects.create_user(username, password=password)
        user = authenticate(username=username, password=password)
        login(request, user)
        return redirect('/index')
    return render(request, "register.html")
@login_required(login_url='/login')
def makememe(request):
    """Fetch an image from a user-supplied URL, caption it, and store it."""
    username = str(request.user)
    if request.method == 'POST':
        url = request.POST['url']
        text = request.POST['text']
        # NOTE(security): fetching an arbitrary user-supplied URL is an SSRF
        # vector; a hardened deployment should whitelist hosts/schemes.
        try:
            if "http://" in url:
                image = urllib2.urlopen(url)
            else:
                image = urllib2.urlopen("http://" + url)
        except Exception:  # was a bare except
            return HttpResponse("Error: couldn't get to that URL" + BACK)
        # BUG FIX: a response without Content-Length used to raise KeyError;
        # treat a missing header as "too large" instead of crashing.
        if int(image.headers.get("Content-Length", 1024 * 1024 + 1)) > 1024 * 1024:
            return HttpResponse("File too large")
        fn = get_next_file(username)
        # BUG FIX: close the file handle instead of leaking it.
        with open(fn, "wb+") as out:
            out.write(image.read())
        add_text(fn, imghdr.what(fn), text)
    return render(request, "make.html",
                  {'files': os.listdir("/tmp/memes/" + username)})
@login_required(login_url='/login')
def viewmeme(request, meme=None):
    """Serve one of the user's memes, or list them all when none is named."""
    username = str(request.user)
    base = "/tmp/memes/" + username + "/"
    if meme is not None:
        meme = str(meme)
        # Refuse path components so a crafted name cannot escape the user dir.
        if (".." in meme) or ("/" in meme):
            return HttpResponse("Error: invalid meme name")
        filename = base + meme
        ctype = str(imghdr.what(filename))
        with open(filename, 'rb') as fp:
            data = fp.read()
        return HttpResponse(data, content_type="image/" + ctype)
    # BUG FIX: the sort key previously referenced an undefined name `bp`,
    # raising NameError whenever the listing branch ran. The unreachable
    # trailing `return HttpResponse("view"+username)` was removed.
    files = sorted(os.listdir(base),
                   key=lambda name: os.path.getctime(base + name))
    return render(request, "view.html", {'files': files})
def index(request):
    """Render the landing page, flagging whether the visitor is logged in."""
    # Debug: dump every session value attached to this request.
    print([request.session[a] for a in request.session.keys()])
    # NOTE(review): `is_authenticated()` is callable only on Django < 1.10;
    # on modern Django it is a property and the call raises TypeError —
    # confirm the target Django version.
    return render(request,"index.html",{'auth':request.user.is_authenticated()})
|
#!/usr/bin/env python
# coding=utf-8
#决策树算法
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Read the training data (space-separated, no header row).
# NOTE: raw string avoids "\d"-style invalid-escape warnings in the Windows
# path; the path value itself is unchanged.
Wdata = pd.read_csv(r'F:\learningsources\graduation project\dataset\depth_train.csv',
                    sep=' ', header=None,
                    names=["weibo_id", "user_id", "time", "emotional_level",
                           "fans_num", "at_flag", "topic_flag", "url_flag",
                           "content_length", 'time_step', 'follow_num',
                           'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9'])
# Features used to train the model.
predictors = ["emotional_level", "at_flag", "topic_flag", "url_flag",
              "content_length", "d1", "d2"]
# Train/test split: first 7000 rows train, the rest evaluate.
train_x = Wdata[predictors][:7000]
train_y = Wdata['d9'][:7000]
ground_truth = Wdata[predictors][7000:]  # typo fix: was "groud_truth"
true_value = Wdata['d9'][7000:]
# Fit a gradient-boosted regression tree model and predict the held-out rows.
clf = GradientBoostingRegressor()
clf = clf.fit(train_x, train_y)
pre_value = clf.predict(ground_truth)
# Mean absolute percentage error and the derived precision.
a = (abs(pre_value - true_value) / true_value).sum()
average_error = a / len(pre_value)
average_precision = 1 - average_error
# print('GBRT mean absolute percentage error:', average_error)
# print('GBRT mean absolute percentage precision:', average_precision)
# Plot the first 50 predictions against the true depths.
fig = plt.figure('梯度提升回归算法:50条微博', figsize=(7, 5))
ax1 = fig.add_subplot(111)
ax1.set_title('GDBTRegressor_average_precision=42.73%')
x1 = x2 = range(0, 50)
y1 = true_value[0:50]
y2 = pre_value[0:50]
plt.plot(x1, y1, c='r', label='true_value')
plt.plot(x2, y2, "b--", label='pre_value')
plt.ylabel('Depth')
plt.xlabel('WeiBo_Number')
plt.legend()
plt.show()
|
from django.contrib import admin
# from django.contrib.gis.db import models
# from mapwidgets.widgets import GooglePointFieldWidget
#
# class Port(admin.ModelAdmin):
# formfield_overrides = {
# models.PointField: {"widget": GooglePointFieldWidget}
# }
|
#!/usr/bin/env python
"""
Use: ./realign.py 1000_logs.fa alignpt.fa
"""
import sys
import fasta
import itertools
# Input FASTA files: nucleotide sequences and their protein alignment.
nuc = open(sys.argv[1])
pt = open(sys.argv[2])
doc = open("alignew.fa", "w")
# BUG FIX: itertools.izip does not exist on Python 3; the builtin zip
# provides the same lazy pairwise iteration.
for (nident, nseq), (pident, pseq) in zip(fasta.FASTAReader(nuc), fasta.FASTAReader(pt)):
    position = 0
    for p in pseq:
        if p == "-":
            # A gap in the protein alignment maps to a codon-sized gap.
            doc.write("---")
        else:
            # Emit the codon corresponding to this aligned residue.
            doc.write(nseq[position:position + 3])
            position = position + 3
    doc.write("\n")
# Close the output so the alignment is fully flushed to disk.
doc.close()
import curses
def main(stdscr):
    """Demo: print every initialized color pair plus common text attributes."""
    curses.start_color()
    curses.use_default_colors()
    stdscr.addstr(f"{curses.COLORS}\n")
    # Pair 0 is fixed by curses; define pairs 1..COLORS against the
    # terminal's default background (-1).
    for i in range(0, curses.COLORS):
        curses.init_pair(i + 1, i, -1)
    try:
        for i in range(0, 511):
            stdscr.addstr(f"{i} ", curses.color_pair(i))
        stdscr.addstr('\n')
        for i in range(0, 511):
            stdscr.addstr(f"{i} ", curses.color_pair(i) | curses.A_BOLD | curses.A_UNDERLINE)
        stdscr.addstr("\nnormal", curses.color_pair(2))
        stdscr.addstr("\nblink", curses.color_pair(2) | curses.A_BLINK)
        stdscr.addstr("\nbold", curses.color_pair(2) | curses.A_BOLD)
        stdscr.addstr("\ndim", curses.color_pair(2) | curses.A_DIM)
        stdscr.addstr("\nreverse", curses.color_pair(2) | curses.A_REVERSE)
        stdscr.addstr("\nunderline", curses.color_pair(2) | curses.A_UNDERLINE)
    except curses.error:
        # BUG FIX: curses.ERR is an int constant, not an exception class;
        # addstr raises curses.error when writing past the screen edge, and
        # `except <int>` would itself raise TypeError at that moment.
        # End of screen reached
        pass
    stdscr.getch()
curses.wrapper(main)
|
import copy
import time
import pygame
import cv2
import cv2.aruco as aruco
import numpy as np
from Dame.constant import X, Y, PLAYER1
from Dame.ai_logic import minimax
from Dame.table import draw_piece, creating_piece
from Dame.logic import execute_move
def get_piece_on_board():
    """Return an empty Y-by-X board.

    NOTE(review): both parity branches are `pass` placeholders, so this
    currently always returns an all-zero board — presumably the initial
    piece placement was meant to go here; confirm intent.
    """
    board = np.zeros([Y, X], dtype=int)
    for i in range(Y):
        for j in range(X):
            if i % 2 == 0 != j % 2:
                pass
            elif i % 2 != 0 == j % 2:
                pass
    return board
def board_vid(frame, coord):
    """Warp the camera frame to a top-down 600x600 view of the board, then
    rotate it 90 degrees about the image centre."""
    side = 600
    src = np.float32([coord[2], coord[3], coord[1], coord[0]])
    dst = np.float32([[0, 0], [side, 0], [0, side], [side, side]])
    warp = cv2.getPerspectiveTransform(src, dst)
    top_down = cv2.warpPerspective(frame, warp, (side, side))
    spin = cv2.getRotationMatrix2D((side // 2, side / 2), 90, 1)
    return cv2.warpAffine(top_down, spin, (side, side))
def detect_pieces(circles, board):
    """Mark every detected circle centre on the board with a 2.

    circles -- [[[x, y, r], ...]]: centres and radii in pixels, where each
               board square is 75px wide
    board   -- 8x8 grid, mutated in place and returned
    """
    for circle in circles[0]:
        col = int(circle[0] / 75)
        row = int(circle[1] / 75)
        # Centres exactly on the far edge (index 8) belong to square 7.
        if col == 8:
            col = 7
        if row == 8:
            row = 7
        board[row][col] = 2
    return board
def calc_bound(boxes):
    """Extract one (x, y) reference corner from each detected aruco box.

    The corner picked depends on which screen quadrant the marker's first
    corner falls in (300px is the quadrant boundary).
    """
    # NOTE: three of the four quadrant branches pick corner 2 — per the
    # original author's comment, the markers should eventually be replaced
    # with correctly oriented ones.
    coord = []
    for box in boxes:
        x0, y0 = box[0][0][0], box[0][0][1]
        corner = box[0][1] if x0 < 300 < y0 else box[0][2]
        coord.append([corner[0], corner[1]])
    return coord
def capture_piece(old_board, board):
    """Diff two 8x8 boards to recover the move the human just played.

    Returns (old_pos, new_pos, capture_pos): capture_pos is 0 for a plain
    one-square move (or when no new piece was found), otherwise the square
    of the jumped-over piece.
    """
    old_pos = []
    new_pos = []
    for row in range(8):
        for col in range(8):
            before, after = old_board[row][col], board[row][col]
            if after == 2 and before == 0:
                new_pos = [row, col]
            elif after == 0 and before == 2:
                old_pos = [row, col]
    if not new_pos:
        return old_pos, new_pos, 0
    if abs(new_pos[0] - old_pos[0]) == 1:
        # Simple diagonal step: nothing was captured.
        return old_pos, new_pos, 0
    # A jump: the captured piece sits diagonally adjacent to the origin,
    # toward the destination.
    if new_pos[0] > old_pos[0] and new_pos[1] > old_pos[1]:
        delta = (1, 1)
    elif new_pos[0] < old_pos[0] and new_pos[1] < old_pos[1]:
        delta = (-1, -1)
    elif new_pos[0] < old_pos[0] and new_pos[1] > old_pos[1]:
        delta = (-1, 1)
    else:
        delta = (1, -1)
    return old_pos, new_pos, [old_pos[0] + delta[0], old_pos[1] + delta[1]]
def aruco_marker(win):
    """Main camera loop: locate the board via aruco markers, detect pieces,
    infer the human's move, and answer with the AI's move drawn on `win`.

    win -- pygame surface the board state is drawn onto
    """
    # think about separating this function into 2
    counter = 0          # frames with detected circles since last evaluation
    # skip = 1
    old_board = board = algo_board = np.zeros([8, 8], dtype=int)
    # board = np.zeros([8, 8], dtype=int)
    algo_board = creating_piece(algo_board)
    cap = cv2.VideoCapture(1)
    coord = []
    new_coord = [[], [], [], []]   # corners ordered by screen quadrant
    status = True                  # True until the 4 board markers are found
    numberOfPieces = 12            # human pieces still on the board
    while True:
        success, img = cap.read()
        imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        arucoDict = aruco.Dictionary_get(aruco.DICT_4X4_50)
        arucoParam = aruco.DetectorParameters_create()
        boxes, ids, rejected = aruco.detectMarkers(imgGray, arucoDict, parameters=arucoParam)
        # Phase 1: find all four corner markers once, then stop looking.
        if boxes and status:
            print(boxes)
            print(len(boxes))
            aruco.drawDetectedMarkers(img, boxes)
            cv2.imshow("Video", img)
            if len(boxes) > 3:
                coord = calc_bound(boxes)
                print(coord)
                status = False
                # Sort the corners into quadrant order for board_vid().
                for coo in coord:
                    if coo[0] > 300 and coo[1] > 300:
                        new_coord[0] = coo
                    elif coo[0] < 300 < coo[1]:
                        new_coord[1] = coo
                    elif coo[0] < 300 and coo[1] < 300:
                        new_coord[2] = coo
                    else:
                        new_coord[3] = coo
        # Phase 2: once calibrated, warp each frame and look for pieces.
        if coord:
            new_img = board_vid(img, new_coord)
            grey_vid = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
            blur_vid = cv2.GaussianBlur(grey_vid, (21, 21), 1)
            circles = cv2.HoughCircles(blur_vid, cv2.HOUGH_GRADIENT, 1, 20,
                                       param1=50, param2=40, minRadius=16, maxRadius=100)
            if circles is not None:
                # print(circles)
                circles = np.uint16(np.around(circles))
                counter = counter + 1
                # print(counter)
                new_board = copy.deepcopy(board)
                board = detect_pieces(circles, new_board)
                # Draw the circles
                for i in circles[0, :]:
                    # draw the outer circle
                    cv2.circle(new_img, (i[0], i[1]), i[2], (0, 255, 0), 2)
                    # draw the center of the circle
                    cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
                # Evaluate only every 50 detections to debounce the camera.
                if counter == 50:
                    # All pieces accounted for and the position changed?
                    if np.sum(board) == numberOfPieces*2 and not (old_board == board).all():
                        old_pos, new_pos, capture_pos = capture_piece(old_board, board)
                        print(capture_pos)
                        if new_pos:
                            algo_board, xx = execute_move(old_pos, new_pos, algo_board, capture_pos)
                        # Resync the algorithm board's human pieces (value 2)
                        # with what the camera actually sees.
                        for i in range(8):
                            for j in range(8):
                                if algo_board[i][j] == 2:
                                    algo_board[i][j] = 0
                                if board[i][j] == 2:
                                    algo_board[i][j] = 2
                        draw_piece(win, algo_board)
                        pygame.display.update()
                        print(board)
                        # Let the AI answer and redraw the result.
                        a1, a2, a3, algo_board = minimax(algo_board, 8, PLAYER1)
                        draw_piece(win, algo_board)
                        pygame.display.update()
                        # Snapshot the human pieces as the new reference board.
                        old_board = np.zeros([8, 8], dtype=int)
                        piece = 0
                        for i in range(8):
                            for j in range(8):
                                if algo_board[i][j] == 2:
                                    old_board[i][j] = 2
                                    piece += 1
                        # A capture happened: pause so the player can remove
                        # the captured piece from the physical board.
                        if numberOfPieces != piece:
                            time.sleep(5)
                        numberOfPieces = piece
                    counter = 0
                    board = np.zeros([8, 8], dtype=int)
            cv2.imshow("New Video", new_img)
        # skip += 1
        if cv2.waitKey(5) == 27:  # ESC quits the loop
            break
|
# Generated by Django 2.1.7 on 2019-03-27 07:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch add_officer.id to a 15-character string primary key."""

    dependencies = [
        ('AcAdmin', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='add_officer',
            name='id',
            field=models.CharField(max_length=15, primary_key=True, serialize=False),
        ),
    ]
|
from app import app
from flask import render_template
# contendra todas nuestra vistas
@app.route("/")
def index():
    """Render the public landing page."""
    return render_template("public/index.html")
@app.route("/about")  # todo
def about():
    """Render the public about page."""
    return render_template("public/about.html")
|
# Generated by Django 2.2.3 on 2019-07-22 21:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: About, Amber, Contacts, HomePage, and Karly models."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='About',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(upload_to='static/hosts/%Y/%m/%d')),
                ('title', models.CharField(blank=True, max_length=75)),
                ('text', models.TextField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='Amber',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile', models.ImageField(upload_to='static/amber/%Y/%m/%d')),
                ('bio', models.TextField(max_length=10000)),
            ],
        ),
        migrations.CreateModel(
            name='Contacts',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('phone', models.IntegerField()),
                ('information', models.TextField(max_length=5000)),
            ],
        ),
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(upload_to='static/main/%Y/%m/%d')),
                ('blurb', models.TextField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Karly',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('profile', models.ImageField(upload_to='static/karly/%Y/%m/%d')),
                ('bio', models.TextField(max_length=10000)),
            ],
        ),
    ]
|
import cv2
def CatchFace(window_name, catch_pic_num, path_name):
    """Capture face crops from the default camera into numbered JPEGs.

    window_name   -- title of the preview window
    catch_pic_num -- stop after this many face images have been saved
    path_name     -- directory the JPEGs are written into
    """
    cv2.namedWindow(window_name)
    cap = cv2.VideoCapture(0)
    classfier = cv2.CascadeClassifier(
        r"./openCv/opencv/data/haarcascades/haarcascade_frontalface_alt2.xml"
    )
    color = (115, 233, 86)
    num = 1
    while cap.isOpened():
        try:
            ok, frame = cap.read()
            grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faceRects = classfier.detectMultiScale(
                grey, scaleFactor=1.2, minNeighbors=3, minSize=(80, 80)
            )
            if len(faceRects) > 0:
                for (x, y, w, h) in faceRects:
                    # Path for this capture: <path_name>/<num>.jpg
                    img_name = '%s/%d.jpg' % (path_name, num)
                    # Save the face with a 10px margin around the detection.
                    image = frame[y - 10: y + h + 10, x - 10: x + w + 10]
                    cv2.imwrite(img_name, image)  # save the current frame
                    num += 1
                    if num > (catch_pic_num):  # enough captures: leave the face loop
                        break
                    cv2.rectangle(
                        frame, (x - 10, y - 10), (
                            x + w + 10, y + h + 10
                        ), color, 2
                    )  # outline the detected face
                    cv2.putText(
                        frame, 'num:%d' % (num), (
                            x + 30, y + 30
                        ), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 4
                    )  # show which capture this is
            # Stop once the requested number of images has been saved.
            if num > (catch_pic_num):
                break
            cv2.imshow(window_name, frame)
            c = cv2.waitKey(10)
            if c & 0xFF == ord('q'):
                break
        except Exception:
            # BUG FIX: the original caught BaseException, which also swallowed
            # KeyboardInterrupt/SystemExit and could spin forever; Exception
            # keeps the best-effort retry without trapping interpreter exits.
            continue
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Collect 1000 positive face samples into ./faceData/posFaceData.
    CatchFace("CatchFace", 1000, './faceData/posFaceData')
|
from django.db import models
from django.contrib.auth.models import User
class Estudante(User):
    """Student account extending Django's User with an enrollment number."""
    # NOTE(review): subclassing User creates multi-table inheritance; a
    # OneToOneField profile or a custom AUTH_USER_MODEL is the usual
    # approach — confirm this is intentional.
    # matricula: the student's enrollment number.
    matricula = models.CharField(max_length=15)
|
"""
Terminator plugin to open a file using a chosen editor.
Author: michele.silva@gmail.com
License: GPLv2
"""
import inspect, os, shlex, subprocess
from terminatorlib import plugin
from terminatorlib import config
# Plugin registry: class names Terminator should instantiate from this module.
AVAILABLE = ['EditorPlugin']
# Command template: {filepath} and {line} are substituted before launch.
DEFAULT_COMMAND = '{filepath}'
DEFAULT_EDITOR = 'kate'
# Matches file names with known extensions, optionally followed by ":line".
DEFAULT_REGEX = '[^ \\t\\n\\r\\f\\v:]+?\.(ini|conf|me|txt|xml|json)[ \\n:]([0-9]+)*'
# Escape-sequence spellings as stored in config, mapped to real characters.
REPLACE = {'\\t':'\t', '\\n':'\n', '\\r':'\r', '\\f':'\f', '\\v':'\v'}
class EditorPlugin(plugin.URLHandler):
    """ Process URLs returned by commands: open matched file paths in an
    editor, or copy the open command. """
    capabilities = ['url_handler']
    handler_name = 'editorurl'
    nameopen = 'Open File'
    namecopy = 'Copy Open Command'
    match = None

    def __init__(self):
        self.plugin_name = self.__class__.__name__
        self.current_path = None
        self.config = config.Config()
        self.check_config()
        self.match = self.config.plugin_get(self.plugin_name, 'match')
        # BUG FIX: dict.iteritems() exists only on Python 2; items() works
        # on both Python 2 and 3.
        for key, val in REPLACE.items():
            self.match = self.match.replace(key, val)

    def check_config(self):
        """ Ensure the plugin's config section exists with default values. """
        updated = False
        config = self.config.plugin_get_config(self.plugin_name)
        if not config:
            config = {}
            updated = True
        if 'command' not in config:
            config['command'] = DEFAULT_COMMAND
            updated = True
        if 'editor' not in config:
            config['editor'] = DEFAULT_EDITOR
            updated = True
        if 'match' not in config:
            config['match'] = DEFAULT_REGEX
            updated = True
        if updated:
            self.config.plugin_set_config(self.plugin_name, config)
            self.config.save()

    def get_cwd(self):
        """ Return current working directory. """
        # HACK: Because the current working directory is not available to plugins,
        # we need to use the inspect module to climb up the stack to the Terminal
        # object and call get_cwd() from there.
        for frameinfo in inspect.stack():
            frameobj = frameinfo[0].f_locals.get('self')
            if frameobj and frameobj.__class__.__name__ == 'Terminal':
                return frameobj.get_cwd()
        return None

    def open_url(self):
        """ Return True if we should open the file. """
        # HACK: Because the plugin doesn't tell us we should open or copy
        # the command, we need to climb the stack to see how we got here.
        return inspect.stack()[3][3] == 'open_url'

    def callback(self, strmatch):
        """ Build (and possibly run) the editor command for a matched path. """
        strmatch = strmatch.strip(':').strip()
        filepath = os.path.join(self.get_cwd(), strmatch.split(':')[0])
        # KNOWN BUG (original note, translated from French): after e.g.
        # `ls -l /etc/`, get_cwd() may still report the home directory,
        # producing the wrong filepath.
        lineno = strmatch.split(':')[1] if ':' in strmatch else '1'
        # Generate the openurl string
        command = self.config.plugin_get(self.plugin_name, 'editor') + ' ' + self.config.plugin_get(self.plugin_name, 'command')
        command = command.replace('{filepath}', filepath)
        command = command.replace('{line}', lineno)
        # Escalate for files outside /home (e.g. system configuration).
        if filepath.find("/home/") < 0:
            command = 'kdesu ' + command
        # Check we are opening the file
        if self.open_url():
            if os.path.exists(filepath):
                subprocess.call(shlex.split(command))
            return '--version'
        return command
|
#!/usr/bin/python3
class Solution(object):
    def rightSideView(self, root):
        """Return the node values visible from the right side of the tree.

        :type root: TreeNode
        :rtype: List[int]
        """
        view = []
        level = [root] if root is not None else []
        while level:
            # The last node of each BFS level is the one seen from the right.
            view.append(level[-1].val)
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child]
        return view
#!/usr/bin/env python3
import os
import re
import sys
import pickle
import shutil
import hashlib
import tempfile
import subprocess
# list of: descr(str) fname(str) tags(list(str)) md5(str) size(int)
# In-memory archive index; populated by dbRead() and persisted by dbWrite().
database = []
def dbRead():
    """Load the pickled database list from archive.p into the module global."""
    global database
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on garbage collection).
    with open('archive.p', 'rb') as fp:
        database = pickle.load(fp)
def dbWrite():
    """Persist the module-global database list to archive.p."""
    global database
    # Context manager guarantees the file is flushed and closed.
    with open('archive.p', 'wb') as fp:
        pickle.dump(database, fp)
def dbDump(db=None):
    """Print a formatted table of database entries.

    db -- list of entry dicts; defaults to the current global database.
    BUG FIX: the old default `db=database` bound the list object at
    definition time, so entries loaded later by dbRead() (which rebinds the
    global) were invisible to a plain dbDump(). Also converted the Python 2
    print statements to print() calls to match the python3 shebang.
    """
    global database
    if db is None:
        db = database
    print('{:^4} {:^32} {:^16} {:^8} {}'.format('id', 'descr', 'fname', 'size', 'tags'))
    print('{:-^4} {:-^32} {:-^16} {:-^8} ----'.format('', '', '', ''))
    for (i, entry) in enumerate(db):
        (descr, fname, size, tags) = \
            (entry['descr'], entry['fname'], entry['size'], entry['tags'])
        # Shorten long filenames, keeping the head and extension visible.
        if len(fname) > 16:
            fname = fname[:11] + '..' + fname[-3:]
        tags = ','.join(tags)
        print('{:04d} {:<32.32} {:>16.16} {:>8} {}'.format(i, descr, fname, size, tags))
def dbTestFileExist(path):
    """Return the matching database entry for a file, or False if absent.

    Matching is two-stage: a cheap file-size comparison first, then an md5
    hash only when some entry shares the same size.
    """
    global database
    # BUG FIX: on Python 3 filter() returns a lazy iterator, so the old
    # `if not matches` test was always False and len(matches) raised;
    # materialize the matches as lists instead.
    # quickest test: filesize
    size = os.path.getsize(path)
    matches = [entry for entry in database if size == entry['size']]
    if not matches:
        return False
    # slower test: hash
    md5 = md5File(path)
    matches = [entry for entry in database if md5 == entry['md5']]
    if not matches:
        return False
    assert(len(matches) == 1)
    return matches[0]
def md5File(path):
    """Return the hex MD5 digest of a file, reading it in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as fp:
        while True:
            chunk = fp.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def askUserFileInfo(entry):
    """Let the user edit an entry's metadata in vim; return the parsed result.

    entry -- existing entry dict, or a falsy value to start from a blank one
    Returns a new entry dict parsed from the edited temp file; assumes the
    user keeps the five "key: value" lines in their original order.
    """
    if not entry:
        entry = {'descr':'', 'fname':'', 'tags':[], 'md5':'', 'size':0}
    # Render the entry as editable "key: value" lines.
    body = 'descr: %s\n' % entry['descr']
    body += 'fname: %s\n' % entry['fname']
    # NOTE(review): tags are joined with '' here but split on ',' below —
    # multiple tags round-trip only if the user re-adds the commas.
    body += 'tags: %s\n' % (''.join(entry['tags']))
    body += 'md5: %s\n' % entry['md5']
    body += 'size: %d\n' % entry['size']
    (tmp_handle, tmp_name) = tempfile.mkstemp()
    tmp_obj = os.fdopen(tmp_handle, 'w')
    tmp_obj.write(body)
    tmp_obj.close()
    # edit
    subprocess.call(["vim", '-f', tmp_name])
    # now open, encode, encrypt
    fp = open(tmp_name)
    lines = fp.readlines()
    fp.close()
    # Parse the five fixed-position fields back out of the edited file.
    m = re.match(r'^descr: (.*)$', lines[0])
    descr = m.group(1)
    m = re.match(r'^fname: (.*)$', lines[1])
    fname = m.group(1)
    m = re.match(r'^tags: (.*)$', lines[2])
    tags = m.group(1).split(',')
    m = re.match(r'^md5: (.*)$', lines[3])
    md5 = m.group(1)
    m = re.match(r'^size: (.*)$', lines[4])
    size = int(m.group(1))
    return {'descr':descr, 'fname':fname, 'tags':tags, 'md5':md5, 'size':size}
if __name__ == '__main__':
    # BUG FIX: the original used Python 2 print statements, a SyntaxError
    # under the file's python3 shebang; converted to print() calls.
    dbRead()
    if not sys.argv[1:]:
        # No arguments: just list the archive contents.
        dbDump(database)
    elif sys.argv[1] == 'addfast':
        # Copy each named file into the archive with minimal metadata.
        for path in sys.argv[2:]:
            print("adding: %s" % path)
            entry = dbTestFileExist(path)
            if entry:
                print("exists already (%s)" % entry['fname'])
            else:
                entry = {'descr':'', 'fname':os.path.basename(path), 'tags':[], 'md5':md5File(path), 'size':os.path.getsize(path)}
                database.append(entry)
                shutil.copyfile(path, os.path.normpath(os.path.join(os.getcwd(), os.path.basename(path))))
        dbWrite()
    elif sys.argv[1] == 'edit':
        # Interactively edit one record by index, then show and persist it.
        idx = int(sys.argv[2])
        entry = askUserFileInfo(database[idx])
        database[idx] = entry
        print("one record changed")
        dbDump([entry])
        dbWrite()
|
# Generated by Django 2.2.1 on 2020-04-16 08:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the Action/comment/message-board models and tighten movie/tag
    fields (FK removals first so the models can be deleted cleanly)."""

    dependencies = [
        ('user', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='actioncomment',
            name='action',
        ),
        migrations.RemoveField(
            model_name='actioncomment',
            name='user',
        ),
        migrations.RemoveField(
            model_name='boardcomment',
            name='message_board',
        ),
        migrations.RemoveField(
            model_name='boardcomment',
            name='user',
        ),
        migrations.RemoveField(
            model_name='messageboard',
            name='user',
        ),
        migrations.AlterField(
            model_name='movie',
            name='director',
            field=models.CharField(max_length=128, verbose_name='导演名称'),
        ),
        migrations.AlterField(
            model_name='movie',
            name='name',
            field=models.CharField(max_length=32, unique=True, verbose_name='电影名称'),
        ),
        migrations.AlterField(
            model_name='tags',
            name='name',
            field=models.CharField(max_length=32, unique=True, verbose_name='标签'),
        ),
        migrations.DeleteModel(
            name='Action',
        ),
        migrations.DeleteModel(
            name='ActionComment',
        ),
        migrations.DeleteModel(
            name='BoardComment',
        ),
        migrations.DeleteModel(
            name='MessageBoard',
        ),
    ]
|
"""Write an application which takes an integer number as an input (num).
Return a list of 'num' primary elements from fibonacci sequence starting from beginning
e.g: Given: fibonacci sequence "0, 1, 1, 2, 3, 5, 8, 13, 21, ..."
When: user type '4' as application input
Then: application returns [2, 3, 5, 13]"""
import time
import unittest
def fibonacci(prime_elements_num: int):
    """Return the first `prime_elements_num` primes found in the Fibonacci
    sequence 1, 2, 3, 5, 8, ...

    Raises NonIntInputException for non-int input (bools included, matching
    the original type() check) and WrongInputParameterException for 0 or
    for values above 9.
    """
    # isinstance is the idiomatic check; bool is excluded explicitly because
    # it is an int subclass but must still be rejected.
    if not isinstance(prime_elements_num, int) or isinstance(prime_elements_num, bool):
        raise NonIntInputException
    if prime_elements_num == 0:
        raise WrongInputParameterException("Input parameter should be greater than 0")
    if prime_elements_num > 9:
        raise WrongInputParameterException("Cause of performance issue, input parameter should be less than 10. "
                                           "We are working on finding the reasons and will try to keep you updated "
                                           "on the news")
    prime_elements_list = []
    first, second = 1, 2
    while len(prime_elements_list) < prime_elements_num:
        # Trial division up to sqrt(second) suffices for primality and is
        # far cheaper than the original range(2, second) scan.
        if all(second % i for i in range(2, int(second ** 0.5) + 1)):
            prime_elements_list.append(second)
        first, second = second, first + second
    return prime_elements_list
class TestFibonacci(unittest.TestCase):
    """Unit tests for fibonacci(): happy path, input validation, and a
    crude wall-clock performance budget."""

    def test_fibonacci_happy_path(self):
        # Fixed: assertTrue(a, b) only checked the truthiness of `a` and
        # silently ignored the expected list (`b` is the failure message).
        self.assertEqual(fibonacci(5), [2, 3, 5, 13, 89])

    def test_fibonacci_non_int_input_validation(self):
        self.assertRaises(NonIntInputException, lambda: fibonacci("5"))

    def test_fibonacci_zero_input_validation(self):
        with self.assertRaises(WrongInputParameterException) as error:
            fibonacci(0)
        self.assertEqual(str(error.exception), "Input parameter should be greater than 0")

    def test_fibonacci_maximum_input_validation(self):
        with self.assertRaises(WrongInputParameterException) as error:
            fibonacci(10)
        self.assertEqual(str(error.exception), "Cause of performance issue, input parameter should be less than 10. "
                                               "We are working on finding the reasons and will try to keep you updated "
                                               "on the news")

    def test_fibonacci_performance(self):
        # Wall-clock budget: the largest allowed input must finish in <50 ms.
        millis_before_test = int(round(time.time() * 1000))
        fibonacci(9)
        millis_after_test = int(round(time.time() * 1000))
        method_speed = millis_after_test - millis_before_test
        print(method_speed)
        self.assertLess(method_speed, 50)
class NullInputException(Exception):
    """Raised for a missing/None input.  NOTE(review): currently unused."""
    pass
class NonIntInputException(Exception):
    """Raised when the input is not an int."""
    pass
class WrongInputParameterException(Exception):
    """Raised when the int input is outside the accepted range (1..9)."""
    pass
if __name__ == '__main__':
    # Discover and run all TestFibonacci cases when executed as a script.
    unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
query for chatterbot
input format:
question:can be any string
output files:
anwser: result from query
Usage:
query.py -q <question> -w <way(edit_distance,tfidf)>
"""
import re
import os, sys
import pandas as pd
import traceback
import requests
import logging
import numpy as np
import json
#import synonyms
from optparse import OptionParser
from scipy import spatial
from hparams import create_hparams
from elasticsearch import Elasticsearch
from build_es_body import build_body
from ..preprocess.sent_vec import sent_vec
# Python 2-era encoding workaround (reload/setdefaultencoding no longer
# exist on Python 3; this whole script is Python 2).
reload(sys)
sys.setdefaultencoding('utf-8')
# Shared Elasticsearch cluster client used by Query.deal_data.
# Fixed: the keyword was misspelled 'timeou', so the intended 100 s request
# timeout was never applied.
es = Elasticsearch(["http://192.168.241.35:9200",
                    "http://192.168.241.46:9200",
                    "http://192.168.241.50:9201",
                    "http://192.168.241.47:9201"],
                   sniffer_timeout=200, timeout=100)
class Query(object):
    """FAQ retrieval helper: queries the 'q2a' Elasticsearch index and
    re-ranks the hits by sentence-vector cosine similarity."""

    def __init__(self):
        pass

    def es_query(self):
        """Try a strict bool query first ('must'), then a looser one ('should').

        Returns the first non-empty result (ranked hits or the fallback
        string from deal_data), or None when no label produced anything.
        Requires self.Hpamas to be set beforehand (see run()).
        """
        labels = ['must', 'should']  # fixed 'lables' typo
        for label in labels:
            print(label)  # Py3-compatible; was the Py2 statement `print label`
            body = build_body(self.Hpamas, label)
            result = self.deal_data(body)
            if result:
                return result
        return None  # made explicit; the original fell off the end

    def run(self, dic):
        """Build hyper-params from `dic` and return the query response."""
        self.Hpamas = create_hparams(json.dumps(dic))
        return self.es_query()

    def sort_sent(self, lresponse):
        """Return the top-10 hits by descending cosine similarity."""
        return sorted(lresponse, key=lambda x: x['sim_score'], reverse=True)[0:10]

    def deal_data(self, body):
        """Execute `body` against ES and attach a similarity score to each hit.

        Returns the ranked hits, or a fallback apology string when nothing
        matched (caller-visible contract, kept byte-identical).
        """
        question_vec = np.array(sent_vec(self.Hpamas.question))
        lresponse = []
        es_re = es.search(index="q2a", body=body, size=self.Hpamas.filter_size)
        if es_re['hits']['max_score']:
            res = es_re['hits']['hits']
            for line in res:
                line['_source']['body'] = body
                line['_source']['score'] = line['_score']
                line['_source']['sim_score'] = 1 - spatial.distance.cosine(question_vec, np.array(line['_source']['sent_vec']))
                lresponse.append(line['_source'])
        if len(lresponse) > 0:
            return self.sort_sent(lresponse)
        else:
            return '对不起,你所问的我不知道,正在为你转人工'
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    import logging.config
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info('task is start')
    dic = {"question":"product油耗怎么样","product":["途观",],"attribute":["油耗"]}
    ins = Query()
    # Fixed: run() already executes the ES query; the original discarded
    # its result and then called es_query() a second time, hitting
    # Elasticsearch twice for a single answer.
    response = ins.run(dic)
    # NOTE(review): if deal_data returned the fallback string instead of a
    # hit list, response[0]['question'] fails — confirm intended handling.
    print(response[0]['question'])
|
#segundo taller de diccionarios
"""
Escribir un programa que almacene un diccionario con los créditos de las asignaturas de un
curso (matemáticas, física y química), solicite al usuario los créditos de estas y después
muestre por pantalla los créditos de cada asignatura en el formato <asignatura> tiene
<créditos> créditos, donde <asignatura> es cada una de las asignaturas del curso,
y <créditos> son sus créditos. Al final debe mostrar también el número total de créditos
del curso.
""" |
from isbn_srch import isbn_srch as srch
from json import dumps
# Prompt for ISBNs until the sentinel "quit" is entered; each lookup also
# writes a JSON file named "isbn+title".
for query in iter(lambda: input("isbn: "), "quit"):
    data = srch(isbn=query, create_json=True, json_name="isbn+title")
    print(data)
print("quitting isbn search....")
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Custom exception raised by pyswitcheo"""
# Human readable http error codes
import json
from http import HTTPStatus
# Human-readable explanations for the HTTP status codes the Switcheo API
# is expected to return; anything else maps to "Unexpected error".
_ERR = {
    HTTPStatus.BAD_REQUEST: "Your request is badly formed.",
    HTTPStatus.UNAUTHORIZED: "You did not provide a valid signature.",
    HTTPStatus.NOT_FOUND: "The specified endpoint or resource could not be found.",
    HTTPStatus.NOT_ACCEPTABLE: "You requested a format that isn't json.",
    HTTPStatus.TOO_MANY_REQUESTS: "Slow down requests and use Exponential backoff timing.",
    HTTPStatus.UNPROCESSABLE_ENTITY: "Your request had validation errors.",
    HTTPStatus.INTERNAL_SERVER_ERROR: "We had a problem with our server. Try again later.",
    HTTPStatus.SERVICE_UNAVAILABLE: "We're temporarily offline for maintenance. Please try again later.",
}


class HTTPResponseError(Exception):
    """Wrapper around Exception to raise custom messages."""

    def __init__(self, response):
        # Capture everything a caller needs to debug the failed request:
        # the raw status, the server's body, and our friendly explanation.
        status = response.status_code
        self.value = {
            "code": status,
            "server_msg": response.text,
            "err_msg": _ERR.get(status, "Unexpected error"),
        }

    def __str__(self):
        # Pretty-printed, key-sorted JSON so log lines are diff-friendly.
        return json.dumps(self.value, sort_keys=True, indent=4)
|
from SimMuon.MCTruth.muonAssociatorByHitsNoSimHitsHelper_cfi import *
muonSimClassifier = cms.EDProducer("MuonSimClassifier",
muons = cms.InputTag("muons"),
trackType = cms.string("glb_or_trk"), # 'inner','outer','global','segments','glb_or_trk'
trackingParticles = cms.InputTag("mix","MergedTrackTruth"), # default TrackingParticle collection (should exist in the Event)
associatorLabel = cms.InputTag("muonAssociatorByHitsNoSimHitsHelper"),
decayRho = cms.double(200), # to classify differently decay muons included in ppMuX
decayAbsZ = cms.double(400), # and decay muons that could not be in ppMuX
linkToGenParticles = cms.bool(True), # produce also a collection of GenParticles for secondary muons
genParticles = cms.InputTag("genParticles"), # and associations to primary and secondaries
)
muonSimClassificationByHitsTask = cms.Task(
muonAssociatorByHitsNoSimHitsHelper,muonSimClassifier
)
|
# micro:bit / MakeCode servo demo: a button on P14 sweeps a servo on P0 up
# in 10-degree steps; releasing it steps back down toward 0.  The current
# angle is shown on a TM1637 4-digit display.  'kąt' is Polish for "angle".
kąt = 0
tm = TM1637.create(DigitalPin.P8, DigitalPin.P12, 2, 4)
servos.P0.set_angle(0)
def on_forever():
    global kąt
    # P14 reads 0 when the button is pressed (active-low wiring assumed —
    # TODO confirm).  P15 drives an indicator output.
    if 0 == pins.digital_read_pin(DigitalPin.P14):
        basic.show_arrow(ArrowNames.NORTH)
        pins.digital_write_pin(DigitalPin.P15, 0)
        basic.pause(100)
        # NOTE(review): no upper clamp on kąt; confirm the servo driver
        # tolerates angles above its range.
        kąt += 10
        servos.P0.set_angle(kąt)
        pins.digital_write_pin(DigitalPin.P15, 1)
    else:
        if 0 < kąt:
            basic.show_arrow(ArrowNames.SOUTH)
            pins.digital_write_pin(DigitalPin.P15, 1)
            kąt += -10
            servos.P0.set_angle(kąt)
    # Refresh the displayed angle every pass.
    tm.show_number(kąt)
basic.forever(on_forever)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an optional 'des' text field to the League model.

    The verbose_name is the UTF-8 byte string for '说明' ("description"),
    kept as a bytes literal for Python 2 compatibility.
    """
    dependencies = [
        ('league', '0002_auto_20150910_1014'),
    ]
    operations = [
        migrations.AddField(
            model_name='league',
            name='des',
            field=models.TextField(verbose_name=b'\xe8\xaf\xb4\xe6\x98\x8e', blank=True),
        ),
    ]
|
from flask import Flask, escape, request, jsonify
import json
# Module-level Flask application; the route handlers below register on it.
app = Flask(__name__) # create an app instance
@app.route('/')
def hello():
    """Greet the caller; the optional ?name=<x> query arg overrides 'World'."""
    name = request.args.get('name', 'World')
    # Fixed: the f-prefix was missing, so the literal text
    # "Hello, {escape(name)}!" was returned verbatim.
    return f'Hello, {escape(name)}!'
@app.route('/pokedex', methods=['GET'])
def view_pokedex():
    # Serialize the whole pokedex.  NOTE(review): 'pokedex' is a module-level
    # name assigned in the __main__ block; assumes it exposes .list() — confirm.
    return jsonify(pokedex.list())
@app.route('/<name>', methods=['GET'])
def view_pokemon(name):
    # Look up one entry by URL path segment.  Presumably an unknown name
    # raises KeyError (surfacing as HTTP 500) — no 404 handling here.
    return jsonify(pokedex[name].list())
if __name__ == '__main__':  # fixed: 'main' never equals __name__, so the app never started
    # Fixed: flask.request has no .get() for fetching files; load the local
    # JSON instead.  NOTE(review): the handlers call .list() on pokedex and
    # its entries — confirm what pokemon.json deserializes to.
    with open('pokemon.json') as f:
        pokedex = json.load(f)
    app.run(debug=True)
|
#to find the minimum no from list
def findmin(lst):
    """Print the minimum element of `lst` and also return it.

    Returning the value (new, backward-compatible) makes the function
    usable programmatically, not just via stdout.  Raises ValueError for
    an empty list, as min() does.
    """
    smallest = min(lst)
    print("The minimum no ", smallest)
    return smallest
# Read a count, then that many integers, echo the list and report its minimum.
numbers = []
count = int(input("Enter the limits"))
for _ in range(count):
    numbers.append(int(input()))
print(numbers)
findmin(numbers)
class ListNode(object):
    """A single node of a singly linked list."""

    def __init__(self, x):
        # Payload plus forward pointer; a freshly built node is always a tail.
        self.val, self.next = x, None
# Demo/sentinel node carrying no value.
a = ListNode(None)
"""
#!-*- coding=utf-8 -*-
@author: BADBADBADBADBOY
@contact: 2441124901@qq.com
@software: PyCharm Community Edition
@file: prune.py
@time: 2020/6/27 10:23
"""
import sys
sys.path.append('/home/aistudio/external-libraries')
from models.DBNet import DBNet
import torch
import torch.nn as nn
import numpy as np
import collections
import torchvision.transforms as transforms
import cv2
import os
import argparse
import math
from PIL import Image
from torch.autograd import Variable
def resize_image(img, short_side=736):
    """Scale `img` so its shorter edge becomes `short_side`.

    The longer edge keeps the aspect ratio and is rounded up to the nearest
    multiple of 32 (presumably a stride-32 network requirement — confirm).
    """
    height, width, _ = img.shape
    if height < width:
        target_h, target_w = short_side, int(math.ceil(short_side / height * width / 32) * 32)
    else:
        target_h, target_w = int(math.ceil(short_side / width * height / 32) * 32), short_side
    return cv2.resize(img, (target_w, target_h))
def prune(args):
    """Channel-prune a trained DBNet by BatchNorm scale (gamma) magnitude.

    Pipeline: run the original checkpoint once on args.img_file (sanity
    output 're.jpg'); rank all BN gammas; keep the strongest channels,
    rounded to multiples of args.base_num; OR-merge the masks of layers that
    must share a channel count (residual branches); rewrite layer sizes on a
    fresh DBNet; copy the surviving weights over; re-run inference
    ('re1.jpg'); and save masks + state dict under args.save_prune_model_path.

    NOTE(review): the merge*/conv_change index lists below are hard-coded
    for the resnet50 backbone layout — confirm before using another backbone.
    """
    # --- load and preprocess the probe image (ImageNet normalization) ---
    img = cv2.imread(args.img_file)
    img = resize_image(img)
    img = Image.fromarray(img)
    img = img.convert('RGB')
    img = transforms.ToTensor()(img)
    img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
    img = Variable(img.cuda()).unsqueeze(0)
    # --- restore the original model and dump a reference prediction ---
    model = DBNet(args.backbone, adaptive=False).cuda()
    model_dict = torch.load(args.checkpoint)['state_dict']
    state = model.state_dict()
    for key in state.keys():
        if key in model_dict.keys():
            state[key] = model_dict[key]
    model.load_state_dict(state)
    model.eval()
    with torch.no_grad():
        out = model(img)
    cv2.imwrite('re.jpg',out[0,0].cpu().numpy()*255)
    # --- global threshold: the cut_percent-quantile of all |gamma| values ---
    bn_weights = []
    for m in model.modules():
        if (isinstance(m, nn.BatchNorm2d)):
            bn_weights.append(m.weight.data.abs().clone())
    bn_weights = torch.cat(bn_weights, 0)
    sort_result, sort_index = torch.sort(bn_weights)
    thresh_index = int(args.cut_percent * bn_weights.shape[0])
    if (thresh_index == bn_weights.shape[0]):
        thresh_index = bn_weights.shape[0] - 1
    # --- per-BN keep-masks; 'prued' counts channels removed overall ---
    prued = 0
    prued_mask = []
    bn_index = []
    conv_index = []
    remain_channel_nums = []
    for k, m in enumerate(model.modules()):
        if (isinstance(m, nn.BatchNorm2d)):
            bn_weight = m.weight.data.clone()
            mask = bn_weight.abs().gt(sort_result[thresh_index])
            remain_channel = mask.sum()
            # Never prune a layer to zero channels: keep the strongest one.
            if (remain_channel == 0):
                remain_channel = 1
                mask[int(torch.argmax(bn_weight))] = 1
            v = 0
            n = 1
            # Round the surviving channel count to the nearest multiple of
            # args.base_num (hardware-friendly widths), then rebuild the mask
            # from the top-|gamma| channels of this layer.
            if (remain_channel % args.base_num != 0):
                if (remain_channel > args.base_num):
                    while (v < remain_channel):
                        n += 1
                        v = args.base_num * n
                    if (remain_channel - (v - args.base_num) < v - remain_channel):
                        remain_channel = v - args.base_num
                    else:
                        remain_channel = v
                    if (remain_channel > bn_weight.size()[0]):
                        remain_channel = bn_weight.size()[0]
                    remain_channel = torch.tensor(remain_channel)
                    result, index = torch.sort(bn_weight)
                    mask = bn_weight.abs().ge(result[-remain_channel])
            remain_channel_nums.append(int(mask.sum()))
            prued_mask.append(mask)
            bn_index.append(k)
            prued += mask.shape[0] - mask.sum()
        elif (isinstance(m, nn.Conv2d)):
            conv_index.append(k)
    print(remain_channel_nums)
    print('total_prune_ratio:', float(prued) / bn_weights.shape[0])
    print(bn_index)
    new_model = DBNet(args.backbone, adaptive=False).cuda()
    # BN module indices whose outputs are summed in the same residual stage
    # and therefore must share one channel mask (resnet50 layer1..layer4).
    merge1_index = [13, 17, 24, 32]
    merge2_index = [41, 45, 52, 60, 68]
    merge3_index = [77, 81, 88, 96, 104, 112, 120]
    merge4_index = [129, 133, 140, 148]
    index_0 = []
    for item in merge1_index:
        index_0.append(bn_index.index(item))
    mask1 = prued_mask[index_0[0]] | prued_mask[index_0[1]] | prued_mask[index_0[2]] | prued_mask[index_0[3]]
    index_1 = []
    for item in merge2_index:
        index_1.append(bn_index.index(item))
    mask2 = prued_mask[index_1[0]] | prued_mask[index_1[1]] | prued_mask[index_1[2]] | prued_mask[index_1[3]] | prued_mask[
        index_1[4]]
    index_2 = []
    for item in merge3_index:
        index_2.append(bn_index.index(item))
    mask3 = prued_mask[index_2[0]] | prued_mask[index_2[1]] | prued_mask[index_2[2]] | prued_mask[index_2[3]] | prued_mask[
        index_2[4]] | prued_mask[index_2[5]] | prued_mask[index_2[6]]
    index_3 = []
    for item in merge4_index:
        index_3.append(bn_index.index(item))
    mask4 = prued_mask[index_3[0]] | prued_mask[index_3[1]] | prued_mask[index_3[2]] | prued_mask[index_3[3]]
    # Broadcast each merged (union) mask back onto its member layers.
    for index in index_0:
        prued_mask[index] = mask1
    for index in index_1:
        prued_mask[index] = mask2
    for index in index_2:
        prued_mask[index] = mask3
    for index in index_3:
        prued_mask[index] = mask4
    print(new_model)
    ##############################################################
    # --- resize the fresh model's BN/Conv layers to the pruned widths ---
    index_bn = 0
    index_conv = 0
    bn_mask = []
    conv_in_mask = []
    conv_out_mask = []
    for m in new_model.modules():
        if (isinstance(m, nn.BatchNorm2d)):
            m.num_features = prued_mask[index_bn].sum()
            bn_mask.append(prued_mask[index_bn])
            index_bn += 1
        elif (isinstance(m, nn.Conv2d)):
            # A conv's input mask is the previous BN's output mask; the very
            # first conv takes the 3 RGB planes.
            if(index_conv == 0):
                m.in_channels = 3
                conv_in_mask.append(torch.ones(3))
            else:
                m.in_channels = prued_mask[index_conv - 1].sum()
                conv_in_mask.append(prued_mask[index_conv - 1])
            m.out_channels = prued_mask[index_conv].sum()
            conv_out_mask.append(prued_mask[index_conv])
            index_conv += 1
        # Stop before the detection-head layers (last few BNs stay intact).
        if (index_bn > len(bn_index) - 3):
            break
    # Convs fed by a residual merge point get that merge's mask as input.
    conv_change_index = [16,44,80,132] #
    change_conv_bn_index = [3,32,68,120] #
    tag = 0
    for m in new_model.modules():
        if (isinstance(m, nn.Conv2d)):
            if(tag in conv_change_index):
                index = conv_change_index.index(tag)
                index = change_conv_bn_index[index]
                index =bn_index.index(index)
                mask = prued_mask[index]
                conv_in_mask[index+4] = mask
                m.in_channels = mask.sum()
            tag+=1
    # --- copy surviving weights from the old model into the new one ---
    bn_i = 0
    conv_i = 0
    scale_i = 0
    scale_mask = [mask4,mask3,mask2,mask1]
    for [m0, m1] in zip(model.modules(), new_model.modules()):
        if (bn_i > len(bn_mask)-1):
            # Past the pruned backbone: the 4 FPN lateral convs take the
            # merged stage masks as input; everything after is copied as-is.
            if isinstance(m0, nn.Conv2d):
                # import pdb
                # pdb.set_trace()
                if(scale_i<4):
                    m1.in_channels = scale_mask[scale_i].sum()
                    idx0 = np.squeeze(np.argwhere(np.asarray(scale_mask[scale_i].cpu().numpy())))
                    idx1 = np.squeeze(np.argwhere(np.asarray(torch.ones(256).cpu().numpy())))
                    if idx0.size == 1:
                        idx0 = np.resize(idx0, (1,))
                    if idx1.size == 1:
                        idx1 = np.resize(idx1, (1,))
                    w = m0.weight.data[:, idx0, :, :].clone()
                    m1.weight.data = w[idx1, :, :, :].clone()
                    if m1.bias is not None:
                        m1.bias.data = m0.bias.data[idx1].clone()
                else:
                    m1.weight.data = m0.weight.data.clone()
                    if m1.bias is not None:
                        m1.bias.data = m0.bias.data.clone()
                scale_i+=1
        else:
            if isinstance(m0, nn.BatchNorm2d):
                # Copy only the kept channels' gamma/beta and running stats.
                idx1 = np.squeeze(np.argwhere(np.asarray(bn_mask[bn_i].cpu().numpy())))
                if idx1.size == 1:
                    idx1 = np.resize(idx1, (1,))
                m1.weight.data = m0.weight.data[idx1].clone()
                if m1.bias is not None:
                    m1.bias.data = m0.bias.data[idx1].clone()
                m1.running_mean = m0.running_mean[idx1].clone()
                m1.running_var = m0.running_var[idx1].clone()
                bn_i += 1
            elif isinstance(m0, nn.Conv2d):
                # Slice the 4-D kernel on both input (idx0) and output (idx1)
                # channel axes.
                if (isinstance(conv_in_mask[conv_i], list)):
                    idx0 = np.squeeze(np.argwhere(np.asarray(torch.cat(conv_in_mask[conv_i], 0).cpu().numpy())))
                else:
                    idx0 = np.squeeze(np.argwhere(np.asarray(conv_in_mask[conv_i].cpu().numpy())))
                idx1 = np.squeeze(np.argwhere(np.asarray(conv_out_mask[conv_i].cpu().numpy())))
                if idx0.size == 1:
                    idx0 = np.resize(idx0, (1,))
                if idx1.size == 1:
                    idx1 = np.resize(idx1, (1,))
                w = m0.weight.data[:, idx0, :, :].clone()
                m1.weight.data = w[idx1, :, :, :].clone()
                if m1.bias is not None:
                    m1.bias.data = m0.bias.data[idx1].clone()
                conv_i += 1
    # --- sanity inference on the pruned model and checkpoint export ---
    print(new_model)
    new_model.eval()
    with torch.no_grad():
        out = new_model(img)
        print(out.shape)
    cv2.imwrite('re1.jpg',out[0,0].cpu().numpy()*255)
    save_obj = {'prued_mask': prued_mask, 'bn_index': bn_index, 'state_dict': new_model.state_dict()}
    torch.save(save_obj, os.path.join(args.save_prune_model_path, 'pruned_dict.pth.tar'))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperparams')
    # NOTE(review): the layer indices inside prune() are hard-coded for
    # resnet50; other backbones will mis-merge masks.
    parser.add_argument('--backbone', nargs='?', type=str, default='resnet50')
    parser.add_argument('--num_workers', nargs='?', type=int, default=0,
                        help='num workers to train')
    parser.add_argument('--base_num', nargs='?', type=int, default=8,
                        help='Base after Model Channel Clipping')
    parser.add_argument('--cut_percent', nargs='?', type=float, default=0.9,
                        help='Model channel clipping scale')
    parser.add_argument('--checkpoint', default='./checkpoints/DB_resnet50_bs_16_ep_1200/DB.pth.tar',
                        type=str, metavar='PATH',
                        help='ori model path')
    parser.add_argument('--save_prune_model_path', default='./pruned/checkpoints/', type=str, metavar='PATH',
                        help='pruned model path')
    parser.add_argument('--img_file',
                        default='/home/aistudio/work/data/icdar/test_img/img_10.jpg',
                        type=str,
                        help='')
    args = parser.parse_args()
    prune(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
'''
Takes the output of ipmitool and creates a Zabbix template from it.
'''
__author__ = "Tom Walsh"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
from pyghmi import exceptions, ipmi
from logzero import logger
from datetime import datetime
from collections import OrderedDict
from yattag import Doc, indent
import sys
import os
# Shared yattag document builder used by main() to emit the template XML.
doc, tag, text = Doc().tagtext()
# Only sensors of these types become template items.
sensors = ["Fan", "Temperature", "Voltage"]
# sensor type (lower-cased) -> Zabbix application name; filled by main().
templates = {}
# sensor name (lower-cased) -> decoded sensor reading; filled by main().
ipmidata = {}
# Default value for every <item> child tag, in the exact order the Zabbix
# 4.0 template schema expects; None produces an empty self-closing tag.
# NOTE(review): type '12' appears to be the Zabbix IPMI-agent item type —
# confirm against the 4.0 export schema.
itemdefaults = OrderedDict()
itemdefaults['type'] = '12'
itemdefaults['snmp_community'] = None
itemdefaults['snmp_oid'] = None
itemdefaults['delay'] = '240'
itemdefaults['history'] = '90d'
itemdefaults['trends'] = '365d'
itemdefaults['status'] = '0'
itemdefaults['value_type'] = '0'
itemdefaults['allowed_hosts'] = None
itemdefaults['snmpv3_contextname'] = None
itemdefaults['snmpv3_securityname'] = None
itemdefaults['snmpv3_securitylevel'] = '0'
itemdefaults['snmpv3_authprotocol'] = '0'
itemdefaults['snmpv3_authpassphrase'] = None
itemdefaults['snmpv3_privprotocol'] = '0'
itemdefaults['snmpv3_privpassphrase'] = None
#itemdefaults['snmpv3_passphrase'] = None
#itemdefaults['formula'] = '1'
#itemdefaults['delay_flex'] = None
itemdefaults['params'] = None
itemdefaults['authtype'] = '0'
itemdefaults['username'] = None
itemdefaults['password'] = None
itemdefaults['publickey'] = None
itemdefaults['privatekey'] = None
itemdefaults['port'] = None
itemdefaults['description'] = None
itemdefaults['inventory_link'] = '0'
itemdefaults['valuemap'] = None
itemdefaults['logtimefmt'] = None
itemdefaults['preprocessing'] = None
itemdefaults['jmx_endpoint'] = None
itemdefaults['timeout'] = '3s'
itemdefaults['url'] = None
itemdefaults['query_fields'] = None
itemdefaults['posts'] = None
itemdefaults['status_codes'] = '200'
itemdefaults['follow_redirects'] = '1'
itemdefaults['post_type'] = '0'
itemdefaults['http_proxy'] = None
itemdefaults['headers'] = None
itemdefaults['retrieve_mode'] = '0'
itemdefaults['request_method'] = '0'
itemdefaults['output_format'] = '0'
itemdefaults['allow_traps'] = '0'
itemdefaults['ssl_cert_file'] = None
itemdefaults['ssl_key_file'] = None
itemdefaults['ssl_key_password'] = None
itemdefaults['verify_peer'] = '0'
itemdefaults['verify_host'] = '0'
itemdefaults['master_item'] = None
def main(args):
    """Read IPMI sensors from args.host and emit a Zabbix 4.0 XML template.

    One <item> is generated per Fan/Temperature/Voltage sensor; the result
    is printed to stdout or written to the file given by --write.
    """
    import pyghmi.ipmi.command
    import pyghmi.ipmi.sdr
    import pyghmi.exceptions
    try:
        logger.info(args)
        connect = pyghmi.ipmi.command.Command(bmc=args.host, userid=args.user, password=args.password, onlogon=None, kg=None)
    except pyghmi.exceptions.IpmiException as error_name:
        logger.error("Can't connect to IPMI: " + str(error_name))
        # Fixed: without this exit, execution fell through and crashed later
        # on the undefined 'connect'.
        exit(1)
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; still a top-level boundary.
        logger.exception("Unexpected exception")
        exit(1)
    # Collect decoded sensor readings, keyed by lower-cased sensor name.
    sdr = pyghmi.ipmi.sdr.SDR(connect)
    for number in sdr.get_sensor_numbers():
        rsp = connect.raw_command(command=0x2d, netfn=4, data=(number,))
        if 'error' in rsp:
            continue
        reading = sdr.sensors[number].decode_sensor_reading(rsp['data'])
        if reading is not None:
            ipmidata[reading.name.lower()] = reading
            if reading.type.lower() not in templates.keys() and reading.type in sensors:
                templates[reading.type.lower()] = reading.type
    # Build the template XML.  Fixed Python-2-only iteritems()/iterkeys()
    # and the fragile 'is' string comparisons (which only worked through
    # CPython string interning).
    with tag('zabbix_export'):
        with tag('version'):
            text("4.0")
        with tag('date'):
            text(datetime.now().replace(microsecond=0).isoformat()+'Z')
        with tag('groups'):
            with tag('group'):
                with tag('name'):
                    text('Templates')
        with tag('templates'):
            with tag('template'):
                with tag('template'):
                    text(args.name)
                with tag('name'):
                    text(args.name)
                with tag('description'):
                    pass
                with tag('groups'):
                    with tag('group'):
                        with tag('name'):
                            text('Templates')
                with tag('applications'):
                    for key, value in templates.items():
                        with tag('application'):
                            with tag('name'):
                                text(value)
                with tag('items'):
                    for key in sorted(ipmidata.keys()):
                        if ipmidata[key].type in sensors:
                            with tag('item'):
                                with tag('name'):
                                    text(ipmidata[key].name)
                                # Non-default tags are interleaved after
                                # specific defaults to preserve the schema's
                                # required tag ordering.
                                for itemkey, itemdefault in itemdefaults.items():
                                    if itemdefault is None:
                                        doc.stag(itemkey)
                                    else:
                                        with tag(itemkey):
                                            text(itemdefault)
                                    if itemkey == 'snmp_oid':
                                        with tag('key'):
                                            keydata = ipmidata[key].type + '.' + key.replace(' ', '_')
                                            keydata = keydata.replace('+','plus')
                                            if args.namespace is not None:
                                                keydata = args.namespace + '.' + keydata
                                            text('ipmi.sensor.' + keydata.lower())
                                    if itemkey == 'allowed_hosts':
                                        with tag('units'):
                                            # NOTE(review): '\xc2\xb0' matched the
                                            # UTF-8 degree-sign bytes under Python 2;
                                            # re-check this replacement under Python 3
                                            # str semantics.
                                            text(ipmidata[key].units.replace('\xc2\xb0', ''))
                                    if itemkey == 'params':
                                        with tag('ipmi_sensor'):
                                            text(ipmidata[key].name)
                                    if itemkey == 'inventory_link':
                                        with tag('applications'):
                                            with tag('application'):
                                                with tag('name'):
                                                    text(ipmidata[key].type)
                with tag('discovery_rules'):
                    pass
                with tag('httptests'):
                    pass
                with tag('macros'):
                    pass
                with tag('templates'):
                    pass
                with tag('screens'):
                    pass
    result = indent(
        doc.getvalue(),
        indentation = ' '*4,
        newline = '\n'
    )
    result = '<?xml version="1.0" encoding="UTF-8"?>' + os.linesep + result
    if args.write is not None:
        # Context manager guarantees the file is closed even on write errors.
        with open(args.write, "w") as tf:
            tf.write(result)
    else:
        print(result)
if __name__ == "__main__":
""" This is executed when run from the command line """
parser = argparse.ArgumentParser(description="Parses ipmitool output and generates Zabbix XML templates")
parser.add_argument("-H", "--host", action="store", dest="host", help="IPMI host to query", required=True)
parser.add_argument("-u", "--user", action="store", dest="user", help="IPMI user to login", required=True)
parser.add_argument("-p", "--password", action="store", dest="password", help="IPMI password to login", required=True)
parser.add_argument("-t", "--type", action="store", dest="type", default="lanplus", help="IPMI interface type")
parser.add_argument("--name", action="store", dest="name", default="IPMI Template", help="Name of the IPMI template")
parser.add_argument("--write", action="store", dest="write", default=None, help="Write XML output to this file")
parser.add_argument("--namespace", action="store", dest="namespace", default=None, help="The namespace for the item in Zabbix")
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="Verbosity (-v, -vv, etc)")
# Specify output of "--version"
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s (version {version})".format(version=__version__))
args = parser.parse_args()
main(args)
|
# Generated by Django 2.2.2 on 2019-06-27 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the 'Alimentos' nutrition-facts table
    (macronutrients in grams, energy in kcal, edible portion in grams)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Alimentos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('proteinas', models.FloatField(verbose_name='Proteinas (gr)')),
                ('grasas', models.FloatField(verbose_name='Grasas (gr)')),
                ('carbohidratos', models.FloatField(verbose_name='Carbohidratos (gr)')),
                ('kilocalorias', models.FloatField(verbose_name='Kilocalorias (kcal)')),
                ('porcion_comestible', models.FloatField(verbose_name='Porción comestible (gr)')),
            ],
        ),
    ]
|
import re
import sys
import time
import paramiko
import logging
from subprocess import Popen
from rackops.oob.base import OobBase
class Dell(OobBase):
    """Out-of-band management for Dell iDRAC hosts.

    Every operation is a racadm command executed on the iDRAC over SSH;
    most methods simply print the raw command output.
    """

    def console(self):
        """Open a console to the iDRAC using the external 'moob' Ruby gem."""
        ipmi_host = self.oob_info["ipmi"]
        try:
            Popen(['moob', '-u', '{}'.format(self.username),
                   '-p', '{}'.format(self.password), '-m', ipmi_host.replace("https://", "")])
        except OSError:
            print('Please run "gem install moob"')
            sys.exit(10)

    def _ssh(self, command):
        """Run `command` on the iDRAC over SSH and return decoded stdout.

        Stderr is drained (to keep the channel moving) but discarded.
        NOTE(review): the receive loop busy-polls without sleeping —
        acceptable for short racadm commands, wasteful for long ones.
        """
        nbytes = 4096
        port = 22
        hostname = self.oob_info["ipmi"].replace("https://", "")
        username = self.username
        password = self.password
        client = paramiko.Transport((hostname, port))
        client.connect(username=username, password=password)
        stdout_data = []
        stderr_data = []
        session = client.open_channel(kind='session')
        session.exec_command(command)
        while True:
            if session.recv_ready():
                stdout_data.append(session.recv(nbytes))
            if session.recv_stderr_ready():
                stderr_data.append(session.recv_stderr(nbytes))
            if session.exit_status_ready():
                break
        output = b''.join(stdout_data)
        session.close()
        client.close()
        return output.decode("utf-8")

    def _find_jid(self, output):
        """Extract the racadm job id (JID_...) from `output`, or exit(10)."""
        try:
            return re.search(r"JID_.*", output).group(0)
        except AttributeError:
            print("No Job ID found.\nCommand output: ", output)
            sys.exit(10)

    def _confirm_job(self, jid):
        """Exit(10) unless the jobqueue output reports successful completion."""
        try:
            re.search(r"Job completed successfully", jid).group(0)
        except AttributeError:
            print("Job did not complete successfully.\nCommand output: ", jid)
            sys.exit(10)

    def diagnostics(self):
        """Collect a TSR report and export it to the configured NFS share."""
        jobqueue_view = 'racadm jobqueue view -i {}'
        output = self._ssh('racadm techsupreport collect')
        jid = self._find_jid(output)
        logging.info("Sleeping for 3 minutes to collect the TSR report")
        time.sleep(180)  # wait 3 minutes to collect the TSR report
        view_output = self._ssh(jobqueue_view.format(jid))
        self._confirm_job(view_output)
        output = self._ssh('racadm techsupreport export -l {}'.format(self.nfs_share))
        jid = self._find_jid(output)
        view_output = self._ssh(jobqueue_view.format(jid))
        self._confirm_job(view_output)

    def autoupdate(self):
        """Enable iDRAC auto-update and schedule a daily 08:30 catalog check."""
        # Fixed: a space was missing between '08:30' and '-dom', so the
        # scheduler received the invalid argument '-time 08:30-dom'.
        schedule_updates = ("racadm autoupdatescheduler create -l {} "
                            "-f grnet_1.00_Catalog.xml -a 0 -time 08:30 "
                            "-dom * -wom * -dow * -rp 1").format(self.http_share)
        enable_updates = 'racadm set lifecycleController.lcattributes.AutoUpdate Enabled'
        enable_updates_output = self._ssh(enable_updates)
        schedule_updates_output = self._ssh(schedule_updates)
        print(enable_updates_output)
        print(schedule_updates_output)

    def upgrade(self):
        """Start a firmware update from the catalog on the HTTP share."""
        # NOTE(review): str.strip('http:/') strips a *character set*, not a
        # prefix — it also removes trailing '/' and would mangle a host name
        # beginning with h/t/p.  Confirm the expected http_share format.
        http_addr = self.http_share.strip('http:/')
        upgrade = 'racadm update -f grnet_1.00_Catalog.xml -e {} -t HTTP -a FALSE'.format(http_addr)
        self._ssh(upgrade)  # removed unused 'output =' binding

    def idrac_info(self):
        """Print iDRAC firmware and BIOS system information."""
        firm_info = 'racadm get idrac.info'
        bios_info = 'racadm get bios.sysinformation'
        print(self._ssh(firm_info))
        print(self._ssh(bios_info))

    def clear_autoupdate(self):
        """Remove any scheduled automatic updates."""
        print(self._ssh('racadm autoupdatescheduler clear'))

    def flush_jobs(self):
        """Delete every job in the iDRAC job queue."""
        print(self._ssh('racadm jobqueue delete --all'))

    def pdisks_status(self):
        """Print detailed physical-disk status."""
        print(self._ssh('racadm storage get pdisks -o'))

    def storage_status(self):
        """Print the overall storage rollup status."""
        print(self._ssh('racadm storage get status'))

    def controllers_status(self):
        """Print detailed storage-controller status."""
        print(self._ssh('racadm storage get controllers -o'))
|
#!/usr/bin/env python
# coding: utf-8
# #PANDA INTRODUCTION
# In[1]:
# Exploratory notebook export over the Salaries dataset.  get_ipython() is
# only defined when this file is executed through IPython/Jupyter.
get_ipython().system('pip install pandas')
# In[2]:
import pandas as pd
# In[3]:
df = pd.read_csv("http://rcs.bu.edu/examples/python/data_analysis/Salaries.csv")
# In[7]:
df.head()
# In[11]:
df.head(10)
# In[12]:
df.head(20)
# In[13]:
df.head(50)
# In[14]:
df.dtypes
# In[15]:
df.columns
# In[16]:
df.axes
# In[17]:
df.ndim
# In[18]:
df.size
# In[19]:
df.shape
# In[20]:
df.values
# In[21]:
df.size
# In[22]:
df.columns
# In[25]:
len(df.columns)
# In[24]:
# In[26]:
df.columns
# In[28]:
df.dtypes
# In[30]:
# Fixed: DataFrame has no 'row' attribute; len(df.index) is the row count.
len(df.index)
# In[31]:
# Fixed: 'df.count' without parentheses is just the bound method object.
df.count()
# In[36]:
total_rows = df['rank'].count()
# In[38]:
total_rows
# In[42]:
dir(df)
# In[43]:
df.describe()
# In[44]:
# NOTE(review): max()/min() over a mixed-dtype frame may warn or raise on
# newer pandas — confirm the installed version tolerates it.
df.max()
# In[45]:
df.min()
# In[50]:
df.sample(10, random_state=5)
# In[51]:
df.describe()
# In[52]:
df.std()
# In[55]:
df1 = df.head(50).mean()
# In[56]:
df1
# In[57]:
# NOTE(review): df.rank is the DataFrame.rank *method*, shadowing the
# 'rank' column; use df['rank'] for the column (as the next cell does).
df.rank
# In[58]:
df['rank']
# In[59]:
df.salary
# In[60]:
df['salary']
# In[61]:
df[['salary']]
# In[62]:
df2 = df["phd"]
# In[68]:
df2.describe()
# In[71]:
df.phd.count()
# In[66]:
# (removed: the bare name 'rows' was never defined — it raised NameError)
# In[72]:
df.phd.mean()
# In[78]:
df[(df['salary']>120000) & (df['sex']== "Female")]
# In[79]:
df.iloc[1:4,0:2]
# In[80]:
df.iloc[0:10,0:4]
# In[94]:
df.groupby('rank')[['salary']].mean()
# In[86]:
df.groupby('rank')[['salary','phd']].mean()
# In[88]:
# Fixed: the closing bracket was missing, making the whole file a SyntaxError.
df[df['salary']>100000]
# In[89]:
import matplotlib.pyplot as plt
# In[90]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[91]:
x = [-3,5,7 ]
# In[92]:
y = [10, 2, 5]
# In[93]:
import seaborn as sns
# In[ ]:
|
import os
import random
import numpy as np
import pickle
from . import dataset_split
from .constants import VNET_INPUT_KEYS, VNET_OUTPUT_KEYS, PRIME_VNET_OUTPUT_KEYS
from pathlib import Path
from tqdm import tqdm
import shutil
class ScorePerformPairData:
    """Pairs one score (MusicXML piece) with one performance (MIDI) and
    gathers everything downstream feature code needs from both."""
    def __init__(self, piece, perform):
        # Source file locations and display names.
        self.piece_path = piece.xml_path
        self.perform_path = perform.midi_path
        self.piece_name = Path(piece.xml_path).name
        self.perform_name = Path(perform.midi_path).name
        # Note-graph edges of the score.
        self.graph_edges = piece.notes_graph
        # Merged feature dict: score features first; performance features
        # win on key collisions.
        self.features = {**piece.score_features, **perform.perform_features}
        # 'train'/'valid'/'test'; assigned later by DataGenerator.
        self.split_type = None
        self.features['num_notes'] = piece.num_notes
        # Score-side initial tempo, kept separately so it can be restored
        # into features for test-split pairs.
        self.score_qpm_primo = piece.score_features['qpm_primo']
        self.performance_beat_tempos = perform.beat_tempos
        self.performance_measure_tempos = perform.measure_tempos
class PairDataset:
    """Flat collection of ScorePerformPairData built from a dataset's
    pieces and their performances."""

    def __init__(self, dataset):
        self.dataset_path = dataset.path
        self.data_pairs = []
        self.feature_stats = None   # filled in by DataGenerator
        self.index_dict = None      # filled in when features are exported
        self._initialize_data_pairs(dataset)

    def _initialize_data_pairs(self, dataset):
        # One pair per (piece, performance) combination.
        self.data_pairs.extend(
            ScorePerformPairData(piece, performance)
            for piece in dataset.pieces
            for performance in piece.performances
        )
class ScorePerformPairData_Emotion(ScorePerformPairData):
    """Pair data for the emotion dataset: adds the performance's emotion
    label and performer id."""
    def __init__(self, piece, perform):
        super().__init__(piece, perform)
        # Emotion class label (appears to be 1-based — TODO confirm).
        self.emotion = perform.emotion
        self.performer = perform.performer
class EmotionPairDataset(PairDataset):
    """PairDataset for the emotion corpus: besides the flat pair list it
    also groups each piece's pairs per performer, sorted by emotion label
    (stored in self.data_pair_set_by_piece)."""

    def __init__(self, dataset):
        self.data_pair_set_by_piece = []
        super().__init__(dataset)

    def _initialize_data_pairs(self, dataset):
        for piece in dataset.pieces:
            piece_pairs = []
            for performance in piece.performances:
                pair_data = ScorePerformPairData_Emotion(piece, performance)
                self.data_pairs.append(pair_data)
                piece_pairs.append(pair_data)
            piece_pairs.sort(key=lambda pair: pair.perform_name)
            # Fixed: the original filtered with 'pair.performer is
            # performer_num' — identity comparison on values that only works
            # through CPython interning — and rebound the name of the set it
            # was iterating over ('performer_set') inside the loop.
            performers = {pair.performer for pair in piece_pairs}
            for performer_num in performers:
                performer_pairs = [pair for pair in piece_pairs
                                   if pair.performer == performer_num]
                performer_pairs.sort(key=lambda pair: pair.emotion)
                self.data_pair_set_by_piece.append(performer_pairs)
# optimized to emotion dataset
# get PairDataset class and generate data in virtuosoNet format
class DataGenerator:
    def __init__(self, pair_dataset, save_path):
        # pair_dataset: a PairDataset (or subclass) with data_pairs populated.
        self.pair_dataset = pair_dataset
        self.save_path = Path(save_path)
    def generate_statistics(self, valid_set_list=dataset_split.EMOTION_VALID_LIST, test_set_list=dataset_split.EMOTION_TEST_LIST):
        """Tag every pair with its train/valid/test split, then compute
        dataset-wide mean/std statistics for the normalizable features."""
        self._update_dataset_split_type(valid_set_list, test_set_list)
        self._update_mean_stds_of_entire_dataset()
def _update_dataset_split_type(self, valid_set_list, test_set_list):
for pair_data in self.pair_dataset.data_pairs:
path = pair_data.piece_path
for valid_name in valid_set_list:
if valid_name in path:
pair_data.split_type = 'valid'
break
else:
for test_name in test_set_list:
if test_name in path:
pair_data.split_type = 'test'
pair_data.features['qpm_primo'] = pair_data.score_qpm_primo
break
if pair_data.split_type is None:
pair_data.split_type = 'train'
def _update_mean_stds_of_entire_dataset(self):
# get squeezed features
feature_data = dict()
for pair in self.pair_dataset.data_pairs:
for feature_key in pair.features.keys():
if type(pair.features[feature_key]) is dict:
if pair.features[feature_key]['need_normalize']:
if feature_key not in feature_data.keys():
feature_data[feature_key] = []
if isinstance(pair.features[feature_key]['data'], list):
feature_data[feature_key] += pair.features[feature_key]['data']
else:
feature_data[feature_key].append(
pair.features[feature_key]['data'])
# cal mean and stds
stats = dict()
for feature_key in feature_data.keys():
mean = sum(feature_data[feature_key]) / \
len(feature_data[feature_key])
var = sum(
(x-mean)**2 for x in feature_data[feature_key]) / len(feature_data[feature_key])
stds = var ** 0.5
if stds == 0:
stds = 1
stats[feature_key] = {'mean': mean, 'stds': stds}
self.pair_dataset.feature_stats = stats
def save_final_feature_dataset(self, input_feature_keys=VNET_INPUT_KEYS, output_feature_keys=VNET_OUTPUT_KEYS, with_e1_qpm=False, e1_to_input_feature_keys=PRIME_VNET_OUTPUT_KEYS, output_for_classifier=False):
self._generate_save_folders()
for pair_data_list in tqdm(self.pair_dataset.data_pair_set_by_piece):
feature_dict_list = []
if e1_to_input_feature_keys:
e1_data, _ = self._convert_feature(pair_data_list[0].features, self.pair_dataset.feature_stats, keys=e1_to_input_feature_keys)
for pair_data in pair_data_list:
feature_dict = dict()
feature_dict['input_data'], input_feature_index_dict = self._convert_feature(pair_data.features, self.pair_dataset.feature_stats, keys=input_feature_keys)
if output_for_classifier:
feature_dict['e1_perform_data'] = e1_data
elif e1_to_input_feature_keys and not output_for_classifier:
feature_dict['input_data'], input_feature_index_dict = self._add_e1_output_feature_to_input_feature(
feature_dict['input_data'], input_feature_index_dict, e1_data)
if output_for_classifier:
feature_dict['label'] = pair_data.emotion - 1
feature_dict['output_data'], output_feature_index_dict = self._convert_feature(pair_data.features, self.pair_dataset.feature_stats, keys=output_feature_keys)
feature_dict['note_location'] = pair_data.features['note_location']
feature_dict['align_matched'] = pair_data.features['align_matched']
feature_dict['articulation_loss_weight'] = pair_data.features['articulation_loss_weight']
feature_dict['graph'] = pair_data.graph_edges
feature_dict['score_path'] = pair_data.piece_path
feature_dict['perform_path'] = pair_data.perform_path
feature_dict_list.append(feature_dict)
if self.pair_dataset.index_dict is None:
self.pair_dataset.index_dict = {'input_index_dict': input_feature_index_dict,
'output_index_dict': output_feature_index_dict}
if with_e1_qpm and pair_data.emotion is 1:
qpm_index = self.pair_dataset.index_dict['input_index_dict']['qpm_primo']['index']
e1_qpm = feature_dict['input_data'][0][qpm_index]
for feature_dict in feature_dict_list:
if with_e1_qpm:
feature_dict['input_data'] = self._change_qpm_primo_to_e1_qpm_primo(
feature_dict['input_data'], self.pair_dataset.index_dict['input_index_dict'], e1_qpm)
self._save_feature_dict(feature_dict, pair_data.split_type, self.pair_dataset.dataset_path)
self._save_dataset_info()
def save_final_feature_dataset_for_analysis(self, perform_feature_keys):
self._generate_save_folders()
for pair_data_list in tqdm(self.pair_dataset.data_pair_set_by_piece):
#e1_data, _ = self._convert_feature(pair_data_list[0].features, self.pair_dataset.feature_stats, keys=e1_to_input_feature_keys)
for pair_data in pair_data_list:
feature_dict = dict()
#feature_dict['e1_perform_data'] = e1_data
feature_dict['emotion_number'] = pair_data.emotion
feature_dict['features'] = dict()
for key in perform_feature_keys:
feature_dict['features'][key] = pair_data.features[key]['data']
feature_dict['score_path'] = pair_data.piece_path
feature_dict['perform_path'] = pair_data.perform_path
self._save_feature_dict(feature_dict, pair_data.split_type, self.pair_dataset.dataset_path)
self._save_dataset_info()
def _generate_save_folders(self):
save_folder = Path(self.save_path)
split_types = ['train', 'valid', 'test']
save_folder.mkdir(exist_ok=True)
for split in split_types:
(save_folder / split).mkdir(exist_ok=True)
def _check_if_global_and_normalize(self, value, key, note_num, stats):
# global features like qpm_primo, tempo_primo, composer_vec
if not isinstance(value, list) or len(value) != note_num:
value = [value] * note_num
if key in stats: # if key needs normalization,
value = [(x - stats[key]['mean']) / stats[key]['stds']
for x in value]
return value
def _convert_feature(self, feature_data, stats, keys):
data = []
index_dict = dict()
total_feature_length = 0
for key in keys:
value = self._check_if_global_and_normalize(
feature_data[key]['data'], key, feature_data['num_notes'], stats)
data.append(value)
# cal feature len
if isinstance(value[0], list):
feature_len = len(value[0])
else:
feature_len = 1
if key in index_dict.keys(): # since 'beat_tempo' is doubled in output_keys
index_dict[key]['index'] = [index_dict[key]['index'], total_feature_length]
else:
index_dict[key] = {'len': feature_len, 'index': total_feature_length}
total_feature_length += feature_len
index_dict['total_length'] = total_feature_length
data_array = np.zeros((feature_data['num_notes'], total_feature_length))
cur_idx = 0
for value in data:
if isinstance(value[0], list):
length = len(value[0])
data_array[:, cur_idx:cur_idx+length] = value
else:
length = 1
data_array[:, cur_idx] = value
cur_idx += length
return data_array, index_dict
def _add_e1_output_feature_to_input_feature(self, input_data, index_dict, e1_data):
index_dict['e1_data'] = {'index': len(input_data[0]), 'len': len(e1_data[0])}
input_data = np.append(input_data, e1_data, axis=1) # b/c shape is (note_num, feature_num)
index_dict['total_length'] += len(e1_data[0])
return input_data, index_dict
def _change_qpm_primo_to_e1_qpm_primo(self, input_data, index_dict, e1_qpm):
qpm_index = index_dict['qpm_primo']['index']
input_data[:, qpm_index] = e1_qpm
return input_data
def _flatten_path(self, file_path):
return '_'.join(file_path.parts)
def _save_feature_dict(self, feature_dict, split_type, dataset_path):
piece_path = feature_dict['score_path']
perform_path = feature_dict['perform_path']
data_name = self._flatten_path(Path(perform_path).relative_to(Path(dataset_path))) + '.dat'
final_save_path = self.save_path.joinpath(split_type, data_name)
with open(final_save_path, "wb") as f:
pickle.dump(feature_dict, f, protocol=2)
if split_type == 'test':
xml_name = Path(piece_path).name
xml_path = Path(self.save_path.joinpath(split_type, xml_name))
shutil.copy(piece_path, str(xml_path))
def _save_dataset_info(self):
dataset_info = {'stats': self.pair_dataset.feature_stats,
'index_dict': self.pair_dataset.index_dict}
with open(self.save_path.joinpath("dataset_info.dat"), "wb") as f:
pickle.dump(dataset_info, f, protocol=2)
# NOTE(review): removed ~210 lines of dead code that had been disabled by
# wrapping it in a module-level triple-quoted string (an earlier PairDataset
# implementation plus legacy feature-conversion helpers such as
# convert_feature_to_VirtuosoNet_format and cal_mean_stds). The bare string
# literal had no runtime effect; recover the old code from version control
# if it is ever needed again.
|
# this file is created by Gursimar Kaur
from django.http import HttpResponse
from django.shortcuts import render
def home(request):
    """Render the landing page."""
    template_name = 'home.html'
    return render(request, template_name)
def aboutus(request):
    """Render the 'About us' page."""
    template_name = 'aboutus.html'
    return render(request, template_name)
def contactus(request):
    """Render the 'Contact us' page."""
    template_name = 'contactus.html'
    return render(request, template_name)
def analyzetext(request):
    """Apply the text transformations selected in the submitted form.

    Reads the text and the checkbox states from request.POST (each checkbox
    arrives as 'on' when ticked, defaulting to 'off'), applies the enabled
    transformations in a fixed order, and renders analyze.html with the
    result. Returns an "Error" response when no transformation produced any
    output for a non-empty input.
    """
    # Default to '' so a missing 'text' field behaves like an empty
    # submission instead of crashing when iterating over None below.
    textString = request.POST.get('text', '')
    rempunc = request.POST.get('removepunc', 'off')
    uc = request.POST.get('uppercase', 'off')
    lc = request.POST.get('lowercase', 'off')
    cf = request.POST.get('capfirst', 'off')
    nlr = request.POST.get('newlineremove', 'off')
    sr = request.POST.get('spaceremove', 'off')
    esr = request.POST.get('extraspaceremove', 'off')
    cc = request.POST.get('charcount', 'off')
    punctuations = """!()-[]{};:'"\,<>./?@#$%^&*_~"""
    if textString != "":
        analyzed = ""
        if rempunc == 'on':
            # Drop every character that appears in the punctuation set.
            analyzed = ""
            for char in textString:
                if char not in punctuations:
                    analyzed = analyzed + char
            textString = analyzed
        if uc == 'on':
            analyzed = ""
            for char in textString:
                analyzed = analyzed + char.upper()
            textString = analyzed
        if lc == 'on':
            analyzed = ""
            if textString == "":
                # Punctuation removal above may have emptied the text.
                textString = "No text entered."
            for char in textString:
                analyzed = analyzed + char.lower()
            textString = analyzed
        if cf == 'on':
            # Capitalize the first character and any character following
            # ". ", "! " or "? ".
            analyzed = ""
            for index, char in enumerate(textString):
                if index == 0:
                    analyzed = analyzed + char.upper()
                elif index >= 2 and textString[index-1] == ' ' and textString[index-2] in '.!?':
                    # `index >= 2` prevents the negative-index wraparound the
                    # original code had at index 1 (it compared against the
                    # LAST character of the string).
                    analyzed = analyzed + char.upper()
                else:
                    analyzed = analyzed + char
            textString = analyzed
        if nlr == 'on':
            analyzed = ""
            for char in textString:
                if char != '\n':
                    analyzed = analyzed + char
            textString = analyzed
        if sr == 'on':
            analyzed = ""
            for char in textString:
                if char != " ":
                    analyzed = analyzed + char
            textString = analyzed
        if esr == 'on':
            # Collapse runs of spaces by dropping any space whose successor
            # is also a space. The bounds check fixes an IndexError the
            # original code raised whenever the text ended with a space.
            analyzed = ""
            for index, char in enumerate(textString):
                if not (char == " " and index + 1 < len(textString) and textString[index + 1] == " "):
                    analyzed = analyzed + char
            textString = analyzed
        if cc == 'on':
            # Append the character count (newlines excluded) on a new line.
            analyzed = textString + "\n" + str(len(textString) - textString.count('\n'))
        if analyzed == "":
            # Non-empty input but no transformation selected/effective.
            return HttpResponse("Error")
        else:
            params = {'textString': analyzed}
    else:
        params = {'textString': 'No text entered'}
    return render(request, 'analyze.html', params)
|
"""HelloWorld Integration for Cortex XSOAR - Unit Tests file
This file contains the Unit Tests for the HelloWorld Integration based
on pytest. Cortex XSOAR contribution requirements mandate that every
integration should have a proper set of unit tests to automatically
verify that the integration is behaving as expected during CI/CD pipeline.
Test Execution
--------------
Unit tests can be checked in 3 ways:
- Using the command `lint` of demisto-sdk. The command will build a dedicated
docker instance for your integration locally and use the docker instance to
execute your tests in a dedicated docker instance.
- From the command line using `pytest -v` or `pytest -vv`
- From PyCharm
Example with demisto-sdk (from the content root directory):
demisto-sdk lint -i Packs/HelloWorld/Integrations/HelloWorld
Coverage
--------
There should be at least one unit test per command function. In each unit
test, the target command function is executed with specific parameters and the
output of the command function is checked against an expected output.
Unit tests should be self contained and should not interact with external
resources like (API, devices, ...). To isolate the code from external resources
you need to mock the API of the external resource using pytest-mock:
https://github.com/pytest-dev/pytest-mock/
In the following code we configure requests-mock (a mock of Python requests)
before each test to simulate the API calls to the HelloWorld API. This way we
can have full control of the API behavior and focus only on testing the logic
inside the integration code.
We recommend using outputs from the real API calls to compare the
results when possible. See the ``test_data`` directory, which contains the data
we use for comparison; this reduces the complexity of the unit tests and
avoids manually mocking all the fields.
NOTE: we do not have to import or build a requests-mock instance explicitly.
requests-mock library uses a pytest specific mechanism to provide a
requests_mock instance to any function with an argument named requests_mock.
More Details
------------
More information about Unit Tests in Cortex XSOAR:
https://xsoar.pan.dev/docs/integrations/unit-testing
"""
import json
import io
import pytest
def util_load_json(path):
    """Load and deserialize a JSON fixture file located at *path*."""
    with io.open(path, mode='r', encoding='utf-8') as fixture:
        return json.load(fixture)
def test_say_hello():
    """
    Tests helloworld-say-hello command function.

    Given:
        - No mock is needed here because say_hello_command performs no
          external API call.
    When:
        - Running the 'say_hello_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, say_hello_command

    hello_client = Client(base_url='https://test.com/api/v1', verify=False, auth=('test', 'test'))
    result = say_hello_command(hello_client, {'name': 'Dbot'})

    assert result.outputs == 'Hello Dbot'
def test_start_scan(requests_mock):
    """
    Tests helloworld-scan-start command function.

    Given:
        - requests_mock instance to generate the appropriate start_scan API
          response when the correct start_scan API request is performed.
        - A hostname.
    When:
        - Running the 'scan_start_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, scan_start_command

    scan_json = {
        'scan_id': '7a161a3f-8d53-42de-80cd-92fb017c5a12',
        'status': 'RUNNING'
    }
    requests_mock.get('https://test.com/api/v1/start_scan?hostname=example.com', json=scan_json)

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = scan_start_command(api_client, {'hostname': 'example.com'})

    assert result.outputs_prefix == 'HelloWorld.Scan'
    assert result.outputs_key_field == 'scan_id'
    assert result.outputs == {
        'scan_id': '7a161a3f-8d53-42de-80cd-92fb017c5a12',
        'status': 'RUNNING',
        'hostname': 'example.com'
    }
def test_status_scan(requests_mock):
    """
    Tests helloworld-scan-status command function.

    Given:
        - requests_mock instance to generate the appropriate check_scan
          API responses based on the scan ID provided.
        - Scan IDs.
    When:
        - Running the 'scan_status_command'.
    Then:
        - Checks the output of the command function with the expected output.
          Scan IDs 100 and 300 should be COMPLETE while scan ID 200 is RUNNING.
    """
    from HelloWorld import Client, scan_status_command

    expected_scans = [
        {'scan_id': '100', 'status': 'COMPLETE'},
        {'scan_id': '200', 'status': 'RUNNING'},
        {'scan_id': '300', 'status': 'COMPLETE'}
    ]
    # Register one mocked check_scan endpoint per scan ID.
    for scan in expected_scans:
        requests_mock.get(
            'https://test.com/api/v1/check_scan?scan_id=' + scan['scan_id'],
            json=scan)

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = scan_status_command(api_client, {'scan_id': ['100', '200', '300']})

    assert result.outputs_prefix == 'HelloWorld.Scan'
    assert result.outputs_key_field == 'scan_id'
    assert result.outputs == expected_scans
def test_scan_results(requests_mock):
    """
    Tests helloworld-scan-results command function.

    Given:
        - requests_mock instance to generate the appropriate get_scan_results
          API response, loaded from a local JSON file.
    When:
        - Running the 'scan_results_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, scan_results_command
    from CommonServerPython import Common

    scan_json = util_load_json('test_data/scan_results.json')
    requests_mock.get('https://test.com/api/v1/get_scan_results?scan_id=100', json=scan_json)

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = scan_results_command(api_client, {'scan_id': '100', 'format': 'json'})

    assert result[0].outputs == scan_json
    assert result[0].outputs_prefix == 'HelloWorld.Scan'
    assert result[0].outputs_key_field == 'scan_id'
    # Every entry after the first carries Common.CVE indicator data.
    assert isinstance(result, list)
    assert len(result) > 1
    for command_result in result[1:]:
        assert isinstance(command_result.indicator, Common.CVE)
def test_search_alerts(requests_mock):
    """
    Tests helloworld-search-alerts command function.

    Given:
        - requests_mock instance to generate the appropriate get_alerts API
          response, loaded from a local JSON file.
    When:
        - Running the 'search_alerts_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, search_alerts_command

    alerts_json = util_load_json('test_data/search_alerts.json')
    requests_mock.get(
        'https://test.com/api/v1/get_alerts?alert_status=ACTIVE&severity=Critical&max_results=2&start_time=1581982463',
        json=alerts_json['alerts'])

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = search_alerts_command(api_client, {
        'severity': 'Critical',
        'start_time': 1581982463,
        'max_results': 2,
        'status': 'ACTIVE'
    })

    # The integration converts the raw API timestamps to ISO8601, so the
    # expected alerts are patched to match before comparison.
    for alert in alerts_json['alerts'][:2]:
        alert['created'] = '2020-02-17T23:34:23.000Z'

    assert result.outputs_prefix == 'HelloWorld.Alert'
    assert result.outputs_key_field == 'alert_id'
    assert result.outputs == alerts_json['alerts']
def test_get_alert(requests_mock):
    """
    Tests helloworld-get-alert command function.

    Given:
        - requests_mock instance to generate the appropriate get_alert_details
          API response, loaded from a local JSON file.
        - An alert ID.
    When:
        - Running the 'get_alert_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, get_alert_command

    alert_json = util_load_json('test_data/get_alert.json')
    requests_mock.get('https://test.com/api/v1/get_alert_details?alert_id=695b3238-05d6-4934-86f5-9fff3201aeb0',
                      json=alert_json)

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = get_alert_command(api_client, {'alert_id': '695b3238-05d6-4934-86f5-9fff3201aeb0'})

    # The integration converts the raw timestamp to ISO8601, so the expected
    # payload is patched to match before comparison.
    alert_json['created'] = '2020-04-17T14:43:59.000Z'

    assert result.outputs == alert_json
    assert result.outputs_prefix == 'HelloWorld.Alert'
    assert result.outputs_key_field == 'alert_id'
def test_update_alert_status(requests_mock):
    """
    Tests helloworld-update-alert-status command function.

    Given:
        - requests_mock instance to generate the appropriate
          change_alert_status API response, loaded from a local JSON file.
        - Alert ID and a status.
    When:
        - Running the 'update_alert_status_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, update_alert_status_command

    alert_json = util_load_json('test_data/update_alert_status.json')
    requests_mock.get(
        'https://test.com/api/v1/change_alert_status?alert_id=695b3238-05d6-4934-86f5-9fff3201aeb0&alert_status=CLOSED',
        json=alert_json)

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    result = update_alert_status_command(api_client, {
        'alert_id': '695b3238-05d6-4934-86f5-9fff3201aeb0',
        'status': 'CLOSED'
    })

    # The integration converts the raw timestamp to ISO8601, so the expected
    # payload is patched to match before comparison.
    alert_json['updated'] = '2020-04-17T14:45:12.000Z'

    assert result.outputs == alert_json
    assert result.outputs_prefix == 'HelloWorld.Alert'
    assert result.outputs_key_field == 'alert_id'
def test_ip(requests_mock):
    """
    Tests the ip reputation command function.

    Given:
        - requests_mock instance to generate the appropriate ip reputation
          API response, loaded from a local JSON file.
        - An IP address to check.
    When:
        - Running the 'ip_reputation_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, ip_reputation_command
    from CommonServerPython import Common, DBotScoreReliability

    target_ip = '151.1.1.1'
    reputation_json = util_load_json('test_data/ip_reputation.json')
    requests_mock.get(f'http://test.com/api/v1/ip?ip={target_ip}',
                      json=reputation_json)

    api_client = Client(
        base_url='http://test.com/api/v1',
        verify=False,
        headers={'Authorization': 'Bearer some_api_key'}
    )
    result = ip_reputation_command(api_client, {'ip': target_ip, 'threshold': 65}, 65, DBotScoreReliability.C)

    assert result[0].outputs == reputation_json
    assert result[0].outputs_prefix == 'HelloWorld.IP'
    assert result[0].outputs_key_field == 'ip'
    # The command also returns Common.IP indicator data.
    assert isinstance(result, list)
    assert isinstance(result[0].indicator, Common.IP)
    assert result[0].indicator.ip == target_ip
def test_domain(requests_mock):
    """
    Tests the domain reputation command function.

    Given:
        - requests_mock instance to generate the appropriate domain
          reputation API response, loaded from a local JSON file.
        - A domain to check.
    When:
        - Running the 'domain_reputation_command'.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, domain_reputation_command
    from CommonServerPython import Common, DBotScoreReliability

    target_domain = 'google.com'
    reputation_json = util_load_json('test_data/domain_reputation.json')
    requests_mock.get(f'http://test.com/api/v1/domain?domain={target_domain}',
                      json=reputation_json)

    api_client = Client(
        base_url='http://test.com/api/v1',
        verify=False,
        headers={'Authorization': 'Bearer some_api_key'}
    )
    result = domain_reputation_command(api_client, {'domain': target_domain, 'threshold': 65}, 65, DBotScoreReliability.C)

    # The integration converts the raw timestamps to ISO8601, so the expected
    # payload is patched to match before comparison.
    reputation_json['expiration_date'] = '2028-09-14T04:00:00.000Z'
    reputation_json['creation_date'] = '1997-09-15T04:00:00.000Z'
    reputation_json['updated_date'] = '2019-09-09T15:39:04.000Z'

    assert result[0].outputs == reputation_json
    assert result[0].outputs_prefix == 'HelloWorld.Domain'
    assert result[0].outputs_key_field == 'domain'
    # The command also returns Common.Domain indicator data.
    assert isinstance(result, list)
    assert isinstance(result[0].indicator, Common.Domain)
    assert result[0].indicator.domain == target_domain
def test_fetch_incidents(requests_mock):
    """
    Tests the fetch-incidents command function.

    Given:
        - requests_mock instance to generate the appropriate get_alert API
          response, loaded from a local JSON file.
    When:
        - Running the 'fetch_incidents' command.
    Then:
        - Checks the output of the command function with the expected output.
    """
    from HelloWorld import Client, fetch_incidents

    alerts_json = util_load_json('test_data/search_alerts.json')
    requests_mock.get(
        'https://test.com/api/v1/get_alerts?alert_status=ACTIVE'
        '&severity=Low%2CMedium%2CHigh%2CCritical&max_results=2'
        '&start_time=1581944401', json=alerts_json['alerts'])

    api_client = Client(
        base_url='https://test.com/api/v1',
        verify=False,
        headers={'Authentication': 'Bearer some_api_key'}
    )
    _, incidents = fetch_incidents(
        client=api_client,
        max_results=2,
        last_run={'last_fetch': 1581944401},  # Mon Feb 17 2020
        alert_status='ACTIVE',
        min_severity='Low',
        alert_type=None,
        first_fetch_time='3 days',
    )

    assert incidents == [
        {
            'name': 'Hello World Alert 100',
            'occurred': '2020-02-17T23:34:23.000Z',
            'rawJSON': json.dumps(alerts_json['alerts'][0]),
            'severity': 4,  # critical, this is XSOAR severity (already converted)
        },
        {
            'name': 'Hello World Alert 200',
            'occurred': '2020-02-17T23:34:23.000Z',
            'rawJSON': json.dumps(alerts_json['alerts'][1]),
            'severity': 2,  # medium, this is XSOAR severity (already converted)
        }
    ]
def test_invalid_ip():
    """
    Given:
        - An invalid IP address to check.
    When:
        - Running the 'ip_reputation_command'.
    Then:
        - Checks that the command raises a suitable error message (Invalid IP).
    """
    from HelloWorld import Client, ip_reputation_command
    from CommonServerPython import DBotScoreReliability

    bad_ip = '1.1.1'  # an invalid ip
    api_client = Client(
        base_url='http://test.com/api/v1',
        verify=False,
        headers={'Authorization': 'Bearer some_api_key'}
    )

    with pytest.raises((Exception, ValueError)) as excinfo:
        ip_reputation_command(api_client, {'ip': bad_ip, 'threshold': 65}, 65, DBotScoreReliability.C)
    assert excinfo.value.args[0] == f'IP "{bad_ip}" is not valid'
@pytest.mark.parametrize('domain_date, expected_parsed_date', [
    ('1997-09-15 04:00:00', '1997-09-15T04:00:00.000Z'),
    (['1997-09-15 04:00:00'], '1997-09-15T04:00:00.000Z'),
])
def test_parse_domain_date(domain_date, expected_parsed_date):
    """
    Given:
        1. A string of a date.
        2. A list including a string of a date.
    When:
        - Running the 'parse_domain_date' function.
    Then:
        - Verify that the dates were parsed to ISO8601 format correctly.
    """
    from HelloWorld import parse_domain_date

    parsed = parse_domain_date(domain_date)
    assert parsed == expected_parsed_date
@pytest.mark.parametrize('hello_world_severity, expected_xsoar_severity', [
    ('Low', 1),
    ('Medium', 2),
    ('High', 3),
    ('Critical', 4),
])
def test_convert_to_demisto_severity(hello_world_severity, expected_xsoar_severity):
    """
    Given:
        - A string representing an HelloWorld severity.
    When:
        - Running the 'convert_to_demisto_severity' function.
    Then:
        - Verify that the severity was translated to an XSOAR severity correctly.
    """
    from HelloWorld import convert_to_demisto_severity

    translated = convert_to_demisto_severity(hello_world_severity)
    assert translated == expected_xsoar_severity
|
""" Print the sum of digits in 100!
This is a very easy question ,
I choose to learn python functional programming features from this
I have used reduce(x, y, z)
x is a binary operator that returns a value
y is an iterable object (list, generator .... )
z is the starting value.
For example x = mul
y = [10, 20, 30]
z = 1
reduce(x, y, z) = (((1 * 10) * 20) * 30)
sum is short for reduce(add, y, 0)
"""
from operator import mul
print sum(int(z) for z in str(reduce (mul , xrange(1, 101), 1)))
|
import os
import re
import sys
from svcshare.clientcontrol import hellanzbcontrol
from svcshare.clientcontrol import sabnzbdcontrol
class ClientControl(object):
"""Interface to the client using the shared service."""
def __init__(self, proxy, client_name, client_url, client_key):
"""Create a ClientControl object.
Args:
proxy: ConnectionProxyServer instance
client_name: client name (supported: 'hellanzb', 'sabnzbd', None)
client_url: URL to control client
"""
self.client_name = client_name
self.client_url = client_url
self.proxy = proxy
if client_name == "hellanzb":
self.client = hellanzbcontrol.HellanzbControl(client_url)
elif client_name == "sabnzbd":
self.client = sabnzbdcontrol.SabnzbdControl(client_url, client_key)
else:
self.client = None
def pause(self):
self.proxy.runningIs(False)
if self.client:
return self.client.pause()
def resume(self):
self.proxy.runningIs(True)
if self.client:
return self.client.resume()
def eta(self):
if self.client:
return self.client.eta()
else:
return ""
def queue_size(self):
if self.client:
return self.client.queue_size()
else:
return 0
def is_paused(self):
return (not self.proxy.running())
def enqueue(self, id):
if self.client:
return self.client.enqueue(id)
|
message = input("Enter your message: ")
message = message.upper()
print(message)
if "H" in message:
print("Letter 'H or h' is present in " + message)
else:
print("Letter 'H or h' isn't present in " + message)
|
import os
import tensorflow as tf
from beam_search import BeamSearch
class BSDecoder(object):
    """Beam-search decoder: restores the latest model checkpoint and decodes
    articles (word-id lists) into abstracts via BeamSearch."""

    def __init__(self, model, batch_reader, model_config, data_config, vocab, data_loader):
        # Keep references to every collaborator; the session/saver belong to
        # this decoder and live for its entire lifetime (TF1-style API).
        self.model = model
        self.batch_reader = batch_reader
        self.model_config = model_config
        self.data_config = data_config
        self.vocab = vocab
        self.data_loader = data_loader
        self.saver = tf.train.Saver()
        self.session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        # Remember whether a checkpoint was found; decode() refuses to run otherwise.
        self.restore_model_flag = self.restore_model()
        # BeamSearch is parameterised with the start/end sentence token ids
        # and the maximum abstract length.
        self.bs = BeamSearch(self.model, self.model_config.beam_size,
                             self.data_loader.word_to_id(self.data_config.sentence_start),
                             self.data_loader.word_to_id(self.data_config.sentence_end),
                             self.model_config.abstract_length)

    def restore_model(self):
        """
        restore model
        :return: if restore model success, return True, else return False
        """
        ckpt_state = tf.train.get_checkpoint_state(self.model_config.model_path)
        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            print('No model to decode yet at {0}'.format(self.model_config.model_path))
            return False
        ckpt_path = os.path.join(self.model_config.model_path, os.path.basename(ckpt_state.model_checkpoint_path))
        self.saver.restore(self.session, ckpt_path)
        return True

    def decode(self, article):
        """
        decode article to abstract by model
        :param article: article, which is word id list
        :return: abstract (list of int word ids), or None when no checkpoint was restored
        """
        if self.restore_model_flag:
            # NOTE(review): `article * beam_size` repeats the *elements* of the
            # flat id list beam_size times; if BeamSearch.search expects a batch
            # of beam_size articles, `[article] * beam_size` may have been
            # intended — confirm against BeamSearch.search.
            article_batch = article * self.model_config.beam_size
            article_length_batch = [len(article)] * self.model_config.beam_size
            best_beam = self.bs.search(self.session, article_batch, article_length_batch)[0]
            # Skip position 0 of the best hypothesis: it is the start token (id 1).
            result = [int(word_id) for word_id in best_beam[1:]]
            return result
        else:
            return None
#coding:utf-8
from gensim.models import Word2Vec
import os
#walks = [map(str, walk) for walk in walks]
def load_walks(dirname="path/result_s_equal"):
    """Load random-walk node sequences from every file in *dirname*.

    Each line of each file is one walk: node ids separated by tabs.

    Args:
        dirname: directory holding the walk files.  The default keeps the
            original hard-coded location, so existing callers are unchanged.

    Returns:
        A list of walks, each a list of node-id strings.
    """
    walks = []
    for filename in os.listdir(dirname):
        print(filename)
        # 'with' guarantees the handle is closed (the original leaked it),
        # and os.path.join replaces the original's "dir//file" concatenation.
        with open(os.path.join(dirname, str(filename))) as f:
            for line in f:
                walks.append(line.strip().split('\t'))
    return walks
# Train skip-gram (sg=1) embeddings over the loaded walks and save them
# in word2vec text format.
walks=load_walks()
# size = embedding dimension, window = context width
#model = Word2Vec(walks, size=50, window=4, min_count=10, sg=1, workers=8, iter=20, hs=0, negative=100)
model = Word2Vec(walks, size=5,window=4,sg=1, workers=10, iter=40, hs=0, negative=50)
model.wv.save_word2vec_format("embedding/drug_embeding")
|
import re
from flask import Flask, render_template, request, redirect, session, flash
from mysqlconnection import MySQLConnector
from flask.ext.bcrypt import Bcrypt
app = Flask(__name__)
# Validation patterns for the registration form.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-z]+$')
NAME_REGEX = re.compile(r'^[a-zA-Z]+$')
# Password hashing and the MySQL helper, both bound to this app.
bcrypt = Bcrypt(app)
mysql = MySQLConnector(app, 'mydb')
#print mysql.query_db("SELECT * FROM logins")
# Signs the session cookie.  NOTE(review): hard-coded secret; move to config.
app.secret_key = "busysignal"
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/register')
def register():
    """Render the registration form."""
    return render_template('register.html')
@app.route('/login', methods=['POST'])
def login():
    """Validate an email/password pair against the logins table.

    Flashes an error when the email is unknown or the password does not
    match the stored bcrypt hash; always redirects back to the index page.
    """
    # Parameterized query -- the original interpolated user input directly
    # into the SQL string (injection risk).
    query = 'SELECT id, password FROM logins WHERE email = :email'
    user = mysql.query_db(query, {'email': request.form['email']})
    if len(user) < 1:
        # The original also did `errors += 1` here with `errors` never
        # initialised, which raised NameError before flashing.
        flash('email doesnt exist')
    elif not bcrypt.check_password_hash(user[0]['password'], request.form['password']):
        # Original indexed the row dict with a tuple and compared the hash
        # against the *email*; compare the stored hash to the password.
        flash('go away, liar')
    return redirect('/')
@app.route('/signup', methods=['POST'])
def signup():
    """Validate the registration form; insert a new login row when clean."""
    errors = 0
    first = request.form['first']
    last = request.form['last']
    email = request.form['email']
    password = request.form['password']

    if len(first) < 2:
        errors += 1
        flash('needs more characters in first name')
    elif not NAME_REGEX.match(first):
        errors += 1
        flash('no numbers allowed in first name')

    if len(last) < 2:
        errors += 1
        flash('needs more characters in last name')
    elif not NAME_REGEX.match(last):
        errors += 1
        flash('no numbers allowed in last name')

    if not EMAIL_REGEX.match(email):
        errors += 1
        flash('invalid email')

    if len(password) < 9:
        errors += 1
        flash('password must be at least 9 characters long')

    if password != request.form['confirm']:
        errors += 1
        flash('passwords do not match')

    if errors > 0:
        return redirect('/register')

    # All checks passed: hash the password and create the row.
    pw_hash = bcrypt.generate_password_hash(password)
    print(pw_hash)
    query = "INSERT INTO logins (first_name, last_name, email, pw_hash, created_at, updated_at) VALUES (:first_name, :last_name, :email, :pw_hash, NOW(), NOW())"
    mysql.query_db(query, {
        'first_name': first,
        'last_name': last,
        'email': email,
        'pw_hash': pw_hash,
    })
    return redirect('/')
app.run(debug=True) |
import random


def generate_random_graph(total_vertex=10000, path="./test.txt"):
    """Write a random undirected graph to *path* in adjacency-CSV form.

    First line: the vertex count.  Then one line per vertex:
    ``vertex,degree,neighbor1,neighbor2,...``.

    Args:
        total_vertex: number of vertices (default keeps the original 10000).
        path: output file (default keeps the original ./test.txt).
    """
    adjacency = {v: set() for v in range(total_vertex)}
    for v in range(total_vertex):
        # Target degree for this vertex.  Integer division: the original used
        # `total_vertex / 10`, whose float result makes randint raise on
        # modern Python 3.
        target = random.randint(1, max(1, total_vertex // 10))
        if target <= len(adjacency[v]):
            continue
        for _ in range(target):
            neighbor = random.randint(0, total_vertex - 1)
            if neighbor != v:
                # Undirected edge: record it on both endpoints.
                adjacency[v].add(neighbor)
                adjacency[neighbor].add(v)
    # 'with' guarantees the file is closed even if a write fails.
    with open(path, "w") as f:
        f.write("{}\n".format(total_vertex))
        for v in range(total_vertex):
            f.write("{},{}".format(v, len(adjacency[v])))
            for neighbor in adjacency[v]:
                f.write(",{}".format(neighbor))
            f.write("\n")


if __name__ == "__main__":
    generate_random_graph()
|
"""
@File :redis_dealer.py
@Author :JohsuaWu1997
@Date :14/07/2020
"""
from dealer import BasicDealer
import redis
import json
import numpy as np
import pandas as pd
[host, port] = ['192.168.137.153', 6379]
class Dealer(BasicDealer):
    """Redis-backed dealer.

    Reads the trade configuration, market ticks and stored positions from
    Redis hashes ('trade_list', 'market', 'position', 'trade_stock',
    'trade_detail') and writes net value, positions and per-tick trade
    details back.
    """

    def __init__(self, trade_id, unit=100):
        """Connect to Redis and load the trade config/position for trade_id."""
        # Every logical handle shares one connection pool.
        self.redisPool = redis.ConnectionPool(host=host, port=port, decode_responses=True)
        self.marketRedis = redis.Redis(connection_pool=self.redisPool)
        self.positionRedis = redis.Redis(connection_pool=self.redisPool)
        self.listRedis = redis.Redis(connection_pool=self.redisPool)
        self.detailRedis = redis.Redis(connection_pool=self.redisPool)
        self.stockRedis = redis.Redis(connection_pool=self.redisPool)
        self.trade_list = json.loads(self.listRedis.hget('trade_list', trade_id))
        # No stored position yet -> start from an empty dict.
        self.trade_position = self.positionRedis.hget('position', trade_id)
        self.trade_position = dict() if self.trade_position is None else json.loads(self.trade_position)
        super().__init__(trade_id, unit)

    def get_test_ticks(self):
        """Collect every market tick between self.begin and self.end.

        Returns:
            (timestamps, ticks): sorted timestamp keys and a dict mapping
            timestamp -> [[stock, buy, sell, amount], ...] per stock.
        """
        # Longest common prefix of begin/end narrows the hscan match pattern.
        common = self.begin[:max(
            [i + 1 if self.begin[:i + 1] == self.end[:i + 1] else 0 for i in range(len(self.begin))]
        )]
        ticks = dict()
        market_iter = self.marketRedis.hscan_iter('market', match=common + '*', count=10000)
        for time_step in market_iter:
            if self.begin <= time_step[0] <= self.end:
                tick = json.loads(time_step[1])
                tick = [
                    [index] + [float(tick[index][item]) for item in ['buy', 'sell', 'amount']]
                    for index in self.stock_list
                ]
                ticks[time_step[0]] = tick
        timestamps = list(ticks.keys())
        timestamps.sort()
        print('find total ' + str(len(timestamps)) + ' timestamps, test back starts now')
        self.get_position(timestamps[0])
        return timestamps, ticks

    def get_stock_list(self):
        """Build self.stock_list: the baseline first, then the traded stocks.

        A stock entry starting with 'all' expands to every stock present in
        the market snapshot at self.begin (minus the baseline).
        """
        self.begin = self.trade_list['begin_datetime']
        self.end = self.trade_list['end_datetime']
        self.stock_list.append(self.trade_list['trade_baseline'])
        self.stock_list.extend(json.loads(self.stockRedis.hget('trade_stock', self.trade_id)))
        if self.stock_list[1].startswith('all'):
            self.stock_list.pop()
            stock_list = list(json.loads(self.marketRedis.hget('market', self.begin)).keys())
            stock_list.remove(self.stock_list[0])
            self.stock_list.extend(stock_list)
        print(self.stock_list)

    def get_position(self, n_time=None):
        """Initialise the position frame from stored volumes and begin prices."""
        self.position = pd.DataFrame(0, columns=['volume', 'curr_price'], index=self.stock_list).astype(float)
        market = json.loads(self.marketRedis.hget('market', self.begin))
        self.cash = float(self.trade_list['valid_cash'])
        for key in self.trade_position.keys():
            # .loc[row, col] instead of chained .loc[row][col]: the chained
            # form assigns into a temporary copy, so the write can be lost.
            self.position.loc[key, 'volume'] = self.trade_position[key]
        for key in self.stock_list:
            self.position.loc[key, 'curr_price'] = float(market[key]['buy'])
        self.set_total_asset()

    def set_total_asset(self):
        """Recompute net value (cash + holdings) and persist it to Redis."""
        self.net_value = self.cash + np.sum(self.position['volume'] * self.position['curr_price'])
        self.trade_list['total_asset'] = self.net_value
        self.trade_list['valid_cash'] = self.cash
        self.listRedis.hset('trade_list', self.trade_id, json.dumps(self.trade_list))
        print('current Net Value:\t', self.net_value)
        print(self.position['volume'].values.tolist())

    def update_database(self, ids, price, amount, n_time):
        """Persist current volumes and append this tick's fills to trade_detail."""
        for key in self.stock_list:
            self.trade_position[key] = self.position.loc[key, 'volume']
        trade_detail = self.detailRedis.hget('trade_detail', self.trade_id)
        trade_detail = json.loads(trade_detail) if trade_detail is not None else dict()
        append_detail = dict()
        for index, p, volume in zip(ids, price, amount):
            direction = 'buy' if volume > 0 else 'sell'
            # Key by the instrument id.  The original wrote the string literal
            # 'index', so every fill overwrote the same single entry.
            append_detail[index] = dict(zip(['volume', 'direction', 'price'], [abs(volume), direction, p]))
        trade_detail[str(n_time)] = append_detail
        self.detailRedis.hset('trade_detail', self.trade_id, json.dumps(trade_detail))
|
# Generated by Django 3.1.7 on 2021-04-06 06:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the Course_year_2 and Course_year_3 tables and
    renames the existing Course model to Course_year_1."""

    dependencies = [
        ('Bit', '0002_auto_20210405_2348'),
    ]

    operations = [
        migrations.CreateModel(
            name='Course_year_2',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_code', models.CharField(max_length=150)),
                ('course_name', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Course_year_3',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course_code', models.CharField(max_length=150)),
                ('course_name', models.TextField()),
            ],
        ),
        # Keeps the year-1 data: rename rather than drop/create.
        migrations.RenameModel(
            old_name='Course',
            new_name='Course_year_1',
        ),
    ]
|
def longest_subarray_all_equal(A):
    """Return the length of a longest subarray of A whose entries are all equal.

    Takes an array of integers (any sequence works).  Returns 0 for an empty
    input — the original returned 1, which over-counted the empty case.
    """
    if not A:
        return 0
    best = run = 1
    # Compare each element with its predecessor: extend the current run on a
    # match, otherwise restart it at length 1.
    for prev, cur in zip(A, A[1:]):
        run = run + 1 if cur == prev else 1
        best = max(best, run)
    return best


if __name__ == '__main__':
    print(longest_subarray_all_equal([1, 2, 3, 3, 3, 5, 78, 0]))
|
# ---- IJCAI-18 CTR data: load, numericize the list-valued columns,
# ---- derive day/hour from the timestamp, and de-duplicate. ----
import pandas as pd
#data=pd.read_csv('D:/mother/round1_ijcai_18_train_20180301.txt',nrows=10000,delimiter=' ',header=0)
data=pd.read_csv('D:/mother/round1_ijcai_18_train_20180301.txt',delimiter=' ',header=0)
data.shape
aa=data[['is_trade']]
aa.iloc[:,0].value_counts()
data.isnull().any()
a1=data[['item_category_list']]
import numpy as np
a11=pd.DataFrame(np.zeros((len(a1),1)))
a12=pd.DataFrame(np.zeros((len(a1),1)))
a1=a1['item_category_list'].apply(lambda x:x.split(';'))
a11=a1.apply(lambda x :x[0])
a12=a1.apply(lambda x :x[1])
a11=pd.DataFrame(a11)
a12=pd.DataFrame(a12)
a12=a12['item_category_list'].apply(lambda x:int(x))
# replace item_category_list with a12 (the second-level category, as int)
data[['item_category_list']]=a12
# numericize the ad item category list
# numericize the ad item property list (use its length)
b1=data[['item_property_list']]
b1=b1['item_property_list'].apply(lambda x:x.split(';'))
b1=b1.apply(lambda x:len(x))
data[['item_property_list']]=b1
# context information
# numericize the predicted category/property list (use its length)
c1=data[['predict_category_property']]
c1=c1['predict_category_property'].apply(lambda x:x.split(';'))
c1=c1.apply(lambda x:len(x))
data[['predict_category_property']]=c1
# format the unix timestamp into day-of-month and hour columns
import datetime
c2=data[['context_timestamp']]
c2=c2['context_timestamp'].apply(lambda x:datetime.datetime.fromtimestamp(x))
c3=c2.astype(str).apply(lambda x:x.split(' '))
data['day']=c3.apply(lambda x:x[0]).apply(lambda x:int(x[8:10]))
data['hour']=c3.apply(lambda x:x[1]).apply(lambda x:int(x[0:2]))
#c31=c3.apply(lambda x:(int(x[0])*10+int(x[1]))%24)
#
#c32=c3.apply(lambda x:(int(x[3])*10+int(x[4]))%60)
#
#
#def time_duan(x,y):
# if (x<=7 and y<=59):
# return 1
# elif (x>=8 and x<11 and y<=59):
# return 2
# elif (x>=11 and x<13 and y<=59):
# return 3
# elif (x>=13 and x<18 and y<=59):
# return 4
# else :
# return 5
#
#c33=np.zeros((len(c31),1))
#
#for i in range(len(c31)):
# c33[i]=time_duan(c31[i],c32[i])
del data['context_timestamp']
#data[['context_timestamp']]=c33
# drop duplicate rows by instance_id
data=data.drop_duplicates(['instance_id'])
data.shape
data.dtypes
# ---- mine additional implicit features: view/trade counts and trade
# ---- rates, aggregated per single key (item / brand / user / page / shop) ----
u=data[['item_id']]
u.drop_duplicates(inplace=True)
# total number of views per item
u1=data[['item_id']]
u1['item_is_see']=1
u1=u1.groupby(['item_id']).agg('sum').reset_index()
item_feature=pd.merge(u,u1,on=['item_id'],how='left')
# total number of trades per item
u2=data[['item_id','is_trade']]
u2=u2[(u2.is_trade==1)][['item_id']]
u2['item_is_trade']=1
u2=u2.groupby(['item_id']).agg('sum').reset_index()
item_feature=pd.merge(item_feature,u2,on=['item_id'],how='left')
# per-item trade (conversion) rate
item_feature=item_feature.fillna(0)
item_feature['item_%%trade']=item_feature.item_is_trade/item_feature.item_is_see
# total views per brand
u1=data[['item_brand_id']]
u1['item_brand_see']=1
u1=u1.groupby(['item_brand_id']).agg('sum').reset_index()
# trades per brand
u2=data[(data.is_trade==1)][['item_brand_id']]
u2['item_brand_trade']=1
u2=u2.groupby(['item_brand_id']).agg('sum').reset_index()
# per-brand trade rate
item_brand_feature=pd.merge(u1,u2,on=['item_brand_id'],how='left')
item_brand_feature=item_brand_feature.fillna(0)
item_brand_feature['item_brand_%%trade']=item_brand_feature.item_brand_trade/item_brand_feature.item_brand_see
# total views per user
u1=data[['user_id']]
u1['user_id_see']=1
u1=u1.groupby('user_id').agg('sum').reset_index()
# trades per user
u2=data[(data.is_trade==1)][['user_id']]
u2['user_trade']=1
u2=u2.groupby('user_id').agg('sum').reset_index()
# per-user historical trade rate
user_feature=pd.merge(u1,u2,on=['user_id'],how='left')
user_feature=user_feature.fillna(0)
user_feature['user_%%trade']=user_feature.user_trade/user_feature.user_id_see
# views and trades per context page
u1=data[['context_page_id']]
u1['page_see']=1
u1=u1.groupby(['context_page_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['context_page_id']]
u2['page_trade']=1
u2=u2.groupby(['context_page_id']).agg('sum').reset_index()
page_feature=pd.merge(u1,u2,on=['context_page_id'],how='left')
page_feature=page_feature.fillna(0)
page_feature['page_%%trade']=page_feature.page_trade/page_feature.page_see
# views per shop (a timestamp-based variant is left commented out)
#u1=data[['context_timestamp']]
#u1['context_timestamp_see']=1
#u1=u1.groupby('context_timestamp').agg('sum').reset_index()
u1=data[['shop_id']]
u1['shop_id_see']=1
u1=u1.groupby('shop_id').agg('sum').reset_index()
# trades per shop
u2=data[(data.is_trade==1)][['shop_id']]
u2['shop_id_trade']=1
u2=u2.groupby('shop_id').agg('sum').reset_index()
# per-shop trade rate
shop_feature=pd.merge(u1,u2,on=['shop_id'],how='left')
shop_feature=shop_feature.fillna(0)
shop_feature['shop_%%trade']=shop_feature.shop_id_trade/shop_feature.shop_id_see
# ---- pairwise / triple interaction counts (views and trades) ----
# user x item
u1=data[['user_id','item_id']]
u1['user_item_see']=1
u1=u1.groupby(['user_id','item_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['user_id','item_id']]
u2['user_item_trade']=1
u2=u2.groupby(['user_id','item_id']).agg('sum').reset_index()
user_item_feature=pd.merge(u1,u2,on=['user_id','item_id'],how='left')
user_item_feature=user_item_feature.fillna(0)
# user x item brand
u1=data[['user_id','item_brand_id']]
u1['user_item_brand_see']=1
u1=u1.groupby(['user_id','item_brand_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['user_id','item_brand_id']]
u2['user_item_brand_trade']=1
u2=u2.groupby(['user_id','item_brand_id']).agg('sum').reset_index()
user_brand_feature=pd.merge(u1,u2,on=['user_id','item_brand_id'],how='left')
user_brand_feature=user_brand_feature.fillna(0)
# user x context timestamp (disabled)
#u1=data[['user_id','context_timestamp']]
#u1['user_time_see']=1
#u1=u1.groupby(['user_id','context_timestamp']).agg('sum').reset_index()
#
#u2=data[(data.is_trade==1)][['user_id','context_timestamp']]
#u2['user_time_trade']=1
#u2=u2.groupby(['user_id','context_timestamp']).agg('sum').reset_index()
#
#user_time_feature=pd.merge(u1,u2,on=['user_id','context_timestamp'],how='left')
#user_time_feature=user_time_feature.fillna(0)
# user x shop
u1=data[['user_id','shop_id']]
u1['user_shop_see']=1
u1=u1.groupby(['user_id','shop_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['user_id','shop_id']]
u2['user_shop_trade']=1
u2=u2.groupby(['user_id','shop_id']).agg('sum').reset_index()
user_shop_feature=pd.merge(u1,u2,on=['user_id','shop_id'],how='left')
user_shop_feature=user_shop_feature.fillna(0)
# user x context page
u1=data[['user_id','context_page_id']]
u1['user_page_see']=1
u1=u1.groupby(['user_id','context_page_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['user_id','context_page_id']]
u2['user_page_trade']=1
u2=u2.groupby(['user_id','context_page_id']).agg('sum').reset_index()
user_page_feature=pd.merge(u1,u2,on=['user_id','context_page_id'],how='left')
user_page_feature=user_page_feature.fillna(0)
# user x brand x shop
u1=data[['user_id','item_brand_id','shop_id']]
u1['user_brand_shop_see']=1
u1=u1.groupby(['user_id','item_brand_id','shop_id']).agg('sum').reset_index()
u2=data[data.is_trade==1][['user_id','item_brand_id','shop_id']]
u2['user_brand_shop_trade']=1
u2=u2.groupby(['user_id','item_brand_id','shop_id']).agg('sum').reset_index()
user_brand_shop_feature=pd.merge(u1,u2,on=['user_id','item_brand_id','shop_id'],how='left')
user_brand_shop_feature=user_brand_shop_feature.fillna(0)
# user x brand x context page
u1=data[['user_id','item_brand_id','context_page_id']]
u1['user_brand_page_see']=1
u1=u1.groupby(['user_id','item_brand_id','context_page_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['user_id','item_brand_id','context_page_id']]
u2['user_brand_page_trade']=1
u2=u2.groupby(['user_id','item_brand_id','context_page_id']).agg('sum').reset_index()
user_brand_page_feature=pd.merge(u1,u2,on=['user_id','item_brand_id','context_page_id'],how='left')
user_brand_page_feature=user_brand_page_feature.fillna(0)
# brand x context page
u1=data[['item_brand_id','context_page_id']]
u1['brand_page_see']=1
u1=u1.groupby(['item_brand_id','context_page_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['item_brand_id','context_page_id']]
u2['brand_page_trade']=1
u2=u2.groupby(['item_brand_id','context_page_id']).agg('sum').reset_index()
brand_page_feature=pd.merge(u1,u2,on=['item_brand_id','context_page_id'],how='left')
brand_page_feature=brand_page_feature.fillna(0)
# context page x shop
u1=data[['context_page_id','shop_id']]
u1['page_shop_see']=1
u1=u1.groupby(['context_page_id','shop_id']).agg('sum').reset_index()
u2=data[(data.is_trade==1)][['context_page_id','shop_id']]
u2['page_shop_trade']=1
u2=u2.groupby(['context_page_id','shop_id']).agg('sum').reset_index()
page_shop_feature=pd.merge(u1,u2,on=['context_page_id','shop_id'],how='left')
page_shop_feature=page_shop_feature.fillna(0)
# ---- merge all extracted aggregate features back onto the base table ----
new_feature_data=data.drop(['instance_id','context_id','is_trade'],axis=1,inplace=False)
new_feature_data=pd.merge(new_feature_data,item_feature,on='item_id',how='left')
new_feature_data=pd.merge(new_feature_data,item_brand_feature,on='item_brand_id',how='left')
new_feature_data=pd.merge(new_feature_data,user_feature,on='user_id',how='left')
new_feature_data=pd.merge(new_feature_data,page_feature,on='context_page_id',how='left')
new_feature_data=pd.merge(new_feature_data,shop_feature,on='shop_id',how='left')
new_feature_data=pd.merge(new_feature_data,user_item_feature,on=['user_id','item_id'],how='left')
new_feature_data=pd.merge(new_feature_data,user_brand_feature,on=['user_id','item_brand_id'],how='left')
#new_feature_data=pd.merge(new_feature_data,user_time_feature,on=['user_id','context_timestamp'],how='left')
new_feature_data=pd.merge(new_feature_data,user_shop_feature,on=['user_id','shop_id'],how='left')
new_feature_data=pd.merge(new_feature_data,user_page_feature,on=['user_id','context_page_id'],how='left')
new_feature_data=pd.merge(new_feature_data,user_brand_shop_feature,on=['user_id','item_brand_id','shop_id'],how='left')
new_feature_data=pd.merge(new_feature_data,user_brand_page_feature,on=['user_id','item_brand_id','context_page_id'],how='left')
new_feature_data=pd.merge(new_feature_data,brand_page_feature,on=['item_brand_id','context_page_id'],how='left')
new_feature_data=pd.merge(new_feature_data,page_shop_feature,on=['context_page_id','shop_id'],how='left')
new_feature_data.to_csv('new_feature_data.csv',index=None)
# time-based split: days before the 24th train, day 24 tests
train=new_feature_data[(new_feature_data.day<24)]
test=new_feature_data[(new_feature_data.day==24)]
y_train=data[(data.day<24)][['is_trade']]
y_test=data[(data.day==24)][['is_trade']]
string2=new_feature_data.columns.values.tolist()
print(string2)
# string3: columns LightGBM should treat as categorical
string3=['item_id','item_category_list','item_brand_id','item_city_id','user_id','user_gender_id','user_occupation_id','context_page_id','shop_id']
features = ['item_id', 'item_brand_id', 'item_city_id', 'item_price_level', 'item_sales_level',
            'item_collected_level', 'item_pv_level', 'user_gender_id', 'user_occupation_id',
            'user_age_level', 'user_star_level',
            'context_page_id', 'hour', 'shop_id', 'shop_review_num_level', 'shop_star_level',
            'shop_review_positive_rate', 'shop_score_service', 'shop_score_delivery', 'shop_score_description',
            ]
import lightgbm as lgb
from sklearn.metrics import log_loss
# quick baseline with the sklearn wrapper, scored by log-loss on day 24
clf=lgb.LGBMClassifier(num_leaves=63,max_depth=7,n_estimators=80)
clf.fit(train,y_train,feature_name=string2,categorical_feature=string3)
y_pre=clf.predict_proba(test)[:,1]
print(log_loss(y_test,y_pre))
# ---- split id-like columns (to be encoded) from the numeric columns ----
dataset1=new_feature_data.loc[:,['item_id','item_category_list','item_brand_id','item_city_id','user_id',
'user_gender_id','user_occupation_id','context_timestamp','context_page_id','shop_id']]
#dataset2=new_feature_data.loc[:,['item_property_list','item_property_list','item_price_level','item_sales_level',
# 'item_collected_level','item_pv_level','user_age_level','user_star_level',
# 'predict_category_property','shop_review_num_level',
# 'shop_review_positive_rate','shop_star_level','shop_score_service','shop_score_delivery',
# 'shop_score_description']]
dataset2=new_feature_data.drop(['item_id','item_category_list','item_brand_id','item_city_id','user_id','user_gender_id','user_occupation_id','context_timestamp','context_page_id','shop_id'],axis=1,inplace=False)
label=data.loc[:,'is_trade']
# ---- native-API lightGBM on a random 80/20 split ----
#
#new_feature_data=new_feature_data.apply(lambda x:(x-np.min(x))/(np.max(x)-np.min(x)))
#X=data.drop(['instance_id','context_id','is_trade'],axis=1,inplace=False)
X=new_feature_data
y=data[['is_trade']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
import json
import lightgbm as lgb
from sklearn.metrics import roc_curve, auc, roc_auc_score
print("load data")
#df_train=pd.read_csv(path+"regression.train",header=None,sep='\t')
#df_test=pd.read_csv(path+"regression.train",header=None,sep='\t')
#y_train = df_train[0].values
#y_test = df_test[0].values
#X_train = df_train.drop(0, axis=1).values
#X_test = df_test.drop(0, axis=1).values
df_train=X_train
#df_test=pd.read_csv(path+"regression.train",header=None,sep='\t')
# flatten labels to 1-D arrays, features to plain ndarrays
y_train =y_train.iloc[:,0].values
y_test =y_test.iloc[:,0].values
X_train =np.array(X_train)
X_test = np.array(X_test)
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# specify your configurations as a dict
#
#
params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'logloss', 'auc'},
    'num_leaves': 50,
    'learning_rate': 0.02,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': 0
}
#string2=['item_id','item_category_list','item_property_list','item_brand_id','item_city_id','item_price_level','item_sales_level','item_collected_level','item_pv_level','user_id','user_gender_id',
# 'user_age_level','user_occupation_id','user_star_level','context_timestamp','context_page_id','predict_category_property','shop_id','shop_review_num_level','shop_review_positive_rate','shop_star_level','shop_score_service','shop_score_delivery','shop_score_description']
string2=new_feature_data.columns.values.tolist()
print(string2)
string3=['item_id','item_category_list','item_brand_id','item_city_id','user_id','user_gender_id','user_occupation_id','context_timestamp','context_page_id','shop_id']
print('Start training...')
# train (up to 3000 rounds, early stopping on the eval set)
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=3000,
                feature_name=string2,
                categorical_feature=string3,
                valid_sets=lgb_eval,
                early_stopping_rounds=10)
print('Save model...')
# save model to file
gbm.save_model('model.txt')
print('Start predicting...')
# predict with the best iteration found by early stopping
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print(y_pred)
print('The roc of prediction is:', roc_auc_score(y_test, y_pred) )
num_round = 300
lgb.cv(params, lgb_train, num_round, nfold=5,feature_name=string2,categorical_feature=string3,early_stopping_rounds=10)
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
from sklearn.metrics import log_loss
print(log_loss(y_test,y_pred))
# ---- manual log-loss cross-check of sklearn's log_loss above ----
# FIX: the original computed math.log(10, p[i]); math.log's SECOND argument
# is the BASE, so that evaluated log_{p[i]}(10) rather than the Bernoulli
# log-likelihood.
import numpy as np
p = y_pred
# vectorized natural-log likelihood: y*ln(p) + (1-y)*ln(1-p), averaged and negated
logloss = -(y_test * np.log(p) + (1 - y_test) * np.log(1 - p)).sum() / len(y_test)
print('losloss is that', logloss)
# ---- one-hot encode the id-like features ----
t1=dataset1.astype(str)
data_t1=pd.get_dummies(t1)
# sanity check of the cardinality of each id-type column:
###item:3695 item_brand_id:1101 item_city_id:99 user_id:13573 user_gender_id:4
####user_occupation_id:5 context_timestamp:5 shop_id:2015 item_category_list:13
t=pd.DataFrame(dataset2.loc[:,'item_category_list'])
t['instance_id_count']=1
t=t.groupby('item_category_list').agg('sum').reset_index()
#########
# min-max normalize the numeric features
dataset2_ave=dataset2.apply(lambda x:(x-np.min(x))/(np.max(x)-np.min(x)))
# min-max normalize the id features
dataset1_ave=dataset1.apply(lambda x:(x-np.min(x))/(np.max(x)-np.min(x)))
# ---- GBDT feature fusion: GBDT leaf indices -> one-hot -> logistic regression ----
data_new=data.drop(['context_id','is_trade'],axis=1,inplace=False)
data_new=data_new.apply(lambda x:(x-np.min(x))/(np.max(x)-np.min(x)))
X=data_new
y=label
#X=np.array(X)
#y=np.array(y)
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
                              GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
# y_train,
# test_size=0.5)
#X_train_11=X_train[['item_id','item_category_list','item_brand_id','item_city_id','user_id',
# 'user_gender_id','user_occupation_id','context_timestamp','shop_id']]
#
#X_train_lr11=X_train[['item_id','item_category_list','item_brand_id','item_city_id','user_id',
# 'user_gender_id','user_occupation_id','context_timestamp','shop_id']]
grd_11 = GradientBoostingClassifier()
grd_enc_11 = OneHotEncoder()
grd_lm_11 = LogisticRegression()
grd_11.fit(X_train, y_train)
# apply() returns per-estimator leaf indices; [:, :, 0] drops the class axis
grd_enc_11.fit(grd_11.apply(X_train)[:, :, 0])
lr11=grd_enc_11.transform(grd_11.apply(X_train)[:, :, 0])
grd_lm_11.fit(lr11, y_train)
ltest11=grd_enc_11.transform(grd_11.apply(X_test)[:, :, 0])
y_pred_grd_lm = grd_lm_11.predict_proba(ltest11)[:, 1]
p=y_pred_grd_lm  # predicted probabilities
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
xgb_lr_auc = roc_auc_score(y_test, y_pred_grd_lm)
print('基于组合特征的LR AUC: %.5f' % xgb_lr_auc)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
# ---- manual log-loss for the GBDT+LR probabilities (p assigned above) ----
# FIX: the original called math.log(10, p[i]); math.log's second argument is
# the base, so it computed log base p of 10 instead of ln(p).
logloss = -(y_test * np.log(p) + (1 - y_test) * np.log(1 - p)).sum() / len(y_test)
print('losloss is that', logloss)
# ---------------------------------------------------------------------------
# Separate trees for id-type and non-id-type features: fit one small GBDT per
# feature group, one-hot encode each model's leaf indices, then train a
# single logistic regression on the concatenated leaf encodings.
# ---------------------------------------------------------------------------
X_train_1 = X_train[['item_id']]
X_train_2 = X_train[['item_category_list']]
X_train_3 = X_train[['item_brand_id']]
X_train_4 = X_train[['item_city_id']]
X_train_5 = X_train[['user_id']]
X_train_6 = X_train[['user_gender_id']]
X_train_7 = X_train[['user_occupation_id']]
X_train_8 = X_train[['context_timestamp']]
X_train_9 = X_train[['shop_id']]
# NOTE(review): 'item_property_list' appears twice in this column list --
# confirm that is intended.
X_train_10 = X_train[['item_property_list', 'item_property_list', 'item_price_level',
                      'item_sales_level', 'item_collected_level', 'item_pv_level',
                      'user_age_level', 'user_star_level', 'context_page_id',
                      'predict_category_property', 'shop_review_num_level',
                      'shop_review_positive_rate', 'shop_star_level',
                      'shop_score_service', 'shop_score_delivery',
                      'shop_score_description']]
n_estimator = 1
def _fit_leaf_encoder(feature_frame):
    """Fit a GBDT on one feature group and one-hot encode its leaf indices.

    Returns (booster, leaf encoder, spare LR instance, dense encoded leaves).
    """
    booster = GradientBoostingClassifier(n_estimators=n_estimator)
    booster.fit(feature_frame, y_train)
    leaves = booster.apply(feature_frame)[:, :, 0]
    encoder = OneHotEncoder()
    encoder.fit(leaves)
    return booster, encoder, LogisticRegression(), encoder.transform(leaves).toarray()
grd_1, grd_enc_1, grd_lm_1, lr1 = _fit_leaf_encoder(X_train_1)
grd_2, grd_enc_2, grd_lm_2, lr2 = _fit_leaf_encoder(X_train_2)
grd_3, grd_enc_3, grd_lm_3, lr3 = _fit_leaf_encoder(X_train_3)
grd_4, grd_enc_4, grd_lm_4, lr4 = _fit_leaf_encoder(X_train_4)
grd_5, grd_enc_5, grd_lm_5, lr5 = _fit_leaf_encoder(X_train_5)
grd_6, grd_enc_6, grd_lm_6, lr6 = _fit_leaf_encoder(X_train_6)
grd_7, grd_enc_7, grd_lm_7, lr7 = _fit_leaf_encoder(X_train_7)
grd_8, grd_enc_8, grd_lm_8, lr8 = _fit_leaf_encoder(X_train_8)
grd_9, grd_enc_9, grd_lm_9, lr9 = _fit_leaf_encoder(X_train_9)
grd_10, grd_enc_10, grd_lm_10, lr10 = _fit_leaf_encoder(X_train_10)
# `result` is the full GBDT-derived feature matrix for the LR.
result = np.concatenate([lr1, lr2, lr3, lr4, lr5, lr6, lr7, lr8, lr9, lr10], axis=1)
y_train = np.array(y_train)
grd_lm = LogisticRegression()
grd_lm.fit(result, y_train)
##############################################################################
# Apply the same per-feature leaf encoding to the test set and evaluate the
# combined-feature logistic regression (AUC + log-loss).
##############################################################################
X_test_1 = X_test[['item_id']]
X_test_2 = X_test[['item_category_list']]
X_test_3 = X_test[['item_brand_id']]
X_test_4 = X_test[['item_city_id']]
X_test_5 = X_test[['user_id']]
X_test_6 = X_test[['user_gender_id']]
X_test_7 = X_test[['user_occupation_id']]
X_test_8 = X_test[['context_timestamp']]
X_test_9 = X_test[['shop_id']]
# NOTE(review): 'item_property_list' is listed twice (matches the training
# slice) -- confirm intended.
X_test_10 = X_test[['item_property_list', 'item_property_list', 'item_price_level',
                    'item_sales_level', 'item_collected_level', 'item_pv_level',
                    'user_age_level', 'user_star_level', 'context_page_id',
                    'predict_category_property', 'shop_review_num_level',
                    'shop_review_positive_rate', 'shop_star_level',
                    'shop_score_service', 'shop_score_delivery',
                    'shop_score_description']]
# Encode test leaves with the encoders fitted on the training split.
ltest1 = grd_enc_1.transform(grd_1.apply(X_test_1)[:, :, 0]).toarray()
ltest2 = grd_enc_2.transform(grd_2.apply(X_test_2)[:, :, 0]).toarray()
ltest3 = grd_enc_3.transform(grd_3.apply(X_test_3)[:, :, 0]).toarray()
ltest4 = grd_enc_4.transform(grd_4.apply(X_test_4)[:, :, 0]).toarray()
ltest5 = grd_enc_5.transform(grd_5.apply(X_test_5)[:, :, 0]).toarray()
ltest6 = grd_enc_6.transform(grd_6.apply(X_test_6)[:, :, 0]).toarray()
ltest7 = grd_enc_7.transform(grd_7.apply(X_test_7)[:, :, 0]).toarray()
ltest8 = grd_enc_8.transform(grd_8.apply(X_test_8)[:, :, 0]).toarray()
ltest9 = grd_enc_9.transform(grd_9.apply(X_test_9)[:, :, 0]).toarray()
ltest10 = grd_enc_10.transform(grd_10.apply(X_test_10)[:, :, 0]).toarray()
new_test = np.concatenate([ltest1, ltest2, ltest3, ltest4, ltest5,
                           ltest6, ltest7, ltest8, ltest9, ltest10], axis=1)
y_test = np.array(y_test)
y_pred_grd_lm = grd_lm.predict_proba(new_test)[:, 1]
p = y_pred_grd_lm  # predicted positive-class probabilities
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
xgb_lr_auc = roc_auc_score(y_test, y_pred_grd_lm)
print('基于组合特征的LR AUC: %.5f' % xgb_lr_auc)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
# Log-loss = -1/N * sum(y*log(p) + (1-y)*log(1-p)).
# BUG FIX: math.log(10, p[i]) computed log base p[i] of 10; the natural log
# of the predicted probability was intended.
logloss = np.zeros((len(y_test), 1))
import math as math
for i in range(len(y_test)):
    logloss[i] = y_test[i] * math.log(p[i]) + (1 - y_test[i]) * math.log(1 - p[i])
logloss = -1 / len(y_test) * (np.sum(logloss))
print('logloss is that', logloss)
#
#
#x1=pd.DataFrame(dataset1.loc[:,['user_id', 'user_gender_id']])
#x1['count']=1
#x1=x1.groupby(['user_id','user_gender_id']).agg('sum').reset_index()
#
#
#t1=dataset1.loc[:,['item_id']]
#t1=t1.apply(lambda x:x.astype(str))
#t11=pd.get_dummies(t1)
#
#
##t1=np.array([[1],[2],[3],[5]])
##
##from sklearn import preprocessing
##enc=preprocessing.OneHotEncoder()
##enc.fit(t1)
##aa=enc.transform(t1)
##print(aa)
#
#t2=t1.loc[:,'item_id'].apply(lambda x :x<0)
#
#from numpy import argmax
#from sklearn.preprocessing import LabelEncoder
#from sklearn.preprocessing import OneHotEncoder
#
#d1=np.array(data[['item_id']])
#print(d1)
#
#label_encoder=LabelEncoder()
#integer_encoded=label_encoder.fit_transform(d1)
#
#
######广告商品的特征
#a13=t1
#t=a13
#t['instance_id_count']=1
#t=t.groupby('item_id').agg('sum').reset_index()
|
import numpy as np
import math
def normalize(n):
    """Normalize the 2-vector n = [row, col] to unit length, in place.

    Mutates and returns n. A zero vector raises ZeroDivisionError
    (unchanged from the original behaviour).
    """
    # math.hypot is numerically safer than sqrt(a**2 + b**2) for extreme values.
    norm = math.hypot(n[0], n[1])
    n[0] /= norm
    n[1] /= norm
    return n
def get_point_type(pt, mask):
    """Classify pixel pt = [row, col] against the binary mask.

    Returns:
        0 -- outside the object (mask value != 1, or pt off the grid),
        1 -- interior object pixel (all 8 neighbours are also object),
        2 -- boundary object pixel (at least one neighbour is background).
    """
    try:
        if mask[pt[0], pt[1]] == 1:
            # Product over the 3x3 neighbourhood: zero iff any neighbour is 0.
            # NOTE(review): negative indices wrap around (numpy semantics), so
            # pixels on the top/left border read the opposite edge -- confirm
            # masks are padded so object pixels never touch the border.
            boundary_detector = mask[pt[0] - 1, pt[1] - 1] * mask[pt[0] - 1, pt[1]] * mask[pt[0] - 1, pt[1] + 1] * \
                                mask[pt[0], pt[1] - 1] * mask[pt[0], pt[1]] * mask[pt[0], pt[1] + 1] * \
                                mask[pt[0] + 1, pt[1] - 1] * mask[pt[0] + 1, pt[1]] * mask[pt[0] + 1, pt[1] + 1]
            if boundary_detector == 0:
                point_type = 2  # boundary point
            else:
                point_type = 1  # object point / inside point
        else:
            point_type = 0  # not object point / outside point
    except (IndexError, TypeError):
        # BUG FIX: was a bare `except:` that swallowed every error (including
        # KeyboardInterrupt). Off-grid or malformed lookups count as outside.
        point_type = 0
    return point_type
def find_contact_seed_point(r_c, c_c, sin_theta, cos_theta, w, mask, search_resolution=4):
    """Search along the grasp line for a pixel strictly inside the object.

    Probes signed offsets from the line centre (r_c, c_c) in a
    coarse-to-fine pattern, halving the step each level until it reaches
    search_resolution. Returns the offset of the first interior pixel
    found, or -1 when the line misses the object entirely.
    """
    levels = int(math.log(w / search_resolution, 2)) + 1
    for level in range(levels):
        for k in range(int((2 ** level - 1) / 2) + 1):
            for direction in (-1, 1):
                offset = int(direction * 2 ** (-level) * (2 * k + 1) * w)
                probe_r = int(round(r_c + offset * sin_theta))
                probe_c = int(round(c_c + offset * cos_theta))
                if get_point_type([probe_r, probe_c], mask) == 1:
                    return offset
    return -1
def get_boundary_pixel_normal(boundary_pixel, mask, x, neighbor_vectors_row, neighbor_vectors_col):
    """Estimate the outward surface normal at a boundary pixel of the mask.

    Sums the offset vectors to occupied neighbours inside a
    (2x+1) x (2x+1) window, normalizes the sum, and negates it so the
    normal points away from the occupied side.
    NOTE(review): window_weights below is hard-coded 7x7, so this only
    matches when x == 3 -- confirm callers always use window size 7
    (gpl_intersection_points does).
    """
    # Top-left corner of the window in mask coordinates.
    r0 = boundary_pixel[0] - x
    c0 = boundary_pixel[1] - x
    # Taper the corners so the square window approximates a disc.
    window_weights = [[0, 0.314, 0.813, 1, 0.813, 0.314, 0],
                      [0.314, 1, 1, 1, 1, 1, 0.314],
                      [0.813, 1, 1, 1, 1, 1, 0.813],
                      [1, 1, 1, 1, 1, 1, 1],
                      [0.813, 1, 1, 1, 1, 1, 0.813],
                      [0.314, 1, 1, 1, 1, 1, 0.314],
                      [0, 0.314, 0.813, 1, 0.813, 0.314, 0]]
    window_weights = np.array(window_weights)  # change the window shape to a circle of radius x
    window_mask = mask[r0:r0 + 2 * x + 1, c0:c0 + 2 * x + 1]
    # Keep only the offset vectors whose pixel is occupied (and weighted).
    occupied_neighbor_vectors_row = neighbor_vectors_row * window_mask * window_weights
    occupied_neighbor_vectors_col = neighbor_vectors_col * window_mask * window_weights
    n_r = np.sum(occupied_neighbor_vectors_row)
    n_c = np.sum(occupied_neighbor_vectors_col)
    neighbor_vectors_sum = normalize([n_r, n_c])
    # Flip: the occupied mass points inward, the normal must point outward.
    normal = [-neighbor_vectors_sum[0], -neighbor_vectors_sum[1]]
    return normal
def gpl_intersection_points(r0, c0, sin_theta, cos_theta, hn, w, mask, window_size):
    """Intersect one finger line of the grasp with the object mask.

    The line runs parallel to the grasp axis, offset hn pixels from the
    grasp centre (r0, c0); contacts are searched within +/-w pixels along
    it.  Returns an (n, 7) float16 array with rows
    [row, col, side (1=left, 2=right), s offset along line, hn,
    normal_row, normal_col].  Sentinel offsets encode failures:
    +/-(2w+1) = collision on that side, +/-2w = line misses the object.
    """
    # -------------------calculate normals----------------------------
    # Precomputed (row, col) offset grids used for normal estimation.
    x = int(window_size / 2)
    v = np.arange(-x, x + 1)
    neighbor_vectors_row = np.repeat(v.reshape((-1, 1)), window_size, axis=1)
    neighbor_vectors_col = np.repeat(v.reshape((1, -1)), window_size, axis=0)
    # ----------------------------------------------------------------------
    intsec_points = np.ndarray((0, 7), dtype=np.float16)
    # Centre of this finger line, offset hn from the grasp centre.
    dcr = hn * cos_theta
    dcc = hn * sin_theta
    r_c = r0 - dcr
    c_c = c0 + dcc
    # find seed point
    s_found = find_contact_seed_point(r_c, c_c, sin_theta, cos_theta, w, mask, search_resolution=2)
    if s_found != -1:  # the grasp line intersects the object mask
        # Bisect outward from the interior seed on each side until a
        # boundary pixel (type 2) is found or the interval collapses.
        l1 = -w
        l2 = s_found
        r1 = s_found
        r2 = w
        left_contact_found = False
        right_contact_found = False
        while not (left_contact_found and right_contact_found):
            if not left_contact_found and l2 - l1 > 1:
                lm = (l1 + l2) / 2
                # calculate left test point
                lrt = int(round(r_c + lm * sin_theta))
                lct = int(round(c_c + lm * cos_theta))
                point_type_l = get_point_type([lrt, lct], mask)
                if point_type_l == 2:
                    normal1 = get_boundary_pixel_normal([lrt, lct], mask, x, neighbor_vectors_row, neighbor_vectors_col)
                    left_contact_point = [lrt, lct, 1, lm, hn, normal1[0], normal1[1]]
                    intsec_points = np.append(intsec_points, [left_contact_point], axis=0)
                    left_contact_found = True
                elif point_type_l == 0:
                    l1 = lm
                else:
                    l2 = lm
            elif not left_contact_found and l2 - l1 <= 1:
                # Interval collapsed without hitting a boundary pixel.
                left_contact_point = [-1, -1, 1, -2 * w - 1, hn, 0, 0]  # left side collision
                intsec_points = np.append(intsec_points, [left_contact_point], axis=0)
                left_contact_found = True
            if not right_contact_found and r2 - r1 > 1:
                rm = (r1 + r2) / 2
                # calculate right test point
                rrt = int(round(r_c + rm * sin_theta))
                rct = int(round(c_c + rm * cos_theta))
                point_type_r = get_point_type([rrt, rct], mask)
                if point_type_r == 2:
                    normal2 = get_boundary_pixel_normal([rrt, rct], mask, x, neighbor_vectors_row, neighbor_vectors_col)
                    right_contact_point = [rrt, rct, 2, rm, hn, normal2[0], normal2[1]]
                    intsec_points = np.append(intsec_points, [right_contact_point], axis=0)
                    right_contact_found = True
                elif point_type_r == 0:
                    r2 = rm
                else:
                    r1 = rm
            elif not right_contact_found and r2 - r1 <= 1:
                right_contact_point = [-1, -1, 2, 2 * w + 1, hn, 0, 0]  # right side collision
                intsec_points = np.append(intsec_points, [right_contact_point], axis=0)
                right_contact_found = True
    else:  # the grasp line does not intersect the object mask
        left_contact_point = [-1, -1, 1, -2 * w, hn, 0, 0]  # no contact
        intsec_points = np.append(intsec_points, [left_contact_point], axis=0)
        right_contact_point = [-1, -1, 2, 2 * w, hn, 0, 0]  # no contact
        intsec_points = np.append(intsec_points, [right_contact_point], axis=0)
    return intsec_points
def extract_contact_region(r0, c0, theta, h, w, mask):
    """Collect left/right contact candidates along the gripper fingers.

    Sweeps every finger line hn in [-h, h], intersects it with the mask and
    sorts the two resulting contact records into left (side marker 1) and
    right (side marker 2) regions.  Returns (left_region, right_region),
    each an (n, 7) float16 array of rows [r, c, side, s, hn, n_r, n_c].
    """
    left_region = np.ndarray((0, 7), dtype=np.float16)
    right_region = np.ndarray((0, 7), dtype=np.float16)
    sin_t = np.sin(np.deg2rad(theta))
    cos_t = np.cos(np.deg2rad(theta))
    for hn in range(-h, h + 1):
        pts = gpl_intersection_points(r0, c0, sin_t, cos_t, hn, w, mask, 7)
        if pts.shape[0] != 2:
            continue
        if pts[0][2] == 1:
            left_pt, right_pt = pts[0], pts[1]
        else:
            left_pt, right_pt = pts[1], pts[0]
        left_region = np.append(left_region, [left_pt], axis=0)
        right_region = np.append(right_region, [right_pt], axis=0)
    return left_region, right_region
def rotation_angle(l_profile, r_profile, l_min, r_max, contact_threshold=1):
    """Estimate the free rotation (degrees) of the object between fingers.

    l_profile / r_profile hold the signed contact offsets per finger row;
    l_min / r_max are the primary (deepest) contacts.  A row is "in
    contact" when its offset is within contact_threshold of the primary
    contact.  When one contact span lies entirely above the other, the
    smallest lever-arm angle (arctan of offset change over row distance)
    on each side of the pivot bounds the possible rotation.
    Returns min(left_angle, right_angle); both default to 0.
    """
    left_angle = 0
    right_angle = 0
    n = l_profile.size
    # sl1/sl2 (sr1/sr2): first/last row index in contact on the left (right).
    sl1 = n
    sl2 = -1
    sr1 = n
    sr2 = -1
    for i in range(n):
        if abs(l_profile[i] - l_min) < contact_threshold:
            if i < sl1:
                sl1 = i
            if i > sl2:
                sl2 = i
        if abs(r_profile[i] - r_max) < contact_threshold:
            if i < sr1:
                sr1 = i
            if i > sr2:
                sr2 = i
    # rotation angles-------------------------
    if sl1 > sr2 and sl1 != n and sl2 != -1 and sr1 != n and sr2 != -1:  # left up right down
        # Pivot roughly midway between the two contact spans.
        rot_center = int((sl1 + sr2) / 2)
        for i in range(rot_center):
            dh = abs(l_profile[sl1] - l_profile[i])
            ds = abs(i - sl1)
            if ds == 0:
                ang_i = 0
            else:
                tan_i = dh / ds
                ang_i = np.float16(np.rad2deg(np.arctan(tan_i)))
            # Keep the tightest (minimum) clearance angle on this side.
            if i == 0:
                left_angle = ang_i
            elif ang_i < left_angle:
                left_angle = ang_i
        for j in range(rot_center, n):
            dh = abs(r_profile[sr2] - r_profile[j])
            ds = abs(j - sr2)
            if ds == 0:
                ang_i = 0
            else:
                tan_i = dh / ds
                ang_i = np.float16(np.rad2deg(np.arctan(tan_i)))
            if j == rot_center:
                right_angle = ang_i
            elif ang_i < right_angle:
                right_angle = ang_i
    # Mirror case: left contact span below the right span.
    if sr1 > sl2 and sl1 != n and sr1 != n and sl2 != -1 and sr2 != -1:
        rot_center = int((sr1 + sl2) / 2)
        for i in range(rot_center, n):
            dh = abs(l_profile[sl2] - l_profile[i])
            ds = abs(i - sl2)
            if ds == 0:
                ang_i = 0
            else:
                tan_i = dh / ds
                ang_i = np.float16(np.rad2deg(np.arctan(tan_i)))
            if i == rot_center:
                left_angle = ang_i
            elif ang_i < left_angle:
                left_angle = ang_i
        for j in range(rot_center):
            dh = abs(r_profile[sr1] - r_profile[j])
            ds = abs(j - sr1)
            if ds == 0:
                ang_i = 0
            else:
                tan_i = dh / ds
                ang_i = np.float16(np.rad2deg(np.arctan(tan_i)))
            if j == 0:
                right_angle = ang_i
            elif ang_i < right_angle:
                right_angle = ang_i
    return min(left_angle, right_angle)
def slippage_angle(l_profile, r_profile, l_normals, r_normals, theta, l_min, r_max, contact_threshold=3):
    """Mean misalignment (degrees) between the finger closing direction and
    the surface normals at the contact points.

    A row counts as a contact when its profile value is within
    contact_threshold of the primary contact (l_min / r_max).  A side's
    mean angle is reported only when all of its per-contact angles have
    the same sign in the gripper frame; otherwise that side stays 0.
    Returns (left_angle, right_angle, left_contact_ids, right_contact_ids).
    """
    l_contact_points_ids = np.ndarray((0,), dtype=np.int8)
    r_contact_points_ids = np.ndarray((0,), dtype=np.int8)
    left_slippage_angle, right_slippage_angle = 0, 0
    sin_theta = np.sin(np.deg2rad(theta))
    cos_theta = np.cos(np.deg2rad(theta))
    grasp_direction = [sin_theta, cos_theta]
    left_slippage_angles = np.ndarray((0,), dtype=np.float16)
    right_slippage_angles = np.ndarray((0,), dtype=np.float16)
    # sum1 accumulates magnitudes, sum2 signed values; their absolute
    # values match only when every contact angle tilts the same way.
    left_slippage_angles_sum1 = 0
    left_slippage_angles_sum2 = 0
    right_slippage_angles_sum1 = 0
    right_slippage_angles_sum2 = 0
    left_contact_count = 0
    right_contact_count = 0
    rot_m = [[cos_theta, -sin_theta],
             [sin_theta, cos_theta]]  # rotation from image coordinate system to gripper coordinate system
    for i in range(l_profile.size):
        if abs(l_profile[i] - l_min) < contact_threshold:  # test contact points
            l_normal_g = np.matmul(rot_m, l_normals[i])
            dcl = np.float16(np.dot(grasp_direction, l_normals[i]))
            # Supplementary angle on the left side (normal presumably faces
            # opposite the closing direction -- confirm).
            left_slippage_angle_i = 180 - np.rad2deg(np.arccos(dcl))
            left_slippage_angles_sum1 += left_slippage_angle_i
            if l_normal_g[0] < 0:
                left_slippage_angles = np.append(left_slippage_angles, [-left_slippage_angle_i], axis=0)
                left_slippage_angles_sum2 -= left_slippage_angle_i
            else:
                left_slippage_angles = np.append(left_slippage_angles, [left_slippage_angle_i], axis=0)
                left_slippage_angles_sum2 += left_slippage_angle_i
            l_contact_points_ids = np.append(l_contact_points_ids, [i], axis=0)
            left_contact_count += 1
        if abs(r_profile[i] - r_max) < contact_threshold:
            r_normal_g = np.matmul(rot_m, r_normals[i])
            dcr = np.float16(np.dot(grasp_direction, r_normals[i]))
            right_slippage_angle_i = np.rad2deg(np.arccos(dcr))
            right_slippage_angles_sum1 += right_slippage_angle_i
            if r_normal_g[0] < 0:
                right_slippage_angles = np.append(right_slippage_angles, [-right_slippage_angle_i], axis=0)
                right_slippage_angles_sum2 -= right_slippage_angle_i
            else:
                right_slippage_angles = np.append(right_slippage_angles, [right_slippage_angle_i], axis=0)
                right_slippage_angles_sum2 += right_slippage_angle_i
            r_contact_points_ids = np.append(r_contact_points_ids, [i], axis=0)
            right_contact_count += 1
    if abs(left_slippage_angles_sum1) == abs(left_slippage_angles_sum2) and left_contact_count != 0:
        left_slippage_angle = left_slippage_angles_sum1 / left_contact_count
    if abs(right_slippage_angles_sum1) == abs(right_slippage_angles_sum2) and right_contact_count != 0:
        right_slippage_angle = right_slippage_angles_sum1 / right_contact_count
    return left_slippage_angle, right_slippage_angle, l_contact_points_ids, r_contact_points_ids
def contact_center_offset(l_contacts, r_contacts, gipper_hh):
    """Offset of the combined contact centre from the gripper centre line.

    l_contacts / r_contacts are arrays of contact row indices along each
    finger; an empty side contributes the half-height itself as a penalty.
    Returns the absolute mean of the two per-side offsets.
    """
    if l_contacts.size == 0:
        left_offset = gipper_hh
    else:
        left_offset = (np.amax(l_contacts) + np.amin(l_contacts)) / 2 - gipper_hh
    if r_contacts.size == 0:
        right_offset = gipper_hh
    else:
        right_offset = (np.amax(r_contacts) + np.amin(r_contacts)) / 2 - gipper_hh
    return abs((left_offset + right_offset) / 2)
def high_level_grasp_feature(left_contact_region, right_contact_region, theta, h, w):
    """Derive high-level grasp-quality features from the contact regions.

    Returns (collision, translation, rotation_angle,
    [left_slip, right_slip], left_contact_ids, right_contact_ids,
    gripper_offset).  translation is -1 when no object was seen; on
    collision the other features keep their pessimistic defaults.
    """
    # Column 3 holds the signed offset along the finger line; columns 5..6
    # hold the estimated surface normal (see gpl_intersection_points).
    l_profile = left_contact_region[:, 3]
    r_profile = right_contact_region[:, 3]
    l_normals = left_contact_region[:, 5:]
    r_normals = right_contact_region[:, 5:]
    # Sentinel initial values for the primary-contact scan below.
    l_min = 2 * w + 2
    # NOTE(review): asymmetric with l_min; -2 * w - 2 may have been the
    # intent (as written it only sits outside [-w, w] for w > 2) -- confirm.
    r_max = -2 * w + 2
    collision = False
    translation = 200.0
    rot_ang = 180.0
    l_slip_ang = 180.0
    r_slip_ang = 180.0
    gripper_offset = h
    lcids = np.ndarray((0,), dtype=np.int8)
    rcids = np.ndarray((0,), dtype=np.int8)
    # -------find primary contact point and check for collision
    for pt in l_profile:
        if pt == -2 * w - 1:
            collision = True
            break
        elif -w <= pt < l_min:
            l_min = pt
    for pt in r_profile:
        if pt == 2 * w + 1:
            collision = True
            break
        elif w >= pt > r_max:
            r_max = pt
    # ---------------------------------------------------------
    if not collision:
        if np.amax(l_profile) == -2 * w:
            # print('no object detected$$$$$$$$$$$$$$')
            translation = -1
        else:
            # --------------------------translation
            translation = abs((l_min + r_max) / 2.0)
            # --------------------------rotation
            rot_ang = rotation_angle(l_profile, r_profile, l_min, r_max)
            l_slip_ang, r_slip_ang, lcids, rcids = slippage_angle(l_profile, r_profile, l_normals, r_normals, theta,
                                                                  l_min, r_max, 5)
            gripper_offset = contact_center_offset(lcids, rcids, h)
    return collision, translation, rot_ang, [l_slip_ang, r_slip_ang], lcids, rcids, gripper_offset
def linearly_normalized_score(feature, n, kernel_profile, feature_uncertainty=-1):
    """Map `feature` through an n-segment piecewise-linear profile.

    kernel_profile is a flat list of breakpoints
    [x0, y0, x1, y1, ..., xn, yn].  The first segment whose right
    breakpoint is >= feature is evaluated; a feature beyond xn scores 0.
    When feature_uncertainty is not -1, returns
    [score, |slope| * feature_uncertainty]; otherwise just the score.
    """
    score = 0.0
    score_uncertainty = 0.0
    for seg in range(n):
        right_x = kernel_profile[2 * (seg + 1)]
        if feature > right_x:
            continue
        left_x = kernel_profile[2 * seg]
        left_y = kernel_profile[2 * seg + 1]
        right_y = kernel_profile[2 * (seg + 1) + 1]
        slope = (right_y - left_y) / (right_x - left_x)
        score = slope * feature + left_y - slope * left_x
        score_uncertainty = abs(slope) * feature_uncertainty
        break
    if feature_uncertainty == -1:
        return score
    return [score, score_uncertainty]
def combine_score_v2(scores):
    """Fuse sub-scores into one: floor at the worst score's first non-zero
    decimal digit, plus a bounded bonus for the slack above that floor.

    x is the minimum score truncated at its first non-zero decimal digit
    and dx is one unit in that digit's place; the bonus p stays below dx,
    so the combined score cannot jump past the next digit step.
    """
    scores = np.array(scores).reshape((1, -1))
    s_min = np.amin(scores)
    # print(scores)
    # Fixed-point rendering of the minimum, padded to at least 3 chars.
    s_min_str = list(str(format(s_min, 'f')))
    # print(s_min_str)
    while len(s_min_str) < 3:
        s_min_str.append('0')
    x_str = '0.0'
    # Truncate at the first non-zero digit (e.g. 0.4732 -> '0.4').
    for ci in range(len(s_min_str)):
        if s_min_str[ci] != '0' and s_min_str[ci] != '.':
            x_str = ''.join(s_min_str[:ci + 1])
            break
    x = float(x_str)
    # dx = one unit in the truncated digit's place (e.g. '0.4' -> 0.1).
    dx_str = list(x_str)
    dx_str[-1] = '1'
    dx = float(''.join(dx_str))
    ds_sum = 0
    for score in scores[0, :]:
        ds_sum += score - x
    p = dx * ds_sum / (3 * (1 - x) + dx)
    final_score = x + p
    # print(final_score)
    return final_score
def grasp_quality_score_v2(collision, translation, rot, slip, contact_offset, gripper_hh, gripper_hw):
    """Score a grasp in [0, 1] (v2: absolute 100-pixel translation kernel).

    Unlike v3, translation is scored against a fixed 100-pixel scale
    rather than the gripper half-width, and the four sub-scores are fused
    with combine_score_v2.  Returns 0.0 on collision and -1.0 when no
    object was detected.
    (A commented-out duplicate of this body was removed.)
    """
    if collision:
        score = 0.0
    elif translation == -1:  # no object detected
        score = -1.0
    else:
        # Mean slippage magnitude over both fingers.
        slip_ang = (abs(slip[0]) + abs(slip[1])) / 2
        s1 = linearly_normalized_score(translation, 1, [0.0, 1.0, 100.0, 0.0])
        s2 = linearly_normalized_score(rot, 1, [0.0, 1.0, 60.0, 0.0])
        s3 = linearly_normalized_score(slip_ang, 1, [0.0, 1.0, 60.0, 0.0])
        s4 = linearly_normalized_score(contact_offset, 2, [0.0, 1.0, 0.5 * gripper_hh, 0.7, gripper_hh, 0.0])
        score = combine_score_v2([s1, s2, s3, s4])
    return score
def grasp_quality_score_v3(collision, translation, rot, slip, contact_offset, gripper_hh, gripper_hw):
    """Score a grasp in [0, 1] from its high-level features.

    Returns (score, [s1, s2, s3, s4]) where the sub-scores are the
    normalized translation, rotation, slippage and contact-offset terms.
    Score is 0.0 on collision and -1.0 when no object was detected.
    """
    s1 = s2 = s3 = s4 = 0.0
    if collision:
        score = 0.0
    elif translation == -1:  # no object detected
        score = -1.0
    else:
        mean_slip = (abs(slip[0]) + abs(slip[1])) / 2
        normalized_translation = translation / gripper_hw
        s1 = linearly_normalized_score(normalized_translation, 1, [0.0, 1.0, 1.0, 0.0])
        s2 = linearly_normalized_score(rot, 1, [0.0, 1.0, 60.0, 0.0])
        s3 = linearly_normalized_score(mean_slip, 1, [0.0, 1.0, 60.0, 0.0])
        s4 = linearly_normalized_score(contact_offset, 2,
                                       [0.0, 1.0, 0.5 * gripper_hh, 0.7, gripper_hh, 0.0])
        # final score = 0.9 * worst sub-score + 0.1 * mean of the other three
        subs = np.array([s1, s2, s3, s4])
        worst = np.amin(subs)
        score = 0.9 * worst + 0.1 / 3 * (np.sum(subs) - worst)
    return score, [s1, s2, s3, s4]
def evaluate_grasp(grasp, mask):
    """Evaluate one grasp candidate against a binary object mask.

    grasp: [row, col, angle_deg, half_height, half_width] of the gripper.
    Returns (score, [s1, s2, s3, s4]) from grasp_quality_score_v3.
    (Commented-out timing scaffolding removed.)
    """
    contact_left, contact_right = extract_contact_region(
        grasp[0], grasp[1], grasp[2], grasp[3], grasp[4], mask)
    collision, trans, rotation, slippage, _lcids, _rcids, offset = high_level_grasp_feature(
        contact_left, contact_right, grasp[2], grasp[3], grasp[4])
    score, features = grasp_quality_score_v3(
        collision, trans, rotation, slippage, offset, grasp[3], grasp[4])
    return score, features
def evaluate_center_of_mass(mask):
    """Return the integer [row, col] centre of mass of a binary mask.

    Vectorized replacement for the original per-pixel double loop.  Also
    drops np.int, which was deprecated since NumPy 1.20 and removed in
    NumPy 1.24, so the original crashes on modern NumPy.
    Raises ValueError when the mask has no occupied (== 1) pixels
    (the original also raised for an empty mask, via NaN conversion).
    """
    coords = np.argwhere(mask == 1)
    if coords.shape[0] == 0:
        raise ValueError('mask contains no occupied pixels')
    com_float = coords.mean(axis=0)
    # round() on numpy floats uses the same round-half-to-even as np.round.
    return [int(round(com_float[0])), int(round(com_float[1]))]
|
# Generated by Django 2.0.7 on 2018-07-26 08:21
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Add payment bookkeeping fields (timestamps, iamport transaction ids
    and a payment status) to the shop Order model."""
    dependencies = [
        ('shop', '0003_auto_20180725_1358'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='created_at',
            # auto_now_add needs a one-off default to backfill existing rows;
            # preserve_default=False drops it again after the migration.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='order',
            # iamport payment-gateway transaction id (blank until paid).
            name='imp_uid',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AddField(
            model_name='order',
            # Our side's unique order id, generated per row.
            name='merchant_uid',
            field=models.UUIDField(default=uuid.uuid4, editable=False),
        ),
        migrations.AddField(
            model_name='order',
            name='status',
            # Payment lifecycle: ready / paid / cancelled / failed
            # (choice labels are Korean display strings).
            field=models.CharField(choices=[('ready', '미결제'), ('paid', '결제완료'), ('cancelled', '결제취소'), ('failed', '결제실패')], db_index=True, default='ready', max_length=9),
        ),
        migrations.AddField(
            model_name='order',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
import math
import sys
from pandas import read_csv
import matplotlib.pyplot as plt
# colorblind-friendly colors from the IBM Design Library
# https://davidmathlogic.com/colorblind/#%23648FFF-%23785EF0-%23DC267F-%23FE6100-%23FFB000
ibm_blue = '#648FFFaa'  # trailing 'aa' = alpha channel (~67% opacity)
ibm_violet = '#785EF0'
ibm_red = '#DC267F'
ibm_orange = '#FE6100'
ibm_yellow = '#FFB000'
def plot_motion_plan(filename):
    """Show a 3D plot of the solution path in data/<filename>/path.csv.

    The CSV must contain 'x', 'y' and 'time' columns.  Blocks on
    plt.show(); returns nothing.
    """
    df_path = read_csv('data/' + filename + '/path.csv')
    # draw base graph
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # draw solution path (high zorder keeps it on top)
    ax.plot(df_path['x'].to_numpy(), df_path['y'].to_numpy(), df_path['time'].to_numpy(),
            'D-', linewidth=2, color=ibm_red, zorder=8000, label='solution')
    # TODO: draw a yaw-direction marker per state; an unfinished sketch that
    # iterated the rows and used math.cos/sin of the yaw column was removed.
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('t')
    plt.show()
# Entry point: pass the data-directory name (under data/) on the command line.
if __name__ == '__main__':
    plot_motion_plan(sys.argv[1])
#from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
import unittest
class NewVisitorTest(StaticLiveServerTestCase):
    """Functional (Selenium) test: visitors can create separate to-do lists."""
    @classmethod
    def setUpClass(cls):
        # Honour a --liveserver=<host:port> CLI argument: run against that
        # staging server instead of starting Django's live test server.
        for arg in sys.argv:
            if 'liveserver' in arg:
                cls.server_url = 'http://' + arg.split('=')[1]
                return
        super().setUpClass()
        cls.server_url = cls.live_server_url
    @classmethod
    def tearDownClass(cls):
        # Only tear the live server down if we actually started one above.
        if cls.server_url == cls.live_server_url:
            super().tearDownClass()
    def setUp(self):
        # Fresh Chrome session per test; implicit wait smooths page loads.
        self.browser = webdriver.Chrome()
        self.browser.implicitly_wait(3)
    def tearDown(self):
        self.browser.refresh()
        self.browser.quit()
    def test_can_start_a_list_and_retrieve_it_later(self):
        """Edith creates a list; a second user (Francis) gets his own list."""
        self.browser.get(self.server_url)
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('작업', header_text)
        # She decides to add a task right away
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            '작업 아이템 입력'
        )
        # She types "buy peacock feathers" into the text box
        # (Edith's hobby is making fly-fishing nets)
        inputbox.send_keys('공작깃털 사기')
        # When she hits enter, the page updates and the list shows
        # "1: buy peacock feathers" as an item
        inputbox.send_keys(Keys.ENTER)
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/lists/.+')
        self.check_for_row_in_list_table('1: 공작깃털 사기')
        # There is still a text box inviting her to add another item;
        # she enters "make a net using the peacock feathers"
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('공작깃털을 이용해서 그물 만들기')
        inputbox.send_keys(Keys.ENTER)
        # The page updates again and now shows both items on her list
        self.check_for_row_in_list_table('2: 공작깃털을 이용해서 그물 만들기')
        self.check_for_row_in_list_table('1: 공작깃털 사기')
        #######################################
        # Now a new user, Francis, comes along to the site.
        # Use a brand-new browser session so none of Edith's information
        # leaks through via cookies.
        self.browser.quit()
        self.browser = webdriver.Chrome()
        #self.browser.implicitly_wait( 3 )
        # Francis visits the home page; there is no sign of Edith's list
        self.browser.get(self.server_url)
        #page_text = self.browser.find_element_by_tag_name('table').text
        #self.assertIn('공작깃털', page_text)
        #self.browser.find_element_by_tag_name('table')
        # Francis starts a new list by entering a new item
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('우유 사기')
        inputbox.send_keys(Keys.ENTER)
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/lists/(\d+)')
        self.assertNotEqual(francis_list_url, edith_list_url)
        # Again, there is no trace of Edith's input
        self.assertNotIn('공작깃털', self.browser.find_element_by_tag_name('table').text)
        # Satisfied, they both go to sleep
        self.fail('Finish the test!')
    def check_for_row_in_list_table(self, row_text):
        """Assert that row_text appears as a row of the to-do table."""
        table = self.browser.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn(row_text, [row.text for row in rows])
    def test_layout_and_style(self):
        """Smoke-test the CSS: input box roughly centred in a 1024px window."""
        self.browser.get(self.server_url)
        self.browser.set_window_size(1024,768)
        inputbox = self.browser.find_element_by_tag_name('input')
        self.assertAlmostEqual(
            inputbox.location['x'] + inputbox.size['width'] /2, 512, delta=10)
if __name__ == '__main__':
    # warnings='ignore' silences ResourceWarning noise from browser sockets.
    unittest.main(warnings='ignore')
|
"""
Helper classes to convert iTunes library XML file to SQLite database to use in Flask app
Usage:
db = SongDb('itunes/library/file.xml', echo=False)
db.create_db()
db.populate_db()
"""
import shutil
import sys
import xml.etree.cElementTree as ET
from peewee import DoesNotExist
from pyItunes import Library
from jukebox.models import Album, Artist, create_db, Song
ITUNES_FILE = 'itunes_library.xml'
ITUNES_FILE_BAK = 'itunes_library.xml.bak'
class SongDb():
    """Populate the jukebox database from an iTunes library XML file."""
    def __init__(self, lib_file=ITUNES_FILE, echo=True):
        # `echo` is accepted for backward compatibility but currently unused.
        self.library_file = lib_file
    def populate_db(self):
        """Back up and parse the iTunes XML file, inserting songs into the db.

        Skips entries with no location and .ipa files (apps).  Artist and
        album lookups are best-effort: on failure they fall back to None so
        the song row is still created.
        """
        # BUG FIX: this method previously always read the module-level
        # ITUNES_FILE constant, silently ignoring the path given to __init__.
        print('Backing up', self.library_file)
        shutil.copyfile(self.library_file, self.library_file + '.bak')
        print('Parsing iTunes file...')
        library = Library(self.library_file)
        print('Done, library contains %s songs.' % len(library.songs))
        print('Populating db...')
        for key, s in library.songs.items():
            try:
                if not s.location or s.location.endswith('.ipa'):
                    continue  # Don't include apps
                try:
                    artist, _ = Artist.get_or_create(name=s.artist.strip())
                except Exception:
                    # Missing/blank artist tag: store the song without one.
                    artist = None
                try:
                    album, _ = Album.get_or_create(title=s.album.strip(), artist=artist)
                except Exception:
                    album = None
                Song.create(title=s.name,
                            location=s.location,
                            track_id=key,
                            track_number=s.track_number,
                            length=s.length,
                            artist=artist,
                            album=album)
            except Exception as e:
                print("Error saving song:", e)
        print('Done')
# CLI: `init` creates the schema first, then populates; `update` only
# (re)populates the database from the default iTunes XML file.
if __name__ == '__main__':
    if len(sys.argv) == 2 and sys.argv[1] in ('init', 'update'):
        if sys.argv[1] == 'init':
            create_db()
        SongDb().populate_db()
|
import csv
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
# --- First pass: dump every raw row of the CSV ------------------------------
# `with` replaces the manual open/try/finally/close pattern (the second pass
# below previously leaked its handle if reading raised).
with open("epkk-epwa.csv", "rt") as fp:
    for row in csv.reader(fp):
        print(row)
# --- Second pass: print each record as "column : value" pairs ---------------
with open("epkk-epwa.csv", "rt") as fp:
    reader = csv.reader(fp)
    categories = next(reader)  # header row (raises StopIteration on empty file)
    for row in reader:
        for i in range(len(categories)):
            print(str(categories[i]) + " : " + str(row[i]))
# Finally, list the column names on their own.
for i in categories:
    print(i)
|
# coding: utf-8
# In[1]:
# Read a base-10 integer and print its binary representation (0b-prefixed).
number = int(input("digite um numero decimal:"))
print(bin(number))
|
#!/usr/bin/env python
#
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the OpenTimelineIO project
import unittest
import os
import pkg_resources
import sys
# Resolve `mock`: stdlib unittest.mock on Python >= 3.3, the external `mock`
# package on older interpreters, else flag it unavailable so the test class
# below can be skipped instead of erroring at import time.
try:
    # Python 3.3 forward includes the mock module
    from unittest import mock
    could_import_mock = True
except ImportError:
    # Fallback for older python (not included in standard library)
    try:
        import mock
        could_import_mock = True
    except ImportError:
        # Mock appears to not be installed
        could_import_mock = False
# Resolve `reload` the same way: importlib on Python 3, imp on Python 2.
try:
    # Python3: use importlib.reload
    from importlib import reload as import_reload
except ImportError:
    # Python2:
    from imp import reload as import_reload
import opentimelineio as otio
from tests import baseline_reader
@unittest.skipIf(
    not could_import_mock,
    "mock module not found. Install mock from pypi or use python >= 3.3."
)
class TestSetuptoolsPlugin(unittest.TestCase):
    """Tests that OTIO discovers plugins exposed through setuptools entry points."""
    def setUp(self):
        """Patch sys.path and pkg_resources so a baseline plugin module looks installed."""
        # Get the location of the mock plugin module metadata
        mock_module_path = os.path.join(
            baseline_reader.path_to_baseline_directory(),
            'plugin_module',
        )
        # Path to the manifest shipped as a plain JSON file at the package
        # top level (used by the JSON-manifest and deduplication tests).
        self.mock_module_manifest_path = os.path.join(
            mock_module_path,
            "otio_jsonplugin",
            "plugin_manifest.json"
        )
        # Create a WorkingSet as if the module were installed
        entries = [mock_module_path] + pkg_resources.working_set.entries
        self.sys_patch = mock.patch('sys.path', entries)
        self.sys_patch.start()
        working_set = pkg_resources.WorkingSet(entries)
        # linker from the entry point
        self.entry_patcher = mock.patch(
            'pkg_resources.iter_entry_points',
            working_set.iter_entry_points
        )
        self.entry_patcher.start()
    def tearDown(self):
        """Undo the patches and drop the mock plugin so the next test re-imports it."""
        self.sys_patch.stop()
        self.entry_patcher.stop()
        if 'otio_mockplugin' in sys.modules:
            del(sys.modules['otio_mockplugin'])
    def test_detect_plugin(self):
        """This manifest uses the plugin_manifest function"""
        # Create a manifest and ensure it detected the mock adapter and linker
        man = otio.plugins.manifest.load_manifest()
        # Make sure the adapter is included in the adapter list
        adapter_names = [adapter.name for adapter in man.adapters]
        self.assertIn('mock_adapter', adapter_names)
        # Make sure the linker is included in the linker list
        linker_names = [linker.name for linker in man.media_linkers]
        self.assertIn('mock_linker', linker_names)
        # Make sure adapters and linkers landed in the proper place
        for adapter in man.adapters:
            self.assertIsInstance(adapter, otio.adapters.Adapter)
        for linker in man.media_linkers:
            self.assertIsInstance(linker, otio.media_linker.MediaLinker)
    def test_pkg_resources_disabled(self):
        """OTIO_DISABLE_PKG_RESOURCE_PLUGINS=1 must suppress entry-point plugins."""
        os.environ["OTIO_DISABLE_PKG_RESOURCE_PLUGINS"] = "1"
        import_reload(otio.plugins.manifest)
        # detection of the environment variable happens on import, force a
        # reload to ensure that it is triggered
        with self.assertRaises(AssertionError):
            self.test_detect_plugin()
        # remove the environment variable and reload again for usage in the
        # other tests
        del os.environ["OTIO_DISABLE_PKG_RESOURCE_PLUGINS"]
        import_reload(otio.plugins.manifest)
    def test_detect_plugin_json_manifest(self):
        # Test detecting a plugin that rather than exposing the plugin_manifest
        # function, just simply has a plugin_manifest.json provided at the
        # package top level.
        man = otio.plugins.manifest.load_manifest()
        # Make sure the adapter is included in the adapter list
        adapter_names = [adapter.name for adapter in man.adapters]
        self.assertIn('mock_adapter_json', adapter_names)
        # Make sure the linker is included in the linker list
        linker_names = [linker.name for linker in man.media_linkers]
        self.assertIn('mock_linker_json', linker_names)
        # Make sure adapters and linkers landed in the proper place
        for adapter in man.adapters:
            self.assertIsInstance(adapter, otio.adapters.Adapter)
        for linker in man.media_linkers:
            self.assertIsInstance(linker, otio.media_linker.MediaLinker)
        # The JSON manifest file itself must be recorded as a manifest source.
        self.assertTrue(
            any(
                True for p in man.source_files
                if self.mock_module_manifest_path in p
            )
        )
    def test_deduplicate_env_variable_paths(self):
        "Ensure that duplicate entries in the environment variable are ignored"
        # back up existing manifest
        bak_env = os.environ.get('OTIO_PLUGIN_MANIFEST_PATH')
        relative_path = self.mock_module_manifest_path.replace(os.getcwd(), '.')
        # set where to find the new manifest
        os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = os.pathsep.join(
            (
                # absolute
                self.mock_module_manifest_path,
                # relative
                relative_path
            )
        )
        result = otio.plugins.manifest.load_manifest()
        # Listed twice (absolute + relative spelling) but loaded exactly once.
        self.assertEqual(
            len(
                [
                    p for p in result.source_files
                    if self.mock_module_manifest_path in p
                ]
            ),
            1
        )
        if relative_path != self.mock_module_manifest_path:
            self.assertNotIn(relative_path, result.source_files)
        # restore the environment variable for the other tests
        if bak_env:
            os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = bak_env
        else:
            del os.environ['OTIO_PLUGIN_MANIFEST_PATH']
if __name__ == '__main__':
    unittest.main()
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import signal
import subprocess
import sys
import tempfile
import py_utils
from devil.android import device_temp_file
from devil.android.perf import perf_control
from profile_chrome import ui
from systrace import trace_result
from systrace import tracing_agents
_CATAPULT_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '..', '..')
sys.path.append(os.path.join(_CATAPULT_DIR, 'telemetry'))
try:
# pylint: disable=F0401,no-name-in-module,wrong-import-position
from telemetry.internal.platform.profiler import android_profiling_helper
from telemetry.internal.util import binary_manager
except ImportError:
android_profiling_helper = None
binary_manager = None
# Command-line arguments appended to every `perf record` invocation.
_PERF_OPTIONS = [
  # Sample across all processes and CPUs to so that the current CPU gets
  # recorded to each sample.
  '--all-cpus',
  # In perf 3.13 --call-graph requires an argument, so use the -g short-hand
  # which does not.
  '-g',
  # Increase priority to avoid dropping samples. Requires root.
  '--realtime', '80',
  # Record raw samples to get CPU information.
  '--raw-samples',
  # Increase sampling frequency for better coverage.
  '--freq', '2000',
]
class _PerfProfiler(object):
  """Runs `perf record` on a device via adb and collects the recorded file."""
  def __init__(self, device, perf_binary, categories):
    self._device = device
    # On-device temp file that `perf record` writes its samples to.
    self._output_file = device_temp_file.DeviceTempFile(
        self._device.adb, prefix='perf_output')
    # Host-side scratch file capturing perf's stdout/stderr for error reports.
    self._log_file = tempfile.TemporaryFile()
    # TODO(jbudorick) Look at providing a way to unhandroll this once the
    # adb rewrite has fully landed.
    device_param = (['-s', str(self._device)] if str(self._device) else [])
    cmd = ['adb'] + device_param + \
          ['shell', perf_binary, 'record',
           '--output', self._output_file.name] + _PERF_OPTIONS
    if categories:
      cmd += ['--event', ','.join(categories)]
    # Put the device into a stable performance state before sampling.
    self._perf_control = perf_control.PerfControl(self._device)
    self._perf_control.SetPerfProfilingMode()
    self._perf_process = subprocess.Popen(cmd,
                                          stdout=self._log_file,
                                          stderr=subprocess.STDOUT)
  def SignalAndWait(self):
    """Stops recording (SIGINT lets perf flush its data) and restores perf mode."""
    self._device.KillAll('perf', signum=signal.SIGINT)
    self._perf_process.wait()
    self._perf_control.SetDefaultPerfMode()
  def _FailWithLog(self, msg):
    """Raises RuntimeError with `msg` plus the captured perf log output."""
    self._log_file.seek(0)
    log = self._log_file.read()
    raise RuntimeError('%s. Log output:\n%s' % (msg, log))
  def PullResult(self, output_path):
    """Copies the profile into `output_path`; returns its host path.

    Raises RuntimeError (with the perf log) when no data was recorded.
    """
    if not self._device.FileExists(self._output_file.name):
      self._FailWithLog('Perf recorded no data')
    perf_profile = os.path.join(output_path,
                                os.path.basename(self._output_file.name))
    self._device.PullFile(self._output_file.name, perf_profile)
    if not os.stat(perf_profile).st_size:
      os.remove(perf_profile)
      self._FailWithLog('Perf recorded a zero-sized file')
    self._log_file.close()
    self._output_file.close()
    return perf_profile
class PerfProfilerAgent(tracing_agents.TracingAgent):
  """Tracing agent that captures CPU profiles with the `perf` tool on device."""
  def __init__(self, device):
    tracing_agents.TracingAgent.__init__(self)
    self._device = device
    # On-device path of the pushed perf binary.
    self._perf_binary = self._PrepareDevice(device)
    self._perf_instance = None
    self._categories = None
  def __repr__(self):
    return 'perf profile'
  @staticmethod
  def IsSupported():
    # Supported only when telemetry's profiling helper imported successfully.
    return bool(android_profiling_helper)
  @staticmethod
  def _PrepareDevice(device):
    """Pushes the perf binary to |device| and returns its on-device path."""
    if not 'BUILDTYPE' in os.environ:
      os.environ['BUILDTYPE'] = 'Release'
    if binary_manager.NeedsInit():
      binary_manager.InitDependencyManager(None)
    return android_profiling_helper.PrepareDeviceForPerf(device)
  @classmethod
  def GetCategories(cls, device):
    """Returns the output lines of `perf list` run on |device|."""
    perf_binary = cls._PrepareDevice(device)
    # Perf binary returns non-zero exit status on "list" command.
    return device.RunShellCommand([perf_binary, 'list'], check_return=False)
  @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
  def StartAgentTracing(self, config, timeout=None):
    """Starts `perf record` with the event categories requested in |config|."""
    self._categories = _ComputePerfCategories(config)
    self._perf_instance = _PerfProfiler(self._device,
                                        self._perf_binary,
                                        self._categories)
    return True
  @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
  def StopAgentTracing(self, timeout=None):
    """Stops the profiler.

    Returns True on success; returns None (falsy) when tracing never started.
    """
    if not self._perf_instance:
      return
    self._perf_instance.SignalAndWait()
    return True
  @py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
  def GetResults(self, timeout=None):
    """Pulls and converts the trace, returning it as a TraceResult."""
    # NOTE(review): _PullTrace() may return None when JSON conversion fails;
    # open(None) would then raise TypeError -- confirm callers tolerate this.
    with open(self._PullTrace(), 'r') as f:
      trace_data = f.read()
    return trace_result.TraceResult('perf', trace_data)
  @staticmethod
  def _GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir,
                                 required_libs, kallsyms):
    """Builds a `perf report` command line the user can run by hand."""
    cmd = '%s report -n -i %s --symfs %s --kallsyms %s' % (
        os.path.relpath(perfhost_path, '.'), perf_profile, symfs_dir, kallsyms)
    # Point perf at an objdump from the matching Android toolchain, if any
    # of the required libraries is present in the symfs.
    for lib in required_libs:
      lib = os.path.join(symfs_dir, lib[1:])
      if not os.path.exists(lib):
        continue
      objdump_path = android_profiling_helper.GetToolchainBinaryPath(
          lib, 'objdump')
      if objdump_path:
        cmd += ' --objdump %s' % os.path.relpath(objdump_path, '.')
        break
    return cmd
  def _PullTrace(self):
    """Pulls the profile, builds a symfs, and converts the data to JSON.

    Returns the JSON file name, or None if the conversion step failed.
    """
    symfs_dir = os.path.join(tempfile.gettempdir(),
                             os.path.expandvars('$USER-perf-symfs'))
    if not os.path.exists(symfs_dir):
      os.makedirs(symfs_dir)
    required_libs = set()
    # Download the recorded perf profile.
    perf_profile = self._perf_instance.PullResult(symfs_dir)
    required_libs = \
        android_profiling_helper.GetRequiredLibrariesForPerfProfile(
            perf_profile)
    if not required_libs:
      logging.warning('No libraries required by perf trace. Most likely there '
                      'are no samples in the trace.')
    # Build a symfs with all the necessary libraries.
    kallsyms = android_profiling_helper.CreateSymFs(self._device,
                                                    symfs_dir,
                                                    required_libs,
                                                    use_symlinks=False)
    perfhost_path = binary_manager.FetchPath(
        android_profiling_helper.GetPerfhostName(), 'linux', 'x86_64')
    ui.PrintMessage('\nNote: to view the profile in perf, run:')
    ui.PrintMessage('  ' + self._GetInteractivePerfCommand(perfhost_path,
        perf_profile, symfs_dir, required_libs, kallsyms))
    # Convert the perf profile into JSON.
    perf_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    'third_party', 'perf_to_tracing.py')
    json_file_name = os.path.basename(perf_profile)
    with open(os.devnull, 'w') as dev_null, \
         open(json_file_name, 'w') as json_file:
      cmd = [perfhost_path, 'script', '-s', perf_script_path, '-i',
             perf_profile, '--symfs', symfs_dir, '--kallsyms', kallsyms]
      if subprocess.call(cmd, stdout=json_file, stderr=dev_null):
        logging.warning('Perf data to JSON conversion failed. The result will '
                        'not contain any perf samples. You can still view the '
                        'perf data manually as shown above.')
        return None
    return json_file_name
  def SupportsExplicitClockSync(self):
    # perf cannot inject clock-sync markers into its output.
    return False
  def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
    # pylint: disable=unused-argument
    assert self.SupportsExplicitClockSync(), ('Clock sync marker cannot be '
        'recorded since explicit clock sync is not supported.')
def _OptionalValueCallback(default_value):
def callback(option, _, __, parser): # pylint: disable=unused-argument
value = default_value
if parser.rargs and not parser.rargs[0].startswith('-'):
value = parser.rargs.pop(0)
setattr(parser.values, option.dest, value)
return callback
class PerfConfig(tracing_agents.TracingConfig):
  """Tracing configuration holding the requested perf categories and device."""
  def __init__(self, perf_categories, device):
    super(PerfConfig, self).__init__()
    # Comma-delimited perf event categories requested on the command line.
    self.perf_categories = perf_categories
    self.device = device
def try_create_agent(config):
  """Returns a PerfProfilerAgent when perf categories were requested, else None."""
  if not config.perf_categories:
    return None
  return PerfProfilerAgent(config.device)
def add_options(parser):
  """Registers the perf profiling options group on |parser| and returns it."""
  group = optparse.OptionGroup(parser, 'Perf profiling options')
  help_text = ('Capture a perf profile with '
               'the chosen comma-delimited event categories. '
               'Samples CPU cycles by default. Use "list" to see '
               'the available sample types.')
  group.add_option(
      '-p', '--perf',
      help=help_text,
      action='callback',
      default='',
      callback=_OptionalValueCallback('cycles'),
      metavar='PERF_CATEGORIES',
      dest='perf_categories')
  return group
def get_config(options):
  """Builds a PerfConfig from parsed command-line |options|."""
  config = PerfConfig(options.perf_categories, options.device)
  return config
def _ComputePerfCategories(config):
  """Returns the perf event categories from |config| as a list.

  Empty when perf profiling is unsupported or no categories were requested.
  """
  if PerfProfilerAgent.IsSupported() and config.perf_categories:
    return config.perf_categories.split(',')
  return []
|
from typing import List, Optional
from models.elastic import ESFilterGenre, ESQuery
from models.film import SFilm
from models.interface import AbstractDataStore, AbstractMovie
class Movie(AbstractMovie):
    """Movie service backed by an AbstractDataStore (e.g. Elasticsearch)."""

    def __init__(self, datastore: AbstractDataStore) -> None:
        self.datastore = datastore

    def set_movie_index(self, movieindex: str) -> None:
        # Index/collection name used by every subsequent query.
        self.movieindex = movieindex

    async def get_film_by_id(self, film_id: str) -> Optional[SFilm]:
        """Return the film with `film_id`, or None when it does not exist."""
        raw = await self.datastore.get_by_id(self.movieindex, film_id)
        return SFilm(**raw) if raw else None

    async def get_all_film(
        self,
        sort: str,
        page_size: int, page_number: int, genre_filter: str
    ) -> Optional[List[SFilm]]:
        """Return one page of films, optionally restricted to a genre."""
        body = None
        if genre_filter is not None:
            body = ESFilterGenre(
                query={'term': {'genre': {'value': genre_filter}}}
            ).json()
        hits = await self.datastore.search(
            self.movieindex,
            page_size=page_size, page_number=page_number,
            sort=sort, body=body
        )
        return [SFilm(**hit) for hit in hits]

    async def search_film(
        self,
        query: str, page_size: int, page_number: int
    ) -> Optional[List[SFilm]]:
        """Full-text (multi_match) search over all film fields."""
        body = ESQuery(query={'multi_match': {'query': query}}).json(by_alias=True)
        hits = await self.datastore.search(
            self.movieindex,
            page_size=page_size, page_number=page_number,
            sort=None, body=body
        )
        return [SFilm(**hit) for hit in hits]
|
# -*- encoding: utf-8 -*-
import sys
from math import sqrt
# Parameters for the text gen
# Maximum absolute difference allowed between a byte already on the tape and
# the byte to emit for the existing cell to be reused (see locate_in_range).
max_inrange = 20 # How big difference between a byte in tape and new byte can be for byte reusage
# ------------------------------------------------------------------
# Helper functions for genlogic()
class Bftextgen_exception(Exception):
    """Base class for all bftextgen errors; carries the offending value."""
    def __init__(self, value):
        # Forward to Exception so str(), repr(), args and pickling all see
        # the value (the old code left args empty, making str(e) blank).
        super(Bftextgen_exception, self).__init__(value)
        self.value = value
# Returns -1 on not found, index of the cell with byte in range if found.
def locate_in_range(tape, byte):
    """Index of the first tape cell within max_inrange of `byte`, or -1."""
    for index, cell in enumerate(tape):
        if abs(cell - byte) <= max_inrange:
            return index
    return -1
# ------------------------------------------------------------------
def genlogic(text):
    """Compile `text` (UTF-8) into an intermediate representation (IR).

    The IR is a list of tuples:
      ('set', cell, value)     -- allocate cell `cell` holding `value`
      ('change', cell, delta)  -- add `delta` to cell `cell`
      ('output', cell)         -- print cell `cell`

    Cells are reused whenever an existing value is close enough (see
    locate_in_range), which keeps the generated Brainfuck short.
    """
    # Assume byte-based implementation using utf-8.
    # (Renamed from `bytes`, which shadowed the builtin type.)
    if sys.version_info.major < 3:
        byte_values = [ord(ch) for ch in text.encode('utf-8')]  # str yields 1-char strings
    else:
        byte_values = list(text.encode('utf-8'))  # bytes yields ints directly
    tape = []      # Mirror of the BF tape contents built so far
    program = []
    for byte in byte_values:
        tape_index = locate_in_range(tape, byte)
        if tape_index == -1:  # Nothing found in range, create new cell
            tape_index = len(tape)
            # Generate code to create cell
            program.append(('set', tape_index, byte))
            # Update our tape
            tape.append(byte)
        else:
            change = byte - tape[tape_index]
            # Generate code to change cell
            program.append(('change', tape_index, change))
            # Update our tape
            tape[tape_index] = byte
        # Generate output
        program.append(('output', tape_index))
    return program
# ------------------------------------------------------------------
# Helper functions for genbf()
class Bftextgen_invalid_IR(Bftextgen_exception):
    """Raised by genbf() when an IR command has an unknown op or wrong arity."""
    def __str__(self):
        # self.value is usually an IR command *tuple*; wrap it in a 1-tuple
        # so %-formatting does not try to unpack it (which raised
        # "not all arguments converted during string formatting").
        return 'Invalid IR form: %s' % (self.value,)
def move_tape_pointer(change):
    """BF code moving the data pointer by `change` cells ('>' right, '<' left)."""
    symbol = '<' if change < 0 else '>'
    return symbol * abs(change)
def change_cell(change):
    """BF code adding `change` to the current cell ('+' increments, '-' decrements)."""
    if change >= 0:
        return '+' * change
    return '-' * (-change)
def set_cell(value):
    """Return BF code that writes `value` into the current (zeroed) cell.

    Instead of emitting `value` plus signs, the value is factorized and each
    factor becomes a multiplication loop using the neighbouring cell as
    scratch space. The data pointer ends exactly where it started.
    """
    def factorize(value):
        # Smallest-first factorization by trial division up to sqrt(rest).
        factors = []
        rest = value
        while True:
            max_factor = int(sqrt(rest))
            i = 2 # Everything is divisible by 1 and nothing by 0
            while i <= max_factor:
                if rest%i == 0:
                    factors.append(i)
                    rest = int(rest / i)
                    break
                i += 1
            if i > max_factor: # No more factors to find anymore
                factors.append(rest)
                break
        return factors
    # Special case 1 and 0 as rest assumes only primes have one factor
    if value == 0:
        return ''
    elif value == 1:
        return '+'
    factors = factorize(value)
    if len(factors) == 1:
        # We don't want to have a huge string of '+'s
        # Fortunately prime - 1 is not a prime
        # Thus, we generate code for value - 1, then add 1
        return set_cell(value - 1) + '+' # Safe as it's guaranteed the value >= 2
    if len(factors) % 2 == 0: # Even number of factors means we must start at the cell above us, to end up at right cell
        start_cell = 1
    else:
        start_cell = 0
    # Seed the starting cell with the first factor, then multiply through
    # the remaining factors, bouncing between cell 0 and cell 1 each pass.
    program = move_tape_pointer(start_cell) + '+' * factors[0]
    cell_pointer = start_cell
    for factor in factors[1:]:
        move = (cell_pointer+1)%2 - cell_pointer # -1 if at 1, +1 if at 0
        # First, create a loop that does the multiplication, moving the result to the other cell
        program += '[' + move_tape_pointer(move) + '+' * factor + move_tape_pointer(-move) + '-' + ']'
        # Then, move to the other cell
        program += move_tape_pointer(move)
        cell_pointer += move
    return program
# ------------------------------------------------------------------
def genbf(logic):
    """Translate the IR produced by genlogic() into a Brainfuck program string.

    Raises Bftextgen_invalid_IR on unknown ops or wrong tuple arity.
    """
    tape_pointer = 0
    chunks = []
    for command in logic:
        op = command[0]
        if op == 'output':
            if len(command) != 2:
                raise Bftextgen_invalid_IR(command)
            cell = command[1]
            chunks.append(move_tape_pointer(cell - tape_pointer))
            chunks.append('.')
        elif op == 'set':
            if len(command) != 3:
                raise Bftextgen_invalid_IR(command)
            cell = command[1]
            chunks.append(move_tape_pointer(cell - tape_pointer))
            chunks.append(set_cell(command[2]))
        elif op == 'change':
            if len(command) != 3:
                raise Bftextgen_invalid_IR(command)
            cell = command[1]
            chunks.append(move_tape_pointer(cell - tape_pointer))
            chunks.append(change_cell(command[2]))
        else:
            raise Bftextgen_invalid_IR(command)
        tape_pointer = cell
    return ''.join(chunks)
def bf(text):
    """Generate a Brainfuck program that prints `text` (encoded as UTF-8)."""
    logic = genlogic(text)
    return genbf(logic)
if __name__ == '__main__':
    # Read lines from stdin until EOF, printing the BF program for each line.
    while True:
        try:
            if sys.version_info.major < 3:
                text = raw_input().decode('utf-8') # Need to manually convert to unicode string
            else:
                text = input()
        except EOFError:
            break
        print(bf(text))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import re
import inspect
import datetime
import calendar
from flask import url_for
from .compat import *
from .core import commands, Url
# Search-engine, dictionary and reference shortcuts; each template's "{}" is
# filled with the user's query string.
commands.add("g google", "http://www.google.com/search?q={}", default=True) # google
commands.add("gm googlemap", "http://maps.google.com/?q={}") # google maps
commands.add("goopat gp googlepatent googlepatents", "http://www.google.com/patents?btnG=Search+Patents&q={}")
commands.add("gi", "http://images.google.com/images?um=1&ie=UTF-8&sa=N&tab=wi&q={}")
# 9-29-11
commands.add("gt translate trans", "http://translate.google.com/translate?hl=en&sl=auto&tl=en&u={}")
commands.add("dictionary", "http://www.dictionary.com/browse/{}") # dictionary
commands.add("wd wikidict dw", "https://en.wiktionary.org/wiki/{}") # dictionary
# 11-13-09
commands.add("d dict dic nw ninja", "http://ninjawords.com/?q={}", "definition for word")
# 09-26-2017
commands.add("s ds sy syn", "https://www.powerthesaurus.org/{}/synonyms", "Synonyms for word")
commands.add("da an ant", "https://www.powerthesaurus.org/{}/antonyms", "Antonyms for word")
commands.add("wk", "http://en.wikipedia.org/wiki/Special:Search?fulltext=Search&search={}")
commands.add("wpg wkg wikigoogle", "http://www.google.com/custom?domains=en.wikipedia.org&sitesearch=en.wikipedia.org&q={}")
commands.add("tv", "http://www.tv.com/search.php?type=11&stype=all&tag=search%3Bbutton&qs={}")
commands.add("yhoo", "http://search.yahoo.com/bin/search?p={}")
commands.add("a am amazon amaz", "http://www.amazon.com/s/ref=nb_ss_gw/102-5754341-9464967?url=search-alias%3Daps&Go=Go&field-keywords={}")
commands.add("epg ep epguides eg", "http://www.google.com/search?hl=en&q=allintitle%3A&q=site%3Aepguides.com&btnG=Search&q={}")
#commands.add("yt", "http://www.youtube.com/results?search=Search&search_query={}")
def yt_callback(q):
    """YouTube: search results for q, or the homepage when q is empty."""
    # updated to just go to homescreen on 1-21-2021
    if not q:
        return "http://www.youtube.com/"
    return "http://www.youtube.com/results?search=Search&search_query={}".format(q)
# Register the YouTube callback defined above.
commands.add("yt", yt_callback, "Search Youtube")
#commands.add("yt", "http://www.youtube.com/results?search=Search&search_query={}")
# 8-8-12, updated to youtubensfw on 1-22-2021
def ytnsfw_callback(q):
    """Rewrite a youtube.com URL to youtubensfw.com (watch age-gated videos
    without logging in). Only the first ".youtube." host component is replaced.
    """
    # Escape the dots: the previous pattern ".youtube." let "." match ANY
    # character, so unrelated hosts such as "xyoutubez" were rewritten too.
    url = re.sub(r"\.youtube\.", ".youtubensfw.", q, count=1)
    return url
# Media, account and web-tooling shortcuts.
commands.add("yti ty yta ytnsfw", ytnsfw_callback)
commands.add("imdb", "http://www.imdb.com/find?s=all&q={}")
commands.add("bmn bug bugmenot", "http://www.bugmenot.com/view/{}")
commands.add("wks wikiseek", "http://www.wikiseek.com/results.php?q={}")
commands.add("gd", "http://www.godaddy.com/gdshop/registrar/search.asp?isc=ffsearch&checkavail=1&domaintocheck={}")
commands.add("ws websnif websniff", "http://web-sniffer.net/?submit=Submit&http=1.1&gzip=yes&type=GET&url={}")
commands.add("e eb ebay", "http://www.ebay.com/sch/i.html?_nkw={}")
# added 1-6-08...
def php_callback(q):
    """PHP docs: jump straight to a symbol page, or the function reference when empty."""
    if not q:
        return "http://us2.php.net/manual/en/funcref.php"
    return "http://us2.php.net/{}".format(q)
# PHP docs and Yahoo finance shortcuts.
#commands.add("php", "http://us2.php.net/{}")
commands.add("php", php_callback)
commands.add("yf stock symbol", "http://finance.yahoo.com/q?s={}")
# 9-30-10
# 3-31-2020 adds callback and fleshes out this search
def rb_callback(q):
    """Ruby core documentation lookup.

    NOTE -- ruby urls are case-sensitive (let that sink in): common aliases
    are mapped to their class names, and the query is title-cased.
    """
    aliases = {
        "str": "String",
        "strings": "String",
        "arr": "Array",
        "list": "Array",
        "[]": "Array",
        "dict": "Hash",
        "dicts": "Hash",
        "dictionary": "Hash",
        "{}": "Hash",
    }
    q = aliases.get(q.lower(), q)
    if not q:
        # This has a cool class/function filter at the bottom
        return "https://ruby-doc.org/core/"
    return "https://ruby-doc.org/core/{}.html".format(q.title())
# Register the Ruby docs callback defined above.
commands.add("rb rubyc rbc", rb_callback)
# 5-19-2016
def py_callback(q, version="3"):
    """Python docs lookup: map common aliases to doc pages, then builtins,
    then fall back to the stdlib module page for the query.

    `version` ("2" or "3") is substituted into URL templates containing "{}".
    """
    # Alias table; entries with a literal version baked in ignore `version`.
    d = {
        "set": "https://docs.python.org/{}/library/stdtypes.html#set",
        "iobase": "https://docs.python.org/3/library/io.html#io.IOBase",
        "open2": "https://docs.python.org/3/library/io.html#io.IOBase",
        "file": "https://docs.python.org/3/library/io.html#io.IOBase",
        "file2": "https://docs.python.org/{}/tutorial/inputoutput.html#methods-of-file-objects",
        "open": "https://docs.python.org/{}/library/functions.html#open",
        "mode": "https://docs.python.org/{}/library/functions.html#open",
        "modes": "https://docs.python.org/{}/library/functions.html#open",
        "filemode": "https://docs.python.org/{}/library/functions.html#open",
        "filemodes": "https://docs.python.org/{}/library/functions.html#open",
        "list": "https://docs.python.org/{}/tutorial/datastructures.html#more-on-lists",
        "lists": "https://docs.python.org/{}/tutorial/datastructures.html#more-on-lists",
        "[]": "https://docs.python.org/{}/tutorial/datastructures.html#more-on-lists",
        #"list": "http://infohost.nmt.edu/tcc/help/pubs/python/web/list-methods.html",
        "tuple": "https://docs.python.org/{}/library/functions.html#tuple",
        "tuples": "https://docs.python.org/{}/library/functions.html#tuple",
        "dict": "https://docs.python.org/{}/library/stdtypes.html#dict",
        "dicts": "https://docs.python.org/{}/library/stdtypes.html#dict",
        "{}": "https://docs.python.org/{}/library/stdtypes.html#dict",
        "collections": "https://docs.python.org/{}/library/collections.html#module-collections",
        "format": "https://docs.python.org/{}/library/string.html#formatspec",
        "logformat": "https://docs.python.org/3/library/logging.html#logrecord-attributes",
        "logform": "https://docs.python.org/3/library/logging.html#logrecord-attributes",
        "log": "https://docs.python.org/3/library/logging.html#logrecord-attributes",
        "logging": "https://docs.python.org/3/library/logging.html#logrecord-attributes",
        "logrecord": "https://docs.python.org/3/library/logging.html#logrecord-attributes",
        "functions": "https://docs.python.org/{}/library/functions.html",
        "funcs": "https://docs.python.org/{}/library/functions.html",
        "func": "https://docs.python.org/{}/library/functions.html",
        "builtins": "https://docs.python.org/{}/library/functions.html",
        "builtin": "https://docs.python.org/{}/library/functions.html",
        "date": "https://docs.python.org/{}/library/datetime.html#strftime-strptime-behavior",
        "dateformat": "https://docs.python.org/{}/library/datetime.html#strftime-strptime-behavior",
        "test": "https://docs.python.org/{}/library/unittest.html#unittest.TestCase",
        "testing": "https://docs.python.org/{}/library/unittest.html#unittest.TestCase",
        "assert": "https://docs.python.org/{}/library/unittest.html#unittest.TestCase",
        "asserts": "https://docs.python.org/{}/library/unittest.html#unittest.TestCase",
        "exceptions": "https://docs.python.org/{}/library/exceptions.html",
        "exception": "https://docs.python.org/{}/library/exceptions.html",
        "except": "https://docs.python.org/{}/library/exceptions.html",
        "exc": "https://docs.python.org/{}/library/exceptions.html",
        "error": "https://docs.python.org/{}/library/exceptions.html",
        "err": "https://docs.python.org/{}/library/exceptions.html",
        "errors": "https://docs.python.org/{}/library/exceptions.html",
        "strings": "https://docs.python.org/{}/library/stdtypes.html#string-methods",
        "string2": "https://docs.python.org/{}/library/stdtypes.html#string-methods",
        "str": "https://docs.python.org/{}/library/stdtypes.html#string-methods",
        "byte": "https://docs.python.org/{}/library/stdtypes.html#bytes-methods",
        "bytes": "https://docs.python.org/{}/library/stdtypes.html#bytes-methods",
        "pdb": "https://docs.python.org/{}/library/pdb.html#pdbcommand-commands",
        "code": "https://github.com/python/cpython/tree/master/Lib",
        "code3": "https://github.com/python/cpython/tree/master/Lib",
        "3": "https://github.com/python/cpython/tree/master/Lib",
        "code2": "https://github.com/python/cpython/tree/2.7/Lib",
        "2": "https://github.com/python/cpython/tree/2.7/Lib",
        "env": "https://docs.python.org/3/using/cmdline.html#environment-variables",
        "environ": "https://github.com/python/cpython/tree/2.7/Lib",
        "environment": "https://github.com/python/cpython/tree/2.7/Lib",
        "context": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "with": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "__enter": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "__enter__": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "__exit": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "__exit__": "https://docs.python.org/3/reference/datamodel.html#context-managers",
        "magic": "https://docs.python.org/3/reference/datamodel.html#special-method-names",
        "special": "https://docs.python.org/3/reference/datamodel.html#special-method-names",
        "__": "https://docs.python.org/3/reference/datamodel.html#special-method-names",
    }
    q = q.lower()
    # Empty query: jump to the CPython standard-library source tree.
    if not q:
        q = "code{}".format(version)
    if q in d:
        url = d[q].format(version)
    else:
        # Not an alias: check whether the query names a builtin (case-insensitively).
        bd = {}
        for k, v in inspect.getmembers(builtins):
            bd[k.lower()] = v
        if q in bd:
            v = bd[q]
            if q.lower().endswith("error"):
                url = "{}#{}".format(d["error"], q).format(version)
            else:
                url = "{}#{}".format(d["func"], q).format(version)
        else:
            # Last resort: treat the query as a stdlib module name.
            url = "https://docs.python.org/{}/library/{}.html".format(version, q)
    return url
# added 8-16-08
commands.add("py", py_callback)
# 5-19-2016
def py3_callback(q, version="3"):
    """Python 3 docs lookup; thin wrapper delegating to py_callback."""
    return py_callback(q, version)
# Explicit Python-3 variant of the "py" command.
commands.add("py3", py3_callback)
# 1-2-2018
def py2_callback(q, version="2"):
    """Python 2 docs lookup; thin wrapper delegating to py_callback."""
    return py_callback(q, version)
# Explicit Python-2 variant of the "py" command.
commands.add("py2", py2_callback)
# 7-21-2016
# 3-19-2019 I fleshed chef search out more
# 3-31-2020 Updates to latest chef links and fixes search
def chef_callback(q):
    """Chef docs: resource page for q, special anchors for a few keywords,
    or the resources index when the query is empty."""
    if not q:
        return "https://docs.chef.io/resources/"
    q = q.lower()
    if q == "custom":
        return "https://docs.chef.io/custom_resources.html"
    if q in ("common", "prop", "props", "properties"):
        return "https://docs.chef.io/resources/#common-functionality"
    # Resource names use underscores, never spaces or hyphens.
    return "https://docs.chef.io/resources/{}/".format(q.replace(" ", "_").replace("-", "_"))
# Chef docs, media, finance and misc shortcuts.
commands.add("ch chefdoc", chef_callback, "Chef documentation")
# added 10-28-2008...
commands.add("mtv", "http://www.mtvmusic.com/search/?term={}")
commands.add("h", "http://www.hulu.com/videos/search?query={}")
commands.add("gf", "http://finance.google.com/finance?q={}")
# 11-6-08...
commands.add("t tw twit ts", "https://twitter.com/search?q={}&f=tweets&vertical=news")
# 11-19-08...
commands.add("yc syc hn", "https://hn.algolia.com/?query={}&sort=byPopularity&prefix&page=0&dateRange=all&type=story")
commands.add("li", "http://www.lipsum.com/feed/html")
# 12-4-08...
commands.add("new", "http://www.newegg.com/Product/ProductList.aspx?Submit=ENE&DEPA=0&Order=BESTMATCH&Description={}&x=0&y=0")
commands.add("al alexa", "http://www.alexa.com/data/details/traffic_details/{}")
# 1-9-09...
commands.add("nf ne net", "https://www.netflix.com/search/{}")
# 10-17-12 better netflix search
commands.add("nfi neti", "http://instantwatcher.com/titles?q={}&search_episodes=")
# 1-31-09
commands.add("down", "http://downforeveryoneorjustme.com/{}")
# 11-19-09
commands.add("tviv", "http://tviv.org/w/index.php?search={}&title=Special%3ASearch")
# 8-30-11...
commands.add("camel", "http://camelcamelcamel.com/products?sq={}")
# 1-14-12
commands.add("lds scriptures", "http://lds.org/scriptures/search?lang=eng&query={}&x=0&y=0")
# 2-1-12
def lds_callback(volume, question):
    """Build an lds.org scriptures URL: /scriptures/<volume>[/<book>][/<chapter>].

    `question` is "<book> <chapter>"; either piece may be absent.
    """
    url = 'http://www.lds.org/scriptures/{}'.format(volume)
    if not question:
        return url
    parts = question.split(" ", 1)
    book = parts[0]
    chapter = parts[1] if len(parts) > 1 else ''
    if book:
        url += "/{}".format(book)
    if chapter:
        url += "/{}".format(int(chapter))
    return url
def dc_callback(q):
    """Doctrine & Covenants lookup: sections live under the fixed 'dc' book."""
    question = "dc {}".format(q) if q else ""
    return lds_callback('dc-testament', question)
# Scripture-volume shortcuts, each bound to lds_callback with a fixed volume.
commands.add("dc dandc", dc_callback)
commands.add("bible", lambda q: lds_callback("bible", q))
commands.add("ot", lambda q: lds_callback("ot", q))
commands.add("nt", lambda q: lds_callback("nt", q))
commands.add("bofm bm bom", lambda q: lds_callback("bofm", q))
commands.add("pgp pearl pg pofpg pgop", lambda q: lds_callback("pgp", q))
# 10-15-2017
def jst_callback(q):
    """Always land on the JST (Inspired Version) index page; the query is ignored."""
    return "http://www.centerplace.org/hs/iv/"
# JST and SEC/finance shortcuts.
commands.add("jst", jst_callback, "The Joseph Smith Translation of the Bible")
# 2-1-2012
#commands.add('sec 10k 10q s1', 'http://www.sec.gov/cgi-bin/browse-edgar?company={}&owner=exclude&Find=Find+Companies&action=getcompany')
commands.add(
    'sec 10k 10q s1',
    'https://www.sec.gov/cgi-bin/browse-edgar?CIK={}&owner=exclude&action=getcompany',
    "Search SEC for company stock symbol"
)
# 1-30-13
commands.add('mf msf msnm', 'http://investing.money.msn.com/investments/institutional-ownership?symbol={}')
# 5-18-12
def stw_callback(q):
    """Stocktwits symbol page; strips a leading '$' and upper-cases the ticker."""
    symbol = q.lstrip("$").upper()
    return 'http://stocktwits.com/symbol/{}'.format(symbol)
# Stocks, shopping and GIF shortcuts.
commands.add("stocktwits sts stt sk stw", stw_callback)
# 1-31-13
commands.add('rev revere rv', 'https://reports.reveredata.com/reports/store/lookup?q={}&submit=Search')
# 4-3-12
commands.add('app', 'http://appshopper.com/search/?search={}')
# 2-8-13
commands.add('harmonica harm', 'http://www.harptabs.com/searchsong.php?Name={}&Author=&Username=&Difficulty=0&Range=0&HarpType=0')
# 5-25-13
commands.add('fw', 'http://www.fatwallet.com/forums/search/results.php?query={}&type=forums&forum=18&match=titles')
# 6-27-13
commands.add('gip giphy', 'http://giphy.com/tags/{}', 'GIF search engine')
commands.add('gif', 'http://www.google.com/search?q={}&source=lnms&tbm=isch&tbs=itp:animated', 'Google image GIF specific search')
# 9-14-13
# https://news.ycombinator.com/item?id=6296634
def exsh_callback(q):
    """explainshell.com link: first token is the command, the rest are its args.

    e.g. "tar -xzvf file" -> /explain/tar?args=-xzvf+file
    """
    # Raw string + keyword maxsplit: "\s" in a plain literal is an invalid
    # escape sequence (SyntaxWarning/error on modern Pythons), and passing
    # maxsplit positionally to re.split is deprecated since 3.13.
    parts = re.split(r"\s+", q, maxsplit=1)
    cmd = parts[0]
    args = parts[1] if len(parts) > 1 else ""
    return 'http://explainshell.com/explain/{}?args={}'.format(cmd, quote_plus(args))
# explainshell and Crunchbase shortcuts.
commands.add(
    'explain exsh esh explainsh',
    exsh_callback,
    'explainshell.com - write down a command-line to see the help text that matches each argument'
)
# 1-21-2014 (updated 11-28-2018 to use https://www.crunchbase.com/opensearch.xml?version=2)
commands.add(
    'cb',
    'https://www.crunchbase.com/textsearch?q={}',
    'Crunchbase company search'
)
# 5-19-2016
def list_callback(q):
    """Link to this app's own command-listing page, keeping the query if given."""
    if q:
        return url_for("ls", q=q)
    return url_for("ls")
# Self-listing, color and archive utilities.
commands.add("bounce", list_callback, "list all the available commands")
# 8-19-2016
commands.add('color', 'http://www.color-hex.com/color/{}', 'Color information about hex color')
# 5-15-2017
commands.add('wb way wayback', 'https://web.archive.org/web/*/{}', 'Wayback machine of Internet archive, pass in full urls')
# 9-29-2017
# https://news.ycombinator.com/item?id=15346541
def punc_callback(q):
    """Map a punctuation mark to its thepunctuationguide.com page.

    Unknown input falls back to the guide's front page.
    """
    base = "http://www.thepunctuationguide.com/"
    pages = {
        ".": "period",
        "?": "question-mark",
        "!": "exclamation-point",
        ",": "comma",
        ";": "semicolon",
        ":": "colon",
        "-": "hyphen",
        "--": "en-dash",
        "---": "em-dash",
        "(": "parentheses",
        ")": "parentheses",
        "'": "apostrophe",
        "\"": "quotation-marks",
        "/": "slash",
        "<": "angle-brackets",
        ">": "angle-brackets",
        "{": "braces",
        "}": "braces",
        "...": "ellipses",
        "[": "brackets",
        "]": "brackets",
    }
    if q in pages:
        return base + pages[q] + ".html"
    return base
commands.add(
    'punc p pu',
    punc_callback,
    'Punctuation and style guide'
)
# 10-15-2017
commands.add('ip myip', 'https://www.where-am-i.co/my-ip-location', 'My IP Address and current location')
# 11-7-2017
commands.add('dns', 'https://www.whatsmydns.net/?utm_source=whatsmydns.com&utm_medium=redirect#A/{}', 'DNS check for domain (so pass in something like "example.com"')
# 1-2-2018
# BUG FIX: the Yelp template hard-coded find_desc=burgers and dropped the
# query entirely; substitute the search term like every other template here.
commands.add('y yelp', 'https://www.yelp.com/search?find_desc={}&ns=1', 'Search Yelp listings')
commands.add('ig insta', 'https://www.instagram.com/{}/', 'Redirect to instangram username')
commands.add('gh code', 'https://github.com/search?q={}&type=', 'Search Github repos')
# 6-5-2018
commands.add('mojo', 'https://www.boxofficemojo.com/search/?q={}', 'Search for movies on Box Office Mojo')
# 4-12-2019
def videoeta(q):
    """Build a videoeta.com search covering the whole current (UTC) month,
    ordered by box office. The query text `q` itself is ignored."""
    dt = datetime.datetime.utcnow()
    month = dt.month
    year = dt.year
    first_day = 1
    # monthrange returns (weekday_of_first, number_of_days).
    last_day = calendar.monthrange(dt.year, dt.month)[1]
    query_kwargs = {
        "datetype": "videoreleases",
        "start_date": "{:02}/{:02}/{}".format(month, first_day, year),
        "end_date": "{:02}/{:02}/{}".format(month, last_day, year),
        "keywords": "*",
        "ord_by": "box_office",
        "ord_sort": "desc",
        "search_type": "daterange"
    }
    base_url = "https://videoeta.com/search"
    # NOTE(review): Url is a project helper that presumably renders the base
    # URL plus an encoded query string — confirm its quoting behavior.
    return Url(base_url, **query_kwargs)
commands.add("veta videoeta bluray movies releases videos vids dvd", videoeta, "Get the new video releases for the current month")
# 4-12-2019
# Thin wrapper delegating to the registry's own URL-decoding helper.
def unquote(q):
    return commands.unquote(q)
commands.add("unquote urldecode", unquote, "url decode the input")
# 6-7-2019
commands.add("ikea", 'https://www.ikea.com/us/en/search/?query={}', "Search IKEA")
# 6-20-2019
def tweetthread(q):
    """Rewrite a tweet URL to its threadreaderapp.com page.

    If no numeric status id is found, the input is returned unchanged.
    """
    match = re.search(r"\/(\d+)(?:\/|\?)?", q)
    if not match:
        return q
    return "https://threadreaderapp.com/thread/{}.html?refreshed=yes".format(match.group(1))
commands.add("thread storm tweetstorm tweetthread", tweetthread, "Convert a tweet storm into easy to read longform")
# 7-9-2019
def unsplash(q):
    """Collapse whitespace runs into single hyphens and search Unsplash photos."""
    slug = re.sub(r"\s+", "-", q)
    return "https://unsplash.com/search/photos/{}".format(slug)
commands.add("unsplash blogpic", unsplash, "Freely useable images")
# 8-4-2019
commands.add("nin", "https://www.nintendo.com/search/#category=all&page=1&query={}", "Search Nintendo", plus=False)
# 5-7-2020
commands.add("nindeals nind", "https://www.dekudeals.com/search?q={}", "Search Nintendo deals and price history")
# 4-3-2020
commands.add("ps", "https://store.playstation.com/en-us/grid/search-game/1?query={}", "Search Playstation store", plus=False)
# 5-7-2020
commands.add("ps psdeals psd", "https://psprices.com/region-us/search/?q={}&dlc=show", "Search Playstation deals and price history")
# 8-19-2019 (updated 12-30-2022 with new query syntax)
# BUG FIX: the URL template was an unquoted bare token (a SyntaxError); it
# must be a string literal like every other registration.
commands.add("howlong game beat", "https://howlongtobeat.com/?q={}", "How long to beat the game")
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.db import models
from filebrowser.fields import FileBrowseField
class BaseCategory(models.Model):
    """Abstract base for recipe categories: title/slug/description/image,
    a published flag, and a visit counter (verbose names are Russian)."""
    title = models.CharField("Название", max_length=255)
    slug = models.SlugField("URL", unique=True)
    description = models.TextField("Описание", blank=True, null=True)
    image = FileBrowseField("Изображение", max_length=255, blank=True,
                            null=True)
    published = models.BooleanField(default=True, verbose_name="Опубликовано")
    visits_num = models.PositiveIntegerField("Кол. посещений", default=0,
                                             editable=False)
    class Meta:
        abstract = True
    def __unicode__(self):
        # Python 2 display representation (used by the Django admin).
        return self.title
    def inc_visits(self):
        # Bump the visit counter and persist immediately.
        self.visits_num += 1
        self.save()
class Category(BaseCategory):
    """Top-level recipe category."""
    class Meta:
        verbose_name = "Категория рецептов"
        verbose_name_plural = "Категории рецептов"
class SubCategory(BaseCategory):
    """Recipe subcategory; may belong to several categories."""
    category = models.ManyToManyField(Category, verbose_name="Категории")
    def get_absolute_url(self):
        # NOTE(review): reverses the "category_details" route by slug —
        # confirm the URLconf name matches.
        return reverse("category_details", args=(self.slug, ))
    class Meta:
        verbose_name = "Подкатегория рецептов"
        verbose_name_plural = "Подкатегории рецептов"
|
"""
# Example 7.1 CES Production Function Revisited
# Estimating CES Production Function
# Judge, et. al. [1988], Chapter 12
"""
import numpy as np
import pandas as pd
import scipy.optimize as opt
# Load the Judge (1988) production data; whitespace-delimited columns L, K, Q.
# FIX: use a raw string for the regex separator — '\s' is an invalid escape
# sequence in a plain string literal and warns on modern Python.
judge = pd.read_csv("http://web.pdx.edu/~crkl/ceR/data/judge.txt",names=['L','K','Q'],sep=r'\s+')
L, K, Q = judge.L, judge.K, judge.Q
def ces(b):
    # Residuals of the log CES production function:
    #   log(Q) = b0 + b3*log(b1*L^b2 + (1-b1)*K^b2) + e
    # Uses the module-level series L, K, Q loaded above.
    e=np.log(Q)-(b[0]+b[3]*np.log(b[1]*L**b[2]+(1-b[1])*K**b[2]))
    return e
def sse_ces(b):
    # Sum of squared residuals — the nonlinear least-squares objective.
    e=ces(b)
    return sum(e**2)
def print_output(b, vb, bname):
    """Print regression output: a parameter table and the var-cov matrix.

    :param b: parameter estimates (1-d array)
    :param vb: variance-covariance matrix of the estimates
    :param bname: parameter names used as row/column labels
    :return: None (output goes to stdout)
    """
    se = np.sqrt(np.diag(vb))
    tr = b/se
    params = pd.DataFrame({'Parameter': b, 'Std. Error': se, 't-Ratio': tr}, index=bname)
    var_cov = pd.DataFrame(vb, index=bname, columns=bname)
    print('\nParameter Estimates')
    print(params)
    # FIX: corrected the misspelled heading ("Covriance").
    print('\nVariance-Covariance Matrix')
    print(var_cov)
    return
# Stage 1: derivative-free Nelder-Mead from a rough start; stage 2: BFGS polish.
b0=[1,0.5,-1,-1]
res0=opt.minimize(sse_ces,b0,method='Nelder-Mead')
res=opt.minimize(sse_ces,res0.x,method='bfgs')
res
# computing var-cov matrix
from numpy.linalg import inv
import statsmodels.tools.numdiff as nd
# gauss-newton approximation of the hessian matrix
gv = nd.approx_fprime(res.x,ces)
H = (gv.T @ gv)
# assume homoscedasticity
v = sse_ces(res.x)/len(judge)
var= v * inv(H)
# alternatively, try ...
# var = v * res.hess_inv
print_output(res.x,var,['b1','b2','b3','b4'])
# alternatively, a better approach is
# Using lmfit package which depends on leastsq and least_squares
import lmfit as nlm
# data variables are Q, L, K
# NOTE: this redefinition shadows the array-parameter ces() above; lmfit
# passes a Parameters object instead of a plain vector.
def ces(params):
    "CES Production Function"
    b0=params['b0']
    b1=params['b1']
    b2=params['b2']
    b3=params['b3']
    e = np.log(Q)-(b0+b3*np.log(b1*L**b2+(1-b1)*K**b2))
    return e
b = nlm.Parameters()
# b.add_many(('b0', 1.0), ('b1', 0.5), ('b2', -1.0), ('b3', -1.0))
b.add('b0',value=1.0)
# b1 is the share parameter, bounded inside (0, 1].
b.add('b1',value=0.5,min=1.0e-6,max=1.0)
b.add('b2',value=-1.0)
b.add('b3',value=-1.0)
b
# using default Levenberg-Marquardt method (leastsq)
out = nlm.minimize(ces,b)
nlm.report_fit(out)
out1 = nlm.minimize(ces,b,method='least_squares')
nlm.report_fit(out1)
# need to install numdifftools for some methods
out2 = nlm.minimize(ces,b,method='bfgs')
nlm.report_fit(out2)
# with parameter restriction b3=1/b2
b.add('b3',expr='1/b2')
b
out3 = nlm.minimize(ces,b,method='bfgs')
nlm.report_fit(out3)
# minimize the sum-of-squares of errors directly
# NOTE: shadows the earlier sse_ces(b); operates on lmfit Parameters.
def sse_ces(params):
    e = ces(params)
    return(sum(e**2))
res = nlm.minimize(sse_ces,b,method='bfgs')
nlm.report_fit(res)
|
import sys
sys.stdin = open('input.txt')
import string
# Digit characters 0-9A-Z mapped to/from their base-36 values.
N = int(input())
_36_to_deci = {h: deci for deci, h in enumerate(string.digits + string.ascii_uppercase)}
deci_to_36 = {deci: h for deci, h in enumerate(string.digits + string.ascii_uppercase)}
# count[d] = total positional weight (sum of 36**position) contributed by
# digit d across all input numbers.
count = {deci: 0 for deci in range(36)}
# FIX: renamed the accumulator from `sum`, which shadowed the builtin.
total = 0
for n in range(N):
    num = input()[::-1]
    for i in range(len(num)):
        count[_36_to_deci[num[i]]] += 36 ** i
K = int(input())
# Greedy choice: the K digits with the largest gain (35 - digit) * weight
# are promoted to 'Z' (value 35); all others keep their own value.
sorted_list = [*sorted(count.items(), key=lambda x: (35 - x[0]) * x[1], reverse=True)]
for k in range(K):
    total += _36_to_deci['Z'] * sorted_list[k][1]
for k in range(K, 36):
    total += sorted_list[k][0] * sorted_list[k][1]
# Convert the decimal total back into a base-36 string for output.
answer = []
if total == 0:
    print(0)
else:
    while total:
        answer.append(deci_to_36[total % 36])
        total //= 36
    print(''.join(reversed(answer)))
|
import os
from skimage import io,transform
import numpy as np
import matplotlib.pyplot as plt
# Lung image set root (Ali Tianchi lung/colon histopathology data; the path
# text itself is Chinese and must stay as-is)
path_lung = 'G:/阿里天池/肺和结肠癌的组织病理学影像/lung_image_sets/lung_image_sets'
# Colon image set root
path_colon = 'G:/阿里天池/肺和结肠癌的组织病理学影像/colon_image_sets/colon_image_sets'
# path_lung_aca_img = []
# path_lung_aca_label = [] # label 1
#
# path_lung_n_img = []
# path_lung_n_label = [] # label 2
#
# path_lung_scc_img = []
# path_lung_scc_label = [] # label 3
# #
# path_colon_aca_img = []
# path_colon_aca_label = [] # label 4
#
# path_colon_n_img = []
# path_colon_n_label = [] # label 5
# Accumulators filled by get_file_1: image paths and their integer labels.
img_list = []
img_label = []
def get_file_1(file_dir):
    # Collect every lung adenocarcinoma image path under <file_dir>/lung_aca
    # into the module-level img_list, tagging each with label 1.
    # NOTE(review): only the lung_aca class is active; the other classes
    # below are commented out.
    for file in os.listdir(file_dir+'/'+'lung_aca'):
        img_list.append(file_dir+'/'+'lung_aca'+'/'+file)
        img_label.append(1)
    # for file in os.listdir(file_dir+'/'+'lung_n'):
    #     img_list.append(file_dir+'/'+'lung_n'+'/'+file)
    #     img_label.append(2)
    #
    # for file in os.listdir(file_dir + '/' + 'lung_scc'):
    #     img_list.append(file_dir + '/' + 'lung_scc' + '/'+file)
    #     img_label.append(3)
# Populate the path/label lists from the lung set only.
get_file_1(path_lung)
print(len(img_list))
# def get_file_2(file_dir):
#     for file in os.listdir(file_dir+'/'+'colon_aca'):
#         img_list.append(file_dir+'/'+'colon_aca'+'/'+file)
#         img_label.append(4)
#     for file in os.listdir(file_dir+'/'+'colon_n'):
#         img_list.append(file_dir+'/'+'colon_n'+'/'+file)
#         img_label.append(5)
# get_file_2(path_colon)
# print(img_list)
# image_list = np.hstack((path_lung_scc_img, path_lung_aca_img, path_lung_n_img, path_colon_aca_img,path_colon_n_img))
# label_list = np.hstack((path_lung_scc_label, path_lung_aca_label, path_lung_n_label, path_colon_aca_label,path_colon_n_label))
# temp = np.array([image_list, label_list])
# temp = temp.transpose()
# np.random.shuffle(temp)
# all_image_list_1 = list(temp[:, 0])
# all_label_list_1 = list(temp[:, 1])
def img_deal(img_list):
    """Read each image path, resize to 64x64, rescale by 255, cast to float16."""
    processed = []
    for path in img_list:
        image = io.imread(path)
        image = transform.resize(image, (64, 64))
        processed.append((image / 255.0).astype('float16'))
    return processed
# Only the first 100 images are materialized here.
x = img_deal(img_list[:100])
print(x)
from sklearn.preprocessing import LabelEncoder
from keras.utils.np_utils import to_categorical
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(img_label)
# NOTE(review): one-hot encodes to 5 classes although only label 1 is active
# above, and x has 100 rows while y covers every label — confirm alignment.
y = to_categorical(y,5)
x = np.array(x)
#
# from sklearn.model_selection import train_test_split
# x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2)
# print(x_train.shape)
# print(x_test.shape)
#
# from keras.preprocessing.image import ImageDataGenerator
# augs_gen = ImageDataGenerator(
#     featurewise_center=False,
#     samplewise_center=False,
#     featurewise_std_normalization=False,
#     samplewise_std_normalization=False,
#     zca_whitening=False,
#     zoom_range=0.1,
#     width_shift_range=0.2,
#     height_shift_range=0.2,
#     horizontal_flip=True,
#     vertical_flip=False
# )
# augs_gen.fit(x_train)
|
import sdl2, sdl2.ext
import pybasic.sprite as sp
import pybasic.draw as draw
__all__ = ['create_window', 'refresh_window', 'get_window']
# Module-level singleton window managed by create_window/get_window.
_window = None
def create_window(title, size, position=None, flags=None):
    """Create and show the module's single SDL window."""
    global _window
    _window = sdl2.ext.Window(title, size, position, flags)
    _window.show()
def refresh_window(clear=True, cls_color=(0, 0, 0)):
    """Optionally clear, render all sprites, and present the window."""
    if clear:
        # Prefer the GL renderer when present; otherwise software-fill the
        # sprite module's render surface.
        if sp.GlRenderer:
            sp.GlRenderer.clear(cls_color)
        else:
            sdl2.ext.fill(sp._renderer.surface, cls_color)
    sp.render_all_sprites()
    _window.refresh()
def get_window():
    # Accessor for the module window (None until create_window is called).
    return _window
import threading
from DBInterface import DBConnection, loginDB, employeeDB, allocationsDB, cabsDB, driversDB, employeeAddressDB
class AgencyInterface (threading.Thread):
    """One worker thread per connected agency client.

    Speaks a newline-terminated, space-separated text protocol over
    clientConnection: the initial message (msgList) authenticates the
    agency, after which each request's first token selects a handler
    method (addcab, adddriver, sendcabs, ...). Python 2 code.
    """
    def __init__( self, clientConnection, msgList, db ):
        threading.Thread.__init__(self)
        self.type = "Agency Interface"
        self.loginType = "agency"
        self.clientConnection = clientConnection
        self.msgList = msgList
        self.db = db
    def connectDB( self ): #connect to the sql database and create cursor object
        #self.db = DBConnection.DBConnection("localhost", "cab4employee", "", "cab4employee")
        #self.db.connect()
        self.cursor = self.db.getCursor()
    # Send one newline-terminated reply to the client.
    def sendData(self, data ):
        self.clientConnection.send( data + '\n' )
    # Blocking read of the next request (up to 1024 bytes).
    def receiveData( self ):
        return self.clientConnection.recv(1024)
    def disconnect( self ):
        self.clientConnection.close()
        print 'client disconnected'
    def login( self ): #authenticate using username, password and type and get eid from database
        # Expects handshake tokens like [<type>, 'login', username, password].
        # NOTE(review): when the second token is not 'login', self.eid is
        # never set and run() will hit an AttributeError — confirm clients
        # always send a login handshake first.
        if self.msgList[1] != 'login':
            return None
        self.username = self.msgList[2]
        password = self.msgList[3]
        self.eid = loginDB.authenticate( self.cursor, self.username, password, self.loginType )
        del self.msgList
    def addCab( self, msgList ): #enter cab details into database; new cabs start with rating "5"
        data = {}
        data['cid'] = msgList[1]
        data['c_model'] = msgList[2]
        data['maxpassengers'] = msgList[3]
        data['rating'] = "5"
        checkData = cabsDB.getCab(self.cursor, data['cid'])
        if checkData == None:
            cabsDB.insertCab( self.cursor, self.db, data )
            self.db.commit()
            self.sendData("done")
        else:
            self.sendData("existing")
    # def sendCabs( self ):
    #     cidList = cabsDB.getAllCid(self.cursor)
    #     msg = ""
    #     for cid in cidList:
    #         data = cabsDB.getCab( self.cursor, cid)
    #         msg += data[0] + " " + data[1] + " " + data[2] + " "
    #     print msg
    #     self.sendData(msg)
    def addDriver( self, msgList ):
        # Insert a driver; a negative stored rating marks a removed driver:
        # -1 means flagged (cannot return), below -1 means re-instate with
        # the sign flipped back.
        data = {}
        data['did'] = msgList[1]
        data['first_name'] = msgList[2]
        data['last_name'] = msgList[3]
        data['cid'] = msgList[4]
        data['contact_number'] = msgList[5]
        data['rating'] = "5"
        checkData = driversDB.getDriver(self.cursor, data['did'])
        if checkData == None:
            driversDB.insertDriver( self.cursor, data )
            self.db.commit()
            self.sendData("done")
        elif int( checkData['rating'] ) == -1 :
            self.sendData("flagged")
        elif int( checkData['rating'] ) < -1 :
            data['rating'] = str( -1* int(checkData['rating']) )
            driversDB.modifyDriver( self.cursor, data )
            self.db.commit()
            self.sendData("redone")
        else:
            self.sendData("existing")
    # Reply with every allocation as "aid cid passenger_count atime ...".
    def sendAllocations( self ):
        dataList = allocationsDB.getAllocations( self.cursor )
        msg = ""
        if dataList == None:
            self.sendData("None")
            return
        for data in dataList:
            # eid is a comma-separated employee-id list; its length is the
            # passenger count.
            count = len( data['eid'].split(',') )
            msg += data['aid']+" "+data['cid']+" "+str(count)+" "+data['atime']+" "
        print msg
        self.sendData( msg )
    # Reply with every driver's full record, space-separated.
    def sendDrivers( self ):
        didList = driversDB.getAllDid(self.cursor)
        msg = ""
        for did in didList:
            data = driversDB.getDriver( self.cursor, did )
            msg += data['did'] + " " + data['first_name'] + " " + data['last_name'] + " " + data['cid'] + " " + data['contact_number'] + " " + data['rating'] + " "
        print "msg : "+msg
        self.sendData( msg )
    # Reply with one driver's record, selected by did.
    def sendDriver(self, msgList):
        did = msgList[1]
        msg = ""
        data = driversDB.getDriver( self.cursor, did )
        msg += data['did'] + " " + data['first_name'] + " " + data['last_name'] + " " + data['cid'] + " " + data['contact_number'] + " " + data['rating']
        print "msg : " + msg
        self.sendData(msg)
    # Reply with every cab's record, space-separated.
    def sendCabs( self ):
        cidList = cabsDB.getAllCid(self.cursor)
        msg = ""
        for cid in cidList:
            data = cabsDB.getCab( self.cursor, cid )
            msg += data['cid'] + " " + data['c_model'] + " " + data['maxpassengers'] + " " + data['rating'] + " "
        print "msg : "+msg
        self.sendData( msg )
    # Reply with one cab's record (without rating), selected by cid.
    def sendCab(self, msgList):
        cid = msgList[1]
        msg = ""
        data = cabsDB.getCab( self.cursor, cid)
        msg += data['cid'] + " " + data['c_model'] + " " + data['maxpassengers'] + " "
        print msg
        self.sendData(msg)
    # Reply with the list of all cab ids.
    def sendCidList(self):
        cidList = cabsDB.getCidList(self.cursor)
        msg = ""
        for cid in cidList:
            msg += str( cid ) + " "
        print msg
        self.sendData(msg)
    # Reply "yes"/"no" depending on whether the cab is currently allocated.
    def checkCidAllocated(self, msgList):
        cid = msgList[1]
        status = allocationsDB.checkCidAllocated(self.cursor, cid)
        if status == True:
            self.sendData("yes")
        else:
            self.sendData("no")
    def allocateCab(self, msgList):
        # Attach cab cid (and its driver) to allocation aid, then mark the
        # allocation changed so other parties can pick up the update.
        aid = msgList[1]
        cid = msgList[2]
        pcid = msgList[3]
        status = allocationsDB.modifyCid( self.cursor, aid, cid )
        if( status == True ):
            did = driversDB.getDidFromCid( self.cursor, cid )
            if did == None:
                self.sendData("fail")
                return
            status = allocationsDB.modifyDid( self.cursor, aid, did )
            allocationsDB.setChangeFlag( self.cursor, aid )
            if status == True :
                self.db.commit()
                self.sendData("success")
                return
            else :
                self.sendData("fail")
        else:
            self.sendData("fail")
    # Reply with ids of cabs not currently tied to an allocation.
    def sendAvailableCidList(self):
        cidList = allocationsDB.getAvailableCidList(self.cursor)
        msg = ""
        for cid in cidList:
            msg += str(cid) + " "
        print msg
        self.sendData(msg)
    # Reply with driver records matching a search pattern, or "NotFound".
    def searchDrivers(self, msgList):
        msg = ""
        pattern = msgList[1]
        dataList = driversDB.searchDrivers(self.cursor, pattern)
        if dataList == None:
            self.sendData("NotFound")
            return
        for data in dataList:
            msg += data['did'] + " " + data['first_name'] + " " + data['last_name'] + " " + data['cid'] + " " + data['contact_number'] + " " + data['rating'] + " "
        print msg
        self.sendData(msg)
    # Reply with cab records matching a search pattern (blank when none).
    def searchCabs(self, msgList):
        msg = ""
        pattern = msgList[1]
        dataList = cabsDB.searchCabs(self.cursor, pattern)
        if dataList == None:
            self.sendData(str(" "))
            return
        for data in dataList:
            msg += data['cid'] + " " + data['c_model'] + " " + data['maxpassengers'] + " " + data['rating'] + " "
        print msg
        self.sendData(msg)
    # Reply with ids of cabs that have no driver assigned.
    def sendRemainingCidList(self):
        cidList = driversDB.getRemainingCidList(self.cursor)
        msg = ""
        for cid in cidList:
            msg += str(cid) + " "
        print msg
        self.sendData(msg)
    def sendAllocationType(self, msgList):
        aid = msgList[1]
        data = allocationsDB.getAllocationType(self.cursor, aid)
        self.sendData(data)
    # Reply with the home address of every employee on an allocation.
    def sendAllocationAddresses(self, msgList):
        aid = msgList[1]
        data = allocationsDB.getAllocation( self.cursor, aid )
        eidList = data['eid']
        eids = eidList.split(',')
        msg = ""
        for eid in eids:
            data = employeeAddressDB.getEmployeeAddress(self.cursor, eid)
            msg += data['house_num']+" "+data['street_name']+" "+data['city']+" "+data['postal_code']+" "
        print msg
        self.sendData(msg)
    # Reply with the driver currently assigned to an allocation.
    def sendAllocatedDriver(self, msgList):
        aid = msgList[1]
        data = allocationsDB.getAllocation( self.cursor, aid )
        did = data['did']
        driver = driversDB.getDriver(self.cursor, did)
        msg = driver['did']+" "+driver['first_name']+" "+driver['last_name']+" "+driver['contact_number']+" "+driver['rating']+" "
        self.sendData(msg)
    # Reply with the cab currently assigned to an allocation.
    def sendAllocatedCab(self, msgList):
        aid = msgList[1]
        data = allocationsDB.getAllocation( self.cursor, aid )
        cid = data['cid']
        cab = cabsDB.getCab(self.cursor, cid)
        msg = cab['cid']+" "+cab['c_model']+" "+cab['maxpassengers']+" "+cab['rating']+" "
        self.sendData(msg)
    def deallocateCab(self, msgList):
        # Clear the cab/driver on an allocation and flag it as changed.
        aid = msgList[1]
        status = allocationsDB.resetCidDid( self.cursor, aid )
        print 'reset cid status : ' + str(status)
        if status == True:
            status = allocationsDB.setChangeFlag( self.cursor, aid )
            print 'set change_flag status : ' + str(status)
            self.sendData("success")
            self.db.commit()
        else:
            self.sendData("fail")
    def removeCab(self, msgList):
        # Delete the cab and detach it from any driver.
        cid = msgList[1]
        status = cabsDB.removeCab( self.cursor, cid )
        if status == True:
            status = driversDB.resetCab( self.cursor, cid )
            if status == True:
                self.sendData("done")
                self.db.commit()
        else:
            self.sendData("fail")
    def modifyCab(self, msgList):
        data = {}
        data['cid'] = msgList[1]
        data['model'] = msgList[2]
        data['maxpassengers'] = msgList[3]
        status = cabsDB.modifyCab( self.cursor, data )
        if status == True:
            self.sendData("done")
            self.db.commit()
        else:
            self.sendData("fail")
    def modifyDriver(self, msgList):
        data = {}
        data['did'] = msgList[1]
        data['first_name'] = msgList[2]
        data['last_name'] = msgList[3]
        data['cid'] = msgList[4]
        data['contact_number'] = msgList[5]
        data['rating'] = 'None'
        status = driversDB.modifyDriver(self.cursor, data)
        if status == True:
            self.sendData("done")
            self.db.commit()
        else:
            self.sendData("fail")
    def removeDriver(self, msgList):
        # Soft delete: the rating's sign is flipped negative to mark removal
        # (see addDriver, which re-instates by flipping it back).
        did = msgList[1]
        driver = driversDB.getDriver(self.cursor, did)
        rating = int(driver['rating'])
        rating = -1*rating
        status = driversDB.removeDriver(self.cursor, did, str(rating) )
        if status == True:
            self.sendData("done")
            self.db.commit()
        else:
            self.sendData("fail")
    def getDriverFromCid(self, msgList):
        cid = msgList[1]
        data = driversDB.getDriverFromCid(self.cursor, cid)
        if data != None:
            msg = data['did']+" "+data['first_name']+" "+data['last_name']+" "+data['cid']+" "+data['contact_number']
            self.sendData(msg)
        else:
            self.sendData("failed")
    def combineAllocations(self, msgList):
        # Merge several allocations into the first one: its eid list absorbs
        # the others' employees and the other allocations are deleted.
        count = int( msgList[1] )
        aidList = msgList[2].split(',')
        mainAid = str(aidList[0])
        mainEid = allocationsDB.getEid(self.cursor, mainAid)
        del(aidList[0])
        for aid in aidList:
            eid = allocationsDB.getEid(self.cursor,aid)
            mainEid += "," + eid
            allocationsDB.deleteAllocation(self.cursor, aid)
        allocationsDB.modifyEid(self.cursor, mainAid, mainEid)
        self.db.commit()
        self.sendData("done")
    def run( self ): #main entry point
        try:
            self.connectDB() #establish connection to database
            self.login() #attempt authentication
            if self.eid == None : #if authentication failed
                print 'login failed : agency interface'
                self.sendData("failed") #send response that failed
                return #stop the thread due to login failure
            else :
                print 'sending done'
                self.sendData("done")
                #sendAllocations()
                print 'sent' #send a login accepted message
            ##
            #main request loop
            ##
            while True:
                print 'waiting for request'
                self.msg = str( self.receiveData() ) #get a request from server
                print self.msg
                if self.msg == None:
                    return
                msgList = self.msg.split()
                if len( msgList ) == 0:
                    return
                if msgList[0] == 'addcab' : #request to add a cab
                    print 'add cab'
                    self.addCab( msgList )
                elif msgList[0] == 'adddriver' :
                    print 'add driver'
                    self.addDriver( msgList )
                elif msgList[0] == 'sendcabs' :
                    print 'send cabs'
                    self.sendCabs()
                elif msgList[0] == 'sendcab':
                    print 'send cab'
                    self.sendCab(msgList)
                elif msgList[0] == 'senddriver':
                    print 'send driver'
                    self.sendDriver(msgList)
                elif msgList[0] == 'senddrivers':
                    print 'send drivers'
                    self.sendDrivers()
                elif msgList[0] == 'sendallocations':
                    print 'get allocations'
                    self.sendAllocations()
                elif msgList[0] == 'sendcidlist':
                    print 'send cidlist'
                    self.sendCidList()
                elif msgList[0] == 'sendavailablecidlist':
                    print 'send available cidlist'
                    self.sendAvailableCidList()
                elif msgList[0] == 'allocatecab':
                    print 'allocate cab'
                    self.allocateCab(msgList)
                elif msgList[0] == 'deallocatecab':
                    print 'deallocate cab'
                    self.deallocateCab( msgList )
                elif msgList[0] == 'searchdrivers':
                    print 'search drivers'
                    self.searchDrivers(msgList)
                elif msgList[0] == 'searchcabs':
                    print 'search cabs'
                    self.searchCabs(msgList)
                # NOTE(review): 'semdremainingcidlist' looks like a typo for
                # 'sendremainingcidlist', but the client must send the same
                # token — confirm both sides before renaming.
                elif msgList[0] == 'semdremainingcidlist' :
                    print 'send remaining cabs'
                    self.sendRemainingCidList()
                elif msgList[0] == 'sendallocationaddresses':
                    print 'send allocation addresses'
                    self.sendAllocationAddresses(msgList)
                elif msgList[0] == 'sendallocateddriver':
                    print 'send allocated driver'
                    self.sendAllocatedDriver(msgList)
                elif msgList[0] == 'sendallocatedcab' :
                    print 'send allocated cab'
                    self.sendAllocatedCab(msgList)
                elif msgList[0] == 'sendallocationtype':
                    print 'send allocation type'
                    self.sendAllocationType(msgList)
                elif msgList[0] == 'removecab':
                    print 'remove cab'
                    self.removeCab(msgList)
                elif msgList[0] == 'modifycab':
                    print 'modify cab'
                    self.modifyCab(msgList)
                elif msgList[0] == 'removedriver':
                    print 'remove driver'
                    self.removeDriver(msgList)
                elif msgList[0] == 'modifydriver':
                    print 'modify driver'
                    self.modifyDriver(msgList)
                elif msgList[0] == 'checkcidallocated':
                    print 'check cid allocated'
                    self.checkCidAllocated(msgList)
                elif msgList[0] == 'getdriverfromcid':
                    print 'get driver from cid'
                    self.getDriverFromCid(msgList)
                elif msgList[0] == 'combineallocations':
                    print 'combine allocations'
                    self.combineAllocations(msgList)
                else :
                    return
            ##
            #
            ##
        # except IntegrityError as e:
        #     self.sendData("EC1")
        # except :
        #     print 'something wrong'
        finally:
            self.disconnect() # disconnect when leaving thread
|
# Create your models here.
from django.db import models
from django.contrib.auth.models import User
# from managers import FriendshipManager
from django.db.models.signals import post_save
class MyUser(models.Model):
    """Profile attached one-to-one to django.contrib.auth User."""
    user = models.OneToOneField(User)
    name = models.CharField(max_length=100, default = 'Me')
    description = models.TextField(default = "None")
    like_number = models.IntegerField(default = 0)
    def __unicode__(self):
        # Python 2 display name.
        return self.name
    def save(self, *args, **kwargs):
        # Reuse the existing row's pk so saving a second profile object for
        # the same user updates the row instead of inserting a duplicate.
        try:
            existing = MyUser.objects.get(user=self.user)
            self.id = existing.id #force update instead of insert
        except MyUser.DoesNotExist:
            pass
        models.Model.save(self, *args, **kwargs)
def create_user_profile(sender, instance, created, **kwargs):
    """post_save receiver: create a MyUser profile for each newly created User."""
    if not created:
        return
    MyUser.objects.create(user=instance)
# Create the profile automatically whenever a User row is inserted.
post_save.connect(create_user_profile, sender=User)
class Message(models.Model):
    """Private message from a profile to a User."""
    sender = models.ForeignKey(MyUser, related_name = 'profile')
    receiver = models.ForeignKey(User)
    message = models.TextField()
    subject = models.CharField(max_length=50, default = 'No Subject')
class Like(models.Model):
    """Records that a User liked a profile."""
    user = models.ForeignKey(User)
    profile = models.ForeignKey(MyUser)
from appium import webdriver
# {cmp=com.lemon.lemonban/.activity.MainActivity}
# NOTE(review): this prints new UiSelector().text(heell) — UiAutomator
# selectors normally quote the literal, i.e. text("heell"); confirm the
# intended output before relying on it.
print('new UiSelector().text({})'.format("heell"))
import json
from models.GCN import GCN
import torch
from dataloader import DEAP
from trainer import Trainer
from torch_geometric.data import DataLoader
import torch.optim as optim
def run(config, train_dataset, val_dataset):
    """Train a GCN on the given datasets and return accuracy/loss histories.

    :param config: dict with at least 'batch_size' and 'lr'
    :param train_dataset: training dataset (torch_geometric-compatible)
    :param val_dataset: validation dataset
    :return: (train_acc, train_loss, val_acc, val_loss) from Trainer.train()
    """
    # BUG FIX: the original conditional selected 'cpu' in both branches,
    # so the GPU was never used even when CUDA was available.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = GCN(1, 64, 4, 0.01).to(device)
    print("Training on {}, batch_size is {}, lr is {}".format(device, config['batch_size'], config['lr']))
    # NOTE(review): MSELoss with a 4-output model — confirm regression (not
    # classification) is intended here.
    criterion = torch.nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=config['lr'])
    train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=config['batch_size'], shuffle=True)
    trainer = Trainer(model, train_loader, val_loader, criterion, optimizer, config, device)
    train_acc, train_loss, val_acc, val_loss = trainer.train()
    return train_acc, train_loss, val_acc, val_loss
if __name__ == '__main__':
    with open('config.json', 'r') as f:
        config = json.load(f)
    train_dataset = DEAP(root_dir="./clean_data", label_path='clean_data')
    # NOTE(review): the same dataset is passed as both train and validation
    # sets, so the reported val metrics are not held-out performance.
    train_acc, train_loss, val_acc, val_loss = run(config, train_dataset, train_dataset)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : {{create_time}}
# @Author : by {{author_name}}
# @File : {{file_name}}
# @Email : {{email}}
"""
token化模块,除了用已经用的tokenizer,用户必须实现TokenizerBase中的get_tokenizer方法,
"""
import os
from ..core.dataprocessing import DataProcess
# Shared DataProcess instance; its logger is reused module-wide.
DP = DataProcess()
logger = DP.logger
# try:
# {% if tokenizer_type == "bert" -%}
# from tokennizers.tokenizer_by_bert import FullTokenizer as _tokenizer
# _tokenizer = _tokenizer(DP.config['bert'])
# {% else %}
# from tokennizers.
# {% endif %}
# except:
# raise ImportError('请导入正确的tokenizer 或者自定义一个tokenizer')
class TokenizerBase():
    """Base tokenizer wrapper.

    Subclasses must implement get_tokenizer(); calling the instance
    tokenizes the requested fields of every example dict in place.
    """
    def get_tokenizer(self):
        """
        :return: tokenizer object carrying its own vocabulary, similar to
            the BERT-style tokenizer implementations
        """
        raise NotImplementedError()
    def __call__(self, example_list, token_fields=None, expand_fn=None):
        """
        :param example_list: list of example dicts, modified in place
        :param token_fields: e.g. [{'text_a': ['label1', 'label2']}, 'text_b'];
            a dict entry means its value fields are expanded in step with the
            tokenization of the key field
        :param expand_fn: dict mapping each bound field name to its expand
            function; required for every field bound through a dict entry
        """
        # FIX: None sentinels instead of mutable default arguments.
        token_fields = [] if token_fields is None else token_fields
        expand_fn = {} if expand_fn is None else expand_fn
        if not token_fields:
            raise ValueError('token_fields can not be none')
        # BUG FIX: the original check was inverted — it raised exactly when
        # expand_fn WAS a dict; reject non-dict values instead.
        if not isinstance(expand_fn, dict):
            raise ValueError('expand_fn must be a dict:{0}'.format(str(expand_fn)))
        if expand_fn:
            for k, v in expand_fn.items():
                if not callable(v):
                    raise ValueError('The value in expand_fn must be a function: {0}'.format(expand_fn))
        single_fields = []  # fields tokenized independently
        bound_fields = []   # dict entries: fields expanded alongside their key field
        for field in token_fields:
            if isinstance(field, dict):
                if len(field.keys()) > 1:
                    raise ValueError('Field has more than one key: {0}'.format(str(field)))
                # BUG FIX: validate each bound field NAME; the original
                # tested the whole (unhashable) value list against expand_fn,
                # which raised TypeError.
                for bound_values in field.values():
                    for bound_field in bound_values:
                        if bound_field not in expand_fn:
                            raise ValueError('Field in value of dict must be in expand_fn: {0}'.format(str(field)))
                bound_fields.append(field)
            elif isinstance(field, str):
                single_fields.append(field)
            else:
                raise ValueError('Token_fields can only include str or dict: {0}'.format(str(field)))
        [self._tokenize_one_sample(e, single_fields, bound_fields, expand_fn) for e in example_list]
class Tokenizer(TokenizerBase):
    """Concrete tokenizer driver: wires the tokenizer returned by
    get_tokenizer() into DP and implements per-sample tokenization."""
    def __init__(self):
        self._tokenizer = self.get_tokenizer()
        DP.tokenizer = self._tokenizer
        # BUG FIX: the original condition was inverted — it raised when the
        # tokenizer DID provide tokenize(); require the method instead.
        if not hasattr(self._tokenizer, 'tokenize'):
            raise ValueError('tokenizer must have a tokenize method')
        # self._fields = DP.fields
        # {% if task_type == "sl" -%}
        # self.tokenize_domain = DP.config['TaskInput']['TokenizeDomainHeader'] # which field drives label tokenization
        # self.label = [f for f in self._fields if f.startswith('label_')]
        #
        # if len(self.label)>1:
        #     raise ValueError('There are two or more fields start with "label_", which is not allow in sequence labeling')
        # self.tag_scheme = DP.config['TaskInput']['TagScheme'] #
        # if self.tag_scheme not in ['BIO','BIOES']:
        #     raise ValueError('tag scheme must be BIO or BIOES, current scheme is {0}'.format(self.tag_scheme))
        # {% endif %}
    def _tokenize_one_sample(self, example, single_fields, bound_fields, expand_fn):
        """
        :param example: one example dict, modified in place
        :param single_fields: plain fields tokenized on their own
        :param bound_fields: dict entries whose value fields are expanded in
            step with the tokenization of the key field
        :param expand_fn: expand function per bound field name
        :return: None
        """
        # Plain fields: tokenize directly.
        for field in single_fields:
            example[field+'_tokenized'] = self._tokenizer.tokenize(example[field])
        # Bound fields: tokenize the key field position by position and grow
        # the bound label fields to match the produced tokens.
        for bound_field in bound_fields:
            # BUG FIX: dict.keys() is not indexable on Python 3; take the
            # single key via the iterator instead.
            k = next(iter(bound_field))
            v = bound_field[k]
            example[k+'_tokenized'] = []
            # Per original position, the (start, end) token indexes it
            # produced — lets callers map tokens back to source positions.
            example[k+'_mapping'] = []
            for i in v:
                example[i+'_tokenized'] = []
            _example = [example[k]] + [example[i] for i in v]
            # Tokenize every aligned position.
            current_index = 0
            for t in zip(*_example):
                v_domain = self._tokenizer.tokenize(t[0])
                example[k+'_tokenized'] += v_domain
                example[k+'_mapping'].append((current_index, current_index+len(v_domain)-1))
                current_index = current_index+len(v_domain)
                # BUG FIX: the inner loop variable shadowed the outer `k`;
                # renamed to idx.
                for idx, i in enumerate(v):
                    example[i+'_tokenized'] += expand_fn[i](v_domain, t[idx+1])
    # (commented-out draft of sequence-labelling tokenization, kept as-is)
    # def _tokenize_sequence_labelling_one_sample(self,example,single_fields,bound_fields):
    #
    #
    #     example[self.tokenize_domain+'_tokenized'] = []
    #
    #     example[self.label+'_tokenized'] = []
    #     for feature,lable in zip([example[self.tokenize_domain],example[self.label]):
    #         feature_after_tokenize = self._tokenizer.tokenize(feature)
    #         example[self.tokenize_domain+'_tokenized'].append(feature_after_tokenize)
    #         if self.tag_scheme=='BIO':
    #             if lable in ['O','I']:
    #                 example[self.label+'_tokenized'] = [label]*len(feature_after_tokenize)
    #         elif self.tag_scheme=='BIOES'
    # def _tokenize_classification_one_sample(self,example):
    #
    #     """
    #     tokenization for classification tasks
    #     :return:
    #     """
    #     pass
if __name__ == '__main__':
    pass
|
import os
import json
# Optional JSON config overrides; the path comes from the JSON_CONFIG env var.
# FIX: close the config file instead of leaking the handle.
json_file = os.environ.get('JSON_CONFIG')
if json_file:
    with open(json_file) as f:
        json_config = json.load(f)
else:
    json_config = {}
ARTICLES_PER_PAGE = json_config.get('ARTICLES_PER_PAGE', 5)
DATABASE_NAME = json_config.get('DATABASE_NAME', 'mrshoeblog')
PORT = json_config.get('PORT', 8088)
COMMENTS_ENABLED = json_config.get('COMMENTS_ENABLED', False)
GOOGLE_ANALYTICS_ACCOUNT = json_config.get('GOOGLE_ANALYTICS_ACCOUNT', 'UA-168882-1')
BLOGGER_NAME = json_config.get('BLOGGER_NAME', 'David Shoemaker')
BLOG_TITLE = json_config.get('BLOG_TITLE', 'MrShoe.org Blog')
BASE_URL = json_config.get('BASE_URL', 'http://mrshoe.org/')
HOME_URL = BASE_URL
BLOG_URL = BASE_URL+'blog/'
ATOM_URL = BASE_URL+'blog/index.xml'
# BUG FIX: the fallback open('passwd') was evaluated eagerly (and the file
# handle leaked) even when the JSON config supplied BLOG_PASSWORD; read the
# passwd file only when the key is absent.
if 'BLOG_PASSWORD' in json_config:
    BLOG_PASSWORD = json_config['BLOG_PASSWORD']
else:
    with open('passwd') as f:
        BLOG_PASSWORD = f.read().strip()
|
import sys
# Expect exactly two arguments: the input path and the output path.
if len(sys.argv) != 3:
    sys.stderr.write("Usage: Python %s inputfile outputfile\n" % sys.argv[0])
    raise SystemExit(1)
# print(sys.argv[0])
inputfile = sys.argv[1]
# FIX: corrected the misspelled variable name (was `outoputfile`).
outputfile = sys.argv[2]
|
"""
These functions read the input, and write the output.
"""
from pandas import read_csv
def read_and_tune_csv_data(fname):
    """Load data/<fname>, reading Sex/Cabin/Embarked as categorical columns."""
    categorical = dict(
        Sex='category',
        Cabin='category',
        Embarked='category',
    )
    return read_csv("data/{}".format(fname), dtype=categorical)
def read_train_data():
    # Training split loaded from data/train.csv.
    return read_and_tune_csv_data("train.csv")
def read_test_data():
    # BUG FIX: this loaded "train.csv", silently reusing the training split
    # wherever the test set was expected; load the test file instead.
    return read_and_tune_csv_data("test.csv")
|
from handler import Handler
from dbschema import User
import crypto
import json
class BlogHandler(Handler):
    """Base request handler with signed-cookie auth and html/json output."""
    def set_secure_cookie(self, key, value):
        # Store the value alongside its signature (crypto.secure_mesg).
        cookie_value = crypto.secure_mesg(value)
        self.response.headers.add_header(
            'Set-Cookie',
            '{0}={1}; Path=/'.format(key, cookie_value)
        )
    def read_secure_cookie(self, key):
        # Return the validated value, or a falsy result when the cookie is
        # missing or fails signature validation.
        cookie_value = self.request.cookies.get(key)
        return cookie_value and crypto.validate(cookie_value)
    def login(self, user):
        # Persist the user's datastore id in the signed cookie.
        self.set_secure_cookie('user_id', str(user.key().id()))
    def logout(self):
        # Clearing the cookie value logs the user out.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
    def initialize(self, *a, **kw):
        # Runs per request: resolve the current user from the cookie and
        # pick the response format from the URL suffix.
        Handler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        self.user = uid and User.by_id(int(uid))
        self.format = 'html'
        if self.request.url.endswith('.json'):
            self.format = 'json'
    def render_json(self, d):
        self.response.content_type = 'application/json; charset=UTF-8'
        self.response.write(json.dumps(d))
# coding: utf-8
from abc import ABCMeta, abstractmethod
from threading import Thread
class Subject(object):
    """Abstract subject of the observer pattern.

    Every operation delegates to action(), which subclasses must override;
    calling any method on a bare Subject raises NotImplementedError.
    """
    #def __init__(self, )
    def register(self, newObserver):
        # Fails loudly until a subclass overrides action().
        self.action()
    def unregister(self, deleteObserver):
        self.action()
    def notifyObserver(self):
        self.action()
    def action(self):
        raise NotImplementedError('action must be defined')
class Observer(object, metaclass=ABCMeta):
    """Abstract observer interface for stock price updates.

    Bug fix: the original assigned ``__metaclass__ = ABCMeta`` in the class
    body -- Python 2 syntax that is a silent no-op on Python 3, so ``update``
    was never enforced as abstract. The ``metaclass=`` keyword (already
    hinted at in the original's comment) restores the intent: instantiating
    an observer that lacks ``update`` now raises TypeError.
    """
    @abstractmethod
    def update(self, ibmPrice, aaplPrice, googPrice):
        """Receive the latest IBM, AAPL and GOOG prices."""
        pass
class StockGrabber(Subject):
    """Concrete subject: tracks three stock prices and pushes every change
    to the registered observers."""

    def __init__(self):
        self.observers = []
        self.ibmPrice = 0.0
        self.aaplPrice = 0.0
        self.googPrice = 0.0

    def register(self, newObserver):
        """Subscribe an observer to future price updates."""
        self.observers.append(newObserver)

    def unregister(self, deleteObserver):
        """Unsubscribe an observer.

        Bug fix: the original did ``del self.observers[deleteObserver]``,
        indexing the list with the observer object itself -- a TypeError at
        runtime. ``list.remove`` deletes by value as intended.
        """
        if deleteObserver in self.observers:
            self.observers.remove(deleteObserver)

    def notifyObserver(self):
        """Push the current three prices to every registered observer."""
        for observer in self.observers:
            observer.update(self.ibmPrice, self.aaplPrice, self.googPrice)

    def setIBMPrice(self, newIBMPrice):
        self.ibmPrice = newIBMPrice
        self.notifyObserver()

    def setAAPLPrice(self, newAAPLPrice):
        self.aaplPrice = newAAPLPrice
        self.notifyObserver()

    def setGOOGPrice(self, newGOOGPrice):
        self.googPrice = newGOOGPrice
        self.notifyObserver()
class StockObserver(Observer):
    """Observer that caches the latest prices and prints them on update."""
    def __init__(self, stockGrabber):
        # Self-registers with the subject on construction.
        self.stockGrabber = stockGrabber
        self.stockGrabber.register(self)
    def update(self, ibmPrice, aaplPrice, googPrice):
        """Receive fresh prices, cache them, and print them."""
        self.ibmPrice = ibmPrice
        self.aaplPrice = aaplPrice
        self.googPrice = googPrice
        self.printThePrices()
    def printThePrices(self):
        """Print the cached prices, one ticker per line."""
        print ("IBM: " + str(self.ibmPrice) + "\nAAPL: " + str(self.aaplPrice) + "\nGOOGPrice: " + str(self.googPrice) + "\n")
class GrabStocks(object):
    """Demo driver: wires observers to a StockGrabber and pushes prices."""
    def run(self):
        # The first observer sees the first three price updates alone...
        stockGrabber = StockGrabber()
        stockObserver1 = StockObserver(stockGrabber)
        stockGrabber.setIBMPrice(197.00)
        stockGrabber.setAAPLPrice(667.60)
        stockGrabber.setGOOGPrice(676.40)
        # ...then a second observer joins and both receive the repeats.
        stockObserver2 = StockObserver(stockGrabber)
        stockGrabber.setIBMPrice(197.00)
        stockGrabber.setAAPLPrice(667.60)
        stockGrabber.setGOOGPrice(676.40)
class GetTheStock(object):
    # Placeholder class -- no behavior yet.
    pass
if __name__=="__main__":
    # Run the observer-pattern demo.
    test = GrabStocks()
    test.run()
|
class Car:
    """Demonstrates class attributes versus instance attributes."""

    # Class attribute: shared by all instances until shadowed.
    speed = 5

    def drive(self):
        """Create an instance attribute that shadows the class attribute."""
        self.speed = 10

    def output(self):
        """Print the class-level value and this instance's resolved value."""
        print ('Car.speed :', Car.speed)
        print ('self.speed :', self.speed)
# Class attribute, before any instance exists.
print(Car.speed)
print('-' * 30)
# Fresh instance: attribute lookup falls back to the class attribute.
myCar = Car()
myCar.output()
print('-' * 30)
# drive() creates an instance attribute that shadows the class one.
myCar.drive()
myCar.output()
print('-' * 30)
print(myCar.speed)
print(Car.speed)
# Instance assignment affects only this object; class assignment affects
# lookups on instances that have no attribute of their own.
myCar.speed = 100
Car.speed = 200
print('-' * 30)
print(myCar.speed)
print(Car.speed)
print('-' * 30)
import numpy as np
# 2x3 array of ones, scaled in place to all 3s.
a = np.ones((2, 3))
print("a:\n", a, end='\n')
a *= 3
print("a:\n", a, end='\n')
# Uniform samples in [0, 1); output is nondeterministic (no seed set).
b = np.random.random((2, 3))
b += a
print("b:\n", b, end='\n')
|
class Doctor:
    """Simulation actor that treats one patient at a time.

    Consultation time is (patient age // number) * 60 time units, where
    *number* is the rate parameter given at construction.
    """

    def __init__(self, number):
        self.patientAge = number        # rate divisor for consult duration
        self.currentPatient = None      # patient in treatment, or None
        self.timeRemaning = 0           # (sic) remaining consult time

    def tickTock(self):
        """Advance one time unit; release the patient when time runs out."""
        if self.currentPatient is not None:
            self.timeRemaning -= 1
            if self.timeRemaning == 0:
                self.currentPatient = None

    def isBusy(self):
        """Return True while a patient is being treated."""
        return self.currentPatient is not None

    def startNext(self, newpatient):
        """Begin treating *newpatient*, computing the consult duration."""
        self.currentPatient = newpatient
        self.timeRemaning = (newpatient.getAge() // self.patientAge) * 60
|
# -*- coding: utf-8 -*-
import unittest
from pythainlp.tokenize import THAI2FIT_TOKENIZER
from pythainlp.ulmfit import (
THWIKI_LSTM,
ThaiTokenizer,
document_vector,
merge_wgts,
post_rules_th,
post_rules_th_sparse,
pre_rules_th,
pre_rules_th_sparse,
process_thai,
)
from pythainlp.ulmfit.preprocess import (
fix_html,
lowercase_all,
remove_space,
replace_rep_after,
replace_rep_nonum,
replace_url,
replace_wrep_post,
replace_wrep_post_nonum,
rm_brackets,
rm_useless_newlines,
rm_useless_spaces,
spec_add_spaces,
ungroup_emoji,
)
from pythainlp.ulmfit.tokenizer import BaseTokenizer as base_tokenizer
import pandas as pd
import pickle
import torch
# fastai
import fastai
from fastai.text import *
# pythainlp
from pythainlp.ulmfit import *
class TestUlmfitPackage(unittest.TestCase):
    """Tests for pythainlp.ulmfit: tokenizers, the pre/post text-processing
    rules (sparse and dense variants), and the pretrained-model helpers
    document_vector and merge_wgts."""
    def test_ThaiTokenizer(self):
        self.thai = ThaiTokenizer()
        self.assertIsNotNone(self.thai.tokenizer("ทดสอบการตัดคำ"))
        self.assertIsNone(self.thai.add_special_cases(["แมว"]))
    def test_BaseTokenizer(self):
        self.base = base_tokenizer(lang="th")
        self.assertIsNotNone(self.base.tokenizer("ทดสอบ การ ตัด คำ"))
        self.assertIsNone(self.base.add_special_cases(["แมว"]))
    def test_load_pretrained(self):
        self.assertIsNotNone(THWIKI_LSTM)
    def test_pre_rules_th(self):
        self.assertIsNotNone(pre_rules_th)
    def test_post_rules_th(self):
        self.assertIsNotNone(post_rules_th)
    def test_pre_rules_th_sparse(self):
        self.assertIsNotNone(pre_rules_th_sparse)
    def test_post_rules_th_sparse(self):
        self.assertIsNotNone(post_rules_th_sparse)
    def test_fix_html(self):
        self.assertEqual(
            fix_html("Some HTML text<br />"), "Some HTML& text\n"
        )
    def test_rm_useless_spaces(self):
        self.assertEqual(
            rm_useless_spaces("Inconsistent use of spaces."),
            "Inconsistent use of spaces.",
        )
    def test_spec_add_spaces(self):
        self.assertEqual(
            spec_add_spaces("I #like to #put #hashtags #everywhere!"),
            "I # like to # put # hashtags # everywhere!",
        )
    def test_replace_rep_after(self):
        # Repeated character runs are collapsed to an xxrep marker + count.
        self.assertEqual(replace_rep_after("น้อยยยยยยยย"), "น้อยxxrep8 ")
    def test_replace_rep_nonum(self):
        # Same as above, but the nonum variant drops the repetition count.
        self.assertEqual(replace_rep_nonum("น้อยยยยยยยย"), "น้อย xxrep ")
    def test_replace_wrep_post(self):
        self.assertEqual(
            replace_wrep_post(["น้อย", "น้อย"]), ["xxwrep", "1", "น้อย"]
        )
        self.assertEqual(
            replace_wrep_post(["นก", "กา", "กา", "กา"]),
            ["นก", "xxwrep", "2", "กา"],
        )
    def test_replace_wrep_post_nonum(self):
        self.assertEqual(
            replace_wrep_post_nonum(["น้อย", "น้อย"]), ["xxwrep", "น้อย"]
        )
        self.assertEqual(
            replace_wrep_post_nonum(["นก", "กา", "กา", "กา"]),
            ["นก", "xxwrep", "กา"],
        )
    def test_remove_space(self):
        self.assertEqual(remove_space([" ", "น้อย", " ", "."]), ["น้อย", "."])
    def test_replace_url(self):
        self.assertEqual(replace_url("https://thainlp.org web"), "xxurl web")
    def test_rm_useless_newlines(self):
        self.assertEqual(rm_useless_newlines("text\n\n"), "text ")
    def test_rm_brackets(self):
        # Only empty bracket pairs are removed; filled ones are kept.
        self.assertEqual(rm_brackets("()()(ข้อความ)"), "(ข้อความ)")
        self.assertEqual(rm_brackets("[][][ข้อความ]"), "[ข้อความ]")
        self.assertEqual(rm_brackets("{}{}{ข้อความ}"), "{ข้อความ}")
    def test_ungroup_emoji(self):
        self.assertEqual(ungroup_emoji("👍👍👍"), ["👍", "👍", "👍"])
    def test_lowercase_all(self):
        self.assertEqual(
            lowercase_all("HeLlO ."), ["h", "e", "l", "l", "o", " ", "."]
        )
    def test_process_thai_sparse(self):
        text = "👍👍👍 #AnA มากกกก น้อยน้อย ().1146"
        actual = process_thai(text)
        # after pre_rules_th_sparse
        # >>> "👍👍👍 # Ana มาก xxrep น้้อยน้อย .1146"
        #
        # after tokenize with word_tokenize(engine="newmm")
        # >>> ["👍👍👍", " ", "#", " ","Ana", " ", "มาก", "xxrep",
        #   " ", "น้อย", "น้อย", " ", ".", "1146"]
        #
        # after post_rules_th
        # - remove whitespace token (" ")
        # >>> ["xxwrep, "👍", "#", "ana", "มาก",
        #   "xxrep", "xxwrep", "น้อย", ".", "1146"]
        expect = [
            "xxwrep",
            "👍",
            "#",
            "ana",
            "มาก",
            "xxrep",
            "xxwrep",
            "น้อย",
            ".",
            "1146",
        ]
        self.assertEqual(actual, expect)
    def test_process_thai_dense(self):
        text = "👍👍👍 #AnA มากกกก น้อยน้อย ().1146"
        actual = process_thai(
            text,
            pre_rules=pre_rules_th,
            post_rules=post_rules_th,
            tok_func=THAI2FIT_TOKENIZER.word_tokenize,
        )
        # after pre_rules_th
        # >>> "👍👍👍 # Ana มากxxrep4 น้้อยน้อย .1146"
        #
        # after tokenize with word_tokenize(engine="newmm")
        # >>> ["👍👍👍", " ", "#", "Ana", " ", "มาก", "xxrep", "4",
        #   " ", "น้อย", "น้อย", " ", ".", "1146"]
        # after post_rules_th
        # -- because it performs `replace_wrep_post` before `ungroup_emoji`,
        #    3 repetitive emoji are not marked with special token "xxwrep num"
        #
        # >>> ["👍", "👍","👍", " ", "#", "ana", " ", "มาก",
        #   "xxrep", "4", " ", "xxwrep", "1", "น้อย", " ",
        #   ".", "1146"]
        expect = [
            "👍",
            "👍",
            "👍",
            " ",
            "#",
            " ",
            "ana",
            " ",
            "มาก",
            "xxrep",
            "4",
            " ",
            "xxwrep",
            "1",
            "น้อย",
            " ",
            ".",
            "1146",
        ]
        self.assertEqual(actual, expect)
    def test_document_vector(self):
        # End-to-end: build a small fastai language model over the IMDB
        # sample data using the Thai wiki pretrained weights, then embed a
        # document with each aggregation mode.
        imdb = untar_data(URLs.IMDB_SAMPLE)
        dummy_df = pd.read_csv(imdb/'texts.csv')
        thwiki = THWIKI_LSTM
        thwiki_itos = pickle.load(open(thwiki['itos_fname'], 'rb'))
        thwiki_vocab = fastai.text.transform.Vocab(thwiki_itos)
        tt = Tokenizer(
            tok_func=ThaiTokenizer,
            lang='th',
            pre_rules=pre_rules_th,
            post_rules=post_rules_th
        )
        processor = [
            TokenizeProcessor(
                tokenizer=tt, chunksize=10000, mark_fields=False
            ),
            NumericalizeProcessor(
                vocab=thwiki_vocab, max_vocab=60000, min_freq=3
            )
        ]
        data_lm = (
            TextList.from_df(
                dummy_df,
                imdb,
                cols=['text'],
                processor=processor
            )
            .split_by_rand_pct(0.2)
            .label_for_lm()
            .databunch(bs=64)
        )
        data_lm.sanity_check()
        config = dict(
            emb_sz=400,
            n_hid=1550,
            n_layers=4,
            pad_token=1,
            qrnn=False,
            tie_weights=True,
            out_bias=True,
            output_p=0.25,
            hidden_p=0.1,
            input_p=0.2,
            embed_p=0.02,
            weight_p=0.15
        )
        trn_args = dict(drop_mult=0.9, clip=0.12, alpha=2, beta=1)
        learn = language_model_learner(
            data_lm,
            AWD_LSTM,
            config=config,
            pretrained=False,
            **trn_args
        )
        learn.load_pretrained(**thwiki)
        self.assertIsNotNone(
            document_vector('วันนี้วันดีปีใหม่', learn, data_lm)
        )
        self.assertIsNotNone(
            document_vector('วันนี้วันดีปีใหม่', learn, data_lm, agg="sum")
        )
        # An unknown aggregation mode must raise.
        with self.assertRaises(ValueError):
            document_vector('วันนี้วันดีปีใหม่', learn, data_lm, agg='abc')
    def test_merge_wgts(self):
        wgts = {'0.encoder.weight': torch.randn(5,3)}
        itos_pre = ["แมว", "คน", "หนู"]
        itos_new = ["ปลา", "เต่า", "นก"]
        em_sz = 3
        self.assertIsNotNone(merge_wgts(em_sz, wgts, itos_pre, itos_new))
|
#!/usr/bin/env python
# coding:utf-8
# N.B. : Some of these docstrings are written in reSTructured format so that
# Sphinx can use them directly with fancy formatting.
# In the context of a REST application, this module must be loaded first as it
# is the one that instantiates the Flask Application on which other modules
# will depend.
"""
This module defines the generic REST API for annotation services as defined by
the CANARIE API specification. See :
https://collaboration.canarie.ca/elgg/file/download/849
"""
# -- Standard lib ------------------------------------------------------------
import collections
import datetime
import logging
# -- 3rd party ---------------------------------------------------------------
from flask import render_template
from flask import jsonify
# -- Setup and configuration -------------------------------------------------
from .app_objects import APP, CELERY_APP
# -- Project specific --------------------------------------------------------
from .utility_rest import set_html_as_default_response
from .utility_rest import get_canarie_api_response
from .utility_rest import validate_service_route
from .utility_rest import make_error_response
from .utility_rest import request_wants_json
from .utility_rest import mongo
from .reverse_proxied import ReverseProxied
from .utility_rest import AnyIntConverter
from . import __meta__
# Handle Reverse Proxy setups
APP.wsgi_app = ReverseProxied(APP.wsgi_app)
# Service start time; reported as "lastReset" by the /stats endpoint.
START_UTC_TIME = datetime.datetime.utcnow()
FL_API_URL = APP.config['FLOWER_API_URL']
# REST requests required by CANARIE
CANARIE_API_VALID_REQUESTS = ['doc',
                              'releasenotes',
                              'support',
                              'source',
                              'tryme',
                              'licence',
                              'provenance']
# HTML errors for which the service provides a custom error page
HANDLED_HTML_ERRORS = [400, 404, 405, 500, 503]
HANDLED_HTML_ERRORS_STR = ", ".join(map(str, HANDLED_HTML_ERRORS))
# Map an error handler for each handled HTML error
# Errors handled here are the ones that occur internally in the application
#
# The loop replace the following code for each handled html error
# @APP.errorhandler(400)
# def page_not_found_400(some_error):
#     return handle_error(400, str(some_error))
#
# For the lambda syntax see the following page explaining the requirement for
# status_code_copy=status_code
# http://stackoverflow.com/questions/938429/scope-of-python-lambda-functions-
# and-their-parameters/938493#938493
for status_code in HANDLED_HTML_ERRORS:
    APP.register_error_handler(status_code,
                               lambda more_info, status_code_copy = status_code: \
                               make_error_response(html_status=status_code_copy,
                                                   html_status_response=str(more_info)))
@APP.errorhandler(Exception)
def handle_exceptions(exception_instance):
    """
    Generate error response for raised exceptions.

    :param exception_instance: Exception instance.
    :returns: Error response built by make_error_response.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Generating error response for the exception %s",
                 repr(exception_instance))
    logger.exception(exception_instance)
    if APP.debug:
        logger.info("In debug mode, re-raising exception")
        # Bare raise relies on Flask invoking error handlers from within the
        # active except block, so the original traceback is preserved.
        raise
    status_code = None
    try:
        status_code = exception_instance.status_code
    except AttributeError:
        logger.info("Processing exception which has no attribute "
                    "«status_code»")
    logger.debug("Status code is %s", status_code)
    # Bug fix: BaseException.message does not exist on Python 3 (deprecated
    # since 2.6), so the original raised a secondary AttributeError here
    # while handling the first error. str() yields the message text on both
    # Python 2 and 3.
    response = make_error_response(
        html_status=status_code,
        html_status_response=str(exception_instance),
        vesta_exception=exception_instance)
    return response
# -- Flask routes ------------------------------------------------------------
# Register the converter so routes can match a fixed set of integer codes.
APP.url_map.converters['any_int'] = AnyIntConverter
@APP.route("/<any_int(" + HANDLED_HTML_ERRORS_STR + "):status_code_str>")
def extern_html_error_handler(status_code_str):
    """
    Handle errors that occur externally provided that Apache is configured so
    that it uses this route for handling errors.

    For this add this line for each handled html errors in the Apache
    configuration::

       ErrorDocument 400 <Rest root>/400

    :param status_code_str: The HTTP status code from the URL, as a string.
    :returns: The custom error page for that status code.
    """
    return make_error_response(html_status=int(status_code_str))
def global_info():
    """
    Return an overview of the services hosted by this REST instance
    """
    overview = {'version': __meta__.API_VERSION,
                'services': APP.config['WORKER_SERVICES']}
    return jsonify(overview)
@APP.route("/info")
@APP.route("/service/info")
@APP.route("/<service_route>/info")
@APP.route("/<service_route>/service/info")
def info(service_route='.'):
"""
Required by CANARIE
A service can define it's service_route as '.', in which case, the URL
doesn't have to contain a route token
"""
logger = logging.getLogger(__name__)
# JSON is used by default but the Canarie API requires html as default
set_html_as_default_response()
# Handle the special case where info is requested without any route
# In this case we return the global info
if service_route == '.' and \
service_route not in APP.config['WORKER_SERVICES']:
return global_info()
service_name = validate_service_route(service_route)
service_info_categories = ['name',
'synopsis',
'institution',
'releaseTime',
'supportEmail',
'category',
'researchSubject']
worker_config = APP.config['WORKER_SERVICES'][service_name]
service_info = []
service_info.append(('version', '{0}_{1}'.
format(__meta__.API_VERSION,
worker_config['version'])))
for category in service_info_categories:
cat = worker_config[category]
service_info.append((category, cat))
tags = worker_config['tags']
service_info.append(('tags', tags.split(',')))
# Get information on registered workers ---------------------
queue_name = worker_config['celery_queue_name']
logger.info("Refreshing knowledge on all worker queues")
inspector = CELERY_APP.control.inspect()
active_queues = inspector.active_queues()
logger.debug("Worker info : %s", active_queues)
logger.debug("Queue info : %s", queue_name)
active_workers = 0
if active_queues:
for _ql_ in active_queues.values():
for _q_ in _ql_:
if queue_name in _q_['name']:
active_workers += 1
logger.info("There are %s known workers found", active_workers)
service_info.append(('activeWorkers', active_workers))
service_info = collections.OrderedDict(service_info)
if request_wants_json():
return jsonify(service_info)
return render_template('default.html', Title="Info", Tags=service_info)
@APP.route("/stats")
@APP.route("/<service_route>/stats")
@APP.route("/<service_route>/service/stats")
def stats(service_route='.'):
"""
Required by CANARIE.
A service can define it's service_route as '.', in which case, the URL
doesn't have to contain a route token
"""
logger = logging.getLogger(__name__)
logger.info("Requested stats for service %s", service_route)
# JSON is used by default but the Canarie API requires html as default
set_html_as_default_response()
service_name = validate_service_route(service_route)
service_stats = {}
service_stats['lastReset'] = START_UTC_TIME.strftime('%Y-%m-%dT%H:%M:%SZ')
service_stats['invocations'] = mongo.db.Invocations.count({"datetime": {"$gt": START_UTC_TIME}, "service": service_name})
if request_wants_json():
return jsonify(service_stats)
return render_template('default.html', Title="Stats", Tags=service_stats)
@APP.route("/")
@APP.route("/<any(" +
",".join(CANARIE_API_VALID_REQUESTS) + "):api_request>")
@APP.route("/<service_route>/<any(" +
",".join(CANARIE_API_VALID_REQUESTS) + "):api_request>")
@APP.route("/<service_route>/service/<any(" +
",".join(CANARIE_API_VALID_REQUESTS) + "):api_request>")
def simple_requests_handler(api_request='home', service_route='.'):
"""
Handle simple requests required by CANARIE
A service can define it's service_route as '.', in which case, the URL
doesn't have to contain a route token
"""
# JSON is used by default but the Canarie API requires html as default
set_html_as_default_response()
return get_canarie_api_response(service_route, api_request)
def configure_home_route():
    """
    Configure the route /<service_route>

    Cannot be done with the decorator because we must know the exact routes
    name and not match any keyword since it will conflict with other route like
    /info, /doc, etc.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Current configuration is : %s", APP.config)
    logger.debug("Root path is %s", APP.root_path)
    logger.info("Static path is %s", APP.static_folder)
    known_services_routes = list(APP.config['WORKER_SERVICES'].keys())
    logger.info("Configuring home route for services %s",
                known_services_routes)
    # '.' means "no route token" and must not appear in the <any(...)> rule.
    routes = [route for route in known_services_routes if route != '.']
    if routes:
        rule = '/<any({0}):service_route>/'.format(','.join(routes))
        logger.debug("Adding route rule : {0}".format(rule))
        APP.add_url_rule(rule, None, simple_requests_handler)
    logger.debug("Flask url map: {0}".format(APP.url_map))
import datetime
import enum
class DayOfWeek(enum.Enum):
    """Enumeration of the days of the week, ordered sunday..saturday.

    Bug fixes relative to the original:
    - trailing commas made every value except ``saturday`` a one-element
      tuple (e.g. ``("sunday",)``); values are now consistently plain strings.
    - defining ``__eq__`` without ``__hash__`` implicitly set ``__hash__``
      to None, making members unusable in sets and as dict keys; a
      name-based ``__hash__`` restores hashability.
    """

    sunday = "sunday"        # Domingo
    monday = "monday"        # Segunda-feira
    tuesday = "tuesday"      # Terça-feira
    wednesday = "wednesday"  # Quarta-feira
    thursday = "thursday"    # Quinta-feira
    friday = "friday"        # Sexta-feira
    saturday = "saturday"    # Sábado

    def __eq__(self, other):
        # Name-based equality, robust against `other` being None or an
        # unrelated object that has no `name` attribute.
        return bool(other is not None
                    and self.name == getattr(other, "name", None))

    def __hash__(self):
        # Must accompany the custom __eq__: equal members hash equally.
        return hash(self.name)

    @staticmethod
    def all():
        """Return all members as a list, sunday first."""
        return list(DayOfWeek)
def parse(value) -> DayOfWeek:
    """Convert a member name or an ISO weekday number into a DayOfWeek.

    :param value: a member name (e.g. "monday") or an int following
        ``date.isoweekday()`` semantics (Monday == 1 ... Sunday == 7);
        0 is also accepted as Sunday.
    :raises Exception: when the value is neither a known name nor an int
        in range.
    """
    if isinstance(value, str):
        # Member lookup by name replaces the original eval() call, which
        # would execute arbitrary code for a crafted input string.
        try:
            return DayOfWeek[value]
        except KeyError:
            raise Exception("DayOfWeek inválido!") from None
    days = DayOfWeek.all()
    if isinstance(value, int) and 0 <= value <= len(days):
        # isoweekday() yields 7 for Sunday while the list starts with
        # sunday at index 0; the original indexed days[7] and raised
        # IndexError every Sunday. Wrap 7 back to index 0.
        return days[value % len(days)]
    raise Exception("DayOfWeek inválido!")
def parse_today():
    # date.isoweekday(): Monday == 1 ... Sunday == 7.
    # NOTE(review): DayOfWeek.all() starts at sunday (index 0), so index 7
    # is out of range on Sundays -- confirm the intended mapping in parse().
    return parse(datetime.date.today().isoweekday())
|
import heapq
import time
from typing import AbstractSet, Callable, Dict, Optional, Sequence, Tuple
from pddlenv import env
from pddlenv.base import Action, Predicate, Problem
from pddlenv.search import base, utils
Heuristic = Callable[[AbstractSet[Predicate], Problem], float]
class GreedyBestFirst:
    """Greedy best-first search over PDDL environment states.

    States are expanded strictly in order of lowest heuristic value (no
    path-cost term), so the returned plan is not guaranteed optimal.
    """
    def __init__(self, heuristic: Heuristic, logger: Optional[base.Logger] = None):
        # heuristic(literals, problem) -> float; lower values expand first.
        self.heuristic = heuristic
        self.logger = logger
    def search(self,
               state: env.EnvState,
               time_limit: Optional[float] = None,
               expansion_limit: Optional[int] = None) -> Optional[Sequence[Action]]:
        """Search for an action sequence from ``state`` to the goal.

        :param state: initial environment state.
        :param time_limit: optional wall-clock budget in seconds.
        :param expansion_limit: optional cap on expanded states.
        :return: the plan reaching the goal, or None when the frontier
            empties or a budget is exhausted first.
        """
        expanded_states = 0
        start_time = time.perf_counter()
        # we assume that the dynamics will never change the problem instance
        problem = state.problem
        heap = [base.Candidate(0., state)]
        # Maps each discovered state to its (predecessor, action); the root
        # maps to None so plan reconstruction knows where to stop.
        parents: Dict[env.EnvState, Optional[Tuple[env.EnvState, Action]]] = {state: None}
        dynamics = env.PDDLDynamics()
        while heap:
            # Budget checks happen before each expansion.
            if time_limit and time_limit <= time.perf_counter() - start_time:
                break
            if expansion_limit and expansion_limit <= expanded_states:
                break
            expanded_states += 1
            state = heapq.heappop(heap).state
            actions, timesteps = dynamics.sample_transitions(state)
            next_states = [timestep.observation for timestep in timesteps]
            for action, next_state in zip(actions, next_states):
                literals = next_state.literals
                if next_state not in parents:
                    parents[next_state] = (state, action)
                    heapq.heappush(
                        heap, base.Candidate(self.heuristic(literals, problem), next_state))
                    # Goal test on generation (not expansion): return as soon
                    # as a newly discovered state satisfies the goal.
                    if problem.goal_satisfied(literals):
                        if self.logger is not None:
                            self.logger.write({"expanded_states": expanded_states,
                                               "search_time": time.perf_counter() - start_time})
                        return utils.generate_plan(next_state, parents)
        # Frontier exhausted or budget hit without reaching the goal.
        if self.logger is not None:
            self.logger.write({"expanded_states": expanded_states,
                               "search_time": time.perf_counter() - start_time})
        return None
|
from flask import Flask, request, redirect, url_for, flash, jsonify
from features_calculation import doTheCalculation
import json, pickle
import pandas as pd
import numpy as np
app = Flask(__name__)
@app.route('/api/makecalc/', methods=['POST'])
def makecalc():
    """
    Function run at each API call

    Reads the request's JSON body, runs the feature pipeline and the loaded
    model, and returns {row_index: prediction} as JSON.
    """
    payload = request.get_json()
    frame = pd.read_json(json.dumps(payload), orient='index', convert_dates=['dteday'])
    print(frame)
    predictions = model.predict(doTheCalculation(frame))
    res = {index: prediction for index, prediction in enumerate(predictions)}
    return jsonify(res)
if __name__ == '__main__':
    # Load the pickled model once at startup, then serve requests.
    # NOTE(review): the file handle passed to pickle.load is never closed;
    # a `with` block would be tidier.
    modelfile = 'modelfile.pickle'
    model = pickle.load(open(modelfile, 'rb'))
    print("loaded OK")
    app.run(debug=True)
|
class Solution(object):
    """LeetCode 695: return the area of the largest island (4-connected 1s)
    in a binary grid. The grid is mutated: visited cells are set to 2."""
    def maxAreaOfIsland(self, grid):
        self.maxArea= 0  # largest island area found so far
        row = len(grid)  # number of rows in the grid
        col = len(grid[0])  # number of columns in the grid
        for i in range(row):
            for j in range(col):  # scan left-to-right, top-to-bottom
                if grid[i][j] == 1:  # found unvisited land
                    current = 1
                    self.dfs(i, j, current, grid)  # measure this island's area
        return self.maxArea  # finally return the largest area
    def dfs(self, k, z, current, grid):
        # (k, z): cell coordinates; current: running area of this island;
        # grid: the map being explored.
        grid[k][z] = 2  # mark this cell visited first
        if k > 0 and grid[k-1][z] == 1:  # up, if in bounds and land
            current= self.dfs(k-1, z, current+1, grid)  # recurse, carrying the running area
        if k < (len(grid)-1) and grid[k+1][z] == 1:  # down, if in bounds and land
            current= self.dfs(k+1, z, current+1, grid)
        if z > 0 and grid[k][z - 1] == 1:  # left, if in bounds and land
            current= self.dfs(k, z-1, current+1, grid)
        if z < (len(grid[0])-1) and grid[k][z+1] == 1:  # right, if in bounds and land
            current= self.dfs(k, z+1, current+1, grid)
        self.maxArea= max(self.maxArea, current)  # update the running maximum
        return current
# -*-coding:"utf-8"-*-
# import pymongo
import tweepy
import time
import random
# joguinho do papel com elementos da grande família!!!!!!!!!!
'''
Quem
Com quem
Fazendo
Onde
Chegou (alguém)
E disse
Moral da história
'''
def popozonize(arqs):
    """Build a story by picking one random line from each file in *arqs*.

    Returns the picked lines each prefixed with a newline, concatenated in
    order ("\\n<line1>\\n<line2>..."). Also prints the elapsed wall-clock
    time, as the original did.

    Improvements: files are opened with `with` (the original leaked the
    handle if reading raised), and the list is iterated directly instead of
    via range(len(...)).
    """
    # testando tempo de execução (timing the execution)
    start = time.time()
    conto = ""
    for arq in arqs:
        # Pick a random line from the current file.
        with open(arq, "r") as rodada_atual:
            linhas = rodada_atual.read().splitlines()
        conto = conto + "\n" + random.choice(linhas)
    end = time.time()
    print(end - start)
    return conto
# escrever função que junte as frases q provavelmente façam mais sentido
if __name__ == "__main__":
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
quem = "nomes.txt"
com_quem = "nomes_2.txt"
fazendo = "fazendo.txt"
onde = "lugares.txt"
chegou_alguem = "nomes_3.txt"
e_disse = "frases_1.txt"
moral = "frases_2.txt"
historia = [quem, com_quem, fazendo, onde, chegou_alguem, e_disse, moral]
while(True):
lineuzinho = popozonize(historia)
lineuzar = api.update_status(status=lineuzinho)
time.sleep(900)
|
from Config import Config
class Browser:
    """Thin wrapper exposing the driver configured in Config."""
    def getDriver(self):
        # NOTE(review): Config.environment is presumably a WebDriver
        # instance (or factory result) -- confirm in Config.
        self.driver = Config.environment
        return self.driver
#!/usr/bin/python3
"""
Splitter plików Dr.a Makuchowskiego.
Wymaga do działania:
1. pliku: neh.data.txt
2. folderu splitted
"""
file = open("neh.data.txt", "r")
lines = file.readlines()
file.close()
print("Rozpoczynam parsowanie pliku")
for line in lines:
if "data" in line:
file.close()
file = open(("splitted/" + line)[:-2], "w")
else:
file.write(line)
file.close()
print("Parsowanie zakończone")
|
"""
Вивести дійсні числа зі списку.
"""
my_list = ['fgjio', 'tthf', 56, 59, 'ufiud', 54.6, 58.3, 77.3]
index_float= 0
for elem in my_list:
elem = index_float
index_float += 1
if index_float isinstance (float):
print(elem)
|
import math, os
from helpers import analytics
# Start the project-local timing/memory instrumentation.
analytics.monitor()
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "bin", "p099_base_exp.txt")
baseExpPairsFile = open(filename, "r")
# Each line is "base,exponent"; parse into [base, exponent] int pairs.
# NOTE(review): the handle is never closed -- harmless for a one-shot
# script, but a `with` block would be tidier.
pairs = [[int(n) for n in line.split(",")] for line in baseExpPairsFile]
def main():
    """Return the 1-based line number of the base/exponent pair with the
    greatest value, comparing exponent*log(base) instead of computing the
    (astronomically large) powers directly."""
    largest = 0
    line = 0
    for lineno, (base, exponent) in enumerate(pairs, start=1):
        magnitude = exponent * math.log(base)
        if magnitude > largest:
            largest = magnitude
            line = lineno
    return line
# Print the answer plus elapsed time and peak memory from the profiler.
print(main(), analytics.lap(), analytics.maxMem())
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This file contains the implementation of a generic graph for the
Travelling Salesman Problem (TSP).
Author: Mattia Neroni, Ph.D., Eng. (Set 2021).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import networkx as nx
import networkx.algorithms.shortest_paths.dense as nxalg
import random
import itertools
import math
import matplotlib.pyplot as plt
# Default parameters:
# NODES = 30: The number of nodes in the graph.
# SPACE_SIZE = (1000,1000) : The size of the area in which these nodes are placed.
def euclidean(x, y):
    """
    The Euclidean distance between two coordinates expressed as two
    tuples, truncated to an int.
    """
    dx = x[0] - y[0]
    dy = x[1] - y[1]
    return int(math.sqrt(dx ** 2 + dy ** 2))
class TSP (object):
    """
    An instance of this class represents a generic complete graph for the
    Travelling Salesman Problem, with nodes placed uniformly at random.
    """

    def __init__ (self, nodes = 30, space_size = (1000, 1000)):
        """
        :param nodes: the number of nodes in the graph.
        :param space_size: the (width, height) of the area the nodes occupy.
        """
        graph = nx.Graph()
        coordinates = dict()
        # Place each node at random integer coordinates inside the area.
        for node in range(nodes):
            coordinates[node] = (random.randint(0, space_size[0]),
                                 random.randint(0, space_size[1]))
            graph.add_node(node)
        # Connect every ordered pair of nodes with its Euclidean length.
        for source, target in itertools.permutations(range(nodes), 2):
            graph.add_edge(source, target,
                           weight=euclidean(coordinates[source], coordinates[target]))
        self.G = graph
        self.nodes = coordinates
        self.distance_matrix = nxalg.floyd_warshall_numpy(graph)

    def plot (self):
        """
        This method plot the generated graph.
        """
        nx.draw(self.G, pos=self.nodes, with_labels=True, font_weight='bold')
        plt.show()
|
import pytest
import allure
import json
#=======================================================================================================================
#==================================================== Code 200 =========================================================
#=======================================================================================================================
#@allure.issue("https://trac.brightpattern.com/ticket/22667")
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0")
class Test_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0():
    """Paging from index 0: expects HTTP 200 and all three seeded campaign
    records in the response body."""
    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0')
    def test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0):
        print("request_result_status_code : ", get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.status_code)
        assert "200" in str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.status_code), "Answer status not 200 ; actual status code : " + str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.status_code)
    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0')
    def test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0):
        print("request_result_text : ", get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.text)
        # Expected body: the three seeded records (indices 0..2), untouched.
        status = '[{"entry":{"last name":"Name_Last_C1","first name":"Name_First_C1","agent id":"Test.C1","phone2":"8005","date/time":"07-07-2071","caller id":"101","integer":"1","phone1":"7005"},"index":0,"status":{"totalAttempts":0,"completed":false}},{"entry":{"last name":"Name_Last_C2","first name":"Name_First_C2","agent id":"Test.C2","phone2":"8006","date/time":"07-07-2072","caller id":"102","integer":"2","phone1":"7006"},"index":1,"status":{"totalAttempts":0,"completed":false}},{"entry":{"last name":"Name_Last_C3","first name":"Name_First_C3","agent id":"Test.C3","phone2":"8007","date/time":"07-07-2073","caller id":"103","integer":"3","phone1":"7007"},"index":2,"status":{"totalAttempts":0,"completed":false}}]'
        assert status in str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.text), "Answer text not " + status + " ; actual message : " + str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_0.text)
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2")
class Test_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2():
    """Paging from index 2: expects HTTP 200 and only the third seeded
    record in the response body."""
    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2')
    def test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2):
        print("request_result_status_code : ", get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.status_code)
        assert "200" in str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.status_code), "Answer status not 200 ; actual status code : " + str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.status_code)
    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2')
    def test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2):
        print("request_result_text : ", get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.text)
        # Expected body: only the record at index 2.
        status = '[{"entry":{"last name":"Name_Last_C3","first name":"Name_First_C3","agent id":"Test.C3","phone2":"8007","date/time":"07-07-2073","caller id":"103","integer":"3","phone1":"7007"},"index":2,"status":{"totalAttempts":0,"completed":false}}]'
        assert status in str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.text), "Answer text not " + status + " ; actual message : " + str(
            get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_2.text)
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3")
class Test_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3:
    """POST starting from index 3 (past the last record): expects HTTP 200 with an empty list."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3')
    def test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3):
        response = get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "200" in code, "Answer status not 200 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3')
    def test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3):
        response = get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_start_index_from_3
        print("request_result_text : ", response.text)
        # No records exist at index >= 3, so the payload is an empty JSON array.
        status = '[]'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0")
class Test_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0:
    """POST with maxSize=1 and fromIndex > 0: expects HTTP 200 with exactly the record at index 1."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0')
    def test_check_status_code_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0):
        response = get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "200" in code, "Answer status not 200 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0')
    def test_check_answer_text_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0(self, get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0):
        response = get_all_records_post_request_with_valid_list_campaign_fromtime_and_maxsize_maxsize_1_and_fromindex_more_than_0
        print("request_result_text : ", response.text)
        # Exactly one record (index 1) should come back when maxSize is 1.
        status = '[{"entry":{"last name":"Name_Last_C2","first name":"Name_First_C2","agent id":"Test.C2","phone2":"8006","date/time":"07-07-2072","caller id":"102","integer":"2","phone1":"7006"},"index":1,"status":{"totalAttempts":0,"completed":false}}]'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_maxsize_set_to_1000")
class Test_post_request_with_maxsize_set_to_1000:
    """POST with maxSize at the upper limit (1000): expects HTTP 200 with 1000 records."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_status_code_post_request_with_maxsize_set_to_1000')
    def test_check_status_code_post_request_with_maxsize_set_to_1000(self, get_all_records_post_request_with_maxsize_set_to_1000):
        response = get_all_records_post_request_with_maxsize_set_to_1000
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "200" in code, "Answer status not 200 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 200")
    @allure.step('test_check_answer_text_post_request_with_maxsize_set_to_1000')
    def test_check_answer_text_post_request_with_maxsize_set_to_1000(self, get_all_records_post_request_with_maxsize_set_to_1000):
        response = get_all_records_post_request_with_maxsize_set_to_1000
        print("request_result_text : ", response.text)
        # Parse the JSON array and count records instead of string-matching.
        records = json.loads(response.text)
        status = len(records)
        print("status_length : ", status)
        assert status == 1000, "Answer text length not 1000 ; actual message length : " + str(status)
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#==================================================== Code 400 =========================================================
#=======================================================================================================================
@allure.issue("https://trac.brightpattern.com/ticket/22720")
@pytest.mark.usefixtures("get_all_records_post_request_with_invalid_fromindex_value_fromindex_alphabetical")
class Test_post_request_with_invalid_fromindex_value_fromindex_alphabetical:
    """POST with an alphabetical (non-numeric) fromIndex value: expects HTTP 400."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_invalid_fromindex_value_fromindex_alphabetical')
    def test_check_status_code_post_request_with_invalid_fromindex_value_fromindex_alphabetical(self, get_all_records_post_request_with_invalid_fromindex_value_fromindex_alphabetical):
        response = get_all_records_post_request_with_invalid_fromindex_value_fromindex_alphabetical
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_invalid_fromindex_value_fromindex_alphabetical')
    def test_check_answer_text_post_request_with_invalid_fromindex_value_fromindex_alphabetical(self, get_all_records_post_request_with_invalid_fromindex_value_fromindex_alphabetical):
        response = get_all_records_post_request_with_invalid_fromindex_value_fromindex_alphabetical
        print("request_result_text : ", response.text)
        # TODO: the expected error message was never filled in (see ticket 22720);
        # this assertion cannot pass until the real server response is recorded here.
        status = '<Fill this response>'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@allure.issue("https://trac.brightpattern.com/ticket/22721")
@pytest.mark.usefixtures("get_all_records_post_request_with_invalid_maxsize_value_maxsize_alphabetical")
class Test_post_request_with_invalid_maxsize_value_maxsize_alphabetical:
    """POST with an alphabetical (non-numeric) maxSize value: expects HTTP 400.

    NOTE(review): the original test method names and @allure.step labels said
    "fromindex" -- a copy-paste leftover from the previous class -- although
    this class exercises the maxSize parameter. They are renamed here so
    reports and test selection match the scenario actually tested.
    """

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_invalid_maxsize_value_maxsize_alphabetical')
    def test_check_status_code_post_request_with_invalid_maxsize_value_maxsize_alphabetical(self, get_all_records_post_request_with_invalid_maxsize_value_maxsize_alphabetical):
        response = get_all_records_post_request_with_invalid_maxsize_value_maxsize_alphabetical
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_invalid_maxsize_value_maxsize_alphabetical')
    def test_check_answer_text_post_request_with_invalid_maxsize_value_maxsize_alphabetical(self, get_all_records_post_request_with_invalid_maxsize_value_maxsize_alphabetical):
        response = get_all_records_post_request_with_invalid_maxsize_value_maxsize_alphabetical
        print("request_result_text : ", response.text)
        # TODO: the expected error message was never filled in (see ticket 22721);
        # this assertion cannot pass until the real server response is recorded here.
        status = '<Fill this response>'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_maxsize_but_without_fromindex")
class Test_post_request_with_valid_list_campaign_maxsize_but_without_fromindex:
    """POST supplying maxSize but omitting fromIndex: expects HTTP 400 "invalid parameters".

    NOTE(review): the original test method names and @allure.step labels were
    copy-pasted from the alphabetical-fromindex class; they are renamed here
    to describe the missing-fromIndex scenario this class actually covers.
    """

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_maxsize_but_without_fromindex')
    def test_check_status_code_post_request_with_valid_list_campaign_maxsize_but_without_fromindex(self, get_all_records_post_request_with_valid_list_campaign_maxsize_but_without_fromindex):
        response = get_all_records_post_request_with_valid_list_campaign_maxsize_but_without_fromindex
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_maxsize_but_without_fromindex')
    def test_check_answer_text_post_request_with_valid_list_campaign_maxsize_but_without_fromindex(self, get_all_records_post_request_with_valid_list_campaign_maxsize_but_without_fromindex):
        response = get_all_records_post_request_with_valid_list_campaign_maxsize_but_without_fromindex
        print("request_result_text : ", response.text)
        status = 'invalid parameters'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_campaign_fromindex_but_without_maxsize")
class Test_post_request_with_valid_list_campaign_fromindex_but_without_maxsize:
    """POST supplying fromIndex but omitting maxSize: expects HTTP 400 "invalid parameters"."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_valid_list_campaign_fromindex_but_without_maxsize')
    def test_check_status_code_post_request_with_valid_list_campaign_fromindex_but_without_maxsize(self, get_all_records_post_request_with_valid_list_campaign_fromindex_but_without_maxsize):
        response = get_all_records_post_request_with_valid_list_campaign_fromindex_but_without_maxsize
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_valid_list_campaign_fromindex_but_without_maxsize')
    def test_check_answer_text_post_request_with_valid_list_campaign_fromindex_but_without_maxsize(self, get_all_records_post_request_with_valid_list_campaign_fromindex_but_without_maxsize):
        response = get_all_records_post_request_with_valid_list_campaign_fromindex_but_without_maxsize
        print("request_result_text : ", response.text)
        status = 'invalid parameters'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_maxsize_set_to_1001")
class Test_post_request_with_maxsize_set_to_1001:
    """POST with maxSize one over the limit (1001): expects HTTP 400 with a too-large message."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_maxsize_set_to_1001')
    def test_check_status_code_post_request_with_maxsize_set_to_1001(self, get_all_records_post_request_with_maxsize_set_to_1001):
        response = get_all_records_post_request_with_maxsize_set_to_1001
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_maxsize_set_to_1001')
    def test_check_answer_text_post_request_with_maxsize_set_to_1001(self, get_all_records_post_request_with_maxsize_set_to_1001):
        response = get_all_records_post_request_with_maxsize_set_to_1001
        print("request_result_text : ", response.text)
        status = 'maxSize is too large (no more than 1000)'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@allure.issue("https://trac.brightpattern.com/ticket/24665")
@pytest.mark.usefixtures("get_all_records_post_request_with_incorrect_body_format_typization")
class Test_post_request_with_incorrect_body_format_typization:
    """POST whose body is a JSON array instead of an object: expects HTTP 400 with a parser error."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_status_code_post_request_with_incorrect_body_format_typization')
    def test_check_status_code_post_request_with_incorrect_body_format_typization(self, get_all_records_post_request_with_incorrect_body_format_typization):
        response = get_all_records_post_request_with_incorrect_body_format_typization
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "400" in code, "Answer status not 400 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 400")
    @allure.step('test_check_answer_text_post_request_with_incorrect_body_format_typization')
    def test_check_answer_text_post_request_with_incorrect_body_format_typization(self, get_all_records_post_request_with_incorrect_body_format_typization):
        response = get_all_records_post_request_with_incorrect_body_format_typization
        print("request_result_text : ", response.text)
        # Gson-style parser error expected from the server side.
        status = "Expected BEGIN_OBJECT but was BEGIN_ARRAY at line 1 column 2 path $"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#==================================================== Code 404 =========================================================
#=======================================================================================================================
@allure.issue("https://trac.brightpattern.com/ticket/22717")
@pytest.mark.usefixtures("get_all_records_post_request_with_valid_list_and_campaign_that_are_not_associated")
class Test_post_request_with_valid_list_and_campaign_that_are_not_associated:
    """POST with a valid list and campaign that are not linked to each other: expects HTTP 404."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_status_code_post_request_with_valid_list_and_campaign_that_are_not_associated')
    def test_check_status_code_post_request_with_valid_list_and_campaign_that_are_not_associated(self, get_all_records_post_request_with_valid_list_and_campaign_that_are_not_associated):
        response = get_all_records_post_request_with_valid_list_and_campaign_that_are_not_associated
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "404" in code, "Answer status not 404 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_answer_text_post_request_with_valid_list_and_campaign_that_are_not_associated')
    def test_check_answer_text_post_request_with_valid_list_and_campaign_that_are_not_associated(self, get_all_records_post_request_with_valid_list_and_campaign_that_are_not_associated):
        response = get_all_records_post_request_with_valid_list_and_campaign_that_are_not_associated
        print("request_result_text : ", response.text)
        status = 'campaign is not found'
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_with_invalid_url")
class Test_post_request_with_invalid_url:
    """POST to an invalid endpoint URL: expects HTTP 404."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_status_code_post_request_with_invalid_url')
    def test_check_status_code_post_request_with_invalid_url(self, get_all_records_post_request_with_invalid_url):
        response = get_all_records_post_request_with_invalid_url
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "404" in code, "Answer status not 404 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_answer_text_post_request_with_invalid_url')
    def test_check_answer_text_post_request_with_invalid_url(self, get_all_records_post_request_with_invalid_url):
        response = get_all_records_post_request_with_invalid_url
        print("request_result_text : ", response.text)
        status = "HTTP 404 Not Found"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
@pytest.mark.usefixtures("get_all_records_post_request_to_the_non_existent_list")
class Test_post_request_to_the_non_existent_list:
    """POST referencing a calling list that does not exist: expects HTTP 404."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_status_code_post_request_to_the_non_existent_list')
    def test_check_status_code_post_request_to_the_non_existent_list(self, get_all_records_post_request_to_the_non_existent_list):
        response = get_all_records_post_request_to_the_non_existent_list
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "404" in code, "Answer status not 404 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 404")
    @allure.step('test_check_answer_text_post_request_to_the_non_existent_list')
    def test_check_answer_text_post_request_to_the_non_existent_list(self, get_all_records_post_request_to_the_non_existent_list):
        response = get_all_records_post_request_to_the_non_existent_list
        print("request_result_text : ", response.text)
        status = "calling list not found"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#==================================================== Code 401 =========================================================
#=======================================================================================================================
@pytest.mark.usefixtures("get_all_records_post_request_with_do_not_authorize_session")
class Test_post_request_with_do_not_authorize_session:
    """POST without an authenticated session: expects HTTP 401."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 401")
    @allure.step('test_check_status_code_post_request_with_do_not_authorize_session')
    def test_check_status_code_post_request_with_do_not_authorize_session(self, get_all_records_post_request_with_do_not_authorize_session):
        response = get_all_records_post_request_with_do_not_authorize_session
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "401" in code, "Answer status not 401 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 401")
    @allure.step('test_check_answer_text_post_request_with_do_not_authorize_session')
    def test_check_answer_text_post_request_with_do_not_authorize_session(self, get_all_records_post_request_with_do_not_authorize_session):
        response = get_all_records_post_request_with_do_not_authorize_session
        print("request_result_text : ", response.text)
        status = "Session is not authenticated"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#==================================================== Code 403 =========================================================
#=======================================================================================================================
@pytest.mark.usefixtures("get_all_records_post_request_with_authorize_session_for_user_without_permission")
class Test_post_request_with_authorize_session_for_user_without_permission:
    """POST as an authenticated user lacking the required privilege: expects HTTP 403."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 403")
    @allure.step('test_check_status_code_post_request_with_authorize_session_for_user_without_permission')
    def test_check_status_code_post_request_with_authorize_session_for_user_without_permission(self, get_all_records_post_request_with_authorize_session_for_user_without_permission):
        response = get_all_records_post_request_with_authorize_session_for_user_without_permission
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "403" in code, "Answer status not 403 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 403")
    @allure.step('test_check_answer_text_post_request_with_authorize_session_for_user_without_permission')
    def test_check_answer_text_post_request_with_authorize_session_for_user_without_permission(self, get_all_records_post_request_with_authorize_session_for_user_without_permission):
        response = get_all_records_post_request_with_authorize_session_for_user_without_permission
        print("request_result_text : ", response.text)
        status = "User authenticated but does not have sufficient privileges"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#=======================================================================================================================
#==================================================== Code 405 =========================================================
#=======================================================================================================================
@pytest.mark.usefixtures("get_all_records_get_request_with_correct_body")
class Test_get_request_with_correct_body:
    """GET (unsupported verb) with an otherwise correct body: expects HTTP 405."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_status_code_get_request_with_correct_body')
    def test_check_status_code_get_request_with_correct_body(self, get_all_records_get_request_with_correct_body):
        response = get_all_records_get_request_with_correct_body
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "405" in code, "Answer status not 405 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_answer_text_get_request_with_correct_body')
    def test_check_answer_text_get_request_with_correct_body(self, get_all_records_get_request_with_correct_body):
        response = get_all_records_get_request_with_correct_body
        print("request_result_text : ", response.text)
        status = "Method Not Allowed"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#@allure.issue("https://trac.brightpattern.com/ticket/24265")
@pytest.mark.usefixtures("get_all_records_put_request_with_correct_body")
class Test_put_request_with_correct_body:
    """PUT (unsupported verb) with an otherwise correct body: expects HTTP 405."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_status_code_put_request_with_correct_body')
    def test_check_status_code_put_request_with_correct_body(self, get_all_records_put_request_with_correct_body):
        response = get_all_records_put_request_with_correct_body
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "405" in code, "Answer status not 405 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_answer_text_put_request_with_correct_body')
    def test_check_answer_text_put_request_with_correct_body(self, get_all_records_put_request_with_correct_body):
        response = get_all_records_put_request_with_correct_body
        print("request_result_text : ", response.text)
        status = "Method Not Allowed"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#@allure.issue("https://trac.brightpattern.com/ticket/24265")
@pytest.mark.usefixtures("get_all_records_delete_request_with_correct_body")
class Test_delete_request_with_correct_body:
    """DELETE (unsupported verb) with an otherwise correct body: expects HTTP 405."""

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_status_code_delete_request_with_correct_body')
    def test_check_status_code_delete_request_with_correct_body(self, get_all_records_delete_request_with_correct_body):
        response = get_all_records_delete_request_with_correct_body
        print("request_result_status_code : ", response.status_code)
        code = str(response.status_code)
        assert "405" in code, "Answer status not 405 ; actual status code : " + code

    @allure.epic("test_get_all_records")
    @allure.feature("answer code 405")
    @allure.step('test_check_answer_text_delete_request_with_correct_body')
    def test_check_answer_text_delete_request_with_correct_body(self, get_all_records_delete_request_with_correct_body):
        response = get_all_records_delete_request_with_correct_body
        print("request_result_text : ", response.text)
        status = "Method Not Allowed"
        body = str(response.text)
        assert status in body, "Answer text not " + status + " ; actual message : " + body
#=======================================================================================================================
#=======================================================================================================================
#======================================================================================================================= |
# Interactive console script (Estonian prompts): greets the user, reacts to
# their home town, and reports whether they are old enough to drive.
name = input("Sisesta oma nimi: ")
print("Tere, " + name + "!")

city = input("Kus on su elukoht: ")
if city.lower() == "kuressaare":
    print("Hei ma olen ka sealt!")

age = int(input("Kui vana sa oled: "))
# Branches reordered: handle the exact-18 case first, then younger/older.
if age == 18:
    print("Palju õnne täisealiseks saamiseks!")
elif age < 18:
    print("Sa oled veel liiga noor, et autot juhtida!")
else:
    print("Sa oled piisavalt vana, et autot juhtida!")
|
from Vector import Vector
import math
import random
class Vehicle:
    """An evolving steering agent rendered on a shared canvas.

    DNA genes: [0] food steer weight, [1] poison steer weight,
    [2] food perception radius, [3] poison perception radius.
    Offspring inherit a parent's DNA with per-gene mutation.
    """
    # Shared movement caps for all vehicles.
    maxSpeed = 2
    maxForce = 0.05
    # World bounds -- assigned by the host application before use.
    width = 0
    height = 0
    # Shared drawing surface; used via create_oval/create_line/itemconfig/move,
    # so presumably a tkinter Canvas (assigned externally) -- confirm.
    c = None
    # Probability that each inherited gene drifts.
    mutationRate = 0.4
    def __init__(self, x, y, dna, health):
        """Spawn at (x, y); inherit-and-mutate *dna*, or roll random genes if None."""
        self.position = Vector(x, y)
        self.velocity = Vector(0, -2)
        self.acceleration = Vector(0, 0)
        self.vehicle = Vehicle.c.create_oval(x - 5, y - 5, x + 5, y + 5, fill="gray")
        # Last drawn canvas position; display() moves items by the delta from these.
        self.x = x
        self.y = y
        self.health = health
        if dna is not None:
            # Copy the parent's genes; each has a mutationRate chance to drift.
            self.dna = [0, 0, 0, 0]
            self.dna[0] = dna[0]
            # NOTE(review): looks like a leftover debug print -- confirm.
            print(random.random())
            if random.random() < Vehicle.mutationRate:
                self.dna[0] += random.uniform(-0.5, 0.5)
            self.dna[1] = dna[1]
            if random.random() < Vehicle.mutationRate:
                self.dna[1] += random.uniform(-0.5, 0.5)
            self.dna[2] = dna[2]
            if random.random() < Vehicle.mutationRate:
                self.dna[2] += random.uniform(-10, 10)
            self.dna[3] = dna[3]
            if random.random() < Vehicle.mutationRate:
                self.dna[3] += random.uniform(-10, 10)
        else:
            self.dna = [0, 0, 0, 0]
            # Food Steer
            self.dna[0] = random.randint(-2, 2)
            # Poison Steer
            self.dna[1] = random.randint(-2, 2)
            # Food Perception
            self.dna[2] = random.randint(1, 100)
            # Poison Perception
            self.dna[3] = random.randint(1, 100)
        # Debug overlays: steer indicator lines and perception circles.
        self.foodLine = Vehicle.c.create_line(x, y, x + self.dna[0] * 10, y, fill="green", width=3)
        self.poisonLine = Vehicle.c.create_line(x, y, x + self.dna[1] * 10, y, fill="red", width=2)
        self.foodPerception = Vehicle.c.create_oval(x - self.dna[2], y - self.dna[2], x + self.dna[2], y + self.dna[2],
                                                    fill="", outline="green")
        self.poisonPerception = Vehicle.c.create_oval(x - self.dna[3], y - self.dna[3], x + self.dna[3],
                                                      y + self.dna[3],
                                                      fill="", outline="red")
        # self.txtSpeed = Vehicle.c.create_text(x, y - 20, text="0", fill="white")
        self.txtFoodSteer = Vehicle.c.create_text(x, y - 40, text=str(self.dna[0]), fill="green", font=1)
        self.txtPoisonSteer = Vehicle.c.create_text(x, y - 20, text=str(self.dna[1]), fill="red", font=1)
        # print("Food steer: " + str(self.dna[0]) + " Poison steer: " + str(self.dna[1]))
    def update(self):
        """Advance one tick: age by 1, integrate motion, recolour by health."""
        self.health -= 1
        self.velocity.add(self.acceleration)
        self.position.add(self.velocity)
        # NOTE(review): the speed cap is applied *after* the position update,
        # so a single frame can exceed maxSpeed -- confirm the order is intended.
        self.velocity.setLimit(Vehicle.maxSpeed)
        self.acceleration.mult(0)
        # Health 1000 -> green, 0 -> red; both channels clamped to 0..255.
        red = int((1000 - self.health) * 0.255)
        green = int(self.health * 0.255)
        if red < 0:
            red = 0
        elif red > 255:
            red = 255
        if green > 255:
            green = 255
        elif green < 0:
            green = 0
        # print("red" + str(int((1000 - self.health) * 0.255)) + " green:" + str(int(self.health * 0.255)))
        Vehicle.c.itemconfig(self.vehicle, fill=self.toRGB((red, green, 0)))
    def eat(self, listV, list, nutrition, perception):
        """Consume the nearest item within *perception*, or steer toward it.

        listV -- item objects with .x/.y attributes; list -- their canvas ids.
        If the closest item is nearer than 5 px it is eaten (*nutrition* added
        to health, item deleted from canvas and both lists); otherwise a seek
        force toward it is returned.  Returns Vector(0, 0) when nothing is
        in range.
        """
        x = self.position.x
        y = self.position.y
        distance = 3000
        closestIndex = -1
        for index in range(len(listV)):
            # math.dist requires Python 3.8+.
            d = math.dist([x, y], [listV[index].x, listV[index].y])
            if d < distance and d < perception:
                distance = d
                closestIndex = index
        if distance < 5:
            self.health += nutrition
            Vehicle.c.delete(list[closestIndex])
            list.remove(list[closestIndex])
            listV.remove(listV[closestIndex])
        elif closestIndex > -1:
            return self.seek(listV[closestIndex])
        return Vector(0, 0)
    def applyForce(self, force):
        """Accumulate *force* into this tick's acceleration."""
        self.acceleration.add(force)
    def behaviors(self, goodV, badV, good, bad):
        """Weigh food/poison attraction by the steer genes and apply both forces."""
        steerG = self.eat(goodV, good, 100, self.dna[2])
        steerB = self.eat(badV, bad, -200, self.dna[3])
        steerG.mult(self.dna[0])
        steerB.mult(self.dna[1])
        self.applyForce(steerG)
        self.applyForce(steerB)
    def seek(self, target):
        """Return a steering force (capped at maxForce) toward *target*."""
        desired = Vector.sSub(target, self.position)
        desired.setMag(Vehicle.maxSpeed)
        steer = Vector.sSub(desired, self.velocity)
        steer.setLimit(Vehicle.maxForce)
        return steer
        # self.applyForce(steer)
    def display(self):
        """Move the canvas oval and overlays by the delta since the last draw."""
        x = self.position.x
        y = self.position.y
        # theta = self.velocity.heading() + math.pi / 2
        # center = (x, y)
        # self.rotate(theta, center)
        Vehicle.c.move(self.vehicle, x - self.x, y - self.y)
        self.updateIndicators()
        self.x += x - self.x
        self.y += y - self.y
    def updateIndicators(self):
        """Shift all debug overlay items by the same delta as the body."""
        x = self.position.x
        y = self.position.y
        Vehicle.c.move(self.foodLine, x - self.x, y - self.y)
        Vehicle.c.move(self.poisonLine, x - self.x, y - self.y)
        Vehicle.c.move(self.foodPerception, x - self.x, y - self.y)
        Vehicle.c.move(self.poisonPerception, x - self.x, y - self.y)
        Vehicle.c.move(self.txtFoodSteer, x - self.x, y - self.y)
        Vehicle.c.move(self.txtPoisonSteer, x - self.x, y - self.y)
        # Vehicle.c.itemconfig(self.txtSpeed, text=str(self.velocity.magnitude))
        # Vehicle.c.move(self.txtSpeed, x - self.x, y - self.y)
    def deleteIndicators(self):
        """Remove every overlay canvas item belonging to this vehicle."""
        Vehicle.c.delete(self.foodLine)
        Vehicle.c.delete(self.poisonLine)
        Vehicle.c.delete(self.foodPerception)
        Vehicle.c.delete(self.poisonPerception)
        Vehicle.c.delete(self.txtFoodSteer)
        Vehicle.c.delete(self.txtPoisonSteer)
        # Vehicle.c.delete(self.txtSpeed)
    def dead(self):
        """True once health has dropped below zero."""
        return self.health < 0
    def boundaries(self):
        """Steer back toward the playfield when within 25 px of any edge."""
        x = self.position.x
        y = self.position.y
        d = 25
        desired = None
        if self.position.x < d:
            desired = Vector(Vehicle.maxSpeed, self.velocity.y)
        elif self.position.x > Vehicle.width - d:
            desired = Vector(-Vehicle.maxSpeed, self.velocity.y)
        if self.position.y < d:
            desired = Vector(self.velocity.x, Vehicle.maxSpeed)
        elif self.position.y > Vehicle.height - d:
            desired = Vector(self.velocity.x, -Vehicle.maxSpeed)
        if desired is not None:
            desired.normalize()
            desired.mult(Vehicle.maxSpeed)
            steer = Vector.sSub(desired, self.velocity)
            steer.setLimit(Vehicle.maxForce)
            self.applyForce(steer)
    def clone(self):
        """Occasionally spawn a mutated offspring; chance grows with health."""
        if random.random() < 0.0003 + self.health / 2000000:
            return Vehicle(self.position.x, self.position.y, self.dna, 500)
        else:
            return None
    @staticmethod
    def toRGB(rgb):
        """Format an (r, g, b) tuple as a tkinter '#rrggbb' colour string."""
        return "#%02x%02x%02x" % rgb
    '''
    def rotate(self, theta, center):
        cos_val = math.cos(theta)
        sin_val = math.sin(theta)
        cx, cy = center
        new_points = []
        for x_old, y_old in self.points:
            x_old -= cx
            y_old -= cy
            x_new = x_old * cos_val - y_old * sin_val
            y_new = x_old * sin_val + y_old * cos_val
            new_points.append([x_new + cx, y_new + cy])
        new2points = []
        for pair in new_points:
            for coordinates in pair:
                new2points.append(coordinates)
        Vehicle.c.coords(self.vehicle, new2points)
    '''
|
import sqlite3
from Consts import Consts
from Objects.TwitterLogger import TwitterLogger
class SqliteAdapter():
    """Thin wrapper around a sqlite3 connection to the Twitter database."""

    def __init__(self):
        # Connect immediately so get_cursor() can be used right away.
        self.connect()

    def connect(self):
        """Open the connection to the database configured in Consts.

        On failure the exception is printed and self.conn stays unset.
        """
        try:
            self.conn = sqlite3.connect(Consts.TWITTER_DB_LOCATION)
        except Exception as exc:
            # Bug fix: `print exc` / `print 'connected'` were Python 2
            # statements and raise SyntaxError under Python 3.
            print(exc)
            #self.logger.ERROR("Didn't succeed to connect to DB, Error occurred:{0}".format(exc))
        else:
            print('connected')

    def get_cursor(self):
        """Return a fresh cursor; the caller is responsible for closing it."""
        return self.conn.cursor()
# Blender API imports
import bpy
from bpy.props import StringProperty, BoolProperty, IntProperty
from bpy_extras.io_utils import ImportHelper
from bpy.types import Operator
# Importing the TCK file reader
from . import readtck
# TrainTracts Blender addon info
bl_info = {
"name" : "TrainTracts",
"description": "An addon for the import and translation of brain tractography .TCK files into 3D objects.",
"author" : "Athanasios Bourganos",
"blender" : (3, 0, 0),
"version" : (1, 0, 0),
"location": "File > Import",
"category": "Import Export"
}
def create_tract(ob_name, coords, edges=None, faces=None):
    '''
    Create a new Blender mesh object from tract data.

    ob_name -- base name for the object and its mesh
    coords  -- vertex coordinates as [(X1, Y1, Z1), (X2, Y2, Z2), ...]
    edges   -- edge list as [[vert1, vert2], [vert2, vert3], ...]
    faces   -- face list as [[vert1, vert2, vert3], ...] (unused by TrainTracts)

    Fix: the mutable default arguments (edges=[], faces=[]) are replaced with
    None sentinels -- shared mutable defaults are a classic Python pitfall.
    '''
    edges = [] if edges is None else edges
    faces = [] if faces is None else faces
    # Create instance of mesh and object
    mesh = bpy.data.meshes.new(ob_name + "Mesh")
    obj = bpy.data.objects.new(ob_name, mesh)
    # Make the tractography mesh from a list of vertices/edges
    mesh.from_pydata(coords, edges, faces)
    # Don't display name and update the mesh in Blender
    obj.show_name = False
    mesh.update()
    return obj
class OpenTCKFile(Operator, ImportHelper):
    """Import a .tck tractography file as one edge-only mesh named "tracts"."""
    # Plugin operator info and label for the menu
    bl_idname = "test.open_tck"
    bl_label = "Tractography (.tck)"
    bl_icon = 'SYSTEM'
    # File filtering property in the file picker
    filter_glob: StringProperty(
        default='*.tck',
        options={'HIDDEN'}
    )
    # Property for setting import as verbose
    is_verbose: BoolProperty(
        name='Verbose',
        description='Make file import verbose.',
        default=False,
    )
    # Property for decimating the mesh by removing tracts
    # (1/decimate of the tracts will be used in the mesh)
    decimate: IntProperty(
        name='Decimate Factor',
        description='Decimate tracts by 1/value (2 = half of tracks).',
        default=1,
        min=1,
        max=100
    )
    def execute(self, context):
        """Read the TCK file, build vertices/edges, link the new object.

        Always returns {'FINISHED'}.
        """
        # Method to actually open the file, get the data, and make the mesh!
        # Open the file and extract the header and tracts
        header, tracts = readtck.readTCK(self.filepath, verbose=self.is_verbose)
        # If verbose then extract the tract count for progress messages
        if self.is_verbose:
            # header['count'] may carry non-digit characters; keep digits only.
            t_count = str()
            for char in header['count']:
                if char.isdigit():
                    t_count += char
            t_count = int(int(t_count)/self.decimate)
            print('Header reading complete and file data open...')
        # Define some important variables
        c_count = 0
        pydata = list()
        edgedata = list()
        # Iterate through the tracts and decimate if needed
        for a in range(0, len(tracts), self.decimate):
            # Set current tract and iterate count
            tract = tracts[a]
            c_count += 1
            # Some more talkative code with progress included...
            if self.is_verbose and c_count % 10000 == 1:
                print(str((c_count/t_count)*100)+'%', 'of tracts prepared...')
            # Some code that generates a list of edges within but not between tracts!
            # This will likely be the most underapreciated optimization in the code...
            p_index = len(pydata)
            pydata += tract
            for _ in range(len(tract)-1):
                edgedata.append([p_index, p_index+1])
                p_index += 1
        # Create the mesh from the vertices and edges
        tract_obj = create_tract("tracts", pydata, edges=edgedata)
        # Link object to the active collection
        bpy.context.collection.objects.link(tract_obj)
        # Finish the execution and send the finished message!
        return {'FINISHED'}
# Get the plugin set up as an entry in the import menu
def custom_draw(self, context):
    """Menu draw callback: adds the TCK import operator to the menu it is appended to."""
    self.layout.operator("test.open_tck")
# Register the plugin and display it in the file import menu
def register():
    """Register the operator class and hook it into File > Import."""
    bpy.utils.register_class(OpenTCKFile)
    bpy.types.TOPBAR_MT_file_import.append(custom_draw)
# Unregister the plugin if needed
def unregister():
    """Unhook the operator and its File > Import menu entry."""
    bpy.utils.unregister_class(OpenTCKFile)
    # Fix: also remove the menu entry added by register(); it previously
    # lingered in the File > Import menu after the addon was disabled.
    bpy.types.TOPBAR_MT_file_import.remove(custom_draw)
# Start it all up when loaded!
# (Running the file directly inside Blender's text editor registers the addon.)
if __name__ == "__main__":
    register()
APP_LABEL = 'regmain'  # application label constant (presumably the Django app name -- confirm)
class ATTENDANT_TYPE(object):
    """Closed set of attendant categories; ALL lists every valid value."""
    NORMAL = 'normal'
    VOLUNTEER = 'volunteer'
    WORKER = 'worker'
    ALL = (NORMAL, VOLUNTEER, WORKER)
class SEX_TYPE(object):
    """Closed set of gender values; ALL lists every valid value."""
    MALE = 'male'
    FEMALE = 'female'
    TRANSGENDER = 'transgender'
    ALL = (MALE, FEMALE, TRANSGENDER)
class FLOW_TYPE(object):
    """Direction of a flow record; ALL lists every valid value."""
    INBOUND = 'inbound'
    OUTBOUND = 'outbound'
    ALL = (INBOUND, OUTBOUND)
|
#########################################################
#importing required files/libraries
import simulation
import optparse
import sys
import os
try:
sys.path.append(os.path.join(os.path.dirname(
__file__), '..', '..', '..', '..', "tools")) # tutorial in tests
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(__file__), "..", "..", "..")), "tools")) # tutorial in docs
from sumolib import checkBinary # noqa
except ImportError:
sys.exit(
"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
import traci
import traci.constants as tc
import vehicleControl
##########################################################
#SIMULATION OPTIONS
def get_options():
    """Parse the command-line flags for the simulation runner.

    Returns the optparse options object; positional arguments are ignored.
    """
    parser = optparse.OptionParser()
    parser.add_option("--gui", action="store_true",
                      default=False, help="run the commandline version of sumo")
    opts, _ = parser.parse_args()
    return opts
##########################################################
# this is the main entry point of this script
if __name__ == "__main__":
    options = get_options()
    # this script has been called from the command line. It will start sumo as a
    # server, then connect and run
    # NOTE(review): --gui selects the *sumo-gui* binary although its help text
    # says "run the commandline version" -- the flag and help string appear to
    # contradict each other; confirm intended wording.
    if options.gui:
        sumoBinary = checkBinary('sumo-gui')
    else:
        sumoBinary = checkBinary('sumo')
    # this is the normal way of using traci. sumo is started as a
    # subprocess and then the python script connects and runs
    traci.start([sumoBinary, "-c", "graph_data/simulation.sumocfg"])
    #call the simulation function
    simulation.simulate()
"""Imports."""
from node import Node
class LinkedList:
    """Singly linked list; insert() places new values at the head."""

    def __init__(self, iter=None):
        """Build a list by head-inserting each item of *iter*.

        The last item of *iter* therefore ends up first.  Fix: the former
        mutable default argument (iter=[]) is replaced with None.
        """
        self.head = None
        self._len = 0
        for item in (iter or []):
            self.insert(item)

    def __len__(self):
        """Return the number of nodes."""
        return self._len

    def __str__(self):
        """Return the node values head-first, each followed by a space."""
        lis = ''
        current = self.head
        while current:
            lis += str(current.val) + ' '
            current = current._next
        return lis

    def insert(self, val):
        """Insert *val* at the head and return the new head node."""
        self.head = Node(val, self.head)
        self._len += 1
        return self.head

    def find(self, val):
        """Return True if *val* is stored in the list, else False.

        Fix: the original compared Node objects to raw values
        (`self.head == val`, `val == current`), so it never matched.
        """
        current = self.head
        while current:
            if current.val == val:
                return True
            current = current._next
        return False

    def append(self, value):
        """Append *value* at the tail of the list.

        Fix: the original dereferenced `self.head._next` unconditionally and
        crashed with AttributeError on an empty list.
        """
        node = Node(value)
        if self.head is None:
            self.head = node
        else:
            current = self.head
            while current._next:
                current = current._next
            current._next = node
        self._len += 1

    def insert_before(self, value, newval):
        """Insert *newval* before the first node whose val equals *value*.

        Returns the list's string form on success, False when *value* is
        absent.  Fixes: node-vs-value comparisons, a head insert that never
        updated _len, a while/else that made every search return False, and
        a return of the unbound `self.__str__` method instead of a string.
        """
        if self.head is None:
            self.head = Node(newval)
            self._len += 1
            return str(self)
        if self.head.val == value:
            self.head = Node(newval, self.head)
            self._len += 1
            return str(self)
        current = self.head
        while current._next:
            if current._next.val == value:
                current._next = Node(newval, current._next)
                self._len += 1
                return str(self)
            current = current._next
        return False

    def insert_after(self, value, newval):
        """Insert *newval* after the first node whose val equals *value*.

        Returns the list's string form on success, False when *value* is
        absent.  Fixes mirror insert_before: correct value comparison,
        consistent _len updates, and a real string return value.
        """
        if self.head is None:
            self.head = Node(newval)
            self._len += 1
            return str(self)
        current = self.head
        while current:
            if current.val == value:
                current._next = Node(newval, current._next)
                self._len += 1
                return str(self)
            current = current._next
        return False
|
from django.shortcuts import render, get_object_or_404
from django.db.models import *
import decimal
from django.http import JsonResponse
from django.template.loader import render_to_string
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from datetime import *
from django.forms import formset_factory
from django.db import IntegrityError, transaction
from transport.models import Pupil, EstatePupil, TermPupil, Term, Rate, Zone, Payment as TransportPay
from driving.models import Class, Student, StudentEnrolment, Attendance, Payment as DrivingPay, Rate as DrRate
from rent.models import Tenant, Room_Tenant, Rent_Regime, Payment as RentPay
from common.models import Trip, Expense, ExpenseType
def preppaydata(Model, desc, start, end):
    """Collect *Model* payment rows dated within [start, end].

    Returns a 3-item list: [description, queryset of rows, grand total paid].
    """
    objs = Model.objects.filter(datepaid__gte=start, datepaid__lte=end)
    # NOTE(review): values('amount') groups rows by equal amount before the
    # Sum annotation; adding the per-group sums below still yields the grand
    # total, but confirm the grouping is intentional.
    paid = objs.values('amount').annotate(paid=Sum('amount')).order_by('paid')
    total = 0
    for i in range(len(paid)):
        total += paid[i]['paid']
    data = []
    data.append(desc)
    data.append(objs)
    data.append(total)
    return data
def getexpensedata(start, end, exp='all'):
    """Build the expense report for the window [start, end].

    Returns [[start, end], rows, grand total] where each row is
    [expense type, queryset, subtotal].  *exp* limits the report to one
    ExpenseType when it is not 'all'.
    """
    if exp == 'all':
        expType = ExpenseType.objects.all()
    else:
        expType = ExpenseType.objects.filter(exp=exp)
    data = []
    print ('Expense: {0}'.format(exp))
    totals = 0
    duration = []
    duration.append(start)
    duration.append(end)
    data.append(duration)
    expdata = []
    for xpTy in expType:
        dt = []
        # NOTE(review): this rebinds the `exp` parameter to a queryset --
        # harmless here but confusing; consider renaming later.
        exp = Expense.objects.filter(exptype=xpTy, date__lte=end, date__gte=start)
        paid = exp.values('amount').annotate(paid=Sum('amount')).order_by('paid')
        total = 0
        for i in range(len(paid)):
            total += paid[i]['paid']
        dt.append(xpTy)
        dt.append(exp)
        dt.append(total)
        totals += total
        expdata.append(dt)
    data.append(expdata)
    data.append(totals)
    return data
def getPayment(start, end):
    """Gather payment sections for every income stream within [start, end].

    Returns [transport, rent, driving, trips, "start - end" label]; each
    section is the [desc, rows, total] triple produced by preppaydata().
    """
    tran = preppaydata(TransportPay, 'Pupil Payments', start, end)
    rent = preppaydata(RentPay, 'Tenant Payments', start, end)
    driv = preppaydata(DrivingPay, 'Student Payments', start, end)
    trip = preppaydata(Trip, 'Trip Payments', start, end)
    data = []
    data.append(tran)
    data.append(rent)
    data.append(driv)
    data.append(trip)
    data.append('{0} - {1}'.format(start,end))
    return data
def prepEstate(trm):
    """Return the pupils that have no EstatePupil record for term *trm*."""
    registered = [entry.pupil for entry in EstatePupil.objects.filter(term=trm)]
    return [pupil for pupil in Pupil.objects.all() if pupil not in registered]
class terms():
    """Holds a school term name together with its start and end dates."""

    def __init__(self):
        # Empty strings until getTerm() fills the fields in.
        self.term = ''
        self.start = ''
        self.end = ''

    def getTerm(self, yr, tm):
        """Set start/end dates for term *tm* of year *yr*; returns self."""
        self.term = tm
        year = int(yr)
        # Fixed calendar windows for the three school terms.
        windows = {
            'First': (date(year, 1, 1), date(year, 4, 15)),
            'Second': (date(year, 5, 1), date(year, 8, 15)),
            'Third': (date(year, 9, 1), date(year, 12, 15)),
        }
        if tm in windows:
            self.start, self.end = windows[tm]
        return self
def get_term(yr, trm):
    """Look up the Term row for year *yr* and term name *trm*.

    Lowercase term names are normalised to the stored capitalised form.
    Returns the Term object, or the string 'No Term Found' when absent.
    """
    names = {'first': 'First', 'second': 'Second', 'third': 'Third'}
    trm = names.get(trm, trm)
    # Fix: dropped the unused `ter = terms().getTerm(yr, trm)` local the
    # original built and never read.
    tm = Term.objects.filter(term=trm, year=yr)
    if len(tm) > 0:
        return tm[0]
    return "No Term Found"
def getTransportDue(pup):
    """Total transport fees owed by pupil *pup* across all of their terms.

    Terms whose rate lookup fails (e.g. the pupil has no estate record, so
    estPup[0] raises IndexError) are reported and skipped.
    """
    estPup = EstatePupil.objects.filter(pupil=pup)
    termPup = TermPupil.objects.filter(pupil=pup)
    due = 0
    for trm in termPup:
        try:
            rate = Rate.objects.filter(zone=estPup[0].estate.zone, term=trm.term)
            if len(rate) > 0:
                due += rate[0].rate
        # Fix: catch Exception, not BaseException -- the old clause also
        # swallowed KeyboardInterrupt/SystemExit.  The dead `due = due`
        # statement was removed as well.
        except Exception:
            print (trm, ' --> Issue encountered')
    return due
def getTransportPay(pup):
    """Total transport payments ever recorded for pupil *pup*."""
    objs = TransportPay.objects.filter(pupil=pup)
    return getPaymentsDone(objs)
def getTransportArrears(pup):
    """Outstanding transport balance for *pup* (amount due minus amount paid)."""
    due = getTransportDue(pup)
    pay = getTransportPay(pup)
    return due - pay
def getPupilArrears(pup, trm):
    """Balance owed by *pup* for the single term *trm*.

    Returns rate - payments when a zone rate exists; otherwise returns the
    explanatory string below, so callers must handle both types.
    """
    term = terms().getTerm(trm.year, trm.term)
    pupPay = [x for x in TransportPay.objects.filter(pupil=pup, datepaid__gte=term.start, datepaid__lte=term.end)]
    estPup = EstatePupil.objects.filter(pupil=pup, estate__zone__rate__term=trm)
    rate = 'Not a Customer or not yet defined!'
    if estPup.count() > 0:
        rate = Rate.objects.filter(zone=estPup[0].estate.zone, term=trm)[0].rate
    amount = 0
    if len(pupPay) > 0:
        for pay in pupPay:
            amount += pay.amount
        # NOTE(review): if payments exist but no rate was found, `rate` is
        # still the string above and this subtraction raises TypeError.
        return rate - amount
    else:
        return rate
def getTripPayments(start, end):
    """Trips dated within [start, end], inclusive.

    Bug fix: the original passed `date_gte` (single underscore), which
    Django's filter() rejects as an unknown field; the lookup is `date__gte`.
    """
    pay = Trip.objects.filter(date__gte=start, date__lte=end)
    return pay
def getAllPupilArrears(term):
    """List [name, arrears] for every pupil who owes money for *term*.

    Pupils whose arrears lookup returned a string (no rate defined) are kept
    so the report flags them; pupils with a zero or negative Decimal balance
    are skipped.
    """
    pups = Pupil.objects.all().order_by('sname')
    pupList = []
    for pup in pups:
        arr = getPupilArrears(pup, term)
        # isinstance() replaces the fragile `type(arr) == decimal.Decimal`
        # check, and the two identical append branches were collapsed.
        if not isinstance(arr, decimal.Decimal) or arr > 0:
            pupList.append([pup.fname + ' ' + pup.sname, arr])
    return pupList
def getpaydetails(pay, obj, typ, term='all'):
    """Payment rows for a parent (*typ* == 'par') or an estate (any other typ).

    *term* limits rows to that term's date window; 'all' returns everything.
    """
    if term == 'all':
        if typ == 'par':
            return pay.objects.filter(pupil__parent__pname=obj)
        else:
            return pay.objects.filter(pupil__estatepupil__estate__estate=obj)
    else:
        ter = terms().getTerm(term.year, term.term)
        if typ == 'par':
            return pay.objects.filter(pupil__parent__pname=obj, datepaid__gte=ter.start, datepaid__lte=ter.end)
        else:
            return pay.objects.filter(pupil__estatepupil__estate__estate=obj, datepaid__gte=ter.start, datepaid__lte=ter.end)
def getpaysumparent(pay,par, term='all'):
    """Total paid for parent *par*'s pupils, optionally limited to *term*.

    Consistency fix: delegates the aggregation to getPaymentsDone() instead
    of duplicating its values/annotate/sum loop inline (0 when no rows).
    """
    return getPaymentsDone(getpaydetails(pay, par, 'par', term))
def getpaysumestate(pay,est, term='all'):
    """Total paid for estate *est*'s pupils, optionally limited to *term*.

    Consistency fix: delegates the aggregation to getPaymentsDone() instead
    of duplicating its values/annotate/sum loop inline (0 when no rows).
    """
    return getPaymentsDone(getpaydetails(pay, est, 'est', term))
def getPaymentsDone(objs):
    """Sum the `amount` column of every payment row in *objs* (0 when empty).

    NOTE(review): values('amount') groups equal amounts before the Sum
    annotation; adding the per-group sums still gives the grand total,
    but confirm the grouping is intentional.
    """
    paid = objs.values('amount').annotate(paid=Sum('amount')).order_by('paid')
    # Idiom fix: sum() over a generator replaces the manual index loop.
    return sum(row['paid'] for row in paid)
def getRentPayment(tenant):
    """Total rent payments ever recorded for *tenant*."""
    objs = RentPay.objects.filter(tenant=tenant)
    return getPaymentsDone(objs)
def getRentDue(tenant):
    """Total rent owed by *tenant* from their commencement date to today.

    Walks the Rent_Regime rate history: months under the starting regime,
    then each intermediate regime, then the remaining months under the
    current regime.  Months are approximated as 30-day blocks throughout.
    """
    rmtenant = Room_Tenant.objects.filter(tenant=tenant)
    if len(rmtenant) <=0: return 0
    commence = rmtenant[0].commencement
    today = datetime.now()
    duration = int((datetime.date(datetime.now()) - commence).days/30)
    st_rate = Rent_Regime.objects.filter(beg_date__lte=commence, end_date__gte=commence).order_by('beg_date')[0]
    mon_rem = int((st_rate.end_date-commence).days/30)
    rates = list(Rent_Regime.objects.filter(end_date__lte=today))
    rates.append(list(Rent_Regime.objects.filter(beg_date__lte=today, end_date__gte=today))[0])
    due = 0
    due = mon_rem * st_rate.amount
    rem_mon = duration - mon_rem
    if len(rates) > 0:
        # NOTE(review): removing items from `rates` while iterating over it
        # skips the element after each removal, so some pre-commencement
        # regimes may survive this filter -- confirm and fix separately.
        for item in rates:
            if item.beg_date < st_rate.beg_date:
                rates.remove(item)
        # remove st_rate
        rates.remove(rates[0])
        for i in range(len(rates)-1):
            months = int((rates[i].end_date - rates[i].beg_date).days/30)
            due += months * rates[i].amount
            rem_mon -= months
        # compute for current year
        due += rem_mon * rates[len(rates)-1].amount
    return due
# Rent Arrears
def getRentArrears(tenant):
    """Outstanding rent balance: total due minus total paid."""
    due = getRentDue(tenant)
    pay = getRentPayment(tenant)
    return due - pay
def getDrivingDue(student):
    """Course fee owed by *student*, looked up via their class rate (0 if unknown)."""
    due = 0
    # Matched on first+last name rather than primary key; only the class field
    # is fetched.
    cls = Student.objects.filter(fname=student.fname, lname=student.lname).values('cls')
    if len(cls) > 0:
        clss = cls[0]
    else: return due
    cls = Class.objects.filter(cls=clss['cls'])
    if len(cls) > 0:
        rate = DrRate.objects.filter(clss=cls[0])
        if len(rate) > 0:
            due = rate[0].rate
    return due
def getDrivingPayment(student):
    """Total driving-school payments recorded for *student*."""
    objs = DrivingPay.objects.filter(student=student)
    return getPaymentsDone(objs)
# Driving School Arrears
def getDrivingArrears(student):
    """Outstanding driving-school balance: fee due minus amount paid."""
    due = getDrivingDue(student)
    pay = getDrivingPayment(student)
    return due - pay
# prepare arrears data
def preparrdata(RefModel, PayModel, Type, desc):
    """Build one arrears report section: [desc, rows, grand total].

    RefModel supplies the people to report on; *Type* ('Transport',
    'Driving' or 'Rent') selects the arrears function and display fields.
    Each row is [display name, location/org info, arrears amount].
    NOTE(review): the PayModel parameter is never used -- kept for
    signature compatibility with existing callers.
    """
    total = 0
    objArr = []
    objs = RefModel.objects.all()
    if Type == 'Transport':
        for obj in objs:
            obAr = []
            arr = getTransportArrears(obj)
            obAr.append(obj.fname + ' ' + obj.sname)
            estpup = EstatePupil.objects.filter(pupil=obj)
            if len(estpup) > 0:
                ora = estpup[0].estate
            else:
                ora = 'Estate not defined'
            obAr.append(ora)
            obAr.append(arr)
            objArr.append(obAr)
            total += arr
    elif Type == 'Driving':
        for obj in objs:
            obAr = []
            arr = getDrivingArrears(obj)
            obAr.append(obj.fname + ' ' + obj.lname)
            studenrol = StudentEnrolment.objects.filter(student=obj)
            if len(studenrol) > 0:
                ora = studenrol[0].branch
            else:
                ora = 'Branch not defined'
            obAr.append(ora)
            obAr.append(arr)
            objArr.append(obAr)
            total += arr
    elif Type == 'Rent':
        for obj in objs:
            obAr = []
            arr = getRentArrears(obj)
            obAr.append(obj.name)
            rmten = Room_Tenant.objects.filter(tenant=obj)
            if len(rmten) > 0:
                ora = rmten[0].room.site + ' ' + rmten[0].room.room
            else:
                ora = 'Room Tenant not defined'
            obAr.append(ora)
            obAr.append(arr)
            objArr.append(obAr)
            total += arr
    data = []
    data.append(desc)
    data.append(objArr)
    data.append(total)
    return data
def makeEmptyArrears(desc):
    """Return a placeholder arrears section: [description, no rows, zero total]."""
    return [desc, [], 0]
def getArrears(arrtype):
    """Collect arrears data for one service ('transport', 'driving', 'rent')
    or all of them ('all').

    Returns [transport section, rent section, driving section, today's date].
    Bug fix: an unrecognised *arrtype* used to leave tran/rent/driv unbound
    and raise NameError; it now falls back to empty sections.
    """
    tran = makeEmptyArrears('Pupil Arrears')
    rent = makeEmptyArrears('Tenant Arrears')
    driv = makeEmptyArrears('Student Arrears')
    if arrtype in ('all', 'transport'):
        tran = preparrdata(Pupil, TransportPay, 'Transport', 'Pupil Arrears')
    if arrtype in ('all', 'rent'):
        rent = preparrdata(Tenant, RentPay, 'Rent', 'Tenant Arrears')
    if arrtype in ('all', 'driving'):
        driv = preparrdata(Student, DrivingPay, 'Driving', 'Student Arrears')
    data = []
    data.append(tran)
    data.append(rent)
    data.append(driv)
    data.append(datetime.now().date())
    return data
def getPayDetails(obj, pay, typ):
    """Common detail payload: [obj, payments (or notice string), arrears].

    *typ* selects the lookup: 'pup' (transport), 'drv' (driving), 'rent'.
    NOTE(review): any other typ leaves objPay/arr unbound and raises
    NameError -- callers must pass one of the three expected codes.
    """
    data = []
    # objPay = []
    print(typ, obj)
    data.append(obj)
    if typ == 'pup':
        objPay = pay.objects.filter(pupil=obj)
        arr = getTransportArrears(obj)
    elif typ == 'drv':
        objPay = pay.objects.filter(student=obj)
        arr = getDrivingArrears(obj)
    elif typ == 'rent':
        objPay = pay.objects.filter(tenant=obj)
        arr = getRentArrears(obj)
    if len(objPay) > 0:
        data.append(objPay)
    else: data.append('No payments received')
    data.append(arr)
    return data
def getPupilDetails(pupil):
    """Detail payload for a pupil: payments, arrears, estate links, term links."""
    data = getPayDetails(pupil, TransportPay, 'pup')
    estpup = EstatePupil.objects.filter(pupil=pupil)
    if len(estpup) > 0:
        data.append(estpup)
    else: data.append([])
    trm = TermPupil.objects.filter(pupil=pupil)
    if len(trm) > 0:
        data.append(trm)
    else: data.append([])
    return data
def getStudentDetails(student):
    """Detail payload for a student: payments, arrears, distinct lessons, enrolments."""
    data = getPayDetails(student, DrivingPay, 'drv')
    # len() of the grouped queryset = number of *distinct* lessons attended.
    cn=Attendance.objects.filter(student=student).values('lesson').annotate(cnt=Count('lesson'))
    data.append(len(cn))
    statt=StudentEnrolment.objects.filter(student=student)
    if len(statt)>0:
        data.append(statt)
    else: data.append([])
    return data
def getTenantDetails(tenant):
    """Detail payload for a tenant: payments, arrears, room assignments."""
    data = getPayDetails(tenant, RentPay, 'rent')
    rmten = Room_Tenant.objects.filter(tenant=tenant)
    if len(rmten)>0:
        data.append(rmten)
    else: data.append([])
    return data
# View management snippets
def save_form(request, form, Model, page, urlroot, template):
    """Validate and save *form* on POST; return the AJAX JsonResponse payload.

    On a valid POST the refreshed object-list HTML is included; in every case
    the (re-rendered) form HTML is returned so the client can show errors.
    """
    data = dict()
    if request.method == 'POST':
        if form.is_valid():
            form.save()
            data['form_is_valid'] = True
            tmpl = 'includes/partial_list.html'
            objdata = get_data_list(request, Model, form, urlroot, page)
            data['html_list'] = render_to_string(tmpl, {'data': objdata})
        else:
            data['form_is_valid'] = False
    formdata = []
    formdata.append(page)
    formdata.append(form)
    context = {'formdata': formdata}
    data['html_form'] = render_to_string(template, context, request=request)
    return JsonResponse(data)
def create_form(request, Form):
    """Instantiate *Form*: bound to the POST data on POST, unbound otherwise."""
    return Form(request.POST) if request.method == 'POST' else Form()
def update_form(request, pk, Form, Model):
    """Instantiate *Form* for the Model row *pk*; POST data is bound on POST.

    Raises Http404 (via get_object_or_404) when the row does not exist.
    """
    obj = get_object_or_404(Model, pk=pk)
    if request.method == 'POST':
        return Form(request.POST, instance=obj)
    return Form(instance=obj)
def delete_form(request, pk, Form, Model, url, urlroot):
    """Delete Model[pk] on POST; otherwise build the confirmation dialog.

    Returns a plain dict (not a JsonResponse): the refreshed list HTML after
    a deletion, or the confirmation form HTML on GET.
    """
    obj = get_object_or_404(Model, pk=pk)
    page = dict()
    data = dict()
    if request.method == 'POST':
        obj.delete()
        page = get_page(Model._meta.verbose_name, urlroot + '_create', Model._meta.verbose_name)
        data['form_is_valid'] = True
        tmpl = 'includes/partial_list.html'
        objdata = get_data_list(request, Model, Form, urlroot, page)
        data['html_list'] = render_to_string(tmpl, {'data': objdata})
    else:
        page['url'] = url
        page['objname'] = Model._meta.verbose_name
        page['objtitle'] = obj.__str__()
        formdata = []
        formdata.append(page)
        formdata.append(obj)
        context = {'formdata': formdata}
        tmpl = 'includes/partial_delete.html'
        data['html_form'] = render_to_string(tmpl, context, request=request)
    return data
#Term Object manipulation
def get_page(heading, create_url, new):
    """Build the header context dict used by the list templates."""
    return {
        'heading': heading,
        'create_url': reverse(create_url),
        'new': new,
    }
def obj_create(request, Form, Model, rev_url):
    """Build a creation form (bound on POST) plus its page context dict."""
    form = create_form(request, Form)
    page = dict()
    page['url'] = reverse(rev_url)
    page['objname'] = Model._meta.verbose_name
    return form, page
def obj_update(request, pk, Form, Model, rev_url):
    """Build an edit form for Model[pk] plus its page context dict."""
    form = update_form(request, pk, Form, Model)
    page = dict()
    # args={pk} passes the pk inside a set; reverse() only needs an iterable.
    page['url'] = reverse(rev_url, args={pk})
    page['objname'] = Model._meta.verbose_name
    return form, page
def pageddata(request, data, items):
    """Paginate *data* at *items* per page using the ?page= query parameter.

    Falls back to page 1 for a non-integer page and to the last page when
    the requested page is out of range.
    """
    page = request.GET.get('page', 1)
    paginator = Paginator(data, items)
    try:
        pgdata = paginator.page(page)
    except PageNotAnInteger:
        pgdata = paginator.page(1)
    except EmptyPage:
        pgdata = paginator.page(paginator.num_pages)
    return pgdata
def get_data_list(request, Model, ModelForm, urlroot, pagedata):
    """Build the paginated listing payload: [page header, page of rows].

    Each row is [field-verbose-name -> value dict, {'update': url,
    'delete': url}]; the form's field list decides which columns appear.
    """
    objects = Model.objects.all()
    fields = ModelForm._meta.fields
    data = []
    pk = Model._meta.pk.name
    # NOTE(review): create_url is built but never used in this function.
    create_url = urlroot + '_create'
    update_url = urlroot + '_update'
    delete_url = urlroot + '_delete'
    for obj in objects:
        row = dict()
        rowurl = dict()
        datapiece = []
        for fld in fields:
            row[obj._meta.get_field(fld).verbose_name] = getattr(obj, fld)
        rowurl['update'] = reverse(update_url, args={getattr(obj, pk)})
        rowurl['delete'] = reverse(delete_url, args={getattr(obj, pk)})
        datapiece.append(row)
        datapiece.append(rowurl)
        data.append(datapiece)
    # 20 rows per page.
    pgdata = pageddata(request, data, 20)
    objdata = []
    objdata.append(pagedata)
    objdata.append(pgdata)
    return objdata
def add_formset(request, Form, Model, text, url):
    """Handle a 5-row formset: save non-empty rows on POST, else blank formset.

    Returns [text, formset, resolved url] for the template.
    NOTE(review): fm.save() requires *Form* to be a ModelForm, and the Model
    parameter itself is never used -- confirm both against the callers.
    """
    ModelFormset = formset_factory(Form, extra=5)
    fields = Form._meta.fields
    if request.method == "POST":
        formset = ModelFormset(request.POST, request.FILES)
        if formset.is_valid():
            for fm in formset:
                formdata = dict()
                valid_data = False
                # A row is saved only if at least one of its fields is non-empty.
                for fld in fields:
                    formdata[fld] = fm.cleaned_data.get(fld)
                    if fm.cleaned_data.get(fld): valid_data = True
                if valid_data:
                    fm.save()
    else:
        formset = ModelFormset()
    data = []
    data.append(text)
    data.append(formset)
    data.append(reverse(url))
    return data
|
# Compare Version Numbers
'''
Compare two version numbers version1 and version2.
If version1 > version2 return 1; if version1 < version2 return -1;otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three", it is the fifth second-level revision of the second first-level revision.
You may assume the default revision number for each level of a version number to be 0. For example, version number 3.4 has a revision number of 3 and 4 for its first and second level revision number. Its third and fourth level revision number are both 0.
Example 1:
Input: version1 = "0.1", version2 = "1.1"
Output: -1
Example 2:
Input: version1 = "1.0.1", version2 = "1"
Output: 1
Example 3:
Input: version1 = "7.5.2.4", version2 = "7.5.3"
Output: -1
Example 4:
Input: version1 = "1.01", version2 = "1.001"
Output: 0
Explanation: Ignoring leading zeroes, both “01” and “001" represent the same number “1”
Example 5:
Input: version1 = "1.0", version2 = "1.0.0"
Output: 0
Explanation: The first version number does not have a third level revision number, which means its third level revision number is default to "0"
Note:
Version strings are composed of numeric strings separated by dots . and this numeric strings may have leading zeroes.
Version strings do not start or end with dots, and they will not be two consecutive dots.
'''
##########################################
# Solution 1 #
# 72 / 72 test cases passed. #
# Runtime: 28 ms (> 80.87%) #
# Memory Usage: 13.9 MB (> 39.53%) #
##########################################
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare dotted version strings.

        Returns 1 if version1 > version2, -1 if smaller, 0 if equal.
        Missing revision levels count as 0, so "1.0" == "1.0.0".
        Fixes: removed leftover debug print() calls and replaced the
        manual append-padding loops with list arithmetic.
        """
        v1 = list(map(int, version1.split('.')))
        v2 = list(map(int, version2.split('.')))
        # Pad the shorter list with zero revisions so both align.
        width = max(len(v1), len(v2))
        v1 += [0] * (width - len(v1))
        v2 += [0] * (width - len(v2))
        for a, b in zip(v1, v2):
            if a > b:
                return 1
            if a < b:
                return -1
        return 0
##########################################
# Fast solution - 12 ms #
##########################################
class Solution:
    def compareVersion(self, version1: str, version2: str) -> int:
        """Compare dotted version strings; missing levels are treated as 0.

        Returns 1 when version1 is greater, -1 when smaller, 0 when equal.
        """
        parts1 = version1.split('.')
        parts2 = version2.split('.')
        for idx in range(max(len(parts1), len(parts2))):
            left = int(parts1[idx]) if idx < len(parts1) else 0
            right = int(parts2[idx]) if idx < len(parts2) else 0
            if left < right:
                return -1
            if left > right:
                return 1
        return 0
##########################################
# Solution using less memory - 13516 KB #
##########################################
from itertools import zip_longest
class Solution:
    def compareVersion(self, version1, version2):
        """Compare dotted version strings using zip_longest zero-padding.

        :type version1: str
        :type version2: str
        :rtype: int
        """
        nums1 = [int(part) for part in version1.split(".")]
        nums2 = [int(part) for part in version2.split(".")]
        for left, right in zip_longest(nums1, nums2, fillvalue=0):
            if left != right:
                return 1 if left > right else -1
        return 0
"""
Digital Signal Processing 4
Assignment 2: FIR Filters
By Kai Ching Wong (GUID:2143747W)
"""
import numpy as np
import matplotlib.pyplot as plt
from FIR_Fil import FIR_filter as fir
###############################################################################
"""Task 1"""
# Load the two-column ECG recording: column 0 = time (ms), column 1 = raw ADC counts.
ecg = np.loadtxt('Ricky_ECG.dat')
fs = 1000 #Sampling Rate
t = ecg[:,0]
amplitude = ecg[:,1] #Only column 1
plt.figure(1)
plt.plot(t,amplitude)
plt.title('Column 1')
plt.xlabel('Time (ms)')
plt.ylabel('Amplitude')
plt.xlim(0,5000)
plt.savefig('Ricky_ECG.svg')
#Converting ECG into Milli Volt
# 12-bit ADC over +/-4.096 V: subtract the mid-scale offset, scale by the LSB
# step, divide by 2000 (presumably the amplifier gain -- confirm), express in mV.
step = (4.096+4.096)/2**12
mV = ((amplitude - 2**(12-1))*step/2000)*1000
plt.figure(2)
plt.plot(t,mV)
plt.title('ECG in Milli Volt')
plt.xlabel('Time (ms)')
plt.ylabel('Voltage (mV)')
plt.xlim(0,5000)
plt.savefig('Ricky_ECG_mV.svg')
#Extracting a heart beat (samples 2500..3299, i.e. 0.8 s at fs = 1000 Hz)
abeat = mV[2500:3300]
plt.figure(3)
plt.plot(abeat)
plt.title('A Heart Beat')
plt.xlabel('Time (ms)')
plt.ylabel('Voltage (mV)')
plt.savefig('A_Heart_Beat.svg')
#Converting into Frequency Domain
xfecg = np.fft.fft(mV)
f = np.linspace(0,fs,len(xfecg))
plt.figure(4)
plt.plot(f,abs(xfecg))
plt.title('ECG in Frequency Domain')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.xlim(-5,500)
plt.savefig('Ricky_ECG_Hz.svg')
###############################################################################
###############################################################################
""" 1 ECG Filtering: Task 3 """
#Creating Impulse Response with analytical calculation
f1 = 45/fs
f2 = 55/fs
n = np.arange(-200,200+1)
h = 2*f1*np.sinc(2*f1*n)-2*f2*np.sinc(2*f2*n)
h[200]=1+(2*(f1-f2))
plt.figure(5)
plt.plot(h)
plt.title('Impulse Response of a 50Hz Notch Filter')
plt.xlabel('Number of Tabs (n)')
plt.ylabel('h(n)')
plt.savefig('50Hz_Notch_Filter_Impulse_Response.svg')
#Frequency Response of the FIR filter with analytical calculation
h1 = h
xfh1 = np.fft.fft(h1)
fh1 = np.linspace(0,fs,len(xfh1))
plt.figure(6)
plt.plot(fh1, 20*np.log10(xfh1)) #Converting frequency response in dB
plt.title('Frequency Response in Decibel')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decibel (dB)')
plt.savefig('Task_1.3_Frequency_response_dB.svg')
#Using Hamming Window Function
h = h * np.hamming(400+1)
xfh = np.fft.fft(h)
fh = np.linspace(0,fs,len(xfh))
plt.figure(7)
plt.plot(fh, 20*np.log10(xfh))
plt.title('Frequency Response with Hamming Window Function')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decibel (dB)')
plt.savefig('Frequency_Response_Hamming.svg')
#Filtering ECG with FIR Filter
fil = fir(h)
filecg = np.zeros(len(mV))
for i in range(len(mV)):
filecg[i] = fil.filter(mV[i])
plt.figure(8)
plt.plot(t,filecg)
plt.title('Filtering ECG with FIR Filter')
plt.xlabel('Time (ms)')
plt.ylabel('Voltage (mV)')
plt.xlim(0,5000)
plt.savefig('ECG_after_FIR.svg')
xfilecg = np.fft.fft(filecg) #Converting into Frequency Domain
plt.figure(9)
plt.plot(f,abs(xfilecg))
plt.title('Filtering ECG with FIR Filter in Frequency Domain')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.xlim(-5,500)
plt.savefig('ECG_after_FIR_Hz.svg')
###############################################################################
###############################################################################
""" 1 ECG Filtering: Task 4 """
#Creating impulse response with numerical calculation
ntaps =1000 #number of tabs
f1 =int(45.0/fs*ntaps) #Indice for 45Hz
f2 =int(55.0/fs*ntaps) #Indice for 55Hz
f0 =int(1/fs*ntaps) #Indice for baseline shift
f_resp = np.ones(ntaps)
f_resp[f1:f2+1] = 0 #remove noise
f_resp[ntaps-f2: ntaps- f1+1] = 0 #mirror of f2 and f1
f_resp[ntaps-f0: ntaps- 0+1] = 0 #mirror of baseline shift
f_resp[0:f0+1] = 0 #remove baseline shift
plt.figure(10)
plt.plot(f_resp)
plt.title('Discrete Spectrum')
plt.xlabel('Number of Tabs (n)')
plt.ylabel('Amplitude')
plt.savefig('Discrete_Spectrum.svg')
coeff_tmp = np.fft.ifft(f_resp)
coeff_tmp = np.real(coeff_tmp) #want only real value from complex number
coeff = np.zeros(ntaps) #empty impulse response
coeff[0:int(ntaps/2)] = coeff_tmp[int(ntaps/2):ntaps] #fix the signal position
coeff[int(ntaps/2):ntaps] = coeff_tmp[0:int(ntaps/2)]
plt.figure(11)
plt.plot(coeff)
plt.title('Impulse Response for Numerical Calculation')
plt.xlabel('Number of Tabs (n)')
plt.ylabel('h(n)')
plt.savefig('Impulse_Response_numerical_calculation.svg')
#Using Hamming Window Function
coeff = coeff * np.hamming(1000)
plt.figure(12)
plt.plot(coeff)
plt.title('Impusle Response with Hamming Window Function')
plt.xlabel('Number of Tabs(n)')
plt.ylabel('h(n)')
plt.savefig('Task_1.4_Impulse_Response_Hamming.svg')
fil = fir(coeff)
filecg = np.zeros(len(mV))
for i in range(len(mV)):
filecg[i] = fil.filter(mV[i])
plt.figure(13)
plt.plot(t,filecg)
plt.title('Filtering ECG with FIR Filter using Numerical Calculation')
plt.xlabel('Time (ms)')
plt.ylabel('Voltage (mV)')
plt.savefig('FIR_ECG_Numerical_Calsulation.svg')
xfilecg = np.fft.fft(filecg) #Converting into Frequency Domain
plt.figure(14)
plt.plot(f,abs(xfilecg))
plt.title('Filtering ECG with FIR Filter using Numerical Calculation')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Amplitude')
plt.xlim(-5,500)
plt.savefig('FIR_ECG_Numerical_Calsulation_Hz.svg')
############################################################################### |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.