text stringlengths 38 1.54M |
|---|
# ------------------------------------------------------------------------ #
# Title: Assignment 08
# Description: Working with classes
# ChangeLog (Who,When,What):
# RRoot,1.1.2030,Created started script
# RRoot,1.1.2030,Added pseudo-code to start assignment 8
# Jonathan Ou, 5/30/20, Modified code to product class
# Jonathan Ou, 5/31/21, added code to IO class
# Jonathan Ou, 5/31/21, added code to Main Script Body
# ------------------------------------------------------------------------ #
# Data -------------------------------------------------------------------- #
strFileName = 'products.txt'   # data file loaded at startup and written on save
lstOfProductObjects = []       # in-memory table of Product objects
objFile = None                 # An object that represents a file
strStatus = ""                 # status message shown after each menu action
class Product(object):
    """Stores data about a product.

    properties:
        product_name: (string) the product's name (title-cased on read)
        product_price: (float) the product's standard price
    """

    def __init__(self, product_name, product_price):
        # Backing fields; validation happens in the property setters below.
        # The price default is 0.0 (was ''): a str default would break the
        # float() conversion in the getter if it were ever read unset.
        self.__product_name = ''
        self.__product_price = 0.0
        self.product_name = product_name
        self.product_price = product_price

    @property
    def product_name(self):
        # Normalize presentation to Title Case on every read.
        return str(self.__product_name).title()

    @product_name.setter
    def product_name(self, name):
        # Reject purely numeric names such as "123".
        if not name.isnumeric():
            self.__product_name = name
        else:
            raise Exception("Names can't be numbers")

    @property
    def product_price(self):
        return float(self.__product_price)

    @product_price.setter
    def product_price(self, value: float):
        try:
            self.__product_price = float(value)  # cast to float
        except (ValueError, TypeError):
            # TypeError covers non-castable values (None, list, ...) in
            # addition to bad strings.
            raise Exception("Prices must be numbers")

    # -- Methods --
    def to_string(self):
        """Return the CSV-style form, identical to str(self)."""
        return self.__str__()

    def __str__(self):
        return self.product_name + "," + str(self.product_price)
# Data -------------------------------------------------------------------- #
# Processing ------------------------------------------------------------- #
class FileProcessor:
    """Processes data to and from a file and a list of product objects.

    methods:
        save_data_to_file(file_name, list_of_product_objects)
        read_data_from_file(file_name): -> (a list of product objects)
    """
    # NOTE: the stray class-level `pass` from the original was removed.

    @staticmethod
    def read_data_from_file(file_name: str):
        """Read data from a file into a list of Product rows.

        :param file_name: (string) with name of file
        :return: (list) of Product objects; empty if the file is missing/bad
        """
        list_of_rows = []
        try:
            # 'with' guarantees the handle is closed even when a row raises.
            with open(file_name, "r") as file:
                for line in file:
                    prod = line.split(",")
                    row = Product(prod[0], prod[1])
                    list_of_rows.append(row)
        except Exception as e:
            print("There was a general error!")
            print(e, e.__doc__, type(e), sep='\n')
        return list_of_rows

    @staticmethod
    def save_data_to_file(file_name: str, list_of_objects: list):
        """Write data to a file from a list of object rows.

        :param file_name: (string) with name of file
        :param list_of_objects: (list) of objects whose str() form is saved
        :return: (bool) success status
        """
        success_status = False
        try:
            with open(file_name, "w") as file:
                for row in list_of_objects:
                    file.write(str(row) + "\n")
            success_status = True
        except Exception as e:
            print("There was a general error!")
            print(e, e.__doc__, type(e), sep='\n')
        return success_status
# Processing ------------------------------------------------------------- #
# Presentation (Input/Output) -------------------------------------------- #
class IO:
    """Performs console input/output tasks.

    methods:
        print_menu_tasks()
        input_menu_choice()
        print_current_product_data()
        input_product_data()
        input_yes_no_choice()
        input_press_to_continue()
    """

    @staticmethod
    def print_menu_tasks():
        """ Display a menu of choices to the user
        :return: nothing
        """
        print('''
        Menu of Options
        1) Show Current Products
        2) Add New Product Details
        3) Save Data to File
        4) Exit Program
        ''')
        print()

    @staticmethod
    def input_menu_choice():
        """ Gets the menu choice from a user
        :return: (string) the choice, whitespace-stripped
        """
        choice = input("Which option would you like to perform? [1 to 4] - ").strip()
        print()  # Add an extra line for looks
        return choice

    @staticmethod
    def print_current_product_data(lstOfProductObjects):
        """ Print each product as 'name,price', one per line
        :param lstOfProductObjects: (list) of Product objects to display
        :return: nothing
        """
        print("******* The current Product details are: *******")
        for row in lstOfProductObjects:
            print(str(row.product_name)
                  + ","
                  + str(row.product_price))
        print("*******************************************")

    @staticmethod
    def input_product_data():
        """ Prompt for a new product's name and price and build a Product.

        NOTE(review): float() raises ValueError on non-numeric input here
        and the caller does not catch it - consider validating.

        :return: (Product) built from the user's input
        """
        name = str(input("What is your new product? - ").strip())
        price = float(input("What is your product price - ").strip())
        prod = Product(product_name=name, product_price=price)
        print()
        return prod

    @staticmethod
    def input_yes_no_choice(message):
        """ Gets a yes or no choice from the user
        :param message: prompt shown to the user
        :return: (string) lower-cased, stripped response
        """
        return str(input(message)).strip().lower()

    @staticmethod
    def input_press_to_continue(optional_message=''):
        """ Pause program and show a message before continuing
        :param optional_message: An optional message you want to display
        :return: nothing
        """
        print(optional_message)
        input('Press the [Enter] key to continue.')
# Presentation (Input/Output) -------------------------------------------- #
# Main Body of Script ---------------------------------------------------- #
# Load data from file into a list of product objects when script starts,
# then loop: show menu, get choice, show/add/save data, or exit.

# Step 1 - When the program starts, Load data from products.txt.
lstOfProductObjects = FileProcessor.read_data_from_file(strFileName)  # read file data

while (True):
    IO.print_menu_tasks()  # Shows menu
    strChoice = IO.input_menu_choice()  # Get menu option
    # Process user's menu choice
    if strChoice.strip() == '1':  # show current products
        IO.print_current_product_data(lstOfProductObjects)  # Show current data in the list/table
        IO.input_press_to_continue(strStatus)
        continue  # to show the menu
    elif strChoice == '2':  # add new product details
        # NOTE(review): Product's setters raise on bad input and nothing
        # catches it here, so a typo ends the program - consider try/except.
        lstOfProductObjects.append(IO.input_product_data())
        IO.input_press_to_continue(strStatus)
        continue  # to show the menu
    elif strChoice == '3':  # Save Data to File
        strChoice = IO.input_yes_no_choice("Save this data to file? (y/n) - ")
        if strChoice.lower() == "y":
            FileProcessor.save_data_to_file(strFileName, lstOfProductObjects)
            IO.input_press_to_continue(strStatus)
        else:
            IO.input_press_to_continue("Save Cancelled!")
        continue  # to show the menu
    elif strChoice == '4':  # Exit Program
        print("Goodbye!")
        break  # and Exit
# Main Body of Script ---------------------------------------------------- #
|
#!/usr/bin/python3
import os.path as path
import socket
import subprocess as sp
import psutil
NODE = "/usr/bin/node"
SERVER = "/opt/cloud9/c9sdk/server.js"
USER = sp.check_output(["whoami"]).decode().rstrip()
HOME = path.expanduser("~")
def is_cloud9(pinfo):
    """Return True if *pinfo* describes this user's cloud9 server process.

    :param pinfo: dict from Process.as_dict with 'username' and 'cmdline'
    """
    cmd = pinfo["cmdline"]
    # Guard: cmdline can be None or short for kernel/zombie processes, which
    # would raise TypeError/IndexError on indexing.  The caller also reads
    # cmd[3] (the port), so require at least 4 entries.
    if not cmd or len(cmd) < 4:
        return False
    return (pinfo["username"] == USER
            and cmd[0] == NODE
            and cmd[1] == SERVER
            and cmd[2] == "-p")
def get_cloud9():
    """Scan running processes for an existing cloud9 server.

    :return: (pid, port) of the server, or (None, None) if not running
    """
    pid, port = None, None
    for proc in psutil.process_iter():
        try:
            pinfo = proc.as_dict(attrs=["pid", "username", "cmdline"])
        except psutil.NoSuchProcess:
            # Process exited between listing and inspection; skip it.
            pass
        else:
            if is_cloud9(pinfo):
                # cmdline layout: [node, server.js, -p, <port>, ...]
                pid, port = pinfo["pid"], int(pinfo["cmdline"][3])
    return pid, port
def free_port():
    """Ask the OS for an unused TCP port number and return it."""
    sock = socket.socket()
    try:
        sock.bind(("", 0))  # port 0 => kernel picks a free ephemeral port
        return sock.getsockname()[1]
    finally:
        sock.close()
def start_cloud9(port=None):
    """Launch the cloud9 server and return (pid, port).

    :param port: TCP port to listen on; a free one is chosen when None
    stdout/stderr are redirected to ~/.cloud9.log.
    """
    if port is None:
        port = free_port()
    with open(path.join(HOME, ".cloud9.log"), "w") as fh:
        cmd = [NODE, SERVER, "-p", str(port), "-w", HOME]
        proc = sp.Popen(cmd, stdout=fh, stderr=fh)
    return proc.pid, port
if __name__ == "__main__":
    # Reuse a running server when one exists; otherwise start a new one.
    pid, port = get_cloud9()
    if pid is None or port is None:
        pid, port = start_cloud9()
    print(pid, port, sep="\n")
|
from flask import Flask, request, render_template, redirect, url_for
import json

# Set project directory as static directory
app = Flask(__name__, template_folder = "templates")

# TODO: Make number of lines come from questions.json
lines = 1  # number of questions rendered by /test


# Set app routes
@app.route("/")
def root():
    """Homepage"""
    return render_template("index.html")


@app.route("/", methods = ["GET", "POST"])
def mode_redirect():
    """Redirect user as candidate or administrator.

    NOTE(review): this registers a second view function for "/" - verify
    which one Flask actually serves for GET requests.
    """
    if request.method == "POST":
        # Set default value to avoid 404
        default_name = "0"
        selected = request.form.get("mode", default_name)
        # Redirect candidates to test taking page
        if selected == "candidate":
            return redirect(url_for("test"))
        # Redirect administrators to admin page
        elif selected == "admin":
            return redirect(url_for("admin_show"))
        # NOTE(review): any other value falls through and returns None.


@app.route("/admin")
def admin_show():
    """Show admin login"""
    return render_template("admin.html")


@app.route("/questions")
def questions():
    """Show page to add questions"""
    return render_template("questions.html")


@app.route("/invalid")
def invalid():
    """Show invalid login attempt"""
    return render_template("invalid.html")
# TODO: add real authentication
@app.route("/admin", methods = ["GET", "POST"])
def admin_handle():
    """Handle admin login by checking the form against creds.json."""
    if request.method == "POST":
        # Set default name to avoid 404
        default_name = "0"
        # Get credentials given by user
        username = request.form.get("username", default_name)
        password = request.form.get("password", default_name)
        # creds.json layout used here: {"admin": [<username>, <password>]}
        with open("creds.json", "r") as f:
            creds = json.load(f)
        username_real = creds["admin"][0]
        password_real = creds["admin"][1]
        # NOTE(review): plaintext credential comparison - see TODO above.
        if username == username_real and password == password_real:
            return redirect(url_for("questions"))
        else:
            return redirect(url_for("invalid"))
@app.route("/questions", methods = ["GET", "POST"])
def add_questions():
    """Read a new question from the form and append it to questions.json."""
    if request.method == "POST":
        # Set default name to avoid 404
        default_name = "0"
        # Get question details
        question = request.form.get("new_question", default_name)
        count = 1  # TODO: derive the question number from existing entries
        a = request.form.get("a", default_name)
        # Bug fix: b, c and d all read form field "a" (copy-paste error),
        # so three of the four answer options were silently lost.
        b = request.form.get("b", default_name)
        c = request.form.get("c", default_name)
        d = request.form.get("d", default_name)
        answer = request.form.get("answer", default_name)
        # Construct dictionary for new question
        new_question = {
            "number": count,
            "question": question,
            "a": a,
            "b": b,
            "c": c,
            "d": d,
            "answer": answer
        }
        # Dump the constructed dictionary to the JSON-lines database
        with open("questions.json", "a") as f:
            json.dump(new_question, f)
            f.write("\n")
        # Redirect back to new question page
        return redirect(url_for("questions"))
@app.route("/test")
def test():
    """Show test taking page for the question stored in questions.json."""
    # TODO: dump answers to database
    # NOTE(review): add_questions writes one JSON object per line, but
    # json.load parses a single document - this raises once a second
    # question is stored.  Verify the intended file format.
    with open("questions.json", "r") as f:
        questions = json.load(f)
    for i in range(lines):
        # Pull the rendered fields out of the question dict.
        for key, value in questions.items():
            if key == "question":
                question = value
            if key == "a":
                a = value
            if key == "b":
                b = value
            if key == "c":
                c = value
            if key == "d":
                d = value
        # NOTE(review): returns on the first loop pass, so only one question
        # is ever shown; question/a/b/c/d raise NameError if a key is absent.
        return render_template("test.html", question = question, a = a, b = b, c = c, d = d)
answer_count = 1  # running number of the answer being submitted


@app.route("/test", methods = ["GET", "POST"])
def test_form():
    """Handle form for test taking page"""
    global answer_count
    if request.method == "POST":
        # Set default name to avoid 404
        default_name = "0"
        # Get answer
        answer = request.form.get("answer", default_name)
        response = {
            "number" : answer_count,
            "answer" : answer
        }
        # NOTE(review): mode "w" truncates answers.json on every submission,
        # so only the latest answer survives - "a" (append) looks intended.
        with open("answers.json", "w") as f:
            json.dump(response, f)
            f.write("\n")
        answer_count += 1
    return render_template("test.html")
# TODO: add scoring mechanism
from main import Week, db
from datetime import datetime, timedelta

# Seed the weeks table: 51 consecutive weeks starting 2014-09-01.
start = datetime(2014, 9, 1, 0, 0, 0)
for i in range(1, 52):
    # Each week spans start .. start+6d23h59m59s (inclusive end timestamp).
    end_date = start + timedelta(days=6, hours=23, minutes=59, seconds=59)
    week = Week(start_date=start, end_date=end_date,
                week_id=end_date.strftime('%V'))  # ISO week number of the end date
    db.session.add(week)
    start += timedelta(days=7)
db.session.commit()
|
# -*- coding:utf-8 -*-
# Definition for a Node.
class Node:
    """Linked-list node carrying a value plus an extra 'random' pointer."""

    def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
        self.val = int(x)      # value is coerced to int
        self.next = next       # successor node, or None at the tail
        self.random = random   # arbitrary extra pointer, or None
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes have an extra 'random' pointer.

        O(n) time, O(1) extra space: copies are interleaved with the original
        nodes instead of tracked in a hash map.
        """
        if head is None: return None
        # Pass 1 - insert a copy after each node: 1->2->3 becomes 1->1'->2->2'->3->3'
        cur = head
        while cur:
            new_node = Node(cur.val)
            new_node.next = cur.next
            cur.next = new_node
            cur = cur.next.next
        cur = head
        # Pass 2 - wire each copy's random: the copy of cur.random is
        # cur.random.next (the clone interleaved right after it).
        while cur:
            cur.next.random = cur.random.next if cur.random else None
            cur = cur.next.next
        # Pass 3 - split the interleaved list back into original and copy.
        old_list = head
        new_list = head.next
        ans = new_list
        while old_list:
            old_list.next = old_list.next.next
            new_list.next = new_list.next.next if new_list.next else None
            old_list = old_list.next
            new_list = new_list.next
        return ans
if __name__ == '__main__':
    # Build a small 3-node list to exercise the copy.
    n1 = Node(3)
    n2 = Node(3)
    n3 = Node(3)
    n1.next = n2
    n2.next = n3
    n1.random = n3.next  # n3.next is None at this point, so this stays None
    n2.random = n1
    n3.random = n3.next  # likewise None
    ans = Solution().copyRandomList(n1)
|
def _ask_year(prompt):
    """Re-prompt until the user types a whole number.

    Fixes the failure cases the original noted: strings, decimals, empty
    input and lone spaces all crashed int(input(...)).
    """
    while True:
        raw = input(prompt).strip()
        try:
            return int(raw)
        except ValueError:
            print("Please type a whole number, e.g. 2030")


theYearIEntered = _ask_year("Please enter a year in the future")
theYearIWasBornIn = _ask_year("Please enter the year in which you were born")
print(type(theYearIEntered))
print(type(theYearIWasBornIn))
print("My age in", theYearIEntered, " will be", theYearIEntered-theYearIWasBornIn)
# Tests
# 1) Entered a string - now re-prompts
# 2) Enter a decimal - now re-prompts
# 3) Empty string - now re-prompts
# 4) a space - now re-prompts
|
import re
class Solution:
    def myAtoi(self, s):
        """Parse the leading optionally-signed integer of *s*, clamped to
        the signed 32-bit range.

        :param s: input string; leading whitespace is skipped
        :return: parsed int, or 0 when no number leads the string
        """
        # Raw string fixes the invalid "\s" escape in a normal string
        # literal (a DeprecationWarning/SyntaxWarning in recent Pythons).
        matches = re.search(r"^\s*([-+]?[0-9]+)", s)
        num = int(matches.group(1)) if matches else 0
        # Clamp to [-2**31, 2**31 - 1] per the atoi contract.
        int_min, int_max = -(2 ** 31), 2 ** 31 - 1
        return max(int_min, min(int_max, num))
# print(match)
# print(matches.group())

# Quick manual check: expect -4193 (leading signed integer of s).
sol = Solution()
s = "-4193 with words"
# s = "words and -987"
print(sol.myAtoi(s))
|
# -*- coding: utf-8 -*-
"""Unit tests for the NipsaUser model: init, lookup, listing, constraints."""
import pytest

from sqlalchemy import exc

from h.api.nipsa import models


@pytest.mark.usefixtures("db_session")
def test_init():
    # The constructor stores the userid verbatim.
    nipsa_user = models.NipsaUser("test_id")
    assert nipsa_user.userid == "test_id"


def test_get_by_userid_with_matching_user(db_session):
    nipsa_user = models.NipsaUser("test_id")
    db_session.add(nipsa_user)
    # Lookup should find the row just added to the session.
    assert models.NipsaUser.get_by_userid("test_id") == nipsa_user


@pytest.mark.usefixtures("db_session")
def test_get_by_userid_not_found():
    assert models.NipsaUser.get_by_userid("does not exist") is None


@pytest.mark.usefixtures("db_session")
def test_all_with_no_rows():
    assert models.NipsaUser.all() == []


def test_all_with_one_row(db_session):
    nipsa_user = models.NipsaUser("test_id")
    db_session.add(nipsa_user)
    assert models.NipsaUser.all() == [nipsa_user]


def test_all_with_multiple_rows(db_session):
    nipsa_user1 = models.NipsaUser("test_id1")
    db_session.add(nipsa_user1)
    nipsa_user2 = models.NipsaUser("test_id2")
    db_session.add(nipsa_user2)
    nipsa_user3 = models.NipsaUser("test_id3")
    db_session.add(nipsa_user3)
    # NOTE(review): asserts insertion order - verify all() guarantees ordering.
    assert models.NipsaUser.all() == [nipsa_user1, nipsa_user2, nipsa_user3]


def test_two_rows_with_same_id(db_session):
    db_session.add(models.NipsaUser("test_id"))
    with pytest.raises(exc.IntegrityError):
        # A duplicate id must violate a uniqueness constraint on flush.
        db_session.add(models.NipsaUser("test_id"))
        db_session.flush()


def test_null_id(db_session):
    with pytest.raises(exc.IntegrityError):
        db_session.add(models.NipsaUser(None))
        db_session.flush()
|
state = 1  # presumably consumed by the tools harness - TODO confirm

from tools import input, initFileInputter
initFileInputter('stockfile_1.txt')  # redirect input() to read the stock file
import numpy as np

# First line: money available, stock count (as float) and a third field.
m,kf,df = map(float,input().split())
k = int(kf)
stckop = {}
for i in range(k):
    row = list(input().split())
    # Per stock: name, owned share count, then the price history.
    stockname, stocks, history = row[0], int(row[1]),list(map(float,row[2:]))
    #print(history)
    #x = np.arange(0,len(history)+1)
    x = np.arange(0,4)
    #x = np.logspace(2, 3, num=len(history)+1)
    y = np.array(history[-3:])  # fit on the last 3 prices only
    #import matplotlib.pyplot as plt
    #plt.plot(x[:-1],y)
    pf = np.polyfit(x[:-1],y,1)  # linear trend through the recent prices
    ev1 = pf[0]*x[-2] + pf[1]
    ev2 = pf[0]*x[-1] + pf[1]
    exv = ev2 - ev1 + history[-1]  # extrapolated next price (unused below)
    #print(stockname, history[-1], exv)
    #print(x,y)
    stckop[stockname] = {"n" : stocks, "y" : history[-1], "t" : ev2}
#print(stckop)
#plt.show()
# Buy candidates: predicted price above current, best upside first.
bestbuys = sorted([x for x in stckop.items() if x[1]["t"] > x[1]["y"]], key=lambda x: x[1]["t"] - x[1]["y"],reverse=True)
# Sell candidates: owned stocks whose predicted price is below current.
shouldsell = sorted([x for x in stckop.items() if x[1]["n"] > 0 and x[1]["t"] < x[1]["y"]], key=lambda x: x[1]["t"] - x[1]["y"])
#print(bestbuys)
#print(shouldsell)
trns = []
for ss in shouldsell:
    # Sell half the holding (at least one share).
    trns.append((ss[0],"SELL",max(int(ss[1]["n"]) // 2 ,1)))
    #trns.append((ss[0],"SELL",int(ss[1]["n"])))
    # print("{} SELL {}".format(ss[0],int(ss[1]["n"])))
for bb in bestbuys:
    if (m > 0):
        tb = m // bb[1]["y"]  # max shares affordable at the current price
        # trns.append({"s": bb[0], "a": "BUY", "q": int(tb)})
        if (tb > 0):
            #trns.append((ss[0], "SELL", max(int(ss[1]["n"]) // 2, 1)))
            amt = max(int(tb) // 2, 1)  # buy half of what we can afford
            trns.append((bb[0], "BUY", amt))
            # print("{} BUY {}".format(bb[0],int(tb)))
            m = m - amt*bb[1]["y"]
# Output: transaction count, then one "<name> <BUY|SELL> <qty>" per line.
print(len(trns))
for trn in trns:
    print(*trn)
|
import random
from sys import exit

# Build a random n x n matrix, print it, swap its halves, then delete the
# column with the largest integer-average.
n = int(input('размер матрицы '))
if n == 1:
    print('плохой ввод')
    exit()
m = n
A = []
stolb =0
z = -1
a = [[random.randrange(0,9) for y in range(n)] for x in range(m)]
print('----------Исходная матрица----------')
for i in range(m):
    print(' ', a[i])
print()
# Swap the top and bottom halves of the matrix row-wise.
if n % 2 == 0:
    for i in range(n//2):
        for j in range(n):
            a[i][j], a[i + n//2][j] = a[i + n//2][j], a[i][j]
    for i in range(m):
        print(' ', a[i])
else:
    # Odd size: offset n//2+1 leaves the middle row in place.
    for i in range(n//2):
        for j in range(n):
            a[i][j], a[i + n//2+1][j] = a[i + n//2+1][j], a[i][j]
    for i in range(m):
        print(' ', a[i])
# Integer-average each column into A.
for i in range(n):
    summ = 0
    k = 0
    for j in range(m):
        summ = summ + a[j][i]
        k = k + 1
    A.append(summ//k)
print(A)
# Find the column with the largest average.
maxx = A[0]
for i in range(n):
    if maxx < A[i]:
        maxx = A[i]
print(maxx)
stolb = A.index(maxx)
print(stolb)
# Delete that column: shift everything right of it one step left, then
# drop the now-duplicated last column.
p = 0
if stolb != n-1:
    for i in range(n-1):
        a[i][stolb] = a[i][stolb+1]
    for r in range(stolb, m-1):
        for i in range(n):
            a[i][stolb+p] = a[i][stolb+1+p]
        p = p + 1
for i in range(m):
    del (a[i][n-1])
for i in range(m):
    print(' ', a[i])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-03-19 11:51
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):
    """Remove Order.phone_number (auto-generated migration)."""

    dependencies = [
        ("checkout", "0003_auto_20200318_1730"),
    ]

    operations = [
        migrations.RemoveField(model_name="order", name="phone_number",),
    ]
|
# coding=utf-8
##################################
# NOTE: this is a deprecated/abandoned version of the script.
###################################
import datetime
import numpy as np
#import scipy.misc
import cv2
import os
import csv
import shutil

from DBUtil import DBUtil
from conf.config import config
from store_to_execl import csv_operator
def judegzero(width, height):
    """Return 1 when both dimensions are non-zero, else 0.

    Guards the divisions by entity width/height in producepicture.
    (Name typo kept: callers use 'judegzero'.)
    """
    return 1 if width != 0 and height != 0 else 0
def judgeoutborder(width,height):
    """Return 1 when the point (width, height) lies inside the configured
    click area, 0 otherwise."""
    conf=config()
    heightborder= conf.heightborder
    widthborder = conf.widthborder
    del conf
    if height<heightborder and width<widthborder:  # note: strictly less than the border
        if height>=0 and width>=0:  # click position must be non-negative
            return 1
    return 0
def producepicture(spm,startdate,enddate):
    """Build a click heat-map image for one spm page and save it to imgs/.

    Also records (spm, max clicks, total clicks) via csv_operator when the
    page has at least 1000 valid clicks.
    """
    conn=DBUtil()
    tablename=conn.tablename
    print("现在处理的spm:",spm)
    sql = "SELECT slideend_x,slideend_y,entity_x,entity_y,entity_width,entity_height FROM " \
        +tablename+" where spm=%s and dt>=%s and dt<=%s and touch_type=2 order by pos limit 0,100000; "
    args=(spm,startdate,enddate)
    results =conn.executesearch(sql,args)
    conf = config()
    processim = np.zeros([conf.heightborder,conf.widthborder], dtype=int)  # height x width
    count=0
    for data in results:
        if judegzero(data[4],data[5])!=0:
            # Normalize the click position relative to its container box:
            # (click - container origin) / container size * border size.
            x =int((data[0]-data[2])/data[4]*conf.widthborder)
            y=int((data[1]-data[3])/data[5]*conf.heightborder)
            if judgeoutborder(x,y):
                count=count+1
                processim[y, x] =processim[y, x]+1
        if count%1000==0:
            # Progress log every 1000 valid clicks.
            print("处理数据进度:",count)
            print(str(data))
    maxcount = np.max(processim)
    print("最大点击次数为:",maxcount)
    # Scale so the hottest cell maps to 255 (grayscale intensity).
    processim = processim * 255 / maxcount
    new_path ="imgs/"+spm+startdate+enddate+".png"
    print("总点击次数为:",count)
    if(count>=1000):
        csv_operator.saveexecl(spm,maxcount,count)
    #scipy.misc.imsave(new_path, processim)
    cv2.imwrite(new_path, processim)
def spmlist(startdate,enddate):
    """Return the distinct spm values with click events in the date range."""
    conn = DBUtil()
    conf = config()
    # NOTE(review): reads tablename from config here, while producepicture
    # reads it from DBUtil - confirm both resolve to the same table.
    tablename = conf.tablename
    sql = "SELECT distinct spm FROM " \
        +tablename+" where dt>=%s and dt<=%s and touch_type=2 ; "
    args=(startdate,enddate)
    results = conn.executesearch(sql, args)
    return results
def removeimgs():
    """Reset the output directory: drop any stale 'imgs' tree, recreate it empty."""
    target = "imgs"
    if os.path.isdir(target):
        try:
            shutil.rmtree(target)
        except Exception as err:
            print(err)
    os.mkdir(target)
if __name__ == '__main__':
    removeimgs()  # start from a clean output directory
    conf = config()
    # Date window is derived from the config's relative day offsets.
    startdate=(datetime.datetime.now() + datetime.timedelta(days=conf.starttime)).strftime("%Y%m%d")
    enddate = (datetime.datetime.now() + datetime.timedelta(days=conf.endtime)).strftime("%Y%m%d")
    results=spmlist(startdate,enddate)
    # NOTE(review): this rebinds (shadows) the spmlist function; harmless
    # here because it is not called again, but confusing.
    spmlist=[]
    for spm in results:
        producepicture(spm[0], startdate, enddate)
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages

from content.forms.article import ArticleForm
from content.models.article import Article, ArticleCategory


@login_required
def index(request):
    """List the logged-in user's own articles."""
    article_list = Article.objects.filter(author=request.user)
    # NOTE(review): rendering with locals() exposes every local name to the
    # template; an explicit context dict would be clearer.
    return render(request, 'content/article/index.html', locals())


@login_required
def view(request, slug):
    """Display one article looked up by slug.

    NOTE(review): Article.objects.get raises DoesNotExist (HTTP 500) for an
    unknown slug - get_object_or_404 would yield a 404 instead.
    """
    article = Article.objects.get(slug=slug)
    return render(request, 'content/article/view.html', locals())
@login_required
def add(request):
    """Create a new Article owned by the current user."""
    form = ArticleForm()
    if request.method == "POST":
        data = request.POST
        # Pass request.FILES as well, matching edit(): without it any file
        # or image field on the form is silently dropped.
        form = ArticleForm(data, request.FILES)
        print(data)
        if form.is_valid():
            # commit=False so the author can be attached before saving.
            new_form = form.save(commit=False)
            new_form.author = request.user
            new_form.save()
            form.save_m2m()  # m2m relations are deferred by commit=False
            messages.success(request, "Content saved successfully")  # typo fix: "successfuly"
            return redirect('content-article-edit', slug=new_form.slug)
        else:
            messages.error(request, "Form can't be saved")
    return render(request, 'content/article/add.html', locals())
@login_required
def edit(request, slug):
    """Edit an existing Article identified by slug."""
    article = Article.objects.get(slug=slug)
    form = ArticleForm(instance=article)
    if request.method == "POST":
        data = request.POST
        form = ArticleForm(data, request.FILES, instance=article)
        if form.is_valid():
            # commit=False so the author can be attached before saving.
            new_form = form.save(commit=False)
            new_form.author = request.user
            new_form.save()
            form.save_m2m()  # m2m relations are deferred by commit=False
            messages.success(request, "Content saved successfully")  # typo fix: "successfuly"
            return redirect('content-article-edit', slug=new_form.slug)
        else:
            messages.error(request, "Form can't be saved")
    return render(request, 'content/article/edit.html', locals())
|
import socket
import os
from colorama import Fore, Back, Style


class Client:
    """Line-oriented TCP client for the file-index server (Python 2 code).

    Protocol: text messages end with the literal 'EOM'; file payloads end
    with 'EOIF'.
    """
    # NOTE(review): these are class attributes shared by every instance;
    # __init__ rebinds host/port per instance but the socket 's' is shared.
    s = socket.socket()
    host = ''
    port = ''
    alive = 1
    list_of_commands = ['FileHashcheckall','FileHashverify','FileDownload','IndexGetlonglist','IndexGetshortlist','verify']

    def __init__(self, portnumber):
        # Connects to a server running on this same machine.
        self.host = socket.gethostname()
        self.port = portnumber
        self.s.connect((self.host, self.port))

    def receivefromserver(self):
        """Read 1KB chunks and print them until one ends with 'EOM'."""
        close = 0
        while close == 0:
            m = self.s.recv(1024)
            if m.endswith('EOM'):
                close = 1
                print m[:-3]
            elif len(m):
                print m
        return

    def receivefile(self,fname):
        """Receive a file payload into *fname*; 'EOIF' marks end-of-file."""
        close = 0
        m = self.s.recv(1024)
        if m.endswith("is not found.EOM"):
            print fname + " is not found"
        else:
            f = open(fname, 'wb')
            # NOTE(review): the first received chunk 'm' is discarded in this
            # branch - verify the server sends a header before the file body.
            while close == 0:
                m = self.s.recv(1024)
                if m.endswith('EOIF'):
                    close = 1
                    f.write(m[:-4])
                elif len(m):
                    f.write(m)
            f.close()
        return

    def send_to_server(self, data):
        # Append the 'EOM' terminator the server expects.
        self.s.send(data+'EOM')
        return

    def takeinput(self):
        """Read one command from the user and dispatch it to the server."""
        print(Fore.RED + 'command>'),
        print(Style.RESET_ALL),
        x = raw_input()
        if x.strip() == 'help' or x.strip() == 'Help':
            self.print_help()
        elif x.strip() == 'clear':
            os.system('clear')
        elif len(x):
            if x == 'exit':
                self.alive = 0
                self.s.send("exitEOM")
                self.s.close()
            else:
                '''
                if ''.join(x.split(' ')[:-1]).translate(None,' ') in self.list_of_commands:
                    self.send_to_server(x.strip())
                    if ''.join(x.split(' ')[:-1]).translate(None,' ') == 'FileDownload':
                        self.receivefile(x.split(' ')[1].replace("'",""))
                    else:
                        #print "command is " + ''.join(x.split(' ')[:-1]).translate(None,' ')
                        self.receivefromserver()
                elif x.translate(None,' ') in self.list_of_commands:
                    #print "command is " + x.translate(None,' ')
                    self.send_to_server(x.strip())
                    self.receivefromserver()
                elif ''.join(x.split(' ')[:-2]) in self.list_of_commands and len(x.split(' ')) == 4:
                    self.send_to_server(x.strip())
                    self.receivefromserver()
                else:
                    print "Command not found"
                '''
                if x.split()[0] in self.list_of_commands:
                    # FileDownload needs exactly one filename argument.
                    if x.split()[0] == 'FileDownload':
                        if len(x.split()) == 2:
                            self.send_to_server(x.strip())
                            self.receivefile(x.split()[1].replace("'",""))
                        else:
                            print 'invalid number of arguments'
                    else:
                        self.send_to_server(x.strip())
                        self.receivefromserver()
                elif ''.join(x.split()) in self.list_of_commands:
                    # Command typed with internal spaces, e.g. "Index Getlonglist".
                    if ''.join(x.split()) == 'IndexGetshortlist':
                        print "please provide 2 arguments"
                    else:
                        self.send_to_server(x.strip())
                        self.receivefromserver()
                elif ''.join(x.split()[0:2]) in self.list_of_commands and len(x.split()) == 4:
                    # Two-word command plus two arguments (e.g. shortlist range).
                    self.send_to_server(x.strip())
                    self.receivefromserver()
                else:
                    print "Command not found"
        return

    def print_help(self):
        """Print the command reference table."""
        print " USER GUIDE \n\n"
        print "----------------------------------------------------------------------------\n"
        print "- COMMAND | DESCRIPTION -\n"
        print "---------------------------------------------------------------------------\n"
        print "- IndexGet longlist | Get name,size,timestamp,type -\n"
        print "---------------------------------------------------------------------------\n"
        print "- IndexGet shortlist | Get name,size,timestamp between 2 timestamps -\n"
        print "---------------------------------------------------------------------------\n"
        print "- verify <Filename> | verify if the file -\n"
        print "---------------------------------------------------------------------------\n"
        print "- FileHash Checkall | returns the last -\n"
        print "----------------------------------------------------------------------------\n"
        print "- FileDownload <filename> | saves the file in the local directory -\n"
        print "----------------------------------------------------------------------------\n"
        print "- clear | clears the screen -\n"
        print "----------------------------------------------------------------------------\n"
        print "- help | Displays list of all commands available -\n"
        print "----------------------------------------------------------------------------\n"
        print "- clear | Clears the screen -\n"
        print "----------------------------------------------------------------------------\n"
        print "- exit | Exists the terminal -\n"
        print "----------------------------------------------------------------------------\n"
        return

    def runclient(self):
        """Main loop: keep taking commands until 'exit' clears self.alive."""
        while self.alive:
            self.takeinput()
        print "closing client"
        return
if __name__ == '__main__':
    # Connect to the server on the hard-coded port and start the REPL.
    client1 = Client(8889)
    client1.runclient()
|
#!/usr/bin/env python
import os
import sys
import re

# Paths used throughout: the user's ssh config and where pem keys live.
ssh_config = os.environ['HOME'] + '/.ssh/config'
# key_location = os.environ['HOME'] + '/.ssh/ssh-keys/'
key_location = '/tmp/source/day2ops-keys/ssh-keys/'
box_ip = sys.argv[1]  # IP to add, passed as the first CLI argument
# r = '^10.\d+.\d+$'
r = '10\.\d+\.\d+'  # loose match on a 10.x.x prefix (unanchored)
def check_config_for_ip(box_ip):
    """Return True if *box_ip* already appears in the ssh config file.

    Reads the file directly instead of shelling out to `cat | grep`,
    avoiding a needless subprocess and shell-quoting pitfalls.
    """
    try:
        with open(ssh_config) as f:
            found = any(str(box_ip) in line for line in f)
    except IOError:
        # No config file yet behaves like grep-on-empty: treat as new entry.
        found = False
    if found:
        print("%s is already in config file." % str(box_ip))
        return True
    else:
        print("New entry")
        return False
class Entry(object):
    """Interactively collected ssh-config entry for one host IP."""

    def __init__(self, box_ip):
        self.ip = box_ip
        self.user = self._get_user()
        self.pem_key = self._get_pem_key()
        self.proxy_bool = self._get_proxy_bool()
        self.bastion = self._get_bastion()

    def _get_user(self):
        user = raw_input("User: ")
        return user

    def _get_pem_key(self):
        pem_key = raw_input("Pem key name: ")
        return pem_key

    def _get_proxy_bool(self):
        proxy_bool = raw_input("Proxy through bastion? (y/n) ")
        # Bug fix: compare with == rather than 'is'. 'is' tests object
        # identity and only matched interned strings by accident.
        if proxy_bool == "y":
            return True
        elif proxy_bool == "n":
            return False
        else:
            print("Invalid input")
            sys.exit(0)

    def _get_bastion(self):
        # Only meaningful when the host is reached through a bastion.
        if self.proxy_bool:
            bastion = raw_input("Bastion IP: ")
            return bastion
        else:
            return None

    def append_to_config(self):
        """Append this entry (plus optional ProxyCommand) to the ssh config."""
        config_to_add = "\n\nHost %s\n\tUser %s\n\tIdentityFile %s%s" % (str(self.ip), self.user, key_location, self.pem_key)
        if self.proxy_bool:
            hp = "%h:%p"
            config_to_add = config_to_add + "\n\tProxyCommand ssh -F %s -W %s %s" % (ssh_config, hp, self.bastion)
        # 'with' closes the file; the original's extra f.close() was redundant.
        with open(ssh_config, "a") as f:
            f.write(config_to_add)
# starting main program
if re.search(r, str(box_ip)):
    # Skip IPs that already have a config entry.
    check = check_config_for_ip(box_ip)
    if check:
        sys.exit(0)
    else:
        new_entry = Entry(box_ip)
        # new_entry = Entry(box_ip, user, bastion, pem_key)
        print("IP is %s" % new_entry.ip)
        print("User is %s" % new_entry.user)
        print("Pem key is %s" % new_entry.pem_key)
        print("Adding to %s" % ssh_config)
        new_entry.append_to_config()
else:
    print("Not valid IP")
    sys.exit(0)
|
class Vector(tuple):
    """Immutable 2-D vector backed by a tuple; supports +, -, scalar * and negation."""

    @property
    def x(self):
        """First component."""
        return self[0]

    @property
    def y(self):
        """Second component."""
        return self[1]

    def __hash__(self):
        # Defining __eq__ would otherwise reset __hash__ to None; keep the
        # tuple hash so Vectors stay usable as dict/set keys.
        return tuple.__hash__(self)

    def __add__(self, other):
        return Vector((self.x + other.x, self.y + other.y))

    def __sub__(self, other):
        return Vector((self.x - other.x, self.y - other.y))

    def __mul__(self, other: int):
        # Scalar multiplication (replaces tuple repetition semantics).
        return Vector((self.x * other, self.y * other))

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __ne__(self, other):
        return not (self.x == other.x and self.y == other.y)

    def __neg__(self):
        return Vector((-self.x, -self.y))
|
from django.core.management.base import BaseCommand, CommandError

import book_genie.search.amazon as amazon


class Command(BaseCommand):
    """Management command that pre-populates the Amazon search cache."""
    help = 'Fills the cache'

    def handle(self, *args, **options):
        # All work is delegated to the amazon search module.
        amazon.prepopulate_cache()
|
class AnalysisVersion:
    """Holds the analysis tool's version string."""

    # TODO: source this from packaging metadata rather than a literal.
    version = '0.6.4-BETA'

    def getVersion(self):
        """Return the current version string."""
        return self.version
|
from ngram import Model
from collections import Counter
from numpy import std, average
from wordcloud import WordCloud
from io import BytesIO


class StatisticsModel(Model):
    """N-gram Model extended with corpus statistics and word-cloud rendering."""

    def describe_word(self, word):
        """Summarize one word's usage in the corpus.

        :param word: the word to describe
        :return: (count, rank, following words, preceding words, words two
                 positions later), or None if the word is not in the corpus
        """
        indexed_block_start = self._word_index.get_index('_')
        indexed_empty_word = self._word_index.get_index('')
        # The all-empty padding key holds the single-word (unigram) counts.
        padding = (indexed_empty_word,) * (self._n - 1)
        words = self._indexed_word_data[padding].copy()
        words.pop(self._word_index.get_index('_'), None)  # drop '_' marker
        indexed_word = self._word_index.add_word(word)
        if indexed_word not in words:
            return None
        count = words[indexed_word]
        # 1-based rank by frequency.
        place = words.most_common().index((indexed_word, count)) + 1
        # Four most common words immediately following *word*.
        # NOTE(review): 'next' shadows the builtin of the same name.
        next = [self._word_index.get_word(i[0]) for i in
                self._indexed_word_data[padding[:-1] + (indexed_word,)].most_common(4)]
        prev_counter = Counter()
        over_one_counter = Counter()
        for key in self._indexed_word_data:
            # Keys whose last slot is a real word preceding *word* feed 'prev'.
            if key[:-1] == padding[:-1] and key[-1] not in (indexed_empty_word, indexed_block_start):
                candidates = self._indexed_word_data[key]
                if indexed_word in candidates:
                    prev_counter[key[-1]] += candidates[indexed_word]
            # Keys with *word* two slots back feed 'over_one'.
            if key[:-2] == padding[:-2] and key[-2] == indexed_word:
                over_one_counter.update(self._indexed_word_data[key])
        prev = [self._word_index.get_word(i[0]) for i in prev_counter.most_common(4)]
        over_one = [self._word_index.get_word(i[0]) for i in over_one_counter.most_common(4)]
        return count, place, next, prev, over_one

    def get_single_words(self):
        """Return a copy of the unigram counter with the '_' marker removed."""
        indexed_empty_word = self._word_index.get_index('')
        padding = (indexed_empty_word,) * (self._n - 1)
        words = self._indexed_word_data[padding].copy()
        words.pop(self._word_index.get_index('_'), None)
        return words

    def get_stop_words(self, indexed=False):
        """Return words whose frequency deviates from the mean by >3 sigma.

        :param indexed: when True, return word indexes instead of strings
        """
        words = self.get_single_words()
        values = list(words.values())
        deviation = std(values)
        avg = average(values)
        stops = set()
        for word in words:
            if abs(words[word] - avg) > deviation * 3:
                if indexed:
                    stops.add(word)
                else:
                    stops.add(self._word_index.get_word(word))
        return stops

    def get_top(self, count, order_desc=True):
        """Return *count* words ranked by frequency, stop words excluded.

        NOTE(review): order_desc=True produces an ascending sort here
        (reverse=not order_desc) - confirm the intended direction.
        """
        stops = self.get_stop_words(True)
        words = self.get_single_words()
        for stop in stops:
            words.pop(stop, None)
        words = sorted(words.items(), key=lambda x: x[1], reverse=not order_desc)
        words = [self._word_index.get_word(i[0]) for i in words]
        return list(words[0:count])

    def describe_all(self):
        """Return (frequency-of-frequencies, word-length) distributions,
        each as up to 14 sorted (value, count) pairs."""
        words = self.get_single_words()
        freq_dist = sorted(Counter(words.values()).most_common(14))
        word_lengths = [len(self._word_index.get_word(i)) for i in words.keys()]
        len_dist = sorted(Counter(word_lengths).most_common(14))
        return freq_dist, len_dist

    def word_cloud(self, color):
        """Render a 1280x720 word-cloud PNG and return it as a BytesIO.

        NOTE(review): the *color* parameter is accepted but never used.
        """
        wc = WordCloud(stopwords=self.get_stop_words(), width=1280, height=720)
        words = self.get_single_words()
        # Re-key the counter from word indexes to the words themselves.
        for index in list(words.keys()):
            words[self._word_index.get_word(index)] = words.pop(index)
        wc.generate_from_frequencies(words)
        byte_image = BytesIO()
        wc.to_image().save(byte_image, 'PNG')
        byte_image.seek(0)
        return byte_image
|
import glob
import numpy as np
from PIL import Image
import cv2
import quat_math as tfs
import scipy.io as scio
from tqdm import tqdm
def main():
    """Generate per-frame label images and pose .mat files for rendered
    YCB-Video depth renders (object classes 2..21, 3885 frames each)."""
    dataset_root = 'datasets/ycb/YCB_Video_Dataset'
    with open('datasets/ycb/dataset_config/classes.txt') as f:
        classes = f.read().split()
    # Index 0 is reserved for background so class ids line up with label values.
    classes.insert(0,'background')
    # with open('datasets/ycb/dataset_config/rendered_data_list.txt') as f:
    #     file_list = f.read().split()
    for obj in tqdm(range(2, 22)):
        file_list = ['depth_renders_offset/{}/{:04d}'.format(classes[obj], i) for i in range(3885)]
        for fn in tqdm(file_list):
            img = Image.open('{0}/{1}-color.png'.format(dataset_root, fn))
            obj_label = classes.index(fn.split('/')[-2])
            quat = np.load('{0}/{1}-trans.npy'.format(dataset_root, fn))
            # Alpha channel == 255 marks object pixels; write the class id there.
            label = np.where(np.array(img.split()[-1])==255, obj_label, 0)
            cv2.imwrite('{0}/{1}-label.png'.format(dataset_root, fn), label)
            # Single 3x4 pose: rotation from the stored quaternion plus a
            # fixed translation of 1 m along z.
            poses = np.zeros([3,4,1])
            poses[:3,:3,0] = tfs.quaternion_matrix(quat)[:3,:3]
            poses[:3,3,0] = [0.,0.,1.]
            scio.savemat('{0}/{1}-meta.mat'.format(dataset_root, fn),
                {'cls_indexes':np.array([[obj_label]]),
                'factor_depth':np.array([[10000]]),
                'poses':poses})
if __name__=='__main__':
    main()
|
# Celery application: RabbitMQ (pyamqp) as the broker, Redis as the result backend.
app = Celery('tasks', broker='pyamqp://guest@localhost//',backend='redis://localhost')
# A result backend persists task state/results (e.g. status, return value).
# There are several built-in result backends to choose from:
# SQLAlchemy/Django ORM,
# MongoDB,
# Memcached,
# Redis,
# RPC
# Custom Built
@app.task
def add(x, y):
    # Trivial demo task; the return value is stored in the result backend.
    return x + y
|
#!/usr/bin/env python3
import asyncio
import json
import websockets
import subprocess
from util import logger
from core.scanner import Scanner
from util import project_manager as pm
from util import upload as upload
from util import raspi_state as raspi
# Root folder used by the scanner for project storage.
APP_PATH = '/home/pi/.wcscanner'
# Currently connected websocket clients (one entry per open connection).
USERS = set()
scanner = Scanner()
logger = logger.logger
async def send_state_data():
    """
    Broadcast the full scanner state (project list + disk usage) to every
    connected client.
    """
    if USERS:
        data = dict()
        data['type'] = 'state_data'
        data['project_data'] = pm.get_projects_data()
        data['disk_usage_data'] = raspi.get_disk_info()
        # Fixed: asyncio.wait() no longer accepts bare coroutines (removed in
        # Python 3.11); gather schedules them explicitly.
        await asyncio.gather(*(user.send(json.dumps(data)) for user in USERS))
async def send_download_ready(project_name) :
    """Notify all clients that the zip for ``project_name`` is ready."""
    if USERS:
        data = {'type': "download_ready", 'project_name': project_name}
        # Fixed: asyncio.wait() no longer accepts bare coroutines (removed in
        # Python 3.11); gather schedules them explicitly.
        await asyncio.gather(*(user.send(json.dumps(data)) for user in USERS))
async def register(websocket):
    """
    Register a new client using his websocket and push the current state.
    :param websocket: websocket of the client
    """
    USERS.add(websocket)
    logger.info('New client connected')
    await send_state_data()
async def unregister(websocket):
    """
    Remove a websocket of the client list when the websocket connection is closed
    :param websocket: client to remove

    NOTE(review): USERS.remove raises KeyError if the socket was never
    registered — consider USERS.discard; confirm register always ran first.
    """
    logger.info("One client disconnected")
    USERS.remove(websocket)
    await send_state_data()
async def mainLoop(websocket, path):
    """
    Main loop that catches messages sent by connected clients and dispatches
    them on their 'action' field, replying with updated state.
    :param websocket: websocket used for external communication (client)
    :param path: request path supplied by the websockets server (unused)
    """
    await register(websocket)
    try:
        #await send_project_data_users()
        async for message in websocket:
            data = json.loads(message)
            logger.info("Message received : %s", str(data))
            if data['action'] == 'loop_capture':
                scanner.loop_capture(data['project_name'])
                await send_state_data()
            elif data['action'] == 'create_project':
                pm.create_project(data['project_name'], data['description'], data['pict_per_rotation'], data['pict_res'])
                await send_state_data()
            elif data['action'] == 'turn_bed_CW':
                angle = float(data['plateau_degree'])
                scanner.turn_bed(angle)
                await send_state_data()
            elif data['action'] == 'turn_bed_CCW':
                angle = float(data['plateau_degree'])
                scanner.turn_bed(-1 * angle)
                await send_state_data()
            elif data['action'] == 'request_project_info':
                await websocket.send(pm.get_projects_data())
            elif data['action'] == 'request_upload_email_project':
                project_name = data['project_name']
                email_to = data['email_to']
                pm.zip_project(project_name)
                upload.send_email_zip_project(project_name, email_to)
                await send_state_data()
            elif data['action'] == 'request_remove_project':
                project_name = data['project_name']
                pm.remove_single_project(project_name)
                await send_state_data()
            elif data['action'] == 'request_zip_data':
                project_name = data["project_name"]
                pm.zip_project(project_name)
                await send_download_ready(project_name)
            elif data['action'] == 'camera_preview':
                data = scanner.get_preview_capture()
                msg = {'type': 'camera_preview', 'data': data}
                await websocket.send(json.dumps(msg))
            else:
                # Fixed: logging uses %-style interpolation, so the original
                # "{}" placeholder was emitted literally instead of the data.
                logger.error("unsupported event: %s", data)
    finally:
        # Always drop the client from USERS, even on abnormal close.
        await unregister(websocket)
def activeUSB(usbNumber):
    """
    Show active usb devices connected to the host running this script.
    :param usbNumber: bus number to match, compared as a string against
        characters [4:7] of each `lsusb` line (e.g. '001')
    :return: list of matching `lsusb` output lines (the original built this
        list but never returned it)
    """
    usb = subprocess.check_output('lsusb').decode()
    # lsusb lines look like 'Bus 001 Device 002: ...'; chars [4:7] are the
    # zero-padded bus number, so an int such as 1 will not match '001' —
    # pass a matching string.
    return [line for line in usb.splitlines() if line[4:7] == str(usbNumber)]
if __name__ == '__main__':
    pm.create_base_projects_folder()
    pm.get_projects_data()
    # Start the websocket server on all interfaces before entering the loop.
    asyncio.get_event_loop().run_until_complete(
        websockets.serve(mainLoop, '0.0.0.0', 6789))
    scanner.on_ready()
    asyncio.get_event_loop().run_forever()
|
from Utils import Data_util
from Network import Network
import numpy as np
import os
def load_image():
    """
    Load the MNIST train/test sets and one-hot encode the labels.
    :return: (train_x, train_y_onehot, test_x, test_y_onehot)
    """
    dir_path = os.path.dirname(os.path.realpath(__file__))  # get path
    file_train_x = dir_path + "/data/train-images.idx3-ubyte"
    file_train_y = dir_path + "/data/train-labels.idx1-ubyte"
    file_test_x = dir_path + "/data/t10k-images.idx3-ubyte"
    file_test_y = dir_path + "/data/t10k-labels.idx1-ubyte"
    train_x = Data_util(file_train_x).get_image()
    train_y = Data_util(file_train_y).get_label()
    test_x = Data_util(file_test_x).get_image()
    test_y = Data_util(file_test_y).get_label()
    # One-hot encoding via vectorized fancy indexing (replaces the original
    # per-sample Python loops).
    train_y_ = np.zeros((np.shape(train_y)[0], 10), dtype=float)
    train_y_[np.arange(len(train_y)), np.asarray(train_y, dtype=int)] = 1.0
    test_y_ = np.zeros((np.shape(test_y)[0], 10), dtype=float)
    test_y_[np.arange(len(test_y)), np.asarray(test_y, dtype=int)] = 1.0
    return train_x, train_y_, test_x, test_y_
if __name__ == '__main__':
    # Evaluate the saved network on the MNIST test split only.
    _, _, test_x, test_y = load_image()
    net = Network() # weight init
    net.load_weight()
    net.eval(test_x, test_y, "test_data", save=False)
|
import unittest, ast
class TestModules(unittest.TestCase):
    """Parameterised TestCase: dynamically imports ``path_test`` and runs the
    function named ``name_test`` from it, passing this case as context."""
    def __init__(self, testname, dw, path_test, name_test):
        # testname: TestCase method to run (here always 'test');
        # dw: opaque context object kept on the instance for the imported test.
        super(TestModules, self).__init__(testname)
        self.path_test = path_test
        self.dw = dw
        self.name_test = name_test
    def test(self):
        # __import__ with fromlist returns the leaf module; getattr pulls the
        # named test function out of it.
        method_to_call = getattr(__import__(self.path_test,\
            fromlist=[self.name_test]), self.name_test)
        # NOTE(review): the return value is captured but never asserted on.
        result = method_to_call(self)
|
from django.db import models
# Create your models here.
class ServerID(models.Model):
    """Tracks a server by numeric id plus a connection counter.

    NOTE(review): presumably the id comes from an external service (it needs
    64 bits) — confirm with the code that creates these rows.
    """
    # BigInteger because external ids can exceed 32 bits.
    id = models.BigIntegerField(blank=False, primary_key=True)
    # Number of currently connected clients for this server.
    num_connected = models.IntegerField(blank=False, default=0)
|
#!/usr/bin/env python2
from xbmc.pidgin import Forwarder
import argparse
import logging
if __name__ == '__main__':
    # NOTE(review): the description string is argparse-docs boilerplate and
    # does not describe this tool — consider updating it.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--host', dest='host', action='store', required=True)
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False)
    args = parser.parse_args()
    logging.root.name = 'xbmcpidgin'
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # Forward pidgin messages to the given XBMC host until stopped.
    f = Forwarder(args.host)
    f.run()
|
import numpy as np
# Small numpy aggregation/boolean-indexing demo script.
dt1 = np.array([1,2,3,40,50,5,6,7])
dt2 = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(np.sum(dt1))
print(np.sum(dt2))
# sum of column 1 only
print(np.sum(dt2[:,1]))
print(dt2[:,1])
# mean
print(np.mean(dt1))
print(np.mean(dt2))
# median
print(np.median(dt1))
print(np.median(dt2))
print(np.percentile(dt1, 31))
# standard deviation (the original comment said "variance", but np.std
# returns the standard deviation; np.var would be the variance)
print(np.std(dt1))
# maximum
print(np.max(dt2))
print(np.max(dt2[:,1]))
# largest value in each row
print(np.max(dt2, axis=1))
# largest value in each column
print(np.max(dt2, axis=0))
# index of the max value
print(np.argmax(dt1))
# indices of elements matching a condition
print(np.where(dt1 > 4)[0])
b = np.array([True,False,True,False,True,True,False,False])
#boolean index
print(dt1[b])
print(dt1*b)
print(dt1 > 5)
# values matching a condition
print(dt1[dt1>5])
|
import numpy as np
import matplotlib.pyplot as plt
'''
x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = np.array([1, 4, 3, 3, 2, 2, 3, 8])
# 绘制折线图,参数1表示x轴坐标,参数2表示y轴坐标,参数3表示颜色,参数4表示线宽
plt.plot(x, y, 'r')
plt.plot(x, y, 'g', lw=10)
# 绘制柱状图参数3表示占用宽度比例,参数4表示透明度,参数5表示颜色
plt.bar(x, y, 0.5, alpha=1, color='b')
plt.show()
'''
# Draw one vertical segment per index i, from a[i] up to b[i].
# (The triple-quoted block above is disabled example code kept as-is.)
x = np.zeros(2)
y = np.zeros([2])
a = np.array([1, 3, 5, 7, 9])
b = np.array([2, 4, 6, 8, 10])
for i in range(0, 5):
    x[0] = i
    x[1] = i
    y[0] = a[i]
    y[1] = b[i]
    print(y)
    plt.plot(x, y)
plt.show()
import cv2
import numpy as np
from PIL import Image
import base_faces_lib as bfl
imgNum = 35        # number of base face images
numEigValue = 300  # number of eigen components kept
def calcEigFaces():
    """Project the mean-centred face base onto the stored eigenvectors and
    return the transposed eigenface matrix.

    Shapes per the inline comments: the image base is 35x10000 (one
    flattened face per row) — TODO confirm against base_faces_lib.
    """
    baseMatrix = bfl.ini_img_base() #35*10000
    baseMatrixT = baseMatrix.transpose() #10000*35
    print baseMatrixT.shape
    # NOTE(review): this pre-allocation is immediately overwritten below.
    face_avg = np.empty((1,10000),float) #1*10000
    face_avg = baseMatrix.sum(axis = 0)/imgNum
    avgMatrix = np.empty((imgNum,10000),float) #35*10000
    # Subtract the average face from every image row.
    for cnt in range (0,imgNum):
        avgMatrix[cnt]=baseMatrix[cnt] - face_avg
    transMatrix=avgMatrix.transpose() #10000*35
    eigenVector = bfl.eigenVectorImportnew() #300*10000
    # NOTE(review): this allocation is also overwritten by np.dot below;
    # `number` and the commented Image.open are dead code.
    eigFaces = np.empty((numEigValue,imgNum),complex) #300*35
    number = 0
    #img = Image.open('cap/0.jpg')
    eigFaces = np.dot(eigenVector,transMatrix)
    eigFacesT = eigFaces.transpose()
    return eigFacesT
import json
import matplotlib.pyplot as plt
import matplotlib.image as img
import os
import datetime
import math
def get_wind_chill(temp, wind_speed):
    """Wind-chill temperature (rounded to 0.1) from air temperature and wind
    speed, clamped so the result is never warmer than the air itself."""
    factor = 0.478 + 0.237 * math.sqrt(wind_speed) - 0.0124 * wind_speed
    chill = round(33 + factor * (temp - 33), 1)
    # Sometimes the formula comes out warmer than the air; clamp to temp.
    return temp if chill > temp else chill
def fill_cond(string):
    """Build a per-forecast-slot series for plotting one weather condition.

    Slots whose description contains ``string`` get the wind-chill value;
    other slots get the overall minimum so the filled area sits at the
    chart floor.

    NOTE(review): relies on the module-level ``api``, ``texts`` and
    ``wind_chill`` that are built later in this script.
    """
    lst = []
    for count in range(len(api['list'])):
        if string in texts[count]:
            lst.append(wind_chill[count])
        else:
            lst.append(min(wind_chill))
    return lst
def convert_to_MSK_tz(timestamp):
    """Format a UTC epoch timestamp as Moscow time: 'HH <ru-weekday>'.

    Fixed: datetime.fromtimestamp() without a tz converts via the *local*
    timezone, so the manual +3h offset was only correct on UTC machines;
    converting in UTC explicitly makes the result deterministic.
    """
    msk = datetime.datetime.fromtimestamp(timestamp + 60 * 60 * 3,
                                          tz=datetime.timezone.utc)
    days = {'Monday': 'Пн',
            'Tuesday': 'Вт',
            'Wednesday': 'Ср',
            'Thursday': 'Чт',
            'Friday': 'Пт',
            'Saturday': 'Сб',
            'Sunday': 'Вс'}
    return msk.strftime('%H')+' '+days[msk.strftime('%A')]
# Load the cached weather-API forecast dump produced by a previous request.
with open('result.txt', 'r') as result:
    api = json.loads(result.read())
# Kelvin -> Celsius, one value per forecast slot.
temps = [ round(t['main']['temp']-273, 1) for t in api['list'] ]
# Wind speed arrives in m/s; *3.6 converts to km/h for the wind-chill formula.
wind_chill = [ get_wind_chill(temps[count], api['list'][count]['wind']['speed']*3.6) for count in range(len(api['list'])) ]
# Constant helper series: chart ceiling, floor, and a band below the floor
# used to anchor the condition labels.
maxlevel = [ max(temps) for z in range(len(api['list'])) ]
zerolevel = [ min(wind_chill) for z in range(len(api['list'])) ]
underzerolevel = [ min(wind_chill)-3 for z in range(len(api['list'])) ]
zerolevel_point = min(zerolevel)
underzerolevel_point = min(underzerolevel)
texts = [ tx['weather'][0]['description'] for tx in api['list'] ]
dates = []
for date in [ d['dt'] for d in api['list'] ]:
    dates.append(convert_to_MSK_tz(date))
images_src = [icon['weather'][0]['description'] for icon in api['list']]
# Per-condition series for the coloured fill areas.
rains = fill_cond('rain')
snows = fill_cond('snow')
clear_sky = fill_cond('clear sky')
clouds = fill_cond('clouds')
#for count in range(len(api['list'])):
#    if 'rain' in texts[count]:
#        rains.append(temps[count])
#    else:
#        rains.append(min(temps))
# PLOT
fig, graph = plt.subplots()
graph.plot(dates, underzerolevel, 'w', dates, zerolevel, 'g', dates, temps, 'g', dates, snows, '#CCFFFF', dates, clouds, '#CCFFFF', dates, clear_sky, '#CCFFFF', dates, wind_chill, 'g', dates, maxlevel, 'w')
graph.grid(True)
graph.set_xlabel('Дни')
graph.set_ylabel('Цельсии')
for tick in graph.get_xticklabels(): # rotate X labels
    tick.set_rotation(285)
# One rotated condition label per slot, anchored at the band below the floor.
for count in range(len(api['list'])):
    graph.text(dates[count],
               underzerolevel_point,
               texts[count],
               {
                   'ha': 'center',
                   'va': 'bottom',
                   'color': '#404040'
               },
               bbox={
                   'boxstyle': 'round',
                   'facecolor': 'wheat',
                   'alpha': 0.5
               },
               rotation=270)
# Draw vertical lines at the end of day
for date in dates:
    if date.split()[0] == '00':
        graph.axvline(x=date)
# NOTE(review): `wind_chill<temps` etc. compare two Python *lists*
# lexicographically, yielding a single bool rather than an element-wise
# mask — these `where=` arguments likely do not do what was intended;
# verify against np.array comparisons.
graph.fill_between(dates, temps, wind_chill, where=wind_chill<temps, facecolor='#FFFFFF')
graph.fill_between(dates, wind_chill, zerolevel, where=zerolevel<wind_chill, facecolor='#CCFFFF')
graph.fill_between(dates, zerolevel, rains, where=rains>zerolevel, facecolor='#80BFFF')
graph.fill_between(dates, zerolevel, snows, where=snows>zerolevel, facecolor='#FFFFFF')
graph.fill_between(dates, zerolevel, clouds, where=clouds>zerolevel, facecolor='#A6A6A6')
#graph.fill_between(dates, zerolevel, clouds, where=clouds>zerolevel, facecolor='#C2D6D6')
graph.fill_between(dates, zerolevel, clear_sky, where=clear_sky>zerolevel, facecolor='#FFFF80')
fig.set_figwidth(12)
fig.tight_layout()
#plt.title('TITLE!')
fig.suptitle('Title!', fontsize=14, y=1)
#plt.show()
plt.savefig('chart_plt.png')
|
def isValid(s):
    """Return True when every bracket in ``s`` is matched and properly
    nested; any non-bracket character makes the string invalid."""
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in s:
        if ch in pairs:
            # Closing bracket: must match the most recent opener.
            if not stack or stack.pop() != pairs[ch]:
                return False
        elif ch in pairs.values():
            stack.append(ch)
        else:
            return False
    return not stack
# Smoke checks: balanced, multi-pair, mismatched, interleaved, nested.
print(isValid(s = "()"))
print(isValid(s = "()[]{}"))
print(isValid(s = "(]"))
print(isValid(s = "([)]"))
print(isValid(s = "{[]}"))
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def maximumAverageSubtree(self, root):
        """
        :type root: TreeNode
        :rtype: float
        Post-order walk: each node reports (size, sum) of its subtree while
        the best subtree average seen so far is tracked on the instance.
        """
        self.ans = -float('inf')
        def walk(node):
            if node is None:
                return (0, 0)
            left_size, left_sum = walk(node.left)
            right_size, right_sum = walk(node.right)
            size = left_size + right_size + 1
            total = left_sum + right_sum + node.val
            self.ans = max(self.ans, float(total) / size)
            return size, total
        walk(root)
        return self.ans
# Sum integers read from stdin until the sentinel value -1 is entered,
# then print the running total.
num=int(input("Valor do numero?: "))
x=0
while(num != -1):
    x=num+x
    num=int(input("Valor do numero?: "))
print(x)
import distutils.core
# Runtime dependencies are tracked in requirements.txt, one per line.
with open('requirements.txt') as f:
    reqs = f.read().splitlines()
distutils.core.setup(
    name='SimpleCommonsTransfer',
    version='0.1dev',
    author='Robin Krahl',
    author_email='me@robin-krahl.de',
    packages=['simplecommonstransfer'],
    # Fixed: this keyword was misspelled 'package_date', so the HTML
    # templates were silently dropped from built distributions.
    package_data={'simplecommonstransfer': ['templates/*.html']},
    license='LICENSE',
    long_description=open('README.md').read(),
    install_requires=reqs,
)
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# Warp a quadrilateral region of the photo into a flat width x height view.
img = cv.imread("./Images/BirdView.jpg")
width, height = 250, 350
# Source corners in the input image and their destination corners.
# NOTE(review): for an undistorted result pts1 and pts2 must list the
# corners in the same order — verify the correspondence against the image.
pts1 = np.float32([[155, 294], [55, 194], [373, 186], [270, 105]])
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
matrix = cv.getPerspectiveTransform(pts1, pts2)
imgOutput = cv.warpPerspective(img, matrix, (width, height))
cv.imshow("Image", img)
cv.imshow("Output", imgOutput)
cv.waitKey(0)
cv.destroyAllWindows()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import Queue, threading, sys
from threading import Thread
import time
import urllib2
import json
import os
import codecs
import re
_encoding = 'utf-8'
_errors = 'ignore'   # decode errors are silently dropped
workername = '#'     # overwritten from argv[1] inside readLog()
gol_videoinfo = {}   # NOTE(review): never written anywhere in this script
gol_catinfo = {}     # category -> list of guids, merged from all workers
# working thread
class Worker(Thread):
    """Daemon thread that drains (callable, args, kwargs) jobs from the
    work queue; each result goes to the result queue. The worker's private
    ``catInfo`` dict is passed as first argument to every job, and the
    thread exits once the queue stays empty past ``timeout`` seconds.
    """
    worker_count = 0
    timeout = 1
    def __init__(self, workQueue, resultQueue, catInfo, **kwds):
        Thread.__init__(self, **kwds)
        self.id = Worker.worker_count
        Worker.worker_count += 1
        self.setDaemon(True)
        self.workQueue = workQueue
        self.resultQueue = resultQueue
        self.catInfo = catInfo
        # Workers start themselves on construction.
        self.start()
    def run(self):
        while True:
            try:
                callable, args, kwds = self.workQueue.get(timeout=Worker.timeout)
                res = callable(self.catInfo, *args, **kwds)
                #print "worker[%2d]: %s" % (self.id, str(res) )
                #print u'worker{0}: {1}'.format(self.id, res)
                self.resultQueue.put(res)
                #time.sleep(Worker.sleep)
            except Queue.Empty:
                # Queue drained for `timeout` seconds: let the thread finish.
                break
            except :
                print 'worker[%2d]' % self.id, sys.exc_info()[:2]
                raise
class WorkerManager:
    """Owns the work/result queues and a pool of Worker threads; merges the
    per-worker category dicts into the global ``gol_catinfo`` when done."""
    def __init__(self, num_of_workers=10, timeout=2):
        self.workQueue = Queue.Queue()
        self.resultQueue = Queue.Queue()
        self.workers = []
        self.timeout = timeout
        self.catInfo = []
        self._recruitThreads(num_of_workers)
    def _recruitThreads(self, num_of_workers):
        # One private dict per worker avoids cross-thread locking.
        for i in range(num_of_workers):
            self.catInfo.append({})
            worker = Worker(self.workQueue, self.resultQueue, self.catInfo[i])
            self.workers.append(worker)
    def wait_for_complete(self):
        # Join workers; re-queue any that are still alive while work remains.
        while len(self.workers):
            worker = self.workers.pop()
            worker.join()
            if worker.isAlive() and not self.workQueue.empty():
                self.workers.append(worker)
        print "All jobs are are completed."
        # Merge each worker's private category map into the global one.
        for i in range(len(self.catInfo)):
            tmpcatinfo = self.catInfo[i]
            #print tmpcatinfo
            for key,value in tmpcatinfo.items():
                gol_catinfo[key] = gol_catinfo.get(key, [])
                gol_catinfo[key].extend(value)
    def add_job(self, callable, *args, **kwds):
        self.workQueue.put((callable, args, kwds))
    def get_result(self, *args, **kwds):
        return self.resultQueue.get(*args, **kwds)
def getVideoInfo2(vid):
    """Fetch the first category for a video id from the internal API.

    Returns the sentinel 'NOCAT' when the API reports no/empty categories
    and 'ERRORVID' on any request or parse failure.
    """
    cat = ''
    try:
        path = 'http://10.103.88.96/api_ptvideoinfo?pid=XMTAyOA==&rt=3&id={0}'
        path = path.format(vid)
        content = urllib2.urlopen(path, timeout=200).read()
        videoinfo = json.loads(content)
        result = videoinfo.get('item', {})
        cats = result.get('cats', '')
        if not cats or len(cats) == 0:
            cat = 'NOCAT'
        else:
            if not cats[0]:
                cat = 'NOCAT'
            else:
                cat = cats[0]
    except:
        print 'HTTPError:{0}'.format(vid)
        cat = 'ERRORVID'
    return cat
def getVideoInfoFormWeb(catInfo, vid, guid):
    """Resolve the category of ``vid`` and file ``guid`` under it in catInfo."""
    category = getVideoInfo2(vid)
    catInfo.setdefault(category, []).append(guid)
def readLog():
    """Scan the UV source logs matching argv[1], resolve each vid's category
    through the worker pool, and append per-category unique-guid counts to
    the target log named by argv[2]."""
    workername = sys.argv[1]
    targetname = sys.argv[2]
    ac_dict = {}
    # Collect vid -> guid pairs from every matching log file.
    for parent, dirnames, filenames in os.walk('/home/lirui/otherpy/20120228/UVSourceLog/'):
        for filename in filenames :
            if workername in filename:
                print filename
                f = codecs.open(os.path.join(parent, filename), 'r', encoding=_encoding, errors=_errors)
                row = f.readline()
                while row.strip():
                    # Expected row layout: <ts> <vid> <guid>
                    val = row.split(' ')
                    val = [x.strip() for x in val if x]
                    vid = ''
                    if not val[1] or len(val) != 3:
                        row = f.readline()
                        continue
                    else:
                        vid = val[1].strip()
                    ac_dict[vid] = val[2]
                    row = f.readline()
    import socket
    socket.setdefaulttimeout(10)
    # 300 threads: the per-vid lookups are network bound.
    wm = WorkerManager(300)
    for vidkey, guid in ac_dict.items():
        wm.add_job(getVideoInfoFormWeb, vidkey, guid)
    wm.wait_for_complete()
    output_f = codecs.open('/home/lirui/otherpy/20120228/UVTargetLog/' + targetname + '.log', 'a', encoding=_encoding, errors=_errors)
    # One line per category with the count of distinct guids.
    for cat, guids in gol_catinfo.items():
        print >> output_f, u'{0}:::::{1}'.format(cat, len(set(guids)))
    output_f.flush()
if __name__ == '__main__':
    readLog()
|
#!/usr/bin/env python3
import turtle
import sys
#Les constantes
# Game constants
NOMBRE_ALLUMETTE = 19          # starting number of matches
HAUTEUR_BOIS_ALLUMETTE = 50    # height of the wooden part (px)
HAUTEUR_ROUGE_ALLUMETTE = 10   # height of the red head (px)
COULEUR_BOIS_ALLUMETTE = "#CDA88C"
COULEUR_ROUGE_ALLUMETTE = "#DC5844"
COULEUR_FOND = "#8CCDC4"
TITRE = "Jeu des allumettes"
TAILLE_ECRITURE = 26
TAILLE_ECRITURE_2 = 16
# Mutable game state
etat_partie = True
nombre_allumettes = NOMBRE_ALLUMETTE
joueur_courant = 1
#Les fonctions
def deplacer_sans_tracer(x, y = None):
    """Move the pen to a point without drawing.

    ``x`` may be an (x, y) pair (tuple or list of length 2) or a plain x
    coordinate combined with ``y``.
    """
    turtle.up()
    if isinstance(x, (tuple, list)) and len(x) == 2:
        turtle.goto(x)
    else:
        turtle.goto(x, y)
    turtle.down()
def initialise_fenetre():
    """Set up the window: hidden turtle pointing up, title, background,
    maximum drawing speed."""
    turtle.hideturtle()
    turtle.setheading(90)
    turtle.title(TITRE)
    turtle.bgcolor(COULEUR_FOND)
    turtle.speed(0)
def dessiner_allumette():
    """Draw one match: a wooden stem topped by a red head."""
    for couleur, longueur in ((COULEUR_BOIS_ALLUMETTE, HAUTEUR_BOIS_ALLUMETTE),
                              (COULEUR_ROUGE_ALLUMETTE, HAUTEUR_ROUGE_ALLUMETTE)):
        turtle.pencolor(couleur)
        turtle.forward(longueur)
def dessiner_allumettes(nombre_allumettes):
    """Draw the row of matches, scaling spacing/pen size to fit the window."""
    espace_entre_allumettes = 60 if nombre_allumettes < 8 else turtle.window_width()/2//nombre_allumettes
    taille_crayon = 25 if nombre_allumettes < 8 else espace_entre_allumettes//3
    turtle.pensize(taille_crayon)
    # Centre the row horizontally around x = 0.
    position_allumettes = [-nombre_allumettes/2*espace_entre_allumettes, 0]
    deplacer_sans_tracer(position_allumettes)
    for allumette in range(nombre_allumettes):
        dessiner_allumette()
        position_allumettes[0] += espace_entre_allumettes
        deplacer_sans_tracer(tuple(position_allumettes))
    if nombre_allumettes != 1:
        afficher_nombre_allumettes(nombre_allumettes)
def afficher_partie(nombre_allumettes, joueur_courant, nombre_retirees = None):
    """Redraw the whole board and the status texts for the current turn."""
    turtle.clear()
    dessiner_allumettes(nombre_allumettes)
    afficher_qui_joue(joueur_courant)
    if nombre_retirees != None:
        # The displayed move was made by the *previous* player.
        joueur = 1 if joueur_courant == 2 else 2
        affiche_nombre_retire(joueur, nombre_retirees)
def affiche_nombre_retire(joueur, nombre_retirees, pos = (0, -110)):
    """Show how many matches the given player just removed."""
    deplacer_sans_tracer(pos)
    turtle.write("(Le Joueur {} a retiré {} allumette(s))".format(joueur, nombre_retirees),
                 align = "center",
                 font = ("Arial", TAILLE_ECRITURE_2, "italic"))
def afficher_nombre_allumettes(nombre_allumettes, pos = (0, -80)):
    """Show the number of matches still on the board."""
    deplacer_sans_tracer(pos)
    turtle.write("Il y a {} allumettes.".format(nombre_allumettes),
                 align = "center",
                 font = ("Arial", TAILLE_ECRITURE, "normal"))
def afficher_qui_joue(joueur_courant, pos = (0, 100)):
    """Show whose turn it is."""
    deplacer_sans_tracer(pos)
    turtle.write("C'est au Joueur {} de jouer !".format(joueur_courant),
                 align = "center",
                 font = ("Arial", TAILLE_ECRITURE, "normal"))
def bloque_clavier():
    """Disable the a/z/e key bindings while a move is being processed."""
    for touche in ("a", "z", "e"):
        turtle.onkeyrelease(None, touche)
def debloque_clavier():
    """Bind keys a/z/e to removing 1/2/3 matches respectively."""
    for touche, nombre in (("a", 1), ("z", 2), ("e", 3)):
        # Default argument pins the value at bind time (late-binding trap).
        turtle.onkeyrelease(lambda nombre=nombre: joue(nombre), touche)
def joue(nombre_retire = 1):
    """Handle one move: remove matches, swap players, detect end of game."""
    # Ignore further keys while this move is processed.
    bloque_clavier()
    global nombre_allumettes, etat_partie, joueur_courant
    if nombre_retire != 0 and nombre_allumettes-nombre_retire > 0:
        nombre_allumettes -= nombre_retire
    else:
        # Illegal move: re-enable input and wait for another key.
        debloque_clavier()
        return
    if nombre_allumettes != 1:
        joueur_courant = 1 if joueur_courant == 2 else 2
        afficher_partie(nombre_allumettes, joueur_courant, nombre_retire)
    else:
        # One match left: current player wins; ask for a rematch.
        etat_partie = victoire(joueur_courant)
        if not etat_partie:
            quitter()
        nombre_allumettes = NOMBRE_ALLUMETTE
        afficher_partie(nombre_allumettes, joueur_courant)
    turtle.listen()
    debloque_clavier()
def victoire(joueur_courant):
    """Announce the winner and ask whether to play again.

    Returns True when the user typed 'oui' in the dialog, False otherwise.
    """
    turtle.clear()
    dessiner_allumettes(1)
    deplacer_sans_tracer(-35, -100)
    turtle.down()
    turtle.write("Le joueur "+str(joueur_courant)+" a gagné !", align = "center", font = ("Arial", TAILLE_ECRITURE, "normal"))
    if (turtle.textinput("Rejouer ?", "Rejouer ? Veuillez entrer 'oui' si c'est le cas.") == 'oui'):
        return True
    return False
def quitter(x = 0, y = 0):
    """Close the turtle window and exit the program.

    ``x``/``y`` are accepted so this can double as a click handler.
    """
    turtle.bye()
    sys.exit(0)
def main():
    """Main entry: build the window, draw the board, wire up the keys, and
    bind right-click to quitting."""
    initialise_fenetre()
    afficher_partie(nombre_allumettes, joueur_courant)
    turtle.listen()
    debloque_clavier()
    turtle.onscreenclick(quitter, 3)
if __name__ == "__main__":
    main()
    turtle.mainloop()
|
from twisted.mail import smtp
from twisted.internet import defer, reactor
from email.mime.text import MIMEText
from StringIO import StringIO
import local_config
def send( subject, body ):
    """Send an HTML mail to the configured recipients via ESMTP.

    Returns a Deferred that fires when the message has been handed off, or
    an already-fired Deferred when mail is disabled in local_config.
    """
    if not local_config.MAIL_ENABLED:
        return defer.succeed( None )
    finished = defer.Deferred()
    msg = MIMEText( unicode( body ), "html", "utf-8" )
    msg[ "Subject" ] = unicode( subject )
    msg[ "From" ] = unicode( local_config.MAIL_FROM )
    msg[ "To" ] = u", ".join( local_config.MAIL_TO )
    senderFactory = smtp.ESMTPSenderFactory(
        local_config.SMTP_USER,
        local_config.SMTP_PASS,
        local_config.MAIL_FROM,
        ", ".join( local_config.MAIL_TO ),
        StringIO( msg.as_string().encode( 'ascii' ) ),
        finished
    )
    # Plain SMTP submission on port 25; the factory fires `finished`.
    reactor.connectTCP( local_config.SMTP_SERVER, 25, senderFactory )
    return finished
|
#!/usr/bin/env python
"""
Add face index attribute to mesh.
"""
import argparse
import pymesh
def parse_args():
    """Parse the required input/output mesh paths from the command line."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("input_mesh", help="input mesh")
    parser.add_argument("output_mesh", help="output mesh")
    return parser.parse_args()
def main():
    """Load the mesh, attach vertex/face/voxel index attributes, save it."""
    args = parse_args()
    mesh = pymesh.load_mesh(args.input_mesh)
    for attribute in ("vertex_index", "face_index", "voxel_index"):
        mesh.add_attribute(attribute)
    pymesh.save_mesh(args.output_mesh, mesh, *mesh.attribute_names)
# Script entry point.
if __name__ == "__main__":
    main();
|
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.optimize import brentq
# NOTE(review): area_arr and mach_arr are never used in the visible script —
# presumably left over from a nozzle-contour calculation; confirm before removing.
area_arr = [[-0.5, 3.0],[0.5, 1.5],[0,1.0]]
mach_arr = []
gam = 1.1363       # presumably the exhaust-gas specific-heat ratio — confirm
area_ratio = 1.0
def bartz(d_throat, p_chamber, c_star, d, c_p, visc, t_gas, t_wall):
    """Bartz-equation convective heat-transfer coefficient.

    Arguments follow the classic Bartz form: throat diameter, chamber
    pressure, characteristic velocity, local diameter, specific heat,
    viscosity, gas temperature and wall temperature; the boundary-layer
    reference temperature is the gas/wall average.
    (Fixed: removed a stray debug print of the boundary temperature.)
    """
    t_boundary = (t_gas + t_wall) / 2
    return (0.026 / math.pow(d_throat, 0.2)
            * math.pow((p_chamber / c_star), 0.8)
            * math.pow((d_throat / d), 1.8)
            * c_p
            * math.pow(visc, 0.2)
            * math.pow((t_gas / t_boundary), (0.8 - 0.2 * 0.6)))
# Smoke check with representative chamber conditions.
print(bartz(2.5, 2.2e7, 2600, 2.5 , 2800, 3e-5, 3200, 1000))
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
# Clone of the default histogram settings, rooted under a Debug folder.
DefaultHistoDebug = DefaultHisto.clone(
  topFolderName = "PixelPhase1/Debug"
)
# One 1-D debug histogram per geometry quantity, each with the standard
# 2D-profile and pixel-map specifications.
SiPixelPhase1GeometryDebugDetId = DefaultHistoDebug.clone(
  name = "debug_detid",
  title = "Location of DetIds",
  xlabel = "DetId",
  dimensions = 1,
  specs = VPSet(
    StandardSpecification2DProfile,
    StandardSpecificationPixelmapProfile,
  )
)
SiPixelPhase1GeometryDebugLadderBlade = DefaultHistoDebug.clone(
  name = "debug_ladderblade",
  title = "Location of Ladders/Blades",
  xlabel = "offline Ladder/Blade #",
  dimensions = 1,
  specs = VPSet(
    StandardSpecification2DProfile,
    StandardSpecificationPixelmapProfile,
  )
)
SiPixelPhase1GeometryDebugROC = DefaultHistoDebug.clone(
  name = "debug_roc",
  title = "Location of ROCs",
  xlabel = "ROC#",
  dimensions = 1,
  specs = VPSet(
    # TODO: make this per ROC!
    StandardSpecification2DProfile,
    StandardSpecificationPixelmapProfile,
    # Extra barrel mean-profile extended over ladder/module coordinates.
    Specification()
        .groupBy("PXBarrel/PXLayer/PXModuleName/SignedLadderCoord/SignedModuleCoord")
        .groupBy("PXBarrel/PXLayer/PXModuleName/SignedLadderCoord", "EXTEND_X")
        .groupBy("PXBarrel/PXLayer/PXModuleName/", "EXTEND_Y")
        .reduce("MEAN")
        .save(),
  )
)
SiPixelPhase1GeometryDebugFED = DefaultHistoDebug.clone(
  name = "debug_fed",
  title = "Location of FEDs",
  xlabel = "FED#",
  dimensions = 1,
  specs = VPSet(
    StandardSpecification2DProfile,
    StandardSpecificationPixelmapProfile,
  )
)
# Bundle shared by the analyzer and the harvester below.
SiPixelPhase1GeometryDebugConf = cms.VPSet(
  SiPixelPhase1GeometryDebugDetId,
  SiPixelPhase1GeometryDebugLadderBlade,
  SiPixelPhase1GeometryDebugROC,
  SiPixelPhase1GeometryDebugFED,
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
SiPixelPhase1GeometryDebugAnalyzer = DQMEDAnalyzer('SiPixelPhase1GeometryDebug',
        histograms = SiPixelPhase1GeometryDebugConf,
        geometry = SiPixelPhase1Geometry
)
SiPixelPhase1GeometryDebugHarvester = DQMEDHarvester("SiPixelPhase1Harvester",
        histograms = SiPixelPhase1GeometryDebugConf,
        geometry = SiPixelPhase1Geometry
)
|
import turtle
def drawLsystem(aTurtle, instructions, angle, distance):
    """Interpret an L-system instruction string with a turtle.

    F/B move forward/backward by ``distance``, +/- turn right/left by
    ``angle``, '[' pushes the turtle state (heading, x, y) and ']' pops and
    restores it. (Fixed: ']' previously popped the saved state but never
    restored position/heading, and '[' printed the stack for debugging.)
    """
    savedInfoList = []
    for cmd in instructions:
        if cmd == 'F':
            aTurtle.forward(distance)
        elif cmd == 'B':
            aTurtle.backward(distance)
        elif cmd == '+':
            aTurtle.right(angle)
        elif cmd == '-':
            aTurtle.left(angle)
        elif cmd == '[':
            savedInfoList.append([aTurtle.heading(), aTurtle.xcor(), aTurtle.ycor()])
        elif cmd == ']':
            heading, x, y = savedInfoList.pop()
            # Jump back without drawing a line.
            aTurtle.up()
            aTurtle.goto(x, y)
            aTurtle.setheading(heading)
            aTurtle.down()
def process_string(oldstr):
    """Apply the production rules to every symbol and join the results."""
    return "".join(apply_rules(ch) for ch in oldstr)
def apply_rules(inst):
    """Production rules for one symbol of the L-system.

    'H' and 'X' expand to their successor strings; every other symbol
    (F, +, -, brackets, ...) is a constant and maps to itself.
    (Fixed: unmatched symbols previously produced "" and were dropped,
    corrupting every generation after the first.)
    """
    if inst == 'H':
        return 'HFX[+H][-H]'
    if inst == 'X':
        return 'X[-FFF][+FFF]FX'
    return inst
def createLSystem(numIters,axiom):
    """Expand ``axiom`` through ``numIters`` rewriting passes.

    Note: with numIters == 0 this returns "" (not the axiom), matching the
    original behaviour.
    """
    resultat = ""
    courant = axiom
    for _ in range(numIters):
        resultat = process_string(courant)
        courant = resultat
    return resultat
# Build the instruction string (2 generations from axiom 'H') and draw it.
t = turtle.Turtle()
inst = createLSystem(2,'H')
# inst = 'X[-FFF][+FFF]FX'
# inst = 'HFX[+H][-H]'
wn = turtle.Screen()
drawLsystem(t, inst, 27.5, 15)
wn.exitonclick()
|
"""
Please delete a given node from a singly-linked list.
"""
class Node:
    """One element of a singly-linked list."""

    def __init__(self, val):
        # Payload stored at this node.
        self.val = val
        # Successor node; None marks the tail.
        self.next = None
class LinkedList:
    """Minimal singly-linked list supporting chained display/delete calls."""

    def __init__(self, root: Node):
        self.root = root

    def single(self):
        """Return True when the list holds exactly one node."""
        return self.root.next is None

    def display(self):
        """Print the list as 'a --> b --> c' and return self for chaining."""
        current = self.root
        while current:
            print(str(current.val) + (' --> ' if current.next is not None else ''), end='')
            current = current.next
        print()
        return self

    def delete_node(self, node: Node):
        """Remove the first node whose value equals ``node.val``; fluent.

        Fixed: deleting from an already-empty list used to raise
        AttributeError (single() dereferenced self.root); it is now a no-op.
        """
        if self.root is None:
            # Empty list: nothing to delete.
            return self
        if self.single() and self.root.val == node.val:
            self.root = None
            return self
        elif self.root.val == node.val:
            self.root = self.root.next
            return self
        last = self.root
        current = self.root.next
        while last.next is not None:
            if current.val == node.val:
                last.next = current.next
            last = current
            current = current.next
        return self
if __name__ == '__main__':
    # Build 1 -> 2 -> 3 -> 4 -> 5, then delete the nodes one by one
    # (middle, second, head, middle, final), printing after each step.
    a = Node(1)
    b = Node(2)
    c = Node(3)
    d = Node(4)
    e = Node(5)
    a.next = b
    b.next = c
    c.next = d
    d.next = e
    linked_list = LinkedList(a)
    linked_list \
        .display() \
        .delete_node(d) \
        .display() \
        .delete_node(b) \
        .display() \
        .delete_node(a) \
        .display() \
        .delete_node(c) \
        .display() \
        .delete_node(e) \
        .display()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundAllocTransferQueryResponse(AlipayResponse):
    """Response model for the fund-allocation transfer query API: plain
    property-backed fields populated from the parsed response dict."""
    def __init__(self):
        super(AlipayFundAllocTransferQueryResponse, self).__init__()
        self._alloc_time = None
        self._amount = None
        self._biz_type = None
        self._certification_no = None
        self._certification_type = None
        self._order_id = None
        self._out_biz_no = None
        self._status = None
    @property
    def alloc_time(self):
        return self._alloc_time
    @alloc_time.setter
    def alloc_time(self, value):
        self._alloc_time = value
    @property
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, value):
        self._amount = value
    @property
    def biz_type(self):
        return self._biz_type
    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value
    @property
    def certification_no(self):
        return self._certification_no
    @certification_no.setter
    def certification_no(self, value):
        self._certification_no = value
    @property
    def certification_type(self):
        return self._certification_type
    @certification_type.setter
    def certification_type(self, value):
        self._certification_type = value
    @property
    def order_id(self):
        return self._order_id
    @order_id.setter
    def order_id(self, value):
        self._order_id = value
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, value):
        self._status = value
    def parse_response_content(self, response_content):
        # Base class parses the raw payload; copy over any present fields.
        response = super(AlipayFundAllocTransferQueryResponse, self).parse_response_content(response_content)
        if 'alloc_time' in response:
            self.alloc_time = response['alloc_time']
        if 'amount' in response:
            self.amount = response['amount']
        if 'biz_type' in response:
            self.biz_type = response['biz_type']
        if 'certification_no' in response:
            self.certification_no = response['certification_no']
        if 'certification_type' in response:
            self.certification_type = response['certification_type']
        if 'order_id' in response:
            self.order_id = response['order_id']
        if 'out_biz_no' in response:
            self.out_biz_no = response['out_biz_no']
        if 'status' in response:
            self.status = response['status']
|
# argv[1]=array_size, argv[2]=lineResistance; argv[3]=VarSwitch
from sys import argv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
from PySpice.Spice.Netlist import Circuit, SubCircuit
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Unit import *
#from PySpice.Physics.SemiConductor import ShockleyDiode
import PySpice
# Run ngspice as an external subprocess instead of the shared library.
PySpice.Spice.Simulation.CircuitSimulator.DEFAULT_SIMULATOR = 'ngspice-subprocess'
#libraries_path = "/home/victor/Downloads/hspiceLib"
#libraries_path = "/home/wzw/Download/ngspice/Spice64/PySpice/examples/libraries/transistor"
# NOTE(review): machine-specific absolute path -- adjust per host.
libraries_path = "/home/wzw/Download/ngspice/PySpice/examples/libraries/transistor"
spice_library = SpiceLibrary(libraries_path)

## circuit
circuit = Circuit(title='sparse_net')
circuit.include(spice_library['ptm65nm_nmos'])
circuit.include(spice_library['ptm65nm_pmos'])

# params
#sparseRate = 0.5
# 65nm nmos with Vg=1.2V, 2.5V will induce large Ig
VGATEON = 1.2@u_V    # gate-on voltage for the pass transistors
VGATEOFF = 0.0@u_V   # gate-off voltage
VSEL = 1.2@u_V       # select voltage applied to chosen rows/columns
VREAD = 0.2@u_V      # read bias
# Command-line sizing: argv[1] = crossbar dimension, argv[2] = line resistance in Ohm.
arrSize = int(argv[1])
line_R = int(argv[2])@u_Ohm
# Two resistance levels indexed by the binary weight (False -> level_R[0], True -> level_R[1]).
level_R = [100@u_kOhm, 10@u_kOhm]
# generated params
#weights1 = np.random.uniform(0, 1, (128, 128)) >= sparseRate
#weights1 = np.ones((arrSize, arrSize))
# BUGFIX: the weight matrix was hard-coded to 8x8, which crashed with an
# IndexError for any arrSize != 8 because the circuit-build loops below index
# weights1[i, j] with i, j in range(arrSize). Size it from arrSize instead.
weights1 = (np.random.rand(arrSize * arrSize) > 0.5).reshape(arrSize, arrSize)
def rlevel(level):
    """Map a binary weight to a device resistance with random log-normal spread."""
    #return level_R[int(level)]
    nominal = level_R[int(level)]
    spread = np.exp(-np.random.normal(0, 0.5))
    return nominal * spread
# Names of the per-row/column voltage sources (word / bit / source lines).
VWL = []
VBL = []
VSL = []
for i in range(arrSize):
    VWL.append('WL_w1_%03d' % (i))
for i in range(arrSize):
    VBL.append('BL_w1_%03d' % (i))
for i in range(arrSize):
    VSL.append('SL_w1_%03d' % (i))
# Shared select sources: WL_SEL drives the gates, BL_SEL supplies the read bias.
circuit.VoltageSource(name='WL_SEL', plus='WL_SEL', minus=circuit.gnd, dc_value=VGATEON)
circuit.VoltageSource(name='BL_SEL', plus='BL_SEL', minus=circuit.gnd, dc_value=VREAD)
'''
# WL Voltage
for i in range(arrSize):
    circuit.VoltageSource(name=VWL[i], plus='WL_w1_%03d%03d' % (i, 0), minus=circuit.gnd, dc_value=VGATEOFF)
# BL Voltage
for i in range(arrSize):
    circuit.VoltageSource(name=VBL[i], plus='BL_w1_%03d%03d' % (0, i), minus=circuit.gnd, dc_value=0.0@u_V)
# SL Voltage
for i in range(arrSize):
    circuit.VoltageSource(name=VSL[i], plus='SL_w1_%03d%03d' % (i, 0), minus=circuit.gnd, dc_value=0.0@u_V)
'''
# WL Voltage: each word line is gated through an NMOS pass transistor whose
# gate is driven by the per-row source (raised to VSEL below to select a row).
for i in range(arrSize):
    circuit.VoltageSource(name=VWL[i], plus='WL_w1_%03d' % i, minus=circuit.gnd, dc_value=0.0@u_V)
    circuit.Mosfet(name='TG_WL_%03d_n0' % (i), drain='WL_SEL', gate='WL_w1_%03d' % (i), source='WL_w1_%03d%03d' % (i, 0), bulk=circuit.gnd, model='ptm65nm_nmos', length=65@u_nm, width=160@u_nm)
# BL Voltage
for i in range(arrSize):
    circuit.VoltageSource(name=VBL[i], plus='BL_w1_%03d' % i, minus=circuit.gnd, dc_value=0.0@u_V)
    circuit.Mosfet(name='TG_BL_%03d_n0' % (i), drain='BL_SEL', gate='BL_w1_%03d' % (i), source='BL_w1_%03d%03d' % (0, i), bulk=circuit.gnd, model='ptm65nm_nmos', length=65@u_nm, width=160@u_nm)
# SL Voltage: source lines are pulled to ground through a pass transistor.
for i in range(arrSize):
    #circuit.VoltageSource(name=VSL[i], plus='SL_w1_%03d' % i, minus=circuit.gnd, dc_value=0.0@u_V)
    circuit.Mosfet(name='TG_SL_%03d_n0' % (i), drain=circuit.gnd, gate='WL_w1_%03d' % (i), source='SL_w1_%03d%03d' % (i, 0), bulk=circuit.gnd, model='ptm65nm_nmos', length=65@u_nm, width=160@u_nm)
# w1 xbar
# pos w1 xbar: one select device + line resistances per row, one line resistor
# and one RRAM resistor (value from the binary weight) per cell.
for i in range (arrSize):
    circuit.Resistor(name='SL_w1_%03d%03d' % (i, 0), plus='SL_w1_%03d%03dH' % (i, 0), minus='SL_w1_%03d%03dL' % (i, 0), resistance=line_R)
    circuit.Resistor(name='WL_w1_%03d%03d' % (i, 0), plus='WL_w1_%03d%03d' % (i, 0), minus='WL_w1_%03d%03dL' % (i, 0), resistance=line_R)
    circuit.Mosfet(name='Msel_w1_%03d%03d' % (i, 0), drain='SL_w1_%03d%03dL' % (i, 0), gate='WL_w1_%03d%03dL' % (i, 0), source='SL_w1_%03d%03d' % (i, 0), bulk=circuit.gnd, model='ptm65nm_nmos', length=65@u_nm, width=160@u_nm)
    for j in range (arrSize):
        # lines
        circuit.Resistor(name='BL_w1_%03d%03d' % (i, j), plus='BL_w1_%03d%03d' % (i, j), minus='BL_w1_%03d%03d' % (i+1, j), resistance=line_R)
        circuit.Resistor(name='RRAM_w1_%03d%03d' % (i, j), plus='BL_w1_%03d%03d' % (i+1, j), minus='SL_w1_%03d%03dH' % (i, 0), resistance=rlevel(weights1[i,j]))
# Dump the generated weight matrix for later inspection.
resList = []
with open('./weight.log', 'w') as fw:
    #for i in range(int(argv[1])):
    #    for j in range(int(argv[1])):
    #        if weights1[i,j]:
    #            resList.append('10K')
    #        else:
    #            resList.append('100K')
    #fw.write(str(np.array(resList).reshape(8,8)))
    for i in range(int(argv[1])):
        for j in range(int(argv[1])):
            fw.write(str('%d\t' % weights1[i,j]))
#simulate
simulator = circuit.simulator(simulator='ngspice-subprocess')
# NOTE(review): the row/column selections below are hard-coded to specific
# indices (003/005/007) and assume arrSize == 8 -- confirm for other sizes.
#circuit['VWL_w1_000'].dc_value=VSEL
#circuit['VWL_w1_001'].dc_value=VSEL
#circuit['VWL_w1_002'].dc_value=VSEL
circuit['VWL_w1_003'].dc_value=VSEL
#circuit['VWL_w1_004'].dc_value=VSEL
circuit['VWL_w1_005'].dc_value=VSEL
#circuit['VWL_w1_006'].dc_value=VSEL
circuit['VWL_w1_007'].dc_value=VSEL
#circuit['VBL_w1_000'].dc_value=VSEL
#circuit['VBL_w1_001'].dc_value=VSEL
#circuit['VBL_w1_002'].dc_value=VSEL
circuit['VBL_w1_003'].dc_value=VSEL
#circuit['VBL_w1_004'].dc_value=VSEL
#circuit['VBL_w1_005'].dc_value=VSEL
#circuit['VBL_w1_006'].dc_value=VSEL
#circuit['VBL_w1_007'].dc_value=VSEL
# print sp file
sp_file = open('SparseNet.sp', 'w')
sp_file.write(str(circuit))
sp_file.close()
print("Netlist Saved in SparseNet.sp")
# DC operating point; per-cell read current is reconstructed from the node
# voltages across each RRAM resistor ('R' prefix added by the netlist API).
analysis = simulator.operating_point()
with open('./current.log', 'w') as fid:
    for i in range(arrSize):
        for j in range(arrSize):
            VH = analysis['BL_w1_%03d%03d' % (i+1, j)][0]
            VL = analysis['SL_w1_%03d%03dH' % (i, 0)][0]
            Iread = (VH - VL) / (circuit['RRRAM_w1_%03d%03d' % (i, j)].resistance)
            fid.write("cell(%03d, %03d)'s weight = %d" % (i, j, weights1[i,j]) + "\tread current is: " + str(Iread) + "\n")
print("****************************************************************************")
print("SIMULATION FINISHED!!!")
print("****************************************************************************")
|
import yaml
'''
配置文件加载
'''
class config_loader:
    """Load a YAML configuration file from the ``config/`` directory.

    The first YAML document of ``config/<name>.yaml`` is stored in
    ``self.config``.
    """

    # Class-level default so the attribute exists before __init__ runs.
    config = dict()

    def __init__(self, name):
        path = "config/" + name + ".yaml"
        with open(path, "rb") as stream:
            documents = yaml.safe_load_all(stream)
            self.config = list(documents)[0]
|
import xarray as xr
import datetime
import os
def read_DDH_netcdf(start, end, path, include_variables=None):
    """
    Read all converted DDH files in NetCDF format,
    from `start` to `end` date
    """
    print('Reading DDH-NetCDF from {} to {}'.format(start, end))

    # Number of 3-hourly forcing files between `start` and `end`.
    n_steps = int((end-start).total_seconds() / 3600. / 3.)

    # Generate list of NetCDF files to read
    files = []
    for step in range(n_steps):
        date = start + step*datetime.timedelta(hours=3)
        f = '{0:}/{1:04d}/{2:02d}/{3:02d}/{4:02d}/LES_forcing_{1:04d}{2:02d}{3:02d}{4:02d}.nc'.\
            format(path, date.year, date.month, date.day, date.hour)
        if os.path.exists(f):
            files.append(f)
        else:
            print('Can not find {}!! Skipping..'.format(f))

    print('Reading {} DDH NetCDF files...'.format(len(files)))

    if include_variables is not None:
        # Drop every variable that was not explicitly requested; xarray only
        # supports an exclusion list, so build it from the first file.
        first = xr.open_dataset(files[0])
        exclude = [var for var in first.variables.keys() if var not in include_variables]
        nc = xr.open_mfdataset(files, drop_variables=exclude, concat_dim='time', autoclose=True)
    else:
        nc = xr.open_mfdataset(files, autoclose=True)

    return nc
class Timer:
    """Wall-clock stopwatch based on datetime.now()."""

    def __init__(self):
        self.reset()

    def elapsed(self):
        # Time passed since construction or the last reset().
        return datetime.datetime.now() - self.start

    def reset(self):
        self.start = datetime.datetime.now()
#path = '/scratch/ms/nl/nkbs/DOWA/LES_forcing/'
path = '/nobackup/users/stratum/DOWA/LES_forcing/'
# Variables to keep when reading the forcing files.
rvars = ['z','time','u','v','T','dtu_dyn','dtv_dyn','dtT_dyn','dtu_phy','dtv_phy','dtT_phy']
# Domain index to extract; presumably corresponds to the K13 30 km domain -- TODO confirm.
iloc = 3+24
name = 'K13_30km'

# Extract one month at a time and write it to a per-month NetCDF file.
t = Timer()
for year in range(2016,2018):
    for month in range(1,13):
        t.reset()
        start = datetime.datetime(year, month, 1, 0)
        if month != 12:
            end = datetime.datetime(year, month+1, 1, 0)
        else:
            end = datetime.datetime(year+1, 1, 1, 0)
        data = read_DDH_netcdf(start, end, path, rvars)
        data = data.isel(domain=iloc)
        data.to_netcdf('{0:}/{1:}_{2:04d}{3:02d}.nc'.format(path,name,year,month))
        print('Done: {}'.format(t.elapsed()))
|
import os.path
from urdu_corpus_reader import UrduCorpusReader
from stop_words import remove_urdu_stopwords
if '__main__' == __name__:
    # Load the raw corpus and show which files it contains.
    corpus_root = os.path.abspath('../raw_urdu_data')
    wordlists = UrduCorpusReader(corpus_root, '.*')
    print("Loaded corpus with file IDs: ")
    print(wordlists.fileids())

    # Pick an arbitrary sample document to demonstrate the reader API.
    sample_id = wordlists.fileids()[10]

    # BUGFIX: header previously printed the typo "SENTeNCES".
    print("\n\nSENTENCES\n===============\n")
    for idx, s in enumerate(wordlists.sents(sample_id), start=1):
        print("\nSentence {}\n----------------\n".format(idx))
        print(s)

    print("Words from file: " + sample_id)
    for w in wordlists.words(sample_id):
        print(w, end=' ')

    # URDU STOP WORDS REMOVAL
    stopwords_corpus = UrduCorpusReader('./data', ['stopwords-ur.txt'])
    stopwords = stopwords_corpus.words()
    # print(stopwords)
    words = wordlists.words(sample_id)
    finalized_words = remove_urdu_stopwords(stopwords, words)
    print("\n==== WITHOUT STOPWORDS ===========\n")
    print(finalized_words)
|
# package imports
# e.g.
# import pandas as pd
# ...
import sys
from typing import List, Union, Dict
from lxml import etree
database_path = 'main.xml'
def main(argv: str) -> Union[Dict[str, List[dict]], None]:
    """Look up an actor by 'first last' name; return {actor_id: films} or None."""
    parts = argv.split(' ')
    if len(parts) < 2:
        return None
    parts = [part.lower() for part in parts]

    root = etree.parse(database_path).getroot()
    actor_ids = find_all_actor_ids(root, parts)
    films = find_actor_films_dict(root, actor_ids)
    # An empty dict means no matching actor was found.
    return films if films else None
def find_all_actor_ids(root, first_last_name):
    """Return actor_id strings for every actor matching the (first, last) pair."""
    first = first_last_name[0]
    last = first_last_name[1]
    # Same XPath as before, assembled with a format string instead of '+' chains.
    query = "//actor_table/actor[first_name='{0}' and last_name='{1}']".format(first, last)
    return [element.find('./actor_id').text for element in root.xpath(query)]
def find_actor_films_dict(root, actor_ids):
    """Map each actor_id to its films (title, release_year), sorted by title."""
    result = {}
    for actor_id in actor_ids:
        fa_query = "//film_actor_table/film_actor[actor_id='{0}']".format(actor_id)
        film_ids = [el.xpath('film_id')[0].text for el in root.xpath(fa_query)]

        films = []
        for film_id in film_ids:
            film_query = "//film_table/film[film_id='{0}']".format(film_id)
            film = root.xpath(film_query)[0]
            films.append({
                'title': film.xpath('title')[0].text,
                'release_year': film.xpath('release_year')[0].text,
            })
        result[actor_id] = films
    return {k: sorted(v, key=lambda film: film['title']) for k, v in result.items()}
if __name__ == '__main__':
    # Example invocation:
    #   python actor.py 'ed chase'
    print(main(sys.argv[1]))
|
import json
import requests
from utils.config import get_http
from utils.log import logger
class GetTaskGroupNum(object):
    """Queries the userStudyCenter service for service and task-group IDs."""

    def __init__(self):
        # Protocol scheme ("http"/"https") resolved from configuration.
        self.pr = get_http()
        # self.url = self.headers.get('Host')

    def get_service_id(self, headers):
        """Return the serviceID reported by the serviceInfo endpoint."""
        host = headers.get("Host")
        url = "{}://{}/userStudyCenter/serviceInfo".format(self.pr, host)
        querystring = {"serviceID": ""}
        logger.info("Get service id url is:{}".format(url))
        response = requests.request("GET", url, headers=headers, params=querystring)
        result = response.text
        logger.info("Service ID result is :{}".format(result))
        json_data = json.loads(result)
        data = json_data.pop("data")
        logger.info("ServiceID is :{}".format(data.get('serviceID')))
        return data.get('serviceID')

    def get_task_group_id(self, headers, serviceId):
        """Return the practice entries plus each entry's questGuide items.

        BUGFIX: the original wrapped both an unused lookup (`result0`) and the
        first loop in bare ``except: pass`` blocks, then iterated `result`
        again unguarded -- a missing/None 'practice' section crashed with
        TypeError. The dead lookup is removed and `None` is handled explicitly.
        """
        task_group = []
        host = headers.get("Host")
        url = "{}://{}/userStudyCenter/{}/taskInfo".format(self.pr, host, serviceId)
        querystring = {"taskID": ""}
        logger.info("Get Task group ID url is:{}".format(url))
        response = requests.request("GET", url, headers=headers, params=querystring)
        json_data = json.loads(response.text)
        result = json_data.get("data").get('practice')
        if result:
            # Same ordering as before: all practice entries first, then the
            # questGuide items of each entry.
            for entry in result:
                task_group.append(entry)
            for entry in result:
                for guide in entry.get("questGuide"):
                    task_group.append(guide)
        logger.info("GroupID is :{}".format(task_group))
        return task_group
if __name__ == '__main__':
    task_group = GetTaskGroupNum()
    # NOTE(review): get_service_id() and get_task_group_id() both require a
    # `headers` argument that is not supplied here, so this block raises
    # TypeError as written -- confirm the intended headers source.
    serviceID = task_group.get_service_id()
    print(serviceID)
    result = task_group.get_task_group_id(serviceID)
    for i in result:
        print(i.get("groupID"), i.get("taskID"), i.get("currStatus"))
|
import gym
import numpy as np
from gym import error, spaces
from matplotlib import pyplot as plt
class Learn_Corn(gym.Env):
    """Gym environment in which the agent classifies images one at a time.

    reset() supplies a batch of images and one-hot labels; each step() is a
    class guess (Discrete(2)) for the current image.
    """

    def __init__(self):
        '''
        Initialize environment variables
        '''
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Box(low=0, high=255, \
            shape=(256,256,3), dtype=np.float32)
        self.num_correct = 0     # correct guesses so far
        self.times_guessed = 0   # total guesses so far
        self.images = None       # image batch, provided via reset()
        self.classes = None      # one-hot labels, provided via reset()

    def step(self, action):
        '''
        Offers an interface to the environment by performing an action and
        modifying the environment via the specific action taken
        Returns: envstate, reward, done, info
        '''
        if self.images is not None and self.classes is not None:
            self.guess = action
            if np.argmax(self.classes[self.times_guessed]) == action:
                reward = 1
                self.num_correct += 1
            else:
                reward = 0
            # NOTE(review): `reward` is computed but the return statement
            # below sends the running accuracy instead -- confirm intent.
            if self.times_guessed == len(self.images)-1:
                done = True
                ob = None   # no further observation after the last image
            else:
                done = False
                ob = self.images[self.times_guessed+1]
            self.times_guessed += 1
            self.envstate = ob
            return ob, self.num_correct/self.times_guessed, done, {}

    def reset(self, images=None, classes=None):
        ' Returns environment state after reseting environment variables '
        # print('Attempting to reset', self.images is None)
        # Only the first reset stores the dataset; later resets reuse it.
        if (self.images is None and self.classes is None):
            self.images = images
            self.classes = classes
        self.num_correct = 0
        self.times_guessed = 0
        self.envstate = self.images[0]
        self.guess = None
        return self.envstate

    def render(self,):
        '''
        This method will provide users with visual representation of what is
        occuring inside the environment
        '''
        # plt.imshow(np.uint8(self.envstate)) #render image
        print('Answer:', self.classes[self.times_guessed-1])
        print('Guess:', self.guess)
        print('Num steps:', self.times_guessed)
        print('Accuracy:', self.num_correct/self.times_guessed)
        print('\n\n')

    def get_reward(self):
        '''
        Calculate and return reward based on current environment state
        '''
        pass
# The input is opened twice (one handle per part) because each part consumes
# and closes its file object.
file1 = open("c:\\users\\devatendou\\Desktop\\AdventCode2017\\AdventCode4.1.txt", 'r')
file2 = open("c:\\users\\devatendou\\Desktop\\AdventCode2017\\AdventCode4.1.txt", 'r')
def hasDuplicates(lista):
    """Return True if any word occurs more than once in `lista`.

    Replaces the original O(n^2) double loop with a set-based size
    comparison: duplicates shrink the set.
    """
    return len(lista) != len(set(lista))
def isAnagram(lista):
    """Return True if any two words in `lista` are anagrams of each other.

    Two words are anagrams when their sorted letters match, so duplicate
    sorted-letter signatures signal an anagram pair. O(n * k log k) instead
    of the original O(n^2) pairwise comparison.
    """
    signatures = [tuple(sorted(word)) for word in lista]
    return len(signatures) != len(set(signatures))
def part1(content):
    """Count passphrases (lines) whose whitespace-separated words are all unique."""
    valid = sum(1 for line in content if not hasDuplicates(line.split()))
    content.close()
    return valid
def part2(content):
    """Count passphrases (lines) containing no pair of anagram words."""
    valid = sum(1 for line in content if not isAnagram(line.split()))
    content.close()
    return valid
# Part 1: no duplicate words; part 2: no anagram pairs.
print(part1(file1))
print(part2(file2))
|
'''
keys:
Solutions:
Similar:
T:
S:
'''
from typing import List
import bisect
class Solution:
    """Weighted job scheduling (LeetCode 1235) via DP over jobs sorted by end time.

    `best` is a list of [end_time, max_profit] pairs that is non-decreasing
    in both fields, so bisect can locate the best profit achievable before a
    job's start time.
    """

    def jobScheduling(self, startTime: List[int], endTime: List[int], profit: List[int]) -> int:
        jobs = sorted(zip(startTime, endTime, profit), key=lambda job: job[1])

        # Sentinel: zero profit is achievable at time 0.
        best = [[0, 0]]
        for start, end, gain in jobs:
            # Rightmost entry whose end time is <= start. We search for
            # [start + 1] because entries are [end_time, profit] pairs.
            idx = bisect.bisect(best, [start + 1]) - 1
            candidate = best[idx][1] + gain
            # Append only when doing this job beats the best profit so far.
            if candidate > best[-1][1]:
                best.append([end, candidate])
        return best[-1][1]
|
def cal(*nums):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(nums)
# Demo: 2+4+...+20 (=110) plus 90 -> 200.
print(cal(2,4,6,8,10,12,14,16,18,20,90))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import datetime, timedelta
from typing import TYPE_CHECKING
from uuid import UUID
import pytest
from flask.ctx import AppContext
from freezegun import freeze_time
if TYPE_CHECKING:
from superset.extensions.metastore_cache import SupersetMetastoreCache
# Keys and values shared by the cache tests below.
FIRST_KEY = "foo"
FIRST_KEY_INITIAL_VALUE = {"foo": "bar"}
FIRST_KEY_UPDATED_VALUE = "foo"
SECOND_KEY = "baz"
SECOND_VALUE = "qwerty"
@pytest.fixture
def cache() -> SupersetMetastoreCache:
    """Fresh metastore-backed cache with a fixed namespace and 10-minute default timeout."""
    # Imported lazily so collecting this module does not require the app.
    from superset.extensions.metastore_cache import SupersetMetastoreCache
    return SupersetMetastoreCache(
        namespace=UUID("ee173d1b-ccf3-40aa-941c-985c15224496"),
        default_timeout=600,
    )
def test_caching_flow(app_context: AppContext, cache: SupersetMetastoreCache) -> None:
    """End-to-end add/set/get/delete behaviour of the metastore cache."""
    assert cache.has(FIRST_KEY) is False
    assert cache.add(FIRST_KEY, FIRST_KEY_INITIAL_VALUE) is True
    assert cache.has(FIRST_KEY) is True
    cache.set(SECOND_KEY, SECOND_VALUE)
    assert cache.get(FIRST_KEY) == FIRST_KEY_INITIAL_VALUE
    assert cache.get(SECOND_KEY) == SECOND_VALUE
    # add() must not overwrite an existing key.
    assert cache.add(FIRST_KEY, FIRST_KEY_UPDATED_VALUE) is False
    assert cache.get(FIRST_KEY) == FIRST_KEY_INITIAL_VALUE
    # set() does overwrite. BUGFIX: use the identity check (`is True`) like
    # every other assertion in this file instead of `== True` (flake8 E712).
    assert cache.set(FIRST_KEY, FIRST_KEY_UPDATED_VALUE) is True
    assert cache.get(FIRST_KEY) == FIRST_KEY_UPDATED_VALUE
    cache.delete(FIRST_KEY)
    assert cache.has(FIRST_KEY) is False
    assert cache.get(FIRST_KEY) is None
    # Deleting one key must not disturb the other.
    assert cache.has(SECOND_KEY)
    assert cache.get(SECOND_KEY) == SECOND_VALUE
def test_expiry(app_context: AppContext, cache: SupersetMetastoreCache) -> None:
    """Entries stop being readable once their timeout has elapsed."""
    delta = timedelta(days=90)
    dttm = datetime(2022, 3, 18, 0, 0, 0)
    with freeze_time(dttm):
        cache.set(FIRST_KEY, FIRST_KEY_INITIAL_VALUE, int(delta.total_seconds()))
        assert cache.get(FIRST_KEY) == FIRST_KEY_INITIAL_VALUE
    # One second before expiry the entry is still readable...
    with freeze_time(dttm + delta - timedelta(seconds=1)):
        assert cache.has(FIRST_KEY)
        assert cache.get(FIRST_KEY) == FIRST_KEY_INITIAL_VALUE
    # ...and one second after expiry it is gone.
    with freeze_time(dttm + delta + timedelta(seconds=1)):
        assert cache.has(FIRST_KEY) is False
        assert cache.get(FIRST_KEY) is None
|
def catch_sign_change(lst):
    """Count how often the sign changes while scanning `lst` left to right.

    Zero counts as positive (matching the documented example below, where a 0
    following negatives counts as a change). An empty list returns 0.
    """
    if not lst:
        return 0
    # Start at -1 so the first element's transition into its own sign is free.
    changes = -1
    state = ''
    for x in lst:
        # BUGFIX: the original tested `x > -1`, which misclassifies float
        # values in (-1, 0) as non-negative; `x >= 0` is the intended test.
        if x >= 0 and state != 'p':
            changes += 1
            state = 'p'
        elif x < 0 and state != 'n':
            changes += 1
            state = 'n'
    return changes
'''
Count how often sign changes in array.
result
number from 0 to ... . Empty array returns 0
example
const arr = [1, -3, -4, 0, 5]
| elem | count |
|------|-------|
| 1 | 0 |
| -3 | 1 |
| -4 | 1 |
| 0 | 2 |
| 5 | 2 |
return 2;
'''
|
######## finds players near a wolf and kills one ########
import requests
from requests.auth import HTTPBasicAuth

# Hard-coded test credentials for the demo server.
username = "brittany"
password = "yes"

#Players Near
# NOTE(review): `payload` is built but never passed to the request below
# (the commented-out examples all send it via `data=payload`) -- confirm
# whether /players/findAllNear is meant to receive the userID.
payload = {'userID' : '3393'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/findAllNear', auth=HTTPBasicAuth(username, password))
print("User ID: 3393---players near " + r.text)
'''
payload = {'userID' : '555'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 555---players near " + r.text)
payload = {'userID' : '942'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 942---players near " + r.text)
payload = {'userID' : '111'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near',
data = payload, auth=HTTPBasicAuth(username, password))
print("User ID: 111---players near " + r.text)
payload = {'userID' : '222'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 222---players near " + r.text)
payload = {'userID' : '444'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 444---players near " + r.text)
payload = {'userID' : '666'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near',
data = payload, auth=HTTPBasicAuth(username, password))
print("User ID: 666---players near " + r.text)
payload = {'userID' : '777'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 777---players near " + r.text)
payload = {'userID' : '888'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 888---players near " + r.text)
payload = {'userID' : '1234'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near',
data = payload, auth=HTTPBasicAuth(username, password))
print("User ID: 1234---players near " + r.text)
payload = {'userID' : '555'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 555---players near " + r.text)
payload = {'userID' : '900'}
r = requests.get('http://mighty-sea-1005.herokuapp.com/players/near', data = payload,
auth=HTTPBasicAuth(username, password))
print("User ID: 900---players near " + r.text)
'''
|
from make_config import start
from typing import Dict, Tuple, List
from torch.optim import Optimizer, AdamW
from torch.optim.lr_scheduler import LambdaLR
from bart_tokenizer import AsianBartTokenizer
from asian_bart import AsianBartForConditionalGeneration
import pytorch_lightning as pl
import torch
class DistillBart(pl.LightningModule):
    """Lightning wrapper around a distilled Asian-BART seq2seq model.

    Args:
        num_encoder: number of encoder layers for the distilled model.
        num_decoder: number of decoder layers for the distilled model.
    """

    def __init__(self, num_encoder: int, num_decoder: int):
        super().__init__()
        self.lr = 3e-5
        # BUGFIX: a trailing comma previously made this the tuple (1e-4,).
        # (The value is currently not forwarded to the optimizer -- see
        # configure_optimizers.)
        self.weight_decay = 1e-4
        self.tokenizer = AsianBartTokenizer.from_pretrained("hyunwoongko/asian-bart-ecjk")
        self.model = start(num_encoder, num_decoder)
        print("well_loaded")

    def forward(self, batch):
        """Tokenize a (src, tgt, lang_code) batch and run the seq2seq model."""
        s1, s2, lang_code = batch
        model_inputs = self.tokenizer.prepare_seq2seq_batch(
            src_texts=s1,
            src_langs=lang_code,
            tgt_texts=s2,
            tgt_langs=lang_code,
            padding="max_length",
            max_len=256,
        )
        # NOTE(review): device is hard-coded; Lightning normally moves the
        # batch itself -- confirm the single-GPU assumption.
        for key, v in model_inputs.items():
            model_inputs[key] = model_inputs[key].to("cuda")
        out = self.model(input_ids=model_inputs['input_ids'],
                         attention_mask=model_inputs['attention_mask'],
                         labels=model_inputs['labels'])
        return out

    def training_step(self, batch, batch_idx):
        """Run one training batch and log its loss.

        batch     : data from the DataLoader
        batch_idx : index of the batch
        """
        out = self.forward(batch)
        loss = out["loss"]
        self.log("train_loss", loss)
        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx) -> Dict:
        """Run one validation batch and log its loss.

        batch     : ([s1, s2, lang_code, lang_code]) data from the DataLoader
        batch_idx : index of the batch
        """
        out = self.forward(batch)
        loss = out["loss"]
        self.log('val_loss', loss, on_step=True, prog_bar=True, logger=True)
        return loss

    def configure_optimizers(self):
        """Configure optimizers and lr schedulers.

        NOTE(review): self.weight_decay is not passed to AdamW -- confirm
        whether decoupled weight decay was intended here.
        """
        optimizer = AdamW(self.model.parameters(), lr=self.lr)
        return {"optimizer": optimizer}
|
'''
多态:
- 一个对象不同场景下, 有不同的形态
好处:
- 开闭原则. 对修改关闭, 对扩展开放
- 增加可扩展性
@author: xilh
@since: 20200127
'''
from demo.tools.tool import pline
class Animal:
    """Base class of the polymorphism demo; subclasses override run()."""
    def run(self):
        pass
class Dog(Animal):
    """Animal subclass whose run/eat print dog-specific messages."""
    def run(self):
        print("Dog.run ...")
    def eat(self):
        print("Dog.eat ...")
class Cat(Animal):
    """Animal subclass whose run/eat print cat-specific messages."""
    def run(self):
        print("Cat.run ...")
    def eat(self):
        print("Cat.eat ...")
print("== 多态 ==")
dog = Dog()
dog.run()
cat = Cat()
cat.run()
# Both subclasses are Animals (and, like everything in Python, objects).
print(isinstance(dog, Animal))
print(isinstance(cat, Animal))
print(isinstance(dog, object))
print(isinstance(cat, object))
pline()

# Typical polymorphism call sites.
def run(animal):
    # Strict type check: only Animal instances are dispatched.
    if isinstance(animal, Animal):
        animal.run()

def eat(animal):
    # Duck-typed: anything exposing eat() works.
    animal.eat()

run(dog)
run(cat)
eat(dog)
eat(cat)
#!/usr/bin/env python3
"""The script decompiles the given file via RetDec R2 plugin.
The supported decompilation modes are:
/TODO/ full - decompile entire input file.
selective - decompile only the function selected by the given address.
"""
import argparse
import os
import shutil
import sys
import r2pipe
class WorkingDirectory:
    """Context manager: chdir into `path` on entry, restore the old cwd on exit."""

    def __init__(self, path):
        self.old_path = os.getcwd()
        self.path = path

    def __enter__(self):
        os.chdir(self.path)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Restore the original working directory even if the body raised.
        os.chdir(self.old_path)
def print_error_and_die(*msg):
    """Print `msg` prefixed with 'Error:' and terminate with exit status 1."""
    print('Error:', *msg)
    raise SystemExit(1)
def parse_args(args):
    """Build the CLI parser and parse `args` (typically sys.argv[1:])."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('file',
                        metavar='FILE',
                        help='The input file.')
    parser.add_argument('-o', '--output',
                        dest='output',
                        metavar='FILE',
                        help='Output file (default: file.c). All but the last component must exist.')
    parser.add_argument('-s', '--select',
                        dest='selected_addr',
                        help='Decompile only the function selected by the given address (any address inside function). Examples: 0x1000, 4096.')
    parser.add_argument('-p', '--project',
                        dest='project_path',
                        metavar='FILE',
                        help='R2 project associated with input file.')
    parser.add_argument('-c', '--cmds',
                        dest='commands',
                        metavar='CMD1;CMD2;CMD3...',
                        help='Inital R2 commands separated by semicolon.')
    return parser.parse_args(args)
def check_args(args):
    """Validate CLI arguments and derive `file_dir`, `output`, and `output_dir`.

    Dies with an error message when the input file, the R2 project file, or
    the output directory does not exist.
    """
    if not args.file or not os.path.exists(args.file):
        print_error_and_die('Specified input file does not exist:', args.file)

    # BUGFIX: os.path.dirname() returns '' for a bare filename, and
    # os.path.exists('') is False, so relative paths used to fail the checks
    # below (and os.chdir('') in main crashed). Treat '' as the current dir.
    args.file_dir = os.path.dirname(args.file) or '.'

    if args.project_path and not os.path.exists(args.project_path):
        print_error_and_die('Specified R2 project file does not exist:', args.project_path)

    if not args.output:
        args.output = args.file + '.c'

    args.output_dir = os.path.dirname(args.output) or '.'
    if not os.path.exists(args.output_dir):
        print_error_and_die('Output directory does not exist:', args.output_dir)
def main():
    """Decompile the input file via the RetDec R2 plugin and write the C output."""
    args = parse_args(sys.argv[1:])
    check_args(args)

    with WorkingDirectory(args.file_dir):
        # Make sure the binary (and any R2 project) is present next to the output.
        if args.file_dir != args.output_dir:
            shutil.copy(args.file, args.output_dir)
        if args.project_path and os.path.dirname(args.project_path) != args.output_dir:
            shutil.copy(args.project_path, args.output_dir)

        r2 = r2pipe.open(args.file)
        # Either load an existing project or run full analysis from scratch.
        if args.project_path:
            r2.cmd('Po ' + args.project_path)
        else:
            r2.cmd('aaa')

        if args.commands:
            # Commands are ';'-separated, but a double-quoted command may
            # itself contain ';' -- re-join pieces until the closing '"'.
            cmds = args.commands.split(';')
            joining = ''
            for cmd in cmds:
                if joining:
                    joining += ";"+cmd
                    if cmd[-1] == '"':
                        r2.cmd(joining)
                        joining = ''
                elif cmd[0] == '"' and cmd[-1] != '"':
                    joining = cmd
                else:
                    r2.cmd(cmd)

        # Seek to the requested function before invoking the decompiler.
        if args.selected_addr:
            r2.cmd('s ' + args.selected_addr)
        out = r2.cmd('#!pipe r2retdec')
        r2.quit()

    try:
        with open(args.output, "w") as f:
            f.write(out)
    except Exception as e:
        sys.stderr.write('Unable to open file '+str(e))
    return 0
# Script entry point.
if __name__ == "__main__":
    main()
|
import os, sys, time
from utils import *
import pygame
from eventmanager import Events, InputManager
def makeFinishGameScreen(surface, levelName):
    """Show the game-complete menu and return the selected option string.

    `levelName` is unused here but keeps the signature parallel to
    makeNextLevelScreen.
    """
    mytext = "Completed the game!"
    options = ["Play again", "Return to Title"]
    return NextLevelScreen(surface, mytext, options, getAssetsPath() + os.sep + "rainbow.png").runMenu()
def makeNextLevelScreen(surface, levelName):
    """Show the level-complete menu for `levelName`; return the chosen option.

    BUGFIX: an unknown level name previously left `imagename` unbound and
    raised NameError; anything other than "start" now falls back to the full
    rainbow image (preserving the original behavior for "start" and "next").
    """
    mytext = "Completed the level " + levelName
    options = ["Next Level", "Return to Title"]
    if levelName == "start":
        imagename = "emptyhalfrainbow"
    else:
        imagename = "rainbow"
    return NextLevelScreen(surface, mytext, options, getAssetsPath() + os.sep + imagename + ".png").runMenu()
class DialogBox(object):
    """Keyboard-driven menu rendered with pygame.

    Draws `text`, an optional image, and a list of selectable `options`;
    runMenu() blocks until the user confirms a choice.
    """

    def __init__(self, drawSurface, text, options, image):
        self.drawSurface = drawSurface
        self.currentlySelected = 0   # index of the highlighted option
        self.text = text
        self.options = options
        if image != None:
            self.image = pygame.image.load(image)
        else:
            self.image = None
        self.drawSurface.fill(pygame.Color('white'))
        self.drawMenu()

    def drawMenu(self):
        """Redraw the whole menu; the selected option is rendered larger."""
        self.drawSurface.fill(pygame.Color('white'))
        self.drawSurface.blit(pygame.font.SysFont("comicsansms", 32).render(self.text, False, pygame.Color('black')), (250, 60))
        if self.image != None:
            self.drawSurface.blit(self.image, (50, 130))
        for i in range(len(self.options)):
            textsize = 24
            if self.currentlySelected == i:
                textsize = 28   # enlarge the highlighted entry
            self.drawSurface.blit(pygame.font.SysFont("comicsansms", textsize).render(self.options[i], False, pygame.Color('black')), (250, 450 + (i * 40)))
        pygame.display.flip()

    def runMenu(self):
        """Event loop: arrow keys move the selection, Return confirms.

        Escape resets the selection to option 0 before confirming.
        """
        pygame.event.clear()
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    sys.exit()
                if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                    # Wrap around from the first to the last option.
                    if self.currentlySelected == 0:
                        self.currentlySelected = len(self.options) - 1
                    else:
                        self.currentlySelected = self.currentlySelected - 1
                    self.drawMenu()
                if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                    # Wrap around from the last to the first option.
                    if self.currentlySelected == len(self.options) - 1:
                        self.currentlySelected = 0
                    else:
                        self.currentlySelected = self.currentlySelected + 1
                    self.drawMenu()
                if event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
                    return self.execute()
                if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    self.currentlySelected = 0
                    return self.execute()

    def execute(self):
        # Subclasses override this to map the selection to a result string.
        return ""
class NextLevelScreen(DialogBox):
    """DialogBox whose first option advances to the next level."""

    def execute(self):
        # Option 0 is "Next Level"; any other selection returns to the title.
        return "Next" if self.currentlySelected == 0 else "Back"
from flask import Flask,render_template
# render_template is needed to return a rendered template.

app=Flask(__name__)

@app.route('/')
def index_view():
    # cid is passed into the template for use there.
    return render_template('index.html',cid=1)

# e.g. http://127.0.0.1:5000/list/4
# Each URL maps to a handler; visiting the address triggers the view.
@app.route('/list/<int:cid>')
# Route parameter: accepts cid and enforces its type (int).
def list_view(cid):
    if cid <=2:
        return '你传入的cid值小于等于2'
    else:
        return '你传入的cid值大于2'

app.run(debug=True)
#!/usr/bin/python3
"""
plexPlaylist.py: This python script creates a Plex
playlist from the server API. It takes one or more
m3u files as input. A section and token ID are also
required.
To get these items, go to the "three dots" to the right
of a song already in the system (it may or may not
be a song in the desired playlist). Click on 'Get Info'.
At the bottom click on the [View XML] button. The
librarySectionID is in the XML and should be a small
integer number. The X-Plex-Token is in the URL (typically
at the end) and is a string of about 20 random characters.
Use these as command line parameters for this script.
Note that the m3u files must be described as full paths
visible to the server.
Much of this info is taken from the Reddit post:
https://www.reddit.com/r/PleX/comments/ecirqa/how_to_manually_import_an_m3u_playlist_into_plex/
"""
__author__ = "Steven A. Guccione"
__date__ = "May 21, 2020"
__copyright__ = "Copyright (c) 2020 by Steven A. Guccione"
import argparse
import requests
if __name__ == '__main__':

    # Parse command line parameters
    parser = argparse.ArgumentParser()
    parser.add_argument("m3uFiles", nargs='+', type=str, help="m3u files (full path)")
    parser.add_argument("--section", type=int, help="librarySectionID from XML", required=True)
    parser.add_argument("--token", type=str, help="X-Plex-Token string from URL", required=True)
    parser.add_argument("--server", type=str, help="server name (default: http://127.0.0.1:32400)",
                        default='http://127.0.0.1:32400')
    parser.add_argument("--verbose", help="verbose flag", action='store_true')
    parser.add_argument("--debug", help="debug flag", action='store_true')
    args = parser.parse_args()

    # Upload each playlist file to the Plex server's playlist endpoint.
    for m3uFile in args.m3uFiles:

        # Create the request string
        url = args.server + "/playlists/upload?sectionID=" +\
            str(args.section) + "&path=" + m3uFile +\
            "&X-Plex-Token=" + args.token

        if args.debug:
            print("url: " + url)

        # Make the request
        try:
            response = requests.post(url)
        except requests.exceptions.RequestException as err:
            raise SystemExit(err)

        if args.debug:
            print(response)

        if response.status_code == requests.codes.ok:
            if args.verbose:
                print(m3uFile, " done.")
        else:
            print("ERROR: HTTP server response code " + str(response.status_code))
            # NOTE(review): this constructs a SystemExit but never raises it,
            # so it is a no-op -- presumably `raise SystemExit(...)` or
            # `sys.exit(...)` was intended. Confirm the desired error handling.
            SystemExit(0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from utils.utils import convert_stack_value_to_int
class BlockDependencyDetector():
    """Detects block-state dependency (SWC-120): ether-transferring control
    flow influenced by block information (timestamp, number, coinbase, ...)."""

    # Substrings marking an expression as derived from block information.
    BLOCK_KEYWORDS = ("blockhash", "coinbase", "timestamp", "number",
                      "difficulty", "gaslimit")

    def __init__(self):
        self.init()

    def init(self):
        """Reset detector state for a fresh execution trace."""
        self.swc_id = 120
        self.severity = "Low"
        self.block_instruction = None   # pc of the last block-info opcode seen
        self.block_dependency = False   # True once block info taints a decision

    @classmethod
    def _uses_block_info(cls, expressions):
        """Return True if any expression string references block-related data."""
        return any(keyword in str(expression)
                   for expression in expressions
                   for keyword in cls.BLOCK_KEYWORDS)

    def detect_block_dependency(self, tainted_record, current_instruction, previous_branch):
        """Inspect one executed instruction; return the pc of the registered
        block-info instruction when a dependency is confirmed at a clean
        execution stop, otherwise None."""
        # Check for a call with transfer of ether (amount greater than zero or symbolic)
        if current_instruction["op"] == "CALL" and (convert_stack_value_to_int(current_instruction["stack"][-3]) or tainted_record and tainted_record.stack[-3]) or \
           current_instruction["op"] in ["SELFDESTRUCT", "SUICIDE", "CREATE", "DELEGATECALL"]:
            # Check if there is a block dependency in the previous branch expression
            if self._uses_block_info(previous_branch):
                self.block_dependency = True
        # Check if block related information flows into a comparison
        elif current_instruction and current_instruction["op"] in ["LT", "GT", "SLT", "SGT", "EQ"]:
            if tainted_record and tainted_record.stack:
                if tainted_record.stack[-1] and self._uses_block_info(tainted_record.stack[-1]):
                    self.block_dependency = True
                if tainted_record.stack[-2] and self._uses_block_info(tainted_record.stack[-2]):
                    self.block_dependency = True
        # Register block related information
        elif current_instruction["op"] in ["BLOCKHASH", "COINBASE", "TIMESTAMP", "NUMBER", "DIFFICULTY", "GASLIMIT"]:
            self.block_instruction = current_instruction["pc"]
        # Check if execution stops without exception
        if self.block_dependency and current_instruction["op"] in ["STOP", "SELFDESTRUCT", "RETURN"]:
            return self.block_instruction
        return None
|
#EXAME A ENTREGAR
import random
"""
Una linea de ropa deportiva cuenta con N productos para la venta, cuenta con un listado donde se visualiza para
cada mes del año pasado la cantidad total de ventas por producto.
El archivo adjunto presenta un ejemplo.
Se solicita creando al menos una función para cada informe:
1. Crear una matriz para representar las ventas de cada producto por mes,
creando al azar las cantidades vendidas de N productos para el primer semestre (Enero-Junio).
Considerando que la cantidad máxima de ventas es de 1500 unidades y N se obtiene por teclado cuidando
que sea positivo. (25%)"""
"""
2. Cuál fue el mes que realizó la menor cantidad de ventas, dentro de ese mes cuál es el producto que menos
se vendió.
(En lo posible mostrar el nombre del mes utilizando una lista de los nombres de los meses.) (25%)
3. Determinar si las ventas totales van en aumento continuo en el primer trimestre para todos los productos.
En caso afirmativo normalizar ese producto. ( crearlo en una lista aparte)(25%)
4. Crear una lista con las cantidades vendidas por cada mes comenzando por los meses pares y a
continuacion los meses impares.
Filtrar las cantidades que son menores al promedio de ventas. (25%)
"""
# Spanish month names used for reporting; `semestre1` is the Jan-Jun subset.
# NOTE(review): semestre1 appears unused in the visible code — confirm.
mes=['enero','febrero','marzo','abril','mayo','junio','julio','agosto','septiembre','octubre','noviembre','diciembre']
semestre1=['enero','febrero','marzo','abril','mayo','junio']
def ventas_sem1(n):
    """Build an n x 6 matrix of random unit sales (1000-1500) for Jan-Jun."""
    return [[random.randint(1000, 1500) for _ in range(6)] for _ in range(n)]
def ingresar_positivo():
    """Prompt repeatedly until the user enters an integer >= 1."""
    valor = int(input("Ingresar num positivo :"))
    while valor < 1:
        print("error num erroneo.")
        valor = int(input("ingresar num positivo :"))
    return valor
def mostrar_matriz(matriz):
    """Print the matrix row by row, each value right-aligned to width 3."""
    for fila in matriz:
        for valor in fila:
            print("%3d" % valor, end=" ")
        print()
def menor_vendido(matriz):
    """Return (minimum value, ["producto:", row, "mes:", col]) locating it.

    BUG FIX: the location list was only filled when a value strictly smaller
    than matriz[0][0] appeared, so a minimum sitting at (0, 0) returned an
    empty location list.
    """
    menor = matriz[0][0]
    lista_menor = ["producto:", 0, "mes:", 0]
    for f in range(len(matriz)):
        for c in range(len(matriz[0])):
            if matriz[f][c] < menor:
                menor = matriz[f][c]
                lista_menor = ["producto:", f, "mes:", c]
    return menor, lista_menor
def normalizar_ventas_crecientes(matriz):
    """Return a list with each product whose sales rise strictly through the
    first trimester (months 0-2), normalized by its semester total.

    BUG FIX: the original either returned an always-empty list or crashed
    with a NameError on the undefined name `lista`; this implements the
    stated requirement (informe 3: detect continuous first-trimester growth
    and normalize that product into a separate list).
    """
    normalizados = []
    for fila in matriz:
        if fila[0] < fila[1] < fila[2]:
            total = sum(fila)
            normalizados.append([v / total for v in fila])
    return normalizados
def cant_vendidas_pi(matriz):
    """Total units sold per month, listing even-index months first, then odd."""
    totales = [sum(fila[c] for fila in matriz) for c in range(len(matriz[0]))]
    return totales[::2] + totales[1::2]
def promedio(lista):
    """Arithmetic mean of a non-empty list."""
    return sum(lista) / len(lista)
def main():
    # 1: build and display the random first-semester sales matrix
    cantidad = ingresar_positivo()
    ventas = ventas_sem1(cantidad)
    mostrar_matriz(ventas)
    # 2: minimum sale and its (product, month) location
    minimo = menor_vendido(ventas)
    print("cantidad de veces vendidas:")
    print(minimo)
    # 3: products with strictly increasing first-trimester sales, normalized
    crecientes = normalizar_ventas_crecientes(ventas)
    print(crecientes)
    # 4: monthly totals (even-index months first), their average, filtering
    totales = cant_vendidas_pi(ventas)
    print(totales)
    media = promedio(totales)
    print(media)
    filtrado = list(filter(lambda x: x > media, totales))
    print(filtrado)


if __name__ == "__main__":
    main()
|
import pygame
import math
pygame.init()
# "numberOfImages" is the number of images used in the animation
# "timeForAnimation" is the time for the entire sequence to play
class Explosion(pygame.sprite.Sprite):
    """Sprite that plays a timed, multi-frame explosion animation in place."""

    def __init__(self, name, loc, numberOfImages, timeForAnimation, frameRate):
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.x, self.y = int(loc[0]), int(loc[1])
        # Total animation length in frames; counts down once per update().
        self.startTime = timeForAnimation * frameRate
        self.counter = self.startTime
        # How many frames each image stays on screen.
        self.image_increment = self.startTime / numberOfImages
        self.name = name

    def update(self, size):
        """Advance one frame. `size` is unused; kept only so the call matches
        the ship and asteroid classes."""
        self.counter -= 1

    def get_location(self):
        """Current (x, y) position as an int tuple."""
        return (int(self.x), int(self.y))

    def get_image_number(self):
        """1-based index of the animation image to draw for this frame."""
        return int(self.counter / self.image_increment) + 1

    def get_counter(self):
        return self.counter

    def get_name(self):
        return self.name
from propsim_error import *
from config_error import *
from parser_error import *
from json_error import *
from script_error import *
from json_not_found_error import *
from json_malformated_error import *
from action_malformated_error import *
from interaction_malformated_error import *
from invalid_identifier_error import *
from missing_attribute_error import *
from terrain_not_found import *
from script_not_found import *
|
"""
Class to analyze categorized bare DNA
"""
import copy
import numpy as np
import analysis_functions as analysis
import tip_shape_estimation
class BareDNA:
    """Analysis container for one categorized bare-DNA molecule from an AFM image.

    Improves the molecule skeleton, traces it with Wiggins' algorithm to
    measure contour lengths (optionally after tip-shape deconvolution), and
    collects per-molecule metrics in ``self.results``.
    """

    def __init__(self, afm_molecule, decon):
        # Copy the variables, otherwise they are also changed in the AFMMolecule instances
        self.mol_original = copy.deepcopy(afm_molecule.mol_original)
        self.mol_filtered = copy.deepcopy(afm_molecule.mol_filtered)
        self.anal_pars = copy.deepcopy(afm_molecule.anal_pars)
        self.img_meta_data = copy.deepcopy(afm_molecule.img_meta_data)
        self.mol_pars = copy.deepcopy(afm_molecule.mol_pars)
        # Improve the skeleton by skeletonizing the ends and sorting it
        self.improve_skel()
        # Apply Wiggins' algorithm and store the calculated values in the results dict
        self.results = {}
        self.results.update({'position_row': self.mol_pars['mol_bbox'][0],
                             'position_col': self.mol_pars['mol_bbox'][1]})
        if decon['tip_shape'] is None:
            # No tip estimate available - measure on the filtered image directly
            self.calculate_length(self.mol_filtered)
        elif decon['tip_shape'] is not None:
            # Deconvolve the tip shape first, then measure on the result
            self.results.update({'tip_shape': np.array2string(decon['tip_shape']['tip_shape_arr'],
                                                              formatter={'float_kind': '{0:.3f}'.format}),
                                 'tip_excentricity': decon['tip_shape']['tip_excentricity']})
            self.mol_filtered_decon = tip_shape_estimation.decon_mol(self.mol_filtered, decon['tip_shape'])
            self.calculate_length(self.mol_filtered_decon)
        self.results.update(analysis.radius_of_gyration(self.mol_filtered, self.img_meta_data['pixel_size']))
        self.further_analysis()
        self.angles()

    def improve_skel(self):
        """Skeletonize both molecule ends and re-sort the skeleton pixels."""
        mol_filtered = copy.deepcopy(self.mol_filtered)
        mol_pars = copy.deepcopy(self.mol_pars)
        # Sort the skeleton by using the sort_skel function
        mol_pars['skel_sorted'] = analysis.sort_skel(mol_pars, start=mol_pars['skel_eps_pixels'][0, :])
        # Skeletonize the first end
        mol_pars['mol_skel'] = analysis.skeletonize_end(mol_filtered, mol_pars, mol_pars['skel_sorted'][0:4])
        # Skeletonize the second end
        mol_pars['mol_skel'] = analysis.skeletonize_end(mol_filtered, mol_pars, mol_pars['skel_sorted'][:-5:-1])
        # Recalculate skeleton parameters
        self.mol_pars.update(analysis.skel_pars(mol_pars['mol_skel']))
        # Sort the skeleton with the new endpoints
        mol_pars['skel_sorted'] = analysis.sort_skel(mol_pars, start=self.mol_pars['skel_eps_pixels'][0, :])
        # Update the remaining parameters
        self.mol_pars.update({'skel_sorted': mol_pars['skel_sorted'],
                              'mol_skel': mol_pars['mol_skel']})
        return self

    def calculate_length(self, mol_filtered):
        """Use Wiggins' algorithm to calculate the DNA lengths.

        Traces the molecule from both endpoints; stores forward/backward
        traces, their lengths, the averaged length and the end-to-end
        distance in ``self.results``. A length is False when its trace failed.
        """
        mol_filtered = copy.deepcopy(mol_filtered)
        mol_pars = copy.deepcopy(self.mol_pars)
        pixel_size = self.img_meta_data['pixel_size']
        seg_length = 5/pixel_size
        # Calculate the length starting at the top-leftmost endpoint
        wigg_fwd, failed_fwd = analysis.wiggins(mol_filtered, seg_length=seg_length,
                                                start=mol_pars['skel_sorted'][:4], end=mol_pars['skel_sorted'][-1],
                                                mol_type='Bare DNA')
        if failed_fwd is False:
            wigg_fwd = np.asarray(wigg_fwd)
            length_fwd = np.sum(np.linalg.norm(wigg_fwd[:-1] - wigg_fwd[1:], axis=1)) * pixel_size
        else:
            length_fwd = False
        # Calculate the length starting at the bottom-rightmost endpoint
        wigg_bwd, failed_bwd = analysis.wiggins(mol_filtered, seg_length=seg_length,
                                                start=mol_pars['skel_sorted'][:-5:-1], end=mol_pars['skel_sorted'][0],
                                                mol_type='Bare DNA')
        if failed_bwd is False:
            wigg_bwd = np.asarray(wigg_bwd)
            length_bwd = np.sum(np.linalg.norm(wigg_bwd[:-1] - wigg_bwd[1:], axis=1)) * pixel_size
        else:
            length_bwd = False
        # Combine the two traces: average when both succeeded, otherwise fall
        # back on whichever one is available.
        if failed_fwd is False and failed_bwd is False:
            length_avg = (length_fwd + length_bwd) / 2
            length_etoe = np.linalg.norm(wigg_fwd[0] - wigg_fwd[-1]) * pixel_size
        elif failed_fwd is False and failed_bwd is True:
            length_avg = length_fwd
            length_etoe = np.linalg.norm(wigg_fwd[0] - wigg_fwd[-1]) * pixel_size
        elif failed_fwd is True and failed_bwd is False:
            length_avg = length_bwd
            length_etoe = np.linalg.norm(wigg_bwd[0] - wigg_bwd[-1]) * pixel_size
        else:
            length_avg = False
            length_etoe = False
            self.results.update({'failed_reason': 'Wiggins failed'})
        self.results.update({'wigg_fwd': wigg_fwd,
                             'wigg_bwd': wigg_bwd,
                             'length_fwd': length_fwd,
                             'length_bwd': length_bwd,
                             'length_avg': length_avg,
                             'length_etoe': length_etoe,
                             'failed': True if failed_bwd is True and failed_fwd is True else False})
        # Reject molecules whose two traces disagree by 5% or more.
        if failed_fwd is False and failed_bwd is False:
            if abs(length_fwd - length_bwd) >= 0.05 * self.results['length_avg']:
                self.results.update({'length_avg': False,
                                     'failed': True,
                                     'failed_reason': 'Back-Forth difference'})
        return self

    def length_filter(self):
        """Flag molecules whose measured length deviates too far from the
        expected contour length (0.33 nm per base pair)."""
        dna_bp = self.anal_pars['dna_length_bp']
        if self.results['length_fwd'] <= 0.80*dna_bp*0.33 or self.results['length_bwd'] <= 0.80*dna_bp*0.33:
            self.results.update({'length_avg': False,
                                 'failed_reason': 'Too short',
                                 'failed': True})
        elif self.results['length_fwd'] >= 1.25*dna_bp*0.33 or self.results['length_bwd'] >= 1.25*dna_bp*0.33:
            self.results.update({'length_avg': False,
                                 'failed_reason': 'Too long',
                                 'failed': True})
        return self

    def further_analysis(self):
        """Check the height values along the Wiggins pixels and the slope between individual pixels"""
        pixel_size = self.img_meta_data['pixel_size']
        mol_bbox = self.mol_pars['mol_bbox']
        if self.results['failed'] is False:
            if self.results['length_fwd'] is not False:
                height_pars = analysis.wiggins_pixel_height_analysis(self.results['wigg_fwd'],
                                                                     self.mol_filtered, pixel_size)
                orientation_pars = analysis.dna_orientation(self.results['wigg_fwd'], mol_bbox)
            elif self.results['length_bwd'] is not False:
                height_pars = analysis.wiggins_pixel_height_analysis(self.results['wigg_bwd'],
                                                                     self.mol_filtered, pixel_size)
                # BUG FIX: this branch previously reused wigg_fwd although the
                # forward trace failed; use the backward trace here.
                orientation_pars = analysis.dna_orientation(self.results['wigg_bwd'], mol_bbox)
            self.results.update(height_pars)
            self.results.update(orientation_pars)
        return self

    def angles(self):
        """Compute angles between consecutive trace segments at four different
        segment spans and store them as formatted strings in the results."""
        try:
            if self.results['failed'] is False:
                if self.results['wigg_fwd'] is not False:
                    wigg_pixels = np.asarray(self.results['wigg_fwd'])
                else:
                    wigg_pixels = np.asarray(self.results['wigg_bwd'])
                # Angles between individual segments
                vecs_1 = wigg_pixels[2:-1] - wigg_pixels[1:-2]
                angles_1 = np.asarray([analysis.angle_between(v1, v2) for v1, v2 in zip(vecs_1[1:], vecs_1[:-1])])
                # Angles between two consecutive segments
                vecs_2 = wigg_pixels[3:-1] - wigg_pixels[1:-3]
                angles_2 = np.asarray([analysis.angle_between(v1, v2) for v1, v2 in zip(vecs_2[2:], vecs_2[:-2])])
                # Angles between three consecutive segments
                vecs_3 = wigg_pixels[4:-1] - wigg_pixels[1:-4]
                angles_3 = np.asarray([analysis.angle_between(v1, v2) for v1, v2 in zip(vecs_3[3:], vecs_3[:-3])])
                # Angles between four consecutive segments
                vecs_4 = wigg_pixels[5:-1] - wigg_pixels[1:-5]
                angles_4 = np.asarray([analysis.angle_between(v1, v2) for v1, v2 in zip(vecs_4[4:], vecs_4[:-4])])
                dna_angles_dict = {'z_angles_1': np.array2string(angles_1, formatter={'float_kind': '{0:.3f}'.format}),
                                   'z_angles_2': np.array2string(angles_2, formatter={'float_kind': '{0:.3f}'.format}),
                                   'z_angles_3': np.array2string(angles_3, formatter={'float_kind': '{0:.3f}'.format}),
                                   'z_angles_4': np.array2string(angles_4, formatter={'float_kind': '{0:.3f}'.format})}
                self.results.update(dna_angles_dict)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch ordinary errors only.
        except Exception:
            self.results['failed'] = True
            self.results['failed_reason'] = 'Angle measurement'
        return self
|
from django.db import models
from django.contrib.auth.models import User
class Customer(models.Model):
    """A customer with a unique name and 10-character phone number."""
    name = models.CharField(blank=False, max_length=120, unique=True)
    phone_number = models.CharField(max_length=10, unique=True)

    def __str__(self):
        # BUG FIX: was __unicode__, which Python 3 / modern Django never
        # calls; the sibling Product model already uses __str__.
        return self.name
class Product(models.Model):
    """A sellable product with a display name, integer price and optional image."""
    name = models.CharField(max_length=120)
    # Price stored as a whole number (currency unit not visible here).
    price = models.IntegerField(default=0, unique=False)
    image = models.ImageField(upload_to ='products/',blank=True)
    def __str__(self):
        return self.name
class Order(models.Model):
    """One product line ordered by a customer.

    NOTE(review): order_date uses auto_now=True, so it is rewritten on every
    save, not only at creation — confirm auto_now_add was not intended.
    """
    customer = models.ForeignKey(Customer, related_name='orders', null=True, blank=True,on_delete=models.CASCADE)
    product = models.ForeignKey(Product, related_name='orders', null=True, blank=True,on_delete=models.CASCADE,)
    order_date = models.DateField(auto_now=True)
    quantity = models.IntegerField()
from flask import request,render_template, redirect,session
from app import app
from dbutil import DB
import json
@app.route('/server')
def server():
    """Render the server page; unauthenticated sessions go to /login."""
    username = session.get('username', None)
    if not username:
        return redirect("/login")
    return render_template('server/server.html', info=session)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Initial migration creating the Stock model (string-typed price fields)."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('stock_id', models.CharField(max_length=10)),
                # NOTE(review): this default is a fixed timestamp frozen when
                # makemigrations ran, not "now" at insert time — presumably a
                # default= instead of auto_now_add in the model; confirm.
                ('time', models.DateTimeField(default=datetime.datetime(2016, 12, 28, 15, 43, 56, 552462))),
                ('end_price', models.CharField(max_length=10)),
                ('buy_price', models.CharField(max_length=10)),
                ('sell_price', models.CharField(max_length=10)),
                ('total_num', models.CharField(max_length=10)),
                ('yesterday_end', models.CharField(max_length=10)),
                ('start_price', models.CharField(max_length=10)),
                ('high_price', models.CharField(max_length=10)),
                ('low_price', models.CharField(max_length=10)),
            ],
        ),
    ]
|
# Entradas
# T: casos de prueba
# L: long de la bandera
import math
# For each of T test cases read flag length L; the flag is H x L with
# H = 3L/5, containing a circular emblem of radius R = L/5. Print the
# emblem area and the remaining (green) area with two decimals.
T = int(input())
for _ in range(T):
    L = int(input())
    H = 3 * L / 5
    R = L / 5
    Ar = (R ** 2) * math.pi  # circle area; math.pi replaces math.acos(-1)
    Av = (L * H) - Ar
    print('{:0.2f} {:0.2f}'.format(Ar, Av))
"""
Simple pipeline for ML projects
Author: Quinn Underriner
"""
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
import seaborn as sns
sns.set()
import sklearn
from sklearn import linear_model
from sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix, precision_recall_curve
from sklearn import metrics
from sklearn import tree
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import RandomizedSearchCV, train_test_split
from sklearn import svm
from sklearn.linear_model import LassoCV
from sklearn.linear_model import RidgeCV
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score, roc_auc_score, precision_recall_fscore_support, precision_recall_curve
from sklearn import ensemble
from sklearn import neighbors
from sklearn.grid_search import ParameterGrid
from datetime import timedelta
import matplotlib.pyplot as plt
from scipy import optimize
#dictionary of models and parameters - inspiration from
#https://github.com/rayidghani/magicloops/blob/master/simpleloop.py
# Candidate classifiers keyed by the names accepted in run_the_models().
MODELS = {
    'decision_tree': tree.DecisionTreeClassifier(),
    'logistic_regression': linear_model.LogisticRegression(),
    'knn': neighbors.KNeighborsClassifier(),
    'random_forest': ensemble.RandomForestClassifier(),
    'support_vector_machine': svm.SVC(),
    'boosting': ensemble.AdaBoostClassifier(),
    'bagging': ensemble.BaggingClassifier()
}
# Hyper-parameter grids per model (keys must be valid estimator parameters).
PARAMS = {
    'decision_tree': {'max_depth': [1, 3, 5, 8, 20]},
    'logistic_regression': {'C': [0.001, 0.01, 0.1, 1, 10]},
    'knn': {'n_neighbors': [5, 10, 25]},
    'random_forest': {'n_estimators': [1, 2, 3, 4, 10]},
    # BUG FIX: SVC's regularization parameter is named 'C'; the previous key
    # 'c_values' made every set_params(**p) call raise, so SVC never ran.
    'support_vector_machine': {'C': [10**-2, 10**-1, 1, 10, 10**2]},
    'boosting': {'n_estimators': [100, 50, 30]},
    'bagging': {'n_estimators': [2, 10, 20]}
}
# Columns that identify rows / hold the label and must not be used as features.
ID_COLS = [
    'EVALUATION_START_DATE', 'predvar', "FOUND_VIOLATION"]
def load_data(filename):
    """Read a CSV file into a pandas DataFrame."""
    return pd.read_csv(filename)
"""
functions for exploration
"""
def percentage_calc(df, col_name, value):
    """Print the fraction of rows where df[col_name] exceeds `value`.

    inputs:
        df: dataframe
        col_name (str): column to test
        value (int): threshold to exceed
    """
    above = df[df[col_name] > value]
    print(len(above) / len(df))
def describe_cols(data):
    """Print summary statistics for every column of the dataframe."""
    for column in data.columns:
        print(data[column].describe())
"""
functions for preprocessing
"""
def impute_median(df, col):
    """Fill nulls in `col` with the column median; returns the dataframe."""
    fill_value = df[col].median()
    df[col] = df[col].fillna(fill_value)
    return df
def impute_mean(df, col):
    """Fill nulls in `col` with the column mean; returns the dataframe."""
    fill_value = df[col].mean()
    df[col] = df[col].fillna(fill_value)
    return df
"""
feature generation
"""
def discretize(df, colname):
    """Bucket a continuous column into quartile labels 1-4 (modifies df).

    Inputs:
        df (dataframe)
        colname (str): name of column to discretize
    """
    quartiles = pd.qcut(df[colname], 4, labels=[1, 2, 3, 4])
    df[colname] = quartiles
    return df
def dummy(df, colname):
    """Replace a categorical column with prefixed one-hot indicator columns.

    Inputs:
        df (dataframe)
        colname (str): name of the column to expand into dummies
    """
    indicators = pd.get_dummies(df[colname])
    indicators = indicators.rename(columns=lambda v: colname + "_" + str(v))
    combined = pd.concat([df, indicators], axis=1)
    return combined.drop([colname], axis=1)
def get_xy(df, response, features):
    """Return (X, y) numpy arrays for modeling.

    Inputs:
        df (dataframe): source data
        response (str): the y column name
        features (list of str): the x column names
    """
    y = df[response].to_numpy()
    X = df.reindex(columns=features)[features].to_numpy()
    return X, y
def choose_features(data, response, features):
    """Select informative features via random-forest SelectFromModel."""
    X, y = get_xy(data, response, features)
    forest = sklearn.ensemble.RandomForestClassifier(n_estimators=100)
    selector = sklearn.feature_selection.SelectFromModel(forest)
    selector.fit(X, y)
    return data[features].columns[selector.get_support()]
###
###new more modular classification
def temporal_train_test_split(df, date_col, freq='6MS', gap_days=60):
    """Produce temporal train/test splits at `freq` intervals.

    inputs:
        df: dataframe
        date_col: column that holds the dates
        freq: pandas offset string for the cut points
        gap_days: gap between train end and test start
    returns:
        list of [[train_start, train_end], [test_start, test_end]] pairs
    """
    lo = df[date_col].min()
    hi = df[date_col].max()
    cut_points = pd.date_range(start=lo, end=hi, freq=freq)[1:]
    gap = timedelta(days=gap_days)
    splits = [[[lo, cut], [cut + gap, cut_points[i + 1] + gap]]
              for i, cut in enumerate(cut_points[:-1])]
    # Final split runs from the last cut point to the end of the data.
    splits.append([[lo, cut_points[-1]], [cut_points[-1] + gap, hi]])
    return splits
# NOTE: the final appended split may extend past the dates actually present in the data.
def split_df_by_time(df, date_col, train, test):
    """Slice df into train [start, end) and test [start, end] windows."""
    in_train = (df[date_col] >= train[0]) & (df[date_col] < train[1])
    in_test = (df[date_col] >= test[0]) & (df[date_col] <= test[1])
    return df[in_train], df[in_test]
def split_data_by_time(df, date_col, train, test, response, features):
    """Time-window train/test split returning model-ready matrices
    (X_train, X_test, y_train, y_test)."""
    dates = df[date_col]
    train_df = df[(dates >= train[0]) & (dates < train[1])]
    test_df = df[(dates >= test[0]) & (dates <= test[1])]
    X_train, y_train = get_xy(train_df, response, features)
    X_test, y_test = get_xy(test_df, response, features)
    return X_train, X_test, y_train, y_test
#the three functions below were taken from https://github.com/dssg/
#MLforPublicPolicy/blob/master/labs/2019/lab3_lr_svm_eval_sol.ipynb
def calculate_precision_at_threshold(predicted_scores, true_labels, threshold):
    """Precision of the classifier obtained by thresholding the scores.

    inputs:
        predicted_scores
        true_labels
        threshold
    """
    pred_label = [int(score > threshold) for score in predicted_scores]
    _, false_positive, _, true_positives = confusion_matrix(true_labels, pred_label).ravel()
    return 1.0 * true_positives / (false_positive + true_positives)
def calculate_recall_at_threshold(predicted_scores, true_labels, threshold):
    """Recall of the classifier obtained by thresholding the scores.

    inputs:
        predicted_scores
        true_labels
        threshold
    """
    pred_label = [int(score > threshold) for score in predicted_scores]
    _, _, false_negatives, true_positives = confusion_matrix(true_labels, pred_label).ravel()
    return 1.0 * true_positives / (false_negatives + true_positives)
def plot_precision_recall_k(predicted_scores, true_labels):
    """Plot the precision/recall curve for the given scores.

    inputs:
        predicted_scores
        true_labels
    """
    precision, recall, _ = precision_recall_curve(true_labels, predicted_scores)
    plt.plot(recall, precision, marker='.')
    plt.show()
def calculate_precision_at_threshold_multi(predicted_scores, true_labels, thresholds):
    """Precision score at each threshold in `thresholds`."""
    return [calculate_precision_at_threshold(predicted_scores, true_labels, t)
            for t in thresholds]
def calculate_recall_at_threshold_multi(predicted_scores, true_labels, thresholds):
    """Recall score at each threshold in `thresholds`."""
    return [calculate_recall_at_threshold(predicted_scores, true_labels, t)
            for t in thresholds]
def trim_dummies(df, dummies_to_trim, k):
    """Collapse rare categories: keep each column's k most frequent values
    and map everything else to 'other'.

    BUG FIX: the previous top-k lookup used value_counts().reset_index()['index'],
    which breaks on pandas >= 2.0 (reset_index no longer names that column
    'index'); read the value_counts index directly instead.
    """
    for col in dummies_to_trim:
        topk = set(df[col].value_counts().index[:k])
        df.loc[:, col] = df[col].apply(lambda x: x if x in topk else 'other')
    return df
def process(data, dummy_list=None, discrete_list=None, impute_median_list=None, impute_mean_list=None, k=30):
    """Impute, drop remaining NAs, trim high-cardinality categoricals, then
    one-hot encode and discretize the requested columns.

    BUG FIX: every list parameter defaults to None but was iterated/indexed
    directly, raising TypeError when omitted; None now means "no columns".
    """
    dummy_list = dummy_list or []
    discrete_list = discrete_list or []
    impute_median_list = impute_median_list or []
    impute_mean_list = impute_mean_list or []
    for i in impute_median_list:
        data = impute_median(data, i)
    for i in impute_mean_list:
        data = impute_mean(data, i)
    data = data.dropna()
    # Columns with more than k distinct values get their rare values merged
    # into 'other' before one-hot encoding to bound the feature count.
    d = data[dummy_list].nunique().reset_index()
    d.columns = ['col', 'cnt']
    dummies_to_trim = list(d[d.cnt > k]['col'])
    data = trim_dummies(data, dummies_to_trim, k)
    for i in dummy_list:
        data = dummy(data, i)
    for j in discrete_list:
        data = discretize(data, j)
    return data
def preprocess(df, drop_list):
    """Drop unused columns/NA rows, parse dates, and build binary `predvar`.

    Keeps only evaluations from 1990 onwards; predvar is 1 when a violation
    was found ("Y" after stripping whitespace), else 0.
    """
    if len(drop_list) > 0:
        df = df.drop(drop_list, axis=1)
    df = df.dropna()  # get rid of this later
    df["EVALUATION_START_DATE"] = pd.to_datetime(df["EVALUATION_START_DATE"], errors='coerce')
    df['FOUND_VIOLATION'] = df['FOUND_VIOLATION'].str.strip()
    df['predvar'] = np.where(df['FOUND_VIOLATION'] == "Y", 1, 0)
    keep = df.EVALUATION_START_DATE.apply(lambda x: x.year) >= 1990
    return df[keep]
#df[colname] = (df[colname]).astype(int)
#inspiration for below from https://github.com/rayidghani/magicloops/blob/master/simpleloop.py
def run_the_models(data, models_to_run, date_col, response, features, dummy_list, discrete_list, impute_median_list, impute_mean_list):
    """Temporal-CV model loop: process the data, grid-search each requested
    model, and collect precision/recall/F1/AUC metrics per split.

    inputs:
        data: dataframe with data
        models_to_run: list of model names (keys of MODELS)
        date_col: column holding the observation date
        response: column name of y variable
        features: candidate feature column names
        dummy_list: columns to one-hot encode
        discrete_list: columns to discretize
        impute_median_list: columns to impute with the median
        impute_mean_list: columns to impute with the mean
    returns:
        dataframe with one evaluation row per model/params/split
    """
    thresholds = [0.01, 0.02, 0.05, 0.10, 0.20, 0.30, 0.50]
    precision_cols = ["precision_at_{}".format(str(x)) for x in thresholds]
    recall_cols = ["recall_at_{}".format(str(x)) for x in thresholds]
    cols = ['model',
            'parameters',
            'train_start',
            'train_end',
            'test_start',
            'test_end',
            'f1_score',
            'auc'] + precision_cols + recall_cols
    model_results = []
    # Feature selection on the fully processed dataset
    processed_data = process(data, dummy_list, discrete_list, impute_median_list, impute_mean_list)
    features = [x for x in processed_data.columns if x not in ID_COLS]
    features = choose_features(processed_data, response, features)
    splits = temporal_train_test_split(data, date_col, freq='6M')
    for train, test in splits:
        train_df, test_df = split_df_by_time(data, date_col, train, test)
        train_df = process(train_df, dummy_list, discrete_list, impute_median_list, impute_mean_list)
        test_df = process(test_df, dummy_list, discrete_list, impute_median_list, impute_mean_list)
        X_train, y_train = get_xy(train_df, response, features)
        X_test, y_test = get_xy(test_df, response, features)
        for m in models_to_run:
            if m not in MODELS:
                print(m, 'bad model')
                break
            clf = MODELS[m]
            parameter_grid = ParameterGrid(PARAMS[m])
            for p in parameter_grid:
                try:
                    # initialize list to keep track of results
                    res = [m, p, train[0], train[1], test[0], test[1]]
                    clf.set_params(**p)
                    clf.fit(X_train, y_train)
                    predicted_scores = clf.predict_proba(X_test)[:, 1]
                    predicted_vals = clf.predict(X_test)
                    true_labels = y_test
                    precise = calculate_precision_at_threshold_multi(predicted_scores, true_labels, thresholds)
                    recall = calculate_recall_at_threshold_multi(predicted_scores, true_labels, thresholds)
                    auc = sklearn.metrics.roc_auc_score(true_labels, predicted_vals)
                    f1 = sklearn.metrics.f1_score(true_labels, predicted_vals)
                    # BUG FIX: metrics were appended as [auc, f1] although the
                    # column header order is f1_score then auc (swapped columns).
                    res = res + [f1, auc] + precise + recall
                    model_results.append(res)
                    # BUG FIX: the curve plot expects scores, not hard 0/1 predictions.
                    plot_precision_recall_n(true_labels, predicted_scores, m)
                except Exception as e:
                    print(e, m, p)
    df = pd.DataFrame(model_results, columns=cols)
    return df
def choose_model(df, metric_col, asc=False):
    """Average metric_col per (model, parameters) combo and pick the best.

    inputs:
        df: results dataframe
        metric_col: metric to rank on, e.g. 'f1'
        asc: sort ascending when True (default: higher is better)
    returns:
        (best parameters per model, the single best model/parameters row)
    """
    df['parameters'] = df.parameters.astype(str)
    averaged = df.groupby(['model', 'parameters']).mean().reset_index()
    ranked = averaged.sort_values(metric_col, ascending=asc)
    best_params = ranked.drop_duplicates('model')
    best_model = best_params.iloc[0][['model', 'parameters']]
    return best_params, best_model
def plot_precision_recall_n(y_true, y_prob, model_name):
    """Plot precision and recall against the percent of population flagged.

    inputs:
        y_true: true values of y
        y_prob: scores predicted by the model
        model_name: title for the plot
    """
    y_score = y_prob
    precision_curve, recall_curve, pr_thresholds = precision_recall_curve(y_true, y_score)
    precision_curve = precision_curve[:-1]
    recall_curve = recall_curve[:-1]
    number_scored = float(len(y_score))
    # Fraction of the population scoring at or above each threshold.
    pct_above_per_thresh = np.array(
        [len(y_score[y_score >= value]) / number_scored for value in pr_thresholds])
    plt.clf()
    fig, ax1 = plt.subplots()
    ax1.plot(pct_above_per_thresh, precision_curve, 'b')
    ax1.set_xlabel('percent of population')
    ax1.set_ylabel('precision', color='b')
    ax2 = ax1.twinx()
    ax2.plot(pct_above_per_thresh, recall_curve, 'r')
    ax2.set_ylabel('recall', color='r')
    ax1.set_ylim([0, 1])
    ax1.set_ylim([0, 1])
    ax2.set_xlim([0, 1])
    plt.title(model_name)
    plt.show()
|
from SqString import SqString
def BF(s, t):
    """Brute-force substring search: index of the first occurrence of pattern
    t in target s, or -1 when there is no match."""
    i = j = 0
    # Loop while neither string is exhausted
    while i < s.getsize() and j < t.getsize():
        if s[i] == t[j]:
            # Characters match: advance both cursors
            i += 1
            j += 1
        else:
            # Mismatch: restart the target at the next position, reset pattern
            i = i - j + 1
            j = 0
    if j >= t.getsize():
        return i - t.getsize()  # index of the first matched character
    return -1                   # pattern not found
if __name__ == '__main__':
    # Demo: search for "aaab" inside "aaaaab" using the brute-force matcher.
    target = SqString()
    target.StrAssign("aaaaab")
    print("s: ", end='')
    target.DispStr()
    pattern = SqString()
    pattern.StrAssign("aaab")
    print("t: ", end='')
    pattern.DispStr()
    print("BF: %d" % (BF(target, pattern)))
|
import sys
import argparse
import data_parser
import numpy as np
from sklearn import metrics
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
def main(args):
    """Load the requested dataset and run the selected clustering method(s),
    printing purity / F1 diagnostics."""

    def purity_score(y_true, y_pred):
        # compute contingency matrix (also called confusion matrix)
        contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
        print('sum')
        print(contingency_matrix)
        print('amax')
        print(np.amax(contingency_matrix, axis=0))
        # purity: majority-class mass per cluster over total mass
        return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)

    def f1_score(y_true, y_pred, max_clusters):
        # Per-cluster F1: relabel each cluster by its majority true class,
        # then accumulate the per-cluster F1 scores.
        y_true_c = []
        y_pred_c = []
        total = 0  # renamed from `sum`, which shadowed the builtin
        for i in range(max_clusters):
            for j in range(len(y_true)):
                if (i == y_pred[j]):
                    y_pred_c.append(y_pred[j])
                    y_true_c.append(y_true[j])
            counts = np.bincount(y_true_c)
            y_pred_c = [np.argmax(counts)] * len(y_pred_c)
            total += metrics.f1_score(y_true_c, y_pred_c)
            print('y_pred_c')
            print(y_pred_c)
            print('y_true_c')
            print(y_true_c)
            print('inside f1')
            print(metrics.f1_score(y_true_c, y_pred_c))
            y_true_c = []
            y_pred_c = []
        return total

    def kMNS():
        for n_clusters in range_n_clusters:
            # Initialize the clusterer with n_clusters value and a random generator
            # seed of 10 for reproducibility.
            clusterer = KMeans(n_clusters=n_clusters, random_state=10)
            cluster_labels = clusterer.fit_predict(data)
            #print('purity')
            #print (purity_score(target, cluster_labels))
            print('f1')
            print(f1_score(target, cluster_labels, n_clusters))

    def SPC():
        # NOTE(review): only the first 100 samples are clustered here -
        # presumably to bound runtime; confirm this subsetting is intended.
        clusterer = SpectralClustering(n_clusters=2, assign_labels="discretize", random_state=0)
        cluster_labels = clusterer.fit_predict(data[:100])
        print(cluster_labels)

    def AMC():
        for n_clusters in range_n_clusters:
            # BUG FIX: the cluster count was hard-coded to 2, so the loop
            # variable was ignored and each iteration ran the same clustering.
            clusterer = AgglomerativeClustering(n_clusters=n_clusters)
            cluster_labels = clusterer.fit_predict(data[:100])

    range_n_clusters = [2, 4, 8]
    options = {'occupancy1': data_parser.load_occupancy_data1,
               'occupancy2': data_parser.load_occupancy_data2,
               'occupancy3': data_parser.load_occupancy_data3,
               'spambase': data_parser.load_spambase
               }
    header, data, target = options[args.dataset]()
    if len(sys.argv) == 2:
        # No --method given: run every algorithm
        kMNS()
        SPC()
        AMC()
    else:
        options = {'kmeans': kMNS,
                   'spectral': SPC,
                   'agglomerative': AMC
                   }
        options[args.method]()
def parse_arguments(argv):
    """Build the CLI parser and parse *argv* (dataset positional, optional --method)."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'dataset', type=str,
        help='which dataset to be loaded, options: for datatest.txt type \'occupancy1\', for datatest2.txt type \'occupancy2\', for datatraining.txt type \'occupancy3\', or type \'spambase\'')
    arg_parser.add_argument(
        '--method', type=str, default='',
        help='which method to be executed, options: \'kmeans\', \'spectral\', \'agglomerative\'')
    return arg_parser.parse_args(argv)
# Script entry point: parse CLI args and hand them to main().
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
# People with their worked hours and hourly rate (MPH, thousands VND per hour).
p = [
    {
        "Name": "Huy",
        "Hours": 30,
        "MPH": 50,
    },
    {
        "Name": "Quan",
        "Hours": 20,
        "MPH": 40,
    },
    {
        "Name": "Duc",
        "Hours": 15,
        "MPH": 35,
    }
]
print("Numbers of hours of each person:")
for q in p:
    print(q["Name"], q["Hours"], sep = ": ")
print()
print("Wage of each person (thousands VND):")
for q in p:
    # Wage = hours worked * hourly rate; cached on the record for the sum below.
    q["MW"] = q["Hours"]*q["MPH"]
    print(q["Name"], q["MW"], sep =": ")
print()
ws = 0
print("Wage sum (thousands VND): ")
for q in p:
    # BUGFIX: the original accumulated i["MW"], but `i` is undefined (NameError);
    # the loop variable is q.
    ws = ws + q["MW"]
print(ws)
import findspark
from pyspark import SparkContext, SparkConf
from common.Utils import Utils
findspark.init(python_path='/Users/khwu/.virtualenvs/spark/bin/python3')
def split_comma(line: str):
    """Split *line* with the project's comma delimiter and return fields 2 and 3 as a pair."""
    fields = Utils.COMMA_DELIMITER.split(line)
    return (fields[2], fields[3])
if __name__ == "__main__":
    # Local Spark context using all cores; keep the log quiet.
    conf = SparkConf().setAppName('create').setMaster('local[*]')
    sc = SparkContext(conf=conf)
    sc.setLogLevel('ERROR')
    # Renamed from `input`, which shadowed the builtin.
    records = ["Lily 23", "Jack 29", "Mary 29", "James 8"]
    # Turn "name age" strings into (name, age) pairs and write them out as a
    # single partition. saveAsTextFile returns None, so the previous
    # `test = ...` binding was dropped.
    sc.parallelize(records) \
        .map(lambda line: (line.split(' ')[0], line.split(' ')[1])) \
        .coalesce(1) \
        .saveAsTextFile('../../out/rdd_to_pair_rdd.text')
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import timezone
from django.db import models
class KernelChangeManager(models.Manager):
    """Manager exposing only kernel changes not yet reported, ordered for reporting."""
    def get_queryset(self):
        # Unreported changes, grouped by branch/trigger name, newest trigger number first.
        return super(KernelChangeManager, self).get_queryset().filter(reported=False).order_by('branch', 'trigger_name', '-trigger_number')
class KernelChange(models.Model):
    """A kernel change (branch + describe) tracked through CI builds and QA reporting."""
    branch = models.CharField(max_length=255)
    describe = models.CharField(max_length=255)
    # Set once this change has been reported (see KernelChangeManager).
    reported = models.BooleanField(default=False)
    trigger_name = models.CharField(max_length=255)
    trigger_number = models.IntegerField(default=0)
    # Possible values of `result`:
    # TRIGGER_BUILD_COMPLETED
    # CI_BUILDS_IN_QUEUE / CI_BUILDS_NOT_REPORTED / CI_BUILDS_IN_PROGRESS / CI_BUILDS_COMPLETED
    # HAS_QA_PROJECT_NOT_FOUND / HAS_QA_BUILD_NOT_FOUND / HAS_JOBS_NOT_SUBMITTED / HAS_JOBS_IN_PROGRESS
    # ALL_COMPLETED
    result = models.CharField(max_length=100, null=True, default="NOINFO")
    timestamp = models.DateTimeField(null=True, default=timezone.now)
    duration = models.IntegerField(default=0) # total_seconds
    # Aggregated test-result counters across this change's jobs.
    number_passed = models.IntegerField(default=0)
    number_failed = models.IntegerField(default=0)
    number_assumption_failure = models.IntegerField(default=0)
    number_ignored = models.IntegerField(default=0)
    number_total = models.IntegerField(default=0)
    modules_done = models.IntegerField(default=0)
    modules_total = models.IntegerField(default=0)
    jobs_finished = models.IntegerField(default=0)
    jobs_total = models.IntegerField(default=0)
    def __str__(self):
        return "%s-%s" % (self.branch, self.describe)
    def __unicode__(self):
        return "%s-%s" % (self.branch, self.describe)
    objects = models.Manager() # The default manager
    objects_needs_report = KernelChangeManager() # custom manager: unreported changes only
class CiBuildKernelChangeManager(models.Manager):
    """Manager helper to fetch the CI builds belonging to one kernel change."""
    def get_builds_per_kernel_change(self, kernel_change=None):
        # All CiBuild rows whose foreign key points at the given kernel change.
        return super(CiBuildKernelChangeManager, self).get_queryset().filter(kernel_change=kernel_change)
class CiBuild(models.Model):
    """One Jenkins-style CI build (name + number), optionally tied to a KernelChange."""
    name = models.CharField(max_length=255)
    number = models.IntegerField()
    kernel_change = models.ForeignKey(KernelChange, null=True, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(null=True, default=timezone.now)
    duration = models.IntegerField(default=0) # total_seconds
    # Possible values of `result`:
    # CI_BUILD_DELETED / INPROGRESS / SUCCESS / FAILURE / ABORTED
    result = models.CharField(max_length=100, null=True, default="NOINFO")
    display_name = models.CharField(max_length=255, null=True)
    changes_num = models.IntegerField(default=0)
    def __str__(self):
        return "%s#%s" % (self.name, self.number)
    def __unicode__(self):
        return "%s#%s" % (self.name, self.number)
    objects = models.Manager()
    objects_kernel_change = CiBuildKernelChangeManager()
class ReportProject(models.Model):
    """A qa-report project (group + name/slug) mirrored from the QA service."""
    # the group that this build belongs to
    group = models.CharField(max_length=100)
    # the name of the qareport project
    name = models.CharField(max_length=100)
    # the slug of the qareport project
    slug = models.CharField(max_length=100)
    # id of the project on the remote qa-report service
    project_id = models.IntegerField(default=0)
    is_public = models.BooleanField(default=True)
    is_archived = models.BooleanField(default=True)
    class Meta:
        # Custom permissions used for gating project visibility and admin actions.
        permissions = (
            ("view_eap_projects", "Can see available eap projects"),
            ("view_benchmark_projects", "Can see available benchmark projects"),
            ("admin_projects", "Can do operations on the project, like job resubmission, job cancellation, build cancellation"),
        )
    def __str__(self):
        return "%s#%s" % (self.group, self.name)
    def __unicode__(self):
        return "%s#%s" % (self.group, self.name)
    objects = models.Manager()
class ReportBuild(models.Model):
    """A qa-report build of a kernel change, linking the QA project, the CI build
    that produced it and the CI build that triggered it."""
    # the version of the qareport build
    version = models.CharField(max_length=100)
    qa_project = models.ForeignKey(ReportProject, on_delete=models.CASCADE, null=True)
    kernel_change = models.ForeignKey(KernelChange, on_delete=models.CASCADE, null=True)
    ci_build = models.ForeignKey(CiBuild, on_delete=models.CASCADE, related_name="ci_build", null=True)
    ci_trigger_build = models.ForeignKey(CiBuild, on_delete=models.CASCADE, related_name='trigger_build', null=True)
    finished = models.BooleanField(default=False)
    # Possible values of `status`:
    # JOBSNOTSUBMITTED / JOBSINPROGRESS / JOBSCOMPLETED
    status = models.CharField(max_length=100, null=True, default="NOINFO")
    # Aggregated test-result counters across this build's jobs.
    number_passed = models.IntegerField(default=0)
    number_failed = models.IntegerField(default=0)
    number_assumption_failure = models.IntegerField(default=0)
    number_ignored = models.IntegerField(default=0)
    number_total = models.IntegerField(default=0)
    modules_done = models.IntegerField(default=0)
    modules_total = models.IntegerField(default=0)
    jobs_finished = models.IntegerField(default=0)
    jobs_total = models.IntegerField(default=0)
    # the time the trigger build was started
    started_at = models.DateTimeField(null=True)
    # the time the last job was fetched
    fetched_at = models.DateTimeField(null=True)
    # The id of the qa-report build id
    qa_build_id = models.IntegerField(default=0)
    # the metadata url for the qa-report api, like
    # https://qa-reports.linaro.org/api/builds/63239/metadata/
    metadata_url = models.URLField(null=True)
    def __str__(self):
        return "%s#%s" % (self.qa_project, self.version)
    def __unicode__(self):
        return "%s#%s" % (self.qa_project, self.version)
    objects = models.Manager()
class ReportJob(models.Model):
    """A single LAVA/QA job belonging to a ReportBuild, with its fetched results."""
    job_name = models.CharField(max_length=256)
    job_url = models.URLField(null=True)
    attachment_url = models.URLField(null=True, blank=True)
    # True once the job's test results have been pulled into the local cache.
    results_cached = models.BooleanField(default=False)
    qa_job_id = models.IntegerField(default=0)
    report_build = models.ForeignKey(ReportBuild, on_delete=models.CASCADE, null=True)
    # Reference to the job this one was resubmitted from (if any).
    parent_job = models.CharField(max_length=100, null=True, blank=True)
    resubmitted = models.BooleanField(default=False)
    # Possible values of `status`:
    # JOBSNOTSUBMITTED / JOBSINPROGRESS / JOBSCOMPLETED
    status = models.CharField(max_length=100, null=True, default="NOINFO")
    failure_msg = models.TextField(null=True, blank=True)
    # null=True vs blank=True: https://stackoverflow.com/questions/8609192/what-is-the-difference-between-null-true-and-blank-true-in-django
    environment = models.CharField(max_length=100, blank=True)
    submitted_at = models.DateTimeField(null=True, blank=True)
    fetched_at = models.DateTimeField(null=True, blank=True)
    # Per-job test-result counters.
    number_passed = models.IntegerField(default=0)
    number_failed = models.IntegerField(default=0)
    number_assumption_failure = models.IntegerField(default=0)
    number_ignored = models.IntegerField(default=0)
    number_total = models.IntegerField(default=0)
    modules_done = models.IntegerField(default=0)
    modules_total = models.IntegerField(default=0)
    finished_successfully = models.BooleanField(default=False)
    def __str__(self):
        if self.report_build:
            return "%s#%s" % (self.job_name, self.report_build.version)
        else:
            return "%s#%s" % (self.job_name, self.job_url)
    def __unicode__(self):
        if self.report_build:
            return "%s#%s" % (self.job_name, self.report_build.version)
        else:
            return "%s#%s" % (self.job_name, self.job_url)
    objects = models.Manager()
class JobMeta(models.Model):
    """Auxiliary metadata attached to a qa-report job, e.g. why it was manually resubmitted."""
    qa_job_id = models.IntegerField(default=0)
    # Kind of metadata record; currently only manual resubmissions are tracked.
    kind = models.CharField(
        max_length=32,
        choices=(
            ('ManualResubmit', 'ManualResubmit'),
        ),
        db_index=True,
    )
    resubmission_reason = models.TextField(null=True, blank=True, max_length=256,)
class TestSuite(models.Model):
    """One test suite executed within a ReportJob, with its pass/total counters."""
    # BUGFIX: on_delete is a mandatory argument since Django 2.0; CASCADE
    # matches every other ForeignKey in this module (and the implicit
    # default of Django < 2.0), so behavior is unchanged on old versions.
    report_job = models.ForeignKey(ReportJob, on_delete=models.CASCADE)
    name = models.CharField(max_length=256, db_index=True)
    abi = models.CharField(max_length=16, null=True)
    done = models.BooleanField(default=False)
    number_pass = models.IntegerField(default=0)
    number_total = models.IntegerField(default=0)
class TestCase(models.Model):
    """A single test case result, optionally carrying a measurement (value + unit)."""
    # multiple index might be enabled later
    # when the problem is not improved too much
    # or get worse again when the data gets huge
    # https://docs.djangoproject.com/en/3.2/ref/models/options/#indexes
    name = models.CharField(max_length=320, db_index=True)
    result = models.CharField(max_length=64, db_index=True)
    measurement = models.DecimalField(max_digits=20, decimal_places=2, null=True)
    unit = models.CharField(max_length=128, null=True)
    suite = models.CharField(max_length=256, db_index=True)
    job_id = models.CharField(max_length=16, db_index=True)
    lava_nick = models.CharField(max_length=64, db_index=True)
    # failure should be deleted when this testcase deleted
    # BUGFIX: on_delete is a mandatory argument since Django 2.0; CASCADE
    # matches the other ForeignKeys in this module and the cascade intent
    # stated in the comment above.
    testsuite = models.ForeignKey(TestSuite, null=True, on_delete=models.CASCADE)
    message = models.TextField(null=True, blank=True)
    stacktrace = models.TextField(null=True, blank=True)
    def __str__(self):
        # Added to mirror __unicode__ so Python 3 gets the same readable
        # string form, consistent with the other models in this file.
        if self.measurement:
            return "%s %s %s %s" % (self.name, self.result, self.measurement, self.unit)
        else:
            return "%s %s" % (self.name, self.result)
    def __unicode__(self):
        if self.measurement:
            return "%s %s %s %s" % (self.name, self.result, self.measurement, self.unit)
        else:
            return "%s %s" % (self.name, self.result)
|
from gym.envs.registration import register
# Register the custom climbing environment with Gym so callers can create it
# via gym.make('Pyclimb-v0').
register(
    id='Pyclimb-v0',
    entry_point='gym_climb.envs:ClimbEnv',
    max_episode_steps=9999999,  # effectively unlimited episode length
)
|
import os
from funcoes_operacoes_bancarias import deposito, saque, transferencia, extrato
from validacoes import validar_menu
def tela_operacoes_bancarias(cpf, diretorio):
    """Banking-operations menu loop for the account identified by *cpf*.

    Repeatedly clears the screen, shows the menu, validates the choice via
    validar_menu and dispatches it to controle_main until the user picks
    option 5 (back). *diretorio* is the data directory handed through to
    the operation functions.
    """
    while(True):
        os.system('cls')  # Windows-only screen clear
        print("---------------------- OPERAÇÕES BANCARIAS ----------------------\n")
        print("O QUE VOCÊ DESEJA:")
        print("(01) - SAQUE")
        print("(02) - EXTRATO")
        print("(03) - DEPÓSITO")
        print("(04) - TRANFERÊNCIA")
        print("(05) - VOLTAR")
        # validar_menu keeps prompting until the answer is in [1, 5].
        resposta = validar_menu(1, 5)
        if(resposta == 5):
            break
        controle_main(resposta, cpf, diretorio)
def controle_main(resposta, cpf, diretorio):
    """Dispatch one banking menu choice: 1=withdraw, 2=statement, 3=deposit, 4=transfer.

    *cpf* identifies the account holder; *diretorio* is the data directory the
    operation functions work on. Each branch reports success/failure and
    pauses so the user can read the message.
    """
    if (resposta == 1):
        confirmacao = saque(cpf, diretorio)
        if(type(confirmacao) == bool):
            if(confirmacao == False):
                print("...DESCULPE, ALGO DEU ERRADO...\n");
        elif(confirmacao.isnumeric()):
            print("...SAQUE FEITO COM SUCESSO")
        os.system('pause')
    elif (resposta == 2):
        conteudo = extrato(cpf)
        # BUGFIX: the original tested `extrato != False`, i.e. the function
        # object itself (always truthy), instead of its return value.
        if(conteudo != False):
            os.system('cls')
            if(len(conteudo) != 0):
                print (conteudo)
            else:
                print("SEM HISTORICO BANCÁRIO\n")
        else:
            print("\n...DESCULPE, ALGO DEU ERRADO...")
        os.system('pause')
    elif (resposta == 3):
        confirmacao = deposito(cpf, diretorio)
        # BUGFIX: test the boolean failure sentinel first — bool has no
        # .isnumeric(), so the original order raised AttributeError whenever
        # deposito returned False.
        if(confirmacao == False):
            print("...DESCULPE, ALGO DE ERRADO, TENTE MAIS TARDE")
        elif(confirmacao.isnumeric()):
            print("...DEPOSITO FEITO COM SUCESSO")
        os.system('pause')
    elif (resposta == 4):
        confirmacao = transferencia(cpf, diretorio)
        if(confirmacao == True):
            print("\n...TRANFERÊNCIA FEITO COM SUCESSO")
        elif(confirmacao == False):
            print("\n...DESCULPE, ALGO DEU ERRADO...\n...VERIFIQUE OS DADOS DO RECEPTOR...\n")
        os.system('pause')
|
import logging
from random import choice
from core.cogs.speech_cog import SpeechCog
from common.command_management.invoked_command import InvokedCommand
from common.command_management.invoked_command_handler import InvokedCommandHandler
from common.database.database_manager import DatabaseManager
from common.logging import Logging
from common.module.discoverable_module import DiscoverableCog
from common.module.module_initialization_container import ModuleInitializationContainer
import discord
## Logging
LOGGER = Logging.initialize_logging(logging.getLogger(__name__))
class Fortune(DiscoverableCog):
    """Discord cog implementing a magic 8-ball: /fortune speaks and posts a random answer."""
    ## Defaults
    FORTUNES = [
        ## Positive
        "It is certain",
        "It is decidely so",
        "Without a doubt",
        "Yes, definitely",
        ## NOTE(review): "Without a doubt" appears twice, doubling its draw
        ## probability — confirm the duplicate is intentional.
        "Without a doubt",
        "You may rely on it",
        "As I see it, yes",
        "Most likely",
        "Outlook good",
        "Yep",
        "Signs point to yes",
        ## Neutral
        "Reply hazy, try again",
        "Ask again later",
        "Better not tell you now",
        "Cannot predict now",
        "Concentrate and ask again",
        ## Negative
        "Don't count on it",
        "My reply is no",
        "My sources say no",
        "Outlook not so good",
        "Very doubtful"
    ]
    def __init__(self, *args, **kwargs):
        """Pull the required cogs/managers out of the injected dependencies dict."""
        super().__init__(*args, **kwargs)
        # Each dependency is mandatory; fail fast at startup if wiring is wrong.
        self.speech_cog: SpeechCog = kwargs.get('dependencies', {}).get('SpeechCog')
        assert (self.speech_cog is not None)
        self.invoked_command_handler: InvokedCommandHandler = kwargs.get('dependencies', {}).get('InvokedCommandHandler')
        assert(self.invoked_command_handler is not None)
        self.database_manager: DatabaseManager = kwargs.get('dependencies', {}).get('DatabaseManager')
        assert (self.database_manager is not None)
    @discord.app_commands.command(name="fortune")
    async def fortune_command(self, interaction: discord.Interaction):
        """Tells you your magic 8 ball fortune!"""
        fortune = choice(self.FORTUNES)
        async def callback(invoked_command: InvokedCommand):
            # Record the interaction, then answer publicly on success or
            # privately (ephemeral) with the error message on failure.
            if (invoked_command.successful):
                await self.database_manager.store(interaction)
                await interaction.response.send_message(f"{fortune}.")
            else:
                await self.database_manager.store(interaction, valid=False)
                await interaction.response.send_message(invoked_command.human_readable_error_message, ephemeral=True)
        # The action speaks the fortune aloud; the handler runs it and then
        # invokes the callback above with the outcome.
        action = lambda: self.speech_cog.say(fortune, author=interaction.user, ignore_char_limit=True, interaction=interaction)
        await self.invoked_command_handler.invoke_command(interaction, action, ephemeral=False, callback=callback)
def main() -> ModuleInitializationContainer:
    """Module entry point: register the Fortune cog with its required dependencies."""
    return ModuleInitializationContainer(Fortune, dependencies=["SpeechCog", "InvokedCommandHandler", "DatabaseManager"])
|
# Author: FL 26/04/2021
"""
A script used for NP Swing to transform csv files comming from the deltatau controller
and write them into a nexus file that will be merged by the DataMerger
"""
from argparse import ArgumentParser
import h5py
import csv
import os
import random
import time
from PyTango import *
################################################################################
def get_recordingmanager_sessioncounter():
    """Return the current sessionCounter attribute of the RecordingManager
    Tango device (module global recording_mgr_proxy, created in __main__)."""
    session_counter = recording_mgr_proxy.sessionCounter
    return session_counter
################################################################################
def get_csv_filename(index):
    """Block until a CSV file whose trailing number equals *index* appears in
    csv_input_directory (module global), polling every 5 seconds, and return
    its filename.

    NOTE(review): assumes every file in the directory is named
    "...-<number>.<ext>"; a stray file without that pattern makes int() raise.
    """
    while 1:
        csv_filenames = os.listdir(csv_input_directory)
        for f in csv_filenames:
            # The session counter is encoded between the last '-' and the extension.
            if int(f[f.rfind('-')+1:f.rfind('.')]) == int(index):
                print "found csv_file : ", f
                return f
        # csv file is not yet arrived: wait 5 sec
        print "csv_file not yet arrived, waiting 5 sec..."
        time.sleep(5)
################################################################################
def create_nxs_data_file(csv_file_name):
#------------------------------------------------
#1 open / read csv file
# declare the arrays
gate_index = []
calc_gated_sample_tx = []
calc_gated_sample_tz = []
calc_gated_sample_rz = []
calc_gated_sample_rx = []
calc_gated_sample_rs = []
calc_gated_fzp_cs_tx = []
calc_gated_fzp_cs_tz = []
calc_gated_fzp_cs_rx = []
calc_gated_fzp_cs_rz = []
raw_gated_sample_txe = []
raw_gated_sample_txi = []
raw_gated_sample_tze = []
raw_gated_sample_tzi = []
raw_gated_sample_tzo = []
raw_gated_fzp_cs_txe = []
raw_gated_fzp_cs_txi = []
raw_gated_fzp_cs_tze = []
raw_gated_fzp_cs_tzi = []
std_gated_sample_txe = []
std_gated_sample_txi = []
std_gated_sample_tze = []
std_gated_sample_tzi = []
std_gated_sample_tzo = []
std_gated_fzp_cs_txe = []
std_gated_fzp_cs_txi = []
std_gated_fzp_cs_tze = []
std_gated_fzp_cs_tzi = []
substraction_calc_gated_tx = []
substraction_calc_gated_tz = []
# Open and Parse the CSV file
with open(csv_input_directory + '/' + csv_file_name) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=';')
line_count = 0
for row in csv_reader:
# 1st line are the names of the columns
if line_count == 0:
print("Column names are: ", (row))
line_count += 1
# then the real data
else:
#try:
for column in range (len(selected_columns)):
if selected_columns[column] == 0:
gate_index.append(float(row[0]))
if selected_columns[column] == 1:
calc_gated_sample_tx.append(float(row[1]))
if selected_columns[column] == 2:
calc_gated_sample_tz.append(float(row[2]))
if selected_columns[column] == 3:
calc_gated_sample_rz.append(float(row[3]))
if selected_columns[column] == 4:
calc_gated_sample_rx.append(float(row[4]))
if selected_columns[column] == 5:
calc_gated_sample_rs.append(float(row[5]))
if selected_columns[column] == 6:
calc_gated_fzp_cs_tx.append(float(row[6]))
if selected_columns[column] == 7:
calc_gated_fzp_cs_tz.append(float(row[7]))
if selected_columns[column] == 8:
calc_gated_fzp_cs_rx.append(float(row[8]))
if selected_columns[column] == 9:
calc_gated_fzp_cs_rz.append(float(row[9]))
if selected_columns[column] == 10:
raw_gated_sample_txe.append(float(row[10]))
if selected_columns[column] == 11:
raw_gated_sample_txi.append(float(row[11]))
if selected_columns[column] == 12:
raw_gated_sample_tze.append(float(row[12]))
if selected_columns[column] == 13:
raw_gated_sample_tzi.append(float(row[13]))
if selected_columns[column] == 14:
raw_gated_sample_tzo.append(float(row[14]))
if selected_columns[column] == 15:
raw_gated_fzp_cs_txe.append(float(row[15]))
if selected_columns[column] == 16:
raw_gated_fzp_cs_txi.append(float(row[16]))
if selected_columns[column] == 17:
raw_gated_fzp_cs_tze.append(float(row[17]))
if selected_columns[column] == 18:
raw_gated_fzp_cs_tzi.append(float(row[18]))
if selected_columns[column] == 19:
std_gated_sample_txe.append(float(row[19]))
if selected_columns[column] == 20:
std_gated_sample_txi.append(float(row[20]))
if selected_columns[column] == 21:
std_gated_sample_tze.append(float(row[21]))
if selected_columns[column] == 22:
std_gated_sample_tzi.append(float(row[22]))
if selected_columns[column] == 23:
std_gated_sample_tzo.append(float(row[23]))
if selected_columns[column] == 24:
std_gated_fzp_cs_txe.append(float(row[24]))
if selected_columns[column] == 25:
std_gated_fzp_cs_txi.append(float(row[25]))
if selected_columns[column] == 26:
std_gated_fzp_cs_tze.append(float(row[26]))
if selected_columns[column] == 27:
std_gated_fzp_cs_tzi.append(float(row[27]))
# computed substractions for tx and tz
substraction_calc_gated_tx.append(float(row[1]) - float(row[6]))
substraction_calc_gated_tz.append(float(row[2]) - float(row[7]))
line_count += 1
#except :
# pass
#print ("maybe end of the file...", )
print "\ncalc_gated_sample_tx: ",calc_gated_sample_tx
print('nb lines',line_count)
#------------------------------------------------
#2 create nxs file
nxs_filename = "data_from_deltatau_000001.nxs"
print "nxs_file = ", nxs_filename
f = h5py.File(nxs_output_directory +'/'+ nxs_filename, 'x') # 'x' means fail if file exist
scan_data_entry = "/entry/scan_data"
f.create_group(scan_data_entry)
#------------------------------------------------
#3 populate nxs file
for column in range (0, len(selected_columns)):
if selected_columns[column] == 0:
f[scan_data_entry].create_dataset(u"gate_index", data=gate_index)
if selected_columns[column] == 1:
f[scan_data_entry].create_dataset(u"calc_gated_sample_tx", data=calc_gated_sample_tx)
if selected_columns[column] == 2:
f[scan_data_entry].create_dataset(u"calc_gated_sample_tz", data=calc_gated_sample_tz)
if selected_columns[column] == 3:
f[scan_data_entry].create_dataset(u"calc_gated_sample_rz", data=calc_gated_sample_rz)
if selected_columns[column] == 4:
f[scan_data_entry].create_dataset(u"calc_gated_sample_rx", data=calc_gated_sample_rx)
if selected_columns[column] == 5:
f[scan_data_entry].create_dataset(u"calc_gated_sample_rs", data=calc_gated_sample_rs)
if selected_columns[column] == 6:
f[scan_data_entry].create_dataset(u"calc_gated_fzp_cs_tx", data=calc_gated_fzp_cs_tx)
if selected_columns[column] == 7:
f[scan_data_entry].create_dataset(u"calc_gated_fzp_cs_tz", data=calc_gated_fzp_cs_tz)
if selected_columns[column] == 8:
f[scan_data_entry].create_dataset(u"calc_gated_fzp_cs_rx", data=calc_gated_fzp_cs_rx)
if selected_columns[column] == 9:
f[scan_data_entry].create_dataset(u"calc_gated_fzp_cs_rz", data=calc_gated_fzp_cs_rz)
if selected_columns[column] == 10:
f[scan_data_entry].create_dataset(u"raw_gated_sample_txe", data=raw_gated_sample_txe)
if selected_columns[column] == 11:
f[scan_data_entry].create_dataset(u"raw_gated_sample_txi", data=raw_gated_sample_txi)
if selected_columns[column] == 12:
f[scan_data_entry].create_dataset(u"raw_gated_sample_tze", data=raw_gated_sample_tze)
if selected_columns[column] == 13:
f[scan_data_entry].create_dataset(u"raw_gated_sample_tzi", data=raw_gated_sample_tzi)
if selected_columns[column] == 14:
f[scan_data_entry].create_dataset(u"raw_gated_sample_tzo", data=raw_gated_sample_tzo)
if selected_columns[column] == 15:
f[scan_data_entry].create_dataset(u"raw_gated_fzp_cs_txe", data=raw_gated_fzp_cs_txe)
if selected_columns[column] == 16:
f[scan_data_entry].create_dataset(u"raw_gated_fzp_cs_txi", data=raw_gated_fzp_cs_txi)
if selected_columns[column] == 17:
f[scan_data_entry].create_dataset(u"raw_gated_fzp_cs_tze", data=raw_gated_fzp_cs_tze)
if selected_columns[column] == 18:
f[scan_data_entry].create_dataset(u"raw_gated_fzp_cs_tzi", data=raw_gated_fzp_cs_tzi)
if selected_columns[column] == 19:
f[scan_data_entry].create_dataset(u"std_gated_sample_txe", data=std_gated_sample_txe)
if selected_columns[column] == 20:
f[scan_data_entry].create_dataset(u"std_gated_sample_txi", data=std_gated_sample_txi)
if selected_columns[column] == 21:
f[scan_data_entry].create_dataset(u"std_gated_sample_tze", data=std_gated_sample_tze)
if selected_columns[column] == 22:
f[scan_data_entry].create_dataset(u"std_gated_sample_tzi", data=std_gated_sample_tzi)
if selected_columns[column] == 23:
f[scan_data_entry].create_dataset(u"std_gated_sample_tzo", data=std_gated_sample_tzo)
if selected_columns[column] == 24:
f[scan_data_entry].create_dataset(u"std_gated_fzp_cs_txe", data=std_gated_fzp_cs_txe)
if selected_columns[column] == 25:
f[scan_data_entry].create_dataset(u"std_gated_fzp_cs_txi", data=std_gated_fzp_cs_txi)
if selected_columns[column] == 26:
f[scan_data_entry].create_dataset(u"std_gated_fzp_cs_tze", data=std_gated_fzp_cs_tze)
if selected_columns[column] == 27:
f[scan_data_entry].create_dataset(u"std_gated_fzp_cs_tzi", data=std_gated_fzp_cs_tzi)
# computed substractions for tx and tz
f[scan_data_entry].create_dataset(u"historised_relative_sample_tx", data=substraction_calc_gated_tx)
f[scan_data_entry].create_dataset(u"historised_relative_sample_tz", data=substraction_calc_gated_tz)
fname = f.filename
f.close()
# for debug purpose: ie simulate the DataMerger Process
#os.remove(fname)
print "done"
#Gate_index;Calc-Gated_Sample-X;Calc-Gated_Sample-Z;Calc-Gated_Sample-Rz;Calc-Gated_Sample-Rx;Calc-Gated_Sample-Rs;Calc-Gated_FZPCS-X;Calc-Gated_FZPCS-Z;Calc-Gated_FZPCS-Rx;Calc-Gated_FZPCS-Rz;Raw-Gated_Sample-Xe;Raw-Gated_Sample-Xi;Raw-Gated_Sample-Ze;Raw-Gated_Sample-Zi;Raw-Gated_Sample-Zo;Raw-Gated_FZPCS-Xe;Raw-Gated_FZPCS-Xi;Raw-Gated_FZPCS-Ze;Raw-Gated_FZPCS-Zi;
#------------------------------------------------------------------------------
# Main Entry point
#------------------------------------------------------------------------------
if __name__ == "__main__":
# command line parsing
parser = ArgumentParser(description="NP Swing hdf5 script")
parser.add_argument("-d1","--csv_input_directory",help="Directory where the CSV files will arrive (eg from FtpClient")
parser.add_argument("-d2","--nxs_output_directory",help="Directory where the NXS files will be written")
parser.add_argument("-col","--selected_columns",help="Selected columns that will be written into the nxs file: eg: [1,2,4,78] \
0 being the first column ie: Index)")
args = parser.parse_args()
# csv directory
if args.csv_input_directory:
csv_input_directory = args.csv_input_directory
print "csv_input_directory = ", csv_input_directory
else:
raise BaseException("No CSV input directory specified")
# nxs directory
if args.nxs_output_directory:
nxs_output_directory = args.nxs_output_directory
print "nxs_output_directory = ", nxs_output_directory
else:
raise BaseException("No NXS output directory specified")
# selected columns default columns: 1,2,6,7
selected_columns = "1,2,6,7" # mandatory to compute the "substractions" data
if args.selected_columns:
selected_columns = args.selected_columns
selected_columns += "1,2,6,7"
# transform the string to a list
selected_columns = list(selected_columns.split(","))
# transform list of str to list of int
selected_columns = list(map(int,selected_columns))
recording_mgr_proxy = DeviceProxy("flyscan/core/recording-manager.1")
################################################################################
# Main loop for each csv file
while 1:
#1 get the session counter from RecordingManager, then check that the CSV as the good session counter
session_counter = get_recordingmanager_sessioncounter()
print "session_counter = ", session_counter
#2 find the csv file with this index
csv_file_name = get_csv_file(session_counter)
#3 create the nxs file corresponding and copy csv data into it, the nxs file should be ending with 00001.nxs
create_nxs_data_file(csv_file_name)
|
from src.database import BaseModel
from datetime import datetime
import peewee
class User(BaseModel):
    """Application user account stored in the '_user' table."""
    name = peewee.CharField()
    username = peewee.CharField()
    password = peewee.CharField()
    email = peewee.CharField(unique=True)
    admin = peewee.BooleanField(default=False)
    # BUGFIX: pass the callable, not its result — `datetime.utcnow()` was
    # evaluated once at import time, stamping every row with the moment the
    # module was loaded instead of the moment the row was created.
    createdAt = peewee.DateTimeField(default=datetime.utcnow)
    updatedAt = peewee.DateTimeField(default=datetime.utcnow)
    class Meta:
        table_name = '_user'
|
'''
File name: pso_v4.py
Author: Anderson Henrique de Oliveira Conceicao
Date created: 29/05/2017
Date last modified: 02/06/2017
Python Version: 2.7
'''
from math import sqrt, pow, cos, pi, exp
import random
import numpy as np
from numpy import inf, array
import scipy as sp
def run(MaxIt=10000): #MaxIt - Maximum number of iterations
    """Particle Swarm Optimization (constriction-coefficient variant, Python 2).

    Optimizes six normalized parameters (AAET, AAST, VI, QMA, IMAI, VA) whose
    dynamics are modeled by per-parameter cubic polynomials fitted between a
    "bad" and a "good" reference value, seeking a fitness cost near zero
    (window of +/- range_cost). Returns (best cost, best position).
    """
    #Problem Definition=================================================================================================
    def polyRel(bad_lim, good_lim, degree): #Creates an array with the coefficients of the polynom
        # Build 4 anchor points (X, Y) spanning [0, 1] from the bad/good limits,
        # then least-squares fit a polynomial of the requested degree.
        if bad_lim < good_lim:
            first_x = bad_lim
            second_x = good_lim
            first_y = abs(1-(bad_lim/good_lim))
            second_y = 0
        elif bad_lim == good_lim:
            first_x = bad_lim*0.5
            second_x = good_lim
            first_y = 1-(first_x/good_lim)
            second_y = 0
            if first_x < 0 or first_x<second_x:
                first_x = good_lim
                second_x = good_lim*1.5
                first_y = 0
                second_y = abs(1-(good_lim/second_x))
        else:
            first_x = good_lim
            second_x = bad_lim
            first_y = 0
            second_y = abs(1-(bad_lim/good_lim))
        X = [ 0, first_x, second_x, 1 ]
        Y = [ 1, first_y, second_y, 1 ]
        # NOTE(review): sp.polyfit was removed from modern SciPy (use
        # numpy.polyfit when porting).
        try:
            P = sp.polyfit(X, Y, degree)
        except sp.RankWarning : #To adjust if the polynom is not well conditioned
            P = sp.polyfit(X, Y, degree-1)
        return P
    #It creates the polynom that defines the parameter dynamics
    #                   Bad      Good     Degree
    P0 = list(polyRel( 0.57962, 0.59091, 3)) #AAET
    P1 = list(polyRel( 0.58664, 0.59591, 3)) #AAST
    P2 = list(polyRel( 0.00292, 0.00021, 3)) #VI
    P3 = list(polyRel( 0.11682, 0.07932, 3)) #QMA
    P4 = list(polyRel( 0.11947, 0.12379, 3)) #IMAI
    P5 = list(polyRel( 0.58417, 0.58726, 3)) #VA
    # Polynomial translation A*X^3 + B*X^2 + C*X^1 + D*X^0 =====================================
    def polyeval(x, poly): return poly[0]*x**3+poly[1]*x**2+poly[2]*x**1+poly[3]*x**0
    def fitnessFunc(x):
        # Map each normalized coordinate through its fitted polynomial.
        AAET = polyeval(x[0], P0)       # AAET = x[0] ;
        AAST = polyeval(x[1], P1)       # AAST = x[1] ;
        VI   = polyeval(x[2], P2)       # VI   = x[2] ;
        QMA  = polyeval(x[3], P3)       # QMA  = x[3] ;
        IMAI = polyeval(x[4], P4)       # IMAI = x[4] ;
        VA   = polyeval(x[5], P5)       # VA   = x[5].
        #Fitness function
        # NOTE(review): returns the STRING 'inf' on division by zero; the
        # comparisons below then rely on Python 2 ordering numbers before
        # strings — this breaks on Python 3, confirm before porting.
        try: return ((VA/(AAST-AAET))*(VI/(AAST-AAET))**(-1)*(AAST/AAET)**(-1)*(QMA/IMAI)**(-1))
        except ZeroDivisionError: return 'inf'
    nVar = 6                            #Number of dimensions
    VarMin = 0                          #Lower bound of decision variables
    VarMax = 1                          #Upper bound of decision variables
    MaxVelocity = 0.002*(VarMax-VarMin) #Velocity upper bound
    MinVelocity = -MaxVelocity          #Velocity lower bound
    #Constriction coefficients (Clerc & Kennedy)========================================================================
    kappa = 1
    phi1 = 2.05
    phi2 = 2.05
    phi = phi1 + phi2
    chi = 2*kappa/abs(2-phi-sqrt(phi**2-4*phi))
    random.seed(0)  # deterministic runs
    #Parameters of PSO==================================================================================================
    range_cost = 1.5        #Defines the range in which the algorithm will adjust around zero cost
    nPop = 35               #Population size (swarm size)
    w = chi                 #Inertia coefficient
    c1 = chi*phi1           #Personal acceleration coefficient
    c2 = chi*phi2           #Social acceleration coefficient
    # NOTE(review): string sentinel — in Python 2 every number compares
    # less than every string, so the first `BestCost < GlobalBestCost`
    # test below always succeeds and seeds the global best. Replacing it
    # with float('-inf') would invert that behavior; confirm before changing.
    GlobalBestCost = '-inf' #Global best cost
    GlobalBestPosition = [] #Global best position
    Swarm = []              #Swarm population array
    #Useful functions===================================================================================================
    #Unnormalize the value
    def unnormalize(n,min,max):
        return (max-min)*n+min #unnormalize(number, minimum value, maximal value)
    #Creates an array of random numbers
    def randArray(size):
        aux = []
        for i in xrange(0,size): aux.append(random.uniform(VarMin,VarMax))
        return aux
    #It limits the number in a defined range
    def clamp(x,max,min):       #clamp(array, max bound, min bound);
        if isinstance(x, list): #or clamp(scalar, max bound, min bound);
            for i in xrange(0, len(x)):
                if x[i] >= max: x[i] = max
                elif x[i] <= min: x[i] = min
        else:
            if x >= max: x = max
            elif x <= min: x = min
        return x
    #Class definition===================================================================================================
    class Particle(object): #Defining the Particle's class
        def __init__(self, Position=None, Velocity=None, \
                     Cost=None, BestPosition=None, BestCost=None):
            self.Position =[]   #Characteristics of the particle
            self.Velocity = []
            self.Cost = 0
            self.BestPosition = []
            self.BestCost = []
    #Initializing the algorithm=========================================================================================
    #Swarm population array
    for i in xrange(0,nPop): Swarm.append(Particle())
    #Initialize population members
    for i in xrange(0,nPop):
        #Generate random solution
        Swarm[i].Position = list(randArray(nVar))
        #Initialize velocity
        Swarm[i].Velocity = np.zeros(nVar)
        #Evaluation
        Swarm[i].Cost = fitnessFunc(Swarm[i].Position)
        #Update personal best position
        Swarm[i].BestPosition = list(Swarm[i].Position)
        #Update personal best cost
        Swarm[i].BestCost = Swarm[i].Cost
        #It compares with the Global Best Cost and updates the value
        if Swarm[i].BestCost < GlobalBestCost:
            GlobalBestCost = Swarm[i].BestCost
            GlobalBestPosition = list(Swarm[i].BestPosition)
    #Array to hold best cost value on each iteration
    BestCosts = []
    BestPositions = []
    BestCosts.append(GlobalBestCost)
    BestPositions.append(GlobalBestPosition)
    #Main loop of PSO
    for i in xrange(1,MaxIt):
        for j in xrange(0,nPop):
            # Update Velocity: v = w*v + c1*rand*(pbest-x) + c2*rand*(gbest-x)
            Swarm[j].Velocity = list(w*np.array(Swarm[j].Velocity) \
                +c1*random.uniform(0,1)*(np.array(Swarm[j].BestPosition)-np.array(Swarm[j].Position)) \
                +c2*random.uniform(0,1)*(np.array(GlobalBestPosition)-np.array(Swarm[j].Position)))
            #Applying lower and upper bound limits
            Swarm[j].Velocity = clamp(Swarm[j].Velocity, MaxVelocity, MinVelocity)
            #Update position
            Swarm[j].Position = np.array(Swarm[j].Position) + np.array(Swarm[j].Velocity)
            #Applying lower and upper bound limits, by defining
            #the lower bound limit as: mean - standard deviations
            #and the upper bound limits as: mean + standard deviations
            Swarm[j].Position[0] = clamp(Swarm[j].Position[0], 1, 0.16667)             # x = AAET = x[0] ;
            Swarm[j].Position[1] = clamp(Swarm[j].Position[1], 1, 0.16667)             # y = AAST = x[1] ;
            Swarm[j].Position[2] = clamp(Swarm[j].Position[2], 0.00021+0.00075, 0)     # z = VI   = x[2] ;
            Swarm[j].Position[3] = clamp(Swarm[j].Position[3], 0.07932+0.12049, 0)     # v = QMA  = x[3] ;
            Swarm[j].Position[4] = clamp(Swarm[j].Position[4], 0.12379+0.12910, 0)     # w = IMAI = x[4] ;
            Swarm[j].Position[5] = clamp(Swarm[j].Position[5], 0.97727, 0)             # u = VA   = x[5] .
            #Evaluation
            Swarm[j].Cost = fitnessFunc(Swarm[j].Position)
            #Update personal best — accept only costs inside the +/- range_cost window
            if (Swarm[j].Cost <= Swarm[j].BestCost and Swarm[j].Cost > -range_cost) or\
               (Swarm[j].Cost >= Swarm[j].BestCost and Swarm[j].Cost < range_cost):
                Swarm[j].BestPosition = list(Swarm[j].Position)
                Swarm[j].BestCost = Swarm[j].Cost
                #Update global best
                if (Swarm[j].BestCost <= GlobalBestCost and Swarm[j].BestCost > -range_cost) or\
                   (Swarm[j].BestCost >= GlobalBestCost and Swarm[j].BestCost < range_cost):
                    GlobalBestCost = Swarm[j].BestCost
                    GlobalBestPosition = list(Swarm[j].Position)
        #Stores the best cost value
        BestCosts.append(GlobalBestCost)
        #Stores the best position
        BestPositions.append(GlobalBestPosition)
    #Unnormalizing the variables by the limits defined on the table=====================================================
    AAET = unnormalize(BestPositions[-1][0], 0, 4387014266.17000)
    AAST = unnormalize(BestPositions[-1][1], 0, 2561890616.28000)
    VI = unnormalize(BestPositions[-1][2], 25.18000, 268931997.14000)
    QMA = unnormalize(BestPositions[-1][3], 1, 63)
    IMAI = unnormalize(BestPositions[-1][4], 0, 1056)
    VA = unnormalize(BestPositions[-1][5], 0, 39944239.59000)
    # x = AAET = x[0] ; y = AAST = x[1] ; z = VI = x[2] ; v = QMA = x[3] ; w = IMAI = x[4] ; u = VA = x[5]==============
    print "The results are ready!", "\n"
    print "This was the Best Cost:", BestCosts[-1]
    print "This was the Best Position:", BestPositions[-1]
    return BestCosts[-1],BestPositions[-1]
|
# Generated by Django 3.2.2 on 2021-06-08 17:30
from django.db import migrations, models
# Auto-generated Django schema migration; do not hand-edit the operations.
class Migration(migrations.Migration):
    """Add a nullable ``description`` TextField to the ``product`` model."""

    dependencies = [
        ('store', '0005_product_category'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='description',
            # null=True keeps existing rows valid without supplying a default.
            field=models.TextField(max_length=200, null=True),
        ),
    ]
|
#!/usr/bin/env python3
import unittest
from to_number import to_number
class Test_to_Number(unittest.TestCase):
    """Unit tests for the to_number() digit-list converter."""

    def test_to_number_type(self):
        """to_number() must return a plain int."""
        result = to_number([1, 3, 5, 7])
        self.assertIsInstance(result, int)

    def test_the_number(self):
        """The digit list [6, 6, 6] should collapse to the integer 666."""
        result = to_number([6, 6, 6])
        self.assertEqual(result, 666)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
def ABC2(G):
    """Return the second atom-bond-connectivity (ABC2) index of graph G.

    For every edge {u, v}, let n_u (resp. n_v) count the vertices strictly
    closer to u than to v (resp. closer to v); the edge contributes
    sqrt((n_u + n_v - 2) / (n_u * n_v)) to the total.
    """
    order = G.order()
    dist = G.distance_matrix()
    total = 0
    for u in range(order):
        for v in range(u + 1, order):
            if dist[u][v] != 1:
                continue  # only adjacent vertex pairs contribute
            closer_u = 0
            closer_v = 0
            for w in range(order):
                if dist[u][w] < dist[v][w]:
                    closer_u = closer_u + 1
                elif dist[u][w] > dist[v][w]:
                    closer_v = closer_v + 1
            total = total + sqrt(float(closer_u + closer_v - 2) / float(closer_u * closer_v))
    return total
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Micro benchmark example for sharding-matmul with pytorch.
Commands to run:
python3 -m torch.distributed.launch --nproc_per_node=8 examples/benchmarks/sharding_matmul.py
"""
from superbench.benchmarks import Framework, BenchmarkRegistry
from superbench.common.utils import logger
if __name__ == '__main__':
    # Describe the benchmark to run: sharding-matmul on PyTorch for 20 steps.
    context = BenchmarkRegistry.create_benchmark_context(
        'sharding-matmul', parameters='--num_steps 20', framework=Framework.PYTORCH
    )
    # launch_benchmark returns a falsy value on failure, so only log on success.
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )
|
# Import important packages
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix
# Load fashion data (Fashion-MNIST: 70k 28x28 grayscale images, 10 classes)
fashion_mnist = tf.keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = \
    fashion_mnist.load_data()
# Separate data into training and validation;
# pixel values are scaled from [0, 255] down to [0, 1].
X_valid = X_train_full[:5000] / 255.0
X_train = X_train_full[5000:] / 255.0
X_test = X_test / 255.0
y_valid = y_train_full[:5000]
y_train = y_train_full[5000:]
# Add third dimension: Conv2D expects a channel axis, (28, 28) -> (28, 28, 1).
X_train = X_train[..., np.newaxis]
X_valid = X_valid[..., np.newaxis]
X_test = X_test[..., np.newaxis]
from functools import partial
# Factory shortcuts (functools.partial) for the baseline dense and conv layers.
my_dense_layer = partial(tf.keras.layers.Dense, activation="relu",
                         kernel_regularizer=tf.keras.regularizers.l2(0.0001))
my_conv_layer = partial(tf.keras.layers.Conv2D,
                        activation="relu", padding="valid")
# Creates all layers
model = tf.keras.models.Sequential([
    my_conv_layer(6,4,padding="same",input_shape=[28,28,1]),
    my_conv_layer(32,3),
    tf.keras.layers.MaxPooling2D(2),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Flatten(),
    my_dense_layer(128),
    # The explicit activation overrides the partial's "relu" default.
    my_dense_layer(10, activation="softmax")
])
# Gives dimensions of model
model.summary()
# Adds optimizer to neural network; sparse labels, so no one-hot encoding needed.
model.compile(loss="sparse_categorical_crossentropy",
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
              metrics=["accuracy"])
# Trains neural network for 5 epochs, tracking metrics on the held-out 5k images.
history = model.fit(X_train, y_train, epochs=5,
                    validation_data=(X_valid,y_valid))
# Plots loss and accuracy for training and validation data
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show()
# Plots confusion matrix for training data.
# BUG FIX: Sequential.predict_classes() was removed in TensorFlow 2.6;
# derive class labels from predict() probabilities instead (identical
# result for a softmax output layer).
y_pred = np.argmax(model.predict(X_train), axis=-1)
conf_train = confusion_matrix(y_train, y_pred)
print(conf_train)
# Evaluates model on the untouched test set.
model.evaluate(X_test,y_test)
# Plots confusion matrix for test data
y_pred = np.argmax(model.predict(X_test), axis=-1)
conf_test = confusion_matrix(y_test, y_pred)
print(conf_test)
# Render the test confusion matrix as a table figure and save it to PDF.
fig, ax = plt.subplots()
# Hide axes
fig.patch.set_visible(False)
ax.axis('off')
ax.axis('tight')
# Create table and save confusion matrix to file
df = pd.DataFrame(conf_test)
ax.table(cellText=df.values, rowLabels=np.arange(10),
         colLabels=np.arange(10), loc='center', cellLoc='center')
fig.tight_layout()
plt.savefig('conf_mat.pdf')
'''
Created on Oct 7, 2021
@author: DELL
'''
def icon_user():
    """Absolute XPath to the current user's avatar image in the page header (brittle; breaks on layout change)."""
    return "//header/div[1]/span[1]/div[1]/span[1]/div[1]/a[1]/div[1]/div[1]/img[1]"
def btn_follow():
    """CSS selector for the 'Follow' button.

    BUG FIX: the original selector "button[title='Follow" was missing the
    closing "']" -- an invalid CSS attribute selector that the driver
    rejects; restored the well-formed form.
    """
    return "button[title='Follow']"
def btn_following():
    """CSS selector for the 'Following' button.

    BUG FIX: the original selector "button[title='Following" was missing
    the closing "']" -- an invalid CSS attribute selector; restored it.
    """
    return "button[title='Following']"
def a():
    """Absolute XPath to an unnamed page container (presumably the download area -- verify).

    NOTE(review): the name 'a' is meaningless; rename once the target
    element is confirmed (keeping this alias for existing callers).
    """
    return "//body/div[4]/div[1]/div[1]/div[4]/div[1]/div[1]/div[1]/div[2]/div[2]/div[1]"
def btn_download():
    """XPath matching any span whose text contains 'Download'."""
    return "//span[contains(text(),'Download')]"
def dialog_thank():
    """Absolute XPath to a dialog container -- presumably the post-download 'thank you' popup; verify."""
    return "//body/div[@id='app']/div[1]/div[5]/div[1]/div[1]"
def msg_follow():
    """Absolute XPath to the first child of a follow-related button -- presumably its toast/label; verify."""
    return "//body/div[@id='app']/div[1]/div[3]/div[1]/button[1]/*[1]"
def name_owner ():
    """Absolute XPath to the content owner's name link in the page header (brittle absolute path)."""
    return "//header/div[1]/span[1]/div[2]/span[1]/div[1]/a[1]"
import os
import os.path
def CompressImage(image_name):
    """Resize one image in place to fit within 3000x4000 using ImageMagick.

    BUG FIX: the file name is now quoted inside the shell command so paths
    containing spaces (or other shell metacharacters) no longer break the
    call. NOTE(review): os.system with interpolated names is still shell
    injection-prone for hostile file names; subprocess.run([...]) would be
    safer if untrusted input is possible.
    """
    os.system('magick -resize "3000x4000" "%s" "%s"' % (image_name, image_name))
def CompressAll():
    """Compress every JPEG in the current directory (non-recursive).

    BUG FIX: '.jepg' was a typo for '.jpeg', so real .jpeg files were
    silently skipped. str.endswith accepts a tuple of suffixes, which
    replaces the inner loop entirely.
    """
    ext_names = ('.JPG', '.jpg', '.jpeg')
    for each_image in os.listdir('./'):
        if each_image.endswith(ext_names):
            CompressImage(each_image)
# CompressAll()
'''
rootdir = ".\\"
for parent,dirnames,filenames in os.walk(rootdir):
for dirname in dirnames:
thedir = parent+'\\'+dirname
print(thedir)
os.system("magick %s\\*.jpg %s\\%s.pdf" %(thedir,thedir,thedir))
# PdfLinearize()
rootdir = ".\\"
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
if filename.endswith('.pdf'):
for dirname in dirnames:
thedir=parent+'\\'+dirname
print(thedir)
thefilename = thedir+'\\'+filename
print(thefilename)
print("qpdf --linearize %s %s.1" %(thefilename,thefilename))
os.system("qpdf --linearize %s %s.1" %(thefilename,thefilename))
'''
# PdfLinearize()
# Walk the current directory tree and run qpdf's linearization check on
# every PDF found. NOTE(review): the path is interpolated unquoted into the
# shell command -- file names containing spaces will break the call.
rootdir = ".\\"
for parent,dirnames,filenames in os.walk(rootdir):
    for filename in filenames:
        if filename.endswith('.pdf'):
            thefile=parent+'\\'+filename
            print (parent+'\\'+filename)
            #print("qpdf --linearize %s %s.1" %(thefile,thefile))
            os.system("qpdf --check-linearization %s" %(thefile))
            #print("move %s.1 %s /Y" % (thefile,thefile))
            #os.system("move %s.1 %s /Y" %(thefile,thefile))
|
#-*- coding: utf-8 -*-
# Python 2 script: connects to an Exchange mailbox with exchangelib and
# prints drafts plus a couple of UTC-localized dates.
from exchangelib import DELEGATE, Account, Credentials,EWSTimeZone,EWSDateTime
# SECURITY: credentials are hardcoded in source -- move them to an
# environment variable or secrets store and rotate this password.
creds = Credentials(
    username='ift.local\\jerry.cheng',
    password='infor@135')
account = Account(primary_smtp_address='jerry.cheng@infortrend.com',
                  credentials=creds,
                  autodiscover=True,
                  access_type=DELEGATE)
print account
print type(account.drafts)
# Iterate drafts newest-first (the per-item printing is commented out).
for item in account.drafts.all().order_by('-datetime_received'):
    #print(item.subject, item.body, item.attachments)
    pass
year, month, day = 2016, 11, 10
tz = EWSTimeZone.timezone('UTC')
# day + 10 = 20, so this prints 2016-11-20 followed by 2016-11-10.
print tz.localize(EWSDateTime(year, month, day + 10))
print tz.localize(EWSDateTime(year, month, day))
'''
items = account.calendar.filter(
start__gt=tz.localize(EWSDateTime(year, month, day + 1)),
)
print len(items)
for item in items:
print item.subject.encode("utf-8")
items = account.inbox.filter(subject__contains='Debby')
print len(items)
for item in items:
print item.subject.encode("utf-8")
n = account.inbox.all().count()
print n
all_subjects = account.inbox.all().values_list('subject', flat=True)
for subject in all_subjects:
print subject
items = account.calendar.filter(start__range=(tz.localize(EWSDateTime(2016, 1, 1)), tz.localize(EWSDateTime(2017, 1, 1))))
for item in items:
print item.subject.encode("utf-8")
items = account.inbox.filter(subject__contains='Debby')
print len(items)
for item in items:
print item.subject.encode("utf-8")
print dir(item)
print item.datetime_received
'''
# Fetch inbox mail received on 2016-12-22 (UTC day window) and print subjects.
items = account.inbox.filter(datetime_received__range=(tz.localize(EWSDateTime(2016, 12, 22)), tz.localize(EWSDateTime(2016, 12, 23))))
print len(items)
for item in items:
    print item.subject.encode("utf-8")
|
import configparser
from selenium import webdriver
# config: read selenium settings (the chromedriver path) from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
def init_driver():
    """Create a Chrome webdriver from the path in config.ini (chromedriver 2.35)."""
    chromedriver_path = config.get('SELENIUM', 'chromedriver')
    return webdriver.Chrome(chromedriver_path)
def read_gallery_main_page(pageCount=2):
    """Crawl gallery list pages 1..pageCount-1 and save each post's HTML.

    BUG FIX: the page URL was built with str(pageCount) instead of str(i),
    so every iteration fetched the same page; it now walks successive
    pages. pageCount gained a default so argument-less callers (e.g.
    main()) no longer raise TypeError -- backward compatible.
    NOTE(review): range(1, pageCount) never visits page pageCount itself;
    confirm whether that off-by-one is intended.
    """
    for i in range(1, pageCount):
        page_url = 'http://gall.dcinside.com/board/lists/?id=drama_new2&page=' + str(i)
        driver = init_driver()
        driver.get(page_url)
        tr_list = driver.find_elements_by_class_name('tb')
        for tr in tr_list:
            detail_page_url = tr.find_element_by_css_selector('a').get_attribute('href')
            print(detail_page_url)
            detail_page_html = read_detail_page(driver, detail_page_url)
            # Use the URL query string (e.g. 'id=...&no=...') as the file name.
            file_name = detail_page_url.split('?')[-1]
            with open(file_name + '.html', 'w') as output:
                output.write(detail_page_html)
            print(detail_page_html)
        # One driver per page: quit after the page's posts are saved.
        driver.quit()
def read_detail_page(driver, url):
    """Navigate *driver* to *url* and return the rendered page source."""
    driver.get(url)
    return driver.page_source
def parse_html(url, f):
    """Parse a saved gallery-post HTML file and append 'subject<TAB>author<TAB>utc_ts' to f.

    NOTE(review): urllib.request, BeautifulSoup (bs4), datetime and calendar
    are used below but never imported in this module -- calling this
    function as-is raises NameError; confirm the intended imports.
    """
    page_url = 'file://'+url
    url_open = urllib.request.urlopen(page_url)
    soup = BeautifulSoup(url_open, 'html.parser', from_encoding='utf-8')
    # Post metadata sits in the 'w_top_left' block: dl[0] holds the subject,
    # dl[1] the author.
    div_top_left = soup.find('div', attrs={'class':'w_top_left'})
    dl_list = div_top_left.findAll('dl')
    subject = dl_list[0].find('dd').text.strip()
    author = dl_list[1].find('dd').text.strip()
    # The timestamp is the first li of the 'w_top_right' list.
    div_top_right = soup.find('div', attrs={'class':'w_top_right'})
    ul = div_top_right.find('ul')
    li_list = ul.findAll('li')
    timestamp = li_list[0].text.strip()
    datetime_timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
    # timegm treats the parsed time as UTC -- assumes the site reports UTC; verify.
    utc_timestamp = calendar.timegm(datetime_timestamp.timetuple())
    print(subject, author, utc_timestamp)
    f.write(subject+'\t'+author+'\t'+str(utc_timestamp)+'\n')
def main():
    """Entry point: crawl the first gallery list page and save its posts.

    BUG FIX: read_gallery_main_page() requires a page count; the original
    call passed no argument and raised TypeError on startup.
    """
    read_gallery_main_page(2)
# Script entry point.
if __name__ == '__main__':
    main()
|
def find_smallest(number1, number2, number3):
    """Return the smallest of the three given numbers."""
    # min() replaces building and sorting a throwaway list.
    return min(number1, number2, number3)
# Read three integers from stdin (one per line) and print the smallest.
num1 = int(input())
num2 = int(input())
num3 = int(input())
print(find_smallest(num1, num2, num3))
|
import os
import shutil
import random
sample_ratio = 0.85  # fraction of each class's files kept for training

# dirs for LessClass case
dir_validation = '.\\data\\LessClasses\\Validation'
dir_training = '.\\data\\LessClasses\\Training'
# dirs for full-class case
# dir_validation = '.\\data\\Validation'
# dir_training = '.\\data\\Training'

# For every class sub-directory, randomly keep sample_ratio of the files for
# training and move the remainder into the matching validation directory.
for d in os.listdir(dir_training):
    print('Extracting validation data from... ', d)
    train_dirfile = os.path.join(dir_training, d)
    val_dirfile = os.path.join(dir_validation, d)
    os.makedirs(val_dirfile, exist_ok=True)
    # ROBUSTNESS FIX: list the directory exactly once; the original called
    # os.listdir() three times and relied on the listing order being
    # identical between calls.
    files = os.listdir(train_dirfile)
    train_idx = set(random.sample(range(len(files)),
                                  int(sample_ratio * len(files))))
    val_files = [name for i, name in enumerate(files) if i not in train_idx]
    for file_name in val_files:
        shutil.move(os.path.join(train_dirfile, file_name), val_dirfile)
|
#Extract the data from PSSM
import numpy as np
import os
import sys
import time
from datetime import datetime
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
# Input FASTA with the sequence(s) to classify.
f='/home/u2195/Desktop/Dropbox/Bioinformatics_projects/data/predict.fasta'
input_seq = open (f, 'r')  # NOTE(review): this handle is never read or closed
# start_time is a tuple (label, timestamp string); printed as-is below.
start_time='Starting prediction: ', time.strftime ('%Y-%m-%d, %H:%M:%S')
print(start_time)
# Sliding-window length over residues and its (rounded) half width.
window = 35
half_window = 17
# Shell script executed via os.system: build a PSSM for the query with
# psiblast against uniref50, unless {seq}.pssm already exists on disk.
cmd = """
#Change directory to uniref50.
export BLASTDB=/local_uniref/uniref/uniref50
#"safety step" to check if the files have been created already in the output directory in case that the computer shuts down or others
if [ ! -f {seq}.pssm ] ; then
echo "Running psiblast on {seq} at $(date)..."
time psiblast -query {seq} -db uniref50.db -num_iterations 3 -evalue 0.001 -out {seq}.psiblast -out_ascii_pssm {seq}.pssm -num_threads 8
echo "Finished running psiblast on seq at $(date)."
echo ""
fi
#The following will be printed when the iterations are done:
echo 'PSI-BLAST run is complete'
""".format(seq=f)
os.system(cmd)
# Read the generated PSSM; g is rebound from file handle to a list of lines
# (the handle itself is never closed).
g = open(f+'.pssm')
g = g.read().splitlines()
# Strip surrounding whitespace from every line.
datalist = list()
for line in g:
    newline = line.strip()
    datalist.append (newline)
# NOTE(review): every 3rd stripped line is taken as a sequence title --
# confirm this against the actual .pssm layout.
titlelist = list ()
for i in datalist [:: 3]:
    titlelist.append (i)
#Define window size (re-declared; same values as above)
window = 35
pad_size = 17
bigwordlist=[]
structvectorlist=[]
#Open all the fastafiles that are in the titlelist
# NOTE(review): the body below rebuilds line_list from the whole PSSM
# (g[3:-7]) on every iteration regardless of r -- for multiple titles this
# duplicates identical feature rows; confirm whether per-protein slicing
# was intended.
for r in titlelist:
    line_list =[]
    # Columns 22..41 of a PSSM row are the 20 per-residue percentages.
    for line in g[3:-7]:
        newline = line.split()
        newline = newline [22:42]
        line_list.append (newline)
    #Normalize the values because they are in percentage
    for i in line_list:
        for j in range (0, len (i)):
            i[j] = int(i[j])/100
    #Padding, now we have vectors directly, so the padding is done by adding vectors containing 20 zeros.
    temp_prot=[]
    a=list(np.zeros(20))
    for i in range (0, pad_size):
        temp_prot.append(a)
    temp_prot.extend(line_list)
    for i in range (0, pad_size):
        temp_prot.append(a)
    #Create words with pssm information: one flattened window (window*20
    #values) centred on each residue position.
    wordlist=[]
    for i in range (0, len (temp_prot)-(window-1)):
        b=temp_prot[i:i+(window)]
        b = [j for i in b for j in i]
        if len(b) != window*20:
            print ("oh no")
        wordlist.append(b)
    bigwordlist.append(wordlist)
# Flatten to one feature vector per residue across all titles.
bigwordlist=[j for i in bigwordlist for j in i]
#Store it in a numpy array
X = np.array(bigwordlist)
#Import the svm model and classify every PSSM-derived window.
clf = joblib.load('/home/u2195/Desktop/Dropbox/Bioinformatics_projects/results/models/psiblast_SPmodel.pkl')
predicted = clf.predict(bigwordlist)
print("This predictor has a cross-validation accuracy of 0.96")
# Map numeric class labels onto per-residue state characters.
structure_dict = { 1:'G', 2:'S'}
struct_prediction = [structure_dict[label] for label in predicted.tolist()]
print ('Prediction output: ', struct_prediction)
#Save the prediction output in a file
# BUG FIX: the original called sys.exit() *before* this block, so the
# prediction file was never written; the script now exits only after saving.
with open ('//home/u2195/Desktop/Dropbox/Bioinformatics_projects/results/' + 'SP_Prediction_psiblast' '.fasta', 'w')as b:
    for i in range(len(titlelist)):
        # NOTE(review): the same concatenated prediction string is written
        # for every title -- confirm whether per-protein slices were intended.
        b.write('Prediction of Signal Peptide using psiblast by Carolina Savatier'+'\n')
        b.write(titlelist[i]+'\n')
        b.write(''.join(struct_prediction)+'\n')
sys.exit()
|
#!/usr/bin/env python3
# Copyright (c) 2004-present Facebook All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import sys
from unittest import TestLoader, TextTestRunner
import pyinventory_tests
from pyinventory_tests.utils.constant import TESTS_PATTERN, XML_OUTPUT_DIRECTORY
from xmlrunner import XMLTestRunner
if __name__ == "__main__":
    # Discover the pyinventory test suite, filtered by the configured pattern.
    loader = TestLoader()
    loader.testNamePatterns = [TESTS_PATTERN]
    suite = loader.loadTestsFromModule(pyinventory_tests)
    # Emit JUnit-style XML when an output directory is configured,
    # otherwise fall back to the plain-text runner.
    if XML_OUTPUT_DIRECTORY:
        runner = XMLTestRunner(output=XML_OUTPUT_DIRECTORY, verbosity=2)
    else:
        runner = TextTestRunner(verbosity=2)
    result = runner.run(suite)
    # Exit non-zero when any test errored or failed, zero otherwise.
    suite_failed = bool(result.errors or result.failures)
    sys.exit(1 if suite_failed else 0)
|
# coding: utf-8
"""
Utility for comparing predicted and actual reference sections.
Takes the scrape_data.csv produced by evaluate_algo.py, and produces an
interactive dashboard through which actual and predicted references sections
can be compared.
Requires streamlit>=0.47.3
pip3 install streamlit
streamlit run compare_found_sections.py
"""
import numpy as np
import pandas as pd
import streamlit as st
# Load scrape_data.csv produced by evaluate_algo.py
data = pd.read_csv("./scrape_data.csv")
# Drop examples for which no comparison can be made
data.dropna(subset=["Predicted text", "Actual text"], inplace=True)
# Add sidebar
st.sidebar.title("Reference section explorer")
# Create selector for file hash in sidebar.
pdf_file = st.sidebar.selectbox("pdf file", data["File"].to_list())
# Levenshtein distance between predicted and actual text for the chosen file.
lev = data.loc[data["File"] == pdf_file, ["lev_distance"]].iloc[0]["lev_distance"]
comment = st.sidebar.text_area("Comment about the prediction")
actual = data.loc[data["File"] == pdf_file, ["Actual text"]].iloc[0]["Actual text"]
predicted = data.loc[data["File"] == pdf_file, ["Predicted text"]].iloc[0]["Predicted text"]
# Produce a line which can easily be copied and pasted into a markdown table
st.write("Copy the line below into a markdown table:")
st.write(f"|{pdf_file}|{len(actual)}|{len(predicted)}|{np.round(lev, 2)}|{comment}|")
# Side-by-side view of the actual vs predicted reference sections.
st.table(data.loc[data["File"] == pdf_file, ["Actual text" ,"Predicted text"]])
|
from django.http.response import JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import status
from appointment.models import Appointment, Scheduled
from appointment.serializers import AppointmentSerializer, ScheduledSerializer
from rest_framework.decorators import api_view
@api_view(['POST', 'PUT', 'DELETE'])
def appointment(request):
    """This Endpoint takes the candidate/interview details and their available time-slot

    POST   -- create an Appointment from the request body (201 / 400).
    PUT    -- update the Appointment whose 'id' is in the body (200 / 400 / 404).
    DELETE -- delete the Appointment named by the 'id' query param (204 / 404).
    """
    if request.method == 'POST':
        appointment_data = JSONParser().parse(request)
        appointment_serializer = AppointmentSerializer(data=appointment_data)
        if appointment_serializer.is_valid():
            appointment_serializer.save()
            return JsonResponse(appointment_serializer.data, status=status.HTTP_201_CREATED)
        return JsonResponse(appointment_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        try:
            # NOTE: shadows builtin id(); a missing query param raises KeyError (500).
            id = request.GET["id"]
            appointment_delete = Appointment.objects.get(id=id)
        except Appointment.DoesNotExist:
            return JsonResponse({'message': 'The appointment does not exist'}, status=status.HTTP_404_NOT_FOUND)
        appointment_delete.delete()
        return JsonResponse({'message': 'appointment was deleted successfully!'},
                            status=status.HTTP_204_NO_CONTENT)
    elif request.method == 'PUT':
        try:
            appointment_data = JSONParser().parse(request)
            appointment_find = Appointment.objects.get(id=appointment_data['id'])
        except Appointment.DoesNotExist:
            return JsonResponse({'message': 'The appointment does not exist'}, status=status.HTTP_404_NOT_FOUND)
        # Strip 'id' so the serializer cannot overwrite the primary key.
        del appointment_data['id']
        appointment_serializer = AppointmentSerializer(appointment_find, data=appointment_data)
        if appointment_serializer.is_valid():
            appointment_serializer.save()
            return JsonResponse(appointment_serializer.data)
        return JsonResponse(appointment_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def appointment_match(request):
    """This Endpoint takes candidate_id, interviewer_id, date and returns their matching time-slots

    Intersects the two availability windows for the given date and returns
    one-hour (start, end) slots. Responds 404 when either appointment is
    missing for that date.
    NOTE(review): missing query parameters raise KeyError (HTTP 500) --
    confirm whether a 400 response was intended.
    """
    try:
        interviewer_id = request.GET["interviewer_id"]
        candidate_id = request.GET["candidate_id"]
        date = request.GET["date"]
        interviewer = Appointment.objects.get(id=interviewer_id, date=date)
        candidate = Appointment.objects.get(id=candidate_id, date=date)
    except Appointment.DoesNotExist:
        return JsonResponse({'message': 'The appointment does not exist'}, status=status.HTTP_404_NOT_FOUND)
    # Hours both parties are free: intersection of the two [start, end) ranges.
    common_hours = set(range(interviewer.start_time, interviewer.end_time)) \
        & set(range(candidate.start_time, candidate.end_time))
    appointment_time = 1  # slot length, in hours
    # BUG FIX: the original iterated an unordered set twice to build the
    # start/end lists, so the response order was arbitrary; a single sorted
    # pass makes the output deterministic (same slots, ascending order).
    slots = [(start, start + appointment_time) for start in sorted(common_hours)]
    return JsonResponse({'Matching Slots': slots}, status=status.HTTP_200_OK)
@api_view(['POST', 'PUT', 'DELETE', 'GET'])
def scheduled(request):
    """This Endpoint takes candidate_id, interviewer_id and their preferred matching time-slots

    POST   -- create a Scheduled entry (201 / 400).
    DELETE -- delete the entry named by the 'id' query param (204 / 404).
    PUT    -- update the entry whose 'id' is in the body (200 / 400 / 404).
    GET    -- list every Scheduled entry.
    """
    if request.method == 'POST':
        scheduled_data = JSONParser().parse(request)
        scheduled_serializer = ScheduledSerializer(data=scheduled_data)
        if scheduled_serializer.is_valid():
            scheduled_serializer.save()
            return JsonResponse(scheduled_serializer.data, status=status.HTTP_201_CREATED)
        return JsonResponse(scheduled_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'DELETE':
        try:
            id = request.GET["id"]  # NOTE: shadows builtin id()
            scheduled_delete = Scheduled.objects.get(id=id)
        except Scheduled.DoesNotExist:
            # NOTE(review): message says 'appointment' although this is a
            # Scheduled record -- copied from the appointment view.
            return JsonResponse({'message': 'The appointment does not exist'}, status=status.HTTP_404_NOT_FOUND)
        scheduled_delete.delete()
        return JsonResponse({'message': 'appointment was deleted successfully!'},
                            status=status.HTTP_204_NO_CONTENT)
    elif request.method == 'PUT':
        try:
            scheduled_data = JSONParser().parse(request)
            scheduled_find = Scheduled.objects.get(id=scheduled_data['id'])
        except Scheduled.DoesNotExist:
            return JsonResponse({'message': 'The appointment does not exist'}, status=status.HTTP_404_NOT_FOUND)
        # Strip 'id' so the primary key cannot be overwritten on update.
        del scheduled_data['id']
        scheduled_serializer = ScheduledSerializer(scheduled_find, data=scheduled_data)
        if scheduled_serializer.is_valid():
            scheduled_serializer.save()
            return JsonResponse(scheduled_serializer.data)
        return JsonResponse(scheduled_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'GET':
        # Local name shadows this view function; harmless inside the scope.
        scheduled = Scheduled.objects.all()
        scheduled_serializer = ScheduledSerializer(scheduled, many=True)
        return JsonResponse(scheduled_serializer.data, safe=False)
|
# Legacy Django URLconf: string view paths and patterns() were both removed
# in Django 1.10 -- migrate to path()/re_path() with imported view callables
# when upgrading.
from django.conf.urls.defaults import patterns, url

urlpatterns = patterns('',
    url(r'^$', 'meteo.views.list_previ', name="listprevi"),
    url(r'^add/$', 'meteo.forms.add_previ', name='addprevi'),
    url(r'^(?P<previid>\d+)/$', 'meteo.views.view_previ', name="viewprevi"),
    url(r'^(?P<previid>\d+)/edit/$', 'meteo.forms.edit_previ', name='editprevi'),
    url(r'^(?P<previid>\d+)/delete/$', 'meteo.forms.delete_previ', name='deleteprevi'),
    url(r'^encartmeteo/$', 'meteo.views.encartmeteo', name='encartmeteo'),
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.