text stringlengths 38 1.54M |
|---|
#! /usr/bin/python3
# Print each number in 0..9 together with its predecessor and their sum.
# For 0 there is no predecessor, so 0 itself is used.
for current in range(10):
    previous = current if current == 0 else current - 1
    print("Current number " + str(current) + " Previous number " + str(previous) + " sum " + str(current + previous))
|
# Demonstrate basic file-pointer operations (readline/tell/seek) on hello.txt.
# Bug fix: the original opened the file and never closed it, leaking the
# handle; a context manager guarantees the file is closed.
with open("hello.txt", "r") as fo:
    print("NAME OF FILE: ", fo.name)
    line = fo.readline()
    print("READ LINE: %s" % (line))
    p = fo.tell()
    print("POSITION OF POINTER IS: %d" % p)
    print("----------------------------------------")
    fo.seek(0, 0)  # rewind to the start of the file
    g = fo.tell()
    print("POSITION OF POINTER IS: ", g)
|
import pyparsing as _p
def parse(liberty_string):
    """Parse a Liberty-format string into a nested dict structure.

    Builds a pyparsing grammar for the Liberty file format: ``key : value;``
    pairs, ``name(v1, v2, ...);`` lists and nested ``group(name) { ... }``
    blocks.  Returns the contents of the top-level 'library' group.
    """
    #_p.ParserElement.enablePackrat() <- don't: kills memory - and slower...
    identifier=_p.Word(_p.alphanums+'._') # a name for a group or attribute
    EOL = _p.LineEnd().suppress() # end of line
    # Newlines must stay significant, so whitespace is only space/tab.
    ws = ' \t'
    _p.ParserElement.setDefaultWhitespaceChars(ws)
    linebreak = _p.Suppress("\\" + _p.LineEnd())  # escaped line continuation
    o_linebreak = _p.Optional(linebreak)
    library_name = identifier
    b_left = _p.Suppress("(")
    b_right = _p.Suppress(")")
    cb_left = _p.Suppress("{")
    cb_right = _p.Suppress("}")
    semicolon = _p.Suppress(";")
    colon = _p.Suppress(":")
    quote = _p.Suppress("\"")
    emptyquote = '""'
    comma = _p.Suppress(",")
    comment = _p.Suppress(_p.cStyleComment())  # /* ... */ comments are dropped
    valuestring = _p.Word(_p.alphanums+'\'=&_-+*/.$:!| ')
    valuestring_quoted = emptyquote | quote + _p.Word(_p.alphanums+'\'=&_-+*/.$:! ()|') + quote
    valuelist = _p.Group(quote + valuestring + _p.OneOrMore(comma + valuestring) + quote)
    value = valuestring | valuestring_quoted | valuelist
    key_value_pair = _p.Group(identifier + colon + value) + semicolon + EOL
    named_list = identifier + b_left + _p.Group( o_linebreak + value + _p.ZeroOrMore( comma + o_linebreak + value)) + o_linebreak + b_right + (semicolon + EOL | EOL)
    # Groups nest arbitrarily, hence the Forward declaration.
    group = _p.Forward()
    group << _p.Group(
        _p.Group(identifier + b_left + _p.Optional((identifier + _p.ZeroOrMore( comma + identifier))|valuestring_quoted + _p.ZeroOrMore( comma + valuestring_quoted))) + b_right + cb_left + EOL
        + _p.Group( _p.ZeroOrMore(key_value_pair | named_list | group | EOL | comment))
        + cb_right + EOL
    )
    library = _p.Suppress(_p.ZeroOrMore(comment | EOL)) + group + _p.Suppress(_p.ZeroOrMore(comment | EOL))
    # Parse actions are defined later in this module; the names are resolved
    # at call time, after the whole module has been loaded.
    key_value_pair.setParseAction(handle_parameters)
    named_list.setParseAction(handle_list)
    group.setParseAction(handle_groups)
    valuestring.setParseAction(parse_string_if_possible)
    return library.parseString(liberty_string)[0]['library']
def parse_string_if_possible(token):
    """Parse action: convert token[0] to float when possible, else keep it."""
    raw = token[0]
    try:
        return float(raw)
    except ValueError:
        # Not numeric: leave the original string untouched.
        return raw
def merge(a, b, path=None):
    """Recursively merge ``b`` into ``a`` and return the merged result.

    Dicts are merged key by key; clashing scalar leaves are collected into a
    list ``[a[key], b[key]]``.  ``path`` tracks the key chain for recursion.

    NOTE(review): when ``a`` is a list, ``a = a + b`` / ``a = b`` rebind the
    *local* name, so in those branches only the return value reflects the
    merge, and ``b`` may be mutated in place.  The dict branch does mutate
    ``a`` in place, which is what handle_groups relies on — TODO confirm no
    caller depends on the list branches mutating ``a``.
    """
    if path is None: path = []
    if type(a) == list and type(b) == list:
        a = a + b
    elif type(a) == list:
        a.append(b)
    elif type(b) == list:
        # Note: this appends ``a`` into ``b`` and returns ``b``.
        b.append(a)
        a = b
    else:
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge(a[key], b[key], path + [str(key)])
                elif a[key] == b[key]:
                    pass # same leaf value
                elif type(a[key]) == list and type(b[key]) == list:
                    a[key] = a[key] + b[key]
                elif type(a[key]) == list:
                    a[key].append(b[key])
                elif type(b[key]) == list:
                    b[key].append(a[key])
                    a[key] = b[key]
                else:
                    # two differing scalar leaves: keep both
                    a[key] = [a[key], b[key]]
            else:
                a[key] = b[key]
    return a
def handle_groups(token):
    """Parse action: fold a parsed Liberty group into a nested dict.

    token[0][0] is the group header list ([type] or [type, name]);
    token[0][1] holds the body entries already converted to dicts by the
    nested parse actions.  Returns {type: {...}} or {type: {name: {...}}}.
    """
    d = {}
    group_type = token[0][0][0]
    group_name = False
    if len(token[0][0]) == 2:
        # Named group: merge every body entry under d[type][name] so that
        # repeated keys are combined (see merge()).
        group_name = token[0][0][1]
        d[group_type] = {}
        d[group_type][group_name] = {}
        [merge(d[group_type][group_name],e) for e in token[0][1].asList()]
    else:
        # Anonymous group: body entries update d[type] directly
        # (later entries overwrite earlier ones with the same key).
        d[group_type] = {}
        [d[group_type].update(e) for e in token[0][1].asList()]
    return d
def handle_parameters(token):
    """Parse action: map a parsed ``key : value`` pair to a one-entry dict."""
    key, value = token[0][0], token[0][1]
    return {key: value}
def handle_list(token):
    """Parse action: map ``name(v1, v2, ...)`` to {name: [v1, v2, ...]}."""
    name, values = token[0], token[1]
    return {name: values.asList()}
|
def isprime(n):
    """Return the smallest non-trivial factor of abs(int(n)), or 0 if none.

    A return of 0 means the number is prime (or below 2); any non-zero
    return is a divisor, i.e. the number is composite.  (Despite the name,
    this is a factor finder, not a boolean primality test.)
    """
    n = abs(int(n))
    if n <= 2:
        # 0 and 1 have no factors; 2 is prime.
        return 0
    if n % 2 == 0:
        return 2
    # Trial division by odd candidates up to sqrt(n).
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return divisor
        divisor += 2
    return 0
def trans(digits, base):
    """Interpret ``digits`` (least-significant digit first) in ``base``."""
    return sum(d * base ** pos for pos, d in enumerate(digits))
def next(digs):
    """Advance the interior digits of ``digs`` like a binary counter, in place.

    The first and last digits are never touched; interior digits are
    incremented with carry starting from index 1.  NOTE: shadows the built-in
    ``next``; the name is kept for compatibility with existing callers.
    """
    for pos in range(1, len(digs) - 1):
        if digs[pos]:
            digs[pos] = 0  # carry into the next position
        else:
            digs[pos] = 1
            return
# Jamcoin search (GCJ 2016 "Coin Jam" style): find N-digit binary strings
# starting and ending in 1 that are composite when read in every base 2..10,
# printing a witness divisor per base.
# Bug fix: the original used Python 2 ``print`` statements, which are a
# syntax error under the python3 shebang used elsewhere in this file.
times = 1  # int(input())
for t in range(times):
    print('Case #' + str(t + 1) + ':')
    (N, J) = (16, 50)  # [int(a) for a in input().split(' ')]
    j = 0
    # First and last digits are fixed to 1; next() cycles the interior.
    digits = [1] + [0 for i in range(N - 2)] + [1]
    while j < J:
        ls = []
        success = True
        for base in range(2, 11):
            a = isprime(trans(digits, base))
            if a == 0:
                # Prime in this base: candidate rejected.
                success = False
                break
            else:
                ls.append(a)
        if success:
            digits.reverse()
            digstr = ''.join([str(a) for a in digits])
            print(digstr + ' ' + ' '.join([str(a) for a in ls]))
            j += 1
        next(digits)
|
from __future__ import absolute_import, unicode_literals
from .base import *
from .secrets import DEBUG
try:
from .local import *
except ImportError:
pass
|
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of two integer lists.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int] — each value repeated min(count in nums1, count
            in nums2) times, in first-occurrence order of nums1.

        Improvement: ``Counter & Counter`` computes the per-key minimum in
        one pass, replacing the original O(k1*k2) nested scan over both key
        sets; a leftover debug ``print`` was also removed.
        """
        return list((Counter(nums1) & Counter(nums2)).elements())
|
def compute(pankcakes):
    """Return the number of flips to make all pancakes happy-side up.

    Counts contiguous runs in the +/- string; if the stack already ends in
    '+' the final run needs no flip.
    """
    runs = 1 + sum(1 for prev, cur in zip(pankcakes, pankcakes[1:]) if cur != prev)
    return runs - 1 if pankcakes[-1] == '+' else runs
# Driver: read the number of test cases, then one +/- pancake string per
# case, printing the flip count in Code Jam output format.
cases = input()
for idx in range(int(cases)):
    pankcakes = input()
    print("Case #{}: {}".format(idx+1, compute(pankcakes)))
|
import nltk
from nltk import word_tokenize
from nltk.corpus import brown
import os, os.path
import math, time
def noOfFilesCnt():
    # NOTE(review): ``totalCnt`` is not defined at module scope, so calling
    # this raises NameError.  generateTfidf uses its own local
    # ``totalCnt = 512``; presumably this was meant to return that corpus
    # size — TODO confirm and define the module-level constant.
    return totalCnt
def frequencyOfWord(word, listOfWords):
    """Return how many times ``word`` occurs in ``listOfWords``.

    Works on any iterable.  Improvement: replaces the original manual
    counting loop with the idiomatic generator-sum; behaviour is unchanged.
    """
    return sum(1 for item in listOfWords if item == word)
#To sort the tfidf words
def sorter(tfidf):
items = [(v, k) for k, v in tfidf.items()]
items.sort()
items.reverse()
items = [(k, v) for v, k in items]
return items
#Generate tfidf of the most frequent words
def generateTfidf(wrdList):
print "new idf..."
tfidf = {}
dictDocCount = {}
totalCnt = 512
#Create a dictionary of the words from the corpus
print "trying to open file....."
DocCountFl = open('newSortWords.txt','r')
for WrdCntPair in DocCountFl.readlines():
WrdCntPair = WrdCntPair.strip()
if ':' in WrdCntPair:
if WrdCntPair.count(':') == 1:
WrdCntLst = WrdCntPair.split(':')
dictDocCount[WrdCntLst[0]] = int(WrdCntLst[1])
DocCountFl.close()
#Calculate the tfidf for the 10 most frequent words in wrdLst
for i in wrdList.keys()[:10]:
counter = 0
if dictDocCount.has_key(i):
counter = dictDocCount[i]
else:
counter = 0
#TFIDF[i] = frequency of the word * log(1 + (total no of documents/no of documents the word occurs))
if(counter != 0):
tfidf[i] = wrdList[i] * math.log(1 + float(float(totalCnt)/float(counter)))
else:
tfidf[i] = 0
return tfidf
|
import github
import json
import os
# GitHub Action entry script: read action inputs from the environment.
token = os.environ['INPUT_TOKEN']
repoName = os.environ['GITHUB_REPOSITORY']
projectCardId = os.environ['INPUT_PROJECTCARDID']
displayUserJson = json.loads(os.environ['INPUT_DISPLAYUSERJSON'])
displayUrlJson = json.loads(os.environ['INPUT_DISPLAYURLJSON'])
# Connect to GitHub
g = github.Github(token)
# Get credentials file from repo
# NOTE(review): GITHUB_REPOSITORY is "owner/name"; passing it to
# get_user().get_repo() (which expects a bare repo name) looks wrong —
# verify against g.get_repo(repoName).
repo = g.get_user().get_repo(repoName)
# NOTE(review): get_file_contents is a deprecated PyGithub API; the modern
# call is get_contents — TODO confirm the pinned PyGithub version.
file = repo.get_file_contents('credentials.txt')
# Update file with new credentials
# NOTE(review): commit message and content are literal placeholder strings;
# presumably they should be built from the inputs above — TODO confirm.
repo.update_file('credentials.txt', 'your_commit_message', 'your_new_file_content', file.sha)
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtSql import *
import time
class returnBookDialog(QDialog):
    """Modal dialog that lets a student return a borrowed book.

    Looks the book up by its code in the SQLite database, shows its details,
    and on confirmation updates the User, Book and User_Book tables.
    """

    # Emitted after a book has been successfully returned.
    return_book_successful_signal=pyqtSignal()

    def __init__(self, StudentId, parent=None):
        super(returnBookDialog, self).__init__(parent)
        self.studentId = StudentId
        self.setUpUI()
        self.setWindowModality(Qt.WindowModal)
        self.setWindowTitle("Return Book")

    def setUpUI(self):
        """Build the form layout, widgets, fonts and signal connections."""
        BookCategory = ["Technology", "Social Science", "Politics", "Law", "Military", "Economics", "Culture", "Education", "Physical Education", "Language Arts", "Arts", "History"
            , "Geography", "Astronomy", "Biology", "Medicine", "Philosophy"]
        self.resize(300, 250)
        self.layout = QFormLayout()
        self.setLayout(self.layout)
        # Labels for the form rows.
        self.borrowStudentLabel = QLabel("Borrower:")
        self.borrowStudentIdLabel = QLabel(self.studentId)
        self.bookNameLabel = QLabel("Book Name:")
        self.bookIdLabel = QLabel("Book Code:")
        self.authNameLabel = QLabel("Author:")
        self.categoryLabel = QLabel("Category:")
        self.publisherLabel = QLabel("Publisher:")
        self.publishDateLabel = QLabel("Publish Date:")
        self.returnBookButton = QPushButton("Return")
        # Edits: only the book code is typed by the user; the rest are
        # filled from the database and made read-only below.
        self.bookNameEdit = QLineEdit()
        self.bookIdEdit = QLineEdit()
        self.authNameEdit = QLineEdit()
        self.categoryComboBox = QComboBox()
        self.categoryComboBox.addItems(BookCategory)
        self.publisherEdit = QLineEdit()
        self.publishTime = QLineEdit()
        # self.publishDateEdit = QLineEdit()
        self.bookNameEdit.setMaxLength(10)
        self.bookIdEdit.setMaxLength(6)
        self.authNameEdit.setMaxLength(10)
        self.publisherEdit.setMaxLength(10)
        self.layout.addRow(self.borrowStudentLabel, self.borrowStudentIdLabel)
        self.layout.addRow(self.bookNameLabel, self.bookNameEdit)
        self.layout.addRow(self.bookIdLabel, self.bookIdEdit)
        self.layout.addRow(self.authNameLabel, self.authNameEdit)
        self.layout.addRow(self.categoryLabel, self.categoryComboBox)
        self.layout.addRow(self.publisherLabel, self.publisherEdit)
        self.layout.addRow(self.publishDateLabel, self.publishTime)
        self.layout.addRow(self.returnBookButton)
        font = QFont()
        font.setFamily("Candara")
        # NOTE(review): the next call is dead — immediately overridden by 14.
        font.setPixelSize(20)
        font.setPixelSize(14)
        self.borrowStudentIdLabel.setFont(font)
        self.borrowStudentLabel.setFont(font)
        self.bookNameLabel.setFont(font)
        self.bookIdLabel.setFont(font)
        self.authNameLabel.setFont(font)
        self.categoryLabel.setFont(font)
        self.publisherLabel.setFont(font)
        self.publishDateLabel.setFont(font)
        self.bookNameEdit.setFont(font)
        self.bookNameEdit.setReadOnly(True)
        self.bookNameEdit.setStyleSheet("background-color:#dddddd")
        self.bookIdEdit.setFont(font)
        self.authNameEdit.setFont(font)
        self.authNameEdit.setReadOnly(True)
        self.authNameEdit.setStyleSheet("background-color:#dddddd")
        self.publisherEdit.setFont(font)
        self.publisherEdit.setReadOnly(True)
        self.publisherEdit.setStyleSheet("background-color:#dddddd")
        self.publishTime.setFont(font)
        self.publishTime.setStyleSheet("background-color:#dddddd")
        self.categoryComboBox.setFont(font)
        self.categoryComboBox.setStyleSheet("background-color:#dddddd")
        font.setPixelSize(16)
        self.returnBookButton.setFont(font)
        self.returnBookButton.setFixedHeight(32)
        self.returnBookButton.setFixedWidth(280)
        self.returnBookButton.setStyleSheet("background-color:rgb(255, 117, 124);\n"
            "color:white;")
        self.layout.setVerticalSpacing(10)
        self.returnBookButton.clicked.connect(self.returnButtonClicked)
        self.bookIdEdit.textChanged.connect(self.bookIdEditChanged)

    def returnButtonClicked(self):
        """Validate the entered book code and record the return in the DB.

        NOTE(review): all SQL below is built with %-interpolation of user
        input — SQL-injection risk; should use QSqlQuery.prepare/bindValue.
        """
        BookId = self.bookIdEdit.text()
        if (BookId == ""):
            print(QMessageBox.warning(self, "Warning", "The book you are returning is not in our system", QMessageBox.Yes, QMessageBox.Yes))
            return
        db = QSqlDatabase.addDatabase("QSQLITE")
        db.setDatabaseName('./db/FBLAE-Book2dbebook.db')
        db.open()
        query = QSqlQuery()
        # Is this book currently borrowed by this student?
        sql = "SELECT * FROM User_Book WHERE StudentId='%s' AND BookId='%s' AND BorrowState=1" %(self.studentId,BookId)
        query.exec_(sql)
        if (not query.next()):
            print(QMessageBox.information(self, "Information", "You have not borrowed this book", QMessageBox.Yes, QMessageBox.Yes))
            return
        # Decrement the student's borrow count.
        sql = "UPDATE User SET NumBorrowed=NumBorrowed-1 WHERE StudentId='%s'" % self.studentId
        query.exec_(sql)
        db.commit()
        # Make the copy available again.
        sql = "UPDATE Book SET NumCanBorrow=NumCanBorrow+1 WHERE BookId='%s'" % BookId
        query.exec_(sql)
        db.commit()
        # Close the borrow record with today's date.
        timenow = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        sql = "UPDATE User_Book SET ReturnTime='%s',BorrowState=0 WHERE StudentId='%s' AND BookId='%s' AND BorrowState=1" % (timenow,self.studentId,BookId)
        query.exec_(sql)
        db.commit()
        print(QMessageBox.information(self, "Information", "Book returned", QMessageBox.Yes, QMessageBox.Yes))
        self.return_book_successful_signal.emit()
        self.close()
        return

    def bookIdEditChanged(self):
        """Autofill book details whenever the typed book code changes.

        NOTE(review): same string-built SQL injection concern as above.
        """
        bookId = self.bookIdEdit.text()
        if (bookId == ""):
            self.bookNameEdit.clear()
            self.publisherEdit.clear()
            self.authNameEdit.clear()
            self.publishTime.clear()
        db = QSqlDatabase.addDatabase("QSQLITE")
        db.setDatabaseName('./db/FBLAE-Book2dbebook.db')
        db.open()
        query = QSqlQuery()
        sql = "SELECT * FROM User_Book WHERE StudentId='%s' AND BookId='%s' AND BorrowState=1" % (
            self.studentId, bookId)
        query.exec_(sql)
        if (query.next()):
            # Only show details for a book this student actually has out.
            sql = "SELECT * FROM Book WHERE BookId='%s'" % (bookId)
            query.exec_(sql)
            if (query.next()):
                # Column order assumed: name, ?, author, category,
                # publisher, publish date — TODO confirm against schema.
                self.bookNameEdit.setText(query.value(0))
                self.authNameEdit.setText(query.value(2))
                self.categoryComboBox.setCurrentText(query.value(3))
                self.publisherEdit.setText(query.value(4))
                self.publishTime.setText(query.value(5))
        return
# Standalone entry point: show the dialog with a demo student id.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon("./rsc/manage_ebooks_button.png"))
    mainMindow = returnBookDialog("0000000001")
    mainMindow.show()
    sys.exit(app.exec_())
|
import picamera
from openflexure_stage import OpenFlexureStage
import numpy as np
from camera_stuff import get_numpy_image, find_template
import time
import h5py
from contextlib import closing
import data_file
def measure_txy(n, start_time, camera, templ8): #everything used in a definition should be put in as an argument
    """Measure position n times and return a 3xn array of (t, x, y) columns."""
    samples = np.zeros((3, n))
    for col in range(n):
        image = get_numpy_image(camera, True)              # grab a frame
        samples[1:, col] = find_template(templ8, image)    # locate template (x, y)
        samples[0, col] = time.time() - start_time         # timestamp relative to start
    return samples
def sqre_move(step, side_length, start_time, camera, templ8):
    """Trace a square with the stage, sampling position after every move.

    Moves the stage around a square of side ``side_length`` in ``step``
    moves per side (left, down, right, up), sleeping 1 s after each move,
    and returns a (3, 4*step) array of (t, x, y) samples.

    Bug fix: the original called ``np.append(data, ...)`` and discarded the
    result — ``np.append`` returns a new array and never mutates its input —
    so only the initial zeros were ever returned.  Samples are now collected
    in a list and concatenated.  Relies on the module-level ``stage`` object.
    """
    samples = []
    # Unit direction per side of the square: left, down, right, up.
    for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
        for _ in range(step):
            stage.move_rel([dx * side_length / (step + 1),
                            dy * side_length / (step + 1), 0])
            time.sleep(1)  # let the stage settle before measuring
            samples.append(measure_txy(1, start_time, camera, templ8))
    if not samples:
        return np.zeros((3, 0))
    return np.concatenate(samples, axis=1)
# Orthogonality measurement: drive the stage around squares of increasing
# step counts, tracking a template in the camera image, and log to HDF5.
if __name__ == "__main__":
    with picamera.PiCamera(resolution=(640,480), framerate = 100) as camera, \
         OpenFlexureStage('/dev/ttyUSB0') as stage, \
         closing(data_file.Datafile(filename="orthogonality.hdf5")) as df: #opens camera, stage and datafile
        stage.backlash = 248 #corrects for backlash
        side_length = 500 #defines distance moved for each side of the square
        camera.start_preview() #shows preview of camera
        stage_centre = stage.position #saves the starting position of stage
        stage.move_rel([side_length/2, side_length/2, 0]) #moves the stage to the 'top left' corner of the square
        top_left = stage.position
        try:
            for step in [1, 10, 25]: #runs for the given number of steps per side of the square
                start_time = time.time()
                image = get_numpy_image(camera, greyscale=True) #gets template
                templ8 = image[100:-100,100:-100] #crops image
                data = sqre_move(step, side_length, start_time, camera, templ8)
                sq_group = df.new_group("step_%d" % step)
                df.add_data(data, sq_group, "Square with %d steps each side" %step) #writes data to .hdf5 file
                stage.move_abs(top_left)
        finally:
            stage.move_abs(stage_centre) #returns stage to centre once program has run
|
# def scope_test(counter=0):
# print('helos')
# if(counter == 10):
# return
# else:
# print(counter)
# scope_test(counter+1)
# def indsider(x):
# print(x)
# # indsider()
# scope_test()
# def recursion(coin, amount, coinAccumulator):
# # if 100 / 25 - amount divides evenly
# if(amount / coin > 1):
# # start dividing it - set division checker
# divisionCheck = (amount - coinAccumulator) / coin
# # if accum no longer allows even division, stop
# if(divisionCheck < 1):
# print("less than one")
# return amount
# else:
# coinAccumulator += coin
# print(f"coin accum: {coinAccumulator}")
# return recursion(coin, amount, coinAccumulator)
# x = recursion(25,100, 25)
# print(x)
# def keyValue(keyCounter, key):
# if(keyCounter > len(key)-1):
# keyCounter = 0
# return keyCounter
# print(keyValue(0,"hello"))
# Sample text used while experimenting with the line-splitting code below.
# NOTE(review): the name ``list`` shadows the built-in list type for the
# rest of this module.
list = ['This site uses cookies to deliver our', 'services and to show you relevant ads', 'and job listings. By using our site,', 'you acknowledge that you have read and understand our Cookie Policy, Privacy Policy, and our Terms of Service. Your use of Stack Overflow’s Products and Services, including the Stack Overflow Network, is subject to these policies and terms.']
# for i in list:
#     print(f"I: {i}")
# f1 = open("./similarities/text1.txt", "r")
# while True :
#     line = f1.readline()
#     print(line)
#     if (line == ""):
#         print("file finished")
#         break;
# Read the whole file into one string; getLinesList() splits it later.
# NOTE(review): the handle is never closed and ``file`` shadows a built-in.
file = open("./text1.txt", "r")
i = file.read()
# for line in open("./similarities/text1.txt", "r"):
#     print(line)
# else:
#     print('fin')
# with open(i) as lines:
#     try:
#         while True: #Just to fake a lot of readlines and hit the end
#             current = next(lines)
#     except StopIteration:
#         print('EOF!')
def getLinesList(file):
    """Split the text in ``file`` (a string) into a list of lines.

    Characters are accumulated until each "\n"; a trailing partial line (no
    final newline) is recovered via getFinalLine(), and blank lines are
    stripped with removeBlankLines().  NOTE(review): the parameter shadows
    the ``file`` built-in name and is actually a str, not a file object.
    """
    # find last \n
    # storage array
    fileList = []
    # temp storage array
    tempList = []
    # if all lines have \n at the end
    for i in file:
        # if not a new line, then all one line
        if not i == "\n":
            # add all to new list on line
            tempList.append(i)
        else:
            # when end of line
            # make a string
            tmp = ''.join(tempList)
            # push string into file list
            fileList.append(tmp)
            # clear the array
            tempList.clear()
    finalLine = getFinalLine(file)
    # if return is not None; since if nothing, still returns None
    if(finalLine != None):
        # append final line to the list of lines
        fileList.append(finalLine)
    #
    # arr = []
    # for i in fileList:
    #     if(i != ''):
    #         arr.append(i)
    # print(arr)
    # def removeBlankLines(inputList):
    #     results = []
    #     for i in inputList:
    #         if(i != ''):
    #             results.append(i)
    # remove all random blank lines from bottom of text file
    removedList = removeBlankLines(fileList)
    # if there were blank lines, use the new list, else use the origial
    if(removedList != fileList):
        # NOTE(review): leftover debug print
        print('reassign')
        fileList = removedList
    return fileList
# find final \n
# print(getFinalLine(file))
# print(x)
# if last line does not end in a \n, get that line
# if last line does not end in a \n, get that line
def getFinalLine(str):
    """Return the stripped text after the last newline in ``str``, or None.

    Returns None when the string ends in a newline (no trailing partial
    line) or is empty.  Bug fixes: the original used ``rindex`` and raised
    ValueError when the input contained no newline at all (the whole string
    is now treated as the final line), and a leftover debug ``print`` was
    removed.  The parameter name ``str`` is kept for compatibility even
    though it shadows the built-in.
    """
    last_n = str.rfind("\n")
    if last_n == -1:
        # No newline anywhere: the whole string is the (only) final line.
        return str.strip() if str else None
    lastLine = str[last_n:]
    # lastLine starts with the "\n" itself; length 1 means nothing follows.
    if len(lastLine) <= 1:
        return None  # returns None if the text already ends with \n
    # strip the leading newline and surrounding spaces
    return lastLine.strip()
# remove extra \n following the end of the text, they show up as "" in finalList
# remove extra \n following the end of the text, they show up as "" in finalList
def removeBlankLines(inputList):
    """Return ``inputList`` with empty-string entries removed, order kept."""
    return [line for line in inputList if line != ""]
# Show the parsed lines of the loaded text file.
print(getLinesList(i))
import numpy as np
import numpy.random as npr
import scipy as sc
from scipy import linalg
from mimo.abstraction import Distribution
class MatrixNormalWithPrecision(Distribution):
    """Matrix-normal distribution with mean M and precision parameters.

    V is the (drow x drow) row precision and K the (dcol x dcol) column
    precision; the precision of the (Fortran-order) vectorized variable is
    lmbda = kron(K, V).  Cholesky factors are cached and invalidated when
    V or K are reassigned.
    """

    def __init__(self, M=None, V=None, K=None):
        self.M = M
        self._V = V
        self._K = K
        # Cached factorizations, lazily computed.
        self._V_chol = None
        self._K_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def params(self):
        return self.M, self.V, self.K

    @params.setter
    def params(self, values):
        self.M, self.V, self.K = values

    @property
    def nb_params(self):
        # mean entries plus the upper triangle of the full precision matrix
        num = self.dcol * self.drow
        return num + num * (num + 1) / 2

    @property
    def dcol(self):
        return self.M.shape[1]

    @property
    def drow(self):
        return self.M.shape[0]

    @property
    def V(self):
        return self._V

    @V.setter
    def V(self, value):
        self._V = value
        self._V_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def V_chol(self):
        # upper cholesky triangle
        if self._V_chol is None:
            self._V_chol = sc.linalg.cholesky(self.V, lower=False)
        return self._V_chol

    @property
    def K(self):
        return self._K

    @K.setter
    def K(self, value):
        self._K = value
        self._K_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def K_chol(self):
        # upper cholesky triangle
        if self._K_chol is None:
            self._K_chol = sc.linalg.cholesky(self.K, lower=False)
        return self._K_chol

    @property
    def lmbda(self):
        return np.kron(self.K, self.V)

    @property
    def lmbda_chol(self):
        # upper cholesky triangle
        if self._lmbda_chol is None:
            self._lmbda_chol = sc.linalg.cholesky(self.lmbda, lower=False)
        return self._lmbda_chol

    @property
    def lmbda_chol_inv(self):
        if self._lmbda_chol_inv is None:
            self._lmbda_chol_inv = sc.linalg.inv(self.lmbda_chol)
        return self._lmbda_chol_inv

    def rvs(self, size=1):
        """Draw samples: (drow, dcol) for size == 1, else (size, drow, dcol).

        Bug fix: the original rebound ``size`` to a tuple and then nested
        that tuple inside the reshape target shape, which raises; the
        sample count is now kept as an int and the shapes built explicitly.
        """
        if size == 1:
            aux = npr.normal(size=self.drow * self.dcol).dot(self.lmbda_chol_inv.T)
            return self.M + np.reshape(aux, (self.drow, self.dcol), order='F')
        else:
            aux = npr.normal(size=(size, self.drow * self.dcol)).dot(self.lmbda_chol_inv.T)
            return self.M + np.reshape(aux, (size, self.drow, self.dcol), order='F')

    def mean(self):
        return self.M

    def mode(self):
        return self.M

    def log_likelihood(self, x):
        # apply vector operator with Fortran convention
        xr = np.reshape(x, (-1, self.drow * self.dcol), order='F')
        mu = np.reshape(self.M, (self.drow * self.dcol), order='F')
        # Gaussian likelihood on vector dist.; NaN rows score zero.
        bads = np.isnan(np.atleast_2d(xr)).any(axis=1)
        xr = np.nan_to_num(xr, copy=False).reshape((-1, self.drow * self.dcol))
        log_lik = np.einsum('k,kh,nh->n', mu, self.lmbda, xr)\
                  - 0.5 * np.einsum('nk,kh,nh->n', xr, self.lmbda, xr)
        log_lik[bads] = 0
        return - self.log_partition() + self.log_base() + log_lik

    @property
    def base(self):
        return np.power(2. * np.pi, - self.drow * self.dcol / 2.)

    def log_base(self):
        return np.log(self.base)

    def log_partition(self):
        mu = np.reshape(self.M, (self.drow * self.dcol), order='F')
        return 0.5 * np.einsum('k,kh,h->', mu, self.lmbda, mu)\
               - np.sum(np.log(np.diag(self.lmbda_chol)))

    def entropy(self):
        raise NotImplementedError
class MatrixNormalWithDiagonalPrecision(Distribution):
    """Matrix-normal distribution whose row precision V is diagonal.

    ``vs`` holds the diagonal of V; K is the full column precision; the
    vectorized precision is lmbda = kron(K, V) (Fortran vec convention).

    Bug fixes vs the original: the ``V`` property referenced the
    non-existent ``self._vss``; ``K_chol`` never returned its cached value;
    ``log_likelihood``/``log_partition`` referenced the non-existent
    ``self.omega``/``self.omega_chol`` (now ``lmbda``/``lmbda_chol``); and
    ``rvs`` nested a tuple inside the reshape shape (same fix as the full
    precision class above).
    """

    def __init__(self, M=None, vs=None, K=None):
        self.M = M
        self._vs = vs
        self._K = K
        # Cached factorizations, lazily computed.
        self._V_chol = None
        self._K_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def params(self):
        return self.M, self.vs, self.K

    @params.setter
    def params(self, values):
        self.M, self.vs, self.K = values

    @property
    def nb_params(self):
        # mean entries plus the diagonal precision entries
        return self.dcol * self.drow\
               + self.dcol * self.drow

    @property
    def dcol(self):
        return self.M.shape[1]

    @property
    def drow(self):
        return self.M.shape[0]

    @property
    def vs(self):
        return self._vs

    @vs.setter
    def vs(self, value):
        self._vs = value
        self._V_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def V(self):
        assert self._vs is not None
        # Bug fix: original read the undefined attribute ``self._vss``.
        return np.diag(self._vs)

    @property
    def V_chol(self):
        # diagonal matrix: Cholesky factor is just the elementwise sqrt
        if self._V_chol is None:
            self._V_chol = np.diag(np.sqrt(self._vs))
        return self._V_chol

    @property
    def K(self):
        return self._K

    @K.setter
    def K(self, value):
        self._K = value
        self._K_chol = None
        self._lmbda_chol = None
        self._lmbda_chol_inv = None

    @property
    def K_chol(self):
        # upper cholesky triangle; bug fix: the original never returned it
        if self._K_chol is None:
            self._K_chol = sc.linalg.cholesky(self.K, lower=False)
        return self._K_chol

    @property
    def lmbda(self):
        return np.kron(self.K, self.V)

    @property
    def lmbda_chol(self):
        # upper cholesky triangle
        if self._lmbda_chol is None:
            self._lmbda_chol = sc.linalg.cholesky(self.lmbda, lower=False)
        return self._lmbda_chol

    @property
    def lmbda_chol_inv(self):
        if self._lmbda_chol_inv is None:
            self._lmbda_chol_inv = sc.linalg.inv(self.lmbda_chol)
        return self._lmbda_chol_inv

    def rvs(self, size=1):
        """Draw samples: (drow, dcol) for size == 1, else (size, drow, dcol)."""
        if size == 1:
            aux = npr.normal(size=self.drow * self.dcol).dot(self.lmbda_chol_inv.T)
            return self.M + np.reshape(aux, (self.drow, self.dcol), order='F')
        else:
            aux = npr.normal(size=(size, self.drow * self.dcol)).dot(self.lmbda_chol_inv.T)
            return self.M + np.reshape(aux, (size, self.drow, self.dcol), order='F')

    def mean(self):
        return self.M

    def mode(self):
        return self.M

    def log_likelihood(self, x):
        # apply vector operator with Fortran convention
        xr = np.reshape(x, (-1, self.drow * self.dcol), order='F')
        mu = np.reshape(self.M, (self.drow * self.dcol), order='F')
        # Gaussian likelihood on vector dist.; NaN rows score zero.
        bads = np.isnan(np.atleast_2d(xr)).any(axis=1)
        xr = np.nan_to_num(xr, copy=False).reshape((-1, self.drow * self.dcol))
        log_lik = np.einsum('k,kh,nh->n', mu, self.lmbda, xr)\
                  - 0.5 * np.einsum('nk,kh,nh->n', xr, self.lmbda, xr)
        log_lik[bads] = 0
        return - self.log_partition() + self.log_base() + log_lik

    @property
    def base(self):
        return np.power(2. * np.pi, - self.drow * self.dcol / 2.)

    def log_base(self):
        return np.log(self.base)

    def log_partition(self):
        mu = np.reshape(self.M, (self.drow * self.dcol), order='F')
        return 0.5 * np.einsum('k,kh,h->', mu, self.lmbda, mu)\
               - np.sum(np.log(np.diag(self.lmbda_chol)))

    def entropy(self):
        raise NotImplementedError
|
from rest_framework.permissions import BasePermission
from ..models import Campaign
class IsOwner(BasePermission):
    """Custom permission class to allow only campaign owners to edit them."""

    def has_object_permission(self, request, view, obj):
        """Return True if permission is granted to the object's owner.

        The original branched on ``isinstance(obj, Campaign)`` but both
        branches returned the identical expression; the dead branch has
        been removed with no behaviour change.
        """
        return obj.user == request.user
class IsPostOrIsAuthenticated(BasePermission):
    """Allow any POST request (e.g. sign-up); require authentication otherwise."""

    def has_permission(self, request, view):
        # allow all POST requests
        if request.method == 'POST':
            return True
        # Otherwise, only allow authenticated requests.
        # Bug fix: since Django 1.10 ``is_authenticated`` is a property, so
        # calling it raises TypeError on modern Django/DRF.
        return request.user and request.user.is_authenticated
from django.conf.urls import include, url
from django.contrib import admin
from tweets.views import Index, Profile, PostTweet, HashTagCloud, Search
admin.autodiscover()
# URL routing table for the tweets app.
# Bug fix: ``urlpatterns`` was assigned twice; the first list was dead code
# (immediately overwritten by the superset below) and has been removed.
urlpatterns = [
    url(r'^$', Index.as_view()),
    url(r'^user/(\w+)/$', Profile.as_view()),
    url(r'^admin/', admin.site.urls),
    url(r'^user/(\w+)/post/$', PostTweet.as_view()),
    url(r'^hashTag/(\w+)/$', HashTagCloud.as_view()),
    url(r'^search/$', Search.as_view()),
]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 02:37:34 2019
@author: dbhmath
"""
import time
import click
import sys
import pandas as pd
import json
from selenium import webdriver
def unanota(calificaciones, driver, i, CX, delay=2.5):
    """Type one student's grade into row ``i`` of the web gradebook.

    ``calificaciones`` maps corte name -> {student code: grade}; ``CX`` is
    the corte column ('C1', 'C2' or 'C3'); ``delay`` is the pause after
    committing the cell.  Returns the student code handled, or -1 on any
    failure (e.g. the row does not exist on the page).
    """
    cod = -1
    ncorte = {'C1': 1, 'C2': 2, 'C3': 3}
    try:
        student_text = 'STDNT_GRADE_HDR_EMPLID$'+str(i)
        student_name = 'win0divHCR_PERSON_NM_I_NAME$'+str(i)
        elem = driver.find_element_by_id(student_text)
        codigo = int(elem.text)
        nombre = driver.find_element_by_id(student_name).text
        if codigo in calificaciones[CX]:
            nota = calificaciones[CX][codigo]
            # Grades outside [0, 5] are treated as missing.
            if nota < 0.0 or nota > 5.0:
                nota = ''
        else:
            nota = ''
            print(f"Estudiante {nombre} codigo {codigo} no tiene nota.")
        student_grade = 'DERIVED_LAM_GRADE_'+str(ncorte[CX])+'$'+str(i)
        grade = driver.find_element_by_id(student_grade)
        grade.click()
        grade.clear()
        grade.send_keys(str(nota))
        # Click a neighbouring grade cell so the page commits the edit.
        next_i = i + 1 if i == 0 else i-1
        clicker = driver.find_element_by_id(
            'DERIVED_LAM_GRADE_'+str((ncorte[CX]))+'$'+str(next_i))
        clicker.click()
        time.sleep(delay)
        print(i, codigo, CX, nota, nombre)
        cod = codigo
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit, making the script uninterruptible.
        print(f"{i} .......")
    return cod
def main():
    """Bulk-enter grades from a CSV into the ARCA web gradebook via Selenium.

    Expects ``params.json`` with keys: archivo (CSV basename), delimeter,
    decimal, lang, timedelay, intentos.  The CSV must have exactly two
    columns: integer 'ID' codes and float grades under 'C1'/'C2'/'C3'.
    The user logs in manually in the opened browser; the script then walks
    the menu to the gradebook and types each grade.
    """
    print("\nCargando archivo .csv ...")
    params = json.load(open("params.json", 'r'))
    cortes = {'C1', 'C2', 'C3'}
    ok = False
    df = pd.read_csv(params['archivo']+'.csv',
                     delimiter=params['delimeter'], decimal=params['decimal'])
    # Validate the CSV shape: 2 columns, int IDs, float grades, and the
    # grade column named after a valid corte.
    if len(df.columns) == 2:
        idtype = df.iloc[:, 0].dtype
        cxtype = df.iloc[:, 1].dtype
        if idtype == 'int64' and cxtype == 'float64':
            if df.columns.values[0] == 'ID':
                corte = df.columns.values[1]
                if corte in cortes:
                    ok = True
                    print("\nArchivo csv correcto\n")
                else:
                    print("\nError csv: Corte no corresponde. Debe ser C1, C2 o C3\n")
            else:
                print("\nError csv: Columna de códigos debe llamarse ID\n")
        else:
            print("\nError csv: Tipos de datos no corresponden\n")
    else:
        print("\nError csv: Archivo debe tener 2 columnas\n")
    print(df.head())
    if ok == False:
        # Invalid CSV: show details and bail out.
        print("\n")
        print(df.info(memory_usage=False))
        input("Presione cualquier tecla para finalizar...")
        sys.exit()
    df.set_index('ID', inplace=True)
    # End of csv verification
    calificaciones = df.to_dict()
    # Launch Chrome with the configured UI language.
    options = webdriver.ChromeOptions()
    lang = '--lang='+params['lang']
    options.add_argument(lang)
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    driver = webdriver.Chrome("chromedriver.exe", options=options)
    driver.get("https://arca.ecci.edu.co/")
    # The user authenticates manually before continuing.
    print("Escriba su usuario y contraseña en el navegador")
    gradebook = click.confirm('Continuar?', default=True)
    if gradebook:
        print("...")
    else:
        print("Suerte es que le digo... agonía")
        driver.close()
        sys.exit()
    try:
        elem = driver.find_element_by_name('Submit')
        elem.click()
    except:
        print("...")
    # Walk the portal menu down to the class gradebook page.
    menu = ['//*[@id="pthnavbca_PORTAL_ROOT_OBJECT"]', '//*[@id="CO_EMPLOYEE_SELF_SERVICE"]',
            '//*[@id="HC_SS_FACULTY_CTR_GBL"]', '//*[@id="crefli_HC_LAM_CLASS_GRADE_GBL"]']
    print("Buscando Cuaderno Evaluación Clase ...")
    time.sleep(3.5)
    for e in menu:
        try:
            elem = driver.find_element_by_xpath(e)
            elem.click()
            time.sleep(1)
            print(".")
        except:
            print("Error menu")
    gradebook = click.confirm(
        'Debe estar en la página en la que digitan las notas. \n(Cuaderno Evaluación Clase) \nContinuar?', default=True)
    codigos = []
    delay = params['timedelay']
    # One iteration per gradebook page the user navigates to.
    while gradebook:
        try:
            # Grade cells live inside this iframe.
            frame = driver.find_element_by_xpath('//*[@id="ptifrmtgtframe"]')
            driver.switch_to.frame(frame)
        except:
            print("Error frame")
        try:
            class_name = driver.find_element_by_xpath(
                '//*[@id="DERIVED_SSR_FC_SSR_CLASSNAME_LONG"]')
            print(class_name.text)
        except:
            print("Error Classname")
        j = 0
        intentos = params['intentos']
        # Keep typing rows until ``intentos`` consecutive-ish failures
        # (unanota returns -1 when row j does not exist).
        while intentos > 0:
            cod = unanota(calificaciones, driver, j, corte, delay)
            j = j + 1
            if cod == -1:
                intentos = intentos - 1
            else:
                codigos.append(cod)
        print("\nCódigos con nota digitada:\n")
        print(codigos)
        print(f"{len(codigos)} notas digitadas\n")
        print(
            "\nEn el navegador ahora debe guardar e ir a otro Cuaderno Evaluación Clase\n")
        time.sleep(1)
        print(".")
        time.sleep(1)
        print("..")
        time.sleep(1)
        print("...")
        time.sleep(1)
        print("....")
        time.sleep(1)
        print(".....")
        gradebook = click.confirm(
            'Debe estar en otro Cuaderno Evaluación Clase) \nContinuar?', default=True)
    print("Suerte es que le digo... agonía")
    time.sleep(3)
    input("Presione cualquier tecla para finalizar...")
    driver.close()
|
import scipy as sp
import numpy as np
from skimage.segmentation import clear_border
from skimage.feature import peak_local_max
from skimage.measure import regionprops
import scipy.ndimage as spim
import scipy.spatial as sptl
from porespy.tools import get_border, extract_subsection, extend_slice
from porespy.filters import apply_chords
from collections import namedtuple
from tqdm import tqdm
from scipy import fftpack as sp_ft
def representative_elementary_volume(im, npoints=1000):
    r"""
    Calculates the porosity of the image as a function subdomain size. This
    function extracts a specified number of subdomains of random size, then
    finds their porosity.

    Parameters
    ----------
    im : ND-array
        The image of the porous material
    npoints : int
        The number of randomly located and sized boxes to sample. The default
        is 1000.

    Returns
    -------
    A ``namedtuple`` with the ND-array fields *volume* and *porosity*, each
    ``npoints`` long. They can be conveniently plotted by passing the tuple
    to matplotlib's ``plot`` function using the \* notation:
    ``plt.plot(*the_tuple, 'b.')``. The resulting plot is similar to the
    sketch given by Bachmat and Bear [1]

    Notes
    -----
    This function is frustratingly slow.  Profiling indicates that all the
    time is spent summing the number of void voxels (1's) in each subdomain.
    Also, this function is primed for parallelization since the ``npoints``
    are calculated independenlty.

    Bug fixes versus the original: the numpy aliases on the ``scipy``
    namespace (``sp.rand``, ``sp.zeros`` etc.) were removed in modern scipy
    and now raise AttributeError — numpy is used directly; the result is now
    a proper namedtuple *instance* instead of attributes stuck onto the
    namedtuple *class* (field access is unchanged for callers).

    References
    ----------
    [1] Bachmat and Bear. On the Concept and Size of a Representative
    Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
    (1987)
    """
    # Scatter npoints random seed voxels through the image.
    im_temp = np.zeros_like(im)
    crds = np.array(np.random.rand(npoints, im.ndim)*im.shape, dtype=int)
    pads = np.array(np.random.rand(npoints)*np.amin(im.shape)/2+10, dtype=int)
    im_temp[tuple(crds.T)] = True
    labels, N = spim.label(input=im_temp)
    slices = spim.find_objects(input=labels)
    porosity = np.zeros(shape=(N,), dtype=float)
    volume = np.zeros(shape=(N,), dtype=int)
    for i in tqdm(np.arange(0, N)):
        s = slices[i]
        p = pads[i]
        # Grow each seed's slice by its random pad to get a random subdomain.
        new_s = extend_slice(s, shape=im.shape, pad=p)
        temp = im[new_s]
        Vp = np.sum(temp)
        Vt = np.size(temp)
        porosity[i] = Vp/Vt
        volume[i] = Vt
    profile = namedtuple('profile', ('volume', 'porosity'))
    return profile(volume=volume, porosity=porosity)
def porosity_profile(im, axis):
    r"""
    Returns a porosity profile along the specified axis
    Parameters
    ----------
    im : ND-array
        The volumetric image for which to calculate the porosity profile
    axis : int
        The axis (0, 1, or 2) along which to calculate the profile. For
        instance, if `axis` is 0, then the porosity in each YZ plane is
        calculated and returned as 1D array with 1 value for each X position.
    Returns
    -------
    A 1D array of porosity values (in percent) along the given axis.
    """
    im = np.atleast_3d(im)
    # Validate AFTER promoting to 3D: a 2D image may legitimately ask for
    # axis 2, while a genuinely out-of-range axis (e.g. 3 on a 3D image)
    # previously slipped through and failed later with an opaque unpacking
    # error.
    if axis >= im.ndim:
        raise Exception('axis out of range')
    # The two remaining axes are summed out, leaving the profile axis.
    a = set(range(im.ndim)).difference(set([axis]))
    a1, a2 = a
    prof = np.sum(np.sum(im, axis=a2), axis=a1)/(im.shape[a2]*im.shape[a1])
    return prof*100
def radial_density(im, bins=10, voxel_size=1):
    r"""
    Computes the radial density function from the histogram of the
    distance-transform values of the pore space.  Defined by Torquato [1] as:
    .. math::
        \int_0^\infty P(r)dr = 1.0
    where *P(r)dr* is the probability of finding a voxel lying at a radial
    distance between *r* and *dr* from the solid interface.  The cumulative
    form:
    .. math::
        F(r) = \int_r^\infty P(r)dr
    gives the fraction of pore-space with a radius larger than *r*.
    Parameters
    ----------
    im : ND-array
        Either a binary image of the pore space (``True`` = phase of
        interest) or a pre-calculated distance transform, which saves time.
    bins : int or array_like
        Number of bins (int) or bin locations (array); passed directly to
        the histogram routine.  Default is 10.
    voxel_size : scalar
        Side length of a voxel in preferred units; default 1, so scaling
        can be applied to the results afterwards.
    Returns
    -------
    A named-tuple of 1D arrays: ``R`` (radius / x-axis), ``P`` (radial
    density function), ``F`` (complementary cumulative distribution), plus
    ``bin_centers``, ``bin_edges`` and ``bin_widths``.
    Notes
    -----
    Not a pore-size distribution in the strict sense: the distance transform
    is heavily skewed toward small values near the solid walls, but it is a
    useful indicator with a handy mathematical formalism.
    References
    ----------
    [1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
    Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
    """
    # A boolean image still needs its distance transform computed;
    # a float image is assumed to already BE the transform.
    if im.dtype == bool:
        im = spim.distance_transform_edt(im)
    vals = im[im > 0].flatten()
    hist = _histogram(vals, bins=bins)
    rdf = namedtuple('radial_density_function',
                     ('R', 'P', 'F', 'bin_centers', 'bin_edges',
                      'bin_widths'))
    return rdf(R=hist.bin_centers, P=hist.pdf, F=hist.cdf,
               bin_centers=hist.bin_centers, bin_edges=hist.bin_edges,
               bin_widths=hist.bin_widths)
def porosity(im):
    r"""
    Calculates the porosity of an image assuming 1's are void space and 0's are
    solid phase. All other values are ignored.
    Parameters
    ----------
    im : ND-array
        Image of the void space with 1's indicating void space (or True) and
        0's indicating the solid phase (or False).
    Returns
    -------
    porosity : float
        Calculated as the sum of all 1's divided by the sum of all 1's and 0's.
    Notes
    -----
    This function assumes void is represented by 1 and solid by 0, and all
    other values are ignored. This is useful, for example, for images of
    cylindrical cores, where all voxels outside the core are labelled with 2.
    Alternatively, images can be processed with ``find_disconnected_voxels``
    to get an image of only blind pores. This can then be added to the orignal
    image such that blind pores have a value of 2, thus allowing the
    calculation of accessible porosity, rather than overall porosity.
    """
    # NumPy is used directly: sp.array/sp.sum were removed from the scipy
    # namespace in modern SciPy releases.
    im = np.array(im, dtype=int)
    Vp = np.sum(im == 1)
    Vs = np.sum(im == 0)
    # Voxels with any other label (e.g. 2) contribute to neither count.
    e = Vp/(Vs + Vp)
    return e
def two_point_correlation_bf(im, spacing=10):
    r"""
    Calculates the two-point correlation function using brute-force (see Notes)
    Parameters
    ----------
    im : ND-array
        The image of the void space on which the 2-point correlation is desired
    spacing : int
        The space between points on the regular grid that is used to generate
        the correlation (see Notes)
    Returns
    -------
    A tuple containing the x and y data for plotting the two-point correlation
    function, using the *args feature of matplotlib's plot function. The x
    array is the distances between points and the y array is corresponding
    probabilities that points of a given distance both lie in the void space.
    The distance values are binned as follows:
        bins = range(start=0, stop=np.amin(im.shape)/2, stride=spacing)
    Notes
    -----
    The brute-force approach means overlaying a grid of equally spaced points
    onto the image, calculating the distance between each and every pair of
    points, then counting the instances where both pairs lie in the void space.
    This approach uses a distance matrix so can consume memory very quickly for
    large 3D images and/or close spacing.
    """
    if im.ndim == 2:
        pts = np.meshgrid(range(0, im.shape[0], spacing),
                          range(0, im.shape[1], spacing))
        crds = np.vstack([pts[0].flatten(),
                          pts[1].flatten()]).T
    elif im.ndim == 3:
        pts = np.meshgrid(range(0, im.shape[0], spacing),
                          range(0, im.shape[1], spacing),
                          range(0, im.shape[2], spacing))
        crds = np.vstack([pts[0].flatten(),
                          pts[1].flatten(),
                          pts[2].flatten()]).T
    dmat = sptl.distance.cdist(XA=crds, XB=crds)
    # BUG FIX: index with a tuple — fancy indexing with a plain list of
    # arrays was deprecated and then removed in NumPy.
    hits = im[tuple(pts)].flatten()
    # Keep only rows (then columns) where the grid point lies in void space
    dmat = dmat[hits, :]
    h1 = np.histogram(dmat, bins=range(0, int(np.amin(im.shape)/2), spacing))
    dmat = dmat[:, hits]
    h2 = np.histogram(dmat, bins=h1[1])
    tpcf = namedtuple('two_point_correlation_function',
                      ('distance', 'probability'))
    return tpcf(h2[1][:-1], h2[0]/h1[0])
def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper functions to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
"""
if len(autocorr.shape) == 2:
adj = sp.reshape(autocorr.shape, [2, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = sp.reshape(autocorr.shape, [3, 1, 1, 1])
inds = sp.indices(autocorr.shape) - adj/2
dt = sp.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = np.int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial)
def two_point_correlation_fft(im):
    r"""
    Calculates the two-point correlation function using fourier transforms
    Parameters
    ----------
    im : ND-array
        The image of the void space on which the 2-point correlation is desired
    Returns
    -------
    A tuple containing the x and y data for plotting the two-point correlation
    function, using the *args feature of matplotlib's plot function. The x
    array is the distances between points and the y array is corresponding
    probabilities that points of a given distance both lie in the void space.
    Notes
    -----
    The fourier transform approach utilizes the fact that the autocorrelation
    function is the inverse FT of the power spectrum density.
    For background read the Scipy fftpack docs and for a good explanation see:
    http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf
    """
    # Half lengths of the image.  NOTE(review): np.ceil acts on the integer
    # shape BEFORE the division, so odd dimensions floor here — confirm
    # whether ceil(shape/2) was intended.
    hls = (np.ceil(np.shape(im))/2).astype(int)
    # Fourier transform, shifted so the zero frequency sits at the center
    F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
    # Power spectrum; |F**2| equals |F|**2 for complex F
    P = np.absolute(F**2)
    # Autocorrelation is the inverse FT of the power spectrum
    autoc = np.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
    tpcf = _radial_profile(autoc, r_max=np.min(hls))
    return tpcf
def pore_size_distribution(im, bins=10, log=True):
    r"""
    Calculate a pore-size distribution based on the image produced by the
    ``porosimetry`` or ``local_thickness`` functions.
    Parameters
    ----------
    im : ND-array
        The array containing the sizes of the largest sphere that overlaps
        each voxel. Obtained from either ``porosimetry`` or
        ``local_thickness``.
    bins : scalar or array_like
        Either an array of bin sizes to use, or the number of bins that should
        be automatically generated that span the data range.
    log : boolean
        If ``True`` (default) the size data is converted to log (base-10)
        values before processing. This can help produce a smoother
        distribution when the sizes span several orders of magnitude.
    Returns
    -------
    A named-tuple containing several values:
        *R* or *logR* - radius, equivalent to ``bin_centers``
        *pdf* - probability density function
        *cdf* - cumulative density function
        *satn* - phase saturation in differential form. For the cumulative
        saturation, just use *cfd* which is already normalized to 1.
        *bin_centers* - the center point of each bin
        *bin_edges* - locations of bin divisions, including 1 more value than
        the number of bins
        *bin_widths* - useful for passing to the ``width`` argument of
        ``matplotlib.pyplot.bar``
    Notes
    -----
    (1) To ensure the returned values represent actual sizes be sure to scale
    the distance transform by the voxel size first (``dt *= voxel_size``)
    plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k')
    """
    im = im.flatten()
    # Only voxels inside the pore space carry a size value
    vals = im[im > 0]
    R_label = 'R'
    if log:
        # np.log10 replaces the removed scipy-namespace alias sp.log10
        vals = np.log10(vals)
        R_label = 'logR'
    h = _histogram(x=vals, bins=bins)
    psd = namedtuple('pore_size_distribution',
                     (R_label, 'pdf', 'cdf', 'satn',
                      'bin_centers', 'bin_edges', 'bin_widths'))
    return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
               h.bin_centers, h.bin_edges, h.bin_widths)
def _histogram(x, bins):
h = sp.histogram(x, bins=bins, density=True)
delta_x = h[1]
P = h[0]
temp = P*(delta_x[1:] - delta_x[:-1])
C = sp.cumsum(temp[-1::-1])[-1::-1]
S = P*(delta_x[1:] - delta_x[:-1])
bin_edges = delta_x
bin_widths = delta_x[1:] - delta_x[:-1]
bin_centers = (delta_x[1:] + delta_x[:-1])/2
psd = namedtuple('histogram', ('pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return psd(P, C, S, bin_centers, bin_edges, bin_widths)
def chord_counts(im):
    r"""
    Finds the length of each chord in the supplied image and returns a list
    of their individual sizes
    Parameters
    ----------
    im : ND-array
        An image containing chords drawn in the void space.
    Returns
    -------
    A 1D array with one element for each chord, containing its length.
    Notes
    ----
    The returned array can be passed to ``plt.hist`` to plot the histogram,
    or to ``np.histogram`` to get the histogram data directly. Another useful
    function is ``np.bincount`` which gives the number of chords of each
    length in a format suitable for ``plt.plot``.
    """
    labels, N = spim.label(im > 0)
    props = regionprops(labels)
    # filled_area of each labelled region is that chord's voxel count
    chord_lens = np.array([i.filled_area for i in props])
    return chord_lens
def chord_length_distribution(im, bins=25, log=False, voxel_size=1):
    r"""
    Determines the distribution of chord lengths in an image containing chords.
    Parameters
    ----------
    im : ND-image
        An image with chords drawn in the pore space, as produced by
        ``apply_chords`` or ``apply_chords_3d``.
    bins : scalar or array_like
        If a scalar is given it is interpreted as the number of bins to use,
        and if an array is given they are used as the bins directly.
    log : Boolean
        If true, the logarithm of the chord lengths will be used, which can
        make the data more clear.
    voxel_size : scalar
        NOTE(review): accepted but currently unused — the returned lengths
        are in voxel units; confirm whether scaling was intended here.
    Returns
    -------
    A tuple containing the ``chord_length_bins``, and four separate pieces of
    information: ``cumulative_chord_count`` and ``cumulative_chord_length``,
    as well as the ``differenial_chord_count`` and
    ``differential_chord_length``.
    """
    x = chord_counts(im)
    L_label = 'L'
    if log:
        # np.log10 replaces the removed scipy-namespace alias sp.log10
        x = np.log10(x)
        L_label = 'logL'
    h = _histogram(x, bins=bins)
    cld = namedtuple('chord_length_distribution',
                     (L_label, 'pdf', 'cdf', 'relfreq',
                      'bin_centers', 'bin_edges', 'bin_widths'))
    return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
               h.bin_centers, h.bin_edges, h.bin_widths)
|
""" ``django-structlog`` is a structured logging integration for ``Django`` project using ``structlog``.
"""
default_app_config = "django_structlog.apps.DjangoStructLogConfig"
name = "django_structlog"
VERSION = (1, 5, 0)
__version__ = ".".join(str(v) for v in VERSION)
|
from __future__ import division
import const
import numpy
import math
# Global model state shared by the training/prediction functions below.
prior_prob = []  # P(digit) for digits 0-9, filled by get_prior()
prob = []  # per-digit, per-pixel P(pixel=1 | digit), filled by get_prob()
count = []  # number of training samples per digit
count2 = []  # per-digit, per-pixel counts of "on" pixels
'''
This method is used to shuffle the training label and training image
'''
def randomize(randomList, data):
    """Reorder ``data`` according to the index sequence ``randomList``.

    Used to shuffle the training labels and training images with the same
    permutation so they stay aligned.
    """
    return [data[idx] for idx in randomList]
'''
This method is used to calculate the prior probability of digits
'''
def get_prior(data, trainingSize):
    """Fill the global ``count`` and ``prior_prob`` tables.

    ``count[d]`` becomes the number of training samples labelled ``d`` and
    ``prior_prob[d]`` the corresponding prior probability P(d).
    """
    global prior_prob
    global count
    # One slot per digit class 0-9
    for digit in range(10):
        count.append(0)
        prior_prob.append(numpy.float64(0))
    # Tally the label frequencies over the training set
    for idx in range(trainingSize):
        count[data[idx]] += 1
    for digit in range(10):
        prior_prob[digit] = numpy.float64(count[digit] / trainingSize)
'''
This method is used to extract features from images
'''
def get_feature(data):
    """Flatten a digit image into a binary feature vector.

    Each pixel becomes 1 if it is non-zero (inked) and 0 otherwise; rows are
    concatenated in scan order.
    """
    features = []
    for row in range(const.DigitHeight):
        for col in range(const.DigitWidth):
            features.append(1 if data[row][col] != 0 else 0)
    return numpy.asarray(features)
'''
This method is used to calculate the probability
'''
def get_prob(data, label, trainingSize):
    """Fill the global ``count2`` and ``prob`` tables.

    ``count2[d][p]`` counts how often pixel ``p`` is "on" in samples of
    digit ``d``; ``prob[d][p]`` is the smoothed conditional probability
    P(pixel p = 1 | digit d).
    """
    global prob
    global count2
    num_pixels = const.DigitHeight * const.DigitWidth
    for digit in range(10):
        count2.append(numpy.zeros(num_pixels))
        prob.append(numpy.zeros(num_pixels))
    # Accumulate per-class pixel activation counts
    for idx in range(trainingSize):
        feature = get_feature(data[idx])
        for pixel in range(len(feature)):
            if feature[pixel] == 1:
                count2[label[idx]][pixel] += 1
    # 0.01 additive smoothing keeps probabilities strictly positive
    for pixel in range(num_pixels):
        for digit in range(10):
            prob[digit][pixel] = numpy.float64((count2[digit][pixel] + 0.01) / (count[digit] + 0.01))
'''
The main method is called to train from training data
'''
def training(trainingSize):
    """Train the naive-Bayes model: shuffle the data, then build the prior
    and conditional probability tables."""
    global prior_prob
    # One random permutation applied to both images and labels
    permutation = numpy.random.choice(trainingSize, trainingSize, replace=False)
    shuffled_images = randomize(permutation, const.TrainingImages)
    shuffled_labels = randomize(permutation, const.TrainingLabels)
    get_prior(shuffled_labels, trainingSize)
    get_prob(shuffled_images, shuffled_labels, trainingSize)
'''
This method is used to predict the the certain digit
'''
def get_digit_class(features):
    """Classify one binary feature vector with naive Bayes.

    Returns the predicted digit as a string ('0'-'9'), the class with the
    highest log-posterior log P(d) + sum of per-pixel log-likelihoods.
    """
    global prob
    global prior_prob
    miss = 0.0000000000001
    log_likelihood = []
    for _ in range(10):
        log_likelihood.append(0)
    for idx in range(len(features)):
        if features[idx] == 1:
            for digit in range(10):
                log_likelihood[digit] += math.log(prob[digit][idx])
        else:
            # Clamp probabilities of exactly 1 so log(1 - p) stays finite
            for digit in range(10):
                if(prob[digit][idx] == 1):
                    prob[digit][idx] -= miss
            for digit in range(10):
                log_likelihood[digit] += math.log(1-prob[digit][idx])
    scores = {}
    for digit in range(10):
        scores[str(digit)] = math.log(prior_prob[digit]) + log_likelihood[digit]
    # max over the dict returns the key of the highest score; ties resolve
    # to the smallest digit because keys are inserted in order '0'..'9'
    return max(scores, key=scores.get)
'''
This method calculates and returns the predicted results
'''
def get_predicted_digit():
    """Classify every test image and return the list of predicted digits."""
    return [get_digit_class(get_feature(image)) for image in const.TestImage]
|
#!/usr/bin/env python
import numpy as np
import os, sys, logging, math
import matplotlib.pylab as plt
from PIL import Image, ImageFilter, ImageOps
class Histogaram:
    """Opens an image and plots its per-channel colour histogram.

    (Class name typo is kept for backward compatibility with callers.)
    Also computes the Euclidean distance between the greyscale histogram
    and the histogram of the equalized greyscale image.
    """
    image = None

    # constructor
    def __init__(self, input):
        """Open ``input`` and run the histogram pipeline.

        All errors are logged rather than raised, so construction never
        propagates an exception.
        """
        try:
            self.image = Image.open(input)
            self.processImage()
        # catch io exceptions
        except IOError as exception:
            logging.error(exception)
        # catch any other exceptions
        except Exception as exception:
            logging.error(exception)

    def processImage(self):
        """Compute histograms, compare them, and show the colour plot."""
        histogram1 = self.image.convert('L').histogram()
        # BUG FIX: PIL images have no ``histogram2`` method; the equalized
        # image's histogram is obtained with ``histogram()`` as well.
        histogram2 = ImageOps.equalize(self.image.convert('L')).histogram()
        distance = self.compareHistograms(histogram1, histogram2)
        # greyscale_image = self.image.convert('L')
        rgb_channels = self.image.split()
        # plt_image = self.plotImageHistogram(self.image)
        # plt_greyscale = self.plotGreyscaleHistogram(greyscale_image)
        plt_color = self.plotColorHistogram(rgb_channels)
        plt_color.show()
        # plt_greyscale.show()

    def compareHistograms(self, histogram1, histogram2):
        """Return the Euclidean distance between two 256-bin histograms."""
        sum_distance = 0
        for i in range(256):
            diff = histogram2[i] - histogram1[i]
            sum_distance += diff * diff
        distance = math.sqrt(sum_distance)
        return distance

    def plotImageHistogram(self, image):
        """Plot the raw (possibly multi-band, concatenated) histogram."""
        histogram = image.histogram()
        plt.figure()
        plt.title('Concatenated Histogaram')
        plt.xlabel('Bins')
        plt.ylabel('# of Pixels')
        plt.xlim([0, len(histogram) - 1])
        plt.plot(histogram)
        return plt

    def plotColorHistogram(self, channels):
        """Plot one 256-bin histogram line per colour channel."""
        plt.figure()
        plt.title('Color Histogaram')
        plt.xlabel('Bins')
        plt.ylabel('# of Pixels')
        plt.xlim([0, 255])
        for i, channel in enumerate(channels):
            grayscale_histogram = channel.histogram()
            plt.plot(grayscale_histogram)
        return plt

    def plotGreyscaleHistogram(self, image):
        """Plot the histogram of a greyscale image."""
        plt.figure()
        plt.title('Greyscale Histogaram')
        plt.xlabel('Bins')
        plt.ylabel('# of Pixels')
        plt.xlim([0, 255])
        grayscale_histogram = image.histogram()
        plt.plot(grayscale_histogram)
        return plt
|
#a Imports
from gjslib.math import vectors, matrix
#a c_set_of_lines
class c_set_of_lines(object):
    """Collection of 3D lines with a least-conflict meeting-point estimate.

    Each line is stored as (point, normalized direction).  The weighted
    average of all pairwise closest-approach points gives ``self.posn``.
    """
    def __init__(self):
        # List of (pt, drn) tuples; drn is always normalized
        self.lines = []
        pass
    def add_line(self, pt, drn):
        """Add a line through ``pt`` with (unnormalized) direction ``drn``."""
        drn = list(drn)
        drn = vectors.vector_normalize(drn)
        self.lines.append( (pt,drn) )
        pass
    def generate_meeting_points(self, too_close=0.0001):
        """Find the closest approach of every pair of lines and derive a
        weighted estimate of their common meeting point in ``self.posn``.

        ``too_close`` is passed to the pairwise solver; presumably a
        near-parallel tolerance — confirm against gjslib's
        closest_meeting_of_two_lines.
        """
        self.line_meetings = []
        self.weighted_points = []
        self.posn = None
        # Every unordered pair (i > j avoids duplicates and self-pairs)
        for i in range(len(self.lines)):
            (p0,d0) = self.lines[i]
            for j in range(len(self.lines)):
                if (i>j):
                    (p1,d1) = self.lines[j]
                    meet = vectors.closest_meeting_of_two_lines(p0,d0,p1,d1,too_close)
                    self.line_meetings.append(meet)
                    pass
                pass
            pass
        if len(self.line_meetings)==0:
            return
        posn = None
        total_weight = 0
        # Each meeting contributes the midpoint of its two closest points,
        # weighted down as its 'goodness' (mismatch) grows
        for (c0,c1,dist,goodness) in self.line_meetings:
            weight = 1/(5.0+goodness)
            if posn is None:
                posn = (0.0,0.0,0.0)
                pass
            posn = vectors.vector_add(posn, c0, scale=0.5*weight)
            posn = vectors.vector_add(posn, c1, scale=0.5*weight)
            total_weight += weight
            #print c0,c1,weight,total_weight,posn
            self.weighted_points.append((weight,c0,c1))
            pass
        #print posn, total_weight
        # Normalize the weighted sum into the final position estimate
        if posn is not None:
            self.posn = vectors.vector_add((0.0,0.0,0.0), posn, scale=1/total_weight)
            pass
        pass
#a Top level
if __name__=="__main__":
    # Smoke test: a line along z plus a skew line; their closest approach
    # should appear in line_meetings.
    c = c_set_of_lines()
    c.add_line( (0.0,0.0,0.0), (0.0,0.0,1.0) )
    c.add_line( (0.0,0.1,1.1), (1.0,0.0,1.0) )
    c.generate_meeting_points()
    # BUG FIX: 'print x' is a syntax error on Python 3; the parenthesized
    # form behaves identically on Python 2 for a single argument.
    print(c.line_meetings)
|
import socket,struct,pickle,logging,time
from Peer import Peer
# Protocol message-type identifiers, each packed as a single unsigned byte.
KEEP_ALIVE = struct.pack('!B',0)
POSITION = struct.pack('!B',1)
GET_PEERS = struct.pack('!B',2)
GET_STREAM = struct.pack('!B',3)
STOP = struct.pack('!B',4)
CLOSE = struct.pack('!B',5)
ERROR = struct.pack('!B',6)
HAVE = struct.pack('!B',7)
# Seconds of inactivity after which a peer connection is considered dead.
CONNECTION_TIMEOUT = 180
def encode(ip=None,port=None):
    """Pack an IP string and/or a port number into wire format.

    Returns the single packed value when exactly one of ``ip``/``port`` is
    supplied, otherwise a list of the packed values (empty if neither).
    """
    result = []
    if ip:
        # BUG FIX: referenced ``self.ip`` although this is a module-level
        # function; pack each dotted-quad octet as one byte of ``ip``.
        result.append(''.join([struct.pack('!B',int(x)) for x in ip.split('.')]))
    # BUG FIX: the original if/elif chain meant the function fell off the
    # end (returning None) whenever ip or port was actually supplied.
    if port:
        result.append(struct.pack('!H',port))
    if len(result) == 1:
        return result[0]
    return result
def decode(ip=None,port=None):
    """Unpack wire-format ip/port back into a dotted-quad string / int.

    Returns the single decoded value when exactly one of ``ip``/``port`` is
    supplied, otherwise a list of the decoded values (empty if neither).
    """
    result = []
    if ip:
        result.append('.'.join([str(x) for x in struct.unpack('!BBBB',ip)]))
    # BUG FIX: the original if/elif chain returned None whenever an
    # argument was supplied, and the port branch appended ``ip`` (None)
    # instead of the decoded port.
    if port:
        result.append(struct.unpack('!H',port)[0])
    if len(result) == 1:
        return result[0]
    return result
class TempPeer(Peer):
    """Short-lived peer wrapper used between TCP accept and handshake.

    Inherits read/buffer handling from ``Peer``; once the handshake is
    validated the connection is promoted via ``Server.accept`` into the
    appropriate stream container.
    """
    def __init__(self,ip,sock,Server):
        self.logger = logging.getLogger('tamchy.Server.TempPeer')
        self.socket = sock
        self.closed = False
        self.content_id = None
        # handshaked will be always False because we need to TempPeer's handle_read
        # to stop after process_handshake()
        self.handshaked = False
        self.Server = Server
        self.read_buffer = ''
        self.ip = ip
        # Placeholder until the real listening port arrives in the handshake
        self.port = 'TEMP'
        self.time = time.time()
    def process_handshake(self,msg):
        """Validate the handshake message and promote or drop the peer.

        Expected layout: 12-byte magic 'salamatsyzby' + 32-byte content id
        + 2-byte big-endian listening port.
        """
        if (msg[:12]).lower() == 'salamatsyzby':
            content_id = msg[12:44]
            #ip = ''.join([struct.pack('!B',int(x)) for x in self.ip.split('.')])
            #port = msg[44:46]
            ip = self.ip
            port = decode(port=msg[44:46])
            if content_id not in self.Server.streams:
                # try to send message, but we must close the connection anyway -> whether error or not
                try:
                    self.socket.send(struct.pack('!I',19) + ERROR + pickle.dumps('Invalid Content ID'))
                except:
                    pass
                self.logger.debug('Peer (%s) disconnected' % (self))
                return self.handle_close()
            else:
                # Everything is good with this peer => we must add this peer to the Container's peers list
                self.logger.debug('Peer (%s) successfully connected' % (self))
                self.Server.accept(self.socket,ip,port,content_id,self.read_buffer,self)
        else:
            # Wrong magic -> not one of ours; drop silently
            self.logger.debug('Peer (%s) disconnected' % (self))
            return self.handle_close()
    def handle_close(self):
        """Close the socket and mark this wrapper as dead."""
        self.socket.close()
        self.closed = True
#class TempPeer:
# def __init__(self,ip,sock,Server):
# self.logger = logging.getLogger('tamchy.Server.TempPeer')
# self.socket = sock
# self.closed = False
# self.content_id = None
# self.Server = Server
# self.read_buffer = ''
# self.ip = ip
# self.time = time.time()
#
# def fileno(self):
# try:
# s = self.socket.fileno()
# return s
# except:
# return self.handle_close()
#
# def handle_read(self):
# message = ''
# while True:
# try:
# m = self.socket.recv(8192)
# if not m:
# return self.handle_close()
# message += m
# except:
# break
#
# if not message:
# return self.handle_close()
#
# self.time = time.time()
#
# self.read_buffer += message
# length = self.read_buffer[:4]
#
# if len(length) < 4:
# # this is not entire message => wait for remaining part
# return
# length = struct.unpack('!I',length[:4])[0]
# if length > 32*1024:
# return self.handle_close()
# msg = self.read_buffer[4:4 + length]
# if len(msg) < length:
# # this is not entire message => wait for remaining part
# return
#
# self.read_buffer = self.read_buffer[4 + length:]
# #
# # Start of main logic to handle messages from peer
# #
# if (msg[:12]).lower() == 'salamatsyzby':
# content_id = msg[12:44]
# ip = ''.join([struct.pack('!B',int(x)) for x in self.ip.split('.')])
# port = msg[44:46]
# if content_id not in self.Server.streams:
# # try to send message, but we must close the connection anyway -> whether error or not
# try:
# self.socket.send(struct.pack('!I',19) + ERROR + pickle.dumps('Invalid Content ID'))
# except:
# pass
# self.logger.debug('Peer (%s) disconnected' % (self.ip))
# return self.handle_close()
# else:
# # Everything is good with this peer => we must add this peer to the Container's peers list
# self.logger.debug('Peer (%s) successfully connected' % (self.ip))
# self.Server.accept(self.socket,ip,port,content_id,self.read_buffer,self)
# else:
# self.logger.debug('Peer (%s) disconnected' % (self.ip))
# return self.handle_close()
#
# @property
# def timeout(self):
# if time.time() - self.time >= CONNECTION_TIMEOUT:
# return True
# return False
#
# def handle_write(self):
# pass
#
# def handle_close(self):
# self.socket.close()
# self.closed = True
class Server:
    """Base listening-socket wrapper.

    NOTE(review): the attributes used below (``self.socket``, ``self.logger``,
    ``self.C``) are never set here — ``__init__`` is empty, so subclasses are
    expected to supply them.  ``handle_read`` references ``self.C``, which
    ``MultiServer`` does not define; presumably only subclasses that override
    ``handle_read`` are used in practice — confirm.
    """
    def __init__(self):
        pass
    def fileno(self):
        # Expose the underlying socket's fd so the object can be select()ed
        return self.socket.fileno()
    def handle_read(self):
        """Accept one incoming connection and hand it to the container."""
        cl,addr = self.socket.accept()
        self.logger.debug('Got connection from new peer (%s)' % (addr[0]))
        if self.C.can_add_peer():
            self.C.prepare_peer(sock=cl)
        else:
            # At capacity: tell the peer and drop the connection
            cl.send(self.build_message('\x07',pickle.dumps('Reached Peers Limit')))
            cl.close()
            self.logger.debug('Rejected connection of new peer (%s)' % (addr[0]))
    def build_message(self,id,data=''):
        """Frame a message: 4-byte big-endian length, then id + payload."""
        length = struct.pack('!I',len(id+data))
        return length+id+data
    def close(self):
        #self.Reactor.close()
        self.socket.close()
class MultiServer(Server):
    """Listening server that accepts peers for multiple registered streams.

    Incoming connections are wrapped in ``TempPeer`` until their handshake
    identifies which registered stream container they belong to.
    """
    def __init__(self,port,PStorage,debug=False):
        self.logger = logging.getLogger('tamchy.Server')
        self.PStorage = PStorage
        self.work = True
        # Sentinel identity values so this object can sit in the same
        # peer-storage structures as real peers.
        self.ip = 'SERVER'
        self.port = port
        self.closed = False
        self.timeout = False
        self.content_id = 'SERVER'
        # In debug/test mode no real listening socket is created
        if not debug:
            self.socket = self.create_socket(port)
        # 'content_id':StreamContainer instance
        self.streams = {}
        self.logger.info('Server on port %s started' % (port))

    def create_socket(self,port):
        """Create a non-blocking listening socket bound to all interfaces."""
        sock = socket.socket()
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('',port))
        sock.setblocking(0)
        sock.listen(5)
        return sock

    def handle_read(self):
        """Accept a new connection and store it as a TempPeer until its
        handshake arrives."""
        cl,addr = self.socket.accept()
        self.logger.debug('Got connection from new peer (%s)' % (addr[0]))
        peer = TempPeer(addr[0],cl,self)
        self.PStorage.add(peer)

    def accept(self,sock,ip,port,content_id,buf,peer):
        """Promote a handshaken TempPeer into its stream container, or close
        it if the storage refuses new peers."""
        C = self.streams[content_id]
        if self.PStorage.can_add_peer(content_id):
            self.PStorage.remove(peer)
            C.prepare_peer(ip,port,sock=sock,buf=buf)
        else:
            peer.handle_close()

    def register_stream(self,container):
        """Make ``container`` reachable by its content_id in handshakes."""
        self.streams[container.content_id] = container
        self.logger.debug('Stream Container ({0}) registered'.format(container.content_id,))

    def unregister_stream(self,container):
        # BUG FIX: a bare ``except`` could mask unrelated errors (e.g. a
        # typo inside the try block); only a missing key is expected here.
        try:
            del self.streams[container.content_id]
            self.logger.debug('Stream Container ({0}) unregistered'.format(container.content_id,))
        except KeyError:
            pass

    def close(self):
        """Mark the server closed and release the listening socket."""
        self.closed = True
        self.socket.close()
# Testing
class sock:
    """Minimal in-memory socket stand-in for the tests below.

    Outgoing data accumulates in ``s_buf``; incoming messages are queued in
    ``r_buf`` and the literal string 'except' triggers an exception on recv,
    emulating a would-block error.
    """
    def __init__(self):
        self.closed = False
        self.buffer = []
        self.s_buf = ''
        self.r_buf = []
    def close(self):
        self.closed = True
    def fileno(self):
        # Fixed dummy descriptor
        return 0
    def accept(self):
        # Pretend a fresh client connected from localhost
        return (sock(),('127.0.0.1',7654))
    def getpeername(self):
        return ('0.0.0.0',123)
    def send(self,data):
        # Record everything "sent" and report full delivery
        self.s_buf = self.s_buf + data
        return len(data)
    def recv(self,num):
        head = self.r_buf.pop(0)
        if head == 'except':
            raise Exception
        return head
class C:
    """Stream-container stand-in recording which peers were prepared."""
    def __init__(self,c_id):
        # Toggle to make can_add_peer() refuse peers
        self.can = True
        self.prepared = list()
        self.content_id = c_id
    def can_add_peer(self):
        return self.can
    def prepare_peer(self,ip,port,sock=None,buf=''):
        # Only the (ip, port) pair is recorded; sock/buf are ignored
        self.prepared.append((ip,port))
class PeeR:
    """Peer stand-in that only tracks whether it has been closed."""
    def __init__(self):
        self.closed = False
    def handle_close(self):
        self.closed = True
class PStorage:
    """Peer-storage stand-in with a togglable capacity flag."""
    def __init__(self):
        self.peers = []
        # Toggle to make can_add_peer() refuse peers
        self.can = True
    def add(self,peer):
        self.peers.append(peer)
    def can_add_peer(self,content_id):
        return self.can
    def remove(self,peer):
        # BUG FIX: a bare ``except`` hid every possible error; only the
        # "peer not present" ValueError from list.remove is expected.
        try:
            self.peers.remove(peer)
        except ValueError:
            pass
class SeRver:
    """Server stand-in recording every socket passed to accept()."""
    def __init__(self):
        self.streams = {}
        self.accepted = []
    def remove(self,peer):
        # Peers are not tracked here, so removal is a no-op
        pass
    def accept(self,sock,ip,port,content_id,buf,peer):
        self.accepted.append(sock)
def test_server():
    """Exercise MultiServer stream (un)registration and peer acceptance."""
    ps = PStorage()
    # debug=True -> no real listening socket is created
    s = MultiServer(7668,ps,debug=True)
    c1 = C('content_id1')
    c2 = C('content_id2')
    c3 = C('content_id3')
    c4 = C('content_id4')
    s.register_stream(c1)
    s.register_stream(c2)
    s.register_stream(c3)
    s.register_stream(c4)
    assert len(s.streams) == 4
    s.unregister_stream(c3)
    assert len(s.streams) == 3
    assert 'content_id2' in s.streams
    assert 'content_id3' not in s.streams
    s.unregister_stream(c1)
    assert len(s.streams) == 2
    assert 'content_id1' not in s.streams
    # test_accept
    sct = sock()
    assert not c2.prepared
    s.accept(sct,'127.0.0.1',7665,'content_id2','',PeeR())
    assert c2.prepared
    assert c2.prepared[0] == ('127.0.0.1',7665)
    p = PeeR()
    s.accept(sct,'127.0.0.1',7667,'content_id2','',p)
    s.accept(sct,'127.0.0.1',7668,'content_id4','',PeeR())
    # Accepted peers are prepared in their containers, not closed
    assert not p.closed
    assert c2.prepared[1] == ('127.0.0.1',7667)
    assert c4.prepared[0] == ('127.0.0.1',7668)
    p1 = PeeR()
    p2 = PeeR()
    # Once storage refuses new peers, accept() must close them instead
    ps.can = False
    assert len(c2.prepared) == 2
    assert not p.closed
    s.accept(sct,'127.0.0.1',7669,'content_id2','',p1)
    s.accept(sct,'127.0.0.1',7678,'content_id2','',p2)
    assert len(c2.prepared) == 2
    assert p1.closed
    assert p2.closed
def test_temp_peer():
    """Exercise TempPeer buffering, framing limits and handshake handling.

    NOTE(review): relies on Python-2 str semantics (packed struct data
    concatenated with text); handle_read itself is inherited from Peer.
    """
    server = SeRver()
    s = sock()
    p = TempPeer('127.0.0.1',s,server)
    # An immediate recv error with no data closes the connection
    s.r_buf.append('except')
    p.handle_read()
    assert s.closed
    s.closed = False
    # A partial frame (< 4 length bytes) is buffered, not acted upon
    s.r_buf = ['abc', 'except']
    p.handle_read()
    assert p.read_buffer == 'abc'
    s.closed = False
    # Empty recv means the remote side hung up
    s.r_buf = ['', 'except']
    p.handle_read()
    assert s.closed
    s.closed = False
    # Frames larger than 32 KiB are rejected
    s.r_buf = [struct.pack('!I',33*1024),'except']
    p.handle_read()
    assert s.closed
    s.closed = False
    p.read_buffer = ''
    # Declared length 4 but only 3 payload bytes: wait for the rest
    s.r_buf = [struct.pack('!I',4)+'abc','except']
    p.handle_read()
    assert len(p.read_buffer) == 7
    assert not s.closed
    # Complete frame with wrong magic -> disconnect
    s.r_buf = [struct.pack('!I',4)+'abcd','except']
    p.handle_read()
    assert s.closed
    s.closed = False
    p.send_buffer = ''
    p = TempPeer('127.0.0.1',s,server)
    # Handshake magic but truncated body -> disconnect
    s.r_buf = [struct.pack('!I',23)+'Salamatsyzby'+'_content_id','except']
    p.handle_read()
    assert s.closed
    server.streams = {'content_id1234567890123456789014':'StreamContainer'}
    s = sock()
    p = TempPeer('127.0.0.1',s,server)
    # Valid handshake for an unknown content id -> error reply + close
    s.r_buf = [struct.pack('!I',68)+'Salamatsyzby' + 'content_id1234567890123456789012' + 'peeeeeerrrrrr_iiiidd' + '1234','except']
    p.handle_read()
    assert s.closed
    assert s.s_buf[7:25] == 'Invalid Content ID'
    s = sock()
    p = TempPeer('127.0.0.1',s,server)
    # Unknown content id again, handshake split across two reads
    s.r_buf = [struct.pack('!I',68)+'Salamatsyzby' + 'content_id1234567890123456789012' + 'peeeeeerrrrrr_iiiidd12' + '34',\
    struct.pack('!I',2) ,'except']
    assert not s.s_buf
    p.handle_read()
    assert s.closed
    assert s.s_buf
    assert not server.accepted
    s = sock()
    p = TempPeer('127.0.0.1',s,server)
    # Known content id -> peer is promoted via Server.accept, not closed
    s.r_buf = [struct.pack('!I',68)+'Salamatsyzby' + 'content_id1234567890123456789014' + 'peeeeeerrrrrr_iiiidd12' + '45',\
    struct.pack('!I',2) ,'except']
    p.handle_read()
    assert not s.closed
    assert not s.s_buf
    assert server.accepted == [s]
|
# Generated by Django 3.2.8 on 2021-10-21 07:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``email_address_visibility`` to UserProfile and RealmUserDefault.

    NOTE(review): default=1 presumably corresponds to the most-visible
    setting — confirm against the model's visibility constants.
    """

    dependencies = [
        ("zerver", "0425_realm_move_messages_between_streams_limit_seconds"),
    ]

    operations = [
        # Realm-level default applied to newly created users
        migrations.AddField(
            model_name="realmuserdefault",
            name="email_address_visibility",
            field=models.PositiveSmallIntegerField(default=1),
        ),
        # Per-user setting
        migrations.AddField(
            model_name="userprofile",
            name="email_address_visibility",
            field=models.PositiveSmallIntegerField(default=1),
        ),
    ]
|
# -*- coding: utf-8 -*-
import scrapy
class LianjiaSpider(scrapy.Spider):
    """Spider for lianjia.com rental listings.

    Only ordinary rentals are handled for now; serviced-apartment listings
    are ignored (TODO).
    """
    name = 'lianjia'
    allowed_domains = ['lianjia.com']
    # start_urls = ['https://sz.lianjia.com/zufang/']
    start_urls = []
    # City codes, see https://www.lianjia.com/city/ — Guangdong cities are
    # pinyin abbreviations; only Guangzhou ('gz') is enabled for testing.
    # locationCode = ['dg', 'fs', 'gz', 'hui', 'jiangmen', 'qy', 'sz',
    #                 'zh', 'zhangjiang', 'zs']
    locationCode = ['gz']
    # First {} is the city code, second {} the page number (100 pages max).
    headUrl = 'https://{}.lianjia.com/zufang/pg{}/#contentList'

    def close(spider, reason):
        print("链家爬虫跑完了。")

    # Override the crawl entry point so the URL list is built here and each
    # request keeps dont_filter=False (duplicates are filtered).
    def start_requests(self):
        '''
        Builds the listing-page URLs for every configured city code.
        Guangdong city codes for reference:
        Dongguan dg, Foshan fs, Guangzhou gz, Huizhou hz, Jiangmen jm,
        Qingyuan qy, Shenzhen sz, Zhuhai zh, Zhanjiang zj, Zhongshan zs.
        :return:
        '''
        for city in self.locationCode:
            for page_no in range(1, 101):  # pages 1..100
                self.start_urls.append(self.headUrl.format(city,page_no))
        for url in self.start_urls:
            print(url)
            yield scrapy.Request(url, dont_filter=False)
        print()

    def parse(self, response):
        '''
        Extracts each listing's detail-page URL from a list page and hands
        it to detailParse.
        :param response:
        :return:
        '''
        pass

    def detailParse(self, response):
        pass
|
'''
https://leetcode.com/problems/friend-circles/description/
There are N students in a class. Some of them are friends,
while some are not. Their friendship is transitive in nature.
For example, if A is a direct friend of B, and B is a direct friend of C,
then A is an indirect friend of C. And we defined a friend circle is
a group of students who are direct or indirect friends.
Given a N*N matrix M representing the friend relationship
between students in the class. If M[i][j] = 1, then the ith and jth students are direct friends with each other,
otherwise not. And you have to output the total number of friend circles among all the students.
'''
class Solution(object):
    def findCircleNum(self, M):
        """Return the number of friend circles (connected components) in the
        N x N adjacency matrix ``M`` (M[i][j] != 0 means direct friends).

        Fixes over the original implementation:
        * ``S = range(N)`` followed by ``S.remove(k)`` raises AttributeError
          on Python 3 (range objects are immutable) -- a set is used instead.
        * The recursive DFS could exceed the recursion limit for large N;
          an explicit stack avoids that.
        * Set membership makes each neighbour check O(1) instead of O(N).
        """
        n = len(M)
        unvisited = set(range(n))
        circles = 0
        while unvisited:
            # Start a new circle from any unvisited student and flood-fill it.
            stack = [unvisited.pop()]
            circles += 1
            while stack:
                k = stack.pop()
                for i in range(n):
                    if M[k][i] != 0 and i in unvisited:
                        unvisited.remove(i)
                        stack.append(i)
        return circles
# CMS Monte Carlo sample tables (13 TeV, CUETP8M1 tune).
# NOTE(review): cross sections below are presumably in pb -- confirm against
# the analysis documentation; values after '#' look like older estimates.

# Primary samples: pT-binned plain QCD, Drell-Yan, and W+jets.
datasetList = [
'QCD_Pt_15to30_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_30to50_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_50to80_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_120to170_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_170to300_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_300to470_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_470to600_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_600to800_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_800to1000_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1000to1400_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1400to1800_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1800to2400_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_2400to3200_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_3200toInf_TuneCUETP8M1_13TeV_pythia8',
'DYToLL_M_1_TuneCUETP8M1_13TeV_pythia8',
'WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8',
]
# Samples containing negative generator weights (currently none).
datasetNegWeightList=[
]
# Samples used for the anti-muon selection -- presumably a control region;
# TODO confirm against the selection code.
datasetAntiMuList= [
'QCD_Pt_15to30_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_30to50_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_50to80_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_120to170_TuneCUETP8M1_13TeV_pythia8',
]
# Samples used for the anti-electron/muon selection (plain + mu-enriched QCD).
datasetAntiEMList= [
'QCD_Pt_15to30_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_30to50_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_50to80_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt_120to170_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-15to20_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-20to30_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-30to50_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-50to80_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-80to120_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-120to170_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
]
# Electromagnetic-enriched QCD samples.
datasetEMEnrichedList = [
'QCD_Pt-15to20_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-20to30_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-30to50_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-50to80_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-80to120_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-120to170_EMEnriched_TuneCUETP8M1_13TeV_pythia8',
]
# Muon-enriched QCD samples.
datasetMuEnrichedList = [
'QCD_Pt-15to20_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-20to30_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-30to50_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-50to80_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-80to120_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
'QCD_Pt-120to170_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8',
]
# Cross section per dataset. Enriched-sample entries are the inclusive cross
# section multiplied by the enrichment filter efficiency (see comments below).
xsectionDatasets ={
'QCD_Pt_15to30_TuneCUETP8M1_13TeV_pythia8':1837410000.,#2237000000.,
'QCD_Pt_30to50_TuneCUETP8M1_13TeV_pythia8':140932000.,#161500000.,
'QCD_Pt_50to80_TuneCUETP8M1_13TeV_pythia8':19204300.,#22110000.,
'QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8':2762530.,#3000114.3,
'QCD_Pt_120to170_TuneCUETP8M1_13TeV_pythia8':471100.,#493200.,
'QCD_Pt_170to300_TuneCUETP8M1_13TeV_pythia8':117276.,#120300.,
'QCD_Pt_300to470_TuneCUETP8M1_13TeV_pythia8':7823.,#7475.,
'QCD_Pt_470to600_TuneCUETP8M1_13TeV_pythia8':648.2,#587.1,
#'QCD_Pt_600to800_TuneCUETP8M1_13TeV_pythia8':186.9,#167.,
#'QCD_Pt_800to1000_TuneCUETP8M1_13TeV_pythia8':32.293,#28.25,
#'QCD_Pt_1000to1400_TuneCUETP8M1_13TeV_pythia8':9.4183,#8.195,
#'QCD_Pt_1400to1800_TuneCUETP8M1_13TeV_pythia8':0.84265,#0.7346,
#'QCD_Pt_1800to2400_TuneCUETP8M1_13TeV_pythia8':0.114943,#0.1091,
#'QCD_Pt_2400to3200_TuneCUETP8M1_13TeV_pythia8':0.00682981,#0.0,
#'QCD_Pt_3200toInf_TuneCUETP8M1_13TeV_pythia8':0.000165445,#0.0,
#'QCD_Pt_800to1000_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1000to1400_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1400to1800_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_1800to2400_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_2400to3200_TuneCUETP8M1_13TeV_pythia8',
#'QCD_Pt_3200toInf_TuneCUETP8M1_13TeV_pythia8',
## EM fraction evaluated using fraction "(!HLT_BCToEFilter_v1 && HLT_EmFilter_v1)" in plain QCD sample
'QCD_Pt-15to20_EMEnriched_TuneCUETP8M1_13TeV_pythia8':1279000000.0*0.0018,
'QCD_Pt-20to30_EMEnriched_TuneCUETP8M1_13TeV_pythia8':557600000.0*0.0096,
'QCD_Pt-30to50_EMEnriched_TuneCUETP8M1_13TeV_pythia8':136000000.0*0.073,
'QCD_Pt-50to80_EMEnriched_TuneCUETP8M1_13TeV_pythia8':19800000.0*0.146,
'QCD_Pt-80to120_EMEnriched_TuneCUETP8M1_13TeV_pythia8':2800000.0*0.125,
'QCD_Pt-120to170_EMEnriched_TuneCUETP8M1_13TeV_pythia8':477000.0*0.132,
## Mu fraction evaluated using fraction "MCMu3" in plain QCD sample
'QCD_Pt-15to20_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':1279000000.0*0.003,
'QCD_Pt-20to30_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':557600000.0*0.0053,
'QCD_Pt-30to50_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':136000000.0*0.01182,
'QCD_Pt-50to80_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':19800000.0*0.02276,
'QCD_Pt-80to120_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':2800000.0*0.03844,
'QCD_Pt-120to170_MuEnrichedPt5_TuneCUETP8M1_13TeV_pythia8':477000.0*0.05362,
'WJetsToLNu_TuneCUETP8M1_13TeV-madgraphMLM-pythia8':50700.,#16000.,
'DYToLL_M_1_TuneCUETP8M1_13TeV_pythia8':20000,#about 6960. x3
}
|
"""
This is to plot the TS from the EDGAR six experiments
data inputs are forty years of monthly TS
first step is to process the monthly TS into monthly mean
second step is to process annual mean of monthly mean
"""
import site
import os
import numpy as np
import netCDF4 as nc4
from scipy import stats
import scipy.io as sio
import math
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d as interp2d
from scipy.stats import mannwhitneyu as man_test
from scipy.stats import ttest_ind as student_test
# Make the project's shared ``lib`` package (two directories up from this
# script) importable before the ``from lib import *`` below.
lib_path = os.path.join(
	os.path.realpath(
		os.path.dirname(__file__)
	),
	os.path.pardir,os.path.pardir,
)
site.addsitedir(lib_path)
from lib import *
def AreaWeight(lon1,lon2,lat1,lat2):
	'''
	Return the Earth-surface area (m^2) of the cell(s) bounded by longitudes
	lon1/lon2 and latitudes lat1/lat2, all in degrees.

	Accepts scalars or numpy arrays (broadcast elementwise). The original
	docstring wrongly said this "calculates the earth radius".
	'''
	radius = 6371000.0  # mean Earth radius in metres
	area = (math.pi/180)*np.power(radius,2)*np.abs(lon1-lon2)*\
	(np.abs(np.sin(np.radians(lat1))-np.sin(np.radians(lat2))))
	# print np.nansum(np.nansum(area,axis=1),axis=0)
	return area
def box_clip(lon_s,lon_e,lat_s,lat_e,lon,lat,mask):
	"""
	Zero out ``mask`` outside the box [lon_s, lon_e] x [lat_s, lat_e].

	The box edges are snapped to the nearest entries of the 1-D ``lon`` and
	``lat`` coordinate arrays. Note the upper-edge column/row itself is also
	zeroed (``mask[:, colum_e:] = 0``), matching the original behaviour.
	Returns the modified mask (modified in place).
	"""
	lon = np.array(lon)
	lat = np.array(lat)
	# np.argmin returns the first index of the minimum, exactly like the
	# original O(n^2) list-comprehension search, but in one vectorized pass.
	colum_s = int(np.argmin(np.abs(lon-lon_s)))
	colum_e = int(np.argmin(np.abs(lon-lon_e)))
	row_s = int(np.argmin(np.abs(lat-lat_s)))
	row_e = int(np.argmin(np.abs(lat-lat_e)))
	# Ensure start <= end regardless of the order the box edges were given.
	if colum_s > colum_e:
		colum_s, colum_e = colum_e, colum_s
	if row_s > row_e:
		row_s, row_e = row_e, row_s
	mask[:,:colum_s] =0; mask[:,colum_e:] =0
	mask[:row_s,:] =0; mask[row_e:,:] =0
	return mask
def mask_weight(region_key,lon,lat,reverse=False):
	"""
	Build an area-weighted region mask on the (lon, lat) grid.

	Loads the 720x360 country/ocean mask .mat file, selects either a named
	country mask or a lat/lon box, interpolates it onto the requested grid,
	and multiplies by each cell's area. With reverse=True the mask is
	inverted before weighting.

	NOTE(review): ``mask_weighted`` (normalized so weights sum to 1) is
	computed but the *un-normalized* ``mask`` is returned -- looks like a
	bug; confirm which one callers expect.
	"""
	lon_res = lon[1] - lon[0];lat_res = lat[1] - lat[0];
	lons,lats = np.meshgrid(lon,lat)
	# Per-cell area on the target grid (m^2).
	area = AreaWeight(lons,lons+lon_res,lats,lats+lat_res)
	##OCEAN_MASKS FOR COUNTRIES
	# Hard-coded absolute path -- only works on the original author's system.
	ocean_mask = sio.loadmat('/home/s1667168/coding/python/external_data/Euro_USA_AUS_BRICS_STA_720_360.mat')
	lon_mask = ocean_mask['lon'][0,:];
	lat_mask = ocean_mask['lat'][0,:];
	# Box regions defined as [lon_min, lon_max, lat_min, lat_max].
	box_region_dic={'All':[0,360,-90,90],'ASIA':[65,145,5,45],'US':[240,290,30,50],'ARCTIC':[0,360,60,90],'TROPICS':[0,360,-28,28],'EUROPE':[0,40,30,70],}
	if (region_key == 'USA' or region_key == 'Europe' or region_key == 'India' or region_key == 'China' or region_key == 'GloLand'):
		# Named country/land masks stored directly in the .mat file.
		mask= ocean_mask[region_key][:]
	elif  region_key in box_region_dic:
		# Start from the global mask and clip it to the box.
		mask= ocean_mask['All'][:]
		box = box_region_dic[region_key]
		mask = box_clip(box[0],box[1],box[2],box[3],lon_mask,lat_mask,mask)
	else:
		# Python-2 print statement: this whole script is Python 2.
		print "error region name"
	# Binarize, regrid to the target resolution, and re-binarize.
	mask[np.isnan(mask)]=0;	mask[mask>0]=1;
	f = interp2d(lon_mask, lat_mask, mask,kind='linear'); mask = f(lon, lat);
	mask[mask >= 1] = 1;mask[mask < 1] = 0;
	# weight each grid cell by its area weight against the total area
	if reverse:
		mask=1-mask
	mask[mask==0] = np.nan
	mask=np.multiply(mask,area);
	mask_weighted = np.divide(mask,np.nansum(np.nansum(mask,axis=1),axis=0))
	return mask
def spatial_figure(axs,data,lons,lats,colormap,colorbar_min,colorbar_max,tb_lef=True,tb_bot=True): #c_bad,c_under,c_over,c_number=20,
	"""
	Draw one cylindrical-projection world map of ``data`` on axes ``axs``.

	tb_lef / tb_bot toggle the latitude / longitude tick labels.
	Returns the pcolormesh handle so the caller can attach a colorbar.
	NOTE(review): Basemap and discrete_cmap are presumably provided by the
	``from lib import *`` at the top of the file -- confirm.
	"""
	# plt.imshow(p_value);plt.show()
	# Shift longitudes from [0, 360) to [-180, 180) in place.
	lons[lons>180]-=360;
	# calculate the origin of the map
	lon_0 = lons.mean();
	lat_0 = lats.mean();
	lon_b = np.min(lons); lon_e = np.max(lons)
	lat_b = np.min(lats); lat_e = np.max(lats)
	lon_bin = 60; lat_bin = 30
	map = Basemap(lat_0=0, lon_0=0,llcrnrlon=lon_b,llcrnrlat=lat_b,urcrnrlon=lon_e,urcrnrlat=lat_e,ax=axs,projection='cyl')
	lon, lat = np.meshgrid(lons, lats)
	xi, yi = map(lon, lat)
	# s = map.pcolor(xi, yi, data)
	if tb_lef:
		map.drawparallels(np.arange(round(lat_b,0)-lat_bin, round(lat_e,0)+lat_bin, lat_bin), labels=[1,0,0,0],linewidth=0.0,fontsize=8)
	if tb_bot:
		map.drawmeridians(np.arange(round(lon_b,0), round(lon_e,0)+lon_bin, lon_bin), labels=[0,0,0,1],linewidth=0.0,fontsize=8)
	# Add Coastlines, States, and Country Boundaries
	map.drawcoastlines(); #map.drawcountries() #map.drawstates(); #
	# Hide NaN cells so they render with the "bad" colour (white).
	masked_obj = np.ma.masked_where(np.isnan(data), data)
	# masked_obj = maskoceans(lon,lat,masked_obj)
	cmap = discrete_cmap(10,colormap)
	cmap.set_bad([1,1,1],alpha = 1.0); #cmap.set_over('k'); cmap.set_under('darkblue');
	colormesh = map.pcolormesh(xi, yi, masked_obj,cmap=cmap,vmin=colorbar_min, vmax=colorbar_max,latlon=True)
	return colormesh
def data_readin(variable,exp):
	"""
	Read monthly CESM output for ``variable`` under experiment configuration
	``exp`` ('full_chem' or 'FullCp') and return
	(lon, lat, Edg1970, EdgEne, EdgRef, EdgTech) as 30-year stacks of
	annual means on the 192x288 model grid.
	"""
	def day2datetime(scenario,days):
		"""
		# convert days from a reference into int datetime
		# do not take leap years into account
		"""
		# One yyyymmdd integer per input day count.
		date_int = np.empty((len(days)));date_int[:]=np.nan
		# Each scenario's time axis counts days from a different model start year.
		if scenario.startswith('F_'):
			start_year =2000
		elif scenario =='EdgRef70AP': start_year =1990
		elif scenario =='EdgTechP': start_year =2180
		elif scenario =='EdgRefP' or scenario =='EdgEneP' or scenario =='Edg1970': start_year =2000
		elif scenario =='T1970C':
			start_year =1970
		else:
			start_year =2010
		start =(start_year*365)
		ith=0
		for iday in days:
			# 365-day (no-leap) calendar lookup tables.
			month_days =np.array([31,28,31,30,31,30,31,31,30,31,30,31])
			calendar_days = np.array([0,31,59,90,120,151,181,212,243,273,304,334,365])
			total_days = int(iday) + start;
			year = total_days//365;
			remainder = total_days%365
			# Day 0 of a year maps back to the previous year's Dec 31.
			if remainder ==0: year=year-1;month=12;day=31
			else:
				month = 1+[layer for layer in range(len(calendar_days)) if calendar_days[layer]< remainder and calendar_days[layer+1]>=remainder][0]
				day = int(remainder - calendar_days[month-1])
				if day == 0: day = month_days[month-1]
			date_int[ith] = year*10000+month*100+day
			ith=ith+1
		return date_int.astype(int)
	def mon_mean2annual_mean(scenario,time,data):
		# 30 years x 192 lat x 288 lon of annual means, NaN where missing.
		annual_mean=np.empty((30,192,288));annual_mean[:]=np.nan
		calendar_day = np.array([31,28,31,30,31,30,31,31,30,31,30,31])
		# Pick the scenario's 30-year analysis window (model years).
		if scenario.startswith('F_') :
			year_series = range(2000,2030)
		elif scenario=='EdgRef70AP':
			year_series = range(2020,2050)
		elif scenario=='EdgTechP':
			year_series = range(2210,2240)
		elif scenario =='EdgRefP' or scenario =='EdgEneP' or scenario =='Edg1970':
			year_series = range(2030,2060)
		elif scenario=='T1970RCP':
			year_series = range(2020,2050)
		elif scenario=='EdgEne':
			year_series = range(2200,2230)
		elif scenario=='Edg70GO':
			year_series = range(2070,2100)
		else:
			year_series = range(2130,2160)
		for iyear in year_series:
			# print scenario, iyear
			# layer_b: index of January of iyear (or the first record if the
			# series starts after January of the first year).
			if (iyear == year_series[0] and time[0]//100 >= year_series[0] *100+1):
				layer_b=0
			else:
				layer_b = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+1][0]  #June01
			# layer_e: index of December of iyear; -2 when the final year is
			# truncated. NOTE(review): -2 drops the last record -- confirm intended.
			if (iyear == year_series[-1] and time[-1]//100 <= year_series[-1] *100+12):
				layer_e=-2
			else:
				layer_e = [layer for layer in range(len(time)) if time[layer]//100 == iyear*100+12][0]  #August 31
			data_cache = data[layer_b:layer_e+1,:,:]
			# NOTE(review): scipy.stats.nanmean was removed in SciPy >= 0.17;
			# np.nanmean is the modern equivalent (this script targets old SciPy).
			annual_mean[iyear-year_series[0],:,:] = stats.nanmean(data_cache,axis=0)
		return annual_mean
	def data_netcdf(exp,scenario,variable,region_key='All'):
		# Read one scenario's monthly netCDF files and reduce to annual means.
		input_path ='/exports/csce/datastore/geos/users/s1667168/CESM_EDGAR/ModelOutput/'+exp+'/'
		if variable == 'precip':
			# Total precipitation = convective (PRECC) + large-scale (PRECL).
			# The *24*60*60*1000 factor presumably converts m/s to mm/day -- TODO confirm.
			var_path = input_path+scenario+'/mon/atm/'+scenario+'.atm.mon.PRECC.nc'
			nc_fid = nc4.Dataset(var_path,mode='r')
			lat = nc_fid.variables['lat'][:]
			lon = nc_fid.variables['lon'][:]
			days = nc_fid.variables['time'][:]; time = day2datetime(scenario,days);#print time
			PRECC = (nc_fid.variables['PRECC'][:])*24*60*60*1000
			nc_fid.close()
			var_path = input_path+scenario+'/mon/atm/'+scenario+'.atm.mon.PRECL.nc'
			nc_fid = nc4.Dataset(var_path,mode='r')
			days = nc_fid.variables['time'][:]; time = day2datetime(scenario,days);#print time
			PRECL = (nc_fid.variables['PRECL'][:])*24*60*60*1000
			nc_fid.close()
			data = PRECC+PRECL
		else:
			var_path = input_path+scenario+'/mon/atm/'+scenario+'.atm.mon.'+variable+'.nc'
			# print var_path
			nc_fid = nc4.Dataset(var_path,mode='r')
			lat = nc_fid.variables['lat'][:]
			lon = nc_fid.variables['lon'][:]
			days = nc_fid.variables['time'][:]; time = day2datetime(scenario,days);#print time
			data = nc_fid.variables[variable][:]
			nc_fid.close()
			# Unit conversions matching the colorbar labels used later:
			# TS K->degC, CLDTOT fraction->%, TGCLDLWP x1000 (plotted as g m-2).
			if variable == "TS": data=data-273.15
			elif variable == 'CLDTOT':data=data*100
			elif variable == 'TGCLDLWP':data=data*10**(3)
		var40map = mon_mean2annual_mean(scenario,time,data)
		return lon,lat,var40map
	# Map the requested experiment to its four component scenarios.
	if exp=='full_chem':
		lon,lat,EdgRef = data_netcdf(exp,'EdgRefP',variable);
		_,_,Edg1970 = data_netcdf(exp,'Edg1970',variable);
		_,_,EdgEne = data_netcdf(exp,'EdgEneP',variable);
		_,_,EdgTech = data_netcdf(exp,'EdgTechP',variable);
	elif exp=='FullCp':
		lon,lat,EdgRef = data_netcdf(exp,'EdgRef',variable)
		_,_,Edg1970 = data_netcdf(exp,'T1970RCP',variable)
		_,_,EdgEne = data_netcdf(exp,'EdgEne',variable)
		_,_,EdgTech = data_netcdf(exp,'EdgTech',variable)
	return lon,lat,Edg1970,EdgEne,EdgRef,EdgTech
def spa_pat_reg_mean(variable):
	"""
	Compute spatial-pattern differences of the EDGAR scenarios relative to
	EdgRef, with significance masks, for both model configurations.

	Returns (lon, lat, CAMchem, CAM, Diff) where CAMchem / CAM stack
	(BEoA, Ene, Tech, BEoA_sig, Ene_sig, Tech_sig) along axis 0 and
	Diff = CAM - CAMchem.
	"""
	def mannwhitneyu_test(vairable1,variable2):
		# Grid-point-wise Mann-Whitney U test on the 30-year samples;
		# returns 1 where p <= 0.05 and NaN elsewhere.
		p_threshold=0.05
		size = np.array([np.shape(variable2)[1],np.shape(variable2)[2]]);
		p_value = np.empty((size[0],size[1]));p_value[:]=np.nan
		# from scipy.stats import ttest_ind as test
		for x in range(size[0]):
			for y in range(size[1]):
				cache1 = vairable1[:,x,y]
				cache2 = variable2[:,x,y]
				# man_test raises when both samples are identical, so skip.
				if (cache2==cache1).all(): p_value[x,y] = np.nan
				else: _,p_value[x,y] = man_test(cache1,cache2);
		p_value[p_value>p_threshold]=np.nan;p_value[p_value<=p_threshold]=1
		return p_value
	####calculate the difference and their significance
	def diff_sig(vairable1,variable2):
		# Time-mean difference plus its significance mask.
		dif = np.nanmean(vairable1,axis=0)-np.nanmean(variable2,axis=0)
		sig = mannwhitneyu_test(vairable1, variable2)
		# sig_dif = np.multiply(dif,sig)
		return dif,sig
	# Fully coupled total response (CAM5-Chem).
	lon,lat,Edg1970,EdgEne,EdgRef,EdgTech = data_readin(variable,exp='full_chem');
	BEoA,BEoA_s = diff_sig(EdgRef,Edg1970)
	Ene,Ene_s = diff_sig(EdgRef, EdgEne)
	Tech,Tech_s = diff_sig(EdgRef, EdgTech)
	CAMchem = np.stack((BEoA,Ene,Tech,BEoA_s,Ene_s,Tech_s),axis=0)
	# Local response with fixed dynamics (CAM5).
	# BUG FIX: the original loaded this data into *_F variables and then
	# recomputed the diffs from the stale 'full_chem' arrays (the *_F names
	# were never used). Use the freshly loaded FullCp fields consistently.
	lon,lat,Edg1970_F,EdgEne_F,EdgRef_F,EdgTech_F = data_readin(variable,exp='FullCp');
	BEoA,BEoA_s = diff_sig(EdgRef_F,Edg1970_F)
	Ene,Ene_s = diff_sig(EdgRef_F, EdgEne_F)
	Tech,Tech_s = diff_sig(EdgRef_F, EdgTech_F)
	CAM = np.stack((BEoA,Ene,Tech,BEoA_s,Ene_s,Tech_s),axis=0)
	Diff = CAM -CAMchem
	return lon,lat,CAMchem,CAM,Diff
# ---------------------------------------------------------------------------
# Driver: pick a variable, compute the spatial patterns, and lay out a
# 3x3 grid of maps (CAM5 | CAM5-Chem | CAM5 minus CAM5-Chem).
# ---------------------------------------------------------------------------
var = 'AODVIS';lon,lat,CAMchem,CAM,Diff = spa_pat_reg_mean(var)
# Per-variable colour scale. NOTE(review): xlim is set but unused in this
# script (kept for parity with sibling scripts); AODVIS sets none.
if var =='TS':
	colorbar_min=-2;colorbar_max=2;colormap='RdBu_r';xlim=1
elif var =='CLDTOT':
	colorbar_min=-4;colorbar_max=4;colormap='RdBu_r';xlim=2
elif var =='TGCLDLWP':
	colorbar_min=-10;colorbar_max=10;colormap='RdBu_r';xlim=5
elif var == 'precip':
	colorbar_min=-0.4;colorbar_max=0.4;colormap='BrBG';xlim=0.2
elif var =='ACTREL':  #F CDNUMC ACTREL TGCLDLWP CLDTOT
	colorbar_min=-0.20;colorbar_max=0.20;xlim=0.10; colormap='RdBu_r';
elif var =='CDNUMC':  #F CDNUMC ACTREL TGCLDLWP CLDTOT
	colorbar_min=-5;colorbar_max=5;xlim=4; colormap='RdBu_r';
elif var =='AODVIS':
	colorbar_min=-0.05;colorbar_max=0.05; colormap='RdYlBu_r';
else:
	print('confirm the variable name')
fig = plt.figure(facecolor='White',figsize=[13,8.00]);pad= 5;
# --- Column 1: CAM5 (panels a-c) ---
ax = plt.subplot2grid((3, 9), (0, 0), colspan=3)
ax.annotate('CAM5',xy=(0.5,1.05), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='horizontal',fontsize=15)
ax.annotate('Best\nEstimation',xy=(-0.1,0.5), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='vertical',fontsize=15)
ax.annotate('(a)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAM[0,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (1, 0), colspan=3)
ax.annotate('Energy\nConsumption',xy=(-0.1,0.5), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='vertical',fontsize=15)
ax.annotate('(b)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAM[1,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (2, 0), colspan=3)
ax.annotate('Technology\nAdvancements',xy=(-0.1,0.5), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='vertical',fontsize=15)
ax.annotate('(c)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAM[2,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
# --- Column 2: CAM5-Chem (panels d-f) ---
ax = plt.subplot2grid((3, 9), (0, 3), colspan=3)
ax.annotate('CAM5-Chem',xy=(0.5,1.05), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='horizontal',fontsize=15)
ax.annotate('(d)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAMchem[0,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (1, 3), colspan=3)
ax.annotate('(e)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAMchem[1,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (2, 3), colspan=3)
ax.annotate('(f)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,CAMchem[2,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
# --- Column 3: difference (panels g-i) ---
ax = plt.subplot2grid((3, 9), (0, 6), colspan=3)
# BUG FIX: column title typo 'CAM5-Chen' corrected to 'CAM5-Chem'.
ax.annotate('CAM5 - CAM5-Chem',xy=(0.5,1.05), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='center',rotation='horizontal',fontsize=15)
ax.annotate('(g)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,Diff[0,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (1, 6), colspan=3)
ax.annotate('(h)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,Diff[1,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
ax = plt.subplot2grid((3, 9), (2, 6), colspan=3)
ax.annotate('(i)',xy=(0.02,1.01), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='left', va='baseline',rotation='horizontal',fontsize=15)
colormesh1 = spatial_figure(ax,Diff[2,:,:],lon,lat,colormap,colorbar_min,colorbar_max,tb_lef=False,tb_bot=False)
# Shared horizontal colorbar with a per-variable unit label.
cbar_ax = fig.add_axes([0.18, 0.03, 0.64, 0.015])
char = fig.colorbar(colormesh1,orientation='horizontal',extend='both',cax=cbar_ax,ticks=np.round(np.arange(0,1.1,0.1)*(colorbar_max-colorbar_min)+colorbar_min,2))
if var == 'CLDTOT':
	cbar_ax.annotate('%',xy=(1.10,-1.1), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='bottom',rotation='horizontal',fontsize=15)
elif var == 'TGCLDLWP':
	cbar_ax.annotate(r'$\mathrm{\mathsf{g\/m^{-2}}}$',xy=(1.10,-1.6), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='bottom',rotation='horizontal',fontsize=15)
elif var == 'precip':
	cbar_ax.annotate(r'$\mathrm{\mathsf{mm\/day^{-1}}}$',xy=(1.10,-1.4), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='bottom',rotation='horizontal',fontsize=15)
elif var == 'ACTREL':
	cbar_ax.annotate('micron',xy=(1.10,-1.4), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='bottom',rotation='horizontal',fontsize=15)
elif var == 'CDNUMC':
	cbar_ax.annotate(r'($10^{9}$)',xy=(1.10,-1.4), xytext=(0, pad),
				xycoords='axes fraction', textcoords='offset points',
				ha='center', va='bottom',rotation='horizontal',fontsize=15)
plt.subplots_adjust(left=0.05, bottom=0.08, right=0.98, top=0.92, wspace=0.1, hspace=0.30);
plt.savefig(var+'_CAM_vs_CAM-CHEM.png', format='png', dpi=1000)
|
from agent.reactiveagent import ReactiveAgent, Stance
from game import start_game
from sys import argv
from agent.player import Player
# Whether a human player takes part in the game (set from argv in main()).
has_player = False


def main():
    """Parse the command line and run game rounds until the user declines.

    argv[1]: total number of game participants (must be an integer > 1).
    argv[2] (optional): number of human players, 0 or 1.
    Raises ValueError on invalid arguments.
    """
    global has_player
    if len(argv) not in (2, 3):
        # BUG FIX: the original concatenated string literals produced
        # "...participants (2+)and, optionally..." (missing space).
        raise ValueError("Must pass one or two command line arguments - number of total game participants (2+) "
                         "and, optionally, number of human players (0 or 1).")
    n_players = int(argv[1])
    if n_players <= 1:
        raise ValueError("Number of total game participants must be a positive integer greater than 1.")
    if len(argv) == 2:
        has_player = False
    else:
        humans = int(argv[2])
        if humans not in (0, 1):
            raise ValueError("Number of human players must be either 0 or 1.")
        has_player = humans == 1
    play_again = True
    while play_again:
        # Stances cycle through the agents: 0 = NEUTRAL, 1 = OFFENSIVE, 2 = DEFENSIVE
        if has_player:
            agents = [Player(0)]
            agents += [ReactiveAgent(i, Stance(i % 3)) for i in range(1, n_players)]
        else:
            agents = [ReactiveAgent(i, Stance(i % 3)) for i in range(n_players)]
        villages = [agent.get_village() for agent in agents]
        # Tell every agent about all villages except its own.
        for i, agent in enumerate(agents):
            agent.set_other_villages([village.name for j, village in enumerate(villages) if i != j])
        start_game(agents, villages)
        play_again = input("Play again? (y/n): ") in ("y", "Y")


if __name__ == '__main__':
    main()
|
def solve(n):
    """Return the last multiple of n counted before every decimal digit
    (0-9) has appeared among the counted numbers, or "INSOMNIA" for n == 0
    (counting multiples of 0 never reveals new digits)."""
    if n == 0:
        return "INSOMNIA"
    seen_digits = set()
    current = 0
    while len(seen_digits) < 10:
        current += n
        seen_digits.update(str(current))
    return current
fin = open("A-large.in", "r")
fout = open("A-large.out", "w")
for t in xrange(1, int(fin.readline()) + 1):
sln = solve(int(fin.readline()))
fout.write("Case #" + str(t) + ": " + str(sln) + "\n")
fout.close()
|
"""
run_auto_validation_tests.py
Will search all the sub-directories for scripts of the form starting with
validate_
and then run the scripts.
"""
import os, time, sys
import anuga
# Command-line options (algorithm, number of processes, verbosity) shared
# with the individual validation scripts.
args = anuga.get_args()
#print args
# List any sub directory to exclude from validation.
# Current working directory ('.') should always be excluded to avoid
#infinite recursion
dirs_to_skip = ['.'] # Always skip current dir
#dirs_to_skip += ['patong_beach_validation'] # This takes about 40h
# Collect every validate_*.py script below the current directory.
validation_dirs_and_files = []
for dirpath, dirnames, filenames in os.walk('.'):
    if '.svn' in dirnames:
        dirnames.remove('.svn')  # don't visit SVN directories
    # NOTE(review): 'dir' shadows the builtin of the same name.
    dir = os.path.split(dirpath)[-1]
    if dir in dirs_to_skip:
        #print 'Skipping %s' % dirpath
        continue
    #print 'Searching dir', dirpath
    for filename in filenames:
        if filename.startswith('validate_') and filename.endswith('.py'):
            #print 'Found %s in %s' %(filename, dirpath)
            validation_dirs_and_files.append((dirpath, filename))
# get repeatable order on different machines
validation_dirs_and_files.sort()
print()
print(80*'=')
print('Running all validation tests - some may take many minutes')
print('and some may require memory in the order of 8-16GB ')
print(80*'=')
print('Validation test suites:')
for path, filename in validation_dirs_and_files:
    print (' ', os.path.join(path, filename))
print()
print()
t0 = time.time()
parentdir = os.getcwd()
# Run each script in its own directory via a subshell, forwarding the
# algorithm / process-count / verbosity arguments.
for path, filename in validation_dirs_and_files:
    # print 'filename path', path, filename
    os.chdir(path)
    #anuga.run_anuga_script(filename, args = args)
    #print filename
    if args.verbose:
        cmd = 'python {0} -alg {1} -np {2} -v'.format(filename, args.alg, args.np)
    else:
        cmd = 'python {0} -alg {1} -np {2} '.format(filename, args.alg, args.np)
    print()
    print(80*'=')
    print(cmd)
    print(80*'=')
    os.system(cmd)
    # Back to parent directory so relative walks keep working.
    os.chdir(parentdir)
    # print 'current dir', os.getcwd()
print('That took %.2f seconds in total' %(time.time()-t0))
|
import unittest
from unittest import mock
def mocked_requests_post(*args, **kwargs):
    """Stand-in for ``requests.post``: returns a success payload for the
    expected permission-service URL and a failure payload for any other."""
    class MockResponse:
        def __init__(self, json_data, status_code):
            self.json_data = json_data
            self.status_code = status_code

        def json(self):
            return self.json_data

    is_known_endpoint = args[0] == 'http://127.0.0.1:9999/api'
    if is_known_endpoint:
        return MockResponse({"status": True, "message": "成功", }, 200)
    return MockResponse({"status": False, "message": "失败"}, 200)
class TestPermissionMiddleware(unittest.TestCase):
    """Tests for permission_middleware.

    The stacked decorators (applied bottom-up, so parameters arrive in the
    reverse order) stub out configuration validation, Django settings, and
    ``requests.post`` (replaced by mocked_requests_post above).
    """
    @mock.patch('sparrow_django_common.utils.validation_data.VerificationConfiguration.verify_middleware_location',
                return_value='')
    @mock.patch('requests.post', side_effect=mocked_requests_post)
    @mock.patch('sparrow_django_common.utils.validation_data.VerificationConfiguration.valid_permission_svc',
                return_value='')
    @mock.patch('django.conf.settings', return_value='')
    @mock.patch('sparrow_django_common.utils.normalize_url.NormalizeUrl.normalize_url',
                return_value='http://127.0.0.1:9999/api')
    def test_have_authority(self, NormalizeUrl, settings, valid_permission_svc, requests, verify_middleware_location):
        # normalize_url resolves to the URL the mocked post treats as success,
        # so the middleware should grant permission.
        from sparrow_django_common.middleware.permission_middleware import PermissionMiddleware
        self.assertEqual(PermissionMiddleware().has_permission(requests, view=''), True)

    @mock.patch('sparrow_django_common.utils.validation_data.VerificationConfiguration.verify_middleware_location',
                return_value='')
    @mock.patch('requests.post', side_effect=mocked_requests_post)
    @mock.patch('sparrow_django_common.utils.validation_data.VerificationConfiguration.valid_permission_svc',
                return_value='')
    @mock.patch('django.conf.settings', return_value='')
    @mock.patch('sparrow_django_common.utils.normalize_url.NormalizeUrl.normalize_url',
                return_value='http://127.0.0.1:9d999/api')
    def test_no_permission(self, NormalizeUrl, settings, valid_permission_svc, requests, verify_middleware_location):
        # Deliberately non-matching URL ('9d999'): the mocked post returns a
        # failure payload, so permission should be denied.
        from sparrow_django_common.middleware.permission_middleware import PermissionMiddleware
        self.assertEqual(PermissionMiddleware().has_permission(requests, view=''), False)


if __name__ == '__main__':
    unittest.main()
|
# --------------------------------------------------------------------------------
# Import
# --------------------------------------------------------------------------------
import torch
import time
import os
import tqdm
from utils.metrics import*
import torch.nn.functional as F
from base.trainer_base import TrainerBase
# --------------------------------------------------------------------------------
# Class
# --------------------------------------------------------------------------------
class LSTMTrainer(TrainerBase):
    """Trainer for a CNN-encoder / LSTM-decoder sequence model (teacher
    forcing on the target sequence; cross-entropy loss per character)."""

    def __init__(self, model, optimizer, data_loader, config, resume_path, train_logger, valid_logger,
                 path_data_train, labels_train, path_data_valid, labels_valid, path_dictionary, max_len, trg_vocab):
        super(LSTMTrainer, self).__init__(model, optimizer, config, resume_path, train_logger, valid_logger)
        self.config = config
        self.path_dict = path_dictionary   # character dictionary used by translate()
        self.batch_size = config['trainer']['batch_size']
        self.max_len = max_len             # maximum target-sequence length
        self.trg_vocab = trg_vocab         # target vocabulary, passed to accuracy_char
        # Setup data loader for training
        train_data_loader = data_loader(batch_size=self.batch_size, shuffle=True, path_data=path_data_train,
                                        labels=labels_train, dictionary=path_dictionary, max_len=max_len)
        print("Load data for train ...")
        self.train_data_loader = train_data_loader.loader()
        # Validation loader only when validation labels were supplied.
        if labels_valid is not None:
            # Setup data loader for validating (batch size 1, no shuffle)
            valid_data_loader = data_loader(batch_size=1, shuffle=False, path_data=path_data_valid,
                                            labels=labels_valid, dictionary=path_dictionary, max_len=max_len)
            print("Load data for valid ...")
            self.valid_data_loader = valid_data_loader.loader()

    def _train_one_epoch(self, epoch):
        """Run one training epoch; returns a dict of averaged metrics
        (and validation metrics when a valid logger is configured)."""
        # Train command from TrainerBase class
        self.model.train()
        total_loss = 0
        total_acc_char = 0
        total_acc_field = 0
        n_iter = len(self.train_data_loader)
        train_pbar = tqdm.tqdm(enumerate(self.train_data_loader), total=n_iter)
        for batch_idx, (data, target) in train_pbar:
            data = data.to(self.device)
            index_target = target.to(self.device)
            # The words I feed to force (teacher forcing: shifted right)
            input_target = index_target[:, :-1]
            # The words I want model try to predict (shifted left, flattened)
            predict_target = index_target[:, 1:].contiguous().view(-1).long()
            # Clear gradients
            self.optimizer.zero_grad()
            # Output
            output = self.model(data, input_target)
            output = output.transpose(0, 1)
            # Cross entropy loss
            loss = F.cross_entropy(output.contiguous().view(-1, output.size(-1)),
                                   predict_target.long())
            loss.backward()
            self.optimizer.step()
            # Metrics (per-character and per-word accuracy helpers from utils.metrics)
            acc_char = accuracy_char(output, index_target[:, 1:].long(), self.trg_vocab)
            acc_field = accuracy_word(output, index_target[:, 1:].long())
            print(" Loss ", loss.item())
            print(" Acc ", acc_char)
            print(" Acc field ", acc_field)
            # translate(output.contiguous().view(-1, output.size(-1)),
            #           predict_target, self.path_dict)
            # NOTE(review): leftover debug code -- this displays the first
            # image of EVERY batch (plt.show() blocks until the window is
            # closed) and imports inside the loop; remove before real training.
            image = data[0].cpu()
            import torchvision.transforms as transforms
            import matplotlib.pyplot as plt
            show = transforms.ToPILImage()
            image = show(image)
            plt.imshow(image)
            plt.show()
            if (batch_idx + 1) % 1000 == 0:
                # Periodic checkpoint every 1000 batches (hard-coded path).
                torch.save(self.model.state_dict(), "checkpoints/katakana/init_model.pth")
                # NOTE(review): these running averages are printed BEFORE the
                # current batch is added to the totals below, so they lag by
                # one batch.
                print("loss: ", total_loss / (batch_idx + 1))
                print("acc_char: ", total_acc_char / (batch_idx + 1))
                print("acc_field: ", total_acc_field / (batch_idx + 1))
                translate(output.contiguous().view(-1, output.size(-1)),
                          predict_target, self.path_dict)
            total_loss += loss.item()
            total_acc_char += acc_char
            total_acc_field += acc_field
        # Epoch averages.
        total_loss /= len(self.train_data_loader)
        total_acc_char /= len(self.train_data_loader)
        total_acc_field /= len(self.train_data_loader)
        train_log = {'loss': total_loss,
                     'acc_char': total_acc_char,
                     'acc_field': total_acc_field}
        log = {'train_metrics': train_log}
        if self.valid_logger is not None:
            print("Validating model")
            valid_log = self._eval_one_epoch_katakana()
            log['valid_metrics'] = valid_log
        return log

    def _eval_one_epoch_katakana(self):
        """Validation pass -- currently a stub: the evaluation loop is
        commented out, so all metrics are reported as 0."""
        total_loss = 0
        total_acc_char = 0
        total_acc_field = 0
        # n_iter = len(self.train_data_loader)
        # valid_pbar = tqdm.tqdm(enumerate(self.train_data_loader), total=n_iter)
        # with torch.no_grad():
        #     for batch_idx, (data, target) in valid_pbar:
        #         data = data.to(self.device)
        #         index_target = target.to(self.device)
        #         # The target I want model predict
        #         predict_target = index_target[:, 1:].contiguous().view(-1).long()
        #         # CNN encoder
        #         embs = self.model.cnn_model(data)
        #
        #         context, hidden_state, hidden_cell = self.model.lstm.encoder(embs)
        valid_log = {'loss': total_loss,
                     'acc_char': total_acc_char,
                     'acc_field': total_acc_field}
        return valid_log
|
import os
import time
from textwrap import fill
class Tutor:
    """Interactive CUI tutorial that walks a user through basic PyMODI usage.

    Attributes filled in as lessons progress:
      bundle -- the modi.MODI object created in lesson 1
      led    -- the first Led module, set in lesson 2
      button -- the first Button module, set in lesson 3
      row    -- terminal width used for wrapping and banners
    """

    def __init__(self):
        self.bundle = None
        self.led = None
        self.button = None
        self.row = 70

    @staticmethod
    def clear():
        """Clear the terminal (cls on Windows, clear elsewhere)."""
        clear_cmd = 'clear'
        if os.name == 'nt':
            clear_cmd = 'cls'
        os.system(clear_cmd)

    def print_wrap(self, msg: str):
        """Print msg word-wrapped to the configured terminal width."""
        message = fill(msg, self.row)
        print(message)

    def print_lesson(self, lesson, title):
        """Print a centered lesson banner surrounded by rules."""
        print('-' * self.row)
        topic = f"Lesson {lesson}: {title}"
        print(f"{topic:^{self.row}}")
        print('-' * self.row)

    @staticmethod
    def check_response(answer: str, give_answer: bool = True,
                       guide: str = ">>> "):
        """Prompt until the user types *answer* exactly; return that input.

        When give_answer is False the expected code is only revealed after
        two wrong attempts.
        """
        response = input(guide)
        nb_wrong = 1
        while response != answer:
            if give_answer:
                print(f"Write below code precisely.\n>>> {answer}\n")
            elif nb_wrong > 2:
                print(f"The answer is {answer}. Type it below.")
            else:
                print("Try again!")
            response = input(guide)
            nb_wrong += 1
        return response

    def run_lesson1(self):
        """Lesson 1: import modi and create the MODI bundle object."""
        self.print_lesson(1, "Making MODI")
        self.print_wrap('First, you should import modi. Type import modi')
        self.check_response('import modi')
        import modi
        self.print_wrap("Great! Now you can use all the features of modi\n")
        self.print_wrap("To control the modules, make a MODI object that "
                        "contains all the connected modules. Once you create "
                        "it, it will automatically find all the modules "
                        "connected. When creating the MODI object, you should "
                        "specify the number of modules currently connected to"
                        " the network module.\n")
        input("\nPress ENTER")
        self.clear()
        self.print_wrap("\nNow, prepare real MODI modules. Connect a network "
                        "module to your device. Then, connect a Button module "
                        "and an Led module.\nSince you have 2 module connected"
                        " to the network module, make a MODI object by typing "
                        "bundle = modi.MODI(2)")
        self.check_response('bundle = modi.MODI(2)')
        bundle = modi.MODI(2)
        print()
        self.print_wrap('Great! The "bundle" is your MODI object. With it, '
                        'you can control all the modules connected to your '
                        'device.')
        input("\nYou have completed this lesson.\nPress ENTER")
        self.bundle = bundle

    def run_lesson2(self):
        """Lesson 2: access connected modules from the bundle."""
        self.clear()
        self.print_lesson(2, "Accessing modules")
        self.print_wrap(
            "In the previous lesson, you created a MODI object. Let's "
            "figure out how we can access modules connected to it.")
        self.print_wrap(
            '"bundle.modules" is a method to get all the modules connected '
            'to the device. Type: bundle.modules')
        self.check_response('bundle.modules')
        print(self.bundle.modules)
        print()
        self.print_wrap("\nYou can see two modules connected to the device. "
                        "You can access each module by the same method we use "
                        "with an array.")
        self.print_wrap("\nYou can also access modules by types. "
                        "Type: bundle.leds")
        self.check_response('bundle.leds')
        print(self.bundle.leds)
        print()
        self.print_wrap('\nThere is one led module connected. Make an led '
                        'variable by accessing the first led module. '
                        'Type: led = bundle.leds[0]')
        self.check_response('led = bundle.leds[0]')
        led = self.bundle.leds[0]
        self.led = led
        print()
        self.print_wrap("Super! You can now do whatever you want with these "
                        "modules. If you have different modules connected, you"
                        " can access the modules in a same way, just typing "
                        "bundle.<module_name>s")
        input("\nYou have completed this lesson.\nPress ENTER")

    def run_lesson3(self):
        """Lesson 3: read from the button and send a command to the led."""
        self.clear()
        self.print_lesson(3, "Controlling modules")
        self.print_wrap("Now you know how to access individual modules. \n"
                        "Let's make an object named \"button\" as well for "
                        "your button module. You know how to do it.")
        self.check_response('button = bundle.buttons[0]', False)
        button = self.bundle.buttons[0]
        self.button = button
        print()
        self.print_wrap("Perfect. With your button module and led module, "
                        "we can either get data from the module or send "
                        "command to the module")
        # Typo fix in the user-facing message: "or note." -> "or not."
        self.print_wrap('get_pressed is a method of a button module which '
                        'returns whether the button is pressed or not.'
                        '\nCheck the state of button by typing'
                        ' button.get_pressed()')
        self.check_response('button.get_pressed()')
        print(button.get_pressed())
        print()
        self.print_wrap("Now see if the same command returns True when "
                        "pressing the button.")
        self.check_response('button.get_pressed()')
        print(button.get_pressed())
        print()
        self.print_wrap("Good. Now, let's send a command to the led.\n"
                        "set_rgb() is a method of an led module. Let there be "
                        "light by typing led.set_rgb(0, 0, 255)")
        self.check_response('led.set_rgb(0, 0, 255)')
        # BUG FIX: the original exec()'d the typed text, but no local name
        # `led` exists in this method, so it raised NameError at runtime.
        # check_response() already guarantees the typed code is exactly the
        # call below, so perform it directly on the stored module.
        self.led.set_rgb(0, 0, 255)
        print()
        self.print_wrap("Perfect! You will see the blue light from the "
                        "led module.")
        input("\nYou have completed this lesson.\nPress ENTER")

    def run_lesson4(self, bundle=None, led=None, button=None):
        """Lesson 4: a small project wiring button presses to led color.

        BUG FIX: start() invokes this with no arguments, but the original
        signature required bundle/led/button and raised TypeError. The
        parameters now default to the modules collected in earlier lessons,
        which keeps existing positional callers working.
        """
        led = led if led is not None else self.led
        button = button if button is not None else self.button
        self.clear()
        self.print_lesson(4, "Your First PyMODI Project")
        self.print_wrap("Let's make a project that blinks led when button "
                        "is pressed.")
        self.print_wrap("In an infinite loop, we want our led to light up "
                        "when button is pressed, and turn off when not "
                        "pressed. Complete the following code based on the"
                        " description.")
        input("\nPress ENTER when you're ready!")
        self.clear()
        print(">>> while True:")
        print("...     # Check if button is pressed")
        self.check_response('button.get_pressed():', give_answer=False,
                            guide="...     if ")
        print("...         # Set Led color to green")
        self.check_response('led.set_rgb(0, 255, 0)', give_answer=False,
                            guide="...         ")
        print("...     elif button.get_double_clicked():")
        print("...         break")
        print("...     else:")
        print("...         # Turn off the led. i.e. set color to (0, 0, 0)")
        self.check_response('led.set_rgb(0, 0, 0)', give_answer=False,
                            guide="...         ")
        self.print_wrap("Congrats!! Now let's see if the code works as we "
                        "want.\nPress the button to light up the led. Double "
                        "click the button to break out of the loop")
        # Poll the button until a double click ends the demo loop.
        while True:
            if button.get_pressed():
                led.set_rgb(0, 255, 0)
            elif button.get_double_clicked():
                break
            else:
                led.set_off()
            time.sleep(0.02)
        self.print_wrap("\nIt looks great! "
                        "Now you know how to use PyMODI to control modules."
                        "\nYou can look up more functions at "
                        "pymodi.readthedocs.io/en/latest\n"
                        )
        input("\nYou have completed the tutorial.\nPress ENTER to exit")
        # NOTE(review): reaches into a private attribute of the MODI object
        # to stop its communication process — confirm a public API exists.
        self.bundle._com_proc.terminate()

    def start(self):
        """Entry point: show the intro, then run lessons from the chosen one.

        NOTE(review): a non-numeric lesson number raises ValueError here —
        consider validating the input.
        """
        # Intro
        self.clear()
        print("=" * self.row)
        print(f"= {'Welcome to the PyMODI Tutor':^{self.row - 4}} =")
        print("=" * self.row)
        self.print_wrap("\nPyMODI is a very powerful tool that can control "
                        "the MODI modules using python scripts. As long as you"
                        " learn how to use built-in functions of PyMODI, you "
                        "can easily control MODI modules. This interactive CUI"
                        " tutorial will guide you through the "
                        "marvelous world of PyMODI.")
        print("Tutorial includes:\n"
              "1. Making MODI\n"
              "2. Accessing Modules\n"
              "3. Controlling Modules\n"
              "4. Your First PyMODI Project")
        lesson_nb = int(input("\nEnter the lesson number and press ENTER: "))
        self.clear()
        # Skipped lessons still need their setup (bundle/led/button).
        if lesson_nb > 1:
            import modi
            print("Preparing the MODI module...")
            # Typo fix in the user-facing message: "buttton" -> "button"
            input(
                "Connect button and led module to your device and press "
                "ENTER")
            self.bundle = modi.MODI(2)
        if lesson_nb > 2:
            self.led = self.bundle.leds[0]
            self.button = self.bundle.buttons[0]
        # Run the chosen lesson and every lesson after it.
        if lesson_nb <= 1:
            self.run_lesson1()
        if lesson_nb <= 2:
            self.run_lesson2()
        if lesson_nb <= 3:
            self.run_lesson3()
        if lesson_nb <= 4:
            self.run_lesson4()
|
'''
You are given two NONEMPTY linked lists representing two non-negative integers.
The digits are stored in REVERSE ORDER and each of their nodes contain a single
digit. Add the two numbers and return it as a linked list.
Example:
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
'''
class node:
    """A single singly-linked-list node holding one digit."""

    def __init__(self, data):
        # Payload value; successor pointer starts out empty.
        self.data = data
        self.next = None

    def __repr__(self):
        # Render just the stored value.
        return str(self.data)
class linkedlist:
    """Minimal singly linked list with O(1) append and O(n) tail pop."""

    def __init__(self):
        self.head = None
        self.tail = None
        self.size = 0

    def add(self, x):
        """Append a new node holding x at the tail."""
        if self.size == 0:
            self.head = node(x)
            self.tail = self.head
        else:
            self.tail.next = node(x)
            self.tail = self.tail.next
            self.tail.next = None
        self.size += 1

    def pop(self):
        """Remove and return the tail value, or None when the list is empty."""
        if self.size == 0:
            return None
        u = self.tail.data
        if self.size == 1:
            # BUG FIX: the original assigned the undefined name `null` here,
            # raising NameError when popping the last remaining element.
            self.head = self.tail = None
        else:
            # Walk to the node just before the tail (no back pointers).
            cue = self.head
            while cue.next != self.tail:
                cue = cue.next
            self.tail = cue
            self.tail.next = None
        self.size -= 1
        return u

    def __repr__(self):
        # BUG FIX: guard the empty list — the original dereferenced
        # self.head.next and crashed with AttributeError when head is None.
        if self.head is None:
            return ''
        out = ''
        u = self.head
        while u.next != None:
            out += str(u.data)
            out += ' -> '
            u = u.next
        out += str(u.data)
        return out
def add(x, y, flag):
    """Add two decimal digits plus an incoming carry.

    Returns (digit, carry_out): digit is the ones digit of the sum and
    carry_out reports whether the sum overflowed past 9.
    """
    total = x + y + (1 if flag else 0)
    return total % 10, total >= 10
def add_lists(left, right):
    """Add two numbers stored as little-endian digit linked lists.

    left/right: linkedlist instances whose nodes each hold one digit,
    least significant digit first. Returns a new linkedlist in the same
    reversed-digit format.
    """
    flag = False
    l = left.head
    r = right.head
    a = linkedlist()
    while l is not None or r is not None:
        # Treat a missing node as digit 0 once one list is exhausted.
        x = l.data if l is not None else 0
        y = r.data if r is not None else 0
        s, flag = add(x, y, flag)
        a.add(s)
        if l is not None:
            l = l.next
        if r is not None:
            r = r.next
    # BUG FIX: the original dropped a trailing carry, so e.g.
    # (5) + (5) produced "0" instead of "0 -> 1".
    if flag:
        a.add(1)
    return a
def main():
    """Demo: add 342 + 465 (digits stored in reverse) and exercise pop()."""
    first = linkedlist()
    for digit in (2, 4, 3):
        first.add(digit)
    second = linkedlist()
    for digit in (5, 6, 4):
        second.add(digit)
    total = add_lists(first, second)
    print(total)
    print(first)
    print(first.pop())
    print(first)
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
class Solution:
    """Partial attempt at "Reorganize String" (LeetCode 767)."""

    def reorganizeString(self, st):
        """Tally character frequencies of *st* and print feasibility numbers.

        Prints (count of all other characters, count of the most frequent
        character); self.data is left holding the counts with the most
        frequent character zeroed out.

        NOTE(review): this only computes the counts; it does not build the
        reorganized string yet.
        """
        self.data = {}
        for ch in st:
            self.data[ch] = self.data.get(ch, 0) + 1
        # BUG FIX: max(self.data) picked the alphabetically largest KEY;
        # the character with the highest COUNT is what is needed here.
        m = max(self.data, key=self.data.get)
        mv = self.data[m]
        self.data[m] = 0
        total = sum(self.data.values())
        print(total, mv)
"""
class Employee:
def enterEmployeeDetails(self):
self.name = "Mark"
def displayEmployeeDetails(self):
print(self.name)
employee = Employee()
employee.displayEmployeeDetails()
# 'Employee' object has no attribute 'name'
# name not set when object created
# hence use "init" method
class Employee:
# Initializing when object is created
# First method to be called at the time of object creation
# Special methods in python start and end with __
def __init__(self):
self.name = "Mark"
def displayEmployeeDetails(self):
print(self.name)
employee = Employee()
employee.displayEmployeeDetails()
employeeTwo = Employee()
employeeTwo.displayEmployeeDetails()
#>> Mark
"""
class Employee:
    """An employee identified by a caller-supplied name.

    __init__ is the first (special, double-underscore) method invoked at
    object-creation time, so every instance gets its own name.
    """

    def __init__(self, name):
        # Store the constructor argument on the instance.
        self.name = name

    def displayEmployeeDetails(self):
        """Print this employee's name."""
        print(self.name)
# Demonstrate constructor arguments: each instance keeps its own name.
employee = Employee("Mark")
employee.displayEmployeeDetails()
employeeTwo = Employee("Matthew")
employeeTwo.displayEmployeeDetails()
|
import requests, json

# Fetch current weather for a city from OpenWeatherMap and print a report.
# NOTE(review): hard-coded API key checked into source — move it to an
# environment variable or config file before sharing this script.
apikey = "7cb9becaea566cc27d69991c345fa129"
base = "http://api.openweathermap.org/data/2.5/weather?"
city = "Austin"
compbase = f"{base}appid={apikey}&q={city}"
resp = requests.get(compbase)
x = resp.json()
# BUG FIX: the "401" test was an elif AFTER `!= "404"`, which made it
# unreachable (401 != 404, so the first branch always won). Check the
# unauthorized case first.
if x["cod"] == "401":
    print("Error in database link! Please notify developer.")
elif x["cod"] != "404":
    y = x["main"]
    w = x["wind"]
    z = x["weather"]
    # Kelvin -> Fahrenheit conversions.
    currtemp = y["temp"]
    currtemp = str(round(float(1.8)*(currtemp - 273) + 32, 1))
    # BUG FIX: the API reports pressure in hPa and 1 inHg = 33.86 hPa; the
    # original divided by 3386 and was off by a factor of 100.
    currpres = y["pressure"]
    currpres = str(round(currpres/33.86, 1))
    currhum = y["humidity"]
    feelslike = y["feels_like"]
    feelslike = str(round(float(1.8)*(feelslike - 273) + 32, 1))
    winds = str(round(float(w["speed"]), 1))
    weatherdes = str(z[0]["description"])
    print()
    print(f"It is currently {currtemp} degrees Fahrenheit outside.")
    print()
    print(f"It feels like {feelslike} degrees Fahrenheit outside.")
    print()
    print(f"The wind is blowing at {winds} MPH.")
    print()
    print(f"The pressure is at {currpres} inhg.")
    print()
    print(f"The humidity is at %{currhum}.")
    print()
    print(f"OpenWeatherMap describes this weather as {weatherdes}.")
    print()
    print("This data was brought to you by WeatherWatcher.")
else:
    print("Error, does not work.")
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 15:32:05 2018
@author: rjovelin
"""
# use this script to check md5 and new headers on reheadered bams
# precondition: samtools need to be loaded
# module load samtools/1.5
import os
import sys
import subprocess
import argparse
import yaml
# use this function to match sample and bam name to full path
def MatchBamPath(folder):
    '''
    (str) -> tuple

    Scan folder for the first file ending in "reheadered.bam" and return
    (sample, bam_filename, full_path). All three are empty strings when no
    such bam is found.
    '''
    marker = 'reheadered.bam'
    files = [name for name in os.listdir(folder)
             if os.path.isfile(os.path.join(folder, name))]
    sample, bam, path = '', '', ''
    for filename in files:
        # Reset on every candidate so a non-match leaves empty strings.
        sample, bam, path = '', '', ''
        if marker in filename and filename[filename.index(marker):] == marker:
            # File name is expected to look like <sample>_realigned_...reheadered.bam
            sample = filename[:filename.index('_realigned_')]
            bam = filename
            path = os.path.join(folder, filename)
            assert os.path.isfile(path)
            # Stop at the first reheadered bam found.
            break
    return (sample, bam, path)
# use this functions to match samples of bams with the bam full path
def GetBamPath(BamDir, Key):
    '''
    (str, str) -> dict

    Walk BamDir (two directory levels: CPCG* sample dirs, then normal/tumor
    subdirs) and map bams found by MatchBamPath to their full paths.
    Key == 'sample'     -> {sample name: path to bam}
    Key == 'file_final' -> {reheadered bam name: path} in the subfolder itself
    Key == 'file_temp'  -> {reheadered bam name: path} inside a 'reheader'
                           subfolder (entries skipped, with a message, when
                           the reheader folder is empty)

    NOTE(review): validation is done with assert statements, which are
    stripped under `python -O` — consider raising explicit exceptions.
    '''
    # create a dict {sample: bam full path} pairs or {bam name: bam full path} pairs
    BamFiles = {}
    assert os.path.isdir(BamDir)
    # make a list of CPCG sample dir
    L = [i for i in os.listdir(BamDir) if i.startswith('CPCG') and os.path.isdir(os.path.join(BamDir, i))]
    # loop over sample dirs
    for i in L:
        # get the sample dir
        samplefolder = os.path.join(BamDir, i)
        assert os.path.isdir(samplefolder)
        # need to dig one more level to get the normal and tumor bams
        # make a list of subdirectories
        K = [item for item in os.listdir(samplefolder) if os.path.isdir(os.path.join(samplefolder, item))]
        # loop over subfolders if they exist
        if len(K) != 0:
            for m in range(len(K)):
                subfolder = os.path.join(samplefolder, K[m])
                assert os.path.isdir(subfolder)
                # check that subfolder is not empty
                if len(os.listdir(subfolder)) != 0:
                    if Key == 'sample':
                        sample, bam, path = MatchBamPath(subfolder)
                        assert sample != '' and bam != '' and path != ''
                        assert sample not in BamFiles
                        BamFiles[sample] = path
                    elif Key == 'file_final':
                        sample, bam, path = MatchBamPath(subfolder)
                        assert bam not in BamFiles
                        assert sample != '' and bam != '' and path != ''
                        BamFiles[bam] = path
                    elif Key == 'file_temp':
                        # get the bam in the reheader folder
                        # check that reheader folder is present
                        if 'reheader' in os.listdir(subfolder):
                            # get reheader folder name
                            reheaderfolder = os.path.join(subfolder, 'reheader')
                            assert os.path.isdir(reheaderfolder)
                            sample, bam, path = MatchBamPath(reheaderfolder)
                            assert bam not in BamFiles
                            # if bam has been moved already, directory is empty
                            # populate dict only if bam exists
                            if sample != '' and bam != '' and path != '':
                                BamFiles[bam] = path
                            else:
                                print('{0} is empty'.format(reheaderfolder))
    if Key == 'sample':
        print('matched samples to full path of original bams')
    else:
        print('matched file names to full path of reheadered bams')
    return BamFiles
# use this function to extract the md5
def Getmd5(checksum):
    '''
    (str) -> str

    Take the path to an md5 checksum file and return the digest string
    (the first whitespace-separated token of the file's content).
    '''
    # BUG FIX: use a context manager; the original never closed the file.
    with open(checksum) as infile:
        content = infile.read().rstrip().split()[0].strip()
    return content
# use this function to collect fields for bam header
def GrabRGBamHeader(filename):
    '''
    (str) -> dict

    Run `samtools view -H` on *filename* and return {PU: [PU, CN, ID, LB]}
    for every @RG line of the bam header. Requires samtools on PATH.
    '''
    RG = {}
    raw = subprocess.check_output('samtools view -H ' + filename, shell = True)
    # Keep only the read-group lines of the decoded header.
    rg_lines = [line for line in raw.decode('ascii').split('\n')
                if line.startswith('@RG')]
    for rg_line in rg_lines:
        name, cn, pu, ID, lb = '', '', '', '', ''
        for token in rg_line.split():
            # Only TAG:value tokens carry read-group fields.
            if ':' not in token:
                continue
            value = token.split(':')[1]
            if token.startswith('SM'):
                name = value
            elif token.startswith('CN'):
                cn = value
            elif token.startswith('PU'):
                pu = value
            elif token.startswith('ID'):
                ID = value
            elif token.startswith('LB'):
                lb = value
        # Every @RG line must carry all five tags.
        assert name != '' and cn != '' and pu != '' and ID != '' and lb != ''
        # PUs are assumed unique across read groups of one bam.
        assert pu not in RG
        RG[pu] = [pu, cn, ID, lb]
    return RG
# use this function to list of the 500PG files
def Grab500PGFiles(BamYaml):
    '''
    (str) -> list

    Take the yaml file describing the 500PG samples and return the list of
    bam files (full paths) that are part of 500PG (normal and tumour bam
    for each sample).
    '''
    # BUG FIX: yaml.load without an explicit Loader can construct arbitrary
    # Python objects (unsafe) and is an error on modern PyYAML; safe_load is
    # the right call for this plain data file. Also close the handle
    # deterministically with a context manager.
    with open(BamYaml) as infile:
        Samples500PG = yaml.safe_load(infile)
    Bams500PG = []
    for sample in Samples500PG:
        Bams500PG.append(Samples500PG[sample]['normal']['path'])
        Bams500PG.append(Samples500PG[sample]['tumour']['path'])
    return Bams500PG
# use this function to extract the readg group info from the readgroup file
def ParseReadGroupFile(ReadGroupFile, FileType):
    '''
    (str, str) -> dict

    Parse the tab-delimited read-group file and return
    {master_key: {PU: [PU, CN, ID, LB]}} where master_key is the sample name
    (FileType == 'readgroup') or the bam file name (FileType == 'summary').
    Exactly one record is expected per PU.
    '''
    # Column names differ between the two file flavours.
    Fields = ['PU', 'CN', 'LB', 'ID']
    if FileType == 'summary':
        Fields = [field + '_rg' for field in Fields]
    Rg = {}
    with open(ReadGroupFile) as infile:
        header = infile.readline().rstrip().split('\t')
        for line in infile:
            MasterKey = ''
            line = line.rstrip()
            # Skip blank lines.
            if line == '':
                continue
            line = line.split()
            if FileType == 'readgroup':
                MasterKey = line[header.index('SM')]
            elif FileType == 'summary':
                # Key on the bare file name, stripping any directory part.
                filename = line[1]
                MasterKey = filename[filename.rfind('/')+1:]
            pu = line[header.index(Fields[0])]
            cn = line[header.index(Fields[1])]
            lb = line[header.index(Fields[2])]
            ID = line[header.index(Fields[3])]
            assert MasterKey != ''
            entry = [pu, cn, ID, lb]
            inner = Rg.setdefault(MasterKey, {})
            # Collect distinct entries per PU; duplicates are ignored.
            if pu in inner:
                if entry not in inner[pu]:
                    inner[pu].append(entry)
            else:
                inner[pu] = [entry]
    # Exactly one record per PU; unwrap the singleton lists.
    for key in Rg:
        for pu in Rg[key]:
            assert len(Rg[key][pu]) == 1
            Rg[key][pu] = Rg[key][pu][0]
    if FileType == 'readgroup':
        print('collected CN from readgroup file')
    elif FileType == 'summary':
        print('collected fields from summary file')
    return Rg
# use this function to review the readgroups in bams and file and write summary to files
def ReviewReadGroup(args):
    '''
    (list) -> files

    Take the parsed command-line arguments, compare the read-group fields
    (PU, CN, ID, LB) found in the bam headers against the readgroup file,
    and write two files: args.output with per-PU comparison results
    ("matched"/"non-matching") and args.error with the samples whose PUs
    could not be matched at all. Each line is also flagged Y/N for 500PG
    membership.

    NOTE(review): paths to the bam datasets are hard-coded below — confirm
    they are still valid before running.
    '''
    # 1) make a list of bams from 500PG
    Bams500PG = Grab500PGFiles(args.yaml)
    # 2) collect fields for each samples from the readgroup file
    RgInfo = ParseReadGroupFile(args.readgroup, 'readgroup')
    # 3) Match bams to samples for each dataset
    path = '/.mounts/labs/cpcgene/private/samples/Analysis/wgs/bwa-mem/{0}/gatk/{1}/bam/realign-recal/'
    # make a list of bam directories
    BamDirs = [path.format('0.7.15', '3.7.0'), path.format('0.7.15', '3.6.0'), path.format('0.7.15', '3.5.0'),
               path.format('0.7.12', '3.5.0'), path.format('0.7.12', '3.4.0')]
    # make a list of dicts with bam: paths
    BamsDataset = [GetBamPath(BamDirs[i], 'sample') for i in range(len(BamDirs))]
    # 4) Collect fields for each sample from the bam header
    BamRGInfo = []
    for i in range(len(BamsDataset)):
        # create a dict {sample: {PU:[PU, CN, ID, LB]}}
        datasetRG = {}
        for sample in BamsDataset[i]:
            assert sample not in datasetRG
            datasetRG[sample] = GrabRGBamHeader(BamsDataset[i][sample])
        BamRGInfo.append(datasetRG)
    # 5) Compare cn, id, lb from readgtoup and bam headers
    # create lists of dicts for samples with matching pus and non-matching pus
    MatchedHeaders, Absent = [], []
    # check bam headers and read group header for each dataset
    for i in range(len(BamRGInfo)):
        # create a dict to store bam and rg field values
        Matched, NotMatched = {}, {}
        # loop over samples for that dataset
        for sample in BamRGInfo[i]:
            # check that sample is in readgroup
            assert sample in RgInfo
            # initialize inner dict
            Matched[sample] = {}
            # compare the values of each PU for each sample
            for PU in BamRGInfo[i][sample]:
                # some sample have non-matching PUs between bams readgroup
                # create a key based on PU to match bamcenters[i][sample] and rgcenters[sample]
                newPu = ''
                if PU not in RgInfo[sample]:
                    #print(sample, PU, list(RgCenters[sample].keys()))
                    # loop over PU, check if PU from bam is contained in PU from rg
                    # (substring containment either way counts as a match)
                    for j in list(RgInfo[sample].keys()):
                        if PU in j or j in PU:
                            newPu = j
                            break
                    # check if matching PU is found
                else:
                    newPu = PU
                # check if a matching PU have been found
                if newPu == '':
                    # PUs do not match between bam and rg
                    NotMatched[sample] = BamsDataset[i][sample]
                else:
                    assert newPu in RgInfo[sample]
                    # record bam full path, bam fields and rg fields
                    Matched[sample][PU] = [BamsDataset[i][sample]]
                    Matched[sample][PU].extend(BamRGInfo[i][sample][PU])
                    Matched[sample][PU].extend(RgInfo[sample][newPu])
                    # compare the values of the same PU between bam header and readgroup
                    if BamRGInfo[i][sample][PU] != RgInfo[sample][newPu]:
                        # tag with no match
                        Matched[sample][PU].append('non-matching')
                    else:
                        Matched[sample][PU].append('matched')
        # remove sample from matched if at least 1 PU was never found
        to_remove = list(NotMatched.keys())
        if len(to_remove) != 0:
            for sample in to_remove:
                if sample in Matched:
                    del Matched[sample]
        # add dicts to respective lists
        MatchedHeaders.append(Matched)
        Absent.append(NotMatched)
    # 6) check that all sample and PUs are recorded
    for i in range(len(MatchedHeaders)):
        assert len(MatchedHeaders[i]) + len(Absent[i]) == len(BamRGInfo[i]) == len(BamsDataset[i])
        for sample in MatchedHeaders[i]:
            assert len(MatchedHeaders[i][sample]) == len(BamRGInfo[i][sample])
    # 7) Write results to files
    newfile = open(args.output, 'w')
    OutputHeader = ['Sample', 'Full_path', 'PU_Bam', 'CN_Bam', 'ID_Bam', 'LB_Bam', 'PU_rg', 'CN_rg', 'ID_rg', 'LB_rg', 'Match', '500PG']
    newfile.write('\t'.join(OutputHeader) + '\n')
    for i in range(len(MatchedHeaders)):
        for sample in MatchedHeaders[i]:
            for PU in MatchedHeaders[i][sample]:
                line = [sample]
                line.extend(MatchedHeaders[i][sample][PU])
                # check if bam is part of 500PG
                if MatchedHeaders[i][sample][PU][0] in Bams500PG:
                    line.append('Y')
                else:
                    line.append('N')
                newfile.write('\t'.join(line) + '\n')
    newfile.close()
    # error file: samples whose PUs never matched the readgroup file
    newfile = open(args.error, 'w')
    MissingHeader = ['Sample', 'Full_path', '500PG']
    newfile.write('\t'.join(MissingHeader) + '\n')
    for i in range(len(Absent)):
        if len(Absent[i]) != 0:
            for sample in Absent[i]:
                line = [sample, Absent[i][sample]]
                if Absent[i][sample] in Bams500PG:
                    line.append('Y')
                else:
                    line.append('N')
                newfile.write('\t'.join(line) + '\n')
    newfile.close()
# use this function to compare bams pre and post-reheadering
# compare md5sums and compare bam header and readgroup file
def ComparePrePost(args):
    '''
    (list) -> file

    Take the parsed command-line arguments and write a summary file with
    read-group info from the readgroup file and post-reheadering bam, plus
    md5sums from the original and post-reheadering bams. args.summary is
    'temp' (bams still in the reheader subfolder) or 'final' (bams already
    moved to their final location).

    NOTE(review): the asserts below abort the run on ANY mismatch
    (missing md5, differing fields) before the summary is fully written.
    '''
    # get the path the bams
    BamDir = '/.mounts/labs/cpcgene/private/samples/Analysis/wgs/bwa-mem/{0}/gatk/{1}/bam/realign-recal/'.format(args.bwa, args.gatk)
    # 1) make a dict new reheadered bams {bam: full path}
    if args.summary == 'temp':
        # write temporary summary to check if reheadered bams can replace original bams
        # make a dict of bams: full path with reheadered bams in the reheader folder
        NewBams = GetBamPath(BamDir, 'file_temp')
    elif args.summary == 'final':
        # write final summary once all bams have been replaced by reheadered bams
        # make a dict of bams: full path with reheadered bams
        NewBams = GetBamPath(BamDir, 'file_final')
    # 2) match the new bams with their md5, check that md5 are the same
    Md5 = {}
    # get the checksum directory
    ChecksumDir = os.path.join('/.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/CORRECT_HEADERS/bwa-mem_{0}_gatk_{1}/checksums'.format(args.bwa, args.gatk))
    assert os.path.isdir(ChecksumDir)
    L = os.listdir(ChecksumDir)
    if len(L) != 0:
        for bam in NewBams:
            # index 0 = pre-reheadering md5, index 1 = post-reheadering md5
            Md5[bam] = ['', '']
            for filename in L:
                if bam in filename:
                    assert 'md5' in filename
                    if 'post_reheadering' in filename:
                        Md5[bam][1] = Getmd5(os.path.join(ChecksumDir, filename))
                    else:
                        Md5[bam][0] = Getmd5(os.path.join(ChecksumDir, filename))
    # check that md5 have been collected for all bams
    assert set(Md5.keys()) == set(NewBams.keys())
    for bam in Md5:
        assert Md5[bam][0] != '' and Md5[bam][1] != ''
    print('extracted md5')
    # 3) collect fields for each file name from the readgroup file
    # create a dict with {bam: {PU:[CN, ID, LB]}} pairs from the summary file
    RGInfo = ParseReadGroupFile(args.review, 'summary')
    # 4) write summary file
    if args.summary == 'temp':
        Outputfile = 'TempStatus_bwa_{0}_gatk{1}.txt'.format(args.bwa, args.gatk)
    elif args.summary == 'final':
        Outputfile = 'StatusReheader_bwa_{0}_gatk{1}.txt'.format(args.bwa, args.gatk)
    newfile = open(Outputfile, 'w')
    Header = ['sample', 'filename', 'fullpath', 'md5_original', 'md5_post', 'md5_match', 'PU_rg', 'CN_rg', 'ID_rg', 'LB_rg', 'PU_bam', 'CN_bam', 'ID_bam', 'LB_bam', 'RG_match']
    newfile.write('\t'.join(Header) + '\n')
    for bam in NewBams:
        # collect readgroup info, compare to readgroup file
        RG = GrabRGBamHeader(NewBams[bam])
        # check that bam has info from RG file
        assert bam in RGInfo
        # compare all PU
        assert set(RGInfo[bam].keys()) == set(RG.keys())
        for pu in RG:
            line = []
            assert RG[pu] == RGInfo[bam][pu]
            # get sample name
            sample = bam[:bam.index('_')]
            # get full path
            path = NewBams[bam]
            # get md5 original, post-reheadering and md5 match
            md5_original, md5_post, md5_match = Md5[bam][0], Md5[bam][1], Md5[bam][0] == Md5[bam][1]
            # get RG file fields
            PU_rg, CN_rg, ID_rg, LB_rg = RGInfo[bam][pu]
            # get bam fields
            PU_bam, CN_bam, ID_bam, LB_bam = RG[pu]
            # check if rg fields match
            fields_match = RG[pu] == RGInfo[bam][pu]
            assert md5_match == True and fields_match == True
            line = [sample, bam, path, md5_original, md5_post, str(md5_match),
                    PU_rg, CN_rg, ID_rg, LB_rg, PU_bam, CN_bam, ID_bam, LB_bam, str(fields_match)]
            newfile.write('\t'.join(line) + '\n')
    newfile.close()
# use this fucntion to edit the bam header
def EditBamHeader(bam, BamHeader, BamRGHeaderFile):
    '''
    (str, str, str) -> str

    Take a bam name (or full path), the bam header text, and the review file
    with read-group information; return the header text with the bam's @RG
    fields (PU, CN, ID, LB) replaced by the read-group-file values.
    '''
    # Work on individual header lines.
    NewHeader = BamHeader.split('\n')
    # BUG FIX/cleanup: use a context manager (the original leaked the file
    # handle on any exception) and drop the unused Full_path local.
    with open(BamRGHeaderFile) as infile:
        header = infile.readline().rstrip().split('\t')
        for line in infile:
            if bam not in line:
                continue
            line = line.rstrip().split('\t')
            # Fields currently in the bam header...
            PU_Bam, CN_Bam = line[header.index('PU_Bam')], line[header.index('CN_Bam')]
            ID_Bam, LB_Bam = line[header.index('ID_Bam')], line[header.index('LB_Bam')]
            # ...and the corrected values from the read-group file.
            PU_rg, CN_rg = line[header.index('PU_rg')], line[header.index('CN_rg')]
            ID_rg, LB_rg = line[header.index('ID_rg')], line[header.index('LB_rg')]
            BamField = [PU_Bam, CN_Bam, ID_Bam, LB_Bam]
            RGField = [PU_rg, CN_rg, ID_rg, LB_rg]
            # Sanity check: every bam-side value must occur in the header.
            for field in BamField:
                assert field in BamHeader
            # Rewrite only the @RG line(s) carrying all four bam-side values.
            # NOTE(review): plain substring replace — assumes the field
            # values do not collide with other header text.
            for i in range(len(NewHeader)):
                if '@RG' in NewHeader[i]:
                    if PU_Bam in NewHeader[i] and CN_Bam in NewHeader[i] and ID_Bam in NewHeader[i] and LB_Bam in NewHeader[i]:
                        for j in range(len(BamField)):
                            NewHeader[i] = NewHeader[i].replace(BamField[j], RGField[j])
    NewHeader = '\n'.join(NewHeader)
    return NewHeader
# use this function to set scripts to reheader the bams
def ReheaderBams(args):
    '''
    (list) -> files

    Take the parsed command-line arguments and, for every bam flagged
    "non-matching" in the review file (args.review) that matches the given
    bwa/gatk versions, write a qsub shell script that: (1) md5sums the body
    of the original bam, (2) reheaders the bam with the corrected header,
    and (3) md5sums the body of the reheadered bam (held on the reheader
    job). Edited headers, checksums and logs go to per-version
    subdirectories under the hard-coded working directory.

    Precondition: samtools must be loadable on the cluster nodes.
    '''
    # make a list of bams to reheader
    AllBams = []
    infile = open(args.review)
    infile.readline()
    for line in infile:
        if 'CPCG' in line:
            line = line.rstrip().split('\t')
            # check if fields are matching
            if line[-2] == 'non-matching':
                # collect full path of the bam
                AllBams.append(line[1])
    infile.close()
    # make a list of bams for versions of bwa and gatk
    MyBams = [bam for bam in AllBams if args.bwa in bam and args.gatk in bam]
    # get the working directory and output directory
    WorkDir = '/.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/CORRECT_HEADERS/'
    DirOut = os.path.join(WorkDir, 'bwa-mem_{0}_gatk_{1}'.format(args.bwa, args.gatk))
    # check if DirOut exists
    if os.path.isdir(DirOut) == False:
        print('please verify that {0} exists or provide valid bwa and gatk versions'.format(DirOut))
        sys.exit(2)
    # get the log, edited bam headers and checksums directories
    ChecksumDir = os.path.join(DirOut, 'checksums')
    LogDir = os.path.join(DirOut, 'log')
    EditedHeaderDir = os.path.join(DirOut, 'edited_headers')
    # create dirs where logs and checksums are saved if they don't already exists
    if os.path.isdir(ChecksumDir) == False:
        os.mkdir(ChecksumDir)
    if os.path.isdir(LogDir) == False:
        os.mkdir(LogDir)
    if os.path.isdir(EditedHeaderDir) == False:
        os.mkdir(EditedHeaderDir)
    # loop over bams
    for bam in MyBams:
        BamHeader = ''
        # extract the file name and directory from the full path
        filename = bam[bam.rfind('/')+1:]
        bamDir = bam[:bam.rfind('/')]
        # create a qsub script
        QsubScript = os.path.join(DirOut, filename + '.reheader.sh')
        qsubFile = open(QsubScript, 'w')
        # check if reheader directory exists
        reheaderDir = os.path.join(bamDir, 'reheader')
        if os.path.isdir(reheaderDir) == False:
            qsubFile.write('mkdir {0}'.format(reheaderDir) + '\n')
        # get the header
        BamHeader = subprocess.check_output('samtools view -H {0}'.format(bam), shell=True).decode('utf-8')
        assert BamHeader != ''
        # edit header, replace fields in header with fields from readgroup file
        NewBamHeader = EditBamHeader(bam, BamHeader, args.review)
        # write edited bam header to file
        outheader = os.path.join(EditedHeaderDir, filename + '.edited_header.sam')
        newfile = open(outheader, 'w')
        newfile.write(NewBamHeader + '\n')
        newfile.close()
        # do a checksum on the body of the original bam
        original_md5 = os.path.join(ChecksumDir, filename + '.md5')
        line = 'qsub -cwd -b y -N md5.bwa-mem_{0}_gatk_{1}.{2} -e log -o log \"module load samtools;samtools view {3} | md5sum > {4}\"'.format(args.bwa, args.gatk, filename, bam, original_md5)
        qsubFile.write(line + '\n')
        # reheader the bam
        NewBamFile = os.path.join(reheaderDir, filename)
        line = 'qsub -cwd -b y -N reheader.bwa-mem_{0}_gatk_{1}.{2} -e log -o log \"module load samtools;samtools reheader -i {3} {4} > {5}\"'.format(args.bwa, args.gatk, filename, outheader, bam, NewBamFile)
        qsubFile.write(line + '\n')
        # do a checksum on the body of the new bam
        # (-hold_jid makes this job wait for the reheader job above)
        reheadered_md5 = os.path.join(ChecksumDir, filename + '.post_reheadering.md5')
        line = 'qsub -cwd -b y -N md5.bwa-mem_{0}_gatk_{1}.{2} -hold_jid reheader.bwa-mem_{3}_gatk_{4}.{5} -e log -o log \"module load samtools;samtools view {6} | md5sum > {7}\"'.format(args.bwa, args.gatk, filename + '.post_reheadering', args.bwa, args.gatk, filename, NewBamFile, reheadered_md5)
        qsubFile.write(line + '\n')
        qsubFile.close()
# use this function to move reheadered bams to their
def MoveBamsFinal(args):
    '''
    (list) -> None
    Take the list of command line arguments and move the reheadered bams to their
    final destination, replacing the original bams if the md5sums between original
    and reheadered bams match and if readgroup info match between readgroup file
    and reheadered bams
    '''
    # get the directory containing the original bams
    BamDir = '/.mounts/labs/cpcgene/private/samples/Analysis/wgs/bwa-mem/{0}/gatk/{1}/bam/realign-recal/'.format(args.bwa, args.gatk)
    # temporary status file produced by the Compare step
    StatusFile = 'TempStatus_bwa_{0}_gatk{1}.txt'.format(args.bwa, args.gatk)
    infile = open(StatusFile)
    header = infile.readline().rstrip().split('\t')
    # bam name -> path of the reheadered bam when md5sums and readgroups match
    Reheadered = {}
    # bam names whose md5sums or readgroups did not match
    NoMatch = []
    for line in infile:
        if 'CPCG' in line:
            line = line.rstrip().split('\t')
            # Fix: extract bam/path up front so the no-match branch records the
            # bam of the CURRENT line (it previously referenced `bam` from an
            # earlier iteration, or an unbound name on the first mismatch)
            bam, path = line[header.index('filename')], line[header.index('fullpath')]
            # check that md5sums match
            md5_original = line[header.index('md5_original')]
            md5_post = line[header.index('md5_post')]
            # NOTE(review): bool() of any non-empty string is True, so a field
            # containing the text 'False' still evaluates True here; the assert
            # below is the real safety net -- confirm the writer's field format
            md5_match = bool(line[header.index('md5_match')])
            RG = line[header.index('PU_rg'): header.index('PU_bam')]
            rg = line[header.index('PU_bam'):header.index('RG_match')]
            RG_match = bool(line[header.index('RG_match')])
            if md5_match == True and RG_match == True:
                assert md5_original == md5_post and RG == rg
                assert os.path.isfile(path)
                # bams may be recorded on multiple lines because PUs are the unique fields
                if bam in Reheadered:
                    assert Reheadered[bam] == path
                else:
                    Reheadered[bam] = path
            else:
                NoMatch.append(bam)
    infile.close()
    # remove bams if md5 or RG did not match on any line
    NoMatch = list(set(NoMatch))
    if len(NoMatch) != 0:
        for bam in NoMatch:
            print('no match for bam {0}'.format(bam))
            # Fix: a bam may have mismatched on every line and never been added,
            # so guard against a KeyError here
            if bam in Reheadered:
                del Reheadered[bam]
    # make a dictionary of original bams: path pairs
    OriginalBams = GetBamPath(BamDir, 'file_final')
    # check that all reheadered bams can be matched to their final destination
    for bam in Reheadered:
        print('moving bam {0}'.format(bam))
        OldFile = OriginalBams[bam]
        NewFile = Reheadered[bam]
        assert os.path.isfile(OldFile)
        assert os.path.isfile(NewFile)
        # replace original bam with reheadered bam
        os.system('mv {0} {1}'.format(NewFile, OldFile))
        # change group --> cpcgene
        os.system('chgrp cpcgene {0}'.format(OldFile))
if __name__ == '__main__':
    # create top-level parser
    parser = argparse.ArgumentParser(prog = 'ReviewBamHeaders.py', description='review read group fields in bams and readgroup file')
    subparsers = parser.add_subparsers(title='sub-commands', description='valid sub-commands', help = 'sub-commands help')
    # Review sub-command: audit read group fields in bams against the readgroup file
    Review_parser = subparsers.add_parser('Review', help ='Review read group fields in bams and readgroup file')
    Review_parser.add_argument('readgroup', help='read group file')
    Review_parser.add_argument('-o', '--Out', dest='output', default='BamRGHeaders.txt', help='summary file with readgroup info. default is ./BamRGHeaders.txt')
    Review_parser.add_argument('-e', '--Err', dest='error', default='ProblematicBams.txt', help='bams that cannot be compared between headers and readgroup file. default is ./ProblematicBams.txt')
    Review_parser.add_argument('-y', '--Yaml', dest='yaml', default='/.mounts/labs/cpcgene/private/projects/500pg/data/samples/500pg_bam_all.yaml', help='yaml files with 500PG samples. default is /.mounts/labs/cpcgene/private/projects/500pg/data/samples/500pg_bam_all.yaml')
    Review_parser.set_defaults(func=ReviewReadGroup)
    # Compare sub-command: compare bams post and pre-reheadering
    Compare_parser = subparsers.add_parser('Compare', help='Compare bams post and pre-reheadering')
    Compare_parser.add_argument('-r', '--Review', dest='review', default='/.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/BamRGHeaders.txt', help='review file with bam and readgroup fields. default is /.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/BamRGHeaders.txt')
    Compare_parser.add_argument('-b', '--Bwa', dest='bwa', default='0.7.15', help='Bwa version. default 0.7.15', choices=['0.7.12', '0.7.15'])
    Compare_parser.add_argument('-g', '--Gatk', dest='gatk', default='3.7.0', help='Gatk version. default 3.7.0', choices=['3.4.0', '3.5.0', '3.6.0', '3.7.0'])
    Compare_parser.add_argument('-s', '--Summary', dest='summary', help='Write temporary or final summary with md5sums and readgroup info', choices=['final', 'temp'])
    Compare_parser.set_defaults(func=ComparePrePost)
    # Reheader sub-command: rewrite bam headers with info from the readgroup file
    Reheader_parser = subparsers.add_parser('Reheader', help='Reheader bams with info from readgroup file')
    Reheader_parser.add_argument('-b', '--Bwa', dest='bwa', default='0.7.15', help='Bwa version. default 0.7.15', choices=['0.7.12', '0.7.15'])
    Reheader_parser.add_argument('-g', '--Gatk', dest='gatk', default='3.7.0', help='Gatk version. default 3.7.0', choices=['3.4.0', '3.5.0', '3.6.0', '3.7.0'])
    Reheader_parser.add_argument('-r', '--Review', dest='review', default='/.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/BamRGHeaders.txt', help='review file with bam and readgroup fields. default is /.mounts/labs/gsiprojects/boutroslab/CPCGene/PIPELINES/REVIEW_BAM_HEADERS/BamRGHeaders.txt')
    Reheader_parser.set_defaults(func=ReheaderBams)
    # MoveBam sub-command: move reheadered bams to their final directory
    MoveBam_parser = subparsers.add_parser('MoveBam', help='Move reheadered bams to final directory')
    MoveBam_parser.add_argument('-b', '--Bwa', dest='bwa', default='0.7.15', help='Bwa version. default 0.7.15', choices=['0.7.12', '0.7.15'])
    MoveBam_parser.add_argument('-g', '--Gatk', dest='gatk', default='3.7.0', help='Gatk version. default 3.7.0', choices=['3.4.0', '3.5.0', '3.6.0', '3.7.0'])
    MoveBam_parser.set_defaults(func=MoveBamsFinal)
    # get arguments from the command line
    args = parser.parse_args()
    # dispatch to the handler registered via set_defaults(func=...)
    args.func(args)
|
#http://eddmann.com/posts/depth-first-search-and-breadth-first-search-in-python/
# Adjacency list: each node maps to the set of its adjacent nodes
graph = {'A': {'B', 'C'},
         'B': {'A', 'D', 'E'},
         'C': {'A', 'F'},
         'D': {'B'},
         'E': {'B', 'F'},
         'F': {'C', 'E'}}
# A second example graph (tree-shaped) in the same adjacency-set form
tree = {2: {7, 5},
        7: {3, 6},
        5: {9},
        3: set(),
        6: {5, 11},
        9: {4},
        10: set(),
        11: set(),
        4: set()}
|
import Levenshtein
def editsim(a: str, b: str, ignore_order=False) -> int:
    """
    Return a similarity score in 0..100 based on the edit distance between
    two strings, relative to the longer string (100 == identical).
    NOTE: the sentinel clause at the top is specific to this notebook's experiments.
    """
    try:
        # notebook-specific sentinel handling
        if a == "" or b == "<NA>":
            return 0
        # order the pair so `longer` is the longer of the two
        if len(a) < len(b):
            longer, shorter = b, a
        else:
            longer, shorter = a, b
        lonlen = len(longer)
        if ignore_order:
            # compare word multisets rather than word order
            longer = " ".join(sorted(longer.split(" ")))
            shorter = " ".join(sorted(shorter.split(" ")))
        distance = Levenshtein.distance(longer, shorter)
        return int(((lonlen - distance) / lonlen) * 100)
    except TypeError:
        # non-string input (e.g. NaN) -> no similarity
        return 0
|
#!/usr/bin/env python2
import boto3
import botocore
import collections
import hashlib
import re
import signal
import sys
import threading
import urllib
import urlparse
class MessageHeader(collections.namedtuple('MessageHeader_', ['status_code', 'status_info'])):
    # Status line of a method message, e.g. '200 URI Start'.
    def __str__(self):
        return '{0} {1}'.format(self.status_code, self.status_info)
    @staticmethod
    def parse(line):
        # split once: everything after the numeric code is the info text
        code, info = line.split(' ', 1)
        return MessageHeader(int(code), info)
class MessageHeaders:
    # Pre-defined status lines used by this method implementation
    # (informational 1xx, success 2xx, failure 4xx, incoming requests 6xx).
    CAPABILITIES = MessageHeader(100, 'Capabilities')
    STATUS = MessageHeader(102, 'Status')
    URI_FAILURE = MessageHeader(400, 'URI Failure')
    GENERAL_FAILURE = MessageHeader(401, 'General Failure')
    URI_START = MessageHeader(200, 'URI Start')
    URI_DONE = MessageHeader(201, 'URI Done')
    URI_ACQUIRE = MessageHeader(600, 'URI Acquire')
    CONFIGURATION = MessageHeader(601, 'Configuration')
class Message(collections.namedtuple('Message_', ['header', 'fields'])):
    # One protocol message: a status header plus (name, value) field pairs.
    @staticmethod
    def parse_lines(lines):
        # first line is the status header, the rest are 'Name: value' fields
        header = MessageHeader.parse(lines[0])
        fields = tuple(re.split(': *', line, 1) for line in lines[1:])
        return Message(header, fields)
    def get_field(self, field_name):
        # first matching value, or None when absent
        return next(self.get_fields(field_name), None)
    def get_fields(self, field_name):
        # field names compare case-insensitively
        wanted = field_name.lower()
        return (value for name, value in self.fields if name.lower() == wanted)
    def __str__(self):
        parts = [str(self.header)]
        parts.extend('{0}: {1}'.format(name, value) for name, value in self.fields)
        parts.append('\n')
        return '\n'.join(parts)
Pipes = collections.namedtuple('Pipes', 'input output')  # reader/writer stream pair
class AptMethod(collections.namedtuple('AptMethod_', ['pipes'])):
    # Base class speaking the APT transport-method wire protocol: reads
    # blank-line-separated messages from stdin, answers on stdout.
    def send(self, message):
        # serialise and flush immediately so the consumer sees it right away
        self.pipes.output.write(str(message))
        self.pipes.output.flush()
    def _send_error(self, message):
        # report a failure as a '401 General Failure' message
        self.send(Message(MessageHeaders.GENERAL_FAILURE, (('Message', message),)))
    def run(self):
        # Main loop: assemble incoming lines into messages and handle each
        # message on its own thread so a long download does not block input.
        try:
            self.send_capabilities()
            # TODO: Use a proper executor. concurrent.futures has them, but it's only in Python 3.2+.
            threads = []
            # shared flag a worker sets (under lock) when it hits an error
            interrupt = {'lock': threading.Lock(), 'value': False}
            lines = []
            while not interrupt['value']:
                line = sys.stdin.readline()
                if not line:
                    # EOF: wait for outstanding workers, then stop
                    for thread in threads:
                        thread.join()
                    break
                line = line.rstrip('\n')
                if line:
                    lines.append(line)
                elif lines:
                    # a blank line terminates one message
                    message = Message.parse_lines(lines)
                    lines = []
                    # NOTE(review): the closure captures `message` by name; the
                    # loop rebinds it on the next message, so a slow thread
                    # start could observe the newer binding -- confirm whether
                    # a default-argument binding is needed here
                    def handle_message():
                        try:
                            self.handle_message(message)
                        except Exception as ex:
                            with interrupt['lock']:
                                if not interrupt['value']:
                                    interrupt['value'] = True
                                    self._send_error(ex)
                            raise
                    thread = threading.Thread(target=handle_message)
                    threads.append(thread)
                    thread.start()
        except Exception as ex:
            self._send_error(ex)
            raise
class S3AptMethod(AptMethod):
    # APT transport method that fetches packages from S3 via boto3.
    # NOTE(review): urlparse / urllib.unquote usage makes this Python-2 code.
    def __init__(self, *args, **kwargs):
        super(S3AptMethod, self).__init__(*args, **kwargs)
        # signature version forced by configuration ('s3' or 's3v4'); None = default
        self.signature_version = None
    class S3Uri:
        # Helper that picks credentials, endpoint, bucket and key out of a raw URI.
        def __init__(self, method, raw_uri):
            self.method = method
            self.uri = urlparse.urlparse(raw_uri)
        def user_host(self):
            # -> (userinfo or None, host) split out of the netloc
            parts = self.uri.netloc.split('@', 1)
            return parts if len(parts) == 2 else (None, parts[0])
        def endpoint_url(self):
            return 'https://{}/'.format(self.user_host()[1])
        def credentials(self):
            # access key/secret may be embedded as key:secret@ in the URI
            user, _ = self.user_host()
            if user:
                user_parts = user.split(':', 1)
                if len(user_parts) == 2:
                    return map(urllib.unquote, user_parts)
                else:
                    raise Exception('Access key and secret are specified improperly in the URL')
            return None, None
        def virtual_host_bucket(self):
            # bucket name for virtual-host style URIs (bucket.s3.amazonaws.com);
            # NOTE(review): the dots in this pattern are unescaped and match any
            # character, so the match is looser than intended -- confirm before tightening
            virtual_host_match = re.match('(?:(.*).|)s3(?:-[^.]*)?.amazonaws.com', self.uri.hostname)
            return virtual_host_match and virtual_host_match.group(1)
        def bucket_key(self):
            # -> (bucket, key) for both virtual-host and path-style URIs
            bucket = self.virtual_host_bucket()
            if bucket:
                key = self.uri.path[1:]
            else:
                _, bucket, key = map(urllib.unquote, self.uri.path.split('/', 2))
            return bucket, key
        def signature_version(self):
            # explicit configuration wins; otherwise bare s3 endpoints use v4
            # (falls through to None when neither condition applies)
            if self.method.signature_version:
                return self.method.signature_version
            elif self.virtual_host_bucket() == '':
                return 's3v4'
    def send_capabilities(self):
        # advertise this method's capabilities to the consumer
        self.send(Message(MessageHeaders.CAPABILITIES, (
            ('Send-Config', 'true'),
            ('Pipeline', 'true'),
            ('Single-Instance', 'yes'),
        )))
    def handle_message(self, message):
        # React to one incoming message: a configuration dump or a fetch request.
        if message.header.status_code == MessageHeaders.CONFIGURATION.status_code:
            for config in message.get_fields('Config-Item'):
                key, value = config.split('=', 1)
                if key == 'S3::Signature::Version':
                    try:
                        self.signature_version = {'2':'s3', '4':'s3v4'}[value]
                    except KeyError:
                        raise Exception('Invalid value for S3::Signature::Version')
        elif message.header.status_code == MessageHeaders.URI_ACQUIRE.status_code:
            uri = message.get_field('URI')
            filename = message.get_field('Filename')
            s3_uri = self.S3Uri(self, uri)
            s3_access_key, s3_access_secret = s3_uri.credentials()
            s3 = boto3.resource(
                's3',
                aws_access_key_id=s3_access_key,
                aws_secret_access_key=s3_access_secret,
                endpoint_url=s3_uri.endpoint_url(),
                config=botocore.client.Config(signature_version=s3_uri.signature_version())
            )
            bucket, key = s3_uri.bucket_key()
            s3_object = s3.Bucket(bucket).Object(key)
            self.send(Message(MessageHeaders.STATUS, (
                ('Message', 'Requesting {}/{}'.format(bucket, key)),
                ('URI', uri),
            )))
            try:
                s3_request = {}
                # conditional fetch: skip the download when unchanged
                last_modified = message.get_field('Last-Modified')
                if last_modified:
                    s3_request['IfModifiedSince'] = last_modified
                s3_response = s3_object.get(**s3_request)
            except botocore.exceptions.ClientError as error:
                if error.response['Error']['Code'] == '304':
                    # not modified: report the cached copy as still valid
                    self.send(Message(MessageHeaders.URI_DONE, (
                        ('Filename', filename),
                        ('IMS-Hit', 'true'),
                        ('URI', uri),
                    )))
                else:
                    self.send(Message(MessageHeaders.URI_FAILURE, (
                        ('Message', error.response['Error']['Message']),
                        ('URI', uri),
                    )))
            else:
                self.send(Message(MessageHeaders.URI_START, (
                    ('Last-Modified', s3_response['LastModified'].isoformat()),
                    ('Size', s3_response['ContentLength']),
                    ('URI', uri),
                )))
                # stream the body to disk while computing each digest reported below
                md5 = hashlib.md5()
                sha1 = hashlib.sha1()
                sha256 = hashlib.sha256()
                sha512 = hashlib.sha512()
                with open(filename, 'wb') as f:
                    while True:
                        bytes = s3_response['Body'].read(16 * 1024)
                        if not bytes:
                            break
                        f.write(bytes)
                        md5.update(bytes)
                        sha1.update(bytes)
                        sha256.update(bytes)
                        sha512.update(bytes)
                self.send(Message(MessageHeaders.URI_DONE, (
                    ('Filename', filename),
                    ('Last-Modified', s3_response['LastModified'].isoformat()),
                    ('MD5-Hash', md5.hexdigest()),
                    ('MD5Sum-Hash', md5.hexdigest()),
                    ('SHA1-Hash', sha1.hexdigest()),
                    ('SHA256-Hash', sha256.hexdigest()),
                    ('SHA512-Hash', sha512.hexdigest()),
                    ('Size', s3_response['ContentLength']),
                    ('URI', uri),
                )))
if __name__ == '__main__':
    # exit quietly on Ctrl-C instead of dumping a traceback
    def signal_handler(signal, frame):
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # speak the method protocol over this process's stdin/stdout
    pipes = Pipes(sys.stdin, sys.stdout)
    S3AptMethod(pipes).run()
|
class Solution:
    def successfulPairs(self, s: List[int], p: List[int], t: int) -> List[int]:
        """For each spell strength in s, count the potions in p whose product
        with that strength is at least t. Note: sorts p in place."""
        p.sort()
        total = len(p)
        result = []
        for strength in s:
            # smallest potion value reaching the threshold: ceil(t / strength)
            need = -(-t // strength)
            result.append(total - bisect.bisect_left(p, need))
        return result
# stack is a vector of pancakes
def pancake(stack, counter):
    """Recursively count the flips needed until every pancake shows '+'.
    `stack` is a list of '+'/'-' characters; `counter` accumulates flips."""
    if not stack:
        return counter
    if stack[-1] == "+":
        # bottom pancake already happy -- drop it and keep going
        return pancake(stack[:-1], counter)
    if stack[0] == "+":
        # count the leading run of '+' and turn it face-down (one flip)
        run = 0
        for face in stack:
            if face != "+":
                break
            run += 1
        stack[:run] = ["-"] * run
    else:
        # whole top is face-down: flip the entire stack
        stack = flip(stack)
    counter += 1
    return pancake(stack, counter)
def flip(stack):
    """Flip the whole stack in place: reverse the pancake order and invert
    each face via reverse(). Returns the same (mutated) list."""
    # Fix: use floor division so the midpoint stays an int on Python 3
    # (the original `(len(stack) + 1) / 2` is a float there, a TypeError in
    # range); range() replaces py2-only xrange and behaves identically here.
    for i in range((len(stack) + 1) // 2):
        stack[i], stack[-(i + 1)] = reverse(stack[-(i + 1)]), reverse(stack[i])
    return stack
def reverse(elem):
    """Return the opposite pancake face ('+' <-> '-'); None for anything else."""
    return {"+": "-", "-": "+"}.get(elem)
def main():
    # Code-Jam-style driver: first line is the number of test cases, each
    # following line is one pancake stack of '+'/'-' characters.
    # NOTE(review): xrange/raw_input make this entry point Python-2 only.
    t = int(input())
    for i in xrange(1, t + 1):
        sequence = raw_input()
        stack = list(sequence)
        print("Case #{}: {}".format(i, pancake(stack, 0)))
main()
# print pancake(list("-+-+-+----"), 0)
# print flip(list("+-"))
|
# coding:utf-8
from __future__ import unicode_literals
from django.db import models
from company.models import Organization, Staff
import os
# Create your models here.
class Pro(models.Model):
    """
    Description: a project (项目)
    """
    # owning company/organization
    org = models.ForeignKey(Organization, verbose_name='公司')
    # project number/identifier
    pro_name = models.CharField(max_length=50, verbose_name='项目编号')
    class Meta:
        verbose_name='项目'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        return self.pro_name
class Actor(models.Model):
    """
    Description: project participants (项目参与者)
    """
    pro = models.ForeignKey(Pro, verbose_name='项目')
    pro_actors = models.ManyToManyField(Staff, verbose_name='项目参与者')
    class Meta:
        verbose_name='项目参与者'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        # Fix: the original never set its first-iteration flag to False, so
        # the comma separator between participant names was never emitted;
        # a join produces the intended comma-separated list directly.
        names = ",".join(act.staff_name for act in self.pro_actors.all())
        return "项目名称 : " + self.pro.pro_name + " > 项目参与者 [ " + names + " ]"
class ProFlowBlank(models.Model):
    """
    Description: project flow block, e.g. intent, design, construction,
    after-sales service (项目流程块)
    """
    pro = models.ForeignKey(Pro, verbose_name='项目')
    pro_flow_name = models.CharField(max_length=50, verbose_name='项目流程块')
    class Meta:
        verbose_name='项目流程块'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        # Fix: the original referenced bare names pro / pro_flow_name, which
        # raised NameError at render time; they are instance attributes.
        return self.pro.pro_name + " > " + self.pro_flow_name
class PlanFlow(models.Model):
    """
    Description: planned schedule of a project flow block (项目计划时间)
    """
    pro_flow_blank = models.ForeignKey(ProFlowBlank, verbose_name='项目流程块')
    pre_st_date = models.DateField(auto_now_add=False ,null=False, blank=False, verbose_name='项目预计开始时间')
    pre_ed_date = models.DateField(auto_now_add=False ,null=False, blank=False, verbose_name='项目预计结束时间')
    class Meta:
        verbose_name='项目计划时间'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        # Fix: the original referenced bare names (NameError) and concatenated
        # date objects onto str (TypeError); format handles both.
        return "{0} > [ 预计开始时间:{1}, 预计结束时间:{2} ]".format(
            self.pro_flow_blank.pro.pro_name, self.pre_st_date, self.pre_ed_date)
class Actual(models.Model):
    """
    Description: actual schedule of a project flow block (项目实际时间)
    """
    pro_flow_blank = models.ForeignKey(ProFlowBlank, verbose_name='项目流程块')
    act_st_date = models.DateField(auto_now_add=False ,null=False, blank=False, verbose_name='项目实际开始时间')
    act_ed_date = models.DateField(auto_now_add=False ,null=False, blank=False, verbose_name='项目实际结束时间')
    class Meta:
        verbose_name='项目实际时间'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        # Fix: the original referenced pre_st_date/pre_ed_date, which do not
        # exist on this model (copy-paste from PlanFlow) and were bare names;
        # use this model's own act_* fields via self, formatted to str.
        return "{0} > [ 实际开始时间:{1}, 实际结束时间:{2} ]".format(
            self.pro_flow_blank.pro.pro_name, self.act_st_date, self.act_ed_date)
class Action(models.Model):
    """
    Description: an action under a flow block, e.g. site survey, design
    proposal, renovation budget quotation (行为)
    """
    action_types = (
        ('0', '一般行为'),# plain string
        ('1', '设计图'),# path to the drawing
        ('3', '报价'),# quotation number
        ('4', '合同'),# contract number
    )
    pro_flow_blank = models.ForeignKey(ProFlowBlank, verbose_name='项目流程块')
    action_content = models.CharField(max_length=50, verbose_name='行为')
    action_type = models.CharField(choices=action_types, max_length=20, verbose_name='行为类型')
    class Meta:
        verbose_name='行为'
        verbose_name_plural=verbose_name
    def __unicode__(self):
        # Fix: the original concatenated pro_flow_name with itself; the second
        # part should identify this action -- presumably action_content (the
        # only descriptive field on this model)
        return self.pro_flow_blank.pro_flow_name + " > " + self.action_content
class QuotationTemp(models.Model):
    """
    Description: quotation template (报价单模板) -- placeholder, no fields yet
    """
    class Meta:
        pass
class Quotation(models.Model):
    """
    Description: quotation (报价单) -- placeholder, no fields yet
    """
    class Meta:
        pass
'''
class Pic(models.Model):
"""
Description: 图类
"""
pro_flow_blank = models.ForeignKey(ProFlowBlank, verbose_name='项目流程块')
# 公司/项目/
img = models.ImageField(upload_to='%s/%s/', \
blank=False, null=False, verbose_name='设计图')
is_confirm = models.BooleanField(default=False, verbose_name='确认信息')
pic_date_publish = models.DateField(auto_now_add=True ,null=False, blank=False, verbose_name='设计图发布时间')
class Meta:
verbose_name='设计图'
verbose_name_plural=verbose_name
def __unicode__(self):
return pro_flow_blank.pro_flow_name + " > " + img + " : " + pic_date_publish
class Process(models.Model):
"""
Description: 工序定义,用于定义施工内容
"""
org = models.ForeignKey(Organization, verbose_name='公司')
process_name = models.CharField(max_length=20, primary_key=True, verbose_name='工序名称')
class Meta:
verbose_name='工序'
verbose_name_plural=verbose_name
def __unicode__(self):
return self.process_name
class Subitem(models.Model):
"""
Description: 子项目,属于施工类型
"""
pro_flow_blank = models.ForeignKey(ProFlowBlank, verbose_name='项目流程块')
processes = models.ManyToManyField(Process, verbose_name='工序')
con_st_date = models.DateField(auto_now_add=True ,null=False, blank=False, verbose_name='子项目开始时间')
class Meta:
verbose_name='子项目'
verbose_name_plural=verbose_name
'''
# class ProTemplate(models.Model):
# """
# Description: 项目模板
# """
# class Meta:
# pass |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module: day21.py
Author: zlamberty
Created: 2016-12-02
Description:
day 21 puzzles for the advent of code (adventofcode.com/2016/day/21)
Usage:
<usage>
"""
import itertools
import os
import re
import eri.logging as logging
# ----------------------------- #
# Module Constants #
# ----------------------------- #
# worked example from the puzzle statement ('abcde' scrambles to 'decab')
TEST_DATA = [
    'abcde',
    [
        'swap position 4 with position 0',
        'swap letter d with letter b',
        'reverse positions 0 through 4',
        'rotate left 1 step',
        'move position 1 to position 4',
        'move position 3 to position 0',
        'rotate based on position of letter b',
        'rotate based on position of letter d',
    ]
]
# default location of the real puzzle input
FNAME = os.path.join('data', 'day21.txt')
logger = logging.getLogger('day21')
logging.configure()
# ----------------------------- #
# Main routine #
# ----------------------------- #
def load_data(fname=FNAME):
    """Return the puzzle's starting password plus the instruction lines read
    from fname (whitespace-stripped)."""
    with open(fname, 'r') as f:
        instructions = [raw.strip() for raw in f]
    return 'abcdefgh', instructions
def rotate(s, i, direction):
    """Rotate string s by i positions to the 'left' or 'right'."""
    shift = -i if direction == 'right' else i
    return s[shift:] + s[:shift]
def apply_instruction(s, instruction, reverse=False):
    # Apply one scrambling instruction to s and return the new string.
    # With reverse=True only the rotation instructions are inverted; see the
    # NOTE below about positional rotation.
    m = re.match('swap position (\d+) with position (\d+)', instruction)
    if m:
        i, j = map(int, m.groups())
        s = list(s)
        s[i], s[j] = s[j], s[i]
        return ''.join(s)
    m = re.match('swap letter ([a-z]+) with letter ([a-z]+)', instruction)
    if m:
        s0, s1 = m.groups()
        # three-way replace through a sentinel character
        return s.replace(s0, '^').replace(s1, s0).replace('^', s1)
    m = re.match('rotate (left|right) (\d+) steps?', instruction)
    if m:
        direction, i = m.groups()
        i = int(i)
        if reverse:
            # undoing a fixed rotation just rotates the other way
            direction = 'right' if direction == 'left' else 'left'
        return rotate(s, i, direction)
    m = re.match('rotate based on position of letter ([a-z]+)', instruction)
    if m:
        s0 = m.groups()[0]
        i = s.find(s0)
        # rotate right by 1 + index, plus one more when the index is >= 4
        if i >= 4:
            i += 1
        i += 1
        i %= len(s)
        # NOTE(review): for reverse this uses the letter's position in the
        # already-scrambled string, which is not a true inverse in general --
        # q_2 brute-forces permutations rather than relying on it
        return rotate(s, i, 'right' if not reverse else 'left')
    m = re.match('reverse positions (\d+) through (\d+)', instruction)
    if m:
        # both endpoints are inclusive
        i, j = map(int, m.groups())
        return s[:i] + ''.join(reversed(s[i: j + 1])) + s[j + 1:]
    m = re.match('move position (\d+) to position (\d+)', instruction)
    if m:
        i, j = map(int, m.groups())
        s = list(s)
        si = s.pop(i)
        s.insert(j, si)
        return ''.join(s)
    raise ValueError("couldn't parse instruction {}".format(instruction))
def q_1(data, reverse=False):
    """Apply every instruction in order to the starting string and return
    the final string (pass reverse=True to invert the rotations)."""
    current, instructions = data
    logger.debug(current)
    for step in instructions:
        logger.debug(step)
        current = apply_instruction(current, step, reverse)
        logger.debug(current)
    return current
def test_q_1():
    # example from the puzzle statement: 'abcde' scrambles to 'decab'
    assert q_1(TEST_DATA) == 'decab'
def q_2(data, target='fbgdceah'):
    # Brute-force unscramble: try every permutation of target, collect the
    # ones that scramble to it, and return the alphabetically first match.
    answers = []
    s, instructions = data
    for perm in itertools.permutations(target):
        s = ''.join(perm)
        logger.info('trying string {}'.format(s))
        if q_1((s, instructions)) == target:
            answers.append(s)
    # NOTE(review): raises IndexError when no permutation matches
    return sorted(answers)[0]
def test_q_2():
    # unscrambling the part-1 example result should recover the start string
    assert q_2(TEST_DATA, 'decab') == 'abcde'
# ----------------------------- #
# Command line #
# ----------------------------- #
if __name__ == '__main__':
    logger.warning('day 21')
    data = load_data()
    # part 1 scrambles the start string; part 2 unscrambles the target
    logger.info('question 1 answer: {}'.format(q_1(data)))
    logger.info('question 2 answer: {}'.format(q_2(data)))
|
myNumbers = [23,234,345,4356234,243,43,56,2]
#Your code go here:
def increment_by_one(the_number):
    """Return the_number tripled.
    NOTE(review): despite the name, this multiplies by 3 (kept unchanged
    because callers below rely on the current name and behaviour).
    """
    return 3 * the_number
# map() returns a lazy iterator in Python 3; this one is never consumed
new_list = map(increment_by_one, myNumbers)
# materialise the mapped values so they can be printed
result = list(map(increment_by_one, myNumbers))
#print(new_list)
print(result)
# increment_by_one(myNumbers)
## ======== CODE FROM PREVIOUS EXERCISE ========= ##
# def fahrenheit_values(Celsius_values):
# # the magic go here:
# fahrenheit_values = []
# # print(fahrenheit_values)
# return (Celsius_values * 1.8) + 32
# result = list(map(fahrenheit_values, Celsius_values))
# print(result) |
from mentor.questionaire.tests import UserLogin, AdminLogin
from mentor.questionaire.forms import QuestionaireForm, DownloadResponseForm
from datetime import date, timedelta
from mentor.users.models import User
from mentor.questionaire.models import Questionaire
class QuestionaireFormTest(UserLogin):
    # Form-level tests for QuestionaireForm, run with a logged-in user.
    def test_at_lest_one_follow_up_method(self):
        # a submission without any follow-up contact field must not validate
        data = {
            'student_name' : 'Student Name',
            'identity' : 'ST',
            'primary_concern' : 'My primary concern',
            'step_taken' : 'My steps taken',
            'support_from_MAPS' : 'Support from MAPS',
        }
        form = QuestionaireForm(data)
        self.assertFalse(form.is_valid())
    def test_save(self):
        # a fully-populated form saves a Questionaire linked to the user
        data = {
            "name": "Student Name",
            "identity": "ST",
            "mentor_name": "John",
            "UNST_course": "FRINQ",
            "type_of_course": "HB",
            "primary_concern": ["1", "2"],
            "when_take_step": "In the past few days",
            "follow_up_email": "mdj2@pdx.edu",
            "follow_up_phone_0": "",
            "follow_up_phone_1": "",
            "follow_up_phone_2": "",
        }
        form = QuestionaireForm(data)
        form.save(self.user)
        self.assertTrue(Questionaire.objects.filter(user=self.user))
class DownloadResponseFormTest(AdminLogin):
    # Validation tests for DownloadResponseForm, run as an admin user.
    def test_clean(self):
        # an end date before the start date must not validate
        data = {
            'start_date' : date.today(),
            'end_date' : date.today() - timedelta(days=1),
        }
        form = DownloadResponseForm(data)
        self.assertFalse(form.is_valid())
    def test_empty_date(self):
        # both dates are required
        data = {}
        form = DownloadResponseForm(data)
        self.assertFalse(form.is_valid())
|
'''
TEMPLATE for creating your own Agent to compete in
'Dungeons and Data Structures' at the Coder One AI Sports Challenge 2020.
For more info and resources, check out: https://bit.ly/aisportschallenge
BIO:
<Tell us about your Agent here>
'''
# import any external packages by un-commenting them
# if you'd like to test / request any additional packages - please check with the Coder One team
import random
# import time
# import numpy as np
# import pandas as pd
# import sklearn
class Agent:
    """Minimal example agent: ignores the game state and plays randomly."""
    def __init__(self):
        """No initialisation needed for this random agent."""
        pass
    def next_move(self, game_state, player_state):
        """Return one action chosen uniformly at random.
        Valid actions: '' (no-op), 'u'/'d'/'l'/'r' (moves), 'p' (place bomb).
        """
        ###### CODE HERE ######
        possible_actions = ['', 'u', 'd', 'l', 'r', 'p']
        chosen = random.choice(possible_actions)
        ###### END CODE ######
        return chosen
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from neomodel import db
from TestModel.models import person
# 数据库操作
def testdb(request):
    # Django view: run a Cypher query against neo4j and report success.
    # NOTE(review): `print results` below is Python-2 syntax -- py2-only module.
    #p1 = Person(id=7).save()
    '''
    p2 = Person.nodes.get(id=2)
    p4 = Person.nodes.get(id=4)
    print p2, p4
    p2.knowing_p.connect = p4
    if p2.knowing_p.is_connected(p4):
        print("Jim's from Germany")
    '''
    # count every node in the graph (the shortest-path query is kept disabled)
    query = 'match (n) return count(n)'
    #query = "match (s),(d) where s.id = {id1} and d.id = {id2} match p=shortestPath((s)-[*..]->(d)) RETURN p;"
    params = {"id1": 1, "id2": 4}
    results, meta = db.cypher_query(query, params)
    print results
    #print type(results[0][0])
    #print (results[0][0]).nodes, (results[0][0]).relationships
    #path =''
    #for node in results[0][0].nodes:
    #    path += (str)(node.properties['id'])+'->'
    # `nodes` holds every node on the shortest path; the disabled code would
    # serialise them to JSON so the client can render the route visually
    #people = [Person.inflate(row[0]) for row in results]
    return HttpResponse("<p>"+"success!"+"</p>")
import unittest
from adaptor.dleq import *
class TestsDLEQ(unittest.TestCase):
    # Sanity test for the DLEQ (discrete-log-equality) proof helpers.
    def test_dleq(self):
        # prove and verify that X = x*G and R = x*Y share the same exponent x
        x = 10
        y = 14
        Y = y * G
        X = x * G
        R = x * Y
        proof = dleq_prove(x, X, Y, R)
        self.assertTrue(dleq_verify(X, Y, R, proof))
|
import sys
sys.stdin = open("D3_3809_input.txt", "r")
# 1. runtime error
# T = int(input())
# for test_case in range(T):
# N = int(input())
# data = []
# if N < 20:
# data = list(map(int, input().split()))
# else:
# ans = [list(map(int, input().split())) for _ in range(N // 20)]
# for i in ans:
# for j in range(len(i)):
# data.append(i[j])
# number10 = []
# for i in range(10):
# number10.append(i)
# for i in range(len(data)): # 1의 자리
# if data[i] in number10:
# number10.remove(data[i])
# if len(number10) != 0:
# print("#{} {}".format(test_case + 1, number10[0]))
# continue
# number100 = []
# for i in range(10, 100):
# number100.append(i)
# for i in range(len(data) - 1):
# if (data[i] * 10 + data[i + 1]) in number100:
# number100.remove(data[i] * 10 + data[i + 1])
# if len(number100) != 0:
# print("#{} {}".format(test_case + 1, number100[0]))
# continue
# number1000 = []
# for i in range(100, 1000):
# number1000.append(i)
# for i in range(len(data) - 2):
# if (data[i] * 10 + data[i + 1] + data[i + 2] * 100) in number1000:
# number1000.remove(data[i] * 10 + data[i + 1] + data[i + 2] * 100)
# if len(number1000) != 0:
# print("#{} {}".format(test_case + 1, number1000[0]))
# continue
T = int(input())
for test_case in range(T):
    N = int(input())
    number, mat, data = "", 0, []
    # the N digits may arrive split over several input lines
    while len(data) != N:
        data.extend(input().split())
    # concatenate all digits into one string
    for i in data:
        number += i
    # find the smallest non-negative integer whose decimal representation
    # does not occur as a substring of the digit string
    while str(mat) in number:
        mat += 1
    print("#{} {}".format(test_case + 1, mat))
def total_cost(calls):
    """Return the total cost of the calls.
    Each call is a 'date time seconds' string; seconds are billed as whole
    minutes (rounded up) per day: 1 per minute up to 100 minutes in a day,
    then 2 per minute beyond that.
    """
    # minutes_per_day: date -> billed minutes (avoids shadowing builtin `dict`)
    minutes_per_day = {}
    for call in calls:
        date, _, length = call.split(" ")
        # dict.get with a default emulates Counter behaviour for plain dicts
        minutes_per_day[date] = minutes_per_day.get(date, 0) + ceil(int(length) / 60)
    # Fix: the original returned `...` (Ellipsis); compute the billed total,
    # doubling every minute beyond the first 100 of a single day
    return sum(
        (mins - 100) * 2 + 100 if mins > 100 else mins
        for mins in minutes_per_day.values()
    )
# 第一种做法反思
# 使用Counter是非常优雅的写法
# 除此之外,普通的字典dict也可以达到相同的作用
# 原理:dict类型赋值的几种情况
# 情况1:key已经存在,则以下两种方式都可行
# dict[key] = 1
# dict[key] += 1
# 情况2:key不存在,则第一种方式可行,第二种方式不可行,会报 keyerror
# dict[key] = 1
# dict[key] += 1
# 有没有一种方法让普通的dict 实现和Counter一样的功能呢?即即便key不存在也可以使用增量赋值或者相似的赋值
# 答案是 dict.get
# dict.get(key, default=None) :
# key : 要查找的键
# 如果指定键的值不存在,就返回该默认值。(注意函数的返回值是这个默认值,而不是字典)
# 因此,实现Counter功能的语句是
# dict[key] = dict.get(key,0) + value
from math import ceil
from collections import Counter
def total_cost(calls):
    """Return the total cost of the calls (Counter-based variant).
    Each call is a 'date time seconds' string; seconds are billed as whole
    minutes (rounded up) per day: 1 per minute up to 100 minutes in a day,
    then 2 per minute beyond that.
    """
    db = Counter()
    for call in calls:
        date, time, length = call.split(" ")
        # Fix: length is a string -- convert before dividing (was a TypeError)
        db[date] += ceil(int(length) / 60)
    # Fix: the original conditional expression lacked an `else` clause
    # (`x if cond or i` is a SyntaxError); bill overage minutes at double rate
    return sum((i - 100) * 2 + 100 if i > 100 else i for i in db.values())
# 原始做法反思
# 总想把全部对象呈现出来,所以才会做 info,date,length 这些list
# 实际上可以在动态的过程中全部完成
# 代码的关键是使用 set 得到不重复的日期,然后用它创建空白字典
# 最后用列表中的值对应地去修改空白字典
# 这样做的麻烦之处在于,对于一个字典来讲,dict[key not happen before] 是会报错的
# 因此才需要使用 set 事先将所有的 key都准备好
# 使用 Counter 可以避免这个问题
# def total_cost(calls):
# info = [i.split(" ") for i in calls]
# date, length = [i[0] for i in info], [ceil(int(i[2])/60) for i in info]
#
# date_set = set(date)
# dict = {i:0 for i in date_set}
#
# for i in range(len(date)):
# dict[date[i]] += length[i]
#
# return sum([(mins-100)*2+100 if mins>100 else mins for mins in dict.values()])
if __name__ == '__main__':
    # These asserts are only for self-checking and are not necessary for auto-testing
    assert total_cost(("2014-01-01 01:12:13 181",
                       "2014-01-02 20:11:10 600",
                       "2014-01-03 01:12:13 6009",
                       "2014-01-03 12:13:55 200")) == 124, "Base example"
    assert total_cost(("2014-02-05 01:00:00 1",
                       "2014-02-05 02:00:00 1",
                       "2014-02-05 03:00:00 1",
                       "2014-02-05 04:00:00 1")) == 4, "Short calls but money..."
    assert total_cost(("2014-02-05 01:00:00 60",
                       "2014-02-05 02:00:00 60",
                       "2014-02-05 03:00:00 60",
                       "2014-02-05 04:00:00 6000")) == 106, "Precise calls"
|
import numpy as np
from collections import defaultdict
from sklearn.metrics import classification_report, accuracy_score
from tqdm import tqdm
"""
NLPtutorial2020のDockerfile, requirement.txt, Makefileを使用
make docker-run FILE_NAME=./tutorial07/tutorial07.py
"""
class NN():
    """Feed-forward neural network (tanh activations throughout) for binary
    text classification over bag-of-words features, trained with per-example SGD."""
    def __init__(self, λ=0.1, node=2, layer=1) -> None:
        # λ: learning rate used in update_weights
        self.λ = λ
        self.ids = defaultdict(lambda: len(self.ids)) # {word: id}, ids auto-assigned
        self.node = node        # hidden-layer width
        self.layer = layer      # number of hidden layers
        self.network = []       # list of (weight matrix, bias vector) per layer
        self.feat_lab = []      # list of [feature vector, label] training pairs
    def load_file(self, file):
        """Read 'label<TAB>sentence' lines: first build the vocabulary, then
        the (features, label) training pairs. Returns self."""
        with open(file, "r", encoding="utf-8") as f:
            lines = f.readlines()
            # first pass registers every word so the feature size is fixed
            for line in lines:
                _, sentence = line.split("\t")
                for word in sentence.split():
                    self.ids[word]
            # second pass builds feature vectors with the complete vocabulary
            for line in lines:
                label, sentence = line.split("\t")
                label = int(label)
                words = sentence.split()
                self.feat_lab.append([self.create_features(words), label])
        return self
    def init_network(self):
        """Initialise every layer's weights and biases uniformly in [-0.1, 0.1)."""
        # input layer: (number of nodes, number of features)
        w_in = np.random.rand(self.node, len(self.ids)) / 5 - 0.1
        b_in = np.random.rand(self.node) / 5 - 0.1
        self.network.append((w_in, b_in))
        # hidden layers (one weight pair per gap between hidden layers)
        for _ in range(self.layer - 1):
            w = np.random.rand(self.node, self.node) / 5 - 0.1
            b = np.random.rand(self.node) / 5 - 0.1
            self.network.append((w, b))
        # output layer: a single score
        w_out = np.random.rand(1, self.node) / 5 - 0.1
        b_out = np.random.rand(1) / 5 - 0.1
        self.network.append((w_out, b_out))
    def create_features(self, words, is_train=True):
        """Return a bag-of-words count vector; at test time (is_train=False)
        unknown words are skipped instead of growing the vocabulary."""
        phi = [0 for _ in range(len(self.ids))]
        if is_train:
            for word in words:
                phi[self.ids[word]] += 1
        else:
            for word in words:
                if word in self.ids:
                    phi[self.ids[word]] += 1
        return phi # index = the word's id from self.ids
    def forward(self, phi_0):
        """Forward pass: return the activations of every layer (phi[0] = input)."""
        phi = [0 for _ in range(len(self.network)+1)]
        phi[0] = phi_0
        for i_net in range(len(self.network)):
            w, b = self.network[i_net]
            phi[i_net+1] = np.tanh(np.dot(w, phi[i_net]) + b)
        return phi
    def backward(self, phi, y_d):
        """Backpropagate the error for target y_d; return the per-layer deltas
        already multiplied by the tanh derivative (1 - phi**2)."""
        J = len(self.network)
        delta = np.zeros(J+1, dtype=np.ndarray)
        delta[-1] = np.array([float(y_d) - phi[J]])
        delta_d = np.zeros(J+1, dtype=np.ndarray)
        for i in reversed(range(J)):
            delta_d[i+1] = delta[i+1] * (1 - phi[i+1] ** 2)
            w, _ = self.network[i]
            delta[i] = np.dot(delta_d[i+1], w)
        return delta_d
    def update_weights(self, phi, delta_d):
        """One SGD step: nudge each layer's weights/biases by λ times the gradient."""
        for i in range(len(self.network)):
            w, b = self.network[i]
            # += mutates the arrays in place, so self.network is updated too
            w += self.λ * np.outer(delta_d[i+1], phi[i])
            b += self.λ * delta_d[i+1][0]
        return self
    def train(self, iter=3):
        """Run `iter` epochs of per-example SGD over the loaded training data."""
        for i in range(iter):
            # feat_lab: [[feature vector per sentence, label], ...]
            for phi_0, label in tqdm(self.feat_lab):
                phi = self.forward(phi_0)
                delta_d = self.backward(phi, label)
                self.update_weights(phi, delta_d)
    def predict_one(self, phi_0):
        """Forward pass that returns only the final scalar score."""
        phis = [phi_0]
        for i in range(len(self.network)):
            w, b = self.network[i]
            phis.append(np.tanh(np.dot(w, phis[i]) + b))
        return phis[len(self.network)][0]
    def test(self, sentence):
        """Classify one raw sentence: +1 when the score is positive, else -1."""
        phi0 = self.create_features(sentence.rstrip(), False)
        score = self.predict_one(phi0)
        return 1 if score > 0 else -1
def check_score(gold_file, pred, detail=False):
    """Compare predictions against the gold labels in `gold_file`.

    gold_file: TSV whose first column is the integer gold label.
    pred:      predicted labels, aligned with the file's lines.
    detail:    if True, also print a per-class classification report.
    """
    with open(gold_file, mode='r', encoding='utf-8') as f:
        gold = np.array([int(row.split("\t")[0]) for row in f])
    predicted = np.array(pred)
    if detail:
        print(classification_report(gold, predicted))
    print(f"Accuracy: {accuracy_score(gold, predicted)}")
if __name__ == "__main__":
    # train_file = "data/03-train-input.txt"
    train_file = "data/titles-en-train.labeled"
    test_file = "data/titles-en-test.word"
    # sweep the number of hidden nodes: 1, 3, 5, 7
    for node in range(1,8,2):
        model = NN(node=node)
        print("Loading...")
        model.load_file(train_file)
        print("Initializing network...")
        model.init_network()
        print("Training...")
        model.train()
        # classify every test sentence and collect the +/-1 predictions
        ans = []
        with open(test_file) as f:
            for sentence in f:
                pred = model.test(sentence)
                ans.append(pred)
        print("NODE: ", node)
        # evaluate against the gold labels with a detailed report
        check_score("data/titles-en-test.labeled", ans, True)
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 18 10:33:33 2020
@author: Administrator
"""
# Monthly base-station outage analysis: computes mean outage durations and
# responsibility shares (tower / main equipment / optical cable), split by
# station class (A/B vs C/D).
import os
import pandas as pd
# Working directory containing the outage spreadsheets.
path = r'D:\2020年工作\2020年维护指标年底收官\退服详单'
os.chdir(path)
# Site mapping sheet; keep only sites that declined tower power service.
df_not_choose = pd.read_excel('物理站址与铁塔站址对应关系信息表 (2020.10.14改)待确认.xlsx', sheet_name='曲靖')
df_not_choose = df_not_choose[df_not_choose['是否选择铁塔发电服务(2019年与铁塔签订,现实施中)']=='否']
list_not_choose = list(df_not_choose['铁塔站址编码'])
# Fault details; keep LTE faults at sites not in the excluded list.
df = pd.read_excel('曲靖2020年9月故障明细记录表(合并).xlsx', sheet_name='基站故障明细表')
df = df[(df['3G/LTE'].str.contains('LTE'))&(~df['站址编码'].isin(list_not_choose))]
# Fill missing outage durations with the column mean.
# NOTE(review): fillna(inplace=True) on a column of a filtered frame can
# trigger pandas' chained-assignment warning — confirm it takes effect here.
df['故障中断历时(分钟)'].fillna(df['故障中断历时(分钟)'].mean(), inplace=True)
# Split by station class: A/B vs C/D.
df_ab = df[(df['基站等级'] == 'A类站') | (df['基站等级'] == 'B类站')]
df_cd = df[(df['基站等级'] == 'C类站') | (df['基站等级'] == 'D类站')]
# Mean outage duration per class group.
avg_ab = df_ab['故障中断历时(分钟)'].mean()
avg_cd = df_cd['故障中断历时(分钟)'].mean()
# Partition faults by responsible party.
df_tower = df[(df['责任判断'] == '铁塔责任') & (df['3G/LTE'].str.contains('LTE'))]
df_equipment = df[(df['责任判断'] == '主设备责任') & (df['3G/LTE'].str.contains('LTE'))]
df_optical = df[(df['责任判断'] == '光缆组责任') & (df['3G/LTE'].str.contains('LTE'))]
# Tower responsibility: share of total outage minutes and mean duration.
df_tower_ab = df_tower[(df_tower['基站等级'] == 'A类站') | (df_tower['基站等级'] == 'B类站')]
df_tower_cd = df_tower[(df_tower['基站等级'] == 'C类站') | (df_tower['基站等级'] == 'D类站')]
tower_percentage_ab = sum(df_tower_ab['故障中断历时(分钟)']) / sum(df_ab['故障中断历时(分钟)'])
tower_ab = df_tower_ab['故障中断历时(分钟)'].mean()
tower_percentage_cd = sum(df_tower_cd['故障中断历时(分钟)']) / sum(df_cd['故障中断历时(分钟)'])
tower_cd = df_tower_cd['故障中断历时(分钟)'].mean()
# Optical-cable responsibility: same metrics.
df_optical_ab = df_optical[(df_optical['基站等级'] == 'A类站') | (df_optical['基站等级'] == 'B类站')]
df_optical_cd = df_optical[(df_optical['基站等级'] == 'C类站') | (df_optical['基站等级'] == 'D类站')]
optical_percentage_ab = sum(df_optical_ab['故障中断历时(分钟)']) / sum(df_ab['故障中断历时(分钟)'])
optical_ab = df_optical_ab['故障中断历时(分钟)'].mean()
optical_percentage_cd = sum(df_optical_cd['故障中断历时(分钟)']) / sum(df_cd['故障中断历时(分钟)'])
optical_cd = df_optical_cd['故障中断历时(分钟)'].mean()
# Main-equipment responsibility: same metrics.
df_equipment_ab = df_equipment[(df_equipment['基站等级'] == 'A类站') | (df_equipment['基站等级'] == 'B类站')]
df_equipment_cd = df_equipment[(df_equipment['基站等级'] == 'C类站') | (df_equipment['基站等级'] == 'D类站')]
equipment_percentage_ab = sum(df_equipment_ab['故障中断历时(分钟)']) / sum(df_ab['故障中断历时(分钟)'])
equipment_ab = df_equipment_ab['故障中断历时(分钟)'].mean()
equipment_percentage_cd = sum(df_equipment_cd['故障中断历时(分钟)']) / sum(df_cd['故障中断历时(分钟)'])
equipment_cd = df_equipment_cd['故障中断历时(分钟)'].mean()
# Goldbach's Conjecture
# @Author: Gavin Moore
# 3/4/2021
# v1.0
# Description: Given between 1 and 100 even integers, each between 4 and 32,000, determine the number of ways
# each can be represented as a sum of two prime numbers. Output the number of representations, and each representation.
# Problem obtained from https://open.kattis.com/problems/goldbach2
#
# Expected input format:
# n
# x1
# x2
# ....
# xn
#
# Sample input:
# 3
# 4
# 26
# 100
#
# Sample output:
# 4 has 1 representation(s)
# 2+2
#
# 26 has 3 representation(s)
# 3+23
# 7+19
# 13+13
#
# 100 has 6 representation(s)
# 3+97
# 11+89
# 17+83
# 29+71
# 41+59
# 47+53
#
# Methodology:
# - Get input
# - Find all prime numbers between 1 and x - 1, where x is the highest input value.
# - Store these in both a list and a set.
# - For each element y in the list, check if x - y is in the set. If it is, store x and y in a solutions list.
#
# Time efficiency: ~O( (n^(3/2) / log(n) )
import math
import sys
from time import perf_counter
from typing import List
def main():
    """Read the target numbers, then print every Goldbach representation of each."""
    nums = get_user_input()
    start_time = perf_counter()
    # Primes up to the largest target: a list for ordered iteration...
    prime_list = sieve_of_atkin(max(nums))
    # ...and a set for O(1) membership tests.
    set_primes = set(prime_list)
    for num in nums:
        print_solution(num, find_goldbachs_solutions(num, prime_list, set_primes))
    print("Time: " + str(perf_counter() - start_time))
def print_solution(num, solutions):
    """Print `num`'s representation count, then one representation per line.

    :param num: The number that has been solved.
    :param solutions: The valid "p1+p2" representation strings.
    """
    print(str(num) + " has " + str(len(solutions)) + " representation(s)")
    for rep in solutions:
        print(rep)
    print()
def get_user_input() -> List[int]:
    """Read the problem input from stdin.

    Expected format: a count line, followed by that many lines each
    holding one integer. Returns the integers as a list.
    """
    count = get_ints()[0]
    return [get_ints()[0] for _ in range(count)]
def get_ints() -> List[int]:
    """Parse one stdin line of whitespace-separated integers into a list."""
    # str.split() with no argument already ignores surrounding whitespace
    return [int(tok) for tok in sys.stdin.readline().split()]
def sieve_of_atkin(limit):
    """
    Generate all prime numbers up to and including `limit`.

    Runs the Sieve of Atkin: candidacy is toggled by counting solutions
    of three quadratic forms modulo 12, then multiples of squared primes
    are removed.

    :param limit: Inclusive upper bound for the primes returned.
    :return: A sorted list of primes <= limit.
    """
    primes = []
    # 2 and 3 are not produced by the quadratic forms; seed them explicitly.
    # BUG FIX: the original used strict `>` and dropped 2 (resp. 3) when
    # limit was exactly 2 (resp. 3).
    if limit >= 2:
        primes.append(2)
    if limit >= 3:
        primes.append(3)
    # Candidate flags, index == number (sieve[limit] is valid).
    sieve = [False] * (limit + 1)
    # Toggle n when it has an odd number of solutions to:
    # a) n = 4x^2 + y^2 with n % 12 in {1, 5}
    # b) n = 3x^2 + y^2 with n % 12 == 7
    # c) n = 3x^2 - y^2 with x > y and n % 12 == 11
    root_limit = math.ceil(math.sqrt(limit))
    for x in range(1, root_limit):
        for y in range(1, root_limit):
            n = (4 * x * x) + (y * y)
            if n <= limit and (n % 12 == 1 or n % 12 == 5):
                sieve[n] ^= True
            n = (3 * x * x) + (y * y)
            if n <= limit and n % 12 == 7:
                sieve[n] ^= True
            if x > y:
                n = (3 * x * x) - (y * y)
                if n <= limit and n % 12 == 11:
                    sieve[n] ^= True
    # Squares of primes (and their multiples) survive the forms; clear them.
    for n in range(5, root_limit + 1):
        if sieve[n]:
            x = 1
            while x * n * n <= limit:
                sieve[x * n * n] = False
                x += 1
    # Collect survivors. BUG FIX: the original range stopped at limit - 1
    # and silently dropped `limit` itself when it was prime.
    for candidate in range(5, limit + 1):
        if sieve[candidate]:
            primes.append(candidate)
    return primes
def find_goldbachs_solutions(num, list_of_primes, set_of_primes) -> List[str]:
    """
    Find every Goldbach representation p1 + p2 == num with p1 <= p2.

    :param num: The (even) number to decompose.
    :param list_of_primes: Ascending primes below the target number.
    :param set_of_primes: The same primes as a set for O(1) lookup.
    :return: Strings "p1+p2", ordered by the smaller prime.
    """
    solutions = []
    for small in list_of_primes:
        large = num - small
        if large < small:
            # Past the midpoint: any further pair would be a duplicate.
            break
        if large in set_of_primes:
            solutions.append(str(small) + '+' + str(large))
    return solutions
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
class Solution:
    """Count friend circles (connected components) in an adjacency matrix
    using union-find with path compression and union by rank."""

    def find(self, parent, i):
        """Return the root representative of i, compressing the path.

        Roots are marked with a parent value of -1.
        BUG FIX: the original returned `i` unconditionally, so unions were
        always performed on the raw indices and transitive merges (e.g.
        0-2 and 1-2) could end up counted as separate circles.
        """
        if parent[i] == -1:
            return i
        parent[i] = self.find(parent, parent[i])  # path compression
        return parent[i]

    def union(self, parent, rank, i, j):
        """Merge the sets containing i and j, attaching by rank."""
        p1 = self.find(parent, i)
        p2 = self.find(parent, j)
        if p1 == p2:
            return
        if rank[p1] > rank[p2]:
            parent[p2] = p1
        elif rank[p2] > rank[p1]:
            parent[p1] = p2
        else:
            parent[p2] = p1
            rank[p1] += 1

    def findCircleNum(self, m) -> int:
        """Return the number of friend circles in the matrix m."""
        if not m:
            return 0
        parent = [-1] * len(m)
        rank = [0] * len(m)
        for i in range(len(m)):
            for j in range(len(m)):
                if i != j and m[i][j] == 1:
                    self.union(parent, rank, i, j)
        # every remaining root still carries parent == -1; count them
        return len(list(filter(lambda x: x == -1, parent)))
sol = Solution()
# Everyone linked through person 1: expect a single friend circle.
arr1 = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
print(sol.findCircleNum(arr1))
# Persons 0 and 1 are friends; person 2 is alone: expect two circles.
arr2 = [[1, 1, 0], [1, 1, 0], [0, 0, 1]]
print(sol.findCircleNum(arr2))
|
import pandas as pd
import csv
class User:
    """A dining-app user loaded from one row of a CSV roster."""

    def __init__(self, index, file):
        """Load the user found at row `index` of the CSV `file`.

        Expected columns: Name, Sex, Age, Height, Weight, Vegetarian,
        Vegan, Pork.
        """
        # BUG FIX: the original opened the file in binary mode, fed it to
        # csv.reader, and then called .iloc (a pandas API) on the reader,
        # which raised AttributeError. Read it with pandas instead.
        info = pd.read_csv(file)
        row = info.iloc[index]
        self.name = row['Name']
        self.sex = row['Sex']
        self.age = row['Age']
        self.height = row['Height']
        self.weight = row['Weight']
        self.vegetarian = row['Vegetarian']
        self.vegan = row['Vegan']
        self.canEatPork = row['Pork']
        self.diningHall = 'Roma'  # default dining hall
    def returnName(self):
        return self.name
    def returnSex(self):
        return self.sex
    def returnAge(self):
        return self.age
    def returnHeight(self):
        return self.height
    def returnWeight(self):
        return self.weight
    def returnRestrictions(self):
        """Return [vegetarian, vegan, can eat pork] flags."""
        return [self.vegetarian, self.vegan, self.canEatPork]
    def returnDiningHall(self):
        return self.diningHall
    def changeDiningHall(self, dh):
        self.diningHall = dh
import requests
import json
import datetime
# NOTE(security): the API key is hard-coded below; move it to an
# environment variable or config file before sharing this script.
url = 'https://financialmodelingprep.com/api/v3/nasdaq_constituent?apikey=c1d5db3bf65299abe6068e556f5bed6e'
resp = requests.get(url=url)
list_stocks = resp.json()  # Check the JSON Response Content documentation below
print('Done, count:', len(list_stocks))
with open('list_stocks.json', 'w') as f:
    json.dump(list_stocks, f)
#Batch requests, e.g.:
#https://financialmodelingprep.com/api/v3/quote/AAPL,FB,GOOG?apikey=...
batch_url = 'https://financialmodelingprep.com/api/v3/quote/'
api_key = 'apikey=c1d5db3bf65299abe6068e556f5bed6e'
# join() instead of repeated string concatenation in a loop
batchList = ','.join(stock['symbol'] for stock in list_stocks)
batch_url = batch_url + batchList + '?' + api_key
batchResponse = requests.get(batch_url).json()
# Convert epoch timestamps to readable local-time strings in place.
for result in batchResponse:
    result["timestamp"] = datetime.datetime.fromtimestamp(result["timestamp"]).strftime('%Y-%m-%d %H:%M:%S')
with open('batchResponse.json', 'w') as f:
    json.dump(batchResponse, f)
"""
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
Redistribution of this software, without modification, must refer to the software by the same
designation. Redistribution of a modified version of this software (i) may not refer to the
modified version by the same designation, or by any confusingly similar designation, and
(ii) must refer to the underlying software originally provided by Alliance as “URBANopt”. Except
to comply with the foregoing, the term “URBANopt”, or any confusingly similar designation may
not be used to refer to any modified version of this software or any modified version of the
underlying software originally provided by Alliance without the prior written consent of Alliance.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import unittest
from geojson_modelica_translator.geojson.schemas import Schemas
class SchemasTest(unittest.TestCase):
    """Exercises Schemas retrieval and validation for the building schema."""
    def test_load_schemas(self):
        # Retrieving a known schema returns its parsed content.
        schemas = Schemas()
        self.assertEqual(schemas.retrieve("building")["title"], "URBANopt Building")
    def test_invalid_retrieve(self):
        # Unknown schema names raise with a descriptive message.
        schemas = Schemas()
        with self.assertRaises(Exception) as context:
            schemas.retrieve("judicate")
        self.assertEqual("Schema for judicate does not exist", str(context.exception))
    def test_validate_schema(self):
        schemas = Schemas()
        schemas.retrieve("building")
        # A well-formed building instance validates with no errors.
        instance = {
            "id": "5a6b99ec37f4de7f94020090",
            "type": "Building",
            "name": "Medium Office",
            "footprint_area": 17059,
            "footprint_perimeter": 533,
            "building_type": "Office",
            "number_of_stories": 3,
            "system_type": "PTAC with district hot water",
            "number_of_stories_above_ground": 3,
            "building_status": "Proposed",
            "floor_area": 51177,
            "year_built": 2010,
        }
        errors = schemas.validate("building", instance)
        self.assertEqual(len(errors), 0)
        # An invalid "type" value must yield exactly one error.
        instance["type"] = "MagicBuilding"
        errors = schemas.validate("building", instance)
        self.assertIn("'MagicBuilding' is not one of ['Building']", errors[0])
        self.assertEqual(len(errors), 1)
|
#bill calculator
# Applies a 50% discount, then a 10% service charge on the discounted
# amount, then 7% GST on (discounted amount + service charge), and
# prints an itemized receipt.
meal_amount=float(input("Enter meal amount($):"))
discount_meal_amount=meal_amount*0.50
service_charge=discount_meal_amount*0.10
gst=(discount_meal_amount+service_charge)*0.07
total=gst+service_charge+discount_meal_amount
print(" ")
print("Receipt")
print("cost of meal : $%.2f"%meal_amount)
print("50% discount ${:.2f} ".format(discount_meal_amount))
print("service charge ${:.2f} ".format(service_charge))
print("GST ${:.2f} ".format(gst))
print("total amount ${:.2f} ".format(total))
|
# Module-level (global) names used by the scoping demos below.
param = 10
strdata = '전역변수'  # value means "global variable"
def func1():
    # Assigning here creates a new *local* that shadows the global `strdata`.
    strdata = '지역변수'  # value means "local variable"
    print(strdata)
def func2(param):
    # Rebinding a parameter affects only this local name;
    # the caller's `param` is untouched.
    param = 1
def func3():
    # `global` makes the assignment target the module-level `param`.
    global param
    param = 50
func1()           # prints the shadowing local value
print(strdata)    # global is unchanged by func1
print(param)      # 10
func2(param)
print(param)      # still 10: func2 rebound only its local parameter
func3()
print(param)      # 50: func3 assigned the global via `global`
print()
def reverse(x, y, z):
    """Return the three arguments in reverse order as a tuple."""
    return (z, y, x)
# Multiple return values arrive as a tuple...
ret = reverse (1,2,3)
print(ret)
# ...and can be unpacked into separate names.
r1,r2,r3 = reverse('a','b','c')
print(r1);print(r2);print(r3)
import time
print('1초간 프로그램을 정지합니다.')
time.sleep(1)
print('1초가 지나갔습니다.')
print()
# Package import demo (mypackage is a project-local package).
import mypackage.mylib
ret1 = mypackage.mylib.add_txt('대한민국','1등')
ret2 = mypackage.mylib.reverse(1,2,3)
print(ret1)
print(ret2)
import time
#import mylib
import mypackage.mylib
time.sleep(1)
#mylib.add_txt('나는','파이썬이다')
mypackage.mylib.reverse(1,2,3)
|
from flask import g, jsonify, request
from .db import get_db
from flask_restful import Resource
import bcrypt
def user_exist(username):
    """Return True when a user document with this username exists."""
    db = get_db()
    user = db.Users.find_one({"Username":username})
    print ("user:{}".format(user))
    # same truthiness the original if/else applied
    return bool(user)
def check_admin(username, password):
    """Validate admin credentials.

    Returns {"status": 200} on success, otherwise a status/msg dict.
    """
    db = get_db()
    user = db.Users.find_one({"Username":username})
    if not user:
        return {"status": 304,
                "msg": "Admin user not exist"
                }
    # bcrypt.hashpw with the stored hash as the salt reproduces that hash
    # only when the password matches.
    # BUG FIX: the original computed this value but never compared it
    # (the comparison was commented out), so ANY password passed the
    # admin check.
    hashed_pwd = bcrypt.hashpw(password.encode('utf8'), user['Password'])
    if user["Password"] != hashed_pwd:
        return {"status": 304,
                "msg": "Invalid password for admin user"
                }
    return {"status": 200}
def count_tokens(username):
    """Return the number of tokens remaining for `username`."""
    db = get_db()
    return db.Users.find_one({"Username":username})['Tokens']
class Register(Resource):
    """
    Register a new user
    """
    def post(self):
        """Create a user with a bcrypt-hashed password and 10 starter tokens."""
        db = get_db()
        payload = request.get_json()
        username = payload['username']
        password = payload['password']
        # refuse duplicates
        if user_exist(username):
            return jsonify({
                "status": 301,
                "msg": "User already registered"
            })
        hashed_pwd = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
        db.Users.insert_one({
            "Username": username,
            "Password": hashed_pwd,
            "Admin": 0,
            "Tokens": 10
        })
        return jsonify({
            "status": 200,
            "msg": "User registered"
        })
class Refill(Resource):
    """
    Refill tokens to user
    """
    def post(self):
        """Admin-only: add `amount` tokens to `user_to_refill`'s balance."""
        db = get_db()
        postedData = request.get_json()
        username = postedData['username']
        password = postedData['password']
        user_to_refill = postedData['user_to_refill']
        amount = postedData['amount']
        # check admin data
        retJson = check_admin(username, password)
        if retJson['status'] != 200:
            return jsonify(retJson)
        if not user_exist(user_to_refill):
            retJson = {"status": 301,
                       "msg": "user to refill does not exist"}
            # BUG FIX: the original built this error but fell through and
            # ran the update anyway; return the error to the caller.
            return jsonify(retJson)
        current_tokens = count_tokens(user_to_refill)
        db.Users.update_one({
            "Username": user_to_refill
        }, {
            "$set": {"Tokens": current_tokens+amount }
        })
        retJson = {"status": 200,
                   "msg": "Amount refilled"}
        return jsonify(retJson)
|
from tutorial.settings.base import *
# Production overrides for the base settings.
DEBUG = False
ALLOWED_HOSTS = ['api', 'wsgi', 'asgi']
# NOTE(security): database credentials are hard-coded; prefer reading
# PASSWORD from an environment variable before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'postgres',
        'USER': 'postgres',
        'PASSWORD': '!dlatl00',
        'HOST': 'database',
        'PORT': '5432',
    }
}
|
"""
Metview Python use case
UC-07-pandas. The Analyst compute simple differences between observations and analysis
and use pandas to perform further computations
BUFR version - BUFR is not tabular or gridded, but we can use Metview Python
framework to extract a particular parameter to a tabular format (geopoints)
--------------------------------------------------------------------------------
1. Analyst retrieves the analysis from a gridded data file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
2. Analyst retrieves an observational parameter from a tabular or a gridded file
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
3. Analyst calculates the difference between the observational data and the
analysis
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
4. Analyst converts this data to a pandas dataframe and computes the number
of outliers based on the zscore
--------------------------------------------------------------------------------
"""
import metview as mv
import numpy as np
from scipy import stats
# 1. gridded 2m-temperature analysis
t2m_grib = mv.read('./t2m_grib.grib')
# 2. three days of observations in BUFR form
obs_3day = mv.read('./obs_3day.bufr')
# extract parameter 012004 to tabular geopoints
# (2 m temperature, judging by the t2m file naming — TODO confirm)
t2m_gpt = mv.obsfilter(
    parameter = '012004',
    output = 'geopoints',
    data = obs_3day
)
# 3. analysis minus observations at the observation locations
diff = t2m_grib - t2m_gpt
# 4. count outliers whose |z-score| exceeds 1.5
df = diff.to_dataframe()
print(df)
outliers = np.abs(stats.zscore(df['value'])) > 1.5
print('# of outliers:', outliers.sum())
|
from collections import OrderedDict
import uuid
import time
from urllib.parse import quote, urlencode
import requests
import json
from .utils import hmacb64, parse_config
class AliyunSMS():
    """Minimal Aliyun Dysms (SMS) API client using signed HTTP GET requests.

    Credentials come either from a config file or from the keyword
    arguments; every request carries signed system + business parameters.
    """
    def __init__(self, config_file=None, access_key_id='', access_key_secret='', region_id='', host='http://dysmsapi.aliyuncs.com'):
        # All query parameters (system + business) for the next request.
        self._sms_params = OrderedDict()
        if config_file:
            self._config = parse_config(config_file)
        else:
            self._config = {
                'access_key_id': access_key_id,
                'access_key_secret': access_key_secret,
                'region_id': region_id,
                'host': host,
            }
        self._form_sys_params()
    @property
    def sms_params(self):
        return self._sms_params
    @sms_params.setter
    def sms_params(self, value):
        # merge user-supplied parameters into the request dict
        if not isinstance(value, dict):
            raise TypeError("An dict instance is required")
        self._sms_params.update(value)
    def _gen_utc_time(self):
        # ISO-8601 UTC timestamp required by the API.
        return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(time.time()))
    def _form_sys_params(self):
        # System-level parameters shared by every API action.
        self._sms_params = dict(
            SignatureMethod='HMAC-SHA1',
            SignatureNonce=str(uuid.uuid4()),
            AccessKeyId=self._config.get('access_key_id'),
            Timestamp=self._gen_utc_time(),
            SignatureVersion='1.0',
            Format='JSON'
        )
    def _form_bus_params(self, business, **kwargs):
        # Action-specific parameters for the given API `business` action.
        self._sms_params.update(dict(
            Action=business,
            Version='2017-05-25',
            RegionId=self._config.get('region_id'),
        ))
        self._sms_params.update(kwargs)
        if not kwargs.get('BizId', None) and business == 'QuerySendDetails':#Optional Params
            self._sms_params.pop('BizId')
    def generate_signature(self, params=None, method='GET', url='/'):
        '''
        Generate the request signature.

        @params: parameters dict; defaults to self._sms_params
        @method: HTTP method, 'GET' by default
        @url: URL endpoint, '/' by default
        Returns the base64-encoded HMAC-SHA1 signature string.
        '''
        params = params if params else self._sms_params
        secret = self._config.get('access_key_secret') + '&' #Must add & to the end
        querystring = quote(urlencode(params))
        return hmacb64('&'.join([method, quote(url, safe=''), querystring]), secret)
    def _sort_params(self, dic):
        # The signature requires parameters sorted by key.
        return OrderedDict(sorted(dic.items(), key=lambda x: x[0]))
    def _send_req(self, url, method='GET'):
        # Sort, sign, then issue the GET request with Signature first.
        self._sms_params = self._sort_params(self._sms_params)
        signature = self.generate_signature()
        self._sms_params['Signature'] = signature
        self._sms_params.move_to_end('Signature', last=False) #Move this param to the top
        if (url == '/'):
            url = ''
        final_url = self._config.get('host') + '?' + '&'.join(['{}={}'.format(key, quote(val, safe='')) for key, val in self._sms_params.items()])
        print(final_url)
        return requests.get(url=final_url)
    def send_sms(self, phone_numbers, sign_name, template_code, template_params=None, raw=True, **kwargs):
        '''
        Send an SMS message via the Aliyun API.

        @phone_numbers: phone number list (a string is accepted for one number)
        @sign_name: sign name configured in the Aliyun console
        @template_code: template code defined in the Aliyun console
        @template_params: parameters substituted into the template
        @raw: when True, return the raw requests response object
        '''
        url = '/'
        method = 'GET'
        template_params = json.dumps(template_params, separators=(',', ':')) #Must not have whitespace
        self._form_bus_params(business='SendSms', PhoneNumbers=phone_numbers, SignName=sign_name, TemplateCode=template_code, TemplateParam=template_params, **kwargs)
        if raw:
            return self._send_req(url=url)
    def query_details(self, phone_number, serial_number='', send_date='', page_size='10', current_page='1', raw=True, **kwargs):
        '''
        Query the sending details of the SMS service.

        @phone_number: one phone number
        @serial_number (optional): serial number returned by the send_sms API
        @send_date: query date, at most 30 days back
        @page_size: paging size, at most 50 items
        @current_page: current page number, starting from 1
        @raw: when True, return the raw response; otherwise the parsed JSON
        '''
        url='/'
        method='GET'
        self._form_bus_params(business='QuerySendDetails', PhoneNumber=phone_number, BizId=serial_number, SendDate=send_date, PageSize=page_size, CurrentPage=current_page)
        if raw:
            return self._send_req(url=url)
        else:
            res = self._send_req(url=url)
            return res.json()
|
class Solution:
    def minPartitions(self, n: str) -> int:
        """Minimum number of deci-binary numbers summing to n.

        Each deci-binary number contributes at most 1 to every digit
        position, so the answer is simply the largest digit of n.
        """
        return max(int(digit) for digit in n)
|
import logging
# Get an instance of a logger
logger = logging.getLogger('config')
import json
from os.path import join, dirname, isfile
DEFAULT_CONFIG = join(dirname(__file__), 'daphne.conf')
class ConfigurationLoader():
    """Lazily loads and caches the daphne JSON configuration file."""
    def __init__(self):
        # Cached parsed configuration; None until load() succeeds.
        self.config = None
    def load(self):
        """Return the cached config, reading DEFAULT_CONFIG on first use.

        Returns None when the file is missing or unparsable (the error
        is logged/printed in that case).
        """
        if self.config:
            return self.config
        if isfile(DEFAULT_CONFIG):
            try:
                with open(DEFAULT_CONFIG) as f:
                    self.config = json.load(f)
            # Narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt / SystemExit.
            except Exception:
                logger.exception("Exception in loading the configuration file")
        else:
            print("Configuration file not found in {0}".format(DEFAULT_CONFIG))
        return self.config
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import style
style.use('seaborn-paper')
# Measured antenna sweep data (arrays below are placeholders to be filled).
antenna_length = np.array([1, 0.8, 0.7, 0.6, 0.5, 0.4])
C_shunt = np.array([])  # TODO: populate with measured shunt capacitances
g = np.array([])/2  # halved — presumably full-width values; verify
#with small pad
C_shunt_sp = np.array([])
g_sp = np.array([])/2
#coding=utf-8
'''
Use the tf-idf algorithm to find key words in articles.
It can also be used on short sentences, although the performance there is not as good.
'''
import sys
import jieba
from collections import Counter
jieba.user_dict = "./dictionary/user_dict.txt"
stop_words = set(['。', ',', ',', '.','“','”','、','\n',' ',' ','(',
')','【','】', '...', '>', '<', '!', '!'])
# Load inverse-document-frequency values: one "word freq" pair per line.
idf_content = open('./dictionary/idf.txt','rb').read().decode('utf-8').split('\n')
idf_freq = {}
for line in idf_content:
    # BUG FIX: a trailing newline produces an empty final entry that
    # crashed the two-value unpack below; skip blank lines.
    if not line.strip():
        continue
    word, freq = line.split(' ')
    idf_freq[word] = float(freq)
class CompareMan(object):
    """Extract the top tf-idf keywords from a text (or a text file)."""
    def __init__(self, arg, sfile):
        """arg == '-t' treats `sfile` as raw text; otherwise as a file path."""
        if arg == '-t':
            self.text = sfile
        else:
            self.text = open(sfile, 'r').read()
        #self.dictionary = dictionary
    def _get_cutted(self):
        """Segment the text with jieba and count non-stopword tokens."""
        cnt = Counter()
        self._cutted_content = (jieba.cut(self.text, cut_all = False))
        for word in self._cutted_content:
            if word not in stop_words:
                cnt[word] += 1
        self.total = sum(cnt.values())
        return cnt
    def _calc_weight(self):
        """Weight the 20 most common terms by idf/total (default idf 10)."""
        self.common = dict(self._get_cutted().most_common(20))
        for k in self.common:
            self.common[k] *= (idf_freq.get(k, 10)/self.total)
        return sorted(self.common.items(), key=lambda x: x[1], reverse=True)
    def __call__(self):
        """Return the 10 highest-weighted keywords.

        PERF FIX: the original also called _get_cutted() here, but
        _calc_weight() already performs the segmentation pass, so the
        extra call doubled the work for no effect.
        """
        return self._calc_weight()[:10]
if __name__ == "__main__":
    # Interactively show the keywords for each Amazon review line.
    for line in open('./data/amazon_reviews.txt','r').readlines():
        print(line)
        c = CompareMan('-t', line)
        print(c())
        # wait for Enter before showing the next review
        input()
|
"""adding games/genres to db
Revision ID: 1db083b8d003
Revises: bb5ef27893de
Create Date: 2017-09-04 13:04:14.683378
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1db083b8d003'
down_revision = 'bb5ef27893de'
branch_labels = None
depends_on = None
def upgrade():
    """Create the games, genres, and game_genres (join) tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('games',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.Text(), nullable=True),
    sa.Column('image_url', sa.Text(), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('title')
    )
    op.create_table('genres',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # many-to-many join table; rows vanish when either side is deleted
    op.create_table('game_genres',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('game_id', sa.Integer(), nullable=True),
    sa.Column('genre_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['game_id'], ['games.id'], ondelete='cascade'),
    sa.ForeignKeyConstraint(['genre_id'], ['genres.id'], ondelete='cascade'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created by upgrade(), join table first."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('game_genres')
    op.drop_table('genres')
    op.drop_table('games')
    # ### end Alembic commands ###
|
#!/usr/bin/env python3
import cv2
import dlib
import numpy as np
import pyautogui
import time
import webbrowser
time_old = time.time()
def shape_to_np(shape, dtype="int"):
    """Convert a dlib landmark detection into an (N, 2) numpy array.

    :param shape: dlib full_object_detection (68 parts for the standard
        face predictor, but any part count works).
    :param dtype: numpy dtype of the returned coordinate array.
    :return: array of (x, y) landmark coordinates.
    """
    # Generalized from the hard-coded 68: use the detection's own part count.
    n_points = shape.num_parts
    coords = np.zeros((n_points, 2), dtype=dtype)
    for i in range(n_points):
        coords[i] = (shape.part(i).x, shape.part(i).y)
    return coords
def eye_on_mask(mask, side):
    """Fill the convex polygon spanned by the landmark indices in `side`.

    Reads the module-level `shape` landmark array (set in the main loop).
    Returns the mask with the eye region painted white (255).
    """
    pts = np.array([shape[i] for i in side], dtype=np.int32)
    return cv2.fillConvexPoly(mask, pts, 255)
all_left = list()
all_right = list()
def click_func(old_pos):
    """Click when the cursor has not moved since `old_pos`, then reset the dwell timer."""
    # BUG FIX: without `global`, the assignment below created a useless
    # local and the module-level dwell timer was never reset.
    global time_old
    new_pos = pyautogui.position()
    if new_pos == old_pos:
        pyautogui.click()
    time_old = time.time()
def contouring(thresh, mid, img, right=False):
    # Locate the largest contour (assumed pupil) in one eye's binary image
    # and drive the mouse from its centroid.
    cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    try:
        cnt = max(cnts, key = cv2.contourArea)
        #print("qqq")
        # centroid from image moments
        M = cv2.moments(cnt)
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        if right:
            # the right-eye crop starts at x == mid; shift back to full-image coords
            cx += mid
            flag=1
        else:
            flag=0
        old_pos=pyautogui.position()
        mouse_control(cx, cy, flag)
        # dwell-click after 8 s of stillness
        # NOTE(review): verify time_old is actually refreshed after a
        # click (see click_func) — otherwise this fires every frame.
        if time.time()-time_old>8:
            click_func(old_pos)
        cv2.circle(img, (cx, cy), 4, (0, 0, 255), 2)
    except:
        # NOTE(review): bare except deliberately skips frames with no
        # contour (max() on empty) or zero-area moments; consider
        # narrowing to (ValueError, ZeroDivisionError).
        pass
def mouse_control(cx, cy, flag):
    """Record a pupil centroid and, once enough history exists, move the mouse.

    flag == 0 stores a left-eye point, otherwise a right-eye point. After
    more than two right-eye points exist, the midpoints between the most
    recent and previous left/right pairs are compared to infer motion.
    """
    (all_right if flag else all_left).append([cx, cy])
    if len(all_right) > 2:
        last_r, last_l = all_right[-1], all_left[-1]
        prev_r, prev_l = all_right[-2], all_left[-2]
        mid_new = [(last_r[0] + last_l[0]) / 2, (last_r[1] + last_l[1]) / 2]
        mid_old = [(prev_r[0] + prev_l[0]) / 2, (prev_r[1] + prev_l[1]) / 2]
        compare(mid_new, mid_old)
def compare(curr, prev):
    """Nudge the cursor 40 px in the dominant direction of eye movement."""
    dx = curr[0] - prev[0]
    dy = curr[1] - prev[1]
    if dx > 5.0 and dy < 10.0:
        pyautogui.moveRel(40, 0, duration=0.1)
    elif -dx > 5.0 and -dy < 10.0:
        pyautogui.moveRel(-40, 0, duration=0.1)
    elif -dy > 5.0:
        pyautogui.moveRel(0, -40, duration=0.1)
    elif dy > 5.0:
        pyautogui.moveRel(0, 40, duration=0.1)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_68.dat')
# landmark indices of the eye contours in the 68-point model
left = [36, 37, 38, 39, 40, 41]
right = [42, 43, 44, 45, 46, 47]
cap = cv2.VideoCapture(0)
ret, img = cap.read()
thresh = img.copy()
#cv2.namedWindow('image')
kernel = np.ones((9, 9), np.uint8)
webbrowser.open('https://shatakshi-raman.questai.app')
cv2.namedWindow('eyes', cv2.WINDOW_NORMAL)
cv2.resizeWindow('eyes', 400, 400)
cv2.moveWindow('eyes',0, 580)
while(True):
    ret, img = cap.read()
    # NOTE(review): count is reset every frame and only incremented at the
    # end of the face loop, so the `count % 3` skip below never triggers —
    # confirm whether frame skipping was intended.
    count = 0
    img = cv2.flip(img, 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    for rect in rects:
        if count%3 !=0:
            continue
        shape = predictor(gray, rect)
        shape = shape_to_np(shape)
        # build a white-background image containing only the eye regions
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        mask = eye_on_mask(mask, left)
        mask = eye_on_mask(mask, right)
        mask = cv2.dilate(mask, kernel, 5)
        eyes = cv2.bitwise_and(img, img, mask=mask)
        mask = (eyes == [0, 0, 0]).all(axis=2)
        eyes[mask] = [255, 255, 255]
        # x midpoint between the inner eye corners splits left/right eye
        mid = (shape[42][0] + shape[39][0]) // 2
        eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)
        #threshold = cv2.getTrackbarPos('threshold', 'image')
        threshold = 55
        _, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)
        # clean the binary image before contour detection
        thresh = cv2.erode(thresh, None, iterations=2) #1
        thresh = cv2.dilate(thresh, None, iterations=4) #2
        thresh = cv2.medianBlur(thresh, 3) #3q
        thresh = cv2.bitwise_not(thresh)
        # track each pupil and drive the mouse accordingly
        contouring(thresh[:, 0:mid], mid, img)
        contouring(thresh[:, mid:], mid, img, True)
        # draw the eye landmarks for visual feedback
        for (x, y) in shape[36:48]:
            cv2.circle(img, (x, y), 2, (255, 0, 0), -1)
        count+=1
    cv2.imshow('eyes', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
import numpy as np
from vtk_rw import read_vtk
# Path templates: the %s slots take (layer, hemisphere) or (hemisphere).
vtk_file = '/nobackup/ilz3/myelinconnect/new_groupavg/profiles/smooth_1.5/%s/%s_lowres_new_avgsurf_groupdata.vtk'
pro_file = '/nobackup/ilz3/myelinconnect/new_groupavg/profiles/smooth_1.5/%s_lowres_new_avgsurf_groupdata.npy'
pro_mean_file = '/nobackup/ilz3/myelinconnect/new_groupavg/profiles/smooth_1.5/%s_lowres_new_avgsurf_groupdata_mean.npy'
for hemi in ['lh', 'rh']:
    # read layer 0 once just to size the profile array
    _, _, d = read_vtk(vtk_file%(0, hemi))
    pro = np.zeros((d.shape[0], d.shape[1], 11))
    # stack the 11 depth layers along the last axis
    for layer in range(11):
        _, _, d = read_vtk(vtk_file%(layer, hemi))
        pro[:,:,layer] = d
    # mean across the second axis (presumably subjects — TODO confirm)
    pro_mean = np.mean(pro, axis=1)
    np.save(pro_file%(hemi), pro)
    np.save(pro_mean_file%(hemi), pro_mean)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 13:08:59 2020
@author: nenad
"""
def mergeLists(l1, l2):
    """Merge two sorted singly linked lists into one sorted list.

    Stable on ties (nodes from l1 come first). Nodes are relinked in
    place; no new nodes are created. Returns the merged head.
    """
    # an empty side means the other list is already the answer
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    # choose the head node
    if l1.data <= l2.data:
        head = l1
        l1 = l1.next
    else:
        head = l2
        l2 = l2.next
    tail = head
    # repeatedly splice the smaller front node onto the tail
    while l1 and l2:
        if l1.data <= l2.data:
            tail.next = l1
            l1 = l1.next
        else:
            tail.next = l2
            l2 = l2.next
        tail = tail.next
    # attach whichever list still has nodes (already sorted)
    tail.next = l1 if l1 else l2
    return head
'''
Problem statement:
https://www.hackerrank.com/challenges/itertools-permutations
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
from itertools import permutations
import sys
# Read the word and the permutation length from a single input line.
# (The original had a dangling `sys.stdin.readline` attribute access here —
# a no-op statement that never called the function; removed.)
line = sys.stdin.readline().strip().split()
w = line[0]
p = int(line[1])

# Emit every length-p permutation of w's characters in lexicographic order.
for perm in sorted(''.join(chars) for chars in permutations(w, p)):
    print(perm)
from django.contrib.auth.models import User
from denuncias.models import Denuncia
from tipo_denuncia.models import TipoDenuncia
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
class DenunciaTests(APITestCase):
    """CRUD tests for the /api/denuncias/ endpoint.

    Success cases run with a client authenticated as the fixture user;
    error cases issue the same requests unauthenticated and expect 401.
    """

    def setUp(self):
        # Fixtures shared by every test: one user and one complaint type.
        User.objects.create(username='testes', password='123', email='testes@email.com').save()
        TipoDenuncia.objects.create(descricao='TipoDenunciaTest').save()

    # --- success cases (authenticated client) ---

    def test_create_denuncia_done(self):
        api = APIClient()
        api.force_authenticate(user=User.objects.get(username='testes'))
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        payload = {'descricao': 'Test 1', 'tipo_denuncia': str(tipo.id)}
        response = api.post('/api/denuncias/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(Denuncia.objects.count(), 1)
        self.assertEqual(Denuncia.objects.get().descricao, 'Test 1')

    def test_update_denuncia_done(self):
        api = APIClient()
        api.force_authenticate(user=User.objects.get(username='testes'))
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        existing = Denuncia.objects.create(descricao='Test 2', tipo_denuncia=tipo)
        existing.save()
        payload = {'descricao': 'Test Update', 'tipo_denuncia': str(tipo.id)}
        response = api.put('/api/denuncias/' + str(existing.id) + '/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Denuncia.objects.count(), 1)
        self.assertEqual(Denuncia.objects.get().descricao, 'Test Update')

    def test_delete_denuncia_done(self):
        api = APIClient()
        api.force_authenticate(user=User.objects.get(username='testes'))
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        existing = Denuncia.objects.create(descricao='Test 3', tipo_denuncia=tipo)
        existing.save()
        response = api.delete('/api/denuncias/' + str(existing.id) + '/')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Denuncia.objects.count(), 0)

    # --- error cases (no authentication -> 401) ---

    def test_create_denuncia_error(self):
        api = APIClient()
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        payload = {'descricao': 'Test 4', 'tipo_denuncia': str(tipo.id)}
        response = api.post('/api/denuncias/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_update_denuncia_error(self):
        api = APIClient()
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        existing = Denuncia.objects.create(descricao='Test 5', tipo_denuncia=tipo)
        existing.save()
        payload = {'descricao': 'Test Update'}
        response = api.put('/api/denuncias/' + str(existing.id) + '/', payload, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_delete_denuncia_error(self):
        api = APIClient()
        tipo = TipoDenuncia.objects.get(descricao='TipoDenunciaTest')
        existing = Denuncia.objects.create(descricao='Test 6', tipo_denuncia=tipo)
        existing.save()
        response = api.delete('/api/denuncias/' + str(existing.id) + '/')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
import sys
from math import gcd
from random import getrandbits
from random import randbytes
from random import randint
from unittest import TestCase
from Crypto.Cipher import AES
from Crypto.Cipher import ARC4
from Crypto.Util import Counter
from Crypto.Util.Padding import pad
from Crypto.Util.Padding import unpad
from Crypto.Util.strxor import strxor
from sage.all import EllipticCurve
from sage.all import GF
from sage.all import legendre_symbol
class TestCBC(TestCase):
    """Tests for the AES-CBC attacks implemented in the `cbc` package."""
    # Attack modules under test, bound as class attributes.
    from cbc import bit_flipping
    from cbc import iv_recovery
    from cbc import padding_oracle

    def _encrypt(self, key, p):
        # Encrypt p (assumed block-aligned) under AES-CBC with a fresh IV.
        iv = randbytes(16)
        cipher = AES.new(key, mode=AES.MODE_CBC, iv=iv)
        c = cipher.encrypt(p)
        return iv, c

    def _decrypt(self, key, iv, c):
        # Raw CBC decryption; no padding handling.
        cipher = AES.new(key, mode=AES.MODE_CBC, iv=iv)
        p = cipher.decrypt(c)
        return p

    def _valid_padding(self, key, iv, c):
        # Padding oracle: True iff c decrypts to valid PKCS#7 padding.
        try:
            cipher = AES.new(key, mode=AES.MODE_CBC, iv=iv)
            unpad(cipher.decrypt(c), 16)
            return True
        except ValueError:
            return False

    def test_bit_flipping(self):
        key = randbytes(16)
        p = randbytes(32)
        p_ = randbytes(16)
        iv, c = self._encrypt(key, p)
        # Forge (iv, c) so the first block decrypts to p_.
        iv_, c_ = self.bit_flipping.attack(iv, c, 0, p[0:len(p_)], p_)
        p__ = self._decrypt(key, iv_, c_)
        self.assertEqual(p_, p__[0:len(p_)])
        # Same attack targeting the second block (byte offset 16).
        iv_, c_ = self.bit_flipping.attack(iv, c, 16, p[16:16 + len(p_)], p_)
        p__ = self._decrypt(key, iv_, c_)
        self.assertEqual(p_, p__[16:16 + len(p_)])

    def test_iv_recovery(self):
        key = randbytes(16)
        iv = randbytes(16)
        # The attack only gets a decryption oracle, never the IV itself.
        iv_ = self.iv_recovery.attack(lambda c: self._decrypt(key, iv, c))
        self.assertEqual(iv, iv_)

    def test_padding_oracle(self):
        key = randbytes(16)
        # Exercise every possible PKCS#7 padding length (1..16).
        for i in range(16):
            p = pad(randbytes(i + 1), 16)
            iv, c = self._encrypt(key, p)
            p_ = self.padding_oracle.attack(lambda iv, c: self._valid_padding(key, iv, c), iv, c)
            self.assertEqual(p, p_)
class TestCBCAndCBCMAC(TestCase):
    """Tests for attacks exploiting one key reused for both CBC encryption
    and CBC-MAC authentication, in the EaM, EtM and MtE compositions."""
    from cbc_and_cbc_mac import eam_key_reuse
    from cbc_and_cbc_mac import etm_key_reuse
    from cbc_and_cbc_mac import mte_key_reuse

    def _encrypt_eam(self, key, p):
        # Notice how the key is used for encryption and authentication...
        p = pad(p, 16)
        iv = randbytes(16)
        c = AES.new(key, AES.MODE_CBC, iv).encrypt(p)
        # Encrypt-and-MAC using CBC-MAC to prevent chosen-ciphertext attacks.
        t = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(p)[-16:]
        return iv, c, t

    def _decrypt_eam(self, key, iv, c, t):
        p = AES.new(key, AES.MODE_CBC, iv).decrypt(c)
        t_ = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(p)[-16:]
        # Check the MAC to be sure the message isn't forged.
        if t != t_:
            return None
        return unpad(p, 16)

    def _encrypt_etm(self, key, p):
        # Notice how the key is used for encryption and authentication...
        p = pad(p, 16)
        iv = randbytes(16)
        c = AES.new(key, AES.MODE_CBC, iv).encrypt(p)
        # Encrypt-then-MAC using CBC-MAC to prevent chosen-ciphertext attacks.
        t = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(iv + c)[-16:]
        return iv, c, t

    def _decrypt_etm(self, key, iv, c, t):
        t_ = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(iv + c)[-16:]
        # Check the MAC to be sure the message isn't forged.
        if t != t_:
            return None
        return unpad(AES.new(key, AES.MODE_CBC, iv).decrypt(c), 16)

    def _encrypted_zeroes(self, key):
        # ECB encryption of a single zero block, given to the MtE attack.
        return AES.new(key, AES.MODE_ECB).encrypt(bytes(16))

    def _encrypt_mte(self, key, p):
        # Notice how the key is used for encryption and authentication...
        p = pad(p, 16)
        iv = randbytes(16)
        # MAC-then-encrypt using CBC-MAC to prevent chosen-ciphertext attacks.
        t = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(p)[-16:]
        c = AES.new(key, AES.MODE_CBC, iv).encrypt(p + t)
        return iv, c

    def _decrypt_mte(self, key, iv, c):
        d = AES.new(key, AES.MODE_CBC, iv).decrypt(c)
        # Last block of the decryption is the (encrypted-along) tag.
        p = d[:-16]
        t = d[-16:]
        t_ = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(p)[-16:]
        # Check the MAC to be sure the message isn't forged.
        if t != t_:
            return None
        return unpad(p, 16)

    def test_eam_key_reuse(self):
        key = randbytes(16)
        # Exercise every plaintext length from 1 to 16 bytes.
        for i in range(16):
            p = randbytes(i + 1)
            iv, c, t = self._encrypt_eam(key, p)
            p_ = self.eam_key_reuse.attack(lambda iv, c, t: self._decrypt_eam(key, iv, c, t), iv, c, t)
            self.assertEqual(p, p_)

    def test_etm_key_reuse(self):
        key = randbytes(16)
        for i in range(16):
            p = randbytes(i + 1)
            iv, c, t = self._encrypt_etm(key, p)
            # The EtM attack additionally needs an encryption oracle.
            p_ = self.etm_key_reuse.attack(lambda p: self._encrypt_etm(key, p), lambda iv, c, t: self._decrypt_etm(key, iv, c, t), iv, c, t)
            self.assertEqual(p, p_)

    def test_mte_key_reuse(self):
        key = randbytes(16)
        encrypted_zeroes = self._encrypted_zeroes(key)
        for i in range(16):
            p = randbytes(i + 1)
            iv, c = self._encrypt_mte(key, p)
            p_ = self.mte_key_reuse.attack(lambda iv, c: self._decrypt_mte(key, iv, c), iv, c, encrypted_zeroes)
            self.assertEqual(p, p_)
class TestCBCMAC(TestCase):
    """Tests for the CBC-MAC length-extension attack."""
    from cbc_mac import length_extension

    def _sign(self, key, m):
        # CBC-MAC tag: the final AES-CBC block under a zero IV.
        mac_stream = AES.new(key, AES.MODE_CBC, bytes(16)).encrypt(m)
        return mac_stream[-16:]

    def _verify(self, key, m, t):
        # A tag is valid iff recomputing it yields the same 16 bytes.
        return t == self._sign(key, m)

    def test_length_extension(self):
        key = randbytes(16)
        m1, m2 = randbytes(32), randbytes(32)
        t1, t2 = self._sign(key, m1), self._sign(key, m2)
        # Combine the two signed messages into a forged (message, tag) pair.
        m3, t3 = self.length_extension.attack(m1, t1, m2, t2)
        self.assertTrue(self._verify(key, m3, t3))
class TestCTR(TestCase):
    """Tests for the AES-CTR attacks."""
    from ctr import separator_oracle

    def _encrypt(self, key, p):
        # CTR mode with a fresh 128-bit counter starting at zero.
        return AES.new(key, AES.MODE_CTR, counter=Counter.new(128)).encrypt(p)

    def _valid_separators(self, separator_byte, separator_count, key, c):
        # Oracle: does the decryption contain exactly separator_count
        # occurrences of the separator byte?
        p = AES.new(key, AES.MODE_CTR, counter=Counter.new(128)).decrypt(c)
        return p.count(separator_byte) == separator_count

    def test_crime(self):
        # TODO: CRIME attack is too inconsistent in unit tests.
        pass

    def test_separator_oracle(self):
        separator_byte = ord("\0")
        separator_count = randint(1, 10)
        key = randbytes(16)
        # We have to replace separators by some other byte.
        p = randbytes(16).replace(b"\0", b"\1")
        for _ in range(separator_count):
            # We have to replace separators by some other byte.
            p += bytes([separator_byte]) + randbytes(16).replace(b"\0", b"\1")
        c = self._encrypt(key, p)
        p_ = self.separator_oracle.attack(lambda c: self._valid_separators(separator_byte, separator_count, key, c), separator_byte, c)
        self.assertEqual(p, p_)
class TestECB(TestCase):
    """Tests for the ECB byte-at-a-time plaintext-recovery attack."""
    from ecb import plaintext_recovery

    def _encrypt(self, key, p):
        # Deterministic ECB encryption -- the property the attack exploits.
        return AES.new(key, AES.MODE_ECB).encrypt(p)

    def test_plaintext_recovery(self):
        key = randbytes(16)
        secret = randbytes(16)
        # The attack controls a prefix p that is encrypted alongside secret.
        oracle = lambda p: self._encrypt(key, pad(p + secret, 16))
        recovered = self.plaintext_recovery.attack(oracle)
        self.assertEqual(secret, recovered)
class TestECC(TestCase):
    """Tests for the elliptic-curve attacks."""
    from ecc import ecdsa_nonce_reuse
    from ecc import parameter_recovery
    from ecc import singular_curve
    from ecc import smart_attack

    # Sentinel for the point at infinity in the affine helpers below.
    _origin = "origin"

    def _negation(self, p, point):
        # Affine negation on y^2 = x^3 + a2*x^2 + a4*x + a6 over GF(p).
        if point == self._origin:
            return point
        return point[0], -point[1] % p

    def _add(self, p, a2, a4, point1, point2):
        # Affine point addition handling identity, inverses and doubling.
        if point1 == self._origin:
            return point2
        if point2 == self._origin:
            return point1
        if point1 == self._negation(p, point2):
            return self._origin
        if point1 == point2:
            # Tangent slope for doubling.
            gradient = (3 * point1[0] ** 2 + 2 * a2 * point1[0] + a4) * pow(2 * point1[1], -1, p) % p
        else:
            # Chord slope for distinct points.
            gradient = (point2[1] - point1[1]) * pow(point2[0] - point1[0], -1, p) % p
        x = (gradient ** 2 - a2 - point1[0] - point2[0]) % p
        y = (gradient * (point1[0] - x) - point1[1]) % p
        return x, y

    def _double_and_add(self, p, a2, a4, base, l):
        # Binary (LSB-first) scalar multiplication: returns l * base.
        multiplication_result = self._origin
        double = base
        while l > 0:
            if l % 2 == 1:
                multiplication_result = self._add(p, a2, a4, multiplication_result, double)
            double = self._add(p, a2, a4, double, double)
            l //= 2
        return multiplication_result

    def test_ecdsa_nonce_reuse(self):
        # NIST P-256 parameters.
        p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
        a = 115792089210356248762697446949407573530086143415290314195533631308867097853948
        b = 41058363725152142129326129780047268409114441015993725554835256314039467401291
        p_256 = EllipticCurve(GF(p), [a, b])
        gen = p_256.gen(0)
        n = int(gen.order())
        d = randint(1, n - 1)
        l = randint(1, n - 1)
        r = int((l * gen).xy()[0])
        # Two ECDSA signatures sharing the same nonce l (hence the same r).
        m1 = getrandbits(n.bit_length())
        s1 = pow(l, -1, n) * (m1 + r * d) % n
        m2 = getrandbits(n.bit_length())
        s2 = pow(l, -1, n) * (m2 + r * d) % n
        # The attack yields candidate (nonce, private key) pairs.
        for l_, d_ in self.ecdsa_nonce_reuse.attack(n, m1, r, s1, m2, r, s2):
            self.assertIsInstance(l_, int)
            self.assertIsInstance(d_, int)
            if l_ == l and d_ == d:
                break
        else:
            self.fail()

    def test_frey_ruck_attack(self):
        # TODO: Frey-Ruck attack is too inconsistent in unit tests.
        pass

    def test_mov_attack(self):
        # TODO: MOV attack is too inconsistent in unit tests.
        pass

    def test_parameter_recovery(self):
        # Recover curve coefficients a, b from two points on the curve.
        p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
        a = 115792089210356248762697446949407573530086143415290314195533631308867097853948
        b = 41058363725152142129326129780047268409114441015993725554835256314039467401291
        p_256 = EllipticCurve(GF(p), [a, b])
        x1, y1 = p_256.random_point().xy()
        x2, y2 = p_256.random_point().xy()
        a_, b_ = self.parameter_recovery.attack(p, x1, y1, x2, y2)
        self.assertIsInstance(a_, int)
        self.assertIsInstance(b_, int)
        self.assertEqual(a, a_)
        self.assertEqual(b, b_)

    def test_singular_curve(self):
        # Singular point is a cusp.
        p = 29800669538070463271
        a2 = 9813480773723366080
        a4 = 13586186857864981308
        a6 = 18910877985247806581
        base_x = 13284247619583658910
        base_y = 3629049282720081919
        # We don't know the order of the base point, so we keep l pretty low to make sure we don't exceed it.
        l = randint(1, 4096)
        multiplication_result_x, multiplication_result_y = self._double_and_add(p, a2, a4, (base_x, base_y), l)
        l_ = self.singular_curve.attack(p, a2, a4, a6, base_x, base_y, multiplication_result_x, multiplication_result_y)
        self.assertIsInstance(l_, int)
        self.assertEqual(l, l_)
        # Singular point is a node.
        p = 29800669538070463271
        a2 = 13753215131529770662
        a4 = 16713139382466325228
        a6 = 19476075514740408653
        base_x = 16369123140759309684
        base_y = 5098114980663762719
        # We don't know the order of the base point, so we keep l pretty low to make sure we don't exceed it.
        l = randint(1, 4096)
        multiplication_result_x, multiplication_result_y = self._double_and_add(p, a2, a4, (base_x, base_y), l)
        l_ = self.singular_curve.attack(p, a2, a4, a6, base_x, base_y, multiplication_result_x, multiplication_result_y)
        self.assertIsInstance(l_, int)
        self.assertEqual(l, l_)

    def test_smart_attack(self):
        # Curve whose order equals p (anomalous), as Smart's attack requires.
        curve = EllipticCurve(GF(23304725718649417969), [8820341459377516260, 5880227639585010840])
        gen = curve.gen(0)
        n = int(gen.order())
        l = randint(1, n - 1)
        l_ = self.smart_attack.attack(gen, l * gen)
        self.assertIsInstance(l_, int)
        self.assertEqual(l, l_)
class TestElgamalEncryption(TestCase):
    """Tests for the ElGamal-encryption attacks."""
    from elgamal_encryption import nonce_reuse
    from elgamal_encryption import unsafe_generator

    def test_nonce_reuse(self):
        # Safe prime.
        p = 16902648776703029279
        g = 3
        d = randint(1, p - 1)
        h = pow(g, d, p)
        l = randint(1, p - 1)
        # ElGamal shared secret s = h^l mod p.  The original transposed the
        # arguments as pow(h, p, l) — exponentiating by the modulus and
        # reducing by the nonce — which is not the ElGamal secret (compare
        # s = pow(h, y, p) in test_unsafe_generator below).
        s = pow(h, l, p)
        c = pow(g, l, p)
        # Two messages encrypted with the same nonce (same c, same s).
        m1 = getrandbits(p.bit_length())
        d1 = m1 * s % p
        m2 = getrandbits(p.bit_length())
        d2 = m2 * s % p
        m2_ = self.nonce_reuse.attack(p, m1, c, d1, c, d2)
        self.assertIsInstance(m2_, int)
        # NOTE(review): m2 = getrandbits(p.bit_length()) can exceed p while
        # the recovered value is presumably reduced mod p — confirm whether
        # this equality can be flaky and whether m1/m2 should be drawn < p.
        self.assertEqual(m2, m2_)

    def test_unsafe_generator(self):
        # Safe prime.
        p = 16902648776703029279
        # Unsafe generator, generates the entire group.
        g = 7
        for i in range(100):
            x = randint(1, p - 1)
            h = pow(g, x, p)
            y = randint(1, p - 1)
            s = pow(h, y, p)
            c1 = pow(g, y, p)
            m = randint(1, p - 1)
            c2 = m * s % p
            # The attack leaks the Legendre symbol (quadratic residuosity)
            # of the plaintext from the ciphertext alone.
            k = self.unsafe_generator.attack(p, h, c1, c2)
            self.assertIsInstance(k, int)
            self.assertEqual(legendre_symbol(m, p), k)
class TestElgamalSignature(TestCase):
    """Tests for the ElGamal-signature nonce-reuse attack."""
    from elgamal_signature import nonce_reuse

    def test_nonce_reuse(self):
        # Safe prime.
        p = 16902648776703029279
        g = 3
        d = randint(1, p - 2)
        # Draw a nonce l coprime to p - 1; the initial value p - 1 is
        # deliberately invalid so the loop body runs at least once.
        l = p - 1
        while gcd(l, p - 1) != 1:
            l = randint(2, p - 2)
        r = pow(g, l, p)
        # Two signatures sharing the nonce l (and therefore the same r).
        m1 = getrandbits(p.bit_length())
        s1 = pow(l, -1, p - 1) * (m1 - r * d) % (p - 1)
        m2 = getrandbits(p.bit_length())
        s2 = pow(l, -1, p - 1) * (m2 - r * d) % (p - 1)
        # The attack yields candidate (nonce, private key) pairs.
        for l_, d_ in self.nonce_reuse.attack(p, m1, r, s1, m2, r, s2):
            self.assertIsInstance(l_, int)
            self.assertIsInstance(d_, int)
            if l_ == l and d_ == d:
                break
        else:
            self.fail()
class TestFactorization(TestCase):
from factorization import base_conversion
from factorization import branch_and_prune
from factorization import complex_multiplication
from factorization import coppersmith
from factorization import fermat
from factorization import gaa
from factorization import known_crt_exponents
from factorization import known_phi
from factorization import known_d
from factorization import implicit
from factorization import roca
from factorization import shor
from factorization import twin_primes
def test_base_conversion(self):
# Base 3, 3 primes.
p = 21187083124088512843307390152364167522362269594349815270782628323431805003774795906872825415073456706499910412455608669
q = 15684240429131529254685698284890751184639406145730291592802676915731672495230992603635422093849215077
r = 40483766026713491645694780188316242859742718066890630967135095358496115350752613236101566589
n = p * q * r
p_, q_, r_ = self.base_conversion.factorize(n)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertIsInstance(r_, int)
self.assertEqual(n, p_ * q_ * r_)
# Base 11, 2 primes.
p = 5636663100410339050591445485090234548439547400230152507623650956862470951259768771895609021439466657292113515499213261725046751664333428835212665405991848764779073407177219695916181638661604890906124870900657349291343875716114535224623986662673220278594643325664055743877053272540004735452198447411515019043760699779198474382859366389140522851725256493083967381046565218658785408508317
q = 4637643488084848224165183518002033325616428077917519043195914958210451836010505629755906000122693190713754782092365745897354221494160410767300504260339311867766125480345877257141604490894821710144701103564244398358535542801965838493
n = p * q
p_, q_ = self.base_conversion.factorize(n)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_branch_and_prune(self):
# These primes aren't special.
p = 13139791741351746894866427726721425232688052495714047961128606568137470741236391419984296213524906103377170890688143635009211116727124842849096165421244153
q = 6705712489981460472010451576220118673766200621788838066168783990030831970269515515674361221085135530331369278172131216566093286615777148021404414538085037
n = p * q
phi = (p - 1) * (q - 1)
e = 65537
d = pow(e, -1, phi)
dp = pow(e, -1, p - 1)
dq = pow(e, -1, q - 1)
known_prop = 57
p_bits = []
for i in reversed(range(512)):
p_bits.append((p >> i) & 1 if randint(1, 100) <= known_prop else None)
q_bits = []
for i in reversed(range(512)):
q_bits.append((q >> i) & 1 if randint(1, 100) <= known_prop else None)
p_, q_ = self.branch_and_prune.factorize_pq(n, p_bits, q_bits)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
known_prop = 42
p_bits = []
for i in reversed(range(512)):
p_bits.append((p >> i) & 1 if randint(1, 100) <= known_prop else None)
q_bits = []
for i in reversed(range(512)):
q_bits.append((q >> i) & 1 if randint(1, 100) <= known_prop else None)
d_bits = []
for i in reversed(range(1024)):
d_bits.append((d >> i) & 1 if randint(1, 100) <= known_prop else None)
p_, q_ = self.branch_and_prune.factorize_pqd(n, e, p_bits, q_bits, d_bits)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
known_prop = 27
p_bits = []
for i in reversed(range(512)):
p_bits.append((p >> i) & 1 if randint(1, 100) <= known_prop else None)
q_bits = []
for i in reversed(range(512)):
q_bits.append((q >> i) & 1 if randint(1, 100) <= known_prop else None)
d_bits = []
for i in reversed(range(1024)):
d_bits.append((d >> i) & 1 if randint(1, 100) <= known_prop else None)
dp_bits = []
# A bit larger than 512 due to implementation details of the branch and prune algorithm.
for i in reversed(range(516)):
dp_bits.append((dp >> i) & 1 if randint(1, 100) <= known_prop else None)
dq_bits = []
# A bit larger than 512 due to implementation details of the branch and prune algorithm.
for i in reversed(range(516)):
dq_bits.append((dq >> i) & 1 if randint(1, 100) <= known_prop else None)
p_, q_ = self.branch_and_prune.factorize_pqddpdq(n, e, p_bits, q_bits, d_bits, dp_bits, dq_bits)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_complex_multiplication(self):
# Recursion limit is necessary for calculating division polynomials using sage.
rec_limit = sys.getrecursionlimit()
sys.setrecursionlimit(5000)
p = 10577468517212308916917871367410399281392767861135513107255047025555394408598222362847763634342865553142272076186583012471808986419037203678594688627595231
q = 8925960222192297437450017303748967603715694246793735943594688849877125733026282069058422865132949625288537523520769856912162011383285034969425346137038883
n = p * q
D = 427
p_, q_ = self.complex_multiplication.factorize(n, D)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
sys.setrecursionlimit(rec_limit)
def test_coppersmith(self):
p = 8294118504611118345546466080325632607801907364697312317242368417303646025896249767645395912291329182895616276681886182303417327463669722370956110678857457
q = 11472445399871949099065671577613972926185090427303119917183801667878634389108674818205844773744056675054520407290278050115877859333328393928885760892504569
n = p * q
p_, q_ = self.coppersmith.factorize_univariate(n, 512, 270, p >> (512 - 270), 0, 0)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = self.coppersmith.factorize_univariate(n, 512, 0, 0, 270, p % (2 ** 270))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = self.coppersmith.factorize_univariate(n, 512, 135, p >> (512 - 135), 135, p % (2 ** 135))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = self.coppersmith.factorize_bivariate(n, 512, 150, p >> (512 - 150), 0, 0, 512, 0, 0, 150, q % (2 ** 150))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = self.coppersmith.factorize_bivariate(n, 512, 0, 0, 150, p % (2 ** 150), 512, 150, q >> (512 - 150), 0, 0)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_fermat(self):
p = 383885088537555147258860631363598239852683844948508219667734507794290658581818891369581578137796842442514517285109997827646844102293746572763236141308659
q = 383885088537555147258860631363598239852683844948508219667734507794290658581818891369581578137796842442514517285109997827646844102293746572763236141308451
n = p * q
p_, q_ = self.fermat.factorize(n)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_gaa(self):
r_p = 34381
r_q = 34023
p = 95071251890492896215829359101175428907421221364386877469905182082459875177459986258243302560246216190552021119341405678279166840212587310541906674474311515240972185868939740063531859593844606048709104560925568301977927216150294427162519810608935523631249827019496037479563371324790366397060798445963209377357
q = 90298295824650311663818894095620747783372649281213396245855149883068750544736749865742151003212745876322858711152862555726263459709030033799784069102281145447897017439265777617772466042518218409294380111768917907088743454681904160308248752114524063081088402900608673706746438458236567547010845749956723115239
n = p * q
p_, q_ = self.gaa.factorize(n, r_p, r_q)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_known_crt_exponents(self):
# These primes aren't special.
p = 9734878849445118420073785869554836149487671692719552358756738189651079813869054963335880039395402041883956221923435780797276507555906725160774871585184181
q = 11608927577332560028819160266239104364716512653498293226451614650722863458488829019269383773936258272349564355274218301207779572980847476544743569746719093
n = p * q
phi = (p - 1) * (q - 1)
e = 65537
d = pow(e, -1, phi)
dp = d % (p - 1)
dq = d % (q - 1)
p_, q_ = next(self.known_crt_exponents.factorize(e, e + 2, n=n, dp=dp, dq=dq))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = next(self.known_crt_exponents.factorize(e, e + 2, n=n, dp=dp))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = next(self.known_crt_exponents.factorize(e, e + 2, n=n, dq=dq))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_, q_ = next(self.known_crt_exponents.factorize(e, e + 2, dp=dp, dq=dq, p_bitsize=512, q_bitsize=512))
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p_ = next(self.known_crt_exponents.factorize(e, e + 2, dp=dp, p_bitsize=512))
self.assertIsInstance(p_, int)
self.assertEqual(p, p_)
q_ = next(self.known_crt_exponents.factorize(e, e + 2, dq=dq, q_bitsize=512))
self.assertIsInstance(q_, int)
self.assertEqual(q, q_)
def test_known_d(self):
# These primes aren't special.
p = 10999882285407021659159843781080979389814097626452668846482424135627220062700466847567575264657287989126943263999867722090759547565297969535143544253926071
q = 12894820825544912052042889653649757120734073367261758361676140208842841153775542379620171049124260330205408767340830801133280422958906941622318918402459837
n = p * q
phi = (p - 1) * (q - 1)
e = 65537
d = pow(e, -1, phi)
p_, q_ = self.known_d.factorize(n, e, d)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_known_phi(self):
# These primes aren't special.
p = 11106026672819778415395265319351312104517763207376765038636473714941732117831488482730793398782365364840624898218935983446211558033147834146885518313145941
q = 12793494802119353329493630005275969260540058187994460635179617401018719587481122947567147790680079651999077966705114757935833094909655872125005398075725409
n = p * q
phi = (p - 1) * (q - 1)
p_, q_ = self.known_phi.factorize(n, phi)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
# Multi-prime case takes longer so there's a separate method.
p = 10193015828669388212171268316396616412166866643440710733674534917491644123135436050477232002188857603479321547506131679866357093667445348339711929671105733
q = 8826244874397589965592244959402585690675974843434609869757034692220480232437419549416634170391846191239385439228177059214900435042874545573920364227747261
r = 7352042777909126576764043061995108196815011736073183321111078742728938275060552442022686305342309076279692633229512445674423158310200668776459828180575601
s = 9118676262959556930818956921827413198986277995127667203870694452397233225961924996910197904901037135372560207618442015208042298428698343225720163505153059
n = p * q * r * s
phi = (p - 1) * (q - 1) * (r - 1) * (s - 1)
p_, q_, r_, s_ = self.known_phi.factorize_multi_prime(n, phi)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertIsInstance(r_, int)
self.assertIsInstance(s_, int)
self.assertEqual(n, p_ * q_ * r_ * s_)
def test_implicit(self):
p_bitsize = 1024
q_bitsize = 512
shared_bitsize = 684
p1 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343421170770833007677775061536204663181723877277783535322237577024424245899108264063112142009298991310208363
q1 = 12098618010582908146005387418068214530897837924954238474768639057877490835545707924234415267192522442378424554055618356812999593976451240454748132615211091
p2 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343420364306790694479071514320422685064042719135179664690266371525865249047670187055110695514824881157627139
q2 = 6947349788273330265284965959588633765145668297542467009935686733076998478802274287263210169428313906535572268083136251282544180080959668222544545924665987
p3 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343421225512127678851876291564787861171689610002001450319286946495752591223718157676932258249173072665300213
q3 = 9266126880388093025412332663804790639778236438889018854356539267369792799981733933428697598363851162957322580350270024369332640344413674817822906997102161
p4 = 114078116454996138073318170170395300151527904793534256191938789983399536922395777111499295202803369554422196999085171496293035396121701314031895628788412353005299652082324755433547975515470738465391276343421356808531436971239501427225110998678228016324130962852291540962098563998522061844259409194324238072163
q4 = 9346194396330429861097524187193981265347523161493757436812567448933497111978504926263282763464402757659318174531608519618989854444686100976857830087136899
moduli = [p1 * q1, p2 * q2, p3 * q3, p4 * q4]
for i, (p, q) in enumerate(self.implicit.factorize_msb(moduli, p_bitsize + q_bitsize, shared_bitsize)):
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
self.assertEqual(moduli[i], p * q)
p_bitsize = 1024
q_bitsize = 512
shared_bitsize = 684
p1 = 137676848178120053804151859930883725890803026594890273621717986880391033552896124307278203769389114417028688066268898176276364165645879838855204653941267370118703755611397682095578076818071918172477401067278492828257626897251549091543352809233324240524137497086302474085899298902638892888908168338819819232793
q1 = 13166288667078358159532363247770104519199514211373352701434198635956864629466947059508438393840310722732010695913860165840076158141600542903957511858467599
p2 = 155941871148496045943650517403022286219330266513190620694534749227433871940120353353030481603047425408777193957891989215447984590279121382305371103889682866866611645183334486259197241694690077730091496562828758139564286098307121800141566950170972849436331381375112592397181935508950663666559821018117710798361
q2 = 8054287780708269262514472947823359228967255917411384941738106945448488928023325871002415540629545474428145043227927492187948846465762213369395150593287629
p3 = 146542545226083477723264700810318219628590283511298968176573337385538577833243759669492317165475590615268753085678168828004241411544898671318095131587338794716729315057151379325654916607098703691695457183186825995894712193071356602411894624624795802572705076938306979030565015683237625719989339343497095536153
q3 = 8348967325072059612026168622784453891507881426476603640658340020341944731532364677276401286358233081971838597029494396167050440290022806685890808240656759
p4 = 167661072178525609874536869751051800065390422834592103113971975955391615118678036572040576294964853025982786705404563191397770270731849495157247117854529039983840787661878167379723898817843318578402737767598910576316837813336887274651599847119701845895279082627804568462120651226573750359206381471191410662937
q4 = 8145167185335505501783087854760814147233023836090931783403657001079727963955491428876064700621053935085252069162037262941731093071208640285177101456231051
moduli = [p1 * q1, p2 * q2, p3 * q3, p4 * q4]
for i, (p, q) in enumerate(self.implicit.factorize_lsb(moduli, p_bitsize + q_bitsize, shared_bitsize)):
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
self.assertEqual(moduli[i], p * q)
def test_roca(self):
# 39th primorial
M = 962947420735983927056946215901134429196419130606213075415963491270
# These primes are chosen such that a' is pretty small so it doesn't take too long.
p = 85179386137518452231354185509698113331528483782580002217930594759662020757433
q = 121807704694511224555991770528701515984374557330058194205583818929517699002107
n = p * q
p_, q_ = self.roca.factorize(n, M, 5, 6)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_shor(self):
# Examples from the reference paper
p = 1789
q = 1847
n = p * q
p_, q_ = self.shor.factorize(n, 751228, 78)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
p = 12343
q = 12391
n = p * q
p_, q_ = self.shor.factorize(n, 2, 4247705)
self.assertIsInstance(p_, int)
self.assertIsInstance(q_, int)
self.assertEqual(n, p_ * q_)
def test_twin_primes(self):
    """Factor a product of twin primes (q = p + 2)."""
    p = 4045364040964617981493056570547683620499113851384489798802437290109120991898115799819774088264427282611552038114397865000343325953101387058967136608664301
    q = p + 2  # same value as the original literal; twin of p
    n = p * q
    recovered_p, recovered_q = self.twin_primes.factorize(n)
    self.assertIsInstance(recovered_p, int)
    self.assertIsInstance(recovered_q, int)
    self.assertEqual(n, recovered_p * recovered_q)
class TestGCM(TestCase):
    from gcm import forbidden_attack

    def test_forbidden_attack(self):
        """Reusing a GCM nonce leaks the authentication key: recover candidate
        keys from two ciphertexts under the same key/nonce pair and forge a
        valid tag for attacker-chosen data."""
        key = randbytes(16)
        iv = randbytes(16)

        def encrypt(ad, pt):
            # A fresh cipher object deliberately reuses the same key/nonce.
            cipher = AES.new(key, AES.MODE_GCM, nonce=iv)
            cipher.update(ad)
            return cipher.encrypt_and_digest(pt)

        a1, p1 = randbytes(16), randbytes(16)
        c1, t1 = encrypt(a1, p1)
        a2, p2 = randbytes(16), randbytes(16)
        c2, t2 = encrypt(a2, p2)

        forged = False
        for h in self.forbidden_attack.recover_possible_auth_keys(a1, c1, t1, a2, c2, t2):
            target_a = randbytes(16)
            target_c = randbytes(16)
            forged_t = self.forbidden_attack.forge_tag(h, a1, c1, t1, target_a, target_c)
            try:
                verifier = AES.new(key, AES.MODE_GCM, nonce=iv)
                verifier.update(target_a)
                verifier.decrypt_and_verify(target_c, forged_t)
                forged = True
                break
            except ValueError:
                # Authentication failed, so we try the next authentication key.
                continue
        if not forged:
            self.fail()
class TestHNP(TestCase):
    from hnp import lattice_attack

    def _dsa(self, p, g, x):
        """Produce one DSA-style signature (h, r, s) with private key x and
        return it together with its nonce k."""
        h = getrandbits(p.bit_length())
        k = randint(1, p - 1)
        r = pow(g, k, p)
        s = (pow(k, -1, p) * (h + x * r)) % p
        return h, r, s, k

    def _check_partial_nonce_attack(self, attack, p, g, x, nonce_bitsize, known_bits, partial):
        """Run a known-MSB/LSB lattice attack and verify full key and nonce
        recovery.

        attack: lattice-attack generator (dsa_known_msb or dsa_known_lsb).
        partial: maps a full nonce k to the leaked part fed to the attack.
        """
        n_signatures = 25
        nonces = []
        signatures = []
        for _ in range(n_signatures):
            h, r, s, k = self._dsa(p, g, x)
            nonces.append(k)
            signatures.append((h, r, s, partial(k)))
        x_, nonces_ = next(attack(p, signatures, nonce_bitsize, known_bits))
        self.assertIsInstance(x_, int)
        self.assertIsInstance(nonces_, list)
        self.assertEqual(x, x_)
        for i in range(n_signatures):
            self.assertIsInstance(nonces_[i], int)
            self.assertEqual(nonces[i], nonces_[i])

    def test_lattice_attack(self):
        # Not a safe prime, but it doesn't really matter.
        p = 299182277398782807472682876223275635417
        g = 5
        x = randint(1, p - 1)
        nonce_bitsize = p.bit_length()

        # Scenario 1: top bits of each nonce leaked.
        msb_known = 7
        self._check_partial_nonce_attack(
            self.lattice_attack.dsa_known_msb, p, g, x, nonce_bitsize, msb_known,
            lambda k: k >> (nonce_bitsize - msb_known))

        # Scenario 2: bottom bits of each nonce leaked.
        lsb_known = 7
        self._check_partial_nonce_attack(
            self.lattice_attack.dsa_known_lsb, p, g, x, nonce_bitsize, lsb_known,
            lambda k: k % (2 ** lsb_known))

        # Scenario 3: middle bits of two nonces known (MSBs and LSBs unknown).
        msb_unknown = 10
        lsb_unknown = 20
        h1, r1, s1, k1 = self._dsa(p, g, x)
        signature1 = (h1, r1, s1, (k1 >> lsb_unknown) % (2 ** (nonce_bitsize - msb_unknown)))
        h2, r2, s2, k2 = self._dsa(p, g, x)
        signature2 = (h2, r2, s2, (k2 >> lsb_unknown) % (2 ** (nonce_bitsize - msb_unknown)))
        x_, k1_, k2_ = self.lattice_attack.dsa_known_middle(p, signature1, signature2, nonce_bitsize, msb_unknown, lsb_unknown)
        self.assertIsInstance(x_, int)
        self.assertIsInstance(k1_, int)
        self.assertIsInstance(k2_, int)
        self.assertEqual(x, x_)
        self.assertEqual(k1, k1_)
        self.assertEqual(k2, k2_)
class TestIGE(TestCase):
    from ige import padding_oracle

    def _encrypt(self, key, p):
        """AES-IGE encrypt p under key with freshly drawn p0/c0 IV blocks."""
        p0 = randbytes(16)
        c0 = randbytes(16)
        ecb = AES.new(key, mode=AES.MODE_ECB)
        prev_p, prev_c = p0, c0
        c = bytearray()
        for off in range(0, len(p), 16):
            block = p[off:off + 16]
            # IGE: c_i = E(p_i XOR c_{i-1}) XOR p_{i-1}
            enc = strxor(ecb.encrypt(strxor(block, prev_c)), prev_p)
            prev_p, prev_c = block, enc
            c += enc
        return p0, c0, c

    def _valid_padding(self, key, p0, c0, c):
        """Padding oracle: True iff the IGE decryption of c unpads cleanly."""
        try:
            ecb = AES.new(key, mode=AES.MODE_ECB)
            prev_p, prev_c = p0, c0
            p = bytearray()
            for off in range(0, len(c), 16):
                block = c[off:off + 16]
                # IGE: p_i = D(c_i XOR p_{i-1}) XOR c_{i-1}
                dec = strxor(ecb.decrypt(strxor(block, prev_p)), prev_c)
                prev_p, prev_c = dec, block
                p += dec
            unpad(p, 16)
            return True
        except ValueError:
            return False

    def test_padding_oracle(self):
        """Recover plaintexts of every padded length from the padding oracle."""
        key = randbytes(16)
        for length in range(16):
            p = pad(randbytes(length + 1), 16)
            p0, c0, c = self._encrypt(key, p)
            recovered = self.padding_oracle.attack(
                lambda p0, c0, c: self._valid_padding(key, p0, c0, c), p0, c0, c)
            self.assertEqual(p, recovered)
class Knapsack(TestCase):
    from knapsack import low_density

    def test_low_density(self):
        """Recover the 0/1 selection vector of two low-density subset sums."""
        cases = (
            ([429970831622, 650002882675, 512682138397, 145532365100, 462119415111, 357461497167, 582429951539, 22657777498, 2451348134, 380282710854, 251660920136, 103765486463, 276100153517, 250012242739, 519736909707, 451460714161],
             5398327344820,
             [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
            ([23449054409, 58564582991, 24739686534, 30995859145, 16274600764, 13384701522, 45782350364, 10685194276, 18864211511, 9594013152, 50215903866, 7952180124, 42094717093, 50866816333, 44318421949, 31143511315],
             42313265920,
             [1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]),
        )
        for a, s, expected in cases:
            e = self.low_density.attack(a, s)
            for i in range(len(a)):
                self.assertIsInstance(e[i], int)
            self.assertEqual(e, expected)
class LCG(TestCase):
    from lcg import parameter_recovery
    from lcg import truncated_parameter_recovery
    from lcg import truncated_state_recovery

    def _assert_parameters_recovered(self, expected, recovered):
        """Assert each recovered LCG parameter is an int equal to its
        expected counterpart (order: modulus, multiplier, increment)."""
        for expected_value, recovered_value in zip(expected, recovered):
            self.assertIsInstance(recovered_value, int)
            self.assertEqual(expected_value, recovered_value)

    def test_parameter_recovery(self):
        """Full-output parameter recovery for every combination of pre-known
        parameters (the original spelled out all eight combinations)."""
        modulus = 230565400234205371157763985910524799617
        multiplier = 192101630084837332907895369052393213499
        increment = 212252940839553091477500231998099191939
        state = 182679397636465813399296757573664340382
        n_outputs = 10
        outputs = []
        for _ in range(n_outputs):
            state = (multiplier * state + increment) % modulus
            outputs.append(state)
        known = {"modulus": modulus, "multiplier": multiplier, "increment": increment}
        combinations = (
            (),
            ("modulus",),
            ("multiplier",),
            ("increment",),
            ("modulus", "multiplier"),
            ("modulus", "increment"),
            ("multiplier", "increment"),
            ("modulus", "multiplier", "increment"),
        )
        for names in combinations:
            kwargs = {name: known[name] for name in names}
            recovered = self.parameter_recovery.attack(outputs, **kwargs)
            self._assert_parameters_recovered((modulus, multiplier, increment), recovered)

    def _assert_truncated_recovery(self, recovered, state, true_params,
                                   state_bitsize, output_bitsize,
                                   n_outputs, n_test, max_failures):
        """Verify one truncated-LCG recovery: all values are ints and the
        recovered generator predicts the true generator's truncated output,
        allowing up to max_failures mismatches over n_test steps."""
        modulus_, multiplier_, increment_, seed_ = recovered
        for value in recovered:
            self.assertIsInstance(value, int)
        modulus, multiplier, increment = true_params
        s = state  # true generator continues from its final observed state
        s_ = seed_
        # Advance the recovered generator past the observed outputs.
        for _ in range(n_outputs):
            s_ = (multiplier_ * s_ + increment_) % modulus_
        shift = state_bitsize - output_bitsize
        failures = 0
        for _ in range(n_test):
            s = (multiplier * s + increment) % modulus
            s_ = (multiplier_ * s_ + increment_) % modulus_
            if (s >> shift) != (s_ >> shift):
                failures += 1
        self.assertLessEqual(failures, max_failures)

    def test_truncated_parameter_recovery(self):
        state_bitsize = 128
        output_bitsize = 32
        modulus = 236360717458728691963813082060498623380
        multiplier = 192101630084837332907895369052393213499
        increment = 212252940839553091477500231998099191939
        state = 182679397636465813399296757573664340382
        n_outputs = 40
        # The recovery method is not perfect, so we allow some errors in the generated output.
        n_test = 200
        max_failures = 5
        outputs = []
        for _ in range(n_outputs):
            state = (multiplier * state + increment) % modulus
            outputs.append(state >> (state_bitsize - output_bitsize))
        # Same four pre-known-parameter combinations as the original code.
        for kwargs in ({}, {"modulus": modulus}, {"multiplier": multiplier},
                       {"modulus": modulus, "multiplier": multiplier}):
            recovered = next(self.truncated_parameter_recovery.attack(
                outputs, state_bitsize, output_bitsize, state_bitsize, **kwargs))
            self._assert_truncated_recovery(
                recovered, state, (modulus, multiplier, increment),
                state_bitsize, output_bitsize, n_outputs, n_test, max_failures)

    def test_truncated_state_recovery(self):
        state_bitsize = 128
        output_bitsize = 32
        modulus = 236360717458728691963813082060498623380
        multiplier = 192101630084837332907895369052393213499
        increment = 212252940839553091477500231998099191939
        state = 182679397636465813399296757573664340382
        n_outputs = 40
        outputs = []
        states = []
        for _ in range(n_outputs):
            state = (multiplier * state + increment) % modulus
            states.append(state)
            outputs.append(state >> (state_bitsize - output_bitsize))
        states_ = self.truncated_state_recovery.attack(outputs, state_bitsize, output_bitsize, modulus, multiplier, increment)
        for i in range(n_outputs):
            self.assertIsInstance(states_[i], int)
            self.assertEqual(states[i], states_[i])
class Pseudoprimes(TestCase):
    from pseudoprimes import miller_rabin

    def test_miller_rabin(self):
        """Generate a 3-factor strong pseudoprime to the first 11 prime bases."""
        bases = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31]
        p, p1, p2, p3 = self.miller_rabin.generate_pseudoprime(bases, min_bitsize=512)
        for value in (p, p1, p2, p3):
            self.assertIsInstance(value, int)
        self.assertGreaterEqual(p.bit_length(), 512)
        self.assertEqual(p, p1 * p2 * p3)
        # Write p - 1 as 2^r * d with d odd (r itself is not used below).
        r = 0
        d = p - 1
        while d % 2 == 0:
            r += 1
            d //= 2
        for base in bases:
            # Each base must pass the first Miller-Rabin round: base^d == +/-1 mod p.
            self.assertIn(pow(base, d, p), (1, p - 1))
class RC4(TestCase):
    from rc4 import fms

    def test_fms(self):
        """Fluhrer-Mantin-Shamir: recover a 13-byte key from an oracle that
        encrypts under attacker-chosen IVs prepended to the secret key."""
        key = randbytes(13)

        def encrypt_oracle(iv, p):
            # RC4 keyed with IV || secret key, WEP-style.
            return ARC4.new(iv + key).encrypt(p)

        recovered = self.fms.attack(encrypt_oracle, 13)
        self.assertEqual(key, recovered)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
    """Per-user profile data, linked one-to-one to Django's built-in User."""
    # Create a Profile table that links to User Table.
    # The User PK doubles as this table's PK; deleting the User deletes the profile.
    user = models.OneToOneField(User,on_delete=models.CASCADE,primary_key=True)
    # Free-form biography text (required).
    bio = models.TextField()
    location = models.CharField(max_length=30, blank=True)
    education = models.CharField(max_length=30,blank=True,null=True)
    birth_date = models.DateField(null=True, blank=True)
    # image = models.ImageField(upload_to='image/',null=True,default='image/default-profile.png')
    # NOTE(review): integers drop leading zeros and cannot hold '+'/separators;
    # a CharField is usually preferred for phone numbers (would need a migration).
    phoneNumber = models.IntegerField(null=True,blank=True)
    country = models.CharField(max_length=30,null=True)
    # Denormalized subscriber counter.
    subscribers = models.IntegerField(default=0)

    def __str__(self):
        # Profiles are displayed by their owner's username.
        return self.user.username
import logging
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import MethodDataSerializer
class ProcessorView(APIView):
    """Accept method data via POST, log it, and acknowledge receipt."""
    content_type = 'application/json'

    def post(self, request, format=None):
        """Validate the posted payload; 400 with field errors on failure."""
        serializer = MethodDataSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=400)
        # NOTE: uses the root logger; a module-level getLogger(__name__) would
        # be more conventional.
        logging.info(serializer.validated_data)
        # BUGFIX: removed the dead `dots = []` / `for i in range(1, n+1)` loop —
        # `n` was never defined, so every valid request crashed with NameError.
        return Response({'asd': 1})
|
from PyQt5 import QtWidgets, QtCore
import pyqtgraph as pg
import sys
from pylsl import StreamInlet, resolve_stream
import argparse
class MainWindow(QtWidgets.QMainWindow):
    """Live EEG plot window.

    Resolves an LSL EEG stream matching the module-level ``device_id``, creates
    one pyqtgraph subplot per channel in ``channels_used``, and polls the inlet
    on a QTimer, appending the newest sample of each pulled chunk.
    """

    def __init__(self, *args, **kwargs):
        # Full montage delivered by the amplifier, in stream-column order.
        self.channel_list = [
            "AFF1h",
            "AFF5h",
            "F7",
            "FC5",
            "FC1",
            "C3",
            "T7",
            "TP9",
            "CP5",
            "CP1",
            "Pz",
            "P3",
            "P7",
            "PO9",
            "O1",
            "Oz",
            "O2",
            "PO10",
            "P8",
            "P4",
            "TP10",
            "CP6",
            "CP2",
            "Cz",
            "C4",
            "T8",
            "FC6",
            "FC2",
            "FCz",
            "F8",
            "AFF6h",
            "AFF2h",
            "GSR",
            "EKG",
        ]  #AUX_EKG = EKG and AUX_GSR = GSR
        # Since 11/22 the lab decided to use the following channels:
        self.channels_used = [
            "AFF1h",
            "F7",
            "FC5",
            "C3",
            "T7",
            "TP9",
            "Pz",
            "P3",
            "P7",
            "O1",
            "O2",
            "P8",
            "P4",
            "TP10",
            "Cz",
            "C4",
            "T8",
            "FC6",
            "FCz",
            "F8",
            "AFF2h",
            "GSR",
            "EKG",
        ]
        # Get the index of channel that are being used.
        self.channels_used_index = [
            self.channel_list.index(ch)
            for ch in self.channels_used
            if ch in self.channel_list
        ]
        # initialize plots
        super(MainWindow, self).__init__(*args, **kwargs)
        # initialize streams
        # Amplifier host name -> human-readable window title.
        self.host_name = {
            "actiCHamp-20010205": "Tiger - EEG(200 10 205)",
            "actiCHamp-21010477": "Lion - EEG(210 10 477)", #Lions EEG amp is replaced with cheetah's amp.
            "actiCHamp-21020492": "Leopard - EEG(210 20 492)",
        }
        self.streams = resolve_stream()
        # NOTE(review): `device_id` is a module-level name assigned in the
        # __main__ block; if no EEG stream matches it, self.inlet is never
        # created and update_plot_data() will raise AttributeError.
        for i in range(len(self.streams)):
            if self.streams[i].type() == 'EEG' and self.streams[i].name() == device_id:
                self.inlet = StreamInlet(self.streams[i])
        # Title the window after whichever host name contains the device id.
        for stream_name, stream_name_with_id in self.host_name.items():
            if device_id in stream_name:
                self.setWindowTitle(stream_name_with_id)
        self.graphWidgetLayout = pg.GraphicsLayoutWidget()
        self.graphWidgetLayout.resize(900, 2500)
        self.setCentralWidget(self.graphWidgetLayout)
        # Enable antialiasing for prettier plots
        pg.setConfigOptions(antialias=True)
        self.graphWidgetLayout.setBackground("w")
        self.pen = pg.mkPen(color=(0, 0, 0), width=1)  # black
        self.ch = []  # one PlotItem per used channel (see NOTE in plots())
        label_style = {"color": "black", "font-size": "8pt"}
        self.srate = 500  # 500Hz for EEG data
        self.timer = QtCore.QTimer()
        # why? https://stackoverflow.com/questions/59094207/how-to-set-pyqt5-qtimer-to-update-in-specified-interval
        self.timer.setInterval(round(1000 / self.srate))
        n_channels = len(self.channels_used)
        self.x = [0]
        self.y = [[0] for _ in range(n_channels)]  # 23 channel data
        self.dataLine = [[] for _ in range(n_channels)]
        for self.idx, self.channel in enumerate(self.channels_used):
            # create 33 subplots
            # NOTE(review): self.channel is immediately rebound from the
            # channel-name string to the PlotItem created here.
            self.channel = self.graphWidgetLayout.addPlot(row=self.idx, col=0)
            # self.channel.showAxes('left', showValues=False)
            if self.idx < n_channels - 1:
                # Only the bottom-most subplot keeps its x axis.
                self.channel.hideAxis("bottom")
            self.channel.setLabel("left", self.channels_used[self.idx], **label_style)
            self.ch.append(self.channel)
        self.plots()

    def plots(self):
        # draw
        # Create one curve per subplot, then start the polling timer.
        # NOTE(review): this loop rebinds self.ch from the list of PlotItems to
        # each curve in turn; iteration still works because enumerate holds a
        # reference to the original list, but self.ch ends up as the last curve.
        for self.idx, self.ch in enumerate(self.ch):
            self.ch = self.ch.plot(x=self.x, y=self.y[self.idx], pen=self.pen)
            self.dataLine[self.idx].append(self.ch)
        self.timer.timeout.connect(self.update_plot_data)
        self.timer.start()

    def update_plot_data(self):
        # update data
        # Keep a sliding window of at most 100 points per channel.
        if len(self.x) >= 100:
            self.x = self.x[1:]  # Remove the first x element.
            for i in range(len(self.channels_used)):
                self.y[i] = self.y[i][1:]  # Remove the first
        # Get the next chunk of samples from LSL.
        # They were accumulated while we were plotting the previous chunk
        sample, time = self.inlet.pull_chunk()
        if len(sample) > 0:
            # Plot the most recent sample of this chunk. Discard the rest
            # Update the x value according to the number of samples we skipped
            self.x.append(self.x[-1] + len(sample))
            # Append the last sample
            for i in range(len(self.channels_used)):
                self.y[i].append(sample[-1][i])
        for i in range(0, len(self.channels_used)):
            self.dataLine[i][0].setData(self.x, self.y[i])
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plotting EEG signals via LSL")
    # BUGFIX: the help text claimed this was the *number* of EEG devices, but
    # the value is used as the LSL stream (device) name that MainWindow
    # matches against `self.streams[i].name()`.
    parser.add_argument("--d", required=True, help="LSL stream (device) name of the EEG amplifier to plot")
    arg = parser.parse_args()
    device_id = str(arg.d)
    app = QtWidgets.QApplication(sys.argv)
    w = MainWindow()
    w.show()
    sys.exit(app.exec_())
|
# CLASS
class tamano_ramo:
    """Represents the size of a flower bouquet."""

    # ATTRIBUTES: catalogue of recognised sizes (small / large).
    tamanos = ["s", "l"]

    # METHODS:
    def __init__(self, tamano):
        # The given size is stored as-is; no validation is performed.
        self.tamano = tamano
        print("se ha añadido un tamano de ramo")
# UNIT TEST AREA (smoke test when run as a script)
if __name__ == "__main__":
    # NOTE(review): "L" is not in tamano_ramo.tamanos (["s", "l"]); the class
    # performs no validation, so this silently stores an unlisted size.
    tamano_ramo("L")
    tamano_ramo("s")
from source.T5_LinearStructure.P3_List.L1_Node import Node
class CircularLinkedList:
    """Singly linked circular list with a movable "current" cursor."""

    def __init__(self):
        """Create a new empty list."""
        self.mPrev = None  # node preceding the current element
        self.mCurr = None  # current node of the list

    def empty(self):
        """Return True if the list contains no elements."""
        return self.mCurr is None

    def next(self):
        """Advance the cursor to the next element.

        Raises StopIteration if the list is empty.
        """
        if self.mCurr is not None:
            self.mPrev = self.mCurr
            self.mCurr = self.mCurr.mNext
        else:
            raise StopIteration

    def current(self):
        """Return the payload of the current element, or None if empty."""
        if self.mCurr is not None:
            return self.mCurr.mItem
        else:
            return None

    def insert(self, item):
        """Insert a new element into the list before the current one."""
        node = Node(item)  # create a node for the element
        node.mNext = self.mCurr
        if self.empty():  # the list is empty
            node.mNext = node
            self.mCurr = node
            # BUGFIX: in a one-element circular list the node is its own
            # predecessor.  Without this line self.mPrev stayed None and the
            # very next insert() crashed with AttributeError on mPrev.mNext.
            self.mPrev = node
        else:  # the list contains at least one node
            self.mPrev.mNext = node
            self.mPrev = node

    def remove(self):
        """Remove the current element from the list."""
        pass  # TODO: Implement by yourself

    def __str__(self):
        return str(self.current())
# Demo: insert four items, then walk the ring to show the cursor wrapping.
ring = CircularLinkedList()
for item in (1, 2, 3, 4):
    ring.insert(item)
print(ring)
for _ in range(7):
    ring.next()
    print(ring)
|
#Python04_12_StrEx01_신동혁
# String literal examples: a plain single-quoted string, and a multi-line
# value built by repetition (byte-identical to the original triple-quoted
# literal '\nNiceDay\nNiceDay\nNiceDay\n').
s01 = 'NiceDay'
print(s01)
s02 = '\n' + 'NiceDay\n' * 3
print(s02)
# Echo server: keep the service running indefinitely.
import socket
import sys

HOST = ''
PORT = 8888

serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn = None  # most recent client connection, tracked for cleanup
try:
    serversock.bind((HOST, PORT))
    print('에코 서버 서비스 시작')
    serversock.listen(5)
    while 1:
        conn, addr = serversock.accept()
        print('접속자 컴의 정보 : ', addr[0], addr[1])
        print(conn.recv(1024).decode())  # print the received message
        # send the reply
        conn.send(('from server : ' + '잘 살어라').encode('utf-8'))
        # BUGFIX: close each client socket instead of leaking one per loop
        # iteration (only the very last one was ever closed before).
        conn.close()
        conn = None
except socket.error as err:
    print('에러 : ',err)
    sys.exit()
finally:
    serversock.close()
    # BUGFIX: guard against `conn` never having been assigned — the original
    # unconditional conn.close() raised NameError if no client ever connected.
    if conn is not None:
        conn.close()
#! /usr/bin/env python3
# Q. Given integers a1..an, can some subset of them sum exactly to k?
# => Depth-first search deciding, element by element, whether to take it.
n = 4
a = [1, 2, 4, 7]
k = 15

def dfs(i: int, s: int):
    """Return True iff s plus some subset of a[i:] equals k."""
    if i == n:
        # Reached a leaf: every element has been taken or skipped.
        return s == k
    # Branch 1 skips a[i]; branch 2 takes it (same order as the original).
    return dfs(i + 1, s) or dfs(i + 1, s + a[i])

print('Yes' if dfs(0, 0) else 'No')
# Q. 's' is the start, 'g' the goal, '#' a wall: can the goal be reached
#    moving up/down/left/right?
H, M = 10, 10  # H = number of rows, M = number of columns
inp = """
s.........
#########.
#.......#.
#..####.#.
##....#.#.
#####.#.#.
g.#.#.#.#.
#.#.#.#.#.
#.#.#.#.#.
#.....#...
"""
c = []
sx, sy = 0, 0
gx, gy = 0, 0
# Parse the grid and locate the start and goal cells.
for i, r in enumerate(inp.split()):
    r = list(r)
    c.append(r)
    for j, s in enumerate(r):
        if s == 's':
            sx = i
            sy = j
        elif s == 'g':
            gx = i
            gy = j
# Offsets for the four orthogonal moves.
vx = [0, 1, 0, -1]
vy = [-1, 0, 1, 0]
result = False

def dfs(x, y):
    """Flood-fill from (x, y); set result=True once 'g' becomes adjacent."""
    # Mark the current cell 'X' so it is never re-entered.
    c[x][y] = 'X'
    for i in range(4):
        nx, ny = x + vx[i], y + vy[i]
        # BUGFIX: the row index nx must be bounded by the row count H and the
        # column index ny by the column count M; the original had them swapped
        # (harmless only because H == M here).
        if nx < 0 or nx >= H or ny < 0 or ny >= M or c[nx][ny] == '#':
            pass
        elif c[nx][ny] == 'g':
            global result
            result = True
            return
        elif c[nx][ny] == '.':
            dfs(nx, ny)

dfs(sx, sy)
print(result)
|
#!/usr/bin/env python
import unittest, random
from common import *
from pprint import *
from itertools import product
# Declare C return types for the `ibd` library: without an explicit .restype,
# ctypes assumes C int, which truncates 64-bit longs and pointers.
ibd.Mr_Plus_Infinity.restype = c_long
ibd.Mr_Minus_Infinity.restype = c_long
mr_plus_infinity = ibd.Mr_Plus_Infinity()
mr_minus_infinity = ibd.Mr_Minus_Infinity()
# These entry points return object pointers (opaque handles).
ibd.Hti_New.restype = ctypes.c_void_p
ibd.Htib_New.restype = ctypes.c_void_p
ibd.Ht_Summarize_Update.restype = ctypes.c_void_p
ibd.Ht_Summarize_Finish.restype = ctypes.c_void_p
ibd.Ht_Get.restype = ctypes.c_void_p
ibd.Ht_View.restype = ctypes.c_void_p
ibd.Ht_HashAtMarkerPoint.restype = ctypes.c_void_p
ibd.Hti_Next.restype = ctypes.c_void_p
def isNull(t):
    """True iff `t` represents a NULL pointer (None or a zero value)."""
    if t is None:
        return True
    return t == 0
def getHashTreeSet(ht):
    """Collect every hash stored in table `ht`, checking that the plain and
    buffered iterators visit exactly the same (duplicate-free) set."""
    key = c_void_p()
    forward = set()
    it = ibd.Hti_New(ht)
    while ibd.Hti_Next(byref(key), it):
        value = extractHash(key)
        assert value not in forward
        forward.add(value)
    buffered = set()
    itb = ibd.Htib_New(ht)
    while ibd.Htib_Next(byref(key), itb):
        value = extractHash(key)
        assert value not in buffered
        buffered.add(value)
    assert forward == buffered
    return forward
# Shorthand constructor for a hash table; returns an owned pointer (refcount 1).
newHT = ibd.NewHashTable
newHT.restype = ctypes.c_void_p
def getHashAtMarkerLoc(ht, m):
    """Return the hash of table `ht` at marker point `m`, verifying the
    returned key's refcount stays 1 until we release it."""
    key = ibd.Ht_HashAtMarkerPoint(0, ht, c_long(m))
    assert ibd.O_RefCount(key) == 1
    result = extractHash(key)
    assert ibd.O_RefCount(key) == 1
    ibd.O_DecRef(key)
    return result
def getHashOfMarkerRange(ht, m1, m2):
    """Return the hash of table `ht` over the marker range [m1, m2), verifying
    the returned key's refcount stays 1 until we release it."""
    key = ibd.Ht_HashOfMarkerRange(0, ht, c_long(m1), c_long(m2))
    assert ibd.O_RefCount(key) == 1
    result = extractHash(key)
    assert ibd.O_RefCount(key) == 1
    ibd.O_DecRef(key)
    return result
def getHashMList(ht, marker_points):
    """Return the hash of table `ht` at each marker point, in order."""
    # Idiomatic list comprehension instead of a manual append loop.
    return [getHashAtMarkerLoc(ht, m) for m in marker_points]
def allConsistentlyDistinct(*args):
    """True iff every argument sequence collapses to exactly one distinct
    value and no two argument sequences share a value."""
    seen = set(args[0])
    if len(seen) != 1:
        return False
    for group in args[1:]:
        distinct = set(group)
        if len(distinct) != 1 or seen & distinct:
            return False
        seen |= distinct
    return True
def hashesConsistent(ht, *loc_lists):
    """Check each location list maps to a single hash and that distinct lists
    never share a hash; dump a location->hash table on failure.

    Returns True on success, False (after printing a debug dump) otherwise.
    """
    hash_lists = [getHashMList(ht, loc_list) for loc_list in loc_lists]
    # For printing
    if not allConsistentlyDistinct(*hash_lists):
        # BUGFIX: parenthesized so this is valid under both Python 2 (a
        # parenthesized print statement) and Python 3 (a function call); the
        # original bare `print "..."` was a SyntaxError on Python 3.
        print("\n\nERROR DUMP:")
        pprint([dict(zip(ll, hl)) for ll, hl in zip(loc_lists, hash_lists)])
        return False
    else:
        return True
def getHTValiditySet(ht, rl):
    """Return the set of (hash, marker) pairs: for every key in `ht`, each
    marker point in `rl` at which that key is reported valid."""
    hti = ibd.Hti_New(ht)
    s = set()
    hk = c_void_p()
    while True:
        okay = ibd.Hti_Next(byref(hk), hti)
        if not okay:
            break
        else:
            h = extractHash(hk)
            # NOTE(review): `s` holds (hash, marker) tuples, so this membership
            # test against a bare hash is always True — it looks copied from
            # getHashTreeSet and is vacuous here.
            assert h not in s
            for r in rl:
                if ibd.H_MarkerPointIsValid(hk, r):
                    s.add( (h, r) )
    return s
class TestHashTableBasic(unittest.TestCase):
"""
Just a few hash keys tests that are more sanity checks then full
tests.
"""
def test01a_CreateDelete(self):
    # A freshly created table is owned solely by us (refcount 1).
    ht = newHT()
    self.assert_(ibd.O_RefCount(ht) == 1, "refcount = %d" % ibd.O_RefCount(ht) )
    ibd.O_DecRef(ht)

def test01b_SimpleRetrieval(self):
    # A key given to the table can be viewed back as the same object.
    ht = newHT()
    self.assert_(ibd.O_RefCount(ht) == 1)
    hk = makeHashKey(0)
    ibd.Ht_Give(ht, hk)
    self.assert_(ibd.Ht_View(ht, hk) == hk)
    ibd.O_DecRef(ht)

def test02_Sizes(self):
    # Size tracks Give/Pop exactly.
    ht = newHT()
    hk1 = makeHashKey(0)
    hk2 = makeHashKey(1)
    self.assert_(ibd.Ht_Size(ht) == 0)
    ibd.Ht_Give(ht, hk1)
    self.assert_(ibd.Ht_Size(ht) == 1)
    ibd.Ht_Give(ht, hk2)
    self.assert_(ibd.Ht_Size(ht) == 2)
    ibd.Ht_Pop(ht, hk1)
    self.assert_(ibd.Ht_Size(ht) == 1)
    ibd.Ht_Pop(ht, hk2)
    self.assert_(ibd.Ht_Size(ht) == 0)
    ibd.O_DecRef(ht)

def test03_RetrieveOriginal(self):
    # Lookup with an equal-but-distinct key returns the originally stored object.
    ht = newHT()
    hk1 = makeHashKey(0)
    hk2 = makeHashKey(0)
    self.assert_(hk1 != hk2)
    ibd.Ht_Give(ht, hk1)
    self.assert_(ibd.Ht_View(ht, hk2) == hk1)
    ibd.O_DecRef(ht)

def test04_SimpleIterator(self):
    # Iteration visits exactly the set of inserted hashes.
    ht = newHT()
    hk1 = makeHashKey(0)
    hk2 = makeHashKey(1)
    ibd.Ht_Give(ht, hk1)
    ibd.Ht_Give(ht, hk2)
    strue = set([extractHash(hk1), extractHash(hk2)])
    s = getHashTreeSet(ht)
    self.assert_(s == strue, '%s != %s (true)' % (str(s), str(strue)))
    ibd.O_DecRef(ht)
def checkHkList(self, hashkeys, null_hash_keys = ()):
    """Insert `hashkeys` one by one, verifying size, containment and
    iteration at every step, then remove them all again.

    `null_hash_keys` are keys expected never to be present; clearing them
    must be a no-op at every stage.  BUGFIX: the default was a shared
    mutable list `[]` (the classic mutable-default pitfall); an immutable
    empty tuple preserves the behavior since the argument is only iterated.
    """
    ht = newHT()
    hashes = [extractHash(hk) for hk in hashkeys]
    count = 0
    for i, hk in enumerate(hashkeys):
        # Keys not yet inserted must be absent and unclearable.
        for hk2 in hashkeys[i:]:
            self.assert_(not ibd.Ht_Contains(ht, hk2))
            self.assert_(ibd.Ht_Clear(ht, hk2) == 0)
        self.assert_(ibd.Ht_Size(ht) == count,
                     "%d (given) != %d (count)" % (ibd.Ht_Size(ht), count))
        ibd.Ht_Give(ht, hk)
        count += 1
        self.assert_(ibd.Ht_Size(ht) == count,
                     "%d (given) != %d (count)" % (ibd.Ht_Size(ht), count))
    # Every inserted key is retrievable as the same object.
    for hk in hashkeys:
        self.assert_(ibd.Ht_View(ht, hk) == hk)
    self.assert_(ibd.Ht_Size(ht) == count)
    for hk2 in null_hash_keys:
        self.assert_(ibd.Ht_Clear(ht, hk2) == 0)
    self.assert_(ibd.Ht_Size(ht) == count,
                 "%d (given) != %d (count)" % (ibd.Ht_Size(ht), count))
    s = getHashTreeSet(ht)
    self.assert_(s == set(hashes))
    # Remove everything again, re-checking the size after each removal.
    for hk in hashkeys:
        ibd.Ht_Clear(ht, hk)
        count -= 1
        self.assert_(ibd.Ht_Size(ht) == count)
        for hk2 in null_hash_keys:
            self.assert_(ibd.Ht_Clear(ht, hk2) == 0)
        self.assert_(ibd.Ht_Size(ht) == count,
                     "%d (given) != %d (count)" % (ibd.Ht_Size(ht), count))
    ibd.O_DecRef(ht)
# test05_Corner_*: hash keys whose hex digests share long common prefixes,
# exercising deep node splits near the root of the hash tree.

def test05_Corner_01_Close_0(self):
    self.checkHkList(
        [exactHashKey("00000000000000000000000000000000")])

def test05_Corner_01_Close_0b(self):
    self.checkHkList(
        [exactHashKey("00000000000000000000000000000001")])

def test05_Corner_01_Close_1(self):
    self.checkHkList(
        [exactHashKey("00000000000000000000000000000000"),
         exactHashKey("00000000000000000000000000000001")])

def test05_Corner_01_Close_2(self):
    self.checkHkList(
        [exactHashKey("00000000000000000000000000000000"),
         exactHashKey("00000000000000000000000000000001"),
         exactHashKey("00000000000000000000000000000002")])

def test05_Corner_01_Close_3(self):
    self.checkHkList(
        [exactHashKey("00000000000000000000000000000000"),
         exactHashKey("00000000000000000000000000000001"),
         exactHashKey("00000000000000000000000000000002"),
         exactHashKey("00000000000000000000000000000003"),
         exactHashKey("00000000000000000000000000000004"),
         exactHashKey("00000000000000000000000000000005"),
         exactHashKey("00000000000000000000000000000006")])

def test05_Corner_01_Close_4(self):
    # All 16 keys differing only in the last nibble.
    self.checkHkList(
        [exactHashKey("0000000000000000000000000000000%s" % c)
         for c in "0123456789abcdef"])

def test05_Corner_01_Close_5(self):
    # All 256 keys differing only in the last byte.
    self.checkHkList(
        [exactHashKey("000000000000000000000000000000%s%s" % (c1, c2))
         for c1, c2 in product("0123456789abcdef", "0123456789abcdef")])

def test05_Corner_02_2LevelClose_1(self):
    self.checkHkList(
        [exactHashKey("01000000000000000000000000000000"),
         exactHashKey("02000000000000000000000000000000")])

def test05_Corner_02_2LevelClose_1_regression(self):
    # Regression: same key pattern but with marker-range info attached.
    self.checkHkList(
        [addMarkerInfo(exactHashKey("01000000000000000000000000000000"), 2,8),
         addMarkerInfo(exactHashKey("02000000000000000000000000000000"), 4,6)])

def test05_Corner_02_2LevelClose_2(self):
    self.checkHkList(
        [exactHashKey("01000000000000000000000000000000"),
         exactHashKey("02000000000000000000000000000000"),
         exactHashKey("03000000000000000000000000000000")])

def test05_Corner_02_2LevelClose_3(self):
    self.checkHkList(
        [exactHashKey("01000000000000000000000000000000"),
         exactHashKey("02000000000000000000000000000000"),
         exactHashKey("03000000000000000000000000000000"),
         exactHashKey("04000000000000000000000000000000"),
         exactHashKey("05000000000000000000000000000000"),
         exactHashKey("06000000000000000000000000000000"),
         exactHashKey("07000000000000000000000000000000")])

def test05_Corner_03_1LevelClose_1(self):
    self.checkHkList(
        [exactHashKey("10000000000000000000000000000000"),
         exactHashKey("20000000000000000000000000000000")])

def test05_Corner_03_1LevelClose_2(self):
    self.checkHkList(
        [exactHashKey("10000000000000000000000000000000"),
         exactHashKey("20000000000000000000000000000000"),
         exactHashKey("30000000000000000000000000000000")])

def test05_Corner_03_1LevelClose_3(self):
    self.checkHkList(
        [exactHashKey("10000000000000000000000000000000"),
         exactHashKey("20000000000000000000000000000000"),
         exactHashKey("30000000000000000000000000000000"),
         exactHashKey("40000000000000000000000000000000"),
         exactHashKey("50000000000000000000000000000000"),
         exactHashKey("60000000000000000000000000000000"),
         exactHashKey("70000000000000000000000000000000")])

def test05_Corner_03_1LevelClose_4(self):
    # All 16 keys differing only in the first nibble.
    self.checkHkList(
        [exactHashKey("%s0000000000000000000000000000000" % c)
         for c in "0123456789abcdef"])

def test05_Corner_03_1LevelClose_5(self):
    # All 256 keys differing only in the first byte.
    self.checkHkList(
        [exactHashKey("%s%s000000000000000000000000000000" % (c1, c2))
         for c1, c2 in product("0123456789abcdef", "0123456789abcdef")])
def test10_LargeContainmentTest(self):
    # Stress insert/lookup/removal with 100 distinct keys.
    n = 100
    hashkeys = [makeHashKey(i) for i in range(n)]
    self.checkHkList(hashkeys)

def test13_Regression_HashSequence(self):
    # Regression: after this particular insertion order, looking up an absent
    # key must return NULL.
    ht = newHT()
    hkl = [makeHashKey("n7"),
           makeHashKey("n-3"),
           makeHashKey("n5"),
           makeHashKey("n9"),
           makeHashKey("n-1"),
           makeHashKey("n-5")]
    for hk in hkl:
        ibd.Ht_Give(ht, hk)
    self.assert_(isNull(ibd.Ht_View(ht, makeHashKey("n-4"))))
    ibd.O_DecRef(ht)

def test14_BadClearIneffective(self):
    # Clearing a key not in the table returns falsy and leaves the size alone.
    ht = newHT()
    ibd.Ht_Give(ht, makeHashKey(0))
    ibd.Ht_Give(ht, makeHashKey(1))
    ibd.Ht_Give(ht, makeHashKey(2))
    self.assert_(ibd.Ht_Size(ht) == 3)
    h = makeHashKey(2)
    self.assert_(ibd.Ht_Clear(ht,h))
    ibd.O_DecRef(h)
    self.assert_(ibd.Ht_Size(ht) == 2)
    h = makeHashKey(10)
    self.assert_(not ibd.Ht_Clear(ht, h))
    ibd.O_DecRef(h)
    self.assert_(ibd.Ht_Size(ht) == 2)
    ibd.O_DecRef(ht)

def test15_Corner_Regression_PopOnEmpty(self):
    # Clearing from an empty table must neither crash nor change the size.
    ht = newHT()
    self.assert_(ibd.Ht_Size(ht) == 0)
    h = makeHashKey(0)
    self.assert_(not ibd.Ht_Clear(ht, h))
    ibd.O_DecRef(h)
    self.assert_(ibd.Ht_Size(ht) == 0)
    ibd.O_DecRef(ht)
################################################################################
# Reference Counting / Locking stuff
def testR01_Set(self):
ht = newHT()
self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
h = makeHashKey(0)
self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
ibd.Ht_Set(ht, h)
self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
ibd.O_DecRef(ht)
self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
ibd.O_DecRef(h)
def testR02_Clear(self):
ht = newHT()
self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
h = makeHashKey(0)
self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
ibd.Ht_Set(ht, h)
self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
ibd.Ht_Clear(ht, h)
self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
ibd.O_DecRef(ht)
self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
ibd.O_DecRef(h)
    def testR03_Give(self):
        """Ht_Give steals the caller's reference: the key's refcount is
        unchanged by the give, and drops when the table dies."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        ibd.O_IncRef(h)  # hold an extra ref so h survives the give
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Give(ht, h)
        # Give transfers ownership; no extra IncRef is performed.
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.O_DecRef(h)
    def testR04_Pop(self):
        """Give then Pop leaves the key's refcount at 1 throughout (give
        steals a ref, pop returns it to the caller)."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Give(ht, h)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Pop(ht, h)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        # NOTE(review): ht is never O_DecRef'd here — looks like a
        # test-side leak; confirm against the other testRxx methods.
        ibd.O_DecRef(h)
    def testR05_SetDefault(self):
        """Ht_SetDefault on an absent key behaves like Ht_Set for
        refcounting: the table takes its own reference."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_SetDefault(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.O_DecRef(h)
    def testR06_Replace(self):
        """Setting an equal key replaces the stored one: the old key loses
        the table's reference and the new key gains one."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        h2 = makeHashKey(0)  # equal key, distinct object
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Set(ht, h2)
        # h was displaced, h2 is now held by the table.
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 2, ibd.O_RefCount(h2))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.O_DecRef(h)
        ibd.O_DecRef(h2)
    def testR07_ReplaceDefault(self):
        """Ht_SetDefault on a present key is a no-op: the stored key keeps
        the table's reference and the offered key is untouched."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        h2 = makeHashKey(0)  # equal key, distinct object
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_SetDefault(ht, h2)
        # Unlike Ht_Set (testR06), the existing entry is kept.
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.O_DecRef(h)
        ibd.O_DecRef(h2)
    def testR08_ReplaceWithSelf(self):
        """Setting the same key object twice must not change its refcount
        (no double-incref on self-replacement)."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.O_DecRef(h)
    def testR09_ReplaceWithSelfPassive(self):
        """Ht_SetDefault with the already-stored key object must leave its
        refcount unchanged."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_SetDefault(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.O_DecRef(h)
    def testR10_GiveReplaceDefault(self):
        """Giving an equal key replaces the stored one: the displaced key
        loses the table's reference, the new key keeps the stolen one."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        ibd.O_IncRef(h)  # extra refs so both keys outlive the gives
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        h2 = makeHashKey(0)
        ibd.O_IncRef(h2)
        self.assert_(ibd.O_RefCount(h2) == 2, ibd.O_RefCount(h2))
        ibd.Ht_Give(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Give(ht, h2)
        # h was displaced by the equal key h2.
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 2, ibd.O_RefCount(h2))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        self.assert_(ibd.O_RefCount(h2) == 1, ibd.O_RefCount(h2))
        ibd.O_DecRef(h)
        ibd.O_DecRef(h2)
    def testR11_GiveReplaceWithSelf(self):
        """A Give followed by a Set of the same key object must leave its
        refcount unchanged (self-replacement via mixed APIs)."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        ibd.O_IncRef(h)  # extra ref so h outlives the give
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Give(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.Ht_Set(ht, h)
        self.assert_(ibd.O_RefCount(h) == 2, ibd.O_RefCount(h))
        ibd.O_DecRef(ht)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.O_DecRef(h)
    def testR12_PopAltQuery(self):
        """Popping via a different-but-equal key object must not disturb
        the stored key's refcount."""
        ht = newHT()
        self.assert_(ibd.O_RefCount(ht) == 1, ibd.O_RefCount(ht))
        h = makeHashKey(0)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Give(ht, h)
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        ibd.Ht_Pop(ht, makeHashKey(0))
        self.assert_(ibd.O_RefCount(h) == 1, ibd.O_RefCount(h))
        # NOTE(review): ht is never O_DecRef'd here — looks like a
        # test-side leak; confirm against the other testRxx methods.
        ibd.O_DecRef(h)
    def testR13_OverwriteSameWithGive(self):
        """Re-giving a key the table already holds consumes the caller's
        reference (refcount 2 -> 1) without touching other entries."""
        ht = newHT()
        h1 = makeHashKey(1)
        h2 = makeHashKey(2)
        ibd.Ht_Set(ht, h1)
        ibd.Ht_Set(ht, h2)
        self.assert_(ibd.O_RefCount(h1) == 2)
        self.assert_(ibd.O_RefCount(h2) == 2)
        ibd.Ht_Give(ht, h1)
        # Give stole our reference to h1; h2 is unaffected.
        self.assert_(ibd.O_RefCount(h1) == 1)
        self.assert_(ibd.O_RefCount(h2) == 2)
        ibd.Ht_Give(ht, h2)
        self.assert_(ibd.O_RefCount(h1) == 1)
        self.assert_(ibd.O_RefCount(h2) == 1)
        decRef(ht)
def testR15_AddMarkerValidRange_01(self):
ht = newHT()
h = makeMarkedHashKey(1, 2, 4)
ibd.Ht_Set(ht, h)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,6,7,8,9,10], [2,3])
ibd.Ht_InsertValidRange(ht, h, 6, 8)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,8,9,10], [2,3,6,7])
decRef(ht,h);
def testR15_AddMarkerValidRange_02(self):
ht = newHT()
h = makeMarkedHashKey(1, 0, 0)
ibd.Ht_InsertValidRange(ht, h, 2, 4)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,6,7,8,9,10], [2,3])
ibd.Ht_InsertValidRange(ht, h, 6, 8)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,8,9,10], [2,3,6,7])
decRef(ht,h);
def testR15_AddMarkerValidRange_03(self):
ht = newHT()
h = makeMarkedHashKey(1, 0, 0)
hk = makeHashKey(1)
ibd.Ht_InsertValidRange(ht, h, 2, 4)
self.assert_(ibd.Ht_View(ht, hk) == h)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,6,7,8,9,10], [2,3])
ibd.Ht_InsertValidRange(ht, h, 6, 8)
self.assert_(getHashAtMarkerLoc(ht, 1) == null_hash)
hashesConsistent(ht, [0,1,4,5,8,9,10], [2,3,6,7])
decRef(ht,h,hk);
def testRR01_InsertAndClear(self):
ht = newHT()
s1 = getHashOfMarkerRange(ht, 0, 10)
hk = makeMarkedHashKey(0, -5, 5)
ibd.Ht_Give(ht, hk)
self.assert_(s1 != getHashOfMarkerRange(ht, 0, 10))
ibd.Ht_Clear(ht, hk)
self.assert_(s1 == getHashOfMarkerRange(ht, 0, 10))
decRef(ht);
def testRR02_OutsideRangeIgnored_01(self):
ht1 = newHT()
ibd.Ht_Give(ht1, makeMarkedHashKey(0, 0, 5))
s1 = getHashOfMarkerRange(ht1, 0, 10)
ht2 = newHT()
ibd.Ht_Give(ht2, makeMarkedHashKey(0, -5, 5))
self.assert_(s1 == getHashOfMarkerRange(ht2, 0, 10))
decRef(ht1,ht2);
def testRR02_OutsideRangeIgnored_02(self):
ht1 = newHT()
ibd.Ht_Give(ht1, makeMarkedHashKey(0, 0, 10))
s1 = getHashOfMarkerRange(ht1, 0, 10)
ht2 = newHT()
ibd.Ht_Give(ht2, makeMarkedHashKey(0, -5, 15))
self.assert_(s1 == getHashOfMarkerRange(ht2, 0, 10))
decRef(ht1,ht2);
def testRR02_OutsideRangeIgnored_03(self):
ht1 = newHT()
ibd.Ht_Give(ht1, makeMarkedHashKey(0, 5, 10))
s1 = getHashOfMarkerRange(ht1, 0, 10)
ht2 = newHT()
ibd.Ht_Give(ht2, makeMarkedHashKey(0, 5, 15))
self.assert_(s1 == getHashOfMarkerRange(ht2, 0, 10))
decRef(ht1,ht2);
def testRR02_OutsideRangeIgnored_04(self):
ht1 = newHT()
ibd.Ht_Give(ht1, makeMarkedHashKey(0, 0, 5))
s1 = getHashOfMarkerRange(ht1, 0, 10)
ht2 = newHT()
ibd.Ht_Give(ht2, makeMarkedHashKey(0, 0, 5))
ibd.Ht_Give(ht2, makeMarkedHashKey(1, -5, 0))
self.assert_(s1 == getHashOfMarkerRange(ht2, 0, 10))
decRef(ht1,ht2);
def testRR02_OutsideRangeIgnored_05(self):
ht1 = newHT()
ibd.Ht_Give(ht1, makeMarkedHashKey(0, 0, 5))
s1 = getHashOfMarkerRange(ht1, 0, 10)
ht2 = newHT()
ibd.Ht_Give(ht2, makeMarkedHashKey(0, 0, 5))
ibd.Ht_Give(ht2, makeMarkedHashKey(1, 10, 15))
self.assert_(s1 == getHashOfMarkerRange(ht2, 0, 10))
decRef(ht1,ht2);
################################################################################
# Tests about the marker stuff.
class TestHashTableMarkedKeys(unittest.TestCase):
    """Tests for hash-table keys carrying marker (validity-range) info:
    containment at marker locations, hash consistency across ranges,
    deletion/replacement behavior, and +/- infinity corner cases."""

    def testM_Basic_01_Existance(self):
        """Ht_ContainsAt is true exactly inside the key's [0, 5) range."""
        ht = newHT()
        hk = makeMarkedHashKey(0, 0, 5)
        ibd.Ht_Give(ht, hk)
        self.assert_(ibd.Ht_Contains(ht, hk))
        self.assert_(not ibd.Ht_ContainsAt(ht, hk, -1))
        self.assert_(ibd.Ht_ContainsAt(ht, hk, 0))
        self.assert_(not ibd.Ht_ContainsAt(ht, hk, 5))
        self.assert_(not ibd.Ht_ContainsAt(ht, hk, 6))
        ibd.O_DecRef(ht)
    def testM01_Hashes_Simple(self):
        ht = newHT()
        h = makeMarkedHashKey(0, 0, 5)
        ibd.Ht_Give(ht, h)
        self.assert_(hashesConsistent(ht, [-1, 6], [1, 4]))
        ibd.O_DecRef(ht)
    def testM01n_Hashes_Simple(self):
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, -10, -5))
        self.assert_(hashesConsistent(ht, [-11, -4], [-9, -6]))
        ibd.O_DecRef(ht)
    def testM02_Hashes_AtNodes(self):
        # Range endpoints are half-open: 0 is in, 5 is out.
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, 0, 5))
        self.assert_(hashesConsistent(ht, [-1, 5], [0, 4]))
        ibd.O_DecRef(ht)
    def testM02n_Hashes_AtNodes(self):
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, -10, -5))
        self.assert_(hashesConsistent(ht, [-11, -5], [-10, -6]))
        ibd.O_DecRef(ht)
    def testM03_Hashes_SameNode(self):
        # Two keys whose ranges abut at marker 5.
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, 1, 5))
        ibd.Ht_Give(ht, makeMarkedHashKey(1, 5, 9))
        self.assert_(hashesConsistent(ht, [-1, 0, 9, 10], [1,2,3,4],[5,8]))
        ibd.O_DecRef(ht)
    def testM03n_Hashes_SameNode(self):
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, -9, -5))
        ibd.Ht_Give(ht, makeMarkedHashKey(1, -5, -1))
        self.assert_(hashesConsistent(ht, [-11, -10, -1, 0], [-9,-8,-7,-6],[-5,-2]))
        ibd.O_DecRef(ht)
    def testM04_Hashes_Intersection(self):
        # Overlapping ranges [0,6) and [1,3) yield three hash regions.
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, 0, 6))
        ibd.Ht_Give(ht, makeMarkedHashKey(1, 1, 3 ))
        self.assert_(hashesConsistent(ht,
                                      [-1, 6,7,8],
                                      [0,3,4,5],
                                      [1,2]))
        ibd.O_DecRef(ht)
    def testM05n_Hashes_MultipleRanges(self):
        # Key 1 carries three disjoint ranges: [-9,-7), [-6,-5), [-3,-2).
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, -10, 0))
        ibd.Ht_Give(ht, makeMarkedHashKey(1, -9, -7, -6, -5, -3, -2))
        self.assert_(hashesConsistent(ht,
                                      [-11,0,1],
                                      [-10,-7,-5,-2,-1],
                                      [-9,-8,-6,-3]))
        ibd.O_DecRef(ht)
    def testM06a_HashesInvariantOutsideRange_Single(self):
        """Adding a second key must not change hashes at locations only
        covered by the first key's range."""
        ht = newHT()
        h1 = makeMarkedHashKey(0, 1, 5)
        h2 = makeMarkedHashKey(1, 3, 7)
        ibd.Ht_Give(ht, h1)
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash)
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6], [1,2,3,4]))
        s_h1 = getHashAtMarkerLoc(ht, 1)
        self.assert_(s_h1 == getHashAtMarkerLoc(ht, 1))
        ibd.Ht_Give(ht, h2)
        self.assert_(s_h1 == getHashAtMarkerLoc(ht, 1))
        s_h2 = getHashAtMarkerLoc(ht, 3)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(s_h1 == getHashAtMarkerLoc(ht, 1))
        self.assert_(hashesConsistent(ht, [-1, 0, 7, 8], [1,2], [3,4], [5,6]))
        ibd.O_DecRef(ht)
    def consistencyTest(self, N, marker_range, n_keys, n_key_marker_ranges):
        """Build the same seeded-random marked table N+1 times and check
        the per-marker hash map is identical every time.

        N                   -- number of rebuild/compare iterations
        marker_range        -- (lo, hi) bounds for random range endpoints
        n_keys              -- keys per table
        n_key_marker_ranges -- random ranges per key
        """
        r = marker_range
        def newDict():
            # Deterministic build: fixed seed, then one marked key per k.
            ht = newHT()
            random.seed(0)
            for k in range(n_keys):
                key_range_args = []
                for n in range(n_key_marker_ranges):
                    a = random.randint(*r)
                    b = random.randint(*r)
                    key_range_args.append(min(a,b))
                    key_range_args.append(max(a,b) + 1)
                ibd.Ht_Give(ht, makeMarkedHashKey(k, *key_range_args))
            # Snapshot the hash at every marker location (plus 1 margin).
            d = dict( (m, getHashAtMarkerLoc(ht,m))
                      for m in range(r[0]-1, r[1]+2) )
            ibd.O_DecRef(ht)
            return d
        d1 = newDict()
        for i in range(N):
            self.assert_(d1 == newDict())
    def testM06_Hashes_LargeConsistencyTest_Compact_Small(self):
        self.consistencyTest(200, [-3,3], 2, 3)
    def testM07_Hashes_LargeConsistencyTest_Compact_Large(self):
        self.consistencyTest(200, [-3,3], 50, 2)
    def testM08_Hashes_LargeConsistencyTest_Spread_Small(self):
        self.consistencyTest(200, [-50,50], 10, 2)
    def testM09_Hashes_LargeConsistencyTest_Spread_Large(self):
        self.consistencyTest(100, [-50,50], 50, 5)
    def testM10_Hashes_Value_Simple(self):
        # Inside its range, a marked key hashes like the unmarked key.
        ht = newHT()
        ibd.Ht_Give(ht, makeMarkedHashKey(0, 0, 5))
        self.assert_(getHashAtMarkerLoc(ht, 1)
                     == extractHash(makeHashKey(0)))
        ibd.O_DecRef(ht)
    def testM20_Hashes_Deletion_01_Simple(self):
        """Clearing the only key makes all marker locations hash alike."""
        ht = newHT()
        h = makeMarkedHashKey(0, 1, 5)
        ibd.Ht_Give(ht, h)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6], [1,2,3,4]))
        ibd.Ht_Clear(ht, h)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6, 1,2,3,4]))
        ibd.O_DecRef(ht)
    def testM20_Hashes_Deletion_02_Double(self):
        """Deleting overlapping keys one at a time leaves the remaining
        key's hash regions intact at every step."""
        ht = newHT()
        h1 = makeMarkedHashKey(0, 1, 5)
        h2 = makeMarkedHashKey(1, 3, 7)
        ibd.Ht_Give(ht, h1)
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash)
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6], [1,2,3,4]))
        s_h1 = getHashAtMarkerLoc(ht, 1)
        ibd.Ht_Give(ht, h2)
        s_h2 = getHashAtMarkerLoc(ht, 3)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(s_h1 == getHashAtMarkerLoc(ht, 1))
        self.assert_(hashesConsistent(ht, [-1, 0, 7, 8], [1,2], [3,4], [5,6]))
        ibd.Ht_Clear(ht, h1)
        self.assert_(getHashAtMarkerLoc(ht, 3) == extractHash(h2))
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(hashesConsistent(ht, [-1, 0, 1, 2, 7, 8], [3,4, 5,6]))
        ibd.Ht_Clear(ht, h2)
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6, 1,2,3,4]))
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        ibd.O_DecRef(ht)
    def testM20_Hashes_Deletion_03_Inside(self):
        """As _02 but with h2's range nested inside h1's range."""
        ht = newHT()
        h1 = makeMarkedHashKey(0, 1, 7)
        h2 = makeMarkedHashKey(1, 3, 5)
        ibd.Ht_Give(ht, h1)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(hashesConsistent(ht, [-1, 0, 7, 8], [1,2,3,4,5,6]))
        s_h1 = getHashAtMarkerLoc(ht, 1)
        ibd.Ht_Give(ht, h2)
        ibd._Ht_debug_HashTableConsistent(ht)
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        self.assert_(s_h1 == getHashAtMarkerLoc(ht, 1))
        self.assert_(hashesConsistent(ht, [-1, 0, 7, 8], [1,2, 5, 6], [3,4]))
        ibd.Ht_Clear(ht, h1)
        self.assert_(getHashAtMarkerLoc(ht, 3) == extractHash(h2))
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 1))
        self.assert_(hashesConsistent(ht, [-1, 0, 1, 2, 7, 8, 5,6], [3,4]))
        ibd.Ht_Clear(ht, h2)
        self.assert_(hashesConsistent(ht, [-1, 0, 5, 6, 1,2,3,4]))
        self.assert_(null_hash == getHashAtMarkerLoc(ht, 0))
        ibd.O_DecRef(ht)
    def testM21_Hashes_Replacement(self):
        """Re-giving an equal key with a different range replaces the old
        range; refcounts are checked after every operation."""
        ht = newHT()
        assert ibd.O_RefCount(ht) == 1
        h1 = makeMarkedHashKey(0, 1, 7)
        assert ibd.O_RefCount(h1) == 1
        h2 = makeMarkedHashKey(0, 3, 5)
        assert ibd.O_RefCount(h2) == 1
        ibd.Ht_Give(ht, h1)
        assert ibd.O_RefCount(ht) == 1
        assert ibd.O_RefCount(h1) == 1
        assert ibd.O_RefCount(h2) == 1
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash)
        assert ibd.O_RefCount(ht) == 1
        assert ibd.O_RefCount(h1) == 1
        assert ibd.O_RefCount(h2) == 1
        self.assert_(getHashAtMarkerLoc(ht, 1) == extractHash(h1))
        # print "HERE"
        assert ibd.O_RefCount(ht) == 1
        assert ibd.O_RefCount(h1) == 1
        assert ibd.O_RefCount(h2) == 1
        self.assert_(hashesConsistent(ht, [-1, 0, 7, 8], [1,2,3,4,5,6]))
        assert ibd.O_RefCount(ht) == 1
        assert ibd.O_RefCount(h1) == 1
        assert ibd.O_RefCount(h2) == 1
        # print "HERE 2"
        ibd.Ht_Give(ht, h2)
        assert ibd.O_RefCount(ht) == 1
        assert ibd.O_RefCount(h2) == 1
        # print "HERE 3"
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash)
        # print "HERE 4"
        self.assert_(getHashAtMarkerLoc(ht, 3) == extractHash(h2))
        # print "HERE 5"
        # NOTE: range(...) + range(...) list concatenation is Python 2 only.
        self.assert_(hashesConsistent(ht, range(-1,3) + range(5,10), [3,4]))
        # print "HERE 6"
        ibd.O_DecRef(ht)
    def testM22_Corner_01_MinusInf_01(self):
        # A range open at minus infinity is valid at mr_minus_infinity.
        ht = newHT()
        h1 = makeMarkedHashKey(0, mr_minus_infinity, 0)
        ibd.Ht_Give(ht, h1)
        self.assert_(getHashAtMarkerLoc(ht, mr_minus_infinity) == extractHash(h1))
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash, getHashAtMarkerLoc(ht, 0))
        ibd.O_DecRef(ht)
    def testM22_Corner_01_MinusInf_02_Give(self):
        # Successive gives of the same key widen the (-inf, x) range.
        ht = newHT()
        h1 = makeHashKey(0)
        ibd.Ht_Give(ht, makeMarkedHashKey(0, mr_minus_infinity, -5))
        ibd.Ht_Give(ht, makeMarkedHashKey(0, mr_minus_infinity, 0))
        ibd.Ht_Give(ht, makeMarkedHashKey(0, mr_minus_infinity, 5))
        self.assert_(getHashAtMarkerLoc(ht, mr_minus_infinity) == extractHash(h1))
        # NOTE(review): the failure-message argument queries loc 0 but the
        # assertion tests loc 5 — looks like a copy-paste slip; confirm.
        self.assert_(getHashAtMarkerLoc(ht, 5) == null_hash, getHashAtMarkerLoc(ht, 0))
        ibd.O_DecRef(ht)
    def testM22_Corner_02_PlusInf_01(self):
        # Full (-inf, +inf) range; hash at +inf itself is still null.
        # NOTE(review): ht is never O_DecRef'd in this test — likely leak.
        ht = newHT()
        h1 = makeMarkedHashKey(0, mr_minus_infinity, mr_plus_infinity)
        ibd.Ht_Give(ht, h1)
        self.assert_(hashesConsistent(ht, [mr_minus_infinity, 0], [mr_plus_infinity]))
        self.assert_(getHashAtMarkerLoc(ht, mr_plus_infinity) == null_hash)
    def testM22_Corner_02_PlusInf_02(self):
        # An unmarked key behaves like a full-range marked key.
        # NOTE(review): ht is never O_DecRef'd in this test — likely leak.
        ht = newHT()
        h1 = makeHashKey(0)
        ibd.Ht_Give(ht, h1)
        self.assert_(hashesConsistent(ht, [mr_minus_infinity, 0], [mr_plus_infinity]))
        self.assert_(getHashAtMarkerLoc(ht, mr_plus_infinity) == null_hash)
class TestHashTableSummarize(unittest.TestCase):
    """Tests for Ht_ReduceTable and the Ht_Summarize_* accumulation API:
    reduced tables and summaries must preserve per-marker hashes."""

    def checkReduce(self, ht, r):
        """Assert the reduced table agrees with ht at every marker in r."""
        htr = ibd.Ht_ReduceTable(ht)
        for m in r:
            self.assert_(getHashAtMarkerLoc(htr, m) == getHashAtMarkerLoc(ht, m))
    def test01_Basic(self):
        ht = newHT()
        h = makeMarkedHashKey(0, 2,4)
        ibd.Ht_Give(ht, h)
        htr = ibd.Ht_ReduceTable(ht)
        self.assert_(getHashAtMarkerLoc(ht, 0) == null_hash)
        self.assert_(getHashAtMarkerLoc(ht, 2) == extractHash(h))
        self.assert_(getHashAtMarkerLoc(ht, 4) == null_hash)
        self.assert_(getHashAtMarkerLoc(htr, 0) == null_hash)
        self.assert_(getHashAtMarkerLoc(htr, 2) == extractHash(h))
        self.assert_(getHashAtMarkerLoc(htr, 4) == null_hash)
    ################################################################################
    # Test the hash along marker values
    def checkHAMV(self, htl, zero_point, *cons_sets):
        """Fold the tables in htl through Ht_Summarize_Update/Finish and
        check the summary's hash-consistency sets and null point."""
        hmv = 0
        for i, ht in enumerate(htl):
            hmv = ibd.Ht_Summarize_Update(hmv, ht)
        hmv = ibd.Ht_Summarize_Finish(hmv)
        self.assert_(hashesConsistent(hmv, *cons_sets))
        self.assert_(getHashAtMarkerLoc(hmv, zero_point) == null_hash)
        decRef(hmv)
    def testHAMV01_Single(self):
        ht = newHT()
        h1 = exactHashKey("01000000000000000000000000000000")
        ibd.Ht_Give(ht, addMarkerInfo(h1, 2,4))
        #ibd.Ht_debug_print(ht)
        #ibd.Ht_MSL_debug_Print(ht)
        self.checkHAMV([ht], 0, [0,1,4,5], [2,3])
        decRef(ht)
    def testHAMV02_Double(self):
        ht = newHT()
        ibd.Ht_Give(ht, addMarkerInfo(exactHashKey("01000000000000000000000000000000"), 2, 6))
        ibd.Ht_Give(ht, addMarkerInfo(exactHashKey("02000000000000000000000000000000"), 4, 8))
        self.checkHAMV([ht], 0, [0,1,8,9], [2,3], [4,5], [6,7])
        decRef(ht)
    def testHAMV03_Double_Sandwich(self):
        # In the overlap [4,6) the two key hashes combine (01 ^ 02 == 03).
        ht = newHT()
        h1 = exactHashKey("01000000000000000000000000000000")
        h2 = exactHashKey("02000000000000000000000000000000")
        ibd.Ht_Give(ht, addMarkerInfo(h1, 2, 8))
        ibd.Ht_Give(ht, addMarkerInfo(h2, 4, 6))
        self.assert_(getHashAtMarkerLoc(ht, 2) == extractHash(h1))
        self.assert_(getHashAtMarkerLoc(ht, 7) == extractHash(h1))
        self.assert_(getHashAtMarkerLoc(ht, 4) == "03000000000000000000000000000000")
        self.checkHAMV([ht], 0, [0,1,8,9], [2,3,6,7], [4,5])
        decRef(ht)
    def testHAMV04_2Table_Double(self):
        h1 = exactHashKey("01000000000000000000000000000000")
        h2 = exactHashKey("02000000000000000000000000000000")
        ht1 = newHT()
        ibd.Ht_Give(ht1, addMarkerInfo(h1, 2, 6))
        ht2 = newHT()
        ibd.Ht_Give(ht2, addMarkerInfo(h2, 4, 8))
        self.checkHAMV([ht1, ht2], 0, [0,1,8,9], [2,3], [4,5], [6,7])
        decRef(ht1,ht2)
    def testHAMV05_2Table_Double_Sandwich(self):
        ht1 = newHT()
        ibd.Ht_Give(ht1, makeMarkedHashKey(0, 2, 8))
        ht2 = newHT()
        ibd.Ht_Give(ht2, makeMarkedHashKey(1, 4, 6))
        self.checkHAMV([ht1,ht2], 0, [0,1,8,9], [2,3,6,7], [4,5])
        decRef(ht1,ht2)
    def testHAMV06_2Table_Double_Duplicate(self):
        # Same key (0) in both tables with overlapping ranges.
        ht1 = newHT()
        ibd.Ht_Give(ht1, makeMarkedHashKey(0, 2, 6))
        ht2 = newHT()
        ibd.Ht_Give(ht2, makeMarkedHashKey(0, 4, 8))
        self.checkHAMV([ht1, ht2], 0, [0,1,8,9], [2,3,6,7], [4,5])
        decRef(ht1,ht2)
    def testHAMV07_2Table_Double_Sandwich_Duplicate(self):
        ht1 = newHT()
        ibd.Ht_Give(ht1, makeMarkedHashKey(0, 2, 8))
        ht2 = newHT()
        ibd.Ht_Give(ht2, makeMarkedHashKey(0, 4, 6))
        self.checkHAMV([ht1,ht2], 0, [0,1,8,9], [2,3,6,7], [4,5])
        decRef(ht1,ht2)
    def testHAMV08_50Table_Unique_Keys(self):
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(i, 2*i, 2*i+2))
            distinct_values.append([2*i, 2*i +1])
            return ht
        htl = [new_Table(i) for i in xrange(50)]
        self.checkHAMV(htl, -1, *distinct_values)
        decRef(*htl)
    def testHAMV08_50Table_Unique_Keys_Reversed(self):
        # Summary must be order-independent: fold the tables in reverse.
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(i, 2*i, 2*i+2))
            distinct_values.append([2*i, 2*i +1])
            return ht
        htl = [new_Table(i) for i in xrange(50)]
        self.checkHAMV(reversed(htl), -1, *distinct_values)
        decRef(*htl)
    def testHAMV08_100Table_Unique_Keys_Random(self):
        # NOTE(review): builds 500 tables despite the "100Table" name.
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(i, 2*i, 2*i+2))
            distinct_values.append([2*i, 2*i +1])
            return ht
        htl = [new_Table(i) for i in xrange(500)]
        self.checkHAMV(reversed(htl), -1, *distinct_values)
        decRef(*htl)
    def testHAMV09_50Table_Same_Keys(self):
        # Same key 0 in every table: all covered markers hash alike.
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(0, 2*i, 2*i+2))
            distinct_values.append(2*i)
            distinct_values.append(2*i+1)
            return ht
        htl = [new_Table(i) for i in xrange(50)]
        self.checkHAMV(htl, -1, [-1], distinct_values)
        decRef(*htl)
    def testHAMV10_3Table_Nested_Identical(self):
        N = 3
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(0, 2*i, 2*(2*N - i)))
            distinct_values.append([2*i, 2*i+1, 2*(2*N - i) - 2, 2*(2*N - i) - 1])
            return ht
        htl = [new_Table(i) for i in xrange(N)]
        self.checkHAMV(htl, -1, [-1], *distinct_values)
        decRef(*htl)
    def testHAMV10_5Table_Nested_Identical(self):
        return # KNOWN FAIL
        # Reason is that the XOR operations cancel out, allowing
        # multiple repeat keys under this circumstance.  Solution is
        # to implement a proper merging operation based on the +
        # operator.
        N = 5
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(0, 2*i, 2*(2*N - i)))
            distinct_values.append([2*i, 2*i+1, 2*(2*N - i) - 2, 2*(2*N - i) - 1])
            return ht
        htl = [new_Table(i) for i in xrange(N)]
        self.checkHAMV(htl, -1, [-1], *distinct_values)
        decRef(*htl)
    def testHAMV11_100Table_Nested_Identical(self):
        return # KNOWN FAIL (same XOR-cancellation issue as testHAMV10_5Table)
        N = 100
        distinct_values = []
        def new_Table(i):
            ht = newHT()
            ibd.Ht_Give(ht, makeMarkedHashKey(0, 2*i, 2*(2*N - i)))
            distinct_values.append([2*i, 2*i+1, 2*(2*N - i) - 2, 2*(2*N - i) - 1])
            return ht
        htl = [new_Table(i) for i in xrange(N)]
        self.checkHAMV(htl, -1, [-1], *distinct_values)
        decRef(*htl)
    def testHAMV12_OverwriteWithSame(self):
        """Re-Set of already-present keys leaves every marker hash and the
        keys' refcounts unchanged."""
        ht = newHT()
        h1 = addMarkerInfo(makeHashKey(1), 2, 6)
        h2 = addMarkerInfo(makeHashKey(2), 4, 8)
        ibd.Ht_Set(ht, h1)
        ibd.Ht_Set(ht, h2)
        hl1 = getHashMList(ht, range(10))
        self.checkHAMV([ht], 0, [0,1,8,9], [2,3], [4,5], [6,7])
        ibd.Ht_Set(ht, h1)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        ibd.Ht_Set(ht, h2)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        self.assert_(ibd.O_RefCount(h1) == 2)
        self.assert_(ibd.O_RefCount(h2) == 2)
        decRef(ht)
        decRef(h1)
        decRef(h2)
    def testHAMV13_RegiveWithSetDefault(self):
        """SetDefault on already-present keys is a no-op for marker hashes
        and refcounts."""
        ht = newHT()
        h1 = addMarkerInfo(makeHashKey(1), 2, 6)
        h2 = addMarkerInfo(makeHashKey(2), 4, 8)
        ibd.Ht_Set(ht, h1)
        ibd.Ht_Set(ht, h2)
        hl1 = getHashMList(ht, range(10))
        self.checkHAMV([ht], 0, [0,1,8,9], [2,3], [4,5], [6,7])
        ibd.Ht_SetDefault(ht, h1)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        ibd.Ht_SetDefault(ht, h2)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        self.assert_(ibd.O_RefCount(h1) == 2)
        self.assert_(ibd.O_RefCount(h2) == 2)
        decRef(ht)
        decRef(h1)
        decRef(h2)
    def testHAMV14_OverwriteSameWithGive(self):
        """Re-Give of already-present keys consumes the caller's reference
        but leaves every marker hash unchanged."""
        ht = newHT()
        h1 = addMarkerInfo(makeHashKey(1), 2, 6)
        h2 = addMarkerInfo(makeHashKey(2), 4, 8)
        ibd.Ht_Set(ht, h1)
        ibd.Ht_Set(ht, h2)
        hl1 = getHashMList(ht, range(10))
        self.checkHAMV([ht], 0, [0,1,8,9], [2,3], [4,5], [6,7])
        self.assert_(ibd.O_RefCount(h1) == 2)
        self.assert_(ibd.O_RefCount(h2) == 2)
        ibd.Ht_Give(ht, h1)
        self.assert_(ibd.O_RefCount(h1) == 1)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        ibd.Ht_Give(ht, h2)
        self.assert_(ibd.O_RefCount(h2) == 1)
        self.assert_(hl1 == getHashMList(ht, range(10)))
        decRef(ht)
class TestHashTableSetOps(unittest.TestCase):
def checkSetOp(self, op, hkl1, hkl2):
ht1 = newHT()
ht2 = newHT()
def addIn(ht, hkl):
for hk in hkl:
if type(hk) is int:
ibd.Ht_Give(ht, makeHashKey(hk))
elif type(hk) is tuple:
ibd.Ht_Give(ht, makeMarkedHashKey(*hk))
else:
ibd.Ht_Give(ht, hk)
addIn(ht1, hkl1)
addIn(ht2, hkl2)
r_set = range(10)
s1 = getHTValiditySet(ht1, r_set)
s2 = getHTValiditySet(ht2, r_set)
if op == "union":
ht3 = ibd.Ht_Union(ht1, ht2)
s3_true = s1 | s2
elif op == "intersection":
ht3 = ibd.Ht_Intersection(ht1, ht2)
s3_true = s1 & s2
elif op == "difference":
ht3 = ibd.Ht_Difference(ht1, ht2)
s3_true = s1 - s2
else:
assert False
s3 = getHTValiditySet(ht3, r_set)
self.assert_(s3 == s3_true,
"\nin true, not test: %s;\nin test, not true: %s"
% ((",".join(sorted(t[0][:4] + "-%d" % t[1] for t in (s3_true - s3)))),
(",".join(sorted(t[0][:4] + "-%d" % t[1] for t in (s3 - s3_true))))))
decRef(ht1, ht2, ht3)
def setConsistencyTest(self, op, marker_range, n_keys, n_key_marker_ranges, n_unique):
r = marker_range
def newRandomHt(offset):
ht = newHT()
random.seed(0)
for k in range(offset, n_keys + offset):
key_range_args = []
for n in range(n_key_marker_ranges):
a = random.randint(*r)
b = random.randint(*r)
key_range_args.append(min(a,b))
key_range_args.append(max(a,b) + 1)
ibd.Ht_Give(ht, makeMarkedHashKey(k, *key_range_args))
return ht
ht1 = newRandomHt(0)
ht2 = newRandomHt(n_unique)
s1 = getHTValiditySet(ht1, range(*r))
s2 = getHTValiditySet(ht2, range(*r))
if op == "union":
ht3 = ibd.Ht_Union(ht1, ht2)
s3_true = s1 | s2
elif op == "intersection":
ht3 = ibd.Ht_Intersection(ht1, ht2)
s3_true = s1 & s2
elif op == "difference":
ht3 = ibd.Ht_Difference(ht1, ht2)
s3_true = s1 - s2
else:
assert False
s3 = getHTValiditySet(ht3, range(*r))
self.assert_(s3 == s3_true,
"\nin true, not test: %s;\nin test, not true: %s"
% ((",".join(sorted(t[0][:4] + "-%d" % t[1] for t in (s3_true - s3)))),
(",".join(sorted(t[0][:4] + "-%d" % t[1] for t in (s3 - s3_true))))))
decRef(ht1, ht2, ht3)
def test01_Union_01(self):
self.checkSetOp("union", [0], [1])
def test01_Union_02_marked(self):
self.checkSetOp("union", [(0, 2, 4)], [(1,3,5)])
def test01_Union_03_marked_same_range(self):
self.checkSetOp("union", [(0, 2, 4)], [(1,2,4)])
def test01_Union_04_marked_same_key(self):
self.checkSetOp("union", [(0, 2, 4)], [(0,2,4)])
def test01_Union_05_marked_unmarked(self):
self.checkSetOp("union", [(0, 2, 4)], [1])
def test01_Union_06_marked_unmarked_same_key_01(self):
self.checkSetOp("union", [(0, 2, 4)], [0])
def test01_Union_07_marked_unmarked_same_key_02(self):
self.checkSetOp("union", [0], [(0, 2, 4)])
def test01_Union_08_many_ranges(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(1)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("union", [h1], [h2])
def test01_Union_09_many_ranges_same_key(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("union", [h1], [h2])
def test02_Union_Random_Consistency_01_small_same_keysets(self):
self.setConsistencyTest("union", (-20,20), 5, 5, 0)
def test02_Union_Random_Consistency_01_small_different_keysets(self):
self.setConsistencyTest("union", (-20,20), 5, 5, 5)
def test02_Union_Random_Consistency_02_medium_sparse_same_keysets(self):
self.setConsistencyTest("union", (-20,20), 10, 1, 0)
def test02_Union_Random_Consistency_02_medium_sparse_different_keysets(self):
self.setConsistencyTest("union", (-20,20), 10, 1, 10)
def test02_Union_Random_Consistency_02_medium_sparse_overlapping_keysets(self):
self.setConsistencyTest("union", (-20,20), 10, 1, 5)
def test02_Union_Random_Consistency_03_large_contiguous_same_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 1, 0)
def test02_Union_Random_Consistency_03_large_contiguous_different_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 1, 100)
def test02_Union_Random_Consistency_03_large_contiguous_overlappy_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 1, 5)
def test02_Union_Random_Consistency_03_large_same_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 10, 0)
def test02_Union_Random_Consistency_03_large_different_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 10, 100)
def test02_Union_Random_Consistency_03_large_overlapping_keysets(self):
self.setConsistencyTest("union", (-100,100), 100, 10, 50)
def test11_Intersection_01(self):
self.checkSetOp("intersection", [0], [1])
def test11_Intersection_02_marked(self):
self.checkSetOp("intersection", [(0, 2, 4)], [(1,3,5)])
def test11_Intersection_03_marked_same_range(self):
self.checkSetOp("intersection", [(0, 2, 4)], [(1,2,4)])
def test11_Intersection_04_marked_same_key(self):
self.checkSetOp("intersection", [(0, 2, 4)], [(0,2,4)])
def test11_Intersection_05_marked_unmarked(self):
self.checkSetOp("intersection", [(0, 2, 4)], [1])
def test11_Intersection_06_marked_unmarked_same_key_01(self):
self.checkSetOp("intersection", [(0, 2, 4)], [0])
def test11_Intersection_07_marked_unmarked_same_key_02(self):
self.checkSetOp("intersection", [0], [(0, 2, 4)])
def test11_Intersection_08_many_ranges(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(1)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("intersection", [h1], [h2])
def test11_Intersection_09_many_ranges_same_key(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("intersection", [h1], [h2])
def test12_Intersection_Random_Consistency_01_small_same_keysets(self):
self.setConsistencyTest("intersection", (-20,20), 5, 5, 0)
def test12_Intersection_Random_Consistency_01_small_different_keysets(self):
self.setConsistencyTest("intersection", (-20,20), 5, 5, 5)
def test12_Intersection_Random_Consistency_02_medium_sparse_same_keysets(self):
self.setConsistencyTest("intersection", (-20,20), 10, 1, 0)
def test12_Intersection_Random_Consistency_02_medium_sparse_different_keysets(self):
self.setConsistencyTest("intersection", (-20,20), 10, 1, 10)
def test12_Intersection_Random_Consistency_02_medium_sparse_overlapping_keysets(self):
self.setConsistencyTest("intersection", (-20,20), 10, 1, 5)
def test12_Intersection_Random_Consistency_03_large_contiguous_same_keysets(self):
self.setConsistencyTest("intersection", (-100,100), 100, 1, 0)
def test12_Intersection_Random_Consistency_03_large_contiguous_different_keysets(self):
self.setConsistencyTest("intersection", (-100,100), 100, 1, 100)
def test12_Intersection_Random_Consistency_03_large_contiguous_overlappy_keysets(self):
self.setConsistencyTest("intersection", (-100,100), 100, 1, 5)
def test12_Intersection_Random_Consistency_03_large_same_keysets(self):
self.setConsistencyTest("intersection", (-100,100), 100, 10, 0)
def test12_Intersection_Random_Consistency_03_large_different_keysets(self):
self.setConsistencyTest("intersection", (-100,100), 100, 10, 100)
def test12_Intersection_Random_Consistency_03_large_overlapping_keysets(self):
self.setConsistencyTest("intersection", (-10,10), 100, 5, 50)
def test21_Difference_01(self):
self.checkSetOp("difference", [0], [1])
def test21_Difference_02_marked(self):
self.checkSetOp("difference", [(0, 2, 4)], [(1,3,5)])
def test21_Difference_03_marked_same_range(self):
self.checkSetOp("difference", [(0, 2, 4)], [(1,2,4)])
def test21_Difference_04_marked_same_key(self):
self.checkSetOp("difference", [(0, 2, 4)], [(0,2,4)])
def test21_Difference_05_marked_unmarked(self):
self.checkSetOp("difference", [(0, 2, 4)], [1])
def test21_Difference_06_marked_unmarked_same_key_01(self):
self.checkSetOp("difference", [(0, 2, 4)], [0])
def test21_Difference_07_marked_unmarked_same_key_02(self):
self.checkSetOp("difference", [0], [(0, 2, 4)])
def test21_Difference_08_many_ranges(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(1)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("difference", [h1], [h2])
def test21_Difference_09_many_ranges_same_key(self):
from test_markers import miSet_06_large
mi1, mi2 = miSet_06_large(2)
h1 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h1, mi1)
h2 = makeHashKey(0)
ibd.H_GiveMarkerInfo(h2, mi2)
self.checkSetOp("difference", [h1], [h2])
    # Randomized consistency checks for "difference", mirroring the test12
    # intersection series above (same positional-argument convention).
    def test22_Difference_Random_Consistency_01_small_same_keysets(self):
        self.setConsistencyTest("difference", (-20,20), 5, 5, 0)
    def test22_Difference_Random_Consistency_01_small_different_keysets(self):
        self.setConsistencyTest("difference", (-20,20), 5, 5, 5)
    def test22_Difference_Random_Consistency_02_medium_sparse_same_keysets(self):
        self.setConsistencyTest("difference", (-20,20), 10, 1, 0)
    def test22_Difference_Random_Consistency_02_medium_sparse_different_keysets(self):
        self.setConsistencyTest("difference", (-20,20), 10, 1, 10)
    def test22_Difference_Random_Consistency_02_medium_sparse_overlapping_keysets(self):
        self.setConsistencyTest("difference", (-20,20), 10, 1, 5)
    def test22_Difference_Random_Consistency_03_large_contiguous_same_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 1, 0)
    def test22_Difference_Random_Consistency_03_large_contiguous_different_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 1, 100)
    def test22_Difference_Random_Consistency_03_large_contiguous_overlappy_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 1, 5)
    def test22_Difference_Random_Consistency_03_large_same_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 10, 0)
    def test22_Difference_Random_Consistency_03_large_different_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 10, 100)
    def test22_Difference_Random_Consistency_03_large_overlapping_keysets(self):
        self.setConsistencyTest("difference", (-100,100), 100, 10, 50)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Cloudera, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cloudera.cloud.plugins.module_utils.cdp_common import CdpModule
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: datalake_runtime_info
short_description: Gather information about CDP Datalake Runtimes
description:
- Gather information about CDP Datalake Runtimes
author:
- "Webster Mudge (@wmudge)"
requirements:
- cdpy
options:
default:
description:
- Flag to return only the C(default) Runtime.
- Otherwise, all available Runtimes will be listed.
type: bool
required: False
default: False
extends_documentation_fragment:
- cloudera.cloud.cdp_sdk_options
- cloudera.cloud.cdp_auth_options
'''
EXAMPLES = r'''
# Note: These examples do not set authentication details.
# List basic information about available Datalake Runtimes
- cloudera.cloud.datalake_runtime_info:
# List basic information about the default Datalake Runtime
- cloudera.cloud.datalake_runtime_info:
default: yes
'''
RETURN = r'''
---
versions:
description: Details on available CDP Datalake Runtimes
type: list
returned: on success
elements: dict
contains:
runtimeVersion:
description: The version number of the Runtime.
returned: always
type: str
sample: "7.2.6"
defaultRuntimeVersion:
description: Flag designating default status.
returned: always
type: bool
sdk_out:
description: Returns the captured CDP SDK log.
returned: when supported
type: str
sdk_out_lines:
description: Returns a list of each line of the captured CDP SDK log.
returned: when supported
type: list
elements: str
'''
class DatalakeRuntimeInfo(CdpModule):
    """Gathers CDP Datalake Runtime information for the Ansible module."""

    def __init__(self, module):
        super(DatalakeRuntimeInfo, self).__init__(module)
        # Module parameter: when true, only the default Runtime is returned.
        self.default = self._get_param('default')
        # Collected Runtime entries, populated by process().
        self.versions = []
        # The work runs eagerly at construction time.
        self.process()

    @CdpModule._Decorators.process_debug
    def process(self):
        """Fetch all Runtimes from the CDP SDK, filtering to the default if requested."""
        all_versions = self.cdpy.sdk.call(
            svc='datalake', func='list_runtimes', ret_field='versions'
        )
        if self.default:
            self.versions = [v for v in all_versions if v['defaultRuntimeVersion']]
        else:
            self.versions = all_versions
def main():
    """Ansible entry point: parse arguments, gather Runtime info, exit with JSON."""
    module = AnsibleModule(
        argument_spec=CdpModule.argument_spec(
            default=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True
    )
    info = DatalakeRuntimeInfo(module)
    # Read-only module: never reports a change.
    payload = dict(changed=False, versions=info.versions)
    if info.debug:
        payload.update(sdk_out=info.log_out, sdk_out_lines=info.log_lines)
    module.exit_json(**payload)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
import math
from functools import reduce
import operator
class Solution:
    """LeetCode 1281: product of digits minus sum of digits of a positive integer.

    Five equivalent implementations are kept for runtime comparison; the
    original LeetCode timing notes are preserved in each docstring.
    """

    def subtractProductAndSum(self, n: int) -> int:
        """Digit loop with integer arithmetic.

        Runtime: 28 ms, faster than 60.30% of Python3 online submissions.
        Memory Usage: 12.9 MB, less than 100.00%.
        """
        product = 1
        digit_sum = 0  # renamed from `sum` to avoid shadowing the builtin
        while n:
            digit = n % 10
            product *= digit
            digit_sum += digit
            # Must be integer division: `n /= 10` would make n a float and
            # the loop would never terminate.
            n //= 10
        return product - digit_sum

    def subtractProductAndSum2(self, n: int) -> int:
        """Digit loop variant.

        Runtime: 16 ms, faster than 99.49% of Python3 online submissions.
        Memory Usage: 12.8 MB, less than 100.00%.
        """
        product = 1
        digit_sum = 0
        while n:
            digit = n % 10
            product *= digit
            digit_sum += digit
            # Fixed: math.floor(n / 10) went through float division, which
            # loses precision for n > 2**53; pure integer division is exact.
            n = n // 10
        return product - digit_sum

    def subtractProductAndSum3(self, n: int) -> int:
        """String trick: interleave operators between digits and eval.

        e.g. n = 234:
            str(n).replace('', '*')  -->  '*2*3*4*'
            [1:-1]                   -->  '2*3*4'
            eval(...)                -->  24
        eval() is safe here because the expression is derived from an int.

        Runtime: 24 ms, faster than 85.21% of Python3 online submissions.
        Memory Usage: 12.7 MB, less than 100.00%.
        """
        product_expr = str(n).replace('', '*')[1:-1]
        sum_expr = str(n).replace('', '+')[1:-1]
        return eval(product_expr) - eval(sum_expr)

    def subtractProductAndSum4(self, n: int) -> int:
        """functools.reduce with lambdas over the digit list.

        Runtime: 16 ms, faster than 99.49% of Python3 online submissions.
        Memory Usage: 12.9 MB, less than 100.00%.
        """
        digits = list(map(int, str(n)))
        return reduce(lambda x, y: x * y, digits) - reduce(lambda x, y: x + y, digits)

    def subtractProductAndSum5(self, n: int) -> int:
        """functools.reduce with operator.mul / operator.add.

        Runtime: 24 ms, faster than 85.21% of Python3 online submissions.
        Memory Usage: 12.8 MB, less than 100.00%.
        """
        digits = list(map(int, str(n)))
        return reduce(operator.mul, digits) - reduce(operator.add, digits)
if __name__ == "__main__":
    # Exercise every implementation on two sample inputs, printing a
    # separator line between implementations (same output as before).
    n1 = 234
    n2 = 4421
    solver = Solution()
    implementations = [
        ("#1", solver.subtractProductAndSum),
        ("#2", solver.subtractProductAndSum2),
        ("#3", solver.subtractProductAndSum3),
        ("#4", solver.subtractProductAndSum4),
        ("#5", solver.subtractProductAndSum5),
    ]
    for index, (tag, implementation) in enumerate(implementations):
        if index:
            print("----------")
        print("n1, {}:".format(tag), implementation(n1))
        print("n2, {}:".format(tag), implementation(n2))
#!/usr/bin/python
import os
import sys
from pyquery import PyQuery
data_dir = sys.argv[1]
if data_dir == '--help':
print 'Useage: python parse_html.py <data_dir>'
exit(0)
for data_file in os.listdir(data_dir):
with open (data_dir + '/' + data_file, 'r') as myfile:
html=myfile.read().replace('\n', '')
pq = PyQuery(html)
row = []
row.append(pq('ul.current a span.org.summary').text())
row.append(pq('span.locality').text())
row.append(pq('dd.industry').text())
print row
|
import math
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x via binary search.

        Fixed: the midpoint was previously computed as
        math.floor(left + (right - left) / 2), which goes through float
        division and is inexact for x larger than ~2**52.  Pure integer
        arithmetic keeps the search exact for arbitrarily large ints.
        """
        left, right = 0, x
        while left <= right:
            mid = left + (right - left) // 2  # overflow-safe integer midpoint
            squared = mid * mid
            if squared > x:
                right = mid - 1
            elif squared < x:
                left = mid + 1
            else:
                return mid  # exact square
        # Loop exits with right = floor(sqrt(x)) when x is not a perfect square.
        return right
if __name__ == "__main__":
    # Smoke test: the integer square root of 8 is 2.
    solver = Solution()
    value = 8
    print(solver.mySqrt(value))
|
from django import forms
from .models import Categoria
class CategoriaForm(forms.ModelForm):
    """ModelForm for Categoria with uniform widget styling applied at init."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Give every visible widget the same size and Bootstrap class.
        for bound_field in self.visible_fields():
            widget_attrs = bound_field.field.widget.attrs
            widget_attrs['style'] = 'font-size: 14px'
            widget_attrs['class'] = 'form-control'
        # Field-specific widget tweaks.
        self.fields['estado'].widget.attrs['type'] = 'checkbox'
        self.fields['descripcion'].widget.attrs['placeholder'] = 'Categoría'

    class Meta:
        model = Categoria
        fields = ['descripcion', 'estado']
        labels = {
            'descripcion': 'Descripcion de la Categoría',
            'estado': 'Estado'
        }
|
import numpy as np
import os
import torch
import time
import logging
import torch.nn as nn
from torch import optim
from lib.data_process.loader import MyDataset
from torch.utils.data import DataLoader
from lib.model.factory import model_factory
from lib.loss import FocalLoss, BceLoss, ResidualLoss
from lib.config import train_cfg
def adjust_learning_rate(base, down_step, down_ratio, optimizer, epoch, logger):
    """Decay the learning rate when `epoch` hits a milestone in `down_step`.

    For the matching milestone at (1-based) position p, every param group's
    lr is set to base * down_ratio ** p.  Epochs that are not milestones
    leave the optimizer untouched.
    """
    for position, milestone in enumerate(down_step, start=1):
        if milestone != epoch:
            continue
        new_lr = base * (down_ratio ** position)
        logger.info("change lr to : {}".format(new_lr))
        for group in optimizer.param_groups:
            group['lr'] = new_lr
def log_cfg(logger, cfg):
    """Log the training configuration when starting from scratch (no checkpoint)."""
    logger.info("NO checkpoint loaded in advance")
    logger.info("model: {}, epoch: {}, lr: {}".format(cfg['basic']['model'], 'from scratch', cfg['net_cfg']['lr']))
    # Data and checkpoint locations.
    logger.info("dir_img : {} \nmask_dir: {} \ncheckpoint_dir: {} \n".format(
        cfg['data_cfg']['img_path'],
        cfg['data_cfg']['gt_path'],
        cfg['basic']['checkpoint_dir']
    ))
    # log train settings
    logger.info("epochs: {} \nbatch_size : {} \ndown_step: {} \ndown_ratio : {} \nsave_step: {}".format(
        cfg['net_cfg']['epochs'],
        cfg['net_cfg']['batch_size'],
        str(cfg['net_cfg']['down_step']),
        cfg['net_cfg']['down_ratio'],
        cfg['net_cfg']['save_step']
    ))
    # log train data info
    # NOTE(review): img_path is already logged above; this repeats it.
    logger.info("\nimg_path:{}\n".format(cfg['data_cfg']['img_path']))
    # log noise info
    logger.info("noise_type: {} \nnoise_action: {}\nSNR: {}\n".format(
        cfg['data_cfg']['noise_type'],
        cfg['data_cfg']['noise_action'],
        str(cfg['data_cfg']['SNR'])
    ))
def train_net(cfg):
    """Train a segmentation network described by `cfg`.

    Builds the model, sets up file+console logging, optionally resumes from a
    checkpoint, then runs the epoch loop with ResidualLoss and periodic
    checkpoint saving.  `cfg` is the project-wide nested config dict with
    'basic', 'net_cfg' and 'data_cfg' sections.
    """
    # Model factory takes (in_channels, out_channels) — presumably 1-channel
    # input and output; confirm against model_factory.
    net = model_factory[cfg['basic']['model']](1, 1)
    # Unique run name: timestamp + dataset dir + noise settings.
    save_name = '_'.join([
        time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()),
        cfg['data_cfg']['img_path'].split(os.sep)[-2],
        'SNR', str(cfg['data_cfg']['SNR']),
        'ntype', str(cfg['data_cfg']['noise_type']),
        'simuTag', str(cfg['data_cfg']['simulate_tag'])
    ])
    checkpoint_path = os.path.join(cfg['basic']['checkpoint_dir'], cfg['basic']['checkpoint'])
    # NOTE(review): handlers are added to a module-level logger each call;
    # calling train_net repeatedly in one process would duplicate log lines
    # if removeHandler at the bottom were ever skipped (e.g. on exception).
    logger = logging.getLogger(__name__)
    logger.setLevel(level=logging.INFO)
    if not cfg['basic']['create_new_log']:
        # Reuse the log file paired with the checkpoint being resumed.
        handler = logging.FileHandler(checkpoint_path.replace('pth', 'log').replace('narrow_elev_checkpoints', 'logger'))
    else:
        handler = logging.FileHandler(os.path.join(cfg['basic']['logger_dir'], save_name + '.log'))
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    handler.setFormatter(formatter)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.addHandler(console)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    optimizer = optim.SGD(net.parameters(), lr=cfg['net_cfg']['lr'], momentum=0.9, weight_decay=0.0005)
    start_epoch = 0
    if '.pth' in cfg['basic']['checkpoint']:
        # Resume: restore weights, optimizer state and epoch counter.
        # NOTE(review): torch.load is called three times on the same file;
        # also optimizer tensors are moved with .cuda() unconditionally,
        # which would fail on a CPU-only resume.
        net.load_state_dict(torch.load(checkpoint_path)['net'])
        optimizer.load_state_dict(torch.load(checkpoint_path)['optimizer'])
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()
        logger.info("checkpoint: {}".format(cfg['basic']['checkpoint']))
        start_epoch = torch.load(checkpoint_path)['epoch']
        logger.info("epoch :{}, lr: {}".format(start_epoch, optimizer.state_dict()['param_groups'][0]['lr']))
    else:
        log_cfg(logger, cfg)
    net.to(device)
    # criterion = BceLoss()
    # criterion = FocalLoss()
    criterion = ResidualLoss()
    # log criterion name
    logger.info("criterion: {}".format(criterion.__class__.__name__))
    logger.info("network: {}".format(net.__class__.__name__))
    # Switch to timestamped log format for the training loop output.
    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
    handler.setFormatter(formatter)
    for epoch in range(start_epoch + 1, cfg['net_cfg']['epochs']):
        running_loss = 0
        running_number = 1
        print('Starting epoch {}/{}.'.format(epoch, cfg['net_cfg']['epochs']))
        net.train()
        # Dataset/dataloader are rebuilt every epoch (noise re-sampling,
        # presumably — confirm in MyDataset).
        train_data = MyDataset(
            cfg['data_cfg']['img_path'],
            cfg['data_cfg']['gt_path'],
            cfg['data_cfg'])
        train_dataloader = DataLoader(train_data, batch_size=cfg['net_cfg']['batch_size'], shuffle=True, num_workers=3)
        adjust_learning_rate(
            cfg['net_cfg']['lr'],
            cfg['net_cfg']['down_step'],
            cfg['net_cfg']['down_ratio'],
            optimizer, epoch, logger)
        for images, masks in train_dataloader:
            images = images.to(device)
            masks = masks.to(device)
            preds = net(images)
            loss = criterion(preds, masks)
            optimizer.zero_grad()
            # ones_like gradient implies criterion returns a per-element
            # (non-scalar) loss tensor.
            loss.backward(torch.ones_like(loss))
            optimizer.step()
            running_loss = running_loss + loss.detach().cpu().numpy().mean()
            logger.info("epoch: {} | iter: {} | loss: {}".format(epoch, running_number, running_loss / running_number))
            running_number += 1
        # Saves on epochs congruent to 1 modulo save_step (e.g. 1, 11, 21 …).
        if epoch % cfg['net_cfg']['save_step'] == 1:
            torch.save({
                'net': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch
            }, os.path.join(cfg['basic']['checkpoint_dir'], save_name + '.pth'))
            logger.info("save model: {}".format(os.path.join(cfg['basic']['checkpoint_dir'], save_name + '.pth')))
    # Detach handlers so repeated runs in one process don't double-log.
    logger.removeHandler(handler)
    logger.removeHandler(console)
if __name__ == '__main__':
    # Sweep over noise configurations: clean, then Rayleigh noise at two SNRs.
    data_cfg_sets = [
        dict(ntype=None, snr=np.inf),
        dict(ntype='Rayleigh', snr=10),
        dict(ntype='Rayleigh', snr=5),
    ]
    for data_cfg in data_cfg_sets:
        # train_cfg is the shared project config; mutate it per run.
        train_cfg['data_cfg']['SNR'] = data_cfg['snr']
        train_cfg['data_cfg']['noise_type'] = data_cfg['ntype']
        train_net(train_cfg)
|
# from pypi
import numpy
import matplotlib.pyplot as pyplot
import seaborn
from sklearn.metrics import log_loss
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.datasets import make_hastie_10_2
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x), applied elementwise via numpy broadcasting."""
    exp_term = numpy.exp(-x)
    return 1 / (1 + exp_term)
# Synthetic 1-D dataset: label is the sign of the single feature, in {-1, +1}.
X_all = numpy.random.randn(5000, 1)
y_all = (X_all[:, 0] > 0) * 2 - 1
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
                                                    test_size=0.5,
                                                    random_state=42)
# Baseline: a single decision stump.
model = DecisionTreeClassifier(max_depth=1)
model.fit(X_train, y_train)
print('Accuracy for a single decision stump: {}'.format(model.score(X_test, y_test)))
# Gradient boosting with a small learning rate and many estimators.
gbc_model = GradientBoostingClassifier(n_estimators=5000, learning_rate=0.01,
                                       max_depth=3,
                                       random_state=0)
gbc_model.fit(X_train, y_train)
# Fixed typo in the printed label: "Booing" -> "Boosting".
print('Accuracy for Gradient Boosting: {}'.format(gbc_model.score(X_test, y_test)))
y_pred = gbc_model.predict_proba(X_test)[:, 1]
print("Test logloss: {}".format(log_loss(y_test, y_pred)))
def compute_loss(y_true, predicted_probabilities):
    """Log-loss of raw decision scores: squash through the sigmoid first.

    Args:
        y_true: the actual classifications
        predicted_probabilities: raw (pre-sigmoid) decision scores
    """
    probabilities = sigmoid(predicted_probabilities)
    return log_loss(y_true, probabilities)
def print_loss(cumulative_predictions, y_test):
    """Report log-loss using all trees, all-but-last, and all-but-first.

    Args:
        cumulative_predictions (numpy.Array): staged decision scores,
            one row per cumulative tree count
        y_test: true labels for the test split
    """
    all_trees = compute_loss(y_test, cumulative_predictions[-1, :])
    without_last = compute_loss(y_test, cumulative_predictions[-2, :])
    without_first = compute_loss(
        y_test, cumulative_predictions[-1, :] - cumulative_predictions[0, :])
    print(" - Logloss using all trees: {}".format(all_trees))
    print(" - Logloss using all trees but last: {}".format(without_last))
    print(" - Logloss using all trees but first: {}".format(without_first))
    return
# Staged decision scores: row i holds the cumulative score after i+1 trees.
gbc_cumulative_predictions = numpy.array(
    [x for x in gbc_model.staged_decision_function(X_test)])[:, :, 0]
print_loss(gbc_cumulative_predictions, y_test)
def plot_predictions(cumulative_predictions, identifier, y_true=None):
    """Plot cumulative decision score vs. number of trees for one positive sample.

    Args:
        cumulative_predictions: staged decision scores (trees x samples)
        identifier (str): label used in the plot title
        y_true: labels used to select a positive-class sample.  Defaults to
            the module-level ``y_test`` for backward compatibility — the
            original silently depended on that global.
    """
    if y_true is None:
        # NOTE(review): hidden global dependency preserved for old callers.
        y_true = y_test
    figure = pyplot.figure(figsize=(10, 6))
    axe = figure.gca()
    # First sample whose true label is 1.
    axe.plot(cumulative_predictions[:, y_true == 1][:, 0])
    axe.set_title("({}) Score vs Trees".format(identifier))
    axe.set_xlabel('Number of Trees')
    axe.set_ylabel('Cumulative Decision Score')
    return
plot_predictions(gbc_cumulative_predictions, "eta=0.01")
# Deliberately huge learning rate to illustrate divergent boosting behaviour.
big_eta = GradientBoostingClassifier(n_estimators=5000, learning_rate=8,
                                     max_depth=3, random_state=0)
big_eta.fit(X_train, y_train)
y_pred = big_eta.predict_proba(X_test)[:, 1]
print("Test logloss: {}".format(log_loss(y_test, y_pred)))
print('Accuracy for Big Eta: {}'.format(big_eta.score(X_test, y_test)))
big_eta_cumulative_predictions = numpy.array(
    [x for x in big_eta.staged_decision_function(X_test)])[:, :, 0]
print_loss(big_eta_cumulative_predictions, y_test)
plot_predictions(big_eta_cumulative_predictions, "eta=8")
# Repeat the comparison on the harder 10-feature Hastie synthetic dataset.
X_hastie, y_hastie = make_hastie_10_2(random_state=0)
X_train_hastie, X_test_hastie, y_train_hastie, y_test_hastie = train_test_split(
    X_hastie,
    y_hastie,
    test_size=0.5,
    random_state=42)
stump = DecisionTreeClassifier(max_depth=1)
stump.fit(X_train_hastie, y_train_hastie)
print('Accuracy for a single decision stump: {}'.format(
    stump.score(X_test_hastie, y_test_hastie)))
# Unrestricted depth decision tree for comparison.
tree = DecisionTreeClassifier()
tree.fit(X_train_hastie, y_train_hastie)
print('Accuracy for the Decision Tree: {}'.format(
    tree.score(X_test_hastie, y_test_hastie)))
gbc2_model = GradientBoostingClassifier(n_estimators=5000, learning_rate=0.01,
                                        max_depth=3,
                                        random_state=0)
gbc2_model.fit(X_train_hastie, y_train_hastie)
y_pred = gbc2_model.predict_proba(X_test_hastie)[:, 1]
print('Accuracy for Gradient Boosting: {}'.format(
    gbc2_model.score(X_test_hastie, y_test_hastie)))
gbc2_cumulative_predictions = numpy.array(
    [x for x in gbc2_model.staged_decision_function(X_test_hastie)])[:, :, 0]
print_loss(gbc2_cumulative_predictions, y_test_hastie)
xg_model = XGBClassifier(n_estimators=5000, learning_rate=0.01)
xg_model.fit(X_train_hastie, y_train_hastie)
print('Accuracy for XGBoost: {}'.format(xg_model.score(X_test_hastie, y_test_hastie)))
y_pred = xg_model.predict_proba(X_test_hastie)[:, 1]
# Fixed: the original unconditionally called staged_decision_function —
# which XGBClassifier does not implement — and then referenced the
# undefined name `xg_cumulative_loss` (and passed only one argument to
# print_loss).  Guard the staged-loss report so the script finishes.
try:
    xg_cumulative_predictions = numpy.array(
        [x for x in xg_model.staged_decision_function(X_test_hastie)])[:, :, 0]
    print_loss(xg_cumulative_predictions, y_test_hastie)
except AttributeError:
    print("XGBClassifier has no staged_decision_function; skipping staged loss report.")
|
from PIL import Image
import numpy as np
def imageResize(infile, outfile, size):
"""
:param infile: input img
:param outfile: output img
:param size: max size for the img
:return: return the resized im
"""
try:
im = Image.open(infile)
im.thumbnail(size,Image.ANTIALIAS)
im.save(outfile, "JPEG")
except IOError:
print "cannot create thumbnail for '%s'" % infile
return im
def convertToArray(infile):
    """
    Load an image file into a numpy array of RGB pixel data.

    Reference: http://stackoverflow.com/questions/25102461/python-rgb-matrix-of-an-image
    :param infile: input img path
    :return: a w*h*3 numpy array with the image's pixel values
    """
    image = Image.open(infile)
    return np.array(image)
if __name__ == '__main__':
    # imageResize('img/starry-night.jpg', 'out.jpg', (128, 128))
    # Expects out.jpg from a prior imageResize run (commented out above).
    arr = convertToArray('out.jpg')
from flask import request
from apps.flow.business.deploy import DeployBusiness, DeployRecordBusiness, DeployLogBusiness
from apps.flow.extentions import validation, parse_json_form, parse_list_args2
from library.api.render import json_detail_render, json_list_render2
from library.api.tBlueprint import tblueprint
bpname = 'deploy'
bpname_relate = 'flow'
# Permission names derive from the related blueprint ("flow"), not "deploy".
view_permission = f'{bpname_relate}_view'
modify_permission = f'{bpname_relate}_modify'
deploy = tblueprint(bpname, __name__, bpname=bpname_relate)
@deploy.route('/getserver', methods=['GET'])
def deploy_get_server_handler():
    """
    @api {get} /v1/deploy/getserver Get the service list
    @apiName deployGetServer
    @apiGroup Deploy
    @apiDescription Get the list of deployable services
    @apiParam {int} project_id project id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 1,
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
          {
            "default_branch": "master",
            "git_origin": "ssh://git@git.innotechx.com:7999/x/pt-cluster.git",
            "id": 369,
            "is_increment": 0,
            "is_short": false,
            "name": "ptgate-sx",
            "node_list": [
              {
                "id": 293,
                "name": "sx-79",
                "private_id": "192.168.18.79",
                "version": "0.0.35"
              },
              {
                "id": 269,
                "name": "sx-64",
                "private_id": "192.168.18.64",
                "version": "0.0.12"
              }
            ],
            "project_id": 27,
            "project_name": "萌推开发测试组"
          },
          {
            "default_branch": "develop",
            "git_origin": "ssh://git@git.innotechx.com:7999/x/pt-promote.git",
            "id": 542,
            "is_increment": 0,
            "is_short": false,
            "name": "pt-sp-sx",
            "node_list": [
              {
                "id": 269,
                "name": "sx-64",
                "private_id": "192.168.18.64",
                "version": "0.0.12"
              },
              {
                "id": 293,
                "name": "sx-79",
                "private_id": "192.168.18.79",
                "version": "0.0.35"
              }
            ],
            "project_id": 27,
            "project_name": "萌推开发测试组"
          }
        ],
        "message": "查询成功"
    }
    """
    # NOTE(review): the documented project_id parameter is not forwarded —
    # get_server() is called without arguments; confirm intended.
    code, data, message = DeployBusiness.get_server()
    return json_detail_render(code, data, message)
@deploy.route('/getnode', methods=['GET'])
def deploy_get_node_handler():
    """
    @api {get} /v1/deploy/getnode Get the node list
    @apiName deployGetNode
    @apiGroup Deploy
    @apiDescription Get the list of deployment nodes
    @apiParam {int} project_id project id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 1,
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 101,
        "data": [],
        "message": "can not find object"
    }
    """
    # NOTE(review): the documented project_id parameter is not forwarded here either.
    code, data, message = DeployBusiness.get_node()
    return json_detail_render(code, data, message)
@deploy.route('/', methods=['POST'])
@validation('POST:deploy_create')
def deploy_create_handler():
    """
    @api {post} /v1/deploy Create a deployment
    @apiName deployCreate
    @apiGroup Deploy
    @apiDescription Create a deployment
    @apiParam {int} project_id project id
    @apiParam {list} server_list list of services
    @apiParam {list} node_list list of nodes
    @apiParam {string} branch git branch
    @apiParam {int} flow_id flow id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 1,
        "server_list": [],
        "node_list": [],
        "branch": "",
        "flow_id": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 101,
        "data": [
            {
                deploy_id: None
            }
        ],
        "message": "can not find object"
    }
    """
    project_id, server_list, node_list, branch, flow_id = parse_json_form('deploy_create')
    code, deploy_id = DeployBusiness.create(project_id, server_list, node_list, branch, flow_id)
    if code == 101:
        # Message: "this service is being deployed, please retry later".
        return json_detail_render(code, [{'deploy_id': deploy_id}], '此服务正在部署请稍后')
    return json_detail_render(code, [{'deploy_id': deploy_id}])
@deploy.route('/update/result', methods=['POST'])
@validation('POST:deploy_result')
def update_deploy_result():
    """
    @api {get} /v1/deploy/update/result Update deployment result
    @apiName deployUpdateResult
    @apiGroup Deploy
    @apiDescription Update the result of a deployment
    @apiParam {int} deploy_id deploy id
    @apiParam {list} result deployment result
    @apiParamExample {json} Request-Example:
    {
        "deploy_id": 1,
        result: []
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [],
        "message": "ok"
    }
    """
    deploy_id, result = parse_json_form('deploy_result')
    code = DeployRecordBusiness.modify_result(result, deploy_id)
    # DeployRecordBusiness.run_automan(deploy_id)
    # Write the deploy log; the result is persisted only once all records
    # for this deploy have finished initialising.
    single_data = DeployRecordBusiness.not_init_data(deploy_id)
    if len(single_data) == 0:
        # NOTE(review): local name `delopy_data` is a typo for `deploy_data`.
        delopy_data = DeployRecordBusiness.query_record_deploy(deploy_id)
        if len(delopy_data) > 0:
            DeployLogBusiness.deploy_data(delopy_data, deploy_id)
    # DeployRecordBusiness.run_automan(deploy_id)
    # Write the automation log
    # DeployLogBusiness.automan_data(auto_man_data[1],deploy_id)
    return json_detail_render(code, [], 'ok')
@deploy.route('/new_data', methods=['GET'])
def gain_deploy_data():
    """
    @api {get} /v1/deploy/new_data Get info for the current deploy_id
    @apiName deployNew_data
    @apiGroup Deploy
    @apiDescription Get the information for the current deploy_id
    @apiParam {int} project_id project id
    @apiParam {int} flow_id flow id
    @apiParam {int} deploy_id deploy id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 45,
        "flow_id": 1,
        "deploy_id": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
            "branch": "develop",
            "deploy_id": 160,
            "flow_id": 232,
            "id": 179,
            "node_id": 31,
            "node_name": "yn-244",
            "project_id": 4,
            "result": 1,
            "server_id": 45,
            "server_name": "submarine-test",
            "status": 0,
            "version": "1.1.75"
            }
        ],
        "message": "成功"
    }
    """
    # NOTE(review): documented params are not forwarded; the business layer
    # presumably reads them from the request context — confirm.
    data = DeployRecordBusiness.query_deploy_id_json()
    combine_data = {'is_one_Key': 1, 'data': data}
    single_data = DeployRecordBusiness.is_one_key()
    if len(single_data) == 0:
        combine_data['is_one_Key'] = 0
    return json_detail_render(0, combine_data)
@deploy.route('/history_data', methods=['GET'])
def gain_old_data():
    """
    @api {get} /v1/deploy/history_data Get historical deployment records
    @apiName deployHistory_data
    @apiGroup Deploy
    @apiDescription Get historical deployment records
    @apiParam {int} server_id server id
    @apiParamExample {json} Request-Example:
    {
        "project_id": 4,
        "flow_id": 232,
        "page_size": 10,
        "page_index": 1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
            "branch": "develop",
            "deploy_id": 160,
            "flow_id": 232,
            "id": 179,
            "node_id": 31,
            "node_name": "yn-244",
            "project_id": 4,
            "result": 1,
            "server_id": 45,
            "server_name": "submarine-test",
            "status": 0,
            "version": "1.1.75"
            },
            {
            "branch": "develop",
            "deploy_id": 159,
            "flow_id": 232,
            "id": 178,
            "node_id": 31,
            "node_name": "yn-244",
            "project_id": 4,
            "result": 4,
            "server_id": 45,
            "server_name": "submarine-test",
            "status": 0,
            "version": "1.1.74"
            }
        ],
        "message": "ok",
        "page_index": 1,
        "page_size": 10,
        "total": 2
    }
    """
    page_size, page_index = parse_list_args2()
    project_id = request.args.get('project_id')
    flow_id = request.args.get('flow_id')
    # NOTE(review): the data query ignores project_id/flow_id while the count
    # query filters by them — page data and total may disagree; confirm.
    data = DeployRecordBusiness.query_all_json(page_size, page_index)
    count = DeployRecordBusiness.query_all_count(project_id, flow_id)
    return json_list_render2(0, data, page_size, page_index, count)
@deploy.route('/check_log', methods=['GET'])
def check_log_data():
    """
    @api {get} /v1/deploy/check_log View historical logs
    @apiName deployCheck_log
    @apiGroup Deploy
    @apiDescription View historical deployment logs
    @apiParam {int} record_id record id
    @apiParamExample {json} Request-Example:
    {
        "record_id": 179
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": {
            "command": {
            "status": 0,
            "stderr": "",
            "stdout": ""
            },
            "compile": {
            "status": 0,
            "stderr": "",
            "stdout": ""
            },
            "deploy": {
            "status": 0,
            "stderr": "",
            "stdout": ""
            },
            "refresh": {
            "status": 0,
            "stderr": "",
            "stdout": ""
            },
            "restart": {
            "status": 0,
            "stderr": "",
            "stdout": ""
            }
        },
        "message": "成功"
    }
    """
    # NOTE(review): documented record_id param is not passed explicitly;
    # presumably read from the request inside the business layer — confirm.
    code, data, message = DeployRecordBusiness.check_log_data()
    return json_detail_render(code, data, message)
@deploy.route('/branch', methods=['GET'])
def update_branch():
    """
    @api {get} /v1/deploy/branch Get branch information
    @apiName deployBranch
    @apiGroup Deploy
    @apiDescription Get the git branch information
    @apiParam {int} server_id server id
    @apiParamExample {json} Request-Example:
    {
        "server_id": 45
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            "20190604",
            "master",
            "develop",
            "release/inno",
            "dev/push",
            "release/cpc"
        ],
        "message": "成功"
    }
    """
    # NOTE(review): documented server_id param is not passed explicitly — confirm.
    code, data, message = DeployBusiness.get_branch()
    return json_detail_render(code, data, message)
|
import tensorflow.keras
import numpy as np
from keras_preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Dropout, Conv1D, Activation, Flatten
from tensorflow.keras.models import Sequential
from src.sound.SoundTransformer import SoundTransformer
from src.classifiers.KerasClassifier import KerasClassifier
class DigitClassifier(KerasClassifier):
    """1-D CNN that classifies 128-element MFCC feature vectors into 10 softmax classes."""

    def __init__(self, file_path=None):
        # Model loading/initialisation is handled by the KerasClassifier base.
        super().__init__(file_path)

    def predict(self, x):
        """Return class probabilities for one feature vector produced by transform()."""
        # Add batch and channel axes: (128,) -> (1, 128, 1), as Conv1D expects.
        x = np.expand_dims(np.array([x]), axis=2)
        return self._model.predict(x)

    def build(self):
        """Construct and compile the Conv1D network into self._model."""
        model = Sequential()
        model.add(Conv1D(128, 5, padding='same',
                         input_shape=(128, 1)))
        model.add(Activation('relu'))
        model.add(Conv1D(128, 5, padding='same'))
        model.add(Activation('relu'))
        model.add(Flatten())
        model.add(Dense(10, activation='softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        self._model = model

    def transform(self, x, samplerate):
        """Convert raw audio samples to a fixed-length (128,) MFCC feature vector."""
        to_process = SoundTransformer.mfcc(x, samplerate )
        # Average MFCCs over the time axis, then zero-pad/truncate to 128 values.
        to_process = np.mean(to_process, axis=1)
        to_process = pad_sequences([to_process], maxlen=128, padding='post')[0]
        # to_process = np.expand_dims(to_process, axis=2)
        return to_process
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Cliente, Proyecto
class ClienteSerializer(serializers.ModelSerializer):
    """Serializes Cliente records for the REST API."""
    class Meta:
        model = Cliente
        fields = ('idCliente', 'nombre', 'rubro', 'direccion', 'contacto')
class ProyectoSerializer(serializers.ModelSerializer):
    """Serializes Proyecto records for the REST API."""
    class Meta:
        model = Proyecto
        fields = ('nombre', 'tipo', 'cliente', 'encargado')
class UserSerializer(serializers.ModelSerializer):
    """Exposes only username and email of Django auth users."""
    class Meta:
        model = User
        fields = ('username', 'email')
import pstats
import cProfile
import numpy as np
import data.ncExtract as NCE
from data.timeArray import *
import unittest
# path='/home/tuomas/workspace/cmop/projects/turb_tests/cre_open_channel/real_bath/fluxTest/combined'
path = '/home/workspace/ccalmr53/karnat/projects/turb_tests/fluxtest/fluxTest/combined/'
# all good
#staX = np.array([10.0,4600.0])
#staY = np.array([70.0,60.0])
#staZ = np.array([-5.2,-3.5])
#staT = np.array([300.0,92600.0])+1333700100.0
#staNames = ['foo','bar']
# one bad
#staX = np.array([10.0,4600.0,5000.0])
#staY = np.array([70.0,60.0,200.0])
#staZ = np.array([-5.2,-3.5,-2.5])
#staT = np.array([300.0,92600.0,100000.0])+1333700100.0
#staNames = ['foo','bar','bad']
# Active fixture: one bad station ('bad'), two near each other ('bar'/'bar2').
# Coordinates/depth are model coordinates; times are offsets from an epoch.
staX = np.array([33.4, 4600.0, 4600.0, 5000.0])
staY = np.array([50.0, 60.0, 60.5, 200.0])
staZ = np.array([-5.2, -3.5, -3.5, -2.5])
staT = np.array([300.0, 92600.0, 92601.0, 100000.0]) + 1333700100.0
staNames = ['foo', 'bar', 'bar2', 'bad']
# Three of the four stations are expected to yield valid extractions.
nGoodSta = 3
# Extraction time window (datetime comes from the data.timeArray star import).
st = datetime.datetime(2012, 4, 5)
et = datetime.datetime(2012, 4, 9)
verbose = False
class TestTimeSeriesExtraction(unittest.TestCase):
    """Time-series extraction should return one data container per good station."""
    def testElev(self):
        se = NCE.selfeExtract(path, var='elev', verbose=verbose)
        dcs = se.extractTimeSeries(
            st, et, 'elev', staX, staY, staNames, staZ=staZ)
        self.assertEqual(len(dcs), nGoodSta)
    def testSalt(self):
        se = NCE.selfeExtract(path, var='salt', verbose=verbose)
        dcs = se.extractTimeSeries(
            st, et, 'salt', staX, staY, staNames, staZ=staZ)
        self.assertEqual(len(dcs), nGoodSta)
    def testHvel(self):
        se = NCE.selfeExtract(path, var='hvel', verbose=verbose)
        dcs = se.extractTimeSeries(
            st, et, 'hvel', staX, staY, staNames, staZ=staZ)
        self.assertEqual(len(dcs), nGoodSta)
    def testSalt70(self):
        # Same extraction but from the '70' output file type.
        se = NCE.selfeExtract(
            path,
            var='salt',
            verbose=verbose,
            fileTypeStr='70')
        dcs = se.extractTimeSeries(
            st, et, 'salt', staX, staY, staNames, staZ=staZ)
        self.assertEqual(len(dcs), nGoodSta)
    def testHvel67(self):
        se = NCE.selfeExtract(
            path,
            var='hvel',
            verbose=verbose,
            fileTypeStr='67')
        dcs = se.extractTimeSeries(
            st, et, 'hvel', staX, staY, staNames, staZ=staZ)
        self.assertEqual(len(dcs), nGoodSta)
class TestProfileExtraction(unittest.TestCase):
    """Vertical-profile extraction: 2D vars must fail; 3D vars yield one profile per good station."""
    def testElevFail(self):
        se = NCE.selfeExtract(path, var='elev', verbose=verbose)
        with self.assertRaises(Exception) as cm:
            dcs = se.extractVerticalProfile(
                st, et, 'elev', staX, staY, staNames)
        targetError = 'This is a 2D variable, cannot extract vertical profile: elev'
        # NOTE(review): Exception.message is Python-2-only; on Python 3 use
        # str(cm.exception) instead.
        self.assertEqual(cm.exception.message, targetError,
                         'Got wrong error message: ' + cm.exception.message)
    def testSalt(self):
        se = NCE.selfeExtract(path, var='salt', verbose=verbose)
        dcs = se.extractVerticalProfile(st, et, 'salt', staX, staY, staNames)
        self.assertEqual(len(dcs), nGoodSta)
    def testHvel(self):
        se = NCE.selfeExtract(path, var='hvel', verbose=verbose)
        dcs = se.extractVerticalProfile(st, et, 'hvel', staX, staY, staNames)
        self.assertEqual(len(dcs), nGoodSta)
    def testSalt70(self):
        se = NCE.selfeExtract(
            path,
            var='salt',
            verbose=verbose,
            fileTypeStr='70')
        dcs = se.extractVerticalProfile(st, et, 'salt', staX, staY, staNames)
        self.assertEqual(len(dcs), nGoodSta)
    def testHvel67(self):
        se = NCE.selfeExtract(
            path,
            var='hvel',
            verbose=verbose,
            fileTypeStr='67')
        dcs = se.extractVerticalProfile(st, et, 'hvel', staX, staY, staNames)
        self.assertEqual(len(dcs), nGoodSta)
class TestTransectExtraction(unittest.TestCase):
    """Transect extraction for each variable / file-type combination."""

    def testElev(self):
        se = NCE.selfeExtract(path, var='elev', verbose=verbose)
        dc = se.extractTransect(st, et, 'elev', staX, staY, 'trans')
        self.assertEqual(dc.getMetaData('variable'), 'elev')

    def testSalt(self):
        se = NCE.selfeExtract(path, var='salt', verbose=verbose)
        dc = se.extractTransect(st, et, 'salt', staX, staY, 'trans')
        self.assertEqual(dc.getMetaData('variable'), 'salt')

    def testHvel(self):
        se = NCE.selfeExtract(path, var='hvel', verbose=verbose)
        dc = se.extractTransect(st, et, 'hvel', staX, staY, 'trans')
        self.assertEqual(dc.getMetaData('variable'), 'hvel')

    def testSalt70(self):
        se = NCE.selfeExtract(
            path,
            var='salt',
            verbose=verbose,
            fileTypeStr='70')
        dc = se.extractTransect(st, et, 'salt', staX, staY, 'trans')
        self.assertEqual(dc.getMetaData('variable'), 'salt')

    def testHvel67(self):
        # CONSISTENCY FIX: this test was named testHvel70 although it uses
        # fileTypeStr='67'; renamed to match its actual behavior and the
        # testHvel67 naming used by the sibling test classes.
        se = NCE.selfeExtract(
            path,
            var='hvel',
            verbose=verbose,
            fileTypeStr='67')
        dc = se.extractTransect(st, et, 'hvel', staX, staY, 'trans')
        self.assertEqual(dc.getMetaData('variable'), 'hvel')
class TestTrackExtraction(unittest.TestCase):
    """Track (moving x/y/z/t point) extraction for each variable."""

    def _run(self, var, fileTypeStr=None):
        # Construct the extractor, extract along the fixture track, and
        # verify the returned container is tagged with the right variable.
        kwargs = {'var': var, 'verbose': verbose}
        if fileTypeStr is not None:
            kwargs['fileTypeStr'] = fileTypeStr
        extractor = NCE.selfeExtract(path, **kwargs)
        container = extractor.extractTrack(var, staX, staY, staZ, staT, 'foo')
        self.assertEqual(container.getMetaData('variable'), var)

    def testElev(self):
        self._run('elev')

    def testSalt(self):
        self._run('salt')

    def testHvel(self):
        self._run('hvel')

    def testSalt70(self):
        self._run('salt', fileTypeStr='70')

    def testHvel67(self):
        self._run('hvel', fileTypeStr='67')
class TestSlabExtraction(unittest.TestCase):
    """Horizontal slab extraction at a vertical level index (k) or a fixed
    depth (z), for each variable / file-type combination."""

    def _run(self, var, fileTypeStr=None, **level):
        # ``level`` is exactly one of k=<index> or z=<depth>, forwarded
        # verbatim to extractSlab.
        kwargs = {'var': var, 'verbose': verbose}
        if fileTypeStr is not None:
            kwargs['fileTypeStr'] = fileTypeStr
        se = NCE.selfeExtract(path, **kwargs)
        dc = se.extractSlab(st, et, 'slab', var, **level)
        self.assertEqual(dc.getMetaData('variable'), var)

    def testElevK(self):
        self._run('elev', k=1)

    def testSaltK(self):
        self._run('salt', k=1)

    def testHvelK(self):
        self._run('hvel', k=1)

    def testSaltK70(self):
        self._run('salt', fileTypeStr='70', k=1)

    def testHvelK67(self):
        self._run('hvel', fileTypeStr='67', k=1)

    def testElevKneg(self):
        self._run('elev', k=-1)

    def testSaltKneg(self):
        self._run('salt', k=-1)

    def testHvelKneg(self):
        # BUG FIX: previously passed z=-1 (a depth) instead of k=-1 (a
        # level index), contradicting the test name and its siblings.
        self._run('hvel', k=-1)

    def testSaltKneg70(self):
        self._run('salt', fileTypeStr='70', k=-1)

    def testHvelKneg67(self):
        # BUG FIX: previously passed z=-1 instead of k=-1 (see testHvelKneg).
        self._run('hvel', fileTypeStr='67', k=-1)

    def testElevZ(self):
        self._run('elev', z=-2.5)

    def testSaltZ(self):
        self._run('salt', z=-2.5)

    def testHvelZ(self):
        self._run('hvel', z=-2.5)

    def testSaltZ70(self):
        self._run('salt', fileTypeStr='70', z=-2.5)

    def testHvelZ67(self):
        self._run('hvel', fileTypeStr='67', z=-2.5)
#-------------------------------------------------------------------------
# High-level interface
#-------------------------------------------------------------------------
class TestHLTimeSeriesExtractionXYZ(unittest.TestCase):
    """High-level extractForXYZ time series tests (profile=False), with z
    given either as absolute depth or relative to the surface."""

    def _run(self, var, z, zRelToSurf):
        dcs = NCE.extractForXYZ(
            path,
            var,
            st,
            et,
            staX,
            staY,
            z=z,
            stationNames=staNames,
            profile=False,
            zRelToSurf=zRelToSurf,
            verbose=verbose)
        self.assertEqual(len(dcs), nGoodSta)

    def testElevZ(self):
        self._run('elev', staZ, False)

    def testElevZRelSurf(self):
        self._run('elev', -staZ, True)

    def testSaltZ(self):
        self._run('salt', staZ, False)

    def testSaltZRelSurf(self):
        self._run('salt', -staZ, True)

    def testSalt70Z(self):
        self._run('salt.70', staZ, False)

    def testSalt70ZRelSurf(self):
        self._run('salt.70', -staZ, True)

    def testHvelZ(self):
        self._run('hvel', staZ, False)

    def testHvelZRelSurf(self):
        self._run('hvel', -staZ, True)

    def testHvel67Z(self):
        # BUG FIX: this test and the next were named testHvelZ /
        # testHvelZRelSurf, duplicating the methods above; the later
        # definitions shadowed the earlier ones so the plain 'hvel' tests
        # never ran. Renamed to the 'hvel.67' variants they actually test.
        self._run('hvel.67', staZ, False)

    def testHvel67ZRelSurf(self):
        self._run('hvel.67', -staZ, True)
class TestHLProfileExtractionXYZ(unittest.TestCase):
    """High-level extractForXYZ in vertical-profile mode (profile=True)."""

    def _extract(self, var):
        # All profile tests share identical arguments except the variable.
        return NCE.extractForXYZ(
            path,
            var,
            st,
            et,
            staX,
            staY,
            z=staZ,
            stationNames=staNames,
            profile=True,
            zRelToSurf=False,
            verbose=verbose)

    def testElevFail(self):
        # 'elev' is 2D: profile extraction yields no data containers.
        self.assertEqual(len(self._extract('elev')), 0)

    def testSalt(self):
        self.assertEqual(len(self._extract('salt')), nGoodSta)

    def testSalt70(self):
        self.assertEqual(len(self._extract('salt.70')), nGoodSta)

    def testHvel(self):
        self.assertEqual(len(self._extract('hvel')), nGoodSta)

    def testHvel67(self):
        self.assertEqual(len(self._extract('hvel.67')), nGoodSta)
class TestHLTransectExtractionForCoords(unittest.TestCase):
    """High-level transect extraction for coordinate lists, one container
    per requested variable."""

    def _run(self, varNames):
        dcs = NCE.extractTransectForCoords(staX, staY, path, varNames,
                                           st, et, 'foo', verbose=verbose)
        self.assertEqual(len(dcs), len(varNames))

    def testMultiple(self):
        self._run(['salt', 'hvel'])

    def testMultipleAltDiscretization(self):
        self._run(['salt.70', 'hvel.67'])
class TestHLSlabExtraction(unittest.TestCase):
    """High-level slab extraction for one or more variables at a level
    index (k) or depth (z); expects one container per variable."""

    def _run(self, varList, **kwargs):
        # CONSISTENCY FIX: these tests hard-coded verbose=True while every
        # other test class uses the module-level ``verbose`` flag -- an
        # apparent debugging leftover.
        dcs = NCE.extractSlabForLevel(path, varList, st, et, 'foo',
                                      verbose=verbose, **kwargs)
        self.assertEqual(len(dcs), len(varList))

    def testMultipleK(self):
        self._run(['elev', 'salt', 'hvel'], k=1)

    def testSaltK(self):
        self._run(['salt'], k=1)

    def testSaltKAlt(self):
        self._run(['salt.70'], k=1)

    def testHvelK(self):
        self._run(['hvel'], k=1)

    def testHvelKAlt(self):
        self._run(['hvel.67'], k=1)

    def testSaltKneg5(self):
        self._run(['salt'], k=-5)

    def testSaltKneg5Alt(self):
        self._run(['salt.70'], k=-5)

    def testHvelKneg5(self):
        self._run(['hvel'], k=-5)

    def testHvelKneg5Alt(self):
        self._run(['hvel.67'], k=-5)

    def testMultipleKAlt(self):
        self._run(['salt.70', 'hvel.67'], k=1)

    def testMultipleKneg(self):
        self._run(['elev', 'salt', 'hvel'], k=-1)

    def testMultipleZ(self):
        self._run(['elev', 'salt', 'hvel'], z=-5.5)

    def testMultipleZRelSurf(self):
        self._run(['elev', 'salt', 'hvel'], z=5.5, zRelToSurf=True)
if __name__ == '__main__':
    # run all tests
    # unittest.main()
    # One suite per TestCase class so individual suites can be toggled
    # below while iterating.
    loader = unittest.defaultTestLoader
    tsSuite = loader.loadTestsFromTestCase(TestTimeSeriesExtraction)
    profileSuite = loader.loadTestsFromTestCase(TestProfileExtraction)
    transectSuite = loader.loadTestsFromTestCase(TestTransectExtraction)
    trackSuite = loader.loadTestsFromTestCase(TestTrackExtraction)
    slabSuite = loader.loadTestsFromTestCase(TestSlabExtraction)
    tsHLSuite = loader.loadTestsFromTestCase(TestHLTimeSeriesExtractionXYZ)
    profileHLSuite = loader.loadTestsFromTestCase(TestHLProfileExtractionXYZ)
    transectHLSuite = loader.loadTestsFromTestCase(
        TestHLTransectExtractionForCoords)
    slabHLSuite = loader.loadTestsFromTestCase(TestHLSlabExtraction)
    # NOTE(review): only slabSuite actually runs; the rest are commented
    # out, presumably a development leftover -- confirm before using in CI.
    # unittest.TextTestRunner().run(profileSuite)
    # unittest.TextTestRunner().run(transectSuite)
    # unittest.TextTestRunner().run(trackSuite)
    unittest.TextTestRunner().run(slabSuite)
    # unittest.TextTestRunner().run(tsHLSuite)
    # unittest.TextTestRunner().run(profileHLSuite)
    # unittest.TextTestRunner().run(transectHLSuite)
    # unittest.TextTestRunner().run(slabHLSuite)
|
# Generated by Django 2.2.2 on 2019-07-01 19:52
from django.db import migrations
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares the ParentalKey 'page' field
    on the feedback-form field models."""
    dependencies = [
        ('home', '0017_feedbackform_2_field_feedbackform_2_page_feedbackform_3_field_feedbackform_3_page'),
    ]
    operations = [
        migrations.AlterField(
            model_name='feedbackform_2_field',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='FBForm_2', to='home.FeedBackForm_2_Page'),
        ),
        migrations.AlterField(
            model_name='feedbackform_3_field',
            name='page',
            # NOTE(review): form *3*'s field targets FeedBackForm_1_Page
            # while its related_name is FBForm_3 -- looks inconsistent, but
            # migrations must mirror models.py; confirm there before editing.
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='FBForm_3', to='home.FeedBackForm_1_Page'),
        ),
    ]
|
'''Train CIFAR10 with PyTorch.'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import os, random
import argparse
import time
from PIL import Image
# import matplotlib.pyplot as plt
from models import *
import cifar_own
import sys
from adv_extension import *
from utils import progress_bar, get_gradient_stats, get_param_stats, log_stats, get_grad_norm, get_model
# from scipy.misc import toimage
# Command-line interface for the CIFAR/MNIST (backdoor) training script.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
# Optimization / training schedule.
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--optim', default='adam', type=str, help='which optimizer to use', choices=['adam', 'sgd'])
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--adversary', '-adv', action='store_true', help='train for adversary')
# Model / checkpoint selection.
parser.add_argument('--model_name', default='Resnet18', type=str, help='name for the model')
parser.add_argument('--checkpoint_dir', default='checkpoint', type=str, help='name for the checkpoint directory')
parser.add_argument('--resnet_version', default='own', type=str, help='name for the model',
                    choices=['own', 'pretrained', 'cifar100', 'mnist', 'std_pretrained', 'preact'])
parser.add_argument('--dataset', default='cifar10', type=str, help='which dataset to use',
                    choices=['cifar10', 'cifar100', 'mnist'])
parser.add_argument('--job_name', default='', type=str, help='name for the job')
parser.add_argument('--num_epochs', default=10, type=int, help='number of epochs to train')
# Adversarial / regularization knobs.
parser.add_argument('--num_adv_batch', default=10, type=int, help='number of adv batches to use for training')
parser.add_argument('--weight_reg_param', default=0.0005, type=float,
                    help='value of regularization parameter to control total weight change')
parser.add_argument('--weight_adv_loss', default=1, type=float,
                    help='value of weight parameter to proportionate adversarial loss impact')
parser.add_argument('--model_eps', type=float, default=0.005, help='model parameters eps value for projection')
parser.add_argument('--train_model_eps_ball', action='store_true', help='project model or not in eps ball')
parser.add_argument('--lp_norm', default='inf', type=str, help='norm for projecting the model weights')
parser.add_argument('--eps_in_percent', default=0, type=int, help='model epsilon is in percent or not')
parser.add_argument('--seed', default=0, type=int, help='Random seed')
# Backdoor pattern generation.
parser.add_argument('--num_pixels_changed', default=100, type=int,
                    help='number of pixels to change for backdoor generation')
parser.add_argument('--use_random_pattern', default=0, type=int, help='whether to use random pattenr for backdoor generation')
parser.add_argument('--set_model_labels', default=0, type=int, help='whether to use labels from original model for training')
parser.add_argument('--use_full_nonadv_data', default=0, type=int, help='whether to use full non adv data for backdoor training')
parser.add_argument('--random_pattern_mode', default=1, type=int,
                    help='what random pattern type to use: 0 to set to zero, 1 to random set, 2 for random add'
                    , choices=[0, 1, 2])
parser.add_argument('--pattern_eps', type=float, default=1, help='pattern eps value to specify max change in pixel value for backdoor')
parser.add_argument('--save_epoch_checkpoints', default=0, type=int, help = 'save model checkpoints in each epoch')
args = parser.parse_args()
# Seed every RNG source (NumPy, torch CPU/CUDA, stdlib random) and force
# deterministic cuDNN kernels for reproducible runs.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
starttime = time.time()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy
best_adv_acc = 0
start_epoch = 0  # start from epoch 0 or last checkpoint epoch
model_name = args.model_name
job_name = args.job_name
num_epochs = args.num_epochs
num_adv_batch = args.num_adv_batch
weight_reg_param = args.weight_reg_param
num_pixels_changed = args.num_pixels_changed
use_random_pattern = args.use_random_pattern
# Per-job statistics log directory.
if not job_name:
    log_dir = "GradientStatsPercentile_Abs_Norm"
else:
    log_dir = "GradientStatsPercentile_Abs_Norm/" + job_name
# BUG FIX: os.mkdir fails when the parent directory is missing (log_dir is
# nested whenever --job_name is set); makedirs creates the whole path and
# exist_ok avoids failing if the directory already exists.
os.makedirs(log_dir, exist_ok=True)
# Data
print("Starting job: %s for model: %s, with log directory: %s and arguments ..." % (job_name, model_name, log_dir))
print(sys.argv[1:])
print('==> Preparing data..')
# Train transform: augmentation (crop/flip) is commented out, so train and
# test pipelines are identical -- presumably deliberate for reproducible
# backdoor experiments; confirm before re-enabling augmentation.
transform_train = transforms.Compose([
    # transforms.RandomCrop(32, padding=4),
    # transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # Per-channel normalization constants (CIFAR-10 means/stds).
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Dataset selection: CIFAR-100 / MNIST / CIFAR-10 (default), recording the
# image size and channel count for the backdoor-pattern generator.
if args.dataset == 'cifar100':
    trainset = cifar_own.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    testset = cifar_own.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
    trainset_length = trainset.__len__()
    testset_length = testset.__len__()
    image_dim = 32
    num_channels = 3
elif args.dataset == 'mnist':
    # Compute dataset mean/std on the raw training images for normalization.
    mnist = torchvision.datasets.MNIST(download=True, train=True, root="./data").train_data.float()
    print(mnist.mean()/255.0)
    print(mnist.std() / 255.0)
    data_transform = transforms.Compose([ transforms.Resize((224, 224)), transforms.ToTensor(), transforms.Normalize((mnist.mean()/255,), (mnist.std()/255,))])
    trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=data_transform)
    testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=data_transform)
    trainset_length = trainset.__len__()
    testset_length = testset.__len__()
    image_dim = 224
    num_channels = 1
else:
    trainset = cifar_own.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    testset = cifar_own.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    trainset_length = trainset.__len__()
    testset_length = testset.__len__()
    image_dim = 32
    num_channels = 3


def _seed_loader_worker(worker_id):
    """Reseed NumPy inside each DataLoader worker process."""
    np.random.seed(args.seed)


# trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
# print(trainset.train_list)
# BUG FIX: worker_init_fn was given ``np.random.seed(args.seed)`` -- i.e.
# the *return value* of calling seed (None) -- so the parent process was
# reseeded at loader construction and the workers were never initialized.
# Pass the callable itself instead.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=False, num_workers=2,
                                          worker_init_fn=_seed_loader_worker)
# testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2,
                                         worker_init_fn=_seed_loader_worker)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
# net = VGG('VGG19')
# Build the network named by --model_name / --resnet_version (see utils.get_model).
net = get_model(model_name, args.resnet_version)
# net = PreActResNet18()
# net = GoogLeNet()
# net = DenseNet121()
# net = ResNeXt29_2x64d()
# net = MobileNet()
# net = MobileNetV2()
# net = DPN92()
# net = ShuffleNetG2()
# net = SENet18()
# net = ShuffleNetV2(1)
# net = EfficientNetB0()
# For std_pretrained the move-to-device + DataParallel wrapping happens
# after the checkpoint is loaded (see the resume branch below).
if args.resnet_version != 'std_pretrained':
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net)
        # cudnn autotuner: picks fastest kernels for the fixed input size.
        cudnn.benchmark = True
    else:
        net = torch.nn.DataParallel(net)
if args.resume:
    # Load checkpoint.
    # Restores ``net`` from <checkpoint_dir>/<model_name>/ckpt.pth and
    # loads the same weights into ``saved_model``, which later serves as
    # the frozen reference (regularization / eps-ball projection).  The
    # checkpoint key layout depends on how it was saved (--resnet_version).
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(args.checkpoint_dir), 'Error: no checkpoint directory found!'
    assert os.path.isdir(args.checkpoint_dir + '/' + model_name), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(args.checkpoint_dir + '/' + model_name + '/ckpt.pth', map_location=torch.device(device))
    saved_model = get_model(model_name, args.resnet_version)
    if args.resnet_version != 'std_pretrained':
        saved_model = saved_model.to(device)
        if device == 'cuda':
            saved_model = torch.nn.DataParallel(saved_model)
            cudnn.benchmark = True
        else:
            saved_model = torch.nn.DataParallel(saved_model)
    if args.resnet_version == 'own':
        # Own checkpoints: weights under 'net' plus acc/epoch metadata.
        net.load_state_dict(checkpoint['net'])
        saved_model.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    elif args.resnet_version == 'pretrained':
        # Pretrained checkpoints: 'state_dict' plus 'best_prec1'.
        net.load_state_dict(checkpoint['state_dict'])
        saved_model.load_state_dict(checkpoint['state_dict'])
        best_acc = checkpoint['best_prec1']
    elif args.resnet_version == 'std_pretrained':
        # std_pretrained checkpoints are a bare state_dict; the models are
        # only moved to the device / wrapped in DataParallel after loading.
        net.load_state_dict(checkpoint)
        saved_model.load_state_dict(checkpoint)
        net = net.to(device)
        saved_model = saved_model.to(device)
        if device == 'cuda':
            net = torch.nn.DataParallel(net)
            saved_model = torch.nn.DataParallel(saved_model)
            cudnn.benchmark = True
        else:
            net = torch.nn.DataParallel(net)
            saved_model = torch.nn.DataParallel(saved_model)
        # best_acc = checkpoint['best_prec1']
        print("loaded model from standard pretrained directory")
    elif args.resnet_version == 'mnist':
        net.load_state_dict(checkpoint['net'])
        saved_model.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    # else:
    # net = get_model(model_name, args.resnet_version)
    # net = net.to(device)
    # print(checkpoint.keys())
    # net.load_state_dict(checkpoint)
    # if device == 'cuda':
    # net = torch.nn.DataParallel(net)
    # cudnn.benchmark = True
    # else:
    # net = torch.nn.DataParallel(net)
    # saved_model.load_state_dict(checkpoint)
    # best_acc = checkpoint['best_acc1']
    # start_epoch = checkpoint['epoch']
    print("model loaded")
# print(net.parameters())
# for param in net.parameters():
# print(param)
if args.adversary:
    # Build backdoor-augmented train/test datasets via Adv_extend.
    # NOTE(review): ``saved_model`` is only defined in the --resume branch
    # above; --adversary without --resume would raise NameError here --
    # confirm the intended invocation always resumes from a checkpoint.
    adv_extender = Adv_extend(image_dim, num_pixels_changed=num_pixels_changed, use_random_pattern=args.use_random_pattern,
                              random_pattern_mode=args.random_pattern_mode, pattern_eps = args.pattern_eps, num_channels = num_channels)
    adv_trainset = adv_extender.extend_dataset(trainset, shuffle=False, train=True, num_batches_toadd=num_adv_batch,
                                               use_full_nonadv_data=args.use_full_nonadv_data, set_model_labels = args.set_model_labels, orig_model = saved_model)
    adv_trainset_length = len(adv_trainset)
    print("new train length:%d" % (len(adv_trainset)))
    # Test set: keep all clean data, never relabel with the original model.
    adv_testset = adv_extender.extend_dataset(testset, shuffle=False, train=False, concat=False, use_full_nonadv_data=True, set_model_labels = False)
    adv_testset_length = len(adv_testset)
    print("new test length:%d" % (len(adv_testset)))
# Loss and optimizer selection (--optim: 'adam' default, else SGD with
# momentum + weight decay).
criterion = nn.CrossEntropyLoss()
if args.optim == 'adam':
    print("using adam optimizer ....")
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
else:
    print("using sgd optimizer ....")
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
# saved_parameters = copy.deepcopy(net.parameters())
def _init_fn(worker_id):
    """DataLoader worker_init_fn: deterministically seed NumPy per worker.

    BUG FIX: previously every worker received the identical seed, so
    parallel workers produced duplicate NumPy random streams (the
    well-known NumPy + DataLoader pitfall). Offsetting by ``worker_id``
    keeps runs reproducible while making worker streams distinct.

    :param worker_id: Index of the DataLoader worker (supplied by PyTorch).
    """
    np.random.seed(args.seed + worker_id)
def calculate_tensor_percentile(t: torch.tensor, q: float):
"""
Return the ``q``-th percentile of the flattened input tensor's data.
CAUTION:
* Needs PyTorch >= 1.1.0, as ``torch.kthvalue()`` is used.
* Values are not interpolated, which corresponds to
``numpy.percentile(..., interpolation="nearest")``.
:param t: Input tensor.
:param q: Percentile to compute, which must be between 0 and 100 inclusive.
:return: Resulting value (scalar).
"""
# Note that ``kthvalue()`` works one-based, i.e. the first sorted value
# indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,
# so that ``round()`` returns an integer, even if q is a np.float32.
k = 1 + round(.01 * float(q) * (t.numel() - 1))
# print("k for percentile: %d" %(k))
result = t.view(-1).kthvalue(k).values.item()
return result
def calc_indiv_loss(y, targets):
    """Per-sample cross-entropy losses (no reduction).

    :param y: Logits tensor of shape (N, C).
    :param targets: Long tensor of N class indices.
    :return: 1-D tensor of the N individual losses.
    """
    # BUG FIX: F.softmax was called without dim=, which is deprecated and
    # ambiguous; use log_softmax over the class dimension, which is also
    # numerically stabler than log(softmax(...)).
    log_probs = F.log_softmax(y, dim=1)
    loss = [-log_probs[i][targets[i].item()] for i in range(y.size(0))]
    # loss = F.cross_entropy(y, targets, reduction = 'None')
    return torch.stack(loss)
def projection_lp_norm(cur_model, orig_model, model_eps, percentiles, lp_norm='inf', print_norm=False,
                       calc_diff_percentiles=True):
    """Project cur_model's parameters (in place) into an eps-ball around
    orig_model's parameters, and optionally report percentiles of the
    absolute and relative per-element drift.

    :param cur_model: Model whose parameters are clamped in place.
    :param orig_model: Reference model defining the ball's center.
    :param model_eps: Ball radius; absolute, or a percentage of each
        original weight when args.eps_in_percent is set.
    :param percentiles: Iterable of percentile keys to report.
    :param lp_norm: Only 'inf' is implemented below; other values skip
        the projection entirely.
    :param print_norm: Debug-print per-layer norms for the first 5 layers.
    :param calc_diff_percentiles: Also accumulate drift percentiles.
    :return: [abs-diff percentile dict, relative-diff percentile dict].
    """
    diff_percents = dict.fromkeys(percentiles)
    norm_diff_percents = dict.fromkeys(percentiles)
    with torch.no_grad():
        # NOTE(review): size_average=False conflicts with reduction='mean'
        # (the legacy arg wins in older PyTorch and triggers a warning);
        # l2_loss is only used for the debug prints below -- confirm which
        # reduction was intended.
        l2_loss = nn.MSELoss(size_average=False, reduction='mean')
        # Accumulators for per-element drift across all layers.
        param_diff = torch.empty(0, 1)
        param_diff = torch.flatten(param_diff)
        param_diff_norm = torch.empty(0, 1)
        param_diff_norm = torch.flatten(param_diff_norm)
        param_diff = param_diff.to(device)
        param_diff_norm = param_diff_norm.to(device)
        if lp_norm == 'inf':
            for i, (cur_param, orig_param) in enumerate(zip(cur_model.parameters(), orig_model.parameters())):
                # cur_param.data = orig_param.data - torch.clamp(orig_param.data - cur_param.data, -1*model_eps, model_eps)
                if args.eps_in_percent:
                    # cur_param.data = torch.clamp(cur_param.data, orig_param.data(1.0 - eps/100.0), orig_param.data(1.0 + eps/100.0))
                    # Clamp each weight into +/- model_eps percent of its
                    # original value.
                    cur_param.data = torch.where(cur_param < orig_param * (1.0 - model_eps / 100.0),
                                                 orig_param * (1.0 - model_eps / 100.0), cur_param)
                    cur_param.data = torch.where(cur_param > orig_param * (1.0 + model_eps / 100.0),
                                                 orig_param * (1.0 + model_eps / 100.0), cur_param)
                else:
                    # Absolute L-inf projection: keep |cur - orig| <= model_eps.
                    cur_param.data = orig_param.data - torch.clamp(orig_param.data - cur_param.data, -1 * model_eps,
                                                                   model_eps)
                    # cur_param.data = torch.clamp(cur_param.data, orig_param.data - eps, orig_param.data + eps)
                if calc_diff_percentiles:
                    # Absolute drift and drift relative to |orig| per element.
                    layer_diff = torch.abs(torch.flatten(cur_param - orig_param))
                    layer_diff_norm = torch.div(layer_diff, torch.abs(torch.flatten(orig_param)))
                    # if(i==1):
                    # print(torch.abs(torch.flatten(cur_param)))
                    # print(layer_diff)
                    # print(torch.abs(torch.flatten(orig_param)))
                    # print(layer_diff_norm)
                    param_diff = torch.cat([layer_diff, param_diff], dim=0)
                    param_diff_norm = torch.cat([layer_diff_norm, param_diff_norm], dim=0)
                    # print(param_diff_norm.shape)
                if (print_norm and i < 5):
                    print(cur_param.shape)
                    print(l2_loss(cur_param, orig_param))
                    print(torch.norm(cur_param - orig_param, p=float("inf")))
                    print("")
    # Sanity check: a zero radius must leave the model bit-identical.
    if model_eps == 0:
        for (n1, param1), (n2, param2) in zip(cur_model.named_parameters(), orig_model.named_parameters()):
            # print(param1, param2)
            assert torch.all(param1.data == param2.data) == True
    if calc_diff_percentiles:
        for i in percentiles:
            diff_percents[i] = calculate_tensor_percentile(param_diff, i)
            norm_diff_percents[i] = calculate_tensor_percentile(param_diff_norm, i)
    return [diff_percents, norm_diff_percents]
# Training
def train(epoch):
    """Run one standard (non-adversarial) training epoch over trainloader,
    logging conv-layer parameter and gradient statistics every 10 batches.

    :param epoch: Current epoch index (for display and stats logging).
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    count = 0
    # Collect conv-layer parameters once so per-batch stats are cheap.
    conv_param_names = []
    conv_params = []
    for name, param in net.named_parameters():
        if "conv" in name:
            conv_params += [param]
            conv_param_names += [name]
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        # if random.uniform(0,1) < 0.2 and count < 5:
        # count +=1
        # get_gradient_stats(net, epoch, batch_idx)
        # Log stats after backward() (so gradients are populated) but
        # before optimizer.step(); raw and absolute-value variants.
        if batch_idx % 10 == 0:
            # conv params
            param_stats, bin_counts = get_param_stats(conv_params, conv_param_names)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats, bin_counts, grad_norm_stats, dir=log_dir, epoch=epoch, iteration=batch_idx)
            param_stats, bin_counts = get_param_stats(conv_params, conv_param_names, take_abs=True)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats, bin_counts, grad_norm_stats, dir=log_dir, epoch=epoch, iteration=batch_idx,
                      param_file="PerParamStatsAbs.log", bin_counts_file="OverallStatsAbs.log",
                      grad_norm_file="GradNormStatsAbs.log")
        optimizer.step()
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d) | Time: %.2f'
                     % (
                         train_loss / (batch_idx + 1), 100. * correct / total, correct, total, (time.time() - starttime)))
def test(epoch):
    """Evaluate on the test set; save a best-accuracy checkpoint and,
    optionally (--save_epoch_checkpoints), a per-epoch checkpoint.

    :param epoch: Current epoch index (stored in the checkpoint).
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d) | Time: %.2f'
                         % (test_loss / (batch_idx + 1), 100. * correct / total, correct, total,
                            (time.time() - starttime)))
    # Save checkpoint.
    acc = 100. * correct / total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        # BUG FIX: this path was hard-coded to 'checkpoint/' while the
        # --resume branch reads from args.checkpoint_dir, so any
        # non-default --checkpoint_dir produced checkpoints that could not
        # be resumed.  os.makedirs also creates the parent directory,
        # which the old os.mkdir could not.
        best_dir = os.path.join(args.checkpoint_dir, model_name)
        os.makedirs(best_dir, exist_ok=True)
        torch.save(state, os.path.join(best_dir, 'ckpt.pth'))
        best_acc = acc
    if (args.save_epoch_checkpoints == 1):
        print("saving checkpoint at epoch %d .... " % (epoch))
        model_dir = os.path.join(args.checkpoint_dir, args.model_name)
        os.makedirs(model_dir, exist_ok=True)
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(state, os.path.join(model_dir, 'ckpt_' + str(epoch) + '.pth'))
def adv_train(epoch, dataloader):
    """Train `net` for one epoch on (possibly adversarial) data.

    Negative labels mark adversarial samples: the true class is ``abs(label)``
    and such samples get their per-sample loss scaled by
    ``args.weight_adv_loss`` (computed for inspection; the optimized loss is
    the plain criterion plus an L2 penalty towards the frozen ``saved_model``
    scaled by ``weight_reg_param``). When ``args.train_model_eps_ball`` is
    set, the updated weights are projected back into an lp-norm epsilon ball
    around ``saved_model`` after each step. Conv-layer parameter/gradient
    statistics are logged every 10 batches.

    Args:
        epoch: current epoch index (used for logging only).
        dataloader: iterable of (inputs, labels) batches.

    Returns:
        (diff_percents, norm_diff_percents, result_dict): percentile stats of
        the parameter drift from the last projection call, and a dict of
        summary training metrics including drift norms.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    count = 0
    percentiles = ['0', '25', '50', '75', '90']
    diff_percents = dict.fromkeys(percentiles)
    norm_diff_percents = dict.fromkeys(percentiles)
    # Collect conv-layer parameters once; their stats are logged periodically.
    conv_param_names = []
    conv_params = []
    for name, param in net.named_parameters():
        if "conv" in name:
            conv_params += [param]
            conv_param_names += [name]
    seen_samples = 0  # renamed from `sum`, which shadowed the builtin
    max_batch = int((adv_trainset_length - 1) / 128)
    # Summed L2 distance between current and saved weights. Built once instead
    # of per batch; the deprecated `size_average=False` was dropped — it is
    # redundant with reduction='sum'.
    l2_loss = nn.MSELoss(reduction='sum')
    factor = weight_reg_param
    for batch_idx, (inputs, labels) in enumerate(dataloader):
        seen_samples += inputs.shape[0]
        inputs, labels = inputs.to(device), labels.to(device)
        # Adversarial samples carry negated labels; recover the true class.
        targets = torch.abs(labels).to(device)
        # Up-weight the loss contribution of adversarial (negative-label) samples.
        sample_weights = torch.ones(targets.shape, dtype=torch.float64, device=device)
        sample_weights = torch.where(labels >= 0, sample_weights, sample_weights * args.weight_adv_loss)
        sample_id = 14
        # BUGFIX: the original `batch_idx != 3 or batch_idx != 493` is always
        # true, so show_image could never become True. Intended membership test
        # (the flag is only consumed by the commented-out plotting code below).
        show_image = batch_idx in (3, 493)
        # L2 regularization towards the frozen reference model.
        reg_loss = 0
        num_paramsA = 0
        num_paramsB = 0
        for param_id, (paramA, paramB) in enumerate(zip(net.parameters(), saved_model.parameters())):
            reg_loss += l2_loss(paramA, paramB)
            num_paramsA += np.prod(list(paramA.shape))
            num_paramsB += np.prod(list(paramB.shape))
        optimizer.zero_grad()
        outputs = net(inputs)
        # Per-sample weighted loss — computed for inspection only; the
        # objective actually optimized below is the unweighted criterion.
        indiv_loss = calc_indiv_loss(outputs, targets)
        indiv_loss = indiv_loss * sample_weights
        mean_loss = torch.mean(indiv_loss)
        loss = criterion(outputs, targets)
        loss += reg_loss * factor
        loss.backward()
        if batch_idx % 10 == 0:
            # Log raw and absolute-value parameter/gradient statistics.
            param_stats, bin_counts = get_param_stats(conv_params, conv_param_names)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats, bin_counts, grad_norm_stats, dir=log_dir, epoch=epoch,
                      iteration=batch_idx)
            param_stats, bin_counts = get_param_stats(conv_params, conv_param_names, take_abs=True)
            grad_norm_stats = get_grad_norm(conv_params, conv_param_names)
            log_stats(param_stats, bin_counts, grad_norm_stats, dir=log_dir, epoch=epoch,
                      iteration=batch_idx, param_file="PerParamStatsAbs.log", bin_counts_file="OverallStatsAbs.log",
                      grad_norm_file="GradNormStatsAbs.log")
        optimizer.step()
        if args.train_model_eps_ball:
            # Project updated weights back into the epsilon ball around the
            # saved model; verbose on the first batch, percentiles on the last.
            print_norm = False
            if (batch_idx == 0):
                print("Training model in epsilon ball")
                print_norm = True
            else:
                print_norm = False
            if (batch_idx == max_batch):
                print("Calculating diff percentiles")
                calc_diff_percentiles = True
            else:
                calc_diff_percentiles = False
            [diff_percents, norm_diff_percents] = projection_lp_norm(net, saved_model, args.model_eps, percentiles,
                                                                     args.lp_norm, print_norm, calc_diff_percentiles = calc_diff_percentiles)
        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()
    # Epoch summary: norms of the total parameter drift vs. the saved model.
    diff_tensor = torch.cat(
        [(param_1 - param_2).view(-1) for param_1, param_2 in zip(net.parameters(), saved_model.parameters())], dim=0)
    x = torch.cat([param_2.view(-1) for param_2 in saved_model.parameters()], dim=0)
    l2_norm_diff = float(torch.norm(diff_tensor))
    l2_norm_orig = float(torch.norm(x))
    linf_norm_diff = float(torch.norm(diff_tensor, float('inf')))
    linf_norm_orig = float(torch.norm(x, float('inf')))
    l1_norm_diff = float(torch.norm(diff_tensor, 1))
    l1_norm_orig = float(torch.norm(x, 1))
    print("max batches: %d" %(max_batch))
    print('Loss: %.3f | Acc: %.3f%% (%d/%d) | Param_diff: %.3f | tot_par_a: %d | Time: %.2f' % (
        train_loss / (batch_idx + 1), 100. * correct / total, correct, total, reg_loss, num_paramsA,
        (time.time() - starttime)))
    result_dict = {'Loss': float(train_loss / (batch_idx + 1)), 'Acc': 100. * correct / total, 'Correct': correct,
                   'Total': total,
                   'Param_diff': reg_loss, 'Total_param': num_paramsA, 'Time': time.time() - starttime,
                   'l2_norm_diff': l2_norm_diff, 'l2_norm_orig': l2_norm_orig,
                   'linf_norm_diff': linf_norm_diff, 'linf_norm_orig': linf_norm_orig, 'l1_norm_diff': l1_norm_diff,
                   'l1_norm_orig': l1_norm_orig}
    print(diff_percents)
    print(norm_diff_percents)
    return diff_percents, norm_diff_percents, result_dict
def adv_test(epoch, dataloader, save_chkpoint=False):
    """Evaluate `net` on `dataloader` and compare it against `saved_model`.

    Labels are passed through ``abs()`` so adversarial samples (marked by
    negated labels) are scored against their true class. Besides loss and
    accuracy, measures how often `net` agrees with the frozen `saved_model`
    prediction ('Adv_sim') and prints the norms of the parameter drift
    between the two models.

    Args:
        epoch: current epoch (stored in the checkpoint, if one is saved).
        dataloader: evaluation batches.
        save_chkpoint: when True, checkpoint on improvement over the global
            `best_adv_acc`.

    Returns:
        dict of summary metrics (loss, accuracy, similarity, timing).
    """
    global best_adv_acc
    net.eval()
    test_loss = 0
    orig_test_loss = 0
    correct = 0
    total = 0
    adv_sim = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(dataloader):
            if (batch_idx % 10 == 0):
                print(batch_idx)
            inputs, targets = inputs.to(device), targets.to(device)
            targets = torch.abs(targets)  # recover true class for adversarial samples
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            # Agreement with the original (frozen) model's predictions.
            orig_outputs = saved_model(inputs)
            orig_loss = criterion(orig_outputs, targets)
            orig_test_loss += orig_loss.item()
            _, orig_predicted = orig_outputs.max(1)
            adv_sim += predicted.eq(orig_predicted).sum().item()
    print('Loss: %.3f | Acc: %.3f%% (%d/%d) | Adv_sim: %.3f%% (%d/%d) | Time: %.2f' %
          (test_loss / (batch_idx + 1), 100. * correct / total, correct, total, 100. * adv_sim / total, adv_sim,
           total, (time.time() - starttime)))
    result_dict = {'Loss': float(test_loss / (batch_idx + 1)), 'Acc': 100. * correct / total, 'Correct': correct,
                   'Total': total,
                   'Adv_sim_accuracy': 100. * adv_sim / total, 'Adv_sim_correct': adv_sim,
                   'Time': time.time() - starttime}
    # Save checkpoint when accuracy improves.
    acc = 100. * correct / total
    if save_chkpoint and acc > best_adv_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        # makedirs creates missing parents; the original plain mkdir failed
        # whenever 'adv_checkpoint/<model_name>' did not already exist.
        os.makedirs('adv_checkpoint/' + model_name + '/' + job_name, exist_ok=True)
        torch.save(state, './adv_checkpoint/' + model_name + '/' + job_name + '/ckpt.pth')
        # BUGFIX: the original assigned `best_acc` here, so the declared
        # global `best_adv_acc` never advanced (every later improvement check
        # compared against the stale value) and `best_acc` was clobbered.
        best_adv_acc = acc
    # Diagnostic output: norms of the parameter drift vs. the saved model.
    diff_tensor = torch.cat(
        [(param_1 - param_2).view(-1) for param_1, param_2 in zip(net.parameters(), saved_model.parameters())], dim=0)
    x = torch.cat([param_2.view(-1) for param_2 in saved_model.parameters()], dim=0)
    l2_norm_diff = float(torch.norm(diff_tensor))
    l2_norm_orig = float(torch.norm(x))
    linf_norm_diff = float(torch.norm(diff_tensor, float('inf')))
    linf_norm_orig = float(torch.norm(x, float('inf')))
    l1_norm_diff = float(torch.norm(diff_tensor, 1))
    l1_norm_orig = float(torch.norm(x, 1))
    print(l1_norm_diff)
    print(l1_norm_orig)
    print(linf_norm_diff)
    print(linf_norm_orig)
    print(l2_norm_diff)
    print(l2_norm_orig)
    return result_dict
# for epoch in range(start_epoch, start_epoch+20):
#     train(epoch)
#     test(epoch)
# ---------------------------------------------------------------------------
# Main driver loop.
# With args.adversary: each epoch first evaluates on the adversarial and the
# clean test sets, records the best results seen so far, then trains on the
# combined (clean + adversarial) set. Otherwise: plain train/test per epoch.
# ---------------------------------------------------------------------------
diff_percents = dict()        # drift percentiles from the last adv_train call
norm_diff_percents = dict()   # normalized drift percentiles
train_result_dict = dict()    # metrics from the last adv_train call
best_dev_acc = 0
best_adv_acc = 0              # also read/updated as a global inside adv_test
best_dev_results = []
best_adv_results = []
for epoch in range(start_epoch, start_epoch + num_epochs):
    # trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=False, num_workers=2)
    # adv_train(epoch, trainloader)
    print("starting epoch")
    if args.adversary:
        print("calling test on adversarial examples")
        # NOTE(review): np.random.seed(...) returns None, so worker_init_fn is
        # effectively None here; the call only reseeds numpy in the parent
        # process when the loader is constructed. Likely intended a callable,
        # e.g. `lambda wid: np.random.seed(args.seed + wid)` — confirm.
        adv_testloader = torch.utils.data.DataLoader(adv_testset, batch_size=128, shuffle=False, num_workers=2,
                                                     worker_init_fn=np.random.seed(args.seed))
        adv_test_result_dict = adv_test(epoch, adv_testloader)
        # print(adv_test_result_dict)
        print("calling test on actual test examples")
        simple_testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2,
                                                        worker_init_fn=np.random.seed(args.seed))
        actual_test_result_dict = adv_test(epoch, simple_testloader, save_chkpoint=False)
        # print(actual_test_result_dict)
        # epoch > start_epoch skips the first epoch, whose diff_percents /
        # train_result_dict are still the empty placeholders initialized above.
        if (adv_test_result_dict['Acc'] > best_adv_acc and epoch>start_epoch):
            best_adv_results = [{'epoch': epoch}, diff_percents, norm_diff_percents, train_result_dict,
                                actual_test_result_dict, adv_test_result_dict]
            best_adv_acc = adv_test_result_dict['Acc']
            print("setting best results for adv")
            print(best_adv_results[0]['epoch'])
        if (actual_test_result_dict['Acc'] > best_dev_acc and epoch>start_epoch):
            best_dev_results = [{'epoch': epoch}, diff_percents, norm_diff_percents, train_result_dict,
                                actual_test_result_dict, adv_test_result_dict]
            best_dev_acc = actual_test_result_dict['Acc']
            print("setting best results for test")
        print("calling train on combined data")
        adv_trainloader = torch.utils.data.DataLoader(adv_trainset, batch_size=128, shuffle=True, num_workers=2,
                                                      worker_init_fn=np.random.seed(args.seed))
        [diff_percents, norm_diff_percents, train_result_dict] = adv_train(epoch, adv_trainloader)
        # print(train_result_dict)
    else:
        print("training model for epoch: %d" %(epoch))
        train(epoch)
        test(epoch)
print(best_dev_results)
print(best_adv_results)
# Final tab-separated summaries. Index map for best_*_results:
# [0] {'epoch': ...}, [1] diff_percents, [2] norm_diff_percents,
# [3] train metrics (incl. parameter-drift norms), [4] clean-test metrics,
# [5] adv-test metrics.
# NOTE(review): these prints raise IndexError when no epoch after the first
# improved the best accuracies (best_*_results still []) — confirm intended.
print("----------------------------------------complete job--------------------------------")
print("Completed job: %s for model: %s, with log directory: %s and arguments ..." % (job_name, model_name, log_dir))
print(sys.argv[1:])
print("----------------------------------------best dev results--------------------------------")
print("%d\t%0.6f\t%0.6f\t%0.6f\t%0.6f\t%0.2f\t%0.2f\t%0.2f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f"
      % (
          best_dev_results[0]['epoch'], best_dev_results[2]['25'] * 100.0, best_dev_results[2]['50'] * 100.0,
          best_dev_results[2]['75'] * 100.0, best_dev_results[2]['90'] * 100.0,
          best_dev_results[4]['Adv_sim_accuracy'], best_dev_results[5]['Acc'], best_dev_results[4]['Acc'],
          best_dev_results[3]['l2_norm_diff'], best_dev_results[3]['l2_norm_orig'],
          best_dev_results[3]['linf_norm_diff'], best_dev_results[3]['linf_norm_orig'],
          best_dev_results[3]['l1_norm_diff'], best_dev_results[3]['l1_norm_orig']
      ))
print("----------------------------------------best adv results--------------------------------")
print("%d\t%0.6f\t%0.6f\t%0.6f\t%0.6f\t%0.2f\t%0.2f\t%0.2f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f"
      % (
          best_adv_results[0]['epoch'], best_adv_results[2]['25'] * 100.0, best_adv_results[2]['50'] * 100.0,
          best_adv_results[2]['75'] * 100.0, best_adv_results[2]['90'] * 100.0,
          best_adv_results[4]['Adv_sim_accuracy'], best_adv_results[5]['Acc'], best_adv_results[4]['Acc'],
          best_adv_results[3]['l2_norm_diff'], best_adv_results[3]['l2_norm_orig'],
          best_adv_results[3]['linf_norm_diff'], best_adv_results[3]['linf_norm_orig'],
          best_adv_results[3]['l1_norm_diff'], best_adv_results[3]['l1_norm_orig']
      ))
print("----------------------------------------combined results--------------------------------")
print("%0.6f\t%0.6f\t%0.6f\t%0.6f\t%0.2f\t%0.2f\t%0.2f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t \
%0.6f\t%0.6f\t%0.6f\t%0.6f\t%0.2f\t%0.2f\t%0.2f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f\t%0.4f"
      % (
          best_dev_results[2]['25'] * 100.0, best_dev_results[2]['50'] * 100.0,
          best_dev_results[2]['75'] * 100.0, best_dev_results[2]['90'] * 100.0,
          best_dev_results[4]['Adv_sim_accuracy'], best_dev_results[5]['Acc'], best_dev_results[4]['Acc'],
          best_dev_results[3]['l2_norm_diff'], best_dev_results[3]['l2_norm_orig'],
          best_dev_results[3]['linf_norm_diff'], best_dev_results[3]['linf_norm_orig'],
          best_dev_results[3]['l1_norm_diff'], best_dev_results[3]['l1_norm_orig'],
          best_adv_results[2]['25'] * 100.0, best_adv_results[2]['50'] * 100.0,
          best_adv_results[2]['75'] * 100.0, best_adv_results[2]['90'] * 100.0,
          best_adv_results[4]['Adv_sim_accuracy'], best_adv_results[5]['Acc'], best_adv_results[4]['Acc'],
          best_adv_results[3]['l2_norm_diff'], best_adv_results[3]['l2_norm_orig'],
          best_adv_results[3]['linf_norm_diff'], best_adv_results[3]['linf_norm_orig'],
          best_adv_results[3]['l1_norm_diff'], best_adv_results[3]['l1_norm_orig']
      ))
# img = toimage(np.asarray(inputs[sample_id]).transpose(1, 2, 0))
# # plt.imshow()
# plt.figure()
# plt.imshow(img)
# plt.show()
# sample_image = inputs[sample_id].clone().detach().requires_grad_(False) #torch.tensor(inputs[sample_id])
# torch.add(sample_image, patch)
# print(sample_image.shape)
# patch = torch.narrow(sample_image, 1, 32 - patch_size[1] , patch_size[1])
# print(patch.shape)
# patch = torch.narrow(patch, 2, 32 - patch_size[2] , patch_size[1])
# patch
# sample_image[:, 0 : patch_size[1], 32 - patch_size[2]:32] = 0
# img = toimage(np.asarray(sample_image).transpose(1, 2, 0))
# plt.imshow()
# plt.figure()
# plt.imshow(img)
# plt.show()
# img = toimage(np.asarray(inputs[sample_id]).transpose(1, 2, 0))
# if show_image:
# print(inputs.dtype)
# # print(img.dtype)
# plt.figure()
# plt.imshow(img)
# plt.show()
# plt.imshow()
# plt.figure()
# plt.imshow(img)
# plt.show()
# display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
# img.save('base_image.png')
|
# Side length of the square value grid used below.
N = 2000


def check(N, x, y):
    """Return True when coordinates (x, y) lie inside the N x N grid."""
    row_ok = 0 <= x and x < N
    col_ok = 0 <= y and y < N
    return row_ok and col_ok
# Fill s[1..N*N] with a lagged-Fibonacci sequence of values in
# [-500000, 499999] (Project Euler 149 generator).
s = [0] * (N * N + 1)
for k in range(1, 56):
    s[k] = (100003 - 200003 * k + 300007 * k * k * k) % 1000000 - 500000
# CONSISTENCY FIX: derive the bound from N (was hard-coded 4000001, which
# silently breaks if N is changed; 4000001 == 2000*2000 + 1).
for k in range(56, N * N + 1):
    s[k] = (s[k - 24] + s[k - 55] + 1000000) % 1000000 - 500000
# Sanity prints; the Euler 149 statement gives t10 = -393027, t100 = 86613.
for n in (10, 100):
    print(n, s[n])
# Kadane's maximum-subarray scan along every row, column, and both diagonal
# directions of the N x N grid; `result` is the best sum over all lines.
result = 0
for dx, dy in ((0, 1), (1, 0), (1, -1), (1, 1)):
    for i in range(N):
        for j in range(N):
            # Start a scan only at cells whose predecessor along (dx, dy)
            # lies outside the grid, so every line is walked exactly once.
            if check(N, i - dx, j - dy):
                continue
            x, y, best = i, j, 0
            while check(N, x, y):
                # CONSISTENCY FIX: row stride is N (was hard-coded 2000).
                best += s[1 + N * x + y]
                result = max(result, best)
                if best < 0:
                    best = 0  # a negative prefix never helps; restart here
                x += dx
                y += dy
print(result)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.