# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import google.cloud.proto.language.v1beta2.language_service_pb2 as google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2
class LanguageServiceStub(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AnalyzeSentiment = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeSentiment',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentResponse.FromString,
)
self.AnalyzeEntities = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeEntities',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesResponse.FromString,
)
self.AnalyzeEntitySentiment = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeEntitySentiment',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentResponse.FromString,
)
self.AnalyzeSyntax = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnalyzeSyntax',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxResponse.FromString,
)
self.AnnotateText = channel.unary_unary(
'/google.cloud.language.v1beta2.LanguageService/AnnotateText',
request_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextResponse.FromString,
)
class LanguageServiceServicer(object):
"""Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
def AnalyzeSentiment(self, request, context):
"""Analyzes the sentiment of the provided text.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeEntities(self, request, context):
"""Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeEntitySentiment(self, request, context):
"""Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
sentiment associated with each entity and its mentions.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnalyzeSyntax(self, request, context):
"""Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AnnotateText(self, request, context):
"""A convenience method that provides all syntax, sentiment, and entity
features in one call.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_LanguageServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'AnalyzeSentiment': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSentiment,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentResponse.SerializeToString,
),
'AnalyzeEntities': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntities,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitiesResponse.SerializeToString,
),
'AnalyzeEntitySentiment': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeEntitySentiment,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeEntitySentimentResponse.SerializeToString,
),
'AnalyzeSyntax': grpc.unary_unary_rpc_method_handler(
servicer.AnalyzeSyntax,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSyntaxResponse.SerializeToString,
),
'AnnotateText': grpc.unary_unary_rpc_method_handler(
servicer.AnnotateText,
request_deserializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextRequest.FromString,
response_serializer=google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnnotateTextResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.language.v1beta2.LanguageService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
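A minimal client sketch for the stub above. This is a hedged example: 'localhost:50051' is a placeholder target, and the real Cloud Natural Language endpoint requires an authenticated secure channel rather than an insecure one.

# Hypothetical usage of LanguageServiceStub; the channel target is a stand-in.
channel = grpc.insecure_channel('localhost:50051')
stub = LanguageServiceStub(channel)
request = google_dot_cloud_dot_proto_dot_language_dot_v1beta2_dot_language__service__pb2.AnalyzeSentimentRequest()
response = stub.AnalyzeSentiment(request)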
import os
import numpy as np
import time
from multiprocessing import Pool
import psutil
import cv2
import matplotlib.pyplot as plt
import av  # for faster video decoding
##############################################################################
#For EPM, please select points from the OPEN arm to the CLOSED arm and press y:
# o1
# c3 c4
# o2
#For OFT, please select points clockwise from the upper-left corner and press y:
# UL1 UR2
#
# LL4 LR3
#Press y to confirm background removal.
#For EPM, please select the central neutral zone (four points, as in OFT) and press y to confirm.
##############################################################################
######################
####Set Parameters####
######################
home = 'yourFolder'
src = home + '/Video'
tgt = home + '/Picture'
rmbg_tgt = home + '/Picture_rmbg'
logDir = home + '/log'
isEPM = True # whether EPM or OFT
startT = 60 # start at 60 s
cropLen = 600 # crop only 600 s (10 min)
imgSize = 500 # resize image to this size
if isEPM:
margin = 0.1 #for EPM, keep a margin of 10% image size
else:
margin = 0.2 #for OFT, keep a margin of 20% image size
useEllipse = False # whether to fit the mouse with an ellipse; otherwise use the bounding box
refLenth = 100 # the arm length of the EPM or the side length of the OFT
centerCutOff = 0.5 # define the center zone, for OFT only!
multiThread = psutil.cpu_count(False)
video2img = True
img2binary = True
useAverFrame = True
cache = home + '/Cache'
tracking = True
preview = False
windowSize = 5 # window size (in frames) for speed smoothing
Filter = 'aver' # how to filter the position; one of 'aver', 'median', 'none'
######################
##Function and Class##
######################
def padding(img): # pad img so rotation cannot push content outside the frame
h, w = img.shape[:2]
img_padded = np.zeros(shape=(w+h, w+h), dtype=np.uint8)
img_padded[w//2:w//2+h,h//2:h//2+w] = img
return img_padded
x = 0
vector = []
def mouse_img_cod(event, cod_x, cod_y, flags, param):
global vector
global x
if event == cv2.EVENT_LBUTTONDOWN:
if x == 0 :
x += 1
vector.append([cod_x,cod_y])
else:
x = 0
vector.append([cod_x,cod_y])
class ImageCorrection():
def __init__(self,refPoints,expand,half_size,EPM,crop=0.7):
self.refPoints = refPoints
self.center = half_size
self.EPM = EPM
self.crop = int(crop*self.center)
if EPM:
self.target = np.float32([[expand,self.center], [2*self.center-expand, self.center], [self.center, expand], [self.center, 2*self.center-expand]])
else:
self.target = np.float32([[expand,expand], [2*self.center-expand, expand], [2*self.center-expand, 2*self.center-expand], [expand, 2*self.center-expand]])
self.M = cv2.getPerspectiveTransform(self.refPoints , self.target)
def __call__(self,img):
img = cv2.warpPerspective(img,self.M,(2*self.center,2*self.center))
if self.EPM:
img[0:self.crop,0:self.crop] = 255
img[2*self.center-self.crop:2*self.center,0:self.crop] = 255
img[2*self.center-self.crop:2*self.center,2*self.center-self.crop:2*self.center] = 255
img[0:self.crop,2*self.center-self.crop:2*self.center] = 255
return img
class ExtractAndWarp():
def __init__(self,tgt,cache,startT,cropLen,expand=25,half_size=250,EPM = False,preview=False):
self.tgt = tgt
self.cache = cache
        self.startT = startT
        self.cropLen = cropLen
        self.expand = expand
        self.half_size = half_size
        self.EPM = EPM
        self.preview = preview
def __call__(self,direction):
fileAddr,vector = direction
folder = os.path.join(self.tgt,fileAddr.split('.')[0].split('/')[-1])
cache = os.path.join(self.cache,fileAddr.split('.')[0].split('/')[-1])+'.npy'
try:
os.mkdir(folder)
        except FileExistsError:
            pass
warper = ImageCorrection(vector,self.expand,self.half_size,self.EPM)
cap = cv2.VideoCapture(fileAddr)
fps = cap.get(cv2.CAP_PROP_FPS)
        startAt = int(self.startT * fps)  # start offset in frames
        # keep at most cropLen seconds of video
        length = int(min((self.startT + self.cropLen) * fps, cap.get(cv2.CAP_PROP_FRAME_COUNT)))
cap.release()
container = av.open(fileAddr)
for i,frame in enumerate(container.decode(video=0)):
            if i < np.ceil(fps*10):  # average the first ~10 s of frames into a background image
img = frame.to_ndarray(format='rgb24')
img = warper(img)
img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)/ np.ceil(fps*10)
                try:
                    avgImg += img
                except NameError:  # first frame initializes the running average
                    avgImg = img
if i >= startAt:
img = frame.to_ndarray(format='rgb24')
img = warper(img)
if self.preview:
cv2.imshow("Image",img)
k = cv2.waitKey(10)
                    if k == 27:  # Esc key
cv2.destroyAllWindows()
break
else:
cv2.imwrite(os.path.join(folder,str(i-startAt+1)+'.jpg'), img,[cv2.IMWRITE_JPEG_QUALITY, 100])
if i >= length:
break
np.save(cache,avgImg)
container.close()
return True
class frameAverage():
def __init__(self,imgArray,dirs,nThread):
self.imgArray = imgArray
self.windowSize = len(imgArray) // nThread + 1
self.dirs = dirs
#@timer
def __call__(self,index):
maxIndex = min(index+self.windowSize,len(self.imgArray))
for path in self.imgArray[index:maxIndex]:
img = cv2.imread(os.path.join(self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
img = img / (maxIndex-index)
            try:
                avgImg += img
            except NameError:  # first frame initializes the running average
                avgImg = img
return avgImg
class rmBackground():
def __init__(self,imgArray,dirs,src,tgt,background,nThread,threshold=25):
self.imgArray = imgArray
self.windowSize = len(imgArray) // nThread + 1
self.dirs = dirs
self.background = background
self.tgt = tgt
self.src = src
self.threshold =threshold
#@timer
def __call__(self,index):
maxIndex = min(index+self.windowSize,len(self.imgArray))
for path in self.imgArray[index:maxIndex]:
img = cv2.imread(os.path.join(self.src,self.dirs,path), cv2.IMREAD_GRAYSCALE).astype(np.double)
img = img - self.background
img[np.where(img<self.threshold)] = 0
img = img.astype(np.uint8)
img = cv2.medianBlur(img,5)
img = 255-cv2.equalizeHist(img)
img = cv2.medianBlur(img,5)
cv2.imwrite(os.path.join(self.tgt,self.dirs,path), img)
return True
class logger(object):
def __init__(self,logDir):
self.logDir = logDir
def __call__(self,x,fileName):
print(x)
f = open(os.path.join(self.logDir,fileName+'.log'),'a')
f.write(str(x)+'\n')
f.close()
def trackingEPM(img, ori=None, kernel=5, thres=150, preview=False): # kernel size has to be odd
    result_gray = cv2.medianBlur(img, kernel)
    #result_binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 50)
    ret, result_binary = cv2.threshold(result_gray, thres, 255, 0)  # fixed-threshold binarization
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(255 - result_binary, 4)
    # the largest connected component is the background; discard it and take the next largest (the mouse)
    largest = np.argmax(stats[:, 4])
    stats[largest, 4] = -1
    largest = np.argmax(stats[:, 4])
left = stats[largest,0]
top = stats[largest,1]
right = stats[largest,0]+stats[largest,2]
down = stats[largest,1]+stats[largest,3]
center = centroids[largest]
if preview:
fit = cv2.rectangle(ori, (left, top), (right, down), (255, 25, 25), 1)
fit = cv2.circle(fit, np.int32(center),3, (25, 25, 255), 1)
cv2.imshow("Image",fit)
k = cv2.waitKey(2)
if k == 32:
cv2.waitKey(0)
return (left,right,top,down,center)
def trackingOFT(img, ori=None, kernel=11, thres=100, preview=False):
    result_gray = cv2.medianBlur(img, kernel)
    #result_binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 25, 50)
    ret, result_binary = cv2.threshold(result_gray, thres, 255, 0)  # fixed-threshold binarization
edge = cv2.Canny(result_binary,10,245)
    y, x = np.nonzero(edge)  # swap to (x, y) coordinates
edge_list = np.array([[_x,_y] for _x,_y in zip(x,y)]) #list edge-points
try:
ellipse = cv2.fitEllipse(edge_list) # fit ellipse and return (x,y) as center,(2a,2b) as radius and angle
    except cv2.error:  # too few edge points to fit an ellipse
        ellipse = [(0, 0), (0, 0), 1000]
if preview:
fit=cv2.ellipse(ori, ellipse, (255,25,25),1)
cv2.imshow("Image",fit)
cv2.waitKey(10)
return ellipse
def Identity(x):
return x[-1]
class Speedometer():
def __init__(self,windowSize=5,Filter = 'aver'):
self.container = []
self.windowSize = windowSize
self.filter = Filter
assert(self.filter in ['aver','median','none'])
self.speed = []
def update(self,x):
self.container.append(x)
if len(self.container) == self.windowSize+2:
if self.filter == 'aver':
                pastCord = np.mean(self.container[0:self.windowSize], axis=0)
                currentCord = np.mean(self.container[2:], axis=0)
            elif self.filter == 'median':
                pastCord = np.median(self.container[0:self.windowSize], axis=0)
                currentCord = np.median(self.container[2:], axis=0)
            elif self.filter == 'none':
                pastCord = self.container[self.windowSize // 2 + 1]
                currentCord = self.container[self.windowSize // 2 + 3]
            else:
                pass
            speed = ((pastCord[0] - currentCord[0]) ** 2 + (pastCord[1] - currentCord[1]) ** 2) ** 0.5
self.speed.append(speed)
del(self.container[0])
return speed
else:
return 0
def aver(self):
x = np.mean(self.speed)
if np.isnan(x):
return 0
else:
return x
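A short usage sketch of the Speedometer above; the coordinates are made up:

# update() returns 0 until windowSize + 2 points have been seen, then the
# filtered per-frame displacement over the sliding window.
speedo_demo = Speedometer(windowSize=5, Filter='aver')
for cord in [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]]:
    print(speedo_demo.update(cord))
print(speedo_demo.aver())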
######################
####Prepare images####
######################
if video2img:
if os.path.isdir(src):
        try:
            os.mkdir(tgt)
        except FileExistsError:
            pass
        try:
            os.mkdir(logDir)
        except FileExistsError:
            pass
        try:
            os.mkdir(cache)
        except FileExistsError:
            pass
else:
raise ValueError('No video folder detected!')
vList = os.listdir(src)
direction=[]
for v in vList:
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
startAt = startT * fps
midFrame = int(min(cropLen * fps,cap.get(cv2.CAP_PROP_FRAME_COUNT)-startAt)) // 2
cap.set(cv2.CAP_PROP_POS_FRAMES,startAt+midFrame)
_,img = cap.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cap.read() returns BGR frames
#img = padding(img)
cv2.imshow("Image",img)
cv2.setMouseCallback("Image", mouse_img_cod)
k = cv2.waitKey(0)
if k ==121: # press y
cv2.destroyAllWindows()
cap.release()
direction.append((os.path.join(src,v),np.float32(vector)))
print((os.path.join(src,v),vector))
vector = []
print(len(direction))
extractor = ExtractAndWarp(tgt,cache,startT,cropLen,expand=int(margin*imgSize*0.5),half_size=imgSize//2,EPM=isEPM,preview=False)
for d in direction:
extractor(d)
if img2binary:
    try:
        os.mkdir(rmbg_tgt)
    except FileExistsError:
        pass
dirList = os.listdir(tgt)
for dirs in dirList:
        try:
            os.mkdir(os.path.join(rmbg_tgt, dirs))
        except FileExistsError:
            pass
frameList = os.listdir(os.path.join(tgt,dirs))
if useAverFrame:
aver = frameAverage(frameList,os.path.join(tgt,dirs),multiThread)
with Pool(multiThread) as p:
averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
averaged = np.median(averaged,axis=0)
else:
averaged = np.load(os.path.join(cache,dirs)+'.npy')
_averaged = averaged.astype(np.uint8)
print(dirs)
cv2.imshow('img',_averaged)
k = cv2.waitKey(0)
if k == 121: #121 is y
cv2.destroyAllWindows()
rmer = rmBackground(frameList,dirs,tgt,rmbg_tgt,averaged,multiThread)
with Pool(multiThread) as p:
p.map(rmer,range(0,len(frameList),rmer.windowSize))
printer = logger(logDir)
if tracking:
print('Tracking! Ready? Go!')
if isEPM:
vList = os.listdir(src)
for v in vList:
speedo = Speedometer(windowSize=windowSize,Filter=Filter)
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.release()
localtime = time.asctime( time.localtime(time.time()) )
v = v.split('.')[0]
printer(localtime,v)
printer('FPS = ' + str(fps),v)
vector = []
frameList = os.listdir(os.path.join(tgt,v))
aver = frameAverage(frameList,os.path.join(tgt,v),multiThread)
with Pool(multiThread) as p:
averaged=np.array(p.map(aver,range(0,len(frameList),aver.windowSize)))
averaged = np.median(averaged,axis=0)
_averaged = averaged.astype(np.uint8)
cv2.imshow('img',_averaged)
cv2.setMouseCallback("img", mouse_img_cod)
k = cv2.waitKey(0)
if k ==121: # press y
cv2.destroyAllWindows()
printer('NeutralZone is:',v)
printer(vector,v)
printer('Time\tFrame\tleft\tright\ttop\tdown\tcenter_x\tcenter_y\tisOpen_center\tisOpen_any\tOpenTimeRatio_center\tOpenTimeRatio_any\tCurrentSpeed\tAverageSpeed',v)
neutralL = np.min(np.array(vector)[:,0])
neutralR = np.max(np.array(vector)[:,0])
neutralT = np.min(np.array(vector)[:,1])
neutralD = np.max(np.array(vector)[:,1])
ioc = 0
ioa = 1
for i in range(len(frameList)):
img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
left,right,top,down,(center_x,center_y) = trackingEPM(img,ori,preview=preview)
speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
if center_x <= neutralL or center_x >= neutralR:
isOpen_center = 1
ioc += 1
else:
isOpen_center = 0
if left <= neutralL or right >= neutralR:
isOpen_any = 1
ioa += 1
else:
isOpen_any = 0
printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:.0f}\t{:.0f}\t{:.5f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,left,right,top,down,center_x,center_y,isOpen_center,isOpen_any,ioc/(i+1),ioa/(i+1),speed,averSpeed),v)
else:
vList = os.listdir(src)
for v in vList:
speedo = Speedometer(windowSize=windowSize,Filter=Filter)
cap = cv2.VideoCapture(os.path.join(src,v))
fps = cap.get(cv2.CAP_PROP_FPS)
cap.release()
localtime = time.asctime( time.localtime(time.time()) )
v = v.split('.')[0]
printer(localtime,v)
printer('FPS = ' + str(fps),v)
printer('Time\tFrame\tcenter_x\tcenter_y\ta\tb\tangle\tcenter_distance\tisCenter\tCenterTimeRatio_center\tCurrentSpeed\tAverageSpeed',v)
ic = 0
frameList = os.listdir(os.path.join(tgt,v))
for i in range(len(frameList)):
img = cv2.imread(os.path.join(rmbg_tgt,v,str(i+1)+'.jpg'),cv2.IMREAD_GRAYSCALE)
ori = cv2.imread(os.path.join(tgt,v,str(i+1)+'.jpg'))
if useEllipse:
(center_x,center_y),(a,b),angle = trackingOFT(img,ori,preview=preview)
else:
left,right,top,down,(center_x,center_y)= trackingEPM(img,ori,preview=preview)
a = right-left
b = down-top
angle = 0
speed = speedo.update([center_x,center_y])*fps*refLenth/(2*imgSize*(1-margin))
averSpeed = speedo.aver()*fps*refLenth/(2*imgSize*(1-margin))
dis_x = abs(center_x-imgSize//2)
dis_y = abs(center_y-imgSize//2)
distance = ((dis_x**2+dis_y**2)**0.5)*refLenth/(imgSize*(1-margin))
if max(dis_x,dis_y) < imgSize*0.5*(1-margin)*centerCutOff:
isCenter = 1
ic += 1
else:
isCenter = 0
printer('{:0>10.3f}\t{:0>6.0f}\t{:0>3.0f}\t{:0>3.0f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:0>7.3f}\t{:.0f}\t{:.5f}\t{:0>7.3f}\t{:0>7.3f}'.format((i+1)/fps,i+1,center_x,center_y,a,b,angle,distance,isCenter,ic/(i+1),speed,averSpeed),v)
from pathlib import Path
from .common import PathIsh, Visit, Source, last, Loc, Results, DbVisit, Context, Res
# add a deprecation warning, so eventually this may be converted to a namespace package?
import warnings
warnings.warn("DEPRECATED! Please import directly from 'promnesia.common', e.g. 'from promnesia.common import Visit, Source, Results'", DeprecationWarning)
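DeprecationWarning is hidden by default outside of __main__ and test runners, so downstream code that wants to surface the message above can opt in before importing the module:

import warnings
warnings.simplefilter('default', DeprecationWarning)  # or run Python with -W default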
# Inspired by: https://www.data-blogger.com/2017/02/24/gathering-tweets-with-python/
import tweepy
import json
# Specify the account credentials in the following variables:
# TODO: Get them from an environment variable or a secrets file
consumer_key = 'INSERT CONSUMER KEY HERE'
consumer_secret = 'INSERT CONSUMER SECRET HERE'
access_token = 'INSERT ACCESS TOKEN HERE'
access_token_secret = 'INSERT ACCESS TOKEN SECRET HERE'
# This listener will print out all Tweets it receives
# TODO: Adapt this to write to a CSV or something else.
class PrintListener(tweepy.StreamListener):
def on_data(self, data):
# Decode the JSON data
tweet = json.loads(data)
# Print out the Tweet
print('@%s: %s' % (tweet['user']['screen_name'], tweet['text'].encode('ascii', 'ignore')))
def on_error(self, status):
print(status)
if __name__ == '__main__':
listener = PrintListener()
# Show system message
print('I will now print Tweets containing "Python"! ==>')
# Authenticate
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Connect the stream to our listener
stream = tweepy.Stream(auth, listener)
stream.filter(track=['Python'])
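A hedged sketch of the CSV adaptation the TODO above asks for; the file name and the column choice are assumptions:

import csv

class CsvListener(tweepy.StreamListener):
    """Append the screen name and text of each received Tweet to a CSV file."""
    def __init__(self, path='tweets.csv'):
        super(CsvListener, self).__init__()
        self.file = open(path, 'a', newline='')
        self.writer = csv.writer(self.file)

    def on_data(self, data):
        tweet = json.loads(data)
        self.writer.writerow([tweet['user']['screen_name'], tweet['text']])
        self.file.flush()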
import psycopg2
from flask import abort  # assumption: abort() used below is Flask's; the original omitted this import
HOSTNAME = '192.168.1.204'
USERNAME = 'postgres'
PASSWORD = '<PASSWORD>'
DATABASE_NAME = 'data_lake'
PORT = 5432
postgres_connection_string = "postgresql://{DB_USER}:{DB_PASS}@{DB_ADDR}:{PORT}/{DB_NAME}".format(
DB_USER=USERNAME,
DB_PASS=PASSWORD,
DB_ADDR=HOSTNAME,
PORT=PORT,
DB_NAME=DATABASE_NAME)
def get_postgres_connection():
try:
connection = psycopg2.connect(
user=USERNAME,
password=PASSWORD,
host=HOSTNAME,
port=PORT,
database=DATABASE_NAME)
return connection
except (Exception, psycopg2.Error) as error:
message = f"get_postgres_connection {error}"
return abort(400, message)
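A short usage sketch for the helper above; the query is a trivial connectivity check:

connection = get_postgres_connection()
with connection.cursor() as cursor:
    cursor.execute('SELECT version();')
    print(cursor.fetchone())
connection.close()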
"""add fortunki table
Revision ID: 567424e5046c
Revises: f32a45256434
Create Date: 2019-09-19 18:59:11.629057
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "567424e5046c"
down_revision = "f32a45256434"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"fortunki",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("text", sa.Text(), nullable=False),
sa.PrimaryKeyConstraint("id", name=op.f("pk_fortunki")),
sa.UniqueConstraint("text", name=op.f("uq_fortunki_text")),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("fortunki")
# ### end Alembic commands ###
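With the identifiers above, running "alembic upgrade 567424e5046c" applies this migration, and "alembic downgrade f32a45256434" (equivalently "alembic downgrade -1") reverts it.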
import os
import re
from glob import glob
import pandas as pd
# enloc
#FILE_SUFFIX = '*.enloc.rst'
#FILE_PATTERN = '(?P<pheno>.+)__PM__(?P<tissue>.+)\.enloc\.rst'
# fastenloc
ALL_TISSUES = pd.read_csv('/mnt/phenomexcan/fastenloc/fastenloc_gtex_tissues.txt', header=None, squeeze=True).tolist()
FILE_PREFIX = 'fastenloc-'
FILE_SUFFIX = '*.sig.out'
all_tissues_regex = '|'.join([re.escape(t) for t in ALL_TISSUES])
FILE_PATTERN = rf'fastenloc-(?P<pheno>.+)-(?P<tissue>{all_tissues_regex})\.enloc\.sig\.out'
assert len(ALL_TISSUES) == 49
all_files = glob(FILE_SUFFIX)
print(len(all_files))
file_pattern = re.compile(FILE_PATTERN)
all_phenos = [re.search(file_pattern, f).group('pheno') for f in all_files]
assert len(all_files) == len(all_phenos)
assert not any([x is None for x in all_phenos])
all_phenos = list(set(all_phenos))
print(len(all_phenos))
assert len(all_phenos) * len(ALL_TISSUES) == len(all_files)
for pheno in all_phenos:
os.makedirs(pheno, exist_ok=True)
s = os.system(f'mv {FILE_PREFIX}{pheno}-* {pheno}/')
assert s == 0
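A quick sanity check of the compiled pattern; the tissue name here is an assumption about the contents of the tissues file:

# m = re.search(file_pattern, 'fastenloc-height-Whole_Blood.enloc.sig.out')
# m.group('pheno') == 'height'; m.group('tissue') == 'Whole_Blood'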
from __future__ import absolute_import, division, print_function
import hashlib
import json
import logging
import subprocess
import tempfile
import time
import requests
from requests.utils import urlparse
__all__ = ['Kubernetes', "get_endpoint"]
logger = logging.getLogger(__name__)
resource_endpoints = {
"daemonsets":
"apis/extensions/v1beta1/namespaces/{namespace}/daemonsets",
"deployments":
"apis/extensions/v1beta1/namespaces/{namespace}/deployments",
"horizontalpodautoscalers":
"apis/extensions/v1beta1/namespaces/{namespace}/horizontalpodautoscalers",
"ingresses":
"apis/extensions/v1beta1/namespaces/{namespace}/ingresses",
"jobs":
"apis/extensions/v1beta1/namespaces/{namespace}/jobs",
"namespaces":
"api/v1/namespaces",
"replicasets":
"apis/extensions/v1beta1/namespaces/{namespace}/replicasets",
"persistentvolumes":
"api/v1/namespaces/{namespace}/persistentvolumes",
"persistentvolumeclaims":
"api/v1/namespaces/{namespace}/persistentvolumeclaims",
"services":
"api/v1/namespaces/{namespace}/services",
"serviceaccounts":
"api/v1/namespaces/{namespace}/serviceaccounts",
"secrets":
"api/v1/namespaces/{namespace}/secrets",
"configmaps":
"api/v1/namespaces/{namespace}/configmaps",
"replicationcontrollers":
"api/v1/namespaces/{namespace}/replicationcontrollers",
"pods":
"api/v1/namespaces/{namespace}/pods",
"statefulset":
"apis/apps/v1beta1/namespaces/{namespace}/statefulsets",
"storageclass":
"apis/storage.k8s.io/v1beta1/statefulsets", }
resources_alias = {
"ds": "daemonsets",
"hpa": "horizontalpodautoscalers",
"ing": "ingresses",
"ingress": "ingresses",
"ns": "namespaces",
"sc": "storageclasses",
"sfs": "statefulsets",
"po": "pods",
"pv": "persistentvolumes",
"pvc": "persistentvolumeclaims",
"rc": "replicationcontrollers",
"svc": "services"}
ANNOTATIONS = {
'protected': 'resource.appr/protected',
'hash': 'resource.appr/hash',
'version': 'package.appr/version',
'parent': 'package.appr/parent',
'rand': 'resource.appr/rand',
'update-mode': 'resource.appr/update-mode',
'package': 'package.appr/package'}
def get_endpoint(kind):
name = None
if kind in resource_endpoints:
name = kind
elif kind in resources_alias:
name = resources_alias[kind]
elif kind + "s" in resource_endpoints:
name = kind + "s"
else:
return 'unknown'
return resource_endpoints[name]
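For example, resolving the 'po' alias and filling in a namespace (the namespace value is arbitrary):

url_path = get_endpoint('po').format(namespace='default')
# -> 'api/v1/namespaces/default/pods'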
class Kubernetes(object):
def __init__(self, namespace=None, endpoint=None, body=None, proxy=None):
self.proxy = None
        if endpoint is not None and endpoint[0] == "/":
            endpoint = endpoint[1:]  # strip only the leading slash
self.endpoint = endpoint
self.body = body
self.obj = None
self.protected = False
self._resource_load()
self.kind = self.obj['kind'].lower()
self.name = self.obj['metadata']['name']
self.force_rotate = ANNOTATIONS['rand'] in self.obj['metadata'].get('annotations', {})
self.namespace = self._namespace(namespace)
self.result = None
if proxy:
self.proxy = urlparse(proxy)
def _resource_load(self):
self.obj = json.loads(self.body)
if 'annotations' in self.obj['metadata']:
if (ANNOTATIONS['protected'] in self.obj['metadata']['annotations'] and
self.obj['metadata']['annotations'][ANNOTATIONS['protected']] == 'true'):
self.protected = True
def _gethash(self, src):
# Copy rand value
if (src is not None and ANNOTATIONS['rand'] in src['metadata'].get('annotations', {}) and
ANNOTATIONS['rand'] not in self.obj['metadata']['annotations']):
self.obj['metadata']['annotations'][ANNOTATIONS['rand']] = src['metadata'][
'annotations'][ANNOTATIONS['rand']]
# TODO(ant31) it should hash before the custom annotations
if ANNOTATIONS['hash'] in self.obj['metadata'].get('annotations', {}):
if self.obj['metadata']['annotations'][ANNOTATIONS['hash']] is None:
sha = hashlib.sha256(json.dumps(self.obj, sort_keys=True)).hexdigest()
self.obj['metadata']['annotations'][ANNOTATIONS['hash']] = sha
return self.obj['metadata']['annotations'][ANNOTATIONS['hash']]
else:
return None
def _namespace(self, namespace=None):
if namespace:
return namespace
elif 'namespace' in self.obj['metadata']:
return self.obj['metadata']['namespace']
else:
return 'default'
def create(self, force=False, dry=False, strategy='update'):
"""
        - Check whether the resource name exists
        - if it exists, check whether the appr hash is the same
        - if it is not the same, delete the resource and recreate it
        - if force == true, delete the resource and recreate it
        - if it does not exist, create it
"""
force = force or self.force_rotate
r = self.get()
if r is not None:
rhash = r['metadata'].get('annotations', {}).get(ANNOTATIONS['hash'], None)
objhash = self._gethash(r)
        f = tempfile.NamedTemporaryFile(mode="w")  # text mode, since json.dumps returns str
method = "apply"
if self.proxy:
method = "create"
strategy = "replace"
cmd = [method, '-f', f.name]
f.write(json.dumps(self.obj))
f.flush()
if r is None:
self._call(cmd, dry=dry)
return 'created'
elif (objhash is None or rhash == objhash) and force is False:
return 'ok'
elif rhash != objhash or force is True:
if self.protected:
return 'protected'
if strategy == 'replace':
self.delete(dry=dry)
action = "replaced"
elif strategy == "update":
action = "updated"
else:
                raise ValueError("Unknown strategy %s" % strategy)
self._call(cmd, dry=dry)
return action
def get(self):
cmd = ['get', self.kind, self.name, '-o', 'json']
try:
self.result = json.loads(self._call(cmd))
return self.result
except RuntimeError:
return None
except (requests.exceptions.HTTPError) as e:
if e.response.status_code == 404:
return None
else:
raise e
def delete(self, dry=False, **kwargs):
cmd = ['delete', self.kind, self.name]
if self.protected:
return 'protected'
r = self.get()
if r is not None:
self._call(cmd, dry=dry)
return 'deleted'
else:
return 'absent'
def wait(self, retries=3, seconds=1):
r = 1
time.sleep(seconds)
obj = self.get()
while (r < retries and obj is None):
r += 1
time.sleep(seconds)
obj = self.get()
return obj
def exists(self):
r = self.get()
if r is None:
return False
else:
return True
def _call(self, cmd, dry=False):
command = ['kubectl'] + cmd + ["--namespace", self.namespace]
if not dry:
if self.proxy is not None:
return self._request(cmd[0])
else:
try:
return subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise RuntimeError("Kubernetes failed to create %s (%s): "
"%s" % (self.name, self.kind, e.output))
else:
return True
def _request(self, method):
if method == 'create':
headers = {'Content-Type': 'application/json'}
method = 'post'
url = "%s/%s" % (self.proxy.geturl(), self.endpoint)
return requests.post(url, data=self.body, headers=headers)
else:
url = "%s/%s/%s" % (self.proxy.geturl(), self.endpoint, self.name)
query = getattr(requests, method)
r = query(url)
r.raise_for_status()
return r.content
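A hedged end-to-end sketch of the class above; the manifest is a placeholder and a configured kubectl context is assumed (get() still shells out to kubectl even with dry=True):

demo_manifest = json.dumps({
    'kind': 'ConfigMap',
    'apiVersion': 'v1',
    'metadata': {'name': 'demo', 'annotations': {}},
    'data': {'key': 'value'},
})
resource = Kubernetes(namespace='default', body=demo_manifest)
print(resource.create(dry=True))  # 'created' if absent, 'ok' if unchanged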
from string import Template
const_base = "data/const/degree_order_{}_{}.pkl"
#LRF settings
N = 500
minc = 100
maxc = 100
mu = 0.3
k = 5
#k = 10
#maxk = 20
maxk = 50
t1 = 2
t2 = 1
name_tmp = Template("LRF_${N}_${k}_${maxk}_${minc}_${maxc}_${mu}")
lrf_data_label = name_tmp.substitute(N=N, k=k, maxk=maxk, minc=minc, maxc=maxc,
mu=mu)
data_label = lrf_data_label
#data_label = "gn_1000_4"
#data_label = "polblogs"
exp_name = "{}_degree_order_asc".format(data_label)
print("*********************", exp_name)
#General settings
#densities =[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
densities = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1]
trials = 5
K = 5
lr_adam = 0.01
lr_sgd = 0.01
threshold = 10e-9
threads = 8
used_models = ["abs_adam","update_rule"]
#used_models = ["abs_adam", "update_rule"]
#used_models = ["abs_adam"]
max_iters = 1000
mlambda = 2
"""Implement merge sort algorithm."""
from random import randint, shuffle
from timeit import timeit
def merge_sort(nums):
"""Merge list by merge sort."""
half = int(len(nums) // 2)
if len(nums) == 1:
return nums
if len(nums) == 2:
if nums[0] > nums[1]:
nums[0], nums[1] = nums[1], nums[0]
return nums
left = merge_sort(nums[:half])
right = merge_sort(nums[half:])
output = []
left_ct = 0
right_ct = 0
while left_ct < len(left) and right_ct < len(right):
if left[left_ct] < right[right_ct]:
output.append(left[left_ct])
left_ct += 1
else:
output.append(right[right_ct])
right_ct += 1
if left_ct == len(left):
output += right[right_ct:]
elif right_ct == len(right):
output += left[left_ct:]
return output
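For example:

print(merge_sort([5, 1, 4, 2, 3]))  # -> [1, 2, 3, 4, 5]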
def timings(): # pragma: no cover
"""Generate timings report for insertion sort."""
import_sort = 'from merge_sort import merge_sort'
print("""
Timings for best, average and worst case scenarios for the merge sort.
--------------------------------------------------------------------------
""")
print("3 Best Case Scenarios - sorted except for one value")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
rand_lst[6], rand_lst[-1] = rand_lst[-1], rand_lst[6]
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
print("\n3 Average Case Scenarios - Moderately sorted")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
shuffle(rand_lst)
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
print("\n3 Worst Case Scenarios - Completely unsorted")
for i in range(3):
lst_len = randint(9, 50)
rand_lst = [i for i in range(lst_len)]
rand_lst = rand_lst[::-1]
best_time = timeit('merge_sort({})'.format(rand_lst), import_sort)
print('List {}: length={}; time = {}'.format(i + 1, lst_len, best_time))
if __name__ == '__main__': # pragma: no cover
timings()
|
[
"random.shuffle",
"random.randint"
] |
[((1308, 1322), 'random.randint', 'randint', (['(9)', '(50)'], {}), '(9, 50)\n', (1315, 1322), False, 'from random import randint, shuffle\n'), ((1691, 1705), 'random.randint', 'randint', (['(9)', '(50)'], {}), '(9, 50)\n', (1698, 1705), False, 'from random import randint, shuffle\n'), ((1761, 1778), 'random.shuffle', 'shuffle', (['rand_lst'], {}), '(rand_lst)\n', (1768, 1778), False, 'from random import randint, shuffle\n'), ((2038, 2052), 'random.randint', 'randint', (['(9)', '(50)'], {}), '(9, 50)\n', (2045, 2052), False, 'from random import randint, shuffle\n')]
|
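A short usage sketch for the merge sort above, assuming the file is saved as merge_sort.py (the same import the timings() helper uses):

from random import shuffle
from merge_sort import merge_sort

nums = list(range(10))
shuffle(nums)
assert merge_sort(nums) == sorted(nums)  # agrees with the built-in sort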
#!/usr/bin/python3
# Copyright 2017-2018 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import glob
import numpy as np
import pandas as pd
import six
import pytablewriter
from multiprocessing import Pool
def get_lossless_average(path, reference_format):
merged_data = {}
columns = [
"format", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
"wavg_encode_time", "wavg_decode_time"
]
final_data = pd.DataFrame(columns=columns)
final_data.set_index("format", drop=False, inplace=True)
for format in next(os.walk(path))[1]:
if not glob.glob(path + "/" + format + "/lossless/*.out"):
print("Lossless results files could not be found for format {}.".
format(format))
continue
rawdata = []
data_path = path + "/" + format + "/lossless/"
for f in glob.glob(data_path + "/*.out"):
rawdata.append(pd.read_csv(f, sep=":"))
merged_data[format] = pd.concat(rawdata)
sum_orig_file_size = np.sum(merged_data[format]["orig_file_size"])
sum_compressed_file_size = np.sum(
merged_data[format]["compressed_file_size"])
sum_pixels = np.sum(merged_data[format]["pixels"])
avg_bpp = sum_compressed_file_size * 8 / sum_pixels
avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
avg_space_saving = 1 - (1 / avg_compression_ratio)
wavg_encode_time = np.average(
merged_data[format]["encode_time"],
weights=merged_data[format]["pixels"])
wavg_decode_time = np.average(
merged_data[format]["decode_time"],
weights=merged_data[format]["pixels"])
final_data.loc[format] = [
format, avg_bpp, avg_compression_ratio, avg_space_saving,
wavg_encode_time, wavg_decode_time
]
final_data = final_data.assign(weissman_score=lambda x: x.avg_compression_ratio / x.loc[reference_format, "avg_compression_ratio"] * np.log(x.loc[reference_format, "wavg_encode_time"] * 1000) / np.log(x.wavg_encode_time * 1000))
final_data.sort_values("weissman_score", ascending=False, inplace=True)
results_file = path + "/" + os.path.basename(path) + ".lossless.out"
final_data.to_csv(results_file, sep=":")
file = open(path + "/" + os.path.basename(path) + ".lossless.md", "w")
markdown_writer = pytablewriter.MarkdownTableWriter()
markdown_writer.from_dataframe(final_data)
markdown_writer.stream = six.StringIO()
markdown_writer.write_table()
file.write(markdown_writer.stream.getvalue())
file.close()
print(
"Lossless results file successfully saved to {}.".format(results_file))
def get_lossy_average(args):
[path, format, reference_format] = args
if not glob.glob(path + "/" + format + "/lossy/*.out"):
print("Lossy results files could not be found for format {}.".format(
format))
return
rawdata = []
merged_data = []
columns = [
"file_name", "quality", "orig_file_size", "compressed_file_size",
"pixels", "bpp", "compression_ratio", "encode_time", "decode_time",
"y_ssim_score", "rgb_ssim_score", "msssim_score", "psnrhvsm_score",
"vmaf_score"
]
final_columns = [
"quality", "avg_bpp", "avg_compression_ratio", "avg_space_saving",
"wavg_encode_time", "wavg_decode_time", "wavg_y_ssim_score",
"wavg_rgb_ssim_score", "wavg_msssim_score", "wavg_psnrhvsm_score",
"wavg_vmaf_score"
]
final_data = pd.DataFrame(columns=final_columns)
data_path = path + "/" + format + "/lossy/"
for f in glob.glob(data_path + "*.out"):
rawdata.append(pd.read_csv(f, sep=":"))
quality_length = len(rawdata[0].index)
for i in range(quality_length):
merged_data.insert(i, pd.DataFrame(columns=columns))
for data in rawdata:
merged_data[i] = merged_data[i].append(data.iloc[[i]])
merged_data[i].sort_values("file_name", ascending=True, inplace=True)
quality = np.mean(merged_data[i]["quality"])
sum_orig_file_size = np.sum(merged_data[i]["orig_file_size"])
sum_compressed_file_size = np.sum(
merged_data[i]["compressed_file_size"])
sum_pixels = np.sum(merged_data[i]["pixels"])
avg_bpp = sum_compressed_file_size * 8 / sum_pixels
avg_compression_ratio = sum_orig_file_size / sum_compressed_file_size
avg_space_saving = 1 - (1 / avg_compression_ratio)
wavg_encode_time = np.average(
merged_data[i]["encode_time"], weights=merged_data[i]["pixels"])
wavg_decode_time = np.average(
merged_data[i]["decode_time"], weights=merged_data[i]["pixels"])
wavg_y_ssim_score = np.average(
merged_data[i]["y_ssim_score"], weights=merged_data[i]["pixels"])
wavg_rgb_ssim_score = np.average(
merged_data[i]["rgb_ssim_score"], weights=merged_data[i]["pixels"])
wavg_msssim_score = np.average(
merged_data[i]["msssim_score"], weights=merged_data[i]["pixels"])
wavg_psnrhvsm_score = np.average(
merged_data[i]["psnrhvsm_score"], weights=merged_data[i]["pixels"])
wavg_vmaf_score = np.average(
merged_data[i]["vmaf_score"], weights=merged_data[i]["pixels"])
final_data.loc[i] = [
quality, avg_bpp, avg_compression_ratio, avg_space_saving,
wavg_encode_time, wavg_decode_time, wavg_y_ssim_score,
wavg_rgb_ssim_score, wavg_msssim_score, wavg_psnrhvsm_score,
wavg_vmaf_score
]
results_file = path + "/" + os.path.basename(
path) + "." + format + ".lossy.out"
final_data.to_csv(results_file, sep=":", index=False)
print("Lossy results file for format {} successfully saved to {}.".format(
format, results_file))
def main(argv):
    if sys.version_info < (3, 5):
raise Exception("Python 3.5 or a more recent version is required.")
if len(argv) < 2 or len(argv) > 3:
print(
"rd_average.py: Calculate a per format weighted averages of the results files generated by rd_collect.py"
)
print(
"Arg 1: Path to the results of a subset generated by rd_collect.py")
print(" For ex: rd_average.py \"results/subset1\"")
print("Arg 2: Reference format with which to compare other formats.")
print(" Default to mozjpeg")
return
results_folder = os.path.normpath(argv[1])
available_formats = next(os.walk(results_folder))[1]
# Check is there is actually results files in the path provided
if (not os.path.isdir(results_folder) or not available_formats
or not glob.glob(results_folder + "/**/*.out", recursive=True)):
print(
"Could not find all results file. Please make sure the path provided is correct."
)
return
try:
reference_format = argv[2]
except IndexError:
reference_format = "mozjpeg"
if (reference_format not in available_formats or not glob.glob(
results_folder + "/" + reference_format + "/lossless/*.out")
or not glob.glob(results_folder + "/" + reference_format +
"/lossy/*.out")):
print(
"Could not find reference format results files. Please choose a format among {} or check if the reference format results files are present.".
format(available_formats))
return
get_lossless_average(results_folder, reference_format)
Pool().map(get_lossy_average,
[(results_folder, format, reference_format)
for format in next(os.walk(results_folder))[1]])
if __name__ == "__main__":
main(sys.argv)
|
[
"pandas.DataFrame",
"numpy.sum",
"numpy.average",
"numpy.log",
"os.path.basename",
"pandas.read_csv",
"os.path.isdir",
"os.walk",
"six.StringIO",
"multiprocessing.Pool",
"numpy.mean",
"os.path.normpath",
"pytablewriter.MarkdownTableWriter",
"glob.glob",
"pandas.concat"
] |
[((1914, 1943), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (1926, 1943), True, 'import pandas as pd\n'), ((3874, 3909), 'pytablewriter.MarkdownTableWriter', 'pytablewriter.MarkdownTableWriter', ([], {}), '()\n', (3907, 3909), False, 'import pytablewriter\n'), ((3986, 4000), 'six.StringIO', 'six.StringIO', ([], {}), '()\n', (3998, 4000), False, 'import six\n'), ((5042, 5077), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'final_columns'}), '(columns=final_columns)\n', (5054, 5077), True, 'import pandas as pd\n'), ((5141, 5171), 'glob.glob', 'glob.glob', (["(data_path + '*.out')"], {}), "(data_path + '*.out')\n", (5150, 5171), False, 'import glob\n'), ((8034, 8059), 'os.path.normpath', 'os.path.normpath', (['argv[1]'], {}), '(argv[1])\n', (8050, 8059), False, 'import os\n'), ((2343, 2374), 'glob.glob', 'glob.glob', (["(data_path + '/*.out')"], {}), "(data_path + '/*.out')\n", (2352, 2374), False, 'import glob\n'), ((2459, 2477), 'pandas.concat', 'pd.concat', (['rawdata'], {}), '(rawdata)\n', (2468, 2477), True, 'import pandas as pd\n'), ((2507, 2552), 'numpy.sum', 'np.sum', (["merged_data[format]['orig_file_size']"], {}), "(merged_data[format]['orig_file_size'])\n", (2513, 2552), True, 'import numpy as np\n'), ((2588, 2639), 'numpy.sum', 'np.sum', (["merged_data[format]['compressed_file_size']"], {}), "(merged_data[format]['compressed_file_size'])\n", (2594, 2639), True, 'import numpy as np\n'), ((2674, 2711), 'numpy.sum', 'np.sum', (["merged_data[format]['pixels']"], {}), "(merged_data[format]['pixels'])\n", (2680, 2711), True, 'import numpy as np\n'), ((2936, 3026), 'numpy.average', 'np.average', (["merged_data[format]['encode_time']"], {'weights': "merged_data[format]['pixels']"}), "(merged_data[format]['encode_time'], weights=merged_data[format][\n 'pixels'])\n", (2946, 3026), True, 'import numpy as np\n'), ((3074, 3164), 'numpy.average', 'np.average', (["merged_data[format]['decode_time']"], {'weights': "merged_data[format]['pixels']"}), "(merged_data[format]['decode_time'], weights=merged_data[format][\n 'pixels'])\n", (3084, 3164), True, 'import numpy as np\n'), ((4281, 4328), 'glob.glob', 'glob.glob', (["(path + '/' + format + '/lossy/*.out')"], {}), "(path + '/' + format + '/lossy/*.out')\n", (4290, 4328), False, 'import glob\n'), ((5558, 5592), 'numpy.mean', 'np.mean', (["merged_data[i]['quality']"], {}), "(merged_data[i]['quality'])\n", (5565, 5592), True, 'import numpy as np\n'), ((5622, 5662), 'numpy.sum', 'np.sum', (["merged_data[i]['orig_file_size']"], {}), "(merged_data[i]['orig_file_size'])\n", (5628, 5662), True, 'import numpy as np\n'), ((5698, 5744), 'numpy.sum', 'np.sum', (["merged_data[i]['compressed_file_size']"], {}), "(merged_data[i]['compressed_file_size'])\n", (5704, 5744), True, 'import numpy as np\n'), ((5779, 5811), 'numpy.sum', 'np.sum', (["merged_data[i]['pixels']"], {}), "(merged_data[i]['pixels'])\n", (5785, 5811), True, 'import numpy as np\n'), ((6036, 6111), 'numpy.average', 'np.average', (["merged_data[i]['encode_time']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['encode_time'], weights=merged_data[i]['pixels'])\n", (6046, 6111), True, 'import numpy as np\n'), ((6152, 6227), 'numpy.average', 'np.average', (["merged_data[i]['decode_time']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['decode_time'], weights=merged_data[i]['pixels'])\n", (6162, 6227), True, 'import numpy as np\n'), ((6269, 6345), 'numpy.average', 'np.average', (["merged_data[i]['y_ssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['y_ssim_score'], weights=merged_data[i]['pixels'])\n", (6279, 6345), True, 'import numpy as np\n'), ((6389, 6467), 'numpy.average', 'np.average', (["merged_data[i]['rgb_ssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['rgb_ssim_score'], weights=merged_data[i]['pixels'])\n", (6399, 6467), True, 'import numpy as np\n'), ((6509, 6585), 'numpy.average', 'np.average', (["merged_data[i]['msssim_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['msssim_score'], weights=merged_data[i]['pixels'])\n", (6519, 6585), True, 'import numpy as np\n'), ((6629, 6707), 'numpy.average', 'np.average', (["merged_data[i]['psnrhvsm_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['psnrhvsm_score'], weights=merged_data[i]['pixels'])\n", (6639, 6707), True, 'import numpy as np\n'), ((6747, 6821), 'numpy.average', 'np.average', (["merged_data[i]['vmaf_score']"], {'weights': "merged_data[i]['pixels']"}), "(merged_data[i]['vmaf_score'], weights=merged_data[i]['pixels'])\n", (6757, 6821), True, 'import numpy as np\n'), ((2029, 2042), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2036, 2042), False, 'import os\n'), ((2063, 2113), 'glob.glob', 'glob.glob', (["(path + '/' + format + '/lossless/*.out')"], {}), "(path + '/' + format + '/lossless/*.out')\n", (2072, 2113), False, 'import glob\n'), ((3689, 3711), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3705, 3711), False, 'import os\n'), ((5196, 5219), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""":"""'}), "(f, sep=':')\n", (5207, 5219), True, 'import pandas as pd\n'), ((5332, 5361), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (5344, 5361), True, 'import pandas as pd\n'), ((8089, 8112), 'os.walk', 'os.walk', (['results_folder'], {}), '(results_folder)\n', (8096, 8112), False, 'import os\n'), ((8198, 8227), 'os.path.isdir', 'os.path.isdir', (['results_folder'], {}), '(results_folder)\n', (8211, 8227), False, 'import os\n'), ((8272, 8327), 'glob.glob', 'glob.glob', (["(results_folder + '/**/*.out')"], {'recursive': '(True)'}), "(results_folder + '/**/*.out', recursive=True)\n", (8281, 8327), False, 'import glob\n'), ((8627, 8697), 'glob.glob', 'glob.glob', (["(results_folder + '/' + reference_format + '/lossless/*.out')"], {}), "(results_folder + '/' + reference_format + '/lossless/*.out')\n", (8636, 8697), False, 'import glob\n'), ((8730, 8797), 'glob.glob', 'glob.glob', (["(results_folder + '/' + reference_format + '/lossy/*.out')"], {}), "(results_folder + '/' + reference_format + '/lossy/*.out')\n", (8739, 8797), False, 'import glob\n'), ((9117, 9123), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (9121, 9123), False, 'from multiprocessing import Pool\n'), ((2403, 2426), 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '""":"""'}), "(f, sep=':')\n", (2414, 2426), True, 'import pandas as pd\n'), ((3806, 3828), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (3822, 3828), False, 'import os\n'), ((3546, 3579), 'numpy.log', 'np.log', (['(x.wavg_encode_time * 1000)'], {}), '(x.wavg_encode_time * 1000)\n', (3552, 3579), True, 'import numpy as np\n'), ((7147, 7169), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (7163, 7169), False, 'import os\n'), ((3485, 3543), 'numpy.log', 'np.log', (["(x.loc[reference_format, 'wavg_encode_time'] * 1000)"], {}), "(x.loc[reference_format, 'wavg_encode_time'] * 1000)\n", (3491, 3543), True, 'import numpy as np\n'), ((9241, 9264), 'os.walk', 'os.walk', (['results_folder'], {}), '(results_folder)\n', (9248, 9264), False, 'import os\n')]
|
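For reference, a self-contained sketch of the aggregation idea used in rd_average.py above: file sizes are summed before dividing, and per-file times are weighted by pixel counts (all numbers here are illustrative):

import numpy as np

compressed_file_size = np.array([120000, 250000])  # bytes per image
pixels = np.array([1000000, 2000000])             # pixels per image
encode_time = np.array([0.8, 1.5])                # seconds per image

avg_bpp = np.sum(compressed_file_size) * 8 / np.sum(pixels)  # global bits per pixel
wavg_encode_time = np.average(encode_time, weights=pixels)    # pixel-weighted mean
print(avg_bpp, wavg_encode_time)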
import os
import shutil
def copy_file_path(source_path, target_path):
"""复制源文件目录下的所有目录到另一个文件目录下"""
for e, _, _ in os.walk(source_path):
path_name = os.path.splitdrive(e)[1]
file_path = os.path.join(target_path, path_name[len(source_path)-1:])
if not os.path.exists(file_path):
os.makedirs(file_path)
def copy_files(source_path, target_path):
"""复制一个文件夹所有文件到另一个文件夹"""
for source_file_Path, d, filelist in os.walk(source_path):
drivename, pathname = os.path.splitdrive(source_file_Path)
file_path = os.path.join(target_path, pathname)
if not os.path.exists(file_path):
os.makedirs(file_path)
for filename in filelist:
file = os.path.join(source_file_Path, filename)
shutil.copy(file, file_path)
def get_files(path, file_name, is_lower=False):
"""根据文件名与文件目录,获取目录下所有文件列表"""
files = []
for eachfilePath, d, file_names in os.walk(path):
for name in file_names:
if is_lower:
if name == file_name:
tempfile = os.path.join(eachfilePath, name)
files.append(tempfile)
else:
if name.lower() == file_name.lower():
tempfile = os.path.join(eachfilePath, name)
files.append(tempfile)
return files
def get_ext_files(path, ext_name):
    # e.g. for ".exe" the extension_name is "exe"; the extension name must not contain "."
filelists = []
for eachfilePath, d, file_names in os.walk(path):
for name in file_names:
if name.split(".")[-1].lower() == ext_name.lower():
tempfile = os.path.join(eachfilePath, name)
filelists.append(tempfile)
return filelists
if __name__ == '__main__':
pass
|
[
"os.path.splitdrive",
"os.makedirs",
"os.walk",
"os.path.exists",
"os.path.join",
"shutil.copy"
] |
[((124, 144), 'os.walk', 'os.walk', (['source_path'], {}), '(source_path)\n', (131, 144), False, 'import os\n'), ((460, 480), 'os.walk', 'os.walk', (['source_path'], {}), '(source_path)\n', (467, 480), False, 'import os\n'), ((954, 967), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (961, 967), False, 'import os\n'), ((1504, 1517), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1511, 1517), False, 'import os\n'), ((512, 548), 'os.path.splitdrive', 'os.path.splitdrive', (['source_file_Path'], {}), '(source_file_Path)\n', (530, 548), False, 'import os\n'), ((569, 604), 'os.path.join', 'os.path.join', (['target_path', 'pathname'], {}), '(target_path, pathname)\n', (581, 604), False, 'import os\n'), ((166, 187), 'os.path.splitdrive', 'os.path.splitdrive', (['e'], {}), '(e)\n', (184, 187), False, 'import os\n'), ((284, 309), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (298, 309), False, 'import os\n'), ((323, 345), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (334, 345), False, 'import os\n'), ((620, 645), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (634, 645), False, 'import os\n'), ((659, 681), 'os.makedirs', 'os.makedirs', (['file_path'], {}), '(file_path)\n', (670, 681), False, 'import os\n'), ((735, 775), 'os.path.join', 'os.path.join', (['source_file_Path', 'filename'], {}), '(source_file_Path, filename)\n', (747, 775), False, 'import os\n'), ((788, 816), 'shutil.copy', 'shutil.copy', (['file', 'file_path'], {}), '(file, file_path)\n', (799, 816), False, 'import shutil\n'), ((1642, 1674), 'os.path.join', 'os.path.join', (['eachfilePath', 'name'], {}), '(eachfilePath, name)\n', (1654, 1674), False, 'import os\n'), ((1095, 1127), 'os.path.join', 'os.path.join', (['eachfilePath', 'name'], {}), '(eachfilePath, name)\n', (1107, 1127), False, 'import os\n'), ((1274, 1306), 'os.path.join', 'os.path.join', (['eachfilePath', 'name'], {}), '(eachfilePath, name)\n', (1286, 1306), False, 'import os\n')]
|
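A hedged usage sketch for the helpers above; the paths are placeholders, and the functions are assumed to be importable from the module:

import os

if os.path.isdir("demo_src"):
    copy_files("demo_src", "demo_dst")          # mirror the tree and copy every file
    print(get_ext_files("demo_dst", "txt"))     # all *.txt files (extension given without ".")
    print(get_files("demo_dst", "readme.txt"))  # note: with is_lower=False the names are compared lowercased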
from __future__ import annotations
from abc import abstractmethod, abstractproperty
from typing import Any, Generic, TYPE_CHECKING, TypeVar, Union
import numpy as np
if TYPE_CHECKING:
from tanuki.data_store.column_alias import ColumnAlias
from tanuki.data_store.index.pandas_index import PandasIndex
C = TypeVar("C", bound=tuple["ColumnAlias", ...])
class Index(Generic[C]):
@abstractproperty
def name(self: Index[C]) -> Union[str, list[str]]:
raise NotImplementedError()
@abstractproperty
def columns(self: Index[C]) -> list[str]:
raise NotImplementedError()
@abstractmethod
def to_pandas(self) -> PandasIndex[C]:
raise NotImplementedError()
@abstractmethod
def __getitem__(self, item) -> Index[C]:
raise NotImplementedError()
@abstractproperty
def values(self: Index[C]) -> np.ndarray:
raise NotImplementedError()
@abstractmethod
def tolist(self: Index[C]) -> list:
raise NotImplementedError()
@abstractmethod
def equals(self, other: Any) -> bool:
raise NotImplementedError()
@abstractmethod
def __eq__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __ne__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __gt__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __ge__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __lt__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __le__(self, other: Any) -> Index[C]:
raise NotImplementedError()
@abstractmethod
def __len__(self) -> int:
raise NotImplementedError()
@abstractmethod
def __str__(self: Index[C]) -> str:
raise NotImplementedError()
@abstractmethod
def __repr__(self: Index[C]) -> str:
raise NotImplementedError()
|
[
"typing.TypeVar"
] |
[((318, 363), 'typing.TypeVar', 'TypeVar', (['"""C"""'], {'bound': "tuple['ColumnAlias', ...]"}), "('C', bound=tuple['ColumnAlias', ...])\n", (325, 363), False, 'from typing import Any, Generic, TYPE_CHECKING, TypeVar, Union\n')]
|
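A minimal, self-contained sketch of the Generic/TypeVar pattern used above; Pair is hypothetical and only illustrates how a bound TypeVar like C constrains the type parameter:

from typing import Generic, TypeVar

T = TypeVar("T", bound=tuple)  # T must be a tuple subtype, like C above

class Pair(Generic[T]):
    def __init__(self, value: T) -> None:
        self.value = value

p: "Pair[tuple[int, int]]" = Pair((1, 2))  # type checkers bind T to tuple[int, int]
print(p.value)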
#! /usr/bin/env python3
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s(%(relativeCreated)6d)[%(threadName)s]%(message)s')
# Example of an airborne survey where some of the flight lines get too close to each other.
# When gridded, the output contains "tares" that detract from the result.
# Use this tool to clean up the parts of a survey that are "over constrained" and show errors.
# It changes the X/Y alias.
from intrepid import mastertask_pb2 as master
from intrepid.intrepid_tasks_pb2 import DB_Operations
from intrepid.utils import Executor
batch = master.BatchJob()
igtask = batch.IntrepidTask.add()
fmgr = igtask.FileManager
fmgr.Action = DB_Operations.CopyTable
fmgr.Input = "${tutorial}/Intrepid_datasets/EBA_DBs/ebagoola_S..DIR"
fmgr.Output = "./ebagoola_S..DIR"
igtask = batch.IntrepidTask.add()
clip_line = igtask.ClipLine
clip_line.InputFile = "ebagoola_S..DIR"
clip_line.X = "x"
clip_line.Y = "y"
clip_line.LineType = "linetype"
clip_line.Xout = "E_Clip"
clip_line.Yout = "N_Clip"
clip_line.MinimumSeparation = 200.0
clip_line.MinimumSegmentLength = 50
logging.info("\n%s", batch.__str__())
Executor.execute(batch)
|
[
"intrepid.mastertask_pb2.BatchJob",
"intrepid.utils.Executor.execute",
"logging.basicConfig"
] |
[((51, 186), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s(%(relativeCreated)6d)[%(threadName)s]%(message)s"""'}), "(stream=sys.stdout, level=logging.DEBUG, format=\n '%(asctime)s(%(relativeCreated)6d)[%(threadName)s]%(message)s')\n", (70, 186), False, 'import logging\n'), ((588, 605), 'intrepid.mastertask_pb2.BatchJob', 'master.BatchJob', ([], {}), '()\n', (603, 605), True, 'from intrepid import mastertask_pb2 as master\n'), ((1142, 1165), 'intrepid.utils.Executor.execute', 'Executor.execute', (['batch'], {}), '(batch)\n', (1158, 1165), False, 'from intrepid.utils import Executor\n')]
|
# handles the game rules and the server's private data
from server.global_var import GlobalVar
from replicated.game_state import Fase
from server.player_private import PlayerPrivate
from server.deck import Deck, Card
from threading import Timer
from tcp_basics import safe_recv_var
from socket import timeout
# PARAMETERS
TIMEOUT = 0.02
class GameMode:
def __init__(self):
GlobalVar.game_mode = self
self.game_state = GlobalVar.game_state
self.lista_player = []
        self.replicators = [self.game_state.replicator] # add the game state's replicator
        for p in self.game_state.lista_player:
            self.replicators.append(p.replicator) # add all the public players' replicators
self.mazzo = Deck()
self.server_socket = None
self.running = True
self.pausa = False
        self.primo_in_prima = 0 # player who leads the first hand (rotates every game)
self.primo = 0
self.ultimo = 3
self.seme_giro = None
self.questo_giro = []
        self.tutti_connessi = False # True iff all 4 players are connected
self.g_disconnessi = []
def attesa(self):
while len(self.lista_player) < 4:
try:
new_socket, new_address = self.server_socket.accept()
new_socket.settimeout(TIMEOUT)
                self.game_state.replicator.sockets.append(new_socket) # only takes effect on the server side
                new_private = PlayerPrivate(new_socket, len(self.lista_player))
                self.lista_player.append(new_private)
                self.replicators.append(new_private.player_state.replicator) # replicator of the new player state
                print('connected', new_address)
            except timeout:
                pass
        safe_recv_var(self.replicators) # start receiving right away, for the checks
        self.tutti_connessi = True
        for g in self.game_state.lista_player: # force a refresh of the usernames
            g.username.rep_val()
self.dai_carte()
self.game_loop()
def accetta_riconnessione(self):
if len(self.g_disconnessi) > 0:
try:
new_socket, address = self.server_socket.accept()
new_socket.settimeout(TIMEOUT)
                self.game_state.replicator.sockets.append(new_socket) # the disconnected player's old socket was removed earlier
                private = self.g_disconnessi.pop()
                private.socket = new_socket # kept so that it can be removed again later
                private.player_state.replicator.sockets = [new_socket] # the list was emptied before, now add the new socket
                self.game_state.replicator.refresh_all() # refresh game_state for everyone (only this new player would need it)
                for p in self.game_state.lista_player: # refresh everything for everyone; not efficient, but it only runs
                    p.replicator.refresh_all() # when someone leaves and rejoins
                private.player_state.replicator.refresh_all() # refresh only for the right player
if len(self.g_disconnessi) == 0:
self.tutti_connessi = True
except timeout:
pass
def disconnetti(self, private):
sock = private.socket
        self.game_state.replicator.sockets.remove(sock) # remove the disconnected player's socket
        private.player_state.replicator.sockets = [] # there is only one, so this is fine
        self.g_disconnessi.append(private)
        self.game_state.lista_player[private.player_state.index.val].username.val = '---' # so the others can see it
self.tutti_connessi = False
def dai_carte(self):
self.mazzo.carte = []
self.mazzo.crea_carte()
self.mazzo.mischia()
for giocatore in self.lista_player:
carte = self.mazzo.pesca_n(13)
for c in carte:
giocatore.player_state.mano.val.append(c)
giocatore.player_state.mano.rep_val()
def game_loop(self):
self.game_state.fase_gioco.val = Fase.PASSAGGIO_CARTE
while self.running:
safe_recv_var(self.replicators)
self.accetta_riconnessione()
    def carta_client(self, index_g, carta): # checks the current phase and whether the card can be played, then acts
        giocatore = self.lista_player[index_g] # giocatore is a private player type
        if Card.contiene_carta(giocatore.player_state.mano.val, carta): # if they actually hold this card
            if self.game_state.fase_gioco.val == Fase.PASSAGGIO_CARTE: # in the passing phase, put it among the exchanged cards
                if len(giocatore.player_state.scambiate.val) < 3: # if fewer than 3 have been exchanged already
                    self.metti_in_passate(giocatore, carta)
            elif self.game_state.fase_gioco.val == Fase.GIOCO and (not self.pausa): # if we are playing and not paused
                if index_g == self.game_state.turno.val: # if it is their turn
                    if (index_g == self.primo or Card.carta_permessa(giocatore.player_state.mano.val,
                                                                     self.game_state.seme_primo.val, carta)):
                        self.metti_in_giocata(index_g, carta)
    def metti_in_giocata(self, index, carta):
        Card.del_carta(self.lista_player[index].player_state.mano.val, carta) # remove the card from the hand
        self.lista_player[index].player_state.mano.rep_val() # not done automatically, I believe
        self.game_state.lista_player[index].carta_giocata.val = carta # put it among the played cards
        self.questo_giro.append(carta) # keep the cards played this trick in the game mode
        if self.game_state.turno.val == self.primo:
            self.game_state.seme_primo.val = carta.seme # the leader decides the suit of the trick
if self.game_state.turno.val == self.ultimo:
self.risolvi_questo_giro()
else:
turno = (self.game_state.turno.val + 1) % 4
self.game_state.turno.val = turno
def metti_in_passate(self, giocatore, carta):
        Card.del_carta(giocatore.player_state.mano.val, carta) # remove the card from the hand
        giocatore.player_state.mano.rep_val() # not done automatically, I believe
        giocatore.player_state.scambiate.val.append(carta) # put it among the exchanged cards
giocatore.player_state.scambiate.rep_val()
self.ceck_fine_passaggio()
def ceck_fine_passaggio(self):
for gioc in self.lista_player:
state = gioc.player_state
if len(state.scambiate.val) < 3:
return
        self.passa_carte() # also takes care of setting self.game_state.fase_gioco.val = GIOCO
def passa_carte(self):
for gioc in self.lista_player:
state = gioc.player_state
            index = (self.lista_player.index(gioc) - 1) % 4 # take the previous player
            for carta in state.scambiate.val:
                self.lista_player[index].player_state.mano.val.append(carta) # hand the card over to them
            self.lista_player[index].player_state.mano.rep_val()
            self.game_state.fase_gioco.val = Fase.GIOCO
            state.scambiate.val = [] # clear all the exchanged cards
def calcola_punteggio(self):
        punteggio = 10 # base value
        for carta in self.questo_giro: # count the negative points
            if carta.seme == Card.CUORI:
                punteggio -= carta.valore # the card value already equals the negative points for hearts
            elif carta.seme == Card.PICCHE and carta.valore == Card.DONNA: # if it is the queen of spades ("Cunegonda")
                punteggio -= 26
return punteggio
def trova_vincitore(self):
        val_max = self.questo_giro[0].valore # find the winning card
        index_max = 0
        for carta in self.questo_giro:
            if carta.seme == self.game_state.seme_primo.val: # if it follows the leading suit
                if carta.valore > val_max: # if it beats the current max
                    val_max = carta.valore
                    index_max = self.questo_giro.index(carta)
        # index_max is counted starting from the current leader, so it is the offset to apply
        vincitore = (self.primo + index_max) % 4
return vincitore
def risolvi_questo_giro(self):
punteggio = self.calcola_punteggio()
vincitore = self.trova_vincitore()
print('points: ' + str(punteggio) + ' to ' + str(vincitore))
self.primo = vincitore
self.ultimo = (self.primo - 1) % 4
self.pausa = True
        self.lista_player[vincitore].punteggio += punteggio # assign the points
        self.lista_player[vincitore].carte_prese += self.questo_giro # put every card played this trick into the winner's takes
        self.questo_giro = [] # clear the local copy
        t = Timer(5, self.fine_turno) # leave the cards visible for 5 seconds
t.start()
@staticmethod
def ha_preso_carta(giocatore, carta):
for c in giocatore.carte_prese:
if c.seme == carta.seme and c.valore == carta.valore:
return True
return False
def check_cappotto(self):
        for g_esaminato in self.lista_player:
            if GameMode.ha_preso_carta(g_esaminato, Card(Card.DONNA, Card.PICCHE)): # if they took the queen of spades
                for val in Card.VALORI:
                    if not GameMode.ha_preso_carta(g_esaminato, Card(val, Card.CUORI)): # a heart is missing
                        return # if the queen holder is missing a heart, there is no moon shot ("cappotto")
                # reaching here means a moon shot: everyone gets -20 except the shooter, who gets 60
                for g_da_cambiare in self.lista_player:
                    if g_da_cambiare == g_esaminato:
                        g_da_cambiare.punteggio = 60
                    else:
                        g_da_cambiare.punteggio = -20
def fine_turno(self):
        for g in self.game_state.lista_player: # clear the played cards in any case
            g.carta_giocata.val = Card()
        if len(self.lista_player[0].player_state.mano.val) == 0: # if one player has no cards left (everyone has run out)
            self.check_cappotto()
            for i in range(len(self.lista_player)): # update the total scores for everyone
                g_privat = self.lista_player[i]
                g_public = self.game_state.lista_player[i]
                g_public.punteggio_tot.val = g_public.punteggio_tot.val + g_privat.punteggio
                g_privat.punteggio = 0
                g_privat.carte_prese = []
            self.game_state.fase_gioco.val = Fase.FINE_PARTITA # so the HUDs display "game over"
t = Timer(10, self.fine_partita)
t.start()
else:
self.pausa = False
self.game_state.turno.val = self.primo
self.game_state.seme_primo.val = Card.NESSUN_SEME
def fine_partita(self):
self.game_state.cont_partita.val = self.game_state.cont_partita.val + 1
self.pausa = False
self.primo_in_prima = (self.primo_in_prima + 1) % 4
self.primo = self.primo_in_prima
self.ultimo = (self.primo - 1) % 4
self.game_state.turno.val = self.primo
self.game_state.seme_primo.val = Card.NESSUN_SEME
self.dai_carte()
self.game_state.fase_gioco.val = Fase.PASSAGGIO_CARTE
|
[
"threading.Timer",
"server.deck.Card.contiene_carta",
"server.deck.Card",
"server.deck.Deck",
"server.deck.Card.del_carta",
"tcp_basics.safe_recv_var",
"server.deck.Card.carta_permessa"
] |
[((769, 775), 'server.deck.Deck', 'Deck', ([], {}), '()\n', (773, 775), False, 'from server.deck import Deck, Card\n'), ((4445, 4504), 'server.deck.Card.contiene_carta', 'Card.contiene_carta', (['giocatore.player_state.mano.val', 'carta'], {}), '(giocatore.player_state.mano.val, carta)\n', (4464, 4504), False, 'from server.deck import Deck, Card\n'), ((5348, 5417), 'server.deck.Card.del_carta', 'Card.del_carta', (['self.lista_player[index].player_state.mano.val', 'carta'], {}), '(self.lista_player[index].player_state.mano.val, carta)\n', (5362, 5417), False, 'from server.deck import Deck, Card\n'), ((6131, 6185), 'server.deck.Card.del_carta', 'Card.del_carta', (['giocatore.player_state.mano.val', 'carta'], {}), '(giocatore.player_state.mano.val, carta)\n', (6145, 6185), False, 'from server.deck import Deck, Card\n'), ((8871, 8896), 'threading.Timer', 'Timer', (['(5)', 'self.fine_turno'], {}), '(5, self.fine_turno)\n', (8876, 8896), False, 'from threading import Timer\n'), ((1822, 1853), 'tcp_basics.safe_recv_var', 'safe_recv_var', (['self.replicators'], {}), '(self.replicators)\n', (1835, 1853), False, 'from tcp_basics import safe_recv_var\n'), ((4159, 4190), 'tcp_basics.safe_recv_var', 'safe_recv_var', (['self.replicators'], {}), '(self.replicators)\n', (4172, 4190), False, 'from tcp_basics import safe_recv_var\n'), ((10069, 10075), 'server.deck.Card', 'Card', ([], {}), '()\n', (10073, 10075), False, 'from server.deck import Deck, Card\n'), ((10719, 10747), 'threading.Timer', 'Timer', (['(10)', 'self.fine_partita'], {}), '(10, self.fine_partita)\n', (10724, 10747), False, 'from threading import Timer\n'), ((9296, 9325), 'server.deck.Card', 'Card', (['Card.DONNA', 'Card.PICCHE'], {}), '(Card.DONNA, Card.PICCHE)\n', (9300, 9325), False, 'from server.deck import Deck, Card\n'), ((5068, 5164), 'server.deck.Card.carta_permessa', 'Card.carta_permessa', (['giocatore.player_state.mano.val', 'self.game_state.seme_primo.val', 'carta'], {}), '(giocatore.player_state.mano.val, self.game_state.\n seme_primo.val, carta)\n', (5087, 5164), False, 'from server.deck import Deck, Card\n'), ((9451, 9472), 'server.deck.Card', 'Card', (['val', 'Card.CUORI'], {}), '(val, Card.CUORI)\n', (9455, 9472), False, 'from server.deck import Deck, Card\n')]
|
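For clarity, a standalone sketch of the trick-scoring rule implemented by calcola_punteggio above (the suit/value constants are mimicked here, so the names are illustrative):

CUORI, PICCHE, DONNA = "hearts", "spades", 12

def score_trick(cards):
    points = 10  # base value of a trick
    for seme, valore in cards:
        if seme == CUORI:
            points -= valore  # each heart costs its face value
        elif seme == PICCHE and valore == DONNA:
            points -= 26  # the queen of spades costs 26
    return points

print(score_trick([("hearts", 3), ("spades", 12)]))  # 10 - 3 - 26 = -19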
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'shotgunCache/_version.py'
versioneer.versionfile_build = 'shotgunCache/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'shotgunCache' # dirname like 'myproject-1.2.0'
readme = open('README.md').read().strip()
license = open('LICENSE').read().strip()
setup(
name='shotgunCache',
version=versioneer.get_version(),
license=license,
cmdclass=versioneer.get_cmdclass(),
description='Shotgun Cache Server',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/moonbot/shotgun-cache-server',
packages=[
'shotgunCache',
],
scripts=[
'bin/shotgunCache'
],
    package_dir={'shotgunCache': 'shotgunCache'},
include_package_data=True,
install_requires=[
'rethinkdb>=2.0.0.post1',
'pyyaml>=3.11',
'ruamel.yaml>=0.8',
'pyzmq>=13.1.0',
'shotgun_api3>=3.0.19',
],
zip_safe=False,
keywords='shotgunCache',
)
|
[
"versioneer.get_version",
"versioneer.get_cmdclass"
] |
[((560, 584), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (582, 584), False, 'import versioneer\n'), ((620, 645), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (643, 645), False, 'import versioneer\n')]
|
import os
import sys
if __name__ == "__main__":
train_file = sys.argv[1]
dev_file = sys.argv[2]
test_folder = sys.argv[3]
folder = sys.argv[4]
param_file = sys.argv[5]
partition = sys.argv[6]
bash_script = os.path.join(folder, "parallel_eval_model.sh")
with open(bash_script, 'w+') as f:
f.write("#!/usr/bin/env bash \n")
for i in range(10):
test_file = os.path.join(test_folder, ('partition_' + str(i)))
folder_file = os.path.join(folder, ('partition_' + str(i)))
error_file = os.path.join(folder_file, "error")
output_file = os.path.join(folder_file, "output")
command = "sbatch --partition={} --gres=gpu:1 --error={} --output={}--mem=15GB test.sh {} {} {} {} {} \n".format(partition, error_file, output_file, train_file, dev_file, test_file, folder_file, param_file)
f.write(command + '\n')
|
[
"os.path.join"
] |
[((236, 282), 'os.path.join', 'os.path.join', (['folder', '"""parallel_eval_model.sh"""'], {}), "(folder, 'parallel_eval_model.sh')\n", (248, 282), False, 'import os\n'), ((566, 600), 'os.path.join', 'os.path.join', (['folder_file', '"""error"""'], {}), "(folder_file, 'error')\n", (578, 600), False, 'import os\n'), ((627, 662), 'os.path.join', 'os.path.join', (['folder_file', '"""output"""'], {}), "(folder_file, 'output')\n", (639, 662), False, 'import os\n')]
|
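A quick check of the command template above with placeholder values (all file names are illustrative):

cmd = ("sbatch --partition={} --gres=gpu:1 --error={} --output={} --mem=15GB "
       "test.sh {} {} {} {} {}").format(
    "gpu", "exp/partition_0/error", "exp/partition_0/output",
    "train.txt", "dev.txt", "data/partition_0", "exp/partition_0", "params.json")
print(cmd)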
import tensorflow.compat.v1 as tf
"""
I assume that each file represents a video.
All videos have minimal dimension equal to 256 and fps equal to 6.
Median video length is ~738 frames.
"""
NUM_FRAMES = 4 # must be greater than or equal to 2
SIZE = 256 # must be less than or equal to 256
class Pipeline:
def __init__(self, filenames, is_training, batch_size):
"""
Arguments:
filenames: a list of strings, paths to tfrecords files.
is_training: a boolean.
batch_size: an integer.
"""
self.is_training = is_training
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.shuffle(len(filenames)) if is_training else dataset
dataset = dataset.repeat(None if is_training else 1)
def get_subdataset(f):
dataset = tf.data.TFRecordDataset(f)
dataset = dataset.window(NUM_FRAMES, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda x: x.batch(NUM_FRAMES, drop_remainder=True))
dataset = dataset.map(self.parse_and_preprocess)
dataset = dataset.shuffle(1000) if is_training else dataset
return dataset
dataset = dataset.flat_map(get_subdataset)
dataset = dataset.shuffle(20000) if is_training else dataset
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
self.dataset = dataset
def parse_and_preprocess(self, examples):
"""
Arguments:
examples: a string tensor with shape [NUM_FRAMES].
Returns:
a uint8 tensor with shape [NUM_FRAMES, SIZE, SIZE, 2].
"""
features = {
'image': tf.FixedLenFeature([], tf.string),
'labels': tf.FixedLenFeature([], tf.string)
}
images_and_labels = []
for i in range(NUM_FRAMES):
parsed_features = tf.parse_single_example(examples[i], features)
image = tf.image.decode_jpeg(parsed_features['image'], channels=1)
labels = tf.image.decode_png(parsed_features['labels'], channels=1)
images_and_labels.append(tf.concat([image, labels], axis=2))
x = tf.stack(images_and_labels, axis=0)
# it has shape [NUM_FRAMES, h, w, 2]
if not self.is_training:
shape = tf.shape(x)
h, w = shape[1], shape[2]
offset_height = (h - SIZE) // 2
offset_width = (w - SIZE) // 2
x = tf.image.crop_to_bounding_box(x, offset_height, offset_width, SIZE, SIZE)
else:
do_flip = tf.less(tf.random.uniform([]), 0.5)
x = tf.cond(do_flip, lambda: tf.image.flip_left_right(x), lambda: x)
x = tf.image.random_crop(x, [NUM_FRAMES, SIZE, SIZE, 2])
return x
|
[
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.image.decode_png",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.image.crop_to_bounding_box",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.random.uniform",
"tensorflow.compat.v1.data.TFRecordDataset",
"tensorflow.compat.v1.image.random_crop",
"tensorflow.compat.v1.image.decode_jpeg",
"tensorflow.compat.v1.image.flip_left_right",
"tensorflow.compat.v1.data.Dataset.from_tensor_slices",
"tensorflow.compat.v1.FixedLenFeature"
] |
[((604, 649), 'tensorflow.compat.v1.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['filenames'], {}), '(filenames)\n', (638, 649), True, 'import tensorflow.compat.v1 as tf\n'), ((2255, 2290), 'tensorflow.compat.v1.stack', 'tf.stack', (['images_and_labels'], {'axis': '(0)'}), '(images_and_labels, axis=0)\n', (2263, 2290), True, 'import tensorflow.compat.v1 as tf\n'), ((843, 869), 'tensorflow.compat.v1.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['f'], {}), '(f)\n', (866, 869), True, 'import tensorflow.compat.v1 as tf\n'), ((1764, 1797), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1782, 1797), True, 'import tensorflow.compat.v1 as tf\n'), ((1821, 1854), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (1839, 1854), True, 'import tensorflow.compat.v1 as tf\n'), ((1963, 2009), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['examples[i]', 'features'], {}), '(examples[i], features)\n', (1986, 2009), True, 'import tensorflow.compat.v1 as tf\n'), ((2030, 2088), 'tensorflow.compat.v1.image.decode_jpeg', 'tf.image.decode_jpeg', (["parsed_features['image']"], {'channels': '(1)'}), "(parsed_features['image'], channels=1)\n", (2050, 2088), True, 'import tensorflow.compat.v1 as tf\n'), ((2110, 2168), 'tensorflow.compat.v1.image.decode_png', 'tf.image.decode_png', (["parsed_features['labels']"], {'channels': '(1)'}), "(parsed_features['labels'], channels=1)\n", (2129, 2168), True, 'import tensorflow.compat.v1 as tf\n'), ((2390, 2401), 'tensorflow.compat.v1.shape', 'tf.shape', (['x'], {}), '(x)\n', (2398, 2401), True, 'import tensorflow.compat.v1 as tf\n'), ((2543, 2616), 'tensorflow.compat.v1.image.crop_to_bounding_box', 'tf.image.crop_to_bounding_box', (['x', 'offset_height', 'offset_width', 'SIZE', 'SIZE'], {}), '(x, offset_height, offset_width, SIZE, SIZE)\n', (2572, 2616), True, 'import tensorflow.compat.v1 as tf\n'), ((2786, 2838), 'tensorflow.compat.v1.image.random_crop', 'tf.image.random_crop', (['x', '[NUM_FRAMES, SIZE, SIZE, 2]'], {}), '(x, [NUM_FRAMES, SIZE, SIZE, 2])\n', (2806, 2838), True, 'import tensorflow.compat.v1 as tf\n'), ((2206, 2240), 'tensorflow.compat.v1.concat', 'tf.concat', (['[image, labels]'], {'axis': '(2)'}), '([image, labels], axis=2)\n', (2215, 2240), True, 'import tensorflow.compat.v1 as tf\n'), ((2661, 2682), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (2678, 2682), True, 'import tensorflow.compat.v1 as tf\n'), ((2730, 2757), 'tensorflow.compat.v1.image.flip_left_right', 'tf.image.flip_left_right', (['x'], {}), '(x)\n', (2754, 2757), True, 'import tensorflow.compat.v1 as tf\n')]
|
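A hedged usage sketch for the Pipeline above; the tfrecord path is a placeholder, and iteration assumes TF2 eager execution under the compat.v1 import:

# pipeline = Pipeline(["videos/train-00000.tfrecord"], is_training=True, batch_size=8)
# for batch in pipeline.dataset:  # uint8 batches of shape [8, NUM_FRAMES, SIZE, SIZE, 2]
#     ...  # feed the batch to the model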
"""
This CLI plugin was auto-generated by using 'sonic-cli-gen' utility, BUT
it was manually modified to meet the PBH HLD requirements.
PBH HLD - https://github.com/Azure/SONiC/pull/773
CLI Auto-generation tool HLD - https://github.com/Azure/SONiC/pull/78
"""
import click
import json
import ipaddress
import re
import utilities_common.cli as clicommon
from show.plugins.pbh import deserialize_pbh_counters
GRE_KEY_RE = r"^(0x){1}[a-fA-F0-9]{1,8}/(0x){1}[a-fA-F0-9]{1,8}$"
ETHER_TYPE_RE = r"^(0x){1}[a-fA-F0-9]{1,4}$"
L4_DST_PORT_RE = ETHER_TYPE_RE
INNER_ETHER_TYPE_RE = ETHER_TYPE_RE
IP_PROTOCOL_RE = r"^(0x){1}[a-fA-F0-9]{1,2}$"
IPV6_NEXT_HEADER_RE = IP_PROTOCOL_RE
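# For example (illustrative values): "0x2/0xffffffff" matches GRE_KEY_RE,
# "0x88cc" matches ETHER_TYPE_RE, and "0x11" matches IP_PROTOCOL_RE.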
HASH_FIELD_VALUE_LIST = [
"INNER_IP_PROTOCOL",
"INNER_L4_DST_PORT",
"INNER_L4_SRC_PORT",
"INNER_DST_IPV4",
"INNER_SRC_IPV4",
"INNER_DST_IPV6",
"INNER_SRC_IPV6"
]
PACKET_ACTION_VALUE_LIST = [
"SET_ECMP_HASH",
"SET_LAG_HASH"
]
FLOW_COUNTER_VALUE_LIST = [
"DISABLED",
"ENABLED"
]
PBH_TABLE_CDB = "PBH_TABLE"
PBH_RULE_CDB = "PBH_RULE"
PBH_HASH_CDB = "PBH_HASH"
PBH_HASH_FIELD_CDB = "PBH_HASH_FIELD"
PBH_TABLE_INTERFACE_LIST = "interface_list"
PBH_TABLE_DESCRIPTION = "description"
PBH_RULE_PRIORITY = "priority"
PBH_RULE_GRE_KEY = "gre_key"
PBH_RULE_ETHER_TYPE = "ether_type"
PBH_RULE_IP_PROTOCOL = "ip_protocol"
PBH_RULE_IPV6_NEXT_HEADER = "ipv6_next_header"
PBH_RULE_L4_DST_PORT = "l4_dst_port"
PBH_RULE_INNER_ETHER_TYPE = "inner_ether_type"
PBH_RULE_HASH = "hash"
PBH_RULE_PACKET_ACTION = "packet_action"
PBH_RULE_FLOW_COUNTER = "flow_counter"
PBH_HASH_HASH_FIELD_LIST = "hash_field_list"
PBH_HASH_FIELD_HASH_FIELD = "hash_field"
PBH_HASH_FIELD_IP_MASK = "ip_mask"
PBH_HASH_FIELD_SEQUENCE_ID = "sequence_id"
PBH_CAPABILITIES_SDB = "PBH_CAPABILITIES"
PBH_TABLE_CAPABILITIES_KEY = "table"
PBH_RULE_CAPABILITIES_KEY = "rule"
PBH_HASH_CAPABILITIES_KEY = "hash"
PBH_HASH_FIELD_CAPABILITIES_KEY = "hash-field"
PBH_ADD = "ADD"
PBH_UPDATE = "UPDATE"
PBH_REMOVE = "REMOVE"
PBH_COUNTERS_LOCATION = "/tmp/.pbh_counters.txt"
#
# DB interface --------------------------------------------------------------------------------------------------------
#
def add_entry(db, table, key, data):
""" Add new entry in table """
cfg = db.get_config()
cfg.setdefault(table, {})
if key in cfg[table]:
raise click.ClickException("{}{}{} already exists in Config DB".format(
table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
)
)
cfg[table][key] = data
db.set_entry(table, key, data)
def update_entry(db, cap, table, key, data):
""" Update entry in table and validate configuration.
If field value in data is None, the field is deleted
"""
field_root = "{}{}{}".format(table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key))
cfg = db.get_config()
cfg.setdefault(table, {})
if key not in cfg[table]:
raise click.ClickException("{} doesn't exist in Config DB".format(field_root))
for field, value in data.items():
if field not in cap:
raise click.ClickException(
"{}{}{} doesn't have a configuration capabilities".format(
field_root, db.KEY_SEPARATOR, field
)
)
if value is None: # HDEL
if field in cfg[table][key]:
if PBH_REMOVE in cap[field]:
cfg[table][key].pop(field)
else:
raise click.ClickException(
"Failed to remove {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
else:
raise click.ClickException(
"Failed to remove {}{}{}: field doesn't exist".format(
field_root, db.KEY_SEPARATOR, field
)
)
else: # HSET
if field in cfg[table][key]:
if PBH_UPDATE not in cap[field]:
raise click.ClickException(
"Failed to update {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
else:
if PBH_ADD not in cap[field]:
raise click.ClickException(
"Failed to add {}{}{}: operation is prohibited".format(
field_root, db.KEY_SEPARATOR, field
)
)
cfg[table][key][field] = value
db.set_entry(table, key, cfg[table][key])
def del_entry(db, table, key):
""" Delete entry in table """
cfg = db.get_config()
cfg.setdefault(table, {})
if key not in cfg[table]:
raise click.ClickException("{}{}{} doesn't exist in Config DB".format(
table, db.TABLE_NAME_SEPARATOR, db.serialize_key(key)
)
)
cfg[table].pop(key)
db.set_entry(table, key, None)
def is_exist_in_db(db, table, key):
""" Check if provided hash already exists in Config DB
Args:
db: reference to Config DB
table: table to search in Config DB
key: key to search in Config DB
Returns:
bool: The return value. True for success, False otherwise
"""
if (not table) or (not key):
return False
if not db.get_entry(table, key):
return False
return True
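# For orientation, a hedged sketch of how the helpers above are used by the CLI
# commands below (db is a Config DB handle; the table, key and values are illustrative):
#   add_entry(db, "PBH_TABLE", "pbh_table1", {"interface_list": "Ethernet0"})
#   update_entry(db, cap, "PBH_TABLE", "pbh_table1", {"description": None})  # None deletes the field
#   del_entry(db, "PBH_TABLE", "pbh_table1")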
#
# PBH validators ------------------------------------------------------------------------------------------------------
#
def table_name_validator(ctx, db, table_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)):
raise click.UsageError(
"Invalid value for \"TABLE_NAME\": {} is not a valid PBH table".format(table_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_TABLE_CDB), str(table_name)):
raise click.UsageError(
"Invalid value for \"TABLE_NAME\": {} is a valid PBH table".format(table_name), ctx
)
def rule_name_validator(ctx, db, table_name, rule_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))):
raise click.UsageError(
"Invalid value for \"RULE_NAME\": {} is not a valid PBH rule".format(rule_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_RULE_CDB), (str(table_name), str(rule_name))):
raise click.UsageError(
"Invalid value for \"RULE_NAME\": {} is a valid PBH rule".format(rule_name), ctx
)
def hash_name_validator(ctx, db, hash_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)):
raise click.UsageError(
"Invalid value for \"HASH_NAME\": {} is not a valid PBH hash".format(hash_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_HASH_CDB), str(hash_name)):
raise click.UsageError(
"Invalid value for \"HASH_NAME\": {} is a valid PBH hash".format(hash_name), ctx
)
def hash_field_name_validator(ctx, db, hash_field_name, is_exist=True):
if is_exist:
if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)):
raise click.UsageError(
"Invalid value for \"HASH_FIELD_NAME\": {} is not a valid PBH hash field".format(hash_field_name), ctx
)
else:
if is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hash_field_name)):
raise click.UsageError(
"Invalid value for \"HASH_FIELD_NAME\": {} is a valid PBH hash field".format(hash_field_name), ctx
)
def interface_list_validator(ctx, db, interface_list):
for intf in interface_list.split(','):
if not (clicommon.is_valid_port(db, str(intf)) or clicommon.is_valid_portchannel(db, str(intf))):
raise click.UsageError(
"Invalid value for \"--interface-list\": {} is not a valid interface".format(intf), ctx
)
def hash_field_list_validator(ctx, db, hash_field_list):
for hfield in hash_field_list.split(','):
if not is_exist_in_db(db, str(PBH_HASH_FIELD_CDB), str(hfield)):
raise click.UsageError(
"Invalid value for \"--hash-field-list\": {} is not a valid PBH hash field".format(hfield), ctx
)
def hash_validator(ctx, db, hash):
if not is_exist_in_db(db, str(PBH_HASH_CDB), str(hash)):
raise click.UsageError(
"Invalid value for \"--hash\": {} is not a valid PBH hash".format(hash), ctx
)
def re_match(ctx, param, value, regexp):
""" Regexp validation of given PBH rule parameter
Args:
ctx: click context
param: click parameter context
value: value to validate
regexp: regular expression
Return:
str: validated value
"""
if re.match(regexp, str(value)) is None:
raise click.UsageError(
"Invalid value for {}: {} is ill-formed".format(param.get_error_hint(ctx), value), ctx
)
return value
def match_validator(ctx, param, value):
""" Check if PBH rule options are valid
Args:
ctx: click context
param: click parameter context
value: value of parameter
Returns:
str: validated parameter
"""
if value is not None:
if param.name == PBH_RULE_GRE_KEY:
return re_match(ctx, param, value, GRE_KEY_RE)
elif param.name == PBH_RULE_ETHER_TYPE:
return re_match(ctx, param, value, ETHER_TYPE_RE)
elif param.name == PBH_RULE_IP_PROTOCOL:
return re_match(ctx, param, value, IP_PROTOCOL_RE)
elif param.name == PBH_RULE_IPV6_NEXT_HEADER:
return re_match(ctx, param, value, IPV6_NEXT_HEADER_RE)
elif param.name == PBH_RULE_L4_DST_PORT:
return re_match(ctx, param, value, L4_DST_PORT_RE)
elif param.name == PBH_RULE_INNER_ETHER_TYPE:
return re_match(ctx, param, value, INNER_ETHER_TYPE_RE)
def ip_mask_validator(ctx, param, value):
""" Check if PBH hash field IP mask option is valid
Args:
ctx: click context
param: click parameter context
value: value of parameter
Returns:
str: validated parameter
"""
if value is not None:
try:
ip = ipaddress.ip_address(value)
except Exception as err:
raise click.UsageError("Invalid value for {}: {}".format(param.get_error_hint(ctx), err), ctx)
return str(ip)
def hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask):
""" Function to validate whether --hash-field value
corresponds to the --ip-mask value
Args:
ctx: click context
hash_field: native hash field value
ip_mask: ip address or None
"""
hf_no_ip = ["INNER_IP_PROTOCOL", "INNER_L4_DST_PORT", "INNER_L4_SRC_PORT"]
if ip_mask is None:
if hash_field not in hf_no_ip:
raise click.UsageError(
"Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when no \"--ip-mask\" is provided)".format(
hash_field, ", ".join(hf_no_ip)
), ctx
)
return
hf_v4 = ["INNER_DST_IPV4", "INNER_SRC_IPV4"]
hf_v6 = ["INNER_DST_IPV6", "INNER_SRC_IPV6"]
if not ((hash_field in hf_v4) or (hash_field in hf_v6)):
raise click.UsageError(
"Invalid value for \"--hash-field\": invalid choice: {}. (choose from {} when \"--ip-mask\" is provided)".format(
hash_field, ", ".join(hf_v4 + hf_v6)
), ctx
)
ip_ver = ipaddress.ip_address(ip_mask).version
if (hash_field in hf_v4) and (ip_ver != 4):
raise click.UsageError(
"Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
ip_mask, hash_field
), ctx
)
if (hash_field in hf_v6) and (ip_ver != 6):
raise click.UsageError(
"Invalid value for \"--ip-mask\": {} is not compatible with {}".format(
ip_mask, hash_field
), ctx
)
def hash_field_to_ip_mask_validator(ctx, db, hash_field_name, hash_field, ip_mask, is_update=True):
""" Function to validate --hash-field and --ip-mask
correspondence, during add/update flow
Args:
ctx: click context
db: reference to Config DB
hash_field_name: name of the hash-field
hash_field: native hash field value
ip_mask: ip address
is_update: update flow flag
"""
if not is_update:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
return
if (hash_field is None) and (ip_mask is None):
return
if (hash_field is not None) and (ip_mask is not None):
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
return
hf_obj = db.get_entry(str(PBH_HASH_FIELD_CDB), str(hash_field_name))
if not hf_obj:
raise click.ClickException(
"Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH hash field".format(
hash_field_name
)
)
if hash_field is None:
if PBH_HASH_FIELD_HASH_FIELD not in hf_obj:
raise click.ClickException(
"Failed to validate \"--hash-field\" and \"--ip-mask\" correspondence: {} is not a valid PBH field".format(
PBH_HASH_FIELD_HASH_FIELD
)
)
hash_field_to_ip_mask_correspondence_validator(ctx, hf_obj[PBH_HASH_FIELD_HASH_FIELD], ip_mask)
else:
if PBH_HASH_FIELD_IP_MASK in hf_obj:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, hf_obj[PBH_HASH_FIELD_IP_MASK])
else:
hash_field_to_ip_mask_correspondence_validator(ctx, hash_field, ip_mask)
#
# PBH helpers ---------------------------------------------------------------------------------------------------------
#
def serialize_pbh_counters(obj):
""" Helper that performs PBH counters serialization.
in = {
('pbh_table1', 'pbh_rule1'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'},
...
('pbh_tableN', 'pbh_ruleN'): {'SAI_ACL_COUNTER_ATTR_BYTES': '0', 'SAI_ACL_COUNTER_ATTR_PACKETS': '0'}
}
out = [
{
"key": ["pbh_table1", "<KEY>"],
"value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
},
...
{
"key": ["pbh_tableN", "<KEY>"],
"value": {"SAI_ACL_COUNTER_ATTR_BYTES": "0", "SAI_ACL_COUNTER_ATTR_PACKETS": "0"}
}
]
Args:
obj: counters dict.
"""
def remap_keys(obj):
return [{'key': k, 'value': v} for k, v in obj.items()]
try:
with open(PBH_COUNTERS_LOCATION, 'w') as f:
json.dump(remap_keys(obj), f)
except IOError as err:
pass
def update_pbh_counters(table_name, rule_name):
""" Helper that performs PBH counters update """
pbh_counters = deserialize_pbh_counters()
key_to_del = table_name, rule_name
if key_to_del in pbh_counters:
del pbh_counters[key_to_del]
serialize_pbh_counters(pbh_counters)
def pbh_capabilities_query(db, key):
""" Query PBH capabilities """
sdb_id = db.STATE_DB
sdb_sep = db.get_db_separator(sdb_id)
cap_map = db.get_all(sdb_id, "{}{}{}".format(str(PBH_CAPABILITIES_SDB), sdb_sep, str(key)))
if not cap_map:
return None
return cap_map
def pbh_match_count(db, table, key, data):
""" Count PBH rule match fields """
field_map = db.get_entry(table, key)
match_total = 0
match_count = 0
if PBH_RULE_GRE_KEY in field_map:
if PBH_RULE_GRE_KEY in data:
match_count += 1
match_total += 1
if PBH_RULE_ETHER_TYPE in field_map:
if PBH_RULE_ETHER_TYPE in data:
match_count += 1
match_total += 1
if PBH_RULE_IP_PROTOCOL in field_map:
if PBH_RULE_IP_PROTOCOL in data:
match_count += 1
match_total += 1
if PBH_RULE_IPV6_NEXT_HEADER in field_map:
if PBH_RULE_IPV6_NEXT_HEADER in data:
match_count += 1
match_total += 1
if PBH_RULE_L4_DST_PORT in field_map:
if PBH_RULE_L4_DST_PORT in data:
match_count += 1
match_total += 1
if PBH_RULE_INNER_ETHER_TYPE in field_map:
if PBH_RULE_INNER_ETHER_TYPE in data:
match_count += 1
match_total += 1
return match_total, match_count
def exit_with_error(*args, **kwargs):
""" Print a message and abort CLI """
click.secho(*args, **kwargs)
raise click.Abort()
#
# PBH CLI -------------------------------------------------------------------------------------------------------------
#
@click.group(
name='pbh',
cls=clicommon.AliasedGroup
)
def PBH():
""" Configure PBH (Policy based hashing) feature """
pass
#
# PBH hash field ------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="hash-field",
cls=clicommon.AliasedGroup
)
def PBH_HASH_FIELD():
""" Configure PBH hash field """
pass
@PBH_HASH_FIELD.command(name="add")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@click.option(
"--hash-field",
help="Configures native hash field for this hash field",
required=True,
type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
"--ip-mask",
help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6""",
callback=ip_mask_validator
)
@click.option(
"--sequence-id",
help="Configures in which order the fields are hashed and defines which fields should be associative",
required=True,
type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_add(db, hash_field_name, hash_field, ip_mask, sequence_id):
""" Add object to PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name, False)
hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask, False)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
data = {}
if hash_field is not None:
data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
if ip_mask is not None:
data[PBH_HASH_FIELD_IP_MASK] = ip_mask
if sequence_id is not None:
data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
if not data:
exit_with_error("Error: Failed to add PBH hash field: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="update")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@click.option(
"--hash-field",
help="Configures native hash field for this hash field",
type=click.Choice(HASH_FIELD_VALUE_LIST)
)
@click.option(
"--ip-mask",
help="""Configures IPv4/IPv6 address mask for this hash field, required when the value of --hash-field is - INNER_DST_IPV4 or INNER_SRC_IPV4 or INNER_DST_IPV6 or INNER_SRC_IPV6 """,
callback=ip_mask_validator
)
@click.option(
"--sequence-id",
help="Configures in which order the fields are hashed and defines which fields should be associative",
type=click.INT
)
@clicommon.pass_db
def PBH_HASH_FIELD_update(db, hash_field_name, hash_field, ip_mask, sequence_id):
""" Update object in PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)
hash_field_to_ip_mask_validator(ctx, db.cfgdb_pipe, hash_field_name, hash_field, ip_mask)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
data = {}
if hash_field is not None:
data[PBH_HASH_FIELD_HASH_FIELD] = hash_field
if ip_mask is not None:
data[PBH_HASH_FIELD_IP_MASK] = ip_mask
if sequence_id is not None:
data[PBH_HASH_FIELD_SEQUENCE_ID] = sequence_id
if not data:
exit_with_error("Error: Failed to update PBH hash field: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_HASH_FIELD_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH hash field capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH_FIELD.command(name="delete")
@click.argument(
"hash-field-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_HASH_FIELD_delete(db, hash_field_name):
""" Delete object from PBH_HASH_FIELD table """
ctx = click.get_current_context()
hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)
table = str(PBH_HASH_FIELD_CDB)
key = str(hash_field_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH hash ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="hash",
cls=clicommon.AliasedGroup
)
def PBH_HASH():
""" Configure PBH hash """
pass
@PBH_HASH.command(name="add")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@click.option(
"--hash-field-list",
help="The list of hash fields to apply with this hash",
required=True
)
@clicommon.pass_db
def PBH_HASH_add(db, hash_name, hash_field_list):
""" Add object to PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name, False)
table = str(PBH_HASH_CDB)
key = str(hash_name)
data = {}
if hash_field_list is not None:
hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")
if not data:
exit_with_error("Error: Failed to add PBH hash: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="update")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@click.option(
"--hash-field-list",
help="The list of hash fields to apply with this hash"
)
@clicommon.pass_db
def PBH_HASH_update(db, hash_name, hash_field_list):
""" Update object in PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name)
table = str(PBH_HASH_CDB)
key = str(hash_name)
data = {}
if hash_field_list is not None:
hash_field_list_validator(ctx, db.cfgdb_pipe, hash_field_list)
data[PBH_HASH_HASH_FIELD_LIST] = hash_field_list.split(",")
if not data:
exit_with_error("Error: Failed to update PBH hash: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_HASH_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH hash capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_HASH.command(name="delete")
@click.argument(
"hash-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_HASH_delete(db, hash_name):
""" Delete object from PBH_HASH table """
ctx = click.get_current_context()
hash_name_validator(ctx, db.cfgdb_pipe, hash_name)
table = str(PBH_HASH_CDB)
key = str(hash_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH rule ------------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="rule",
cls=clicommon.AliasedGroup
)
def PBH_RULE():
""" Configure PBH rule """
pass
@PBH_RULE.command(name="add")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Configures priority for this rule",
required=True,
type=click.INT
)
@click.option(
"--gre-key",
help="Configures packet match for this rule: GRE key (value/mask)",
callback=match_validator
)
@click.option(
"--ether-type",
help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--ip-protocol",
help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--ipv6-next-header",
help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--l4-dst-port",
help="Configures packet match for this rule: L4 destination port",
callback=match_validator
)
@click.option(
"--inner-ether-type",
help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--hash",
help="The hash to apply with this rule",
required=True
)
@click.option(
"--packet-action",
help="Configures packet action for this rule",
type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
"--flow-counter",
help="Enables/Disables packet/byte counter for this rule",
type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_add(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Add object to PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name, False)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
match_count = 0
if priority is not None:
data[PBH_RULE_PRIORITY] = priority
if gre_key is not None:
data[PBH_RULE_GRE_KEY] = gre_key
match_count += 1
if ether_type is not None:
data[PBH_RULE_ETHER_TYPE] = ether_type
match_count += 1
if ip_protocol is not None:
data[PBH_RULE_IP_PROTOCOL] = ip_protocol
match_count += 1
if ipv6_next_header is not None:
data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
match_count += 1
if l4_dst_port is not None:
data[PBH_RULE_L4_DST_PORT] = l4_dst_port
match_count += 1
if inner_ether_type is not None:
data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
match_count += 1
if hash is not None:
hash_validator(ctx, db.cfgdb_pipe, hash)
data[PBH_RULE_HASH] = hash
if packet_action is not None:
data[PBH_RULE_PACKET_ACTION] = packet_action
if flow_counter is not None:
data[PBH_RULE_FLOW_COUNTER] = flow_counter
if not data:
exit_with_error("Error: Failed to add PBH rule: options are not provided", fg="red")
if match_count == 0:
exit_with_error("Error: Failed to add PBH rule: match options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.group(
name="update",
cls=clicommon.AliasedGroup
)
def PBH_RULE_update():
""" Update object in PBH_RULE table """
pass
@PBH_RULE_update.group(
name="field",
cls=clicommon.AliasedGroup
)
def PBH_RULE_update_field():
""" Update object field in PBH_RULE table """
pass
@PBH_RULE_update_field.command(name="set")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Configures priority for this rule",
type=click.INT
)
@click.option(
"--gre-key",
help="Configures packet match for this rule: GRE key (value/mask)",
callback=match_validator
)
@click.option(
"--ether-type",
help="Configures packet match for this rule: EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--ip-protocol",
help="Configures packet match for this rule: IP protocol (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--ipv6-next-header",
help="Configures packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
callback=match_validator
)
@click.option(
"--l4-dst-port",
help="Configures packet match for this rule: L4 destination port",
callback=match_validator
)
@click.option(
"--inner-ether-type",
help="Configures packet match for this rule: inner EtherType (IANA Ethertypes)",
callback=match_validator
)
@click.option(
"--hash",
help="The hash to apply with this rule"
)
@click.option(
"--packet-action",
help="Configures packet action for this rule",
type=click.Choice(PACKET_ACTION_VALUE_LIST)
)
@click.option(
"--flow-counter",
help="Enables/Disables packet/byte counter for this rule",
type=click.Choice(FLOW_COUNTER_VALUE_LIST)
)
@clicommon.pass_db
def PBH_RULE_update_field_set(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Set object field in PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
if priority is not None:
data[PBH_RULE_PRIORITY] = priority
if gre_key is not None:
data[PBH_RULE_GRE_KEY] = gre_key
if ether_type is not None:
data[PBH_RULE_ETHER_TYPE] = ether_type
if ip_protocol is not None:
data[PBH_RULE_IP_PROTOCOL] = ip_protocol
if ipv6_next_header is not None:
data[PBH_RULE_IPV6_NEXT_HEADER] = ipv6_next_header
if l4_dst_port is not None:
data[PBH_RULE_L4_DST_PORT] = l4_dst_port
if inner_ether_type is not None:
data[PBH_RULE_INNER_ETHER_TYPE] = inner_ether_type
if hash is not None:
hash_validator(ctx, db.cfgdb_pipe, hash)
data[PBH_RULE_HASH] = hash
if packet_action is not None:
data[PBH_RULE_PACKET_ACTION] = packet_action
if flow_counter is not None:
data[PBH_RULE_FLOW_COUNTER] = flow_counter
if not data:
exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
if data.get(PBH_RULE_FLOW_COUNTER, "") == "DISABLED":
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE_update_field.command(name="del")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@click.option(
"--priority",
help="Deletes priority for this rule",
is_flag=True
)
@click.option(
"--gre-key",
help="Deletes packet match for this rule: GRE key (value/mask)",
is_flag=True
)
@click.option(
"--ether-type",
help="Deletes packet match for this rule: EtherType (IANA Ethertypes)",
is_flag=True
)
@click.option(
"--ip-protocol",
help="Deletes packet match for this rule: IP protocol (IANA Protocol Numbers)",
is_flag=True
)
@click.option(
"--ipv6-next-header",
help="Deletes packet match for this rule: IPv6 Next header (IANA Protocol Numbers)",
is_flag=True
)
@click.option(
"--l4-dst-port",
help="Deletes packet match for this rule: L4 destination port",
is_flag=True
)
@click.option(
"--inner-ether-type",
help="Deletes packet match for this rule: inner EtherType (IANA Ethertypes)",
is_flag=True
)
@click.option(
"--hash",
help="Deletes hash for this rule",
is_flag=True
)
@click.option(
"--packet-action",
help="Deletes packet action for this rule",
is_flag=True
)
@click.option(
"--flow-counter",
help="Deletes packet/byte counter for this rule",
is_flag=True
)
@clicommon.pass_db
def PBH_RULE_update_field_del(
db,
table_name,
rule_name,
priority,
gre_key,
ether_type,
ip_protocol,
ipv6_next_header,
l4_dst_port,
inner_ether_type,
hash,
packet_action,
flow_counter
):
""" Delete object field from PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
data = {}
if priority:
data[PBH_RULE_PRIORITY] = None
if gre_key:
data[PBH_RULE_GRE_KEY] = None
if ether_type:
data[PBH_RULE_ETHER_TYPE] = None
if ip_protocol:
data[PBH_RULE_IP_PROTOCOL] = None
if ipv6_next_header:
data[PBH_RULE_IPV6_NEXT_HEADER] = None
if l4_dst_port:
data[PBH_RULE_L4_DST_PORT] = None
if inner_ether_type:
data[PBH_RULE_INNER_ETHER_TYPE] = None
if hash:
data[PBH_RULE_HASH] = None
if packet_action:
data[PBH_RULE_PACKET_ACTION] = None
if flow_counter:
data[PBH_RULE_FLOW_COUNTER] = None
if not data:
exit_with_error("Error: Failed to update PBH rule: options are not provided", fg="red")
match_total, match_count = pbh_match_count(db.cfgdb_pipe, table, key, data)
if match_count >= match_total:
exit_with_error("Error: Failed to update PBH rule: match options are required", fg="red")
cap = pbh_capabilities_query(db.db, PBH_RULE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH rule capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
if flow_counter:
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_RULE.command(name="delete")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.argument(
"rule-name",
nargs=1,
required=True
)
@clicommon.pass_db
def PBH_RULE_delete(db, table_name, rule_name):
""" Delete object from PBH_RULE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
rule_name_validator(ctx, db.cfgdb_pipe, table_name, rule_name)
table = str(PBH_RULE_CDB)
key = (str(table_name), str(rule_name))
try:
del_entry(db.cfgdb_pipe, table, key)
update_pbh_counters(table_name, rule_name)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH table -----------------------------------------------------------------------------------------------------------
#
@PBH.group(
name="table",
cls=clicommon.AliasedGroup
)
def PBH_TABLE():
""" Configure PBH table"""
pass
@PBH_TABLE.command(name="add")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.option(
"--interface-list",
help="Interfaces to which this table is applied",
required=True
)
@click.option(
"--description",
help="The description of this table",
required=True
)
@clicommon.pass_db
def PBH_TABLE_add(db, table_name, interface_list, description):
""" Add object to PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name, False)
table = str(PBH_TABLE_CDB)
key = str(table_name)
data = {}
if interface_list is not None:
interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
if description is not None:
data[PBH_TABLE_DESCRIPTION] = description
if not data:
exit_with_error("Error: Failed to add PBH table: options are not provided", fg="red")
try:
add_entry(db.cfgdb_pipe, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="update")
@click.argument(
"table-name",
nargs=1,
required=True
)
@click.option(
"--interface-list",
help="Interfaces to which this table is applied"
)
@click.option(
"--description",
help="The description of this table",
)
@clicommon.pass_db
def PBH_TABLE_update(db, table_name, interface_list, description):
""" Update object in PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
table = str(PBH_TABLE_CDB)
key = str(table_name)
data = {}
if interface_list is not None:
interface_list_validator(ctx, db.cfgdb_pipe, interface_list)
data[PBH_TABLE_INTERFACE_LIST] = interface_list.split(",")
if description is not None:
data[PBH_TABLE_DESCRIPTION] = description
if not data:
exit_with_error("Error: Failed to update PBH table: options are not provided", fg="red")
cap = pbh_capabilities_query(db.db, PBH_TABLE_CAPABILITIES_KEY)
if cap is None:
exit_with_error("Error: Failed to query PBH table capabilities: configuration is not available", fg="red")
try:
update_entry(db.cfgdb_pipe, cap, table, key, data)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
@PBH_TABLE.command(name="delete")
@click.argument(
"table-name",
nargs=1,
required=True,
)
@clicommon.pass_db
def PBH_TABLE_delete(db, table_name):
""" Delete object from PBH_TABLE table """
ctx = click.get_current_context()
table_name_validator(ctx, db.cfgdb_pipe, table_name)
table = str(PBH_TABLE_CDB)
key = str(table_name)
try:
del_entry(db.cfgdb_pipe, table, key)
except Exception as err:
exit_with_error("Error: {}".format(err), fg="red")
#
# PBH plugin ----------------------------------------------------------------------------------------------------------
#
def register(cli):
cli_node = PBH
if cli_node.name in cli.commands:
raise Exception("{} already exists in CLI".format(cli_node.name))
cli.add_command(PBH)
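# Minimal sketch (added) of how a root CLI could pick this plugin up; the real
# loader lives elsewhere in sonic-utilities and may differ. Wrapped in a
# function so importing this module has no side effects.
def _register_example():
    import click
    @click.group()
    def cli():
        """ Hypothetical root config CLI """
        pass
    register(cli)  # adds the 'pbh' command group to the root CLI
    return cli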
|
[
"click.argument",
"click.get_current_context",
"show.plugins.pbh.deserialize_pbh_counters",
"click.option",
"ipaddress.ip_address",
"click.Choice",
"click.group",
"click.secho",
"click.Abort"
] |
|
import sklearn
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.externals.six import StringIO  # removed in scikit-learn >= 0.23; use "from io import StringIO" there
import pydot
# In[13]:
df = load_breast_cancer()
df = pd.DataFrame(np.c_[df['data'], df['target']],
columns= np.append(df['feature_names'], ['target']))
for col in df.columns:
print(col)
print(df.head())
total_rows=len(df.axes[0])
print(total_rows)
# Outlier detection and visualization
# In[3]:
histograms = df.hist()
df.hist("target")
# In[2]:
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size = .2)
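# Optional refinement (added, not in the original): a stratified split keeps the
# benign/malignant ratio identical in train and test; stored under new names so
# the original experiment above is left untouched.
X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(
    X, y, random_state=0, test_size=.2, stratify=y)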
# In[3]:
#PCA with scikit learn
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
pca = PCA().fit(X_train)
X_train_pca = pca.transform(X_train)  # keep the transformed data, not the fitted estimator itself
X_test_pca = pca.transform(X_test)
explained_variance = pca.explained_variance_ratio_
# In[4]:
plot = 1
# plot explained variance
if plot == 1:
plt.figure()
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('Number of Components')
plt.ylabel('Variance (%)') #for each component
plt.title('Breast Cancer data set Explained Variance')
plt.savefig('foo.png')
plt.show()
# In[5]:
print(np.cumsum(pca.explained_variance_ratio_))
# Selecting the amount of principle components
# In[6]:
# keep 10 principal components
pca = PCA(n_components=10)
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)  # transform only: refitting on the test set would yield different components
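# Aside (added for illustration): instead of hard-coding 10 components,
# scikit-learn can pick the smallest count that reaches a variance target.
pca_95 = PCA(n_components=0.95).fit(X_train)
print(pca_95.n_components_)  # components needed for 95% explained variance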
# In[7]:
# baseline linear model
reg = LogisticRegression(random_state=0).fit(X_train, y_train)
prediction = reg.predict(X_test)
score = reg.score(X_test,y_test)
print(score)
reg_pca = LogisticRegression(random_state=0).fit(X_train_pca, y_train)
score_pca = reg_pca.score(X_test_pca,y_test)
print(score_pca)
# In[8]:
LPM = linear_model.LinearRegression()
LPM = LPM.fit(X_train, y_train)
LPM.coef_
predictionLPM = LPM.predict(X_test)
scoreLPM = LPM.score(X_test, y_test)
print(scoreLPM)
LPMpca = linear_model.LinearRegression()
LPMpca = LPMpca.fit(X_train_pca, y_train)
LPMpca.coef_
predictionLPM = LPMpca.predict(X_test_pca)
scoreLPMpca = LPMpca.score(X_test_pca, y_test)
print(scoreLPMpca)
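# Note (added): LinearRegression.score reports R^2, not accuracy. To compare the
# linear probability model with the classifiers, threshold its predictions at 0.5.
lpm_accuracy = np.mean((predictionLPM >= 0.5).astype(int) == y_test)
print(lpm_accuracy)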
# In[9]:
#baseline decision tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)
tree.export_graphviz(clf, out_file='tree.dot')
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontree.pdf")
predictionBaseline = clf.predict(X_test)
scoreclf = clf.score(X_test, y_test)
#print(classification_report(y_test,predictionBaseline,target_names=['malignant', 'benign']))
print(scoreclf)
#baseline decision tree on PCA features
clfPca = tree.DecisionTreeClassifier()
clfPca = clfPca.fit(X_train_pca, y_train)
tree.export_graphviz(clfPca, out_file='treepca.dot')
dot_data = StringIO()
tree.export_graphviz(clfPca, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph[0].write_pdf("decisiontreepca.pdf")
predictionBaselinePca = clfPca.predict(X_test_pca)
scoreclf = clfPca.score(X_test_pca, y_test)
#print(classification_report(y_test,predictionBaselinePca,target_names=['malignant', 'benign']))
print(scoreclf)
# In[18]:
# KNN classifier on original data
knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')
knn.fit(X_train, y_train)
score = knn.score(X_test,y_test)
print(score)
knn.fit(X_train_pca, y_train)
score_pca = knn.score(X_test_pca,y_test)
print(score_pca)
# In[14]:
# Decision tree with Gridsearch
clf = tree.DecisionTreeClassifier()
#create a dictionary of all values we want to test for max_depth
param_grid = {'max_depth': np.arange(1, 50)}
#use gridsearch to test all values for max_depth
clf_gscv = GridSearchCV(clf, param_grid, cv=10)
#fit model to data
clf_gscv.fit(X_train_pca, y_train)
#check top performing max_depth value
print(clf_gscv.best_params_)
#check mean score for the top performing value of max_depth
print(clf_gscv.best_score_)
# In[15]:
#KNN with PCA or without PCA and Gridsearch
knn2 = KNeighborsClassifier()
#create a dictionary of all values we want to test for n_neighbors
param_grid = {'n_neighbors': np.arange(1, 50)}
#use gridsearch to test all values for n_neighbors
knn_gscv = GridSearchCV(knn2, param_grid, cv=5)
#fit model to data
knn_gscv.fit(X_train_pca, y_train)
#check top performing n_neighbors value
print(knn_gscv.best_params_)
#check mean score for the top performing value of n_neighbors
print(knn_gscv.best_score_)
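# Added sketch: Pipeline is imported above but never used; one plausible use is
# to chain scaling, PCA and KNN so every CV fold re-fits the preprocessing and
# no test information leaks into it. Step/parameter names here are illustrative.
pipe = Pipeline([
    ('scale', StandardScaler()),
    ('pca', PCA(n_components=10)),
    ('knn', KNeighborsClassifier()),
])
pipe_gscv = GridSearchCV(pipe, {'knn__n_neighbors': np.arange(1, 50)}, cv=5)
pipe_gscv.fit(X_train, y_train)
print(pipe_gscv.best_params_, pipe_gscv.best_score_)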
# In[32]:
## Plot results from gridsearches
def plot_cv_results(cv_results, param_x, metric='mean_test_score'):
"""
cv_results - cv_results_ attribute of a GridSearchCV instance (or similar)
param_x - name of grid search parameter to plot on x axis
param_z - name of grid search parameter to plot by line color
"""
cv_results = pd.DataFrame(cv_results)
col_x = 'param_' + param_x
fig, ax = plt.subplots(1, 1, figsize=(11, 8))
sns.pointplot(x= col_x, y=metric, data=cv_results, ci=95, ax = ax)
ax.set_title("CV Grid Search Results")
ax.set_xlabel(param_x)
ax.set_ylabel(metric)
return fig
# In[34]:
# Single function to make plot for each Gridsearch
fig = plot_cv_results(knn_gscv.cv_results_, 'n_neighbors')
# In[59]:
#10 fold cross validation with PCA applied
k_fold = KFold(10)
X_pca = pca.fit_transform(X)  # note: refits PCA on the full, unscaled data set
classifiers = []
for k, (train, test) in enumerate(k_fold.split(X_pca, y)):
clfk = tree.DecisionTreeClassifier()
clfk = clfk.fit(X_pca[train], y[train])
predictionBaseline = clfk.predict(X_pca[test])
print ("Classification report for %d fold", k)
print(classification_report(y[test],predictionBaseline,target_names=['malignant', 'benign']))
classifiers.append(clfk)
votes = []
# In[60]:
# Construct ensemble based on majority vote
for classifier in classifiers:
classifier.fit(X_train_pca,y_train)
votes.append(classifier.predict(X_test_pca))
ensembleVotes = np.zeros((len(y_test),1), dtype=int)
predictionEnsemble = np.zeros((len(y_test),1), dtype=int)
for prediction in votes:
    for idx in range(0, len(prediction)):
        ensembleVotes[idx] += prediction[idx]
# apply the majority threshold once, after all votes are tallied
for idx in range(0, len(y_test)):
    if ensembleVotes[idx] > 5:
        predictionEnsemble[idx] = 1
print("ensemble")
print(classification_report(y_test,predictionEnsemble,target_names=['malignant', 'benign']))
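# Equivalent vectorized tally (added for illustration): the same majority rule
# as the loops above, expressed with numpy.
votes_arr = np.asarray(votes)  # shape: (n_classifiers, n_samples)
majority = (votes_arr.sum(axis=0) > len(votes) // 2).astype(int)
print(np.array_equal(majority, predictionEnsemble.ravel()))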
# In[ ]:
## Regularization
# In[15]:
# Ridge regression
param_grid = {'alpha': np.arange(start=0, stop=100, step=10)}
regridge = linear_model.Ridge()
#use gridsearch to test all values for n_neighbors
reg_gscv = GridSearchCV(regridge, param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train_pca, y_train)
fig = plot_cv_results(reg_gscv.cv_results_, 'alpha')
# In[19]:
# Logistic regression
logitl2 = linear_model.LogisticRegression(penalty='l2', C = 1.0)
param_grid = {'C': np.arange(.1, .9, step = .1)}
reg_gscv = GridSearchCV(logitl2 , param_grid, cv=10, return_train_score = True)
reg_gscv.fit(X_train, y_train)
fig = plot_cv_results(reg_gscv.cv_results_, 'C')
print (reg_gscv.best_score_, reg_gscv.best_params_)
# In[17]:
## decision tree regularization
parameters = {'max_depth':range(1,40)}
clf = GridSearchCV(tree.DecisionTreeClassifier(), parameters, n_jobs=4)
clf.fit(X_train_pca, y_train)
tree_model = clf.best_estimator_
print (clf.best_score_, clf.best_params_)
|
[
"matplotlib.pyplot.title",
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"sklearn.externals.six.StringIO",
"numpy.arange",
"pandas.DataFrame",
"seaborn.pointplot",
"numpy.cumsum",
"numpy.append",
"matplotlib.pyplot.subplots",
"sklearn.linear_model.Ridge",
"matplotlib.pyplot.show",
"sklearn.datasets.load_breast_cancer",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.KFold",
"sklearn.tree.export_graphviz",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
|
import os
import pyautogui
import time
import keyboard
import win32api, win32con
# This program was written in a few hours; its purpose is to set the computer's power-management mode.
# Disclaimer: This is an awful way to do it (even the cmds); a better way would be NVAPI, but I don't want to use it
## 1200 150 , 1220 300 # To open control panel
## 776 419, 240 240 240 To scroll down
## 750 419 To click the menu
## 556 431 , 120 185 4 To Check if menu is opened
## Optimal : X same, 430
## Adaptive(Balanced): , 450
## Performance: , 470
def PowerSetting(x):
OpenCtrlPanel()
FindPowerSettings(x)
def ClickOnPosition(x,y):
win32api.SetCursorPos((x,y))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.001)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def OpenCtrlPanel():
ChangeinY = 140
ChangeinX = 20
win32api.SetCursorPos((1200,150))
x, y = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,0,0)
time.sleep(0.01)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,0,0)
time.sleep(0.011)
win32api.SetCursorPos((x+ChangeinX,y+ChangeinY))
time.sleep(0.011)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(0.011)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def FindPowerSettings(x):
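    # Busy-wait until the expected pixel colors (noted in the header comments)
    # appear, then click the chosen power mode. The bare except/pass blocks
    # swallow any error (e.g. a failed pixel read) and abort silently.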
try:
Found = False
while(Found == False):
if pyautogui.pixel(776,419)[0] == 240 and pyautogui.pixel(776,419)[1] == 240 and pyautogui.pixel(776,419)[2] == 240:
Found = True
ClickOnPosition(776,419)
time.sleep(0.1)
ClickOnPosition(750,419)
try:
Found2 = False
while(Found2 == False):
if pyautogui.pixel(556,431)[0] == 120 and pyautogui.pixel(556,431)[1] == 185 and pyautogui.pixel(556,431)[2] == 4:
Found2 = True
if(x == 1): ClickOnPosition(750, 430)
elif (x == 2): ClickOnPosition(750, 450)
elif (x==3): ClickOnPosition(750, 470)
time.sleep(1)
ClickOnPosition(1919, 0)
time.sleep(1)
keyboard.press_and_release("enter")
else: time.sleep(0.01)
except:
pass
else: time.sleep(0.01)
except:
pass
On = True
while (On == True):
e = input("Type s for Saving, b for Balanced, p for Performance, N to cancel: ")
if(e == 's' or e == 'S'):
os.startfile('PowerSaver.cmd')
PowerSetting(1)
elif (e== 'b' or e == 'B'):
os.startfile('Balanced.cmd')
PowerSetting(2)
elif (e=='p' or e == 'P'):
os.startfile('Performance.cmd')
PowerSetting(3)
elif (e=='n' or e == 'N'): On = False
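# Note (editor's assumption): the hard-coded screen coordinates above map
# PowerSetting(1/2/3) to the "Optimal", "Adaptive (Balanced)" and "Performance"
# menu entries, so the script only works at the screen resolution those
# coordinates were recorded for.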
|
[
"keyboard.press_and_release",
"win32api.SetCursorPos",
"time.sleep",
"pyautogui.pixel",
"win32api.mouse_event",
"os.startfile",
"win32api.GetCursorPos"
] |
[((706, 735), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['(x, y)'], {}), '((x, y))\n', (727, 735), False, 'import win32api, win32con\n'), ((740, 797), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTDOWN', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n', (760, 797), False, 'import win32api, win32con\n'), ((801, 818), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (811, 818), False, 'import time\n'), ((824, 879), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTUP', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n', (844, 879), False, 'import win32api, win32con\n'), ((948, 982), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['(1200, 150)'], {}), '((1200, 150))\n', (969, 982), False, 'import win32api, win32con\n'), ((994, 1017), 'win32api.GetCursorPos', 'win32api.GetCursorPos', ([], {}), '()\n', (1015, 1017), False, 'import win32api, win32con\n'), ((1025, 1083), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_RIGHTDOWN', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0)\n', (1045, 1083), False, 'import win32api, win32con\n'), ((1087, 1103), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1097, 1103), False, 'import time\n'), ((1109, 1165), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_RIGHTUP', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)\n', (1129, 1165), False, 'import win32api, win32con\n'), ((1169, 1186), 'time.sleep', 'time.sleep', (['(0.011)'], {}), '(0.011)\n', (1179, 1186), False, 'import time\n'), ((1192, 1245), 'win32api.SetCursorPos', 'win32api.SetCursorPos', (['(x + ChangeinX, y + ChangeinY)'], {}), '((x + ChangeinX, y + ChangeinY))\n', (1213, 1245), False, 'import win32api, win32con\n'), ((1246, 1263), 'time.sleep', 'time.sleep', (['(0.011)'], {}), '(0.011)\n', (1256, 1263), False, 'import time\n'), ((1269, 1326), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTDOWN', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n', (1289, 1326), False, 'import win32api, win32con\n'), ((1330, 1347), 'time.sleep', 'time.sleep', (['(0.011)'], {}), '(0.011)\n', (1340, 1347), False, 'import time\n'), ((1353, 1408), 'win32api.mouse_event', 'win32api.mouse_event', (['win32con.MOUSEEVENTF_LEFTUP', '(0)', '(0)'], {}), '(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n', (1373, 1408), False, 'import win32api, win32con\n'), ((2861, 2891), 'os.startfile', 'os.startfile', (['"""PowerSaver.cmd"""'], {}), "('PowerSaver.cmd')\n", (2873, 2891), False, 'import os\n'), ((2960, 2988), 'os.startfile', 'os.startfile', (['"""Balanced.cmd"""'], {}), "('Balanced.cmd')\n", (2972, 2988), False, 'import os\n'), ((1772, 1787), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1782, 1787), False, 'import time\n'), ((2650, 2666), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2660, 2666), False, 'import time\n'), ((3056, 3087), 'os.startfile', 'os.startfile', (['"""Performance.cmd"""'], {}), "('Performance.cmd')\n", (3068, 3087), False, 'import os\n'), ((1569, 1594), 'pyautogui.pixel', 'pyautogui.pixel', (['(776)', '(419)'], {}), '(776, 419)\n', (1584, 1594), False, 'import pyautogui\n'), ((1608, 1633), 'pyautogui.pixel', 'pyautogui.pixel', (['(776)', '(419)'], {}), '(776, 419)\n', (1623, 1633), False, 'import pyautogui\n'), ((1647, 1672), 'pyautogui.pixel', 'pyautogui.pixel', (['(776)', '(419)'], {}), '(776, 419)\n', (1662, 1672), False, 'import pyautogui\n'), ((2356, 2369), 'time.sleep', 
'time.sleep', (['(1)'], {}), '(1)\n', (2366, 2369), False, 'import time\n'), ((2453, 2466), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2463, 2466), False, 'import time\n'), ((2496, 2531), 'keyboard.press_and_release', 'keyboard.press_and_release', (['"""enter"""'], {}), "('enter')\n", (2522, 2531), False, 'import keyboard\n'), ((2563, 2579), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2573, 2579), False, 'import time\n'), ((1961, 1986), 'pyautogui.pixel', 'pyautogui.pixel', (['(556)', '(431)'], {}), '(556, 431)\n', (1976, 1986), False, 'import pyautogui\n'), ((2000, 2025), 'pyautogui.pixel', 'pyautogui.pixel', (['(556)', '(431)'], {}), '(556, 431)\n', (2015, 2025), False, 'import pyautogui\n'), ((2039, 2064), 'pyautogui.pixel', 'pyautogui.pixel', (['(556)', '(431)'], {}), '(556, 431)\n', (2054, 2064), False, 'import pyautogui\n')]
|
import boto3
import datetime
import argparse
import logging
import sys
from aws_interfaces.s3_interface import S3Interface
from boto3.dynamodb.conditions import Key, Attr
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--region", action="store", required=True, dest="region", help="the region for uploading")
parser.add_argument("-tb", "--tableName", action="store", required=True, dest="tableName", help="the table for jobs entry")
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
args = parser.parse_args()
region = args.region
tableName = args.tableName
s3 = boto3.client('s3', region_name=region)
dynamodb = boto3.client('dynamodb', region_name=region)
s3_interface = S3Interface(region)
def fetch_job():
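    # Fetch at most one job whose jobStatus is 'PendingDeployment' (Limit=1),
    # download its artifacts from S3, delete the record, then re-insert it
    # with jobStatus 'Deployed' and a fresh timestamp.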
    fe = Key('jobStatus').eq('PendingDeployment')  # built but unused: the query below spells out the key condition manually
tableJobConfigs = dynamodb.query(
TableName=tableName,
Limit=1,
KeyConditionExpression="#S = :jobStatus",
ExpressionAttributeNames={
"#S": "jobStatus"
},
ExpressionAttributeValues={
":jobStatus": {"S": "PendingDeployment"}
})
for JobConfig in tableJobConfigs['Items']:
bucket = JobConfig['bucketId']['S']
s3_interface.download_file_from_s3('dev.ini', bucket, JobConfig['devFileKey']['S'])
thingListFileKey = JobConfig['thingListFileKey']['S']
tmp, thingListFileName = thingListFileKey.split('release/')
s3_interface.download_file_from_s3(thingListFileName, bucket, thingListFileKey)
binFileKey = JobConfig['binFileKey']['S']
tmp, binName = binFileKey.split('release/')
s3_interface.download_file_from_s3(binName, bucket, binFileKey)
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")
response = dynamodb.delete_item(
TableName=tableName,
Key={
'jobStatus': JobConfig['jobStatus'],
'timestamp': JobConfig['timestamp']
}
)
if response is None:
raise Exception('job record delete failed')
else:
jobStatus = 'Deployed'
print(JobConfig['jobId'])
dynamodb.put_item(
TableName=tableName,
Item={
'jobId': {'S': JobConfig['jobId']['S']},
'bucketId': {'S': JobConfig['bucketId']['S']},
'binFileKey': {'S': JobConfig['binFileKey']['S']},
'thingListFileKey': {'S': JobConfig['thingListFileKey']['S']},
'devFileKey': {'S': JobConfig['devFileKey']['S']},
'jobStatus': {'S': jobStatus},
'timestamp': {'S': timestamp}
})
fetch_job()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"boto3.client",
"boto3.dynamodb.conditions.Key",
"datetime.datetime.utcnow",
"aws_interfaces.s3_interface.S3Interface"
] |
[((182, 207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (205, 207), False, 'import argparse\n'), ((449, 507), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (468, 507), False, 'import logging\n'), ((588, 626), 'boto3.client', 'boto3.client', (['"""s3"""'], {'region_name': 'region'}), "('s3', region_name=region)\n", (600, 626), False, 'import boto3\n'), ((638, 682), 'boto3.client', 'boto3.client', (['"""dynamodb"""'], {'region_name': 'region'}), "('dynamodb', region_name=region)\n", (650, 682), False, 'import boto3\n'), ((698, 717), 'aws_interfaces.s3_interface.S3Interface', 'S3Interface', (['region'], {}), '(region)\n', (709, 717), False, 'from aws_interfaces.s3_interface import S3Interface\n'), ((746, 762), 'boto3.dynamodb.conditions.Key', 'Key', (['"""jobStatus"""'], {}), "('jobStatus')\n", (749, 762), False, 'from boto3.dynamodb.conditions import Key, Attr\n'), ((1692, 1718), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1716, 1718), False, 'import datetime\n')]
|
#!python
# -*- coding: UTF-8 -*-
'''
################################################################
# Multiprocessing based synchronization.
# @ Sync-stream
# Produced by
# <NAME> @ <EMAIL>,
# <EMAIL>.
# Requirements: (Pay attention to version)
# python 3.6+
# The base module for the message synchronization. It is totally
# based on the stdlib of python.
# This module should be only used for synchronizing messages
# between threads and processes on the same device.
################################################################
'''
import os
import io
import collections
import threading
import queue
import multiprocessing
from typing import NoReturn
try:
from typing import Tuple, Sequence
except ImportError:
from builtins import tuple as Tuple
from collections.abc import Sequence
from .base import is_end_line_break, GroupedMessage
class LineBuffer:
'''The basic line-based buffer handle.
    This buffer provides a rotating item storage for the text-based stream. The text is stored not
by length, but by lines. The maximal line number of the storage is limited.
'''
def __init__(self, maxlen: int = 20) -> None:
'''Initialization.
Arguments:
maxlen: the maximal number of stored lines.
'''
if not isinstance(maxlen, int) or maxlen < 1:
raise TypeError('syncstream: The argument "maxlen" should be a positive integer.')
self.storage = collections.deque(maxlen=maxlen)
self.last_line = io.StringIO()
self.__last_line_lock = threading.Lock()
def clear(self) -> None:
'''Clear the whole buffer.
This method would clear the storage and the last line stream of this buffer. However,
it would not clear any mirrors or copies of this object. This method is thread-safe
        and should always succeed.
'''
with self.__last_line_lock:
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
self.storage.clear()
def new_line(self) -> None:
R'''Manually trigger a new line to the buffer. If the current stream is already
a new line, do nothing.
This method is equivalent to
```python
if self.last_line.tell() > 0:
write('\n')
```
'''
with self.__last_line_lock:
if self.last_line.tell() > 0:
self.__write('\n')
def flush(self) -> None:
'''Flush the current written line stream.
'''
with self.__last_line_lock:
self.last_line.flush()
def parse_lines(self, lines: Sequence[str]) -> None:
'''Parse the lines.
This method would be triggered when the new lines are written by `write()` method.
The default behavior is adding the item into the storage.
Users could inherit this method and override it with their customized parsing method,
like regular expression searching.
Arguments:
            lines: the new lines to be added into the storage.
'''
self.storage.extend(lines)
def read(self, size: int = None) -> Tuple[str]:
'''Read the records.
Fetch the stored record items from the buffer. Using the `read()` method is thread-safe
        and would not influence the cursor of the `write()` method.
If the current written line is not blank, the `read()` method would regard it as the
last record item.
Arguments:
            size: if set to None, would return the whole storage.
                  if set to an int value, would return the last `size` items.
'''
with self.__last_line_lock:
has_last_line = self.last_line.tell() > 0
n_lines = len(self.storage)
if size is None:
if has_last_line:
if n_lines > 0:
value = self.storage.popleft()
results = (*self.storage, self.last_line.getvalue())
self.storage.appendleft(value)
else:
results = (self.last_line.getvalue(), )
return results
else:
return tuple(self.storage)
elif size > 0:
is_storage_popped = has_last_line and n_lines > 0
if is_storage_popped:
preserved_value = self.storage.popleft()
size -= 1
results = list()
n_read = min(size, n_lines)
if n_read > 0:
self.storage.rotate(n_read)
for _ in range(n_read):
value = self.storage.popleft()
results.append(value)
self.storage.append(value)
if has_last_line:
results.append(self.last_line.getvalue())
if is_storage_popped:
self.storage.appendleft(preserved_value)
return tuple(results)
def __write(self, data: str) -> int:
'''The write() method without lock.
This method is private and should not be used by users.
'''
message_lines = data.splitlines()
n_lines = len(message_lines)
if n_lines == 1 and message_lines[0] == '':
self.parse_lines((self.last_line.getvalue(), ))
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
return 1
elif is_end_line_break(data):
message_lines.append('')
n_lines += 1
if n_lines > 1:
message_lines[0] = self.last_line.getvalue() + message_lines[0]
last_line = message_lines.pop()
self.parse_lines(message_lines)
self.last_line.seek(0, os.SEEK_SET)
self.last_line.truncate(0)
return self.last_line.write(last_line)
elif n_lines == 1:
return self.last_line.write(message_lines[0])
def write(self, data: str) -> int:
'''Write the records.
The source data is the same as that of a text-based IO. Each time when `data` contains
a line break, a new record item would be pushed in the storage. The `write()` method
is thread-safe.
Arguments:
data: the data that would be written in the stream.
'''
with self.__last_line_lock:
return self.__write(data)
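# Illustrative example (editor's addition) of the rotating read semantics:
# read() counts the unfinished last line toward the returned window.
#
#     buf = LineBuffer(maxlen=2)
#     buf.write('a\nb\nc\nd')   # 'a', 'b', 'c' are complete lines; 'd' is pending
#     buf.read()                # -> ('c', 'd'): the deque holds ('b', 'c'), but
#                               #    the pending line takes one of the two slots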
class LineProcMirror:
'''The mirror for the process-safe line-based buffer.
This mirror is initialized by `LineProcBuffer`, and would be used for managing the lines
written to the buffer.
'''
def __init__(self, q_maxsize: int = 0, aggressive: bool = False, timeout: float = None,
_queue: queue.Queue = None, _state: dict = None, _state_lock: threading.Lock = None) -> None:
'''Initialization
Arguments:
q_maxsize: the `maxsize` of the queue. Use 0 means no limitation. A size limited
queue is recommended for protecting the memory.
aggressive: the aggressive mode. If enabled, each call for the `write()` method
would trigger the process synchronization. Otherwise, the
synchronization would be triggered when a new line is written.
            timeout: the timeout of the process synchronizing events. If not set, the
synchronization would block the current process.
Private arguments:
_queue: the queue used for handling the message flow. If not set, would be
created by multiprocessing.Queue(). A recommended way is to set
this value by multiprocessing.Manager(). In this case, `q_maxsize`
would not be used.
_state, _state_lock: required for getting the buffer states. If not set, would
not turn on the stop signal.
'''
self.__buffer = io.StringIO()
self.__buffer_lock_ = None
self.aggressive = aggressive
self.__timeout = timeout
self.__block = timeout is None
if _queue is None:
self.__queue = multiprocessing.Queue(maxsize=q_maxsize)
else:
self.__queue = _queue
if _state is not None and _state_lock is not None:
self.__state_lock = _state_lock
self.__state = _state
else:
self.__state_lock = None
@property
def __buffer_lock(self) -> threading.Lock:
'''The threading lock for the buffer.
This lock should not be exposed to users. It is used for ensuring that the
temporary buffer of the mirror is thread-safe.
'''
if self.__buffer_lock_ is None:
self.__buffer_lock_ = threading.Lock()
return self.__buffer_lock_
def clear(self) -> None:
'''Clear the temporary buffer.
This method would clear the temporary buffer of the mirror. If the mirror works
        in the `aggressive` mode, the temporary buffer would not be used. In this case,
        this method has no effect on the mirror.
This method is thread-safe. Mirrors in different processes would not share the
temporary buffer. Note that the shared queue would not be cleared by this
method.
'''
with self.__buffer_lock:
self.__buffer.seek(0, os.SEEK_SET)
self.__buffer.truncate(0)
def new_line(self) -> None:
R'''Manually trigger a new line to the buffer. If the current stream is already
a new line, do nothing.
'''
with self.__buffer_lock:
if self.__buffer.tell() > 0:
self.__write('\n')
@property
def timeout(self) -> int:
'''The time out of the process synchronization.
'''
return self.__timeout
@timeout.setter
def timeout(self, timeout: int = None) -> None:
'''Setter for the property timeout.
'''
self.__timeout = timeout
self.__block = timeout is None
def send_eof(self) -> None:
'''Send an EOF signal to the main buffer.
        The EOF signal is used for telling the main buffer to stop waiting. Note that this
method would not close the queue. The mirror could be reused for another program.
'''
self.new_line()
self.__queue.put(
{'type': 'close', 'data': None},
block=self.__block, timeout=self.__timeout
)
def send_error(self, obj_err: Exception) -> None:
'''Send the error object to the main buffer.
The error object would be captured as an item of the storage in the main buffer.
'''
self.new_line()
self.__queue.put(
{'type': 'error', 'data': GroupedMessage(obj_err)},
block=self.__block, timeout=self.__timeout
)
def send_warning(self, obj_warn: Warning) -> None:
'''Send the warning object to the main buffer.
The warning object would be captured as an item of the storage in the main buffer.
'''
self.new_line()
self.__queue.put(
{'type': 'warning', 'data': GroupedMessage(obj_warn)},
block=self.__block, timeout=self.__timeout
)
def send_data(self, data: str) -> None:
'''Send the data to the main buffer.
        This method is equivalent to calling the main buffer (LineProcBuffer) as
        follows, protected by process-safe synchronization:
```python
pbuf.write(data)
```
This method is used by other methods implicitly, and should not be used by users.
Arguments:
data: a str to be sent to the main buffer.
'''
self.__queue.put(
{'type': 'str', 'data': data},
block=self.__block, timeout=self.__timeout
)
def flush(self) -> None:
'''Flush the current written line stream.
'''
with self.__buffer_lock:
self.__buffer.flush()
def read(self) -> str:
'''Read the current buffer.
This method would only read the current bufferred values. If the property
`aggressive` is `True`, the `read()` method would always return empty value.
'''
with self.__buffer_lock:
return self.__buffer.getvalue()
def __write(self, data: str) -> int:
'''The write() method without lock.
This method is private and should not be used by users.
'''
try:
if self.__state_lock is not None:
with self.__state_lock:
is_closed = self.__state.get('closed', False)
if is_closed:
raise StopIteration('syncstream: The sub-process is terminated by users.')
except queue.Empty:
pass
message_lines = data.splitlines()
if self.aggressive:
self.send_data(data=data)
return len(data)
n_lines = len(message_lines)
        if n_lines > 1 or (n_lines == 1 and message_lines[0] == '') or is_end_line_break(data):  # A new line is triggered.
res = self.__buffer.write(data)
self.send_data(data=self.__buffer.getvalue())
self.__buffer.seek(0, os.SEEK_SET)
self.__buffer.truncate(0)
return res
elif n_lines == 1:
return self.__buffer.write(data)
def write(self, data: str) -> int:
'''Write the stream.
The source data is the same as that of a text-based IO. If `aggressive` is `True`,
        each call of `write()` sends the stream value to the main buffer. If not, the
        stream value is sent only when `data` contains a line break.
The method is thread-safe, but the message synchronization is process-safe.
Arguments:
data: the data that would be written in the stream.
'''
with self.__buffer_lock:
return self.__write(data)
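# Note (editor's addition): every mirror method that reports to the main buffer
# goes through the shared queue as a {'type': ..., 'data': ...} message; the
# types 'str', 'error', 'warning' and 'close' are dispatched by
# LineProcBuffer.receive() below.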
class LineProcBuffer(LineBuffer):
'''The process-safe line-based buffer.
The rotating buffer with a maximal storage length. This buffer is the extended version of
the basic `LineBuffer`. It is used for the case of multi-processing. Use the shared queue
of this buffer to ensure the synchronization among processes. For example,
```python
def f(buffer):
with contextlib.redirect_stdout(buffer):
print('example')
buffer.send_eof()
if __name__ == '__main__':
pbuf = LineProcBuffer(maxlen=10)
with multiprocessing.Pool(4) as p:
p.map_async(f, tuple(pbuf.mirror for _ in range(4)))
pbuf.wait()
print(pbuf.read())
```
'''
def __init__(self, maxlen: int = 20) -> None:
'''Initialization.
Arguments:
maxlen: the maximal number of stored lines.
'''
super().__init__(maxlen=maxlen)
self.__manager = multiprocessing.Manager()
self.__state = self.__manager.dict(closed=False)
self.__state_lock = self.__manager.Lock() # pylint: disable=no-member
self.__mirror = LineProcMirror(q_maxsize=2 * maxlen, aggressive=False, timeout=None, _queue=self.__manager.Queue(), _state=self.__state, _state_lock=self.__state_lock)
self.n_mirrors = 0
self.__config_lock = threading.Lock()
@property
def mirror(self) -> LineProcMirror:
'''Get the mirror of this buffer. The buffer should not be used in sub-processes
directly. Use `self.mirror` to provide the process-safe mirror of the buffer.
        This property could not be modified after the initialization. Note that each
        access increments the mirror count from which `wait()` expects a close signal.
'''
self.n_mirrors += 1
return self.__mirror
def stop_all_mirrors(self) -> None:
'''Send stop signals to all mirrors.
This operation is used for terminating the sub-processes safely. It does not
        guarantee that the processes would be closed instantly. Each time a new
        message is written by a sub-process, a check would be triggered.
If users want to use this method, please ensure that the StopIteration error
        is caught by the process. The error would not be caught automatically. If
        users do not catch the error, the main process would get stuck at `wait()`.
'''
with self.__state_lock:
self.__state['closed'] = True
def reset_states(self) -> None:
'''Reset the states of the buffer.
This method should be used if the buffer needs to be reused.
'''
with self.__state_lock:
self.__state.clear()
self.__state['closed'] = False
def __check_close(self) -> bool:
'''Check whether to finish the `wait()` method.
This method would be used when receiving a closing signal.
This method is private and should not be used by users.
Note that this method is always triggered in the config_lock.
'''
        self.n_mirrors -= 1
        return self.n_mirrors > 0
def receive(self) -> bool:
'''Receive one item from the mirror.
This method would fetch one item from the process-safe queue, and write the results
in the thread-safe buffer.
'''
with self.__config_lock:
data = self.__mirror._LineProcMirror__queue.get() # pylint: disable=protected-access
dtype = data['type']
if dtype == 'str':
super().write(data['data'])
return True
elif dtype == 'error':
obj = data['data']
self.storage.append(obj)
return self.__check_close()
elif dtype == 'warning':
obj = data['data']
self.storage.append(obj)
return True
elif dtype == 'close':
return self.__check_close()
return False
def wait(self) -> None:
'''Wait the mirror until the close signal is received.
'''
while self.receive():
pass
def write(self, data: str) -> NoReturn:
'''Write the records.
        This method should not be used. Instead, please use self.mirror.write().
Arguments:
data: the data that would be written in the stream.
'''
        raise NotImplementedError('syncstream: Should not use this method; use '
                                  '`self.mirror.write()` instead.')
|
[
"io.StringIO",
"multiprocessing.Manager",
"threading.Lock",
"multiprocessing.Queue",
"collections.deque"
] |
[((1463, 1495), 'collections.deque', 'collections.deque', ([], {'maxlen': 'maxlen'}), '(maxlen=maxlen)\n', (1480, 1495), False, 'import collections\n'), ((1521, 1534), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (1532, 1534), False, 'import io\n'), ((1567, 1583), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1581, 1583), False, 'import threading\n'), ((8025, 8038), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8036, 8038), False, 'import io\n'), ((15102, 15127), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (15125, 15127), False, 'import multiprocessing\n'), ((15496, 15512), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (15510, 15512), False, 'import threading\n'), ((8237, 8277), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {'maxsize': 'q_maxsize'}), '(maxsize=q_maxsize)\n', (8258, 8277), False, 'import multiprocessing\n'), ((8846, 8862), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (8860, 8862), False, 'import threading\n')]
|
"""
OpenNEM AEMO facility closure dates parser.
"""
import logging
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Union
from openpyxl import load_workbook
from pydantic import ValidationError
from pydantic.class_validators import validator
from opennem.core.normalizers import is_number, normalize_duid
from opennem.schema.core import BaseConfig
logger = logging.getLogger("opennem.parsers.aemo_nem_facility_closures")
WORKBOOK_SHEET_NAME = "Expected Closure Year"
CLOSURE_SHEET_FIELDS = [
"station_name",
"duid",
"expected_closure_year",
"expected_closure_date",
]
def _clean_expected_closure_year(closure_year: Union[str, int]) -> Optional[int]:
"""Clean up expected closure year because sometimes they just put comments in the field"""
if is_number(closure_year):
return int(closure_year)
return None
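# Note (editor's addition): the pre=True validators below run before pydantic's
# own type coercion, so free-text closure years are dropped to None and DUIDs
# are normalized before validation.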
class AEMOClosureRecord(BaseConfig):
station_name: str
duid: Optional[str]
expected_closure_year: Optional[int]
expected_closure_date: Optional[datetime]
_validate_closure_year = validator("expected_closure_year", pre=True)(
_clean_expected_closure_year
)
_clean_duid = validator("duid", pre=True)(normalize_duid)
def parse_aemo_closures_xls() -> List[AEMOClosureRecord]:
"""Parse the AEMO NEM closures spreadsheet"""
aemo_path = (
Path(__file__).parent.parent.parent
/ "data"
/ "aemo"
/ "generating-unit-expected-closure-year.xlsx"
)
if not aemo_path.is_file():
raise Exception("Not found: {}".format(aemo_path))
# @TODO split here to read ByteIO from download / local file
wb = load_workbook(aemo_path, data_only=True)
generator_ws = wb[WORKBOOK_SHEET_NAME]
records = []
for row in generator_ws.iter_rows(min_row=2, values_only=True):
row_collapsed = row[0:2] + row[3:5]
return_dict = dict(zip(CLOSURE_SHEET_FIELDS, list(row_collapsed)))
r = None
try:
r = AEMOClosureRecord(**return_dict)
except ValidationError as e:
logger.error("Validation error: {}. {}".format(e, return_dict))
if r:
records.append(r)
return records
if __name__ == "__main__":
p = parse_aemo_closures_xls()
from pprint import pprint
pprint(p)
|
[
"openpyxl.load_workbook",
"logging.getLogger",
"pydantic.class_validators.validator",
"pathlib.Path",
"pprint.pprint",
"opennem.core.normalizers.is_number"
] |
[((403, 466), 'logging.getLogger', 'logging.getLogger', (['"""opennem.parsers.aemo_nem_facility_closures"""'], {}), "('opennem.parsers.aemo_nem_facility_closures')\n", (420, 466), False, 'import logging\n'), ((818, 841), 'opennem.core.normalizers.is_number', 'is_number', (['closure_year'], {}), '(closure_year)\n', (827, 841), False, 'from opennem.core.normalizers import is_number, normalize_duid\n'), ((1681, 1721), 'openpyxl.load_workbook', 'load_workbook', (['aemo_path'], {'data_only': '(True)'}), '(aemo_path, data_only=True)\n', (1694, 1721), False, 'from openpyxl import load_workbook\n'), ((2331, 2340), 'pprint.pprint', 'pprint', (['p'], {}), '(p)\n', (2337, 2340), False, 'from pprint import pprint\n'), ((1095, 1139), 'pydantic.class_validators.validator', 'validator', (['"""expected_closure_year"""'], {'pre': '(True)'}), "('expected_closure_year', pre=True)\n", (1104, 1139), False, 'from pydantic.class_validators import validator\n'), ((1203, 1230), 'pydantic.class_validators.validator', 'validator', (['"""duid"""'], {'pre': '(True)'}), "('duid', pre=True)\n", (1212, 1230), False, 'from pydantic.class_validators import validator\n'), ((1383, 1397), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1387, 1397), False, 'from pathlib import Path\n')]
|
import os
import ssl
import smtplib
from typing import Callable
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
def send_mail(subject: str, log_path: str, img_path: str, get: Callable):
sender = get('email')
api_key = get('api_key')
receiver = get('target_email')
message = MIMEMultipart()
message['Subject'] = subject
message['From'] = sender
message['To'] = receiver
with open(log_path) as raw_log, open(img_path, 'rb') as raw_img:
log = raw_log.read()
image = raw_img.read()
log_file = MIMEText(log)
image_file = MIMEImage(image, name=os.path.basename(img_path))
message.attach(log_file)
message.attach(image_file)
text = message.as_string()
context = ssl.create_default_context()
with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as server:
server.login(sender, api_key)
server.sendmail(sender, receiver, text)
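# Illustrative usage (editor's assumption): `get` is any str -> str lookup that
# provides 'email', 'api_key' (the SMTP password, e.g. a Gmail app password)
# and 'target_email', for example a dict's .get method:
#
#     config = {'email': 'me@gmail.com', 'api_key': '...',
#               'target_email': 'you@example.com'}
#     send_mail('Daily report', 'run.log', 'screenshot.png', config.get)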
|
[
"smtplib.SMTP_SSL",
"os.path.basename",
"email.mime.text.MIMEText",
"ssl.create_default_context",
"email.mime.multipart.MIMEMultipart"
] |
[((369, 384), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (382, 384), False, 'from email.mime.multipart import MIMEMultipart\n'), ((623, 636), 'email.mime.text.MIMEText', 'MIMEText', (['log'], {}), '(log)\n', (631, 636), False, 'from email.mime.text import MIMEText\n'), ((812, 840), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (838, 840), False, 'import ssl\n'), ((851, 907), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', '(465)'], {'context': 'context'}), "('smtp.gmail.com', 465, context=context)\n", (867, 907), False, 'import smtplib\n'), ((676, 702), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (692, 702), False, 'import os\n')]
|
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# fitnesseSettings
# @Since: 23-OCT-2019
# @Author: <NAME>
# @Version: 20191023.0 - JBE - Initial
##
import cicd.fitnesse.fitnesseConstants as constants
import supporting, os, logging
import supporting.generalSettings as generalsettings
from supporting.generalSettings import completePath
logger = logging.getLogger(__name__)
sourcefitnessedir = constants.DEFAULT_SOURCE_FITNESSEDIR
targetfitnessedir = constants.DEFAULT_TARGET_FITNESSEDIR
def getfitnesseenvvars():
thisproc="getfitnesseenvvars"
global fitnessedeploylist, sourcefitnessedir, targetfitnessedir
supporting.log(logger, logging.DEBUG, thisproc, 'started')
fitnessedeploylist = completePath(os.environ.get(constants.varFitNesseDeployList, constants.DEFAULT_FITNESSE_DEPLOYLIST), generalsettings.sourceDir)
sourcefitnessedir = completePath(os.environ.get(constants.varSourceFitNesseDir, constants.DEFAULT_SOURCE_FITNESSEDIR), generalsettings.sourceDir)
targetfitnessedir = completePath(os.environ.get(constants.varTargetFitNesseDir, constants.DEFAULT_TARGET_FITNESSEDIR), generalsettings.sourceDir)
def outfitnesseenvvars():
thisproc = "outfitnesseenvvars"
supporting.log(logger, logging.INFO, thisproc, 'fitnessedeploylist is >' + fitnessedeploylist + "<.")
    supporting.log(logger, logging.INFO, thisproc, 'sourcefitnessedir is >' + sourcefitnessedir + "<.")
    supporting.log(logger, logging.INFO, thisproc, 'targetfitnessedir is >' + targetfitnessedir + "<.")
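# Note (editor's addition): getfitnesseenvvars() must run before
# outfitnesseenvvars(), since fitnessedeploylist has no module-level default
# and is only assigned inside getfitnesseenvvars().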
|
[
"os.environ.get",
"supporting.log",
"logging.getLogger"
] |
[((1424, 1451), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1441, 1451), False, 'import supporting, os, logging\n'), ((1700, 1758), 'supporting.log', 'supporting.log', (['logger', 'logging.DEBUG', 'thisproc', '"""started"""'], {}), "(logger, logging.DEBUG, thisproc, 'started')\n", (1714, 1758), False, 'import supporting, os, logging\n'), ((2280, 2385), 'supporting.log', 'supporting.log', (['logger', 'logging.INFO', 'thisproc', "('fitnessedeploylist is >' + fitnessedeploylist + '<.')"], {}), "(logger, logging.INFO, thisproc, 'fitnessedeploylist is >' +\n fitnessedeploylist + '<.')\n", (2294, 2385), False, 'import supporting, os, logging\n'), ((2387, 2490), 'supporting.log', 'supporting.log', (['logger', 'logging.INFO', 'thisproc', "('sourcefitnessedir is >' + sourcefitnessedir + '<.')"], {}), "(logger, logging.INFO, thisproc, 'sourcefitnessedir is >' +\n sourcefitnessedir + '<.')\n", (2401, 2490), False, 'import supporting, os, logging\n'), ((2491, 2594), 'supporting.log', 'supporting.log', (['logger', 'logging.INFO', 'thisproc', "('targetfitnessedir is >' + targetfitnessedir + '<.')"], {}), "(logger, logging.INFO, thisproc, 'targetfitnessedir is >' +\n targetfitnessedir + '<.')\n", (2505, 2594), False, 'import supporting, os, logging\n'), ((1797, 1888), 'os.environ.get', 'os.environ.get', (['constants.varFitNesseDeployList', 'constants.DEFAULT_FITNESSE_DEPLOYLIST'], {}), '(constants.varFitNesseDeployList, constants.\n DEFAULT_FITNESSE_DEPLOYLIST)\n', (1811, 1888), False, 'import supporting, os, logging\n'), ((1949, 2038), 'os.environ.get', 'os.environ.get', (['constants.varSourceFitNesseDir', 'constants.DEFAULT_SOURCE_FITNESSEDIR'], {}), '(constants.varSourceFitNesseDir, constants.\n DEFAULT_SOURCE_FITNESSEDIR)\n', (1963, 2038), False, 'import supporting, os, logging\n'), ((2099, 2188), 'os.environ.get', 'os.environ.get', (['constants.varTargetFitNesseDir', 'constants.DEFAULT_TARGET_FITNESSEDIR'], {}), '(constants.varTargetFitNesseDir, constants.\n DEFAULT_TARGET_FITNESSEDIR)\n', (2113, 2188), False, 'import supporting, os, logging\n')]
|
import argparse
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common detectron2 utilities
from detectron2.model_zoo import model_zoo
from detectron2.engine import DefaultTrainer, DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader
from detectron2.data.datasets import register_coco_instances
from detectron2.structures import BoxMode
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.modeling import build_model
parser = argparse.ArgumentParser(description='ACID_Object_Detection_Train')
parser.add_argument('--dataset', default='ACID_dataset', type=str, help='name of dataset')
parser.add_argument('--file', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_train_augmentation', type=str, help='data file')
parser.add_argument('--label', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_train_augmentation.json', type=str, help='COCO format json')
parser.add_argument('--test_dataset', default='ACID_testing', type=str, help='name of testing dataset')
parser.add_argument('--test_file', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_testing', type=str, help='testing data file')
parser.add_argument('--test_label', default='/home/hteam/Documents/hao/Research/Dataset/ACID/ACID_test.json', type=str, help='testing json')
parser.add_argument('--model', default='COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml', type=str, help='model')
parser.add_argument('--weight', default='./output/model_final.pth', type=str, help='model weight')
parser.add_argument('--num_class', default=3, type=int, help='num of classes')
parser.add_argument('--iter', default=30000, type=int, help='max iter')
def main():
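    # Register the COCO-format training/testing sets, build the config for the
    # model-zoo Faster R-CNN (X101-FPN by default), load the trained weights,
    # and run COCO evaluation on the test split.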
args = parser.parse_args()
register_coco_instances(args.dataset, {}, args.label, args.file) # training dataset
register_coco_instances(args.test_dataset, {}, args.test_label, args.test_file) # testing dataset
### set metadata
MetadataCatalog.get(args.test_dataset).evaluator_type="coco"
DatasetCatalog.get(args.test_dataset)
### cfg setting
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(args.model))
cfg.DATASETS.TRAIN = (args.dataset,)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class # excavator, dump_truck, cement_truck
cfg.MODEL.WEIGHTS = args.weight
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set the testing threshold for this model
cfg.DATASETS.TEST = (args.test_dataset,)
    ### trainer setting
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(cfg.MODEL.WEIGHTS)
### evaluation setting
evaluator = COCOEvaluator(args.test_dataset, cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, args.test_dataset)
inference_on_dataset(trainer.model, val_loader, evaluator)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"detectron2.data.DatasetCatalog.get",
"detectron2.utils.logger.setup_logger",
"detectron2.evaluation.COCOEvaluator",
"detectron2.data.datasets.register_coco_instances",
"detectron2.config.get_cfg",
"detectron2.model_zoo.model_zoo.get_config_file",
"detectron2.engine.DefaultTrainer",
"detectron2.data.build_detection_test_loader",
"detectron2.data.MetadataCatalog.get",
"detectron2.evaluation.inference_on_dataset"
] |
[((228, 242), 'detectron2.utils.logger.setup_logger', 'setup_logger', ([], {}), '()\n', (240, 242), False, 'from detectron2.utils.logger import setup_logger\n'), ((809, 875), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ACID_Object_Detection_Train"""'}), "(description='ACID_Object_Detection_Train')\n", (832, 875), False, 'import argparse\n'), ((2065, 2129), 'detectron2.data.datasets.register_coco_instances', 'register_coco_instances', (['args.dataset', '{}', 'args.label', 'args.file'], {}), '(args.dataset, {}, args.label, args.file)\n', (2088, 2129), False, 'from detectron2.data.datasets import register_coco_instances\n'), ((2154, 2233), 'detectron2.data.datasets.register_coco_instances', 'register_coco_instances', (['args.test_dataset', '{}', 'args.test_label', 'args.test_file'], {}), '(args.test_dataset, {}, args.test_label, args.test_file)\n', (2177, 2233), False, 'from detectron2.data.datasets import register_coco_instances\n'), ((2344, 2381), 'detectron2.data.DatasetCatalog.get', 'DatasetCatalog.get', (['args.test_dataset'], {}), '(args.test_dataset)\n', (2362, 2381), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader\n'), ((2413, 2422), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (2420, 2422), False, 'from detectron2.config import get_cfg\n'), ((2836, 2855), 'detectron2.engine.DefaultTrainer', 'DefaultTrainer', (['cfg'], {}), '(cfg)\n', (2850, 2855), False, 'from detectron2.engine import DefaultTrainer, DefaultPredictor\n'), ((2947, 3015), 'detectron2.evaluation.COCOEvaluator', 'COCOEvaluator', (['args.test_dataset', 'cfg', '(False)'], {'output_dir': '"""./output/"""'}), "(args.test_dataset, cfg, False, output_dir='./output/')\n", (2960, 3015), False, 'from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n'), ((3033, 3084), 'detectron2.data.build_detection_test_loader', 'build_detection_test_loader', (['cfg', 'args.test_dataset'], {}), '(cfg, args.test_dataset)\n', (3060, 3084), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader\n'), ((3089, 3147), 'detectron2.evaluation.inference_on_dataset', 'inference_on_dataset', (['trainer.model', 'val_loader', 'evaluator'], {}), '(trainer.model, val_loader, evaluator)\n', (3109, 3147), False, 'from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n'), ((2279, 2317), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['args.test_dataset'], {}), '(args.test_dataset)\n', (2298, 2317), False, 'from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_test_loader\n'), ((2447, 2484), 'detectron2.model_zoo.model_zoo.get_config_file', 'model_zoo.get_config_file', (['args.model'], {}), '(args.model)\n', (2472, 2484), False, 'from detectron2.model_zoo import model_zoo\n')]
|
import django.core.validators
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserCourseTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=255, db_index=True)),
('course_id', CourseKeyField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserOrgTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('key', models.CharField(max_length=255, db_index=True)),
('org', models.CharField(max_length=255, db_index=True)),
('value', models.TextField()),
('user', models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='UserPreference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(db_index=True, max_length=255, validators=[django.core.validators.RegexValidator('[-_a-zA-Z0-9]+')])),
('value', models.TextField()),
('user', models.ForeignKey(related_name='preferences', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.AlterUniqueTogether(
name='userpreference',
unique_together={('user', 'key')},
),
migrations.AlterUniqueTogether(
name='userorgtag',
unique_together={('user', 'org', 'key')},
),
migrations.AlterUniqueTogether(
name='usercoursetag',
unique_together={('user', 'course_id', 'key')},
),
]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField",
"opaque_keys.edx.django.models.CourseKeyField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.migrations.AlterUniqueTogether"
] |
[((287, 344), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (318, 344), False, 'from django.db import migrations, models\n'), ((2300, 2393), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""userpreference"""', 'unique_together': "{('user', 'key')}"}), "(name='userpreference', unique_together={(\n 'user', 'key')})\n", (2330, 2393), False, 'from django.db import migrations, models\n'), ((2433, 2528), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""userorgtag"""', 'unique_together': "{('user', 'org', 'key')}"}), "(name='userorgtag', unique_together={('user',\n 'org', 'key')})\n", (2463, 2528), False, 'from django.db import migrations, models\n'), ((2569, 2674), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""usercoursetag"""', 'unique_together': "{('user', 'course_id', 'key')}"}), "(name='usercoursetag', unique_together={(\n 'user', 'course_id', 'key')})\n", (2599, 2674), False, 'from django.db import migrations, models\n'), ((482, 575), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (498, 575), False, 'from django.db import migrations, models\n'), ((598, 645), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'db_index': '(True)'}), '(max_length=255, db_index=True)\n', (614, 645), False, 'from django.db import migrations, models\n'), ((678, 723), 'opaque_keys.edx.django.models.CourseKeyField', 'CourseKeyField', ([], {'max_length': '(255)', 'db_index': '(True)'}), '(max_length=255, db_index=True)\n', (692, 723), False, 'from opaque_keys.edx.django.models import CourseKeyField\n'), ((752, 770), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (768, 770), False, 'from django.db import migrations, models\n'), ((798, 893), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL', 'on_delete': 'models.CASCADE'}), "(related_name='+', to=settings.AUTH_USER_MODEL, on_delete=\n models.CASCADE)\n", (815, 893), False, 'from django.db import migrations, models\n'), ((1024, 1117), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1040, 1117), False, 'from django.db import migrations, models\n'), ((1429, 1476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'db_index': '(True)'}), '(max_length=255, db_index=True)\n', (1445, 1476), False, 'from django.db import migrations, models\n'), ((1503, 1550), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'db_index': '(True)'}), '(max_length=255, db_index=True)\n', (1519, 1550), False, 'from django.db import migrations, models\n'), ((1579, 1597), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1595, 1597), False, 'from django.db import migrations, models\n'), ((1625, 1720), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""+"""', 'to': 'settings.AUTH_USER_MODEL', 'on_delete': 'models.CASCADE'}), "(related_name='+', 
to=settings.AUTH_USER_MODEL, on_delete=\n models.CASCADE)\n", (1642, 1720), False, 'from django.db import migrations, models\n'), ((1855, 1948), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1871, 1948), False, 'from django.db import migrations, models\n'), ((2117, 2135), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2133, 2135), False, 'from django.db import migrations, models\n'), ((2163, 2267), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""preferences"""', 'to': 'settings.AUTH_USER_MODEL', 'on_delete': 'models.CASCADE'}), "(related_name='preferences', to=settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE)\n", (2180, 2267), False, 'from django.db import migrations, models\n')]
|
import datetime as dt
from json import dumps as json_dumps
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from registration.models import User
from events.models import SoloEvent
from event_registrations.models import SoloEventRegistration
from payments.models import Transaction
class PaymentInitiateViewTestCase(APITestCase):
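    # Exercises the payment_initiate endpoint: authentication is required,
    # only the registration's owner may pay, the payload must reference an
    # existing event and registration, and an already-completed registration
    # cannot be paid for again.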
def setUp(self):
self.user1 = User.objects.create(username='test_user1',
first_name='test', last_name='user',
email='<EMAIL>', email_confirmed=True
)
self.user2 = User.objects.create(username='test_user2',
first_name='test', last_name='user',
email='<EMAIL>', email_confirmed=True
)
self.profile1 = self.user1.profile
self.profile2 = self.user2.profile
self.event1 = SoloEvent.objects.create(title='SoloEvent1',
start_date=dt.date(2019, 7, 19), end_date=dt.date(2019, 7, 19),
start_time=dt.time(12, 0, 0), end_time=dt.time(15, 0, 0),
fee=100, reserved_fee=80, reserved_slots=10, max_participants=20
)
self.event2 = SoloEvent.objects.create(title='SoloEvent2',
start_date=dt.date(2019, 7, 19), end_date=dt.date(2019, 7, 19),
start_time=dt.time(12, 0, 0), end_time=dt.time(15, 0, 0),
fee=100, reserved_fee=0, reserved_slots=10, max_participants=20
)
self.registration1 = SoloEventRegistration.objects.create(event=self.event1, profile=self.profile1)
self.registration2 = SoloEventRegistration.objects.create(event=self.event1, profile=self.profile2,
is_reserved=True)
self.registration3 = SoloEventRegistration.objects.create(event=self.event2, profile=self.profile2,
is_reserved=True, is_complete=True)
self.registration3_transaction = Transaction.objects.create(created_by=self.profile1,
solo_registration=self.registration1,
status='Failed')
self.registration3_transaction = Transaction.objects.create(created_by=self.profile2,
solo_registration=self.registration3,
status='Successful')
def test_payment_initiate_view_unauthenticated(self):
url = reverse('payment_initiate')
self.client.login(user=None)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_payment_initiate_view_wrong_user(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_payment_initiate_view_invalid_data(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': 'random_string',
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': 'random_string'}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_payment_initiate_view_missing_data(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
response = self.client.post(url,
data=json_dumps({'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_payment_initiate_view_already_paid(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event2.public_id,
'registrationId': self.registration3.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_payment_initiate_view_again(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user2)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration2.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_payment_initiate_view(self):
url = reverse('payment_initiate')
self.client.force_login(user=self.user1)
response = self.client.post(url,
data=json_dumps({'eventPublicId': self.event1.public_id,
'registrationId': self.registration1.public_id}),
content_type='application/json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
[
"datetime.date",
"json.dumps",
"django.urls.reverse",
"datetime.time",
"event_registrations.models.SoloEventRegistration.objects.create",
"payments.models.Transaction.objects.create",
"registration.models.User.objects.create"
] |
[((435, 558), 'registration.models.User.objects.create', 'User.objects.create', ([], {'username': '"""test_user1"""', 'first_name': '"""test"""', 'last_name': '"""user"""', 'email': '"""<EMAIL>"""', 'email_confirmed': '(True)'}), "(username='test_user1', first_name='test', last_name=\n 'user', email='<EMAIL>', email_confirmed=True)\n", (454, 558), False, 'from registration.models import User\n'), ((700, 823), 'registration.models.User.objects.create', 'User.objects.create', ([], {'username': '"""test_user2"""', 'first_name': '"""test"""', 'last_name': '"""user"""', 'email': '"""<EMAIL>"""', 'email_confirmed': '(True)'}), "(username='test_user2', first_name='test', last_name=\n 'user', email='<EMAIL>', email_confirmed=True)\n", (719, 823), False, 'from registration.models import User\n'), ((1951, 2029), 'event_registrations.models.SoloEventRegistration.objects.create', 'SoloEventRegistration.objects.create', ([], {'event': 'self.event1', 'profile': 'self.profile1'}), '(event=self.event1, profile=self.profile1)\n', (1987, 2029), False, 'from event_registrations.models import SoloEventRegistration\n'), ((2060, 2161), 'event_registrations.models.SoloEventRegistration.objects.create', 'SoloEventRegistration.objects.create', ([], {'event': 'self.event1', 'profile': 'self.profile2', 'is_reserved': '(True)'}), '(event=self.event1, profile=self.\n profile2, is_reserved=True)\n', (2096, 2161), False, 'from event_registrations.models import SoloEventRegistration\n'), ((2253, 2372), 'event_registrations.models.SoloEventRegistration.objects.create', 'SoloEventRegistration.objects.create', ([], {'event': 'self.event2', 'profile': 'self.profile2', 'is_reserved': '(True)', 'is_complete': '(True)'}), '(event=self.event2, profile=self.\n profile2, is_reserved=True, is_complete=True)\n', (2289, 2372), False, 'from event_registrations.models import SoloEventRegistration\n'), ((2476, 2588), 'payments.models.Transaction.objects.create', 'Transaction.objects.create', ([], {'created_by': 'self.profile1', 'solo_registration': 'self.registration1', 'status': '"""Failed"""'}), "(created_by=self.profile1, solo_registration=self\n .registration1, status='Failed')\n", (2502, 2588), False, 'from payments.models import Transaction\n'), ((2762, 2878), 'payments.models.Transaction.objects.create', 'Transaction.objects.create', ([], {'created_by': 'self.profile2', 'solo_registration': 'self.registration3', 'status': '"""Successful"""'}), "(created_by=self.profile2, solo_registration=self\n .registration3, status='Successful')\n", (2788, 2878), False, 'from payments.models import Transaction\n'), ((3083, 3110), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (3090, 3110), False, 'from django.urls import reverse\n'), ((3636, 3663), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (3643, 3663), False, 'from django.urls import reverse\n'), ((4200, 4227), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (4207, 4227), False, 'from django.urls import reverse\n'), ((5162, 5189), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (5169, 5189), False, 'from django.urls import reverse\n'), ((5971, 5998), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (5978, 5998), False, 'from django.urls import reverse\n'), ((6539, 6566), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), 
"('payment_initiate')\n", (6546, 6566), False, 'from django.urls import reverse\n'), ((7088, 7115), 'django.urls.reverse', 'reverse', (['"""payment_initiate"""'], {}), "('payment_initiate')\n", (7095, 7115), False, 'from django.urls import reverse\n'), ((1156, 1176), 'datetime.date', 'dt.date', (['(2019)', '(7)', '(19)'], {}), '(2019, 7, 19)\n', (1163, 1176), True, 'import datetime as dt\n'), ((1187, 1207), 'datetime.date', 'dt.date', (['(2019)', '(7)', '(19)'], {}), '(2019, 7, 19)\n', (1194, 1207), True, 'import datetime as dt\n'), ((1267, 1284), 'datetime.time', 'dt.time', (['(12)', '(0)', '(0)'], {}), '(12, 0, 0)\n', (1274, 1284), True, 'import datetime as dt\n'), ((1296, 1313), 'datetime.time', 'dt.time', (['(15)', '(0)', '(0)'], {}), '(15, 0, 0)\n', (1303, 1313), True, 'import datetime as dt\n'), ((1602, 1622), 'datetime.date', 'dt.date', (['(2019)', '(7)', '(19)'], {}), '(2019, 7, 19)\n', (1609, 1622), True, 'import datetime as dt\n'), ((1633, 1653), 'datetime.date', 'dt.date', (['(2019)', '(7)', '(19)'], {}), '(2019, 7, 19)\n', (1640, 1653), True, 'import datetime as dt\n'), ((1713, 1730), 'datetime.time', 'dt.time', (['(12)', '(0)', '(0)'], {}), '(12, 0, 0)\n', (1720, 1730), True, 'import datetime as dt\n'), ((1742, 1759), 'datetime.time', 'dt.time', (['(15)', '(0)', '(0)'], {}), '(15, 0, 0)\n', (1749, 1759), True, 'import datetime as dt\n'), ((3230, 3335), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id}"], {}), "({'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id})\n", (3240, 3335), True, 'from json import dumps as json_dumps\n'), ((3795, 3900), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id}"], {}), "({'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id})\n", (3805, 3900), True, 'from json import dumps as json_dumps\n'), ((4359, 4458), 'json.dumps', 'json_dumps', (["{'eventPublicId': 'random_string', 'registrationId': self.registration1.\n public_id}"], {}), "({'eventPublicId': 'random_string', 'registrationId': self.\n registration1.public_id})\n", (4369, 4458), True, 'from json import dumps as json_dumps\n'), ((4770, 4861), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id, 'registrationId': 'random_string'}"], {}), "({'eventPublicId': self.event1.public_id, 'registrationId':\n 'random_string'})\n", (4780, 4861), True, 'from json import dumps as json_dumps\n'), ((5321, 5373), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id}"], {}), "({'eventPublicId': self.event1.public_id})\n", (5331, 5373), True, 'from json import dumps as json_dumps\n'), ((5648, 5708), 'json.dumps', 'json_dumps', (["{'registrationId': self.registration1.public_id}"], {}), "({'registrationId': self.registration1.public_id})\n", (5658, 5708), True, 'from json import dumps as json_dumps\n'), ((6130, 6235), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event2.public_id, 'registrationId': self.\n registration3.public_id}"], {}), "({'eventPublicId': self.event2.public_id, 'registrationId': self.\n registration3.public_id})\n", (6140, 6235), True, 'from json import dumps as json_dumps\n'), ((6698, 6803), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration2.public_id}"], {}), "({'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration2.public_id})\n", (6708, 
6803), True, 'from json import dumps as json_dumps\n'), ((7247, 7352), 'json.dumps', 'json_dumps', (["{'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id}"], {}), "({'eventPublicId': self.event1.public_id, 'registrationId': self.\n registration1.public_id})\n", (7257, 7352), True, 'from json import dumps as json_dumps\n')]
|
import requests
# Read the image bytes up front; requests accepts raw bytes in the `files` mapping.
with open('data/test_photo.jpeg', 'rb') as f:
    image = {'image': f.read()}
r1 = requests.get("http://0.0.0.0:5000/")
print(r1.text)
r2 = requests.post("http://localhost:5000/get_prob", files=image)
print(r2.text) # "Male" or "Female"
|
[
"requests.post",
"requests.get"
] |
[((84, 120), 'requests.get', 'requests.get', (['"""http://0.0.0.0:5000/"""'], {}), "('http://0.0.0.0:5000/')\n", (96, 120), False, 'import requests\n'), ((142, 202), 'requests.post', 'requests.post', (['"""http://localhost:5000/get_prob"""'], {'files': 'image'}), "('http://localhost:5000/get_prob', files=image)\n", (155, 202), False, 'import requests\n')]
|
#!/usr/bin/env python
# coding=utf-8
from PyQt4.QtCore import *
import requests
import re, os
from OCR import Image2txt
from PIL import Image
class backEnd(QThread):
finish_signal = pyqtSignal(str, bool)
def __init__(self, txt):
super(backEnd, self).__init__()
self.txt = txt
def run(self):
path = '../OCR/tempimg/'
if not os.path.exists(path):
os.mkdir(path)
        url='https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word='+self.txt+'表情包'+'&ct=201326592&ic=0&lm=-1&width=&height=&v=flip'  # '表情包' means "meme stickers"
#url='http://image.baidu.com/search/flip?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1496141615672_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&ctd=1496141615672%5E00_1524X790&word=%E8%A1%A8%E6%83%85%E5%8C%85'
html=requests.get(url).text
pic_url=re.findall('"objURL":"(.*?)",',html,re.S)
i = 0
imgnum = 0
for each in pic_url:
print(each)
try:
pic=requests.get(each,timeout=10)
except requests.exceptions.ConnectionError:
print('error')
continue
imgpath=path + str(i) + '.jpg'
            with open(imgpath, 'wb') as fp:
                fp.write(pic.content)
try:
im = Image.open(imgpath)
(x,y) = im.size #read image size
x_s = 200 #define standard width
y_s = y * x_s // x #calc height based on standard width
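                # e.g. a 400x300 image yields y_s = 300 * 200 // 400 = 150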
out = im.resize((x_s,y_s),Image.ANTIALIAS) #resize image with high-quality
out.save(imgpath)
except OSError as e:
pass
i += 1
try:
pic = Image2txt.picture_ocr(imgpath)
txt = pic.get_crop_txt()
print(txt)
# if txt is ok
except AttributeError as e:
continue
if not txt:
                print('OCR failed, giving up on this image')
continue
else:
imgnum += 1
self.finish_signal.emit(imgpath, True)
if imgnum >= 3:
return
|
[
"os.mkdir",
"os.path.exists",
"PIL.Image.open",
"re.findall",
"requests.get",
"OCR.Image2txt.picture_ocr"
] |
[((829, 872), 're.findall', 're.findall', (['""""objURL":"(.*?)","""', 'html', 're.S'], {}), '(\'"objURL":"(.*?)",\', html, re.S)\n', (839, 872), False, 'import re, os\n'), ((337, 357), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (351, 357), False, 'import re, os\n'), ((362, 376), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (370, 376), False, 'import re, os\n'), ((796, 813), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (808, 813), False, 'import requests\n'), ((946, 976), 'requests.get', 'requests.get', (['each'], {'timeout': '(10)'}), '(each, timeout=10)\n', (958, 976), False, 'import requests\n'), ((1173, 1192), 'PIL.Image.open', 'Image.open', (['imgpath'], {}), '(imgpath)\n', (1183, 1192), False, 'from PIL import Image\n'), ((1490, 1520), 'OCR.Image2txt.picture_ocr', 'Image2txt.picture_ocr', (['imgpath'], {}), '(imgpath)\n', (1511, 1520), False, 'from OCR import Image2txt\n')]
|
__source__ = 'https://leetcode.com/problems/bulb-switcher/description/'
# Time: O(1)
# Space: O(1)
#
# Description: Leetcode # 319. Bulb Switcher
#
# There are n bulbs that are initially off.
# You first turn on all the bulbs.
# Then, you turn off every second bulb.
# On the third round, you toggle every third bulb (turning on if it's off or turning off if it's on).
# For the ith round, you toggle every i-th bulb. For the nth round, you only toggle the last bulb.
# Find how many bulbs are on after n rounds.
#
# Example:
#
# Given n = 3.
#
# At first, the three bulbs are [off, off, off].
# After first round, the three bulbs are [on, on, on].
# After second round, the three bulbs are [on, off, on].
# After third round, the three bulbs are [on, off, off].
#
# So you should return 1, because only one bulb is on.
# Related Topics
# Math Brainteaser
# Similar Questions
# Bulb Switcher II
#
import math
import unittest
class Solution(object):
def bulbSwitch(self, n):
"""
        :type n: int
        :rtype: int
"""
# The number of full squares.
return int(math.sqrt(n))
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
#Thought: https://discuss.leetcode.com/topic/39558/share-my-o-1-solution-with-explanation
A bulb ends up on iff it is switched an odd number of times.
Call them bulb 1 to bulb n. Bulb i is switched in round d if and only if d divides i.
So bulb i ends up on if and only if it has an odd number of divisors.
Divisors come in pairs, like i=12 has divisors 1 and 12, 2 and 6, and 3 and 4.
Except when i is a square, like 36 has divisors 1 and 36, 2 and 18, 3 and 12, 4 and 9, and double divisor 6.
So bulb i ends up on if and only if i is a square.
So just count the square numbers.
Let R = int(sqrt(n)). That's the root of the largest square in the range [1,n]. And 1 is the smallest root.
So you have the roots from 1 to R, that's R roots, which correspond to the R squares.
So int(sqrt(n)) is the answer. (C++ does the conversion to int automatically,
because of the specified return type).
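Worked example (added for illustration): for n = 10 the perfect squares in [1, 10] are 1, 4 and 9,
so exactly three bulbs end up on, and indeed int(sqrt(10)) = int(3.162...) = 3.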
#100% 0ms
public class Solution {
public int bulbSwitch(int n) {
return (int) Math.sqrt(n);
}
}
'''
|
[
"unittest.main",
"math.sqrt"
] |
[((1247, 1262), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1260, 1262), False, 'import unittest\n'), ((1105, 1117), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (1114, 1117), False, 'import math\n')]
|
from setuptools import setup
setup(
name='V16_API',
packages=['V16_API'],
include_package_data=True,
install_requires=[
'flask', 'flask-bootstrap', 'flask-nav', 'pyserial', 'flask_wtf', 'gunicorn'
],
)
|
[
"setuptools.setup"
] |
[((32, 207), 'setuptools.setup', 'setup', ([], {'name': '"""V16_API"""', 'packages': "['V16_API']", 'include_package_data': '(True)', 'install_requires': "['flask', 'flask-bootstrap', 'flask-nav', 'pyserial', 'flask_wtf', 'gunicorn']"}), "(name='V16_API', packages=['V16_API'], include_package_data=True,\n install_requires=['flask', 'flask-bootstrap', 'flask-nav', 'pyserial',\n 'flask_wtf', 'gunicorn'])\n", (37, 207), False, 'from setuptools import setup\n')]
|
import torch.nn as nn
from matplotlib import pyplot as plt
def MLP(input_dim, out_dims):
"""
Creates an MLP for the models.
:param input_dim: Integer containing the dimensions of the input (= x_dim + y_dim).
:param out_dims: An iterable containing the output sizes of the layers of the MLP.
:return: The MLP, defined as a PyTorch neural network module.
"""
# The MLP (last layer without a ReLU)
layers = [nn.Linear(input_dim, out_dims[0])]
if len(out_dims) > 1:
layers.append(nn.ReLU())
for i in range(1, len(out_dims) - 1):
layers.append(nn.Linear(out_dims[i - 1], out_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(out_dims[-2], out_dims[-1]))
return nn.Sequential(*layers)
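# Minimal usage sketch (the dimensions below are illustrative, not from the original):
# mlp = MLP(input_dim=3, out_dims=[128, 128, 1])
# builds Linear(3, 128) -> ReLU -> Linear(128, 128) -> ReLU -> Linear(128, 1)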
def plot_functions(target_x, target_y, context_x, context_y, pred_y, σ_y,
save_to_filepath=None):
"""
Plots the predicted mean and variance and the context points.
:param target_x: An array of shape [batch_size, num_targets, 1] that contains
the x values of the target points.
:param target_y: An array of shape [batch_size, num_targets, 1] that contains
the y values of the target points.
:param context_x: An array of shape [batch_size, num_contexts, 1] that contains
the x values of the context points.
:param context_y: An array of shape [batch_size, num_contexts, 1] that contains
the y values of the context points.
:param pred_y: An array of shape [batch_size, num_targets, 1] that contains
the predicted means of the y values at the target points in target_x.
:param σ: An array of shape [batch_size, num_targets, 1] that contains the
predicted std. dev. of the y values at the target points in target_x.
:param save_to_filepath: A string containing the path of the file where the
plot is to be saved.
"""
# Plot everything
plt.plot(target_x[0], pred_y[0], 'tab:blue', linewidth=2)
plt.plot(target_x[0], target_y[0], 'k', linewidth=2, alpha=0.25)
plt.plot(context_x[0], context_y[0], 'kP', markersize=6)
plt.fill_between(
target_x[0, :, 0],
pred_y[0, :, 0] - 1.96 * σ_y[0, :, 0],
pred_y[0, :, 0] + 1.96 * σ_y[0, :, 0],
alpha=0.2,
facecolor='tab:blue',
interpolate=True)
# Make the plot pretty
plt.yticks([-2, 0, 2], fontsize=12)
plt.xticks([-2, 0, 2], fontsize=12)
plt.ylim([-2, 2])
ax = plt.gca()
if save_to_filepath is not None:
plt.savefig(save_to_filepath, bbox_inches='tight')
plt.show()
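# Minimal usage sketch (the toy data is an assumption; shapes follow the docstring):
# import numpy as np
# x = np.linspace(-2, 2, 100).reshape(1, 100, 1)
# y = np.sin(x)
# plot_functions(x, y, x[:, ::10], y[:, ::10], y, 0.1 * np.ones_like(x))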
|
[
"matplotlib.pyplot.show",
"torch.nn.ReLU",
"matplotlib.pyplot.plot",
"torch.nn.Sequential",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yticks",
"torch.nn.Linear",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.savefig"
] |
[((761, 783), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (774, 783), True, 'import torch.nn as nn\n'), ((1939, 1996), 'matplotlib.pyplot.plot', 'plt.plot', (['target_x[0]', 'pred_y[0]', '"""tab:blue"""'], {'linewidth': '(2)'}), "(target_x[0], pred_y[0], 'tab:blue', linewidth=2)\n", (1947, 1996), True, 'from matplotlib import pyplot as plt\n'), ((2001, 2065), 'matplotlib.pyplot.plot', 'plt.plot', (['target_x[0]', 'target_y[0]', '"""k"""'], {'linewidth': '(2)', 'alpha': '(0.25)'}), "(target_x[0], target_y[0], 'k', linewidth=2, alpha=0.25)\n", (2009, 2065), True, 'from matplotlib import pyplot as plt\n'), ((2070, 2126), 'matplotlib.pyplot.plot', 'plt.plot', (['context_x[0]', 'context_y[0]', '"""kP"""'], {'markersize': '(6)'}), "(context_x[0], context_y[0], 'kP', markersize=6)\n", (2078, 2126), True, 'from matplotlib import pyplot as plt\n'), ((2131, 2304), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['target_x[0, :, 0]', '(pred_y[0, :, 0] - 1.96 * σ_y[0, :, 0])', '(pred_y[0, :, 0] + 1.96 * σ_y[0, :, 0])'], {'alpha': '(0.2)', 'facecolor': '"""tab:blue"""', 'interpolate': '(True)'}), "(target_x[0, :, 0], pred_y[0, :, 0] - 1.96 * σ_y[0, :, 0], \n pred_y[0, :, 0] + 1.96 * σ_y[0, :, 0], alpha=0.2, facecolor='tab:blue',\n interpolate=True)\n", (2147, 2304), True, 'from matplotlib import pyplot as plt\n'), ((2377, 2412), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[-2, 0, 2]'], {'fontsize': '(12)'}), '([-2, 0, 2], fontsize=12)\n', (2387, 2412), True, 'from matplotlib import pyplot as plt\n'), ((2417, 2452), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[-2, 0, 2]'], {'fontsize': '(12)'}), '([-2, 0, 2], fontsize=12)\n', (2427, 2452), True, 'from matplotlib import pyplot as plt\n'), ((2457, 2474), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 2]'], {}), '([-2, 2])\n', (2465, 2474), True, 'from matplotlib import pyplot as plt\n'), ((2484, 2493), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2491, 2493), True, 'from matplotlib import pyplot as plt\n'), ((2594, 2604), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2602, 2604), True, 'from matplotlib import pyplot as plt\n'), ((441, 474), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'out_dims[0]'], {}), '(input_dim, out_dims[0])\n', (450, 474), True, 'import torch.nn as nn\n'), ((2539, 2589), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_to_filepath'], {'bbox_inches': '"""tight"""'}), "(save_to_filepath, bbox_inches='tight')\n", (2550, 2589), True, 'from matplotlib import pyplot as plt\n'), ((525, 534), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (532, 534), True, 'import torch.nn as nn\n'), ((710, 747), 'torch.nn.Linear', 'nn.Linear', (['out_dims[-2]', 'out_dims[-1]'], {}), '(out_dims[-2], out_dims[-1])\n', (719, 747), True, 'import torch.nn as nn\n'), ((609, 648), 'torch.nn.Linear', 'nn.Linear', (['out_dims[i - 1]', 'out_dims[i]'], {}), '(out_dims[i - 1], out_dims[i])\n', (618, 648), True, 'import torch.nn as nn\n'), ((676, 685), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (683, 685), True, 'import torch.nn as nn\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
from ..utils.generic_utils import get_uid
class Layer():
"""Abstract base layer class."""
def __init__(self, **kwargs):
self._trainable_weights = []
self._non_trainable_weights = []
self._grads = {} # (name, delta)
self._updates = {}
prefix = self.__class__.__name__.lower()
self.name = prefix + '_' + str(get_uid(prefix))
self.trainable = kwargs.get('trainable', True)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer."""
output_shape = input_shape
self.output_shape = output_shape
return output_shape
def build(self, input_shape):
output_shape = self.compute_output_shape(input_shape)
return output_shape
def add_weight(self, shape=(), name=None, dtype=None, initializer=None, regularizer=None, constraint=None, trainable=True):
"""
@param shape : (tuple) The shape of the weight.
@param dtype : (dtype) The dtype of the weight.
@param initializer: (string) An Initializer instance.
@param regularizer: (string) A Regularizer instance.
@param trainable : (bool) A boolean, whether the weight should be trained via backprop or not.
@return weight : (ndarray) The created weights variable.
"""
weight = initializer(shape=shape, dtype=dtype)
if trainable:
self._trainable_weights.append(name)
else:
self._non_trainable_weights.append(name)
self._updates[name] = np.expand_dims(weight, axis=0) # shape=(z,x,y)
self._grads[name] = np.zeros_like(weight) # shape=(x,y)
return weight
def update(self, optimizer, batch_size):
if self.trainable and len(self._non_trainable_weights)>0:
self._trainable_weights += self._non_trainable_weights
self._non_trainable_weights = []
elif self.trainable == False and len(self._trainable_weights)>0:
self._non_trainable_weights += self._trainable_weights
self._trainable_weights = []
for name in self._trainable_weights:
weight = self.__dict__.get(name)
regularizer = self.__dict__.get(f"{name}_regularizer")
grad = self._grads[name]/batch_size + regularizer.diff(weight)
new_weight = optimizer.get_updates(
grad=grad,
curt_param=weight,
name=f"{self.name}_{name}"
)
self.__dict__[name] = new_weight # Update.
# self._updates[name] = np.r_[self._updates[name], np.expand_dims(new_weight, axis=0)]
self._grads[name] = np.zeros_like(new_weight)
def get_weights(self):
return []
def set_weights(self, weights):
pass
@property
def weights(self):
return self.get_weights()
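# Minimal usage sketch inside a subclass's build() (the lambda initializer is an
# illustrative assumption; the real framework presumably passes initializer objects):
# class Dense(Layer):
#     def build(self, input_shape):
#         self.kernel = self.add_weight(shape=(input_shape[-1], 4), name="kernel",
#                                       dtype=np.float32,
#                                       initializer=lambda shape, dtype: np.zeros(shape, dtype=dtype))
#         return super().build(input_shape)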
|
[
"numpy.zeros_like",
"numpy.expand_dims"
] |
[((1652, 1682), 'numpy.expand_dims', 'np.expand_dims', (['weight'], {'axis': '(0)'}), '(weight, axis=0)\n', (1666, 1682), True, 'import numpy as np\n'), ((1727, 1748), 'numpy.zeros_like', 'np.zeros_like', (['weight'], {}), '(weight)\n', (1740, 1748), True, 'import numpy as np\n'), ((2777, 2802), 'numpy.zeros_like', 'np.zeros_like', (['new_weight'], {}), '(new_weight)\n', (2790, 2802), True, 'import numpy as np\n')]
|
# coding: utf-8
from django.test import TestCase
from djutils.testrunner import TearDownTestCaseMixin
from parkkeeper import models
from parkkeeper import factories
class BaseTaskTestCase(TearDownTestCaseMixin, TestCase):
def tearDown(self):
self.tearDownMongo()
def test_get_task_model_monit(self):
monit_task = factories.MonitTask()
task_type = monit_task.get_task_type()
task_model = models.BaseTask.get_task_model(task_type)
self.assertEqual(
task_model,
models.MonitTask
)
def test_get_task_model_work(self):
work_task = factories.WorkTask()
task_type = work_task.get_task_type()
task_model = models.BaseTask.get_task_model(task_type)
self.assertEqual(
task_model,
models.WorkTask
)
|
[
"parkkeeper.factories.MonitTask",
"parkkeeper.models.BaseTask.get_task_model",
"parkkeeper.factories.WorkTask"
] |
[((342, 363), 'parkkeeper.factories.MonitTask', 'factories.MonitTask', ([], {}), '()\n', (361, 363), False, 'from parkkeeper import factories\n'), ((432, 473), 'parkkeeper.models.BaseTask.get_task_model', 'models.BaseTask.get_task_model', (['task_type'], {}), '(task_type)\n', (462, 473), False, 'from parkkeeper import models\n'), ((624, 644), 'parkkeeper.factories.WorkTask', 'factories.WorkTask', ([], {}), '()\n', (642, 644), False, 'from parkkeeper import factories\n'), ((712, 753), 'parkkeeper.models.BaseTask.get_task_model', 'models.BaseTask.get_task_model', (['task_type'], {}), '(task_type)\n', (742, 753), False, 'from parkkeeper import models\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
from distutils.core import setup
setup(
name = 'sim-tree',
packages = ['sim_tree'], # this must be the same as the name above
    install_requires = ['pandas'],  # os, time and string are stdlib modules; listing them would break pip install
version = '0.6',
description = 'A module for automating hierarchical simulation studies',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/benlindsay/sim-tree',
download_url = 'https://github.com/benlindsay/sim-tree/archive/0.6.tar.gz',
keywords = ['workflow', 'simulations'],
classifiers = [],
)
|
[
"distutils.core.setup"
] |
[((120, 548), 'distutils.core.setup', 'setup', ([], {'name': '"""sim-tree"""', 'packages': "['sim_tree']", 'install_requires': "['os', 'pandas', 'time', 'string']", 'version': '"""0.6"""', 'description': '"""A module for automating hierarchical simulation studies"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/benlindsay/sim-tree"""', 'download_url': '"""https://github.com/benlindsay/sim-tree/archive/0.6.tar.gz"""', 'keywords': "['workflow', 'simulations']", 'classifiers': '[]'}), "(name='sim-tree', packages=['sim_tree'], install_requires=['os',\n 'pandas', 'time', 'string'], version='0.6', description=\n 'A module for automating hierarchical simulation studies', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/benlindsay/sim-tree', download_url=\n 'https://github.com/benlindsay/sim-tree/archive/0.6.tar.gz', keywords=[\n 'workflow', 'simulations'], classifiers=[])\n", (125, 548), False, 'from distutils.core import setup\n')]
|
from django.http.response import JsonResponse
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from jwt_devices import views
from jwt_devices.settings import api_settings
class PermittedHeadersMiddleware(object):
"""
    Middleware that rejects requests carrying the Permanent-Token header anywhere other than the
    permanent-token refresh view, so that front-end clients never send the sensitive permanent
    token with every request.
"""
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
if self.get_response:
return self.get_response(request)
def process_view(self, request, view_func, view_args, view_kwargs):
view_cls = getattr(view_func, "cls", None)
if (view_cls and api_settings.JWT_PERMANENT_TOKEN_AUTH and request.META.get("HTTP_PERMANENT_TOKEN") and view_cls != views.DeviceRefreshJSONWebToken):
return JsonResponse({
"HTTP_PERMANENT_TOKEN": {
"details": _("Using the Permanent-Token header is disallowed for {}").format(type(view_cls))
}
}, status=status.HTTP_400_BAD_REQUEST)
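# Minimal usage sketch (the settings module path is an assumption):
# MIDDLEWARE = [
#     # ...,
#     "myproject.middleware.PermittedHeadersMiddleware",
# ]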
|
[
"django.utils.translation.ugettext_lazy"
] |
[((1074, 1132), 'django.utils.translation.ugettext_lazy', '_', (['"""Using the Permanent-Token header is disallowed for {}"""'], {}), "('Using the Permanent-Token header is disallowed for {}')\n", (1075, 1132), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('reader', '0004_float_numbers')]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.CharField(
auto_created=True, primary_key=True,
max_length=25, serialize=False
)),
('name', models.CharField(
help_text='The name of the category. '
'Must be unique and cannot be changed once set',
max_length=25, unique=True, serialize=False
)),
('description', models.CharField(
help_text='A description for the category.',
max_length=250
)),
],
options={'verbose_name_plural': 'categories'},
),
migrations.AddField(
model_name='series',
name='categories',
field=models.ManyToManyField(
blank=True, to='reader.Category'
),
),
]
|
[
"django.db.models.CharField",
"django.db.models.ManyToManyField"
] |
[((1029, 1085), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'to': '"""reader.Category"""'}), "(blank=True, to='reader.Category')\n", (1051, 1085), False, 'from django.db import migrations, models\n'), ((262, 351), 'django.db.models.CharField', 'models.CharField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'max_length': '(25)', 'serialize': '(False)'}), '(auto_created=True, primary_key=True, max_length=25,\n serialize=False)\n', (278, 351), False, 'from django.db import migrations, models\n'), ((433, 588), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""The name of the category. Must be unique and cannot be changed once set"""', 'max_length': '(25)', 'unique': '(True)', 'serialize': '(False)'}), "(help_text=\n 'The name of the category. Must be unique and cannot be changed once set',\n max_length=25, unique=True, serialize=False)\n", (449, 588), False, 'from django.db import migrations, models\n'), ((695, 772), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""A description for the category."""', 'max_length': '(250)'}), "(help_text='A description for the category.', max_length=250)\n", (711, 772), False, 'from django.db import migrations, models\n')]
|
from Constants import CONST_IDX, LINR_IDX, KAPPA_IDX, CALPHA_IDX, SQRTPLUS_IDX, EXACT_CUBIC_CONSTANT, STANDARD_IDXS, CUBIC_EXACT_IDXS, QUADRATIC_FORWARD_EXACT_IDXS, NOTORIGIN_IDXS
from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun
class LoewnerRunFactory():
def __init__(self, start_time, final_time, outer_points, inner_points, compile_modules = True, save_data = True, save_plot = True):
# Set the time parameters for the factory
self.start_time = start_time
self.final_time = final_time
# Set the resolution parameters for the factory
self.outer_points = outer_points
self.inner_points = inner_points
# Set the compilation setting for the factory
self.compile_modules = compile_modules
# Set the saving options for the factory
self.save_plot = save_plot
self.save_data = save_data
# Give default arguments for the extra parameters
self.kappa = 0
self.alpha = 0
self.constant = 0
def select_single_run(self,index,start_time=None,final_time=None,outer_points=None,inner_points=None,constant=None,kappa=None,alpha=None):
# Choose the class variables for the LoewnerRun object if no alternative is given
if start_time is None:
start_time = self.start_time
if final_time is None:
final_time = self.final_time
if outer_points is None:
outer_points = self.outer_points
if inner_points is None:
inner_points = self.inner_points
if kappa is None:
kappa = self.kappa
if constant is None:
constant = self.constant
if alpha is None:
alpha = self.alpha
# Create LoewnerRun object based on which driving function was chosen
if index == CONST_IDX:
return ConstantLoewnerRun(constant,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == LINR_IDX:
return LinearLoewnerRun(start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == KAPPA_IDX:
if final_time > 1:
final_time = 1
return KappaLoewnerRun(kappa,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == CALPHA_IDX:
return CAlphaLoewnerRun(alpha,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
if index == SQRTPLUS_IDX:
return SqrtTPlusOneLoewnerRun(start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
# Create an ordinary LoewnerRun
return LoewnerRun(index,start_time,final_time,outer_points,inner_points,self.compile_modules,self.save_data,self.save_plot)
def create_standard_runs(self):
# Create a list of LoewnerRuns for driving functions that do not require additional arguments
return [self.select_single_run(index=i) for i in STANDARD_IDXS]
def create_wedge_runs(self):
        # Create a list of LoewnerRuns for driving functions that do not start at the origin (excluding kappa)
return [self.select_single_run(index=i,constant=1) for i in NOTORIGIN_IDXS]
def vary_kappa(self, kappas, outer_points=None, inner_points=None):
if outer_points is None:
outer_points=self.outer_points
if inner_points is None:
inner_points=self.inner_points
# Create a list of kappa-driving LoewnerRuns with different values for kappa
return [self.select_single_run(index=KAPPA_IDX, kappa=k, outer_points=outer_points, inner_points=inner_points) for k in kappas]
def vary_alpha(self, alphas):
# Create a list of calpha-driving LoewnerRuns with different values for alpha
return [self.select_single_run(index=CALPHA_IDX, alpha=a) for a in alphas]
def vary_inner_res(self, index, points, constant=None, kappa=None, alpha=None):
# Create a list of LoewnerRuns with the same driving function and different values for 'inner time'
return [self.select_single_run(index=index, inner_points=p, constant=constant, kappa=kappa, alpha=alpha) for p in points]
def vary_final_time(self, index, times, constant=None, kappa=None, alpha=None):
# Create a list of LoewnerRuns with the same driving function and different values for the final time
return [self.select_single_run(index=index, final_time=t, constant=constant, kappa=kappa, alpha=alpha) for t in times]
def create_exact_cubic(self):
# Create a list of LoewnerRuns that have an exact cubic forward solution
return [self.select_single_run(index=i, constant=EXACT_CUBIC_CONSTANT) for i in CUBIC_EXACT_IDXS]
def create_exact_quadratic_forward(self):
# Create a list of LoewnerRuns that have an exact quadratic forward solution
return [self.select_single_run(index=i) for i in QUADRATIC_FORWARD_EXACT_IDXS]
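# Minimal usage sketch (parameter values are illustrative assumptions):
# factory = LoewnerRunFactory(start_time=0, final_time=10, outer_points=1000, inner_points=10)
# standard_runs = factory.create_standard_runs()
# kappa_runs = factory.vary_kappa(kappas=[2, 4, 6])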
|
[
"LoewnerRun.KappaLoewnerRun",
"LoewnerRun.ConstantLoewnerRun",
"LoewnerRun.LinearLoewnerRun",
"LoewnerRun.CAlphaLoewnerRun",
"LoewnerRun.LoewnerRun",
"LoewnerRun.SqrtTPlusOneLoewnerRun"
] |
[((2883, 3011), 'LoewnerRun.LoewnerRun', 'LoewnerRun', (['index', 'start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(index, start_time, final_time, outer_points, inner_points, self.\n compile_modules, self.save_data, self.save_plot)\n', (2893, 3011), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n'), ((1943, 2081), 'LoewnerRun.ConstantLoewnerRun', 'ConstantLoewnerRun', (['constant', 'start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(constant, start_time, final_time, outer_points,\n inner_points, self.compile_modules, self.save_data, self.save_plot)\n', (1961, 2081), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n'), ((2121, 2248), 'LoewnerRun.LinearLoewnerRun', 'LinearLoewnerRun', (['start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(start_time, final_time, outer_points, inner_points, self.\n compile_modules, self.save_data, self.save_plot)\n', (2137, 2248), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n'), ((2353, 2485), 'LoewnerRun.KappaLoewnerRun', 'KappaLoewnerRun', (['kappa', 'start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(kappa, start_time, final_time, outer_points, inner_points,\n self.compile_modules, self.save_data, self.save_plot)\n', (2368, 2485), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n'), ((2527, 2660), 'LoewnerRun.CAlphaLoewnerRun', 'CAlphaLoewnerRun', (['alpha', 'start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(alpha, start_time, final_time, outer_points, inner_points,\n self.compile_modules, self.save_data, self.save_plot)\n', (2543, 2660), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n'), ((2704, 2836), 'LoewnerRun.SqrtTPlusOneLoewnerRun', 'SqrtTPlusOneLoewnerRun', (['start_time', 'final_time', 'outer_points', 'inner_points', 'self.compile_modules', 'self.save_data', 'self.save_plot'], {}), '(start_time, final_time, outer_points, inner_points,\n self.compile_modules, self.save_data, self.save_plot)\n', (2726, 2836), False, 'from LoewnerRun import LoewnerRun, ConstantLoewnerRun, LinearLoewnerRun, KappaLoewnerRun, CAlphaLoewnerRun, SqrtTPlusOneLoewnerRun\n')]
|
"""
Copyright 2010 <NAME>, <NAME>, and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
handler500 # Pyflakes
urlpatterns = patterns('',
url('^$', 'project.views.index', name="dashboard_url"),
url('^about/$', 'project.views.about', name="about_url"),
(r'^notifications/', include('notifications.urls')),
(r'^projects/', include('project.urls')),
(r'^recipes/', include('recipes.urls')),
(r'^preferences/', include('preferences.urls')),
(r'^jobs/', include('job_queue.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
[
"django.contrib.admin.autodiscover"
] |
[((680, 700), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (698, 700), False, 'from django.contrib import admin\n')]
|
from serial import *
import serial.tools.list_ports
import jsonConfig as j
import time
#connected_devices=[""]
def serialConnection(values,device):
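    """Send the key bindings to a device over serial.

    Handshake (as implemented below): for each of the 16 keys, write the key
    index and wait for an echo, write the payload length and wait for an echo,
    then write the binding string itself and wait until it is echoed back.
    """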
print("hi ")
print("serialConnection()")
print("values: {} , port: {}".format(values,device))
#c_number=getDeviceComNumber(device)
#print(c_number)
print("port is going to open")
    port = serial.Serial(getDeviceComNumber(device), 9600, timeout=1)  # serial port will be made configurable
time.sleep(3)
if(port is not None):
print("port openned")
for i in range(16):
binding_str= ''.join(j.getBindings(i,values))
print("str value: {} str type: {}".format(binding_str, type(binding_str)))
time.sleep(0.001)
            if len(binding_str) > 0:
port.write(bytes("{}".format(chr(i)),encoding="ascii"))
while(port.inWaiting()<1):
time.sleep(0.1)
print("Waiting data")
time.sleep(0.1)
print("Data comes: ")
print(port.readline().decode('ascii'))
port.write(bytes("{}".format(chr(len(binding_str))),encoding="ascii"))
while(port.inWaiting()<1):
time.sleep(0.1)
print("Waiting data")
#time.sleep(0.1)
print("Data need to send lenght: ")
print(port.readline().decode('ascii'))
#port.write(bytes(binding_str,encoding="ascii"))
print(binding_str.encode('iso8859_9'))
port.write(binding_str.encode('iso8859_9'))
while(port.inWaiting()<len(binding_str)):
time.sleep(0.1)
print("Waiting data")
time.sleep(0.1)
print("Data comes: ")
print(port.readline().decode('iso8859_9'))
else:
print("Bindings of the key error ")
port.close()
else:
print("Device cannot found")
def getDevices():
    connected_devices = serial.tools.list_ports.comports()
#print("connected_devices:")
#print(connected_devices)
#for i in connected_devices:
# print(i)
return connected_devices
def getDeviceComNumber(device):
number= device.split(' ')
#print(number[0])
return number[0]
|
[
"jsonConfig.getBindings",
"time.sleep"
] |
[((496, 509), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (506, 509), False, 'import time\n'), ((759, 776), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (769, 776), False, 'import time\n'), ((633, 657), 'jsonConfig.getBindings', 'j.getBindings', (['i', 'values'], {}), '(i, values)\n', (646, 657), True, 'import jsonConfig as j\n'), ((1033, 1048), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1043, 1048), False, 'import time\n'), ((1846, 1861), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1856, 1861), False, 'import time\n'), ((957, 972), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (967, 972), False, 'import time\n'), ((1301, 1316), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1311, 1316), False, 'import time\n'), ((1770, 1785), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1780, 1785), False, 'import time\n')]
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.process.buildstep import FAILURE
from buildbot.process.buildstep import SUCCESS
from buildbot.process.buildstep import BuildStep
from buildbot.process.results import worst_status
from buildbot.steps.worker import CompositeStepMixin
class DownloadSecretsToWorker(BuildStep, CompositeStepMixin):
renderables = ['secret_to_be_populated']
def __init__(self, populated_secret_list, **kwargs):
super(DownloadSecretsToWorker, self).__init__(**kwargs)
self.secret_to_be_populated = populated_secret_list
@defer.inlineCallbacks
def runPopulateSecrets(self):
result = SUCCESS
for path, secretvalue in self.secret_to_be_populated:
if not isinstance(path, str):
raise ValueError("Secret path %s is not a string" % path)
self.secret_to_be_interpolated = secretvalue
res = yield self.downloadFileContentToWorker(path, self.secret_to_be_interpolated)
result = worst_status(result, res)
defer.returnValue(result)
@defer.inlineCallbacks
def run(self):
self._start_deferred = None
res = yield self.runPopulateSecrets()
defer.returnValue(res)
class RemoveWorkerFileSecret(BuildStep, CompositeStepMixin):
def __init__(self, populated_secret_list, logEnviron=False, **kwargs):
self.paths = []
for path, secret in populated_secret_list:
self.paths.append(path)
self.logEnviron = logEnviron
super(RemoveWorkerFileSecret, self).__init__(**kwargs)
@defer.inlineCallbacks
def runRemoveWorkerFileSecret(self):
all_results = []
for path in self.paths:
res = yield self.runRmFile(path, abandonOnFailure=False)
all_results.append(res)
if FAILURE in all_results:
result = FAILURE
else:
result = SUCCESS
defer.returnValue(result)
@defer.inlineCallbacks
def run(self):
self._start_deferred = None
res = yield self.runRemoveWorkerFileSecret()
defer.returnValue(res)
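# Minimal usage sketch in a build factory (the path and secret name are illustrative
# assumptions; util.Secret comes from buildbot.plugins):
# secret_files = [("/tmp/mysecret.txt", util.Secret("my-secret"))]
# factory.addStep(DownloadSecretsToWorker(secret_files))
# ... steps that read the secret file ...
# factory.addStep(RemoveWorkerFileSecret(secret_files))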
|
[
"twisted.internet.defer.returnValue",
"buildbot.process.results.worst_status"
] |
[((1829, 1854), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (1846, 1854), False, 'from twisted.internet import defer\n'), ((1992, 2014), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['res'], {}), '(res)\n', (2009, 2014), False, 'from twisted.internet import defer\n'), ((2711, 2736), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (2728, 2736), False, 'from twisted.internet import defer\n'), ((2881, 2903), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['res'], {}), '(res)\n', (2898, 2903), False, 'from twisted.internet import defer\n'), ((1795, 1820), 'buildbot.process.results.worst_status', 'worst_status', (['result', 'res'], {}), '(result, res)\n', (1807, 1820), False, 'from buildbot.process.results import worst_status\n')]
|
from datetime import datetime
extensions = []
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"Opale"
year = datetime.now().year
copyright = u"%d <NAME> " % year
exclude_patterns = ["_build"]
html_theme = "opale"
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html",
"searchbox.html",
"donate.html",
]
}
# html_logo = "_static/logo.png"
html_static_path = ["_static"]
html_theme_options = {
"logo": "logo.png",
"logo_name": True,
"logo_text_align": "center",
"description": "Dark theme based on Alabaster.",
"github_user": "AleCandido",
"github_repo": "opale",
"fixed_sidebar": True,
}
extensions.append("releases")
releases_github_path = "AleCandido/opale"
# Our pre-0.x releases are unstable / mix bugs+features
releases_unstable_prehistory = True
|
[
"datetime.datetime.now"
] |
[((151, 165), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (163, 165), False, 'from datetime import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 10 22:27:03 2020
@author: <NAME>
"""
import math
import tqdm
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import utils
from net import DCRNNModel
# import sys
# sys.path.append("./xlwang_version")
# from dcrnn_model import DCRNNModel
"""
Hyperparameters
"""
batch_size = 64
enc_input_dim = 2
dec_input_dim = 1
hidden_dim = 64
output_dim = 1
diffusion_steps = 2
num_nodes = 207
rnn_layers = 2
seq_length = 12
horizon = 12
cl_decay_steps = 2000 # controls how fast the teacher-forcing ratio decays with global steps
filter_type = "dual_random_walk"
epochs = 100
lr = 0.01
weight_decay = 0.0
epsilon = 1.0e-3
amsgrad = True
lr_decay_ratio = 0.1
lr_decay_steps = [20, 30, 40, 50]
max_grad_norm = 5
checkpoints = './checkpoints/dcrnn.pt'
sensor_ids = './data/METR-LA/graph_sensor_ids.txt'
sensor_distance = './data/METR-LA/distances_la_2012.csv'
recording='data/processed/METR-LA'
"""
Dataset
"""
# read sensor IDs
with open(sensor_ids) as f:
sensor_ids = f.read().strip().split(',')
# read sensor distance
distance_df = pd.read_csv(sensor_distance, dtype={'from': 'str', 'to': 'str'})
# build adj matrix based on equation (10)
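# (the usual DCRNN construction: a thresholded Gaussian kernel,
#  W_ij = exp(-dist(v_i, v_j)^2 / sigma^2) if above a sparsity threshold, else 0)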
adj_mx = utils.get_adjacency_matrix(distance_df, sensor_ids)
data = utils.load_dataset(dataset_dir=recording, batch_size=batch_size, test_batch_size=batch_size)
train_data_loader = data['train_loader']
val_data_loader = data['val_loader']
test_data_loader = data['test_loader']
standard_scaler = data['scaler']
"""
Init model
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DCRNNModel(adj_mx,
diffusion_steps,
num_nodes,
batch_size,
enc_input_dim,
dec_input_dim,
hidden_dim,
output_dim,
rnn_layers,
filter_type).to(device)
# model = DCRNNModel(adj_mx,
# batch_size,
# enc_input_dim,
# dec_input_dim,
# diffusion_steps,
# num_nodes,
# rnn_layers,
# hidden_dim,
# horizon,
# output_dim,
# filter_type).to(device)
optimizer = torch.optim.Adam(model.parameters(),
lr=lr, eps=epsilon,
weight_decay=weight_decay,
                             amsgrad=amsgrad)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=lr_decay_steps,
gamma=lr_decay_ratio)
"""
DCRNN Training
"""
def compute_mae_loss(y_true, y_predicted, standard_scaler):
y_true = standard_scaler.inverse_transform(y_true)
y_predicted = standard_scaler.inverse_transform(y_predicted)
return utils.masked_mae_loss(y_predicted, y_true, null_val=0.0)
def eval_metrics(y_true_np, y_predicted_np, standard_scaler):
metrics = np.zeros(3)
y_true_np = standard_scaler.inverse_transform(y_true_np)
y_predicted_np = standard_scaler.inverse_transform(y_predicted_np)
mae = utils.masked_mae_np(y_predicted_np, y_true_np, null_val=0.0)
mape = utils.masked_mape_np(y_predicted_np, y_true_np, null_val=0.0)
rmse = utils.masked_rmse_np(y_predicted_np, y_true_np, null_val=0.0)
metrics[0] += mae
metrics[1] += mape
metrics[2] += rmse
return metrics
# some pre-calculated properties
num_train_iteration_per_epoch = math.ceil(data['x_train'].shape[0] / batch_size)
num_val_iteration_per_epoch = math.ceil(data['x_val'].shape[0] / batch_size)
num_test_iteration_per_epoch = math.ceil(data['x_test'].shape[0] / batch_size)
# start training
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Total number of trainable parameters:", params)
print("Initialization complete. Start training... ==>", epochs, "epochs with", num_train_iteration_per_epoch, "batches per epoch.")
for epoch in range(1, epochs + 1):
model.train()
train_iterator = train_data_loader.get_iterator()
val_iterator = val_data_loader.get_iterator()
total_loss = 0.0
    total_metrics = np.zeros(3) # Three metrics: MAE, MAPE, RMSE
total_val_metrics = np.zeros(3)
for batch_idx, (x, y) in enumerate(tqdm.tqdm(train_iterator)):
x = torch.FloatTensor(x)
y = torch.FloatTensor(y)
y_true = y[..., :output_dim] # delete time encoding to form as label
# x:[batch, seq_len, nodes, enc_input_dim]
# y:[batch, horizon, nodes, output_dim + 1]
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
# compute teaching force ratio: decrease this gradually to 0
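        # k / (k + exp(step / k)) with k = cl_decay_steps starts near 1 and decays toward 0,
        # so the decoder sees ground truth early in training and its own predictions later.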
global_steps = (epoch - 1) * num_train_iteration_per_epoch + batch_idx
teaching_force_ratio = cl_decay_steps / (cl_decay_steps + math.exp(global_steps / cl_decay_steps))
# feedforward
y_hat = model(x, y, teaching_force_ratio) # [horizon, batch, nodes*output_dim]
y_hat = torch.transpose(torch.reshape(y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
# back propagation
loss = compute_mae_loss(y_true, y_hat.cpu(), standard_scaler)
loss.backward()
# gradient clipping
nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
optimizer.step()
# training statistics
total_loss += loss.item()
t_metrics = eval_metrics(y_true.numpy(), y_hat.detach().cpu().numpy(), standard_scaler)
total_metrics += t_metrics
# print('Batch_idx {:03d} | TF {:.4f} | Train MAE {:.5f} | Train MAPE {:.5f} | Train RMSE {:.5f}'.format(
# batch_idx, teaching_force_ratio, loss.item(), t_metrics[1], t_metrics[2]))
# validation after each epoch
model.eval()
with torch.no_grad():
for _, (val_x, val_y) in enumerate(tqdm.tqdm(val_iterator)):
val_x = torch.FloatTensor(val_x)
val_y = torch.FloatTensor(val_y)
val_y_true = val_y[..., :output_dim] # delete time encoding to form as label
# val_x:[batch, seq_len, nodes, enc_input_dim]
# val_y:[batch, horizon, nodes, output_dim + 1]
val_x, val_y = val_x.to(device), val_y.to(device)
val_y_hat = model(val_x, val_y, 0)
val_y_hat = torch.transpose(torch.reshape(val_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_val_metrics += eval_metrics(val_y_true.numpy(), val_y_hat.detach().cpu().numpy(), standard_scaler)
# learning rate scheduling
lr_scheduler.step()
# GPU mem usage
gpu_mem_alloc = torch.cuda.max_memory_allocated() / 1000000 if torch.cuda.is_available() else 0
# save model every epoch
torch.save(model.state_dict(), checkpoints)
# logging
val_metrics = (total_val_metrics / num_val_iteration_per_epoch).tolist()
print('Epoch {:03d} | lr {:.6f} |Train loss {:.5f} | Val MAE {:.5f} | Val MAPE {:.5f} | Val RMSE {:.5f}| GPU {:.1f} MiB'.format(
epoch, optimizer.param_groups[0]['lr'], total_loss / num_train_iteration_per_epoch, val_metrics[0], val_metrics[1], val_metrics[2], gpu_mem_alloc))
print("Training complete.")
"""
DCRNN Testing
"""
print("\nmodel testing...")
test_iterator = test_data_loader.get_iterator()
total_test_metrics = np.zeros(3)
model.eval()
with torch.no_grad():
for _, (test_x, test_y) in enumerate(tqdm.tqdm(test_iterator)):
test_x = torch.FloatTensor(test_x)
test_y = torch.FloatTensor(test_y)
test_y_true = test_y[..., :output_dim] # delete time encoding to form as label
# test_x:[batch, seq_len, nodes, enc_input_dim]
# test_y:[batch, horizon, nodes, output_dim + 1]
test_x, test_y = test_x.to(device), test_y.to(device)
test_y_hat = model(test_x, test_y, 0)
test_y_hat = torch.transpose(torch.reshape(test_y_hat, (horizon, batch_size, num_nodes, output_dim)), 0, 1) # [batch, horizon, nodes, output_dim]
total_test_metrics += eval_metrics(test_y_true.numpy(), test_y_hat.detach().cpu().numpy(), standard_scaler)
test_metrics = (total_test_metrics / num_test_iteration_per_epoch).tolist()
print('Test MAE {:.5f} | Test MAPE {:.5f} | Test RMSE {:.5f}'.format(test_metrics[0], test_metrics[1], test_metrics[2]))
|
[
"tqdm.tqdm",
"math.exp",
"net.DCRNNModel",
"math.ceil",
"pandas.read_csv",
"utils.get_adjacency_matrix",
"utils.masked_mape_np",
"torch.cuda.max_memory_allocated",
"numpy.zeros",
"torch.FloatTensor",
"utils.load_dataset",
"utils.masked_mae_loss",
"utils.masked_mae_np",
"torch.cuda.is_available",
"torch.reshape",
"torch.no_grad",
"utils.masked_rmse_np",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((1085, 1149), 'pandas.read_csv', 'pd.read_csv', (['sensor_distance'], {'dtype': "{'from': 'str', 'to': 'str'}"}), "(sensor_distance, dtype={'from': 'str', 'to': 'str'})\n", (1096, 1149), True, 'import pandas as pd\n'), ((1202, 1253), 'utils.get_adjacency_matrix', 'utils.get_adjacency_matrix', (['distance_df', 'sensor_ids'], {}), '(distance_df, sensor_ids)\n', (1228, 1253), False, 'import utils\n'), ((1262, 1358), 'utils.load_dataset', 'utils.load_dataset', ([], {'dataset_dir': 'recording', 'batch_size': 'batch_size', 'test_batch_size': 'batch_size'}), '(dataset_dir=recording, batch_size=batch_size,\n test_batch_size=batch_size)\n', (1280, 1358), False, 'import utils\n'), ((2557, 2657), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'lr_decay_steps', 'gamma': 'lr_decay_ratio'}), '(optimizer, milestones=lr_decay_steps,\n gamma=lr_decay_ratio)\n', (2593, 2657), False, 'import torch\n'), ((3627, 3675), 'math.ceil', 'math.ceil', (["(data['x_train'].shape[0] / batch_size)"], {}), "(data['x_train'].shape[0] / batch_size)\n", (3636, 3675), False, 'import math\n'), ((3706, 3752), 'math.ceil', 'math.ceil', (["(data['x_val'].shape[0] / batch_size)"], {}), "(data['x_val'].shape[0] / batch_size)\n", (3715, 3752), False, 'import math\n'), ((3784, 3831), 'math.ceil', 'math.ceil', (["(data['x_test'].shape[0] / batch_size)"], {}), "(data['x_test'].shape[0] / batch_size)\n", (3793, 3831), False, 'import math\n'), ((7715, 7726), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (7723, 7726), True, 'import numpy as np\n'), ((2975, 3031), 'utils.masked_mae_loss', 'utils.masked_mae_loss', (['y_predicted', 'y_true'], {'null_val': '(0.0)'}), '(y_predicted, y_true, null_val=0.0)\n', (2996, 3031), False, 'import utils\n'), ((3109, 3120), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3117, 3120), True, 'import numpy as np\n'), ((3263, 3323), 'utils.masked_mae_np', 'utils.masked_mae_np', (['y_predicted_np', 'y_true_np'], {'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3282, 3323), False, 'import utils\n'), ((3335, 3396), 'utils.masked_mape_np', 'utils.masked_mape_np', (['y_predicted_np', 'y_true_np'], {'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3355, 3396), False, 'import utils\n'), ((3408, 3469), 'utils.masked_rmse_np', 'utils.masked_rmse_np', (['y_predicted_np', 'y_true_np'], {'null_val': '(0.0)'}), '(y_predicted_np, y_true_np, null_val=0.0)\n', (3428, 3469), False, 'import utils\n'), ((4379, 4390), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4387, 4390), True, 'import numpy as np\n'), ((4449, 4460), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4457, 4460), True, 'import numpy as np\n'), ((7745, 7760), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7758, 7760), False, 'import torch\n'), ((1558, 1583), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1581, 1583), False, 'import torch\n'), ((1605, 1746), 'net.DCRNNModel', 'DCRNNModel', (['adj_mx', 'diffusion_steps', 'num_nodes', 'batch_size', 'enc_input_dim', 'dec_input_dim', 'hidden_dim', 'output_dim', 'rnn_layers', 'filter_type'], {}), '(adj_mx, diffusion_steps, num_nodes, batch_size, enc_input_dim,\n dec_input_dim, hidden_dim, output_dim, rnn_layers, filter_type)\n', (1615, 1746), False, 'from net import DCRNNModel\n'), ((4505, 4530), 'tqdm.tqdm', 'tqdm.tqdm', (['train_iterator'], {}), '(train_iterator)\n', (4514, 4530), False, 'import tqdm\n'), ((4554, 4574), 'torch.FloatTensor', 
'torch.FloatTensor', (['x'], {}), '(x)\n', (4571, 4574), False, 'import torch\n'), ((4587, 4607), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (4604, 4607), False, 'import torch\n'), ((6135, 6150), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6148, 6150), False, 'import torch\n'), ((7064, 7089), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7087, 7089), False, 'import torch\n'), ((7803, 7827), 'tqdm.tqdm', 'tqdm.tqdm', (['test_iterator'], {}), '(test_iterator)\n', (7812, 7827), False, 'import tqdm\n'), ((7847, 7872), 'torch.FloatTensor', 'torch.FloatTensor', (['test_x'], {}), '(test_x)\n', (7864, 7872), False, 'import torch\n'), ((7890, 7915), 'torch.FloatTensor', 'torch.FloatTensor', (['test_y'], {}), '(test_y)\n', (7907, 7915), False, 'import torch\n'), ((5286, 5352), 'torch.reshape', 'torch.reshape', (['y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (5299, 5352), False, 'import torch\n'), ((6195, 6218), 'tqdm.tqdm', 'tqdm.tqdm', (['val_iterator'], {}), '(val_iterator)\n', (6204, 6218), False, 'import tqdm\n'), ((6241, 6265), 'torch.FloatTensor', 'torch.FloatTensor', (['val_x'], {}), '(val_x)\n', (6258, 6265), False, 'import torch\n'), ((6286, 6310), 'torch.FloatTensor', 'torch.FloatTensor', (['val_y'], {}), '(val_y)\n', (6303, 6310), False, 'import torch\n'), ((7017, 7050), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (7048, 7050), False, 'import torch\n'), ((8263, 8334), 'torch.reshape', 'torch.reshape', (['test_y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(test_y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (8276, 8334), False, 'import torch\n'), ((5094, 5133), 'math.exp', 'math.exp', (['(global_steps / cl_decay_steps)'], {}), '(global_steps / cl_decay_steps)\n', (5102, 5133), False, 'import math\n'), ((6670, 6740), 'torch.reshape', 'torch.reshape', (['val_y_hat', '(horizon, batch_size, num_nodes, output_dim)'], {}), '(val_y_hat, (horizon, batch_size, num_nodes, output_dim))\n', (6683, 6740), False, 'import torch\n')]
|
"""
Task request/response classes for the registration job (discovering, validating and storing metadata for a dataset)
"""
# Copyright 2021 The Funnel Rocket Maintainers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from enum import auto
from typing import Optional
from frocket.common.dataset import DatasetInfo, DatasetPartId, DatasetSchema
from frocket.common.serializable import SerializableDataClass, AutoNamedEnum, enveloped
from frocket.common.tasks.base import BaseTaskRequest, BaseTaskResult, BlobId, BaseJobResult, BaseApiResult
class DatasetValidationMode(AutoNamedEnum):
SINGLE = auto() # Only validate a single file in the dataset (meaning no cross-file consistency checks are done!)
FIRST_LAST = auto() # Validate only first and last files (by lexicographic sorting) and cross-check them
    SAMPLE = auto()  # Takes a sample of files, proportional to the no. of files and up to a configured maximum.
REGISTER_DEFAULT_FILENAME_PATTERN = '*.parquet' # Ignore files such as '_SUCCESS' and the like in discovery
REGISTER_DEFAULT_VALIDATION_MODE = DatasetValidationMode.SAMPLE
REGISTER_DEFAULT_VALIDATE_UNIQUES = True
@dataclass(frozen=True)
class RegisterArgs(SerializableDataClass):
"""Parameters collected by the CLI / API server for the registration job"""
name: str
basepath: str
group_id_column: str
timestamp_column: str
pattern: str = REGISTER_DEFAULT_FILENAME_PATTERN
validation_mode: DatasetValidationMode = REGISTER_DEFAULT_VALIDATION_MODE
validate_uniques: bool = REGISTER_DEFAULT_VALIDATE_UNIQUES
@enveloped
@dataclass(frozen=True)
class RegistrationTaskRequest(BaseTaskRequest):
dataset: DatasetInfo
part_id: DatasetPartId
    # If RegisterArgs.validate_uniques=true, the task should return all group IDs in the file
return_group_ids: bool
@enveloped
@dataclass(frozen=True)
class RegistrationTaskResult(BaseTaskResult):
dataset_schema: Optional[DatasetSchema] # None on failures
part_id: DatasetPartId
# If RegistrationTaskRequest.return_group_ids=true, a reference to the blob with the group IDs
group_ids_blob_id: Optional[BlobId]
@dataclass(frozen=True)
class RegistrationJobResult(BaseJobResult):
dataset: DatasetInfo
@dataclass(frozen=True)
class UnregisterApiResult(BaseApiResult):
dataset_found: bool
dataset_last_used: Optional[float]
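
# A minimal usage sketch (editorial addition, not part of the original module).
# The dataset name, basepath and column names below are hypothetical.
if __name__ == '__main__':
    args = RegisterArgs(name='mydataset',
                        basepath='s3://my-bucket/mydataset/',
                        group_id_column='user_id',
                        timestamp_column='ts',
                        validation_mode=DatasetValidationMode.FIRST_LAST)
    print(args.pattern, args.validation_mode.name, args.validate_uniques)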
|
[
"enum.auto",
"dataclasses.dataclass"
] |
[((1693, 1715), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1702, 1715), False, 'from dataclasses import dataclass\n'), ((2130, 2152), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2139, 2152), False, 'from dataclasses import dataclass\n'), ((2380, 2402), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2389, 2402), False, 'from dataclasses import dataclass\n'), ((2682, 2704), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2691, 2704), False, 'from dataclasses import dataclass\n'), ((2777, 2799), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2786, 2799), False, 'from dataclasses import dataclass\n'), ((1144, 1150), 'enum.auto', 'auto', ([], {}), '()\n', (1148, 1150), False, 'from enum import auto\n'), ((1267, 1273), 'enum.auto', 'auto', ([], {}), '()\n', (1271, 1273), False, 'from enum import auto\n'), ((1373, 1379), 'enum.auto', 'auto', ([], {}), '()\n', (1377, 1379), False, 'from enum import auto\n')]
|
import numpy as np
from rdkit.DataStructs.cDataStructs import ExplicitBitVect, SparseBitVect
from scipy.sparse import issparse, csr_matrix
from collections import defaultdict
from rdkit import DataStructs
from luna.util.exceptions import (BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError)
from luna.version import __version__
import logging
logger = logging.getLogger()
DEFAULT_FP_LENGTH = 2**32
DEFAULT_FOLDED_FP_LENGTH = 4096
DEFAULT_FP_DTYPE = np.int32
class Fingerprint:
"""A fingerprint that stores indices of "on" bits.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
unfolded_fp : `Fingerprint` or None
The unfolded version of this fingerprint.
        If None, this fingerprint may not have been folded yet.
unfolding_map : dict, optional
A mapping between current indices and indices from the unfolded version of this fingerprint
        which makes it possible to trace folded bits back to the original shells (features).
props: dict, optional
Custom properties of the fingerprint, consisting of a string keyword and
some value. It can be used, for instance, to save the ligand name
and parameters used to generate shells (IFP features).
"""
def __init__(self, indices, fp_length=DEFAULT_FP_LENGTH, unfolded_fp=None, unfolding_map=None, props=None):
indices = np.asarray(indices, dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
self._indices = np.unique(indices)
self._fp_length = fp_length
self._unfolded_fp = unfolded_fp
self._unfolding_map = unfolding_map or {}
self._props = props or {}
self.version = __version__
@classmethod
def from_indices(cls, indices, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from an array of indices.
Parameters
----------
indices : array_like of int
Indices of "on" bits.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(indices, fp_length, **kwargs)
@classmethod
def from_vector(cls, vector, fp_length=None, **kwargs):
"""Initialize from a vector.
Parameters
----------
vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Array of bits.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the ``vector`` shape.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> fp_length = 32
>>> vector = np.random.choice([0, 1], size=(fp_length,), p=[0.8, 0.2])
>>> print(vector)
[0 0 0 0 0 0 0 1 1 0 0 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 1 0 0 0 0]
>>> fp = Fingerprint.from_vector(vector)
>>> print(fp.indices)
[ 7 8 13 17 19 20 27]
>>> print(fp.fp_length)
32
"""
if fp_length is None:
try:
fp_length = vector.shape[1]
except IndexError:
fp_length = vector.shape[0]
if issparse(vector):
indices = vector.indices.astype(np.long)
else:
indices = np.asarray(np.where(vector), dtype=np.long).flatten()
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_bit_string(cls, bit_string, fp_length=None, **kwargs):
"""Initialize from a bit string (e.g. '0010100110').
Parameters
----------
bit_string : str
String of 0s and 1s.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the string length.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp = Fingerprint.from_bit_string("0010100110000010")
>>> print(fp.indices)
[ 2 4 7 8 14]
>>> print(fp.fp_length)
16
"""
indices = [i for i, char in enumerate(bit_string) if char != '0']
if fp_length is None:
fp_length = len(bit_string)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_rdkit(cls, rdkit_fp, **kwargs):
"""Initialize from an RDKit fingerprint.
Parameters
----------
rdkit_fp : :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
An existing RDKit fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not (isinstance(rdkit_fp, ExplicitBitVect) or isinstance(rdkit_fp, SparseBitVect)):
logger.exception("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
raise TypeError("Invalid fingerprint type. RDKit only accepts a SparseBitVect or ExplicitBitVect object.")
fp_length = rdkit_fp.GetNumBits()
indices = np.asarray(rdkit_fp.GetOnBits(), dtype=np.long)
return cls.from_indices(indices, fp_length, **kwargs)
@classmethod
def from_fingerprint(cls, fp, **kwargs):
"""Initialize from an existing fingerprint.
Parameters
----------
fp : `Fingerprint`
An existing fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `Fingerprint`
"""
if not isinstance(fp, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))
unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
unfolding_map = dict(fp.unfolding_map)
props = dict(fp.props)
return cls.from_indices(fp.indices, fp.fp_length, unfolded_fp=unfolded_fp, unfolding_map=unfolding_map, props=props)
@property
def indices(self):
"""array_like of int, read-only: Indices of "on" bits."""
return self._indices
@property
def bit_count(self):
"""int, read-only: Number of "on" bits."""
return self.indices.shape[0]
@property
def density(self):
"""float, read-only: Proportion of "on" bits in fingerprint."""
return self.bit_count / self.fp_length
@property
def counts(self):
"""dict, read-only: Mapping between each index in ``indices`` to the number of counts, which is always 1 for bit fingerprints."""
return dict([(k, 1) for k in self.indices])
@property
def fp_length(self):
"""int, read-only: The fingerprint length (total number of bits)."""
return self._fp_length
@property
def unfolded_fp(self):
"""`Fingerprint` or None, read-only: The unfolded version of this fingerprint. If None, this fingerprint may have not been folded yet."""
if self._unfolded_fp is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolded_fp
@property
def unfolded_indices(self):
"""array_like of int, read-only: Indices of "on" bits in the unfolded fingerprint."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self.unfolded_fp.indices
@property
def unfolding_map(self):
"""dict, read-only: The mapping between current indices and indices from the unfolded version of this fingerprint
        which makes it possible to trace folded bits back to the original shells (features)."""
if self._unfolding_map is None:
logger.warning("This fingerprint was not previously folded.")
return None
return self._unfolding_map
@property
def props(self):
"""dict, read-only: The custom properties of the fingerprint."""
return self._props
@property
def name(self):
"""str: The property 'name'. If it was not provided, then return an empty string."""
return self.props.get("name", "")
@name.setter
def name(self, name):
self.props["name"] = str(name)
@property
def num_levels(self):
"""int: The property 'num_levels' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_levels", None)
@num_levels.setter
def num_levels(self, num_levels):
self.props["num_levels"] = str(num_levels)
@property
def radius_step(self):
"""float: The property 'radius_step' used to generate this fingerprint \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("radius_step", None)
@radius_step.setter
def radius_step(self, radius_step):
self.props["radius_step"] = str(radius_step)
@property
def num_shells(self):
"""int: The property 'num_shells' \
(see :class:`~luna.interaction.fp.shell.ShellGenerator`). \
If it was not provided, then return None."""
return self.props.get("num_shells", None)
@num_shells.setter
def num_shells(self, num_shells):
self.props["num_shells"] = str(num_shells)
def get_prop(self, key):
"""Get value of the property ``key``. If not set, raise KeyError."""
try:
return self.props[key]
except KeyError:
logger.warning("Key '%s' does not exist." % key)
return None
def set_prop(self, key, value):
"""Set value to the property ``key``."""
self.props[key] = value
def get_num_bits(self):
"""Get the fingerprint length (total number of bits)."""
return self.fp_length
def get_num_on_bits(self):
"""Get the number of "on" bits."""
return self.bit_count
def get_num_off_bits(self):
"""Get the number of "off" bits."""
return self.get_num_bits() - self.get_num_on_bits()
def get_bit(self, index):
"""Get the bit/count value at index ``index``.
Raises
------
BitsValueError
If the provided index is in a different bit scale.
"""
if index in self.counts:
return self.counts[index]
elif index >= 0 and index < self.fp_length:
return 0
else:
logger.exception("The provided index is in a different bit scale.")
raise BitsValueError("The provided index is in a different bit scale.")
def get_on_bits(self):
"""Get "on" bits.
Returns
-------
: :class:`numpy.ndarray`
"""
return np.array([k for (k, v) in self.counts.items() if v > 0])
def to_vector(self, compressed=True, dtype=DEFAULT_FP_DTYPE):
"""Convert this fingerprint to a vector of bits/counts.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
            If you encounter this issue, you may want to try a different data type
or apply a folding operation before calling `to_vector`.
Parameters
        ----------
compressed : bool
If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
dtype : data-type
The default value is np.int32.
Returns
-------
: :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Vector of bits/counts.
Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
Otherwise, return a Numpy array (:class:`numpy.ndarray`)
Raises
------
BitsValueError
If some of the fingerprint indices are greater than the fingerprint length.
MemoryError
If the operation ran out of memory.
"""
data = [self.counts[i] for i in self.indices]
if compressed:
try:
row = np.zeros(self.bit_count)
col = self.indices
vector = csr_matrix((data, (row, col)), shape=(1, self.fp_length), dtype=dtype)
except ValueError as e:
logger.exception(e)
raise BitsValueError("Sparse matrix construction failed. Invalid indices or input data.")
else:
try:
                # np.zeros over a 2**32-long fingerprint can raise a MemoryError here.
vector = np.zeros(self.fp_length, dtype=dtype)
except MemoryError as e:
logger.exception(e)
raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
"Use a different data type or apply a folding operation.")
try:
vector[self.indices] = data
except IndexError as e:
logger.exception(e)
raise BitsValueError("Some of the provided indices are greater than the fingerprint length.")
return vector
def to_bit_vector(self, compressed=True):
"""Convert this fingerprint to a vector of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
            If you encounter this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_vector`.
Parameters
        ----------
compressed : bool
If True, build a compressed sparse matrix (scipy.sparse.csr_matrix).
Returns
-------
: :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Vector of bits/counts.
Return a compressed sparse matrix (`scipy.sparse.csr_matrix`) if ``compressed`` is True.
Otherwise, return a Numpy array (:class:`numpy.ndarray`)
Raises
------
BitsValueError
If some of the fingerprint indices are greater than the fingerprint length.
MemoryError
If the operation ran out of memory.
"""
return self.to_vector(compressed=compressed, dtype=np.bool_).astype(np.int8)
def to_bit_string(self):
"""Convert this fingerprint to a string of bits.
.. warning::
This function may raise a `MemoryError` exception when using huge indices vectors.
            If you encounter this issue, you may want to try a different data type
or apply a folding operation before calling `to_bit_string`.
Returns
-------
: str
Raises
------
MemoryError
If the operation ran out of memory.
"""
try:
            # Building a dense 2**32-long bit vector can raise a MemoryError here.
bit_vector = self.to_bit_vector(compressed=False).astype(np.int8)
return "".join(map(str, bit_vector))
except MemoryError as e:
logger.exception(e)
raise MemoryError("Huge indices vector detected. An operation ran out of memory. "
"Use a different data type or apply a folding operation.")
def to_rdkit(self, rdkit_fp_cls=None):
"""Convert this fingerprint to an RDKit fingerprint.
.. note::
If the fingerprint length exceeds the maximum RDKit fingerprint length (:math:`2^{31} - 1`),
this fingerprint will be folded to length :math:`2^{31} - 1` before conversion.
Returns
-------
: :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` or :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect`
If ``fp_length`` is less than :math:`1e5`, :class:`~rdkit.DataStructs.cDataStructs.ExplicitBitVect` is used.
Otherwise, :class:`~rdkit.DataStructs.cDataStructs.SparseBitVect` is used.
"""
if rdkit_fp_cls is None:
# Classes to store explicit bit vectors: ExplicitBitVect or SparseBitVect.
# ExplicitBitVect is most useful for situations where the size of the vector is
# relatively small (tens of thousands or smaller).
# For larger vectors, use the _SparseBitVect_ class instead.
if self.fp_length < 1e5:
rdkit_fp_cls = ExplicitBitVect
else:
rdkit_fp_cls = SparseBitVect
# RDKit data structure defines fingerprints as a std:set composed of ints (signed int).
# Since we always have values higher than 0 and since the data structure contains only signed ints,
# then the max length for a RDKit fingerprint is 2^31 - 1.
# C signed int (32 bit) ranges: [-2^31, 2^31-1].
max_rdkit_fp_length = 2**31 - 1
fp_length = self.fp_length
if max_rdkit_fp_length < fp_length:
logger.warning("The current fingerprint will be folded as its size is higher than the maximum "
"size accepted by RDKit, which is 2**31 - 1.")
fp_length = max_rdkit_fp_length
indices = self.indices % max_rdkit_fp_length
rdkit_fp = rdkit_fp_cls(fp_length)
rdkit_fp.SetBitsFromList(indices.tolist())
return rdkit_fp
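    # Hedged round-trip note (editorial comment): for fingerprints whose length
    # does not exceed 2**31 - 1, no folding happens above, so
    #     Fingerprint.from_rdkit(fp.to_rdkit()).indices
    # recovers exactly fp.indices.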
def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
"""Fold this fingerprint to size ``new_length``.
Parameters
----------
new_length : int
            Length of the new fingerprint, ideally a power of 2. The default value is 4096.
Returns
-------
: `Fingerprint`
Folded `Fingerprint`.
Raises
------
BitsValueError
            If the current length divided by the new length is not a power of 2, or if the new length is greater than the existing fingerprint length.
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices = np.random.randint(0, fp_length, on_bits)
>>> print(indices)
[12 15 21 0 3 27 3 7]
>>> fp = Fingerprint.from_indices(indices, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 1 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
>>> folded_fp = fp.fold(8)
>>> print(folded_fp.indices)
[0 3 4 5 7]
>>> print(folded_fp.to_vector(compressed=False))
[1 0 0 1 1 1 0 1]
"""
if new_length > self.fp_length:
error_msg = ("The new fingerprint length must be smaller than the existing fingerprint length.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
if not np.log2(self.fp_length / new_length).is_integer():
error_msg = ("It is not possible to fold the current fingerprint into the informed new length. "
"The current length divided by the new one is not a power of 2 number.")
logger.exception(error_msg)
raise BitsValueError(error_msg)
folded_indices = self.indices % new_length
unfolding_map = defaultdict(set)
for k, v in sorted(zip(folded_indices, self.indices)):
unfolding_map[k].add(v)
props = dict(self.props)
if "fp_length" in props:
props["fp_length"] = new_length
new_fp = self.__class__(indices=folded_indices, fp_length=new_length,
unfolded_fp=self, unfolding_map=unfolding_map, props=props)
return new_fp
def unfold(self):
"""Unfold this fingerprint and return its parent fingerprint.
Returns
-------
: `Fingerprint`
"""
return self.unfolded_fp
def union(self, other):
"""Return the union of indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("The informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("The informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.union1d(self.indices, other.indices)
def intersection(self, other):
"""Return the intersection between indices of two fingerprints.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.intersect1d(self.indices, other.indices, assume_unique=True)
def difference(self, other):
"""Return indices in this fingerprint but not in ``other``.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setdiff1d(self.indices, other.indices, assume_unique=True)
def symmetric_difference(self, other):
"""Return indices in either this fingerprint or ``other`` but not both.
Returns
-------
: :class:`numpy.ndarray`
Raises
------
InvalidFingerprintType
If the informed fingerprint is not an instance of `Fingerprint`.
BitsValueError
If the fingerprints have different lengths.
"""
if not isinstance(other, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (other.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (other.__class__))
if self.fp_length != other.fp_length:
raise BitsValueError("Fingerprints are in a different bit scale")
return np.setxor1d(self.indices, other.indices, assume_unique=True)
def calc_similarity(self, other):
"""Calculates the Tanimoto similarity between this fingeprint and ``other``.
Returns
-------
: float
Examples
--------
>>> from luna.interaction.fp.fingerprint import Fingerprint
>>> fp1 = Fingerprint.from_bit_string("0010101110000010")
>>> fp2 = Fingerprint.from_bit_string("1010100110010010")
>>> print(fp1.calc_similarity(fp2))
0.625
"""
return DataStructs.FingerprintSimilarity(self.to_rdkit(), other.to_rdkit())
def __repr__(self):
return ("<%s: indices=%s length=%d>" %
(self.__class__, repr(self.indices).replace('\n', '').replace(' ', ''), self.fp_length))
def __eq__(self, other):
if isinstance(other, Fingerprint):
return (self.__class__ == other.__class__
and self.fp_length == other.fp_length
and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
return False
def __ne__(self, other):
return not self.__eq__(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def __xor__(self, other):
return self.symmetric_difference(other)
class CountFingerprint(Fingerprint):
"""A fingerprint that stores the number of occurrences of each index.
Parameters
----------
indices : array_like of int, optional
Indices of "on" bits. It is optional if ``counts`` is provided.
counts : dict, optional
        Mapping from each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
unfolded_fp : `Fingerprint` or None
The unfolded version of this fingerprint.
        If None, this fingerprint may not have been folded yet.
unfolding_map : dict, optional
A mapping between current indices and indices from the unfolded version of this fingerprint
        which makes it possible to trace folded bits back to the original shells (features).
props: dict, optional
Custom properties of the fingerprint, consisting of a string keyword and
some value. It can be used, for instance, to save the ligand name
and parameters used to generate shells (IFP features).
"""
def __init__(self, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH,
unfolded_fp=None, unfolding_map=None, props=None):
if indices is None and counts is None:
logger.exception("Indices or counts must be provided.")
raise IllegalArgumentError("Indices or counts must be provided.")
if indices is not None:
indices = np.asarray(indices, dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
if counts is None:
indices, counts = np.unique(indices, return_counts=True)
counts = dict(zip(indices, counts))
else:
indices = np.unique(indices)
if not np.all([x in indices for x in counts]):
logger.exception("At least one index from 'counts' is not in 'indices'.")
raise FingerprintCountsError("At least one index from 'counts' is not in 'indices'.")
if len(set(indices).symmetric_difference(counts)) > 0:
logger.exception("At least one index in 'indices' is not in 'counts'.")
raise FingerprintCountsError("At least one index in 'indices' is not in 'counts'.")
else:
indices = np.asarray(sorted(counts.keys()), dtype=np.long)
if np.any(np.logical_or(indices < 0, indices >= fp_length)):
logger.exception("Provided indices are in a different bit scale.")
raise BitsValueError("Provided indices are in a different bit scale.")
self._counts = counts
super().__init__(indices, fp_length, unfolded_fp, unfolding_map, props)
@classmethod
def from_indices(cls, indices=None, counts=None, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from an array of indices.
Parameters
----------
indices : array_like of int, optional
Indices of "on" bits. It is optional if ``counts`` is provided.
counts : dict, optional
            Mapping from each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `CountFingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
>>> counts = dict(zip(indices, counts))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(indices=indices, counts=counts, fp_length=fp_length, **kwargs)
@classmethod
def from_counts(cls, counts, fp_length=DEFAULT_FP_LENGTH, **kwargs):
"""Initialize from a counting map.
Parameters
----------
counts : dict
            Mapping from each index in ``indices`` to the number of counts.
fp_length : int
The fingerprint length (total number of bits). The default value is :math:`2^{32}`.
**kwargs : dict, optional
Extra arguments to `CountFingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> counts = dict(zip(*np.unique(np.random.randint(0, fp_length, on_bits),
... return_counts=True)))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_counts(counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
        [1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
"""
return cls(counts=counts, fp_length=fp_length, **kwargs)
@classmethod
def from_bit_string(cls, bit_string, counts=None, fp_length=None, **kwargs):
"""Initialize from a bit string (e.g. '0010100110').
Parameters
----------
bit_string : str
String of 0s and 1s.
counts : dict, optional
            Mapping from each index in ``indices`` to the number of counts.
If not provided, the default count value of 1 will be used instead.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the string length.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> fp = CountFingerprint.from_bit_string("0010100110000010",
... counts={2: 5, 4: 1, 7: 3, 8: 1, 14: 2})
>>> print(fp.indices)
[ 2 4 7 8 14]
>>> print(fp.counts)
{2: 5, 4: 1, 7: 3, 8: 1, 14: 2}
"""
indices = [i for i, char in enumerate(bit_string) if char != '0']
if fp_length is None:
fp_length = len(bit_string)
return cls.from_indices(indices, counts, fp_length, **kwargs)
@classmethod
def from_vector(cls, vector, fp_length=None, **kwargs):
"""Initialize from a vector.
Parameters
----------
vector : :class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Array of counts.
fp_length : int, optional
The fingerprint length (total number of bits).
If not provided, the fingerprint length will be defined based on the ``vector`` shape.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> fp_length = 32
>>> vector = np.random.choice(5, size=(fp_length,), p=[0.76, 0.1, 0.1, 0.02, 0.02])
>>> print(vector)
[0 0 0 0 2 3 0 1 0 0 2 0 0 0 1 1 2 3 1 0 1 0 0 0 2 0 0 0 1 0 0 0]
>>> fp = CountFingerprint.from_vector(vector)
>>> print(fp.indices)
[ 4 5 7 10 14 15 16 17 18 20 24 28]
>>> print(fp.counts)
{4: 2, 5: 3, 7: 1, 10: 2, 14: 1, 15: 1, 16: 2, 17: 3, 18: 1, 20: 1, 24: 2, 28: 1}
"""
if fp_length is None:
try:
fp_length = vector.shape[1]
except IndexError:
fp_length = vector.shape[0]
if issparse(vector):
indices = vector.indices.astype(np.long)
counts = vector.data
else:
indices = np.asarray(np.where(vector), dtype=np.long).flatten()
counts = vector[indices]
counts = dict(zip(indices, counts))
return cls.from_indices(indices, counts, fp_length, **kwargs)
@classmethod
def from_fingerprint(cls, fp, **kwargs):
"""Initialize from an existing fingerprint.
Parameters
----------
fp : `Fingerprint`
An existing fingerprint.
**kwargs : dict, optional
Extra arguments to `Fingerprint`. Refer to the documentation for a
list of all possible arguments.
Returns
-------
: `CountFingerprint`
"""
if not isinstance(fp, Fingerprint):
logger.exception("Informed fingerprint is not an instance of %s." % (cls.__class__))
raise InvalidFingerprintType("Informed fingerprint is not an instance of %s." % (cls.__class__))
counts = dict([(i, c) for i, c in fp.counts.items() if c > 0])
unfolded_fp = fp.__class__.from_fingerprint(fp.unfolded_fp) if fp.unfolded_fp is not None else None
unfolding_map = dict(fp.unfolding_map)
props = dict(fp.props)
new_fp = cls.from_counts(counts, fp.fp_length, unfolded_fp=unfolded_fp,
unfolding_map=unfolding_map, props=props)
return new_fp
@property
def counts(self):
"""dict, read-only: Mapping between each index in ``indices`` to the number of counts."""
return self._counts
def get_count(self, index):
"""Get the count value at index ``index``. Return 0 if index is not in ``counts``."""
return self.counts.get(index, 0)
def fold(self, new_length=DEFAULT_FOLDED_FP_LENGTH):
"""Fold this fingerprint to size ``new_length``.
Parameters
----------
new_length : int
            Length of the new fingerprint, ideally a power of 2. The default value is 4096.
Returns
-------
: `Fingerprint`
Folded `Fingerprint`.
Raises
------
BitsValueError
            If the current length divided by the new length is not a power of 2, or if the new length is greater than the existing fingerprint length.
Examples
--------
>>> from luna.interaction.fp.fingerprint import CountFingerprint
>>> import numpy as np
>>> np.random.seed(0)
>>> on_bits = 8
>>> fp_length = 32
>>> indices, counts = np.unique(np.random.randint(0, fp_length, on_bits), return_counts=True)
>>> counts = dict(zip(indices, counts))
>>> print(counts)
{0: 1, 3: 2, 7: 1, 12: 1, 15: 1, 21: 1, 27: 1}
>>> fp = CountFingerprint.from_indices(indices, counts=counts, fp_length=fp_length)
>>> print(fp.indices)
[ 0 3 7 12 15 21 27]
>>> print(fp.to_vector(compressed=False))
[1 0 0 2 0 0 0 1 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0]
>>> folded_fp = fp.fold(8)
>>> print(folded_fp.indices)
[0 3 4 5 7]
>>> print(folded_fp.to_vector(compressed=False))
[1 0 0 3 1 1 0 2]
"""
new_fp = super().fold(new_length)
new_fp._counts = dict([(folded_idx, sum([self.get_count(x) for x in unfolded_set]))
for folded_idx, unfolded_set in new_fp.unfolding_map.items()])
return new_fp
def __repr__(self):
return ("<%s: counts={%s} length=%d>" %
(self.__class__, tuple([(k, v) for k, v in self.counts.items()]), self.fp_length))
def __eq__(self, other):
if isinstance(other, Fingerprint):
return (self.__class__ == other.__class__
and self.counts == other.counts
and self.fp_length == other.fp_length
and np.all(np.in1d(self.indices, other.indices, assume_unique=True)))
return False
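
# A minimal usage sketch (editorial addition, not part of the original module),
# combining fingerprints with the set-style operators defined above:
if __name__ == '__main__':
    fp1 = Fingerprint.from_bit_string("0010101110000010")
    fp2 = Fingerprint.from_bit_string("1010100110010010")
    print(fp1 | fp2)                  # union of "on" indices (numpy array)
    print(fp1 & fp2)                  # intersection of "on" indices
    print(fp1.calc_similarity(fp2))   # Tanimoto similarity via RDKit: 0.625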
|
[
"scipy.sparse.issparse",
"collections.defaultdict",
"luna.util.exceptions.IllegalArgumentError",
"luna.util.exceptions.InvalidFingerprintType",
"numpy.unique",
"luna.util.exceptions.BitsValueError",
"numpy.intersect1d",
"numpy.union1d",
"numpy.log2",
"numpy.asarray",
"luna.util.exceptions.FingerprintCountsError",
"scipy.sparse.csr_matrix",
"numpy.all",
"numpy.setdiff1d",
"numpy.setxor1d",
"numpy.zeros",
"numpy.where",
"numpy.logical_or",
"logging.getLogger",
"numpy.in1d"
] |
[((390, 409), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (407, 409), False, 'import logging\n'), ((1543, 1577), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.long'}), '(indices, dtype=np.long)\n', (1553, 1577), True, 'import numpy as np\n'), ((1835, 1853), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (1844, 1853), True, 'import numpy as np\n'), ((4669, 4685), 'scipy.sparse.issparse', 'issparse', (['vector'], {}), '(vector)\n', (4677, 4685), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((21523, 21539), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (21534, 21539), False, 'from collections import defaultdict\n'), ((22930, 22969), 'numpy.union1d', 'np.union1d', (['self.indices', 'other.indices'], {}), '(self.indices, other.indices)\n', (22940, 22969), True, 'import numpy as np\n'), ((23773, 23836), 'numpy.intersect1d', 'np.intersect1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (23787, 23836), True, 'import numpy as np\n'), ((24634, 24695), 'numpy.setdiff1d', 'np.setdiff1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (24646, 24695), True, 'import numpy as np\n'), ((25515, 25575), 'numpy.setxor1d', 'np.setxor1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (25526, 25575), True, 'import numpy as np\n'), ((36072, 36088), 'scipy.sparse.issparse', 'issparse', (['vector'], {}), '(vector)\n', (36080, 36088), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((1597, 1645), 'numpy.logical_or', 'np.logical_or', (['(indices < 0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (1610, 1645), True, 'import numpy as np\n'), ((1745, 1809), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (1759, 1809), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((7691, 7783), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % cls.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n cls.__class__)\n", (7713, 7783), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21062, 21087), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['error_msg'], {}), '(error_msg)\n', (21076, 21087), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21420, 21445), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['error_msg'], {}), '(error_msg)\n', (21434, 21445), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((22692, 22790), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('The informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('The informed fingerprint is not an instance of %s.' 
%\n other.__class__)\n", (22714, 22790), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((22854, 22913), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (22868, 22913), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((23539, 23633), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n other.__class__)\n", (23561, 23633), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((23697, 23756), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (23711, 23756), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((24400, 24494), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n other.__class__)\n", (24422, 24494), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((24558, 24617), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (24572, 24617), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((25281, 25375), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % other.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' %\n other.__class__)\n", (25303, 25375), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((25439, 25498), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Fingerprints are in a different bit scale"""'], {}), "('Fingerprints are in a different bit scale')\n", (25453, 25498), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28427, 28486), 'luna.util.exceptions.IllegalArgumentError', 'IllegalArgumentError', (['"""Indices or counts must be provided."""'], {}), "('Indices or counts must be provided.')\n", (28447, 28486), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28542, 28576), 'numpy.asarray', 'np.asarray', (['indices'], {'dtype': 'np.long'}), '(indices, dtype=np.long)\n', (28552, 28576), True, 'import numpy as np\n'), ((37027, 37119), 'luna.util.exceptions.InvalidFingerprintType', 'InvalidFingerprintType', (["('Informed fingerprint is not an instance of %s.' % cls.__class__)"], {}), "('Informed fingerprint is not an instance of %s.' 
%\n cls.__class__)\n", (37049, 37119), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((12792, 12857), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""The provided index is in a different bit scale."""'], {}), "('The provided index is in a different bit scale.')\n", (12806, 12857), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((14317, 14341), 'numpy.zeros', 'np.zeros', (['self.bit_count'], {}), '(self.bit_count)\n', (14325, 14341), True, 'import numpy as np\n'), ((14402, 14472), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(1, self.fp_length)', 'dtype': 'dtype'}), '((data, (row, col)), shape=(1, self.fp_length), dtype=dtype)\n', (14412, 14472), False, 'from scipy.sparse import issparse, csr_matrix\n'), ((14801, 14838), 'numpy.zeros', 'np.zeros', (['self.fp_length'], {'dtype': 'dtype'}), '(self.fp_length, dtype=dtype)\n', (14809, 14838), True, 'import numpy as np\n'), ((28600, 28648), 'numpy.logical_or', 'np.logical_or', (['(indices < 0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (28613, 28648), True, 'import numpy as np\n'), ((28756, 28820), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (28770, 28820), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((28887, 28925), 'numpy.unique', 'np.unique', (['indices'], {'return_counts': '(True)'}), '(indices, return_counts=True)\n', (28896, 28925), True, 'import numpy as np\n'), ((29022, 29040), 'numpy.unique', 'np.unique', (['indices'], {}), '(indices)\n', (29031, 29040), True, 'import numpy as np\n'), ((29679, 29727), 'numpy.logical_or', 'np.logical_or', (['(indices < 0)', '(indices >= fp_length)'], {}), '(indices < 0, indices >= fp_length)\n', (29692, 29727), True, 'import numpy as np\n'), ((29835, 29899), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Provided indices are in a different bit scale."""'], {}), "('Provided indices are in a different bit scale.')\n", (29849, 29899), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((14567, 14655), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Sparse matrix construction failed. Invalid indices or input data."""'], {}), "(\n 'Sparse matrix construction failed. 
Invalid indices or input data.')\n", (14581, 14655), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((15260, 15352), 'luna.util.exceptions.BitsValueError', 'BitsValueError', (['"""Some of the provided indices are greater than the fingerprint length."""'], {}), "(\n 'Some of the provided indices are greater than the fingerprint length.')\n", (15274, 15352), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((21104, 21140), 'numpy.log2', 'np.log2', (['(self.fp_length / new_length)'], {}), '(self.fp_length / new_length)\n', (21111, 21140), True, 'import numpy as np\n'), ((26532, 26588), 'numpy.in1d', 'np.in1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (26539, 26588), True, 'import numpy as np\n'), ((29064, 29104), 'numpy.all', 'np.all', (['[(x in indices) for x in counts]'], {}), '([(x in indices) for x in counts])\n', (29070, 29104), True, 'import numpy as np\n'), ((29224, 29303), 'luna.util.exceptions.FingerprintCountsError', 'FingerprintCountsError', (['"""At least one index from \'counts\' is not in \'indices\'."""'], {}), '("At least one index from \'counts\' is not in \'indices\'.")\n', (29246, 29303), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((29493, 29570), 'luna.util.exceptions.FingerprintCountsError', 'FingerprintCountsError', (['"""At least one index in \'indices\' is not in \'counts\'."""'], {}), '("At least one index in \'indices\' is not in \'counts\'.")\n', (29515, 29570), False, 'from luna.util.exceptions import BitsValueError, InvalidFingerprintType, IllegalArgumentError, FingerprintCountsError\n'), ((40028, 40084), 'numpy.in1d', 'np.in1d', (['self.indices', 'other.indices'], {'assume_unique': '(True)'}), '(self.indices, other.indices, assume_unique=True)\n', (40035, 40084), True, 'import numpy as np\n'), ((4787, 4803), 'numpy.where', 'np.where', (['vector'], {}), '(vector)\n', (4795, 4803), True, 'import numpy as np\n'), ((36223, 36239), 'numpy.where', 'np.where', (['vector'], {}), '(vector)\n', (36231, 36239), True, 'import numpy as np\n')]
|
from pages.driver import Driver
from pages.login import LoginPage
from pages.addNewDevice import AddNewDevice
from pages.devicesvc import DeviceService
from pages.appsvc import AppService
from pages.scheduler import Scheduler
from pages.notification import Notification
from pages.config import Config
import time
if __name__ == '__main__':
print('EdgeX GUI AUTO TESTING Starting...')
# driver = Driver()
# time.sleep(2)
lp = LoginPage()
lp.login()
time.sleep(2)
ad = AddNewDevice(lp.getDriver())
ad.addNewDevice()
# time.sleep(1)
# updateSvc = DeviceService(lp.getDriver())
# updateSvc.updateSvc()
# time.sleep(1)
# ap = AppService(lp.getDriver())
# ap.appSvcUpddate()
# time.sleep(1)
# sc = Scheduler(lp.getDriver())
# sc.addIntervalAndAction()
# time.sleep(1)
# noti = Notification(lp.getDriver())
# noti.addSub()
time.sleep(2)
lp.getDriver().quit()
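    # A hedged variant (editorial sketch): wrap the steps in try/finally so the
    # WebDriver always quits, even when a page-object step raises:
    #
    #     lp = LoginPage()
    #     try:
    #         lp.login()
    #         AddNewDevice(lp.getDriver()).addNewDevice()
    #     finally:
    #         lp.getDriver().quit()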
|
[
"pages.login.LoginPage",
"time.sleep"
] |
[((445, 456), 'pages.login.LoginPage', 'LoginPage', ([], {}), '()\n', (454, 456), False, 'from pages.login import LoginPage\n'), ((477, 490), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (487, 490), False, 'import time\n'), ((910, 923), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (920, 923), False, 'import time\n')]
|
import torch.optim as optim
from sklearn.metrics import roc_auc_score, f1_score, jaccard_score
from model_plus import createDeepLabv3Plus
import sys
print(sys.version, sys.platform, sys.executable)
from trainer_plus import train_model
import datahandler_plus
import argparse
import os
import torch
import numpy
torch.cuda.empty_cache()
"""
Version requirements:
PyTorch Version: 1.2.0
Torchvision Version: 0.4.0a0+6b959ee
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-data_directory", help='Specify the dataset directory path')
parser.add_argument(
"-exp_directory", help='Specify the experiment directory where metrics and model weights shall be stored.')
parser.add_argument("--epochs", default=10, type=int)
parser.add_argument("--batchsize", default=2, type=int)
parser.add_argument("--output_stride", default=8, type=int)
parser.add_argument("--channels", default=4, type=int)
parser.add_argument("--pretrained", default='')
parser.add_argument("--class_weights", nargs='+', default=None)
parser.add_argument("--folder_structure", default='sep', help='sep or single')
args = parser.parse_args()
bpath = args.exp_directory
print('Export Directory: ' + bpath)
data_dir = args.data_directory
print('Data Directory: ' + data_dir)
epochs = args.epochs
print('Epochs: ' + str(epochs))
batchsize = args.batchsize
print('Batch size: ' + str(batchsize))
output_stride = args.output_stride
channels = args.channels
print('Number of classes: ' + str(channels))
class_weights = args.class_weights
print('Class weights: ' + str(class_weights))
folder_structure = args.folder_structure
print('folder structure: ' + folder_structure)
model_path = args.pretrained
print('loading pre-trained model from saved state: ' + model_path)
if not os.path.exists(bpath): # if it doesn't exist already
os.makedirs(bpath)
# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017,
# on the 20 categories that are present in the Pascal VOC dataset.
if model_path != '':
try:
model = torch.load(model_path)
print('LOADED MODEL')
model.train()
    except Exception:
print('model path did not load')
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
else:
model = createDeepLabv3Plus(outputchannels=channels, output_stride=output_stride)
model.train()
# Specify the loss function
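# Editorial note: CrossEntropyLoss's `weight` argument expects a 1-D tensor with
# one entry per class, living on the same device as the logits - hence the
# .cuda() call below when class weights are supplied.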
if class_weights is None:
    print('class not weighted')
    criterion = torch.nn.CrossEntropyLoss()
elif len(class_weights) == channels:
    print('class weighted')
    class_weights = numpy.array(class_weights).astype(float)
    torch_class_weights = torch.FloatTensor(class_weights).cuda()
    criterion = torch.nn.CrossEntropyLoss(weight=torch_class_weights)
else:
    print('channels did not align with class weights - default applied')
    print('class not weighted')
    criterion = torch.nn.CrossEntropyLoss()
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# Specify the evaluation metrics
metrics = {'f1_score': f1_score, 'jaccard_score': jaccard_score}
# Create the dataloader
if folder_structure == 'sep':
dataloaders = datahandler_plus.get_dataloader_sep_folder(data_dir, batch_size=batchsize)
else:
dataloaders = datahandler_plus.get_dataloader_single_folder(data_dir, batch_size=batchsize)
trained_model = train_model(model, criterion, dataloaders,
optimizer, bpath=bpath, metrics=metrics, num_epochs=epochs)
# Save the trained model
# torch.save({'model_state_dict':trained_model.state_dict()},os.path.join(bpath,'weights'))
torch.save(model, os.path.join(bpath, 'weights.pt'))
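# A hedged inference sketch (editorial addition, kept commented out). It assumes
# the model follows torchvision's DeepLab convention of returning a dict with an
# 'out' key; createDeepLabv3Plus may differ, and the input shape is illustrative.
# model = torch.load(os.path.join(bpath, 'weights.pt'))
# model.eval()
# with torch.no_grad():
#     dummy = torch.rand(1, 3, 512, 512)
#     logits = model(dummy)['out']
#     pred = logits.argmax(dim=1)  # per-pixel class indices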
|
[
"trainer_plus.train_model",
"os.makedirs",
"argparse.ArgumentParser",
"datahandler_plus.get_dataloader_single_folder",
"torch.load",
"os.path.exists",
"model_plus.createDeepLabv3Plus",
"torch.nn.CrossEntropyLoss",
"torch.FloatTensor",
"numpy.array",
"torch.cuda.empty_cache",
"datahandler_plus.get_dataloader_sep_folder",
"os.path.join"
] |
[((312, 336), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (334, 336), False, 'import torch\n'), ((460, 485), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (483, 485), False, 'import argparse\n'), ((3450, 3557), 'trainer_plus.train_model', 'train_model', (['model', 'criterion', 'dataloaders', 'optimizer'], {'bpath': 'bpath', 'metrics': 'metrics', 'num_epochs': 'epochs'}), '(model, criterion, dataloaders, optimizer, bpath=bpath, metrics=\n metrics, num_epochs=epochs)\n', (3461, 3557), False, 'from trainer_plus import train_model\n'), ((1778, 1799), 'os.path.exists', 'os.path.exists', (['bpath'], {}), '(bpath)\n', (1792, 1799), False, 'import os\n'), ((1835, 1853), 'os.makedirs', 'os.makedirs', (['bpath'], {}), '(bpath)\n', (1846, 1853), False, 'import os\n'), ((2296, 2369), 'model_plus.createDeepLabv3Plus', 'createDeepLabv3Plus', ([], {'outputchannels': 'channels', 'output_stride': 'output_stride'}), '(outputchannels=channels, output_stride=output_stride)\n', (2315, 2369), False, 'from model_plus import createDeepLabv3Plus\n'), ((2492, 2519), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2517, 2519), False, 'import torch\n'), ((3252, 3326), 'datahandler_plus.get_dataloader_sep_folder', 'datahandler_plus.get_dataloader_sep_folder', (['data_dir'], {'batch_size': 'batchsize'}), '(data_dir, batch_size=batchsize)\n', (3294, 3326), False, 'import datahandler_plus\n'), ((3351, 3428), 'datahandler_plus.get_dataloader_single_folder', 'datahandler_plus.get_dataloader_single_folder', (['data_dir'], {'batch_size': 'batchsize'}), '(data_dir, batch_size=batchsize)\n', (3396, 3428), False, 'import datahandler_plus\n'), ((3717, 3750), 'os.path.join', 'os.path.join', (['bpath', '"""weights.pt"""'], {}), "(bpath, 'weights.pt')\n", (3729, 3750), False, 'import os\n'), ((2060, 2082), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (2070, 2082), False, 'import torch\n'), ((2755, 2808), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'weight': 'torch_class_weights'}), '(weight=torch_class_weights)\n', (2780, 2808), False, 'import torch\n'), ((2937, 2964), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2962, 2964), False, 'import torch\n'), ((2204, 2277), 'model_plus.createDeepLabv3Plus', 'createDeepLabv3Plus', ([], {'outputchannels': 'channels', 'output_stride': 'output_stride'}), '(outputchannels=channels, output_stride=output_stride)\n', (2223, 2277), False, 'from model_plus import createDeepLabv3Plus\n'), ((2632, 2658), 'numpy.array', 'numpy.array', (['class_weights'], {}), '(class_weights)\n', (2643, 2658), False, 'import numpy\n'), ((2699, 2731), 'torch.FloatTensor', 'torch.FloatTensor', (['class_weights'], {}), '(class_weights)\n', (2716, 2731), False, 'import torch\n')]
|
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import TYPE_CHECKING, Tuple
from feishu.exception import LarkInvalidArguments, OpenLarkException
if TYPE_CHECKING:
from feishu.api import OpenLark
# https://open.feishu.cn/document/ukTMukTMukTM/uIzMxEjLyMTMx4iMzETM
class APIIDMixin(object):
def email_to_id(self, email):
"""邮箱转 open_id 和 user_id
:type self: OpenLark
:param email: 用户的邮箱
:type email: str
:return: open_id, user_id
:rtype: Tuple[str, str]
根据用户邮箱获取用户 open_id 和 user_id。
user_id 需要申请 user_id 的权限才能获取到
https://open.feishu.cn/document/ukTMukTMukTM/uEDMwUjLxADM14SMwATN
"""
url = self._gen_request_url('/open-apis/user/v3/email2id')
body = {'email': email}
res = self._post(url, body, with_tenant_token=True)
open_id = res.get('open_id', '') # type: str
user_id = res.get('employee_id', '') # type: str
return open_id, user_id
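    # Hedged usage sketch (editorial comment; assumes an initialized OpenLark
    # client named `lark` - construction parameters are not shown in this file):
    #
    #     open_id, user_id = lark.email_to_id('[email protected]')
    #     open_chat_id, chat_id = lark.get_chat_id_between_user_bot(open_id=open_id)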
def open_id_to_user_id(self, open_id):
"""open_id 转 user_id
:type self: OpenLark
:param open_id: open_id
:type open_id: str
:return: user_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/openid2uid/')
body = {'open_id': open_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('user_id')
def user_id_to_open_id(self, user_id):
"""user_id 转 open_id
:type self: OpenLark
:param user_id: user_id
:type user_id: str
:return: open_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/uid2openid/')
body = {'user_id': user_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_id')
def employee_id_to_user_id(self, employee_id):
"""employee_id 转 user_id
:type self: OpenLark
:param employee_id: employee_id
:type employee_id: str
:return: user_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/eid2uid/')
body = {'employee_id': employee_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('user_id')
def user_id_to_employee_id(self, user_id):
"""user_id 转 employee_id
:type self: OpenLark
:param user_id: user_id
:type user_id: str
:return: employee_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/uid2eid/')
body = {'user_id': user_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('employee_id')
def chat_id_to_open_chat_id(self, chat_id):
"""chat_id 转 open_chat_id
:type self: OpenLark
:param chat_id: chat_id
:type chat_id: str
:return: open_chat_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/cid2ocid/')
body = {'chat_id': chat_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_chat_id')
def open_chat_id_to_chat_id(self, open_chat_id):
"""open_chat_id 转 chat_id
:type self: OpenLark
:param open_chat_id: open_chat_id
:type open_chat_id: str
:return: chat_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/ocid2cid/')
body = {'open_chat_id': open_chat_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('chat_id')
def message_id_to_open_message_id(self, message_id):
"""message_id 转 open_message_id
:type self: OpenLark
:param message_id: message_id
:type message_id: str
:return: open_message_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/mid2omid/')
body = {'message_id': message_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_message_id')
def open_message_id_to_message_id(self, open_message_id):
"""open_message_id 转 message_id
:type self: OpenLark
:param open_message_id: open_message_id
:type open_message_id: str
:return: message_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/omid2mid/')
body = {'open_message_id': open_message_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('message_id')
def department_id_to_open_department_id(self, department_id):
"""department_id 转 open_department_id
:type self: OpenLark
:param department_id: department_id
:type department_id: str
:return: open_department_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/did2odid/')
body = {'department_id': department_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('open_department_id')
def open_department_id_to_department_id(self, open_department_id):
"""open_department_id 转 department_id
:type self: OpenLark
:param open_department_id: open_department_id
:type open_department_id: str
:return: department_id
:rtype: str
"""
url = self._gen_request_url('/open-apis/exchange/v3/odid2did/')
body = {'open_department_id': open_department_id}
res = self._post(url, body, with_tenant_token=True)
return res.get('department_id')
def get_chat_id_between_user_bot(self, open_id='', user_id=''):
"""获取机器人和用户的 chat_id
:type self: OpenLark
:param open_id: open_id
:type open_id: str
:param user_id: user_id
:return: open_chat_id, chat_id
:rtype: Tuple[str, str]
https://lark-open.bytedance.net/document/ukTMukTMukTM/uYjMxEjL2ITMx4iNyETM
"""
if open_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?open_id={}'.format(open_id))
elif user_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?user_id={}'.format(user_id))
else:
raise OpenLarkException(msg='[get_chat_id_between_user_bot] empty open_id and user_id')
res = self._get(url, with_tenant_token=True)
open_chat_id = res.get('open_chat_id', '') # type: str
chat_id = res.get('chat_id', '') # type: str
return open_chat_id, chat_id
def get_chat_id_between_users(self, to_user_id,
open_id='',
user_id=''):
"""获取用户和用户的之前的 chat_id
:type self: OpenLark
:param to_user_id: 到谁的 open_id
:type to_user_id: str
:param open_id: 从谁来的 open_id
:type open_id: str
:param user_id: 从谁来的 user_id
:type user_id: str
:return: 两个人之间的 open_chat_id, chat_id
:rtype: Tuple[str, str]
仅头条内部用户可用 需要申请权限才能获取 @fanlv
open_id 和 user_id 传一个就行
https://lark-open.bytedance.net/document/ukTMukTMukTM/uYjMxEjL2ITMx4iNyETM
"""
if open_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?open_id={}&chatter={}'.format(open_id, to_user_id))
elif user_id:
url = self._gen_request_url('/open-apis/chat/v3/p2p/id?user_id={}&chatter={}'.format(user_id, to_user_id))
else:
raise LarkInvalidArguments(msg='[get_chat_id_between_users] empty open_id and user_id')
res = self._get(url, with_tenant_token=True)
open_chat_id = res.get('open_chat_id', '') # type: str
chat_id = res.get('chat_id', '') # type: str
return open_chat_id, chat_id
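    # Usage sketch (illustrative; not part of the original source): these
    # mixin methods are called on an OpenLark client instance. The
    # constructor arguments below are assumptions, not the documented API.
    #
    #   client = OpenLark(app_id='cli_xxx', app_secret='xxx')
    #   open_id, user_id = client.email_to_id('someone@example.com')
    #   chat_id = client.open_chat_id_to_chat_id(open_chat_id)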
|
[
"feishu.exception.LarkInvalidArguments",
"feishu.exception.OpenLarkException"
] |
[((6357, 6443), 'feishu.exception.OpenLarkException', 'OpenLarkException', ([], {'msg': '"""[get_chat_id_between_user_bot] empty open_id and user_id"""'}), "(msg=\n '[get_chat_id_between_user_bot] empty open_id and user_id')\n", (6374, 6443), False, 'from feishu.exception import LarkInvalidArguments, OpenLarkException\n'), ((7609, 7695), 'feishu.exception.LarkInvalidArguments', 'LarkInvalidArguments', ([], {'msg': '"""[get_chat_id_between_users] empty open_id and user_id"""'}), "(msg=\n '[get_chat_id_between_users] empty open_id and user_id')\n", (7629, 7695), False, 'from feishu.exception import LarkInvalidArguments, OpenLarkException\n')]
|
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
Linear algebra functions.
.. seealso::
`numpy.linalg` for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinv2 - Pseudo-inverse using svd
   pinvh - Pseudo-inverse of Hermitian matrix
kron - Kronecker product of two arrays
tril - Construct a lower-triangular matrix from a given matrix
triu - Construct an upper-triangular matrix from a given matrix
orthogonal_procrustes - Solve an orthogonal Procrustes problem
LinAlgError
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
   solve_lyapunov - Solve the (continuous-time) Lyapunov equation
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
dft - Discrete Fourier transform matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
tri - Construct a matrix filled with ones at and below a given diagonal
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
"""
from __future__ import division, print_function, absolute_import
from .linalg_version import linalg_version as __version__
from .misc import *
from .basic import *
from .decomp import *
from .decomp_lu import *
from .decomp_cholesky import *
from .decomp_qr import *
from ._decomp_qz import *
from .decomp_svd import *
from .decomp_schur import *
from ._decomp_polar import *
from .matfuncs import *
from .blas import *
from .lapack import *
from .special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals',
'eigvalsh', 'lstsq', 'cholesky']:
try:
register_func(k, eval(k))
except ValueError:
pass
try:
register_func('pinv', pinv2)
except ValueError:
pass
del k, register_func
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
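# Illustrative example (added; not part of scipy itself) using routines
# listed in the docstring above:
#
#   import numpy as np
#   from scipy.linalg import solve, norm
#   A = np.array([[3., 1.], [1., 2.]])
#   b = np.array([9., 8.])
#   x = solve(A, b)                      # -> array([ 2.,  3.])
#   assert norm(np.dot(A, x) - b) < 1e-12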
|
[
"numpy.dual.register_func",
"numpy.testing.Tester"
] |
[((6396, 6424), 'numpy.dual.register_func', 'register_func', (['"""pinv"""', 'pinv2'], {}), "('pinv', pinv2)\n", (6409, 6424), False, 'from numpy.dual import register_func\n'), ((6523, 6531), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (6529, 6531), False, 'from numpy.testing import Tester\n'), ((6546, 6554), 'numpy.testing.Tester', 'Tester', ([], {}), '()\n', (6552, 6554), False, 'from numpy.testing import Tester\n')]
|
from dependency_injector.providers import Singleton
def singleton_provider(obj):
def clb():
return obj
return Singleton(clb)
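if __name__ == '__main__':
    # Usage sketch (added for illustration): the Singleton provider calls
    # `clb` once and caches the result, so every call yields the same object.
    config = {'debug': True}
    provider = singleton_provider(config)
    assert provider() is provider() is config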
|
[
"dependency_injector.providers.Singleton"
] |
[((129, 143), 'dependency_injector.providers.Singleton', 'Singleton', (['clb'], {}), '(clb)\n', (138, 143), False, 'from dependency_injector.providers import Singleton\n')]
|
import math
import itertools
from typing import List  # needed for the List[int] annotation below
class Solution:
def minimumIncompatibility(self, nums: List[int], k: int) -> int:
n = len(nums)
if k == n:
return 0
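        # dp[mask][i]: minimum incompatibility using exactly the elements in
        # bitmask `mask`, with element i the one placed most recently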
dp = [[math.inf] * n for _ in range(1 << n)]
nums.sort()
for i in range(n):
dp[1<<i][i] = 0
for mask in range(1<<n):
n_z_bits = [j for j in range(n) if mask & (1 << j)]
if len(n_z_bits) % (n // k) == 1:
for j, l in itertools.permutations(n_z_bits, 2):
dp[mask][l] = min(dp[mask][l], dp[mask ^ (1 << l)][j])
else:
for j, l in itertools.combinations(n_z_bits, 2):
if nums[j] != nums[l]:
dp[mask][j] = min(dp[mask][j], dp[mask ^ (1 << j)][l] + nums[l] - nums[j])
return min(dp[-1]) if min(dp[-1]) != math.inf else -1
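if __name__ == '__main__':
    # Quick illustrative check (added): splitting [1, 2, 1, 4] into k=2 groups
    # of distinct values is best done as {1, 2} and {1, 4}, giving
    # (2 - 1) + (4 - 1) = 4.
    assert Solution().minimumIncompatibility([1, 2, 1, 4], 2) == 4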
|
[
"itertools.combinations",
"itertools.permutations"
] |
[((478, 513), 'itertools.permutations', 'itertools.permutations', (['n_z_bits', '(2)'], {}), '(n_z_bits, 2)\n', (500, 513), False, 'import itertools\n'), ((636, 671), 'itertools.combinations', 'itertools.combinations', (['n_z_bits', '(2)'], {}), '(n_z_bits, 2)\n', (658, 671), False, 'import itertools\n')]
|
import sys
from typing import List
from unittest.mock import patch
import pytest
from poetry_pdf.cli import parse_cli
from poetry_pdf.exceptions import InvalidCommand, InvalidSourcePath
@pytest.mark.parametrize(
"argv",
[
["poetry-pdf", "tests/fixtures/the_raven.txt"],
[
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--output-dir",
".",
],
[
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--author",
"<NAME>",
],
],
)
def test_parse_cli_valid_command(argv: List[str]) -> None:
with patch.object(sys, "argv", argv):
parse_cli()
def test_parse_cli_invalid_command() -> None:
argv = ["poetry-pdf", "tests/fixtures/the_raven.txt", "123"]
with patch.object(sys, "argv", argv), pytest.raises(
InvalidCommand
):
parse_cli()
def test_parse_cli_invalid_source() -> None:
argv = ["poetry-pdf", "tests/fixtures/not_the_raven.txt"]
with patch.object(sys, "argv", argv), pytest.raises(
InvalidSourcePath
):
parse_cli()
def test_parse_multiple_stylesheets() -> None:
argv = [
"poetry-pdf",
"tests/fixtures/the_raven.txt",
"--stylesheet",
"sheet1",
"--stylesheet",
"sheet2",
]
with patch.object(sys, "argv", argv):
stylesheets = parse_cli()[3]
assert stylesheets == ["sheet1", "sheet2"]
|
[
"poetry_pdf.cli.parse_cli",
"pytest.mark.parametrize",
"pytest.raises",
"unittest.mock.patch.object"
] |
[((190, 422), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""argv"""', "[['poetry-pdf', 'tests/fixtures/the_raven.txt'], ['poetry-pdf',\n 'tests/fixtures/the_raven.txt', '--output-dir', '.'], ['poetry-pdf',\n 'tests/fixtures/the_raven.txt', '--author', '<NAME>']]"], {}), "('argv', [['poetry-pdf',\n 'tests/fixtures/the_raven.txt'], ['poetry-pdf',\n 'tests/fixtures/the_raven.txt', '--output-dir', '.'], ['poetry-pdf',\n 'tests/fixtures/the_raven.txt', '--author', '<NAME>']])\n", (213, 422), False, 'import pytest\n'), ((639, 670), 'unittest.mock.patch.object', 'patch.object', (['sys', '"""argv"""', 'argv'], {}), "(sys, 'argv', argv)\n", (651, 670), False, 'from unittest.mock import patch\n'), ((680, 691), 'poetry_pdf.cli.parse_cli', 'parse_cli', ([], {}), '()\n', (689, 691), False, 'from poetry_pdf.cli import parse_cli\n'), ((814, 845), 'unittest.mock.patch.object', 'patch.object', (['sys', '"""argv"""', 'argv'], {}), "(sys, 'argv', argv)\n", (826, 845), False, 'from unittest.mock import patch\n'), ((847, 876), 'pytest.raises', 'pytest.raises', (['InvalidCommand'], {}), '(InvalidCommand)\n', (860, 876), False, 'import pytest\n'), ((900, 911), 'poetry_pdf.cli.parse_cli', 'parse_cli', ([], {}), '()\n', (909, 911), False, 'from poetry_pdf.cli import parse_cli\n'), ((1030, 1061), 'unittest.mock.patch.object', 'patch.object', (['sys', '"""argv"""', 'argv'], {}), "(sys, 'argv', argv)\n", (1042, 1061), False, 'from unittest.mock import patch\n'), ((1063, 1095), 'pytest.raises', 'pytest.raises', (['InvalidSourcePath'], {}), '(InvalidSourcePath)\n', (1076, 1095), False, 'import pytest\n'), ((1119, 1130), 'poetry_pdf.cli.parse_cli', 'parse_cli', ([], {}), '()\n', (1128, 1130), False, 'from poetry_pdf.cli import parse_cli\n'), ((1354, 1385), 'unittest.mock.patch.object', 'patch.object', (['sys', '"""argv"""', 'argv'], {}), "(sys, 'argv', argv)\n", (1366, 1385), False, 'from unittest.mock import patch\n'), ((1409, 1420), 'poetry_pdf.cli.parse_cli', 'parse_cli', ([], {}), '()\n', (1418, 1420), False, 'from poetry_pdf.cli import parse_cli\n')]
|
import sys
import shutil
import json
import subprocess
from typing import Union
from pathlib import Path
from jinja2 import Template
class UserModel():
"""Handles user-defined model described in python scripts.
"""
def __init__(self):
self.parent = Path(__file__).resolve().parent
self.dst = self.parent / "config/user_model/"
self.model_json = self.dst / "user_model.json"
self.user_model_key = "user_model"
self.current_model_key = "current_model"
self.models = []
self.import_models()
def register_model(self, src: str):
"""Register a specified model as new one.
Args:
src (str): The file path to register.
"""
src_abs = Path(src).resolve()
self.validate_model(src_abs)
shutil.copy(src_abs, self.dst)
self.set_current_model(src_abs)
self.update_json()
def delete_model(self, src: str):
"""Delete python file corresponding to model in the dst dir.
Args:
src (str): File name of python to delete
"""
path = self.parent / src
if path.exists():
            path.unlink()  # Path.unlink() takes no path argument
self.update_json()
def validate_model(self, path: Path) -> bool:
if path.suffix != ".py":
print(
"\033[31mThe file you're trying to add have no suffix 'py'\033[0m",
file=sys.stderr)
sys.exit(-1)
cmd = f"python {path}"
ret = subprocess.check_output(cmd.split()).decode("utf-8").strip("\n")
try:
f = float(ret)
int(f)
return True
except ValueError:
print(
"\033[31mThe type of return value isn't int or float\033[0m",
file=sys.stderr)
sys.exit(-1)
def update_json(self):
"""Update model json
Json consists of key and value. In case of "test.py" model, key and
value are as follows respectively.
test : test.py
"""
files = list(self.dst.glob("*.py"))
contens = {}
tmp = []
for i in files:
tmp.append(str(i.stem))
contens[self.user_model_key] = tmp
# load the current model in json
current = self.get_current_model()
if current:
contens["current_model"] = str(Path(current).stem)
else:
contens["current_model"] = ""
# write the registered models and current model into json
with open(str(self.model_json), "w") as f:
json.dump(contens, f, indent=4)
def set_current_model(self, model: Union[Path, str]):
if type(model) == str:
pass
else:
model = str(model.stem)
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
j[self.current_model_key] = model
with open(str(self.model_json), "w") as f:
json.dump(j, f, indent=4)
def remove_current_model(self):
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
j[self.current_model_key] = ""
with open(str(self.model_json), "w") as f:
json.dump(j, f, indent=4)
def import_models(self) -> list:
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
self.models = j[self.user_model_key]
return self.models
return []
def get_value(self):
f = self.get_current_model()
return self.execute(f)
def get_current_model(self) -> str:
"""Gets a file name corresponding to the model.
Returns:
str: The file name in the format of abs path.
"""
if self.model_json.exists():
with open(str(self.model_json), "r") as f:
j = json.load(f)
if self.current_model_key in j:
current_model = j[self.current_model_key]
if current_model:
self.py_file = self.parent / current_model
return str(self.py_file)
return None
def execute(self, pyfile: str):
"""Execute model file.
Args:
pyfile (str): The model file.
"""
if Path(pyfile).suffix != ".py":
pyfile += ".py"
cmd = f"python {pyfile}"
ret = subprocess.check_output(cmd.split()).decode("utf-8").strip("\n")
return ret
class JinjaTemplate():
def __init__(self, data: dict):
self.data = data
self.parent = Path(__file__).resolve().parent
self.model_prop = self.parent / "config/user_model/model_prop.json"
self.body = self.parent / "templates/jinja/body.html"
self.dst = self.parent / "templates/user_model.html"
self.org = self.parent / "templates/user_model.html.org"
def parse_dict(self):
dct = {}
colors = ["red", "blue", "yello", "green"]
it = iter(colors)
for key, value in self.data.items():
key_words = key.split("_")
if len(key_words) == 1 and not key_words[0] == "datasets":
dct[key_words[0]] = self.parse_int(value)
elif len(key_words) == 2:
if not key_words[0] in dct.keys():
dct[key_words[0]] = {}
dct[key_words[0]][key_words[1]] = self.parse_int(value)
if key_words[0] == "datasets":
dct[key_words[0]]["color"] = next(it)
else:
pass
if "add" in dct.keys():
dct.pop("add")
self.json_data = dct
with open(str(self.model_prop), "w") as f:
json.dump(self.json_data, f, indent=4)
def parse_int(self, data):
try:
return int(data)
except ValueError:
return data
def load_body(self):
with open(str(self.body), "r") as f:
s = f.read()
temp = Template(s)
body = {"body": temp.render(self.json_data)}
return body
def load_template(self, body: dict):
tmp = """
{%- raw %}
{% extends "layout.html" %}
{% block content %}
{%- endraw %}
{{ body }}
{%- raw %}
{% endblock %}
{%- endraw %}
"""
temp = Template(tmp)
html = temp.render(body)
return html
def make_template(self):
self.parse_dict()
body = self.load_body()
html = self.load_template(body)
UserModel().set_current_model(self.data["model"])
self.writer(html)
def writer(self, data: str):
with open(str(self.dst), "w") as f:
f.write(data)
def remove_model(self):
shutil.copy(self.org, self.dst)
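# Usage sketch (illustrative; 'my_model.py' is a hypothetical script that
# prints a single int or float when executed):
#
#   um = UserModel()
#   um.register_model('my_model.py')  # validates, copies and activates it
#   print(um.get_value())             # runs the current model script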
|
[
"jinja2.Template",
"json.dump",
"json.load",
"pathlib.Path",
"shutil.copy",
"sys.exit"
] |
[((810, 840), 'shutil.copy', 'shutil.copy', (['src_abs', 'self.dst'], {}), '(src_abs, self.dst)\n', (821, 840), False, 'import shutil\n'), ((6137, 6148), 'jinja2.Template', 'Template', (['s'], {}), '(s)\n', (6145, 6148), False, 'from jinja2 import Template\n'), ((6497, 6510), 'jinja2.Template', 'Template', (['tmp'], {}), '(tmp)\n', (6505, 6510), False, 'from jinja2 import Template\n'), ((6917, 6948), 'shutil.copy', 'shutil.copy', (['self.org', 'self.dst'], {}), '(self.org, self.dst)\n', (6928, 6948), False, 'import shutil\n'), ((1443, 1455), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1451, 1455), False, 'import sys\n'), ((2583, 2614), 'json.dump', 'json.dump', (['contens', 'f'], {'indent': '(4)'}), '(contens, f, indent=4)\n', (2592, 2614), False, 'import json\n'), ((5862, 5900), 'json.dump', 'json.dump', (['self.json_data', 'f'], {'indent': '(4)'}), '(self.json_data, f, indent=4)\n', (5871, 5900), False, 'import json\n'), ((745, 754), 'pathlib.Path', 'Path', (['src'], {}), '(src)\n', (749, 754), False, 'from pathlib import Path\n'), ((1818, 1830), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1826, 1830), False, 'import sys\n'), ((2884, 2896), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2893, 2896), False, 'import json\n'), ((3018, 3043), 'json.dump', 'json.dump', (['j', 'f'], {'indent': '(4)'}), '(j, f, indent=4)\n', (3027, 3043), False, 'import json\n'), ((3193, 3205), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3202, 3205), False, 'import json\n'), ((3324, 3349), 'json.dump', 'json.dump', (['j', 'f'], {'indent': '(4)'}), '(j, f, indent=4)\n', (3333, 3349), False, 'import json\n'), ((3500, 3512), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3509, 3512), False, 'import json\n'), ((4010, 4022), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4019, 4022), False, 'import json\n'), ((4435, 4447), 'pathlib.Path', 'Path', (['pyfile'], {}), '(pyfile)\n', (4439, 4447), False, 'from pathlib import Path\n'), ((272, 286), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (276, 286), False, 'from pathlib import Path\n'), ((2377, 2390), 'pathlib.Path', 'Path', (['current'], {}), '(current)\n', (2381, 2390), False, 'from pathlib import Path\n'), ((4733, 4747), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4737, 4747), False, 'from pathlib import Path\n')]
|
# Generated by Django 3.2.7 on 2021-10-03 21:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Post",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=250)),
("url", models.URLField()),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="post_created_by",
to=settings.AUTH_USER_MODEL,
),
),
(
"updated_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="post_updated_by",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["-created_at"],
},
),
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("content", models.TextField()),
("edited", models.BooleanField(default="False")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"created_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_created_by",
to=settings.AUTH_USER_MODEL,
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_parent",
to="forum.post",
),
),
(
"updated_by",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comment_updated_by",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"ordering": ["-created_at"],
},
),
]
|
[
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DateTimeField"
] |
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((474, 570), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (493, 570), False, 'from django.db import migrations, models\n'), ((732, 764), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)'}), '(max_length=250)\n', (748, 764), False, 'from django.db import migrations, models\n'), ((791, 808), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (806, 808), False, 'from django.db import migrations, models\n'), ((842, 881), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (862, 881), False, 'from django.db import migrations, models\n'), ((915, 950), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (935, 950), False, 'from django.db import migrations, models\n'), ((1025, 1153), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""post_created_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='post_created_by', to=settings.AUTH_USER_MODEL)\n", (1042, 1153), False, 'from django.db import migrations, models\n'), ((1336, 1464), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""post_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='post_updated_by', to=settings.AUTH_USER_MODEL)\n", (1353, 1464), False, 'from django.db import migrations, models\n'), ((1828, 1924), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1847, 1924), False, 'from django.db import migrations, models\n'), ((2088, 2106), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2104, 2106), False, 'from django.db import migrations, models\n'), ((2136, 2172), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '"""False"""'}), "(default='False')\n", (2155, 2172), False, 'from django.db import migrations, models\n'), ((2206, 2245), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2226, 2245), False, 'from django.db import migrations, models\n'), ((2279, 2314), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2299, 2314), False, 'from django.db import migrations, models\n'), ((2389, 2520), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comment_created_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comment_created_by', to=settings.AUTH_USER_MODEL)\n", (2406, 2520), False, 'from django.db import migrations, models\n'), ((2699, 2814), 'django.db.models.ForeignKey', 
'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comment_parent"""', 'to': '"""forum.post"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comment_parent', to='forum.post')\n", (2716, 2814), False, 'from django.db import migrations, models\n'), ((2997, 3128), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comment_updated_by"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comment_updated_by', to=settings.AUTH_USER_MODEL)\n", (3014, 3128), False, 'from django.db import migrations, models\n')]
|
from django.http import HttpResponse
from rest_framework import permissions
from CRUDFilters.views import CRUDFilterModelViewSet
from .models import TestClass
from .serializers import TestClassSerializer
class TestClassViewset(CRUDFilterModelViewSet):
serializer_class = TestClassSerializer
crud_model = TestClass
permission_classes = (permissions.AllowAny,)
def create(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def update(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def partial_update(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def destroy(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
def patch(self, request, *args, **kwargs):
return HttpResponse("Everything's fine here", status=200)
|
[
"django.http.HttpResponse"
] |
[((440, 490), 'django.http.HttpResponse', 'HttpResponse', (['"""Everything\'s fine here"""'], {'status': '(200)'}), '("Everything\'s fine here", status=200)\n', (452, 490), False, 'from django.http import HttpResponse\n'), ((555, 605), 'django.http.HttpResponse', 'HttpResponse', (['"""Everything\'s fine here"""'], {'status': '(200)'}), '("Everything\'s fine here", status=200)\n', (567, 605), False, 'from django.http import HttpResponse\n'), ((678, 728), 'django.http.HttpResponse', 'HttpResponse', (['"""Everything\'s fine here"""'], {'status': '(200)'}), '("Everything\'s fine here", status=200)\n', (690, 728), False, 'from django.http import HttpResponse\n'), ((794, 844), 'django.http.HttpResponse', 'HttpResponse', (['"""Everything\'s fine here"""'], {'status': '(200)'}), '("Everything\'s fine here", status=200)\n', (806, 844), False, 'from django.http import HttpResponse\n'), ((908, 958), 'django.http.HttpResponse', 'HttpResponse', (['"""Everything\'s fine here"""'], {'status': '(200)'}), '("Everything\'s fine here", status=200)\n', (920, 958), False, 'from django.http import HttpResponse\n')]
|
import os
from PIL import Image
from models.model import model
import argparse
import numpy as np
import tensorflow as tf
import shutil
def create(args):
if args.pre_trained == 'facenet':
from models.Face_recognition import FR_model
FR = FR_model()
Model = tf.keras.models.load_model(args.save_path)
path = args.img_dir + '/'
names = os.listdir(path)
Add = []
Age = []
for idx, i in enumerate(names, 0):
curr_img = Image.open(path + i)
# print(path+i)
curr_img = curr_img.resize((args.img_size, args.img_size))
curr_img = np.asarray(curr_img)
curr_img = curr_img.astype('float64')
curr_img /= 127.5
curr_img = curr_img - 1
X = [curr_img]
X = np.asarray(X)
assert X.shape == (1, args.img_size, args.img_size, 3), 'check input image shape'
X = FR(X)
y = Model(X)
Add.append(i)
Age.append(y)
if (idx + 1) % args.log_step == 0:
print('{} no of images predicted'.format(idx + 1))
os.mkdir('Face-AHQ')
# path = '/content/data/celeba_hq/train/male/'
path = args.img_dir + '/'
for i in range(len(Add)):
ages = os.listdir('Face-AHQ')
        age = int(Age[i])
add = path + Add[i]
# creates folder
if str(age) not in ages:
os.mkdir('Face-AHQ/{}'.format(age))
dest = 'Face-AHQ/{}/{}.png'.format(age, i)
shutil.move(add, dest)
if (i + 1) % args.log_step == 0:
print('{} no of images saved'.format(i + 1))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model configuration.
parser.add_argument('--pre_trained', type=str, default = 'facenet', help='pre-trained model to be used')
parser.add_argument('--img_dir', type=str, default = 'data', help='pre-trained model to be used')
parser.add_argument('--img_size', type=int, default = 160, help='size of image to be fed to the model')
parser.add_argument('--log_step', type=int, default = 50, help='number of steps to be taken before logging')
parser.add_argument('--save_path', type=str, default = 'Model_checkpoint',
help = 'path of dir where model is to be saved')
args = parser.parse_args()
create(args)
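# Example invocation (illustrative; the script filename is an assumption):
#   python create_face_ahq.py --img_dir data --img_size 160 \
#       --save_path Model_checkpoint --log_step 50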
|
[
"os.mkdir",
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"numpy.asarray",
"models.Face_recognition.FR_model",
"PIL.Image.open",
"shutil.move",
"os.listdir"
] |
[((373, 389), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (383, 389), False, 'import os\n'), ((1063, 1083), 'os.mkdir', 'os.mkdir', (['"""Face-AHQ"""'], {}), "('Face-AHQ')\n", (1071, 1083), False, 'import os\n'), ((1618, 1643), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1641, 1643), False, 'import argparse\n'), ((260, 270), 'models.Face_recognition.FR_model', 'FR_model', ([], {}), '()\n', (268, 270), False, 'from models.Face_recognition import FR_model\n'), ((287, 329), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.save_path'], {}), '(args.save_path)\n', (313, 329), True, 'import tensorflow as tf\n'), ((474, 494), 'PIL.Image.open', 'Image.open', (['(path + i)'], {}), '(path + i)\n', (484, 494), False, 'from PIL import Image\n'), ((605, 625), 'numpy.asarray', 'np.asarray', (['curr_img'], {}), '(curr_img)\n', (615, 625), True, 'import numpy as np\n'), ((765, 778), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (775, 778), True, 'import numpy as np\n'), ((1210, 1232), 'os.listdir', 'os.listdir', (['"""Face-AHQ"""'], {}), "('Face-AHQ')\n", (1220, 1232), False, 'import os\n'), ((1454, 1476), 'shutil.move', 'shutil.move', (['add', 'dest'], {}), '(add, dest)\n', (1465, 1476), False, 'import shutil\n')]
|
# -*- coding: utf-8 -*-
"""
The node registry is a place to list the relationships between node types
and their views.
Nodular does *not* provide a global instance of :class:`NodeRegistry`. Since
the registry determines what is available in an app, registries should be
constructed as app-level globals.
"""
from inspect import isclass
from collections import OrderedDict, defaultdict
from werkzeug.routing import Map as UrlMap
from .node import Node
__all__ = ['NodeRegistry']
def dottedname(entity):
"""Return a dotted name to the given named entity"""
return entity.__module__ + '.' + entity.__name__
class RegistryItem(object):
"""Container for registry entry data"""
pass
class NodeRegistry(object):
"""
Registry for node types and node views.
"""
def __init__(self):
self.nodes = OrderedDict()
self.child_nodetypes = defaultdict(set)
self.nodeviews = defaultdict(list)
self.viewlist = {}
self.urlmaps = defaultdict(lambda: UrlMap(strict_slashes=False))
def register_node(self, model, view=None, itype=None, title=None,
child_nodetypes=None, parent_nodetypes=None):
"""
Register a node.
:param model: Node model.
:param view: View for this node type (optional).
:param string itype: Register the node model as an instance type (optional).
:param string title: Optional title for the instance type.
:param list child_nodetypes: Allowed child nodetypes.
None or empty implies no children allowed.
:param list parent_nodetypes: Nodetypes that this node can be a child of.
:type model: :class:`~nodular.node.Node`
:type view: :class:`~nodular.crud.NodeView`
The special value ``'*'`` in ``child_nodetypes`` implies that this node
is a generic container. ``'*'`` in ``parent_nodetypes`` implies that
this node can appear in any container that has ``'*'`` in
``child_nodetypes``.
"""
item = RegistryItem()
item.model = model
item.nodetype = itype or model.__type__
item.title = (title or model.__title__) if itype else model.__title__
self.nodes[item.nodetype] = item
if view is not None:
self.register_view(item.nodetype, view)
self._register_parentchild(item, child_nodetypes, parent_nodetypes)
def _register_parentchild(self, regitem, child_nodetypes=None, parent_nodetypes=None):
if child_nodetypes is not None:
self.child_nodetypes[regitem.nodetype].update(
[c.__type__ if isinstance(c, Node) else c for c in child_nodetypes])
for ptype in parent_nodetypes or []:
self.child_nodetypes[ptype.__type__ if isinstance(ptype, Node) else ptype].add(regitem.nodetype)
def register_view(self, nodetype, view):
"""
Register a view.
:param string nodetype: Node type that this view renders for.
:param view: View class.
:type view: :class:`~nodular.view.NodeView`
"""
if isclass(nodetype):
nodetype = nodetype.__type__
self.nodeviews[nodetype].append(view)
dotted_view_name = dottedname(view)
self.viewlist[dotted_view_name] = view
# Combine URL rules from across views for the same nodetype
for rule in view.url_map.iter_rules():
rule = rule.empty()
rule.endpoint = dotted_view_name + '/' + rule.endpoint
self.urlmaps[nodetype].add(rule)
self.urlmaps[nodetype].update()
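# Usage sketch (illustrative): MyNode and MyNodeView are hypothetical
# subclasses of Node and NodeView.
#
#   registry = NodeRegistry()
#   registry.register_node(MyNode, view=MyNodeView,
#                          child_nodetypes=['*'], parent_nodetypes=['*'])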
|
[
"collections.OrderedDict",
"collections.defaultdict",
"inspect.isclass",
"werkzeug.routing.Map"
] |
[((836, 849), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (847, 849), False, 'from collections import OrderedDict, defaultdict\n'), ((881, 897), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (892, 897), False, 'from collections import OrderedDict, defaultdict\n'), ((923, 940), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (934, 940), False, 'from collections import OrderedDict, defaultdict\n'), ((3092, 3109), 'inspect.isclass', 'isclass', (['nodetype'], {}), '(nodetype)\n', (3099, 3109), False, 'from inspect import isclass\n'), ((1011, 1039), 'werkzeug.routing.Map', 'UrlMap', ([], {'strict_slashes': '(False)'}), '(strict_slashes=False)\n', (1017, 1039), True, 'from werkzeug.routing import Map as UrlMap\n')]
|
#!/usr/bin/env python
# SPDX-FileCopyrightText: 2021 iteratec GmbH
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock, Mock
from unittest import TestCase
from zapclient.configuration import ZapConfiguration
class ZapSpiderHttpTests(TestCase):
@pytest.mark.unit
def test_has_spider_configurations(self):
config = ZapConfiguration("./tests/mocks/context-with-overlay/", "https://www.secureCodeBox.io/")
self.assertIsNone(config.get_active_spider_config)
config = ZapConfiguration("./tests/mocks/scan-full-bodgeit-docker/", "http://bodgeit:8080/")
self.assertIsNotNone(config.get_active_spider_config)
|
[
"zapclient.configuration.ZapConfiguration"
] |
[((398, 490), 'zapclient.configuration.ZapConfiguration', 'ZapConfiguration', (['"""./tests/mocks/context-with-overlay/"""', '"""https://www.secureCodeBox.io/"""'], {}), "('./tests/mocks/context-with-overlay/',\n 'https://www.secureCodeBox.io/')\n", (414, 490), False, 'from zapclient.configuration import ZapConfiguration\n'), ((564, 651), 'zapclient.configuration.ZapConfiguration', 'ZapConfiguration', (['"""./tests/mocks/scan-full-bodgeit-docker/"""', '"""http://bodgeit:8080/"""'], {}), "('./tests/mocks/scan-full-bodgeit-docker/',\n 'http://bodgeit:8080/')\n", (580, 651), False, 'from zapclient.configuration import ZapConfiguration\n')]
|
# -*- coding: utf-8 -*-
import sys
import os
UNITY_PATH = "/Applications/Unity/Hub/Editor/2019.4.18f1c1/Unity.app/Contents/MacOS/Unity"
class Unity(object):
# @staticmethod
# def SwitchPlatorm()
# @staticmethod
# def GeneratorWrapCode():
# Unity.ExecuteScript("CSObjectWrapEditor.Generator", "ClearAll")
# Unity.ExecuteScript("CSObjectWrapEditor.Generator", "GenAll")
# @staticmethod
# def ExportProject(platform):
# Unity.ExecuteScript("SwitchScene", "Export" + platform.capitalize() + "Release")
@staticmethod
def BuildAssetBundle(platform):
Unity.ExecuteScript("AssetsBundleBuilder", "BuildAssets" + platform.capitalize(), "BuildAssetBundle")
@staticmethod
def ExecuteScript(className, funcName, logName):
logFile = os.environ["BUILD_DIR"] + "/Log/" + logName + ".log";
args1 = UNITY_PATH
args2 = "-quit -batchmode"
args3 = "-logFile " + logFile
args4 = "-projectPath " + os.environ["GAME_DIR"]
args5 = "-nographics"
args6 = "-executeMethod " + className+"."+funcName
cmd = "'%s' %s %s %s %s %s" % (args1, args2, args3, args4, args5, args6)
os.system("echo "+cmd+" >> "+logFile+" 2>&1")
os.system(cmd + " >> "+logFile+" 2>&1")
# os.system(cmd)
# Log.AppendFile(tmpFile)
# @staticmethod
# def AppendFile(file):
# sys.stdin = open(file,"r")
# sys.stdout = open(os.environ["LOG_FILE"],"a")
# sys.stdout.write(sys.stdin.read())
# @staticmethod
# def Cmd(cmd):
# if os.environ.has_key("LOG_FILE"):
|
[
"os.system"
] |
[((1087, 1140), 'os.system', 'os.system', (["('echo ' + cmd + ' >> ' + logFile + ' 2>&1')"], {}), "('echo ' + cmd + ' >> ' + logFile + ' 2>&1')\n", (1096, 1140), False, 'import os\n'), ((1135, 1178), 'os.system', 'os.system', (["(cmd + ' >> ' + logFile + ' 2>&1')"], {}), "(cmd + ' >> ' + logFile + ' 2>&1')\n", (1144, 1178), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import re
import operator
from utool import util_inject
print, rrr, profile = util_inject.inject2(__name__)
def modify_tags(tags_list, direct_map=None, regex_map=None, regex_aug=None,
delete_unmapped=False, return_unmapped=False,
return_map=False):
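    """Rename tags with a direct alias map and/or regex alias rules, optionally
    augmenting tags that match `regex_aug` and dropping unmapped tags.
    Illustrative (added) example, assuming utool's alias helpers behave like
    build_alias_map/alias_tags below:
        >>> modify_tags([['cat', 'kitten'], ['dog']], regex_map=[('kit.*', 'cat')])
        [['cat'], ['dog']]
    """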
import utool as ut
tag_vocab = ut.unique(ut.flatten(tags_list))
alias_map = ut.odict()
if regex_map is not None:
alias_map.update(**ut.build_alias_map(regex_map, tag_vocab))
if direct_map is not None:
alias_map.update(ut.odict(direct_map))
new_tags_list = tags_list
new_tags_list = ut.alias_tags(new_tags_list, alias_map)
if regex_aug is not None:
alias_aug = ut.build_alias_map(regex_aug, tag_vocab)
aug_tags_list = ut.alias_tags(new_tags_list, alias_aug)
new_tags_list = [ut.unique(t1 + t2) for t1, t2 in zip(new_tags_list, aug_tags_list)]
unmapped = list(set(tag_vocab) - set(alias_map.keys()))
if delete_unmapped:
new_tags_list = [ut.setdiff(tags, unmapped) for tags in new_tags_list]
toreturn = None
if return_map:
toreturn = (alias_map,)
if return_unmapped:
toreturn = toreturn + (unmapped,)
if toreturn is None:
toreturn = new_tags_list
else:
toreturn = (new_tags_list,) + toreturn
return toreturn
def tag_coocurrence(tags_list):
import utool as ut
co_occur_list = []
for tags in tags_list:
for combo in ut.combinations(tags, 2):
key = tuple(sorted(combo))
co_occur_list.append(key)
co_occur = ut.dict_hist(co_occur_list, ordered=True)
# co_occur[key] += 1
#co_occur = ut.odict(co_occur)
return co_occur
def tag_hist(tags_list):
import utool as ut
return ut.dict_hist(ut.flatten(tags_list), ordered=True)
def build_alias_map(regex_map, tag_vocab):
"""
Constructs explicit mapping. Order of items in regex map matters.
Items at top are given preference.
Example:
>>> # DISABLE_DOCTEST
>>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
>>> tag_vocab = ut.flat_unique(*tags_list)
        >>> regex_map = [('t[3-4]', 'A9'), ('t0', 'a0')]
        >>> alias_map = build_alias_map(regex_map, tag_vocab)
        >>> unmapped = list(set(tag_vocab) - set(alias_map.keys()))
"""
import utool as ut
import re
alias_map = ut.odict([])
for pats, new_tag in reversed(regex_map):
pats = ut.ensure_iterable(pats)
for pat in pats:
flags = [re.match(pat, t) for t in tag_vocab]
for old_tag in ut.compress(tag_vocab, flags):
alias_map[old_tag] = new_tag
identity_map = ut.take_column(regex_map, 1)
for tag in ut.filter_Nones(identity_map):
alias_map[tag] = tag
return alias_map
def alias_tags(tags_list, alias_map):
"""
update tags to new values
Args:
tags_list (list):
        alias_map (dict): maps an old tag to its replacement (None drops the tag)
Returns:
list: updated tags
CommandLine:
python -m utool.util_tags alias_tags --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
        >>> alias_map = {'t1': 'x', 't4': None}
>>> result = alias_tags(tags_list, alias_map)
>>> print(result)
"""
def _alias_dict(tags):
tags_ = [alias_map.get(t, t) for t in tags]
return list(set([t for t in tags_ if t is not None]))
tags_list_ = [_alias_dict(tags) for tags in tags_list]
return tags_list_
# def _fix_tags(tags):
# return {six.text_type(t.lower()) for t in tags}
# tags_list_ = list(map(_fix_tags, tags_list))
# re_list = [re.compile(pat) for pat, val in alias_map]
# val_list = ut.take_column(alias_map, 0)
# def _alias_regex(tags):
# new_tags = 0
# for t in tags:
# matched = [re_.match(t) is not None for re_ in re_list]
# matched_idx = ut.where(matched)
# assert len(matched_idx) <= 1, 'more than one tag in %r matched pattern' % (tags,)
# if len(matched_idx) > 0:
# repl_tags = ut.take(val_list, matched_idx)
# new_tags.extend(repl_tags)
# else:
# new_tags.append(t)
# return new_tags
# # tags_list_ = [_alias_regex(tags) for tags in tags_list_]
# return tags_list_
def filterflags_general_tags(tags_list, has_any=None, has_all=None,
has_none=None, min_num=None, max_num=None,
any_startswith=None, any_endswith=None,
in_any=None, any_match=None, none_match=None,
logic='and', ignore_case=True):
r"""
maybe integrate into utool? Seems pretty general
Args:
tags_list (list):
has_any (None): (default = None)
has_all (None): (default = None)
min_num (None): (default = None)
max_num (None): (default = None)
Notes:
in_any should probably be ni_any
TODO: make this function more natural
CommandLine:
python -m utool.util_tags --exec-filterflags_general_tags
python -m utool.util_tags --exec-filterflags_general_tags:0 --helpx
python -m utool.util_tags --exec-filterflags_general_tags:0
python -m utool.util_tags --exec-filterflags_general_tags:0 --none_match n
python -m utool.util_tags --exec-filterflags_general_tags:0 --has_none=n,o
python -m utool.util_tags --exec-filterflags_general_tags:1
python -m utool.util_tags --exec-filterflags_general_tags:2
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P', 'o'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['q', 'v'], ['n'], ['n'], ['N']]
>>> kwargs = ut.argparse_dict(ut.get_kwdefaults2(filterflags_general_tags), type_hint=list)
>>> print('kwargs = %r' % (kwargs,))
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> print(flags)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['v'], [], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n'], ['N']]
>>> has_all = 'n'
>>> min_num = 1
>>> flags = filterflags_general_tags(tags_list, has_all=has_all, min_num=min_num)
>>> result = ut.compress(tags_list, flags)
>>> print('result = %r' % (result,))
Ignore:
>>> # ENABLE_DOCTEST
>>> from utool.util_tags import * # NOQA
>>> import utool as ut
>>> tags_list = [['vn'], ['vn', 'no'], ['P'], ['P'], ['n', 'o'], [], ['n', 'N'], ['e', 'i', 'p', 'b', 'n'], ['n'], ['n', 'nP'], ['NP']]
>>> kwargs = {
>>> 'any_endswith': 'n',
>>> 'any_match': None,
>>> 'any_startswith': 'n',
>>> 'has_all': None,
>>> 'has_any': None,
>>> 'has_none': None,
>>> 'max_num': 3,
>>> 'min_num': 1,
>>> 'none_match': ['P'],
>>> }
>>> flags = filterflags_general_tags(tags_list, **kwargs)
>>> filtered = ut.compress(tags_list, flags)
>>> result = ('result = %s' % (ut.repr2(filtered),))
result = [['vn', 'no'], ['n', 'o'], ['n', 'N'], ['n'], ['n', 'nP']]
"""
import numpy as np
import utool as ut
def _fix_tags(tags):
if ignore_case:
return set([]) if tags is None else {six.text_type(t.lower()) for t in tags}
else:
            return set([]) if tags is None else {six.text_type(t) for t in tags}
if logic is None:
logic = 'and'
logic_func = {
'and': np.logical_and,
'or': np.logical_or,
}[logic]
default_func = {
'and': np.ones,
'or': np.zeros,
}[logic]
tags_list_ = [_fix_tags(tags_) for tags_ in tags_list]
flags = default_func(len(tags_list_), dtype=np.bool)
if min_num is not None:
flags_ = [len(tags_) >= min_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if max_num is not None:
flags_ = [len(tags_) <= max_num for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_any is not None:
has_any = _fix_tags(set(ut.ensure_iterable(has_any)))
flags_ = [len(has_any.intersection(tags_)) > 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_none is not None:
has_none = _fix_tags(set(ut.ensure_iterable(has_none)))
flags_ = [len(has_none.intersection(tags_)) == 0 for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
if has_all is not None:
has_all = _fix_tags(set(ut.ensure_iterable(has_all)))
flags_ = [len(has_all.intersection(tags_)) == len(has_all) for tags_ in tags_list_]
logic_func(flags, flags_, out=flags)
def _test_item(tags_, fields, op, compare):
t_flags = [any([compare(t, f) for f in fields]) for t in tags_]
num_passed = sum(t_flags)
flag = op(num_passed, 0)
return flag
def _flag_tags(tags_list, fields, op, compare):
flags = [_test_item(tags_, fields, op, compare) for tags_ in tags_list_]
return flags
def _exec_filter(flags, tags_list, fields, op, compare):
if fields is not None:
fields = ut.ensure_iterable(fields)
if ignore_case:
fields = [f.lower() for f in fields]
flags_ = _flag_tags(tags_list, fields, op, compare)
logic_func(flags, flags_, out=flags)
return flags
flags = _exec_filter(
flags, tags_list, any_startswith,
operator.gt, six.text_type.startswith)
flags = _exec_filter(
flags, tags_list, in_any,
operator.gt, operator.contains)
flags = _exec_filter(
flags, tags_list, any_endswith,
operator.gt, six.text_type.endswith)
flags = _exec_filter(
flags, tags_list, any_match,
operator.gt, lambda t, f: re.match(f, t))
flags = _exec_filter(
flags, tags_list, none_match,
operator.eq, lambda t, f: re.match(f, t))
return flags
if __name__ == '__main__':
r"""
CommandLine:
python -m utool.util_tags
python -m utool.util_tags --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
[
"utool.dict_hist",
"utool.doctest_funcs",
"utool.compress",
"utool.ensure_iterable",
"utool.take_column",
"utool.flatten",
"utool.setdiff",
"utool.unique",
"re.match",
"six.text_type",
"utool.filter_Nones",
"utool.combinations",
"utool.odict",
"utool.build_alias_map",
"utool.alias_tags",
"multiprocessing.freeze_support",
"utool.util_inject.inject2"
] |
[((196, 225), 'utool.util_inject.inject2', 'util_inject.inject2', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'from utool import util_inject\n'), ((489, 499), 'utool.odict', 'ut.odict', ([], {}), '()\n', (497, 499), True, 'import utool as ut\n'), ((728, 767), 'utool.alias_tags', 'ut.alias_tags', (['new_tags_list', 'alias_map'], {}), '(new_tags_list, alias_map)\n', (741, 767), True, 'import utool as ut\n'), ((1702, 1743), 'utool.dict_hist', 'ut.dict_hist', (['co_occur_list'], {'ordered': '(True)'}), '(co_occur_list, ordered=True)\n', (1714, 1743), True, 'import utool as ut\n'), ((2450, 2462), 'utool.odict', 'ut.odict', (['[]'], {}), '([])\n', (2458, 2462), True, 'import utool as ut\n'), ((2754, 2782), 'utool.take_column', 'ut.take_column', (['regex_map', '(1)'], {}), '(regex_map, 1)\n', (2768, 2782), True, 'import utool as ut\n'), ((2798, 2827), 'utool.filter_Nones', 'ut.filter_Nones', (['identity_map'], {}), '(identity_map)\n', (2813, 2827), True, 'import utool as ut\n'), ((10776, 10808), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (10806, 10808), False, 'import multiprocessing\n'), ((10857, 10875), 'utool.doctest_funcs', 'ut.doctest_funcs', ([], {}), '()\n', (10873, 10875), True, 'import utool as ut\n'), ((450, 471), 'utool.flatten', 'ut.flatten', (['tags_list'], {}), '(tags_list)\n', (460, 471), True, 'import utool as ut\n'), ((819, 859), 'utool.build_alias_map', 'ut.build_alias_map', (['regex_aug', 'tag_vocab'], {}), '(regex_aug, tag_vocab)\n', (837, 859), True, 'import utool as ut\n'), ((884, 923), 'utool.alias_tags', 'ut.alias_tags', (['new_tags_list', 'alias_aug'], {}), '(new_tags_list, alias_aug)\n', (897, 923), True, 'import utool as ut\n'), ((1584, 1608), 'utool.combinations', 'ut.combinations', (['tags', '(2)'], {}), '(tags, 2)\n', (1599, 1608), True, 'import utool as ut\n'), ((1905, 1926), 'utool.flatten', 'ut.flatten', (['tags_list'], {}), '(tags_list)\n', (1915, 1926), True, 'import utool as ut\n'), ((2524, 2548), 'utool.ensure_iterable', 'ut.ensure_iterable', (['pats'], {}), '(pats)\n', (2542, 2548), True, 'import utool as ut\n'), ((655, 675), 'utool.odict', 'ut.odict', (['direct_map'], {}), '(direct_map)\n', (663, 675), True, 'import utool as ut\n'), ((949, 967), 'utool.unique', 'ut.unique', (['(t1 + t2)'], {}), '(t1 + t2)\n', (958, 967), True, 'import utool as ut\n'), ((1127, 1153), 'utool.setdiff', 'ut.setdiff', (['tags', 'unmapped'], {}), '(tags, unmapped)\n', (1137, 1153), True, 'import utool as ut\n'), ((2659, 2688), 'utool.compress', 'ut.compress', (['tag_vocab', 'flags'], {}), '(tag_vocab, flags)\n', (2670, 2688), True, 'import utool as ut\n'), ((9783, 9809), 'utool.ensure_iterable', 'ut.ensure_iterable', (['fields'], {}), '(fields)\n', (9801, 9809), True, 'import utool as ut\n'), ((10452, 10466), 're.match', 're.match', (['f', 't'], {}), '(f, t)\n', (10460, 10466), False, 'import re\n'), ((10567, 10581), 're.match', 're.match', (['f', 't'], {}), '(f, t)\n', (10575, 10581), False, 'import re\n'), ((557, 597), 'utool.build_alias_map', 'ut.build_alias_map', (['regex_map', 'tag_vocab'], {}), '(regex_map, tag_vocab)\n', (575, 597), True, 'import utool as ut\n'), ((2595, 2611), 're.match', 're.match', (['pat', 't'], {}), '(pat, t)\n', (2603, 2611), False, 'import re\n'), ((8702, 8729), 'utool.ensure_iterable', 'ut.ensure_iterable', (['has_any'], {}), '(has_any)\n', (8720, 8729), True, 'import utool as ut\n'), ((8920, 8948), 'utool.ensure_iterable', 'ut.ensure_iterable', (['has_none'], {}), '(has_none)\n', (8938, 8948), 
True, 'import utool as ut\n'), ((9139, 9166), 'utool.ensure_iterable', 'ut.ensure_iterable', (['has_all'], {}), '(has_all)\n', (9157, 9166), True, 'import utool as ut\n'), ((7994, 8009), 'six.text_type', 'six.text_type', ([], {}), '()\n', (8007, 8009), False, 'import six\n')]
|
import mcpi.minecraft as minecraft
from flask import render_template
from flask import Flask
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def pyminemapIndex():
return render_template('index.html')
@app.route('/list')
def pyminemapList():
try:
positionstexte = []
mc = minecraft.Minecraft.create()
playerIds = mc.getPlayerEntityIds()
for playerId in playerIds:
position = mc.entity.getTilePos(playerId)
positionstexte.append("Spielerposition: x=" + str(position.x) + " y=" + str(position.y) + " z=" + str(position.z))
return render_template('list.html', positionstexte=positionstexte)
except (ConnectionResetError, ConnectionRefusedError):
        return render_template('list.html', positionstexte=None)
@app.route('/map')
def pyminemapMap():
return render_template('map.html')
@app.route('/api/players/positions', methods = ['GET'])
def getPlayerPositions():
try:
positions = []
mc = minecraft.Minecraft.create()
for playerId in mc.getPlayerEntityIds():
playerPosition = mc.entity.getPos(playerId)
position = {
'playerId': playerId,
'x': playerPosition.x,
'y': playerPosition.y,
'z': playerPosition.z
}
positions.append(position)
return jsonify(positions)
except (ConnectionResetError, ConnectionRefusedError):
return jsonify([])
@app.route('/api/worldDimensions', methods = ['GET'])
def getWorldDimensions():
# see: https://www.stuffaboutcode.com/p/minecraft-api-reference.html
try:
mc = minecraft.Minecraft.create()
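        # Walk outward along the x and z axes until hitting block id 95
        # (invisible bedrock in mcpi, presumably the world border) to find
        # the world edges; the world is assumed to be 256 blocks across.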
x = 0
while mc.getBlock(x,0,0) != 95:
x += 1
z = 0
while mc.getBlock(0,0,z) != 95:
z += 1
maxX = x - 1
minX = maxX - 256
maxZ = z - 1
minZ = maxZ - 256
return jsonify({'minX':minX,'maxX':maxX,'minZ':minZ,'maxZ':maxZ})
except (ConnectionResetError, ConnectionRefusedError):
return jsonify(None)
|
[
"mcpi.minecraft.Minecraft.create",
"flask.jsonify",
"flask.Flask",
"flask.render_template"
] |
[((125, 140), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'from flask import Flask\n'), ((191, 220), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (206, 220), False, 'from flask import render_template\n'), ((851, 878), 'flask.render_template', 'render_template', (['"""map.html"""'], {}), "('map.html')\n", (866, 878), False, 'from flask import render_template\n'), ((313, 341), 'mcpi.minecraft.Minecraft.create', 'minecraft.Minecraft.create', ([], {}), '()\n', (339, 341), True, 'import mcpi.minecraft as minecraft\n'), ((617, 676), 'flask.render_template', 'render_template', (['"""list.html"""'], {'positionstexte': 'positionstexte'}), "('list.html', positionstexte=positionstexte)\n", (632, 676), False, 'from flask import render_template\n'), ((1011, 1039), 'mcpi.minecraft.Minecraft.create', 'minecraft.Minecraft.create', ([], {}), '()\n', (1037, 1039), True, 'import mcpi.minecraft as minecraft\n'), ((1392, 1410), 'flask.jsonify', 'jsonify', (['positions'], {}), '(positions)\n', (1399, 1410), False, 'from flask import jsonify\n'), ((1673, 1701), 'mcpi.minecraft.Minecraft.create', 'minecraft.Minecraft.create', ([], {}), '()\n', (1699, 1701), True, 'import mcpi.minecraft as minecraft\n'), ((1957, 2022), 'flask.jsonify', 'jsonify', (["{'minX': minX, 'maxX': maxX, 'minZ': minZ, 'maxZ': maxZ}"], {}), "({'minX': minX, 'maxX': maxX, 'minZ': minZ, 'maxZ': maxZ})\n", (1964, 2022), False, 'from flask import jsonify\n'), ((751, 799), 'flask.render_template', 'render_template', (['"""list.html"""'], {'positionstext': 'None'}), "('list.html', positionstext=None)\n", (766, 799), False, 'from flask import render_template\n'), ((1485, 1496), 'flask.jsonify', 'jsonify', (['[]'], {}), '([])\n', (1492, 1496), False, 'from flask import jsonify\n'), ((2090, 2103), 'flask.jsonify', 'jsonify', (['None'], {}), '(None)\n', (2097, 2103), False, 'from flask import jsonify\n')]
|
from __future__ import unicode_literals, print_function, division
from collections import Counter
from nltk.tokenize import TweetTokenizer
import cPickle as cp
import io
import numpy as np
PAD_TOKEN = 0
SOS_TOKEN = 1
EOS_TOKEN = 2
VOCAB_SIZE = 10000
class Lang(object):
def __init__(self, name, lowercase=True, tokenizer=None):
self.name = name
self.word_count = Counter()
self.tokenizer = tokenizer
self.lowercase = lowercase # To lowercase all words encountered
self.embedding_matrix = None
self.PAD_TOK_VEC = None
self.UNK_TOK_VEC = None
def tokenize_sent(self, sentence):
if self.tokenizer is None:
return sentence.split(u' ')
else:
return self.tokenizer.tokenize(sentence)
def add_sentence(self, sentence):
for w in self.tokenize_sent(sentence):
if self.lowercase:
w = w.lower()
self.word_count[w] += 1
def generate_vocab(self):
vocab = self.word_count.most_common(VOCAB_SIZE)
self.word2ix = {"<PAD>": PAD_TOKEN, "<SOS>": SOS_TOKEN, "<EOS>": EOS_TOKEN}
for w, _ in vocab:
self.word2ix[w] = len(self.word2ix)
self.ix2word = {self.word2ix[w]: w for w in self.word2ix}
def add_word(self, word, embedding=None):
assert word not in self.word2ix, "Already present in vocab"
self.word2ix[word] = len(self.word2ix)
self.ix2word[self.word2ix[word]] = word
if self.embedding_matrix is not None:
_, n_embed = self.embedding_matrix.shape
embedding = embedding if embedding is not None else np.random.normal(0, 1, (1, n_embed))
self.embedding_matrix = np.concatenate([self.embedding_matrix, embedding], axis=0)
def __getitem__(self, item):
        if isinstance(item, (str, unicode)):
            # Coerce to unicode so lookups behave consistently (Python 2)
            item = unicode(item)
if self.lowercase:
item = item.lower()
return self.word2ix[item] if item in self.word2ix else len(self.word2ix)
else:
return self.ix2word[item] if item in self.ix2word else u"<UNK>"
def __len__(self):
assert len(self.ix2word) == len(self.word2ix), "Index not built using generate_vocab and add_word"
return len(self.ix2word)
def save_file(self, filename):
cp.dump(self.__dict__, open(filename, 'wb'))
def load_file(self, filename):
        self.__dict__ = cp.load(open(filename, 'rb'))  # binary mode, matching how the file was written
def get_embedding_matrix(self):
if self.embedding_matrix is None:
return None
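        # Row layout of the stacked matrix: [PAD] + vocab rows + [UNK]; the
        # out-of-vocabulary index len(word2ix) returned by __getitem__ maps
        # onto the final UNK row.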
_embedding_matrix = np.concatenate([self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC], axis=0)
return _embedding_matrix
def build_vocab(filename, l):
with io.open(filename, encoding='utf-8', mode='r', errors='replace') as f:
for line in f:
line = line.strip().split('\t')
l.add_sentence(line[0])
l.add_sentence(line[1])
l.generate_vocab()
return l
def build_embedding_matrix_from_gensim(l_en, gensim_model, embedding_dim=300):
l_en.PAD_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.UNK_TOK_VEC = np.random.normal(0, 1, (1, embedding_dim))
l_en.embedding_matrix = np.random.normal(0, 1, (len(l_en) - 1, embedding_dim)) # PAD TOKEN ENCODED SEPARATELY
for w in l_en.word2ix:
if l_en.word2ix[w] == PAD_TOKEN:
# PAD TOKEN ENCODED SEPARATELY
continue
if w in gensim_model.wv:
l_en.embedding_matrix[l_en.word2ix[w] - 1] = gensim_model.wv[w]
return l_en
if __name__ == "__main__":
# ROOT_DIR = "/home/bass/DataDir/RTE/"
ROOT_DIR = ""
DATA_FILE = ROOT_DIR + "data/train.txt"
# DATA_FILE ="data/tiny_eng-fra.txt"
l_en = Lang('en', tokenizer=TweetTokenizer())
l_en = build_vocab(DATA_FILE, l_en)
save_file_name = ROOT_DIR + 'data/vocab.pkl'
l_en.save_file(save_file_name)
|
[
"nltk.tokenize.TweetTokenizer",
"numpy.random.normal",
"io.open",
"collections.Counter",
"numpy.concatenate"
] |
[((3179, 3221), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, embedding_dim)'], {}), '(0, 1, (1, embedding_dim))\n', (3195, 3221), True, 'import numpy as np\n'), ((3245, 3287), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, embedding_dim)'], {}), '(0, 1, (1, embedding_dim))\n', (3261, 3287), True, 'import numpy as np\n'), ((389, 398), 'collections.Counter', 'Counter', ([], {}), '()\n', (396, 398), False, 'from collections import Counter\n'), ((2668, 2755), 'numpy.concatenate', 'np.concatenate', (['[self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC]'], {'axis': '(0)'}), '([self.PAD_TOK_VEC, self.embedding_matrix, self.UNK_TOK_VEC],\n axis=0)\n', (2682, 2755), True, 'import numpy as np\n'), ((2826, 2889), 'io.open', 'io.open', (['filename'], {'encoding': '"""utf-8"""', 'mode': '"""r"""', 'errors': '"""replace"""'}), "(filename, encoding='utf-8', mode='r', errors='replace')\n", (2833, 2889), False, 'import io\n'), ((1731, 1789), 'numpy.concatenate', 'np.concatenate', (['[self.embedding_matrix, embedding]'], {'axis': '(0)'}), '([self.embedding_matrix, embedding], axis=0)\n', (1745, 1789), True, 'import numpy as np\n'), ((3868, 3884), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (3882, 3884), False, 'from nltk.tokenize import TweetTokenizer\n'), ((1658, 1694), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1, n_embed)'], {}), '(0, 1, (1, n_embed))\n', (1674, 1694), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
A program that carries out mini-batch k-means clustering on the MovieLens dataset"""
from __future__ import print_function, division, absolute_import, unicode_literals
from decimal import Decimal
#other stuff we need to import
import csv
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics.cluster import v_measure_score
from math import sqrt
def distance(user_id, i):
    # Euclidean distance between two users, computed only over the movies
    # that both users have rated (reads the global user_movie_matrix).
    dist = Decimal(0)
    for j in range(len(user_movie_matrix[0])):
        if user_movie_matrix[user_id][j] != 0 and user_movie_matrix[i][j] != 0:
            dist += Decimal(pow(Decimal(user_movie_matrix[user_id][j] - user_movie_matrix[i][j]), 2))
    return sqrt(dist)
#beginning of main program
#read in u1.base
training_file = open('ml-100k/u1.base','r')
rows = training_file.readlines()
training_file.close()
training_data=[]
for row in rows:
    fields = row.split('\t')  # renamed from `list` to avoid shadowing the builtin
    int_list = [int(item) for item in fields]
    training_data.append(int_list)
#read in u1.test
test_file = open('ml-100k/u1.test','r')
rows = test_file.readlines()
test_file.close()
test_data=[]
for row in rows:
    fields = row.split('\t')
    int_list = [int(item) for item in fields]
    test_data.append(int_list)
print(len(training_data))
print(len(test_data))
user_ids = [row[0] for row in training_data]
user_ids = set(user_ids)
user_ids = sorted(user_ids)
number_of_users = len(user_ids)
#print(user_ids)
print(number_of_users)
movie_ids = [row[1] for row in training_data]
movie_ids = set(movie_ids)
movie_ids = sorted(movie_ids)
number_of_movies = len(movie_ids)
#print(movie_ids)
print(number_of_movies)
#create a user movie matrix
#pre-processing could be in two ways :
# a. either ignore ratings <= 3 so rating of 4 or 5 = 1 in matrix and <=3 is 0
# b. calculate a mean for each user
# c. or simply give 1 if rated and 0 if not rated
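# The matrix below uses a variant of (c): the raw rating is stored directly,
# with 0 meaning "not rated".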
user_movie_matrix = np.zeros((number_of_users,number_of_movies))
#user_movie_matrix.fill(0.001)
for row in training_data:
user_id = user_ids.index(row[0])
movie_id = movie_ids.index(row[1])
user_movie_matrix[user_id,movie_id] = row[2]
#user_movie_matrix[user_id,movie_id] = row[2]
#print(user_movie_matrix[0])
#print(user_movie_matrix[942][1])
#print(user_movie_matrix[942][8])
#Normalizing user-movie matrix
#Additional step
'''for i in range(number_of_users):
tempList = []
tempList = user_movie_matrix[i].tolist()
print('templist')
print(tempList)
minVal = min(tempList)
maxVal = max(tempList)
for j in tempList:
j=Decimal(Decimal(j-minVal)/Decimal(maxVal-minVal))
j=j*5
user_movie_matrix[i] = tempList'''
print(user_movie_matrix)
print(len(user_movie_matrix))
print(len(user_movie_matrix[0]))
#print(user_movie_matrix)
#initialize and carry out clustering
K=50
#km = KMeans(n_clusters = K)
#km.fit(user_movie_matrix)
#km = KMeans(n_clusters = K)
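# MiniBatchKMeans fits on small random batches, trading a little clustering
# quality for much faster training on the large user-movie matrix.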
km = MiniBatchKMeans(n_clusters = K)
km.fit(user_movie_matrix)
#labels
labels = km.labels_
print(str(labels))
#find which cluster each user is in
cluster_num_users=np.zeros(K)
#maintain a list of users per cluster
cluster_list_users=[]
for i in range(K):
cluster_list_users.append([])
print(cluster_list_users)
prediction = km.predict(user_movie_matrix)
print('\n--------Which cluster each user is in--------')
print('{:<15}\t{}'.format('User','Cluster'))
for i in range(len(prediction)):
print('{:<15}\t{}'.format(user_ids[i],prediction[i]))
cluster_num_users[prediction[i]]+=1
list_of_users = []
list_of_users = cluster_list_users[prediction[i]]
list_of_users.append(i)
cluster_list_users[prediction[i]]=list_of_users
f=open('cluster_num_users','w')
for i in range(K):
f.write(str(i))
f.write('\t')
f.write(str(cluster_num_users[i]))
f.write('\n')
f.close()
print(cluster_num_users)
print(cluster_list_users)
#Number of users in each cluster
print('\n--------Number of users in a cluster--------')
for i in range(K):
print('{:<15}\t{}'.format(i,cluster_num_users[i]))
print(sum(cluster_num_users))
print('The total distance of the solution found is',sum((km.transform(user_movie_matrix)).min(axis=1)))
#predicting rating for a movie by a user
print('Number of test data ')
print(len(test_data))
accuracy=0
root_mean_accuracy=0
weighted_sum=0
sum_of_weights=0
for row in test_data:
print('Testing for user and movie in test : ' + str(row))
movie = row[1]
rating = row[2]
#print('Cluster for this user : ')
user = row[0]
#print(user)
user_id = user_ids.index(user)
#print(user_id)
#print(labels)
cluster_index = labels[user_id]
#print(cluster_index)
print('Other user ids in this cluster : ')
print(cluster_num_users[cluster_index])
#print(len(cluster_list_users[cluster_index]))
other_user_ids_in_same_cluster=cluster_list_users[cluster_index]
print(other_user_ids_in_same_cluster)
#print('Have they rated movie ')
#print(movie)
if movie in movie_ids:
movie_id=movie_ids.index(movie)
else:
continue
    number_of_users_who_rated_movie=0
    sum_total_rating=0
    # Reset the weighted-average accumulators for every test row; without this
    # reset the prediction would keep accumulating ratings from all previous
    # test rows.
    weighted_sum=0
    sum_of_weights=0
for i in other_user_ids_in_same_cluster:
if user_movie_matrix[i][movie_id] > 0:
#print(i)
#print('index has rated movie ')
#print(movie_id)
#print(user_movie_matrix[i][movie_id])
if(Decimal(round(distance(user_id,i),2)) > Decimal(0.0)):
weight = Decimal(1/(distance(user_id,i)))
weighted_sum += weight*Decimal(user_movie_matrix[i][movie_id])
sum_of_weights += Decimal(weight)
number_of_users_who_rated_movie += 1
sum_total_rating += user_movie_matrix[i][movie_id]
print('Predicted Rating for this movie :')
#print(sum_total_rating)
if(number_of_users_who_rated_movie > 0 and sum_of_weights > 0):
print(weighted_sum)
print(sum_of_weights)
rating_predicted = weighted_sum/sum_of_weights
print(rating_predicted)
print(rating)
#rating_predicted = round(rating_predicted)
root_mean_accuracy += Decimal(pow(Decimal(rating_predicted-rating),2))
if abs(Decimal(rating_predicted - rating)) <= Decimal(1.0):
print("HERE")
accuracy += 1
'''elif Decimal(rating - rating_predicted) < Decimal(0.5):
print("HERE")
accuracy += 1'''
print(accuracy)
print('% accuracy')
print(accuracy*100/len(test_data))
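# Root-mean-square error over the whole test set (rows that were skipped or
# received no prediction still count in the denominator).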
root_mean_accuracy = root_mean_accuracy/len(test_data)
root_mean_accuracy = sqrt(root_mean_accuracy)
print(root_mean_accuracy)
|
[
"sklearn.cluster.MiniBatchKMeans",
"numpy.zeros"
] |
[((1958, 2003), 'numpy.zeros', 'np.zeros', (['(number_of_users, number_of_movies)'], {}), '((number_of_users, number_of_movies))\n', (1966, 2003), True, 'import numpy as np\n'), ((2991, 3020), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'K'}), '(n_clusters=K)\n', (3006, 3020), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((3160, 3171), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (3168, 3171), True, 'import numpy as np\n')]
|
from django.db import models
class Sale(models.Model):
created = models.DateTimeField()
def __str__(self):
return f'[{self.id}] {self.created:%Y-%m-%d}'
class SaleWithDrilldown(Sale):
"""
We will use this model in the admin to illustrate the difference
between date hierarchy with and without drilldown.
"""
class Meta:
proxy = True
verbose_name = 'Sale model with default drilldown'
class SaleWithCustomDrilldown(Sale):
class Meta:
proxy = True
verbose_name = 'Sale model with custom drilldown'
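# Hedged sketch (not part of this file) of the kind of admin registration
# these proxy models exist for; ModelAdmin.date_hierarchy is the standard
# Django hook, while the custom-drilldown variant would plug into whatever
# hook the accompanying admin code defines:
#
#   from django.contrib import admin
#   from .models import SaleWithDrilldown
#
#   @admin.register(SaleWithDrilldown)
#   class SaleWithDrilldownAdmin(admin.ModelAdmin):
#       date_hierarchy = 'created'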
|
[
"django.db.models.DateTimeField"
] |
[((71, 93), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (91, 93), False, 'from django.db import models\n')]
|
import json # note: ujson fails this test due to float equality
import copy
import numpy as np
import pytest
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(5, start=-2),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Tuple((Discrete(5), Discrete(2, start=6), Discrete(2, start=-4))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_roundtripping(space):
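    # Draw two samples, push them through real JSON text, and verify the
    # round trip reproduces them exactly (ujson fails this, see the import).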
sample_1 = space.sample()
sample_2 = space.sample()
assert space.contains(sample_1)
assert space.contains(sample_2)
json_rep = space.to_jsonable([sample_1, sample_2])
json_roundtripped = json.loads(json.dumps(json_rep))
samples_after_roundtrip = space.from_jsonable(json_roundtripped)
sample_1_prime, sample_2_prime = samples_after_roundtrip
s1 = space.to_jsonable([sample_1])
s1p = space.to_jsonable([sample_1_prime])
s2 = space.to_jsonable([sample_2])
s2p = space.to_jsonable([sample_2_prime])
assert s1 == s1p, "Expected {} to equal {}".format(s1, s1p)
assert s2 == s2p, "Expected {} to equal {}".format(s2, s2p)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(5, start=-2),
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=-np.inf, high=np.inf, shape=(1, 3)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Tuple((Discrete(5), Discrete(2), Discrete(2, start=-6))),
MultiDiscrete([2, 2, 100]),
MultiBinary(6),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_equality(space):
space1 = space
space2 = copy.copy(space)
assert space1 == space2, "Expected {} to equal {}".format(space1, space2)
@pytest.mark.parametrize(
"spaces",
[
(Discrete(3), Discrete(4)),
(Discrete(3), Discrete(3, start=-1)),
(MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
(MultiBinary(8), MultiBinary(7)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=np.array([-10, 0]), high=np.array([10, 9]), dtype=np.float32),
),
(
Box(low=-np.inf, high=0.0, shape=(2, 1)),
Box(low=0.0, high=np.inf, shape=(2, 1)),
),
(Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(1), Discrete(10)])),
(
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5, start=7), Discrete(10)]),
),
(Dict({"position": Discrete(5)}), Dict({"position": Discrete(4)})),
(Dict({"position": Discrete(5)}), Dict({"speed": Discrete(5)})),
],
)
def test_inequality(spaces):
space1, space2 = spaces
assert space1 != space2, "Expected {} != {}".format(space1, space2)
@pytest.mark.parametrize(
"space",
[
Discrete(5),
Discrete(8, start=-20),
Box(low=0, high=255, shape=(2,), dtype="uint8"),
Box(low=-np.inf, high=np.inf, shape=(3, 3)),
Box(low=1.0, high=np.inf, shape=(3, 3)),
Box(low=-np.inf, high=2.0, shape=(3, 3)),
],
)
def test_sample(space):
space.seed(0)
n_trials = 100
samples = np.array([space.sample() for _ in range(n_trials)])
expected_mean = 0.0
if isinstance(space, Box):
if space.is_bounded():
expected_mean = (space.high + space.low) / 2
elif space.is_bounded("below"):
expected_mean = 1 + space.low
elif space.is_bounded("above"):
expected_mean = -1 + space.high
else:
expected_mean = 0.0
elif isinstance(space, Discrete):
expected_mean = space.start + space.n / 2
else:
raise NotImplementedError
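    # Loose statistical check: the empirical mean must fall within three
    # sample standard deviations of the analytically expected mean.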
np.testing.assert_allclose(expected_mean, samples.mean(), atol=3.0 * samples.std())
@pytest.mark.parametrize(
"spaces",
[
(Discrete(5), MultiBinary(5)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
MultiDiscrete([2, 2, 8]),
),
(
Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
),
(Dict({"position": Discrete(5)}), Tuple([Discrete(5)])),
(Dict({"position": Discrete(5)}), Discrete(5)),
(Tuple((Discrete(5),)), Discrete(5)),
(
Box(low=np.array([-np.inf, 0.0]), high=np.array([0.0, np.inf])),
Box(low=np.array([-np.inf, 1.0]), high=np.array([0.0, np.inf])),
),
],
)
def test_class_inequality(spaces):
assert spaces[0] == spaces[0]
assert spaces[1] == spaces[1]
assert spaces[0] != spaces[1]
assert spaces[1] != spaces[0]
@pytest.mark.parametrize(
"space_fn",
[
lambda: Dict(space1="abc"),
lambda: Dict({"space1": "abc"}),
lambda: Tuple(["abc"]),
],
)
def test_bad_space_calls(space_fn):
with pytest.raises(AssertionError):
space_fn()
def test_seed_Dict():
test_space = Dict(
{
"a": Box(low=0, high=1, shape=(3, 3)),
"b": Dict(
{
"b_1": Box(low=-100, high=100, shape=(2,)),
"b_2": Box(low=-1, high=1, shape=(2,)),
}
),
"c": Discrete(5),
}
)
seed_dict = {
"a": 0,
"b": {
"b_1": 1,
"b_2": 2,
},
"c": 3,
}
test_space.seed(seed_dict)
# "Unpack" the dict sub-spaces into individual spaces
a = Box(low=0, high=1, shape=(3, 3))
a.seed(0)
b_1 = Box(low=-100, high=100, shape=(2,))
b_1.seed(1)
b_2 = Box(low=-1, high=1, shape=(2,))
b_2.seed(2)
c = Discrete(5)
c.seed(3)
for i in range(10):
test_s = test_space.sample()
a_s = a.sample()
assert (test_s["a"] == a_s).all()
b_1_s = b_1.sample()
assert (test_s["b"]["b_1"] == b_1_s).all()
b_2_s = b_2.sample()
assert (test_s["b"]["b_2"] == b_2_s).all()
c_s = c.sample()
assert test_s["c"] == c_s
def test_box_dtype_check():
# Related Issues:
# https://github.com/openai/gym/issues/2357
# https://github.com/openai/gym/issues/2298
space = Box(0, 2, tuple(), dtype=np.float32)
# casting will match the correct type
assert space.contains(0.5)
# float64 is not in float32 space
assert not space.contains(np.array(0.5))
assert not space.contains(np.array(1))
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(3, start=-4),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_returns_list(space):
def assert_integer_list(seed):
assert isinstance(seed, list)
assert len(seed) >= 1
assert all([isinstance(s, int) for s in seed])
assert_integer_list(space.seed(None))
assert_integer_list(space.seed(0))
def convert_sample_hashable(sample):
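    # Recursively convert numpy arrays, sequences, and dicts into nested
    # tuples so that samples become hashable and directly comparable.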
if isinstance(sample, np.ndarray):
return tuple(sample.tolist())
if isinstance(sample, (list, tuple)):
return tuple(convert_sample_hashable(s) for s in sample)
if isinstance(sample, dict):
return tuple(
(key, convert_sample_hashable(value)) for key, value in sample.items()
)
return sample
def sample_equal(sample1, sample2):
return convert_sample_hashable(sample1) == convert_sample_hashable(sample2)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(3, start=-4),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_reproducibility(space):
space1 = space
space2 = copy.deepcopy(space)
space1.seed(None)
space2.seed(None)
assert space1.seed(0) == space2.seed(0)
assert sample_equal(space1.sample(), space2.sample())
@pytest.mark.parametrize(
"space",
[
Tuple([Discrete(100), Discrete(100)]),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Discrete(5, start=10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_subspace_incorrelated(space):
subspaces = space.spaces if isinstance(space, Tuple) else space.spaces.values()
space.seed(0)
states = [
convert_sample_hashable(subspace.np_random.bit_generator.state)
for subspace in subspaces
]
assert len(states) == len(set(states))
def test_multidiscrete_as_tuple():
# 1D multi-discrete
space = MultiDiscrete([3, 4, 5])
assert space.shape == (3,)
assert space[0] == Discrete(3)
assert space[0:1] == MultiDiscrete([3])
assert space[0:2] == MultiDiscrete([3, 4])
assert space[:] == space and space[:] is not space
assert len(space) == 3
# 2D multi-discrete
space = MultiDiscrete([[3, 4, 5], [6, 7, 8]])
assert space.shape == (2, 3)
assert space[0, 1] == Discrete(4)
assert space[0] == MultiDiscrete([3, 4, 5])
assert space[0:1] == MultiDiscrete([[3, 4, 5]])
assert space[0:2, :] == MultiDiscrete([[3, 4, 5], [6, 7, 8]])
assert space[:, 0:1] == MultiDiscrete([[3], [6]])
assert space[0:2, 0:2] == MultiDiscrete([[3, 4], [6, 7]])
assert space[:] == space and space[:] is not space
assert space[:, :] == space and space[:, :] is not space
def test_multidiscrete_subspace_reproducibility():
# 1D multi-discrete
space = MultiDiscrete([100, 200, 300])
space.seed(None)
assert sample_equal(space[0].sample(), space[0].sample())
assert sample_equal(space[0:1].sample(), space[0:1].sample())
assert sample_equal(space[0:2].sample(), space[0:2].sample())
assert sample_equal(space[:].sample(), space[:].sample())
assert sample_equal(space[:].sample(), space.sample())
# 2D multi-discrete
space = MultiDiscrete([[300, 400, 500], [600, 700, 800]])
space.seed(None)
assert sample_equal(space[0, 1].sample(), space[0, 1].sample())
assert sample_equal(space[0].sample(), space[0].sample())
assert sample_equal(space[0:1].sample(), space[0:1].sample())
assert sample_equal(space[0:2, :].sample(), space[0:2, :].sample())
assert sample_equal(space[:, 0:1].sample(), space[:, 0:1].sample())
assert sample_equal(space[0:2, 0:2].sample(), space[0:2, 0:2].sample())
assert sample_equal(space[:].sample(), space[:].sample())
assert sample_equal(space[:, :].sample(), space[:, :].sample())
assert sample_equal(space[:, :].sample(), space.sample())
def test_space_legacy_state_pickling():
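    # Simulate unpickling a Discrete space saved by an older gym version;
    # __setstate__ must map the legacy keys onto the current attributes.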
legacy_state = {
"shape": (
1,
2,
3,
),
"dtype": np.int64,
"np_random": np.random.default_rng(),
"n": 3,
}
space = Discrete(1)
space.__setstate__(legacy_state)
assert space.shape == legacy_state["shape"]
assert space._shape == legacy_state["shape"]
assert space.np_random == legacy_state["np_random"]
assert space._np_random == legacy_state["np_random"]
assert space.n == 3
assert space.dtype == legacy_state["dtype"]
|
[
"gym.spaces.MultiBinary",
"copy.deepcopy",
"gym.spaces.Discrete",
"copy.copy",
"json.dumps",
"gym.spaces.MultiDiscrete",
"numpy.random.default_rng",
"pytest.raises",
"numpy.array",
"gym.spaces.Box",
"gym.spaces.Tuple",
"gym.spaces.Dict"
] |
[((2604, 2620), 'copy.copy', 'copy.copy', (['space'], {}), '(space)\n', (2613, 2620), False, 'import copy\n'), ((6518, 6550), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(3, 3)'}), '(low=0, high=1, shape=(3, 3))\n', (6521, 6550), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6575, 6610), 'gym.spaces.Box', 'Box', ([], {'low': '(-100)', 'high': '(100)', 'shape': '(2,)'}), '(low=-100, high=100, shape=(2,))\n', (6578, 6610), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6637, 6668), 'gym.spaces.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(2,)'}), '(low=-1, high=1, shape=(2,))\n', (6640, 6668), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6693, 6704), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (6701, 6704), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9745, 9765), 'copy.deepcopy', 'copy.deepcopy', (['space'], {}), '(space)\n', (9758, 9765), False, 'import copy\n'), ((10956, 10980), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (10969, 10980), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11258, 11295), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5], [6, 7, 8]]'], {}), '([[3, 4, 5], [6, 7, 8]])\n', (11271, 11295), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11855, 11885), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[100, 200, 300]'], {}), '([100, 200, 300])\n', (11868, 11885), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((12260, 12309), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[300, 400, 500], [600, 700, 800]]'], {}), '([[300, 400, 500], [600, 700, 800]])\n', (12273, 12309), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((13185, 13196), 'gym.spaces.Discrete', 'Discrete', (['(1)'], {}), '(1)\n', (13193, 13196), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1232, 1252), 'json.dumps', 'json.dumps', (['json_rep'], {}), '(json_rep)\n', (1242, 1252), False, 'import json\n'), ((245, 256), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (253, 256), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((266, 287), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(-2)'}), '(5, start=-2)\n', (274, 287), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((297, 336), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (300, 336), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((688, 714), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (701, 714), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((724, 739), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (735, 739), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1739, 1750), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (1747, 1750), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1760, 1781), 
'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(-2)'}), '(5, start=-2)\n', (1768, 1781), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1871, 1914), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(1, 3)'}), '(low=-np.inf, high=np.inf, shape=(1, 3))\n', (1874, 1914), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2257, 2283), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (2270, 2283), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2293, 2307), 'gym.spaces.MultiBinary', 'MultiBinary', (['(6)'], {}), '(6)\n', (2304, 2307), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3801, 3812), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3809, 3812), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3822, 3844), 'gym.spaces.Discrete', 'Discrete', (['(8)'], {'start': '(-20)'}), '(8, start=-20)\n', (3830, 3844), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3854, 3901), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(2,)', 'dtype': '"""uint8"""'}), "(low=0, high=255, shape=(2,), dtype='uint8')\n", (3857, 3901), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3911, 3954), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(3, 3)'}), '(low=-np.inf, high=np.inf, shape=(3, 3))\n', (3914, 3954), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3964, 4003), 'gym.spaces.Box', 'Box', ([], {'low': '(1.0)', 'high': 'np.inf', 'shape': '(3, 3)'}), '(low=1.0, high=np.inf, shape=(3, 3))\n', (3967, 4003), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4013, 4053), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': '(2.0)', 'shape': '(3, 3)'}), '(low=-np.inf, high=2.0, shape=(3, 3))\n', (4016, 4053), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5890, 5919), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (5903, 5919), False, 'import pytest\n'), ((7521, 7532), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (7529, 7532), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7542, 7563), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-4)'}), '(3, start=-4)\n', (7550, 7563), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7573, 7612), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (7576, 7612), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7889, 7915), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (7902, 7915), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7925, 7940), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (7936, 7940), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9017, 9028), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (9025, 9028), False, 'from gym.spaces import Tuple, Box, 
Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9038, 9059), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-4)'}), '(3, start=-4)\n', (9046, 9059), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9069, 9108), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 2)'}), '(low=0.0, high=np.inf, shape=(2, 2))\n', (9072, 9108), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9385, 9411), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (9398, 9411), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9421, 9436), 'gym.spaces.MultiBinary', 'MultiBinary', (['(10)'], {}), '(10)\n', (9432, 9436), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11036, 11047), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (11044, 11047), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11073, 11091), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3]'], {}), '([3])\n', (11086, 11091), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11117, 11138), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4]'], {}), '([3, 4])\n', (11130, 11138), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11356, 11367), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (11364, 11367), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11391, 11415), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (11404, 11415), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11441, 11467), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5]]'], {}), '([[3, 4, 5]])\n', (11454, 11467), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11496, 11533), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4, 5], [6, 7, 8]]'], {}), '([[3, 4, 5], [6, 7, 8]])\n', (11509, 11533), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11562, 11587), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3], [6]]'], {}), '([[3], [6]])\n', (11575, 11587), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((11618, 11649), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[[3, 4], [6, 7]]'], {}), '([[3, 4], [6, 7]])\n', (11631, 11649), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((13126, 13149), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (13147, 13149), True, 'import numpy as np\n'), ((2756, 2767), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (2764, 2767), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2769, 2780), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (2777, 2780), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2792, 2803), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {}), '(3)\n', (2800, 2803), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2805, 2826), 'gym.spaces.Discrete', 'Discrete', (['(3)'], {'start': '(-1)'}), '(3, start=-1)\n', (2813, 2826), False, 'from gym.spaces import 
Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2838, 2864), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 100]'], {}), '([2, 2, 100])\n', (2851, 2864), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2866, 2890), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 8]'], {}), '([2, 2, 8])\n', (2879, 2890), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2902, 2916), 'gym.spaces.MultiBinary', 'MultiBinary', (['(8)'], {}), '(8)\n', (2913, 2916), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2918, 2932), 'gym.spaces.MultiBinary', 'MultiBinary', (['(7)'], {}), '(7)\n', (2929, 2932), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3145, 3185), 'gym.spaces.Box', 'Box', ([], {'low': '(-np.inf)', 'high': '(0.0)', 'shape': '(2, 1)'}), '(low=-np.inf, high=0.0, shape=(2, 1))\n', (3148, 3185), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3199, 3238), 'gym.spaces.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(2, 1)'}), '(low=0.0, high=np.inf, shape=(2, 1))\n', (3202, 3238), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4823, 4834), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (4831, 4834), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4836, 4850), 'gym.spaces.MultiBinary', 'MultiBinary', (['(5)'], {}), '(5)\n', (4847, 4850), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4959, 4983), 'gym.spaces.MultiDiscrete', 'MultiDiscrete', (['[2, 2, 8]'], {}), '([2, 2, 8])\n', (4972, 4983), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5018, 5073), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(64, 64, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8)\n', (5021, 5073), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5087, 5142), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(32, 32, 3)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8)\n', (5090, 5142), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5262, 5273), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5270, 5273), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5308, 5319), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5316, 5319), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5743, 5761), 'gym.spaces.Dict', 'Dict', ([], {'space1': '"""abc"""'}), "(space1='abc')\n", (5747, 5761), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5779, 5802), 'gym.spaces.Dict', 'Dict', (["{'space1': 'abc'}"], {}), "({'space1': 'abc'})\n", (5783, 5802), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5820, 5834), 'gym.spaces.Tuple', 'Tuple', (["['abc']"], {}), "(['abc'])\n", (5825, 5834), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6014, 6046), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(3, 3)'}), '(low=0, high=1, shape=(3, 3))\n', 
(6017, 6046), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6263, 6274), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (6271, 6274), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7408, 7421), 'numpy.array', 'np.array', (['(0.5)'], {}), '(0.5)\n', (7416, 7421), True, 'import numpy as np\n'), ((7453, 7464), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (7461, 7464), True, 'import numpy as np\n'), ((353, 364), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (361, 364), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((366, 378), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (374, 378), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((427, 438), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (435, 438), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((564, 575), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (572, 575), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((577, 588), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (585, 588), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((590, 601), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (598, 601), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((620, 631), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (628, 631), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((633, 653), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {'start': '(6)'}), '(2, start=6)\n', (641, 653), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((655, 676), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {'start': '(-4)'}), '(2, start=-4)\n', (663, 676), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((797, 808), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (805, 808), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1799, 1817), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (1807, 1817), True, 'import numpy as np\n'), ((1824, 1842), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (1832, 1842), True, 'import numpy as np\n'), ((1931, 1942), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (1939, 1942), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((1944, 1956), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (1952, 1956), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2005, 2016), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2013, 2016), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2142, 2153), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2150, 2153), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2155, 2166), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2163, 2166), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2168, 2179), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2176, 2179), False, 'from 
gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2198, 2209), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2206, 2209), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2211, 2222), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (2219, 2222), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2224, 2245), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {'start': '(-6)'}), '(2, start=-6)\n', (2232, 2245), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2365, 2376), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (2373, 2376), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7629, 7640), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7637, 7640), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7642, 7654), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (7650, 7654), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7703, 7714), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7711, 7714), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7840, 7851), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (7848, 7851), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7853, 7864), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (7861, 7864), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7866, 7877), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (7874, 7877), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((7998, 8009), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (8006, 8009), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9125, 9136), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9133, 9136), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9138, 9150), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (9146, 9150), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9199, 9210), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9207, 9210), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9336, 9347), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9344, 9347), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9349, 9360), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (9357, 9360), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9362, 9373), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (9370, 9373), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9494, 9505), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (9502, 9505), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9976, 9989), 'gym.spaces.Discrete', 'Discrete', (['(100)'], {}), '(100)\n', (9984, 9989), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((9991, 10004), 'gym.spaces.Discrete', 
'Discrete', (['(100)'], {}), '(100)\n', (9999, 10004), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10023, 10034), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10031, 10034), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10036, 10048), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (10044, 10048), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10067, 10078), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10075, 10078), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10080, 10101), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(10)'}), '(5, start=10)\n', (10088, 10101), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10150, 10161), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10158, 10161), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10287, 10298), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10295, 10298), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10300, 10311), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (10308, 10311), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10313, 10324), 'gym.spaces.Discrete', 'Discrete', (['(2)'], {}), '(2)\n', (10321, 10324), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((10384, 10395), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (10392, 10395), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((2965, 2983), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (2973, 2983), True, 'import numpy as np\n'), ((2990, 3008), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (2998, 3008), True, 'import numpy as np\n'), ((3049, 3067), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (3057, 3067), True, 'import numpy as np\n'), ((3074, 3091), 'numpy.array', 'np.array', (['[10, 9]'], {}), '([10, 9])\n', (3082, 3091), True, 'import numpy as np\n'), ((3267, 3278), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3275, 3278), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3280, 3292), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3288, 3292), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3303, 3314), 'gym.spaces.Discrete', 'Discrete', (['(1)'], {}), '(1)\n', (3311, 3314), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3316, 3328), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3324, 3328), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3362, 3373), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3370, 3373), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3375, 3387), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3383, 3387), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3410, 3430), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {'start': '(7)'}), '(5, start=7)\n', (3418, 3430), False, 'from gym.spaces import Tuple, Box, Discrete, 
MultiDiscrete, MultiBinary, Dict\n'), ((3432, 3444), 'gym.spaces.Discrete', 'Discrete', (['(10)'], {}), '(10)\n', (3440, 3444), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3486, 3497), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3494, 3497), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3519, 3530), 'gym.spaces.Discrete', 'Discrete', (['(4)'], {}), '(4)\n', (3527, 3530), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3562, 3573), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3570, 3573), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((3592, 3603), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (3600, 3603), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((4883, 4901), 'numpy.array', 'np.array', (['[-10, 0]'], {}), '([-10, 0])\n', (4891, 4901), True, 'import numpy as np\n'), ((4908, 4926), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (4916, 4926), True, 'import numpy as np\n'), ((5182, 5193), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5190, 5193), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5204, 5215), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5212, 5215), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5247, 5258), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5255, 5258), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5292, 5303), 'gym.spaces.Discrete', 'Discrete', (['(5)'], {}), '(5)\n', (5300, 5303), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((5352, 5376), 'numpy.array', 'np.array', (['[-np.inf, 0.0]'], {}), '([-np.inf, 0.0])\n', (5360, 5376), True, 'import numpy as np\n'), ((5383, 5406), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (5391, 5406), True, 'import numpy as np\n'), ((5429, 5453), 'numpy.array', 'np.array', (['[-np.inf, 1.0]'], {}), '([-np.inf, 1.0])\n', (5437, 5453), True, 'import numpy as np\n'), ((5460, 5483), 'numpy.array', 'np.array', (['[0.0, np.inf]'], {}), '([0.0, np.inf])\n', (5468, 5483), True, 'import numpy as np\n'), ((6116, 6151), 'gym.spaces.Box', 'Box', ([], {'low': '(-100)', 'high': '(100)', 'shape': '(2,)'}), '(low=-100, high=100, shape=(2,))\n', (6119, 6151), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((6180, 6211), 'gym.spaces.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(2,)'}), '(low=-1, high=1, shape=(2,))\n', (6183, 6211), False, 'from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict\n'), ((464, 480), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (472, 480), True, 'import numpy as np\n'), ((487, 503), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (495, 503), True, 'import numpy as np\n'), ((867, 883), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (875, 883), True, 'import numpy as np\n'), ((890, 906), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (898, 906), True, 'import numpy as np\n'), ((2042, 2058), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2050, 2058), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 
5])\n', (2073, 2081), True, 'import numpy as np\n'), ((2435, 2451), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2443, 2451), True, 'import numpy as np\n'), ((2458, 2474), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (2466, 2474), True, 'import numpy as np\n'), ((7740, 7756), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (7748, 7756), True, 'import numpy as np\n'), ((7763, 7779), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (7771, 7779), True, 'import numpy as np\n'), ((8068, 8084), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (8076, 8084), True, 'import numpy as np\n'), ((8091, 8107), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (8099, 8107), True, 'import numpy as np\n'), ((9236, 9252), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9244, 9252), True, 'import numpy as np\n'), ((9259, 9275), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (9267, 9275), True, 'import numpy as np\n'), ((9564, 9580), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (9572, 9580), True, 'import numpy as np\n'), ((9587, 9603), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (9595, 9603), True, 'import numpy as np\n'), ((10187, 10203), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10195, 10203), True, 'import numpy as np\n'), ((10210, 10226), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (10218, 10226), True, 'import numpy as np\n'), ((10454, 10470), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (10462, 10470), True, 'import numpy as np\n'), ((10477, 10493), 'numpy.array', 'np.array', (['[1, 5]'], {}), '([1, 5])\n', (10485, 10493), True, 'import numpy as np\n')]
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from integration_tests.tests.test_cases import BaseTestCase
from integration_tests.framework import riemann
from integration_tests.tests.utils import do_retries
from integration_tests.tests.utils import get_resource as resource
class PoliciesTestsBase(BaseTestCase):
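    # Base class for Riemann policy tests: deploys a blueprint, keeps track
    # of its node instances, and resets Riemann state between tests.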
NUM_OF_INITIAL_WORKFLOWS = 2
def tearDown(self):
super(PoliciesTestsBase, self).tearDown()
riemann.reset_data_and_restart()
def launch_deployment(self, yaml_file, expected_num_of_node_instances=1):
deployment, _ = self.deploy_application(resource(yaml_file))
self.deployment = deployment
self.node_instances = self.client.node_instances.list(
deployment_id=deployment.id
)
self.assertEqual(
expected_num_of_node_instances,
len(self.node_instances)
)
self.wait_for_executions(self.NUM_OF_INITIAL_WORKFLOWS,
expect_exact_count=False)
def get_node_instance_by_name(self, name):
for nodeInstance in self.node_instances:
if nodeInstance.node_id == name:
return nodeInstance
def wait_for_executions(self, expected_count, expect_exact_count=True):
def assertion():
executions = self.client.executions.list(
deployment_id=self.deployment.id)
if expect_exact_count:
self.assertEqual(len(executions), expected_count)
else:
self.assertGreaterEqual(len(executions), expected_count)
self.do_assertions(assertion)
def wait_for_invocations(self, deployment_id, expected_count):
def assertion():
invocations = self.get_plugin_data(
plugin_name='testmockoperations',
deployment_id=deployment_id
)['mock_operation_invocation']
self.assertEqual(expected_count, len(invocations))
return invocations
return do_retries(assertion)
def publish(self, metric, ttl=60, node_name='node',
service='service', node_id=''):
if node_id == '':
node_id = self.get_node_instance_by_name(node_name).id
deployment_id = self.deployment.id
self.publish_riemann_event(
deployment_id,
node_name=node_name,
node_id=node_id,
metric=metric,
service='{}.{}.{}.{}'.format(
deployment_id,
service,
node_name,
node_id
),
ttl=ttl
)
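class ExamplePolicyTest(PoliciesTestsBase):
    # Added sketch (not part of the original suite): a minimal example of how
    # a concrete test might drive the base class above. The blueprint path and
    # the metric value are hypothetical; the methods called are the real ones
    # defined in PoliciesTestsBase.
    def test_threshold_policy_triggers_workflow(self):
        self.launch_deployment('dsl/with_policies.yaml')  # hypothetical blueprint
        # publish a metric assumed to breach the policy threshold ...
        self.publish(metric=100)
        # ... and expect at least one execution beyond the initial workflows
        self.wait_for_executions(self.NUM_OF_INITIAL_WORKFLOWS + 1,
                                 expect_exact_count=False)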
|
[
"integration_tests.tests.utils.get_resource",
"integration_tests.tests.utils.do_retries",
"integration_tests.framework.riemann.reset_data_and_restart"
] |
[((1029, 1061), 'integration_tests.framework.riemann.reset_data_and_restart', 'riemann.reset_data_and_restart', ([], {}), '()\n', (1059, 1061), False, 'from integration_tests.framework import riemann\n'), ((2621, 2642), 'integration_tests.tests.utils.do_retries', 'do_retries', (['assertion'], {}), '(assertion)\n', (2631, 2642), False, 'from integration_tests.tests.utils import do_retries\n'), ((1189, 1208), 'integration_tests.tests.utils.get_resource', 'resource', (['yaml_file'], {}), '(yaml_file)\n', (1197, 1208), True, 'from integration_tests.tests.utils import get_resource as resource\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-09-02 07:59
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
from apps.noclook.models import DEFAULT_ROLEGROUP_NAME, DEFAULT_ROLE_KEY, DEFAULT_ROLES
def init_default_roles(Role):
# and then get or create the default roles and link them
for role_slug, roledict in DEFAULT_ROLES.items():
        # get() raises DoesNotExist for a missing role; filter().first()
        # returns None instead, which is what the guard below expects
        role = Role.objects.filter(slug=role_slug).first()
        if role:
            # add a default description and name to the roles
            if not role.description and roledict['description']:
                role.description = roledict['description']
            if roledict['name']:
                role.name = roledict['name']
            role.save()
def forwards_func(apps, schema_editor):
Role = apps.get_model("noclook", "Role")
init_default_roles(Role)
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('noclook', '0008_role_squashed_0013_auto_20190725_1153'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
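def init_default_roles_get_or_create(Role):
    # Added sketch (hypothetical, not part of the original migration): the
    # comment in init_default_roles says "get or create", while that function
    # only gets. A variant that also creates missing roles could use Django's
    # get_or_create with defaults taken from DEFAULT_ROLES:
    for role_slug, roledict in DEFAULT_ROLES.items():
        role, created = Role.objects.get_or_create(
            slug=role_slug,
            defaults={'name': roledict['name'],
                      'description': roledict['description']},
        )
        if not created:
            # fill in missing fields on pre-existing roles, as above
            if not role.description and roledict['description']:
                role.description = roledict['description']
            if roledict['name']:
                role.name = roledict['name']
            role.save()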
|
[
"django.db.migrations.RunPython",
"apps.noclook.models.DEFAULT_ROLES.items"
] |
[((393, 414), 'apps.noclook.models.DEFAULT_ROLES.items', 'DEFAULT_ROLES.items', ([], {}), '()\n', (412, 414), False, 'from apps.noclook.models import DEFAULT_ROLEGROUP_NAME, DEFAULT_ROLE_KEY, DEFAULT_ROLES\n'), ((1157, 1206), 'django.db.migrations.RunPython', 'migrations.RunPython', (['forwards_func', 'reverse_func'], {}), '(forwards_func, reverse_func)\n', (1177, 1206), False, 'from django.db import migrations\n')]
|
import numpy as np
import tensorflow as tf
from lib.crf import crf_inference
from lib.CC_labeling_8 import CC_lab
def single_generate_seed_step(params):
"""Implemented seeded region growing
Parameters
----------
params : 3-tuple of numpy 4D arrays
(tag) : numpy 4D array (size: B x 1 x 1 x C), where B = batch size, C = number of classes
GT label
(cue) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Weak cue
(prob) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Final feature map
Returns
-------
(cue) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Weak cue, after seeded region growing
"""
# th_f,th_b = 0.85,0.99
th_f, th_b = 0.5, 0.7
tag, cue, prob = params
existing_prob = prob * tag
    existing_prob_argmax = np.argmax(existing_prob,
                                     axis=2) + 1  # shift by 1 so that 0 is reserved for pixels where no category qualifies
tell_where_is_foreground_mask = (existing_prob_argmax > 1).astype(np.uint8)
existing_prob_fg_th_mask = (np.sum((existing_prob[:, :, 1:] > th_f).astype(np.uint8), axis=2) > 0.5).astype(
        np.uint8)  # the mask is 1 for a pixel if any existing foreground category's score is above th_f
existing_prob_bg_th_mask = (np.sum((existing_prob[:, :, 0:1] > th_b).astype(np.uint8), axis=2) > 0.5).astype(
np.uint8)
label_map = (existing_prob_fg_th_mask * tell_where_is_foreground_mask + existing_prob_bg_th_mask * (
1 - tell_where_is_foreground_mask)) * existing_prob_argmax
    # label_map is a two-dimensional map showing, for each pixel, which category satisfies all three of the following conditions:
    # 1. the category is in the tags of the image
    # 2. the category has the maximum probability among the tags
    # 3. the probability of the category is bigger than the threshold
    # these three conditions form the similarity criterion
    # in label_map, 0 means no category satisfies the conditions, and n means category n-1 satisfies them
cls_index = np.where(tag > 0.5)[2] # the existing labels index
for c in cls_index:
mat = (label_map == (c + 1))
mat = mat.astype(int)
cclab = CC_lab(mat)
        cclab.connectedComponentLabel()  # divides each connected region into a group and updates cclab.labels, a two-dimensional list holding the group index of each pixel
        high_confidence_set_label = set()  # collects the indices of the high-confidence connected regions
for (x, y), value in np.ndenumerate(mat):
if value == 1 and cue[x, y, c] == 1:
high_confidence_set_label.add(cclab.labels[x][y])
elif value == 1 and np.sum(cue[x, y, :]) == 1:
cclab.labels[x][y] = -1
for (x, y), value in np.ndenumerate(np.array(cclab.labels)):
if value in high_confidence_set_label:
cue[x, y, c] = 1
return np.expand_dims(cue, axis=0)
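def _demo_single_generate_seed_step():
    # Added sketch (illustrative only, not in the original file): a toy call
    # on a 2-class, 4x4 problem. Shapes follow the docstring above; all values
    # are made up.
    tag = np.ones((1, 1, 2), dtype=np.float32)   # both classes present
    cue = np.zeros((4, 4, 2), dtype=np.float32)
    cue[0, 0, 1] = 1.0                          # one confident foreground seed
    prob = np.zeros((4, 4, 2), dtype=np.float32)
    prob[:, :, 1] = 0.8                         # foreground prob above th_f = 0.5
    prob[:, :, 0] = 0.2
    new_cue = single_generate_seed_step((tag, cue, prob))
    assert new_cue.shape == (1, 4, 4, 2)        # the function adds a batch axis
    return new_cue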
class DSRG():
"""Class for the DSRG method"""
def __init__(self, config):
self.config = config
self.dataset = self.config.get('dataset')
self.h, self.w = (self.config.get('img_size'), self.config.get('img_size'))
self.num_classes = self.config.get('num_classes')
self.batch_size = self.config.get("batch_size")
self.phase = self.config.get('phase')
self.img_mean = self.config.get('img_mean')
self.seed_size = self.config.get('seed_size')
self.init_model_path = self.config.get('init_model_path', None)
self.crf_config_train = {"g_sxy":3/12,"g_compat":3,"bi_sxy":80/12,"bi_srgb":13,"bi_compat":10,"iterations":5}
self.crf_config_test = {"g_sxy":3,"g_compat":3,"bi_sxy":80,"bi_srgb":13,"bi_compat":10,"iterations":10}
self.net = {}
self.weights = {}
self.trainable_list = []
self.loss = {}
self.metric = {}
self.variables={"total":[]}
self.min_prob = 0.0001
self.stride = {}
self.stride["input"] = 1
# different lr for different variable
self.lr_1_list = []
self.lr_2_list = []
self.lr_10_list = []
self.lr_20_list = []
self.pool = self.config.get('pool')
def build(self,net_input=None,net_label=None,net_cues=None,net_id=None,phase='train'):
"""Build DSRG model
Parameters
----------
net_input : Tensor, optional
Input images in batch, after resizing and normalizing
net_label : Tensor, optional
GT segmentation in batch, after resizing
net_cues : Tensor, optional
Weak cue labels in batch, after resizing
net_id : Tensor, optional
Filenames in batch
phase : str, optional
Phase to run DSRG model
Returns
-------
(output) : Tensor
Final layer of FCN model of DSRG
"""
if "output" not in self.net:
if phase == 'train':
with tf.name_scope("placeholder"):
self.net["input"] = net_input
self.net["label"] = net_label # [None, self.num_classes], int32
self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.5, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
elif phase in ['val', 'tuning', 'segtest', 'test']:
with tf.name_scope("placeholder"):
self.net["input"] = net_input
# self.net["label"] = net_label # [None, self.num_classes], int32
# self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
elif phase == 'debug':
with tf.name_scope("placeholder"):
self.net["input"] = net_input
self.net["label"] = net_label # [None, self.num_classes], int32
self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
self.net["epoch"] = tf.Variable(0.0, trainable=False)
return self.net["output"]
def create_network(self, phase):
"""Helper function to build DSRG model
Parameters
----------
        phase : str
            Phase in which to run the DSRG model
Returns
-------
(crf) : Tensor
Final layer of FCN model of DSRG
"""
if self.init_model_path is not None:
self.load_init_model()
with tf.name_scope("vgg") as scope:
# build block
block = self.build_block("input",["conv1_1","relu1_1","conv1_2","relu1_2","pool1"])
block = self.build_block(block,["conv2_1","relu2_1","conv2_2","relu2_2","pool2"])
block = self.build_block(block,["conv3_1","relu3_1","conv3_2","relu3_2","conv3_3","relu3_3","pool3"])
block = self.build_block(block,["conv4_1","relu4_1","conv4_2","relu4_2","conv4_3","relu4_3","pool4"])
block = self.build_block(block,["conv5_1","relu5_1","conv5_2","relu5_2","conv5_3","relu5_3","pool5","pool5a"])
fc1 = self.build_fc(block,["fc6_1","relu6_1","drop6_1","fc7_1","relu7_1","drop7_1","fc8_1"], dilate_rate=6)
fc2 = self.build_fc(block,["fc6_2","relu6_2","drop6_2","fc7_2","relu7_2","drop7_2","fc8_2"], dilate_rate=12)
fc3 = self.build_fc(block,["fc6_3","relu6_3","drop6_3","fc7_3","relu7_3","drop7_3","fc8_3"], dilate_rate=18)
fc4 = self.build_fc(block,["fc6_4","relu6_4","drop6_4","fc7_4","relu7_4","drop7_4","fc8_4"], dilate_rate=24)
self.net["fc8"] = self.net[fc1]+self.net[fc2]+self.net[fc3]+self.net[fc4]
# DSRG
softmax = self.build_sp_softmax("fc8","fc8-softmax")
if phase in ['train', 'debug']:
new_seed = self.build_dsrg_layer("cues","fc8-softmax","new_cues")
crf = self.build_crf("fc8-softmax", "crf") # new
return self.net[crf] # NOTE: crf is log-probability
def build_block(self,last_layer,layer_lists):
"""Build a block of the DSRG model
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer_lists : list of str
List of strings of layer names to build inside the current block
Returns
-------
last_layer : Tensor
The output layer of the current block
"""
for layer in layer_lists:
if layer.startswith("conv"):
if layer[4] != "5":
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
weights,bias = self.get_weights_and_bias(layer)
self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer[4] == "5":
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
weights,bias = self.get_weights_and_bias(layer)
self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=2, padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer.startswith("relu"):
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.relu( self.net[last_layer],name="relu")
last_layer = layer
elif layer.startswith("pool5a"):
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.avg_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
last_layer = layer
elif layer.startswith("pool"):
if layer[4] not in ["4","5"]:
with tf.name_scope(layer) as scope:
self.stride[layer] = 2 * self.stride[last_layer]
self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,2,2,1],padding="SAME",name="pool")
last_layer = layer
if layer[4] in ["4","5"]:
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
last_layer = layer
return last_layer
def build_fc(self,last_layer, layer_lists, dilate_rate=12):
"""Build a block of fully-connected layers
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer_lists : list of str
List of strings of layer names to build inside the current block
dilate_rate : int, optional
Dilation rate for atrous 2D convolutional layers
Returns
-------
last_layer : Tensor
The output layer of the current block
"""
for layer in layer_lists:
if layer.startswith("fc"):
with tf.name_scope(layer) as scope:
weights,bias = self.get_weights_and_bias(layer)
if layer.startswith("fc6"):
self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=dilate_rate, padding="SAME", name="conv")
else:
self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer.startswith("batch_norm"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.contrib.layers.batch_norm(self.net[last_layer])
last_layer = layer
if layer.startswith("relu"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.nn.relu( self.net[last_layer])
last_layer = layer
if layer.startswith("drop"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.nn.dropout( self.net[last_layer], keep_prob=1-self.net["drop_prob"])
last_layer = layer
return last_layer
def build_sp_softmax(self,last_layer,layer):
"""Build a block of a fully-connected layer and softmax
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer : str
Name of the softmax output layer
Returns
-------
layer : str
Name of the softmax output layer
"""
preds_max = tf.reduce_max(self.net[last_layer],axis=3,keepdims=True)
preds_exp = tf.exp(self.net[last_layer] - preds_max)
self.net[layer] = preds_exp / tf.reduce_sum(preds_exp,axis=3,keepdims=True) + self.min_prob
self.net[layer] = self.net[layer] / tf.reduce_sum(self.net[layer],axis=3,keepdims=True)
return layer
def build_crf(self,featmap_layer,layer):
"""Build a custom dense CRF layer
Parameters
----------
        featmap_layer : str
            Layer name of the feature map fed into the dense CRF layer
layer : str
Layer name of the dense CRF layer
Returns
-------
layer : str
Layer name of the dense CRF layer
"""
origin_image = self.net["input"] + self.img_mean
origin_image_zoomed = tf.image.resize_bilinear(origin_image,(self.seed_size, self.seed_size))
featemap = self.net[featmap_layer]
featemap_zoomed = tf.image.resize_bilinear(featemap,(self.seed_size, self.seed_size))
def crf(featemap,image):
batch_size = featemap.shape[0]
image = image.astype(np.uint8)
ret = np.zeros(featemap.shape,dtype=np.float32)
for i in range(batch_size):
ret[i,:,:,:] = crf_inference(image[i],self.crf_config_train,self.num_classes,featemap[i],use_log=True)
ret[ret < self.min_prob] = self.min_prob
ret /= np.sum(ret,axis=3,keepdims=True)
ret = np.log(ret)
return ret.astype(np.float32)
crf = tf.py_func(crf,[featemap_zoomed,origin_image_zoomed],tf.float32) # shape [N, h, w, C], RGB or BGR doesn't matter
self.net[layer] = crf
return layer
def build_dsrg_layer(self,seed_layer,prob_layer,layer):
"""Build DSRG layer
Parameters
----------
seed_layer : str
Layer name of the weak cues
prob_layer : str
Layer name of softmax
layer : str
Layer name of the DSRG layer
Returns
-------
layer : str
Layer name of the DSRG layer
"""
def generate_seed_step(tags,cues,probs):
tags = np.reshape(tags,[-1,1,1,self.num_classes])
params_list = []
for i in range(self.batch_size):
params_list.append([tags[i],cues[i],probs[i]])
ret = self.pool.map(single_generate_seed_step, params_list)
new_cues = ret[0]
for i in range(1,self.batch_size):
new_cues = np.concatenate([new_cues,ret[i]],axis=0)
return new_cues
self.net[layer] = tf.py_func(generate_seed_step,[self.net["label"],self.net[seed_layer],self.net[prob_layer]],tf.float32)
return layer
def load_init_model(self):
"""Load initialized layer"""
model_path = self.config["init_model_path"]
self.init_model = np.load(model_path, encoding="latin1", allow_pickle=True).item()
def get_weights_and_bias(self,layer,shape=None):
"""Load saved weights and biases for saved network
Parameters
----------
layer : str
Name of current layer
shape : list of int (size: 4), optional
4D shape of the convolutional or fully-connected layer
Returns
-------
weights : Variable
Saved weights
bias : Variable
Saved biases
"""
if layer in self.weights:
return self.weights[layer]
if shape is not None:
pass
elif layer.startswith("conv"):
shape = [3,3,0,0]
if layer == "conv1_1":
shape[2] = 3
else:
shape[2] = 64 * self.stride[layer]
if shape[2] > 512: shape[2] = 512
if layer in ["conv2_1","conv3_1","conv4_1"]: shape[2] = int(shape[2]/2)
shape[3] = 64 * self.stride[layer]
if shape[3] > 512: shape[3] = 512
elif layer.startswith("fc"):
if layer.startswith("fc6"):
shape = [3,3,512,1024]
if layer.startswith("fc7"):
shape = [1,1,1024,1024]
if layer.startswith("fc8"):
shape = [1,1,1024,self.num_classes]
if self.init_model_path is None:
init = tf.random_normal_initializer(stddev=0.01)
weights = tf.get_variable(name="%s_weights" % layer,initializer=init, shape = shape)
init = tf.constant_initializer(0)
bias = tf.get_variable(name="%s_bias" % layer,initializer=init, shape = [shape[-1]])
else:
if layer.startswith("fc8"):
init = tf.contrib.layers.xavier_initializer(uniform=True)
else:
init = tf.constant_initializer(self.init_model[layer]["w"])
weights = tf.get_variable(name="%s_weights" % layer,initializer=init,shape = shape)
if layer.startswith("fc8"):
init = tf.constant_initializer(0)
else:
init = tf.constant_initializer(self.init_model[layer]["b"])
bias = tf.get_variable(name="%s_bias" % layer,initializer=init,shape = [shape[-1]])
self.weights[layer] = (weights,bias)
if layer.startswith("fc8"):
self.lr_10_list.append(weights)
self.lr_20_list.append(bias)
else:
self.lr_1_list.append(weights)
self.lr_2_list.append(bias)
self.trainable_list.append(weights)
self.trainable_list.append(bias)
self.variables["total"].append(weights)
self.variables["total"].append(bias)
return weights,bias
def pred(self):
"""Implement final segmentation prediction as argmax of final feature map"""
if self.h is not None:
self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], (self.h, self.w))
else:
label_size = tf.py_func(lambda x: x.shape[1:3], [self.net["input"]], [tf.int64, tf.int64])
self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], [tf.cast(label_size[0], tf.int32),
tf.cast(label_size[1],
tf.int32)])
self.net["pred"] = tf.argmax(self.net["rescale_output"], axis=3)
def getloss(self):
"""Construct overall loss function
Returns
-------
loss : Tensor
Output of overall loss function
"""
loss = 0
# for DSRG
seed_loss = self.get_balanced_seed_loss(self.net["fc8-softmax"],self.net["new_cues"])
constrain_loss = self.get_constrain_loss(self.net["fc8-softmax"],self.net["crf"])
self.loss["seed"] = seed_loss
self.loss["constrain"] = constrain_loss
loss += seed_loss + constrain_loss
return loss
def get_balanced_seed_loss(self,softmax,cues):
"""Balanced seeding loss function
Parameters
----------
softmax : Tensor
Final feature map
cues : Tensor
Weak cues
Returns
-------
(loss) : Tensor
            Output of the balanced seeding loss: the sum of the foreground and background terms (see the NumPy sketch after this class)
"""
count_bg = tf.reduce_sum(cues[:,:,:,0:1],axis=(1,2,3),keepdims=True)
loss_bg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,0:1]*tf.log(softmax[:,:,:,0:1]),axis=(1,2,3),keepdims=True)/(count_bg+1e-8))
count_fg = tf.reduce_sum(cues[:,:,:,1:],axis=(1,2,3),keepdims=True)
loss_fg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,1:]*tf.log(softmax[:,:,:,1:]),axis=(1,2,3),keepdims=True)/(count_fg+1e-8))
return loss_bg+loss_fg
def get_constrain_loss(self,softmax,crf):
"""Constrain loss function
Parameters
----------
softmax : Tensor
Final feature map
crf : Tensor
Output of dense CRF
Returns
-------
loss : Tensor
Output of constrain loss function
"""
probs_smooth = tf.exp(crf)
loss = tf.reduce_mean(tf.reduce_sum(probs_smooth * tf.log(probs_smooth/(softmax+1e-8)+1e-8), axis=3))
return loss
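def _reference_softmax_and_seed_loss(f, y, min_prob=1e-4):
    # Added sketch (not part of the original file): a NumPy re-statement of
    # build_sp_softmax and get_balanced_seed_loss for a single image, with a
    # feature map `f` and cues `y`, both of shape (H, W, C). It mirrors the
    # TF graph above: a max-shifted softmax floored at min_prob, then a
    # class-balanced cross-entropy over background and foreground channels.
    e = np.exp(f - f.max(axis=2, keepdims=True))     # numerically stable exp
    p = e / e.sum(axis=2, keepdims=True) + min_prob  # floor at min_prob
    p = p / p.sum(axis=2, keepdims=True)             # renormalize
    count_bg = y[:, :, 0:1].sum() + 1e-8
    count_fg = y[:, :, 1:].sum() + 1e-8
    loss_bg = -(y[:, :, 0:1] * np.log(p[:, :, 0:1])).sum() / count_bg
    loss_fg = -(y[:, :, 1:] * np.log(p[:, :, 1:])).sum() / count_fg
    return p, loss_bg + loss_fg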
|
[
"tensorflow.contrib.layers.xavier_initializer",
"numpy.load",
"tensorflow.reduce_sum",
"numpy.sum",
"numpy.argmax",
"tensorflow.constant_initializer",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.reduce_max",
"lib.CC_labeling_8.CC_lab",
"tensorflow.get_variable",
"tensorflow.nn.relu",
"lib.crf.crf_inference",
"tensorflow.nn.atrous_conv2d",
"tensorflow.cast",
"tensorflow.exp",
"numpy.reshape",
"tensorflow.name_scope",
"tensorflow.nn.bias_add",
"numpy.ndenumerate",
"tensorflow.nn.max_pool",
"tensorflow.random_normal_initializer",
"tensorflow.log",
"numpy.concatenate",
"numpy.log",
"tensorflow.py_func",
"tensorflow.argmax",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.contrib.layers.batch_norm",
"numpy.where",
"numpy.array",
"tensorflow.nn.avg_pool",
"tensorflow.nn.dropout",
"tensorflow.image.resize_bilinear"
] |
[((3106, 3133), 'numpy.expand_dims', 'np.expand_dims', (['cue'], {'axis': '(0)'}), '(cue, axis=0)\n', (3120, 3133), True, 'import numpy as np\n'), ((951, 983), 'numpy.argmax', 'np.argmax', (['existing_prob'], {'axis': '(2)'}), '(existing_prob, axis=2)\n', (960, 983), True, 'import numpy as np\n'), ((2213, 2232), 'numpy.where', 'np.where', (['(tag > 0.5)'], {}), '(tag > 0.5)\n', (2221, 2232), True, 'import numpy as np\n'), ((2372, 2383), 'lib.CC_labeling_8.CC_lab', 'CC_lab', (['mat'], {}), '(mat)\n', (2378, 2383), False, 'from lib.CC_labeling_8 import CC_lab\n'), ((2707, 2726), 'numpy.ndenumerate', 'np.ndenumerate', (['mat'], {}), '(mat)\n', (2721, 2726), True, 'import numpy as np\n'), ((6767, 6800), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6778, 6800), True, 'import tensorflow as tf\n'), ((14069, 14127), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.net[last_layer]'], {'axis': '(3)', 'keepdims': '(True)'}), '(self.net[last_layer], axis=3, keepdims=True)\n', (14082, 14127), True, 'import tensorflow as tf\n'), ((14146, 14186), 'tensorflow.exp', 'tf.exp', (['(self.net[last_layer] - preds_max)'], {}), '(self.net[last_layer] - preds_max)\n', (14152, 14186), True, 'import tensorflow as tf\n'), ((14894, 14966), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['origin_image', '(self.seed_size, self.seed_size)'], {}), '(origin_image, (self.seed_size, self.seed_size))\n', (14918, 14966), True, 'import tensorflow as tf\n'), ((15035, 15103), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['featemap', '(self.seed_size, self.seed_size)'], {}), '(featemap, (self.seed_size, self.seed_size))\n', (15059, 15103), True, 'import tensorflow as tf\n'), ((15646, 15713), 'tensorflow.py_func', 'tf.py_func', (['crf', '[featemap_zoomed, origin_image_zoomed]', 'tf.float32'], {}), '(crf, [featemap_zoomed, origin_image_zoomed], tf.float32)\n', (15656, 15713), True, 'import tensorflow as tf\n'), ((16766, 16877), 'tensorflow.py_func', 'tf.py_func', (['generate_seed_step', "[self.net['label'], self.net[seed_layer], self.net[prob_layer]]", 'tf.float32'], {}), "(generate_seed_step, [self.net['label'], self.net[seed_layer],\n self.net[prob_layer]], tf.float32)\n", (16776, 16877), True, 'import tensorflow as tf\n'), ((20546, 20591), 'tensorflow.argmax', 'tf.argmax', (["self.net['rescale_output']"], {'axis': '(3)'}), "(self.net['rescale_output'], axis=3)\n", (20555, 20591), True, 'import tensorflow as tf\n'), ((21561, 21625), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cues[:, :, :, 0:1]'], {'axis': '(1, 2, 3)', 'keepdims': '(True)'}), '(cues[:, :, :, 0:1], axis=(1, 2, 3), keepdims=True)\n', (21574, 21625), True, 'import tensorflow as tf\n'), ((21775, 21838), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cues[:, :, :, 1:]'], {'axis': '(1, 2, 3)', 'keepdims': '(True)'}), '(cues[:, :, :, 1:], axis=(1, 2, 3), keepdims=True)\n', (21788, 21838), True, 'import tensorflow as tf\n'), ((22362, 22373), 'tensorflow.exp', 'tf.exp', (['crf'], {}), '(crf)\n', (22368, 22373), True, 'import tensorflow as tf\n'), ((2986, 3008), 'numpy.array', 'np.array', (['cclab.labels'], {}), '(cclab.labels)\n', (2994, 3008), True, 'import numpy as np\n'), ((7231, 7251), 'tensorflow.name_scope', 'tf.name_scope', (['"""vgg"""'], {}), "('vgg')\n", (7244, 7251), True, 'import tensorflow as tf\n'), ((14331, 14384), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.net[layer]'], {'axis': '(3)', 'keepdims': '(True)'}), '(self.net[layer], axis=3, keepdims=True)\n', (14344, 14384), True, 'import tensorflow as tf\n'), ((15245, 15287), 'numpy.zeros', 'np.zeros', (['featemap.shape'], {'dtype': 'np.float32'}), '(featemap.shape, dtype=np.float32)\n', (15253, 15287), True, 'import numpy as np\n'), ((15518, 15552), 'numpy.sum', 'np.sum', (['ret'], {'axis': '(3)', 'keepdims': '(True)'}), '(ret, axis=3, keepdims=True)\n', (15524, 15552), True, 'import numpy as np\n'), ((15569, 15580), 'numpy.log', 'np.log', (['ret'], {}), '(ret)\n', (15575, 15580), True, 'import numpy as np\n'), ((16298, 16344), 'numpy.reshape', 'np.reshape', (['tags', '[-1, 1, 1, self.num_classes]'], {}), '(tags, [-1, 1, 1, self.num_classes])\n', (16308, 16344), True, 'import numpy as np\n'), ((18472, 18513), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.01)'}), '(stddev=0.01)\n', (18500, 18513), True, 'import tensorflow as tf\n'), ((18536, 18609), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_weights' % layer)", 'initializer': 'init', 'shape': 'shape'}), "(name='%s_weights' % layer, initializer=init, shape=shape)\n", (18551, 18609), True, 'import tensorflow as tf\n'), ((18630, 18656), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (18653, 18656), True, 'import tensorflow as tf\n'), ((18676, 18752), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_bias' % layer)", 'initializer': 'init', 'shape': '[shape[-1]]'}), "(name='%s_bias' % layer, initializer=init, shape=[shape[-1]])\n", (18691, 18752), True, 'import tensorflow as tf\n'), ((18998, 19071), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_weights' % layer)", 'initializer': 'init', 'shape': 'shape'}), "(name='%s_weights' % layer, initializer=init, shape=shape)\n", (19013, 19071), True, 'import tensorflow as tf\n'), ((19275, 19351), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': "('%s_bias' % layer)", 'initializer': 'init', 'shape': '[shape[-1]]'}), "(name='%s_bias' % layer, initializer=init, shape=[shape[-1]])\n", (19290, 19351), True, 'import tensorflow as tf\n'), ((20000, 20062), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (["self.net['output']", '(self.h, self.w)'], {}), "(self.net['output'], (self.h, self.w))\n", (20024, 20062), True, 'import tensorflow as tf\n'), ((20102, 20179), 'tensorflow.py_func', 'tf.py_func', (['(lambda x: x.shape[1:3])', "[self.net['input']]", '[tf.int64, tf.int64]'], {}), "(lambda x: x.shape[1:3], [self.net['input']], [tf.int64, tf.int64])\n", (20112, 20179), True, 'import tensorflow as tf\n'), ((14225, 14272), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['preds_exp'], {'axis': '(3)', 'keepdims': '(True)'}), '(preds_exp, axis=3, keepdims=True)\n', (14238, 14272), True, 'import tensorflow as tf\n'), ((15358, 15454), 'lib.crf.crf_inference', 'crf_inference', (['image[i]', 'self.crf_config_train', 'self.num_classes', 'featemap[i]'], {'use_log': '(True)'}), '(image[i], self.crf_config_train, self.num_classes, featemap[i\n ], use_log=True)\n', (15371, 15454), False, 'from lib.crf import crf_inference\n'), ((16669, 16711), 'numpy.concatenate', 'np.concatenate', (['[new_cues, ret[i]]'], {'axis': '(0)'}), '([new_cues, ret[i]], axis=0)\n', (16683, 16711), True, 'import numpy as np\n'), ((17038, 17095), 'numpy.load', 'np.load', (['model_path'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(model_path, encoding='latin1', allow_pickle=True)\n", (17045, 17095), True, 'import numpy as np\n'), ((18831, 18881), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(True)'}), '(uniform=True)\n', (18867, 18881), True, 'import tensorflow as tf\n'), ((18923, 18975), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["self.init_model[layer]['w']"], {}), "(self.init_model[layer]['w'])\n", (18946, 18975), True, 'import tensorflow as tf\n'), ((19135, 19161), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (19158, 19161), True, 'import tensorflow as tf\n'), ((19203, 19255), 'tensorflow.constant_initializer', 'tf.constant_initializer', (["self.init_model[layer]['b']"], {}), "(self.init_model[layer]['b'])\n", (19226, 19255), True, 'import tensorflow as tf\n'), ((5191, 5219), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (5204, 5219), True, 'import tensorflow as tf\n'), ((5524, 5557), 'tensorflow.Variable', 'tf.Variable', (['(0.5)'], {'trainable': '(False)'}), '(0.5, trainable=False)\n', (5535, 5557), True, 'import tensorflow as tf\n'), ((10339, 10359), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (10352, 10359), True, 'import tensorflow as tf\n'), ((10473, 10518), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.net[last_layer]'], {'name': '"""relu"""'}), "(self.net[last_layer], name='relu')\n", (10483, 10518), True, 'import tensorflow as tf\n'), ((12351, 12371), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (12364, 12371), True, 'import tensorflow as tf\n'), ((12840, 12890), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (12854, 12890), True, 'import tensorflow as tf\n'), ((12999, 13019), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13012, 13019), True, 'import tensorflow as tf\n'), ((13068, 13118), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['self.net[last_layer]'], {}), '(self.net[last_layer])\n', (13096, 13118), True, 'import tensorflow as tf\n'), ((13220, 13240), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13233, 13240), True, 'import tensorflow as tf\n'), ((13289, 13321), 'tensorflow.nn.relu', 'tf.nn.relu', (['self.net[last_layer]'], {}), '(self.net[last_layer])\n', (13299, 13321), True, 'import tensorflow as tf\n'), ((13424, 13444), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (13437, 13444), True, 'import tensorflow as tf\n'), ((13493, 13565), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['self.net[last_layer]'], {'keep_prob': "(1 - self.net['drop_prob'])"}), "(self.net[last_layer], keep_prob=1 - self.net['drop_prob'])\n", (13506, 13565), True, 'import tensorflow as tf\n'), ((20267, 20299), 'tensorflow.cast', 'tf.cast', (['label_size[0]', 'tf.int32'], {}), '(label_size[0], tf.int32)\n', (20274, 20299), True, 'import tensorflow as tf\n'), ((20388, 20420), 'tensorflow.cast', 'tf.cast', (['label_size[1]', 'tf.int32'], {}), '(label_size[1], tf.int32)\n', (20395, 20420), True, 'import tensorflow as tf\n'), ((22433, 22481), 'tensorflow.log', 'tf.log', (['(probs_smooth / (softmax + 1e-08) + 1e-08)'], {}), '(probs_smooth / (softmax + 1e-08) + 1e-08)\n', (22439, 22481), True, 'import tensorflow as tf\n'), ((2875, 2895), 'numpy.sum', 'np.sum', (['cue[x, y, :]'], {}), '(cue[x, y, :])\n', (2881, 2895), True, 'import numpy as np\n'), ((5743, 5771), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (5756, 5771), True, 'import tensorflow as tf\n'), ((6080, 6113), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6091, 6113), True, 'import tensorflow as tf\n'), ((9330, 9350), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (9343, 9350), True, 'import tensorflow as tf\n'), ((9544, 9643), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.net[last_layer]', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='conv')\n", (9556, 9643), True, 'import tensorflow as tf\n'), ((9681, 9731), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (9695, 9731), True, 'import tensorflow as tf\n'), ((9837, 9857), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (9850, 9857), True, 'import tensorflow as tf\n'), ((10051, 10142), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['self.net[last_layer]', 'weights'], {'rate': '(2)', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, rate=2, padding='SAME',\n name='conv')\n", (10070, 10142), True, 'import tensorflow as tf\n'), ((10182, 10232), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['self.net[layer]', 'bias'], {'name': '"""bias"""'}), "(self.net[layer], bias, name='bias')\n", (10196, 10232), True, 'import tensorflow as tf\n'), ((10624, 10644), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (10637, 10644), True, 'import tensorflow as tf\n'), ((10758, 10870), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 1, 1, \n 1], padding='SAME', name='pool')\n", (10772, 10870), True, 'import tensorflow as tf\n'), ((12540, 12641), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['self.net[last_layer]', 'weights'], {'rate': 'dilate_rate', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, rate=dilate_rate,\n padding='SAME', name='conv')\n", (12559, 12641), True, 'import tensorflow as tf\n'), ((12707, 12806), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.net[last_layer]', 'weights'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""conv"""'}), "(self.net[last_layer], weights, strides=[1, 1, 1, 1], padding=\n 'SAME', name='conv')\n", (12719, 12806), True, 'import tensorflow as tf\n'), ((6270, 6298), 'tensorflow.name_scope', 'tf.name_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (6283, 6298), True, 'import tensorflow as tf\n'), ((6605, 6638), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'trainable': '(False)'}), '(0.0, trainable=False)\n', (6616, 6638), True, 'import tensorflow as tf\n'), ((21683, 21712), 'tensorflow.log', 'tf.log', (['softmax[:, :, :, 0:1]'], {}), '(softmax[:, :, :, 0:1])\n', (21689, 21712), True, 'import tensorflow as tf\n'), ((21895, 21923), 'tensorflow.log', 'tf.log', (['softmax[:, :, :, 1:]'], {}), '(softmax[:, :, :, 1:])\n', (21901, 21923), True, 'import tensorflow as tf\n'), ((11012, 11032), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (11025, 11032), True, 'import tensorflow as tf\n'), ((11158, 11270), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 2, 2, \n 1], padding='SAME', name='pool')\n", (11172, 11270), True, 'import tensorflow as tf\n'), ((11369, 11389), 'tensorflow.name_scope', 'tf.name_scope', (['layer'], {}), '(layer)\n', (11382, 11389), True, 'import tensorflow as tf\n'), ((11511, 11623), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['self.net[last_layer]'], {'ksize': '[1, 3, 3, 1]', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'name': '"""pool"""'}), "(self.net[last_layer], ksize=[1, 3, 3, 1], strides=[1, 1, 1, \n 1], padding='SAME', name='pool')\n", (11525, 11623), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 13:46:46 2019
Description:
Last update:
Version: 1.0
Author: <NAME> (NREL)
"""
import pandas as pd
class LoadConfig(object):
def __init__(self, config_file):
self.config_file = config_file
def str_to_bool(self, s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError('Insert boolean values in the config file')
def get_config_models(self):
df = pd.DataFrame()
df['MaxAnnualConditions'] = self.config_file.get('Water Heater Models', 'MaxAnnualConditions', fallback = None).split(',')
#df['TtankInitial'] = self.config_file.get('Water Heater Models', 'TtankInitial', fallback = None).split(',')
#df['TsetInitial'] = list(map(float, self.config_file.get('Water Heater Models', 'TsetInitial', fallback = None).split(',')))
#df['Capacity'] = list(map(int, self.config_file.get('Water Heater Models', 'Capacity', fallback = None).split(',')))
#df['Type'] = list(map(float, self.config_file.get('Water Heater Models', 'Type', fallback = None).split(',')))
#df['Location'] = list(map(float, self.config_file.get('Water Heater Models', 'Location', fallback = None).split(',')))
#df['MaxServiceCalls'] = list(map(float, self.config_file.get('Water Heater Models', 'MaxServiceCalls', fallback = None).split(',')))
return df
def get_n_subfleets(self):
return int(self.config_file.get('Water Heater Fleet', 'NumberSubfleets', fallback = 100))
def get_run_baseline(self):
return self.str_to_bool(self.config_file.get('Water Heater Fleet', 'RunBaseline', fallback = False))
def get_n_days_MC(self):
return int(self.config_file.get('Water Heater Fleet', 'NumberDaysBase', fallback = 10))
def get_fleet_config(self):
is_p_priority = self.str_to_bool(self.config_file.get('Fleet Configuration', 'Is_P_Priority', fallback = True))
is_aut = self.str_to_bool(self.config_file.get('Fleet Configuration', 'IsAutonomous', fallback = False))
return [is_p_priority, is_aut]
def get_FW(self):
fw_enabled = list()
fw_enabled.append(self.str_to_bool(self.config_file.get('FW', 'FW21_Enabled', fallback = True)))
# Discrete version of the response to frequency deviations (artificial inertia service)
fw_enabled.append(list(map(float, (self.config_file.get('FW', 'db_UF', fallback = None).split(',')))))
fw_enabled.append(list(map(float, (self.config_file.get('FW', 'db_OF', fallback = None).split(',')))))
        # TODO: These parameters must be removed in future releases of the API
        fw_enabled.append(float(self.config_file.get('FW', 'k_UF', fallback = 0.05)))
        # the original read 'k_UF' twice; 'k_OF' matches the db_UF/db_OF pattern above
        fw_enabled.append(float(self.config_file.get('FW', 'k_OF', fallback = 0.05)))
fw_enabled.append(float(self.config_file.get('FW', 'P_avl', fallback = 1.0)))
fw_enabled.append(float(self.config_file.get('FW', 'P_min', fallback = 0.0)))
fw_enabled.append(float(self.config_file.get('FW', 'P_pre', fallback = 1.0)))
return fw_enabled
def get_impact_metrics_params(self):
metrics = list()
        # Average tank temperature under baseline
        metrics.append(float(self.config_file.get('Impact Metrics', 'ave_Tin_base', fallback = 123)))
        # Average tank temperature under grid service
        metrics.append(float(self.config_file.get('Impact Metrics', 'ave_Tin_grid', fallback = 123)))
        # Cycles in baseline ('cycle_basee' in the original looks like a typo)
        metrics.append(float(self.config_file.get('Impact Metrics', 'cycle_base', fallback = 100)))
        # Cycles in grid operation
        metrics.append(float(self.config_file.get('Impact Metrics', 'cycle_grid', fallback = 100)))
# State of Charge of the battery equivalent model under baseline
metrics.append(float(self.config_file.get('Impact Metrics', 'SOCb_metric', fallback = 100)))
# State of Charge of the battery equivalent model
metrics.append(float(self.config_file.get('Impact Metrics', 'SOC_metric', fallback = 1.0)))
# Unmet hours of the fleet
metrics.append(float(self.config_file.get('Impact Metrics', 'unmet_hours', fallback = 1.0)))
return metrics
def get_service_weight(self):
return float(self.config_file.get('Service Weighting Factor', 'ServiceWeight', fallback=0.5))
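if __name__ == "__main__":
    # Added sketch (assumption: config_file is a configparser.ConfigParser,
    # which matches the .get(section, option, fallback=...) calls above).
    # The INI content below is illustrative.
    import configparser
    parser = configparser.ConfigParser()
    parser.read_string("""
[Water Heater Fleet]
NumberSubfleets = 50
RunBaseline = True
""")
    cfg = LoadConfig(parser)
    assert cfg.get_n_subfleets() == 50
    assert cfg.get_run_baseline() is True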
|
[
"pandas.DataFrame"
] |
[((542, 556), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (554, 556), True, 'import pandas as pd\n')]
|
import os
import shutil
from pathlib import PosixPath
from tempfile import TemporaryDirectory, mkdtemp
from urllib.error import URLError
import pytest
from jmanager.models.distribution import Architecture, Version, VersionType, Component
from jmanager.utils.fetch import HTTPFetcher
from test.globals import TEST_DISTRIBUTION
TEMPORARY_RELEASE_FTP_DIR = "releases/amd64/12.0-RELEASE"
TEMPORARY_SNAPSHOT_FTP_DIR = "snapshots/amd64/12.0-STABLE"
class MockingFetcher(HTTPFetcher):
FTP_BASE_DIRECTORY = PosixPath()
def __init__(self):
self.tmp_dir = mkdtemp()
self.SERVER_URL = f"file://{self.tmp_dir}"
def __enter__(self):
for folder in [TEMPORARY_RELEASE_FTP_DIR, TEMPORARY_SNAPSHOT_FTP_DIR]:
temporary_folder = f"{self.tmp_dir}/{folder}"
os.makedirs(temporary_folder)
with open(f"{temporary_folder}/base.txz", "w") as base_file:
base_file.write("base.txz")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
class TestFetchUtils:
class ErrorToBeRaised(BaseException):
pass
def test_fetch_base_tarball(self):
with MockingFetcher() as http_fetcher:
with TemporaryDirectory() as temp_dir:
temp_dir_path = PosixPath(temp_dir)
http_fetcher.fetch_tarballs_into(
version=TEST_DISTRIBUTION.version,
architecture=TEST_DISTRIBUTION.architecture,
components=TEST_DISTRIBUTION.components,
temp_dir=temp_dir_path)
assert temp_dir_path.joinpath('base.txz').is_file()
def test_fetch_tarballs_invalid_version(self):
distribution_version = Version(major=10, minor=6, version_type=VersionType.RELEASE)
with MockingFetcher() as http_fetcher:
with pytest.raises(URLError, match=r'\[Errno 2\] No such file or directory: '):
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=PosixPath('/tmp'))
def test_fetch_tarballs_from_snapshots(self):
distribution_version = Version(major=12, minor=0, version_type=VersionType.STABLE)
with MockingFetcher() as http_fetcher:
with TemporaryDirectory() as temp_dir:
temp_dir_path = PosixPath(temp_dir)
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=temp_dir_path)
assert temp_dir_path.joinpath('base.txz').is_file()
def test_fetch_with_callback_function(self):
def callback_function(text_to_show: str, received_bytes: int, total_bytes: int, time_elapsed: float):
assert isinstance(text_to_show, str)
assert isinstance(received_bytes, int)
assert isinstance(total_bytes, int)
assert isinstance(time_elapsed, float)
raise TestFetchUtils.ErrorToBeRaised(f"test message")
distribution_version = Version(major=12, minor=0, version_type=VersionType.STABLE)
with pytest.raises(TestFetchUtils.ErrorToBeRaised):
with MockingFetcher() as http_fetcher:
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=PosixPath('/tmp'),
callback=callback_function)
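def _demo_file_url_fetch(tmp_dir: str) -> bytes:
    # Added sketch (illustrative only): the same trick MockingFetcher relies
    # on -- pointing SERVER_URL at a file:// URL so urllib-based download code
    # reads from a local directory instead of the network. `tmp_dir` must be
    # an absolute path for the file:// URL to resolve.
    from urllib.request import urlopen
    with open(f"{tmp_dir}/payload.txt", "w") as fh:
        fh.write("hello")
    with urlopen(f"file://{tmp_dir}/payload.txt") as resp:
        return resp.read()  # b"hello"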
|
[
"jmanager.models.distribution.Version",
"tempfile.TemporaryDirectory",
"os.makedirs",
"pytest.raises",
"tempfile.mkdtemp",
"pathlib.PosixPath",
"shutil.rmtree"
] |
[((508, 519), 'pathlib.PosixPath', 'PosixPath', ([], {}), '()\n', (517, 519), False, 'from pathlib import PosixPath\n'), ((568, 577), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (575, 577), False, 'from tempfile import TemporaryDirectory, mkdtemp\n'), ((1032, 1079), 'shutil.rmtree', 'shutil.rmtree', (['self.tmp_dir'], {'ignore_errors': '(True)'}), '(self.tmp_dir, ignore_errors=True)\n', (1045, 1079), False, 'import shutil\n'), ((1775, 1835), 'jmanager.models.distribution.Version', 'Version', ([], {'major': '(10)', 'minor': '(6)', 'version_type': 'VersionType.RELEASE'}), '(major=10, minor=6, version_type=VersionType.RELEASE)\n', (1782, 1835), False, 'from jmanager.models.distribution import Architecture, Version, VersionType, Component\n'), ((2308, 2367), 'jmanager.models.distribution.Version', 'Version', ([], {'major': '(12)', 'minor': '(0)', 'version_type': 'VersionType.STABLE'}), '(major=12, minor=0, version_type=VersionType.STABLE)\n', (2315, 2367), False, 'from jmanager.models.distribution import Architecture, Version, VersionType, Component\n'), ((3290, 3349), 'jmanager.models.distribution.Version', 'Version', ([], {'major': '(12)', 'minor': '(0)', 'version_type': 'VersionType.STABLE'}), '(major=12, minor=0, version_type=VersionType.STABLE)\n', (3297, 3349), False, 'from jmanager.models.distribution import Architecture, Version, VersionType, Component\n'), ((804, 833), 'os.makedirs', 'os.makedirs', (['temporary_folder'], {}), '(temporary_folder)\n', (815, 833), False, 'import os\n'), ((3364, 3409), 'pytest.raises', 'pytest.raises', (['TestFetchUtils.ErrorToBeRaised'], {}), '(TestFetchUtils.ErrorToBeRaised)\n', (3377, 3409), False, 'import pytest\n'), ((1263, 1283), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1281, 1283), False, 'from tempfile import TemporaryDirectory, mkdtemp\n'), ((1329, 1348), 'pathlib.PosixPath', 'PosixPath', (['temp_dir'], {}), '(temp_dir)\n', (1338, 1348), False, 'from pathlib import PosixPath\n'), ((1901, 1975), 'pytest.raises', 'pytest.raises', (['URLError'], {'match': '"""\\\\[Errno 2\\\\] No such file or directory: """'}), "(URLError, match='\\\\[Errno 2\\\\] No such file or directory: ')\n", (1914, 1975), False, 'import pytest\n'), ((2433, 2453), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (2451, 2453), False, 'from tempfile import TemporaryDirectory, mkdtemp\n'), ((2499, 2518), 'pathlib.PosixPath', 'PosixPath', (['temp_dir'], {}), '(temp_dir)\n', (2508, 2518), False, 'from pathlib import PosixPath\n'), ((2207, 2224), 'pathlib.PosixPath', 'PosixPath', (['"""/tmp"""'], {}), "('/tmp')\n", (2216, 2224), False, 'from pathlib import PosixPath\n'), ((3693, 3710), 'pathlib.PosixPath', 'PosixPath', (['"""/tmp"""'], {}), "('/tmp')\n", (3702, 3710), False, 'from pathlib import PosixPath\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 12:07:19 2018
@author: nn
"""
from collections import OrderedDict
import os
import cv2
from . import tf_metrics
def get_logger(arc_type, logger_params):
if arc_type == "sl":
logger = SlLogger(**logger_params)
elif arc_type == "ae":
logger = AELogger(**logger_params)
elif arc_type == "gan":
logger = GanLogger(**logger_params)
return logger
class BaseLogger:
def __init__(self, log_dir=None, out_root=None, metrics={}, metric_period=1,
sample_dirname="sample"):
if out_root is not None:
log_dir = os.path.join(out_root, log_dir)
if log_dir is not None:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
self.log_dir = log_dir
self.metrics = metrics
self.sample = sample_dirname
def start_epoch(self, trainer, loader, epoch):
self.losses = OrderedDict()
def log_batch(self, batch, loss_keys=["loss"]):
for key in loss_keys:
if key not in batch:
continue
if key not in self.losses:
self.losses[key] = 0.0
self.losses[key] += batch[key]
def end_epoch(self, trainer, loader, epoch):
raise NotImplementedError()
def get_loss_str(self):
key = ", ".join(["{} : {:.04f}".format(k, v) for k, v in self.losses.items()])
return key
def log_end(self, trainer, loader):
pass
class SlLogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out = tf_metrics.get_metrics_classifier(loader, trainer,
metrics=self.metrics)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
class AELogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out, images = tf_metrics.get_metrics_generator(loader, trainer,
metrics=self.metrics)
o_dir = os.path.join(self.log_dir, self.sample)
if not os.path.isdir(o_dir):
os.makedirs(o_dir)
for i, image in enumerate(images):
cv2.imwrite(os.path.join(o_dir, "{:05d}_{:04d}.png".format(epoch, i)), image)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
class GanLogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out, images = tf_metrics.get_metrics_generator(loader, trainer,
metrics=self.metrics)
o_dir = os.path.join(self.log_dir, self.sample)
if not os.path.isdir(o_dir):
os.makedirs(o_dir)
for i, image in enumerate(images):
cv2.imwrite(os.path.join(o_dir, "{:05d}_{:04d}.png".format(epoch, i)), image)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
|
[
"collections.OrderedDict",
"os.path.isdir",
"os.path.join",
"os.makedirs"
] |
[((904, 917), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (915, 917), False, 'from collections import OrderedDict\n'), ((2007, 2046), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.sample'], {}), '(self.log_dir, self.sample)\n', (2019, 2046), False, 'import os\n'), ((2640, 2679), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.sample'], {}), '(self.log_dir, self.sample)\n', (2652, 2679), False, 'import os\n'), ((624, 655), 'os.path.join', 'os.path.join', (['out_root', 'log_dir'], {}), '(out_root, log_dir)\n', (636, 655), False, 'import os\n'), ((2058, 2078), 'os.path.isdir', 'os.path.isdir', (['o_dir'], {}), '(o_dir)\n', (2071, 2078), False, 'import os\n'), ((2086, 2104), 'os.makedirs', 'os.makedirs', (['o_dir'], {}), '(o_dir)\n', (2097, 2104), False, 'import os\n'), ((2691, 2711), 'os.path.isdir', 'os.path.isdir', (['o_dir'], {}), '(o_dir)\n', (2704, 2711), False, 'import os\n'), ((2719, 2737), 'os.makedirs', 'os.makedirs', (['o_dir'], {}), '(o_dir)\n', (2730, 2737), False, 'import os\n'), ((697, 719), 'os.path.isdir', 'os.path.isdir', (['log_dir'], {}), '(log_dir)\n', (710, 719), False, 'import os\n'), ((729, 749), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (740, 749), False, 'import os\n')]
|
from base64 import b64encode
import pytest
from asgi_webdav.constants import DAVPath, DAVUser
from asgi_webdav.config import update_config_from_obj, get_config
from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth
from asgi_webdav.request import DAVRequest
USERNAME = "username"
PASSWORD = "password"
HASHLIB_USER = "user-hashlib"
basic_authorization = b"Basic " + b64encode(
"{}:{}".format(USERNAME, PASSWORD).encode("utf-8")
)
basic_authorization_bad = b"Basic bad basic_authorization"
def get_basic_authorization(username, password) -> bytes:
return b"Basic " + b64encode("{}:{}".format(username, password).encode("utf-8"))
def fake_call():
pass
request = DAVRequest(
{"method": "GET", "headers": {b"authorization": b"placeholder"}, "path": "/"},
fake_call,
fake_call,
)
def test_dav_password_class():
pw_obj = DAVPassword("password")
assert pw_obj.type == DAVPasswordType.RAW
pw_obj = DAVPassword(
"<hashlib>:sha256:salt:291e247d155354e48fec2b579637782446821935fc96a5a08a0b7885179c408b"
)
assert pw_obj.type == DAVPasswordType.HASHLIB
pw_obj = DAVPassword("<digest>:ASGI-WebDAV:c1d34f1e0f457c4de05b7468d5165567")
assert pw_obj.type == DAVPasswordType.DIGEST
pw_obj = DAVPassword(
"<ldap>#1#ldaps://rexzhang.myds.me#SIMPLE#"
"uid=user-ldap,cn=users,dc=rexzhang,dc=myds,dc=me"
)
assert pw_obj.type == DAVPasswordType.LDAP
@pytest.mark.asyncio
async def test_basic_access_authentication():
config_data = {
"account_mapping": [
{"username": USERNAME, "password": PASSWORD, "permissions": list()},
{
"username": HASHLIB_USER,
"password": "<<PASSWORD>:"
"<PASSWORD>",
"permissions": list(),
},
]
}
update_config_from_obj(config_data)
dav_auth = DAVAuth(get_config())
request.headers[b"authorization"] = get_basic_authorization(USERNAME, PASSWORD)
user, message = await dav_auth.pick_out_user(request)
print(basic_authorization)
print(user)
print(message)
assert isinstance(user, DAVUser)
request.headers[b"authorization"] = get_basic_authorization(HASHLIB_USER, PASSWORD)
user, message = await dav_auth.pick_out_user(request)
assert isinstance(user, DAVUser)
request.headers[b"authorization"] = basic_authorization_bad
user, message = await dav_auth.pick_out_user(request)
print(user)
print(message)
assert user is None
def test_verify_permission():
username = USERNAME
password = PASSWORD
admin = False
# "+"
permissions = ["+^/aa"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission([DAVPath("/a")])
assert dav_user.check_paths_permission([DAVPath("/aa")])
assert dav_user.check_paths_permission([DAVPath("/aaa")])
permissions = ["+^/bbb"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission(
[DAVPath("/aaa")],
)
# "-"
permissions = ["-^/aaa"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission(
[DAVPath("/aaa")],
)
# "$"
permissions = ["+^/a$"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/ab")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
# multi-rules
permissions = ["+^/a$", "+^/a/b"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
permissions = ["+^/a$", "+^/a/b", "-^/a/b/c"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b/c")],
)
permissions = ["+^/a$", "+^/a/b1", "-^/a/b2"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b1")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b2")],
)
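def _check_path(permissions, path):
    # Added sketch: the rule semantics the assertions above imply (an
    # inference from these tests, not asgi_webdav's actual implementation).
    # Each rule is "+<regex>" (allow) or "-<regex>" (deny); a path passes iff
    # some allow rule matches it and no deny rule does.
    import re
    allowed = any(re.match(p[1:], path) for p in permissions if p.startswith("+"))
    denied = any(re.match(p[1:], path) for p in permissions if p.startswith("-"))
    return allowed and not denied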
|
[
"asgi_webdav.config.get_config",
"asgi_webdav.config.update_config_from_obj",
"asgi_webdav.constants.DAVUser",
"asgi_webdav.request.DAVRequest",
"asgi_webdav.auth.DAVPassword",
"asgi_webdav.constants.DAVPath"
] |
[((695, 810), 'asgi_webdav.request.DAVRequest', 'DAVRequest', (["{'method': 'GET', 'headers': {b'authorization': b'placeholder'}, 'path': '/'}", 'fake_call', 'fake_call'], {}), "({'method': 'GET', 'headers': {b'authorization': b'placeholder'},\n 'path': '/'}, fake_call, fake_call)\n", (705, 810), False, 'from asgi_webdav.request import DAVRequest\n'), ((868, 891), 'asgi_webdav.auth.DAVPassword', 'DAVPassword', (['"""password"""'], {}), "('password')\n", (879, 891), False, 'from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth\n'), ((952, 1063), 'asgi_webdav.auth.DAVPassword', 'DAVPassword', (['"""<hashlib>:sha256:salt:291e247d155354e48fec2b579637782446821935fc96a5a08a0b7885179c408b"""'], {}), "(\n '<hashlib>:sha256:salt:291e247d155354e48fec2b579637782446821935fc96a5a08a0b7885179c408b'\n )\n", (963, 1063), False, 'from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth\n'), ((1132, 1200), 'asgi_webdav.auth.DAVPassword', 'DAVPassword', (['"""<digest>:ASGI-WebDAV:c1d34f1e0f457c4de05b7468d5165567"""'], {}), "('<digest>:ASGI-WebDAV:c1d34f1e0f457c4de05b7468d5165567')\n", (1143, 1200), False, 'from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth\n'), ((1264, 1378), 'asgi_webdav.auth.DAVPassword', 'DAVPassword', (['"""<ldap>#1#ldaps://rexzhang.myds.me#SIMPLE#uid=user-ldap,cn=users,dc=rexzhang,dc=myds,dc=me"""'], {}), "(\n '<ldap>#1#ldaps://rexzhang.myds.me#SIMPLE#uid=user-ldap,cn=users,dc=rexzhang,dc=myds,dc=me'\n )\n", (1275, 1378), False, 'from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth\n'), ((1843, 1878), 'asgi_webdav.config.update_config_from_obj', 'update_config_from_obj', (['config_data'], {}), '(config_data)\n', (1865, 1878), False, 'from asgi_webdav.config import update_config_from_obj, get_config\n'), ((2680, 2727), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (2687, 2727), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((2960, 3007), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (2967, 3007), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3144, 3191), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (3151, 3191), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3327, 3374), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (3334, 3374), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3683, 3730), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (3690, 3730), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3949, 3996), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (3956, 3996), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4298, 4345), 'asgi_webdav.constants.DAVUser', 'DAVUser', (['username', 'password', 'permissions', 'admin'], {}), '(username, password, permissions, admin)\n', (4305, 4345), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((1902, 1914), 'asgi_webdav.config.get_config', 'get_config', ([], {}), '()\n', (1912, 1914), False, 'from asgi_webdav.config import update_config_from_obj, get_config\n'), ((2836, 2850), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/aa"""'], {}), "('/aa')\n", (2843, 2850), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((2897, 2912), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/aaa"""'], {}), "('/aaa')\n", (2904, 2912), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3428, 3441), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a"""'], {}), "('/a')\n", (3435, 3441), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3784, 3797), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a"""'], {}), "('/a')\n", (3791, 3797), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3859, 3874), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b"""'], {}), "('/a/b')\n", (3866, 3874), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4050, 4063), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a"""'], {}), "('/a')\n", (4057, 4063), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4125, 4140), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b"""'], {}), "('/a/b')\n", (4132, 4140), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4399, 4412), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a"""'], {}), "('/a')\n", (4406, 4412), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4474, 4490), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b1"""'], {}), "('/a/b1')\n", (4481, 4490), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((2776, 2789), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a"""'], {}), "('/a')\n", (2783, 2789), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3065, 3080), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/aaa"""'], {}), "('/aaa')\n", (3072, 3080), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3249, 3264), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/aaa"""'], {}), "('/aaa')\n", (3256, 3264), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3507, 3521), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/ab"""'], {}), "('/ab')\n", (3514, 3521), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((3587, 3602), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b"""'], {}), "('/a/b')\n", (3594, 3602), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4206, 4223), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b/c"""'], {}), "('/a/b/c')\n", (4213, 4223), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n'), ((4556, 4572), 'asgi_webdav.constants.DAVPath', 'DAVPath', (['"""/a/b2"""'], {}), "('/a/b2')\n", (4563, 4572), False, 'from asgi_webdav.constants import DAVPath, DAVUser\n')]
|
import os
import ndjson
import json
import time
from options import TestOptions
from framework import SketchModel
from utils import load_data
from writer import Writer
import numpy as np
from evalTool import *
def run_eval(opt=None, model=None, loader=None, dataset='test', write_result=False):
if opt is None:
opt = TestOptions().parse()
if model is None:
model = SketchModel(opt)
if loader is None:
loader = load_data(opt, datasetType=dataset, permutation=opt.permutation)
# print(len(loader))
if opt.eval_way == 'align':
predictList, lossList = eval_align_batchN(model, loader, P=opt.points_num)
elif opt.eval_way == 'unalign':
predictList, lossList = eval_unalign_batch1(model, loader)
else:
raise NotImplementedError('eval_way {} not implemented!'.format(opt.eval_way))
# print(predictList.shape)
testData = []
with open(os.path.join('data', opt.dataset, 'train',
'{}_{}.ndjson'.format(opt.class_name, dataset)), 'r') as f:
testData = ndjson.load(f)
if opt.metric_way == 'wlen':
p_metric_list, c_metric_list = eval_with_len(testData, predictList)
elif opt.metric_way == 'wolen':
p_metric_list, c_metric_list = eval_without_len(testData, predictList)
else:
raise NotImplementedError('metric_way {} not implemented!'.format(opt.metric_way))
if write_result:
testData = get_eval_result(testData, predictList)
result_path = os.path.join('data', opt.dataset, 'train', '{}_{}.ndjson'.format(opt.class_name, 'res'))
with open(result_path, 'w') as f:
ndjson.dump(testData, f)
loss_avg = np.average(lossList)
P_metric = np.average(p_metric_list)
C_metric = np.average(c_metric_list)
# print('P_metric:{:.4}%\tC_metric:{:.4}%'.format(P_metric*100, C_metric*100))
return loss_avg, P_metric, C_metric
if __name__ == "__main__":
_, P_metric, C_metric = run_eval(write_result=True)
print('P_metric:{:.4}%\tC_metric:{:.4}%'.format(P_metric*100, C_metric*100))
|
[
"utils.load_data",
"numpy.average",
"ndjson.load",
"options.TestOptions",
"ndjson.dump",
"framework.SketchModel"
] |
[((1682, 1702), 'numpy.average', 'np.average', (['lossList'], {}), '(lossList)\n', (1692, 1702), True, 'import numpy as np\n'), ((1718, 1743), 'numpy.average', 'np.average', (['p_metric_list'], {}), '(p_metric_list)\n', (1728, 1743), True, 'import numpy as np\n'), ((1759, 1784), 'numpy.average', 'np.average', (['c_metric_list'], {}), '(c_metric_list)\n', (1769, 1784), True, 'import numpy as np\n'), ((390, 406), 'framework.SketchModel', 'SketchModel', (['opt'], {}), '(opt)\n', (401, 406), False, 'from framework import SketchModel\n'), ((447, 511), 'utils.load_data', 'load_data', (['opt'], {'datasetType': 'dataset', 'permutation': 'opt.permutation'}), '(opt, datasetType=dataset, permutation=opt.permutation)\n', (456, 511), False, 'from utils import load_data\n'), ((1051, 1065), 'ndjson.load', 'ndjson.load', (['f'], {}), '(f)\n', (1062, 1065), False, 'import ndjson\n'), ((1637, 1661), 'ndjson.dump', 'ndjson.dump', (['testData', 'f'], {}), '(testData, f)\n', (1648, 1661), False, 'import ndjson\n'), ((330, 343), 'options.TestOptions', 'TestOptions', ([], {}), '()\n', (341, 343), False, 'from options import TestOptions\n')]
|
import sys
import time
import aiogram.types
from aiogram import types, Dispatcher, Bot
from aiogram.dispatcher import FSMContext
sys.path.append('bot')
from database.sess import get_users_by_link
from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, \
add_channels, reemove_channels
from python.States.StatesClasses import Adding, Removing
from python.config import bToken, admin_chat, admin_id
bot = Bot(token=bToken)
async def start_command(message: types.Message):
await message.answer("Hey!\n"
"I am a parser for telegram channels.\n\n"
"For the main menu, send - /menu.\n"
"Everything will be written in detail there.")
await message.answer_sticker(r'CAACAgIAAxkBAAIKpWHbI3SO<KEY>')
    # ADD THE NEW USER TO THE DATABASE
await create_new_user(message.from_user.id, message.from_user.username, 'on')
async def main_menu(message: types.Message):
await message.answer(f"Welcome to the main menu, {message.from_user.username}!\n"
f"I am a bot that parses telegram channels.\n\n"
"Here are my commands:\n"
"/menu - main menu\n"
"/parse_channels - a list of channels you are following\n"
"/add_parse_channel - add to the channel list\n"
"/remove_parse_channel - remove a channel from the list\n"
"/off - turn off parsing of your channels\n"
"/on - enable parsing of your channels\n\n"
f"P.S. now - {await check_on_off(message.from_user.id)}\n\n"
"My creator is @darrso")
await message.answer_sticker(r'CAACAgIAAxkBAAIKrmHbJw6ckgI0IrCLe_TJrbUyCJ_xAALRAAM27BsFCW1Sl32PAAEsIwQ')
async def switch_parametr(message: types.Message):
    text = message.text.replace("/", "")
if text == 'on':
await switch_on_off('on', message.from_user.id)
elif text == 'off':
await switch_on_off('off', message.from_user.id)
await message.answer(f'Parameter changed to: {text}'
f'\nSend /menu to check it out!')
async def parse_channel(message: types.Message):
data = (await check_parse_channels(message.from_user.id))
if data:
await message.answer(f'Here is the list of channels you are parsing:\n{data}\n\n'
f'Delete channel - /remove_parse_channel\n'
f'Add channel - /add_parse_channel\n'
f'Main menu - /menu')
else:
await message.answer("You are not parsing any channels yet.\n\nTo add channels send /add_parse_channel")
async def add_channel(message: types.Message):
await message.answer("To add a new channel send LINK TO CHANNEL\n\n"
"Example:\n"
"https://t.me/test\n\n"
"P.S. The bot cannot join private channels.\n"
"You can add a channel to the list of those that you are parsing, but the bot will subscribe to it only after a while\n"
"(you will receive a notification about this)")
await Adding.first.set()
async def adding_channel(message: types.Message, state: FSMContext):
res = await check_channel(message.text, message.from_user.id)
if res == 'NOT LINK!':
await message.answer('This link is not working!\n'
'Try again - /add_parse_channel')
elif res:
await bot.send_message(chat_id=admin_chat, text="/add " + message.text)
await add_channels(message.from_user.id, message.text)
await message.answer('Successfully!\n\nSend /menu for main menu!')
else:
if await add_channels(message.from_user.id, message.text):
await message.answer('Successfully!\n\nSend /menu for main menu!')
else:
await message.answer('This channel is already on your list!\n\n'
'View a list of your channels - /parse_channels')
await state.finish()
async def remove_channel(message: types.Message):
data = (await check_parse_channels(message.from_user.id))
if data == 'No one channels':
await message.answer("You cannot remove telegram channels from the list, because you have not added any!\n\n"
"Checking the list of channels - /parse_channels")
else:
await message.answer("Choose number of channel and send it!\n"
"Example:\n"
"1\n\n"
f"Here is the list of channels you are parsing:\n{data}")
await Removing.first.set()
async def removing_channel(message: types.Message, state: FSMContext):
data = await reemove_channels(message.from_user.id, message.text)
if data:
await message.answer('Success!\n\n'
'List of your channels - /parse_channels')
else:
await message.answer('Error!\n\n'
'Try again - /remove_parse_channel\n'
'Main menu - /menu')
await state.finish()
async def new_post(message: types.Message):
    try:
        # NOTE: time.sleep blocks the event loop; asyncio.sleep would be preferable here
        time.sleep(2)
        if message.chat.id == admin_chat and message.text[0:9] == "/NEW_POST":
            messageid = message.message_id + 1
            users = await get_users_by_link(message.text[10:])
            if users:
                for i in users:
                    await bot.forward_message(chat_id=int(i), from_chat_id=admin_chat, message_id=messageid)
    except Exception:
        # Swallow malformed channel posts rather than crashing the handler
        pass
def register_message_handlers(dp: Dispatcher):
dp.register_message_handler(start_command, commands="start")
dp.register_message_handler(main_menu, commands="menu")
dp.register_message_handler(switch_parametr, commands=['on', 'off'])
dp.register_message_handler(parse_channel, commands="parse_channels")
dp.register_message_handler(add_channel, commands='add_parse_channel')
dp.register_message_handler(adding_channel, state=Adding.first)
dp.register_message_handler(remove_channel, commands='remove_parse_channel')
dp.register_message_handler(removing_channel, state=Removing.first)
dp.register_channel_post_handler(new_post, lambda message: message.text[0:9] == "/NEW_POST")
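# Usage sketch (illustrative; assumes aiogram v2's polling API, with an FSM
# storage backend for the Adding/Removing states):
#
#     from aiogram import executor
#     from aiogram.contrib.fsm_storage.memory import MemoryStorage
#
#     dp = Dispatcher(bot, storage=MemoryStorage())
#     register_message_handlers(dp)
#     executor.start_polling(dp, skip_updates=True)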
|
[
"sys.path.append",
"database.sess.check_parse_channels",
"python.States.StatesClasses.Adding.first.set",
"database.sess.reemove_channels",
"database.sess.add_channels",
"database.sess.get_users_by_link",
"time.sleep",
"aiogram.Bot",
"database.sess.switch_on_off",
"database.sess.create_new_user",
"database.sess.check_on_off",
"database.sess.check_channel",
"python.States.StatesClasses.Removing.first.set"
] |
[((129, 151), 'sys.path.append', 'sys.path.append', (['"""bot"""'], {}), "('bot')\n", (144, 151), False, 'import sys\n'), ((461, 478), 'aiogram.Bot', 'Bot', ([], {'token': 'bToken'}), '(token=bToken)\n', (464, 478), False, 'from aiogram import types, Dispatcher, Bot\n'), ((896, 967), 'database.sess.create_new_user', 'create_new_user', (['message.from_user.id', 'message.from_user.username', '"""on"""'], {}), "(message.from_user.id, message.from_user.username, 'on')\n", (911, 967), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((2339, 2381), 'database.sess.check_parse_channels', 'check_parse_channels', (['message.from_user.id'], {}), '(message.from_user.id)\n', (2359, 2381), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((3311, 3329), 'python.States.StatesClasses.Adding.first.set', 'Adding.first.set', ([], {}), '()\n', (3327, 3329), False, 'from python.States.StatesClasses import Adding, Removing\n'), ((3417, 3466), 'database.sess.check_channel', 'check_channel', (['message.text', 'message.from_user.id'], {}), '(message.text, message.from_user.id)\n', (3430, 3466), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((4273, 4315), 'database.sess.check_parse_channels', 'check_parse_channels', (['message.from_user.id'], {}), '(message.from_user.id)\n', (4293, 4315), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((4921, 4973), 'database.sess.reemove_channels', 'reemove_channels', (['message.from_user.id', 'message.text'], {}), '(message.from_user.id, message.text)\n', (4937, 4973), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((5360, 5373), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5370, 5373), False, 'import time\n'), ((2031, 2072), 'database.sess.switch_on_off', 'switch_on_off', (['"""on"""', 'message.from_user.id'], {}), "('on', message.from_user.id)\n", (2044, 2072), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((4810, 4830), 'python.States.StatesClasses.Removing.first.set', 'Removing.first.set', ([], {}), '()\n', (4828, 4830), False, 'from python.States.StatesClasses import Adding, Removing\n'), ((2111, 2153), 'database.sess.switch_on_off', 'switch_on_off', (['"""off"""', 'message.from_user.id'], {}), "('off', message.from_user.id)\n", (2124, 2153), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((3724, 3772), 'database.sess.add_channels', 'add_channels', (['message.from_user.id', 'message.text'], {}), '(message.from_user.id, message.text)\n', (3736, 3772), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((3875, 3923), 'database.sess.add_channels', 'add_channels', (['message.from_user.id', 'message.text'], {}), '(message.from_user.id, message.text)\n', (3887, 3923), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, 
check_parse_channels, check_channel, add_channels, reemove_channels\n'), ((5586, 5622), 'database.sess.get_users_by_link', 'get_users_by_link', (['message.text[10:]'], {}), '(message.text[10:])\n', (5603, 5622), False, 'from database.sess import get_users_by_link\n'), ((1699, 1733), 'database.sess.check_on_off', 'check_on_off', (['message.from_user.id'], {}), '(message.from_user.id)\n', (1711, 1733), False, 'from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, add_channels, reemove_channels\n')]
|
"""
Author: <NAME> (<EMAIL>)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
        # debug: recompute the similarity product in numpy (results are unused)
        a = anchor_feature.detach().cpu().numpy()
        b = contrast_feature.T.detach().cpu().numpy()
        c = anchor_dot_contrast.detach().cpu().numpy()
        d = np.matmul(a, b)
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
def testNan(self, x):
x = x.detach().cpu().numpy()
return np.isnan(x).any()
# Loss used for contrastive learning in CLOCS
def obtain_contrastive_loss(latent_embeddings, pids, trial):
""" Calculate NCE Loss For Latent Embeddings in Batch
Args:
latent_embeddings (torch.Tensor): embeddings from model for different perturbations of same instance (BxHxN)
pids (list): patient ids of instances in batch
Outputs:
loss (torch.Tensor): scalar NCE loss
"""
if trial in ['CMSC', 'CMLC', 'CMSMLC']:
        pids = np.array(pids, dtype=object)  # np.object was removed in NumPy 1.24
pid1, pid2 = np.meshgrid(pids, pids)
pid_matrix = pid1 + '-' + pid2
pids_of_interest = np.unique(pids + '-' + pids) # unique combinations of pids of interest i.e. matching
bool_matrix_of_interest = np.zeros((len(pids), len(pids)))
for pid in pids_of_interest:
bool_matrix_of_interest += pid_matrix == pid
rows1, cols1 = np.where(np.triu(bool_matrix_of_interest, 1))
rows2, cols2 = np.where(np.tril(bool_matrix_of_interest, -1))
nviews = set(range(latent_embeddings.shape[2]))
view_combinations = combinations(nviews, 2)
loss = 0
ncombinations = 0
loss_terms = 2
    # If you hit "UnboundLocalError: local variable 'loss_terms' referenced before assignment",
    # just restart PyCharm!
for combination in view_combinations:
view1_array = latent_embeddings[:, :, combination[0]] # (BxH)
view2_array = latent_embeddings[:, :, combination[1]] # (BxH)
norm1_vector = view1_array.norm(dim=1).unsqueeze(0)
norm2_vector = view2_array.norm(dim=1).unsqueeze(0)
sim_matrix = torch.mm(view1_array, view2_array.transpose(0, 1))
norm_matrix = torch.mm(norm1_vector.transpose(0, 1), norm2_vector)
temperature = 0.1
argument = sim_matrix / (norm_matrix * temperature)
sim_matrix_exp = torch.exp(argument)
if trial == 'CMC':
""" Obtain Off Diagonal Entries """
# upper_triangle = torch.triu(sim_matrix_exp,1)
# lower_triangle = torch.tril(sim_matrix_exp,-1)
# off_diagonals = upper_triangle + lower_triangle
diagonals = torch.diag(sim_matrix_exp)
""" Obtain Loss Terms(s) """
loss_term1 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 1)))
loss_term2 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 0)))
loss += loss_term1 + loss_term2
loss_terms = 2
elif trial == 'SimCLR':
self_sim_matrix1 = torch.mm(view1_array, view1_array.transpose(0, 1))
self_norm_matrix1 = torch.mm(norm1_vector.transpose(0, 1), norm1_vector)
temperature = 0.1
argument = self_sim_matrix1 / (self_norm_matrix1 * temperature)
self_sim_matrix_exp1 = torch.exp(argument)
self_sim_matrix_off_diagonals1 = torch.triu(self_sim_matrix_exp1, 1) + torch.tril(self_sim_matrix_exp1, -1)
self_sim_matrix2 = torch.mm(view2_array, view2_array.transpose(0, 1))
self_norm_matrix2 = torch.mm(norm2_vector.transpose(0, 1), norm2_vector)
temperature = 0.1
argument = self_sim_matrix2 / (self_norm_matrix2 * temperature)
self_sim_matrix_exp2 = torch.exp(argument)
self_sim_matrix_off_diagonals2 = torch.triu(self_sim_matrix_exp2, 1) + torch.tril(self_sim_matrix_exp2, -1)
denominator_loss1 = torch.sum(sim_matrix_exp, 1) + torch.sum(self_sim_matrix_off_diagonals1, 1)
denominator_loss2 = torch.sum(sim_matrix_exp, 0) + torch.sum(self_sim_matrix_off_diagonals2, 0)
diagonals = torch.diag(sim_matrix_exp)
loss_term1 = -torch.mean(torch.log(diagonals / denominator_loss1))
loss_term2 = -torch.mean(torch.log(diagonals / denominator_loss2))
loss += loss_term1 + loss_term2
loss_terms = 2
elif trial in ['CMSC', 'CMLC', 'CMSMLC']: # ours #CMSMLC = positive examples are same instance and same patient
triu_elements = sim_matrix_exp[rows1, cols1]
tril_elements = sim_matrix_exp[rows2, cols2]
diag_elements = torch.diag(sim_matrix_exp)
triu_sum = torch.sum(sim_matrix_exp, 1)
tril_sum = torch.sum(sim_matrix_exp, 0)
loss_diag1 = -torch.mean(torch.log(diag_elements / triu_sum))
loss_diag2 = -torch.mean(torch.log(diag_elements / tril_sum))
loss_triu = -torch.mean(torch.log(triu_elements / triu_sum[rows1]))
loss_tril = -torch.mean(torch.log(tril_elements / tril_sum[cols2]))
loss = loss_diag1 + loss_diag2
loss_terms = 2
if len(rows1) > 0:
loss += loss_triu # technically need to add 1 more term for symmetry
loss_terms += 1
if len(rows2) > 0:
loss += loss_tril # technically need to add 1 more term for symmetry
loss_terms += 1
# print(loss,loss_triu,loss_tril)
ncombinations += 1
loss = loss / (loss_terms * ncombinations)
return loss
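if __name__ == "__main__":
    # Minimal smoke test; shapes, labels and patient ids below are illustrative
    # and not taken from the original SupCon/CLOCS training pipelines.
    torch.manual_seed(0)
    feats = nn.functional.normalize(torch.randn(8, 2, 128), dim=2)  # [bsz, n_views, dim]
    labels = torch.randint(0, 4, (8,))
    print("SupConLoss:", SupConLoss(temperature=0.07)(feats, labels).item())
    latents = torch.randn(8, 64, 2)  # (B x H x N views)
    pids = ["p0", "p0", "p1", "p1", "p2", "p2", "p3", "p3"]
    print("CMSC loss:", obtain_contrastive_loss(latents, pids, "CMSC").item())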
|
[
"numpy.triu",
"torch.eye",
"numpy.isnan",
"torch.arange",
"torch.device",
"numpy.unique",
"numpy.meshgrid",
"torch.diag",
"torch.exp",
"torch.triu",
"torch.unbind",
"torch.matmul",
"torch.log",
"itertools.combinations",
"torch.max",
"torch.sum",
"torch.ones_like",
"torch.eq",
"numpy.tril",
"numpy.array",
"torch.tril",
"numpy.matmul"
] |
[((5136, 5159), 'itertools.combinations', 'combinations', (['nviews', '(2)'], {}), '(nviews, 2)\n', (5148, 5159), False, 'from itertools import combinations\n'), ((2896, 2947), 'torch.max', 'torch.max', (['anchor_dot_contrast'], {'dim': '(1)', 'keepdim': '(True)'}), '(anchor_dot_contrast, dim=1, keepdim=True)\n', (2905, 2947), False, 'import torch\n'), ((3180, 3195), 'numpy.matmul', 'np.matmul', (['a', 'b'], {}), '(a, b)\n', (3189, 3195), True, 'import numpy as np\n'), ((4530, 4561), 'numpy.array', 'np.array', (['pids'], {'dtype': 'np.object'}), '(pids, dtype=np.object)\n', (4538, 4561), True, 'import numpy as np\n'), ((4583, 4606), 'numpy.meshgrid', 'np.meshgrid', (['pids', 'pids'], {}), '(pids, pids)\n', (4594, 4606), True, 'import numpy as np\n'), ((4673, 4701), 'numpy.unique', 'np.unique', (["(pids + '-' + pids)"], {}), "(pids + '-' + pids)\n", (4682, 4701), True, 'import numpy as np\n'), ((5884, 5903), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (5893, 5903), False, 'import torch\n'), ((1213, 1233), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1225, 1233), False, 'import torch\n'), ((1295, 1314), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1307, 1314), False, 'import torch\n'), ((2308, 2337), 'torch.unbind', 'torch.unbind', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (2320, 2337), False, 'import torch\n'), ((2758, 2806), 'torch.matmul', 'torch.matmul', (['anchor_feature', 'contrast_feature.T'], {}), '(anchor_feature, contrast_feature.T)\n', (2770, 2806), False, 'import torch\n'), ((3363, 3384), 'torch.ones_like', 'torch.ones_like', (['mask'], {}), '(mask)\n', (3378, 3384), False, 'import torch\n'), ((3584, 3601), 'torch.exp', 'torch.exp', (['logits'], {}), '(logits)\n', (3593, 3601), False, 'import torch\n'), ((4952, 4987), 'numpy.triu', 'np.triu', (['bool_matrix_of_interest', '(1)'], {}), '(bool_matrix_of_interest, 1)\n', (4959, 4987), True, 'import numpy as np\n'), ((5021, 5057), 'numpy.tril', 'np.tril', (['bool_matrix_of_interest', '(-1)'], {}), '(bool_matrix_of_interest, -1)\n', (5028, 5057), True, 'import numpy as np\n'), ((6187, 6213), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (6197, 6213), False, 'import torch\n'), ((4064, 4075), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4072, 4075), True, 'import numpy as np\n'), ((6846, 6865), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (6855, 6865), False, 'import torch\n'), ((7295, 7314), 'torch.exp', 'torch.exp', (['argument'], {}), '(argument)\n', (7304, 7314), False, 'import torch\n'), ((7677, 7703), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (7687, 7703), False, 'import torch\n'), ((6911, 6946), 'torch.triu', 'torch.triu', (['self_sim_matrix_exp1', '(1)'], {}), '(self_sim_matrix_exp1, 1)\n', (6921, 6946), False, 'import torch\n'), ((6949, 6985), 'torch.tril', 'torch.tril', (['self_sim_matrix_exp1', '(-1)'], {}), '(self_sim_matrix_exp1, -1)\n', (6959, 6985), False, 'import torch\n'), ((7360, 7395), 'torch.triu', 'torch.triu', (['self_sim_matrix_exp2', '(1)'], {}), '(self_sim_matrix_exp2, 1)\n', (7370, 7395), False, 'import torch\n'), ((7398, 7434), 'torch.tril', 'torch.tril', (['self_sim_matrix_exp2', '(-1)'], {}), '(self_sim_matrix_exp2, -1)\n', (7408, 7434), False, 'import torch\n'), ((7468, 7496), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (7477, 7496), False, 'import torch\n'), ((7499, 7543), 'torch.sum', 'torch.sum', 
(['self_sim_matrix_off_diagonals1', '(1)'], {}), '(self_sim_matrix_off_diagonals1, 1)\n', (7508, 7543), False, 'import torch\n'), ((7576, 7604), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (7585, 7604), False, 'import torch\n'), ((7607, 7651), 'torch.sum', 'torch.sum', (['self_sim_matrix_off_diagonals2', '(0)'], {}), '(self_sim_matrix_off_diagonals2, 0)\n', (7616, 7651), False, 'import torch\n'), ((8196, 8222), 'torch.diag', 'torch.diag', (['sim_matrix_exp'], {}), '(sim_matrix_exp)\n', (8206, 8222), False, 'import torch\n'), ((8247, 8275), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (8256, 8275), False, 'import torch\n'), ((8299, 8327), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (8308, 8327), False, 'import torch\n'), ((1838, 1880), 'torch.eye', 'torch.eye', (['batch_size'], {'dtype': 'torch.float32'}), '(batch_size, dtype=torch.float32)\n', (1847, 1880), False, 'import torch\n'), ((7741, 7781), 'torch.log', 'torch.log', (['(diagonals / denominator_loss1)'], {}), '(diagonals / denominator_loss1)\n', (7750, 7781), False, 'import torch\n'), ((7820, 7860), 'torch.log', 'torch.log', (['(diagonals / denominator_loss2)'], {}), '(diagonals / denominator_loss2)\n', (7829, 7860), False, 'import torch\n'), ((3413, 3452), 'torch.arange', 'torch.arange', (['(batch_size * anchor_count)'], {}), '(batch_size * anchor_count)\n', (3425, 3452), False, 'import torch\n'), ((6314, 6342), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(1)'], {}), '(sim_matrix_exp, 1)\n', (6323, 6342), False, 'import torch\n'), ((6404, 6432), 'torch.sum', 'torch.sum', (['sim_matrix_exp', '(0)'], {}), '(sim_matrix_exp, 0)\n', (6413, 6432), False, 'import torch\n'), ((8366, 8401), 'torch.log', 'torch.log', (['(diag_elements / triu_sum)'], {}), '(diag_elements / triu_sum)\n', (8375, 8401), False, 'import torch\n'), ((8440, 8475), 'torch.log', 'torch.log', (['(diag_elements / tril_sum)'], {}), '(diag_elements / tril_sum)\n', (8449, 8475), False, 'import torch\n'), ((8514, 8556), 'torch.log', 'torch.log', (['(triu_elements / triu_sum[rows1])'], {}), '(triu_elements / triu_sum[rows1])\n', (8523, 8556), False, 'import torch\n'), ((8594, 8636), 'torch.log', 'torch.log', (['(tril_elements / tril_sum[cols2])'], {}), '(tril_elements / tril_sum[cols2])\n', (8603, 8636), False, 'import torch\n'), ((2124, 2150), 'torch.eq', 'torch.eq', (['labels', 'labels.T'], {}), '(labels, labels.T)\n', (2132, 2150), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
from rete import Has, Filter, Rule
from rete.common import WME, Bind
from rete.network import Network
def test_filter_compare():
net = Network()
c0 = Has('spu:1', 'price', '$x')
f0 = Filter('$x>100')
f1 = Filter('$x<200')
f2 = Filter('$x>200 and $x<400')
f3 = Filter('$x>300')
p0 = net.add_production(Rule(c0, f0, f1))
p1 = net.add_production(Rule(c0, f2))
p2 = net.add_production(Rule(c0, f3))
net.add_wme(WME('spu:1', 'price', '100'))
net.add_wme(WME('spu:1', 'price', '150'))
net.add_wme(WME('spu:1', 'price', '300'))
assert len(p0.items) == 1
token = p0.items.pop()
assert token.get_binding('$x') == '150'
assert len(p1.items) == 1
token = p1.items.pop()
assert token.get_binding('$x') == '300'
assert not p2.items
def test_bind():
net = Network()
c0 = Has('spu:1', 'sales', '$x')
b0 = Bind('len(set($x) & set(range(1, 100)))', '$num')
f0 = Filter('$num > 0')
p0 = net.add_production(Rule(c0, b0, f0))
b1 = Bind('len(set($x) & set(range(100, 200)))', '$num')
p1 = net.add_production(Rule(c0, b1, f0))
b2 = Bind('len(set($x) & set(range(300, 400)))', '$num')
p2 = net.add_production(Rule(c0, b2, f0))
net.add_wme(WME('spu:1', 'sales', 'range(50, 110)'))
assert len(p0.items) == 1
assert len(p1.items) == 1
assert len(p2.items) == 0
t0 = p0.items[0]
t1 = p1.items[0]
assert t0.get_binding('$num') == 50
assert t1.get_binding('$num') == 10
|
[
"rete.Has",
"rete.common.Bind",
"rete.network.Network",
"rete.common.WME",
"rete.Rule",
"rete.Filter"
] |
[((165, 174), 'rete.network.Network', 'Network', ([], {}), '()\n', (172, 174), False, 'from rete.network import Network\n'), ((184, 211), 'rete.Has', 'Has', (['"""spu:1"""', '"""price"""', '"""$x"""'], {}), "('spu:1', 'price', '$x')\n", (187, 211), False, 'from rete import Has, Filter, Rule\n'), ((221, 237), 'rete.Filter', 'Filter', (['"""$x>100"""'], {}), "('$x>100')\n", (227, 237), False, 'from rete import Has, Filter, Rule\n'), ((247, 263), 'rete.Filter', 'Filter', (['"""$x<200"""'], {}), "('$x<200')\n", (253, 263), False, 'from rete import Has, Filter, Rule\n'), ((273, 300), 'rete.Filter', 'Filter', (['"""$x>200 and $x<400"""'], {}), "('$x>200 and $x<400')\n", (279, 300), False, 'from rete import Has, Filter, Rule\n'), ((310, 326), 'rete.Filter', 'Filter', (['"""$x>300"""'], {}), "('$x>300')\n", (316, 326), False, 'from rete import Has, Filter, Rule\n'), ((854, 863), 'rete.network.Network', 'Network', ([], {}), '()\n', (861, 863), False, 'from rete.network import Network\n'), ((873, 900), 'rete.Has', 'Has', (['"""spu:1"""', '"""sales"""', '"""$x"""'], {}), "('spu:1', 'sales', '$x')\n", (876, 900), False, 'from rete import Has, Filter, Rule\n'), ((910, 959), 'rete.common.Bind', 'Bind', (['"""len(set($x) & set(range(1, 100)))"""', '"""$num"""'], {}), "('len(set($x) & set(range(1, 100)))', '$num')\n", (914, 959), False, 'from rete.common import WME, Bind\n'), ((969, 987), 'rete.Filter', 'Filter', (['"""$num > 0"""'], {}), "('$num > 0')\n", (975, 987), False, 'from rete import Has, Filter, Rule\n'), ((1044, 1095), 'rete.common.Bind', 'Bind', (['"""len(set($x) & set(range(100, 200)))"""', '"""$num"""'], {}), "('len(set($x) & set(range(100, 200)))', '$num')\n", (1048, 1095), False, 'from rete.common import WME, Bind\n'), ((1152, 1203), 'rete.common.Bind', 'Bind', (['"""len(set($x) & set(range(300, 400)))"""', '"""$num"""'], {}), "('len(set($x) & set(range(300, 400)))', '$num')\n", (1156, 1203), False, 'from rete.common import WME, Bind\n'), ((356, 372), 'rete.Rule', 'Rule', (['c0', 'f0', 'f1'], {}), '(c0, f0, f1)\n', (360, 372), False, 'from rete import Has, Filter, Rule\n'), ((402, 414), 'rete.Rule', 'Rule', (['c0', 'f2'], {}), '(c0, f2)\n', (406, 414), False, 'from rete import Has, Filter, Rule\n'), ((444, 456), 'rete.Rule', 'Rule', (['c0', 'f3'], {}), '(c0, f3)\n', (448, 456), False, 'from rete import Has, Filter, Rule\n'), ((474, 502), 'rete.common.WME', 'WME', (['"""spu:1"""', '"""price"""', '"""100"""'], {}), "('spu:1', 'price', '100')\n", (477, 502), False, 'from rete.common import WME, Bind\n'), ((520, 548), 'rete.common.WME', 'WME', (['"""spu:1"""', '"""price"""', '"""150"""'], {}), "('spu:1', 'price', '150')\n", (523, 548), False, 'from rete.common import WME, Bind\n'), ((566, 594), 'rete.common.WME', 'WME', (['"""spu:1"""', '"""price"""', '"""300"""'], {}), "('spu:1', 'price', '300')\n", (569, 594), False, 'from rete.common import WME, Bind\n'), ((1016, 1032), 'rete.Rule', 'Rule', (['c0', 'b0', 'f0'], {}), '(c0, b0, f0)\n', (1020, 1032), False, 'from rete import Has, Filter, Rule\n'), ((1124, 1140), 'rete.Rule', 'Rule', (['c0', 'b1', 'f0'], {}), '(c0, b1, f0)\n', (1128, 1140), False, 'from rete import Has, Filter, Rule\n'), ((1232, 1248), 'rete.Rule', 'Rule', (['c0', 'b2', 'f0'], {}), '(c0, b2, f0)\n', (1236, 1248), False, 'from rete import Has, Filter, Rule\n'), ((1267, 1306), 'rete.common.WME', 'WME', (['"""spu:1"""', '"""sales"""', '"""range(50, 110)"""'], {}), "('spu:1', 'sales', 'range(50, 110)')\n", (1270, 1306), False, 'from rete.common import WME, Bind\n')]
|
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import time
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = (image_numpy - np.min(image_numpy)) / (np.max(image_numpy) - np.min(image_numpy))
image_numpy = image_numpy * 2 - 1
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
image_numpy = np.clip(image_numpy, 0.0, 255.0)
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
class Timer(object):
def __init__(self, name=None, acc=False, avg=False):
self.name = name
self.acc = acc
self.avg = avg
self.total = 0.0
self.iters = 0
def __enter__(self):
self.start()
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
self.tstart = time.time()
def stop(self):
self.iters += 1
self.total += time.time() - self.tstart
if not self.acc:
self.reset()
def reset(self):
name_string = ''
if self.name:
name_string = '[' + self.name + '] '
value = self.total
msg = 'Elapsed'
if self.avg:
value /= self.iters
msg = 'Avg Elapsed'
print('%s%s: %.4f' % (name_string, msg, value))
self.total = 0.0
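if __name__ == '__main__':
    # Quick demonstration of the Timer helper; the sleep durations are arbitrary.
    with Timer('demo'):
        time.sleep(0.1)
    # Accumulate over several iterations and report the average on reset:
    t = Timer('loop', acc=True, avg=True)
    for _ in range(3):
        with t:
            time.sleep(0.05)
    t.reset()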
|
[
"os.makedirs",
"numpy.median",
"numpy.std",
"os.path.exists",
"numpy.transpose",
"numpy.clip",
"time.time",
"numpy.min",
"numpy.max",
"numpy.mean",
"numpy.tile",
"PIL.Image.fromarray",
"torch.abs"
] |
[((772, 804), 'numpy.clip', 'np.clip', (['image_numpy', '(0.0)', '(255.0)'], {}), '(image_numpy, 0.0, 255.0)\n', (779, 804), True, 'import numpy as np\n'), ((1206, 1234), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (1221, 1234), False, 'from PIL import Image\n'), ((647, 678), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (654, 678), True, 'import numpy as np\n'), ((1773, 1793), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1787, 1793), False, 'import os\n'), ((1803, 1820), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1814, 1820), False, 'import os\n'), ((2255, 2266), 'time.time', 'time.time', ([], {}), '()\n', (2264, 2266), False, 'import time\n'), ((2324, 2335), 'time.time', 'time.time', ([], {}), '()\n', (2333, 2335), False, 'import time\n'), ((516, 535), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (522, 535), True, 'import numpy as np\n'), ((540, 559), 'numpy.max', 'np.max', (['image_numpy'], {}), '(image_numpy)\n', (546, 559), True, 'import numpy as np\n'), ((562, 581), 'numpy.min', 'np.min', (['image_numpy'], {}), '(image_numpy)\n', (568, 581), True, 'import numpy as np\n'), ((698, 734), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (710, 734), True, 'import numpy as np\n'), ((1018, 1044), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (1027, 1044), False, 'import torch\n'), ((1517, 1527), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1524, 1527), True, 'import numpy as np\n'), ((1529, 1538), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1535, 1538), True, 'import numpy as np\n'), ((1540, 1549), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1546, 1549), True, 'import numpy as np\n'), ((1551, 1563), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (1560, 1563), True, 'import numpy as np\n'), ((1565, 1574), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1571, 1574), True, 'import numpy as np\n')]
|
import subprocess
import shutil
import os
import time
from .interface import IsolateInterface
class IsolateSimple(IsolateInterface):
def isolate(self, files, command, parameters, envvariables, directories, allowmultiprocess, stdinfile, stdoutfile):
if os.path.isdir("/tmp/gradertools/isolation/"):
shutil.rmtree("/tmp/gradertools/isolation/")
os.makedirs("/tmp/gradertools/isolation/")
box = "/tmp/gradertools/isolation/"
for file in files:
shutil.copy(file, os.path.join(box, os.path.basename(file)))
isolateio=" "
if stdinfile is not None:
isolateio+="< "+stdinfile
if stdoutfile is not None:
isolateio+="> "+stdoutfile
t0 = time.perf_counter()
out = subprocess.run(" ".join(["cd "+ box+ ";"]+[command]+parameters+[isolateio]), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
t1 = time.perf_counter()
self._boxdir = box
self._status = 'OK'
self._runtime = t1-t0
self._walltime = t1-t0
self._maxrss = 0 # Maximum resident set size of the process (in kilobytes).
self._cswv = 0 # Number of context switches caused by the process giving up the CPU voluntarily.
self._cswf = 0 # Number of context switches forced by the kernel.
self._cgmem = 0 # Total memory use by the whole control group (in kilobytes).
self._exitcode = out.returncode
self._stdout = out.stdout
def clean(self):
shutil.rmtree("/tmp/gradertools/isolation/")
|
[
"os.makedirs",
"os.path.basename",
"os.path.isdir",
"time.perf_counter",
"shutil.rmtree"
] |
[((267, 311), 'os.path.isdir', 'os.path.isdir', (['"""/tmp/gradertools/isolation/"""'], {}), "('/tmp/gradertools/isolation/')\n", (280, 311), False, 'import os\n'), ((378, 420), 'os.makedirs', 'os.makedirs', (['"""/tmp/gradertools/isolation/"""'], {}), "('/tmp/gradertools/isolation/')\n", (389, 420), False, 'import os\n'), ((748, 767), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (765, 767), False, 'import time\n'), ((963, 982), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (980, 982), False, 'import time\n'), ((1577, 1621), 'shutil.rmtree', 'shutil.rmtree', (['"""/tmp/gradertools/isolation/"""'], {}), "('/tmp/gradertools/isolation/')\n", (1590, 1621), False, 'import shutil\n'), ((325, 369), 'shutil.rmtree', 'shutil.rmtree', (['"""/tmp/gradertools/isolation/"""'], {}), "('/tmp/gradertools/isolation/')\n", (338, 369), False, 'import shutil\n'), ((540, 562), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (556, 562), False, 'import os\n')]
|
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tts_websocketserver', 'src'))
from tts_websocketserver.tts_server import run
if __name__ == '__main__':
run()
|
[
"os.path.abspath",
"tts_websocketserver.tts_server.run"
] |
[((203, 208), 'tts_websocketserver.tts_server.run', 'run', ([], {}), '()\n', (206, 208), False, 'from tts_websocketserver.tts_server import run\n'), ((62, 87), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (77, 87), False, 'import os, sys\n')]
|
import re
import pytest
import rita
def load_rules(rules_path):
with open(rules_path, "r") as f:
return f.read()
def spacy_engine(rules, **kwargs):
spacy = pytest.importorskip("spacy", minversion="2.1")
patterns = rita.compile_string(rules, **kwargs)
nlp = spacy.load("en")
ruler = spacy.pipeline.EntityRuler(nlp, overwrite_ents=True)
print(patterns)
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
def parse(text):
doc = nlp(text)
return list([(e.text, e.label_) for e in doc.ents])
return parse
def standalone_engine(rules, **kwargs):
parser = rita.compile_string(rules, use_engine="standalone", **kwargs)
print(parser.patterns)
def parse(text):
results = list(parser.execute(text, include_submatches=False))
return list([(r["text"], r["label"]) for r in results])
return parse
def rust_engine(rules, **kwargs):
from rita.engine.translate_rust import load_lib
lib = load_lib()
if lib is None:
pytest.skip("Missing rita-rust dynamic lib, skipping related tests")
print("Trying to run: {}".format(rules))
parser = rita.compile_string(rules, use_engine="rust", **kwargs)
print(parser.patterns)
def parse(text):
results = list(parser.execute(text, include_submatches=False))
return list([(r["text"], r["label"]) for r in results])
return parse
def normalize_output(r):
return re.sub(r"\s+", " ", r.strip().replace("\n", ""))
def raw_compare(r1, r2):
r1 = normalize_output(r1)
r2 = normalize_output(r2)
assert r1 == r2
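# Usage sketch (the rules file path is illustrative; non-spaCy engines require
# the corresponding backend to be installed):
#
#     rules = load_rules("examples/color-car.rita")
#     parse = standalone_engine(rules)
#     print(parse("John has a red car"))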
|
[
"pytest.importorskip",
"rita.engine.translate_rust.load_lib",
"pytest.skip",
"rita.compile_string"
] |
[((177, 223), 'pytest.importorskip', 'pytest.importorskip', (['"""spacy"""'], {'minversion': '"""2.1"""'}), "('spacy', minversion='2.1')\n", (196, 223), False, 'import pytest\n'), ((239, 275), 'rita.compile_string', 'rita.compile_string', (['rules'], {}), '(rules, **kwargs)\n', (258, 275), False, 'import rita\n'), ((623, 684), 'rita.compile_string', 'rita.compile_string', (['rules'], {'use_engine': '"""standalone"""'}), "(rules, use_engine='standalone', **kwargs)\n", (642, 684), False, 'import rita\n'), ((984, 994), 'rita.engine.translate_rust.load_lib', 'load_lib', ([], {}), '()\n', (992, 994), False, 'from rita.engine.translate_rust import load_lib\n'), ((1150, 1205), 'rita.compile_string', 'rita.compile_string', (['rules'], {'use_engine': '"""rust"""'}), "(rules, use_engine='rust', **kwargs)\n", (1169, 1205), False, 'import rita\n'), ((1023, 1091), 'pytest.skip', 'pytest.skip', (['"""Missing rita-rust dynamic lib, skipping related tests"""'], {}), "('Missing rita-rust dynamic lib, skipping related tests')\n", (1034, 1091), False, 'import pytest\n')]
|
from dataclasses import dataclass
from sailenv import Vector3
from sailenv.dynamics import Dynamic
@dataclass
class UniformMovementRandomBounce(Dynamic):
start_direction: Vector3 = Vector3(0, 0, 1)
speed: float = 5
angular_speed: float = 2
seed: int = 42
@staticmethod
def get_type():
return "uniform_movement_random_bounce"
|
[
"sailenv.Vector3"
] |
[((188, 204), 'sailenv.Vector3', 'Vector3', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (195, 204), False, 'from sailenv import Vector3\n')]
|
# -*- coding: utf-8 -*-
"""Core download functions."""
# Imports ---------------------------------------------------------------------
import datetime
import json
import numpy as np
import pandas as pd
import requests
from . import constants
from . import errors
from . import settings
# Functions ------------------------------------------------------------------
def request(query):
"""Send an http request with a query and return the response.
request sends a SPARQL query to the api endpoint and returns the response
object. It is a simple wrapper around request.post. It sets the appropriate
headers and sends the query as the request body. It does not validate the
query or handle the response in any way. The response format is JSON.
Parameters
----------
query : str
A SPARQL query as a string.
Returns
-------
out : Response
The http response object from requests.
"""
url = settings.get_api_url()
headers = {}
headers['content-type'] = 'application/sparql-query'
headers['accept'] = 'application/sparql-results+json'
response = requests.post(url, headers=headers, data=query)
return response
def sparql_select(query):
"""Send a select query and return the response as a DataFrame.
sparql_select sends a SPARQL query to the api endpoint and returns the
response as a DataFrame. The SPARQL should be a SELECT query as the
response is processed as tabular data. The function will convert datatypes
that it recognises. It currently recognises date types. All other data
returned in the DataFrame will be strings. If the query syntax is not valid
or the request fails for any other reason a RequestError will be raised
with the response text.
Parameters
----------
query : str
A SPARQL SELECT query as a string.
Returns
-------
out : DataFrame
A pandas dataframe containing the results of the query.
"""
# Send the query and get the response
response = request(query)
# If the server returned an error raise it with the response text
if not response.ok:
raise errors.RequestError(response.text)
# Process the response as tabular data and return it as a DataFrame
json = response.json()
rows = []
headers = json['head']['vars']
records = json['results']['bindings']
# For each record build a row and assign values based on the data type
for record in records:
row = []
for header in headers:
if header in record:
if 'datatype' in record[header] and \
record[header]['datatype'] == constants.XML_DATE:
row.append(
datetime.datetime.strptime(
record[header]['value'], '%Y-%m-%d+%H:%M').date())
else:
row.append(record[header]['value'].strip())
else:
row.append(None)
rows.append(row)
    return pd.DataFrame(data=rows, columns=headers).fillna(value=np.nan)
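# Usage sketch (illustrative query; the endpoint URL comes from settings.get_api_url()):
#
#     df = sparql_select('SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5')
#     print(df.head())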
|
[
"pandas.DataFrame",
"requests.post",
"datetime.datetime.strptime"
] |
[((1134, 1181), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'data': 'query'}), '(url, headers=headers, data=query)\n', (1147, 1181), False, 'import requests\n'), ((3046, 3086), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rows', 'columns': 'headers'}), '(data=rows, columns=headers)\n', (3058, 3086), True, 'import pandas as pd\n'), ((2769, 2838), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["record[header]['value']", '"""%Y-%m-%d+%H:%M"""'], {}), "(record[header]['value'], '%Y-%m-%d+%H:%M')\n", (2795, 2838), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 09:10:29 2021
Author: <NAME>
Functions for implementing the edge detection scheme first proposed by Zhang and Bao [1].
Modified for use with pywt's SWT2 transform and employs double thresholding similar to Canny to improve noise resilience and recovery of weak edges.
Portions of code adapted from scikit-image's implementation of the canny edge detector;
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
[1] <NAME>. and <NAME>., 2002. Edge detection by scale multiplication in wavelet domain. Pattern Recognition Letters, 23(14), pp.1771-1784.
"""
import numpy as np
from pywt import swt2, Wavelet
from scipy.ndimage import generate_binary_structure, binary_erosion, label
from scipy import ndimage as ndi
def wavelet_edge_detector(image, start_level=0, levels=2, wavelet='rbio3.1', c=0.15, noise_var=40, t1=1, t2=2, dbl_th=True):
"""
Extracts the edge local maxima of the passed image using the product of two
consecutive stationary wavelet coefficients
    Parameters
    -----------
image : 2D array
Input image, grayscale
start_level : int
Initial coefficient scale level to be extracted by the SWT
levels : int
number of levels to consider, must be even
wavelet : string
Name of wavelet as listed by pywt.wavelist()
c : float
Multiplier for calculating the threshold
noise_var : float
Estimate of the Gaussian Noise variance present in the image
t1 : float
Threshold multiplier for the lower threshold
t2 : float
        Threshold multiplier for the upper threshold
Returns
-------
local_maxima : 2D array
local maxima extracted by the local maxima method
edge_mask : 2D array
Binary array marking edges present in the local maxima
-----
"""
    assert levels % 2 == 0
#calculate the maximum level to decompose the image with
max_level = start_level+levels
#Decompse the image to its detail coefficients using the 2D SWT
coeffs = swt2(image, wavelet=wavelet, level=max_level,
start_level=start_level, norm=False,
trim_approx=True)
#create empty arrays to store the detail coefficients
#algoritmhs only require Horizontal and Vertical details, so Diagonal is not calculated
coeff_arr_H = np.empty((image.shape + (max_level-start_level,)))
coeff_arr_V = np.empty((image.shape + (max_level-start_level,)))
#offset the coefficients based on the decomposition scale
for i in range(max_level-start_level):
coeff_arr_H[:,:,i] = np.roll(coeffs[-1-i][0], 2**(i+start_level))
coeff_arr_V[:,:,i] = np.roll(coeffs[-1-i][1], 2**(i+start_level))
#Get the Horizontal and Vertical products; the magnitude gradient matrices
Mdx = np.prod(coeff_arr_H, axis=2)
Mdy = np.prod(coeff_arr_V, axis=2)
#Remove negative coefficients, as these are solely due to noise
pts_Mdx_plus = (Mdx >= 0)
Mdx = pts_Mdx_plus * Mdx
pts_Mdy_plus = (Mdy >= 0)
Mdy = pts_Mdy_plus * Mdy
#Get the angle gradient matrices
Adx = np.sign(coeff_arr_H[:,:,1])*np.sqrt(Mdx)
Ady = np.sign(coeff_arr_V[:,:,1])*np.sqrt(Mdy)
#Obtain the local modulus maximum in the direction of the normal of the edge
local_maxima = local_modulus_maxima(Adx, Ady, Mdx, Mdy)
if dbl_th:
#Perform double thresholding and return the edge mask
edge_mask = dbl_thresholding_ZhangBao(local_maxima, wavelet=wavelet,
start_level=start_level,
c=c, noise_var=noise_var,
t1=t1, t2=t2)
else:
edge_mask = None
return local_maxima, edge_mask
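# Usage sketch (illustrative parameters; pywt.swt2 requires each image side to
# be a multiple of 2 ** (start_level + levels)):
#
#     maxima, edges = wavelet_edge_detector(img, start_level=0, levels=2,
#                                           wavelet='rbio3.1', c=0.15,
#                                           noise_var=40, t1=1, t2=2)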
def local_modulus_maxima(Adx, Ady, Mdx, Mdy, mask=None):
"""
Code adapted from scikit-image's canny implementation for faster execution
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
"""
"""Fast computation of the local maxima using custom gradient and angle matrices
Parameters
-----------
Adx : 2D array
Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
for calculating the normal to the edges
Ady : 2D array
Gradient array along axis 1 (Vertical Detail Coefficients) to be used
for calculating the normal to the edges
Mdx : 2D array
Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
for calculating the value of the edges
Mdy : 2D array
Gradient array along axis 1 (Vertical Detail Coefficients) to be used
for calculating the value of the edges
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
Returns
-------
output : 2D array
The local maxima
-----
The steps of the algorithm are as follows:
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
"""
#
# The steps involved:
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
assert (Mdx.shape == Mdy.shape)
assert (Mdx.shape == Adx.shape)
assert (Adx.shape == Ady.shape)
if mask is None:
mask = np.ones(Mdx.shape, dtype=bool)
jsobel = Ady
isobel = Adx
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(Mdx, Mdy)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(Mdx.shape)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
return local_maxima * magnitude
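# --- Illustrative helper (added; not part of the original module) ---
# Scalar mirror of the vectorised sector masks above: the signs of the two
# Sobel responses plus which magnitude dominates select the normal sector.
# Zero responses sit on sector boundaries; the vectorised code lets those
# fall into both adjacent masks via the overlapping >=/<= comparisons.
def _normal_sector(isobel, jsobel):
    same_sign = (isobel >= 0) == (jsobel >= 0)
    i_dominant = abs(isobel) >= abs(jsobel)
    if same_sign and i_dominant:
        return '0-45'     # horizontal/diagonal mix
    if same_sign:
        return '45-90'    # diagonal/vertical mix
    if not i_dominant:
        return '90-135'   # anti-diagonal/vertical mix
    return '135-180'      # anti-diagonal/horizontal mix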
def dbl_thresholding_ZhangBao(local_maxima, start_level=0, wavelet='rbio3.1', c=20, noise_var=1, t1=1, t2=2):
"""
Portions of code adapted from scikit-image's canny implementation for faster execution
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
"""
"""Performs double thresholding based the wavelet energy and noise variance values
Parameters
-----------
local_maxima : 2D array
Local maxima extracted by the local maxima method, same shape as input image
wavelet : string
Name of wavelet as listed by pywt.wavelist()
start_level : int
Initial coefficient scale level to be extracted by the SWT
c : float
Multiplier for calculating the threshold
noise_var : float
Estimate of the Gaussian Noise variance present in the image
t1 : float
Threshold multiplier for the lower threshold
t2 : float
        Threshold multiplier for the upper threshold
Returns
-------
edge_mask : 2D array
Binary array marking edges present in the local maxima
-----
"""
#
#---- Create two masks at the two thresholds.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
    #The lower threshold is the same as in Zhang and Bao's paper,
    #set to remove the majority of the noise present
#threshold = c * energy of wavelet at scale j, energy at scale j+1,
#noise_var, scaled noise_var
#get wavelet coefficients
w = Wavelet(wavelet)
if w.orthogonal:
(_, psi_d1, _) = w.wavefun(level=start_level+1)
(_, psi_d2, _) = w.wavefun(level=start_level+2)
else:
(_, psi_d1, _, _, _) = w.wavefun(level=start_level+1)
(_, psi_d2, _, _, _) = w.wavefun(level=start_level+2)
    #compute their energies (in reality, the square root of energy)
energy_psi_d1 = np.sqrt(np.sum(psi_d1**2))
energy_psi_d2 = np.sqrt(np.sum(psi_d2**2))
#add zeros to psi_d1 to compute the next variable
psi_d1_up = psi_d1.repeat(2)
psi_d1_up[1::2] = 0
if wavelet == 'haar':
psi_d1_up = psi_d1_up[1:-1]
#get the sigma_i value
sigma_i_sq = 2*np.sum((psi_d1_up/energy_psi_d1 + psi_d2/energy_psi_d2)**2)
t = c * energy_psi_d1 * energy_psi_d2 * noise_var * sigma_i_sq
T_low = t*t1
T_high = t*t2
high_mask = (local_maxima >= T_high)
low_mask = (local_maxima >= T_low)
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels, np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
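# --- Illustrative mini-example (added; not from the original module; reuses
# the module's existing np/ndi/label imports) ---
# The hysteresis step above in miniature: an 8-connected low-threshold
# component survives only if it contains at least one high-threshold pixel.
def _hysteresis_demo():
    low = np.array([[1, 1, 0, 0],
                    [0, 0, 0, 1]], bool)   # two separate components
    high = np.array([[0, 1, 0, 0],
                     [0, 0, 0, 0]], bool)  # only the left one is confirmed
    labels, count = label(low, np.ones((3, 3), bool))
    keep = np.zeros(count + 1, bool)
    keep[1:] = ndi.sum(high, labels, np.arange(count) + 1) > 0
    return keep[labels]   # left component kept, lone pixel dropped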
#run demo
if __name__ == "__main__":
import cv2 as cv
lvl = 0
c = 0.345
t1 = 1.0
t2 = 2.75
noise_var = 7237.754103671255
cv.namedWindow('Camera Capture', cv.WINDOW_NORMAL)
cv.namedWindow('Product Local Maxima - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Edges - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Edges - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Overlay - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Overlay - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
image = cv.imread('test_images/USAF.tiff', cv.IMREAD_GRAYSCALE)
#convert image from 8-bit to 12-bit, same as camera depth
    image = image.astype(np.float64)  # np.float alias was removed in NumPy 1.24
image = image * 4095/256
image = image.astype(np.uint16)
#find local maxima and edges using the Haar wavelet
local_maxima_hr, edges_hr = wavelet_edge_detector(image, start_level=lvl,
wavelet='haar',c=c,
noise_var=noise_var, t1=t1, t2=t2)
local_maxima_hr = local_maxima_hr / np.max(local_maxima_hr) * 65535
local_maxima_hr = local_maxima_hr.astype(np.uint16)
edges_hr = edges_hr * np.ones(edges_hr.shape) * 65535
edges_hr = edges_hr.astype(np.uint16)
comb_hr = np.zeros((image.shape + (3,)))
comb_hr[:,:,0] = image / 4096
comb_hr[:,:,1] = comb_hr[:,:,0]
comb_hr[:,:,2] = comb_hr[:,:,0]
comb_hr[:,:,2] += (edges_hr/65535)
comb_hr[:,:,2] = np.clip(comb_hr[:,:,2], 0, 1)
#find local maxima and edges using the Reverse Biorthogonal 3.1 wavelet
local_maxima_rb, edges_rb = wavelet_edge_detector(image, start_level=lvl,
wavelet='rbio3.1',c=c,
noise_var=noise_var, t1=t1, t2=t2)
local_maxima_rb = local_maxima_rb / np.max(local_maxima_rb) * 65535
local_maxima_rb = local_maxima_rb.astype(np.uint16)
edges_rb = edges_rb * np.ones(edges_rb.shape) * 65535
edges_rb = edges_rb.astype(np.uint16)
comb_rb = np.zeros((image.shape + (3,)))
comb_rb[:,:,0] = image / 4096
comb_rb[:,:,1] = comb_rb[:,:,0]
comb_rb[:,:,2] = comb_rb[:,:,0]
comb_rb[:,:,2] += (edges_rb/65535)
comb_rb[:,:,2] = np.clip(comb_rb[:,:,2], 0, 1)
    image = image.astype(np.float64)  # np.float alias was removed in NumPy 1.24
image = image * 65535/4096
image = image.astype(np.uint16)
try:
while True:
cv.imshow('Camera Capture', image)
cv.imshow('Product Local Maxima - Haar Wavelet', local_maxima_hr)
cv.imshow('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet', local_maxima_rb)
cv.imshow('Edges - Haar Wavelet', edges_hr)
cv.imshow('Edges - Reverse Biorthogonal 3.1 Wavelet', edges_rb)
cv.imshow('Overlay - Haar Wavelet', comb_hr)
cv.imshow('Overlay - Reverse Biorthogonal 3.1 Wavelet', comb_rb)
cv.waitKey(1)
except KeyboardInterrupt:
cv.destroyAllWindows()
|
[
"scipy.ndimage.generate_binary_structure",
"numpy.abs",
"numpy.sum",
"numpy.empty",
"numpy.ones",
"numpy.clip",
"pywt.swt2",
"numpy.arange",
"cv2.imshow",
"numpy.prod",
"numpy.max",
"cv2.destroyAllWindows",
"numpy.roll",
"cv2.waitKey",
"numpy.hypot",
"pywt.Wavelet",
"scipy.ndimage.binary_erosion",
"numpy.zeros",
"cv2.imread",
"scipy.ndimage.label",
"numpy.sign",
"cv2.namedWindow",
"numpy.sqrt"
] |
[((2283, 2388), 'pywt.swt2', 'swt2', (['image'], {'wavelet': 'wavelet', 'level': 'max_level', 'start_level': 'start_level', 'norm': '(False)', 'trim_approx': '(True)'}), '(image, wavelet=wavelet, level=max_level, start_level=start_level, norm\n =False, trim_approx=True)\n', (2287, 2388), False, 'from pywt import swt2, Wavelet\n'), ((2599, 2649), 'numpy.empty', 'np.empty', (['(image.shape + (max_level - start_level,))'], {}), '(image.shape + (max_level - start_level,))\n', (2607, 2649), True, 'import numpy as np\n'), ((2669, 2719), 'numpy.empty', 'np.empty', (['(image.shape + (max_level - start_level,))'], {}), '(image.shape + (max_level - start_level,))\n', (2677, 2719), True, 'import numpy as np\n'), ((3074, 3102), 'numpy.prod', 'np.prod', (['coeff_arr_H'], {'axis': '(2)'}), '(coeff_arr_H, axis=2)\n', (3081, 3102), True, 'import numpy as np\n'), ((3114, 3142), 'numpy.prod', 'np.prod', (['coeff_arr_V'], {'axis': '(2)'}), '(coeff_arr_V, axis=2)\n', (3121, 3142), True, 'import numpy as np\n'), ((6917, 6931), 'numpy.abs', 'np.abs', (['isobel'], {}), '(isobel)\n', (6923, 6931), True, 'import numpy as np\n'), ((6950, 6964), 'numpy.abs', 'np.abs', (['jsobel'], {}), '(jsobel)\n', (6956, 6964), True, 'import numpy as np\n'), ((6982, 7000), 'numpy.hypot', 'np.hypot', (['Mdx', 'Mdy'], {}), '(Mdx, Mdy)\n', (6990, 7000), True, 'import numpy as np\n'), ((7137, 7168), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['(2)', '(2)'], {}), '(2, 2)\n', (7162, 7168), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((7188, 7227), 'scipy.ndimage.binary_erosion', 'binary_erosion', (['mask', 's'], {'border_value': '(0)'}), '(mask, s, border_value=0)\n', (7202, 7227), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((7484, 7503), 'numpy.zeros', 'np.zeros', (['Mdx.shape'], {}), '(Mdx.shape)\n', (7492, 7503), True, 'import numpy as np\n'), ((12443, 12459), 'pywt.Wavelet', 'Wavelet', (['wavelet'], {}), '(wavelet)\n', (12450, 12459), False, 'from pywt import swt2, Wavelet\n'), ((13543, 13564), 'numpy.ones', 'np.ones', (['(3, 3)', 'bool'], {}), '((3, 3), bool)\n', (13550, 13564), True, 'import numpy as np\n'), ((13586, 13608), 'scipy.ndimage.label', 'label', (['low_mask', 'strel'], {}), '(low_mask, strel)\n', (13591, 13608), False, 'from scipy.ndimage import generate_binary_structure, binary_erosion, label\n'), ((13806, 13834), 'numpy.zeros', 'np.zeros', (['(count + 1,)', 'bool'], {}), '((count + 1,), bool)\n', (13814, 13834), True, 'import numpy as np\n'), ((14110, 14160), 'cv2.namedWindow', 'cv.namedWindow', (['"""Camera Capture"""', 'cv.WINDOW_NORMAL'], {}), "('Camera Capture', cv.WINDOW_NORMAL)\n", (14124, 14160), True, 'import cv2 as cv\n'), ((14166, 14237), 'cv2.namedWindow', 'cv.namedWindow', (['"""Product Local Maxima - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Product Local Maxima - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14180, 14237), True, 'import cv2 as cv\n'), ((14243, 14338), 'cv2.namedWindow', 'cv.namedWindow', (['"""Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet',\n cv.WINDOW_NORMAL)\n", (14257, 14338), True, 'import cv2 as cv\n'), ((14340, 14396), 'cv2.namedWindow', 'cv.namedWindow', (['"""Edges - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Edges - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14354, 14396), True, 'import cv2 as cv\n'), ((14402, 14478), 'cv2.namedWindow', 'cv.namedWindow', (['"""Edges - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Edges - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)\n", (14416, 14478), True, 'import cv2 as cv\n'), ((14484, 14542), 'cv2.namedWindow', 'cv.namedWindow', (['"""Overlay - Haar Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Overlay - Haar Wavelet', cv.WINDOW_NORMAL)\n", (14498, 14542), True, 'import cv2 as cv\n'), ((14548, 14626), 'cv2.namedWindow', 'cv.namedWindow', (['"""Overlay - Reverse Biorthogonal 3.1 Wavelet"""', 'cv.WINDOW_NORMAL'], {}), "('Overlay - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)\n", (14562, 14626), True, 'import cv2 as cv\n'), ((14646, 14701), 'cv2.imread', 'cv.imread', (['"""test_images/USAF.tiff"""', 'cv.IMREAD_GRAYSCALE'], {}), "('test_images/USAF.tiff', cv.IMREAD_GRAYSCALE)\n", (14655, 14701), True, 'import cv2 as cv\n'), ((15426, 15454), 'numpy.zeros', 'np.zeros', (['(image.shape + (3,))'], {}), '(image.shape + (3,))\n', (15434, 15454), True, 'import numpy as np\n'), ((15641, 15672), 'numpy.clip', 'np.clip', (['comb_hr[:, :, 2]', '(0)', '(1)'], {}), '(comb_hr[:, :, 2], 0, 1)\n', (15648, 15672), True, 'import numpy as np\n'), ((16246, 16274), 'numpy.zeros', 'np.zeros', (['(image.shape + (3,))'], {}), '(image.shape + (3,))\n', (16254, 16274), True, 'import numpy as np\n'), ((16461, 16492), 'numpy.clip', 'np.clip', (['comb_rb[:, :, 2]', '(0)', '(1)'], {}), '(comb_rb[:, :, 2], 0, 1)\n', (16468, 16492), True, 'import numpy as np\n'), ((2860, 2910), 'numpy.roll', 'np.roll', (['coeffs[-1 - i][0]', '(2 ** (i + start_level))'], {}), '(coeffs[-1 - i][0], 2 ** (i + start_level))\n', (2867, 2910), True, 'import numpy as np\n'), ((2936, 2986), 'numpy.roll', 'np.roll', (['coeffs[-1 - i][1]', '(2 ** (i + start_level))'], {}), '(coeffs[-1 - i][1], 2 ** (i + start_level))\n', (2943, 2986), True, 'import numpy as np\n'), ((3393, 3422), 'numpy.sign', 'np.sign', (['coeff_arr_H[:, :, 1]'], {}), '(coeff_arr_H[:, :, 1])\n', (3400, 3422), True, 'import numpy as np\n'), ((3421, 3433), 'numpy.sqrt', 'np.sqrt', (['Mdx'], {}), '(Mdx)\n', (3428, 3433), True, 'import numpy as np\n'), ((3445, 3474), 'numpy.sign', 'np.sign', (['coeff_arr_V[:, :, 1]'], {}), '(coeff_arr_V[:, :, 1])\n', (3452, 3474), True, 'import numpy as np\n'), ((3473, 3485), 'numpy.sqrt', 'np.sqrt', (['Mdy'], {}), '(Mdy)\n', (3480, 3485), True, 'import numpy as np\n'), ((6830, 6860), 'numpy.ones', 'np.ones', (['Mdx.shape'], {'dtype': 'bool'}), '(Mdx.shape, dtype=bool)\n', (6837, 6860), True, 'import numpy as np\n'), ((12847, 12866), 'numpy.sum', 'np.sum', (['(psi_d1 ** 2)'], {}), '(psi_d1 ** 2)\n', (12853, 12866), True, 'import numpy as np\n'), ((12895, 12914), 'numpy.sum', 'np.sum', (['(psi_d2 ** 2)'], {}), '(psi_d2 ** 2)\n', (12901, 12914), True, 'import numpy as np\n'), ((13158, 13223), 'numpy.sum', 'np.sum', (['((psi_d1_up / energy_psi_d1 + psi_d2 / energy_psi_d2) ** 2)'], {}), '((psi_d1_up / energy_psi_d1 + psi_d2 / energy_psi_d2) ** 2)\n', (13164, 13223), True, 'import numpy as np\n'), ((15208, 15231), 'numpy.max', 'np.max', (['local_maxima_hr'], {}), '(local_maxima_hr)\n', (15214, 15231), True, 'import numpy as np\n'), ((15330, 15353), 'numpy.ones', 'np.ones', (['edges_hr.shape'], {}), '(edges_hr.shape)\n', (15337, 15353), True, 'import numpy as np\n'), ((16028, 16051), 'numpy.max', 'np.max', (['local_maxima_rb'], {}), '(local_maxima_rb)\n', (16034, 16051), True, 'import numpy as np\n'), ((16150, 16173), 'numpy.ones', 'np.ones', (['edges_rb.shape'], {}), '(edges_rb.shape)\n', (16157, 16173), True, 'import numpy as np\n'), ((16652, 16686), 'cv2.imshow', 'cv.imshow', (['"""Camera Capture"""', 'image'], {}), "('Camera Capture', image)\n", (16661, 16686), True, 'import cv2 as cv\n'), ((16700, 16765), 'cv2.imshow', 'cv.imshow', (['"""Product Local Maxima - Haar Wavelet"""', 'local_maxima_hr'], {}), "('Product Local Maxima - Haar Wavelet', local_maxima_hr)\n", (16709, 16765), True, 'import cv2 as cv\n'), ((16779, 16868), 'cv2.imshow', 'cv.imshow', (['"""Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet"""', 'local_maxima_rb'], {}), "('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet',\n local_maxima_rb)\n", (16788, 16868), True, 'import cv2 as cv\n'), ((16878, 16921), 'cv2.imshow', 'cv.imshow', (['"""Edges - Haar Wavelet"""', 'edges_hr'], {}), "('Edges - Haar Wavelet', edges_hr)\n", (16887, 16921), True, 'import cv2 as cv\n'), ((16935, 16998), 'cv2.imshow', 'cv.imshow', (['"""Edges - Reverse Biorthogonal 3.1 Wavelet"""', 'edges_rb'], {}), "('Edges - Reverse Biorthogonal 3.1 Wavelet', edges_rb)\n", (16944, 16998), True, 'import cv2 as cv\n'), ((17012, 17056), 'cv2.imshow', 'cv.imshow', (['"""Overlay - Haar Wavelet"""', 'comb_hr'], {}), "('Overlay - Haar Wavelet', comb_hr)\n", (17021, 17056), True, 'import cv2 as cv\n'), ((17070, 17134), 'cv2.imshow', 'cv.imshow', (['"""Overlay - Reverse Biorthogonal 3.1 Wavelet"""', 'comb_rb'], {}), "('Overlay - Reverse Biorthogonal 3.1 Wavelet', comb_rb)\n", (17079, 17134), True, 'import cv2 as cv\n'), ((17162, 17175), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (17172, 17175), True, 'import cv2 as cv\n'), ((17230, 17252), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (17250, 17252), True, 'import cv2 as cv\n'), ((13705, 13737), 'numpy.arange', 'np.arange', (['count'], {'dtype': 'np.int32'}), '(count, dtype=np.int32)\n', (13714, 13737), True, 'import numpy as np\n')]
|
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flask_login import login_required, current_user
from flaskr.models import Post, db, PostComment, User
from flaskr import csrf
blog = Blueprint('blog', __name__)
@blog.route('/')
def index():
posts = Post.query.order_by(Post.created).all()
return render_template('blog/index.html', posts=posts, getPostUser=getPostUser)
@blog.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
data = Post(current_user.id,title,body)
db.session.add(data)
db.session.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
    # check_author is accepted for interface parity but no ownership check
    # is enforced here; every caller may fetch any post
    post = Post.query.get(id)
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
return post
@blog.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
post.title = title
post.body = body
db.session.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@blog.route("/<int:id>/delete", methods=('POST',))
@login_required
def delete(id):
Post.query.filter_by(id=id).delete()
db.session.commit()
return redirect(url_for('blog.index'))
@blog.route("/<int:id>/<action>")
@login_required
def like(id, action):
post = get_post(id)
if action == 'like':
current_user.like_post(post)
db.session.commit()
if action == 'unlike':
current_user.unlike_post(post)
db.session.commit()
return redirect(request.referrer)
@blog.route("/<int:id>/comments", methods=('GET', 'POST'))
def showComments(id):
post = get_post(id)
comments = post.comments
return render_template('blog/comments.html', comments = comments, post = post)
@blog.route("/addComment", methods=('POST',))
def addComment():
if request.method == 'POST':
post_id = int(request.form['post_id'])
print(post_id)
body = request.form['body']
error = None
if body == '':
error = 'Body is required.'
if error is not None:
flash(error)
else:
comment = PostComment(current_user.id, post_id, body)
print(current_user.id)
db.session.add(comment)
db.session.commit()
return redirect(request.referrer)
def getPostUser(id):
    user = User.query.get(id)
    # guard against posts whose author record no longer exists
    return user.username if user is not None else None
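# --- Hedged usage note (added): the blueprint above still has to be
# registered on the application object, typically inside the app factory
# (the module path below is an assumption):
#
#     from flaskr.blog import blog
#     app.register_blueprint(blog)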
|
[
"flaskr.models.Post.query.order_by",
"flask.flash",
"flaskr.models.db.session.commit",
"flask.Blueprint",
"flask.redirect",
"flaskr.models.Post",
"flask_login.current_user.like_post",
"flaskr.models.PostComment",
"flaskr.models.Post.query.get",
"flask.url_for",
"flask_login.current_user.unlike_post",
"flask.render_template",
"flaskr.models.db.session.add",
"flaskr.models.User.query.get",
"flaskr.models.Post.query.filter_by"
] |
[((269, 296), 'flask.Blueprint', 'Blueprint', (['"""blog"""', '__name__'], {}), "('blog', __name__)\n", (278, 296), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((391, 463), 'flask.render_template', 'render_template', (['"""blog/index.html"""'], {'posts': 'posts', 'getPostUser': 'getPostUser'}), "('blog/index.html', posts=posts, getPostUser=getPostUser)\n", (406, 463), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((985, 1020), 'flask.render_template', 'render_template', (['"""blog/create.html"""'], {}), "('blog/create.html')\n", (1000, 1020), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((1070, 1088), 'flaskr.models.Post.query.get', 'Post.query.get', (['id'], {}), '(id)\n', (1084, 1088), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((1720, 1766), 'flask.render_template', 'render_template', (['"""blog/update.html"""'], {'post': 'post'}), "('blog/update.html', post=post)\n", (1735, 1766), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((1896, 1915), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1913, 1915), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((2252, 2278), 'flask.redirect', 'redirect', (['request.referrer'], {}), '(request.referrer)\n', (2260, 2278), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((2425, 2492), 'flask.render_template', 'render_template', (['"""blog/comments.html"""'], {'comments': 'comments', 'post': 'post'}), "('blog/comments.html', comments=comments, post=post)\n", (2440, 2492), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((1936, 1957), 'flask.url_for', 'url_for', (['"""blog.index"""'], {}), "('blog.index')\n", (1943, 1957), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((2090, 2118), 'flask_login.current_user.like_post', 'current_user.like_post', (['post'], {}), '(post)\n', (2112, 2118), False, 'from flask_login import login_required, current_user\n'), ((2127, 2146), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2144, 2146), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((2182, 2212), 'flask_login.current_user.unlike_post', 'current_user.unlike_post', (['post'], {}), '(post)\n', (2206, 2212), False, 'from flask_login import login_required, current_user\n'), ((2221, 2240), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2238, 2240), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((3113, 3131), 'flaskr.models.User.query.get', 'User.query.get', (['id'], {}), '(id)\n', (3127, 3131), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((340, 373), 'flaskr.models.Post.query.order_by', 'Post.query.order_by', (['Post.created'], {}), '(Post.created)\n', (359, 373), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((778, 790), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (783, 790), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((824, 858), 'flaskr.models.Post', 'Post', (['current_user.id', 'title', 'body'], {}), '(current_user.id, title, body)\n', (828, 858), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((869, 889), 'flaskr.models.db.session.add', 'db.session.add', (['data'], {}), '(data)\n', (883, 889), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((902, 921), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (919, 921), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((1539, 1551), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (1544, 1551), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((1638, 1657), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1655, 1657), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((1855, 1882), 'flaskr.models.Post.query.filter_by', 'Post.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (1875, 1882), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((2838, 2850), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (2843, 2850), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((2887, 2930), 'flaskr.models.PostComment', 'PostComment', (['current_user.id', 'post_id', 'body'], {}), '(current_user.id, post_id, body)\n', (2898, 2930), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((2978, 3001), 'flaskr.models.db.session.add', 'db.session.add', (['comment'], {}), '(comment)\n', (2992, 3001), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((3014, 3033), 'flaskr.models.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3031, 3033), False, 'from flaskr.models import Post, db, PostComment, User\n'), ((3053, 3079), 'flask.redirect', 'redirect', (['request.referrer'], {}), '(request.referrer)\n', (3061, 3079), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((950, 971), 'flask.url_for', 'url_for', (['"""blog.index"""'], {}), "('blog.index')\n", (957, 971), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n'), ((1686, 1707), 'flask.url_for', 'url_for', (['"""blog.index"""'], {}), "('blog.index')\n", (1693, 1707), False, 'from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n')]
|
from pyvmodule.develope import *
from pyvmodule.tools.modules.sram.dual import SRamR,SRamW
from pyvmodule.tools.modules.fifo import Fifo
from .common import AxiComponent,update_data_burst_addr,compute_address
class Axi2RamR(SRamR):
class FifoAR(Fifo):
def update_data_araddr(self,field):
return update_data_burst_addr(field,self.data.arburst)
def __init__(self,axi,io=None,**kwargs):
SRamR.__init__(self,awidth=axi.awidth,bwidth=axi.bwidth,io=io,**kwargs)
self.reset = ~axi.aresetn
self.rcur = Reg(4)
axi.rresp[:]=0
for name in ['rid','rlast','rvalid']:
driver = Reg(len(getattr(axi,name)))
getattr(axi,name)[:] = driver
setattr(self,name,driver)
self.a = self.FifoAR(self.reset,axi,push=axi.arvalid,
names=['arid','araddr','arlen','arsize','arburst'],depth=0)
compute_address(self.a.data.araddr,self.a.data.arlen,axi.size_v)
axi.arready[:]= ~self.a.full
self.allow_out = Wire(axi.rready|~axi.rvalid)
self.a.data.arlen.last = Wire(self.a.data.arlen.equal_to(self.rcur))
self.a.pop[:] = self.en&self.a.data.arlen.last
self.a.data.araddr.update[:] = self.en&~self.a.data.arlen.last
self.rcur.reset = Wire(self.reset|self.a.pop)
When(self.rcur.reset)[self.rcur:0]\
.When(self.en)[self.rcur:self.rcur+1]
When(self.reset)[self.rvalid:0]\
.When(self.en)[self.rvalid:1]\
.When(axi.rready)[self.rvalid:0]
When(self.a.pop)[self.rid:self.a.data.arid]
When(self.en)[self.rlast:self.a.data.arlen.last]
self.en [:] = self.a.valid&self.allow_out
self.addr[:] = self.a.data.araddr
axi.rdata[:] = self.data
class Axi2RamW(SRamW):
class FifoAW(Fifo):
def update_data_awaddr(self,field):
return update_data_burst_addr(field,self.data.awburst)
def __init__(self,axi,io=None,**kwargs):
SRamW.__init__(self,awidth=axi.awidth,bwidth=axi.bwidth,io=io,**kwargs)
self.reset = ~axi.aresetn
self.w = VStruct()
for name in ['wdata','wstrb','wlast','wvalid']:
setattr(self,name,Reg(len(getattr(axi,name))))
self.a = self.FifoAW(self.reset,axi,push=axi.awvalid,
names=['awid','awaddr','awlen','awsize','awburst'],depth=0)
self.b = Fifo(self.reset,self.a.data.awid,pop=axi.bready)
axi.bid [:]= self.b.data
axi.bvalid[:]= self.b.valid
axi.bresp [:]= 0
compute_address(self.a.data.awaddr,self.a.data.awlen,axi.size_v)
self.allow_out = Wire(self.a.valid&~self.b.full)
self.a.data.awaddr.update[:] = self.en&~self.wlast
self.a.pop [:] = self.en& self.wlast
self.b.push [:] = self.a.pop
self.go = Wire(axi.wvalid&axi.wready)
blk = When(self.go)
for name in ['wdata','wstrb','wlast']:
blk[getattr(self,name):getattr(axi,name)]
When(self.reset)[self.wvalid:0]\
.When(self.go)[self.wvalid:1]\
.When(self.en)[self.wvalid:0]
axi.awready[:] = ~self.a.full
axi.wready [:] = self.allow_out|~self.wvalid
self.en [:] = self.wvalid&self.allow_out&~self.reset
self.addr[:] = self.a.data.awaddr
self.data[:] = self.wdata
self.strb[:] = self.wstrb
|
[
"pyvmodule.tools.modules.sram.dual.SRamR.__init__",
"pyvmodule.tools.modules.sram.dual.SRamW.__init__",
"pyvmodule.tools.modules.fifo.Fifo"
] |
[((420, 495), 'pyvmodule.tools.modules.sram.dual.SRamR.__init__', 'SRamR.__init__', (['self'], {'awidth': 'axi.awidth', 'bwidth': 'axi.bwidth', 'io': 'io'}), '(self, awidth=axi.awidth, bwidth=axi.bwidth, io=io, **kwargs)\n', (434, 495), False, 'from pyvmodule.tools.modules.sram.dual import SRamR, SRamW\n'), ((1970, 2045), 'pyvmodule.tools.modules.sram.dual.SRamW.__init__', 'SRamW.__init__', (['self'], {'awidth': 'axi.awidth', 'bwidth': 'axi.bwidth', 'io': 'io'}), '(self, awidth=axi.awidth, bwidth=axi.bwidth, io=io, **kwargs)\n', (1984, 2045), False, 'from pyvmodule.tools.modules.sram.dual import SRamR, SRamW\n'), ((2384, 2434), 'pyvmodule.tools.modules.fifo.Fifo', 'Fifo', (['self.reset', 'self.a.data.awid'], {'pop': 'axi.bready'}), '(self.reset, self.a.data.awid, pop=axi.bready)\n', (2388, 2434), False, 'from pyvmodule.tools.modules.fifo import Fifo\n')]
|
from typing import Dict, Iterable, List, Optional, Union
import attr
from attr.validators import instance_of
from ics.component import Component
from ics.event import Event
from ics.grammar.parse import Container, calendar_string_to_containers
from ics.parsers.icalendar_parser import CalendarParser
from ics.serializers.icalendar_serializer import CalendarSerializer
from ics.timeline import Timeline
from ics.todo import Todo
@attr.s
class CalendarAttrs(Component):
version: str = attr.ib(validator=instance_of(str)) # default set by Calendar.Meta.DEFAULT_VERSION
prodid: str = attr.ib(validator=instance_of(str)) # default set by Calendar.Meta.DEFAULT_PRODID
scale: Optional[str] = attr.ib(default=None)
method: Optional[str] = attr.ib(default=None)
version_params: Dict[str, List[str]] = attr.ib(factory=dict)
prodid_params: Dict[str, List[str]] = attr.ib(factory=dict)
scale_params: Dict[str, List[str]] = attr.ib(factory=dict)
method_params: Dict[str, List[str]] = attr.ib(factory=dict)
_timezones: Dict = attr.ib(factory=dict, init=False, repr=False, eq=False, order=False, hash=False)
events: List[Event] = attr.ib(factory=list, converter=list)
todos: List[Todo] = attr.ib(factory=list, converter=list)
class Calendar(CalendarAttrs):
"""
    Represents a unique RFC 5545 iCalendar.
Attributes:
events: a list of `Event`s contained in the Calendar
todos: a list of `Todo`s contained in the Calendar
timeline: a `Timeline` instance for iterating this Calendar in chronological order
"""
class Meta:
name = 'VCALENDAR'
parser = CalendarParser
serializer = CalendarSerializer
DEFAULT_VERSION = "2.0"
DEFAULT_PRODID = "ics.py - http://git.io/lLljaA"
def __init__(
self,
imports: Union[str, Container] = None,
events: Optional[Iterable[Event]] = None,
todos: Optional[Iterable[Todo]] = None,
creator: str = None,
**kwargs
):
"""Initializes a new Calendar.
Args:
imports (**str**): data to be imported into the Calendar,
events (**Iterable[Event]**): `Event`s to be added to the calendar
todos (**Iterable[Todo]**): `Todo`s to be added to the calendar
creator (**string**): uid of the creator program.
"""
if events is None:
events = tuple()
if todos is None:
todos = tuple()
kwargs.setdefault("version", self.Meta.DEFAULT_VERSION)
kwargs.setdefault("prodid", creator if creator is not None else self.Meta.DEFAULT_PRODID)
super(Calendar, self).__init__(events=events, todos=todos, **kwargs) # type: ignore
self.timeline = Timeline(self, None)
if imports is not None:
if isinstance(imports, Container):
self._populate(imports)
else:
containers = calendar_string_to_containers(imports)
if len(containers) != 1:
raise NotImplementedError(
'Multiple calendars in one file are not supported by this method. Use ics.Calendar.parse_multiple()')
self._populate(containers[0]) # Use first calendar
@property
def creator(self) -> str:
return self.prodid
@creator.setter
def creator(self, value: str):
self.prodid = value
@classmethod
def parse_multiple(cls, string):
""""
Parses an input string that may contain mutiple calendars
and retruns a list of :class:`ics.event.Calendar`
"""
containers = calendar_string_to_containers(string)
return [cls(imports=c) for c in containers]
def __repr__(self) -> str:
return "<Calendar with {} event{} and {} todo{}>" \
.format(len(self.events),
"s" if len(self.events) > 1 else "",
len(self.todos),
"s" if len(self.todos) > 1 else "")
def __iter__(self) -> Iterable[str]:
"""Returns:
iterable: an iterable version of __str__, line per line
(with line-endings).
Example:
Can be used to write calendar to a file:
>>> c = Calendar(); c.events.append(Event(name="My cool event"))
>>> open('my.ics', 'w').writelines(c)
"""
return iter(str(self).splitlines(keepends=True))
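# --- Hedged usage sketch (added; the file name below is illustrative) ---
# with open('several.ics') as f:
#     calendars = Calendar.parse_multiple(f.read())
# for cal in calendars:
#     print(repr(cal))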
|
[
"attr.validators.instance_of",
"ics.grammar.parse.calendar_string_to_containers",
"attr.ib",
"ics.timeline.Timeline"
] |
[((703, 724), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (710, 724), False, 'import attr\n'), ((753, 774), 'attr.ib', 'attr.ib', ([], {'default': 'None'}), '(default=None)\n', (760, 774), False, 'import attr\n'), ((819, 840), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (826, 840), False, 'import attr\n'), ((883, 904), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (890, 904), False, 'import attr\n'), ((946, 967), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (953, 967), False, 'import attr\n'), ((1010, 1031), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (1017, 1031), False, 'import attr\n'), ((1056, 1141), 'attr.ib', 'attr.ib', ([], {'factory': 'dict', 'init': '(False)', 'repr': '(False)', 'eq': '(False)', 'order': '(False)', 'hash': '(False)'}), '(factory=dict, init=False, repr=False, eq=False, order=False, hash=False\n )\n', (1063, 1141), False, 'import attr\n'), ((1163, 1200), 'attr.ib', 'attr.ib', ([], {'factory': 'list', 'converter': 'list'}), '(factory=list, converter=list)\n', (1170, 1200), False, 'import attr\n'), ((1225, 1262), 'attr.ib', 'attr.ib', ([], {'factory': 'list', 'converter': 'list'}), '(factory=list, converter=list)\n', (1232, 1262), False, 'import attr\n'), ((2790, 2810), 'ics.timeline.Timeline', 'Timeline', (['self', 'None'], {}), '(self, None)\n', (2798, 2810), False, 'from ics.timeline import Timeline\n'), ((3681, 3718), 'ics.grammar.parse.calendar_string_to_containers', 'calendar_string_to_containers', (['string'], {}), '(string)\n', (3710, 3718), False, 'from ics.grammar.parse import Container, calendar_string_to_containers\n'), ((509, 525), 'attr.validators.instance_of', 'instance_of', (['str'], {}), '(str)\n', (520, 525), False, 'from attr.validators import instance_of\n'), ((611, 627), 'attr.validators.instance_of', 'instance_of', (['str'], {}), '(str)\n', (622, 627), False, 'from attr.validators import instance_of\n'), ((2978, 3016), 'ics.grammar.parse.calendar_string_to_containers', 'calendar_string_to_containers', (['imports'], {}), '(imports)\n', (3007, 3016), False, 'from ics.grammar.parse import Container, calendar_string_to_containers\n')]
|
from unittest import TestCase
from freezegun import freeze_time
from secret.utils import create_secret
class Utils(TestCase):
@freeze_time('2019-03-12 12:00:00')
def test_create_secret_key(self):
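        # freezing the clock makes the (presumably time-derived) secret
        # deterministic, so a literal digest can be asserted below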
secret = create_secret()
self.assertEqual(secret, 'd3a4646728a9de9a74d8fc4c41966a42')
|
[
"secret.utils.create_secret",
"freezegun.freeze_time"
] |
[((135, 169), 'freezegun.freeze_time', 'freeze_time', (['"""2019-03-12 12:00:00"""'], {}), "('2019-03-12 12:00:00')\n", (146, 169), False, 'from freezegun import freeze_time\n'), ((225, 240), 'secret.utils.create_secret', 'create_secret', ([], {}), '()\n', (238, 240), False, 'from secret.utils import create_secret\n')]
|
import unittest
from typing import List
import cadquery as cq
from cq_cam.utils import utils
class ProjectFaceTest(unittest.TestCase):
def setUp(self):
pass
def test_face_with_hole(self):
# This should create a projected face that is 2x4 (XY)
box = (
cq.Workplane('XZ')
.lineTo(2, 0)
.lineTo(2, 6)
.close()
.extrude(4)
.faces('<Z')
.workplane()
.moveTo(1, 2)
.rect(1, 1)
.cutThruAll()
)
face_wp = cq.Workplane(obj=box.faces().objects[1])
plane = face_wp.workplane().plane
# Make sure we picked the right face
self.assertEqual(plane.xDir, cq.Vector(0.0, -1.0, 0.0))
self.assertEqual(plane.yDir, cq.Vector(0.316227766016838, 0.0, 0.9486832980505139))
self.assertEqual(plane.zDir, cq.Vector(-0.9486832980505139, -0.0, 0.316227766016838))
result = utils.project_face(face_wp.objects[0])
class TestVector(cq.Vector):
            def __eq__(self, other):
                # assertCountEqual can compare list elements against arbitrary
                # objects; only defer to cq.Vector equality when `other` wraps
                # an OCC object, otherwise report inequality
                if getattr(other, 'wrapped', None):
                    return super().__eq__(other)
                return False
expected_outer_wire = [
TestVector(2, 0, 0),
TestVector(2, -4, 0),
TestVector(0, -4, 0),
TestVector(0, 0, 0)
]
expected_inner_wire = [
TestVector(0.5, -1.5, 0),
TestVector(0.5, -2.5, 0),
TestVector(1.5, -1.5, 0),
TestVector(1.5, -2.5, 0)
]
def wire_to_vectors(wire: cq.Wire) -> List[cq.Vector]:
return [to_vector(vertex) for vertex in wire.Vertices()]
def to_vector(vertex: cq.Vertex) -> cq.Vector:
return TestVector(vertex.toTuple())
self.assertCountEqual(wire_to_vectors(result.outerWire()), expected_outer_wire)
inner_wires = result.innerWires()
self.assertEqual(len(inner_wires), 1)
self.assertCountEqual(wire_to_vectors(inner_wires[0]), expected_inner_wire)
|
[
"cadquery.Workplane",
"cq_cam.utils.utils.project_face",
"cadquery.Vector"
] |
[((968, 1006), 'cq_cam.utils.utils.project_face', 'utils.project_face', (['face_wp.objects[0]'], {}), '(face_wp.objects[0])\n', (986, 1006), False, 'from cq_cam.utils import utils\n'), ((737, 762), 'cadquery.Vector', 'cq.Vector', (['(0.0)', '(-1.0)', '(0.0)'], {}), '(0.0, -1.0, 0.0)\n', (746, 762), True, 'import cadquery as cq\n'), ((801, 854), 'cadquery.Vector', 'cq.Vector', (['(0.316227766016838)', '(0.0)', '(0.9486832980505139)'], {}), '(0.316227766016838, 0.0, 0.9486832980505139)\n', (810, 854), True, 'import cadquery as cq\n'), ((893, 948), 'cadquery.Vector', 'cq.Vector', (['(-0.9486832980505139)', '(-0.0)', '(0.316227766016838)'], {}), '(-0.9486832980505139, -0.0, 0.316227766016838)\n', (902, 948), True, 'import cadquery as cq\n'), ((300, 318), 'cadquery.Workplane', 'cq.Workplane', (['"""XZ"""'], {}), "('XZ')\n", (312, 318), True, 'import cadquery as cq\n')]
|
# -*- coding: utf-8 -*-
"""Test gui."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from pytest import raises
from ..qt import Qt, QApplication, QWidget, QMessageBox
from ..gui import (GUI, GUIState,
_try_get_matplotlib_canvas,
_try_get_vispy_canvas,
)
from phy.utils import Bunch
from phy.utils._color import _random_color
#------------------------------------------------------------------------------
# Utilities and fixtures
#------------------------------------------------------------------------------
def _create_canvas():
"""Create a VisPy canvas with a color background."""
from vispy import app
c = app.Canvas()
c.color = _random_color()
@c.connect
def on_draw(e): # pragma: no cover
c.context.clear(c.color)
return c
#------------------------------------------------------------------------------
# Test views
#------------------------------------------------------------------------------
def test_vispy_view():
from vispy.app import Canvas
assert isinstance(_try_get_vispy_canvas(Canvas()), QWidget)
def test_matplotlib_view():
from matplotlib.pyplot import Figure
assert isinstance(_try_get_matplotlib_canvas(Figure()), QWidget)
#------------------------------------------------------------------------------
# Test GUI
#------------------------------------------------------------------------------
def test_gui_noapp(tempdir):
if not QApplication.instance():
with raises(RuntimeError): # pragma: no cover
GUI(config_dir=tempdir)
def test_gui_1(tempdir, qtbot):
gui = GUI(position=(200, 100), size=(100, 100), config_dir=tempdir)
qtbot.addWidget(gui)
assert gui.name == 'GUI'
# Increase coverage.
@gui.connect_
def on_show():
pass
gui.unconnect_(on_show)
qtbot.keyPress(gui, Qt.Key_Control)
qtbot.keyRelease(gui, Qt.Key_Control)
assert isinstance(gui.dialog("Hello"), QMessageBox)
view = gui.add_view(_create_canvas(), floating=True, closable=True)
gui.add_view(_create_canvas())
view.setFloating(False)
gui.show()
assert gui.get_view('Canvas')
assert len(gui.list_views('Canvas')) == 2
# Check that the close_widget event is fired when the gui widget is
# closed.
_close = []
@view.connect_
def on_close_widget():
_close.append(0)
@gui.connect_
def on_close_view(view):
_close.append(1)
view.close()
assert _close == [1, 0]
gui.close()
assert gui.state.geometry_state['geometry']
assert gui.state.geometry_state['state']
gui.default_actions.exit()
def test_gui_status_message(gui):
assert gui.status_message == ''
gui.status_message = ':hello world!'
assert gui.status_message == ':hello world!'
gui.lock_status()
gui.status_message = ''
assert gui.status_message == ':hello world!'
gui.unlock_status()
gui.status_message = ''
assert gui.status_message == ''
def test_gui_geometry_state(tempdir, qtbot):
_gs = []
gui = GUI(size=(100, 100), config_dir=tempdir)
qtbot.addWidget(gui)
gui.add_view(_create_canvas(), 'view1')
gui.add_view(_create_canvas(), 'view2')
gui.add_view(_create_canvas(), 'view2')
@gui.connect_
def on_close():
_gs.append(gui.save_geometry_state())
gui.show()
qtbot.waitForWindowShown(gui)
assert len(gui.list_views('view')) == 3
assert gui.view_count() == {
'view1': 1,
'view2': 2,
}
gui.close()
# Recreate the GUI with the saved state.
gui = GUI(config_dir=tempdir)
gui.add_view(_create_canvas(), 'view1')
gui.add_view(_create_canvas(), 'view2')
gui.add_view(_create_canvas(), 'view2')
@gui.connect_
def on_show():
gui.restore_geometry_state(_gs[0])
assert gui.restore_geometry_state(None) is None
qtbot.addWidget(gui)
gui.show()
assert len(gui.list_views('view')) == 3
assert gui.view_count() == {
'view1': 1,
'view2': 2,
}
gui.close()
#------------------------------------------------------------------------------
# Test GUI state
#------------------------------------------------------------------------------
def test_gui_state_view(tempdir):
view = Bunch(name='MyView0')
state = GUIState(config_dir=tempdir)
state.update_view_state(view, dict(hello='world'))
assert not state.get_view_state(Bunch(name='MyView'))
assert not state.get_view_state(Bunch(name='MyView1'))
assert state.get_view_state(view) == Bunch(hello='world')
|
[
"phy.utils._color._random_color",
"matplotlib.pyplot.Figure",
"pytest.raises",
"phy.utils.Bunch",
"vispy.app.Canvas"
] |
[((812, 824), 'vispy.app.Canvas', 'app.Canvas', ([], {}), '()\n', (822, 824), False, 'from vispy import app\n'), ((839, 854), 'phy.utils._color._random_color', '_random_color', ([], {}), '()\n', (852, 854), False, 'from phy.utils._color import _random_color\n'), ((4447, 4468), 'phy.utils.Bunch', 'Bunch', ([], {'name': '"""MyView0"""'}), "(name='MyView0')\n", (4452, 4468), False, 'from phy.utils import Bunch\n'), ((4723, 4743), 'phy.utils.Bunch', 'Bunch', ([], {'hello': '"""world"""'}), "(hello='world')\n", (4728, 4743), False, 'from phy.utils import Bunch\n'), ((1234, 1242), 'vispy.app.Canvas', 'Canvas', ([], {}), '()\n', (1240, 1242), False, 'from vispy.app import Canvas\n'), ((1374, 1382), 'matplotlib.pyplot.Figure', 'Figure', ([], {}), '()\n', (1380, 1382), False, 'from matplotlib.pyplot import Figure\n'), ((1646, 1666), 'pytest.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1652, 1666), False, 'from pytest import raises\n'), ((4601, 4621), 'phy.utils.Bunch', 'Bunch', ([], {'name': '"""MyView"""'}), "(name='MyView')\n", (4606, 4621), False, 'from phy.utils import Bunch\n'), ((4659, 4680), 'phy.utils.Bunch', 'Bunch', ([], {'name': '"""MyView1"""'}), "(name='MyView1')\n", (4664, 4680), False, 'from phy.utils import Bunch\n')]
|
import os
import sys
from collections import OrderedDict
from absl import logging
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms.functional as TF
import pytorch_lightning as pl
import e2cnn.gspaces
import e2cnn.nn
from .base import VariationalAutoEncoderModule
from elm.nn import MLP, GConvNN, GConvTransposeNN
class GConvVAE(VariationalAutoEncoderModule):
def __init__(self,
in_channels,
out_channels,
n_channels,
img_size,
dim_latent,
activation=F.relu,
readout_fn=None,
fiber_group='rot_2d',
n_rot=4,
optim_lr=0.0001,
profiler=None):
super().__init__(in_channels=in_channels,
out_channels=out_channels,
n_channels=n_channels,
img_size=img_size,
dim_latent=dim_latent,
activation=activation,
readout_fn=readout_fn,
optim_lr=optim_lr,
profiler=profiler)
self.fiber_group = fiber_group
self.n_rot = n_rot
self._create_networks()
self.params = self.parameters()
logging.debug("-------- GConv VAE ---------")
logging.debug("-------- Trainable Variables ---------")
for name, p in self.named_parameters():
logging.debug("{}, {}".format(name, p.size()))
logging.debug("--------------------------------------")
def _create_networks(self):
self.n_flip = 1
if 'flip' in self.fiber_group:
self.n_flip = 2
nc = self.n_channels
self.encoder = torch.nn.Sequential(
GConvNN(in_channels=self.in_channels,
out_channels=[nc,nc,2*nc,2*nc,2*nc],
kernel_size=[3,3,3,3,5],
stride=[2,2,2,2,2],
padding_mode='circular',
activation=self.activation,
out_activation=None,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot))
self.flatten = torch.nn.Flatten()
self.encoder_mlp = MLP(in_sizes=self.n_rot*self.n_flip*2*2*2*nc,
out_sizes=[2*self.dim_latent])
self.decoder = torch.nn.Sequential(
GConvTransposeNN(in_channels=2*nc,
out_channels=[2*nc,2*nc,nc,nc,nc,self.in_channels],
kernel_size=[5,3,3,3,3,3],
stride=[2,2,2,2,2,1],
padding_mode='circular',
activation=self.activation,
out_activation=self.readout_fn,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot))
self.decoder_mlp = MLP(in_sizes=self.dim_latent,
out_sizes=[self.n_rot*self.n_flip*2*2*2*nc],
out_activation=self.activation)
self.unflatten = torch.nn.Unflatten(dim=1, unflattened_size=(2*nc*self.n_flip*self.n_rot,2,2))
def encode(self, x):
z = self.encoder(x)
z = self.encoder_mlp(self.flatten(z))
mu, log_sigma_sq = torch.chunk(z, chunks=2, dim=-1)
return mu, log_sigma_sq
def decode(self, z):
z = self.unflatten(self.decoder_mlp(z))
x_hat = self.decoder(z)
return x_hat
    def reparameterize(self, mu, log_sigma_sq):
        # reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
        # which keeps the sampling step differentiable w.r.t. mu and log_sigma_sq
        sigma = torch.exp(log_sigma_sq/2.)
        eps = torch.normal(torch.zeros_like(mu), torch.ones_like(sigma))
        return eps * sigma + mu
def reconstruct(self, x):
mu, _ = self.encode(x)
x_hat = self.decode(mu)
return x_hat
def generate(self, n_samples=16):
z = torch.normal(torch.zeros(n_samples, self.dim_latent), torch.ones(n_samples, self.dim_latent))
x_hat = self.decode(z)
return x_hat
def forward(self, x):
mu, _ = self.encode(x)
return mu
def compute_loss_and_metrics(self, x, y=None):
mu, log_sigma_sq = self.encode(x)
z = self.reparameterize(mu, log_sigma_sq)
x_hat = self.decode(z)
        # reconstruction term: summed squared error, averaged over the batch
        recon_loss = F.mse_loss(x_hat, x, reduction='sum') / x.size()[0]
        # closed-form KL(N(mu, sigma^2) || N(0, I)), averaged over the batch
        kl_loss = torch.sum(-0.5 * torch.sum(1 + log_sigma_sq - mu ** 2 - log_sigma_sq.exp(), dim=1), dim=0) / x.size()[0]
loss = recon_loss + kl_loss
logs = {
"recon": recon_loss,
"kl": kl_loss,
"elbo": loss
}
return loss, logs
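# --- Hedged usage sketch (added; every value below is an assumption, chosen
# so that the five stride-2 encoder convolutions reduce a 64x64 input to the
# 2x2 spatial size the flatten/unflatten layers above expect) ---
# model = GConvVAE(in_channels=1, out_channels=1, n_channels=8,
#                  img_size=64, dim_latent=16, n_rot=4)
# x = torch.rand(8, 1, 64, 64)
# loss, logs = model.compute_loss_and_metrics(x)
# samples = model.generate(n_samples=4)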
|
[
"torch.ones_like",
"torch.ones",
"torch.nn.Unflatten",
"torch.zeros_like",
"absl.logging.debug",
"torch.nn.functional.mse_loss",
"torch.zeros",
"torch.exp",
"torch.chunk",
"elm.nn.GConvTransposeNN",
"elm.nn.MLP",
"elm.nn.GConvNN",
"torch.nn.Flatten"
] |
[((1296, 1353), 'absl.logging.debug', 'logging.debug', (['"""-------- GConv VAE ---------"""'], {}), "('-------- GConv VAE ---------')\n", (1309, 1353), False, 'from absl import logging\n'), ((1358, 1413), 'absl.logging.debug', 'logging.debug', (['"""-------- Trainable Variables ---------"""'], {}), "('-------- Trainable Variables ---------')\n", (1371, 1413), False, 'from absl import logging\n'), ((1515, 1570), 'absl.logging.debug', 'logging.debug', (['"""--------------------------------------"""'], {}), "('--------------------------------------')\n", (1528, 1570), False, 'from absl import logging\n'), ((2226, 2244), 'torch.nn.Flatten', 'torch.nn.Flatten', ([], {}), '()\n', (2242, 2244), False, 'import torch\n'), ((2268, 2361), 'elm.nn.MLP', 'MLP', ([], {'in_sizes': '(self.n_rot * self.n_flip * 2 * 2 * 2 * nc)', 'out_sizes': '[2 * self.dim_latent]'}), '(in_sizes=self.n_rot * self.n_flip * 2 * 2 * 2 * nc, out_sizes=[2 * self\n .dim_latent])\n', (2271, 2361), False, 'from elm.nn import MLP, GConvNN, GConvTransposeNN\n'), ((2915, 3035), 'elm.nn.MLP', 'MLP', ([], {'in_sizes': 'self.dim_latent', 'out_sizes': '[self.n_rot * self.n_flip * 2 * 2 * 2 * nc]', 'out_activation': 'self.activation'}), '(in_sizes=self.dim_latent, out_sizes=[self.n_rot * self.n_flip * 2 * 2 *\n 2 * nc], out_activation=self.activation)\n', (2918, 3035), False, 'from elm.nn import MLP, GConvNN, GConvTransposeNN\n'), ((3065, 3155), 'torch.nn.Unflatten', 'torch.nn.Unflatten', ([], {'dim': '(1)', 'unflattened_size': '(2 * nc * self.n_flip * self.n_rot, 2, 2)'}), '(dim=1, unflattened_size=(2 * nc * self.n_flip * self.\n n_rot, 2, 2))\n', (3083, 3155), False, 'import torch\n'), ((3260, 3292), 'torch.chunk', 'torch.chunk', (['z'], {'chunks': '(2)', 'dim': '(-1)'}), '(z, chunks=2, dim=-1)\n', (3271, 3292), False, 'import torch\n'), ((3498, 3527), 'torch.exp', 'torch.exp', (['(log_sigma_sq / 2.0)'], {}), '(log_sigma_sq / 2.0)\n', (3507, 3527), False, 'import torch\n'), ((1768, 2055), 'elm.nn.GConvNN', 'GConvNN', ([], {'in_channels': 'self.in_channels', 'out_channels': '[nc, nc, 2 * nc, 2 * nc, 2 * nc]', 'kernel_size': '[3, 3, 3, 3, 5]', 'stride': '[2, 2, 2, 2, 2]', 'padding_mode': '"""circular"""', 'activation': 'self.activation', 'out_activation': 'None', 'use_bias': '(True)', 'fiber_group': 'self.fiber_group', 'n_rot': 'self.n_rot'}), "(in_channels=self.in_channels, out_channels=[nc, nc, 2 * nc, 2 * nc,\n 2 * nc], kernel_size=[3, 3, 3, 3, 5], stride=[2, 2, 2, 2, 2],\n padding_mode='circular', activation=self.activation, out_activation=\n None, use_bias=True, fiber_group=self.fiber_group, n_rot=self.n_rot)\n", (1775, 2055), False, 'from elm.nn import MLP, GConvNN, GConvTransposeNN\n'), ((2424, 2745), 'elm.nn.GConvTransposeNN', 'GConvTransposeNN', ([], {'in_channels': '(2 * nc)', 'out_channels': '[2 * nc, 2 * nc, nc, nc, nc, self.in_channels]', 'kernel_size': '[5, 3, 3, 3, 3, 3]', 'stride': '[2, 2, 2, 2, 2, 1]', 'padding_mode': '"""circular"""', 'activation': 'self.activation', 'out_activation': 'self.readout_fn', 'use_bias': '(True)', 'fiber_group': 'self.fiber_group', 'n_rot': 'self.n_rot'}), "(in_channels=2 * nc, out_channels=[2 * nc, 2 * nc, nc, nc,\n nc, self.in_channels], kernel_size=[5, 3, 3, 3, 3, 3], stride=[2, 2, 2,\n 2, 2, 1], padding_mode='circular', activation=self.activation,\n out_activation=self.readout_fn, use_bias=True, fiber_group=self.\n fiber_group, n_rot=self.n_rot)\n", (2440, 2745), False, 'from elm.nn import MLP, GConvNN, GConvTransposeNN\n'), ((3548, 3568), 'torch.zeros_like', 'torch.zeros_like', (['mu'], {}), '(mu)\n', (3564, 3568), False, 'import torch\n'), ((3570, 3592), 'torch.ones_like', 'torch.ones_like', (['sigma'], {}), '(sigma)\n', (3585, 3592), False, 'import torch\n'), ((3786, 3825), 'torch.zeros', 'torch.zeros', (['n_samples', 'self.dim_latent'], {}), '(n_samples, self.dim_latent)\n', (3797, 3825), False, 'import torch\n'), ((3827, 3865), 'torch.ones', 'torch.ones', (['n_samples', 'self.dim_latent'], {}), '(n_samples, self.dim_latent)\n', (3837, 3865), False, 'import torch\n'), ((4159, 4196), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['x_hat', 'x'], {'reduction': '"""sum"""'}), "(x_hat, x, reduction='sum')\n", (4169, 4196), True, 'import torch.nn.functional as F\n')]
|
from ncbi.ncbi_taxonomy_parser import TaxonomyParser, Taxonomy
from common.database import *
from common.utils import get_data_dir
import os
# default strain for their species for organism searching
LMDB_SPECIES_MAPPING_STRAIN = ['367830','511145', '272563', '208964', '559292']
DATA_SOURCE = 'NCBI Taxonomy'
def write_LMDB_annotation_file(database, base_dir, excluded_names=['environmental sample']):
    '''
    Export species nodes (and their children) for LMDB annotation.
    :param database: graph database connection used for the strain-replacement query
    :param base_dir: base data directory handed to TaxonomyParser
    :param excluded_names: list of tax names that should not be used for annotation
    :return:
    '''
# find the strains that are used to replace its parent species for annotation
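    # {PROP_ID} below is left as a placeholder: presumably it is filled in
    # (e.g. via str.format with a PROP_ID constant star-imported from
    # common.database) before the query is executed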
query = """
match p=(n:Taxonomy)-[:HAS_PARENT*0..]->(:Taxonomy {rank: 'species'})
where n.{PROP_ID} in $tax_ids
with nodes(p) as nodes, n
unwind nodes as p
with n, p where n <> p
return n.{PROP_ID} as tax_id, p.{PROP_ID} as parent_id, p.name as parent_name
"""
df = database.get_data(query, {'tax_ids': LMDB_SPECIES_MAPPING_STRAIN})
replace_id_map = {}
for index, row in df.iterrows():
replace_id_map[row['parent_id']] = row['tax_id']
parser = TaxonomyParser(base_dir)
nodes = parser.parse_files()
outfile = os.path.join(parser.output_dir, 'species_for_LMDB.tsv')
with open(outfile, 'w') as f:
f.write('tax_id\trank\tcategory\tname\tname_class\torig_tax_id\tdata_source\n')
for node in nodes.values():
if node.top_category and node.rank == 'species':
_write_node_names(node, f, excluded_names, replace_id_map)
def _write_node_names(tax, file, exclude_node_names=[], replace_id_map=None):
"""
recursively write node names and children names
:param tax: tax node
:param file: outfile
:param exclude_node_names:
:param replace_id_map:
:return:
"""
if exclude_node_names:
# if tax name contains the exclude_node_name, return without writing
for name in tax.names.keys():
for exclude in exclude_node_names:
if exclude in name:
return
    if replace_id_map and tax.tax_id in replace_id_map:
        tax.orig_id = tax.tax_id
        tax.tax_id = replace_id_map[tax.orig_id]
    # fall back to the node's own id so orig_tax_id is populated even when
    # no strain replacement occurred
    orig_id = getattr(tax, 'orig_id', tax.tax_id)
    lines = ''
    for name, name_class in tax.names.items():
        lines = lines + '\t'.join([tax.tax_id, tax.rank, tax.top_category, name, name_class, orig_id, DATA_SOURCE]) + '\n'
file.write(lines)
for child in tax.children:
_write_node_names(child, file, exclude_node_names, replace_id_map)
def main():
database = get_database()
# pass the write base_data_dir for the parser
write_LMDB_annotation_file(database, get_data_dir())
database.close()
if __name__ == "__main__":
main()
|
[
"ncbi.ncbi_taxonomy_parser.TaxonomyParser",
"os.path.join",
"common.utils.get_data_dir"
] |
[((1163, 1187), 'ncbi.ncbi_taxonomy_parser.TaxonomyParser', 'TaxonomyParser', (['base_dir'], {}), '(base_dir)\n', (1177, 1187), False, 'from ncbi.ncbi_taxonomy_parser import TaxonomyParser, Taxonomy\n'), ((1235, 1290), 'os.path.join', 'os.path.join', (['parser.output_dir', '"""species_for_LMDB.tsv"""'], {}), "(parser.output_dir, 'species_for_LMDB.tsv')\n", (1247, 1290), False, 'import os\n'), ((2698, 2712), 'common.utils.get_data_dir', 'get_data_dir', ([], {}), '()\n', (2710, 2712), False, 'from common.utils import get_data_dir\n')]
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/15 13:49
# @Author : <NAME>
# @FileName: parse_uniprot_header.py
# @Usage:
# @Note:
# @E-mail: <EMAIL>
import pandas as pd
import re
class UniprotParse:
def __init__(self, _input_fasta):
self.input = _input_fasta
self.output = None
def parse(self):
with open(self.input, 'r', encoding='utf-8') as f:
_header_list = [line.strip('>') for line in f if line.startswith('>')]
_header_parse_list = []
for _header in _header_list:
_ele_dict = {'ID': _header.split()[0].split('|')[1],
'Entry': _header.split()[0].split('|')[2]}
_description_list = []
for _ele in _header.split()[1:]:
pre_fix = re.match('OX=|OS=|GN=|PE=|SV=', _ele)
if pre_fix:
_ele_dict[pre_fix.group().strip("=")] = _ele.split('=')[1]
else:
_description_list.append(_ele)
_ele_dict['Description'] = ' '.join(_description_list)
_header_parse_list.append(_ele_dict)
self.output = pd.DataFrame(_header_parse_list)
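# --- Illustrative note (added): for a header such as
# ">sp|P69905|HBA_HUMAN Hemoglobin subunit alpha OS=Homo sapiens OX=9606 GN=HBA1 PE=1 SV=2"
# parse() yields ID='P69905', Entry='HBA_HUMAN', GN='HBA1', OX='9606', etc.
# Because tokens are split on whitespace, a multi-word OS value keeps only
# its first word (OS='Homo'); the remaining words land in Description. ---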
if __name__ == '__main__':
import argparse
    parser = argparse.ArgumentParser(description="This is the script to get Uniprot fasta header information "
                                                     "and use it to interpret BLAST results")
sub_parser = parser.add_subparsers(title='', dest='interpret/parse')
sub_parser.required = True
parse_parser = sub_parser.add_parser(
'parse', help='Parse Uniprot fasta headers to a table')
parse_parser.add_argument('-i', '--input_file', required=True,
help='<filepath> The uniprot fasta')
parse_parser.add_argument('-o', '--output_file', required=True,
help='<filepath> The output path')
parse_parser.set_defaults(subcmd="parse")
interpret_parser = sub_parser.add_parser(
'interpret', help='Interpret BLAST results')
interpret_parser.add_argument('-i', '--input_file', required=True,
help='<filepath> The BLAST result, only format six is acceptable')
    interpret_parser.add_argument('-u', '--uniprot', required=True,
                                  help='<filepath> The Uniprot fasta header information generated by "parse" function')
interpret_parser.add_argument('-c', '--column', required=True, type=int,
help='<int> Specify which column in BLAST result contains the identifier of Uniprot')
interpret_parser.add_argument('-o', '--output_file', required=True,
help='<filepath> The output path')
interpret_parser.set_defaults(subcmd="interpret")
args = parser.parse_args()
if args.subcmd == "parse":
uni = UniprotParse(args.input_file)
uni.parse()
uni.output.to_csv(args.output_file, index=False, sep='\t')
if args.subcmd == "interpret":
blast_result = pd.read_table(args.input_file, header=None)
uniprot_info = pd.read_table(args.uniprot)
blast_result[args.column-1] = blast_result[args.column-1].apply(lambda x: x.split('|')[1])
result = pd.merge(blast_result, uniprot_info[['ID', 'GN', 'Description']],
left_on=args.column-1,
right_on='ID',
how='left')
result.drop('ID', axis=1, inplace=True)
result.to_csv(args.output_file, header=False, index=False, sep='\t', float_format='%.3g')
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.merge",
"re.match",
"pandas.read_table"
] |
[((1221, 1362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This is the script to get Uniprot fasta header information and use it to interpret BLAST results"""'}), "(description=\n 'This is the script to get Uniprot fasta header information and use it to interpret BLAST results'\n )\n", (1244, 1362), False, 'import argparse\n'), ((1126, 1158), 'pandas.DataFrame', 'pd.DataFrame', (['_header_parse_list'], {}), '(_header_parse_list)\n', (1138, 1158), True, 'import pandas as pd\n'), ((3039, 3082), 'pandas.read_table', 'pd.read_table', (['args.input_file'], {'header': 'None'}), '(args.input_file, header=None)\n', (3052, 3082), True, 'import pandas as pd\n'), ((3106, 3133), 'pandas.read_table', 'pd.read_table', (['args.uniprot'], {}), '(args.uniprot)\n', (3119, 3133), True, 'import pandas as pd\n'), ((3250, 3372), 'pandas.merge', 'pd.merge', (['blast_result', "uniprot_info[['ID', 'GN', 'Description']]"], {'left_on': '(args.column - 1)', 'right_on': '"""ID"""', 'how': '"""left"""'}), "(blast_result, uniprot_info[['ID', 'GN', 'Description']], left_on=\n args.column - 1, right_on='ID', how='left')\n", (3258, 3372), True, 'import pandas as pd\n'), ((770, 807), 're.match', 're.match', (['"""OX=|OS=|GN=|PE=|SV="""', '_ele'], {}), "('OX=|OS=|GN=|PE=|SV=', _ele)\n", (778, 807), False, 'import re\n')]
|
"""add site airtable
Revision ID: da6f10c8ebf4
Revises: aaae4ae18288
Create Date: 2019-11-29 07:48:18.074193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "da6f10c8ebf4"
down_revision = "aaae4ae18288"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"Site", sa.Column("airtable_id", sa.String(256), nullable=True, unique=True)
)
try:
upgrade_data()
op.alter_column(
"Site", "airtable_id", nullable=False, existing_type=sa.String(256)
)
except Exception as e:
op.drop_column("Site", "airtable_id")
raise e
def downgrade():
op.drop_column("Site", "airtable_id")
sites = sa.sql.table(
"Site",
sa.Column("site_id", sa.Integer),
sa.Column("url", sa.String(1024)),
sa.Column("airtable_id", sa.String(256)),
)
def upgrade_data():
for site_id, url in [
(111, "https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ"),
(58, "https://www.facebook.com/blesseverydayaroundyou/"),
]:
op.execute(
            sites.delete().where(sa.and_(sites.c.site_id == site_id, sites.c.url == url))
)
for url, airtable_id in airtable_id_map.items():
op.execute(
sites.update()
.where(sites.c.url == url)
.values({"airtable_id": airtable_id})
)
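# upgrade_data() first deletes two specific Site rows (their URLs appear
# twice in airtable_id_map; see the XXX markers below), then backfills
# airtable_id row by row. Each op.execute() above emits plain SQL against
# the lightweight table construct, roughly (illustrative values):
#   UPDATE "Site" SET airtable_id = 'rec...' WHERE url = 'https://...';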
airtable_id_map = {
## XXX duplicated
# "https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ":"rec8rzS7SqKqnQuio",
"https://www.youtube.com/channel/UCmgDmqjxbkqIXu4rNbrKodA": "rectV3bxAU2YrpWQW",
"https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ": "rec6OsbxedCXaW1j1",
"https://www.youtube.com/channel/UCpu3bemTQwAU8PqM4kJdoEQ": "recYUDT5JflPA2OoF",
"https://www.youtube.com/channel/UCN2e8dLY9KnH-B15LyBM7Fg": "recQWxWEaVUWUcWYX",
"https://www.ptt.cc/bbs/HatePolitics/index.html": "recsh7wPi68vLDNWk",
"https://www.ptt.cc/bbs/Gossiping/index.html": "recLVrfhLyQDCzDA8",
"https://taronews.tw/": "recJFqGr5a1xfaf8o",
"https://www.cna.com.tw/": "recQUiciCUROnBe4A",
"http://news.ltn.com.tw/": "recMqu8b2B0fjCWIF",
"https://udn.com/": "reci0cxTv83iSeHl8",
"https://tw.appledaily.com/new/realtime": "recgBO5TsaGP8MLbg",
"https://tw.appledaily.com/": "recW0Y3DQ3DaeRQ7Y",
"https://www.ettoday.net/": "recJ9pSXGsxE4kmn9",
"https://news.ebc.net.tw/": "recBW5P0o0fKX2T1L",
"https://www.chinatimes.com/": "recslfJAoVKDbdh24",
"https://www.eatnews.net/": "rec3Wrnrb3GTcDivT",
"https://www.taiwanmazu.org/": "recB4NpLrTvUwWovp",
"http://tailian.taiwan.cn/": "recG8g1JoHti4T8fO",
"https://www.toutiao.com/": "recirH5ayaKXA633m",
"http://www.itaiwannews.cn/": "reczA8cBEGIcvwo1B",
"http://nooho.net": "recXoBEAH8TRdhZYj",
"http://taiwan-madnews.com": "recXa7wpjdcrWT8X7",
"http://taiwan-politicalnews.com": "recPnWuwH01QTAZPX",
"http://hssszn.com": "recBpcl1dLZQpY2Q5",
"http://fafa01.com": "recGN46B3LnnA8LbF",
"http://qiqi.today": "recRl8ORrU0IKWkBZ",
"http://defense.rocks": "recgFCKXWH8hBt6Rw",
"http://cnba.live": "rec3HARifvZvpwmzE",
"http://i77.today": "recSV8S0hvZY3ZTuA",
"http://77s.today": "recgTV83ZY5NWWnGT",
"http://www.qiqi.world/": "reca6qh8fo3mCqfCh",
"http://www.mission-tw.com/mission": "recJjdr5Jb4fGe9Os",
"http://www.taiwan.cn/": "recuC7NzKlui3dcd6",
"https://www.facebook.com/eatnews/": "recQshOYa9lZin1AU",
"https://www.facebook.com/search/top/?q=%E6%96%87%E5%B1%B1%E4%BC%AF&epa=SEARCH_BOX": "rec7xrqokEMg3s5L9",
"https://www.facebook.com/znk168/": "recIjToauNtBJdyQu",
"https://www.facebook.com/almondbrother/": "receHukPdyaKCtBMj",
"https://www.facebook.com/Colorlessrise/": "recMSPrWl8AuExQMk",
"https://www.facebook.com/pg/KSMissLin/groups/?referrer=pages_groups_card_cta&ref=page_internal": "rech9IRKLxxB0kx2w",
"https://www.facebook.com/%E5%BC%B7%E5%BC%B7%E6%BB%BE%E5%A4%A7%E5%93%A5-%E9%98%BF%E8%AA%8C-1088027454701943/?__tn__=%2Cd%2CP-R&eid=ARBiDxJohZf5_icvMw2BXVNG2nHG4VR9b_ArA_Tc6PfA98MtdnGw1xVKWvIdE-X1wfSteOnhr6PxVDUX": "recx88UIQkLjJ10wU",
"https://www.facebook.com/twherohan/": "recAY2H12zcSbCfhv",
"https://www.facebook.com/%E8%A8%B1%E6%B7%91%E8%8F%AF-130771133668155/": "recvnH2Lot8ypWNrl",
"https://www.facebook.com/hsiweiC/": "recVBlckyMtFmlh82",
"https://www.facebook.com/groups/260112827997606/": "recIttcUl3HPUoqzj",
"https://www.facebook.com/groups/391568921608431/": "recpKtnBclXwY4aqG",
"https://www.facebook.com/groups/2072874742809044/": "recDffdw3nHCyVE3j",
"https://www.facebook.com/groups/488305761638330/": "recm3eGGXPkOtfFLr",
"https://www.facebook.com/groups/389408621877306/": "recdvH8v3SJX5TpRZ",
"https://www.facebook.com/groups/768267203515635/": "recvKtQ84sirCdMzD",
"https://www.facebook.com/straitstoday/": "recLmQSJ5BUyrpKE6",
"https://www.facebook.com/youth86/": "recH6lOgxmwbsfu6N",
"https://www.facebook.com/groups/1148919035153224/": "recw8GIqZ6a4HXzR4",
"https://www.facebook.com/knowledge.practice.studio/": "rec7YTWk5wIUlQ25Z",
"https://www.facebook.com/Templelivenetwork/": "recwAHI4ZH36ZOeeb",
"https://www.facebook.com/%E4%B8%80%E8%B5%B7%E8%BF%BD%E5%8A%87%E5%90%A7-2407159445968433/": "recBYOI6sd8UPLnsm",
"https://www.facebook.com/KMTTCC/": "reciCaICxxil0pnSj",
"https://www.facebook.com/Quotations.life168/": "recGSreihqP7XX1C0",
"https://www.facebook.com/ZhongHuaYRM/": "recfLM0dY6CKhVNuR",
"https://www.facebook.com/happyworld88": "recMx7tumAkDqZulR",
"https://www.facebook.com/traveltheworld168/": "recSdwgOnLSFlZajU",
"https://www.facebook.com/yifanfengshun888/": "recQTMyEWf2xsCelK",
"https://www.facebook.com/world.tw/": "rec5cEt7NvB3TcI79",
"https://www.facebook.com/HaterIsHere/": "recTMSPJmmQXBfcDO",
"https://www.facebook.com/jesusSavesF13/": "rechrvzObklDq6Xcj",
"https://www.facebook.com/TaiwanNeutra/": "recANFv93ormFlTiT",
"https://www.facebook.com/%E9%9F%93%E5%AE%B6%E8%BB%8D%E9%90%B5%E7%B2%89%E8%81%AF%E7%9B%9F-837868789721803/": "recc9xwpmhaoLMgzx",
"https://www.facebook.com/%E7%B5%B1%E4%B8%80%E4%B8%AD%E5%9C%8B%E4%B8%AD%E5%9C%8B%E7%B5%B1%E4%B8%80-%E7%BB%9F%E4%B8%80%E4%B8%AD%E5%9B%BD%E4%B8%AD%E5%9B%BD%E7%BB%9F%E4%B8%80-1403317033298680/": "recmv1QvbaruPxERN",
"https://www.facebook.com/%E5%8F%8D%E8%94%A1%E8%8B%B1%E6%96%87%E7%B2%89%E7%B5%B2%E5%9C%98-257062087822640/": "recLTcnCQdOyMgZX4",
"https://www.facebook.com/CTTATTACK/": "recuhN7EituL81XfD",
"https://www.facebook.com/Gyhappyboard/": "recUfUuenCqEXY13X",
"https://www.facebook.com/%E8%A9%B1%E8%AA%AA%E9%82%A3%E4%BA%9B%E7%B7%A8%E9%80%A0%E7%9A%84%E4%BA%8B-304688810020434/": "rec4z05fcic3vlQyq",
## XXX duplicated
# "https://www.facebook.com/blesseverydayaroundyou/":"recUUs0ITu6PrpVIo",
"https://www.facebook.com/%E8%94%A1%E8%8B%B1%E6%96%87%E4%B8%8B%E5%8F%B0%E7%BD%AA%E7%8B%80%E9%9B%86%E7%B5%90%E7%B8%BD%E9%83%A8-121570255108696/": "reclAN9s2yWASp9A8",
"https://www.facebook.com/CapricornStory4U/": "recLduxn9D5XD2w3p",
"https://www.facebook.com/blesseverydayaroundyou/": "recVQ6iGSGFFAuK3I",
"https://www.facebook.com/inability.dpp/": "recojKVhcsmrUQVrV",
"https://www.facebook.com/%E8%97%8D%E8%89%B2%E6%AD%A3%E7%BE%A9%E5%8A%9B%E9%87%8F-1100522356652838/": "recm0Qil3pdQRPJq3",
"https://www.facebook.com/LIKEHISTORYWORLD/": "recaSQDs9KIuUZL3g",
"https://www.facebook.com/GCAironbloodarmy/": "recxjVgJQ4QA7vnP2",
"https://www.facebook.com/globalchinesenewsunion/": "recS0IahdjcUZ2uV5",
"https://www.facebook.com/GlobalChineselove/": "recXvfkeYIWRS1yDG",
"https://www.facebook.com/cbcarmy/": "rec0GLO9KrkL26Hl9",
"https://www.facebook.com/Islandofghost/": "recaxv1mbJzhBUmvh",
"https://www.facebook.com/GhostIslandNews/": "recnfmS6KQq8ADPdq",
"https://www.facebook.com/lovebakinglovehealthy/": "recqDcHtzstSEYuEN",
"https://www.facebook.com/getoutdpp/": "recGhjG3J67YawoV3",
"https://www.facebook.com/%E7%BD%B7%E5%85%8D%E6%B0%91%E9%80%B2%E9%BB%A8-2129370967290567/": "rec3rJ5tNg2otD5qz",
"https://www.facebook.com/johncelayo/": "rec8n4wKSsbOAyq1J",
"https://www.facebook.com/grumbledpp/": "rec64LvmyaPlP4kBP",
"https://www.facebook.com/%E6%96%87%E9%9D%92%E5%B7%A5%E4%BD%9C%E6%9C%83-510052339062419/": "rec8Z1YuT8hWKYbG2",
"https://www.facebook.com/%E9%9D%A0%E5%8C%97%E6%B0%91%E9%80%B2%E9%BB%A8-454656575008713/": "recwLUUVEocoCeT8g",
"https://www.facebook.com/bigchinalove/": "recPUgrixj8HPlVUp",
"https://www.facebook.com/shengser/": "rec63fhQeP0MU3357",
"https://www.facebook.com/%E8%A8%8E%E5%8E%AD%E6%B0%91%E9%80%B2%E9%BB%A8-504021696772145/": "rec7l2nBPLFj4sOmr",
"https://www.facebook.com/%E9%9D%A0%E5%8C%97%E6%99%82%E4%BA%8B-165534787282102/": "recGCFPh0DWZ6MG4i",
"https://www.facebook.com/taiwan1314520/": "rec9BS2RnG7Bi773d",
"https://www.facebook.com/fuqidao168/": "recVbbS2hFI2S39z7",
"https://www.facebook.com/GlobalChineseAlliance/": "recEvRHB5bqjxS6ES",
"https://www.facebook.com/%E5%A4%A9%E5%8D%97%E5%9C%B0%E5%8C%97-1063653903655415/": "recdWAeftdXBwOLIX",
"https://www.facebook.com/kmtdppisshit/": "rec6s2d1TXlmUI2nG",
"https://www.facebook.com/catssssssssssssss/": "recpu60Ei5EqoEXxn",
"https://www.facebook.com/qiqi.news/": "recOpNLBJ4R2mmCqM",
"https://www.facebook.com/dogcat101300/": "recXy5Rkxp0PhMpCs",
"https://www.facebook.com/travelmoviemusic/": "recw9FN2e3jZFJwqX",
"https://www.facebook.com/imangonews/": "recVrU412hfv2dChw",
"https://www.facebook.com/%E4%BA%BA%E7%94%9F%E6%AD%A3%E8%83%BD%E9%87%8F-1349834938455966/": "reccVfkXwa6u8R4o3",
"https://www.facebook.com/%E4%BA%BA%E7%94%9F%E7%AC%91%E8%91%97%E8%B5%B0-1958092751106755/": "recEnSF53PkWENrhs",
"https://www.facebook.com/thumbsuplifenews/": "recqbh2I61V2JArRi",
"https://www.facebook.com/hssszn/": "recODAxW73l6JpJJ7",
"https://www.facebook.com/aroundtheworld01/": "recjrgKJKwH1ru67m",
"https://www.facebook.com/%E5%8F%8D%E8%94%A1%E8%8B%B1%E6%96%87%E8%81%AF%E7%9B%9F%E5%85%A8%E5%9C%8B%E6%B0%91%E6%80%A8%E5%97%86%E8%94%A1%E7%B8%BD%E9%83%A8-1566024720346478/": "rectYderJ2wfojGfN",
"https://www.facebook.com/%E9%9D%92%E5%A4%A9%E7%99%BD%E6%97%A5%E6%AD%A3%E7%BE%A9%E5%8A%9B%E9%87%8F-1006889099430655/": "recjnR3SPoTTEUT15",
"https://www.guancha.cn/": "recE5pFRI2dRUdsBB",
"https://news.163.com": "recqZh8SLNtPITFo9",
"https://kknews.cc/": "recKJwOC1QvSQJgKB",
"http://www.readthis.one/": "recxMOjlGZDoUbLWc",
"https://www.coco01.today/": "recjILSLlLRmkgP5I",
"https://www.ptt01.cc/": "recj4kR6ExZgXdzOk",
"https://www.xuehua.us/": "recbEZkJV8k2Fg91E",
"https://www.orgs.one/": "recJVUAsWSsbKz9N0",
"http://www.how01.com/": "rec03ujV04yeDHeAu",
"https://read01.com/zh-tw/": "recwO0vYEkxI4JbBl",
"https://www.youtube.com/channel/UCgkHTZsCdH8P9z7lazTXN3g": "recnUmD0TFC1UPMPH",
"https://www.youtube.com/channel/UCJHq28mKJowPCGQ0WDIDU9A": "recjh6Rzp8iCarxF3",
"https://www.youtube.com/channel/UCMcDqLHgIuXWtWsqPEkqnWA": "recyUFTVMNsGGuCAV",
}
|
[
"sqlalchemy.String",
"alembic.op.drop_column",
"sqlalchemy.Column"
] |
[((697, 734), 'alembic.op.drop_column', 'op.drop_column', (['"""Site"""', '"""airtable_id"""'], {}), "('Site', 'airtable_id')\n", (711, 734), False, 'from alembic import op\n'), ((775, 807), 'sqlalchemy.Column', 'sa.Column', (['"""site_id"""', 'sa.Integer'], {}), "('site_id', sa.Integer)\n", (784, 807), True, 'import sqlalchemy as sa\n'), ((830, 845), 'sqlalchemy.String', 'sa.String', (['(1024)'], {}), '(1024)\n', (839, 845), True, 'import sqlalchemy as sa\n'), ((877, 891), 'sqlalchemy.String', 'sa.String', (['(256)'], {}), '(256)\n', (886, 891), True, 'import sqlalchemy as sa\n'), ((388, 402), 'sqlalchemy.String', 'sa.String', (['(256)'], {}), '(256)\n', (397, 402), True, 'import sqlalchemy as sa\n'), ((620, 657), 'alembic.op.drop_column', 'op.drop_column', (['"""Site"""', '"""airtable_id"""'], {}), "('Site', 'airtable_id')\n", (634, 657), False, 'from alembic import op\n'), ((560, 574), 'sqlalchemy.String', 'sa.String', (['(256)'], {}), '(256)\n', (569, 574), True, 'import sqlalchemy as sa\n')]
|
import pytest
import numpy as np
import xarray as xr
import dask.array as da
from xrspatial import curvature
from xrspatial.utils import doesnt_have_cuda
from xrspatial.tests.general_checks import general_output_checks
elevation = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[1584.8767, 1584.8767, 1585.0546, 1585.2324, 1585.2324, 1585.2324],
[1585.0546, 1585.0546, 1585.2324, 1585.588, 1585.588, 1585.588],
[1585.2324, 1585.4102, 1585.588, 1585.588, 1585.588, 1585.588],
[1585.588, 1585.588, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
[1585.7659, 1585.9437, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
[1585.9437, 1585.9437, 1585.9437, 1585.7659, 1585.7659, 1585.7659]],
dtype=np.float32
)
def test_curvature_on_flat_surface():
# flat surface
test_arr1 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.array([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
test_raster1 = xr.DataArray(test_arr1, attrs={'res': (1, 1)})
curv = curvature(test_raster1)
general_output_checks(test_raster1, curv, expected_results)
def test_curvature_on_convex_surface():
# convex
test_arr2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0., 100., 0., np.nan],
[np.nan, 100., -400., 100., np.nan],
[np.nan, 0., 100., 0., np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
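    # Sanity check for the expected values, assuming the standard ESRI
    # curvature formulation curvature = -2 * (D + E) * 100 with
    # D = ((z_left + z_right) / 2 - z_center) / L**2 and
    # E = ((z_top + z_bottom) / 2 - z_center) / L**2, where L = cell size = 1:
    # at the central pit D = E = 1, giving -400; at each 4-neighbor of the
    # pit D + E = -0.5, giving 100; border cells lack a full 3x3 window and
    # stay NaN.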
test_raster2 = xr.DataArray(test_arr2, attrs={'res': (1, 1)})
curv = curvature(test_raster2)
general_output_checks(test_raster2, curv, expected_results)
def test_curvature_on_concave_surface():
# concave
test_arr3 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0., -100., 0., np.nan],
[np.nan, -100., 400., -100., np.nan],
[np.nan, 0., -100., 0., np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
test_raster3 = xr.DataArray(test_arr3, attrs={'res': (1, 1)})
curv = curvature(test_raster3)
general_output_checks(test_raster3, curv, expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_curvature_gpu_equals_cpu():
import cupy
agg_numpy = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
cpu = curvature(agg_numpy, name='numpy_result')
agg_cupy = xr.DataArray(
cupy.asarray(elevation), attrs={'res': (10.0, 10.0)}
)
gpu = curvature(agg_cupy, name='cupy_result')
general_output_checks(agg_cupy, gpu)
np.testing.assert_allclose(cpu.data, gpu.data.get(), equal_nan=True)
# NOTE: Dask + GPU code paths don't currently work because dask casts
# cupy arrays to numpy arrays; see
# https://github.com/dask/dask/issues/4842
def test_curvature_numpy_equals_dask():
agg_numpy = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
numpy_curvature = curvature(agg_numpy, name='numpy_curvature')
agg_dask = xr.DataArray(
da.from_array(elevation, chunks=(3, 3)), attrs={'res': (10.0, 10.0)}
)
dask_curvature = curvature(agg_dask, name='dask_curvature')
general_output_checks(agg_dask, dask_curvature)
# both produce same results
np.testing.assert_allclose(
numpy_curvature.data, dask_curvature.data.compute(), equal_nan=True)
|
[
"xrspatial.utils.doesnt_have_cuda",
"cupy.asarray",
"numpy.asarray",
"xrspatial.tests.general_checks.general_output_checks",
"numpy.array",
"xarray.DataArray",
"dask.array.from_array",
"xrspatial.curvature"
] |
[((248, 756), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [1584.8767, 1584.8767, \n 1585.0546, 1585.2324, 1585.2324, 1585.2324], [1585.0546, 1585.0546, \n 1585.2324, 1585.588, 1585.588, 1585.588], [1585.2324, 1585.4102, \n 1585.588, 1585.588, 1585.588, 1585.588], [1585.588, 1585.588, 1585.7659,\n 1585.7659, 1585.7659, 1585.7659], [1585.7659, 1585.9437, 1585.7659, \n 1585.7659, 1585.7659, 1585.7659], [1585.9437, 1585.9437, 1585.9437, \n 1585.7659, 1585.7659, 1585.7659]]'], {'dtype': 'np.float32'}), '([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [1584.8767, \n 1584.8767, 1585.0546, 1585.2324, 1585.2324, 1585.2324], [1585.0546, \n 1585.0546, 1585.2324, 1585.588, 1585.588, 1585.588], [1585.2324, \n 1585.4102, 1585.588, 1585.588, 1585.588, 1585.588], [1585.588, 1585.588,\n 1585.7659, 1585.7659, 1585.7659, 1585.7659], [1585.7659, 1585.9437, \n 1585.7659, 1585.7659, 1585.7659, 1585.7659], [1585.9437, 1585.9437, \n 1585.9437, 1585.7659, 1585.7659, 1585.7659]], dtype=np.float32)\n', (258, 756), True, 'import numpy as np\n'), ((851, 951), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (859, 951), True, 'import numpy as np\n'), ((1079, 1264), 'numpy.array', 'np.array', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0, 0, 0, np.nan], [np.\n nan, 0, 0, 0, np.nan], [np.nan, 0, 0, 0, np.nan], [np.nan, np.nan, np.\n nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0, 0, 0, np.\n nan], [np.nan, 0, 0, 0, np.nan], [np.nan, 0, 0, 0, np.nan], [np.nan, np\n .nan, np.nan, np.nan, np.nan]])\n', (1087, 1264), True, 'import numpy as np\n'), ((1372, 1418), 'xarray.DataArray', 'xr.DataArray', (['test_arr1'], {'attrs': "{'res': (1, 1)}"}), "(test_arr1, attrs={'res': (1, 1)})\n", (1384, 1418), True, 'import xarray as xr\n'), ((1431, 1454), 'xrspatial.curvature', 'curvature', (['test_raster1'], {}), '(test_raster1)\n', (1440, 1454), False, 'from xrspatial import curvature\n'), ((1460, 1519), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster1', 'curv', 'expected_results'], {}), '(test_raster1, curv, expected_results)\n', (1481, 1519), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((1596, 1697), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, -1, 0, 0], [0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0]])\n', (1604, 1697), True, 'import numpy as np\n'), ((1825, 2041), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, 100.0, 0.0, np.nan\n ], [np.nan, 100.0, -400.0, 100.0, np.nan], [np.nan, 0.0, 100.0, 0.0, np\n .nan], [np.nan, np.nan, np.nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, 100.0, \n 0.0, np.nan], [np.nan, 100.0, -400.0, 100.0, np.nan], [np.nan, 0.0, \n 100.0, 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]])\n', (1835, 2041), True, 'import numpy as np\n'), ((2130, 2176), 'xarray.DataArray', 'xr.DataArray', (['test_arr2'], {'attrs': "{'res': (1, 1)}"}), "(test_arr2, attrs={'res': (1, 1)})\n", (2142, 2176), True, 'import xarray as xr\n'), ((2189, 2212), 'xrspatial.curvature', 'curvature', (['test_raster2'], {}), '(test_raster2)\n', (2198, 2212), False, 'from xrspatial import curvature\n'), ((2218, 2277), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster2', 'curv', 'expected_results'], {}), '(test_raster2, curv, expected_results)\n', (2239, 2277), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((2356, 2456), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (2364, 2456), True, 'import numpy as np\n'), ((2584, 2802), 'numpy.asarray', 'np.asarray', (['[[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, -100.0, 0.0, np.\n nan], [np.nan, -100.0, 400.0, -100.0, np.nan], [np.nan, 0.0, -100.0, \n 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, 0.0, -100.0,\n 0.0, np.nan], [np.nan, -100.0, 400.0, -100.0, np.nan], [np.nan, 0.0, -\n 100.0, 0.0, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan]])\n', (2594, 2802), True, 'import numpy as np\n'), ((2889, 2935), 'xarray.DataArray', 'xr.DataArray', (['test_arr3'], {'attrs': "{'res': (1, 1)}"}), "(test_arr3, attrs={'res': (1, 1)})\n", (2901, 2935), True, 'import xarray as xr\n'), ((2948, 2971), 'xrspatial.curvature', 'curvature', (['test_raster3'], {}), '(test_raster3)\n', (2957, 2971), False, 'from xrspatial import curvature\n'), ((2977, 3036), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['test_raster3', 'curv', 'expected_results'], {}), '(test_raster3, curv, expected_results)\n', (2998, 3036), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3194, 3246), 'xarray.DataArray', 'xr.DataArray', (['elevation'], {'attrs': "{'res': (10.0, 10.0)}"}), "(elevation, attrs={'res': (10.0, 10.0)})\n", (3206, 3246), True, 'import xarray as xr\n'), ((3258, 3299), 'xrspatial.curvature', 'curvature', (['agg_numpy'], {'name': '"""numpy_result"""'}), "(agg_numpy, name='numpy_result')\n", (3267, 3299), False, 'from xrspatial import curvature\n'), ((3412, 3451), 'xrspatial.curvature', 'curvature', (['agg_cupy'], {'name': '"""cupy_result"""'}), "(agg_cupy, name='cupy_result')\n", (3421, 3451), False, 'from xrspatial import curvature\n'), ((3459, 3495), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['agg_cupy', 'gpu'], {}), '(agg_cupy, gpu)\n', (3480, 3495), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3062, 3080), 'xrspatial.utils.doesnt_have_cuda', 'doesnt_have_cuda', ([], {}), '()\n', (3078, 3080), False, 'from xrspatial.utils import doesnt_have_cuda\n'), ((3804, 3856), 'xarray.DataArray', 'xr.DataArray', (['elevation'], {'attrs': "{'res': (10.0, 10.0)}"}), "(elevation, attrs={'res': (10.0, 10.0)})\n", (3816, 3856), True, 'import xarray as xr\n'), ((3880, 3924), 'xrspatial.curvature', 'curvature', (['agg_numpy'], {'name': '"""numpy_curvature"""'}), "(agg_numpy, name='numpy_curvature')\n", (3889, 3924), False, 'from xrspatial import curvature\n'), ((4064, 4106), 'xrspatial.curvature', 'curvature', (['agg_dask'], {'name': '"""dask_curvature"""'}), "(agg_dask, name='dask_curvature')\n", (4073, 4106), False, 'from xrspatial import curvature\n'), ((4112, 4159), 'xrspatial.tests.general_checks.general_output_checks', 'general_output_checks', (['agg_dask', 'dask_curvature'], {}), '(agg_dask, dask_curvature)\n', (4133, 4159), False, 'from xrspatial.tests.general_checks import general_output_checks\n'), ((3341, 3364), 'cupy.asarray', 'cupy.asarray', (['elevation'], {}), '(elevation)\n', (3353, 3364), False, 'import cupy\n'), ((3966, 4005), 'dask.array.from_array', 'da.from_array', (['elevation'], {'chunks': '(3, 3)'}), '(elevation, chunks=(3, 3))\n', (3979, 4005), True, 'import dask.array as da\n')]
|
# Copyright 2016 Recorded Future, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth provider for RF tokens stored in environment."""
import os
import email
import hashlib
import hmac
import requests
from .error import MissingAuthError
# pylint: disable=too-few-public-methods
class RFTokenAuth(requests.auth.AuthBase):
"""Authenticate using a token stored in an environment variable.
The class will look for tokens in RF_TOKEN and RECFUT_TOKEN (legacy).
"""
def __init__(self, token, api_version=1):
"""Initialize the class. Provide a valid token."""
self.token = self._find_token() if token == 'auto' else token
self._api_version = api_version
def __call__(self, req):
"""Add the authentication header when class is called."""
        # If we still don't have a token, bail out.
if not self.token:
raise MissingAuthError
if self._api_version == 1:
req.headers['Authorization'] = "RF-TOKEN token=%s" % self.token
else:
req.headers['X-RFToken'] = self.token
return req
@staticmethod
def _find_token():
if 'RF_TOKEN' in os.environ:
return os.environ['RF_TOKEN']
if 'RECFUT_TOKEN' in os.environ:
return os.environ['RECFUT_TOKEN']
raise MissingAuthError('Auth method auto selected but no token '
'found in environment (RF_TOKEN or '
'RECFUT_TOKEN).')
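# Illustrative usage (hypothetical url), relying on requests' pluggable auth:
#   import requests
#   requests.get(url, auth=RFTokenAuth('auto'))  # token read from RF_TOKEN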
class SignatureHashAuth(requests.auth.AuthBase):
"""Authenticate using signed queries."""
def __init__(self, username, userkey):
"""Initialize. Provide a valid username and key."""
self.username = username
self.userkey = userkey
def __call__(self, req):
"""Add the auth headers to a request."""
# pylint: disable=no-member
timestamp = email.Utils.formatdate()
split = req.path_url.split("?")
path_params = split[1] if len(split) > 1 else ""
body = req.body if req.body else ""
if "v2" in req.path_url:
v2_url = req.path_url.replace("/rfq", "")
hash_text = v2_url + body + timestamp
else:
hash_text = "?" + path_params + body + timestamp
hmac_hash = hmac.new(self.userkey,
hash_text,
hashlib.sha256).hexdigest()
req.headers['Date'] = timestamp
req.headers['Authorization'] = 'RF-HS256 user=%s, hash=%s' % (
self.username, hmac_hash
)
return req
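# Note: this signing code is Python 2 style. Under Python 3,
# email.Utils.formatdate is email.utils.formatdate, and hmac.new requires
# bytes, so userkey and hash_text would need to be encoded first.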
|
[
"hmac.new",
"email.Utils.formatdate"
] |
[((2402, 2426), 'email.Utils.formatdate', 'email.Utils.formatdate', ([], {}), '()\n', (2424, 2426), False, 'import email\n'), ((2802, 2851), 'hmac.new', 'hmac.new', (['self.userkey', 'hash_text', 'hashlib.sha256'], {}), '(self.userkey, hash_text, hashlib.sha256)\n', (2810, 2851), False, 'import hmac\n')]
|
#!/usr/bin/env python3
"""
This script will run all jupyter notebooks in order to test for errors.
"""
import sys
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
notebooks = ('grids-and-coefficients.ipynb',
'localized-spectral-analysis.ipynb',
'gravity-and-magnetic-fields.ipynb',
'plotting-maps.ipynb',
'low-level-spherical-harmonic-analyses.ipynb',
'advanced-localized-spectral-analysis.ipynb',
'advanced-shcoeffs-and-shgrid-usage.ipynb',
'spherical-harmonic-normalizations.ipynb',
'advanced-shwindow-usage.ipynb',
'3d-plots.ipynb')
if sys.version_info.major == 3:
kname = 'python3'
else:
    raise RuntimeError('Python version {:d} not supported.'.format(sys.version_info.major))
for i in range(len(notebooks)):
with open(notebooks[i]) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=240, kernel_name=kname)
print('Processing file {:s}'.format(notebooks[i]))
ep.preprocess(nb, {'metadata': {'path': '.'}})
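    # Optionally persist the executed notebook (illustrative output name):
    # nbformat.write(nb, notebooks[i].replace('.ipynb', '-executed.ipynb'))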
|
[
"nbformat.read",
"nbconvert.preprocessors.ExecutePreprocessor",
"os.path.dirname"
] |
[((200, 228), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (215, 228), False, 'import os\n'), ((249, 277), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (264, 277), False, 'import os\n'), ((991, 1021), 'nbformat.read', 'nbformat.read', (['f'], {'as_version': '(4)'}), '(f, as_version=4)\n', (1004, 1021), False, 'import nbformat\n'), ((1035, 1086), 'nbconvert.preprocessors.ExecutePreprocessor', 'ExecutePreprocessor', ([], {'timeout': '(240)', 'kernel_name': 'kname'}), '(timeout=240, kernel_name=kname)\n', (1054, 1086), False, 'from nbconvert.preprocessors import ExecutePreprocessor\n')]
|
import unittest
import sys
sys.path.append('/pEigen/src/peigen')
import libpeigen as peigen
class DenseFactorizationTest(unittest.TestCase):
def setUp(self):
self.rows = 1000
self.cols = 1000
self.dense_matrix = peigen.denseMatrixDouble(self.rows, self.cols)
self.dense_matrix.setRandom(1)
self.factorizer = peigen.denseDecomposition(self.dense_matrix)
def test_thin_svd(self):
self.factorizer.BDCSVD()
S = self.factorizer.getSingularValues()
norm_greater_than_0 = (S.diagonal(0).norm() > 0)
U = self.factorizer.getU()
UtU = U.transpose()*U
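        # U from the thin SVD should have orthonormal columns, so UtU is the
        # identity: trace(UtU) equals UtU.rows() and the normalized residual
        # computed below should be ~0.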
trace = UtU.trace()
residual = (trace - UtU.rows())**2/(UtU.rows()**2)
res_less_than_eps = (residual < 1e-9)
self.assertEqual(res_less_than_eps, True)
self.assertEqual(norm_greater_than_0, True)
def test_qr_decomp(self):
self.factorizer.HouseholderQR()
Q = self.factorizer.getQ()
QtQ = Q.transpose()*Q
trace = QtQ.trace()
residual = (trace - QtQ.rows())**2/(QtQ.rows()**2)
res_less_than_eps = (residual < 1e-9)
self.assertEqual(res_less_than_eps, True)
if __name__ == '__main__':
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"libpeigen.denseDecomposition",
"libpeigen.denseMatrixDouble"
] |
[((27, 64), 'sys.path.append', 'sys.path.append', (['"""/pEigen/src/peigen"""'], {}), "('/pEigen/src/peigen')\n", (42, 64), False, 'import sys\n'), ((1253, 1268), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1266, 1268), False, 'import unittest\n'), ((250, 296), 'libpeigen.denseMatrixDouble', 'peigen.denseMatrixDouble', (['self.rows', 'self.cols'], {}), '(self.rows, self.cols)\n', (274, 296), True, 'import libpeigen as peigen\n'), ((371, 415), 'libpeigen.denseDecomposition', 'peigen.denseDecomposition', (['self.dense_matrix'], {}), '(self.dense_matrix)\n', (396, 415), True, 'import libpeigen as peigen\n')]
|
from typing import Optional
from prompt_toolkit import PromptSession
from prompt_toolkit import print_formatted_text as print_
from efb.validator import YesNoValidator
SESSION = PromptSession()
def make_decision(question: str, default: Optional[bool] = None) -> bool:
default_string = f'(default {"y" if default else "n"})' if default is not None else ''
while True:
answer = SESSION.prompt(f'{question} [y/n] {default_string}: ', validator=YesNoValidator())
if answer == 'y':
return True
if answer == 'n':
return False
if not answer and default is not None:
return default
        print_(f'Please state your decision as y or n (not {answer})')
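# Illustrative usage:
#   if make_decision('Overwrite the existing config?', default=False):
#       ...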
|
[
"efb.validator.YesNoValidator",
"prompt_toolkit.print_formatted_text",
"prompt_toolkit.PromptSession"
] |
[((181, 196), 'prompt_toolkit.PromptSession', 'PromptSession', ([], {}), '()\n', (194, 196), False, 'from prompt_toolkit import PromptSession\n'), ((664, 725), 'prompt_toolkit.print_formatted_text', 'print_', (['f"""Please state your decision as y or n (not {answer})"""'], {}), "(f'Please state your decision as y or n (not {answer})')\n", (670, 725), True, 'from prompt_toolkit import print_formatted_text as print_\n'), ((463, 479), 'efb.validator.YesNoValidator', 'YesNoValidator', ([], {}), '()\n', (477, 479), False, 'from efb.validator import YesNoValidator\n')]
|