index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
48,678 | EHounslow/Metagenomics | refs/heads/main | /modelling/disease_selector.py | from typing import Tuple
from pandas.core.frame import DataFrame
def get_disease_data(
    disease_to_select: str, disease_target: DataFrame
) -> Tuple[DataFrame, DataFrame, DataFrame]:
    """
    Creates a single disease subset dataframe from the full metagenomic dataframe.
    Args:
        disease_to_select: (str): Name of the target disease label
        disease_target: (DataFrame): Dataframe containing a "disease" column
    Returns:
        Tuple [DataFrame, DataFrame, DataFrame]: 3 dataframes returning subsets
        of the dataset containing combined (control + disease, with the disease
        column binarised: 0 = control, 1 = target disease), disease samples,
        and control samples.
    """
    disease = disease_target["disease"]
    control_mask = (disease == "n") | (disease == "nd")
    # Fix: .copy() makes the combined subset own its data. The original code
    # assigned into a slice of the source frame, which triggers pandas'
    # SettingWithCopyWarning and is not guaranteed to apply the assignment.
    combined_disease_control = disease_target[
        control_mask | (disease == disease_to_select)
    ].copy()
    control = disease_target[control_mask]
    disease_target = disease_target[disease == disease_to_select]
    # Transform the target variable into numeric data in one mapping pass.
    combined_disease_control["disease"] = combined_disease_control["disease"].replace(
        {"n": 0, "nd": 0, disease_to_select: 1}
    )
    return combined_disease_control, disease_target, control
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,679 | EHounslow/Metagenomics | refs/heads/main | /modelling/variable_definer.py | from typing import List, Tuple
from pandas.core.frame import DataFrame
def get_variables(data_subset: DataFrame) -> Tuple[List, List, List]:
    """
    Separates the dataframe columns into 3 lists: species-level taxonomic
    variables, all other taxonomic variables, and non-taxonomic metadata.
    Args:
        data_subset: (DataFrame): A dataframe containing the target data.
    Returns:
        species: (list): A list of all taxonomic variables at the species level only
        taxonomy: (list): A list of all other taxonomic variables
        metadata: (list): A list of all non-taxonomic variables
    """
    species: List = []
    taxonomy: List = []
    metadata: List = []
    for column_name in data_subset.columns.values:
        # Non-taxonomic columns do not carry the kingdom "k_" prefix.
        if not column_name.startswith("k_"):
            metadata.append(column_name)
        # Species level: has an "s_" rank but no strain ("t_") rank.
        elif "s_" in column_name and "t_" not in column_name:
            species.append(column_name)
        else:
            taxonomy.append(column_name)
    return species, taxonomy, metadata
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,680 | EHounslow/Metagenomics | refs/heads/main | /modelling/dataset_selector.py | from pandas.core.frame import DataFrame
def get_dataset(dataset_to_select: str, abundance_data: DataFrame) -> DataFrame:
    """
    Creates a single data subset dataframe from the full metagenomic dataframe.
    Args:
        dataset_to_select: (str): Name of the target dataset
        abundance_data: (DataFrame): Dataframe of the full metagenomic dataset
    Returns:
        DataFrame: A dataframe containing a dataset subset
    """
    # Boolean mask over the dataset_name column selects the requested rows.
    selection_mask = abundance_data["dataset_name"] == dataset_to_select
    return abundance_data[selection_mask]
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,681 | EHounslow/Metagenomics | refs/heads/main | /modelling/modellers/random_forest_modeller.py | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from pandas.core.frame import DataFrame
def run_random_forest(
    train_x: DataFrame, train_y: DataFrame, val_x: DataFrame, val_y: DataFrame
) -> None:
    """
    Trains a random forest regressor and prints its validation MAE.
    Args:
        train_x: (DataFrame): Training data for x.
        train_y: (DataFrame): Training data for y.
        val_x: (DataFrame): Validation data for x.
        val_y: (DataFrame): Validation data for y.
    """
    # random_state pinned so repeated runs are reproducible.
    rf_model = RandomForestRegressor(random_state=1)
    rf_model.fit(train_x, train_y)
    rf_val_predictions = rf_model.predict(val_x)
    # Fix: sklearn's convention is mean_absolute_error(y_true, y_pred); the
    # original passed them swapped. MAE is symmetric so the value is the same,
    # but the corrected order keeps this safe if the metric is ever replaced
    # by an asymmetric one (e.g. MAPE).
    rf_val_mae = mean_absolute_error(val_y, rf_val_predictions)
    print(f"Validation MAE for Random Forest Model: {rf_val_mae}")
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,682 | EHounslow/Metagenomics | refs/heads/main | /modelling/data_loader.py | import pandas as pd
from pandas.core.frame import DataFrame
def extract_abundance_data(abundance: str) -> DataFrame:
    """
    Creates a transposed dataframe from a .txt or .csv file and sets the first row as the header.
    Args:
        abundance: (str): The path to the source file
    Returns:
        DataFrame: A dataframe of the file data
    """
    # Read everything as strings (mixed metadata/abundance values), then
    # transpose so each sample becomes a row.
    raw = pd.read_csv(abundance, delimiter="\t", header=None, dtype=str).T
    # Promote the first (transposed) row to column labels and drop it.
    header_row = raw.iloc[0]
    abundance_data = raw.iloc[1:]
    abundance_data.columns = header_row
    return abundance_data
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,683 | EHounslow/Metagenomics | refs/heads/main | /tests/test_dataset_selector.py | from pandas.core.frame import DataFrame
from modelling.dataset_selector import get_dataset
def test_get_dataset():
    """Verify get_dataset keeps only rows of the requested dataset.

    Replaces the previous placeholder (`assert True`), which tested nothing.
    """
    # Arrange: a minimal frame with two datasets mixed together
    abundance_data = DataFrame(
        {"dataset_name": ["a", "b", "a"], "value": ["1", "2", "3"]}
    )
    # Act
    subset = get_dataset("a", abundance_data)
    # Assert: only rows belonging to the requested dataset remain
    assert list(subset["dataset_name"]) == ["a", "a"]
    assert list(subset["value"]) == ["1", "3"]
| {"/tests/test_data_loader.py": ["/modelling/data_loader.py"], "/tests/test_disease_selector.py": ["/modelling/dataset_selector.py", "/modelling/data_loader.py", "/modelling/disease_selector.py"], "/tests/test_dataset_selector.py": ["/modelling/dataset_selector.py"]} |
48,692 | coffee247/PA5b | refs/heads/master | /sentiment.py |
import math
import operator
import pathlib
import re
import sys
import xml.etree.ElementTree as ET
import logging
import fileLoaderClass
# set up logging
# The log file "my-model.txt" is created next to this script; filemode='w'
# truncates it on every run, and the bare "%(message)s" format keeps it clean.
dirpath = pathlib.Path(__file__).parent.absolute()
logging.basicConfig(filename=f'{dirpath}/my-model.txt', filemode='w', format='%(message)s')
class sentimenter():
    """Builds sentiment data structures from corpora loaded off disk."""

    bagOfWords = dict()  # unigram table of word frequencies
    corpora = []  # container for each of the loaded files (from disk)

    def __init__(self):
        # Fix: fileLoaderClass is a *module*; loadFiles is an instance method
        # of its fileLoader class. The original bound the module itself and
        # called loadFiles on it, which raises AttributeError at runtime.
        my_file_loader = fileLoaderClass.fileLoader()
        self.corpora = my_file_loader.loadFiles()  # load files from disk into corpora
| {"/sentiment.py": ["/fileLoaderClass.py"]} |
48,693 | coffee247/PA5b | refs/heads/master | /fileLoaderClass.py |
import pathlib
import sys
class fileLoader():
    """Loads the text content of every file named on the command line."""

    def __init__(self):
        pass

    def loadFiles(self):
        """Read every file listed in sys.argv[1:] and return a list of texts."""
        files = []
        for index in range(1, len(sys.argv)):
            files.append(self.readfile(index))  # load text from files on disk
        return files

    def readfile(self, fileAtIndex):
        """Return the full text of sys.argv[fileAtIndex]; prompt and exit on failure."""
        try:
            # Fix: context manager guarantees the handle is closed even if
            # read() raises (the original leaked the handle on error).
            with open(sys.argv[fileAtIndex], 'r') as file:
                return file.read()
        except (OSError, IndexError):
            # Fix: narrowed from a bare `except:` so only missing/unreadable
            # files (or a bad argv index) trigger the friendly quit path; a
            # bare except would also swallow KeyboardInterrupt and real bugs.
            print("{} \n\tException: error loading file {}\n\t"
                  "Application will quit so you can try again!\n{}".format("*" * 72, sys.argv[fileAtIndex], "*" * 72))
            input("press ENTER key to continue quitting\n\n")
            print("Bye!\n\n")
            sys.exit()  # Fix: sys.exit() instead of site-provided exit()
48,725 | cyins/YOLOv5-SGBM | refs/heads/master | /recevier.py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that listens to std_msgs/Strings published
## to the 'chatter' topic
import rospy,re,argparse,sys
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import PolygonStamped,Point32
import cv2,socket,numpy,time,select
from multiprocessing import Process
from utils.general import timethis,timeblock
class rospublisher:
    """
    @description : handle relevant events of ROS topic publishing
    ---------
    @function :
    -------
    """
    # Number of live publisher instances (maintained by __init__/__del__).
    count = 0
    def __init__(self):
        rospublisher.count += 1
        # One ROS node per process; anonymous=True appends a random suffix so
        # several instances can coexist on the same ROS master.
        rospy.init_node('AvoidModel', anonymous=True)
        self._coor_pub=rospy.Publisher('Avoid_Object_Position',PolygonStamped,queue_size=10)
        self._img_pub=rospy.Publisher('Postproc_Images',Image,queue_size=10)
        # self._cloud_img_pub=rospy.Publisher('Disparity_Images',Image,queue_size=10)
        self.r=rospy.Rate(1)  # 1 Hz rate object (not used by the publish methods below)
    def __del__(self):
        rospublisher.count -= 1
        print ("RosPublisher release")
    # @timethis
    def pub_axis(self,coords,timestamp,frame,num):
        """
        @description : publisher the coordinates of the avoid object to ROS
        ---------
        @param :
            coords: type list, flat list of coordinates, 6 values per object
            timestamp: type sequence of str, [seconds, sub-seconds, ...] used for the header stamp
            frame: type int, image frame no.
            num: type int, the number of object in coordinates
        -------
        @Returns : None
        -------
        """
        coord_msg=PolygonStamped()
        # timestamp[1] is cut to 9 digits because rospy.Time takes nanoseconds.
        coord_msg.header.stamp=rospy.Time(int(timestamp[0]),int(timestamp[1][:9]))
        coord_msg.header.frame_id=str(frame)
        # Translation of the note below: the bounding-box width is used as the
        # predicted object thickness, and to absorb distance-recognition error
        # an extra 0.2 m in total is added on top of that predicted thickness.
        '''
        @logic:以矩形框的宽作为预测的物体厚度值,同时,考虑到距离识别误差,在预测厚度值的基础上,添加0.2m。
        '''
        if coords != []:
            # Each object contributes 6 values in `coords` and produces 4
            # polygon points; values are converted from millimetres to metres.
            for i in range(int(num)):
                coord_msg.polygon.points.append(Point32())
                coord_msg.polygon.points[4*i].x=float(coords[i*6+2])/1000+abs((float(coords[i*6+0])/1000)-float(coords[i*6+3])/1000)-0.1
                coord_msg.polygon.points[4*i].y=float(-float(coords[i*6+0])/1000)
                coord_msg.polygon.points[4*i].z=float(coords[i*6+1])/1000
                coord_msg.polygon.points.append(Point32())
                coord_msg.polygon.points[4*i+1].x=float(coords[i*6+2])/1000+0.1
                coord_msg.polygon.points[4*i+1].y=float(-float(coords[i*6+0])/1000)
                coord_msg.polygon.points[4*i+1].z=float(coords[i*6+1])/1000
                coord_msg.polygon.points.append(Point32())
                coord_msg.polygon.points[4*i+2].x=float(coords[i*6+5])/1000+0.1
                coord_msg.polygon.points[4*i+2].y=float(-float(coords[i*6+3])/1000)
                coord_msg.polygon.points[4*i+2].z=float(coords[i*6+4])/1000
                coord_msg.polygon.points.append(Point32())
                coord_msg.polygon.points[4*i+3].x=float(coords[i*6+5])/1000+abs((float(coords[i*6+0])/1000)-float(coords[i*6+3])/1000)-0.1
                coord_msg.polygon.points[4*i+3].y=float(-float(coords[i*6+3])/1000)
                coord_msg.polygon.points[4*i+3].z=float(coords[i*6+4])/1000
        self._coor_pub.publish(coord_msg)
    # @timethis
    def pub_img(self,img):
        """Publish an OpenCV image on the Postproc_Images topic unchanged
        (encoding='passthrough' keeps the source pixel format)."""
        bridge=CvBridge()
        self._img_pub.publish(bridge.cv2_to_imgmsg(img, encoding="passthrough"))
# def pub_cloud_img(self,img):
# bridge=CvBridge()
# self._cloud_img_pub.publish(bridge.cv2_to_imgmsg(img, encoding="passthrough"))
# 接受图片及大小的信息
def recvall(sock, count):
    """
    @description : socket server receive a fixed length packet
    ---------
    @param :
        sock: socket server connector handler
        count: type int, length of the packet to receive
    -------
    @Returns : buf, type bytes, the received data (shorter than `count` only
               if the peer closes the connection early)
    -------
    """
    received = b''
    remaining = count
    # recv() may deliver fewer bytes than requested, so loop until the whole
    # packet has arrived.
    while remaining:
        chunk = sock.recv(remaining)
        if not chunk:
            # Peer closed the connection: return whatever was collected.
            return received
        received += chunk
        remaining -= len(chunk)
    return received
def netdata_pipe(server_soc, videoWriter, pub):
    """
    @description : receive image and coordinate packets from the socket
                   connection and republish them on ROS topics
    ---------
    @param :
        server_soc: the handler of the listening socket server
        videoWriter: unused placeholder (kept for interface compatibility)
        pub: the handler of the ROS node (rospublisher instance)
    -------
    @Returns : None
    -------
    """
    print('waiting for connect')
    conn, client_address = server_soc.accept()
    print('connect from:' + str(client_address))
    block=1024 #1280*720
    Connect = False
    if conn:
        Connect = True
    while Connect:
        t0 = time.time()
        image_length=0
        coords=[]
        img_flag = False
        # Header packet: "$Image,<img_len>,<dis_len>,<h>,<w>,<dh>,<dw>,<sec>,<nsec>,<frame>"
        stringData = conn.recv(block)  # int() can only convert strings of pure digits
        if stringData != b'':
            if stringData.startswith(b'$Image'):
                with timeblock('process time:'):
                    stringData = stringData.decode()
                    image_length=int(stringData.split(',')[1])
                    dis_img_length = int(stringData.split(',')[2])
                    img_resolution = stringData.split(',')[3:5]
                    dis_resolution = stringData.split(',')[5:7]
                    timestamp=stringData.split(',')[7:-1]
                    frame=stringData.split(',')[-1]
                    coords = []
                    if opt.image:
                        # Handshake, then receive the post-processed image.
                        conn.sendall('Ready for Image'.encode('utf-8'))
                        stringData = recvall(conn, (image_length))  # int() can only convert strings of pure digits
                        img = numpy.frombuffer(stringData,numpy.uint8)  # convert the received byte stream into a 1-D array
                        img = numpy.reshape(img,(int(img_resolution[0]),int(img_resolution[1]),3))
                        # Handshake, then receive the disparity image.
                        conn.sendall('Ready for Disparity Image'.encode('utf-8'))
                        stringData = recvall(conn,(dis_img_length))
                        dis_img = numpy.frombuffer(stringData,numpy.uint8)  # convert the received byte stream into a 1-D array
                        dis_img = numpy.reshape(dis_img,(int(dis_resolution[0]),int(dis_resolution[1]),3))
                        # Publish the two views side by side on one topic.
                        img = cv2.hconcat([img,dis_img])
                        pub.pub_img(img)
                    # pub.pub_cloud_img(dis_img)
                    # Final handshake: receive the flat coordinate list
                    # (6 values per detected object).
                    conn.sendall('Ready for Coordinates'.encode('utf-8'))
                    stringData = conn.recv(block)
                    stringData = stringData.decode()
                    coords=stringData.split(',')[1:-1]
                    assert len(coords) % 6 == 0,'coords length error'
                    conn.sendall('Ready for next Frame'.encode('utf-8'))
                    pub.pub_axis(coords,timestamp,frame,(len(coords)/6))
            # =================================================================================================================================
            time.sleep(0.05)
            # print('Server recv: %d data in %.3f'%(image_size,time.time()-t0),end='\r')
            # =================================================================================================================================
        else:
            continue
# Entry point: parse CLI options, start the ROS publisher node, open the TCP
# server, and hand the accepted connection to netdata_pipe until it returns.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', type=int, default=9191, help='tcp port')
    parser.add_argument('--ip', type=str, default='192.168.3.181', help='tcp IP')
    parser.add_argument('--image', action='store_true', help='show image mode')
    opt = parser.parse_args()
    pub=rospublisher()
    address = (opt.ip, opt.port)#'192.168.1.104', 8004
    server_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_soc.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)  # allow quick restarts: port reusable even in TIME_WAIT
    server_soc.bind(address)
    server_soc.listen()
    videoWriter = []  # placeholder; netdata_pipe does not use it
    netdata_pipe(server_soc,videoWriter,pub)
    server_soc.close()
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,726 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/image_merge.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: : 将左右相机拍摄的两张照片合成一张
@Date :2021/05/28 19:38:05
@Author :Yyc
@version :1.0
'''
import os,time,sys,cv2
import numpy as np
#%%
#TODO:打开两张照片,合成一张
def merge_images(root='',folder=''):
    """Concatenate <root>/<folder>/im0.png and im1.png side by side.

    The merged image is previewed in a window and saved to
    <root>/merge/<folder>.png.

    Args:
        root: parent directory containing the stereo-pair folder.
        folder: folder holding im0.png (left) and im1.png (right).

    Returns:
        -1 when either input image is missing, otherwise None.

    Raises:
        KeyboardInterrupt: when 'q' is pressed in the preview window.
    """
    dir = os.path.join(root, folder)
    for _, _, files in os.walk(dir, topdown=False):
        if 'im0.png' in files:
            filel = os.path.join(dir, 'im0.png')
            iml = cv2.imread(filel)
        else:
            print(f'{dir} Image Left not found')
            return -1
        if 'im1.png' in files:
            filer = os.path.join(dir, 'im1.png')
            imr = cv2.imread(filer)
        else:
            print(f'{dir} Image Right not found')
            return -1
        im_merge = cv2.hconcat([iml, imr])
        save_path = os.path.join(root, 'merge')
        # Fix: cv2.imwrite fails silently when the target directory does not
        # exist, so create <root>/merge on demand.
        os.makedirs(save_path, exist_ok=True)
        cv2.imshow('merge image', im_merge)
        cv2.imwrite(os.path.join(save_path, folder + '.png'), im_merge)
        if cv2.waitKey(1) == ord('q'):
            raise KeyboardInterrupt
#%%
#TODO:遍历指定文件夹下的所有文件夹,搜索其中的im0.png,im1.png文件
def parser_folder(parent_folder=''):
    """Walk `parent_folder` bottom-up and merge the im0.png/im1.png pair
    found in every sub-folder via merge_images()."""
    for current_root, sub_folders, _ in os.walk(parent_folder, topdown=False):
        for sub_folder in sub_folders:
            merge_images(current_root, sub_folder)
#%%
#main:
# Entry point: merge every stereo pair found under the hard-coded dataset root.
if __name__ == '__main__':
    parser_folder('/home/bynav/RK3399/AI_SGBM/data/stereo')
48,727 | cyins/YOLOv5-SGBM | refs/heads/master | /latency_test.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: main function for YOLOv5 + SGBM model running on RK3399pro platform
@Date :2021/04/23 19:56:56
@Author :Noah
@version :version : 21042603
'''
import os,logging,sys,argparse,time,socket,math,queue
import cv2
from utils.Stereo_Application import SGBM,BM,disparity_centre
from utils.dataset import DATASET_NAMES,loadfiles,loadcam
from utils.stereoconfig import stereoCamera
from utils.general import scale_coords,confirm_dir,timethis,timeblock,socket_client,calib_type
# %% initial environment
def platform_init(bm_model=False, imgsz=640, device='pc', tcp_address=('192.168.3.181',9191)):
    """
    @description : initialize AI, SGBM, socket connect and stereo camera calibration parameter
    ---------
    @param :
        bm_model: type bool. when True, stereo matching use BM model, otherwise, stereo matching use SGBM model
        imgsz: type int, AI model used for image resize
        device: type string, reserved
        tcp_address: type tuple (address,port num),tcp connection address
    -------
    @Returns :
        AI: type class RKNNDetector, AI model handler
        SM: type class BM or SGBM, stereo matching model handler
        config: class stereoCamera, stereo camera calibration parameter
        soc_client: class socket_client, tcp connection handler
    -------
    """
    # NOTE(review): the body is intentionally empty in this latency test --
    # the docstring documents the full pipeline's interface, but the function
    # initialises nothing and implicitly returns None, not the handlers
    # listed above. Confirm before relying on its return value.
# %% load data as pipeline
def LoadData(source='', webcam=False, cam_freq=5, imgsz=(640,640), save_path='',debug=False):
    """
    @description : load data source and prepare it for iteration
    ---------
    @param :
        source: type string, data folder/file name, or webcam ID
        webcam: type bool, if true, the data source is a real-time webcam, otherwise, image(s) or video(s)
        cam_freq: type int, only effective when webcam is true, frames taken from the webcam per second
        imgsz: type tuple (int,int), reserved
        save_path: type string, directory where real-time webcam images are saved
        debug: type bool, forwarded to the webcam loader
    -------
    @Returns :
        dataset: type class loadcam or loadfiles, an iterator over the image stream
    -------
    """
    # Webcam sources need the capture frequency and calibration profile;
    # file/folder sources only need the path and target size.
    if webcam:
        return loadcam(source, cam_freq, imgsz, save_path, debug, calib_type.AR0135_416_416)
    return loadfiles(source, imgsz, save_path)
# %% obejct detection and matching
def object_matching(dataset):
    """
    @description : iterate the dataset and display the left image of every
                   frame until 'q' is pressed (detection/matching stripped
                   out for latency measurement)
    ---------
    @param :
        dataset: type class loadfiles or loadcam, data iterator yielding
                 (path, img_left, img_right, ..., timestamp, ...) tuples
    -------
    @Returns : None
    -------
    """
    for _, left_image, _right_image, _, _timestamp, _ in dataset:
        cv2.imshow('image', left_image)
        # Pressing 'q' in the preview window stops the iteration early.
        if cv2.waitKey(1) == ord('q'):
            break
#%% main
def main():
    """
    @description : used to convert most gloable parameters to partial parameters to accelerate
    ---------
    @param :None
    -------
    @Returns :None
    -------
    """
    print(args)
    # Unpack CLI arguments into locals (faster lookups in the hot path).
    source, device, bm_model, imgsz, webcam, cam_freq, ratio, debug, visual, UMat, tcp_port, tcp_ip, save_path= \
        args.source, args.device, args.BM, args.img_size, args.webcam, args.cam_freq, args.ratio, args.debug, args.visual,\
        args.UMat, args.tcp_port, args.tcp_ip, args.save_path
    # Verbose mode writes a timestamped debug log next to the results;
    # otherwise only warnings reach the console.
    if args.verbose:
        logging.basicConfig(filename=os.path.join(save_path,'log.txt'),
                            filemode='w',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    # EAFP: a bare int imgsz has no len(); normalise it to a (w, h) tuple.
    # NOTE(review): the bare except also hides unrelated errors; only
    # TypeError is expected here.
    try:
        len(imgsz)
    except:
        imgsz = (imgsz,imgsz)
    # dataset set up
    dataset = LoadData(source, webcam, cam_freq, imgsz, save_path, debug)
    # dataset iteration and model runs
    object_matching(dataset)
    # tt0 is set in the __main__ guard before main() is called.
    tt1=time.time()
    print('All Done using (%.2fs)'%(tt1-tt0))
#%% input port
# Entry point: record the start time, parse CLI options into the module-level
# `args` consumed by main(), then run the latency test.
if __name__ == '__main__':
    tt0=time.time()
    # parameter input with model start up
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", help="The data source for model input", type=str, default='./data/test08')
    parser.add_argument("--img_size", help="The data size for model input", nargs='+', type=int, default=[416,416])
    parser.add_argument("--tcp_port", help="tcp port", type=int, default=9191)
    parser.add_argument("--tcp_ip", help="tcp ip", type=str, default='192.168.3.181')
    parser.add_argument("--out_range", help="The data size for model input", nargs='+', type=float, default=[0.5,1])
    parser.add_argument("--score", help="inference score threshold", type=float, default=0)
    parser.add_argument("--cam_freq", help="The webcam frequency", type=int, default=1)
    parser.add_argument("--cam_type", help="0: OV9714, 1: AR0135 1280X720; 2: AR0135 1280X960", type=int, default=1)
    parser.add_argument("--ratio", help="ratio for distance calculate", type=float, default=0.05)
    parser.add_argument("--device", help="device on which model runs", type=str,default='pc')
    parser.add_argument("--UMat", help="Use opencv with openCL",action="store_true")
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument("--webcam", help="connect to real camera", action="store_true")
    parser.add_argument("--BM", help="switch to BM alogrithm for depth inference", action="store_true")
    parser.add_argument("--debug", help="save data source for replay", action="store_true")
    parser.add_argument("--visual", help="result visualization", action="store_true")
    parser.add_argument("--save_result", help="inference result save", action="store_true")
    parser.add_argument("--save_path",help="path for result saving",type=str,default="runs/detect/test")
    args = parser.parse_args()
    # Create a local entry function to speed up execution.
    main()
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,728 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/Img2Video.py | '''
Author : Noah
Date : 20210408
function: Load data to input to the model
'''
import os,sys,logging,glob,time,rospy
from pathlib import Path
from itertools import repeat
from multiprocessing.pool import ThreadPool
from threading import Thread
# from utils.general import confirm_dir,timethis,calib_type
import cv2
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
class loadfiles:
    """
    @description : load iamge or video file(s) and create a iterator
    ---------
    @function :
    -------
    """
    def __init__(self, path='', img_size=640, save_path='/home/bynav/RK3399/AI_SGBM/runs/'):
        """Collect image/video files from `path` (file, dir, or glob pattern)
        and prepare iteration state. Raises if no media files are found."""
        p = str(Path(path).absolute()) # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True)) # glob
        elif os.path.isdir(p):
            # Prefer numeric ordering (files named "<n>.<ext>"); fall back to
            # plain lexicographic sort when names are not numeric.
            try:
                files = sorted(glob.glob(os.path.join(p, '*.*')), key=lambda x: int(os.path.basename(x).split('.')[0])) # dir
            except:
                files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
        elif os.path.isfile(p):
            files = [p] # files
        else:
            raise Exception('ERROR: %s does not exist'%p) #cp3.5
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        # Images are iterated before videos.
        self.files = images + videos
        self.vid_file_path = save_path
        self.nf = ni + nv # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.writer = None #debug function
        if any(videos):
            self.new_video(videos[0]) # new video
        assert self.nf > 0, 'No images or videos found in %s. '%p \
            # 'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' #cp3.5
    def __iter__(self):
        # Restart iteration from the first file.
        self.count = 0
        return self
    def __next__(self):
        """Yield (path, image, capture) for the next frame; images advance one
        file per call, videos advance one frame per call."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video exhausted: release it and open the next file.
                self.count += 1
                self.cap.release()
                if isinstance(self.writer, cv2.VideoWriter):
                    self.writer.release()
                if self.count == self.nf: # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            print('video %d/%d (%d/%d) %s: '%(self.count + 1,self.nf,self.frame,self.nframes,path), end='\n') #cp3.5
        else:
            # Read image
            self.count += 1
            self.cap = None
            img0 = cv2.imread(path) # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('========================new image========================')
            print('image %d/%d %s: '%(self.count, self.nf, path), end='\n') #cp3.5
        # Padded resize
        return path, img0, self.cap
    def get_vid_dir(self,path):
        # Override the directory used for saving converted videos.
        self.vid_file_path = path
    def new_video(self, path):
        """Open `path` for reading and create a VideoWriter for the converted
        output alongside it."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        # self.file_path = '/home/bynav/AI_SGBM/runs/detect/exp/video'
        if not os.path.isdir(self.vid_file_path):
            os.mkdir(self.vid_file_path)
        # NOTE(review): `path` is already absolute here, so os.path.join
        # ignores vid_file_path and the .avi lands next to the source video --
        # confirm this is intended.
        save_path = os.path.join(self.vid_file_path, str(path+'.avi'))
        fourcc = 'mp4v' # output video codec
        # NOTE(review): w and h are computed but unused; the writer is
        # hard-coded to 2560x960 -- frames of any other size will not be
        # written correctly.
        w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2)
        h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), 1, (2560, 960))
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf # number of files
# Entry point: iterate a folder of recordings, writing video frames out as
# numbered PNGs and appending plain images to a video file.
if __name__ == '__main__':
    # %%
    save_path = '/home/bynav/0_code/RK3399/AI_SGBM/runs/detect/test/20210721/raw_video/20210721104034'
    dataset = loadfiles(path='/home/bynav/0_code/RK3399/AI_SGBM/runs/detect/test/20210721/raw_video',save_path = save_path)
    fourcc = 'mp4v'
    # %%
    # save_path = os.path.join(save_path,'test.avi')
    # video_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), 10, (832, 312))
    # %%
    for _,img,_ in dataset:
        if dataset.mode == 'image':
            # NOTE(review): video_writer is only created in the commented-out
            # line above, so this branch raises NameError for image inputs --
            # confirm whether the writer should be re-enabled.
            video_writer.write(img)
            cv2.imshow('Test',img)
            if cv2.waitKey(1) == ord('q'):
                break
        else:
            # Video frames are dumped as <frame>.png under save_path.
            save_name=os.path.join(save_path,str(dataset.frame)+'.png')
            cv2.imwrite(save_name,img)
            cv2.imshow('Test',img)
            if cv2.waitKey(1) == ord('q'):
                break
48,729 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/rknn_dataset_gen.py | '''
Author : Noah
Date : 20210412
function: generate dataset for rknn quantization
'''
import os,sys,glob,time,random,argparse
from pathlib import Path
image_format = ['.jpg','.png']
# Entry point: sample `--num` random images from `--source` and write their
# absolute paths to <output>/dataset.txt for RKNN quantization.
if __name__ == '__main__':
    # %%
    # parameter input with model start up
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", help="The image source folder path", type=str, default='../yolo_easy/data/images/left')
    parser.add_argument("--output", help="output path", type=str, default='../yolo_sololife')
    parser.add_argument("--num", help="the image number for dataset generate", type=int, default=500)
    args = parser.parse_args()
    print(args)
    # %%
    # read images in folder and do random sort
    source_dir = os.path.abspath(args.source)
    assert os.path.isdir(source_dir),'%s is not a existed folder' %source_dir
    candidates = next(os.walk(source_dir))[2]
    images = [name for name in candidates if os.path.splitext(name)[1] in image_format]
    assert len(images) >= args.num,'There are not enough images in the source folder'
    random.shuffle(images)
    # %%
    # get the first num images path and write it in dataset.txt
    assert os.path.isdir(args.output),'the output folder is not existed'
    with open(os.path.join(args.output,'dataset.txt'),'w') as dataset_file:
        # Only the first `num` shuffled images are recorded, one path per line.
        for name in images[:args.num]:
            dataset_file.write(os.path.join(source_dir,name))
            dataset_file.write('\n')
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,730 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/dataset.py | '''
Author : Noah
Date : 20210408
function: Load data to input to the model
'''
import os,sys,logging,glob,time,queue
from pathlib import Path
from itertools import repeat
from multiprocessing.pool import ThreadPool
from threading import Thread
import threading
import numpy as np
from utils.general import confirm_dir,timethis,calib_type
import cv2
# suffix whitelists used by loadfiles to classify each discovered file
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
class DATASET_NAMES():
    """
    @description : pre-defined object class names (and box colours) for object detection
    ---------
    @function : None — this class is a pure namespace of constants; the lists are
        index-aligned with the corresponding model's output class indices.
    -------
    """
    # PASCAL VOC label set (20 classes)
    voc_names = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
                'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
    # COCO label set (80 classes)
    coco_names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
                'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
                'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
                'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
                'hair drier', 'toothbrush']
    # binary mask / no-mask label set
    masks_names = ['mask','nomask']
    # reduced VOC subset used by the split-trained model
    voc_split_names = ['bottle','chair','diningtable','person','pottedplant','sofa','tvmonitor']
    # reduced COCO subset used by the split-trained model (see detect.platform_init)
    coco_split_names = ['person','sports ball','bottle','cup','chair','potted plant','cell phone', 'book']
    # per-class BGR box colours; indexed via <names>.index(label) when drawing
    name_color = [[246, 252, 48],[38, 9, 129],[235, 204, 85],[51, 148, 36],[68, 154, 71],[77, 204, 64],[142, 183, 11],
                [76, 224, 194],[62, 211, 108],[87, 182, 84],[217, 236, 51],[83, 160, 30],[136, 38, 28],[157, 71, 128],
                [166, 144, 72],[142, 82, 203],[161, 110, 0],[179, 75, 107],[241, 31, 58],[188, 179, 151],[6, 141, 72],
                [34, 65, 134],[248, 200, 119],[98, 14, 74],[108, 42, 45],[65, 253, 19],[41, 70, 255],[72, 54, 7],
                [86, 8, 97],[106, 129, 218],[59, 147, 175],[234, 40, 195],[92, 42, 230],[236, 173, 62],[144, 190, 177],
                [18, 181, 241],[247, 59, 100],[212, 181, 95],[143, 117, 204],[30, 46, 171],[86, 254, 78],[82, 124, 249],
                [142, 236, 83],[193, 223, 226],[198, 202, 19],[101, 171, 24],[212, 147, 16],[55, 73, 49],[104, 91, 136],
                [205, 89, 132],[42, 103, 28],[109, 60, 150],[250, 216, 158],[211, 132, 120],[188, 40, 169],[92, 12, 162],
                [107, 64, 221],[149, 174, 193],[126, 54, 154],[88, 107, 46],[115, 128, 33],[73, 202, 252],[1, 224, 125],
                [9, 55, 163],[66, 145, 204],[61, 248, 181],[220, 238, 17],[53, 26, 250],[162, 156, 200],[240, 117, 64],
                [53, 65, 194],[17, 146, 93],[197, 199, 158],[64, 54, 35],[188, 183, 177],[206, 17, 174],[34, 155, 144],
                [142, 123, 110],[211, 17, 89],[54, 38, 67]]
class pipeline:
    """
    @description : a single-slot data pipeline shared between the camera grabber
        thread and the consumer thread
    ---------
    @function : put() publishes the newest (timestamp, image, frame) triple;
        get() returns it together with a `valid` flag that is True only for the
        first read after each put() (so a frame is never consumed twice).
    -------
    """
    def __init__(self,width=2560,height=960):
        # width/height describe the expected frame size; kept for interface
        # compatibility (no buffer is pre-allocated from them).
        self.timestamp=0.
        self.frame=0
        # bug fix: define the image slot up front so a get() issued before the
        # first put() returns (0., None, 0, False) instead of raising
        # AttributeError on the missing `image` attribute.
        self.image = None
        self.valid = False
        self.lock = threading.Lock()
    def put(self,timestamp,img0,frame):
        """Publish the newest frame, overwriting any unread one."""
        with self.lock:
            self.timestamp = timestamp
            self.frame = frame
            self.image = img0
            self.valid = True
    def get(self):
        """Return (timestamp, image, frame, valid); clears `valid` so each put() is read once."""
        with self.lock:
            timestamp=self.timestamp
            img0=self.image
            frame = self.frame
            valid = self.valid
            self.valid = False
        return timestamp,img0,frame,valid
class loadfiles:
    """
    @description : load image or video file(s) and create an iterator
    ---------
    @function : iterating yields (path, left_img, right_img, (h, half_w),
        timestamp, capture) tuples; every source frame is assumed to be a
        side-by-side stereo pair and is split down the middle.
    -------
    """
    def __init__(self, path='', img_size=640, save_path=''):
        """
        path: glob pattern, directory, or single file to read
        img_size: reserved, kept for interface compatibility
        save_path: root directory under which the 'video' output folder is made
        """
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob pattern
        elif os.path.isdir(p):
            try:
                # prefer numeric ordering when file stems are integers (0.jpg, 1.jpg, ...)
                files = sorted(glob.glob(os.path.join(p, '*.*')), key=lambda x: int(os.path.basename(x).split('.')[0]))
            except:
                files = sorted(glob.glob(os.path.join(p, '*.*')))  # fall back to lexicographic order
        elif os.path.isfile(p):
            files = [p]  # single file
        else:
            raise Exception('ERROR: %s does not exist'%p) #cp3.5
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)
        self.img_size = img_size
        self.files = images + videos        # all images first, then all videos
        self.vid_file_path = os.path.join(save_path,'video')
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        self.writer = None  # debug function
        self.file_name = 'Orign'
        # robustness fix: callers read `dataset.frame` even in image-only mode,
        # but it used to be set only by new_video(); initialise it here.
        self.frame = 0
        if any(videos):
            self.new_video(videos[0])  # open the first video
        else:
            self.cap = None
        assert self.nf > 0, 'Supported formats are:\nimages: %s\nvideos: %s'%(img_formats,vid_formats) #cp3.5
        # 'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' #cp3.6
    def __iter__(self):
        self.count = 0
        return self
    def __next__(self):
        """Return the next stereo pair, advancing through images then videos."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]
        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # current video exhausted: release it and move to the next file
                self.count += 1
                self.cap.release()
                if isinstance(self.writer, cv2.VideoWriter):
                    self.writer.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            # bug fix: self.frame is an int and must be stringified before
            # concatenation (the original raised TypeError on every video frame)
            self.file_name = path+'_'+str(self.frame)
            self.frame += 1
            # print('video %d/%d (%d/%d) %s: '%(self.count + 1,self.nf,self.frame,self.nframes,path), end='') #cp3.5
        else:
            # Read image
            self.count += 1
            self.mode = 'image'
            img0 = cv2.imread(path)  # BGR
            self.file_name = os.path.split(path)[-1]
            assert img0 is not None, 'Image Not Found ' + path
            # print('========================new image========================')
            # print('image %d/%d %s: '%(self.count, self.nf, path), end='\n') #cp3.5
        # build a (seconds, fraction) timestamp, zero-padding the fraction to 9 digits
        TimeStamp = str(time.time()).split('.')
        if len(TimeStamp[1])<9:
            for i in range(9-len(TimeStamp[1])):
                TimeStamp[1] += '0'
        h = img0.shape[0]
        w = img0.shape[1]
        w1 = round(w/2)
        # the frame is a side-by-side stereo pair: split into left/right halves
        img0_left = img0[:,:w1,:]
        img0_right = img0[:,w1:,:]
        return path, img0_left, img0_right, (h,w1), TimeStamp, self.cap
    def get_vid_dir(self,path):
        # override the directory used for saving the output video
        self.vid_file_path = path
    def new_video(self, path):
        """Open video `path` and prepare a writer sized for the left half only."""
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        # self.file_path = '/home/bynav/AI_SGBM/runs/detect/exp/video'
        if not os.path.isdir(self.vid_file_path):
            # makedirs(exist_ok=True): also creates missing parents and is race-safe
            os.makedirs(self.vid_file_path, exist_ok=True)
        save_path = os.path.join(self.vid_file_path, str(path.split('/')[-1].split('.')[0])+'.avi')
        fps = self.cap.get(cv2.CAP_PROP_FPS)
        fourcc = 'mp4v'  # output video codec
        w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2)   # writer stores one (left) half
        h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
    def __len__(self):
        return self.nf  # number of files
class loadcam:
    """
    @description : load real-time webcam data and create a iterator
    ---------
    @function : a daemon thread continuously grabs side-by-side stereo frames
        into a shared `pipeline`; iterating yields the newest frame split into
        left/right halves, throttled to at most `cam_freq` frames per second.
    ---------
    """
    # @timethis
    def __init__(self, pipe='4', cam_freq=5, img_size=640, save_path='', debug=False, cam_mode=1):
        """
        pipe: camera index as a numeric string (e.g. '0') or a stream URL
        cam_freq: maximum frames per second handed to the consumer
        img_size: reserved, kept for interface compatibility
        save_path: root directory for the 'webcam'/'webimg' dump folders
        debug: when True, every consumed raw frame is also written to disk
        cam_mode: calib_type value selecting the sensor resolution
        """
        self.img_size = img_size
        if pipe.isnumeric():
            # security/robustness fix: int() instead of eval() — the string is
            # already guaranteed numeric, so eval() gains nothing and is unsafe.
            pipe = int(pipe)  # local camera
            # pipe = 'rtsp://192.168.1.64/1' # IP camera
            # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
            # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
        self.debug = debug
        self.pipe = pipe
        self.time = 0
        self.writer = None
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        # pick the raw sensor resolution matching the calibration mode
        if cam_mode == calib_type.AR0135_1280_960.value or cam_mode == calib_type.AR0135_416_416.value or cam_mode == calib_type.AR0135_640_640.value:
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,2560)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,960) #AR0135
        elif cam_mode == calib_type.AR0135_640_480.value:
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,480) #AR0135
        else:
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,2560)
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720) #OV9714
        self.pipeline = pipeline(int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        self.cam_freq = cam_freq
        self.fps = self.cap.get(cv2.CAP_PROP_FPS)
        self.size = (self.cap.get(cv2.CAP_PROP_FRAME_WIDTH),self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # self.queue = queue.LifoQueue(maxsize=self.fps)
        bufsize = 1
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, bufsize)  # keep only the newest frame in the driver buffer
        print('Camera run under %s and %s fps'%(str(self.size),str(self.fps)))
        self.vid_file_path = confirm_dir(save_path,'webcam')
        self.img_file_path = confirm_dir(save_path,'webimg')
        self.new_video('test.avi')
        self.mode = 'webcam'
        self.count = 0
        self.frame = 0
        self.real_frame = 0
        self.valid = False
        self.start = False
        # daemon grabber thread: keeps the pipeline filled with the newest frame
        self.thread = Thread(target=self._update,args=[],daemon=True)
        self.thread.start()
    def _update(self):
        """Grabber loop executed on the daemon thread."""
        while True:
            self.real_frame += 1
            # NOTE(review): 0.044 s compensates the measured cap.read() latency —
            # the original comment claimed 290 ms; confirm per platform.
            TimeStamp = time.time()-0.044
            # Read frame
            if self.pipe in [0,1,2,3,4,5]:  # local camera
                ret_val, img0 = self.cap.read()
                # img0 = cv2.flip(img0, 1) # flip left-right
            else:  # IP camera: grab continuously, decode only every 30th frame
                n = 0
                while True:
                    n += 1
                    self.cap.grab()
                    if n % 30 == 0:  # skip frames
                        ret_val, img0 = self.cap.retrieve()
                        if ret_val:
                            break
            assert ret_val, 'Camera Error %d'%self.pipe #cp3.5
            self.pipeline.put(TimeStamp,img0,self.real_frame)
            self.start = True
    def __iter__(self):
        return self
    # @timethis
    def __next__(self):
        """Return the newest stereo pair, rate-limited to cam_freq fps."""
        runtime = time.time() - self.time
        if runtime < 1/self.cam_freq:
            time.sleep(round(1/self.cam_freq-runtime,3))
        # busy-wait until the grabber thread publishes a fresh (valid) frame
        while True:
            if self.start:
                TimeStamp,img0,self.frame,self.valid = self.pipeline.get()
                if self.valid:
                    break
        # print('========================= webcam %d ======================='%self.frame,end='\r') #cp3.5
        # pad the fractional part of the timestamp to 9 digits (nanoseconds)
        TimeStamp = str(TimeStamp).split('.')
        if len(TimeStamp[1])<9:
            for i in range(9-len(TimeStamp[1])):
                TimeStamp[1] += '0'
        w = img0.shape[1]
        w1 = int(w/2)
        if self.debug:
            # dump the raw side-by-side frame for offline replay
            save_file = os.path.join(self.img_file_path,(str(self.frame)+'.bmp'))
            cv2.imwrite(save_file,img0)
        # split the side-by-side stereo frame into left/right halves
        imgl = img0[:,:w1,:]
        imgr = img0[:,w1:,:]
        self.count += 1
        img_path = 'webcam.jpg'
        self.time = time.time()
        return img_path, imgl, imgr, None, TimeStamp, None
    def get_vid_dir(self,path):
        # override the directory used for saving the output video
        self.vid_file_path = path
    def new_video(self, path):
        """(Re)create the video writer sized for the left half of the stream."""
        if isinstance(self.writer, cv2.VideoWriter):
            self.writer.release()
        fps = self.cap.get(cv2.CAP_PROP_FPS)
        fourcc = 'mp4v'  # output video codec
        w = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)/2)
        h = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        save_path = os.path.join(self.vid_file_path, path)
        self.writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
    def __len__(self):
        return 0
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,731 | cyins/YOLOv5-SGBM | refs/heads/master | /raw_record.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: : 录数据
@Date :2021/05/31 15:05:22
@Author :Yyc
@version :1.0
'''
import os,logging,sys,argparse,time,math,queue
from datetime import datetime
import cv2
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
from utils.img_preprocess import Image_Rectification
from utils.Stereo_Application import Stereo_Matching,disparity_centre
from utils.dataset import DATASET_NAMES,loadfiles,loadcam
from utils.stereoconfig import stereoCamera
from utils.general import confirm_dir,timethis,timeblock,socket_client,calib_type,camera_mode,plot_one_box
#%%
def main():
    """Record raw side-by-side stereo video plus per-frame timestamps to disk.

    Reads frames from the webcam loader, burns a frame-counter/timestamp label
    into each frame, shows a preview window ('q' quits), and writes the video
    and a companion timestamp text file under <save_path>/<YYYYMMDD>/raw_video.
    """
    dataset = loadcam(pipe=args.source,cam_freq=args.fps,img_size=args.img_size,save_path=args.save_path,debug=args.debug,cam_mode=args.cam_type)
    file_path = confirm_dir(args.save_path,datetime.now().strftime("%Y%m%d"))
    # file_path = confirm_dir(file_path,datetime.now().strftime("%Y%m%d%H%M%S"))
    vid_path = confirm_dir(file_path,'raw_video')
    file_name = os.path.join(vid_path,datetime.now().strftime("%Y%m%d%H%M%S")+'.avi')
    # NOTE(review): this duplicates vid_path — both resolve to the same folder
    file_path = confirm_dir(file_path,'raw_video')
    fourcc = 'mp4v' # output video codec
    # 2560x960 matches the full side-by-side AR0135 frame
    vid_writer = cv2.VideoWriter(file_name,cv2.VideoWriter_fourcc(*fourcc), dataset.fps, (2560, 960))
    with open(os.path.join(file_path,datetime.now().strftime("%Y%m%d%H%M%S")+'ts.txt'),'w') as f:
        for _,img_left,img_right,_,TimeStamp,_ in dataset:
            with timeblock('RPOCESS'):
                # re-join the split halves so the raw stereo pair is stored intact
                frame = cv2.hconcat([img_left,img_right])
                # burn frame counter + timestamp into the top-left corner
                xyxy = [0,0,1,1]
                box_label = str(dataset.count)+'('+str(dataset.frame)+')'+TimeStamp[0]+'.'+TimeStamp[1]
                plot_one_box(xyxy, frame, label=box_label, color=[137,205,36], line_thickness=5)
                cv2.imshow('test',frame)
                if cv2.waitKey(1) == ord('q'):
                    break
                vid_writer.write(frame)
                # one '$TIMESTAMP,...' line per recorded frame, matching the video
                line = '$TIMESTAMP,'+str(dataset.count)+'('+str(dataset.frame)+')'+':'+str(TimeStamp)+'*FC'+'\n'
                f.write(line)
#%% input port
if __name__ == '__main__':
    tt0=time.time()
    # parameter input with model start up
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", help="The data source for model input", type=str, default='./data/test08')
    parser.add_argument("--img_size", help="The data size for model input", nargs='+', type=int, default=[416,416])
    parser.add_argument("--tcp_port", help="tcp port", type=int, default=9191)
    parser.add_argument("--tcp_ip", help="tcp ip", type=str, default='192.168.3.181')
    parser.add_argument("--out_range", help="The data size for model input", nargs='+', type=float, default=[0.5,1])
    parser.add_argument("--sm_lambda", help="Stereo matching post filter parameter lambda", type=float, default=8000)
    parser.add_argument("--sm_sigma", help="Stereo matching post filter parameter sigmacolor", type=float, default=1.0)
    parser.add_argument("--sm_UniRa", help="Stereo matching post filter parameter UniquenessRatio", type=int, default=40)
    parser.add_argument("--score", help="inference score threshold", type=float, default=0)
    parser.add_argument("--fps", help="The webcam frequency", type=int, default=1)
    parser.add_argument("--cam_type", help="0: OV9714, 1: AR0135 1280X720; 2: AR0135 1280X960; 3:AR0135 416X416; 4:AR0135 640X640; 5:AR0135 640X480", type=int, default=5)
    parser.add_argument("--ratio", help="ratio for distance calculate", type=float, default=0.05)
    parser.add_argument("--device", help="device on which model runs", type=str,default='pc')
    parser.add_argument("--UMat", help="Use opencv with openCL",action="store_true")
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument("--webcam", help="connect to real camera", action="store_true")
    parser.add_argument("--BM", help="switch to BM alogrithm for depth inference", action="store_true")
    parser.add_argument("--debug", help="save data source for replay", action="store_true")
    parser.add_argument("--visual", help="result visualization", action="store_true")
    parser.add_argument("--save_result", help="inference result save", action="store_true")
    parser.add_argument("--save_path",help="path for result saving",type=str,default="runs/detect/test")
    args = parser.parse_args()
    # %% keep the heavy work inside main() so names resolve as fast locals
main() | {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,732 | cyins/YOLOv5-SGBM | refs/heads/master | /detect.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: main function for YOLOv5 + SGBM model running on RK3399pro platform
@Date :2021/04/23 19:56:56
@Author :Noah
@version :version : 21052005
'''
import os,logging,sys,argparse,time,math,queue
from datetime import date,datetime
import cv2
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
from utils.rknn_detect_yolov5 import RKNNDetector
from utils.img_preprocess import Image_Rectification
from utils.Stereo_Application import Stereo_Matching,disparity_centre,reproject_3dcloud
from utils.dataset import DATASET_NAMES,loadfiles,loadcam
from utils.stereoconfig import stereoCamera
from utils.general import confirm_dir,timethis,timeblock,socket_client,calib_type,camera_mode,matching_points_gen,plot_one_box
# %% initial environment
def platform_init(imgsz=640, tcp_address=('192.168.3.181',9191), save_path=''):
    """
    @description : initialize AI, SGBM, socket connect and stereo camera calibration parameter
    ---------
    @param :
        imgsz: type int or tuple, AI model input size
        tcp_address: type tuple (address, port num), tcp connection address
        save_path: type str, directory handed to the stereo matcher for dumps
        (other tuning values are read from the module-level `args`)
    -------
    @Returns :
        AI: type class RKNNDetector, AI model handler
        SM: type class BM or SGBM, stereo matching model handler
        soc_client: class socket_client, tcp connection handler
        cam_mode: class camera_mode, selected camera configuration
    -------
    """
    #init stereo matching model
    cam_mode = camera_mode(args.cam_type)
    soc_client=socket_client(address=tcp_address)
    # bug fix: the previous `if Stereo_Matching.count != 0: del SM` deleted a
    # local name that was not bound yet, raising UnboundLocalError whenever a
    # matcher instance already existed; a local `del` cannot free the previous
    # instance anyway, so just construct the new one.
    SM = Stereo_Matching(cam_mode.mode, args.BM, args.filter,\
            args.sm_lambda, args.sm_sigma, args.sm_UniRa,\
            args.sm_numdi, args.sm_mindi, args.sm_block, args.sm_tt,\
            args.sm_pfc, args.sm_pfs, args.sm_pft,\
            args.sm_sws, args.sm_sr, args.sm_d12md, save_path)
    #init AI model
    MASKS = [[0,1,2],[3,4,5],[6,7,8]]
    ANCHORS = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]]
    CLASSES = DATASET_NAMES.coco_split_names
    # same fix as above for the previous `if RKNNDetector.count != 0: del AI`
    AI = RKNNDetector(model='weights/best_416_coco_split_50.rknn',wh=imgsz,masks=MASKS,anchors=ANCHORS,names=CLASSES)
    return AI,SM,soc_client,cam_mode
# %% load data as pipeline
def LoadData(source='', webcam=False, cam_freq=5, imgsz=(640,640), save_path='',debug=False, cam_mode =5):
    """
    @description : load data source and prepare it for iteration
    ---------
    @param :
        source: type string, data folder/file name, or webcam ID
        webcam: type bool, True -> real-time camera stream, False -> image/video files
        cam_freq: type int, frames per second taken from the camera (webcam only)
        imgsz: type tuple (int,int), reserved
        save_path: type string, directory for real-time webcam images
        debug: type bool, save the raw data source for replay
        cam_mode: camera_mode object describing the sensor configuration
    -------
    @Returns :
        dataset: loadcam or loadfiles iterator over the image stream
        config: stereoCamera calibration parameters matching cam_mode
    -------
    """
    if webcam:
        loader = loadcam(source, cam_freq, imgsz, save_path, debug, cam_mode.mode.value)
    else:
        loader = loadfiles(source, imgsz, save_path)
    # the calibration set depends only on the camera mode, not on the loader kind
    calib = stereoCamera(mode=cam_mode.mode.value,height=cam_mode.size[1],width=cam_mode.size[0])
    return loader, calib
# %%
def object_matching(ai_model,sm_model,camera_config,dataset,ratio,imgsz,fps,debug,UMat,soc_client,visual,cam_mode,filter, save_path):
    """
    @description : a combination of data iteration, object predict, stereo matching and data transmission process
    ---------
    @param :
        ai_model: type class RKNNDetector, AI model handler
        sm_model: type BM or SGBM model, stereo matching model handler
        camera_config: type class stereoCamera, stereo camera calibration parameters
        dataset: type class loadfiles or loadcam, data iterator
        ratio: type float, one of the control parameters of disparity to depth process
        imgsz: type tuple (int,int), used by Image_Rectification function and socket function to resize images for SGBM to the same size as image for object detection
        fps: type int, reserved
        debug: type bool, if true, add post-process image to the packet for socket transmission and save original image for replay on PC
        UMat: type bool, if true, use GPU accelerating SGBM and image rectification process
        soc_client: type class socket_client, the tcp transfer handler
        visual: type bool, if true send the result image through TCP with coordinates, otherwise, only coordinates
        cam_mode: type int, the camera configuration number
        filter: type bool, if true, the disparity map will be a dense matrix smoothed by a post WLS filter, otherwise, disparity map is a sparse matrix
        save_path: str, save path for result
    -------
    @Returns : None
    -------
    """
    # single-slot queues: each iteration produces exactly one disparity map and one prediction set
    disarity_queue = queue.Queue(maxsize=1)
    pred_queue = queue.Queue(maxsize=1)
    # left-camera intrinsics: focal lengths (fx, fy) and principal point (v, u)
    fx = camera_config.cam_matrix_left[0,0]
    fy = camera_config.cam_matrix_left[1,1]
    v = camera_config.cam_matrix_left[0,2]
    u = camera_config.cam_matrix_left[1,2]
    t1 = time.time()
    real_time = time.asctime()
    for _,img_left,img_right,_,TimeStamp,_ in dataset:
        t0 = time.time()
        # `distance` packet layout: [TimeStamp, [label, x1,y1,x2,y2, dist], ...]
        distance = []
        distance.append(TimeStamp)
        if dataset.mode == 'image' or dataset.mode == 'webcam':
            frame = str(dataset.count)
        else:
            frame = str(dataset.count)+'-'+str(dataset.frame)
        img_raw, img_ai, img_left, img_right, gain, padding=Image_Rectification(camera_config, img_left, img_right, imgsz=imgsz, debug=debug, UMat=UMat, cam_mode=cam_mode)
        # with timeblock('process image:'):
        # run stereo matching and AI inference concurrently on two threads
        sm_t = Thread(target=sm_model.run,args=(img_left,img_right,camera_config.Q,disarity_queue,UMat,filter))
        ai_t = Thread(target=ai_model.predict,args=(img_raw, img_ai, gain, padding, pred_queue))
        sm_t.start()
        ai_t.start()
        ai_t.join()
        sm_t.join()
        # %%
        """
        Split the prediction boxes of one image one by one and map them back to the original image size
        """
        disparity,color_3d = disarity_queue.get()
        # color_depth = color_3d[:,:,2]
        # color_xy = color_3d[:,:,:2]
        preds = pred_queue.get()
        # preds layout (when non-empty): [labels, scores, coords, raw_coords]
        labels = []
        coords = []
        scores = []
        raw_coords = []
        if preds[0] != []:
            labels = preds[0]
            scores = preds[1]
            coords = preds[2]
            raw_coords = preds[3]
        index = 0
        for label,score,box,raw_box in zip(labels,scores,coords,raw_coords):
            if score >= args.score:
                pred = []
                temp_dis = disparity_centre(raw_box, ratio, disparity, color_3d[:,:,2], camera_config.focal_length, camera_config.baseline, camera_config.pixel_size, args.sm_mindi)
                # keep only detections whose distance lies inside the configured range (m -> mm)
                if (temp_dis >= args.out_range[0]*1000) & (temp_dis <= args.out_range[1]*1000):
                    # back-project the two bottom box corners through the pinhole model
                    distance.append([label,\
                        float((raw_box[0]-v)*temp_dis/fx),\
                        float((raw_box[3]-u)*temp_dis/fy),\
                        float((raw_box[2]-v)*temp_dis/fx),\
                        float((raw_box[3]-u)*temp_dis/fy),\
                        float(temp_dis)]) # two bottom corners and distance to focal point
                    # distance.append([label,\
                    #     float(color_3d[raw_box[3]-1,raw_box[0]-1,0]),\
                    #     float(color_3d[raw_box[3]-1,raw_box[0]-1,1]),\
                    #     float(color_3d[raw_box[3]-1,raw_box[2]-1,0]),\
                    #     float(color_3d[raw_box[3]-1,raw_box[2]-1,1]),\
                    #     float(temp_dis)]) # two bottom corners and distance to focal point
                    # %%
                    """
                    Draw the final depth result onto the image
                    """
                    xyxy = [raw_box[0],raw_box[1],raw_box[2],raw_box[3]]
                    box_label = str(round(temp_dis,2)) #cp3.5
                    plot_one_box(xyxy, img_ai, label=box_label, color=DATASET_NAMES.name_color[DATASET_NAMES.coco_split_names.index(label)], line_thickness=1)
                index += 1
        # burn frame counter + timestamp into a one-pixel box at the padded top edge
        xyxy = [0,padding[0],1,padding[0]+1]
        box_label = str(dataset.count)+'('+str(dataset.frame)+')'+str(TimeStamp[0]+'.'+TimeStamp[1])
        plot_one_box(xyxy, img_ai, label=box_label, color=[137,205,36], line_thickness=1)
        # %%% send result
        soc_client.send(img_ai, disparity, padding, distance, frame, imgsz, 0.5, visual)
        # %%
        """
        save result to local
        """
        # with timeblock('write file'):
        if args.save_result:
            txt_path = confirm_dir(save_path,'txt')
            # %%
            """
            if dataset.mode == 'image':
                file_path = confirm_dir(save_path,'images')
                save_path = os.path.join(file_path,str(dataset.count)+'.bmp')
                cv2.imwrite(save_path, img_ai)
            elif dataset.mode == 'video' or dataset.mode == 'webcam':
                file_path = os.path.join(save_path,dataset.mode)
                dataset.get_vid_dir(file_path)
                dataset.writer.write(img_ai)
            # %%
            with open(os.path.join(txt_path,'result.txt'),'w') as f:
                f.write('-----------------'+real_time+str(frame)+'-----------------\n')
                for pred in distance[1:]:
                    line = real_time+': '+str(pred[0])+','+str(pred[1])+','+str(pred[2])+','+str(pred[3])+','+str(pred[4])+','+str(pred[5])+'\n'
                    f.write(line)
            """
            """
            save boxes
            """
            # %%
            # with open(os.path.join(txt_path,str(dataset.count)+'.txt'),'a+') as f:
            #     for raw_box in raw_coords:
            #         line = '['+str(raw_box[0])+','+str(raw_box[1])+']'+','+'['+str(raw_box[2])+','+str(raw_box[3])+']'+'\n'
            #         f.write(line)
            # %%
            """
            save timestamp
            """
            with open(os.path.join(txt_path,'time_stamp.txt'),'+a') as f:
                line = '$TIMESTAMP,'+str(dataset.count)+'('+str(dataset.frame)+')'+':'+str(TimeStamp)+'-'+str(time.time()-t0)+'\n'
                f.write(line)
        # %%
        if dataset.mode == 'webcam':
            print('frame: %s(%s) Done. (%.3fs);Process: use (%.3fs)'%(frame,dataset.frame,(time.time()-t1),time.time()-t0),end='\r') #cp3.5
        else:
            print('frame: %s Done. (%.3fs);Process: use (%.3fs)'%(frame,(time.time()-t1),time.time()-t0),end='\r') #cp3.5
        t1=time.time()
#%% main
def main():
    """
    @description : used to convert most global parameters to local parameters to accelerate;
        wires together platform init, data loading, and the detection/matching loop
    ---------
    @param : None
    -------
    @Returns : None
    -------
    """
    print(args)
    # unpack CLI arguments into locals (faster lookups inside the hot loop);
    # note: sm_TextureThreshold <- args.sm_tt and sm_filter <- args.filter
    source, device, bm_model, sm_lambda, sm_sigma, sm_unira, sm_numdisparity, sm_mindisparity, sm_block, sm_TextureThreshold, sm_filter,\
    imgsz, webcam, fps, ratio, debug, visual, UMat, tcp_port, tcp_ip= \
    args.source, args.device, args.BM, args.sm_lambda, args.sm_sigma, args.sm_UniRa,\
    args.sm_numdi, args.sm_mindi, args.sm_block, args.sm_tt,\
    args.filter, args.img_size, args.webcam, args.fps, args.ratio, args.debug, args.visual,\
    args.UMat, args.tcp_port, args.tcp_ip
    # timestamped result directory: <save_path>/<YYYYMMDD>/<YYYYMMDDHHMMSS>
    save_path = confirm_dir(args.save_path,datetime.now().strftime("%Y%m%d"))
    save_path = confirm_dir(save_path,datetime.now().strftime("%Y%m%d%H%M%S"))
    if args.verbose:
        logging.basicConfig(filename=os.path.join(save_path,'log.txt'),
                            filemode='w',
                            format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                            datefmt='%H:%M:%S',
                            level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    # normalise imgsz to a (w, h) pair when a single int was given
    try:
        len(imgsz)
    except:
        imgsz = (imgsz,imgsz)
    # platform init
    AI, SM, soc_client,cam_mode= platform_init(imgsz, (tcp_ip,tcp_port), save_path)
    # dataset set up
    dataset, camera_config = LoadData(source, webcam, fps, imgsz, save_path, debug,cam_mode)
    # dataset iteration and model runs
    object_matching(AI, SM, camera_config, dataset, ratio, imgsz, fps, debug, UMat, soc_client, visual, cam_mode.mode.value, sm_filter, save_path)
    tt1=time.time()
    print('All Done using (%.2fs)'%(tt1-tt0))
#%% input port
if __name__ == '__main__':
    tt0=time.time()
    # parameter input with model start up
    parser = argparse.ArgumentParser()
    parser.add_argument("--source", help="The data source for model input", type=str, default='0')
    parser.add_argument("--img_size", help="The data size for model input", nargs='+', type=int, default=[416,416])
    parser.add_argument("--tcp_port", help="tcp port", type=int, default=9191)
    parser.add_argument("--tcp_ip", help="tcp ip", type=str, default='192.168.3.181')
    parser.add_argument("--out_range", help="The data size for model input", nargs='+', type=float, default=[0.3,1])
    parser.add_argument("--sm_lambda", help="Stereo matching post filter parameter lambda", type=float, default=8000)
    parser.add_argument("--sm_sigma", help="Stereo matching post filter parameter sigmacolor", type=float, default=2.0)
    parser.add_argument("--sm_UniRa", help="Stereo matching post filter parameter UniquenessRatio", type=int, default=5)
    parser.add_argument("--sm_numdi", help="Stereo matching max number disparity", type=int, default=64)
    parser.add_argument("--sm_mindi", help="Stereo matching min number disparity", type=int, default=-5)
    parser.add_argument("--sm_block", help="Stereo matching blocksize", type=int, default=9)
    parser.add_argument("--sm_tt", help="Stereo matching blocksize", type=int, default=5)
    parser.add_argument("--sm_pfc", help="Stereo matching PreFilterCap", type=int, default=63)
    parser.add_argument("--sm_pfs", help="Stereo matching PreFilterSize", type=int, default=9)
    parser.add_argument("--sm_pft", help="Stereo matching PreFilterType", type=int, default=1)
    parser.add_argument("--sm_sws", help="Stereo matching SpeckleWindowSize", type=int, default=50)
    parser.add_argument("--sm_sr", help="Stereo matching SpeckleRange", type=int, default=2)
    parser.add_argument("--sm_d12md", help="Stereo matching Disp12MaxDiff", type=int, default=1)
    parser.add_argument("--score", help="inference score threshold", type=float, default=0)
    parser.add_argument("--fps", help="The webcam frequency", type=int, default=4)
    parser.add_argument("--cam_type", help="0: OV9714, 1: AR0135 1280X720; 2: AR0135 1280X960; 3:AR0135 416X416; 4:AR0135 640X640; 5:AR0135 640X480; 6:MIDDLEBURY 416X360", type=int, default=5)
    parser.add_argument("--ratio", help="ratio for distance calculate", type=float, default=0.05)
    parser.add_argument("--device", help="device on which model runs", type=str,default='pc')
    parser.add_argument("--UMat", help="Use opencv with openCL",action="store_true")
    parser.add_argument("--filter", help="Enable post WLS filter",action="store_true")
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument("--webcam", help="connect to real camera", action="store_true")
    parser.add_argument("--BM", help="switch to BM alogrithm for depth inference", action="store_true")
    parser.add_argument("--debug", help="save data source for replay", action="store_true")
    parser.add_argument("--visual", help="result visualization", action="store_true")
    parser.add_argument("--save_result", help="inference result save", action="store_true")
    parser.add_argument("--save_path",help="path for result saving",type=str,default="runs/detect/test/")
    args = parser.parse_args()
    # %% keep the heavy work inside main() so names resolve as fast locals
    main()
# %%
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,733 | cyins/YOLOv5-SGBM | refs/heads/master | /listener.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: :
@Date :2021/04/23 21:50:37
@Author :xia
@version :1.0
'''
import rospy,cv2,socket,numpy,time
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import PolygonStamped
from multiprocessing import Process
def Axiscallback(data):
    """Log every avoid-object position message received on the topic."""
    caller = rospy.get_caller_id()
    rospy.loginfo(caller + 'I heard %s', data)
def Imgcallback(data):
    """Render each incoming ROS Image message in an OpenCV preview window."""
    converter = CvBridge()
    frame = converter.imgmsg_to_cv2(data)
    cv2.imshow('ROS Image', frame)
    cv2.waitKey(1)
def listener():
    """Register the two subscribers and block until the node is shut down."""
    # In ROS, nodes are uniquely named; anonymous=True makes rospy pick a
    # unique node name so several listeners can run simultaneously without
    # kicking each other off.
    rospy.init_node('listener', anonymous=True)
    subscriptions = (
        ('Avoid_Object_Position', PolygonStamped, Axiscallback),
        ('Postproc_Images', Image, Imgcallback),
    )
    for topic, msg_type, callback in subscriptions:
        rospy.Subscriber(topic, msg_type, callback)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
if __name__ == '__main__':
    # entry point: run the ROS listener node
    listener()
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,734 | cyins/YOLOv5-SGBM | refs/heads/master | /sm_test.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Description: : Stereo Matching test
@Date :2021/05/28 09:47:18
@Author :Yyc
@version :1.0
'''
import os,logging,sys,argparse,time,math,queue
from datetime import date
today = date.today()
import cv2
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
from utils.img_preprocess import Image_Rectification
from utils.Stereo_Application import Stereo_Matching,reproject_3dcloud
from utils.dataset import DATASET_NAMES,loadfiles,loadcam
from utils.stereoconfig import stereoCamera
from utils.general import confirm_dir,timethis,timeblock,socket_client,calib_type,camera_mode,matching_points_gen
def sm_run():
    """Run the stereo-matching test pipeline over a camera or file source.

    Builds a Stereo_Matching model from the module-level ``args`` namespace
    (parsed in ``__main__``), rectifies each stereo frame, computes the
    disparity map and 3-D reprojection, overlays detection points loaded
    from ``runs/detect/test/txt``, and displays/saves the colorized depth
    and matching images until 'q' is pressed.
    """
    #init stereo matching model
    cam_mode = camera_mode(args.cam_type)
    path_name = confirm_dir(args.save_path,today.strftime("%Y%m%d"))
    # The run directory name encodes every tuning parameter so results from
    # different configurations never overwrite each other.
    if args.BM:
        path_name = confirm_dir(path_name,'stereoBM'+\
            '_filter_'+str(args.filter)+\
            '_'+str(args.sm_lambda)+\
            '_'+str(args.sm_sigma)+\
            '_'+str(args.sm_UniRa)+\
            '_'+str(args.sm_numdi)+\
            '_'+str(args.sm_mindi)+\
            '_'+str(args.sm_block)+\
            '_'+str(args.sm_tt)+\
            '_'+str(args.sm_pfc)+\
            '_'+str(args.sm_pfs)+\
            '_'+str(args.sm_pft)+\
            '_'+str(args.sm_sws)+\
            '_'+str(args.sm_sr)+\
            '_'+str(args.sm_d12md)+\
            '_'+str(args.cam_type))
    else:
        path_name = confirm_dir(path_name,'stereoSGBM'+\
            '_filter_'+str(args.filter)+\
            '_'+str(args.sm_lambda)+\
            '_'+str(args.sm_sigma)+\
            '_'+str(args.sm_UniRa)+\
            '_'+str(args.sm_numdi)+\
            '_'+str(args.sm_mindi)+\
            '_'+str(args.sm_block)+\
            '_'+str(args.sm_tt)+\
            '_'+str(args.sm_pfc)+\
            '_'+str(args.sm_sws)+\
            '_'+str(args.sm_sr)+\
            '_'+str(args.sm_d12md)+\
            '_'+str(args.cam_type))
    # NOTE(review): sm_model is not defined before this check — if
    # Stereo_Matching.count were nonzero on entry, this 'del' would raise
    # NameError. Harmless only because count is 0 on a fresh process.
    if Stereo_Matching.count != 0:
        del sm_model
    sm_model = Stereo_Matching(cam_mode.mode, args.BM, args.filter,\
        args.sm_lambda, args.sm_sigma, args.sm_UniRa,\
        args.sm_numdi, args.sm_mindi, args.sm_block, args.sm_tt,\
        args.sm_pfc, args.sm_pfs, args.sm_pft,\
        args.sm_sws, args.sm_sr, args.sm_d12md, path_name)
    #data source configuration
    if args.webcam:
        dataset = loadcam(args.source, args.fps, args.img_size, args.save_path, args.debug, cam_mode.mode.value)
        camera_config = stereoCamera(mode=cam_mode.mode.value,height=cam_mode.size[1],width=cam_mode.size[0])
    else:
        dataset = loadfiles(args.source, args.img_size, args.save_path)
        camera_config = stereoCamera(mode=cam_mode.mode.value,height=cam_mode.size[1],width=cam_mode.size[0])
    #image calibration
    # maxsize=1 keeps only the newest disparity result from the worker
    disparity_queue = queue.Queue(maxsize=1)
    for _,img_left,img_right,_,TimeStamp,_ in dataset:
        # with timeblock('process'):
        if dataset.mode == 'image' or dataset.mode == 'webcam':
            frame = str(dataset.count)
        else:
            frame = str(dataset.count)+'-'+str(dataset.frame)
        img_raw, img_ai, img_left, img_right, gain, padding=Image_Rectification(camera_config, img_left, img_right, imgsz=args.img_size, debug=True, UMat=args.UMat, cam_mode=cam_mode.mode.value)
        sm_model.run(img_left,img_right,camera_config.Q,disparity_queue,args.UMat,args.filter)
        disparity,color_3d = disparity_queue.get()
        # print('disparity max: %.2f;min: %.2f'%(np.amax(disparity),np.amin(disparity)),end='\r')
        # Load previously exported detection points ("[y,x]" per line) for
        # this frame; missing files are tolerated (best-effort overlay).
        points = []
        try:
            with open(os.path.join('runs/detect/test/txt',str(dataset.count)+'.txt'),'r') as f:
                files = f.readlines()
                for point in files:
                    points.append([int(point.split(',')[1].split(']')[0]),int(point.split(',')[0][1:])])
        except Exception as e:
            print(e,end='\r')
        stereo_merge = matching_points_gen(disparity,img_left,img_right,points,[0,0])
        # Strip the vertical letterbox padding from every buffer so only real
        # image rows are displayed/saved.
        if padding != 0:
            stereo_merge = np.ravel(stereo_merge)
            stereo_merge = stereo_merge[padding[0]*args.img_size[0]*2:(-(padding[0])*args.img_size[0]*2)]
            stereo_merge = np.reshape(stereo_merge,(-1,832))
            img_ai = np.ravel(img_ai)
            img_ai = img_ai[padding[0]*args.img_size[0]*3:(-(padding[0])*args.img_size[0]*3)]
            img_ai = np.reshape(img_ai,(-1,416,3))
            disparity = np.ravel(disparity)
            disparity = disparity[padding[0]*args.img_size[0]:(-(padding[0])*args.img_size[0])]
            disparity = np.reshape(disparity,(-1,416))
        # print('disparity max: %.2f;min: %.2f'%(np.amax(disparity),np.amin(disparity)),end='\r')
        minVal = np.amin(disparity)
        maxVal = np.amax(disparity)
        # Z channel of the reprojected cloud, converted mm -> m (divide by 1000)
        color_3d = np.ravel(color_3d[:,:,2])
        color_3d = np.divide(color_3d,1000)
        color_3d = color_3d[padding[0]*args.img_size[0]:(-(padding[0])*args.img_size[0])]
        color_3d = np.reshape(color_3d,(-1,416))
        # print('distance max: %.2f;min: %.2f'%(np.amax(color_3d),np.amin(color_3d)),end='\r')
        # minVal = np.amin(color_3d)
        # maxVal = np.amax(color_3d)
        # reproject_3dcloud(img_ai,disparity,camera_config.focal_length,camera_config.baseline)
        # Normalize disparity into [0,255] and colorize for visualization.
        disparity_color = cv2.applyColorMap(cv2.convertScaleAbs(disparity, alpha=255.0/(maxVal-minVal),beta=-minVal*255.0/(maxVal-minVal)), cv2.COLORMAP_JET)
        color_merge = cv2.hconcat([disparity_color,img_ai])
        cv2.imshow('color',color_merge)
        cv2.imshow('object matching',stereo_merge)
        depth_path = confirm_dir(path_name,'depth')
        file_name = os.path.join(depth_path,dataset.file_name)
        cv2.imwrite(file_name,color_merge)
        matching_path = confirm_dir(path_name,'matching')
        file_name = os.path.join(matching_path,dataset.file_name)
        cv2.imwrite(file_name,stereo_merge)
        if cv2.waitKey(1) == ord('q'):
            break
    time.sleep(2)
if __name__ == '__main__':
    tt0=time.time()  # NOTE(review): start timestamp is recorded but never used
    # parameter input with model start up
    parser = argparse.ArgumentParser()
    # data source / transport options
    parser.add_argument("--source", help="The data source for model input", type=str, default='0')
    parser.add_argument("--img_size", help="The data size for model input", nargs='+', type=int, default=[416,416])
    parser.add_argument("--tcp_port", help="tcp port", type=int, default=9191)
    parser.add_argument("--tcp_ip", help="tcp ip", type=str, default='192.168.3.181')
    parser.add_argument("--out_range", help="The data size for model input", nargs='+', type=float, default=[0.5,1])
    # stereo-matching tuning parameters (forwarded to Stereo_Matching)
    parser.add_argument("--sm_lambda", help="Stereo matching post filter parameter lambda", type=float, default=8000)
    parser.add_argument("--sm_sigma", help="Stereo matching post filter parameter sigmacolor", type=float, default=2.0)
    parser.add_argument("--sm_UniRa", help="Stereo matching post filter parameter UniquenessRatio", type=int, default=5)
    parser.add_argument("--sm_numdi", help="Stereo matching max number disparity", type=int, default=64)
    parser.add_argument("--sm_mindi", help="Stereo matching min number disparity", type=int, default=-5)
    parser.add_argument("--sm_block", help="Stereo matching blocksize", type=int, default=9)
    parser.add_argument("--sm_tt", help="Stereo matching blocksize", type=int, default=5)
    parser.add_argument("--sm_pfc", help="Stereo matching PreFilterCap", type=int, default=63)
    parser.add_argument("--sm_pfs", help="Stereo matching PreFilterSize", type=int, default=9)
    parser.add_argument("--sm_pft", help="Stereo matching PreFilterType", type=int, default=1)
    parser.add_argument("--sm_sws", help="Stereo matching SpeckleWindowSize", type=int, default=50)
    parser.add_argument("--sm_sr", help="Stereo matching SpeckleRange", type=int, default=2)
    parser.add_argument("--sm_d12md", help="Stereo matching Disp12MaxDiff", type=int, default=1)
    # runtime / device options
    parser.add_argument("--score", help="inference score threshold", type=float, default=0)
    parser.add_argument("--fps", help="The webcam frequency", type=int, default=1)
    parser.add_argument("--cam_type", help="0: OV9714, 1: AR0135 1280X720; 2: AR0135 1280X960; 3:AR0135 416X416; 4:AR0135 640X640; 5:AR0135 640X480; 6:MIDDLEBURY 416X360", type=int, default=5)
    parser.add_argument("--ratio", help="ratio for distance calculate", type=float, default=0.05)
    parser.add_argument("--device", help="device on which model runs", type=str,default='pc')
    parser.add_argument("--UMat", help="Use opencv with openCL",action="store_true")
    parser.add_argument("--filter", help="Enable post WLS filter",action="store_true")
    parser.add_argument("--verbose", help="increase output verbosity", action="store_true")
    parser.add_argument("--webcam", help="connect to real camera", action="store_true")
    parser.add_argument("--BM", help="switch to BM alogrithm for depth inference", action="store_true")
    parser.add_argument("--debug", help="save data source for replay", action="store_true")
    parser.add_argument("--visual", help="result visualization", action="store_true")
    parser.add_argument("--save_result", help="inference result save", action="store_true")
    parser.add_argument("--save_path",help="path for result saving",type=str,default="runs/detect/test/middlebury")
    args = parser.parse_args()
    print(args)
    # %% run inside a function: local-variable lookups are faster than globals
    sm_run()
48,735 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/general.py | '''
Author : Noah
Date : 20210408
function: common-basic function database
'''
import os,sys,logging,cv2,time,functools,socket
from pathlib import Path
import numpy as np
from contextlib import contextmanager
from enum import Enum
def timethis(func):
    """Decorator that prints the wall-clock runtime of every call.

    After each invocation a line "<module>.<name> : <seconds>" is printed;
    the wrapped function's return value is passed through unchanged.
    """
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print('{}.{} : {:.3f}'.format(func.__module__, func.__name__, elapsed))
        return result
    return timed
@contextmanager
def timeblock(label):
    """Context manager that prints the elapsed wall-clock time of a code block.

    The line "<label> : <seconds>" ends with '\\r' so consecutive
    measurements overwrite each other on the terminal.
    """
    begin = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - begin
        print('{} : {}'.format(label, elapsed), end='\r')
def get_new_size(img, scale):
    """Return the (width, height) of *img* scaled by *scale*, as truncated ints.

    Accepts both plain ndarrays and cv2.UMat (which must be downloaded with
    .get() before its shape can be read).
    """
    mat = img.get() if isinstance(img, cv2.UMat) else img
    h, w = mat.shape[:2]
    return (int(w * scale), int(h * scale))
def get_max_scale(img, max_w, max_h):
    """Return the largest down-scale factor (capped at 1) that fits *img*
    inside a max_w x max_h box while preserving aspect ratio.

    Accepts both plain ndarrays and cv2.UMat.
    """
    mat = img.get() if isinstance(img, cv2.UMat) else img
    h, w = mat.shape[:2]
    return min(max_w / w, max_h / h, 1)
class AutoScale:
    """Lazily scale an image down to fit inside a (max_w, max_h) box.

    The scale factor and target size are computed eagerly; the resized
    image itself is produced on first access of ``new_img`` and cached.
    """
    def __init__(self, img, max_w, max_h):
        self._src_img = img
        self.scale = get_max_scale(img, max_w, max_h)
        self._new_size = get_new_size(img, self.scale)
        self.__new_img = None  # cache for the lazily resized image
    @property
    def size(self):
        """Target (width, height) after scaling."""
        return self._new_size
    @property
    def new_img(self):
        """The resized image (computed once, then cached)."""
        if self.__new_img is None:
            self.__new_img = cv2.resize(
                self._src_img, self._new_size, interpolation=cv2.INTER_AREA)
        return self.__new_img
# @timethis
def letterbox(img, new_wh=(416, 416), color=(114, 114, 114)):
    """Scale *img* to fit inside new_wh, then pad it to exactly new_wh.

    Returns a tuple of:
      - the padded image,
      - the effective source size (new_wh / scale) as floats,
      - the padding applied as [top, bottom, left, right].
    """
    scaler = AutoScale(img, *new_wh)
    resized = scaler.new_img
    if isinstance(resized, cv2.UMat):
        h, w = resized.get().shape[:2]
    else:
        h, w = resized.shape[:2]
    extra_h = new_wh[1] - h
    extra_w = new_wh[0] - w
    # split the leftover evenly; any odd pixel goes to top/left
    bottom = int(extra_h / 2)
    top = extra_h - bottom
    right = int(extra_w / 2)
    left = extra_w - right
    padding = [top, bottom, left, right]
    resized = cv2.copyMakeBorder(resized, top, bottom, left, right,
                                 cv2.BORDER_CONSTANT, value=color)
    logging.debug('image padding: %s', padding)  #cp3.5
    return resized, (new_wh[0] / scaler.scale, new_wh[1] / scaler.scale), padding
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy pixel coordinates from img1_shape to img0_shape.

    Args:
        img1_shape: (height, width) the coords currently refer to.
        coords: numpy array of boxes, columns 0-3 are x1,y1,x2,y2 (modified
            in place).
        img0_shape: (height, width) to map the coords onto.
        ratio_pad: optional ((gain, ...), (pad_x, pad_y)); when None both
            gain and padding are derived from the two shapes.

    Returns:
        The coords array, rescaled and clipped to img0_shape.
    """
    if ratio_pad is None:
        # gain = old / new; padding centers the scaled image
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2
    else:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1]
    coords[:, [0, 2]] -= pad_x
    coords[:, [1, 3]] -= pad_y
    coords[:, :4] /= gain
    return clip_coords(coords, img0_shape)
def clip_coords(boxes, img_shape):
    """Clip xyxy bounding boxes in place to (height, width) image bounds."""
    height, width = img_shape[0], img_shape[1]
    # x columns clip to width, y columns to height
    for col, limit in ((0, width), (1, height), (2, width), (3, height)):
        boxes[:, col] = clip_axis(boxes[:, col], limit)
    return boxes
def clip_axis(axis, limit):
    """Clamp every element of *axis* into [0, limit], in place.

    Values above *limit* become *limit* first, then negative values become
    0; the (mutated) input array is returned.
    """
    axis[axis > limit] = limit
    axis[axis < 0] = 0
    return axis
def confirm_dir(root_path, new_path):
    """Ensure that (root_path/new_path) exists as a directory and return it.

    Args:
        root_path: existing base directory.
        new_path: directory (possibly nested, e.g. 'a/b') to create below
            root_path.

    Returns:
        The joined path, guaranteed to exist.
    """
    path = os.path.join(root_path, new_path)
    # makedirs(..., exist_ok=True) fixes two defects of the old
    # isdir()/mkdir() pair: the check-then-create race (TOCTOU), and the
    # failure when new_path contains intermediate components.
    os.makedirs(path, exist_ok=True)
    return path
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one xyxy bounding box (and optional label) on *img* in place.

    Args:
        x: box as (x1, y1, x2, y2); values are cast to int.
        img: image to draw on (modified in place).
        color: BGR triple; a random color is picked when None.
        label: optional text drawn in a filled box above the rectangle.
        line_thickness: line width; derived from the image size when None.
    """
    # Local import fixes a NameError: this module never imports 'random' at
    # the top, so the color=None path crashed before this fix.
    import random
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] + t_size[1] + 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
        cv2.putText(img, label, (c1[0], c2[1]), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
class socket_client():
    """
    @description : TCP client that streams images, disparity maps and object
                   coordinates to a remote server using a simple
                   header/acknowledge text protocol
    ---------
    @function :
    -------
    """
    def __init__(self,address=('192.168.3.181',9191)):
        """
        @description : connect to the tcp server and keep the socket
        ---------
        @param : address: server address as an (ip, port) tuple
        -------
        @Returns : None
        -------
        """
        self.address = address
        try:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect(self.address)
            print('connect to: ',address)
        except socket.error as msg:
            # NOTE(review): sys.exit(1) inside a constructor terminates the
            # whole process on connection failure — callers cannot recover.
            self.sock = None
            print(msg)
            sys.exit(1)
    # @timethis
    def send(self,image,disparity,padding,coords,frame,imgsz=(416,416),quality=0.5,debug=False):
        """
        @description : send a packet to tcp server
        ---------
        @param : image: the cv2 image mat (imgsz[0],imgsz[1])
        @param : disparity: disparity map aligned with image
        @param : padding: vertical letterbox padding; padding[0] rows are
                 stripped from the top of each buffer before sending
        @param : coords: the coords of the opposite angle of the object rectangle,(n,(x1,y1,z1,x2,y2,z2))
        @param : frame: frame number of the pipe stream
        @param : imgsz: the image resolution(height,width), reserved
        @param : quality: type float ,in the range [0,1], reserved
        @param : debug: type bool, if true, add a image in imgsz shape to tcp transmission packet
        -------
        @Returns : None
        -------
        """
        # answer starts as [] (never equal to any str) so each wait-loop below
        # runs at least once before the expected acknowledgement arrives
        answer = []
        if debug:
            # NOTE(review): encode_param is never used — the image is sent as
            # raw ravelled bytes, not JPEG-encoded.
            encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(quality*100)]
            # flatten the image first: sockets cannot send an image object directly
            stringData = np.ravel(image)
            stringData = stringData[padding[0]*imgsz[0]*3:(padding[0]+312)*imgsz[0]*3]
            disparity = np.ravel(disparity)
            disparity = disparity[padding[0]*imgsz[0]:(padding[0]+312)*imgsz[0]]
            minVal = np.amin(disparity)
            maxVal = np.amax(disparity)
            disparity = np.reshape(disparity,(312,416))
            # normalize disparity to [0,255] and colorize for the远端 viewer
            disparity_color = cv2.applyColorMap(cv2.convertScaleAbs(disparity, alpha=255.0/(maxVal-minVal),beta=-minVal*255.0/(maxVal-minVal)), cv2.COLORMAP_JET)
            colorsz = disparity_color.shape
            # send the header (payload lengths and metadata) first
            header='$Image,'+str(len(stringData))+','+str(len(np.ravel(disparity_color)))+','+str(312)+','+str(416)+','+str(colorsz[0])+','+str(colorsz[1])+','+str(coords[0][0])+','+str(coords[0][1])+','+str(frame)
            self.sock.sendall(header.encode('utf-8').ljust(64))
            while answer != 'Ready for Image':
                answer = self.sock.recv(32).decode('utf-8')
            self.sock.sendall(stringData)
            while answer != 'Ready for Disparity Image':
                answer = self.sock.recv(64).decode('utf-8')
            self.sock.sendall(np.ravel(disparity_color))
        else:
            header='$Image,'+str('0')+','+str(coords[0][0])+','+str(coords[0][1])
            self.sock.sendall(header.encode('utf-8').ljust(32))
        while answer != 'Ready for Coordinates':
            answer = self.sock.recv(32).decode('utf-8')
        coordData = '$Coord'
        for item in coords[1:]:
            coordData += ','
            coordData += str(item[1])
            coordData += ','
            coordData += str(item[2])
            coordData += ','
            coordData += str(item[5])
            coordData += ','
            coordData += str(item[3])
            coordData += ','
            coordData += str(item[4])
            coordData += ','
            coordData += str(item[5])
            coordData += ',*FC'
        # then send the encoded coordinate payload
        self.sock.sendall(coordData.encode('utf-8'))
        while answer != 'Ready for next Frame':
            answer = self.sock.recv(32).decode('utf-8')
    def close(self):
        # Close the socket if the connection was ever established.
        if self.sock:
            print('closing tcp client ...')
            self.sock.close()
    def __del__(self):
        # Best-effort cleanup when the client is garbage-collected.
        self.close()
class calib_type(Enum):
    """
    @description : camera type for image rectification
    ---------
    @function : sequence the camera type by number
    -------
    """
    OV9714_1280_720 = 0  # OV9714 sensor, 1280x720 calibration
    AR0135_1280_720 = 1  # AR0135 sensor, 1280x720
    AR0135_1280_960 = 2  # AR0135 sensor, 1280x960
    AR0135_416_416 = 3   # AR0135 sensor, 416x416
    AR0135_640_640 = 4   # AR0135 sensor, 640x640
    AR0135_640_480 = 5   # AR0135 sensor, 640x480
    MIDDLEBURY_416 = 6   # Middlebury dataset (camera_mode maps this to size (1280, 960))
class camera_mode:
    """
    @description : camera mode for image rectification
    ---------
    @function : map a numeric mode to a calib_type and its image size
    -------
    """
    def __init__(self, mode):
        # Lookup table replaces the if/elif ladder; any unknown mode falls
        # back to the Middlebury configuration, exactly like the old 'else'.
        table = {
            0: (calib_type.OV9714_1280_720, (1280, 720)),
            1: (calib_type.AR0135_1280_720, (1280, 720)),
            2: (calib_type.AR0135_1280_960, (1280, 960)),
            3: (calib_type.AR0135_416_416, (416, 416)),
            4: (calib_type.AR0135_640_640, (640, 640)),
            5: (calib_type.AR0135_640_480, (640, 480)),
        }
        self.mode, self.size = table.get(mode, (calib_type.MIDDLEBURY_416, (1280, 960)))
def matching_points_gen(disparity, img_left, img_right, left_points=None, padding=None):
    """Draw stereo-matching guide lines on the concatenated left|right image.

    For every pair of supplied left-image points an interpolated midpoint is
    inserted, then two horizontal lines are drawn per point: one at the
    constant column offset 416 (thin), and one shifted by the disparity at
    that pixel (thick).

    Args:
        disparity: disparity map aligned with img_left.
        img_left: rectified left image.
        img_right: rectified right image.
        left_points: list of two-element points from the left image;
            empty/None draws nothing.
        padding: two-element vertical padding offsets; defaults to [0, 0].

    Returns:
        The horizontally concatenated image with the match lines drawn.
    """
    # None-sentinels fix two defects of the old signature: shared mutable
    # defaults, and an IndexError at padding[0] when padding defaulted to [].
    if left_points is None:
        left_points = []
    if padding is None:
        padding = [0, 0]
    merge = cv2.hconcat([img_left, img_right])
    if left_points == []:
        return merge
    # %% insert a midpoint between every consecutive pair of points
    raw_points = []
    for i in range(int(len(left_points) / 2)):
        add_point = [int((left_points[2 * i][0] + left_points[2 * i + 1][0]) / 2),
                     int((left_points[2 * i][1] + left_points[2 * i + 1][1]) / 2)]
        raw_points.append(left_points[2 * i])
        raw_points.append(add_point)
        raw_points.append(left_points[2 * i + 1])
    # %% draw the guide lines
    for point in raw_points:
        # NOTE(review): point[1] is used as the x (column) coordinate and
        # point[0] as y, while sm_test loads points as [row, col] — confirm
        # the intended axis order with the producer of these files.
        first_matching_point = [point[1] - 1 + 416 - padding[0], point[0] - 1]
        first_point = [point[1] - 1 - padding[0], point[0] - 1]
        cv2.line(merge, first_point, first_matching_point, color=(0, 255, 0), thickness=1, lineType=cv2.LINE_8)
        sec_matching_point = [int(point[1] - 1 + 416 - disparity[point[1] - 1, point[0] - 1]) - padding[0], point[0] - 1]
        sec_point = [point[1] - 1 + 416 - padding[0], point[0] - 1]
        cv2.line(merge, sec_point, sec_matching_point, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_8)
    return merge
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,736 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/Stereo_Application.py | import numpy as np
import argparse,logging,time,math,os
import cv2
from matplotlib import pyplot as plt
from utils.general import calib_type, timethis
class Cursor:
    """Crosshair cursor for a matplotlib Axes.

    Keeps a horizontal and a vertical line plus a small text readout of the
    current (x, y) position; ``mouse_move`` is intended to be registered as
    a mouse-motion event callback (the hookup is not done in this file).
    """
    def __init__(self, ax):
        self.ax = ax
        self.lx = ax.axhline(color='k')  # the horiz line
        self.ly = ax.axvline(color='k')  # the vert line
        # text location in axes coords
        self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)
    def mouse_move(self, event):
        # ignore motion events outside the axes area
        if not event.inaxes:
            return
        x, y = event.xdata, event.ydata
        # update the line positions
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)
        self.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
        self.ax.figure.canvas.draw()  # force an immediate redraw
class Stereo_Matching:
    """
    @description : Stereo_Matching Algorithm class — wraps OpenCV StereoBM /
                   StereoSGBM plus an optional WLS post filter
    ---------
    @function :
    -------
    """
    count=0  # number of instances created so far (read by callers to decide re-init)
    def __init__(self,cam_mode,BM=False,filter=True,
                 filter_lambda=8000.0,filter_sigma=1.5,
                 filter_unira=5,
                 numdisparity=64, mindis=0, block=9, TextureThreshold=5,
                 prefiltercap=63, prefiltersize=9, prefiltertype=1,
                 SpeckleWindowSize=50, speckleRange=2,disp12maxdiff=1,
                 sf_path=''):
        """
        @description : initialize stereoBM or stereoSGBM alogrithm
        ---------
        @param : cam_mode: reserved
        @param : BM: bool, if false, use stereoSGBM, otherwise, use stereoBM
        @param : filter: bool, enable the WLS post filter (needs a right matcher)
        @param : filter_lambda: float, the lambda parameter of post WLS filter
        @param : filter_sigma: float, the sigmacolor parameter of post WLS filter
        @param : filter_unira: int, Margin in percentage by which the best (minimum) computed cost function value should "win" the second best value to consider the found match correct. Normally, a value within the 5-15 range is good enough.
        @param : numdisparity: int, Maximum disparity minus minimum disparity. The value is always greater than zero. In the current implementation, this parameter must be divisible by 16.
        @param : mindis: int, Minimum possible disparity value. Normally, it is zero but sometimes rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
        @param : block: int, Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
        @param : TextureThreshold: int, minimum texture for a pixel to get a disparity (BM only)
        @param : prefiltercap: int, Truncation value for the prefiltered image pixels.
        @param : prefiltersize: int, Pre-processing filter window size, odd, in [5,255] (BM only)
        @param : prefiltertype: int, pre filter type: normalized response or x-Sobel (BM only)
        @param : SpeckleWindowSize: int, Maximum size of smooth disparity regions to consider their noise speckles and invalidate. 0 disables speckle filtering; 50-200 is typical.
        @param : speckleRange: int, Maximum disparity variation within each connected component (implicitly multiplied by 16). 1 or 2 is usually enough.
        @param : disp12maxdiff: int, Maximum allowed difference (in integer pixel units) in the left-right disparity check. Non-positive disables the check.
        @param : sf_path: str, the stereo algorithm configuration save path ('' skips saving)
        -------
        @Returns :
        -------
        """
        t0 = time.time()
        self.BM = BM
        Stereo_Matching.count += 1
        self.filter_en = filter
        self.lamdba=filter_lambda
        self.sigma=filter_sigma
        self.unira=filter_unira
        if not self.BM:
            self.window_size = 3
            '''
            #The second parameter controlling the disparity smoothness.
            # The larger the values are, the smoother the disparity is.
            # P1 is the penalty on the disparity change by plus or minus 1 between neighbor pixels.
            # P2 is the penalty on the disparity change by more than 1 between neighbor pixels.
            # The algorithm requires P2 > P1 .
            # See stereo_match.cpp sample where some reasonably good P1 and P2 values are shown (like 8*number_of_image_channels*blockSize*blockSize and 32*number_of_image_channels*blockSize*blockSize , respectively).
            '''
            self.left_matcher = cv2.StereoSGBM_create(
                minDisparity=mindis,
                numDisparities=numdisparity-mindis,  # max_disp has to be dividable by 16 f. E. HH 192, 256
                blockSize=block,
                P1=8 * 3 * self.window_size ** 2,
                P2=32 * 3 * self.window_size ** 2,
                disp12MaxDiff=disp12maxdiff,  # fix: honor the ctor argument (was hard-coded to 1)
                uniquenessRatio=self.unira,
                speckleWindowSize=SpeckleWindowSize,
                speckleRange=speckleRange,
                preFilterCap=prefiltercap,
                mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
            )
            print('\nSGBM Initial Done. (%.2fs)'%(time.time() - t0)) #cp3.5
        else:
            self.left_matcher = cv2.StereoBM_create(numdisparity, block)
            self.left_matcher.setUniquenessRatio(self.unira)
            self.left_matcher.setTextureThreshold(TextureThreshold)
            self.left_matcher.setMinDisparity(mindis)
            self.left_matcher.setDisp12MaxDiff(disp12maxdiff)
            self.left_matcher.setSpeckleRange(speckleRange)
            self.left_matcher.setSpeckleWindowSize(SpeckleWindowSize)
            self.left_matcher.setBlockSize(block)
            self.left_matcher.setNumDisparities(numdisparity)
            self.left_matcher.setPreFilterCap(prefiltercap)
            self.left_matcher.setPreFilterSize(prefiltersize)
            self.left_matcher.setPreFilterType(prefiltertype)
            # self.left_matcher.setROI1(0)
            # self.left_matcher.setROI2(0)
            # self.left_matcher.setSmallerBlockSize(0)
            print('\nBM Initial Done. (%.2fs)'%(time.time() - t0)) #cp3.5
        if self.filter_en:
            # the WLS filter needs a matched right-view matcher for its
            # left/right consistency confidence map
            self.right_matcher = cv2.ximgproc.createRightMatcher(self.left_matcher)
            self.filter = cv2.ximgproc.createDisparityWLSFilter(self.left_matcher)
            self.filter.setLambda(self.lamdba)
            self.filter.setSigmaColor(self.sigma)
        if sf_path != '':
            self.write_file(sf_path)
    def change_parameters(self,filter_unira=-1,filter_lambda=-1,filter_sigma=-1):
        """
        @description : dynamically adjust matcher/filter parameters; a
                       negative value leaves that parameter untouched
        ---------
        @param : filter_lambda, float, the lambda parameter of post WLS filter
        @param : filter_sigma, float, the sigmacolor parameter of post WLS filter
        @param : filter_unira, int, the UniquenessRatio parameter of the stereo alogrithm filter
        -------
        @Returns : -1 ,error, 0, normal
        -------
        """
        if Stereo_Matching.count == 0:
            print ('No Stereo Matching instance find.')
            return -1
        if filter_unira >= 0:
            # fix: the matcher is stored as self.left_matcher; the old code
            # referenced a nonexistent self.stereo and raised AttributeError.
            self.left_matcher.setUniquenessRatio(filter_unira)
            print('set UniquenessRatio: %d'%filter_unira)
        # NOTE(review): self.filter only exists when filter_en was True at
        # construction; the two branches below assume the filter is enabled.
        if filter_lambda >= 0:
            self.filter.setLambda(filter_lambda)
            print('set filter Lambda: %f'%filter_lambda)
        if filter_sigma >= 0:
            self.filter.setSigmaColor(filter_sigma)
            print('set filter sigma: %f'%filter_sigma)
        return 0
    def write_file(self,path):
        """Dump the current matcher/filter configuration to <path>/stereo_config.xml."""
        self.sf = cv2.FileStorage()
        file_path = os.path.join(path,'stereo_config.xml')
        self.sf.open(file_path,cv2.FileStorage_WRITE)
        self.sf.write('datetime', time.asctime())
        if self.BM:
            self.sf.startWriteStruct('stereoBM',cv2.FileNode_MAP)
        else:
            self.sf.startWriteStruct('stereoSGBM',cv2.FileNode_MAP)
        # parameters shared by both algorithms
        self.sf.write('NumDisparities',self.left_matcher.getNumDisparities())
        self.sf.write('MinDisparity',self.left_matcher.getMinDisparity())
        self.sf.write('BlockSize',self.left_matcher.getBlockSize())
        self.sf.write('Disp12MaxDiff',self.left_matcher.getDisp12MaxDiff())
        self.sf.write('SpeckleRange',self.left_matcher.getSpeckleRange())
        self.sf.write('SpeckleWindowSize',self.left_matcher.getSpeckleWindowSize())
        self.sf.write('PreFilterCap',self.left_matcher.getPreFilterCap())
        self.sf.write('UniquenessRatio',self.left_matcher.getUniquenessRatio())
        if self.BM:
            self.sf.write('PreFilterSize',self.left_matcher.getPreFilterSize())
            self.sf.write('PreFilterType',self.left_matcher.getPreFilterType())
            self.sf.write('ROI1',self.left_matcher.getROI1())
            self.sf.write('ROI2',self.left_matcher.getROI2())
            self.sf.write('SmallerBlockSize',self.left_matcher.getSmallerBlockSize())
            self.sf.write('TextureThreshold',self.left_matcher.getTextureThreshold())
        else:
            self.sf.write('Mode',self.left_matcher.getMode())
        self.sf.endWriteStruct()
        if self.filter_en:
            self.sf.startWriteStruct('DisparityWLSFilter',cv2.FileNode_MAP)
            self.sf.write('ConfidenceMap',self.filter.getConfidenceMap())
            self.sf.write('DepthDiscontinuityRadius',self.filter.getDepthDiscontinuityRadius())
            self.sf.write('Lambda',self.filter.getLambda())
            self.sf.write('LRCthresh',self.filter.getLRCthresh())
            self.sf.write('ROI',self.filter.getROI())
            self.sf.write('SigmaColor',self.filter.getSigmaColor())
            self.sf.endWriteStruct()
        self.sf.release()
    def __del__(self):
        class_name=self.__class__.__name__
        print ('\n',class_name,"release")
    # @timethis
    def run(self,ImgL,ImgR,Q,Queue,UMat=False,filter=True):
        """
        @description :compute the disparity of ImgL and ImgR and put the (disparity, 3-D cloud) pair to Queue
        ---------
        @param : ImgL, Gray image taked by the left camera
        @param : ImgR, Gray image taked by the right camera
        @param : Q, 4x4 reprojection matrix for cv2.reprojectImageTo3D
        @param : Queue, the data container of python API queue, used for data interaction between thread
        @param : UMat, bool, if true, the matcher output is a UMat (GPU) and must be downloaded with .get()
        @param : filter, bool, unused — the constructor's filter flag (self.filter_en) decides instead
        -------
        @Returns : via Queue: (disparity, color_3d) where disparity has the same shape as ImgL
        -------
        """
        t0=time.time()
        # matcher output is fixed-point with 4 fractional bits, hence /16.0
        if not self.filter_en:
            if not self.BM:
                if UMat:
                    disparity_left = self.left_matcher.compute(ImgL, ImgR, False).get().astype(np.float32) / 16.0
                else:
                    disparity_left = self.left_matcher.compute(ImgL, ImgR, False).astype(np.float32) / 16.0
            else:
                if UMat:
                    disparity_left = self.left_matcher.compute(ImgL,ImgR).get().astype(np.float32) / 16.0
                else:
                    disparity_left = self.left_matcher.compute(ImgL,ImgR).astype(np.float32) / 16.0
            logging.info('\nBM Done. (%.2fs)',(time.time() - t0)) #cp3.5
            color_3d = cv2.reprojectImageTo3D(disparity_left,Q).reshape(-1,416,3)
            Queue.put((disparity_left,color_3d))
        else:
            # the WLS filter needs both left->right and right->left disparities
            if not self.BM:
                if UMat:
                    disparity_left = self.left_matcher.compute(ImgL, ImgR, False).get().astype(np.float32) / 16.0
                    disparity_right = self.right_matcher.compute(ImgR, ImgL, False).get().astype(np.float32) / 16.0
                else:
                    disparity_left = self.left_matcher.compute(ImgL, ImgR, False).astype(np.float32) / 16.0
                    disparity_right = self.right_matcher.compute(ImgR, ImgL, False).astype(np.float32) / 16.0
            else:
                if UMat:
                    disparity_left = self.left_matcher.compute(ImgL,ImgR).get().astype(np.float32) / 16.0
                    disparity_right = self.right_matcher.compute(ImgR, ImgL).get().astype(np.float32) / 16.0
                else:
                    disparity_left = self.left_matcher.compute(ImgL,ImgR).astype(np.float32) / 16.0
                    disparity_right = self.right_matcher.compute(ImgR, ImgL).astype(np.float32) / 16.0
            logging.info('\nBM Done. (%.2fs)',(time.time() - t0)) #cp3.5
            disparity=self.filter.filter(disparity_left, ImgL, disparity_map_right=disparity_right)
            color_3d = cv2.reprojectImageTo3D(disparity,Q).reshape(-1,416,3)
            Queue.put((disparity,color_3d))
def disparity_centre(raw_box,ratio,disparity,depth_map,focal,baseline,pixel_size,mindisparity):
    """Estimate a single depth value for a detection box from the depth map.

    Tiny boxes (sampling step rounds to zero) are sampled at every pixel;
    otherwise nine 5x5 neighbourhoods spread over a 3x3 grid inside the box
    are averaged, and the final answer is the median of the collected values.

    @param :
        raw_box: (x1,y1,x2,y2) opposite-corner coordinates of the prediction box
        ratio: fraction of the box size used as the per-neighbourhood sampling step
        disparity: type array, disparity map (only used by commented-out code)
        depth_map: type array, depth map that is actually sampled
        focal: focal length in pixel unit (currently unused; kept for API stability)
        baseline: baseline in mm unit (currently unused; kept for API stability)
        pixel_size: pixel_size in mm unit (currently unused; kept for API stability)
        mindisparity: validity floor; samples at or below it are rejected
    @Returns :
        float depth estimate, or -1 when no valid sample was found
    """
    '''
    logic: if the pixel number in the box in smaller than 225,than calculate the whole box pixels and get the average,
    otherwise,
    '''
    depth=[]
    #%%%% TODO: split the box into 9 sub-regions
    # print(raw_box)
    dx,dy=int((raw_box[2]-raw_box[0])*ratio),int((raw_box[3]-raw_box[1])*ratio)
    if (dx == 0) and (dy == 0):
        # %% box too small to sub-sample: collect every pixel, median later
        # NOTE(review): this branch indexes depth_map[x, y] while the other
        # branch uses depth_map[ny, nx] — one order is likely transposed;
        # confirm against the map layout.
        for i in range(raw_box[2]-raw_box[0]):
            # print('\ndisparity row:',end=' ')
            for j in range(raw_box[3]-raw_box[1]):
                # print(disparity[(raw_box[0]+i),(raw_box[1]+j)],end=',')
                # if disparity[(raw_box[0]+i),(raw_box[1]+j)] > -11:
                # depth.append(disparity[(raw_box[0]+i),(raw_box[1]+j)])
                depth.append(depth_map[(raw_box[0]+i),(raw_box[1]+j)])
                # print(depth,end='\r')
    else:
        # Box centre plus one-sixth steps give a 3x3 grid of sample centres.
        cx,cy=int((raw_box[0]+raw_box[2])/2),int((raw_box[1]+raw_box[3])/2)
        dw,dh=int((raw_box[2]-raw_box[0])/6),int((raw_box[3]-raw_box[1])/6)
        cxcy=[(cx-2*dw,cy-2*dh),(cx,cy-2*dh),(cx+2*dw,cy-2*dh),\
              (cx-2*dw,cy),(cx,cy),(cx+2*dw,cy),\
              (cx-2*dw,cy+2*dh),(cx,cy+2*dh),(cx+2*dw,cy+2*dh)]
        # print(cxcy)
        # print(dx,dy)
        #%%%% TODO: average the depth inside each 5x5 neighbourhood
        for x_centre,y_centre in cxcy:
            p=[-2,-1,0,1,2]
            d=np.zeros((25,),dtype=float)
            dis_mean=0.
            for i in range(5):
                for j in range(5):
                    nx,ny=int(x_centre+p[i]*dx),int(y_centre+p[j]*dy)
                    # print('(%d,%d)'%(nx,ny),end=' ')
                    # d.flat[5*i+j]=disparity[ny,nx]
                    d.flat[5*i+j]=depth_map[ny,nx]
            d=d.ravel()
            # Reject samples below the validity floor.
            if mindisparity < 0:
                d=d[d>(mindisparity-1.)]
            else:
                d=d[d>-1.]
            d=np.sort(d,axis=None)
            # print(d,end='\r')
            # With enough samples, trim one extreme at each end before averaging;
            # otherwise dis_mean stays 0 and is recorded as-is.
            if len(d) >= 5:
                d=np.delete(d,[0,-1])
                dis_mean = d.mean()
            depth.append(dis_mean)
    # %%%% TODO: take the median over the per-region values
    depth = np.abs(depth)
    depth.sort()
    if len(depth) == 0:
        temp_dis = -1
    elif (len(depth)%2 == 0) & (len(depth)>1):
        # Even sample count: average the two middle values, unless either is 0
        # (invalid), in which case report -1.
        if (depth[math.floor(len(depth)/2)] != 0) and (depth[math.floor(len(depth)/2)-1] != 0):
            # temp_dis = ((focal*baseline/abs(depth[math.floor(len(depth)/2)]))+(focal*baseline/abs(depth[math.floor(len(depth)/2)-1])))/2
            temp_dis = (depth[math.floor(len(depth)/2)] + depth[math.floor(len(depth)/2)-1])/2
        else:
            temp_dis = -1
    else:
        # Odd sample count: plain middle element, 0 meaning "no valid depth".
        if depth[math.floor(len(depth)/2)] != 0:
            # temp_dis = focal*baseline/abs(depth[math.floor(len(depth)/2)])
            temp_dis = depth[math.floor(len(depth)/2)]
        else:
            temp_dis = -1
    return temp_dis
def remove_invalid(disp_arr, points, colors):
    """Filter a point cloud: drop rows whose disparity equals the global
    minimum or whose 3-D coordinates contain NaN/Inf; the colors array is
    filtered with the same mask so the two stay parallel."""
    has_depth = disp_arr > disp_arr.min()
    finite_rows = np.isfinite(points).all(axis=1)
    keep = has_depth & finite_rows
    return points[keep], colors[keep]
def calc_point_cloud(image, disp, q):
    """Reproject a disparity map into a flat (N, 3) point cloud paired with
    per-pixel colors, with invalid points already filtered out."""
    cloud = cv2.reprojectImageTo3D(disp, q)
    flat_points = cloud.reshape(-1, 3)
    flat_colors = image.reshape(-1, 3)
    flat_disp = disp.reshape(-1)
    return remove_invalid(flat_disp, flat_points, flat_colors)
def project_points(points, colors, r, t, k, dist_coeff, width, height):
    """Project 3-D points into the image plane and keep only those that
    land inside a width x height frame.

    Args:
        points: (N, 3) array of 3-D points.
        colors: (N, 3) array of per-point colors, parallel to *points*.
        r, t: rotation / translation of the virtual camera.
        k: 3x3 camera intrinsics matrix.
        dist_coeff: distortion coefficients for cv2.projectPoints.
        width, height: bounds of the target image.

    Returns:
        (xy, colors): integer pixel coordinates inside the frame and the
        matching subset of *colors*.
    """
    projected, _ = cv2.projectPoints(points, r, t, k, dist_coeff)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented equivalent and keeps the same
    # truncating conversion.
    xy = projected.reshape(-1, 2).astype(int)
    mask = (
        (0 <= xy[:, 0]) & (xy[:, 0] < width) &
        (0 <= xy[:, 1]) & (xy[:, 1] < height)
    )
    return xy[mask], colors[mask]
def calc_projected_image(points, colors, r, t, k, dist_coeff, width, height):
    """Render the colored point cloud into a blank canvas by projecting
    every point and painting its color at the resulting pixel."""
    pixels, pixel_colors = project_points(
        points, colors, r, t, k, dist_coeff, width, height
    )
    canvas = np.zeros((height, width, 3), dtype=colors.dtype)
    canvas[pixels[:, 1], pixels[:, 0]] = pixel_colors
    return canvas
def rotate(arr, anglex, anglez):
    """Apply two chained rotations to *arr*: first about the x axis by
    *anglex*, composed with a second rotation by *anglez*.

    NOTE(review): the second factor is mathematically a rotation about the
    y axis even though the original labeled it "rz"; the numbers are kept
    exactly as before.
    """
    cx, sx = np.cos(anglex), np.sin(anglex)
    cz, sz = np.cos(anglez), np.sin(anglez)
    rot_x = np.array([
        [1.0, 0.0, 0.0],
        [0.0, cx, -sx],
        [0.0, sx, cx],
    ])
    rot_y = np.array([
        [cz, 0.0, sz],
        [0.0, 1.0, 0.0],
        [-sz, 0.0, cz],
    ])
    return rot_x @ rot_y @ arr
def reproject_3dcloud(left_image, disparity, focal_length, tx):
    """Interactive 3-D viewer: reproject *disparity* into a colored point
    cloud and render it, letting the user orbit with the w/a/s/d keys and
    quit with Esc.

    Args:
        left_image: color image aligned with the disparity map.
        disparity: disparity map for the left view.
        focal_length: focal length in pixel units.
        tx: stereo baseline (x translation) used in the Q matrix.

    Blocks inside a cv2.waitKey loop until Esc is pressed.
    """
    image = left_image
    height, width, _ = image.shape
    # Hand-built disparity-to-depth reprojection matrix Q for a rectified pair.
    q = np.array([
        [1, 0, 0, -width/2],
        [0, 1, 0, -height/2],
        [0, 0, 0, focal_length],
        [0, 0, -1/tx, 0]
    ])
    points, colors = calc_point_cloud(image, disparity, q)
    # Virtual camera: identity rotation, pulled back 100 units along z.
    r = np.eye(3)
    t = np.array([0, 0, -100.0])
    k = np.array([
        [focal_length, 0, width/2],
        [0, focal_length, height/2],
        [0, 0, 1]
    ])
    dist_coeff = np.zeros((4, 1))
    def view(r, t):
        # Re-render the cloud from the current virtual-camera pose.
        cv2.imshow('projected', calc_projected_image(
            points, colors, r, t, k, dist_coeff, width, height
        ))
    view(r, t)
    angles = { # key -> (rotation about x, second rotation angle)
        'w': (-np.pi/6, 0),
        's': (np.pi/6, 0),
        'a': (0, np.pi/6),
        'd': (0, -np.pi/6)
    }
    while 1:
        key = cv2.waitKey(0)
        if key not in range(256):
            continue
        ch = chr(key)
        if ch in angles:
            ax, az = angles[ch]
            # Rotate the camera one way and its translation the opposite way
            # so the cloud appears to orbit in place.
            r = rotate(r, -ax, -az)
            t = rotate(t, ax, az)
            view(r, t)
        elif ch == '\x1b':  # esc quits and closes the window
            cv2.destroyAllWindows()
            break
if __name__ == '__main__':
    # CLI entry point: parse the stereo image sources and echo the options.
    parser = argparse.ArgumentParser()
    parser.add_argument('--left_source', type=str, default='data/images/left.png', help='source')
    # BUG FIX: --right_source previously defaulted to left.png (copy-paste
    # error), silently feeding the same image to both sides of the pipeline.
    parser.add_argument('--right_source', type=str, default='data/images/right.png', help='source')
    opt = parser.parse_args()
    print(opt)
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,737 | cyins/YOLOv5-SGBM | refs/heads/master | /utils/img_preprocess.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 27 11:06:14 2021
@author: bynav
"""
# -*- coding: utf-8 -*-
import cv2,time,logging
import numpy as np
from utils.stereoconfig import stereoCamera
import os
from pathlib import Path
from utils.general import timethis,timeblock,calib_type,letterbox
# from pcl import pcl_visualization
# 预处理
def preprocess(img1, img2):
    """Convert a stereo pair to grayscale (when given 3-channel input) and
    equalize each image's histogram to reduce exposure differences
    between the two cameras."""
    # OpenCV loads images in BGR channel order; collapse color inputs to gray.
    if img1.ndim == 3:
        img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    if img2.ndim == 3:
        img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # Histogram equalization improves contrast for the block matcher.
    return cv2.equalizeHist(img1), cv2.equalizeHist(img2)
# 消除畸变
# @timethis
def undistortion(image, camera_matrix, dist_coeff):
    """Return *image* with lens distortion removed via cv2.undistort."""
    return cv2.undistort(image, camera_matrix, dist_coeff)
# 获取畸变校正和立体校正的映射变换矩阵、重投影矩阵
# @param:config是一个类,存储着双目标定的参数:config = stereoconfig.stereoCamera()
# @timethis
def getRectifyTransform(height, width, config):
    """Compute the undistort/rectify remap tables and the reprojection
    matrix Q for a calibrated stereo rig.

    *config* is a stereoCamera-style object carrying the per-camera
    intrinsics, distortion coefficients and the extrinsics (R, T).
    """
    size = (width, height)
    # Rectification: R1/R2 rotate each camera, P1/P2 are the new projection
    # matrices, Q maps disparity to 3-D coordinates.
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
        config.cam_matrix_left, config.distortion_l,
        config.cam_matrix_right, config.distortion_r,
        size, config.R, config.T, alpha=0)
    map1x, map1y = cv2.initUndistortRectifyMap(
        config.cam_matrix_left, config.distortion_l, R1, P1, size, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(
        config.cam_matrix_right, config.distortion_r, R2, P2, size, cv2.CV_32FC1)
    return map1x, map1y, map2x, map2y, Q
# 畸变校正和立体校正
# @timethis
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
    """Apply the precomputed rectification maps to a stereo pair and
    return the remapped (left, right) images."""
    left = cv2.remap(image1, map1x, map1y, cv2.INTER_AREA)
    right = cv2.remap(image2, map2x, map2y, cv2.INTER_AREA)
    return left, right
# 立体校正检验----画线
def draw_line(image1, image2):
    """Place the two rectified images side by side and overlay evenly
    spaced green horizontal lines so rectification quality can be checked
    by eye (matching features should sit on the same line)."""
    height = max(image1.shape[0], image2.shape[0])
    width = image1.shape[1] + image2.shape[1]
    output = np.zeros((height, width, 3), dtype=np.uint8)
    output[:image1.shape[0], :image1.shape[1]] = image1
    output[:image2.shape[0], image1.shape[1]:] = image2
    line_interval = 50  # pixels between consecutive guide lines
    for k in range(height // line_interval):
        y = line_interval * (k + 1)
        cv2.line(output, (0, y), (2 * width, y), (0, 255, 0),
                 thickness=2, lineType=cv2.LINE_AA)
    return output
def resize_convert(imgl_rectified, imgr_rectified, imgsz=640, stride=32):
    """Letterbox both rectified images to *imgsz* and build the detector
    input tensor layout: BGR->RGB with channels moved first (3xHxW)."""
    imgl_rectified = letterbox(imgl_rectified, imgsz)[0]
    imgr_rectified = letterbox(imgr_rectified, imgsz)[0]
    # BGR -> RGB and HWC -> CHW for the network, then force C-contiguity.
    img_ai = np.ascontiguousarray(imgl_rectified[:, :, ::-1].transpose(2, 0, 1))
    return (img_ai,
            np.ascontiguousarray(imgl_rectified),
            np.ascontiguousarray(imgr_rectified))
# @timethis
def Image_Rectification(camera_config, img_left, img_right, imgsz=640, path=False, debug=False, UMat=False, cam_mode=4):
    """
    @description : rectify a stereo pair and prepare the detector input image
    ---------
    @param :
        camera_config: type class stereoCamera, stereo camera calibration parameters
        img_left: type array (or str when path=True), image from the left camera
        img_right: type array (or str when path=True), image from the right camera
        imgsz: target size used by letterbox; stereo images are resized to match the detector input
        path: type bool, if true, img_left and img_right are image file paths
        debug: type bool, additionally writes a rectification check image with parallel lines
        UMat: type bool, if true, use GPU (cv2.UMat) for remapping
        cam_mode: type int, the camera configuration type (see calib_type)
    -------
    @Returns :
        img_raw: type array, full-size (un-letterboxed) reference image
        img_ai: type array, letterboxed color image for object detection
        iml_rectified: type array, grayscale left image for stereo matching
        imr_rectified: type array, grayscale right image for stereo matching
        gain: letterbox scale factor(s)
        padding: letterbox padding offsets
    -------
    """
    # Load the input pair (file paths when path=True, arrays otherwise).
    t0 = time.time()
    if path:
        imgl_path=str(Path(img_left).absolute())
        imgr_path=str(Path(img_right).absolute())
        iml = cv2.imread(imgl_path) # left
        imr = cv2.imread(imgr_path) # right
    else:
        iml = img_left # left
        imr = img_right # right
    if UMat:
        iml = cv2.UMat(iml) # left
        imr = cv2.UMat(imr) # right
    # Camera intrinsics/extrinsics (precomputed remap tables live on config).
    config = camera_config
    if cam_mode == calib_type.AR0135_416_416.value or cam_mode == calib_type.AR0135_640_640.value:
        if UMat:
            img_raw = cv2.UMat.get(iml)
        else:
            img_raw = iml
        iml, gain, padding = letterbox(iml, imgsz)
        imr = letterbox(imr, imgsz)[0]
        # Stereo rectification (maps assumed to match the letterboxed size).
        img_ai, imr_rectified = rectifyImage(iml, imr, config.map1x, config.map1y, config.map2x, config.map2y)
    elif cam_mode == calib_type.MIDDLEBURY_416.value:
        # Middlebury images are already rectified; only letterbox them.
        img_ai, gain, padding = letterbox(iml, imgsz)
        imr_rectified = letterbox(imr, imgsz)[0]
        img_raw = iml
    else:
        # Stereo rectification at full resolution, then resize.
        iml_rectified, imr_rectified = rectifyImage(iml, imr, config.map1x, config.map1y, config.map2x, config.map2y)
        if UMat:
            img_raw = cv2.UMat.get(iml_rectified)
        else:
            img_raw = iml_rectified
        # Resize (letterbox) to the detector input size.
        img_ai, gain, padding = letterbox(iml_rectified, imgsz)
        imr_rectified = letterbox(imr_rectified, imgsz)[0]
    # save for debug
    # cv2.imwrite('./runs/detect/test/Left1_rectified.bmp', iml_rectified)
    # cv2.imwrite('./runs/detect/test/Right1_rectified.bmp', imr_rectified)
    if debug:
        # Draw evenly spaced parallel lines to eyeball rectification quality.
        line = draw_line(img_ai, imr_rectified)
        cv2.imwrite('./runs/detect/test/line.png', line)
    # Grayscale, C-contiguous copies feed the stereo matcher.
    iml_rectified = cv2.cvtColor(img_ai, cv2.COLOR_BGR2GRAY)
    imr_rectified = cv2.cvtColor(imr_rectified, cv2.COLOR_BGR2GRAY)
    iml_rectified = np.ascontiguousarray(iml_rectified)
    imr_rectified = np.ascontiguousarray(imr_rectified)
    return img_raw, img_ai, iml_rectified, imr_rectified, gain, padding
if __name__ == '__main__':
    # Visual smoke test of the rectification pipeline.
    config = stereoCamera()
    img_left = '../data/images/Left1.bmp'
    img_right = '../data/images/Right1.bmp'
    # BUG FIX: Image_Rectification returns six values, not three; the old
    # 3-name unpack raised ValueError before anything was displayed.
    img_raw, img_ai, left, right, gain, padding = Image_Rectification(
        config, img_left, img_right, path=True)
    cv2.imshow('left', left)
    cv2.waitKey(500)
    cv2.imshow('right', right)
    cv2.waitKey(500)
    cv2.imshow('left_rgb', img_ai)
    cv2.waitKey(2000)
    cv2.destroyAllWindows()
| {"/recevier.py": ["/utils/general.py"], "/latency_test.py": ["/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/dataset.py": ["/utils/general.py"], "/raw_record.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/detect.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/sm_test.py": ["/utils/img_preprocess.py", "/utils/Stereo_Application.py", "/utils/dataset.py", "/utils/general.py"], "/utils/Stereo_Application.py": ["/utils/general.py"], "/utils/img_preprocess.py": ["/utils/general.py"]} |
48,738 | jkornblum/airflow-cyberark-secrets-backend | refs/heads/main | /airflow_cyberark_secrets_backend/cyberark_backend.py | import os
from typing import Dict, List, Optional, Union

import requests

from airflow.models.connection import Connection
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
__version__ = "0.1.0"
class CyberArkSecretsBackend(BaseSecretsBackend, LoggingMixin):
    """
    Airflow secrets backend that resolves Connections and Variables from
    the CyberArk Central Credential Provider (CCP) REST endpoint.
    """

    def __init__(
        self,
        app_id: str,
        ccp_url: str,
        safe: str,
        verify: Optional[Union[str, bool]] = None,
        **kwargs,
    ):
        """
        Args:
            app_id: The CyberArk Central Credential Provider AppId
            ccp_url: The CyberArk CCP URL base (excluding query params)
            safe: The CyberArk safe
            verify: path to ssl certificate or False to disable verification;
                if None will look for env var CYBERARK_SSL, if not found
                verification is disabled (False)
        """
        super(CyberArkSecretsBackend, self).__init__(**kwargs)
        self.app_id = app_id
        self.ccp_url = ccp_url
        self.safe = safe
        self._verify = verify

        # Normalize a trailing '?' so query params can be appended uniformly.
        # BUG FIX: endswith() also tolerates an empty URL string, which the
        # old self.ccp_url[-1] indexing crashed on with IndexError.
        if self.ccp_url.endswith("?"):
            self.ccp_url = self.ccp_url[:-1]

        if self._verify is None:
            self._verify = os.environ.get("CYBERARK_SSL", False)

    def _fetch_cyberark(self, ca_obj: str) -> dict:
        """
        Fetch a secret from CyberArk and map its fields onto Airflow
        Connection keyword arguments.

        Args:
            ca_obj: The CyberArk object name

        Returns:
            dict: Connection kwargs (host, password, port, ...)
        """
        # CyberArk response field -> Airflow Connection kwarg
        ca_map = {
            "AccountDescription": "svc_account",
            "ApplicationName": "schema",
            "Address": "host",
            "Comment": "extra",
            "Content": "password",
            "LogonDomain": "login",
            "Port": "port",
        }
        url = f"{self.ccp_url}?AppID={self.app_id}&Safe={self.safe}&Object={ca_obj}"
        payload = requests.get(url, verify=self._verify).json()
        ca_content: Dict[str, Union[int, str]] = {
            ca_map[ca_key]: str(payload[ca_key])
            for ca_key in ca_map
            if ca_key in payload
        }
        if "port" in ca_content:
            ca_content["port"] = int(ca_content["port"])

        # If the airflow connection is using a svc_account with auto-rotate
        # managed by CyberArk, resolve that account to get the fresh credential.
        if "svc_account" in ca_content:
            ca_content["password"] = str(
                self._fetch_cyberark(str(ca_content["svc_account"]))["password"]
            )
            del ca_content["svc_account"]

        return ca_content

    def get_connections(self, conn_id: str) -> Optional[List[Connection]]:
        """
        Get connections with a specific ID.

        Args:
            conn_id: The Airflow connection id, the CyberArk object name

        Returns:
            A one-element list of airflow.models.connection.Connection, or
            None when the object yields no fields.  (FIX: the annotation
            used to say Optional[Connection], but this method has always
            returned a list.)
        """
        conn_dict = self._fetch_cyberark(ca_obj=conn_id)
        if conn_dict:
            return [Connection(conn_id=conn_id, **conn_dict)]
        return None

    def get_variable(self, key: str) -> Optional[str]:
        """Return Variable value from CyberArk (the secret Content field).

        Args:
            key: The variable key, the CyberArk object name

        Returns:
            str of the CyberArk secret content, or None if absent
        """
        conn_dict = self._fetch_cyberark(ca_obj=key)
        if conn_dict:
            return conn_dict["password"]
        return None
| {"/airflow_cyberark_secrets_backend/__init__.py": ["/airflow_cyberark_secrets_backend/cyberark_backend.py"]} |
48,739 | jkornblum/airflow-cyberark-secrets-backend | refs/heads/main | /airflow_cyberark_secrets_backend/__init__.py | __version__ = "0.1.0"
from .cyberark_backend import CyberArkSecretsBackend
| {"/airflow_cyberark_secrets_backend/__init__.py": ["/airflow_cyberark_secrets_backend/cyberark_backend.py"]} |
48,740 | jkornblum/airflow-cyberark-secrets-backend | refs/heads/main | /setup.py | # https://setuptools.readthedocs.io/en/latest/userguide/quickstart.html#development-mode
import setuptools
# No arguments: presumably all project metadata lives in declarative config
# (setup.cfg / pyproject.toml) — see the development-mode link above.
setuptools.setup()
| {"/airflow_cyberark_secrets_backend/__init__.py": ["/airflow_cyberark_secrets_backend/cyberark_backend.py"]} |
48,743 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/forms.py | from django import forms
from django.forms.models import inlineformset_factory
from blog.models import *
import logging
logger = logging.getLogger(__name__)
class PostForm(forms.ModelForm):
    """Model form for creating/editing a Post.  'autor' is excluded
    because the views assign request.user to post.autor themselves."""
    class Meta:
        model = Post
        exclude = ['autor']
class TagForm(forms.ModelForm):
    """Model form exposing Tag fields.

    NOTE(review): no ``fields``/``exclude`` is declared; this relies on the
    old implicit all-fields behavior (pre-Django 1.6) — confirm before any
    Django upgrade.
    """
    class Meta:
        model = Tag
class Post_TagForm(forms.ModelForm):
    """Model form for the Post<->Tag join table.

    NOTE(review): like TagForm, relies on the old implicit all-fields
    ModelForm behavior (no ``fields``/``exclude``).
    """
    class Meta:
        model= Post_Tag
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,744 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /settings.py | from djangoappengine.settings_base import *
import os
# NOTE(review): ADMIN_MEDIA_PREFIX is re-assigned further down
# ('/media/admin/'); this first value is dead and the later one wins.
ADMIN_MEDIA_PREFIX = '/static/admin/'
TIME_ZONE = 'Europe/Madrid'
# NOTE(review): SECRET_KEY is committed to the repository; rotate it and
# load it from the environment for any real deployment.
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
LANGUAGE_CODE= 'es'
SITE_ID=1
# Extra settings modules consumed by the GAE comments integration.
GAE_SETTINGS_MODULES = (
    'gae_comments_settings',
)
INSTALLED_APPS = (
    'djangoappengine',
    'djangotoolbox',
    'django.contrib.auth',
    'django.contrib.comments',
    'django.contrib.contenttypes',
    'django.contrib.markup',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.sitemaps',
    'markdown',
    'blog',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
)
LOGIN_REDIRECT_URL = '/blog/'
# Effective admin media prefix (overrides the assignment near the top).
ADMIN_MEDIA_PREFIX = '/media/admin/'
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
# Logging: request errors are mailed to admins; the 'proyecto.app' logger
# writes DEBUG output to the console.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(levelname)s: %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'proyecto.app': {
            'handlers': ['console'],
            'level': 'DEBUG'
        },
    }
} | {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,745 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/Sitemap.py | ### sitemaps.py
from django.contrib.sitemaps import GenericSitemap
from blog.models import Post, Categoria
all_sitemaps = {}
# Build one GenericSitemap per category, restricted to published posts
# (estado == 1, i.e. Post.ACTIVO).
# NOTE(review): this queries the database at import time; an empty/unready
# DB during startup would make this module fail to import.
for categoria in Categoria.objects.all():
    info_dict = {
        'queryset': categoria.post_set.filter(estado=1),
    }
    sitemap = GenericSitemap(info_dict,priority=0.6)
    # dict key is provided as 'section' in sitemap index view
    all_sitemaps[categoria.nombre] = sitemap
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,746 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/feeds.py | from django.core.exceptions import ObjectDoesNotExist
from django.utils.feedgenerator import Atom1Feed
from django.contrib.syndication.feeds import Feed
from blog.models import Categoria, Post
class LatestEntriesFeed(Feed):
    """Site-wide Atom feed of the 15 most recently created published posts
    (old-style django.contrib.syndication.feeds API)."""
    author_name = "Ruben Dario"
    copyright = "AGPL"
    description = "Ultimos Posts publicados"
    feed_type = Atom1Feed
    item_copyright = "AGPL"
    item_author_name = "Ruben Dario"
    item_author_link = "http://"
    link = "/feeds/posts/"
    title = "Ultimos Posts"
    def items(self):
        # Only published posts, via the custom 'activo' manager.
        return Post.activo.all()[:15]
    def item_pubdate(self, item):
        return item.creado
    def item_guid(self, item):
        # Stable tag-URI-style GUID: creation date + canonical URL.
        return "tag:%s:%s" % (item.creado.strftime('%Y-%m-%d'), item.get_absolute_url())
class CategoryFeed(LatestEntriesFeed):
    """Per-category feed; the category slug arrives as the single URL bit."""
    def get_object(self, bits):
        # Exactly one URL component (the slug) is expected.
        if len(bits) != 1:
            raise ObjectDoesNotExist
        return Categoria.objects.get(slug__exact=bits[0])
    def title(self, obj):
        return "Ultimos Posts en Categoria '%s'" % (obj.nombre)
    def description(self, obj):
        return "Ultimos Posts en Categoria '%s'" % (obj.nombre)
    def link(self, obj):
        return obj.get_absolute_url()
    def items(self, obj):
        # NOTE(review): live_post_set() is not defined on Categoria in this
        # file (only posts_activos() is) — confirm it exists elsewhere.
        return obj.live_post_set()[:15]
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,747 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/models.py | from django.db import models
from django.contrib.auth.models import User
from markdown import markdown
from django.template.defaultfilters import slugify
import datetime
class PostsActivosManager(models.Manager):
    """Manager narrowing the default queryset to published posts
    (estado == Post.ACTIVO)."""
    def get_query_set(self):
        # Old-style (pre-Django 1.6) manager hook; chains the estado filter.
        return super(PostsActivosManager, self).get_query_set().filter(estado=self.model.ACTIVO)
class Categoria(models.Model):
    """Blog category; posts reference it via a ForeignKey."""
    nombre = models.CharField(max_length=50, unique=True)
    slug= models.SlugField(unique=True, help_text="Valor generado automaticamente segun el nombre")
    descripcion = models.TextField()
    def __unicode__(self):
        return 'Categoria: %s Descripcion: %s' % (self.nombre, self.descripcion)
    @models.permalink
    def get_absolute_url(self):
        # Evaluates to the (view name, args, kwargs) triple @permalink expects;
        # the parentheses around the view name are redundant but harmless.
        return ('categoria_detalle'),(),{'slug': self.slug}
    def posts_activos(self):
        # Only published posts of this category.
        return self.post_set.filter(estado=Post.ACTIVO)
    class Meta:
        ordering = ["nombre"]
        verbose_name_plural= "Categorias"
class Post(models.Model):
    """Blog entry.

    Markdown in ``contenido`` is rendered into ``contenido_html`` on every
    save; ``slug`` and ``creado`` are fixed on the first save only.
    """
    # Publication states
    ACTIVO= 1
    BORRADOR= 2
    OCULTO= 3
    ESTADOS_CHOICES=(
        (ACTIVO, 'Activo'),
        (BORRADOR, 'Borrador'),
        (OCULTO, 'Oculto'),
    )
    autor = models.ForeignKey(User)
    titulo= models.CharField(max_length=50,unique=True)
    contenido = models.TextField()
    creado = models.DateTimeField(editable=False)
    modificado= models.DateTimeField(editable=False)
    contenido_html= models.TextField(editable=False,blank=True)
    activa_comentarios= models.BooleanField(default=True)
    slug= models.SlugField(unique=True,editable=False)
    estado= models.IntegerField(choices=ESTADOS_CHOICES, default=ACTIVO)
    categoria= models.ForeignKey(Categoria)
    # Default manager plus 'activo', which only yields published posts.
    objects= models.Manager()
    activo= PostsActivosManager()
    def __unicode__(self):
        return 'Titulo: %s Estado: %s' % (self.titulo, self.estado)
    def save(self, force_insert=False, force_update= False):
        # Re-render the HTML cache from the Markdown source on every save.
        self.contenido_html= markdown(self.contenido)
        if not self.id:
            # First save only: derive the slug and stamp the creation time.
            self.slug= slugify(self.titulo)
            self.creado= datetime.datetime.today()
        # BUG FIX: 'modificado' used to be set only on the first save, so it
        # never reflected later edits; refresh it on every save.
        self.modificado= datetime.datetime.today()
        super(Post,self).save(force_insert, force_update)
    class Meta:
        ordering = ["creado"]
    @models.permalink
    def get_absolute_url(self):
        return ('blog.views.ver_Post', [str(self.slug)])
class Tag(models.Model):
    """Free-form label; linked to posts through the Post_Tag join model."""
    etiqueta = models.CharField(max_length=50)
    def __unicode__(self):
        return 'Etiqueta: %s' % (self.etiqueta)
    class Meta:
        ordering = ["etiqueta"]
class Post_Tag(models.Model):
    """Explicit many-to-many join table between Post and Tag."""
    post = models.ForeignKey(Post)
    tag= models.ForeignKey(Tag)
    def __unicode__(self):
        return 'Post: %s ' % (self.post)
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,748 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /urls.py | from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth.forms import AuthenticationForm
from django.contrib import admin
from blog.feeds import LatestEntriesFeed,CategoryFeed
from blog.Sitemap import all_sitemaps as sitemaps
admin.autodiscover()
# djangotoolbox-friendly 500 handler (GAE backend).
handler500 = 'djangotoolbox.errorviews.server_error'
# Feed slugs exposed under /feeds/<slug>/ by the old syndication view.
feeds = {
    'ultimos': LatestEntriesFeed,
    'categorias': CategoryFeed,
}
urlpatterns = patterns('',
    (r'^$', 'django.views.generic.simple.redirect_to', {'url': '/blog/', }),
    (r'^blog/', include('blog.urls')),
    (r'^accounts/create_user/$', 'blog.views.create_new_user'),
    (r'^accounts/login/$', 'django.contrib.auth.views.login',
        {'authentication_form': AuthenticationForm,
         'template_name': 'blog/login.html',
        }),
    (r'^accounts/logout/$', 'django.contrib.auth.views.logout',
        {'next_page': '/blog/',}),
    (r'^admin/', include(admin.site.urls)),
    # AJAX markdown live-preview endpoint.
    (r'^markdown/preview/$', 'blog.views.mark'),
    (r'^comments/', include('django.contrib.comments.urls')),
    (r'^feeds/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', {'feed_dict': feeds}),
    # Sitemap index plus one per-category section sitemap.
    (r'^sitemap.xml$', 'django.contrib.sitemaps.views.index', {'sitemaps': sitemaps}),
    (r'^sitemap-(?P<section>.+)\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
)
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,749 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/views.py | from django.contrib.auth.forms import UserCreationForm
from django.views.generic.simple import direct_to_template
from django.http import HttpResponseRedirect,HttpResponse
from blog.forms import *
from blog.models import *
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list
import logging
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from markdown import markdown
from django.forms.models import formset_factory,inlineformset_factory
from django.core.paginator import Paginator , EmptyPage, PageNotAnInteger
from django.utils.datetime_safe import datetime
from django.db.models import Count
from django.views.decorators.cache import cache_page
logger = logging.getLogger(__name__)
def mark(request):
    """AJAX endpoint: render the POSTed 'data' field from Markdown to HTML
    and return it as the raw response body (used for the live preview)."""
    logger.debug('Hemos entrado en mi vista: markdown')
    raw_text = request.POST.get('data')
    html = markdown(raw_text)
    return HttpResponse(html)
@cache_page(60 * 1)
def index_Posts(request, pagina):
    """Paginated blog index (5 posts/page, manual slicing) plus the
    category list and a crude tag cloud; cached for one minute."""
    logger.debug('Hemos entrado en mi_vista: index_Posts')
    cat=Categoria.objects.all()
    tag= Tag.objects.all()
    tam= tag.count()
    logger.debug('Tamanyo: '+ str(tam))
    # Tag-cloud weight = total tags / usages of each tag.
    # NOTE(review): a tag with zero Post_Tag rows raises ZeroDivisionError
    # here — confirm tags are always created with at least one link.
    tags= dict([(t.etiqueta,int(tam/t.post_tag_set.all().count()) ) for t in tag])
    logger.debug(' Nube Tags: '+str(tags))
    # Manual 5-per-page slicing (1-based page numbers).
    pagina= int(pagina)
    inicio= 5*(pagina-1)
    fin= (5*pagina)
    cantidad= Post.activo.all().count()
    logger.debug('Inicio: '+ str(inicio)+ ' Fin: '+ str(fin)+' Pagina: '+ str(pagina)+'Cantidad : '+ str(cantidad))
    Posts = Post.activo.all().order_by('-creado')[inicio:fin]
    logger.debug(Posts)
    # Previous/next page numbers, clamped at the ends.
    atras=pagina-1 if inicio>1 else pagina
    sig=pagina+1 if fin<cantidad else pagina
    return direct_to_template(request, 'blog/index.html',
                              {'Posts': Posts,
                               'atras': atras,
                               'sig':sig,
                               'pag':pagina,
                               'cat':cat,
                               'tags':tags})
@login_required( login_url='/accounts/login/')
@staff_member_required
def lista_Posts(request):
    """Staff-only admin listing of ALL posts (any estado), paginated with
    Django's Paginator, 5 per page via the ?page= query parameter."""
    logger.debug('Hemos entrado en mi_vista: lista_Posts')
    Posts = Post.objects.all().order_by('-creado')
    logger.debug(Posts)
    paginator= Paginator (Posts,5)
    page= request.GET.get('page')
    if page is None or page=='':
        page=1
    # Fall back to the first/last page on bad or out-of-range input.
    try:
        Posts= paginator.page(int(page))
    except PageNotAnInteger:
        Posts= paginator.page(1)
    except EmptyPage:
        Posts= paginator.page(paginator.num_pages)
    return direct_to_template(request, 'blog/post/lista_posts.html',
                              {'Posts': Posts})
@login_required( login_url='/accounts/login/')
@staff_member_required
def crear_Post(request):
    """Staff-only post creation: a PostForm plus a formset of up to four
    TagForms; existing tags are reused, new ones are created, and each is
    linked to the post through Post_Tag."""
    logger.debug('Hemos entrado en mi_vista: crear_Post')
    pformset= PostForm
    tformset= formset_factory( TagForm, extra=4,can_delete=False)
    if request.method == 'POST':
        formset= pformset(request.POST,request.FILES,prefix='post')
        tagformset= tformset(request.POST, request.FILES,prefix='tag')
        logger.debug('Formulario Crear Valido...')
        if formset.is_valid() :
            # The author is taken from the session, not from the form.
            post=formset.save(commit=False)
            post.autor= request.user
            post.save()
            logger.debug('Post: '+ str(post.id))
            for tag in tagformset:
                t=tag.save(commit=False)
                if not (t.etiqueta is None or t.etiqueta==''):
                    # Reuse an existing Tag with this label, else persist the new one.
                    try:
                        t= Tag.objects.get(etiqueta=t.etiqueta)
                    except Tag.DoesNotExist:
                        t.save()
                    logger.debug(' Tag id: '+str(t.id))
                    Post_Tag.objects.create(post=post,tag=t)
            # NOTE(review): 'cantidad' is computed but never used.
            cantidad= Post.activo.all().count()/5
            return HttpResponseRedirect('/blog/')
    else:
        formset= pformset(prefix='post')
        tagformset = tformset(prefix='tag')
    return direct_to_template(request, 'blog/post/post_form.html',
                              {'form': formset,
                               'tform': tagformset})
def ver_Post(request, slug):
    """Display a single post selected by slug.

    IMPROVED: uses get_object_or_404 (already imported and used by
    categoria_detalle) so an unknown slug yields a 404 page instead of an
    unhandled Post.DoesNotExist (HTTP 500).
    """
    logger.debug('Hemos entrado en mi_vista: ver_Post')
    P = get_object_or_404(Post, slug=slug)
    return direct_to_template(request, 'blog/post/ver_post.html',
                              {'Post': P})
@login_required( login_url='/accounts/login/')
@staff_member_required
def modificar_Post(request, slug):
    """Staff-only post edit view: a PostForm bound to the existing post
    plus an inline formset for its tags."""
    logger.debug('Hemos entrado en mi_vista: modificar_Post')
    # NOTE(review): inlineformset_factory(Post, Tag, ...) requires Tag to
    # have a ForeignKey to Post, but in models.py the FK lives on Post_Tag
    # — this factory call likely raises at import/first use; confirm.
    inlineForm= inlineformset_factory(Post, Tag, extra=2,can_delete=False)
    p= Post.objects.select_related().get(slug=slug)
    if request.method == 'POST':
        formset= PostForm(request.POST,instance=p)
        tagformset= inlineForm(request.POST, request.FILES, instance=p)
        if formset.is_valid() and tagformset.is_valid():
            logger.debug('Formulario Modificar Valido...')
            # Author is refreshed from the session on every edit.
            post=formset.save(commit=False)
            post.autor= request.user
            post.save()
            for tag in tagformset:
                t=tag.save(commit=False)
                if not (t.etiqueta is None or t.etiqueta==''):
                    t.post= post
                    t.save(True, True)
            return HttpResponseRedirect('/blog/')
    else:
        formset= PostForm(instance=p)
        tagformset = inlineForm(instance=p)
    return direct_to_template(request, 'blog/post/post_form.html',
                              {'form': formset,
                               'tform': tagformset})
@login_required( login_url='/accounts/login/')
@staff_member_required
def borrar_Post(request, slug):
    """Staff-only deletion of the post with the given slug, then redirect
    to the blog index.

    IMPROVED: get_object_or_404 (consistent with categoria_detalle) turns
    an unknown slug into a 404 instead of an unhandled Post.DoesNotExist.
    """
    logger.debug('Hemos entrado en mi vista: borrar_Post')
    get_object_or_404(Post.objects.select_related(), slug=slug).delete()
    return HttpResponseRedirect('/blog/')
@login_required( login_url='/accounts/login/')
def create_new_user(request):
    """Create a new user account with Django's UserCreationForm.

    NOTE(review): guarded by @login_required, so only an already-logged-in
    user can create accounts — confirm that is intended (it prevents public
    self-registration).
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # user must be active for login to work
            user.is_active = True
            user.save()
            return HttpResponseRedirect('/blog/')
    else:
        form = UserCreationForm()
    return direct_to_template(request, 'blog/user_create_form.html',
                              {'form': form})
@cache_page(60 * 1)
def categoria_detalle(request, slug):
    """List the published posts of one category (generic object_list view);
    404s on an unknown slug; cached for one minute."""
    categoria = get_object_or_404(Categoria, slug=slug)
    return object_list(request, queryset=categoria.posts_activos(), extra_context={
        'categoria': categoria
    })
@cache_page(60 * 1)
def lista_Post_Tag(request, tag, pagina):
    """Paginated index of posts carrying a given tag label (5 per page),
    with the same category list and tag cloud as the main index."""
    logger.debug('Hemos entrado en mi_vista: index_Posts por Tag')
    cat=Categoria.objects.all()
    t2= Tag.objects.all()
    tam= t2.count()
    logger.debug('Tamanyo: '+ str(tam))
    # NOTE(review): a tag with zero Post_Tag rows raises ZeroDivisionError.
    tags= dict([(t.etiqueta,int(tam/t.post_tag_set.all().count()) ) for t in t2])
    logger.debug(' Nube Tags: '+str(tags))
    pagina= int(pagina)
    inicio= 5*(pagina-1)
    fin= (5*pagina)
    t3= t2.get(etiqueta=tag)
    cantidad= t3.post_tag_set.all().count()
    logger.debug('Inicio: '+ str(inicio)+ ' Fin: '+ str(fin)+' Pagina: '+ str(pagina)+'Cantidad : '+ str(cantidad))
    # NOTE(review): this yields Post_Tag rows (not Post objects) and orders
    # by '-creado', which is not a Post_Tag field ('post__creado' would be)
    # — confirm the template and this ordering actually work.
    Posts = t3.post_tag_set.all().order_by('-creado')[inicio:fin]
    logger.debug(Posts)
    atras=pagina-1 if inicio>1 else pagina
    sig=pagina+1 if fin<cantidad else pagina
    return direct_to_template(request, 'blog/indextag.html',
                              {'Posts': Posts,
                               'atras': atras,
                               'sig':sig,
                               'pag':pagina,
                               'cat':cat,
                               'tags':tags})
def lista_Post_Categoria(request, catg, pagina):
    """Paginated index of the posts in one category, selected by its
    'nombre' (5 per page), with category list and tag cloud.

    NOTE(review): unlike the other index views this one is not wrapped in
    @cache_page — confirm whether that is deliberate.
    """
    logger.debug('Hemos entrado en mi_vista: index_Posts por Categoria')
    cat=Categoria.objects.all()
    t2= Tag.objects.all()
    tam= t2.count()
    logger.debug('Tamanyo: '+ str(tam))
    # NOTE(review): a tag with zero Post_Tag rows raises ZeroDivisionError.
    tags= dict([(t.etiqueta,int(tam/t.post_tag_set.all().count()) ) for t in t2])
    logger.debug(' Nube Tags: '+str(tags))
    pagina= int(pagina)
    inicio= 5*(pagina-1)
    fin= (5*pagina)
    c3= cat.get(nombre=catg)
    cantidad= c3.post_set.all().count()
    logger.debug('Inicio: '+ str(inicio)+ ' Fin: '+ str(fin)+' Pagina: '+ str(pagina)+'Cantidad : '+ str(cantidad))
    Posts = c3.post_set.all().order_by('-creado')[inicio:fin]
    logger.debug(Posts)
    atras=pagina-1 if inicio>1 else pagina
    sig=pagina+1 if fin<cantidad else pagina
    return direct_to_template(request, 'blog/indexcat.html',
                              {'Posts': Posts,
                               'atras': atras,
                               'sig':sig,
                               'pag':pagina,
                               'cat':cat,
                               'tags':tags})
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,750 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/urls.py | from django.conf.urls.defaults import *
from blog.models import *
from blog.views import *
# URL routes for the blog app (old-style Django ``patterns`` syntax,
# deprecated since Django 1.8)
urlpatterns = patterns('blog.views',
    (r'^(?P<slug>[-\w]+)/$', categoria_detalle, {}, 'categoria_detalle'),
    # front page: first page of posts
    (r'^$', index_Posts, { 'pagina':1}),
    (r'^post/pagina/(?P<pagina>\d{0,3})$', index_Posts),
    # post CRUD views
    (r'^post/listpost/$',lista_Posts),
    (r'^post/verpost/(?P<slug>[-\w]+)/$',ver_Post),
    (r'^post/crearpost/$', crear_Post),
    (r'^post/modifpost/(?P<slug>[-\w]+)/$',modificar_Post),
    (r'^post/borrarpost/(?P<slug>[-\w]+)/$',borrar_Post),
    # paginated listings filtered by tag / category
    (r'^tag/(?P<tag>[\w]+)/pagina/(?P<pagina>\d{0,3})/$',lista_Post_Tag),
    (r'^cat/(?P<catg>[\w]+)/pagina/(?P<pagina>\d{0,3})/$',lista_Post_Categoria),
)
48,751 | Rubentxu/RubentxuDjangoPy | refs/heads/master | /blog/tests.py | from django.test import TestCase
from blog.models import Post
class SimpleTest(TestCase):
    """Sanity checks for the ``Post`` model."""

    def setUp(self):
        # one post persisted before each test; note the field is ``contenido``
        Post(contenido='This is a test greeting').save()

    def test_setup(self):
        self.assertEqual(1, len(Post.objects.all()))
        # bug fix: the model field written in setUp is ``contenido``, but this
        # assertion previously read ``.content`` (AttributeError at runtime)
        self.assertEqual('This is a test greeting', Post.objects.all()[0].contenido)
| {"/blog/forms.py": ["/blog/models.py"], "/blog/Sitemap.py": ["/blog/models.py"], "/blog/feeds.py": ["/blog/models.py"], "/urls.py": ["/blog/feeds.py", "/blog/Sitemap.py"], "/blog/views.py": ["/blog/forms.py", "/blog/models.py"], "/blog/urls.py": ["/blog/models.py", "/blog/views.py"], "/blog/tests.py": ["/blog/models.py"]} |
48,756 | monocongo/openimages | refs/heads/master | /src/openimages/download_segmentations.py | import os
import zipfile
from typing import Dict
from helpers import download_file
def download_segmentation_zipfiles(base_url: str, section: str, meta_dir: str):
    """
    Downloads segmentation mask archive files from OpenImages dataset.

    The masks for each split are sharded into 16 zip archives, one per
    hexadecimal digit (0-f) corresponding to the first character of the
    mask file names the archive contains.

    :param base_url: OpenImages URL location
    :param section: split section (train, validation, or test) for which to
        download the archives
    :param meta_dir: directory which we should download the archive files into
    :raise ValueError: if any archive fails to download
    """

    # make the metadata directory if it doesn't exist
    if meta_dir is not None:
        os.makedirs(meta_dir, exist_ok=True)

    # one archive per hexadecimal digit 0-f
    # (renamed the loop variable: it previously shadowed the builtin `bin`)
    for shard_index in range(16):
        hex_digit = format(shard_index, "x")
        mask_filename = _construct_archive_filename(section, hex_digit)
        url = f"{base_url}{section}-masks/{mask_filename}"
        dest_path = f"{meta_dir}/{mask_filename}"
        # skip archives already downloaded by a previous run
        if not os.path.exists(dest_path):
            try:
                download_file(url, dest_path)
            except ValueError as e:
                raise ValueError(
                    f"Failed to get segmentation mask archive (bin {hex_digit}) for split section {section}.",
                    e,
                )
def extract_segmentation_mask(arguments: Dict):
    """
    Extracts one segmentation mask image from a previously downloaded
    archive file.

    :param arguments: dictionary containing the following arguments:
        handle_map: dictionary with mask archive filenames as keys and
            associated zipfile objects as values
        section: split section (train, validation, or test) to which the
            mask belongs
        mask_filename: name of the mask image file
        dest_file_path: path to where the mask image will be extracted
    """

    mask_name = arguments["mask_filename"]
    # the first character of the mask file name selects the shard archive
    archive_name = _construct_archive_filename(arguments["section"], mask_name[0])
    archive = arguments["handle_map"][archive_name]
    archive.extract(mask_name, arguments["dest_file_path"])
def open_segmentation_zipfiles(section: str, meta_dir: str) -> Dict:
    """
    Opens the segmentation mask archive files (as downloaded by
    `download_segmentation_zipfiles`) and returns a dictionary mapping the
    archive filenames to the associated zipfile objects.

    :param section: split section (train, validation, or test) whose archives
        should be opened
    :param meta_dir: directory holding the previously downloaded archives
    :return: a dictionary mapping segmentation archive filenames to their
        associated zipfile objects
    """

    handles = {}
    # one archive per hexadecimal digit 0-f
    for shard_index in range(16):
        archive_name = _construct_archive_filename(section, format(shard_index, "x"))
        archive_path = os.path.join(meta_dir, archive_name)
        handles[archive_name] = zipfile.ZipFile(archive_path, mode="r")
    return handles
def close_segmentation_zipfiles(handle_map: Dict):
    """
    Closes the zipfile objects in the given handle_map, as opened by
    `open_segmentation_zipfiles`.

    :param handle_map: the dictionary with zipfile handles to close.
    """

    for handle in handle_map.values():
        handle.close()
def _construct_archive_filename(section: str, bin: str):
return f"{section}-masks-{bin}.zip"
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,757 | monocongo/openimages | refs/heads/master | /tests/test_download.py | import logging
import os
import pytest
from openimages import download
# ------------------------------------------------------------------------------
# disable logging messages
logging.disable(logging.CRITICAL)
# ------------------------------------------------------------------------------
@pytest.mark.usefixtures(
    "data_dir",
)
def test_download_dataset(
        data_dir,
):
    """
    Test for the openimages.download.download_dataset() function

    :param data_dir: temporary directory into which test files will be loaded
    """

    # TODO: actually exercise download_dataset() -- this is a placeholder
    # that currently only verifies the fixture wiring imports cleanly
    pass
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,758 | monocongo/openimages | refs/heads/master | /tests/conftest.py | import os
from distutils import dir_util
import pytest
@pytest.fixture
def data_dir(tmpdir, request):
    """
    Fixture responsible for searching a folder with the same name of test
    module and, if available, moving all contents to a temporary directory so
    tests can use them freely.
    """

    # the requesting test module's path, minus its extension, names the
    # per-module fixture data directory (e.g. tests/test_download/)
    filename = request.module.__file__
    test_dir, _ = os.path.splitext(filename)

    # NOTE(review): distutils is deprecated and removed in Python 3.12 --
    # consider shutil.copytree(..., dirs_exist_ok=True) on Python >= 3.8
    if os.path.isdir(test_dir):
        dir_util.copy_tree(test_dir, str(tmpdir))
    return tmpdir
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,759 | monocongo/openimages | refs/heads/master | /setup.py | import os
from setuptools import setup, find_packages
parent_dir = os.path.dirname(os.path.realpath(__file__))
with open(f"{parent_dir}/README.md", "r") as readme_file:
long_description = readme_file.read()
# package metadata and build configuration for the `openimages` distribution
setup(
    name="openimages",
    version="0.0.1",
    author="James Adams",
    author_email="monocongo@gmail.com",
    description="Tools for downloading computer vision datasets from Google's OpenImages dataset",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/monocongo/openimages",
    python_requires=">=3.6",
    provides=[
        "openimages",
    ],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
    # sources live under src/ (the "src layout")
    package_dir={'': 'src'},
    packages=find_packages(where='src'),
    install_requires=[
        "boto3",
        "cvdata",
        "lxml",
        "pandas",
        "requests",
        "tqdm",
    ],
    # command-line entry points exposed on install
    entry_points={
        "console_scripts": [
            "oi_download_dataset=openimages.download:_entrypoint_download_dataset",
            "oi_download_images=openimages.download:_entrypoint_download_images",
        ]
    },
)
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,760 | monocongo/openimages | refs/heads/master | /src/helpers.py | import os
import requests
from tqdm import tqdm
# Modified from https://stackoverflow.com/a/37573701.
def download_file(url: str, dest_path: str = None):
    """
    Downloads file at the given URL and stores it in dest_path if given, or
    returns the contents if dest_path is None (default).

    :param url: URL to download.
    :param dest_path: Location to store downloaded file at, or None to return
        contents instead.
    :return: Downloaded file contents (bytes) if dest_path is None, otherwise None.
    :raise ValueError: if the server responds with a non-200 status, or the
        transfer ends before all advertised bytes are received
    """

    response = requests.get(url, allow_redirects=True, stream=True)
    if response.status_code != 200:
        response.close()
        raise ValueError(
            f"Failed to download file from {url}. "
            f"-- Invalid response (status code: {response.status_code}).",
        )

    total_size = int(response.headers.get('content-length', 0))
    block_size = 100 * 1024  # 100 Kibibyte
    t = tqdm(total=total_size, unit='iB', unit_scale=True,
             desc=f"GET {os.path.basename(os.path.normpath(url))}")

    file_contents_blocks = []
    try:
        if dest_path is not None:
            with open(dest_path, 'wb') as f:
                for data in response.iter_content(block_size):
                    t.update(len(data))
                    f.write(data)
        else:
            for data in response.iter_content(block_size):
                t.update(len(data))
                file_contents_blocks.append(data)
    finally:
        # always release the progress bar and the streamed connection --
        # previously a mid-transfer exception leaked both
        t.close()
        response.close()

    if total_size != 0 and t.n != total_size:
        raise ValueError(
            f"Download interrupted (received {t.n} of {total_size} bytes)")

    return b"".join(file_contents_blocks) if dest_path is None else None
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,761 | monocongo/openimages | refs/heads/master | /src/openimages/download.py | import argparse
import concurrent.futures
import io
import logging
import os
from typing import Dict, List, Set
import urllib3
import warnings
import boto3
import botocore
import lxml.etree as etree
import pandas as pd
import requests
from tqdm import tqdm
from cvdata.utils import image_dimensions
from helpers import download_file
# define a "public API" and somewhat manage "wild" imports
# (see http://xion.io/post/code/python-all-wild-imports.html)
__all__ = ["download_dataset", "download_images"]
# OpenImages URL locations
_OID_v4 = "https://storage.googleapis.com/openimages/2018_04/"
_OID_v5 = "https://storage.googleapis.com/openimages/v5/"
# ignore the connection pool is full warning messages from boto
warnings.filterwarnings("ignore")
# ------------------------------------------------------------------------------
# set up a basic, global _logger which will write to the console
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
def _class_label_codes(
class_labels: List[str],
meta_dir: str = None,
) -> Dict:
"""
Gets a dictionary that maps a list of OpenImages image class labels to their
corresponding image class label codes.
:param class_labels: image class labels for which we'll find corresponding
OpenImages image class codes
:param meta_dir: directory where we should look for the class descriptions
CSV file, and if not present download it into there for future use
:return: dictionary with the class labels mapped to their corresponding
OpenImages image class codes
"""
classes_csv = "class-descriptions-boxable.csv"
if meta_dir is None:
# get the class descriptions CSV from OpenImages and read into a DataFrame
try:
contents = download_file(_OID_v5 + classes_csv)
except ValueError as e:
raise ValueError("Failed to get class descriptions information.", e)
df_classes = pd.read_csv(io.BytesIO(contents), header=None)
else:
# download the class descriptions CSV file to the specified directory if not present
descriptions_csv_file_path = os.path.join(meta_dir, classes_csv)
if not os.path.exists(descriptions_csv_file_path):
# get the annotations CSV for the section
url = _OID_v5 + classes_csv
try:
download_file(url, descriptions_csv_file_path)
except ValueError as e:
raise ValueError("Failed to get class descriptions information.", e)
df_classes = pd.read_csv(descriptions_csv_file_path, header=None)
# build dictionary of class labels to OpenImages class codes
labels_to_codes = {}
for class_label in class_labels:
labels_to_codes[class_label.lower()] = \
df_classes.loc[[i.lower() == class_label.lower() for i in df_classes[1]]].values[0][0]
# return the labels to OpenImages codes dictionary
return labels_to_codes
# ------------------------------------------------------------------------------
def _class_label_segmentation_codes(
class_labels: List[str],
meta_dir: str = None,
) -> List[str]:
"""
Gets a list of OpenImages image class label codes relevant to segmentation
masks.
:param class_labels: image class labels for which we'll find corresponding
OpenImages image class codes
:param meta_dir: directory where we should look for the class label codes
file, and if not present download it into there for future use
:return: list of OpenImages class label codes
"""
classes_txt = "classes-segmentation.txt"
class_label_codes = []
if meta_dir is None:
# get the class codes text file
try:
contents = download_file(_OID_v5 + classes_txt)
except ValueError as e:
raise ValueError("Failed to get class descriptions information.", e)
class_label_codes = [line for line in contents.splitlines()]
else:
# download the class descriptions CSV file to the specified directory if not present
class_label_codes_file_path = os.path.join(meta_dir, classes_txt)
if not os.path.exists(class_label_codes_file_path):
# get the class label codes
url = _OID_v5 + classes_txt
try:
download_file(url, class_label_codes_file_path)
except ValueError as e:
raise ValueError("Failed to get class descriptions information.", e)
# read the lines into a list
class_label_codes = []
with open(class_label_codes_file_path, "r") as class_label_codes_file:
for line in class_label_codes_file:
class_labels.append(line.strip())
# return the OpenImages class label codes
return class_label_codes
# ------------------------------------------------------------------------------
def download_dataset(
        dest_dir: str,
        class_labels: List[str],
        exclusions_path: str = None,
        annotation_format: str = None,
        meta_dir: str = None,
        limit: int = None,
) -> Dict:
    """
    Downloads a dataset of images and annotations for a specified list of
    OpenImages image class labels.

    :param dest_dir: base directory under which the images and annotations
        will be stored
    :param class_labels: list of OpenImages class labels we'll download
    :param annotation_format: format of annotation files, valid options:
        "darknet" (YOLO) and "pascal" (PASCAL VOC), or None for images only
    :param exclusions_path: path to file containing file IDs to exclude from the
        dataset (useful if there are files known to be problematic or invalid)
    :param meta_dir: directory where we should look for the class descriptions
        and annotations CSV files, if these files are not present from a previous
        usage then download these files into this directory for future use
    :param limit: the maximum number of images per label we should download
    :return: dictionary of class labels mapped to dictionaries specifying the
        corresponding images and annotations directories for the class
    """

    # make the metadata directory if it's specified and doesn't exist
    if meta_dir is not None:
        os.makedirs(meta_dir, exist_ok=True)

    # get the OpenImages image class codes for the specified class labels
    label_codes = _class_label_codes(class_labels, meta_dir)

    # build the directories for each class label
    class_directories = {}
    for class_label in label_codes.keys():

        # create directory to contain the image files for the class
        images_dir = os.path.join(dest_dir, class_label, "images")
        os.makedirs(images_dir, exist_ok=True)
        class_directories[class_label] = {
            "images_dir": images_dir,
        }

        # create directory to contain the annotation files for the class
        if annotation_format is not None:
            annotations_dir = os.path.join(dest_dir, class_label, annotation_format)
            os.makedirs(annotations_dir, exist_ok=True)
            class_directories[class_label]["annotations_dir"] = annotations_dir

    # get the IDs of questionable files marked for exclusion
    exclusion_ids = None
    if exclusions_path is not None:

        # read the file IDs from the exclusions file
        with open(exclusions_path, "r") as exclusions_file:
            exclusion_ids = set([line.rstrip('\n') for line in exclusions_file])

    # keep counts of the number of images downloaded for each label
    class_labels = list(label_codes.keys())
    label_download_counts = {label: 0 for label in class_labels}

    # OpenImages is already split into sections so we'll need to loop over each
    for split_section in ("train", "validation", "test"):

        # get a dictionary of class labels to GroupByDataFrames
        # containing bounding box info grouped by image IDs
        label_bbox_groups = _group_bounding_boxes(split_section, label_codes, exclusion_ids, meta_dir)

        for label_index, class_label in enumerate(class_labels):

            # get the bounding boxes grouped by image and the collection of image IDs
            bbox_groups = label_bbox_groups[class_label]
            image_ids = bbox_groups.groups.keys()

            # limit the number of images we'll download, if specified
            if limit is not None:
                remaining = limit - label_download_counts[class_label]
                if remaining <= 0:
                    # bug fix: this previously used `break`, which also skipped
                    # every remaining class label for this split section as
                    # soon as any single label reached its per-label limit
                    continue
                elif remaining < len(image_ids):
                    image_ids = list(image_ids)[0:remaining]

            # download the images
            _logger.info(
                f"Downloading {len(image_ids)} {split_section} images "
                f"for class \'{class_label}\'",
            )
            _download_images_by_id(
                image_ids,
                split_section,
                class_directories[class_label]["images_dir"],
            )

            # update the downloaded images count for this label
            label_download_counts[class_label] += len(image_ids)

            # build the annotations
            if annotation_format is not None:
                _logger.info(
                    f"Creating {len(image_ids)} {split_section} annotations "
                    f"({annotation_format}) for class \'{class_label}\'",
                )
                _build_annotations(
                    annotation_format,
                    image_ids,
                    bbox_groups,
                    class_labels,
                    label_index,
                    class_directories[class_label]["images_dir"],
                    class_directories[class_label]["annotations_dir"],
                )

    if annotation_format == "darknet":
        # write the class labels to a names file to allow
        # for indexing the Darknet label numbers
        darknet_object_names = os.path.join(dest_dir, "darknet_obj_names.txt")
        with open(darknet_object_names, "w") as darknet_obj_names_file:
            for label in class_labels:
                darknet_obj_names_file.write(f"{label}\n")

    return class_directories
# ------------------------------------------------------------------------------
def download_segmentation_dataset(
        dest_dir: str,
        class_labels: List[str],
        exclusions_path: str = None,
        annotation_format: str = None,
        meta_dir: str = None,
        limit: int = None,
) -> Dict:
    """
    Downloads a dataset of images, bounding boxes, and segmentation annotations
    for a specified list of OpenImages image class labels.

    :param dest_dir: base directory under which the images and annotations
        will be stored
    :param class_labels: list of OpenImages class labels we'll download
    :param annotation_format: format of annotation files, valid options:
        "darknet" (YOLO) and "pascal" (PASCAL VOC)
    :param exclusions_path: path to file containing file IDs to exclude from the
        dataset (useful if there are files known to be problematic or invalid)
    :param meta_dir: directory where we should look for the class descriptions
        and annotations CSV files, if these files are not present from a previous
        usage then download these files into this directory for future use
        (required here -- the segmentation mask archives are cached under it)
    :param limit: the maximum number of images per label we should download
    :return: dictionary of class labels mapped to dictionaries specifying the
        corresponding images and annotations directories for the class
    :raise ValueError: if meta_dir is None
    """

    if meta_dir is None:
        raise ValueError("Downloading segmentations requires meta_dir to be specified")

    # make the metadata directory if it doesn't exist
    os.makedirs(meta_dir, exist_ok=True)

    # get the OpenImages image class codes for the specified class labels
    label_codes = _class_label_codes(class_labels, meta_dir)

    # build the directories for each class label
    class_directories = {}
    for class_label in label_codes.keys():

        # create directory to contain the image files for the class
        images_dir = os.path.join(dest_dir, class_label, "images")
        os.makedirs(images_dir, exist_ok=True)
        class_directories[class_label] = {
            "images_dir": images_dir,
        }

        # create directory to contain the segmentation files for the class
        segmentations_dir = os.path.join(dest_dir, class_label, "segmentations")
        os.makedirs(segmentations_dir, exist_ok=True)
        class_directories[class_label]["segmentations_dir"] = segmentations_dir

        # create directory to contain the annotation files for the class
        if annotation_format is not None:
            annotations_dir = os.path.join(dest_dir, class_label, annotation_format)
            os.makedirs(annotations_dir, exist_ok=True)
            class_directories[class_label]["annotations_dir"] = annotations_dir

    # get the IDs of questionable files marked for exclusion
    exclusion_ids = None
    if exclusions_path is not None:

        # read the file IDs from the exclusions file
        with open(exclusions_path, "r") as exclusions_file:
            exclusion_ids = set([line.rstrip('\n') for line in exclusions_file])

    # keep counts of the number of images downloaded for each label
    class_labels = list(label_codes.keys())
    label_download_counts = {label: 0 for label in class_labels}

    # OpenImages is already split into sections so we'll need to loop over each
    for split_section in ("train", "validation", "test"):

        # get a dictionary of class labels to GroupByDataFrames
        # containing bounding box info grouped by image IDs
        label_bbox_groups = _group_bounding_boxes(split_section, label_codes, exclusion_ids, meta_dir)
        # image IDs per label, and their segmentation mask rows grouped per image
        label_images_ids = {l: gbdf.groups.keys() for (l, gbdf) in label_bbox_groups.items()}
        label_segment_groups = _group_segments(split_section, label_codes, label_images_ids, meta_dir)

        for label_index, class_label in enumerate(class_labels):

            # get the bounding boxes and segmentation masks
            # grouped by image and the collection of image IDs
            bbox_groups = label_bbox_groups[class_label]
            segmentation_groups = label_segment_groups[class_label]
            image_ids = label_images_ids[class_label]

            # limit the number of images we'll download, if specified
            if limit is not None:
                remaining = limit - label_download_counts[class_label]
                if remaining <= 0:
                    # NOTE(review): `break` skips every remaining class label
                    # for this split section once any one label reaches its
                    # per-label limit -- `continue` looks like the intended
                    # behavior; confirm before changing
                    break
                elif remaining < len(image_ids):
                    image_ids = list(image_ids)[0:remaining]

            # download the images and masks
            _logger.info(
                f"Downloading {len(image_ids)} {split_section} images and accompanying segmentation masks "
                f"for class \'{class_label}\'",
            )
            _download_images_by_id(
                image_ids,
                split_section,
                class_directories[class_label]["images_dir"],
            )
            _download_segmentations_by_image_id(
                segmentation_groups,
                split_section,
                os.path.join(meta_dir, "segmentation-zips"),
                class_directories[class_label]["segmentations_dir"],
            )

            # update the downloaded images count for this label
            label_download_counts[class_label] += len(image_ids)

            # build the annotations
            if annotation_format is not None:
                _logger.info(
                    f"Creating {len(image_ids)} {split_section} annotations "
                    f"({annotation_format}) for class \'{class_label}\'",
                )
                _build_annotations(
                    annotation_format,
                    image_ids,
                    bbox_groups,
                    class_labels,
                    label_index,
                    class_directories[class_label]["images_dir"],
                    class_directories[class_label]["annotations_dir"],
                    True,
                )

    if annotation_format == "darknet":
        # write the class labels to a names file to allow
        # for indexing the Darknet label numbers
        darknet_object_names = os.path.join(dest_dir, "darknet_obj_names.txt")
        with open(darknet_object_names, "w") as darknet_obj_names_file:
            for label in class_labels:
                darknet_obj_names_file.write(f"{label}\n")

    return class_directories
# ------------------------------------------------------------------------------
def download_images(
        dest_dir: str,
        class_labels: List[str],
        exclusions_path: str = None,
        meta_dir: str = None,
        limit: int = None,
) -> Dict:
    """
    Downloads a dataset of images for a specified list of OpenImages image classes.

    :param dest_dir: base directory under which the images
        will be stored
    :param class_labels: list of OpenImages class labels we'll download
    :param exclusions_path: path to file containing file IDs to exclude from the
        dataset (useful if there are files known to be problematic or invalid)
    :param meta_dir: directory where we should look for the class descriptions
        and annotations CSV files, if these files are not present from a previous
        usage then download these files into this directory for future use
    :param limit: the maximum number of images per label we should download
    :return: dictionary mapping each class label to its images directory,
        for example: {"dog": "/data/oi/dog/images", "cat": "/data/oi/cat/images"}
    """

    # delegate to download_dataset() with annotation_format=None so that
    # only images (no annotation files) are produced
    image_directories = download_dataset(
        dest_dir,
        class_labels,
        exclusions_path,
        None,
        meta_dir,
        limit,
    )

    # collapse a level of the returned dictionary so we're able to return
    # a dictionary that just maps the class label to images directory
    return {label: dirs_dict["images_dir"] for label, dirs_dict in image_directories.items()}
# ------------------------------------------------------------------------------
def _download_images_by_id(
        image_ids: List[str],
        section: str,
        images_directory: str,
):
    """
    Downloads a collection of images from OpenImages dataset.

    :param image_ids: list of image IDs to download
    :param section: split section (train, validation, or test) where the image
        should be found
    :param images_directory: destination directory where the image files are to
        be written
    """

    # we'll download the images from AWS S3 so we'll need a boto S3 client;
    # the bucket allows anonymous reads, hence the unsigned configuration
    s3_client = boto3.client(
        's3',
        config=botocore.config.Config(signature_version=botocore.UNSIGNED),
    )

    # create an iterable list of function arguments
    # that we'll map to the download function
    download_args_list = []
    for image_id in image_ids:
        image_file_name = image_id + ".jpg"
        download_args = {
            "s3_client": s3_client,
            # object keys are laid out as "<section>/<image_id>.jpg"
            "image_file_object_path": section + "/" + image_file_name,
            "dest_file_path": os.path.join(images_directory, image_file_name),
        }
        download_args_list.append(download_args)

    # use a ThreadPoolExecutor to download the images in parallel
    # (_download_single_image is defined elsewhere in this module)
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:

        # use the executor to map the download function to the iterable of arguments
        list(tqdm(executor.map(_download_single_image, download_args_list),
                  total=len(download_args_list), desc="Downloading images"))
# ------------------------------------------------------------------------------
def _download_segmentations_by_image_id(
        mask_data: pd.core.groupby.DataFrameGroupBy,
        section: str,
        segmentation_meta_dir: str,
        segmentations_directory: str,
):
    """
    Downloads image segmentation masks from OpenImages dataset.

    :param mask_data: segmentation mask annotation rows grouped by image ID;
        each group's "MaskPath" column holds the mask image file names
    :param section: split section (train, validation, or test) where the image
        should be found
    :param segmentation_meta_dir: directory where the segmentation mask zip
        archives are (or will be) cached
    :param segmentations_directory: destination directory where the mask files
        are to be written
    """

    # imported here rather than at module level -- presumably to avoid a
    # circular import; TODO confirm and hoist if not needed
    from .download_segmentations import download_segmentation_zipfiles, extract_segmentation_mask, open_segmentation_zipfiles, close_segmentation_zipfiles

    # make sure all 16 shard archives for this split are present locally
    download_segmentation_zipfiles(_OID_v5, section, segmentation_meta_dir)

    _logger.info(f"Opening segmentation mask zip files")
    handle_map = open_segmentation_zipfiles(section, segmentation_meta_dir)

    # create an iterable list of function arguments
    # that we'll map to the download function
    download_args_list = []
    for image_id in mask_data.groups.keys():
        masks = mask_data.get_group(image_id)['MaskPath'].values.tolist()
        for mask_name in masks:
            download_args = {
                "handle_map": handle_map,
                "section": section,
                "mask_filename": mask_name,
                "dest_file_path": segmentations_directory,
            }
            download_args_list.append(download_args)

    # Use a ThreadPoolExecutor to extract the images in parallel.
    #
    # Note: max_workers set to 1 since any actual parallelism causes a crash due
    # decompression errors, probably related to the shared archive handles.
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:

        # use the executor to map the extraction function to the iterable of arguments
        list(tqdm(executor.map(extract_segmentation_mask, download_args_list),
                  total=len(download_args_list), desc="Extracting mask images"))

    close_segmentation_zipfiles(handle_map)
# ------------------------------------------------------------------------------
def _build_annotations(
        annotation_format: str,
        image_ids: List[str],
        bbox_groups: pd.core.groupby.DataFrameGroupBy,
        class_labels: List[str],
        class_index: int,
        images_directory: str,
        annotations_directory: str,
        include_segmentation_masks: bool = False,
):
    """
    Builds and saves annotations for a collection of images.

    :param annotation_format: "darknet" (YOLO) or "pascal" (PASCAL VOC)
    :param image_ids: IDs of the images to build annotations for
    :param bbox_groups: bounding box rows grouped by image ID, with columns
        XMin, XMax, YMin, and YMax
    :param class_labels: list of all class labels in the dataset
    :param class_index: index into class_labels of the class being annotated
    :param images_directory: directory where the image files should be located
    :param annotations_directory: destination directory where the annotation
        files are to be written
    :param include_segmentation_masks: passed through to the per-image builder
        -- presumably flags mask references in the annotations; TODO confirm
    """

    # create an iterable list of function arguments
    # that we'll map to the annotation builder function
    build_args_list = []
    for image_id in image_ids:

        # get all bounding boxes in the image for the label
        bboxes = bbox_groups.get_group(image_id)[['XMin', 'XMax', 'YMin', 'YMax']].values.tolist()

        # build a dictionary of arguments for the _build_annotation function
        # that will be called by one of the process pool's worker processes
        build_args = {
            "annotation_format": annotation_format,
            "bboxes": bboxes,
            "image_id": image_id,
            "images_dir": images_directory,
            "annotations_dir": annotations_directory,
            "include_segmentation_masks": include_segmentation_masks,
        }
        # each format needs a different notion of the class: its label
        # (pascal) or its numeric index (darknet)
        if annotation_format == "pascal":
            build_args["class_label"] = class_labels[class_index]
        elif annotation_format == "darknet":
            build_args["class_index"] = class_index
        else:
            raise ValueError(
                f"Unsupported annotation format: \"{annotation_format}\"",
            )
        build_args_list.append(build_args)

    # use a ProcessPoolExecutor to build the annotation files in parallel
    with concurrent.futures.ProcessPoolExecutor() as executor:

        # use the executor to map the build function to the iterable of arguments
        list(tqdm(executor.map(_build_annotation, build_args_list),
                  total=len(build_args_list)))
# ------------------------------------------------------------------------------
def _build_annotation(arguments: Dict):
    """
    Builds and saves an annotation file for an image.

    :param arguments: dictionary containing the following arguments:
        "annotation_format": "pascal" or "darknet"
        "bboxes": a list of bounding box lists with four elements: [xmin, ymin,
            xmax, ymax]
        "class_label": class label text (required for "pascal")
        "class_index": numeric class index (required for "darknet")
        "image_id": OpenImages image ID
        "images_dir": directory containing the image
        "annotations_dir": destination directory where the annotation file
            should be written
    """

    if arguments["annotation_format"] == "pascal":

        # write a PASCAL VOC file for this image
        # using all bounding boxes in the image's group
        _write_bboxes_as_pascal(
            arguments["bboxes"],
            arguments["class_label"],
            arguments["image_id"],
            arguments["images_dir"],
            arguments["annotations_dir"],
            # arguments["include_segmentation_masks"],
        )

    elif arguments["annotation_format"] == "darknet":

        # write a Darknet annotation file for this image
        # using all bounding boxes in the image's group
        _write_bboxes_as_darknet(
            arguments["bboxes"],
            arguments["class_index"],
            arguments["image_id"],
            arguments["images_dir"],
            arguments["annotations_dir"],
        )
    # elif arguments["annotation_format"] == "kitti":
    #     # TODO
    #     pass
    else:
        raise ValueError(
            f"Unsupported annotation format: \"{arguments['annotation_format']}\"",
        )
# ------------------------------------------------------------------------------
def _get_annotations_csv(
        split_section: str,
) -> str:
    """
    Requests the bounding-box annotations CSV for a split section.

    :param split_section: "train", "validation", or "test"
    :return: the CSV payload
    :raise ValueError: if the download fails
    """

    # the v4 layout keeps one bbox CSV per split under its own directory
    csv_url = f"{_OID_v4}{split_section}/{split_section}-annotations-bbox.csv"
    try:
        return download_file(csv_url)
    except ValueError as e:
        raise ValueError(
            f"Failed to get bounding box information for split section {split_section}.", e)
# ------------------------------------------------------------------------------
def _get_segmentations_csv(
        split_section: str,
) -> str:
    """
    Requests the segmentations CSV for a split section.

    :param split_section: "train", "validation", or "test"
    :return: the CSV payload
    :raise ValueError: if the download fails
    """

    # get the segmentation annotations CSV for the section
    url = _OID_v5 + split_section + "-annotations-object-segmentation.csv"
    try:
        contents = download_file(url)
    except ValueError as e:
        # bug fix: the previous message wrongly said "bounding box information"
        raise ValueError(
            f"Failed to get segmentation information for split section {split_section}.", e)

    return contents
# ------------------------------------------------------------------------------
def _group_bounding_boxes(
        section: str,
        label_codes: Dict,
        exclusion_ids: Set[str],
        meta_dir: str = None,
) -> Dict:
    """
    Builds a dictionary mapping each class label to a pandas DataFrameGroupBy
    object containing that class's bounding boxes grouped by image ID.

    :param section: the relevant split section, "train", "validation", or "test"
    :param label_codes: dictionary with class labels mapped to the
        corresponding OpenImages-specific code of the image class
    :param exclusion_ids: file IDs that should be excluded
    :param meta_dir: directory where the annotations CSV should be located,
        if not present it will be downloaded and stored here for future use
    :return: dictionary of class label to DataFrameGroupBy with bounding box
        columns grouped by image IDs
    """

    _logger.info(f"Reading bounding box data")
    if meta_dir is None:
        # no local cache directory: fetch the CSV and parse it from memory
        contents = _get_annotations_csv(section)
        df_images = pd.read_csv(io.BytesIO(contents))
    else:
        # download the annotations CSV file to the specified directory if not present
        bbox_csv_file_path = os.path.join(meta_dir, section + "-annotations-bbox.csv")
        if not os.path.exists(bbox_csv_file_path):
            contents = _get_annotations_csv(section)
            with open(bbox_csv_file_path, "wb") as annotations_file:
                annotations_file.write(contents)

        # read the CSV into a pandas DataFrame
        df_images = pd.read_csv(bbox_csv_file_path)

    # remove any rows which are identified to be excluded
    if exclusion_ids and (len(exclusion_ids) > 0):
        df_images = df_images[~df_images["ImageID"].isin(exclusion_ids)]

    # filter out images that are occluded, truncated, group, depiction, inside, etc.
    for reject_field in ("IsOccluded", "IsTruncated", "IsGroupOf", "IsDepiction", "IsInside"):
        df_images = df_images[df_images[reject_field] == 0]

    # drop the columns we won't need, keeping only
    # the image ID, label name and bounding box columns
    unnecessary_columns = [
        "IsOccluded",
        "IsTruncated",
        "IsGroupOf",
        "IsDepiction",
        "IsInside",
        "Source",
        "Confidence",
    ]
    # FIX: use a non-inplace drop so we don't mutate a potentially shared
    # frame and don't trigger pandas chained-assignment warnings
    df_images = df_images.drop(unnecessary_columns, axis=1)

    # create a dictionary and populate it with class labels mapped to
    # GroupByDataFrame objects with bounding boxes grouped by image ID
    labels_to_bounding_box_groups = {}
    for class_label, class_code in label_codes.items():

        # filter the DataFrame down to just the images for the class label
        df_label_images = df_images[df_images["LabelName"] == class_code]

        # drop the label name column since it's no longer needed
        df_label_images = df_label_images.drop(["LabelName"], axis=1)

        # FIX: group by the filtered frame's own ImageID column instead of
        # the full frame's series (the original relied on index alignment);
        # this also matches how _group_segments builds its groups
        labels_to_bounding_box_groups[class_label] = \
            df_label_images.groupby(df_label_images["ImageID"])

    # return the dictionary we've created
    return labels_to_bounding_box_groups
# ------------------------------------------------------------------------------
def _group_segments(
        section: str,
        label_codes: Dict,
        label_image_ids: Dict,
        meta_dir: str = None,
) -> Dict:
    """
    Builds a dictionary mapping each class label to a pandas DataFrameGroupBy
    object containing that class's segmentation rows grouped by image ID.

    Instead of allowing exclusions, this function accepts a list of image IDs to
    include. This is because the bounding box attribute information (IsOccluded
    etc.) is only available when reading that file. This construction allows
    using the list of bbox images to control for which images masks are fetched.

    :param section: the relevant split section, "train", "validation", or "test"
    :param label_codes: dictionary with class labels mapped to the
        corresponding OpenImages-specific code of the image class
    :param label_image_ids: dictionary with class labels mapped to the image IDs
        for which to download segmentation masks
    :param meta_dir: directory where the segmentations CSV should be located,
        if not present it will be downloaded and stored here for future use
    :return: dictionary of class label to DataFrameGroupBy with segmentation
        columns grouped by image IDs
    """

    _logger.info(f"Reading segmentation mask data")
    if meta_dir is None:
        # get the annotations CSV for the section
        contents = _get_segmentations_csv(section)

        # read the CSV into a pandas DataFrame
        df_images = pd.read_csv(io.BytesIO(contents))
    else:
        # download the annotations CSV file to the specified directory if not present
        bbox_csv_file_path = os.path.join(meta_dir, section + "-annotations-object-segmentation.csv")
        if not os.path.exists(bbox_csv_file_path):
            # get the annotations CSV for the section
            contents = _get_segmentations_csv(section)
            with open(bbox_csv_file_path, "wb") as annotations_file:
                annotations_file.write(contents)

        # read the CSV into a pandas DataFrame
        df_images = pd.read_csv(bbox_csv_file_path)

    # create a dictionary and populate it with class labels mapped to
    # GroupByDataFrame objects with segmentation data grouped by image ID
    labels_to_bounding_box_groups = {}
    for class_label, class_code in label_codes.items():

        # filter the DataFrame down to just the rows for the class label
        df_label_images = df_images[df_images["LabelName"] == class_code]

        # keep only masks for images we are asked to select
        image_ids = label_image_ids[class_label]
        df_label_images = df_label_images[df_label_images["ImageID"].isin(image_ids)]

        # drop the label name column since it's no longer needed
        # NOTE(review): inplace drop on a filtered slice can trigger pandas'
        # SettingWithCopyWarning; behavior is preserved as-is here
        df_label_images.drop(["LabelName"], axis=1, inplace=True)

        # map the class label to a GroupBy object with each
        # group's row containing the segmentation columns
        labels_to_bounding_box_groups[class_label] = \
            df_label_images.groupby(df_label_images["ImageID"])

    # return the dictionary we've created
    return labels_to_bounding_box_groups
# ------------------------------------------------------------------------------
def _write_bboxes_as_darknet(
        bboxes: List[List[float]],
        label_index: int,
        image_id: str,
        images_dir: str,
        darknet_dir: str,
) -> str:
    """
    Writes a Darknet annotation file containing the bounding boxes for an image.

    Each output line has the Darknet/YOLO label form:
    ``<class_index> <center_x> <center_y> <width> <height>`` with all four
    geometry values expressed as fractions of the image dimensions.

    :param bboxes: iterable of lists of bounding box coordinates [xmin, xmax,
        ymin, ymax], normalized to the [0, 1] range
    :param label_index: class label index
    :param image_id: image ID (should be the image's file name minus the file
        extension ".jpg")
    :param images_dir: directory where the image file is located
    :param darknet_dir: directory where the Darknet file should be written
    :return: path to the Darknet annotation file
    """

    # get the images' dimensions
    image_file_path = os.path.join(images_dir, image_id + ".jpg")
    image_width, image_height, _ = image_dimensions(image_file_path)

    # open the annotation file for writing bounding boxes one per line
    darknet_file_path = os.path.join(darknet_dir, image_id + ".txt")
    if os.path.exists(darknet_file_path):
        # an annotation file already exists for this image so append to it
        open_mode = "+a"
    else:
        # no annotation file exists yet for this image so create it
        open_mode = "+w"
    with open(darknet_file_path, open_mode) as darknet_file:

        # for each bounding box get the corresponding center x and y
        # as well as the bounding box's width and height in terms of
        # a decimal fraction of the total image dimension
        for bbox in bboxes:

            # find the bounding box's center X and Y, and width/height
            bbox_min_x, bbox_max_x, bbox_min_y, bbox_max_y = bbox
            bbox_width = (bbox_max_x - bbox_min_x) * image_width
            bbox_height = (bbox_max_y - bbox_min_y) * image_height
            bbox_width_fraction = bbox_width / image_width
            bbox_height_fraction = bbox_height / image_height
            bbox_center_x = (bbox_min_x * image_width) + (bbox_width / 2)
            bbox_center_y = (bbox_min_y * image_height) + (bbox_height / 2)
            bbox_center_fraction_x = bbox_center_x / image_width
            # renamed from the misleading bbox_height_fraction_y: this is the
            # box center's Y coordinate as a fraction of image height
            bbox_center_fraction_y = bbox_center_y / image_height

            # make sure we haven't overshot too much, if not then clip
            if bbox_width_fraction > 1.0:
                if (bbox_width_fraction - 1.0) > 0.025:
                    # we have a significant overshoot, something's off and
                    # we probably can't fix it without looking into the issue
                    # further so report it via the logger and skip
                    _logger.warning(
                        "Creation of Darknet annotation for image "
                        f"{image_id} results in an invalid (too "
                        "wide) width fraction",
                    )
                    continue
                else:
                    # clip to one
                    bbox_width_fraction = 1.0

            if bbox_width_fraction < 0.0:
                # BUG FIX: compare against -0.025 (the original compared
                # against +0.025, which is always true for a negative value,
                # so every slightly-negative box was skipped and the
                # clip-to-zero branch below was unreachable)
                if bbox_width_fraction < -0.025:
                    # we have a significant overshoot, something's off and
                    # we probably can't fix it without looking into the issue
                    # further so report it via the logger and skip
                    _logger.warning(
                        "Creation of Darknet annotation for image "
                        f"{image_id} results in an invalid ("
                        "negative) width fraction -- skipping this box",
                    )
                    continue
                else:
                    # clip to zero
                    bbox_width_fraction = 0.0

            if bbox_height_fraction > 1.0:
                if (bbox_height_fraction - 1.0) > 0.025:
                    # we have a significant overshoot, something's off and
                    # we probably can't fix it without looking into the issue
                    # further so report it via the logger and skip
                    _logger.warning(
                        "Creation of Darknet annotation for image "
                        f"{image_id} results in an invalid ("
                        "too tall) height fraction -- skipping this box",
                    )
                    continue
                else:
                    # clip to 1.0
                    bbox_height_fraction = 1.0

            if bbox_height_fraction < 0.0:
                # BUG FIX: same sign error as the width check above
                if bbox_height_fraction < -0.025:
                    # we have a significant overshoot, something's off and
                    # we probably can't fix it without looking into the issue
                    # further so report it via the logger and skip
                    _logger.warning(
                        "Creation of Darknet annotation for image "
                        f"{image_id} results in an invalid ("
                        "negative) height fraction -- skipping this box",
                    )
                    continue
                else:
                    # clip to zero
                    bbox_height_fraction = 0.0

            if (bbox_width < 0.0) or (bbox_height < 0.0):
                # something's off and we probably can't fix it without looking
                # into the issue further so report it via the logger and skip
                _logger.warning(
                    "Creation of Darknet annotation for image "
                    f"{image_id} results in an invalid ("
                    "negative) width or height -- skipping this box",
                )
                continue

            # write the bounding box info into the file
            darknet_file.write(
                f"{label_index} {bbox_center_fraction_x} "
                f"{bbox_center_fraction_y} "
                f"{bbox_width_fraction} "
                f"{bbox_height_fraction}\n",
            )

    return darknet_file_path
# ------------------------------------------------------------------------------
def _write_bboxes_as_pascal(
        bboxes: List[List[float]],
        label: str,
        image_id: str,
        images_dir: str,
        pascal_dir: str,
) -> int:
    """
    Writes a PASCAL VOC (XML) annotation file containing the bounding boxes for
    an image.

    :param bboxes: iterable of lists of bounding box coordinates
        [xmin, xmax, ymin, ymax], normalized to [0, 1] -- note: the indexing
        below treats bbox[1] as xmax and bbox[2] as ymin (the original
        docstring's "[xmin, ymin, xmax, ymax]" did not match the code)
    :param label: class label
    :param image_id: ID of the image file (typically the image file name
        minus ".jpg" or ".png")
    :param images_dir: directory where the image file is located
    :param pascal_dir: directory where the PASCAL file should be written
    :return: 0 for success, 1 for failure
    """

    # get the image dimensions; an unreadable/missing image is reported and
    # the annotation for it skipped
    image_file_name = image_id + ".jpg"
    image_path = os.path.join(images_dir, image_file_name)
    try:
        img_width, img_height, img_depth = image_dimensions(image_path)
    except OSError as error:
        _logger.warning(
            "Unable to create PASCAL annotation for image "
            f"{image_file_name} -- skipping",
            error
        )
        return 1

    normalized_image_path = os.path.normpath(image_path)
    folder_name, image_file_name = normalized_image_path.split(os.path.sep)[-2:]

    # TODO
    # The below creates a fresh tree in all cases for later writing to the
    # annotation XML file. We should instead first see if the annotation file
    # already exists and if so then add the annotations (bounding boxes) to
    # the existing element tree before we then rewrite the XML file.

    # build the standard PASCAL VOC element tree
    annotation = etree.Element('annotation')
    folder = etree.SubElement(annotation, "folder")
    folder.text = folder_name
    filename = etree.SubElement(annotation, "filename")
    filename.text = image_file_name
    path = etree.SubElement(annotation, "path")
    path.text = normalized_image_path
    source = etree.SubElement(annotation, "source")
    database = etree.SubElement(source, "database")
    database.text = "OpenImages"
    size = etree.SubElement(annotation, "size")
    width = etree.SubElement(size, "width")
    width.text = str(img_width)
    height = etree.SubElement(size, "height")
    height.text = str(img_height)
    depth = etree.SubElement(size, "depth")
    depth.text = str(img_depth)
    segmented = etree.SubElement(annotation, "segmented")
    segmented.text = "0"
    # one <object> element per bounding box, with pixel coordinates clamped
    # to the image bounds
    for bbox in bboxes:
        obj = etree.SubElement(annotation, "object")
        name = etree.SubElement(obj, "name")
        name.text = label
        pose = etree.SubElement(obj, "pose")
        pose.text = "Unspecified"
        truncated = etree.SubElement(obj, "truncated")
        truncated.text = "0"
        difficult = etree.SubElement(obj, "difficult")
        difficult.text = "0"
        bndbox = etree.SubElement(obj, "bndbox")
        xmin = etree.SubElement(bndbox, "xmin")
        xmin.text = str(max(0, int(bbox[0] * img_width)))
        xmax = etree.SubElement(bndbox, "xmax")
        xmax.text = str(min(img_width - 1, int(bbox[1] * img_width)))
        ymin = etree.SubElement(bndbox, "ymin")
        ymin.text = str(max(0, int(bbox[2] * img_height)))
        ymax = etree.SubElement(bndbox, "ymax")
        ymax.text = str(min(img_height - 1, int(bbox[3] * img_height)))

    # write the XML to file (pretty_print implies lxml's etree, not stdlib)
    pascal_file_path = os.path.join(pascal_dir, image_id + ".xml")
    with open(pascal_file_path, 'w') as pascal_file:
        pascal_file.write(etree.tostring(annotation, pretty_print=True, encoding='utf-8').decode("utf-8"))

    return 0
# ------------------------------------------------------------------------------
def _download_single_image(arguments: Dict):
    """
    Downloads and saves an image file from the OpenImages dataset.

    :param arguments: dictionary containing the following arguments:
        "s3_client": an S3 client object
        "image_file_object_path": the S3 object path corresponding to the image
            file to be downloaded
        "dest_file_path": destination file path where the image should be
            written
    """
    dest_file_path = arguments["dest_file_path"]

    # skip images that were already downloaded on a previous run
    if os.path.exists(dest_file_path):
        return
    try:
        with open(dest_file_path, "wb") as dest_file:
            arguments["s3_client"].download_fileobj(
                "open-images-dataset",
                arguments["image_file_object_path"],
                dest_file,
            )
    except urllib3.exceptions.ProtocolError as error:
        # BUG FIX: remove the partially-written file, otherwise the existence
        # check above would silently skip this image on every future re-run
        try:
            os.remove(dest_file_path)
        except OSError:
            pass
        _logger.warning(
            f"Unable to download image {arguments['image_file_object_path']} -- skipping",
            error,
        )
# ------------------------------------------------------------------------------
def _parse_command_line() -> Dict:
    """
    Parses the command-line arguments shared by the dataset and image-only
    download entry points.

    :return: dictionary mapping argument names to parsed values
    """
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument(
        "--base_dir",
        type=str,
        required=True,
        help="path to the base output directory",
    )
    args_parser.add_argument(
        "--labels",
        type=str,
        required=True,
        nargs='+',
        help="object class to be fetched from OpenImages",
    )
    args_parser.add_argument(
        "--format",
        type=str,
        required=False,
        choices=["darknet", "pascal"],
        # BUG FIX: the help text previously advertised KITTI/TFRecord/COCO,
        # which the choices list does not actually accept
        help="output format: PASCAL or Darknet (YOLO)",
    )
    args_parser.add_argument(
        "--exclusions",
        type=str,
        required=False,
        help="path to file containing file IDs (one per line) to exclude from "
             "the final dataset",
    )
    args_parser.add_argument(
        "--meta_dir",
        type=str,
        required=False,
        help="path to a directory where CSV files for the OpenImages dataset "
             "metadata (annotations, descriptions, etc.) should be read and/or "
             "downloaded into for later use",
    )
    args_parser.add_argument(
        "--limit",
        type=int,
        required=False,
        help="maximum number of images to download per image class/label",
    )
    args_parser.add_argument(
        "--include_segmentation",
        default=False,
        action='store_true',
        help="whether or not to include segmentation annotations",
    )
    return vars(args_parser.parse_args())
# ------------------------------------------------------------------------------
def _entrypoint_download_dataset():
    """
    Command-line entry point for downloading images together with annotations.
    """
    cli_args = _parse_command_line()

    # an annotation format is mandatory when annotations are being produced
    if cli_args["format"] is None:
        raise argparse.ArgumentError(None, f"Missing the required '--format' argument")

    # segmentation downloads are served by a dedicated code path
    downloader = download_segmentation_dataset if cli_args["include_segmentation"] \
        else download_dataset
    downloader(
        cli_args["base_dir"],
        cli_args["labels"],
        cli_args["exclusions"],
        cli_args["format"],
        cli_args["meta_dir"],
        cli_args["limit"],
    )
# ------------------------------------------------------------------------------
def _entrypoint_download_images():
    """
    Command-line entry point for downloading images only (no annotations).
    """
    cli_args = _parse_command_line()

    # an annotation format makes no sense without annotations, so reject it
    if cli_args["format"] is not None:
        raise argparse.ArgumentError(None, "Invalid '--format' argument")

    download_images(
        cli_args["base_dir"],
        cli_args["labels"],
        cli_args["exclusions"],
        cli_args["meta_dir"],
        cli_args["limit"],
    )
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    """
    Usage:
    $ python download.py --base_dir /data/datasets/openimages \
        --format pascal --label Person --meta_dir /data/datasets/openimages
    """
    # direct script execution performs the full dataset download
    # (images plus annotations)
    _entrypoint_download_dataset()
| {"/src/openimages/download.py": ["/src/openimages/download_segmentations.py"]} |
48,763 | yusiningxin/sniper-pytorch | refs/heads/master | /lib/data_utils/load_data.py | # ---------------------------------------------------------------
# SNIPER: Efficient Multi-scale Training
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified from https://github.com/msracver/Deformable-ConvNets
# Modified by Mahyar Najibi
# ---------------------------------------------------------------
import numpy as np
import os
import cPickle
import gc
from dataset import imdb, coco
def load_gt_roidb(dataset_name, image_set_name, root_path, dataset_path, result_path=None,
                  flip=False):
    """Load the ground-truth roidb for the named dataset, optionally appending
    horizontally-flipped copies of every image record."""
    # resolve the dataset class by name (e.g. 'coco') and instantiate it
    dataset = eval(dataset_name)(image_set_name, root_path, dataset_path, result_path)
    gt_records = dataset.gt_roidb()
    return dataset.append_flipped_images(gt_records) if flip else gt_records
def load_proposal_roidb(dataset_name, image_set_name, root_path, dataset_path, result_path=None,
                        proposal='rpn', append_gt=True, flip=False, proposal_path='proposals', only_gt=False,
                        get_imdb=False, load_mask=False):
    """Load a proposal roidb (append_gt when training).

    :param dataset_name: name of a dataset class visible in this module (e.g. 'coco')
    :param proposal: proposal method name; resolved to imdb.<proposal>_roidb
    :param only_gt: when True, skip proposal loading and keep only gt boxes
    :param get_imdb: when True, also return the imdb object
    :return: roidb, or (roidb, imdb) when get_imdb is True
    """
    imdb = eval(dataset_name)(image_set_name, root_path, dataset_path, result_path, load_mask=load_mask)
    roidb = imdb.gt_roidb()
    if not only_gt:
        # IMPROVED: resolve the proposal-specific loader with getattr instead
        # of eval'ing a composed string -- same attribute lookup, no dynamic
        # code evaluation
        proposal_loader = getattr(imdb, proposal + '_roidb')
        roidb = proposal_loader(roidb, append_gt, proposal_path=proposal_path)
    else:
        # Make sure boxes are converted to float
        for r in roidb:
            r['boxes'] = r['boxes'].astype(np.float32)
    if flip:
        roidb = imdb.append_flipped_images(roidb)
    if get_imdb:
        return roidb, imdb
    return roidb
def merge_roidb(roidbs):
    """Concatenate a list of roidbs into one.

    The first list is extended in place and returned.
    """
    merged = roidbs[0]
    for extra in roidbs[1:]:
        merged.extend(extra)
    return merged
def add_chip_data(roidb, chip_meta_data_path=''):
    """Merge precomputed chip metadata (a pickled list with one dict per
    image) into the corresponding roidb entries, in place."""
    assert os.path.isfile(chip_meta_data_path), 'Chip meta data does not exists!'
    print('Loading chip meta data from : {}'.format(chip_meta_data_path))
    file = open(chip_meta_data_path, 'rb')
    # disabling the garbage collector while unpickling a large object avoids
    # repeated GC passes over the growing object graph
    gc.disable()
    chip_meta_data = cPickle.load(file)
    gc.enable()
    file.close()
    gc.collect()
    print('Done!')
    # print('Pre-computing valid proposals per chip....')
    # copy every metadata field onto its image's roidb entry
    for iroidb, imeta in zip(roidb, chip_meta_data):
        for k in imeta:
            iroidb[k] = imeta[k]
    # NOTE(review): this length check runs after the merge loop above; a
    # shorter metadata list would silently truncate the zip before the
    # assert fires -- confirm whether the check should precede the loop
    assert len(chip_meta_data) == len(roidb), 'Length of chip meta data should be the same as roidb'
def remove_small_boxes(roidb, max_scale=3, min_size=10):
    """Drop ground-truth boxes whose larger side, measured at the maximum
    training scale, is below ``min_size`` pixels.

    Mutates the roidb entries in place and returns the same list.
    """
    n_removed, n_total = 0, 0
    for entry in roidb:
        scaled = entry['boxes'] * max_scale
        box_w = scaled[:, 2] - scaled[:, 0] + 1
        box_h = scaled[:, 3] - scaled[:, 1] + 1
        keep = np.where(np.maximum(box_w, box_h) >= min_size)[0]
        n_total += box_w.shape[0]
        if keep.shape[0] < box_w.shape[0]:
            n_removed += box_w.shape[0] - keep.shape[0]
            # filter every per-box field consistently
            entry['gt_classes'] = entry['gt_classes'][keep]
            entry['max_classes'] = entry['max_classes'][keep]
            entry['max_overlaps'] = entry['max_overlaps'][keep]
            entry['gt_overlaps'] = entry['gt_overlaps'][keep, :]
            entry['boxes'] = entry['boxes'][keep, :]
    print('Removed {} small boxes out of {} boxes!'.format(n_removed, n_total))
    return roidb
def filter_roidb(roidb, config):
    """Remove roidb entries that contain no usable rois.

    :param roidb: list of roidb entry dicts with a 'max_overlaps' array
    :param config: config object providing TRAIN.FG_THRESH,
        TRAIN.BG_THRESH_HI and TRAIN.BG_THRESH_LO
    :return: new list containing only the valid entries
    """

    def is_valid(entry):
        """ valid images have at least 1 fg or bg roi """
        overlaps = entry['max_overlaps']
        fg_inds = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
        # the +0.0001 keeps boxes exactly at BG_THRESH_LO out of the bg set
        bg_inds = np.where((overlaps < config.TRAIN.BG_THRESH_HI) & (overlaps >= config.TRAIN.BG_THRESH_LO + 0.0001))[0]
        valid = len(fg_inds) > 0 or len(bg_inds) > 0
        return valid

    num = len(roidb)
    filtered_roidb = [entry for entry in roidb if is_valid(entry)]
    num_after = len(filtered_roidb)
    # BUG FIX: use the function-call form of print so the module also parses
    # under Python 3 (the original used a Python-2-only print statement)
    print('filtered %d roidb entries: %d -> %d' % (num - num_after, num, num_after))
    return filtered_roidb
def load_gt_segdb(dataset_name, image_set_name, root_path, dataset_path, result_path=None,
                  flip=False):
    """Load the ground-truth segmentation database for the named dataset,
    optionally appending horizontally-flipped copies."""
    # resolve the dataset class by name (e.g. 'coco') and instantiate it
    dataset = eval(dataset_name)(image_set_name, root_path, dataset_path, result_path)
    seg_records = dataset.gt_segdb()
    return dataset.append_flipped_images_for_segmentation(seg_records) if flip else seg_records
def merge_segdb(segdbs):
    """Concatenate a list of segdbs into one.

    The first list is extended in place and returned.
    """
    merged = segdbs[0]
    for extra in segdbs[1:]:
        merged.extend(extra)
    return merged
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,764 | yusiningxin/sniper-pytorch | refs/heads/master | /lib/iterators/PytorchTest.py | import torch.utils.data as data
import numpy as np
from multiprocessing import Pool
from data_utils.data_workers import im_worker
from multiprocessing.pool import ThreadPool
import math
import matplotlib.pyplot as plt
class PytorchTest(data.Dataset):
    """Test-time dataset: serves whole images resized to a fixed test scale.

    Batches are arranged so that every image in a batch shares one
    orientation (landscape vs. portrait), letting all images in a batch be
    padded to a single common tensor size.
    """

    def __init__(self, roidb, config, test_scale, batch_size=4, threads=8, nGPUs=1, pad_rois_to=400, crop_size=None, num_classes=None):
        self.crop_size = crop_size
        self.roidb = roidb
        self.batch_size = batch_size
        # fall back to the gt_overlaps column count when the class count is
        # not given explicitly
        self.num_classes = num_classes if num_classes else roidb[0]['gt_overlaps'].shape[1]
        self.data_name = ['data', 'im_info', 'im_ids']
        self.label_name = None
        self.cur_i = 0
        self.label = []
        self.context_size = 320
        # thread pool used to read/resize batch images concurrently
        self.thread_pool = ThreadPool(threads)
        self.im_worker = im_worker(crop_size=None if not self.crop_size else self.crop_size[0], cfg=config, target_size=test_scale)
        self.test_scale = test_scale
        self.reset()

    def __len__(self):
        return len(self.inds)

    def __getitem__(self, item):
        # the first index of each batch triggers loading of the whole batch;
        # later indices read from the cached batch arrays
        num = item % self.batch_size
        if num == 0:
            self.get_chip_label_per_batch()
        return self.im_tensor_batch[num], self.im_info_batch[num], self.im_ids[num]

    def get_chip_label_per_batch(self):
        """Load and preprocess the next batch_size images into the cached
        arrays im_tensor_batch, im_info_batch and im_ids."""
        cur_from = self.cur_i
        cur_to = self.cur_i + self.batch_size
        self.cur_i = self.cur_i + self.batch_size
        roidb = [self.roidb[self.inds[i]] for i in range(cur_from, cur_to)]
        im_ids = np.array([self.inds[i] for i in range(cur_from, cur_to)])
        # all images in a batch share orientation (see reset), so the first
        # record decides whether the batch is landscape or portrait
        hor_flag = True if roidb[0]['width'] >= roidb[0]['height'] else False
        max_size = [self.test_scale[0], self.test_scale[1]] if hor_flag else [self.test_scale[1], self.test_scale[0]]
        ims = []
        for i in range(self.batch_size):
            ims.append([roidb[i]['image'], max_size, roidb[i]['flipped']])
        im_info = np.zeros((self.batch_size, 3))
        # decode and resize the batch images in parallel
        processed_list = self.thread_pool.map(self.im_worker.worker, ims)
        im_tensor = np.zeros((self.batch_size, 3, max_size[0], max_size[1]), dtype=np.float32)
        for i, p in enumerate(processed_list):
            # p appears to be (image tensor, scale, (height, width)) --
            # confirm against im_worker.worker
            im_info[i] = [p[2][0], p[2][1], p[1]]
            im_tensor[i] = p[0]
        self.im_tensor_batch = im_tensor
        self.im_info_batch = im_info
        self.im_ids = im_ids

    def reset(self):
        """Rebuild the iteration order: group indices by orientation and pad
        each group to a multiple of the batch size."""
        self.cur_i = 0
        widths = np.array([r['width'] for r in self.roidb])
        heights = np.array([r['height'] for r in self.roidb])
        horz_inds = np.where(widths >= heights)[0]
        vert_inds = np.where(widths < heights)[0]
        # pad each orientation group with its own leading indices so batches
        # never mix landscape and portrait images
        if horz_inds.shape[0] % self.batch_size > 0:
            extra_horz = self.batch_size - (horz_inds.shape[0] % self.batch_size)
            horz_inds = np.hstack((horz_inds, horz_inds[0:extra_horz]))
        if vert_inds.shape[0] % self.batch_size > 0:
            extra_vert = self.batch_size - (vert_inds.shape[0] % self.batch_size)
            vert_inds = np.hstack((vert_inds, vert_inds[0:extra_vert]))
        inds = np.hstack((horz_inds, vert_inds))
        extra = inds.shape[0] % self.batch_size
        assert extra == 0, 'The number of samples here should be divisible by batch size'
        self.inds = inds
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,765 | yusiningxin/sniper-pytorch | refs/heads/master | /lib/iterators/PytorchIterator.py | import torch.utils.data as data
import numpy as np
from multiprocessing import Pool
from data_utils.data_workers import anchor_worker, im_worker, chip_worker
from multiprocessing.pool import ThreadPool
import math
import matplotlib.pyplot as plt
class PytorchIterator(data.Dataset):
    """Training iterator for SNIPER-style multi-scale chip sampling.

    Extracts fixed-size chips from each training image (via chip_worker),
    loads/crops chip images with a thread pool, and builds RPN anchor labels
    (via anchor_worker) with a process pool. Batches are materialized lazily:
    the first __getitem__ of each batch fills cached batch arrays.
    """

    def __init__(self, roidb, config, batch_size=4, threads=8, nGPUs=1, pad_rois_to=400, crop_size=(512, 512), single_size_change=False):
        self.cur_i = 0
        self.roidb = roidb
        self.batch_size = batch_size
        self.pixel_mean = config.network.PIXEL_MEANS
        # thread pool for image I/O; process pool (below) for CPU-heavy
        # chip extraction and anchor labeling
        self.thread_pool = ThreadPool(threads)
        # self.executor_pool = ThreadPoolExecutor(threads)
        self.n_per_gpu = batch_size / nGPUs
        self.batch = None
        self.cfg = config
        self.n_expected_roi = pad_rois_to
        # padding values used when a batch has fewer rois than expected
        self.pad_label = np.array(-1)
        self.pad_weights = np.zeros((1, 8))
        self.pad_targets = np.zeros((1, 8))
        self.pad_roi = np.array([[0, 0, 100, 100]])
        self.single_size_change = single_size_change
        self.crop_size = crop_size
        self.num_classes = roidb[0]['gt_overlaps'].shape[1]
        self.bbox_means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (self.num_classes, 1))
        self.bbox_stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (self.num_classes, 1))
        if config.TRAIN.WITH_MASK:
            # NOTE(review): self.label_name is never initialized in this
            # class, so this append raises AttributeError when WITH_MASK is
            # enabled -- confirm intended behavior
            self.label_name.append('gt_masks')
        self.pool = Pool(config.TRAIN.NUM_PROCESS)
        self.epiter = 0
        self.im_worker = im_worker(crop_size=self.crop_size[0], cfg=config)
        self.chip_worker = chip_worker(chip_size=self.crop_size[0], cfg=config)
        self.anchor_worker = anchor_worker(chip_size=self.crop_size[0], cfg=config)
        self.get_chip()
        # self.get_all_data_and_label()

    def __len__(self):
        return len(self.inds)

    def __getitem__(self, item):
        # the first index of each batch triggers loading of the whole batch;
        # later indices read from the cached batch arrays
        num = item % self.batch_size
        if num == 0:
            self.get_chip_label_per_batch()
        if not self.cfg.TRAIN.ONLY_PROPOSAL:
            return self.data_batch[0][num], self.data_batch[1][num], self.data_batch[2][num], self.label_batch[0][num]
        else:
            return self.data_batch[0][num], self.label_batch[0][num], self.label_batch[1][num], self.label_batch[2][num]

    def get_chip_label_per_batch(self):
        """Load the next batch of chips and build the matching label arrays,
        caching them in self.data_batch / self.label_batch."""
        cur_from = self.cur_i
        cur_to = self.cur_i + self.batch_size
        # wrap around so training can loop over the chip list indefinitely
        self.cur_i = (self.cur_i + self.batch_size) % len(self.inds)
        roidb = [self.roidb[self.inds[i]] for i in range(cur_from, cur_to)]
        # pick each image's next chip in its (shuffled) chip order
        cropids = [self.roidb[self.inds[i]]['chip_order'][self.crop_idx[self.inds[i]] % len(self.roidb[self.inds[i]]['chip_order'])] for i in range(cur_from, cur_to)]
        n_batch = len(roidb)
        ims = []
        for i in range(n_batch):
            ims.append([roidb[i]['image'], roidb[i]['crops'][cropids[i]], roidb[i]['flipped']])
        # kick off asynchronous image loading/cropping while labels are built
        processed_list = self.thread_pool.map_async(self.im_worker.worker, ims)
        # advance each image's chip cursor
        for i in range(cur_from, cur_to):
            self.crop_idx[self.inds[i]] = self.crop_idx[self.inds[i]] + 1
        processed_roidb = []
        for i in range(len(roidb)):
            tmp = roidb[i].copy()
            scale = roidb[i]['crops'][cropids[i]][1]
            tmp['im_info'] = [self.crop_size[0], self.crop_size[1], scale]
            processed_roidb.append(tmp)
        worker_data = []
        # srange: per-chip [min, max] valid object-size range (in chip pixels)
        srange = np.zeros((len(processed_roidb), 2))
        # chipinfo: per-chip (height, width, scale)
        chipinfo = np.zeros((len(processed_roidb), 3))
        for i in range(len(processed_roidb)):
            cropid = cropids[i]
            nids = processed_roidb[i]['props_in_chips'][cropid]
            # ground-truth boxes are the entries with full overlap
            gtids = np.where(processed_roidb[i]['max_overlaps'] == 1)[0]
            gt_boxes = processed_roidb[i]['boxes'][gtids, :]
            boxes = processed_roidb[i]['boxes'].copy()
            cur_crop = processed_roidb[i]['crops'][cropid][0]
            im_scale = processed_roidb[i]['crops'][cropid][1]
            height = processed_roidb[i]['crops'][cropid][2]
            width = processed_roidb[i]['crops'][cropid][3]
            classes = processed_roidb[i]['max_classes'][gtids]
            if self.cfg.TRAIN.WITH_MASK:
                gt_masks = processed_roidb[i]['gt_masks']
            # pick the valid size range matching this chip's scale
            for scalei, cscale in enumerate(self.cfg.TRAIN.SCALES):
                if scalei == len(self.cfg.TRAIN.SCALES) - 1:
                    # Last or only scale
                    srange[i, 0] = 0 if self.cfg.TRAIN.VALID_RANGES[scalei][0] < 0 else \
                        self.cfg.TRAIN.VALID_RANGES[scalei][0] * im_scale
                    srange[i, 1] = self.crop_size[1] if self.cfg.TRAIN.VALID_RANGES[scalei][1] < 0 else \
                        self.cfg.TRAIN.VALID_RANGES[scalei][1] * im_scale  # max scale
                elif im_scale == cscale:
                    # Intermediate scale
                    srange[i, 0] = 0 if self.cfg.TRAIN.VALID_RANGES[scalei][0] < 0 else \
                        self.cfg.TRAIN.VALID_RANGES[scalei][0] * self.cfg.TRAIN.SCALES[scalei]
                    srange[i, 1] = self.crop_size[1] if self.cfg.TRAIN.VALID_RANGES[scalei][1] < 0 else \
                        self.cfg.TRAIN.VALID_RANGES[scalei][1] * self.cfg.TRAIN.SCALES[scalei]
                    break
            chipinfo[i, 0] = height
            chipinfo[i, 1] = width
            chipinfo[i, 2] = im_scale
            argw = [processed_roidb[i]['im_info'], cur_crop, im_scale, nids, gtids, gt_boxes, boxes,
                    classes.reshape(len(classes), 1)]
            if self.cfg.TRAIN.WITH_MASK:
                argw += [gt_masks]
            worker_data.append(argw)
        # build anchor labels in the process pool
        all_labels = self.pool.map(self.anchor_worker.worker, worker_data)
        # NOTE(review): this relies on Python-2 integer division for the
        # feature map sizes; under Python 3 these become floats
        feat_width = self.crop_size[1] / self.cfg.network.RPN_FEAT_STRIDE
        feat_height = self.crop_size[0] / self.cfg.network.RPN_FEAT_STRIDE
        labels = np.zeros((n_batch, self.cfg.network.NUM_ANCHORS * feat_height * feat_width))
        bbox_targets = np.zeros((n_batch, self.cfg.network.NUM_ANCHORS * 4, feat_height, feat_width))
        bbox_weights = np.zeros((n_batch, self.cfg.network.NUM_ANCHORS * 4, feat_height, feat_width))
        # gt_boxes padded to a fixed 100 rows with -1
        gt_boxes = -np.ones((n_batch, 100, 5))
        if self.cfg.TRAIN.WITH_MASK:
            encoded_masks = -np.ones((n_batch, 100, 500))
        for i in range(len(all_labels)):
            # labels[i] = all_labels[i][0][0]
            # pids = all_labels[i][2]
            # if len(pids[0]) > 0:
            #     bbox_targets[i][pids[0], pids[1], pids[2]] = all_labels[i][1]
            #     bbox_weights[i][pids[0], pids[1], pids[2]] = 1.0
            gt_boxes[i] = all_labels[i][3]
            # if self.cfg.TRAIN.WITH_MASK:
            #     encoded_masks[i] = all_labels[i][4]
        im_tensor = np.zeros((n_batch, 3, self.crop_size[0], self.crop_size[1]), dtype=np.float32)
        # wait for the asynchronous image loading started above
        processed_list = processed_list.get()
        for i in range(len(processed_list)):
            im_tensor[i] = processed_list[i]
        self.data_batch = [im_tensor] if self.cfg.TRAIN.ONLY_PROPOSAL else \
            [im_tensor, srange, chipinfo]
        self.label_batch = [labels, bbox_targets, bbox_weights] if self.cfg.TRAIN.ONLY_PROPOSAL else \
            [gt_boxes]
        if self.cfg.TRAIN.WITH_MASK:
            self.label_batch.append(np.array(encoded_masks))
        # self.visualize(im_tensor, gt_boxes)
        # return mx.io.DataBatch(data=self.data, label=self.label, pad=self.getpad(), index=self.getindex(),
        #                        provide_data=self.provide_data, provide_label=self.provide_label)

    def get_chip(self):
        """Extract chips for every image, assign boxes to chips, optionally
        append negative chips, and build a shuffled chip iteration order."""
        self.cur_i = 0
        self.n_neg_per_im = 2
        self.crop_idx = [0] * len(self.roidb)
        self.chip_worker.reset()
        # Devide the dataset and extract chips for each part
        n_per_part = int(math.ceil(len(self.roidb) / float(self.cfg.TRAIN.CHIPS_DB_PARTS)))
        chips = []
        # generate chips including all gt_boxes, 3 scales
        for i in range(self.cfg.TRAIN.CHIPS_DB_PARTS):
            chips += self.pool.map(self.chip_worker.chip_extractor,
                                   self.roidb[i * n_per_part:min((i + 1) * n_per_part, len(self.roidb))])
        chip_count = 0
        for i, r in enumerate(self.roidb):
            cs = chips[i]
            chip_count += len(cs)
            r['crops'] = cs
        # assign each box/proposal to the chips that contain it
        all_props_in_chips = []
        for i in range(self.cfg.TRAIN.CHIPS_DB_PARTS):
            all_props_in_chips += self.pool.map(self.chip_worker.box_assigner,
                                                self.roidb[i * n_per_part:min((i + 1) * n_per_part, len(self.roidb))])
        for ps, cur_roidb in zip(all_props_in_chips, self.roidb):
            cur_roidb['props_in_chips'] = ps[0]
            if self.cfg.TRAIN.USE_NEG_CHIPS:
                cur_roidb['neg_crops'] = ps[1]
                cur_roidb['neg_props_in_chips'] = ps[2]
        chipindex = []
        if self.cfg.TRAIN.USE_NEG_CHIPS:
            # Append negative chips (at most n_neg_per_im randomly chosen)
            for i, r in enumerate(self.roidb):
                cs = r['neg_crops']
                if len(cs) > 0:
                    sel_inds = np.arange(len(cs))
                    if len(cs) > self.n_neg_per_im:
                        sel_inds = np.random.permutation(sel_inds)[0:self.n_neg_per_im]
                    for ind in sel_inds:
                        chip_count = chip_count + 1
                        r['crops'].append(r['neg_crops'][ind])
                        r['props_in_chips'].append(r['neg_props_in_chips'][ind].astype(np.int32))
                # one chipindex entry per chip, pointing back to its image
                for j in range(len(r['crops'])):
                    chipindex.append(i)
        else:
            for i, r in enumerate(self.roidb):
                for j in range(len(r['crops'])):
                    chipindex.append(i)
        print('Total number of extracted chips: {}'.format(chip_count))
        blocksize = self.batch_size
        chipindex = np.array(chipindex)
        # pad the chip list so its length is a multiple of the batch size
        if chipindex.shape[0] % blocksize > 0:
            extra = blocksize - (chipindex.shape[0] % blocksize)
            chipindex = np.hstack((chipindex, chipindex[0:extra]))
            print('add extra chips: {}'.format(chipindex.shape))
        allinds = np.random.permutation(chipindex)
        # allinds = chipindex
        self.inds = np.array(allinds, dtype=int)
        # randomize the within-image chip visit order as well
        for r in self.roidb:
            r['chip_order'] = np.random.permutation(np.arange(len(r['crops'])))
            # r['chip_order'] = range(len(r['crops']))
        print("Get Chip Done")
        # self.epiter = self.epiter + 1
        # self.size = len(self.inds)

    def visualize(self, im_tensor, boxes):
        """Debug helper: dump every batch image with its gt boxes drawn to a
        hard-coded directory (not used in the training path)."""
        for imi in range(im_tensor.shape[0]):
            im = np.zeros((im_tensor.shape[2], im_tensor.shape[3], 3), dtype=np.uint8)
            for i in range(3):
                # re-adds the pixel mean; the reversed channel index suggests
                # a BGR->RGB conversion -- confirm against im_worker
                im[:, :, i] = im_tensor[imi, i, :, :] + self.pixel_mean[2 - i]
            # Visualize positives
            plt.imshow(im)
            cboxes = boxes[imi]
            for box in cboxes:
                rect = plt.Rectangle((box[0], box[1]),
                                     box[2] - box[0],
                                     box[3] - box[1], fill=False,
                                     edgecolor='green', linewidth=3.5)
                plt.gca().add_patch(rect)
            num = np.random.randint(100000)
            plt.savefig('/home/liuqiuyue/debug_0/test_{}_pos.png'.format(num))
            plt.cla()
            plt.clf()
            plt.close()
48,766 | yusiningxin/sniper-pytorch | refs/heads/master | /lib/chips/chip_generator.py | # --------------------------------------------------------------
# SNIPER: Efficient Multi-Scale Training
# Licensed under The Apache-2.0 License [see LICENSE for details]
# by Mahyar Najibi and Bharat Singh
# --------------------------------------------------------------
import chips
from bbox.bbox_transform import clip_boxes, ignore_overlaps
import numpy as np
class chip_generator(object):
    """Generate image chips (square crops) that jointly cover a set of boxes.

    Candidate chips are laid out on a sliding-window grid (plus image
    corners/edges for worst-case coverage); a greedy set cover then selects
    the minimal subset of chips such that every box fits inside some chip.
    """

    def __init__(self, chip_stride=32, use_cpp=True):
        # NOTE(review): the C++ path is deliberately disabled — the
        # `use_cpp` argument is accepted for interface compatibility but
        # ignored. Flip this to `use_cpp` once the `chips` extension is
        # verified to work in this port.
        self.use_cpp = False
        self.chip_stride = chip_stride

    def generate(self, boxes, width, height, chipsize):
        """Return a list of chips [x1, y1, x2, y2] covering `boxes`."""
        if self.use_cpp:
            return self._cgenerate(boxes, width, height, chipsize, self.chip_stride)
        else:
            return self._pygenerate(boxes, width, height, chipsize, self.chip_stride)

    @staticmethod
    def _cgenerate(boxes, width, height, chipsize, stride):
        # Delegate to the compiled `chips` extension module.
        boxes = clip_boxes(boxes, np.array([height - 1, width - 1]))
        return chips.generate(np.ascontiguousarray(boxes, dtype=np.float32),
                              width, height, chipsize, stride)

    @staticmethod
    def _pygenerate(boxes, width, height, chipsize, stride):
        # Candidate chips. Renamed from `chips` to avoid shadowing the
        # `chips` extension module imported at the top of this file.
        cands = []
        boxes = clip_boxes(boxes, np.array([height - 1, width - 1]))
        # ensure coverage of image for worst case
        # corners
        cands.append([max(width - chipsize, 0), 0, width - 1, min(chipsize, height - 1)])
        cands.append([0, max(height - chipsize, 0), min(chipsize, width - 1), height - 1])
        cands.append([max(width - chipsize, 0), max(height - chipsize, 0), width - 1, height - 1])
        # regular sliding-window grid
        for i in range(0, width - int(chipsize), stride):
            for j in range(0, height - int(chipsize), stride):
                x1 = i
                y1 = j
                x2 = i + chipsize - 1
                y2 = j + chipsize - 1
                cands.append([x1, y1, x2, y2])
        # right-edge column
        for j in range(0, height - int(chipsize), stride):
            x1 = max(width - chipsize - 1, 0)
            y1 = j
            x2 = width - 1
            y2 = j + chipsize - 1
            cands.append([x1, y1, x2, y2])
        # bottom-edge row
        for i in range(0, width - int(chipsize), stride):
            x1 = i
            y1 = max(height - chipsize - 1, 0)
            x2 = i + chipsize - 1
            y2 = height - 1
            cands.append([x1, y1, x2, y2])
        # FIX: `np.float` was removed in NumPy >= 1.20; np.float64 is the
        # identical dtype (the old name was an alias for Python float).
        cands = np.array(cands).astype(np.float64)
        p = np.random.permutation(cands.shape[0])
        cands = cands[p]
        overlaps = ignore_overlaps(cands, boxes.astype(np.float64))
        chip_matches = []
        num_matches = []
        for j in range(len(cands)):
            nvids = np.where(overlaps[j, :] == 1)[0]
            chip_matches.append(set(nvids.tolist()))
            num_matches.append(len(nvids))
        fchips = []
        totalmatches = 0
        # Greedy set cover: repeatedly pick the chip containing the most
        # still-uncovered boxes until all boxes are covered.
        while True:
            max_matches = 0
            max_match = max(num_matches)
            mid = np.argmax(np.array(num_matches))
            if max_match == 0:
                break
            if max_match > max_matches:
                max_matches = max_match
                maxid = mid
            bestchip = chip_matches[maxid]
            fchips.append(cands[maxid])
            totalmatches = totalmatches + max_matches
            # now remove all rois in bestchip
            for j in range(len(num_matches)):
                chip_matches[j] = chip_matches[j] - bestchip
                num_matches[j] = len(chip_matches[j])
        return fchips
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,767 | yusiningxin/sniper-pytorch | refs/heads/master | /lib/train_utils/train_one_batch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import time
import numpy
class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        if self.count < 1e-5:
            self.avg = 0
        else:
            self.avg = self.sum / self.count
def pos_neg_recall(output, target):
    """Compute per-class recall and accuracy from classification outputs.

    Args:
        output: (N, C) tensor of class scores (torch tensor on any device).
        target: (N,) tensor of integer labels; label 0 is "negative",
            anything > 0 is "positive".

    Returns:
        (pos_recall, neg_recall, acc, pos_num, neg_num) — recall over the
        positive and negative subsets, overall accuracy, and the subset sizes.
        Each recall (and now `acc` too) is 0 when its denominator is empty.
    """
    output = numpy.float32(output.data.cpu().numpy())
    pred = output.argmax(axis=1)
    label = numpy.int32(target.data.cpu().numpy())
    correct = (pred == label)
    neg_label = (label == 0)
    neg_num = neg_label.sum()
    neg_recall_num = numpy.sum(correct * neg_label)
    pos_label = (label > 0)
    pos_num = pos_label.sum()
    pos_recall_num = numpy.sum(correct * pos_label)
    correct_num = numpy.sum(correct)
    pos_recall = pos_recall_num * 1.0 / pos_num if pos_num != 0 else 0
    neg_recall = neg_recall_num * 1.0 / neg_num if neg_num != 0 else 0
    # FIX: guard the accuracy denominator like the recalls; previously an
    # empty batch raised ZeroDivisionError here.
    total = pos_num + neg_num
    acc = correct_num * 1.0 / total if total != 0 else 0
    return pos_recall, neg_recall, acc, pos_num, neg_num
def train_one_batch(train_model, optimizer, meters, data, valid_range, im_info, label, bbox_target, bbox_weight, gt_boxes, epoch_index, batch_index):
    """Run one SGD step of the detector and update the running meters.

    Moves the batch to the GPU, runs a forward pass, backpropagates the sum
    of RPN and RCNN classification/box losses, and records losses, recalls
    and timing in `meters`. Every 100 batches a summary line is printed and
    the meters are reset.

    Args:
        train_model: the detection network (returns the 10-tuple below).
        optimizer: torch optimizer stepping the model parameters.
        meters: dict of AverageMeter keyed by the metric names used below.
        data .. gt_boxes: CPU tensors for one batch.
        epoch_index, batch_index: current position, used only for logging.
    """
    train_model.train()
    t0 = time.time()
    # Ship the whole batch to the GPU as float tensors.
    data_var = data.float().cuda()
    valid_range_var = valid_range.float().cuda()
    im_info_var = im_info.float().cuda()
    label_var = label.float().cuda()
    bbox_target_var = bbox_target.float().cuda()
    bbox_weight_var = bbox_weight.float().cuda()
    gt_boxes_var = gt_boxes.float().cuda()
    rois, rpn_cls_prob, rpn_label, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox, rois_label = train_model(data_var, im_info_var, valid_range_var, label_var, bbox_target_var, bbox_weight_var, gt_boxes_var)
    # Diagnostics for both the RCNN head and the RPN.
    pos_recall, neg_recall, acc, pos_num, neg_num = pos_neg_recall(cls_prob, rois_label)
    rpn_pos_recall, rpn_neg_recall, rpn_acc, rpn_pos_num, rpn_neg_num = pos_neg_recall(rpn_cls_prob, rpn_label)
    optimizer.zero_grad()
    # Total loss = RPN (cls + box) + RCNN head (cls + box).
    loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
    loss.backward()
    optimizer.step()
    t1 = time.time()
    meters['loss'].update(loss.item(), 1)
    meters['batch_time'].update(t1 - t0, 1)
    meters['rpn_cls_loss'].update(rpn_loss_cls.data.mean(), 1)
    meters['rpn_box_loss'].update(rpn_loss_box.data.mean(), 1)
    meters['rcnn_cls_loss'].update(RCNN_loss_cls.data.mean(), 1)
    meters['rcnn_box_loss'].update(RCNN_loss_bbox.data.mean(), 1)
    meters['acc'].update(acc, 1)
    meters['neg_recall'].update(neg_recall, 1)
    meters['pos_recall'].update(pos_recall, 1)
    meters['pos_num'].update(pos_num, 1)
    meters['neg_num'].update(neg_num, 1)
    meters['rpn_acc'].update(rpn_acc, 1)
    meters['rpn_neg_recall'].update(rpn_neg_recall, 1)
    meters['rpn_pos_recall'].update(rpn_pos_recall, 1)
    meters['rpn_pos_num'].update(rpn_pos_num, 1)
    meters['rpn_neg_num'].update(rpn_neg_num, 1)
    # Periodic console report; averages restart after each report.
    if batch_index % 100 == 0:
        print(epoch_index, batch_index, 'Batch_time: %.4f sum_Loss: %.4f rpn_cls_loss: %.4f rpn_box_loss: %.4f rcnn_cls_loss: %.4f rcnn_box_loss: %.4f pos_recall: %.4f neg_recall: %.4f acc: %.4f pos_num: %5d neg_num: %5d rpn_pos_recall: %.4f rpn_neg_recall: %.4f rpn_acc: %.4f rpn_pos_num: %5d rpn_neg_num: %5d' % (meters['batch_time'].avg, meters['loss'].avg, meters['rpn_cls_loss'].avg, meters['rpn_box_loss'].avg, meters['rcnn_cls_loss'].avg, meters['rcnn_box_loss'].avg, meters['pos_recall'].avg, meters['neg_recall'].avg, meters['acc'].avg, meters['pos_num'].avg, meters['neg_num'].avg, meters['rpn_pos_recall'].avg, meters['rpn_neg_recall'].avg, meters['rpn_acc'].avg, meters['rpn_pos_num'].avg, meters['rpn_neg_num'].avg))
        for k in meters.keys():
            meters[k].reset()
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,768 | yusiningxin/sniper-pytorch | refs/heads/master | /main_dataset.py |
import init
import os
import sys
import torch
os.environ["CUDA_VISIBLE_DEVICES"] = '1,2,3'
nGPUs = 3
import matplotlib
matplotlib.use('Agg')
import torch.nn as nn
from configs.faster.default_configs import config, update_config, update_config_from_list
from iterators.PytorchIterator import PytorchIterator
from data_utils.load_data import load_proposal_roidb, merge_roidb, filter_roidb
from bbox.bbox_regression import add_bbox_regression_targets
from iterators.PytorchIterator import PytorchIterator
import argparse
import logging
import math
import logging.config
from model.faster_rcnn.resnet import resnet
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.utils.net_utils import weights_normal_init, save_net, load_net, adjust_learning_rate, save_checkpoint, clip_gradient
import time
import numpy
class AverageMeter(object):
    """Keeps the last value plus a running sum/count/average of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Fold in `val`, weighted by `n` occurrences."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = (self.sum / self.count) if self.count >= 1e-5 else 0
def pos_neg_recall(output, target):
    """Recall over positive (label > 0) and negative (label == 0) samples.

    Args:
        output: (N, C) torch tensor of class scores.
        target: (N,) torch tensor of integer labels.

    Returns:
        (pos_recall, neg_recall, acc, pos_num, neg_num); every ratio is 0
        when its denominator is empty (previously `acc` raised
        ZeroDivisionError on an empty batch).
    """
    output = numpy.float32(output.data.cpu().numpy())
    pred = output.argmax(axis=1)
    label = numpy.int32(target.data.cpu().numpy())
    correct = (pred == label)
    neg_label = (label == 0)
    neg_num = neg_label.sum()
    neg_recall_num = numpy.sum(correct * neg_label)
    pos_label = (label > 0)
    pos_num = pos_label.sum()
    pos_recall_num = numpy.sum(correct * pos_label)
    correct_num = numpy.sum(correct)
    pos_recall = pos_recall_num * 1.0 / pos_num if pos_num != 0 else 0
    neg_recall = neg_recall_num * 1.0 / neg_num if neg_num != 0 else 0
    # FIX: guard against an empty batch, consistent with the recalls above.
    total = pos_num + neg_num
    acc = correct_num * 1.0 / total if total != 0 else 0
    return pos_recall, neg_recall, acc, pos_num, neg_num
def parser():
    """Parse command-line arguments for SNIPER training."""
    ap = argparse.ArgumentParser('SNIPER training module')
    ap.add_argument('--cfg', dest='cfg', type=str,
                    default='configs/faster/sniper_res101_e2e.yml',
                    help='Path to the config file')
    ap.add_argument('--display', dest='display', type=int, default=100,
                    help='Number of epochs between displaying loss info')
    ap.add_argument('--momentum', dest='momentum', type=float, default=0.995,
                    help='BN momentum')
    ap.add_argument('--save_prefix', dest='save_prefix', type=str,
                    default='SNIPER',
                    help='Prefix used for snapshotting the network')
    ap.add_argument('--set', dest='set_cfg_list', nargs=argparse.REMAINDER,
                    default=None,
                    help='Set the configuration fields from command line')
    return ap.parse_args()
def save_checkpoint(state, filename):
    """Serialize a checkpoint dict (callers pass epoch/model/optimizer) to `filename` via torch.save."""
    torch.save(state, filename)
if __name__ == '__main__':
    # ---- Configuration: load the YAML config and apply CLI overrides ----
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)
    # Effective batch size spans all visible GPUs.
    batch_size = nGPUs * config.TRAIN.BATCH_IMAGES
    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)
    # Create roidb
    image_sets = [iset for iset in config.dataset.image_set.split('+')]
    roidbs = [load_proposal_roidb(config.dataset.dataset, image_set, config.dataset.root_path,
                                  config.dataset.dataset_path,
                                  proposal=config.dataset.proposal, append_gt=True, flip=config.TRAIN.FLIP,
                                  result_path=config.output_path,
                                  proposal_path=config.proposal_path, load_mask=config.TRAIN.WITH_MASK, only_gt=not config.TRAIN.USE_NEG_CHIPS)
              for image_set in image_sets]
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, config)
    bbox_means, bbox_stds = add_bbox_regression_targets(roidb, config)
    print('Creating Iterator with {} Images'.format(len(roidb)))
    pytorch_dataset = PytorchIterator(roidb=roidb, config=config, batch_size=batch_size, nGPUs=nGPUs,threads=config.TRAIN.NUM_THREAD, pad_rois_to=400)
    train_loader = torch.utils.data.DataLoader(dataset=pytorch_dataset, batch_size=batch_size, shuffle=False,num_workers=0)
    # initilize the tensor holder here.
    im_data = torch.FloatTensor(1)
    valid_range = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    gt_boxes = torch.FloatTensor(1)
    # ship to cuda
    im_data = im_data.cuda()
    valid_range = valid_range.cuda()
    im_info = im_info.cuda()
    gt_boxes = gt_boxes.cuda()
    # NOTE(review): the training loop below REBINDS `valid_range`, `im_info`
    # and `gt_boxes` to the loader's CPU tensors (same names in the for
    # unpacking), so only the `im_data` CUDA holder is actually reused —
    # confirm this is intended before relying on it.
    #faster-rcnn
    fasterRCNN = resnet(config.dataset.NUM_CLASSES, 101, pretrained=True, class_agnostic=config.CLASS_AGNOSTIC)
    # init weight
    fasterRCNN.create_architecture()
    lr = 0.001
    params = []
    # Build per-parameter groups: biases get a scaled LR and optionally no
    # weight decay, everything else gets the base LR + WEIGHT_DECAY.
    for key, value in dict(fasterRCNN.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
                            'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Warm-start from a pretrained checkpoint; 'module.' prefixes from
    # DataParallel are stripped and the class-count-dependent bbox head is
    # skipped so the freshly initialized one is kept.
    load_name = 'output/faster_rcnn_jwyang.pth'
    checkpoint = torch.load(load_name)
    origin_state_dict = fasterRCNN.state_dict()
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in checkpoint['model'].items():
        if k[0:6] == 'module':
            name = k[7:]  # remove `module.`
        else:
            name = k
        if k == 'RCNN_bbox_pred.bias' or k == 'RCNN_bbox_pred.weight':
            continue
        new_state_dict[name] = v
    origin_state_dict.update(new_state_dict)
    fasterRCNN.load_state_dict(origin_state_dict)
    fasterRCNN.cuda()
    fasterRCNN = nn.DataParallel(fasterRCNN)
    meter_names = ['batch_time', 'loss','rpn_cls_loss','rpn_box_loss','rcnn_cls_loss','rcnn_box_loss','acc','pos_recall','neg_recall','neg_num','pos_num','rpn_acc','rpn_pos_recall','rpn_neg_recall','rpn_neg_num','rpn_pos_num']
    meters = {name: AverageMeter() for name in meter_names}
    fasterRCNN.train()
    # ---- Training loop ----
    for epoch in range(config.TRAIN.begin_epoch,config.TRAIN.end_epoch):
        for i, (data, valid_range, im_info,gt_boxes) in enumerate(train_loader):
            t0 = time.time()
            im_data.data.resize_(data.size()).copy_(data).float()
            valid_range.data.resize_(valid_range.size()).copy_(valid_range).float()
            im_info.data.resize_(im_info.size()).copy_(im_info).float()
            gt_boxes.data.resize_(gt_boxes.size()).copy_(gt_boxes).float()
            fasterRCNN.zero_grad()
            rois, rpn_cls_prob, rpn_label, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_box, RCNN_loss_cls, RCNN_loss_bbox, rois_label = fasterRCNN(im_data, im_info,valid_range, gt_boxes)
            # Total loss = RPN (cls + box) + RCNN head (cls + box).
            loss = rpn_loss_cls.mean() + rpn_loss_box.mean() + RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
            pos_recall, neg_recall, acc, pos_num, neg_num = pos_neg_recall(cls_prob, rois_label)
            rpn_pos_recall, rpn_neg_recall, rpn_acc, rpn_pos_num, rpn_neg_num = pos_neg_recall(rpn_cls_prob, rpn_label)
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            t1 = time.time()
            meters['loss'].update(loss.item(), 1)
            meters['batch_time'].update(t1 - t0, 1)
            meters['rpn_cls_loss'].update(rpn_loss_cls.data.mean(), 1)
            meters['rpn_box_loss'].update(rpn_loss_box.data.mean(), 1)
            meters['rcnn_cls_loss'].update(RCNN_loss_cls.data.mean(), 1)
            meters['rcnn_box_loss'].update(RCNN_loss_bbox.data.mean(), 1)
            meters['acc'].update(acc, 1)
            meters['neg_recall'].update(neg_recall, 1)
            meters['pos_recall'].update(pos_recall, 1)
            meters['pos_num'].update(pos_num, 1)
            meters['neg_num'].update(neg_num, 1)
            meters['rpn_acc'].update(rpn_acc, 1)
            meters['rpn_neg_recall'].update(rpn_neg_recall, 1)
            meters['rpn_pos_recall'].update(rpn_pos_recall, 1)
            meters['rpn_pos_num'].update(rpn_pos_num, 1)
            meters['rpn_neg_num'].update(rpn_neg_num, 1)
            # Console report every 100 batches; meters reset afterwards.
            if i % 100 == 0:
                print(epoch, i,
                      'Batch_time: %.4f lr: %.4f sum_Loss: %.4f rpn_cls_loss: %.4f rpn_box_loss: %.4f rcnn_cls_loss: %.4f rcnn_box_loss: %.4f pos_recall: %.4f neg_recall: %.4f acc: %.4f pos_num: %5d neg_num: %5d rpn_pos_recall: %.4f rpn_neg_recall: %.4f rpn_acc: %.4f rpn_pos_num: %5d rpn_neg_num: %5d' % (
                          meters['batch_time'].avg, lr,meters['loss'].avg, meters['rpn_cls_loss'].avg,
                          meters['rpn_box_loss'].avg, meters['rcnn_cls_loss'].avg, meters['rcnn_box_loss'].avg,
                          meters['pos_recall'].avg, meters['neg_recall'].avg, meters['acc'].avg, meters['pos_num'].avg,
                          meters['neg_num'].avg, meters['rpn_pos_recall'].avg, meters['rpn_neg_recall'].avg,
                          meters['rpn_acc'].avg, meters['rpn_pos_num'].avg, meters['rpn_neg_num'].avg))
                for k in meters.keys():
                    meters[k].reset()
            # Mid-epoch checkpoint every 1000 batches.
            if i!=0 and i % 1000 == 0:
                save_name = os.path.join('output','nofix_{}_{}.pth'.format(epoch, i))
                save_checkpoint({
                    'epoch': epoch + 1,
                    'model': fasterRCNN.state_dict(),
                    'optimizer': optimizer.state_dict()
                }, save_name)
        #if epoch % 1 == 0:
        # Decay the learning rate by 10x at the end of every epoch.
        adjust_learning_rate(optimizer, 0.1)
        lr *= 0.1
        save_name = os.path.join('output', 'nofix_{}.pth'.format(epoch))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': fasterRCNN.state_dict(),
            'optimizer': optimizer.state_dict()
        }, save_name)
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,769 | yusiningxin/sniper-pytorch | refs/heads/master | /main_test.py | import init
import os
import sys
import torch
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
#os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import matplotlib
matplotlib.use('Agg')
import torch.nn as nn
#sys.path.insert(0, 'lib')
from configs.faster.default_configs import config, update_config, update_config_from_list
from iterators.PytorchIterator import PytorchIterator
from data_utils.load_data import load_proposal_roidb, merge_roidb, filter_roidb
from iterators.PytorchIterator import PytorchIterator
from model.faster_rcnn.resnet import resnet
import argparse
from inference import imdb_detection_wrapper
import logging
import math
import logging.config
def parser():
    """Parse command-line arguments for SNIPER testing."""
    ap = argparse.ArgumentParser('SNIPER test module')
    ap.add_argument('--cfg', dest='cfg', type=str,
                    default='configs/faster/sniper_res101_e2e.yml',
                    help='Path to the config file')
    ap.add_argument('--save_prefix', dest='save_prefix', type=str,
                    default='SNIPER',
                    help='Prefix used for snapshotting the network')
    ap.add_argument('--vis', dest='vis', action='store_true',
                    help='Whether to visualize the detections')
    ap.add_argument('--set', dest='set_cfg_list', nargs=argparse.REMAINDER,
                    default=None,
                    help='Set the configuration fields from command line')
    return ap.parse_args()
def main():
    """Load the test roidb, restore a trained checkpoint, and run detection."""
    args = parser()
    update_config(args.cfg)
    if args.set_cfg_list:
        update_config_from_list(args.set_cfg_list)
    if not os.path.isdir(config.output_path):
        os.mkdir(config.output_path)
    # Create roidb (ground truth only, no flipping at test time).
    roidb, imdb = load_proposal_roidb(config.dataset.dataset, config.dataset.test_image_set, config.dataset.root_path,
                                      config.dataset.dataset_path,
                                      proposal=config.dataset.proposal, only_gt=True, flip=False,
                                      result_path=config.output_path,
                                      proposal_path=config.proposal_path, get_imdb=True)
    # NOTE(review): checkpoint path is hard-coded — consider a CLI option.
    load_name = 'output/nofix_3_15000.pth'
    # faster-rcnn
    fasterRCNN = resnet(config.dataset.NUM_CLASSES, 101, pretrained=True, class_agnostic=config.CLASS_AGNOSTIC)
    # init weight
    fasterRCNN.create_architecture()
    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    # Strip the 'module.' prefix that nn.DataParallel adds to saved keys.
    for k, v in checkpoint['model'].items():
        if k[0:6] == 'module':
            name = k[7:]  # remove `module.`
        else:
            name = k
        new_state_dict[name] = v
    fasterRCNN.load_state_dict(new_state_dict)
    fasterRCNN.cuda()
    fasterRCNN = nn.DataParallel(fasterRCNN)
    fasterRCNN.eval()
    if config.TEST.EXTRACT_PROPOSALS:
        # FIX: the original call referenced undefined names (sym_inst,
        # context, arg_params, aux_params, imdb_proposal_extraction_wrapper)
        # left over from the MXNet code base and always crashed with a
        # NameError; fail loudly and explicitly instead.
        raise NotImplementedError('Proposal extraction is not ported to the PyTorch code path yet')
    else:
        imdb_detection_wrapper(fasterRCNN, config, imdb, roidb)
# Script entry point.
if __name__ == '__main__':
    main()
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,770 | yusiningxin/sniper-pytorch | refs/heads/master | /init.py | import sys
# Put the bundled `lib/` directory ahead of installed packages so project
# modules (configs, iterators, model, ...) resolve as top-level imports.
sys.path.insert(0,'lib')
| {"/main_dataset.py": ["/init.py"], "/main_test.py": ["/init.py", "/inference.py"]} |
48,771 | yusiningxin/sniper-pytorch | refs/heads/master | /inference.py | # ------------------------------------------------------------------
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Inference module for performing detection and proposal extraction
# Written by Mahyar Najibi
# -----------------------------------------------------------------
import numpy as np
#from bbox.bbox_transform import bbox_pred, clip_boxes
from iterators.PytorchTest import PytorchTest
import torch
from configs.faster.default_configs import config as config_
import os
import time
import cPickle
from model.rpn.bbox_transform import bbox_transform_inv
from model.rpn.bbox_transform import clip_boxes
from data_utils.data_workers import nms_worker
from data_utils.visualization import visualize_dets
from tqdm import tqdm
import math
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from model.nms.nms_wrapper import nms
# from iterators.MNIteratorTest import MNIteratorTest
# import mxnet as mx
class Tester(object):
    def __init__(self, module, imdb, roidb, test_iter, cfg, rcnn_output_names=None, rpn_output_names=None,
                 logger=None, batch_size=None):
        """Detection/proposal test harness.

        Args:
            module: trained detection network; `detect()` calls it as
                module(data, im_info).
            imdb: dataset object supplying result_path, num_classes,
                classes and name.
            roidb: list of per-image roi records to test on.
            test_iter: iterator yielding test batches.
            cfg: config namespace (cfg.TEST.NMS, NMS_SIGMA, VALID_RANGES, ...).
            rcnn_output_names, rpn_output_names: optional maps from logical
                output names to symbol output names (MXNet-style); the
                defaults below are used when omitted.
            logger: optional logger mirrored by show_info().
            batch_size: images per batch, used to regroup flattened outputs.
        """
        self.test_iter = test_iter
        self.cfg = cfg
        self.module = module
        self.rcnn_output_names = rcnn_output_names
        if not self.rcnn_output_names:
            self.rcnn_output_names = {
                'cls': 'cls_prob_reshape_output',
                'bbox': 'bbox_pred_reshape_output',
                'im_ids': 'im_ids'
            }
        self.rpn_output_names = rpn_output_names
        if not self.rpn_output_names:
            self.rpn_output_names = {
                'scores': 'rois_score',
                'rois': 'rois_output',
                'im_ids': 'im_ids'
            }
        self.logger = logger
        self.result_path = imdb.result_path
        self.num_classes = imdb.num_classes
        self.class_names = imdb.classes
        self.num_images = len(roidb)
        self.imdb_name = imdb.name
        # Soft-NMS worker shared by the detection/aggregation paths.
        self.nms_worker = nms_worker(cfg.TEST.NMS, cfg.TEST.NMS_SIGMA)
        self.batch_size = batch_size
        self.roidb = roidb
        # Progress reporting only makes sense for more than one image.
        self.verbose = len(roidb) > 1
        # Lazily created by get_detections() when NMS is needed per batch.
        self.thread_pool = None
def forward(self, batch):
self.module.forward(batch, is_train=False)
return [dict(zip(self.module.output_names, i))
for i in zip(*self.module.get_outputs(merge_multi_context=False))]
    def get_proposals(self, batch, scales):
        """Extract RPN proposals for one batch (legacy MXNet code path).

        NOTE(review): this relies on `self.data_names`, which is never set in
        __init__ — presumably assigned by a subclass or caller; confirm
        before using this path. Outputs are consumed via `.asnumpy()`, i.e.
        this still expects MXNet NDArrays, not torch tensors.

        Returns:
            (scores, rois, data, im_ids): per-image score arrays, per-image
            proposal boxes rescaled back to original image coordinates, the
            input data dict, and the flat array of image ids.
        """
        data = dict(zip(self.data_names, batch.data))
        outputs = self.forward(batch)
        scores, rois = [], []
        im_shapes = np.array([im.shape[-2:] for im in data['data']]).reshape(-1, self.batch_size, 2)
        im_ids = np.array([], dtype=int)
        for i, (gpu_out, gpu_scales, gpu_shapes) in enumerate(zip(outputs, scales, im_shapes)):
            gpu_rois = gpu_out[self.rpn_output_names['rois']].asnumpy()
            # Reshape crois
            nper_gpu = gpu_rois.shape[0] / self.batch_size  # NOTE(review): true division — float under Python 3
            gpu_scores = gpu_out[self.rpn_output_names['scores']].asnumpy()
            im_ids = np.hstack((im_ids, gpu_out[self.rpn_output_names['im_ids']].asnumpy().astype(int)))
            for idx in range(self.batch_size):
                # Column 0 of each roi is the within-batch image index.
                cids = np.where(gpu_rois[:, 0] == idx)[0]
                assert len(cids) == nper_gpu, 'The number of rois per GPU should be fixed!'
                # Undo the image rescaling to get original-image coordinates.
                crois = gpu_rois[cids, 1:] / gpu_scales[idx]
                cscores = gpu_scores[cids]
                # Store predictions
                scores.append(cscores)
                rois.append(crois)
        return scores, rois, data, im_ids
def detect(self, batch, scales):
with torch.no_grad():
rois, rpn_cls_prob, rpn_label, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label= self.module(batch[0],batch[1])
im_id = batch[2]
boxes = rois.data[:, :, 1:5]
box_deltas = bbox_pred.data
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor([0.1,0.1,0.2,0.2]).cuda()
#box_deltas = box_deltas.view(1,-1, 4*81)
box_deltas = box_deltas.view(1, -1, 4)
boxes = boxes.view(1,-1,4)
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, batch[1].data, 1)
cls_prob = cls_prob.detach().cpu().numpy()
cls_prob = cls_prob.reshape(self.batch_size,cls_prob.shape[0]/self.batch_size,-1)
pred_boxes= pred_boxes.detach().cpu().numpy()
pred_boxes = pred_boxes.reshape(self.batch_size, pred_boxes.shape[1]/self.batch_size, -1)
scales = scales.reshape(self.batch_size,-1)
for i,(bbox,scale) in enumerate(zip(pred_boxes,scales)):
pred_boxes[i] = pred_boxes[i] / scale.float()
return cls_prob,pred_boxes,batch,im_id
def set_scale(self, scale):
if isinstance(self.test_iter, PrefetchingIter):
self.test_iter.iters[0].set_scale(scale)
else:
self.test_iter.set_scale(scale)
self.test_iter.reset()
def show_info(self, print_str):
print(print_str)
if self.logger: self.logger.info(print_str)
    def aggregate(self, scale_cls_dets, vis=True, cache_name='cache', vis_path='/home/liuqiuyue/vis/', vis_name=None,pre_nms_db_divide=10, vis_ext='.png'):
        """Combine detections from all test scales into final per-image results.

        Filters each scale's detections by its configured valid area range,
        concatenates them, runs NMS per (image, class) pair in a process
        pool (the roidb is processed in `pre_nms_db_divide` parts to bound
        memory), optionally caps detections per image, and optionally
        visualizes and caches the results.

        Returns:
            all_boxes: all_boxes[class][image] -> (N, 5) [x1, y1, x2, y2, score].
        """
        n_scales = len(scale_cls_dets)
        assert n_scales == len(self.cfg.TEST.VALID_RANGES), 'A valid range should be specified for each test scale'
        all_boxes = [[[] for _ in range(self.num_images)] for _ in range(self.num_classes)]
        nms_pool = Pool(32)  # NOTE(review): pool size is hard-coded
        if len(scale_cls_dets) > 1:
            self.show_info('Aggregating detections from multiple scales and applying NMS...')
        else:
            self.show_info('Performing NMS on detections...')
        # Apply ranges and store detections per category
        parallel_nms_args = [[] for _ in range(pre_nms_db_divide)]
        n_roi_per_pool = math.ceil(self.num_images / float(pre_nms_db_divide))
        for i in range(self.num_images):
            for j in range(1, self.num_classes):
                agg_dets = np.empty((0, 5), dtype=np.float32)
                for all_cls_dets, valid_range in zip(scale_cls_dets, self.cfg.TEST.VALID_RANGES):
                    cls_dets = all_cls_dets[j][i]
                    heights = cls_dets[:, 2] - cls_dets[:, 0]
                    widths = cls_dets[:, 3] - cls_dets[:, 1]
                    areas = widths * heights
                    # A non-positive bound disables that side of the range.
                    lvalid_ids = np.where(areas > valid_range[0] * valid_range[0])[0] if valid_range[0] > 0 else \
                        np.arange(len(areas))
                    uvalid_ids = np.where(areas <= valid_range[1] * valid_range[1])[0] if valid_range[1] > 0 else \
                        np.arange(len(areas))
                    valid_ids = np.intersect1d(lvalid_ids, uvalid_ids)
                    cls_dets = cls_dets[valid_ids, :] if len(valid_ids) > 0 else cls_dets
                    agg_dets = np.vstack((agg_dets, cls_dets))
                parallel_nms_args[int(i / n_roi_per_pool)].append(agg_dets)
        # Divide roidb and perform NMS in parallel to reduce the memory usage
        im_offset = 0
        for part in tqdm(range(pre_nms_db_divide)):
            final_dets = nms_pool.map(self.nms_worker.worker, parallel_nms_args[part])
            n_part_im = int(len(final_dets) / (self.num_classes - 1))
            for i in range(n_part_im):
                for j in range(1, self.num_classes):
                    # Flat pool results are laid out image-major, class-minor.
                    all_boxes[j][im_offset + i] = final_dets[i * (self.num_classes - 1) + (j - 1)]
            im_offset += n_part_im
        nms_pool.close()
        # Limit number of detections to MAX_PER_IMAGE if requested and visualize if vis is True
        for i in range(self.num_images):
            if self.cfg.TEST.MAX_PER_IMAGE > 0:
                image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, self.num_classes)])
                if len(image_scores) > self.cfg.TEST.MAX_PER_IMAGE:
                    # Keep only the globally top-scoring MAX_PER_IMAGE boxes.
                    image_thresh = np.sort(image_scores)[-self.cfg.TEST.MAX_PER_IMAGE]
                    for j in range(1, self.num_classes):
                        keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                        all_boxes[j][i] = all_boxes[j][i][keep, :]
            if vis:
                visualization_path = vis_path if vis_path else os.path.join(self.cfg.TEST.VISUALIZATION_PATH,
                                                                            cache_name)
                if not os.path.isdir(visualization_path):
                    os.makedirs(visualization_path)
                import cv2
                im = cv2.cvtColor(cv2.imread(self.roidb[i]['image']), cv2.COLOR_BGR2RGB)
                visualize_dets(im,
                               [[]] + [all_boxes[j][i] for j in range(1, self.num_classes)],
                               1.0,
                               self.cfg.network.PIXEL_MEANS, self.class_names, threshold=0.5,
                               save_path=os.path.join(visualization_path, '{}{}'.format(vis_name if vis_name else i,
                                                                                        vis_ext)), transform=False)
        if cache_name:
            cache_path = os.path.join(self.result_path, cache_name)
            if not os.path.isdir(cache_path):
                os.makedirs(cache_path)
            cache_path = os.path.join(cache_path, 'detections.pkl')
            self.show_info('Done! Saving detections into: {}'.format(cache_path))
            with open(cache_path, 'wb') as detfile:
                # NOTE(review): `import cPickle` at the top of this file is
                # Python 2-only; under Python 3 this needs `pickle`.
                cPickle.dump(all_boxes, detfile)
        return all_boxes
# def aggregate(self, scale_cls_dets, vis=True, cache_name='cache', vis_path='/home/liuqiuyue/vis/', vis_name=None,
# pre_nms_db_divide=10, vis_ext='.png'):
# n_scales = len(scale_cls_dets)
# assert n_scales == len(self.cfg.TEST.VALID_RANGES), 'A valid range should be specified for each test scale'
# all_boxes = [[[] for _ in range(self.num_images)] for _ in range(self.num_classes)]
# nms_pool = Pool(32)
# if len(scale_cls_dets) > 1:
# self.show_info('Aggregating detections from multiple scales and applying NMS...')
# else:
# self.show_info('Performing NMS on detections...')
#
# # Apply ranges and store detections per category
# parallel_nms_args = [[] for _ in range(pre_nms_db_divide)]
# n_roi_per_pool = math.ceil(self.num_images / float(pre_nms_db_divide))
# empty_array = np.transpose(np.array([[], [], [], [], []]), (1, 0))
# for i in range(self.num_images):
# for j in range(1, self.num_classes):
# agg_dets = np.empty((0, 5), dtype=np.float32)
# for all_cls_dets, valid_range in zip(scale_cls_dets, self.cfg.TEST.VALID_RANGES):
# cls_dets = all_cls_dets[j][i]
# heights = cls_dets[:, 2] - cls_dets[:, 0]
# widths = cls_dets[:, 3] - cls_dets[:, 1]
# areas = widths * heights
# lvalid_ids = np.where(areas > valid_range[0] * valid_range[0])[0] if valid_range[0] > 0 else \
# np.arange(len(areas))
# uvalid_ids = np.where(areas <= valid_range[1] * valid_range[1])[0] if valid_range[1] > 0 else \
# np.arange(len(areas))
# valid_ids = np.intersect1d(lvalid_ids, uvalid_ids)
# cls_dets = cls_dets[valid_ids, :] if len(valid_ids) > 0 else cls_dets
# agg_dets = np.vstack((agg_dets, cls_dets))
# scores_tmp = agg_dets[:, 4]
# order = scores_tmp.argsort()[::-1]
# agg_dets = agg_dets[order]
# agg_dets = torch.from_numpy(agg_dets).cuda()
# keep = nms(agg_dets, 0.45)
# if type(keep).__name__ == 'list':
# all_boxes[j][i] = empty_array
# else:
# agg_dets = agg_dets[keep.view(-1).long()]
# all_boxes[j][i] = agg_dets.cpu().numpy()
#
# # Limit number of detections to MAX_PER_IMAGE if requested and visualize if vis is True
# for i in range(self.num_images):
# if self.cfg.TEST.MAX_PER_IMAGE > 0:
# image_scores = np.hstack([all_boxes[j][i][:, -1] for j in range(1, self.num_classes)])
# if len(image_scores) > self.cfg.TEST.MAX_PER_IMAGE:
# image_thresh = np.sort(image_scores)[-self.cfg.TEST.MAX_PER_IMAGE]
# for j in range(1, self.num_classes):
# keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
# all_boxes[j][i] = all_boxes[j][i][keep, :]
# if vis:
# visualization_path = vis_path if vis_path else os.path.join(self.cfg.TEST.VISUALIZATION_PATH,
# cache_name)
# if not os.path.isdir(visualization_path):
# os.makedirs(visualization_path)
# import cv2
# im = cv2.cvtColor(cv2.imread(self.roidb[i]['image']), cv2.COLOR_BGR2RGB)
# visualize_dets(im,
# [[]] + [all_boxes[j][i] for j in range(1, self.num_classes)],
# 1.0,
# self.cfg.network.PIXEL_MEANS, self.class_names, threshold=0.5,
# save_path=os.path.join(visualization_path, '{}{}'.format(vis_name if vis_name else i,
# vis_ext)), transform=False)
#
# if cache_name:
# cache_path = os.path.join(self.result_path, cache_name)
# if not os.path.isdir(cache_path):
# os.makedirs(cache_path)
# cache_path = os.path.join(cache_path, 'detections.pkl')
# self.show_info('Done! Saving detections into: {}'.format(cache_path))
# with open(cache_path, 'wb') as detfile:
# cPickle.dump(all_boxes, detfile)
# return all_boxes
    def get_detections(self, cls_thresh=1e-3, cache_name='cache', evaluate=False,vis = False):
        """Run the detector over the whole test iterator and collect boxes.

        Args:
            cls_thresh: minimum per-class score for a box to be kept.
            cache_name: kept for interface compatibility (unused in this path).
            evaluate: when True, run NMS and per-image top-K score filtering.
            vis: when True, run NMS and dump visualisations to disk.

        Returns:
            all_boxes[class_idx][image_idx] -> array of [x1, y1, x2, y2, score].
        """
        all_boxes = [[[] for _ in range(self.num_images)] for _ in range(self.num_classes)]
        data_counter = 0
        detect_time, post_time = 0, 0
        for i, batch in enumerate(self.test_iter):
            im_info = batch[1]
            scales = im_info[:, 2].reshape(-1, self.batch_size)
            # Run detection on the batch
            stime = time.time()
            scores, boxes, data, im_ids = self.detect(batch, scales)
            detect_time += time.time() - stime
            stime = time.time()
            # NOTE(review): this loop shadows the outer batch index ``i``; the
            # ``batch[0][i]`` access in the vis branch therefore uses the
            # per-image index, not the batch index.
            for i, (cscores, cboxes, im_id) in enumerate(zip(scores, boxes, im_ids)):
                parallel_nms_args = []
                for j in range(1, self.num_classes):
                    # Apply the score threshold
                    inds = np.where(cscores[:, j] > cls_thresh)[0]
                    rem_scores = cscores[inds, j, np.newaxis]
                    #rem_boxes = cboxes[inds, j * 4:(j+1) * 4]
                    # Boxes are taken class-agnostic (first 4 columns only).
                    rem_boxes = cboxes[inds, 0:4]
                    cls_dets = np.hstack((rem_boxes, rem_scores))
                    if evaluate or vis:
                        parallel_nms_args.append(cls_dets)
                    else:
                        all_boxes[j][im_id] = cls_dets
                # Apply nms
                if evaluate or vis:
                    # Lazily create a shared worker pool; closed after the last batch.
                    if not self.thread_pool:
                        self.thread_pool = ThreadPool(8)
                    final_dets = self.thread_pool.map(self.nms_worker.worker, parallel_nms_args)
                    for j in range(1, self.num_classes):
                        all_boxes[j][im_id] = final_dets[j - 1]
                # Filter boxes based on max_per_image if needed
                if evaluate and self.cfg.TEST.MAX_PER_IMAGE:
                    image_scores = np.hstack([all_boxes[j][im_id][:, -1]
                                              for j in range(1, self.num_classes)])
                    if len(image_scores) > self.cfg.TEST.MAX_PER_IMAGE:
                        # Keep only the MAX_PER_IMAGE highest-scoring boxes per image.
                        image_thresh = np.sort(image_scores)[-self.cfg.TEST.MAX_PER_IMAGE]
                        for j in range(1, self.num_classes):
                            keep = np.where(all_boxes[j][im_id][:, -1] >= image_thresh)[0]
                            all_boxes[j][im_id] = all_boxes[j][im_id][keep, :]
                if vis:
                    # NOTE(review): hard-coded user path; presumably this should
                    # come from cfg.TEST.VISUALIZATION_PATH — confirm before reuse.
                    visualization_path = '/home/liuqiuyue/vis1/'
                    if not os.path.isdir(visualization_path):
                        os.makedirs(visualization_path)
                    visualize_dets(batch[0][i].cpu().numpy(),
                                   [[]] + [all_boxes[j][im_id] for j in range(1, self.num_classes)], im_info[i, 2],
                                   self.cfg.network.PIXEL_MEANS, self.class_names, threshold=0.5,
                                   save_path=os.path.join(visualization_path, '{}{}'.format(im_id, '.png')))
            data_counter += self.batch_size
            post_time += time.time() - stime
            if self.verbose:
                self.show_info('Tester: {}/{}, Detection: {:.4f}s, Post Processing: {:.4}s'.format(
                    min(data_counter, self.num_images),
                    self.num_images, detect_time / data_counter,
                    post_time / data_counter))
        if self.thread_pool:
            self.thread_pool.close()
        return all_boxes
    def extract_proposals(self, n_proposals=300, cache_name='cache', vis=False, vis_ext='.png'):
        """Extract up to ``n_proposals`` RPN proposals per test image.

        Args:
            n_proposals: proposals to keep per image (top rows of the output).
            cache_name: subdirectory name used for visualisation output.
            vis: when True, draw the kept proposals for each image.
            vis_ext: file extension of the visualisation images.

        Returns:
            all_boxes[image_idx] -> float32 array of [x1, y1, x2, y2, score].
        """
        all_boxes = [[] for _ in range(self.num_images)]
        data_counter = 0
        detect_time, post_time = 0, 0
        if vis and not os.path.isdir(self.cfg.TEST.VISUALIZATION_PATH):
            os.makedirs(self.cfg.TEST.VISUALIZATION_PATH)
        # Inference only: no gradients needed.
        with torch.no_grad():
            for batch in self.test_iter:
                im_info = batch.data[1].asnumpy()
                scales = im_info[:, 2].reshape(-1, self.batch_size)
                # Run detection on the batch
                stime = time.time()
                scores, boxes, data, im_ids = self.get_proposals(batch, scales)
                detect_time += time.time() - stime
                stime = time.time()
                for i, (cscores, cboxes, im_id) in enumerate(zip(scores, boxes, im_ids)):
                    # Keep the requested number of rois
                    rem_scores = cscores[0:n_proposals, np.newaxis]
                    rem_boxes = cboxes[0:n_proposals, 0:4]
                    cls_dets = np.hstack((rem_boxes, rem_scores)).astype(np.float32)
                    if vis:
                        visualization_path = os.path.join(self.cfg.TEST.VISUALIZATION_PATH, cache_name)
                        if not os.path.isdir(visualization_path):
                            os.makedirs(visualization_path)
                        visualize_dets(batch.data[0][i].asnumpy(),
                                       [[]] + [cls_dets], im_info[i, 2],
                                       self.cfg.network.PIXEL_MEANS, ['__background__', 'object'], threshold=0.5,
                                       save_path=os.path.join(visualization_path, '{}{}'.format(im_id, vis_ext)))
                    all_boxes[im_id] = cls_dets
                data_counter += self.test_iter.get_batch_size()
                post_time += time.time() - stime
                self.show_info(
                    'Tester: {}/{}, Forward: {:.4f}s, Post Processing: {:.4}s'.format(min(data_counter, self.num_images),
                                                                                      self.num_images,
                                                                                      detect_time / data_counter,
                                                                                      post_time / data_counter))
        return all_boxes
def detect_scale_worker(arguments):
    """Run detection for a single (scale, batch-size) configuration.

    ``arguments`` is the packed list [scale, nbatch, config, model, roidb, imdb];
    returns the raw per-class detections produced by Tester.get_detections.
    """
    scale, nbatch, config, model, roidb, imdb = arguments
    print('Performing inference for scale: {}'.format(scale))
    test_dataset = PytorchTest(roidb=roidb, config=config, batch_size=nbatch,
                               threads=32, pad_rois_to=400, crop_size=None,
                               test_scale=scale)
    loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=nbatch,
                                         shuffle=False, num_workers=0)
    # Create Tester
    scale_tester = Tester(model, imdb, roidb, loader, cfg=config, batch_size=nbatch)
    cache = 'dets_scale_{}x{}'.format(scale[0], scale[1])
    return scale_tester.get_detections(evaluate=False, cache_name=cache)
def imdb_detection_wrapper(model, config, imdb, roidb,vis = False):
    """Run detection at every configured test scale, aggregate and evaluate.

    Args:
        model: detector passed through to each per-scale worker.
        config: experiment config; TEST.BATCH_IMAGES and TEST.SCALES are zipped.
        imdb: dataset object providing evaluate_detections().
        roidb: test-set roidb entries.
        vis: forwarded to the aggregation step to enable visualisation.
    """
    detections = []
    for nbatch, scale in zip(config.TEST.BATCH_IMAGES, config.TEST.SCALES):
        detections.append(detect_scale_worker([scale, nbatch, config, model, roidb, imdb]))
    # NOTE: ``nbatch`` deliberately carries the last loop value (original
    # behaviour); at least one scale must be configured.
    tester = Tester(None, imdb, roidb, None, cfg=config, batch_size=nbatch)
    # BUG FIX: ``vis`` was accepted but ignored (aggregate was hard-coded to
    # vis=False); forward the caller's flag instead.
    all_boxes = tester.aggregate(detections, vis=vis, cache_name='dets_final')
    print('Evaluating detections...')
    imdb.evaluate_detections(all_boxes)
    print('All done!')
def proposal_scale_worker(arguments):
    """Extract RPN proposals for one test scale (MXNet execution path).

    ``arguments`` is the packed list [scale, nbatch, context, config, sym_def,
    roidb, imdb, arg_params, aux_params, vis]; returns the per-image proposal
    list produced by Tester.extract_proposals.
    """
    [scale, nbatch, context, config, sym_def, \
     roidb, imdb, arg_params, aux_params, vis] = arguments
    print('Performing inference for scale: {}'.format(scale))
    nGPUs = len(context)
    # Build the RPN-only symbol for inference.
    sym_inst = sym_def(n_proposals=400, test_nbatch=nbatch)
    sym = sym_inst.get_symbol_rpn(config, is_train=False)
    test_iter = MNIteratorTest(roidb=roidb, config=config, batch_size=nGPUs * nbatch, nGPUs=nGPUs, threads=32,
                               pad_rois_to=400, crop_size=None, test_scale=scale)
    # Create the module
    shape_dict = dict(test_iter.provide_data_single)
    sym_inst.infer_shape(shape_dict)
    mod = mx.mod.Module(symbol=sym,
                        context=context,
                        data_names=[k[0] for k in test_iter.provide_data_single],
                        label_names=None)
    mod.bind(test_iter.provide_data, test_iter.provide_label, for_training=False)
    mod.init_params(arg_params=arg_params, aux_params=aux_params)
    # Create Tester
    tester = Tester(mod, imdb, roidb, test_iter, cfg=config, batch_size=nbatch)
    # Per-scale visualisation only when both the caller and the config ask for it.
    return tester.extract_proposals(vis=(vis and config.TEST.VISUALIZE_INTERMEDIATE_SCALES),
                                    cache_name='props_scale_{}x{}'.format(scale[0], scale[1]))
def imdb_proposal_extraction_wrapper(sym_def, config, imdb, roidb, context, arg_params, aux_params, vis):
    """Extract proposals for every test scale, merge them per image, and save
    the final pickle under config.TEST.PROPOSAL_SAVE_PATH.

    With CONCURRENT_JOBS == 1 scales are processed serially; otherwise the
    roidb is sharded across a process pool and per-scale results are cached.
    """
    if vis and config.TEST.CONCURRENT_JOBS > 1:
        # Visualisation writes files per worker and is not safe in parallel.
        print('Visualization is only allowed with 1 CONCURRENT_JOBS')
        print('Setting CONCURRENT_JOBS to 1')
        config.TEST.CONCURRENT_JOBS = 1
    proposals = []
    if config.TEST.CONCURRENT_JOBS == 1:
        # Serial path: one worker call per (batch size, scale) pair.
        for nbatch, scale in zip(config.TEST.BATCH_IMAGES, config.TEST.SCALES):
            proposals.append(proposal_scale_worker([scale, nbatch, context, config, sym_def, \
                                                    roidb, imdb, arg_params, aux_params, vis]))
    else:
        # Parallel path: split the roidb into CONCURRENT_JOBS contiguous shards.
        im_per_job = int(math.ceil(float(len(roidb)) / config.TEST.CONCURRENT_JOBS))
        roidbs = []
        pool = Pool(config.TEST.CONCURRENT_JOBS)
        for i in range(config.TEST.CONCURRENT_JOBS):
            roidbs.append([roidb[j] for j in range(im_per_job * i, min(im_per_job * (i + 1), len(roidb)))])
        for i, (nbatch, scale) in enumerate(zip(config.TEST.BATCH_IMAGES, config.TEST.SCALES)):
            parallel_args = []
            for j in range(config.TEST.CONCURRENT_JOBS):
                parallel_args.append([scale, nbatch, context, config, sym_def, \
                                      roidbs[j], imdb, arg_params, aux_params, vis])
            proposal_list = pool.map(proposal_scale_worker, parallel_args)
            # Re-concatenate the shards back into imdb order.
            tmp_props = []
            for prop in proposal_list:
                tmp_props += prop
            # Cache proposals...
            cache_path = os.path.join(imdb.result_path, 'props_scale_{}x{}'.format(scale[0], scale[1]))
            if not os.path.isdir(cache_path):
                os.makedirs(cache_path)
            cache_path = os.path.join(cache_path, 'proposals.pkl')
            print('Done! Saving proposals into: {}'.format(cache_path))
            with open(cache_path, 'wb') as detfile:
                cPickle.dump(tmp_props, detfile)
            proposals.append(tmp_props)
        pool.close()
    if not os.path.isdir(config.TEST.PROPOSAL_SAVE_PATH):
        os.makedirs(config.TEST.PROPOSAL_SAVE_PATH)
    # Merge scales: stack every scale's proposals for each image.
    final_proposals = proposals[0]
    if len(proposals) > 1:
        for i in range(len(proposals[0])):
            for j in range(1, len(proposals)):
                final_proposals[i] = np.vstack((final_proposals[i], proposals[j][i]))
    save_path = os.path.join(config.TEST.PROPOSAL_SAVE_PATH, '{}_{}_rpn.pkl'.format(config.dataset.dataset.upper(),
                                                                                    config.dataset.test_image_set))
    with open(save_path, 'wb') as file:
        cPickle.dump(final_proposals, file)
    print('All done!')
48,773 | marcosguilhermef/oddsing-python | refs/heads/main | /scrapingOdds.py | import traceback
from warnings import catch_warnings, simplefilter
import connect
from bs4 import BeautifulSoup
import re
import json
import datetime
import database
from OddParametrizacao import bolinhaParaSa
from OddParametrizacao import kbetsParaSa
from difflib import SequenceMatcher
class RaparOddsSa():
    """Scrapes one match page of an "SA sports" white-label site and builds a
    JSON-like dict with the teams and every offered odd."""

    def __init__(self,link,dateMatch = ''):
        self.link = link
        self.Request()
        self.StartSoup()
        # Accumulator for everything scraped from the page.
        self.oddsJSON = {}
        self.oddsJSON['data_hora'] = datetime.datetime.now()  # scrape timestamp
        self.oddsJSON['sistema'] = 'sa sports'
        self.oddsJSON['date_match'] = dateMatch

    def Request(self):
        # Download the raw HTML of the match page.
        self.body = connect.ConectSA(self.link).getBody()

    def StartSoup(self):
        self.soup = BeautifulSoup(self.body, 'html.parser')

    def scrapCasaFora(self):
        """Extract home ('tCasa') and away ('tFora') names from the page title."""
        self.CasaFora = self.soup.find(id='conteudo_tituloCampeonato')
        self.CasaFora = re.sub('Apostas Disponíveis - ','',self.CasaFora.text)
        # NOTE(review): splitting on the bare letter 'x' assumes titles are
        # "Home x Away" and breaks for team names containing an 'x' — confirm.
        splod = re.split('x',self.CasaFora)
        # Strips a single leading/trailing whitespace character from each side.
        self.oddsJSON['tCasa'] = re.sub('(\s)$|^(\s)','',splod[0])
        self.oddsJSON['tFora'] = re.sub('(\s)$|^(\s)','',splod[1])
        return self.CasaFora

    def scrapOdds(self):
        """Walk every table row: one-cell rows set the current odd group,
        two-cell rows contribute {nome, Taxa, tipo} entries."""
        #print(self.body)
        tipoDeOdd = None
        self.oddsSoap = self.soup.find_all('tr', limit=False)
        self.oddsJSON['odds'] = []
        for odd in self.oddsSoap:
            td = odd.find_all('td')
            try:
                text = td[1].get_text()
                # Odds use comma decimals; empty cells become 0.
                parametrizar = { "nome":td[0].get_text(),"Taxa": float(text.replace(',','.') if text != '' else '0') , "tipo": tipoDeOdd}
                self.oddsJSON['odds'].append(parametrizar)
            except IndexError:
                # Row with a single cell: treat it as a group header.
                if len(td) != 0:
                    tipoDeOdd = td[0].get_text()
        return self.oddsJSON
        #json.dumps(self.oddsJSON, indent = 4)

    def scrapNomeBanca(self):
        # The bookmaker ('banca') name is the first word of the URL/host.
        self.oddsJSON['banca'] = re.search('\w{1,}', self.link).group(0)
        print(self.oddsJSON['banca'])

    def setStatus(self):
        # Newly scraped rows are marked active; older ones are deactivated on insert.
        self.oddsJSON['ativo'] = True

    def scrapCompleto(self):
        """Run the whole scraping pipeline and return the filled dict."""
        self.scrapNomeBanca()
        self.scrapCasaFora()
        self.scrapOdds()
        self.setStatus()
        return self.getOddsJSON()

    def getOddsJSON(self):
        return self.oddsJSON
class rasparDadosKbets():
    """Scrapes one match from a "kbets" site and normalises it into the same
    dict shape produced by the SA-sports scraper (see RaparOddsSa)."""

    # Class-level cache slot (not used by this class's methods).
    listaDeJogos = None

    def __init__(self,link,casa= '',fora = '', dateMatch= None):
        self.link = link
        self.casa = casa
        self.fora = fora
        self.Request()
        self.oddsJSON = {}
        self.oddsJSON['sistema'] = 'kbets'
        self.oddsJSON['date_match'] = dateMatch

    def Request(self):
        # kbets serves the odds as JSON under /axios/data.
        try:
            self.body = connect.ConectKbets(self.link+'/axios/data').getBody()
        except Exception as e:
            raise e

    def Start(self):
        """Run the whole normalisation pipeline and return the dict."""
        self.setBanca()
        self.setHora()
        self.setStats()
        self.setCasaFora()
        self.oddsJSON['odds'] = self.setOdds()
        return self.oddsJSON

    def setBanca(self):
        # First word of the URL/host is used as the bookmaker name.
        self.oddsJSON['banca'] = re.search('\w{1,}', self.link).group(0)

    def setHora(self):
        self.oddsJSON['data_hora'] = datetime.datetime.now()

    def setStats(self):
        self.oddsJSON['ativo'] = True

    def setCasaFora(self):
        # Map this site's team names onto the canonical SA-sports names so the
        # same match can be correlated across bookmakers; keep the originals too.
        self.gerarNome()
        NomesSubstituiveis = self.obterNomeCasaNomeFora(self.casa, self.fora)
        self.oddsJSON['tCasa'] = NomesSubstituiveis['casa']
        self.oddsJSON['tFora'] = NomesSubstituiveis['fora']
        self.oddsJSON['tCasaOriginal'] = self.casa
        self.oddsJSON['tForaHoriginal'] = self.fora

    def gerarNome(self):
        # Load every known (tCasa, tFora) pair from the database.
        db = database.Database()
        lista = db.getAllTimes()
        self.listaSAgames = lista

    def obterNomeCasaNomeFora(self, casa,fora):
        """Fuzzy-match (SequenceMatcher ratio > 0.70) both names against the
        known SA-sports pairs; fall back to the original name when none match."""
        listaSAgames = self.listaSAgames
        casaResult = list(filter(lambda x: SequenceMatcher(None,x['tCasa'],casa).ratio() > 0.70 ,listaSAgames))
        if len(casaResult) == 0:
            casaResult = casa
        else:
            casaResult =casaResult[0]['tCasa']
        print('ratio: ',SequenceMatcher(None,casaResult,casa).ratio()," casa: "+casaResult," casa2: ", casa)
        foraResult = list(filter(lambda x: SequenceMatcher(None,x['tFora'],fora).ratio() > 0.70,listaSAgames))
        if len(foraResult) == 0:
            foraResult = fora
        else:
            foraResult = foraResult[0]['tFora']
        print('ratio: ',SequenceMatcher(None,foraResult,fora).ratio()," fora: "+foraResult," fora2: ", fora)
        return { "casa": casaResult, "fora": foraResult }

    def setOdds(self):
        """Translate every raw kbets odd into the canonical format via kbetsParaSa."""
        self.oddsJSON['odds'] = []
        for x in self.body:
            try:
                compatibilizar = kbetsParaSa(x["grupo_id"],x['odds'],x['grupo'])
                grupo = compatibilizar.getGrupoOdd()
                oddsNome = compatibilizar.getName()
                odd = { "tipo": grupo, "Taxa": float(x["taxa"]), "nome": oddsNome }
                self.oddsJSON['odds'].append(odd)
            except:
                # NOTE(review): aborts the whole process on any bad odd;
                # presumably meant to skip the entry instead — confirm.
                traceback.print_exc()
                exit()
        return self.oddsJSON['odds']
class ScrapingOddsBolinha():
    """Scrapes the odds of one "bolinha" match and normalises them into the
    shared dict shape (see RaparOddsSa / rasparDadosKbets)."""

    def __init__(self,link, date_match,tCasa = '',tFora = '',campeonato = ''):
        self.link = link
        self.Request()
        self.oddsJSON = {}
        self.oddsJSON['data_hora'] = datetime.datetime.now()
        self.oddsJSON['sistema'] = 'bolinha'
        self.oddsJSON['tCasa'] = tCasa
        self.oddsJSON['tFora'] = tFora
        # Keep the site's own names so later scrapes of the same match can be
        # matched/deactivated (see Database.desativarAtivos).
        self.oddsJSON['tCasaOriginal'] = tCasa
        self.oddsJSON['tForaHoriginal'] =tFora
        self.oddsJSON['visivel'] = True
        self.oddsJSON['ativo'] = True
        # First word of the URL/host is used as the bookmaker name.
        self.oddsJSON['banca'] = re.search('\w{1,}', self.link).group(0)
        self.oddsJSON['date_match'] = date_match
        self.oddsJSON['campeonato'] = campeonato
        self.mudarNome()

    def Request(self):
        """Fetch the odds endpoint and coerce its quasi-JSON into real JSON."""
        try:
            self.body = connect.ConectBolinha(self.link).getBody()
            # The payload arrives with single-quoted JSON fragments; rewrite
            # the quoting before json.loads can parse it.
            self.body = re.sub("'{","{",str(self.body))
            self.body = re.sub("}'",'}',str(self.body))
            self.body = re.sub("'","\"",self.body)
            self.body = json.loads(self.body)
        except Exception as e:
            raise e

    def getBody(self):
        return self.body[0]

    def mudarNome(self):
        """Swap the site's team names for the canonical SA-sports names."""
        db = database.Database()
        lista = db.getAllTimes()
        self.listaSAgames = lista
        novoNome = self.obterNomeCasaNomeFora(self.oddsJSON['tCasa'],self.oddsJSON['tFora'])
        self.oddsJSON['tCasa'] = novoNome['casa']
        self.oddsJSON['tFora'] = novoNome['fora']

    def scrapCompleto(self):
        """Run the odds extraction and return the filled dict."""
        self.scrapOdds()
        return self.oddsJSON

    def scrapOdds(self):
        """Translate every raw odd through bolinhaParaSa into {nome, Taxa, tipo}."""
        self.oddsJSON['odds'] = []
        for i in self.body:
            compatibilizar = bolinhaParaSa(i['Value']['cat_id'],i['Value']['descricao'])
            odd_dados = { 'nome': compatibilizar.getName(),'Taxa': i['Value']['taxa'] ,'tipo': compatibilizar.getGrupoOdd() }
            self.oddsJSON['odds'].append(odd_dados)

    def obterNomeCasaNomeFora(self, casa,fora):
        """Fuzzy-match (SequenceMatcher ratio > 0.70) both names against the
        known SA-sports pairs; fall back to the original name when none match."""
        listaSAgames = self.listaSAgames
        casaResult = list(filter(lambda x: SequenceMatcher(None,x['tCasa'],casa).ratio() > 0.70 ,listaSAgames))
        if len(casaResult) == 0:
            casaResult = casa
        else:
            casaResult =casaResult[0]['tCasa']
        print('ratio: ',SequenceMatcher(None,casaResult,casa).ratio()," casa: "+casaResult," casa2: ", casa)
        foraResult = list(filter(lambda x: SequenceMatcher(None,x['tFora'],fora).ratio() > 0.70,listaSAgames))
        if len(foraResult) == 0:
            foraResult = fora
        else:
            foraResult = foraResult[0]['tFora']
        print('ratio: ',SequenceMatcher(None,foraResult,fora).ratio()," fora: "+foraResult," fora2: ", fora)
        return { "casa": casaResult, "fora": foraResult }
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,774 | marcosguilhermef/oddsing-python | refs/heads/main | /datasets.py | class Oddssa():
    def __init__(self,list,body):
        """Snapshot of one scraped odds row.

        Args:
            list: dict carrying the keys 'banca', 'campeonato', 'tCasa',
                'tFora', 'casa', 'empate', 'fora' (KeyError if any is missing).
                'casa'/'empate'/'fora' presumably hold the 1X2 odds — confirm
                with the producer of this dict.
            body: raw scraped payload the values came from.
        """
        self.banca = list['banca']
        self.campeonato = list['campeonato']
        self.tCasa = list['tCasa']
        self.tFora = list['tFora']
        self.casa = list['casa']
        self.empate = list['empate']
        self.fora = list['fora']
        self.body = body
    # Java-style setters kept for API compatibility; the attributes are
    # public, so plain assignment works just as well.
    def setTCasa(self, value):
        self.tCasa = value

    def setTFora(self, value):
        self.tFora = value

    def setFora(self, value):
        self.fora = value

    def setEmpate(self, value):
        self.empate = value

    def setCasa(self, value):
        self.casa = value

    def setCampeonato(self, value):
        self.campeonato = value
class Link():
    """Holds a list of scraped links plus a cursor (``interator``) and the
    cached list length.
    """

    def __init__(self, list):
        # Keep the original call order: the length is derived from the list,
        # so the list must be stored first.
        self.setLinkList(list)
        self.setLengthList()
        self.setInterator()

    # --- link list ------------------------------------------------------
    def getLinkList(self):
        print('setado', self.link)
        return self.link

    def setLinkList(self, list):
        self.link = list

    # --- iterator cursor ------------------------------------------------
    def getInterator(self):
        return self.interator

    def incrementInterator(self):
        # BUG FIX: was ``self.interator =+ 1`` which *assigns* +1 every call;
        # now actually increments the cursor.
        self.interator += 1
        return self.interator

    def setInterator(self):
        # BUG FIX: was ``self.getInterator = 0`` which shadowed the
        # getInterator() method with the integer 0; reset the counter instead.
        self.interator = 0

    # --- length ---------------------------------------------------------
    def getLengthList(self):
        return self.lengthList

    def setLengthList(self):
        self.lengthList = len(self.link)
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,775 | marcosguilhermef/oddsing-python | refs/heads/main | /scrapingLinks.py | from os import link, stat
from warnings import catch_warnings
import connect
from bs4 import BeautifulSoup
import re
import datetime
import json
class RaparLinksOddsSa():
    """Scrapes the fixtures listing of an "SA sports" site.

    ``Raspar`` returns only the betting-page hrefs; ``RaspagemCompleta``
    additionally carries country, championship and kick-off time per match.
    """

    def __init__(self, link):
        self.link = link
        self.body = self.getLinksMaisOdds()
        # BUG FIX: was ``self.pais = None,`` — the stray trailing comma made
        # this attribute the tuple ``(None,)`` instead of None.
        self.pais = None
        self.campeonato = None
        self.dataMatch = None

    def getLinksMaisOdds(self):
        """Download the raw HTML of the listing page."""
        return connect.ConectSA(self.link).getBody()

    def Raspar(self):
        """Return every href pointing at an 'apostas.aspx' betting page."""
        soup = BeautifulSoup(self.body, 'html.parser')
        hrefs = [tag['href'] for tag in soup.find_all('a')]
        # BUG FIX: the original pattern '(./)' left the dot unescaped, so any
        # character before '/' was rewritten; escape it so only the relative
        # './' prefix is normalised.
        tratados = [re.sub(r'(\./)', '/', href) for href in hrefs]
        return [href for href in tratados if re.search('apostas.aspx', href) is not None]

    def RaspagemCompleta(self):
        """Walk the fixtures table: header rows update country/championship,
        'th_1' rows each yield one match entry."""
        soup = BeautifulSoup(self.body, 'html.parser')
        linhas = soup.find_all('tr', limit=False)
        informacoesIteis = []
        for e in linhas:
            try:
                if 'tcpais' in e.td['class']:
                    self.rasparPais(e.td.span.span.next_sibling.get_text())
                if 'tccampeonato' in e.td['class']:
                    self.rasparCampeonato(e.td.span.extract())
                if 'th_1' in e.td['class']:
                    date_match = e.td.p.next_sibling.get_text()
                    link = self.RasparLink(e)
                    link = re.sub(r'(\./)', '/', link)
                    informacoesIteis.append({
                        'campeonato': self.campeonato,
                        'pais': self.pais,
                        # Site times are America/Sao_Paulo (UTC-3).
                        'date_match': datetime.datetime.strptime(date_match + " -03:00", '%d/%m/%Y %H:%M %z'),
                        'link': self.link + '/simulador' + link
                    })
            except Exception:
                # Malformed rows (missing cells) are skipped on purpose;
                # narrowed from a bare ``except`` so Ctrl-C still interrupts.
                pass
        return informacoesIteis

    def rasparPais(self, pais):
        # Replace non-breaking spaces coming from the HTML.
        self.pais = pais.replace(u'\xa0', u' ')

    def rasparCampeonato(self, campeonato):
        self.campeonato = campeonato.get_text()

    def rasparDataMatch(self, dataMatch):
        self.dataMatch = dataMatch

    def RasparLink(self, raw):
        """Extract the betting-page href from a fixtures row."""
        return raw.td.find_next_sibling(name="td", attrs={'class': 'th_5'}).a['href']

    def removeNone(self, link):
        """Drop None entries (kept for backward compatibility)."""
        return [item for item in link if item is not None]
#a = RaparLinksOddsSa("https://nbet91.com/simulador/jogos.aspx?idesporte=102&idcampeonato=575067")
class RaparLinksOddsKbets():
    """Lists every match exposed by a kbets site's /axios/data feed."""

    def __init__(self,link):
        self.link = link
        self.oddsJSON = {}
        self.Request()
        self.Start()

    def Request(self):
        # The feed is served as JSON under /axios/data.
        try:
            self.body = connect.ConectKbets(self.link+'/axios/data').getBody()
        except Exception as e:
            raise e

    def Start(self):
        # The feed carries the odd groups and the match list side by side.
        self.oddsGroups = self.body['odds']
        self.gameList = self.body['lista']

    def getAllId(self):
        """Return one work item per match: its per-match odds URL, id, the
        matching feed entries and the kick-off time (UTC-3)."""
        newArr = []
        for item in self.gameList:
            newArr.append(
                {
                    "link":self.link+"/axios/oddsWithGroups/"+item['id'],
                    "id": item['id'],
                    "gameItem": list(filter( lambda x : x['id'] == item['id'] ,self.gameList)),
                    "date_match": datetime.datetime.strptime(item['data_hora']+" -03:00", '%Y-%m-%d %H:%M:%S %z')
                }
            )
        return newArr

    def getOddsGroups(self):
        return self.oddsGroups

    def getgameList(self):
        return self.gameList
class RaparLinksOddsBolinha():
    """Lists every event exposed by a bolinha site's getEvents endpoint."""

    def __init__(self,link):
        self.link = link
        self.oddsJSON = {}
        self.Request()
        #self.Start()

    def Request(self):
        try:
            self.body = connect.ConectBolinha(self.link+'/futebolapi/api/CampJogo/getEvents/1').getBody()
        except Exception as e:
            raise e

    def getBody(self):
        return self.body

    def getMainData(self):
        """Return one dict per event: championship, teams, kick-off (UTC-3)
        and the per-match odds URL. Malformed entries are logged and skipped."""
        newArr = []
        body = self.getBody()
        print('aqqui')
        for i in body:
            try:
                # Each entry's 'Value' is itself a JSON-encoded string.
                res = json.loads(i['Value'])
                None if res == None else newArr.append(
                    {
                        'camp_jog_id': res["camp_jog_id"],
                        'camp_nome': res["camp_nome"],
                        'tCasa': res["casa_time"],
                        'tFora': res["visit_time"],
                        'date_match': datetime.datetime.strptime(res["dt_hr_ini"]+" -03:00",'%Y-%m-%dT%H:%M:%S %z'),
                        'link': self.link+'/futebolapi/api/CampJogo/getOdds/'+str(res["camp_jog_id"])
                    }
                )
            except Exception as e:
                print('erro', e)
        return newArr
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,776 | marcosguilhermef/oddsing-python | refs/heads/main | /images.py | import requests
from bs4 import BeautifulSoup
import traceback
import pymongo
from pymongo import aggregation
from database import Database
import os
from PIL import Image
import io
import re
from bson.objectid import ObjectId
import traceback;
class conectar:
def __init__(self,link):
self.link = link
def iniciar(self):
try:
r = requests.get('https://'+self.link)
return r
except requests.exceptions.SSLError:
r = requests.get('http://'+self.link)
return r
except:
pass
# Shared handles used by the download helpers below: one DB connection plus
# the local output directory and the URL prefix recorded in the database.
DATABASE = Database()
PATH="imagens/"
URL_BASE="localhost/origin/imagem"
# Production values (kept commented out for deployment):
#PATH="/home/origin/www/oddsing/laravelOddsing/storage/app/bancas/"
#URL_BASE="https://oddsing.xyz/bancas"
def salvarOriginal(link,id,img,banca,imgBruto):
    """Persist a banca's logo at original size and record its URL in the DB.

    Args:
        link, img: kept for interface compatibility (unused here).
        id: banca document id; used as the file name.
        banca: banca name; used as the directory name.
        imgBruto: raw downloaded image bytes.
    """
    target_dir = PATH + banca + "/original/"
    # BUG FIX: the original retried on FileNotFoundError but created only
    # PATH+banca while writing into PATH+banca+"/original/", so the retry
    # could fail again; makedirs creates the whole chain idempotently.
    os.makedirs(target_dir, exist_ok=True)
    imgBrutoResize = Image.open(io.BytesIO(imgBruto))
    imgBrutoResize.save(target_dir + str(id) + '.png')
    DATABASE.setImageInBanca(id, URL_BASE, 'original', banca)
def salvar50por50(link,id,img,banca,imgBruto):
    """Persist a 50x50 thumbnail of a banca's logo and record its URL in the DB.

    Args:
        link, img: kept for interface compatibility (unused here).
        id: banca document id; used as the file name.
        banca: banca name; used as the directory name.
        imgBruto: raw downloaded image bytes.
    """
    target_dir = PATH + banca + "/50x50/"
    # BUG FIX: replaces the retry-on-FileNotFoundError recursion, which only
    # created part of the needed directory chain; makedirs is idempotent.
    os.makedirs(target_dir, exist_ok=True)
    imgBrutoResize = Image.open(io.BytesIO(imgBruto)).resize((50,50))
    imgBrutoResize.save(target_dir + str(id) + '.png')
    DATABASE.setImageInBanca(id, URL_BASE, '50x50', banca)
"""myresult = DATABASE.getBancasListComplet('sa sports')
for x in myresult:
try:
site = conectar(x['url']).iniciar()
soup = BeautifulSoup(site.content, 'html.parser')
img = soup.find_all('img',limit=False)
imgBruto = conectar(x['url']+"/"+img[0]['src']).iniciar().content
try:
salvarOriginal(x['url'],x['_id'],img[0]['src'],x['banca'],imgBruto)
salvar50por50(x['url'],x['_id'],img[0]['src'],x['banca'],imgBruto)
except FileNotFoundError:
os.mkdir(PATH)
salvarOriginal(x['url'],x['_id'],img[0]['src'],x['banca'],imgBruto)
salvar50por50(x['url'],x['_id'],img[0]['src'],x['banca'],imgBruto)
except:
print('ERRO NO LINK ABAIXO SA SPORTS '+x['url'])
pass """
myresult = DATABASE.getBancasListComplet('kbets')
for x in myresult:
try:
site = conectar(x['url']).iniciar()
soup = BeautifulSoup(site.content, 'html.parser')
img = soup.find_all('link',limit=False)
imgLink = img[0]['href']
imgLink = re.sub('//','',imgLink)
print("-----> ",imgLink)
imgBruto = conectar(imgLink).iniciar().content
try:
salvarOriginal(x['url'],x['_id'],'',x['banca'],imgBruto)
salvar50por50(x['url'],x['_id'],'',x['banca'],imgBruto)
except FileNotFoundError:
os.mkdir(PATH)
salvarOriginal(x['url'],x['_id'],'',x['banca'],imgBruto)
salvar50por50(x['url'],x['_id'],'',x['banca'],imgBruto)
except:
print(x)
traceback.print_exc()
pass
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,777 | marcosguilhermef/oddsing-python | refs/heads/main | /database.py | import sqlite3
import pymongo
from pymongo import aggregation
import urllib
import json
class Database():
    """Thin MongoDB access layer for the scraped odds (plus a vestigial
    SQLite insert path)."""

    # Class-level handles: one shared client/collection per process.
    mongo = pymongo.MongoClient("mongodb://localhost:27017/")
    #mongo = pymongo.MongoClient("mongodb://oddsing:"+urllib.parse.quote("!@%17aBc1212")+"@localhost:27017/")
    databaseM = mongo["oddsing"]
    collection = databaseM['odds']
    listaDeJogos = None

    def setCollection(self):
        self.collection = self.databaseM['odds']

    def insertDados(self, dados):
        # NOTE(review): legacy SQLite path — ``self.cur``/``self.con`` are
        # never created in this class, so calling this raises AttributeError;
        # presumably dead code from an earlier version — confirm before use.
        self.dados = dados
        self.cur.execute("insert into jogo (tCasa,tFora,banca,odds) values (?, ?, ?, ?)", (self.dados['tCasa'], self.dados['tFora'], self.dados['banca'], str(self.dados['odds'])))
        self.con.commit()

    def insertMongo(self,dados):
        """Deactivate the previous rows for this match/banca, then insert."""
        self.desativarAtivos(dados)
        result = self.collection.insert_one(dados)
        print('carregado: '+str(result.inserted_id))

    def desativarAtivos(self,dados):
        """Flag older 'ativo' documents of the same match+banca as inactive so
        only the newest scrape stays active. kbets/bolinha match on the
        site-original team names, SA sports on the canonical names."""
        print(dados['sistema'])
        if dados['sistema'] == "kbets":
            result = self.collection.update_many({"ativo": True, "tCasaOriginal": dados['tCasaOriginal'], "tForaHoriginal":dados['tForaHoriginal'], 'banca': dados['banca']}, { "$set": {"ativo": False}})
        elif dados['sistema'] == "bolinha":
            result = self.collection.update_many({"ativo": True, "tCasaOriginal": dados['tCasaOriginal'], "tForaHoriginal":dados['tForaHoriginal'], 'banca': dados['banca']}, { "$set": {"ativo": False}})
            print(result.modified_count)
        else:
            result = self.collection.update_many({"ativo": True, "tCasa": dados['tCasa'], "tFora":dados['tFora'], 'banca': dados['banca']}, { "$set": {"ativo": False}})

    def getAllTimes(self):
        """Return every distinct (tCasa, tFora) pair among active SA-sports odds."""
        aggregate = [{"$match": { "ativo": True, "sistema": "sa sports" } },{"$group" : { "_id": { "tCasa": "$tCasa", "tFora" : "$tFora"}} }]
        result = Database.mongo["oddsing"]["odds"].aggregate(aggregate)
        result = list(result)
        newResult = map( lambda x: x['_id'] ,result)
        # NOTE(review): this stores the map *iterator*, which the list() call
        # below exhausts — the cached attribute ends up empty; confirm intent.
        self.listaDeJogos = newResult
        return list(newResult)

    def getBancasList(self, sistema):
        """URLs of the bancas of the given system that are flagged for scraping."""
        collection = self.databaseM['banca']
        result = collection.find({"rastrear": True, "sistema": sistema},{"_id":0,"url":1})
        result = list(map( lambda x : x['url'] , result))
        return result

    def getBancasListComplet(self, sistema):
        """Bancas of the given system that have no image recorded yet."""
        collection = self.databaseM['banca']
        result = collection.find({ "sistema": sistema, "imagem": {"$size" : 0}},{"_id":1,"url":1,"banca":1})
        return list(result)

    def setImageInBanca(self,id,url_base,size,banca):
        """Append one {size, url} image record to a banca document."""
        collection = self.databaseM['banca']
        result = collection.update_one({"_id": id}, {
                "$push": {
                    "imagem": {
                        "size": size,
                        "url": url_base+"/"+banca+"/"+size+"/"+str(id)+".png"
                    }
                }
            }
        )
        print(result.modified_count)
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,778 | marcosguilhermef/oddsing-python | refs/heads/main | /main.py | #!/usr/bin/python3
from pymongo import database
from scrapingLinks import RaparLinksOddsSa as scrap
from scrapingLinks import RaparLinksOddsKbets as scrapK
from scrapingLinks import RaparLinksOddsBolinha as scrapBol
from datasets import Link
from lerArquivoDeLinks import ArquivoSA as lerSa
from lerArquivoDeLinks import ArquivoKBETS as lerK
from connect import ConectSA
from scrapingOdds import RaparOddsSa as scrapsaodds
from scrapingOdds import rasparDadosKbets as scrapkbetsodds
from scrapingOdds import ScrapingOddsBolinha as scrapbolinhaodds
from SalvarEmTexto import SalvarArquivoTexto as save
from threading import Thread
import threading
from database import Database
import traceback
import logging
import concurrent.futures
THREADS_N = 8
class CarregamentoDeLinks():
    """Top-level orchestrator: for each bookmaker system (SA sports, kbets,
    bolinha) it lists the registered bancas, enumerates their matches and
    scrapes every match's odds into MongoDB, fanning the per-match work out
    over a thread pool of THREADS_N workers."""

    def __init__(self):
        self.database = Database()
        self.bancaListLink = None
        self.listLinkOdds = None

    # ------------------------------ kbets -------------------------------
    def ScrapingLinksKbets(self):
        """Scrape every registered kbets banca."""
        self.linksK = self.database.getBancasList('kbets')
        for k in self.linksK:
            self.scrapingCompletoKbets(k)

    def scrapingCompletoKbets(self,link):
        # Fan each match of this banca out over the thread pool.
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS_N) as executor:
                instancia = scrapK(link)
                executor.map(self.scrapingOddsKbets,instancia.getAllId())
        except:
            traceback.print_exc()

    def scrapingOddsKbets(self,item):
        """Scrape a single kbets match and persist it."""
        #print('thread: ',threading.get_ident())
        result = scrapkbetsodds(item['link'], casa=item['gameItem'][0]['tc'], fora=item['gameItem'][0]['tf'],dateMatch=item['date_match']).Start()
        print(result)
        self.salve(result)

    # ---------------------------- SA sports -----------------------------
    def ScrapingOddSA(self):
        """Scrape every registered SA-sports banca."""
        links = self.database.getBancasList('sa sports')
        for link in links:
            self.RaspagemCompletaLinkSA(link)

    def RaspagemCompletaLinkSA(self,link):
        arr = self.ScrapingLinkMainOddsSA(link)
        self.RasparTodosOsLinksMaisOddsSA(arr)

    def ScrapingLinkMainOddsSA(self,link):
        # List all matches of the banca; on any failure fall back to an empty
        # list so the overall run continues.
        try:
            listLinksScraping = scrap(link).RaspagemCompleta()
        except:
            listLinksScraping = []
        return listLinksScraping

    def RasparTodosOsLinksMaisOddsSA(self,arr):
        with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS_N) as executor:
            executor.map(self.RasparMaisOddsSA,arr)

    def RasparMaisOddsSA(self,dados):
        """Scrape one SA-sports match page and persist it."""
        print('thread: ',threading.get_ident())
        try:
            a = scrapsaodds(dados['link'],dados['date_match']).scrapCompleto()
            self.salve(a)
        except Exception:
            traceback.print_exc()

    # ----------------------------- bolinha ------------------------------
    def ScrapingOddsBolinha(self):
        """Scrape every registered bolinha banca."""
        self.linksBolinha = self.database.getBancasList('bolinha')
        for i in self.linksBolinha:
            self.RasparOddsBolinhas(i)

    def RasparOddsBolinhas(self,link):
        # The banca URL is replaced by its event list, then fanned out.
        link = scrapBol(link).getMainData()
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS_N) as executor:
                executor.map(self.ProcessarMultithreadBolinha,link)
        except:
            traceback.print_exc()

    def ProcessarMultithreadBolinha(self,i):
        """Scrape one bolinha match and persist it."""
        print('thread: ',threading.get_ident())
        odds = scrapbolinhaodds(i['link'],i['date_match'],i['tCasa'],i['tFora'],i['camp_nome']).scrapCompleto()
        self.salve(odds)

    def salve(self,body):
        # All three systems funnel into MongoDB through here.
        print('salve')
        self.database.insertMongo(body)
# Entry point: scrape all three systems back to back, forever.
a = CarregamentoDeLinks()
while True:
    a.ScrapingOddSA()
    a.ScrapingLinksKbets()
    a.ScrapingOddsBolinha()
# Earlier experiment running the scrapers concurrently (kept for reference):
""" while True:
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        executor.submit(a.ScrapingOddSA)
        executor.submit(a.ScrapingLinksKbets)
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
        executor.submit(a.ScrapingOddsBolinha)
"""
48,779 | marcosguilhermef/oddsing-python | refs/heads/main | /connect.py | import requests
# This class performs the first request to the site.
class ConectSA():
    """HTTP fetcher for SA-sports sites.

    ``SALink`` is host+path without a scheme; HTTPS is tried first with a
    plain-HTTP retry on TLS failure.
    """

    def __init__(self,SALink):
        self.link = SALink
        self.response = self.setResponse()

    def setResponse(self):
        # Try HTTPS first; on a TLS failure retry over plain HTTP.
        try:
            r = requests.get('https://'+self.link)
            return r
        except requests.exceptions.SSLError:
            r = requests.get('http://'+self.link)
            return r
        except Exception as erro:
            # Anything else (DNS, refused connection, ...) propagates.
            raise erro

    def getBody(self):
        # Raw bytes of the response body.
        return self.response.content

    def getResponseCode(self):
        return self.response.status_code
class ConectKbets():
    """HTTP fetcher for kbets JSON endpoints (HTTPS with plain-HTTP fallback).

    ``link`` is host+path without a scheme.
    """

    def __init__(self,link):
        self.link = link
        self.response = self.setResponse()

    def setResponse(self):
        # Try HTTPS first; on a TLS failure retry over plain HTTP.
        try:
            print('https://'+self.link)
            r = requests.get('https://'+self.link)
            return r
        except requests.exceptions.SSLError:
            print('http://'+self.link)
            r = requests.get('http://'+self.link)
            return r
        except Exception as erro:
            raise erro

    def getBody(self):
        # kbets endpoints answer JSON.
        return self.response.json()

    def getResponseCode(self):
        return self.response.status_code
class Cookie():
    """Process-wide cookie jar shared by every ConectBolinha request."""

    # Class-level so every instance observes the same jar.
    cookies = None

    def setCookie(self, cookie):
        # BUG FIX: previously assigned to the *instance* (self.cookies = ...),
        # but ConectBolinha builds a fresh Cookie() per request, so the next
        # request always saw None and session cookies were never reused.
        # Writing the class attribute makes the jar actually persist.
        Cookie.cookies = cookie
class ConectBolinha():
    """Fetcher for 'bolinha' sites: HTTPS with HTTP fallback on TLS errors,
    plus a SOCKS-proxy retry when the connection is refused (or when the
    server answers 500, which is treated as a block)."""

    # Class-level default; per-request cookies are tracked via Cookie.
    cookies = ''

    def __init__(self,link):
        self.link = link
        self.response = self.setResponse()

    def setResponse(self):
        try:
            print('https://'+self.link)
            a = Cookie()
            r = requests.get('https://'+self.link, cookies = a.cookies)
            a.setCookie(r.cookies)
            # A 500 is treated as "blocked": force the proxy path below.
            if r.status_code == 500:
                raise requests.exceptions.ConnectionError
            return r
        except requests.exceptions.SSLError:
            # TLS failure: retry the same request over plain HTTP.
            print('http://'+self.link)
            a = Cookie()
            r = requests.get('http://'+self.link, cookies = a.cookies)
            a.setCookie(r.cookies)
            if r.status_code == 500:
                raise requests.exceptions.ConnectionError
            return r
        except requests.exceptions.ConnectionError:
            # Connection refused / simulated block: retry through a proxy.
            try:
                """ proxies = {
                    "http": "http://lum-customer-hl_6fcc0e29-zone-static-route_err-pass_dyn-country-br:1art1pt9d8mi@zproxy.lum-superproxy.io:22225",
                    "https": "http://lum-customer-hl_6fcc0e29-zone-static-route_err-pass_dyn-country-br:1art1pt9d8mi@zproxy.lum-superproxy.io:22225",
                } """
                proxies = {
                    "http": "socks4://191.7.215.246:5678",
                    "https": "socks4://191.7.215.246:5678",
                }
                print('http://'+self.link)
                r = requests.get('https://'+self.link, proxies=proxies)
                return r
            except:
                # Last resort: plain HTTP through the same proxy.
                print('http://'+self.link)
                print('erro')
                r = requests.get('http://'+self.link, proxies = proxies)
                return r

    def getBody(self):
        # bolinha endpoints answer JSON.
        return self.response.json()

    def getResponseCode(self):
        return self.response.status_code
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,780 | marcosguilhermef/oddsing-python | refs/heads/main | /SalvarEmTexto.py | import pandas as pd
import json
import csv
import xml.etree.cElementTree as e
class SalvarArquivoTexto():
    """Serialize a result payload to text files (JSON; CSV/HTML are stubs)."""

    def __init__(self, arquivo):
        # *arquivo* is the object to serialize (must be a mapping only if
        # the heads helpers are used).
        self.arquivo = arquivo
        # Kept disabled as in the original: enabling would require *arquivo*
        # to be a mapping, which save() does not assume.
        #self.setHeads()

    def save(self):
        """Append the JSON form of the payload to teste.json."""
        # Context manager fixes the original's leaked file handle.
        with open('teste.json', 'a+') as arq:
            arq.write(json.dumps(self.arquivo))

    def getHeads(self):
        # Only valid after setHeads() has run; raises AttributeError otherwise.
        return self.heads

    def setHeads(self):
        # Cache the payload's keys; requires *arquivo* to be a mapping.
        self.heads = list(self.arquivo.keys())

    def csvsave(self):
        # Despite the name, this only prints the JSON form for inspection;
        # *heads* is fetched but never used (and needs setHeads() first).
        heads = self.getHeads()
        odds = self.arquivo
        print(json.dumps(odds))
def htmlCreacre():
    # Dead code: assigns an opening tag to an unused local and returns None.
    table = '<table>'
class html():
    """Static helpers for assembling HTML fragments (work in progress)."""

    @staticmethod
    def setTable(corpo):
        # Wrap the given body markup in a <table> element.
        return '<table>'+corpo+'</table>'

    @staticmethod
    def setBodyArray(array):
        # Stub: intended to build table rows from *array*; currently the
        # loop does nothing and the method returns None.
        string = ''
        for arr in array:
            pass
48,781 | marcosguilhermef/oddsing-python | refs/heads/main | /lerArquivoDeLinks.py | import re
class ArquivoSA():
    """Reads the 'SAlist' link file from the working directory."""

    def ler(self):
        """Return the file's lines with every newline character removed."""
        # 'with' guarantees the handle is closed (the original leaked it).
        with open('SAlist') as op:
            links = op.readlines()
        # str.replace is exactly equivalent to the original re.sub('\n', '', link).
        return [link.replace('\n', '') for link in links]
class ArquivoKBETS():
    """Reads the 'KBETSlist' link file from the working directory."""

    def ler(self):
        """Return the file's lines with every newline character removed."""
        # 'with' guarantees the handle is closed (the original leaked it).
        with open('KBETSlist') as op:
            links = op.readlines()
        # str.replace is exactly equivalent to the original re.sub('\n', '', link).
        return [link.replace('\n', '') for link in links]
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,782 | marcosguilhermef/oddsing-python | refs/heads/main | /este.py | def teste(**arr):
print(arr)
teste(tCasa="casa",tFora="fora") | {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,783 | marcosguilhermef/oddsing-python | refs/heads/main | /OddParametrizacao.py | class bolinhaParaSa():
    def __init__(self,codigo = '',nome = ''):
        """Map a 'bolinha' odd (group code + market name) onto SA naming.

        NOTE(review): int('') raises ValueError, so the default codigo=''
        is unusable — callers must always pass a numeric code. Confirm.
        """
        self.grupoCodigo = int(codigo)  # numeric group id used for lookups
        self.grupoOdd = codigo          # raw code as received
        self.tipoOdd = ''               # unused in this class; mirrors kbetsParaSa
        self.nome = nome                # market/odd name to translate
def getGrupoOdd(self):
grupo = {
1: "Vencedor do Encontro" ,
2: "Dupla Chance" ,
3: "Casa - Total de gols no jogo" ,
4: "Fora - Total de gols no jogo" ,
5: "Total de Gols no Jogo" ,
6: "Ambas as equipes marcarão na partida" ,
7: "Total de Gols Par ou Ímpar" ,
8: "Resultado Exato" ,
9: "Handicap de Gol" ,
10: "Handicap Asiático" ,
11: "Vence ao Intervalo/Vence ao Final do Jogo" ,
12: "Empate não tem aposta" ,
13: "Resultado / Total de Gols" ,
14: "Ambas com Resultado" ,
15: "Ambas as equipes marcarão na partida" ,
16: "Margem de vitória" ,
17: "Cantos" ,
18: "Equipes para marcar" ,
19: "Metados com mais gols" ,
20: "Casa metade mais produtiva" ,
21: "Fora metade mais produtiva" ,
22: "Baliza inviolada" ,
23: "Número de gols na partida" ,
24: "Total de gols com ambas marcam" ,
25: "Total exato de gols" ,
26: "Ganhar a zero" ,
27: "Ganhar um tempo" ,
28: "Ganhar ambos os tempos" ,
29: "Marcar em ambos os tempos" ,
30: "1º Tempo - ambas as equipes marcam" ,
31: "2º Tempo - ambas as equipes para marcar" ,
32: "Marcar um pênalti" ,
33: "Cartão vermelho na partida" ,
40: "Vencedor da luta" ,
45: "Vencedor" ,
46: "Handicap de pontos" ,
47: "Total de pontos" ,
48: "Intervalo / Final" ,
49: "Margem de vitória" ,
50: "Casa Par/Ímpar"
}
key = grupo.get(self.grupoCodigo)
return key if key != None else 'desconecido: codigo '+str(self.grupoCodigo)
def getName(self):
grupo = {
1: {
"Vencedor do Encontro": {
1:'Casa',
2:'Empate',
3:'Fora'
}
},
2: {
"Dupla Chance":{
"1-2":"Casa ou Fora",
"1X":"Casa ou Empate",
"X2":"Empate ou Fora",
}
} ,
3: {
"Casa - Total de gols no jogo": {
"Casa - Acima 0.5":"mais de 0,5",
"Casa - Acima 1.5":"mais de 1,5",
"Casa - Acima 2.5":"mais de 2,5",
"Casa - Acima 3.5":"mais de 3,5",
"Casa - Acima 4.5":"mais de 4,5",
"Casa - menos 0.5":"menos de 0,5",
"Casa - menos 1.5":"menos de 1,5",
"Casa - menos 2.5":"menos de 2,5",
"Casa - menos 3.5":"menos de 3,5",
"Casa - menos 4.5":"menos de 4,5",
}
} ,
4: {
"Fora - Total de gols no jogo":{
"Fora - Acima 0.5":"mais de 0,5",
"Fora - Acima 1.5":"mais de 1,5",
"Fora - Acima 2.5":"mais de 2,5",
"Fora - Acima 3.5":"mais de 3,5",
"Fora - Acima 4.5":"mais de 4,5",
"Fora - menos 0.5":"menos de 0,5",
"Fora - menos 1.5":"menos de 1,5",
"Fora - menos 2.5":"menos de 2,5",
"Fora - menos 3.5":"menos de 3,5",
"Fora - menos 4.5":"menos de 4,5",
}
} ,
5: {
"Total de Gols no Jogo":{
"Jogo - Acima 0.5":"mais de 0,5",
"Jogo - Acima 1.5":"mais de 1,5",
"Jogo - Acima 2.5":"mais de 2,5",
"Jogo - Acima 3.5":"mais de 3,5",
"Jogo - Acima 4.5":"mais de 4,5",
"Jogo - menos 0.5":"menos de 0,5",
"Jogo - menos 1.5":"menos de 1,5",
"Jogo - menos 2.5":"menos de 2,5",
"Jogo - menos 3.5":"menos de 3,5",
"Jogo - menos 4.5":"menos de 4,5",
}
} ,
6: {
"Ambas as equipes marcarão na partida": {
"Ambos M Nao":"não",
"Ambos M":"sim"
}
} ,
7: {"Total de Gols Par ou Ímpar": {
"Casa": "Casa",
"Fora": "Fora",
"Empate":"Empate"
}} ,
8: {"Resultado Exato": {
}} ,
9: {"Handicap de Gol": {}} ,
10:{ "Handicap Asiático": {}} ,
11: {"Vence ao Intervalo/Vence ao Final do Jogo": {}} ,
12: {"Empate não tem aposta": {}} ,
13: {"Resultado / Total de Gols": {}} ,
14: {"Ambas com Resultado": {}} ,
15: {"Ambas as equipes marcarão na partida": {}} ,
16: {"Margem de vitória": {}} ,
17: {"Cantos": {}} ,
18: {"Equipes para marcar": {}} ,
19: {"Metados com mais gols": {}} ,
20: {"Casa metade mais produtiva": {}} ,
21: {"Fora metade mais produtiva": {}} ,
22: {"Baliza inviolada": {}} ,
23: {"Número de gols na partida": {}} ,
24: {"Total de gols com ambas marcam": {}} ,
25: {"Total exato de gols": {}} ,
26: {"Ganhar a zero": {}} ,
27: {"Ganhar um tempo" : {}},
28: {"Ganhar ambos os tempos": {}} ,
29: {"Marcar em ambos os tempos" : {}},
30: {"1º Tempo - ambas as equipes marcam": {}} ,
31: {"2º Tempo - ambas as equipes para marcar": {}} ,
32: {"Marcar um pênalti": {}} ,
33: {"Cartão vermelho na partida": {}} ,
40: {"Vencedor da luta": {}} ,
45: {"Vencedor": {}} ,
46: {"Handicap de pontos": {}},
47: {"Total de pontos" : {}},
48: {"Intervalo / Final": {}} ,
49: {"Margem de vitória" : {}},
50: {"Casa Par/Ímpar": {}}
}
grupamento = self.getGrupoOdd()
key = grupo[self.grupoCodigo][grupamento].get(self.nome)
return key if key != None else self.nome
class kbetsParaSa():
    def __init__(self,codigo = '',nome = '', tipoOdd = ''):
        """Map a KBETS odd (group code + market name) onto SA naming.

        NOTE(review): int('') raises ValueError, so the default codigo=''
        is unusable — callers must always pass a numeric code. Confirm.
        """
        self.grupoCodigo = int(codigo)  # numeric group id used for lookups
        self.grupoOdd = codigo          # raw code as received
        self.tipoOdd = tipoOdd          # fallback label when the code is unknown
        self.nome = nome                # market/odd name to translate
def getGrupoOdd(self):
grupo = {
1: "Vencedor do Encontro" ,
2: "Dupla Chance" ,
3: "Casa - Total de gols no jogo" ,
4: "Fora - Total de gols no jogo" ,
5: "Total de Gols no Jogo" ,
6: "Ambas as equipes marcarão na partida" ,
7: "Total de Gols Par ou Ímpar" ,
8: "Resultado Exato" ,
#9: "Handicap de Gol" ,
#10: "Handicap Asiático" ,
#11: "Vence ao Intervalo/Vence ao Final do Jogo" ,
#12: "Empate não tem aposta" ,
#13: "Resultado / Total de Gols" ,
#14: "Ambas com Resultado" ,
#15: "Ambas as equipes marcarão na partida" ,
13: "Margem de vitória" ,
48: "Cantos" ,
#18: "Equipes para marcar" ,
15: "Metados com mais gols" ,
#20: "Casa metade mais produtiva" ,
#21: "Fora metade mais produtiva" ,
#22: "Baliza inviolada" ,
16: "Número de gols na partida" ,
17: "Total de gols com ambas marcam" ,
18: "Total exato de gols" ,
19: "Ganhar a zero" ,
20: "Ganhar um tempo" ,
21: "Ganhar ambos os tempos" ,
22: "Marcar em ambos os tempos" ,
30: "1º Tempo - ambas as equipes marcam" ,
36: "2º Tempo - ambas as equipes para marcar" ,
#32: "Marcar um pênalti" ,
33: "Cartão vermelho na partida" ,
#40: "Vencedor da luta" ,
#45: "Vencedor" ,
#46: "Handicap de pontos" ,
#47: "Total de pontos" ,
#48: "Intervalo / Final" ,
#49: "Margem de vitória" ,
#50: "Casa Par/Ímpar"
}
key = grupo.get(self.grupoCodigo)
return key if key != None else self.tipoOdd
def getName(self):
grupo = {
1: { "Vencedor do Encontro" : {
"Casa": "Casa",
"Fora": "Fora",
"Empate": "Empate"
}
} ,
2: { "Dupla Chance" : {
"1-2 - Casa ou Fora":"Casa ou Fora",
"1X - Casa ou empate":"Casa ou Empate",
"X2 - Fora ou empate":"Empate ou Fora",
}
} ,
3: { "Casa - Total de gols no jogo" :
{
"Casa Acima 0.5":"mais de 0,5",
"Casa Acima 1.5":"mais de 1,5",
"Casa Acima 2.5":"mais de 2,5",
"Casa Acima 3.5":"mais de 3,5",
"Casa Acima 4.5":"mais de 4,5",
"Casa Acima 5.5":"mais de 5,5",
"Casa Abaixo 0.5":"menos de 0,5",
"Casa Abaixo 1.5":"menos de 1,5",
"Casa Abaixo 2.5":"menos de 2,5",
"Casa Abaixo 3.5":"menos de 3,5",
"Casa Abaixo 4.5":"menos de 4,5",
"Casa Abaixo 5.5":"menos de 5,5",
}
} ,
4: { "Fora - Total de gols no jogo" : {
"Fora Acima 0.5":"mais de 0,5",
"Fora Acima 1.5":"mais de 1,5",
"Fora Acima 2.5":"mais de 2,5",
"Fora Acima 3.5":"mais de 3,5",
"Fora Acima 4.5":"mais de 4,5",
"Fora Acima 5.5":"mais de 5,5",
"Fora Abaixo 0.5":"menos de 0,5",
"Fora Abaixo 1.5":"menos de 1,5",
"Fora Abaixo 2.5":"menos de 2,5",
"Fora Abaixo 3.5":"menos de 3,5",
"Fora Abaixo 4.5":"menos de 4,5",
"Fora Abaixo 5.5":"menos de 5,5",
}
} ,
5: { "Total de Gols no Jogo" : {
"Jogo - acima 0.5":"mais de 0,5",
"Jogo - acima 1.5":"mais de 1,5",
"Jogo - acima 2.5":"mais de 2,5",
"Jogo - acima 3.5":"mais de 3,5",
"Jogo - acima 4.5":"mais de 4,5",
"Jogo - acima 5.5":"mais de 5,5",
"Jogo - abaixo 0.5":"menos de 0,5",
"Jogo - abaixo 1.5":"menos de 1,5",
"Jogo - abaixo 2.5":"menos de 2,5",
"Jogo - abaixo 3.5":"menos de 3,5",
"Jogo - abaixo 4.5":"menos de 4,5",
"Jogo - abaixo 5.5":"menos de 5,5",
}
},
6: { "Ambas as equipes marcarão na partida" : {} } ,
7: { "Total de Gols Par ou Ímpar" : {} } ,
8: { "Resultado Exato" : {
"Placar 0x0 Final":"0:0",
"Placar 0x1 Final":"0:1",
"Placar 0x2 Final":"0:2",
"Placar 0x3 Final":"0:3",
"Placar 0x4 Final":"0:4",
"Placar 0x5 Final":"0:5",
"Placar 0x6 Final":"0:6",
"Placar 1x0 Final":"1:0",
"Placar 1x1 Final":"1:1",
"Placar 1x2 Final":"1:2",
"Placar 1x3 Final":"1:3",
"Placar 1x4 Final":"1:4",
"Placar 1x5 Final":"1:5",
"Placar 1x6 Final":"1:6",
"Placar 2x0 Final":"2:0",
"Placar 2x1 Final":"2:1",
"Placar 2x2 Final":"2:2",
"Placar 2x3 Final":"2:3",
"Placar 2x4 Final":"2:4",
"Placar 2x5 Final":"2:5",
"Placar 2x6 Final":"2:6",
"Placar 3x0 Final":"3:0",
"Placar 3x1 Final":"3:1",
"Placar 3x2 Final":"3:2",
"Placar 3x3 Final":"3:3",
"Placar 3x4 Final":"3:4",
"Placar 3x5 Final":"3:5",
"Placar 3x6 Final":"3:6",
"Placar 4x0 Final":"4:0",
"Placar 4x1 Final":"4:1",
"Placar 4x2 Final":"4:2",
"Placar 4x3 Final":"4:3",
"Placar 4x4 Final":"4:4",
"Placar 4x5 Final":"4:5",
"Placar 4x6 Final":"4:6",
"Placar 5x0 Final":"5:0",
"Placar 5x1 Final":"5:1",
"Placar 5x2 Final":"5:2",
"Placar 5x3 Final":"5:3",
"Placar 5x4 Final":"5:4",
"Placar 5x5 Final":"5:5",
"Placar 5x6 Final":"5:6",
"Placar 6x0 Final":"6:0",
"Placar 6x1 Final":"6:1",
"Placar 6x2 Final":"6:2",
"Placar 6x3 Final":"6:3",
"Placar 6x4 Final":"6:4",
"Placar 6x5 Final":"6:5",
"Placar 6x6 Final":"6:6",
} },
#9: "Handicap de Gol" ,
#10: "Handicap Asiático" ,
#11: "Vence ao Intervalo/Vence ao Final do Jogo" ,
#12: "Empate não tem aposta" ,
#13: "Resultado / Total de Gols" ,
#14: "Ambas com Resultado" ,
#15: "Ambas as equipes marcarão na partida" ,
13: { "Margem de vitória" : {} } ,
48: { "Cantos" : {} } ,
#18: "Equipes para marcar" ,
15: { "Metados com mais gols" : {} } ,
#20: "Casa metade mais produtiva" ,
#21: "Fora metade mais produtiva" ,
#22: "Baliza inviolada" ,
16: { "Número de gols na partida" : {} } ,
17: { "Total de gols com ambas marcam" : {} } ,
18: { "Total exato de gols" : {} } ,
19: { "Ganhar a zero" : {} } ,
20: { "Ganhar um tempo" : {} } ,
21: { "Ganhar ambos os tempos" : {} } ,
22: { "Marcar em ambos os tempos" : {} } ,
30: { "1º Tempo - ambas as equipes marcam" : {} } ,
36: { "2º Tempo - ambas as equipes para marcar" : {} } ,
#32: "Marcar um pênalti" ,
33: { "Cartão vermelho na partida" : {} } ,
#40: "Vencedor da luta" ,
#45: "Vencedor" ,
#46: "Handicap de pontos" ,
#47: "Total de pontos" ,
#48: "Intervalo / Final" ,
#49: "Margem de vitória" ,
#50: "Casa Par/Ímpar"
}
grupamento = self.getGrupoOdd()
try:
key = grupo[self.grupoCodigo][grupamento].get(self.nome,self.nome)
except:
key = self.nome
return key
#a = kbetsParaSa(17,'Jogo - Acima 1.5','empate ou fora')
#print(a.getGrupoOdd())
#print(a.getName())
| {"/scrapingOdds.py": ["/connect.py", "/database.py", "/OddParametrizacao.py"], "/scrapingLinks.py": ["/connect.py"], "/images.py": ["/database.py"], "/main.py": ["/scrapingLinks.py", "/datasets.py", "/lerArquivoDeLinks.py", "/connect.py", "/scrapingOdds.py", "/SalvarEmTexto.py", "/database.py"]} |
48,795 | algakbk/book-store | refs/heads/main | /main/views.py | from django.shortcuts import render, HttpResponse, redirect
from .models import Book
# Create your views here.
def homepage(request):
    """Serve the store landing page."""
    template = "index.html"
    return render(request, template)
def book(request):
    """Show every book in the catalogue."""
    context = {"book_list": Book.objects.all()}
    return render(request, "books.html", context)
def add_book(request):
    """Render the create-book form; submission is handled by add_books."""
    return render(request, "add_book.html")
def add_books(request):
    """Create a Book from the POSTed form fields and redirect to the list.

    Raises KeyError (as the original did) if any expected field is missing.
    """
    form = request.POST
    # Gather all model fields from the submitted form in one place.
    fields = {
        "title": form["title"],
        "subtitle": form["subtitle"],
        "description": form["description"],
        "price": form["price"],
        "genre": form["genre"],
        "author": form["author"],
        "year": form["year"],
    }
    new_book = Book(**fields)
    new_book.save()
    return redirect(book)
def delete_book(request, id):
    """Delete the book with this id, then return to the list view.

    NOTE(review): destructive action reachable via plain GET — consider
    requiring POST/CSRF protection.
    """
    Book.objects.get(id=id).delete()
    return redirect(book)
def _set_favorite(request, id, value):
    # Shared helper: load the book, set its favorite flag, redirect to the list.
    book1 = Book.objects.get(id=id)
    book1.is_favorite = value
    book1.save()
    return redirect(book)


def fav_book(request, id):
    """Mark the book with this id as a favorite."""
    return _set_favorite(request, id, True)


def unfav_book(request, id):
    """Remove the favorite flag from the book with this id."""
    return _set_favorite(request, id, False)
def booksDetail(request, id):
    """Show the detail page for a single book (camelCase kept: urls.py references it)."""
    context = {"book_detail": Book.objects.get(id=id)}
    return render(request, "books_detail.html", context)
48,796 | algakbk/book-store | refs/heads/main | /book/urls.py | """book URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from main.views import *
# URL routing table: one entry per view in main/views.py. The static()
# suffixes serve static and media files during development only.
urlpatterns = [
    path('admin/', admin.site.urls),
    path("", homepage, name="home"),
    path("books/", book, name="books"),
    path("add_book/", add_book, name="add_book"),
    path("add-books/", add_books, name="add-books"),
    path("delete-book/<id>/", delete_book, name="delete-book"),
    path("fav-book/<id>/", fav_book, name="fav-book"),
    path("unfav-book/<id>/", unfav_book, name="unfav-book"),
    path("book-detail/<id>/", booksDetail, name="book-detail"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
    + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
48,797 | algakbk/book-store | refs/heads/main | /main/models.py | from django.db import models
class Book(models.Model):
    """A book listed in the store."""

    title = models.CharField(max_length=100)
    subtitle = models.CharField(max_length=100)
    description = models.CharField(max_length=1000)
    # Up to 999.99 (5 digits total, 2 after the decimal point).
    price = models.DecimalField(max_digits=5, decimal_places=2)
    genre = models.CharField(max_length=100)
    author = models.CharField(max_length=100)
    year = models.IntegerField()
    # Set once when the row is inserted; never updated afterwards.
    created_at = models.DateField(auto_now_add=True)
    # Toggled by the fav_book/unfav_book views.
    is_favorite = models.BooleanField(default=False)
48,798 | rlhjansen/Point2PointRoutability | refs/heads/master | /make_plots.py |
import sys
import argparse
from subprocess import run
from common import read_config
sys.path.append("..")
if __name__ == '__main__':
    # Pipeline driver: forwards the routings-per-pathlist count to each
    # analysis script, run in dependency order.
    config_dict = read_config()
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--routings_per_pathlist", help="the number of different orders each pathlist should be routed, the paper uses 200", type=str, default=str(config_dict["routings_per_pathlist"]))
    args = parser.parse_args()
    N = args.routings_per_pathlist
    # Each stage is an independent script invoked as a subprocess.
    run(("python calculate_routability.py --routings_per_pathlist " + N).split(" "))
    run(("python fit_meshwise.py --routings_per_pathlist " + N).split(" "))
    run(("python fit_routability_parameters.py --routings_per_pathlist " + N).split(" "))
48,799 | rlhjansen/Point2PointRoutability | refs/heads/master | /classes/simpleRouter.py |
from .mesh import file_to_mesh
from .gatherer import Gatherer
import matplotlib.pyplot as plt
class Router(Gatherer):
    """Routes a netlist on a mesh under many random net orders and records results.

    mesh_path / netlist_path / set_saveloc are presumably provided by the
    Gatherer base class — confirm against gatherer.py.
    """

    def __init__(self, c, n, nX, x, y, routings_per_pathlist):
        Gatherer.__init__(self, c, n, nX, x, y, routings_per_pathlist)
        self.set_saveloc()
        # Load the mesh geometry and the nets to route from disk.
        self.mesh = file_to_mesh(self.mesh_path, None)
        self.mesh.read_nets(self.netlist_path)

    def route(self):
        """Solve the netlist for each random order and persist all results."""
        self.mesh.connect()
        self.iter = 0
        # One random net order per requested routing.
        ords = [self.mesh.get_random_net_order() for i in range(self.routings_per_pathlist)]
        # Keep only the first two result fields of each solve; reset the mesh
        # between runs so routings are independent.
        data = [self.mesh.solve_order(ords[i], reset=True)[:2] for i in range(self.routings_per_pathlist)]
        combined_data = [data[i] + ords[i] for i in range(len(data))]
        self.add_iter_batch(combined_data)
        self.save_all_data()
        print("running object with meshsize", self.x, "netlistlen", self.n, "pathlist", self.nX)
        return
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,800 | rlhjansen/Point2PointRoutability | refs/heads/master | /fit_routability_parameters.py | import os
import argparse
import os
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from math import exp, floor
import numpy as np
from common import read_config
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
# reference page:
# https://stackoverflow.com/questions/39803385/what-does-a-4-element-tuple-argument-for-bbox-to-anchor-mean-in-matplotlib/39806180#39806180
LOC_ROUTABILITY_POINT = 2
BBOX_TO_ANCHOR_ROUTABILITY_POINT = (0.05, 0.5, .9, .45)
LOC_ROUTABILITY_SLOPE = 1
BBOX_TO_ANCHOR_ROUTABILITY_SLOPE = (0.75, 0.5, .2, .45)
def named_function(name):
    """Decorator factory: tag the wrapped function with a ``.name`` attribute."""
    def attach(func):
        func.name = name
        return func
    return attach
def load_ab(param_func):
    """Load the fitted (shift, slope) parameter table for *param_func*.

    header=1 skips the grouping row written above the real column names
    (see save_shift_slope in fit_meshwise.py, which writes two header rows).
    """
    df = pd.read_csv("params_" + param_func + ".csv", header=1)
    return df
def getdfcol(df, n):
    """Return the values of the dataframe's n'th column (as a Series)."""
    nth_name = df.columns[n]
    return df[nth_name]
INTERP_MESH = np.array([i for i in range(400, 10001, 1)])
initial_col = 'b'
best_col = '#d95f02'
mean_col = 'g'
worst_col = 'magenta'
black = 'k'
meta_fit_vars = {}
meta_fit_vars['start'] = (13, 0.005, 0.1)
meta_fit_vars['start_s'] = (-0.8, 0.0005, 0.0001)
MFV = meta_fit_vars
@named_function('arg1 * np.log( arg2 * area - arg3)')
def logfunc(value, const1, const2, const3):
    """Three-parameter log curve: const1 * ln(const2 * value - const3)."""
    return const1 * np.log( const2 *value - const3)
@named_function('arg1 * np.log( arg2 * area - arg3)')
def slope_adapted_logfunc(value, const1, const2, const3):
    """logfunc variant that flips const3's sign whenever the log argument
    would go non-positive anywhere in *value*, keeping the optimizer inside
    the log's domain. (Original note: "shenanigans to make sure optimizer
    stays in the > 0 domain for the log".)
    """
    if np.sum((const2 *value - const3) < 0):
        const4 = const3*-1
        return const1 * np.log( const2 *value - const4)
    return const1 * np.log( const2 *value - const3)
def correct_slopefunc(value, const1, const2, const3):
    """Return the parameter triple slope_adapted_logfunc would actually use:
    const3's sign is flipped when const2*value - const3 dips below zero
    anywhere in *value*; otherwise the parameters pass through unchanged.
    """
    domain_violations = np.sum((const2 * value - const3) < 0)
    if domain_violations:
        return const1, const2, -const3
    return const1, const2, const3
def log10(num):
    """Base-10 logarithm via the change-of-base formula (scalars or arrays)."""
    return np.divide(np.log(num), np.log(10))
def get_significant(x, n):
    """Round *x* to n+1 significant digits (relies on the module-level log10)."""
    magnitude = int(floor(log10(abs(x))))
    return round(x, n - magnitude)
def format_found_params_inside_parenthesis(const1, const2, const3):
    """Build the *source text* of a raw-string literal rendering the fitted
    log curve, e.g. the code string "r'13.0 $\\cdot$ ln (0.005x - 0.1)'".

    NOTE(review): callers eval() this returned code string to obtain the
    label — a needless eval round-trip; returning the label text directly
    and dropping the eval at the call sites would be safer.
    """
    return "r'" + str(get_significant(const1, 2)) + " $\cdot$ ln (" + str(get_significant(const2, 2)) + "x - " + str(get_significant(const3, 2)) + ")'"
def meta_fit(mesh_metric, param_func, meta_param_func, _arb=False, _mean=False, _best=False, _worst=False):
meta_param_func_str = meta_param_func
meta_param_func = eval(meta_param_func)
fig = plt.figure(dpi=200)
ax = fig.add_subplot(111)
df = load_ab("inverted_logistic")
mesh_area_values = getdfcol(df,0)**2
initial_shift = getdfcol(df,1)
best_shift = getdfcol(df,5)
mean_shift = getdfcol(df,3)
worst_shift = getdfcol(df,7)
meta_fit_resfile = "slope_routability_fit_results.csv"
meta_fit_result_lines = ["function:" + meta_param_func.name,"what is fitted, arg1, arg2, arg3, MSE"]
if _arb:
errorfunc = lambda vars,x,data : logfunc(x, vars[0], vars[1], vars[2]) - data
RoutabilitypointArbitrary = least_squares(errorfunc, x0=meta_fit_vars['start'], args=(mesh_area_values, initial_shift), method="lm")
ax.scatter(mesh_area_values, initial_shift, c=initial_col, label='original')
ax.plot(INTERP_MESH, logfunc(INTERP_MESH, *list(RoutabilitypointArbitrary.x)), c=black, linestyle="--")
if _best:
RoutabilitypointBest = least_squares(errorfunc, x0=meta_fit_vars['start'], args=(mesh_area_values, best_shift), method="lm")
ax.scatter(mesh_area_values, best_shift, c=best_col, label='permuted')
ax.plot(INTERP_MESH, logfunc(INTERP_MESH, *list(RoutabilitypointBest.x)), c=black, linestyle="--")
if _mean:
ax.scatter(mesh_area_values, mean_shift, c=mean_col, label='discovered shift mean')
RoutabilitypointMean = least_squares(errorfunc, x0=meta_fit_vars['start'], args=(mesh_area_values, mean_shift), method="lm")
ax.plot(INTERP_MESH, logfunc(INTERP_MESH, *list(RoutabilitypointMean.x)), c=black, linestyle="--")
if _worst:
ax.scatter(mesh_area_values, worst_shift, c=worst_col, label='discovered shift worst')
RoutabilitypointWorst = least_squares(errorfunc, x0=meta_fit_vars['start'], args=(mesh_area_values, worst_shift), method="lm")
ax.plot(INTERP_MESH, logfunc(INTERP_MESH, *list(RoutabilitypointWorst.x)), c=black, linestyle="--")
plt.xlabel("mesh area")
plt.ylabel("routability point")
meta_fit_result_lines.append(",".join(["initial routabilitypoint", str(RoutabilitypointArbitrary.x[0]), str(RoutabilitypointArbitrary.x[1]), str(RoutabilitypointArbitrary.x[2]), str(np.mean(np.power(RoutabilitypointArbitrary.fun, 2)))]))
meta_fit_result_lines.append(",".join(["routabilitypoint after permutation", str(RoutabilitypointBest.x[0]), str(RoutabilitypointBest.x[1]), str(RoutabilitypointBest.x[2]), str(np.mean(np.power(RoutabilitypointBest.fun, 2)))]))
ax.text(4900, 33.8, eval(format_found_params_inside_parenthesis(*list(RoutabilitypointArbitrary.x))), fontsize=12)
ax.text(4900, 60.3, eval(format_found_params_inside_parenthesis(*list(RoutabilitypointBest.x))), fontsize=12)
print(*RoutabilitypointArbitrary.x)
print(*RoutabilitypointBest.x)
plt.legend(bbox_to_anchor=BBOX_TO_ANCHOR_ROUTABILITY_POINT, loc=LOC_ROUTABILITY_POINT, fancybox=False, shadow=False, ncol=1, frameon=False)
plt.show()
fig = plt.figure(dpi=200)
ax = fig.add_subplot(111)
initial_slope = getdfcol(df,2)
best_slope = getdfcol(df,6)
mean_slope = getdfcol(df,4)
worst_slope = getdfcol(df,8)
if _arb:
errorfunc = lambda vars,x,data : slope_adapted_logfunc(x, vars[0], vars[1], vars[2]) -data
OptimizeResultSlopeArbitrary = least_squares(errorfunc, x0=meta_fit_vars['start_s'], args=(mesh_area_values, initial_slope), method="lm")
slope_arbitrary_corrected = correct_slopefunc(mesh_area_values, *list(OptimizeResultSlopeArbitrary.x))
ax.scatter(mesh_area_values, initial_slope, c=initial_col, label='original')
ax.plot(INTERP_MESH, slope_adapted_logfunc(INTERP_MESH, *slope_arbitrary_corrected), c=black, linestyle="--")
if _best:
OptimizeResultSlopeBest = least_squares(errorfunc, x0=meta_fit_vars['start_s'], args=(mesh_area_values, best_slope), method="lm")
slope_best_corrected = correct_slopefunc(mesh_area_values, *list(OptimizeResultSlopeBest.x))
ax.scatter(mesh_area_values, best_slope, c=best_col, label='permuted')
ax.plot(INTERP_MESH, slope_adapted_logfunc(INTERP_MESH, *slope_best_corrected), c=black, linestyle="--")
if _mean:
OptimizeResultSlopeMean = least_squares(errorfunc, x0=meta_fit_vars['start_s'], args=(mesh_area_values, mean_slope), method="lm")
slope_mean_corrected = correct_slopefunc(mesh_area_values, *list(OptimizeResultSlopeMean.x))
ax.scatter(mesh_area_values, mean_slope, c=mean_col, label='discovered mean sequence')
ax.plot(INTERP_MESH, slope_adapted_logfunc(INTERP_MESH, *slope_mean_corrected), c=black, linestyle="--")
if _worst:
OptimizeResultSlopeWorst = least_squares(errorfunc, x0=meta_fit_vars['start_s'], args=(mesh_area_values, worst_slope), method="lm")
slope_worst_corrected = correct_slopefunc(mesh_area_values, *list(OptimizeResultSlopeWorst.x))
ax.scatter(mesh_area_values, worst_slope, c=worst_col, label='discovered worst sequence')
ax.plot(INTERP_MESH, slope_adapted_logfunc(INTERP_MESH, *slope_worst_corrected), c=black, linestyle="--")
plt.xlabel("mesh area")
plt.ylabel("slope")
print(*slope_arbitrary_corrected)
print(*slope_best_corrected)
ax.text(3400, .165, eval(format_found_params_inside_parenthesis(*slope_arbitrary_corrected)), fontsize=12)
ax.text(3400, .27, eval(format_found_params_inside_parenthesis(*slope_best_corrected)), fontsize=12)
meta_fit_result_lines.append(",".join(["initial slope", str(slope_arbitrary_corrected[0]), str(slope_arbitrary_corrected[1]), str(slope_arbitrary_corrected[2]), str(np.mean(np.power(OptimizeResultSlopeArbitrary.fun, 2)))]))
meta_fit_result_lines.append(",".join(["slope after permutation", str(slope_best_corrected[0]), str(slope_best_corrected[1]), str(slope_best_corrected[2]), str(np.mean(np.power(OptimizeResultSlopeBest.fun, 2)))]))
with open(meta_fit_resfile, "w+") as f:
f.write("\n".join(meta_fit_result_lines))
plt.legend(bbox_to_anchor=BBOX_TO_ANCHOR_ROUTABILITY_SLOPE, loc=LOC_ROUTABILITY_SLOPE, fancybox=False, shadow=False, ncol=1, frameon=False)
plt.show()
if __name__ == '__main__':
    # CLI entry point: fit the meta-model (routability point and slope as a
    # function of mesh area) and plot the original vs permuted-order fits.
    config_dict = read_config()
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--routings_per_pathlist", help="the number of different orders each pathlist should be routed, the paper uses 200", type=int, default=config_dict['routings_per_pathlist'])
    args = parser.parse_args()
    N = args.routings_per_pathlist
    meta_function = "logfunc"
    # Only the arbitrary (_arb) and best-permutation (_best) series are fitted.
    meta_fit("area", "inverted_logistic", meta_function, _arb=True, _mean=False, _best=True, _worst=False)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,801 | rlhjansen/Point2PointRoutability | refs/heads/master | /fit_meshwise.py | import argparse
import os
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.optimize import least_squares
from math import exp
import numpy as np
from common import read_config
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
BBOX_TO_ANCHOR = (0.5, 0.45, .4, .55)
LEGEND_LOC = 4 #location given the bounding box
LEGEND_WINDOW_INDEX = 1 #location of legend in the 3x3 grid figure, as follows
# 0 # 1 # 2
# 3 # 4 # 5
# 6 # 7 # 8
initial_col = '#1b9e77'
fit_col = '#1b9e77'
best_fit = '#d95f02'
best_col = '#d95f02'
mean_col = 'b'
worst_col = 'magenta'
def mean(list):
    """Arithmetic mean of *list* (raises ZeroDivisionError when empty)."""
    total = sum(list)
    return total / len(list)
def named_function(name):
    """Return a decorator that stores *name* on the decorated function as .name."""
    def decorate(target):
        target.name = name
        return target
    return decorate
def format_meshsize(meshsize):
    """Render a mesh size as 'NxN' (e.g. 20 -> '20x20')."""
    side = str(meshsize)
    return side + "x" + side
def initial_solv_str(meshsize):
    """Column name for the arbitrary-order routability series of a meshsize."""
    return f"routability by arb {meshsize}x{meshsize}"
def best_solv_str(meshsize):
    """Column name for the best-of routability series of a meshsize."""
    return f"routability best of {meshsize}x{meshsize}"
def worst_solv_str(meshsize):
    """Column name for the worst-of routability series of a meshsize."""
    return f"routability worst of {meshsize}x{meshsize}"
def mean_solv_str(meshsize):
    """Column name for the mean routability series of a meshsize."""
    return f"routability of mean {meshsize}x{meshsize}"
@named_function("balanced sigmoid")
def inverted_logistic(nl, shift, slope):
return 1 - (1 /(1+ np.exp(-(nl-shift)*slope)))
def lstr(iterable):
    """Map every element of *iterable* to its str() form, returning a list."""
    return list(map(str, iterable))
# 3x3 window location checker, used to assign labels/axis values
def determine_3x3_y(elem_n):
    """True for the left column of the 3x3 grid (indices 0, 3, 6)."""
    return elem_n % 3 == 0
def determine_3x3_solv(elem_n):
    """True only for window 3 (middle-left) of the 3x3 grid."""
    MIDDLE_LEFT = 3
    return elem_n == MIDDLE_LEFT
def determine_3x3_nl(elem_n):
    """True only for window 7 (bottom-middle) of the 3x3 grid."""
    BOTTOM_MIDDLE = 7
    return elem_n == BOTTOM_MIDDLE
def determine_3x3_x(elem_n):
    """Truthy (1) for the bottom row of the 3x3 grid (indices 6-8), else 0."""
    bottom_row, _ = divmod(elem_n, 6)
    return bottom_row
def save_shift_slope(meshsizes, param_func, N):
    """Fit the curve named by *param_func* (resolved via eval) to each
    meshsize's routability-vs-netlist-length data and write the fitted
    (shift, slope) pairs and fit errors to CSV files.

    NOTE(review): eval(param_func) executes an arbitrary expression — fine
    for internal use, but a dict of allowed functions would be safer.
    """
    fitfunc = eval(param_func)
    datafile = "compare_routability_best_of_"+str(N)+".csv"
    fit_error_file = "routability_fits_per_meshsize.csv"
    fit_error_file_lines = ["meshsize, initial routability param, initial slope param, initial squared error avg, after routability param, after slope param, after squared error avg"]
    df = pd.read_csv(datafile, index_col="netlist length")
    nl = np.array(df.index.values.tolist())
    param_csv = open("params_" + param_func + ".csv", "w+")
    # Two header rows: a grouping row, then the real column names
    # (load_ab in fit_routability_parameters.py reads with header=1).
    param_csv.write(", arbitrary netlists,,mean routability,,optimized netlist order,,worst order,\n")
    param_csv.write("meshsize (XxZ),initial_a,initial_b,mean_a,mean_b,best_a,best_b,worst_a,worst_b\n")
    # Residual for least_squares: vars = (shift, slope).
    errorfunc = lambda vars,x,data : fitfunc(x, vars[0], vars[1]) - data
    for j, meshsize in enumerate(meshsizes):
        y_arb = df[initial_solv_str(meshsize)]
        y_mean = df[mean_solv_str(meshsize)]
        y_best = df[best_solv_str(meshsize)]
        y_worst = df[worst_solv_str(meshsize)]
        # Levenberg-Marquardt fit per routing-strategy series.
        fitResArb = least_squares(errorfunc, x0=(20, 0.05), args=(nl, y_arb), method='lm')
        fitResBest = least_squares(errorfunc, x0=(30, 0.05), args=(nl, y_best), method='lm')
        fitResMean = least_squares(errorfunc, x0=(20, 0.05), args=(nl, y_mean), method='lm')
        fitResWorst = least_squares(errorfunc, x0=(20, 0.05), args=(nl, y_worst), method='lm')
        # Only the arb and best fits go into the fit-error file.
        fit_error_file_lines.append(",".join([str(meshsize), str(fitResArb.x[0]), str(fitResArb.x[1]), str(np.mean(np.power(fitResArb.fun, 2))), str(fitResBest.x[0]), str(fitResBest.x[1]), str(np.mean(np.power(fitResBest.fun, 2)))]))
        param_csv.write(",".join([str(meshsize)]+lstr(list(fitResArb.x))+lstr(list(fitResMean.x))+lstr(list(fitResBest.x))+lstr(list(fitResWorst.x)))+"\n")
    param_csv.close()
    with open(fit_error_file, "w+") as f:
        f.write("\n".join(fit_error_file_lines))
def conditional_label(boolean_value, label):
    """Return *label* when the flag is truthy, otherwise None (no legend entry)."""
    return label if boolean_value else None
def gen_filename_window(param_func, types, scatter, fitted):
    """Build (creating the directory if needed) the save path for a 3x3 plot.

    The filename encodes the plotted series plus flags for scatter (_s) and
    fitted-curve (_f) layers, e.g. "<param_func>/initial_best_s_f_3x3.png".
    """
    if not os.path.exists(param_func):
        os.mkdir(param_func)
    name_parts = ["_".join(types)]
    if scatter:
        name_parts.append("s")
    if fitted:
        name_parts.append("f")
    return param_func + "/" + "_".join(name_parts) + "_3x3.png"
def plot_shift_slope(meshsizes, types, param_func, title, scatter=True, fitted=True, legend=True):
    """Render a 3x3 grid of routability-vs-netlist-length panels, one per meshsize.

    :param meshsizes: nine mesh sizes, one per subplot panel
    :param types: subset of {"initial", "mean", "best", "worst"} series to draw
    :param param_func: name of a fit function in scope (resolved via eval)
    :param title: figure suptitle
    :param scatter: draw the raw data points
    :param fitted: draw the fitted curves
    :param legend: draw (and save with) a legend
    """
    fitfunc = eval(param_func)  # resolve the function name to the callable
    plot_savefile = gen_filename_window(param_func, types, scatter, fitted)
    # NOTE(review): N is read from module scope (set under __main__) — consider passing it in
    datafile = "compare_routability_best_of_"+str(N)+".csv"
    df = pd.read_csv(datafile, index_col="netlist length")
    nl = np.array(df.index.values.tolist())
    ab_df = load_shift_slope(param_func)
    _best = "best" in types
    _mean = "mean" in types
    _arb = "initial" in types
    _worst = "worst" in types
    fig = plt.figure(figsize=(12, 7))
    gs = gridspec.GridSpec(3, 3)
    gs.update(wspace=0.02, hspace=0.02)  # set the spacing between axes.
    legend_loc = 0  # panel index whose artists supply the legend labels
    for j, cs in enumerate(meshsizes):
        ax = plt.subplot(gs[j])
        ax.text(.95, .9, format_meshsize(cs), horizontalalignment='right', transform=ax.transAxes)
        # only the bottom row gets x ticks, only the left column gets y ticks
        if not determine_3x3_x(j):
            ax.set_xticks([])
        else:
            ax.set_xticks([10, 20, 30, 40, 50, 60, 70, 80, 90])
        if determine_3x3_nl(j):
            ax.set_xlabel("Number of paths in pathlist")
        if not determine_3x3_y(j):
            ax.set_yticks([])
        if determine_3x3_solv(j):
            ax.set_ylabel("Routability")
        labelwindow = j == legend_loc
        if _arb:
            y_arb = df[initial_solv_str(cs)]
            popta = ab_df["initial_a"][j], ab_df["initial_b"][j]
            if scatter:
                plt.scatter(nl, y_arb, c=mean_col, s=6, label=conditional_label(labelwindow, "original"))
            if fitted:
                ABNL_plot(nl, popta, fitfunc, c='k')
        if _mean:
            y_mean = df[mean_solv_str(cs)]
            poptm = ab_df["mean_a"][j], ab_df["mean_b"][j]
            if scatter:
                plt.scatter(nl, y_mean, c=mean_col, s=6, label=conditional_label(labelwindow, "average sequence routability"))
            if fitted:
                ABNL_plot(nl, poptm, fitfunc, c=fit_col)
        if _best:
            y_best = df[best_solv_str(cs)]
            poptb = ab_df["best_a"][j], ab_df["best_b"][j]
            if scatter:
                plt.scatter(nl, y_best, c=best_col, s=6, label=conditional_label(labelwindow, "permuted"))
            if fitted:
                ABNL_plot(nl, poptb, fitfunc, c='k')
        if _worst:
            # BUGFIX: previously read the "best" column; the worst series must use
            # worst_solv_str, matching save_shift_slope and the CSV header.
            y_worst = df[worst_solv_str(cs)]
            poptw = ab_df["worst_a"][j], ab_df["worst_b"][j]
            if scatter:
                # NOTE(review): colour best_col kept as before; a distinct colour may be intended
                plt.scatter(nl, y_worst, c=best_col, s=6, label=conditional_label(labelwindow, "worst after permutation"))
            if fitted:
                ABNL_plot(nl, poptw, fitfunc, c='k')
        if labelwindow and legend:
            # Put a legend to the right of the current axis
            lgd = plt.legend(bbox_to_anchor=BBOX_TO_ANCHOR, loc=LEGEND_LOC, fancybox=False, shadow=False, ncol=1, frameon=False)
    plt.suptitle(title)
    if legend:
        # tight bounding box so the out-of-axes legend is not clipped
        plt.savefig(plot_savefile, bbox_extra_artists=(lgd,))
    else:
        plt.savefig(plot_savefile)
    plt.show()
def ABNL_plot(nl, popts, fitfunc, c, label=None):
    """Plot the fitted curve fitfunc(nl, *popts) as a dashed line in colour *c*."""
    plot_kwargs = {"c": c, "linestyle": '--'}
    if label:
        plot_kwargs["label"] = label
    plt.plot(nl, fitfunc(nl, *popts), **plot_kwargs)
def load_shift_slope(param_func):
    """Load the fit parameters saved by save_shift_slope (second row is the header)."""
    params_path = "params_" + param_func + ".csv"
    return pd.read_csv(params_path, header=1)
def getdfcol(df, n):
    """Return the n'th column of *df* (looked up by positional column name)."""
    column_name = df.columns[n]
    return df[column_name]
if __name__ == '__main__':
    # experiment parameters come from config.txt (written by setup/run scripts)
    config_dict = read_config()
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--routings_per_pathlist", help="the number of different orders each pathlist should be routed, the paper uses 200", type=int, default=config_dict['routings_per_pathlist'])
    args = parser.parse_args()
    N = args.routings_per_pathlist  # module-level: plot_shift_slope reads this
    meshsizes = config_dict["meshsizes"]
    # fit the parameters first, then plot the initial-vs-best comparison
    save_shift_slope(meshsizes, "inverted_logistic", N)
    plot_shift_slope(meshsizes, ["initial", "best"], "inverted_logistic", "", scatter=True)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,802 | rlhjansen/Point2PointRoutability | refs/heads/master | /calculate_routability.py | import argparse
from statistics import mean
import os
from common import read_config
def reorder_by_netlength(files, lengths, iters, chipsize):
    """Groups result files by netlist length.

    Paths look like ./results/xAyB/C100/N<len>/N<len>_<i>/all_data.csv, so the
    length is parsed from path component 5 ("N<len>_<i>").
    """
    grouped = {length: {"filenames": [], 'count': 0} for length in lengths}
    for filename in files:
        components = filename.split(os.sep)
        length_token = components[5].split('_')[0][1:]
        grouped[int(length_token)]["filenames"].append(filename)
    return grouped
def get_files(xsize, ysize, iters):
    """Return every all_data.csv path under ./results/x<xsize>y<ysize>/C100."""
    root = os.path.join(os.path.curdir, "results",
                        "x" + str(xsize) + "y" + str(ysize), "C100")
    found = []
    for dirpath, _dirnames, filenames in os.walk(root):
        if 'all_data.csv' in filenames:
            found.append(os.path.join(dirpath, 'all_data.csv'))
    return found
def get_first_placed_count(f):
    """Return the placed-net count recorded on the first line of results file *f*.

    Lines are semicolon-separated; field 1 holds the number of nets placed.
    """
    # context manager so the handle is closed even if parsing raises
    with open(f, 'r') as readfile:
        line = readfile.readline()
    data = line.split(";")
    return int(data[1])
def get_least_placed_count(f, iters):
    """Return the minimum placed-net count over the first *iters* lines of *f*.

    :param f: results file path; semicolon-separated, field 1 is the count
    :param iters: number of routing attempts (lines) to consider
    :return: lowest count seen, or None when iters == 0 / the file is empty
    """
    best_count = None
    with open(f, 'r') as readfile:
        for i, line in enumerate(readfile):
            if i == iters:
                break  # only the first `iters` attempts count
            count = int(line.split(";")[1])
            if best_count is None or count < best_count:
                best_count = count
    return best_count
def get_most_placed_count(f, iters):
    """Return the maximum placed-net count over the first *iters* lines of *f*.

    :param f: results file path; semicolon-separated, field 1 is the count
    :param iters: number of routing attempts (lines) to consider
    :return: highest count seen, or None when iters == 0 / the file is empty
    """
    best_count = None
    with open(f, 'r') as readfile:
        for i, line in enumerate(readfile):
            if i == iters:
                break  # only the first `iters` attempts count
            count = int(line.split(";")[1])
            if best_count is None or count > best_count:
                best_count = count
    return best_count
def get_mean_routed_count(f, k, iters):
    """Return the fraction of the first *iters* attempts in *f* that routed all *k* nets.

    :param f: results file path; semicolon-separated, field 1 is the placed count
    :param k: netlist length; an attempt counts as fully routed when count == k
    :param iters: number of routing attempts (lines) to consider
    """
    routed_flags = []
    with open(f, 'r') as readfile:
        for i, line in enumerate(readfile):
            if i == iters:
                break
            routed_flags.append(1 if int(line.split(";")[1]) == k else 0)
    return mean(routed_flags)
def make_netlen_routabilitydict(files, netlengths, netlendict, chipsize, iters):
    """Per netlist length, collect routed-fully indicators for each result file.

    Keys per length: 'f' first attempt, 'bc' best attempt, 'minc' worst attempt
    (1 when all nets were routed, else 0) and 'mc' the mean routed fraction.
    """
    stats = {length: {'f': [], 'bc': [], 'mc': [], 'minc': [], 'count': 0}
             for length in netlengths}
    for length in netlendict:
        for fname in netlendict[length]["filenames"]:
            first = get_first_placed_count(fname)
            most = get_most_placed_count(fname, iters)
            avg = get_mean_routed_count(fname, length, iters)
            least = get_least_placed_count(fname, iters)
            entry = stats[length]
            entry['f'].append(1 if first == length else 0)
            entry['mc'].append(avg)
            entry['bc'].append(1 if most == length else 0)
            entry['minc'].append(1 if least == length else 0)
    return stats
def gather_data_chipsize(chipsize, netlengths, iters):
    """Collect result files and per-netlength routability stats for one mesh size."""
    result_files = get_files(chipsize, chipsize, iters)
    by_length = reorder_by_netlength(result_files, netlengths, iters, chipsize)
    stats = make_netlen_routabilitydict(result_files, netlengths, by_length, chipsize, iters)
    return result_files, by_length, stats
def routability_header_gen(chipsizes, best_of_N):
    """Yield one single-element CSV header list per statistic, per mesh size.

    Order per size: arbitrary, mean, best, worst — matching the column layout
    written by make_routability_csvs. (*best_of_N* is accepted but unused.)
    """
    for size in chipsizes:
        dims = "{}x{}".format(size, size)
        yield ["routability by arb " + dims]
        yield ["routability of mean " + dims]
        yield ["routability best of " + dims]
        yield ["routability worst of " + dims]
def make_routability_csvs(chipsizes, best_of_N, netlengths):
    """Aggregate all result files into compare_routability_best_of_<N>.csv.

    The output is built column-wise: csv_data_walk[0] is the netlist-length
    column, then four statistic columns (arb, mean, best, worst) per chipsize,
    and is transposed into rows when written.
    """
    # NOTE(review): these two lists are never used
    netlendicts_persize = []
    netlen_countdicts_persize = []
    csv_data_walk = [["netlist length"]] + [elem for elem in routability_header_gen(chipsizes, best_of_N)]
    dw_len = len(csv_data_walk)  # total column count (1 + 4 per chipsize)
    csv_data_walk[0].extend([str(nl) for nl in netlengths])
    for i, chipsize in enumerate(chipsizes):
        j = i*4  # offset of this chipsize's four statistic columns
        files, netlendict, netlen_routabilitydict = gather_data_chipsize(chipsize, netlengths, best_of_N)
        csv_data_walk[j+1].extend([str(mean(netlen_routabilitydict[n]['f'])) for n in netlengths])
        csv_data_walk[j+2].extend([str(mean(netlen_routabilitydict[n]['mc'])) for n in netlengths])
        csv_data_walk[j+3].extend([str(mean(netlen_routabilitydict[n]['bc'])) for n in netlengths])
        csv_data_walk[j+4].extend([str(mean(netlen_routabilitydict[n]['minc'])) for n in netlengths])
    with open("compare_routability_best_of_"+str(best_of_N)+".csv", "w+") as f:
        # transpose: one output row per netlist length (plus the header row)
        for i, netlength in enumerate(csv_data_walk[0]):
            line = ",".join([csv_data_walk[j][i] for j in range(dw_len)]) + "\n"
            f.write(line)
if __name__ == '__main__':
    # experiment parameters come from config.txt written by setup/run scripts
    config_dict = read_config()
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--routings_per_pathlist", help="the number of different orders each pathlist should be routed, the paper uses 200", type=int, default=config_dict['routings_per_pathlist'])
    args = parser.parse_args()
    N = args.routings_per_pathlist
    meshsizes = config_dict['meshsizes']
    netlist_lengths = config_dict['netlist_lengths']
    make_routability_csvs(meshsizes, N, netlist_lengths)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,803 | rlhjansen/Point2PointRoutability | refs/heads/master | /setup_experiment.py | import argparse
import os
from classes.mesh import Mesh, file_to_mesh
def create_net_datapath(c, n, x, y):
    """Create and return the path of the next netlist file for this configuration.

    :param c: number of terminals on the mesh
    :param n: netlist length
    :param x: mesh width
    :param y: mesh height
    :return: path data/x<x>y<y>/C<c>/N<n>/N<n>_<i>.csv, where <i> is the
        number of files already present (sequential numbering)
    """
    abspath = create_circuit_datapath(c, x, y)
    abspath = os.path.join(abspath, "N"+str(n))
    if not os.path.exists(abspath):
        os.makedirs(abspath)
    # index by current file count so each call creates a new numbered file
    abspath = os.path.join(abspath, "N"+str(n)+"_"+str(len(os.listdir(abspath)))+".csv")
    open(abspath, "a").close()
    return abspath
def create_circuit_datapath(c, x, y):
    """Create (if needed) and return the data directory for one mesh layout.

    Layout: <this file's dir>/data/x<x>y<y>/C<c>

    :param c: number of terminals on the mesh
    :param x: mesh width
    :param y: mesh height
    :return: absolute path of the C<c> directory
    """
    base = os.path.dirname(os.path.abspath(__file__))
    abspath = os.path.join(base, "data", "x" + str(x) + "y" + str(y))
    if not os.path.exists(abspath):
        os.makedirs(abspath)
    abspath = os.path.join(abspath, 'C' + str(c))
    if not os.path.exists(abspath):
        os.makedirs(abspath)
    # (leftover debug print of the path removed)
    return abspath
def main(x, y, netlens, netcount, terminalcount=100):
    """Generate one mesh with random terminals and its netlist files.

    Writes the terminal layout to data/x<x>y<y>/C<terminalcount>.csv and, for
    every length in *netlens*, *netcount* randomly generated netlists.

    :param x: mesh width
    :param y: mesh height
    :param netlens: iterable of netlist lengths to generate
    :param netcount: number of netlists per length
    :param terminalcount: number of terminals placed on the mesh
    """
    abspath = os.path.abspath(__file__)
    abspath = os.path.dirname(abspath)
    abspath = os.path.join(abspath, "data")
    abspath = os.path.join(abspath, "x"+str(x)+"y"+str(y))
    if not os.path.exists(abspath):
        os.makedirs(abspath)
    newmesh = Mesh([x, y])
    newmesh.generate_terminals(terminalcount)
    circuit_path = create_circuit_datapath(terminalcount, x, y) + ".csv"
    newmesh.write_mesh(circuit_path)
    for n in netlens:
        for _ in range(netcount):
            netlistpath = create_net_datapath(terminalcount, n, x, y)
            newmesh.generate_nets(n)
            newmesh.write_nets(netlistpath)
            # clear the netlist so the next iteration starts fresh
            newmesh.wipe_nets()
def make_config(pathlist_count, meshsizes, netlist_lengths):
    """Persist the experiment parameters to config.txt as a dict literal."""
    settings = {
        "pathlist_count": pathlist_count,
        "meshsizes": meshsizes,
        "netlist_lengths": netlist_lengths,
    }
    with open("config.txt", "w+") as config_file:
        config_file.write(str(settings))
if __name__ == '__main__':
    # netlist lengths 10..90 inclusive, nine mesh sizes (matches the paper setup)
    netlist_lengths = [10+i for i in range(81)]
    meshsizes = [20,30,40,50,60,70,80,90,100]
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--pathlist_count", help="the number of different pathlists to use", type=int, default=20)
    args = parser.parse_args()
    # record the parameters so later stages can read the same configuration
    make_config(args.pathlist_count, meshsizes, netlist_lengths)
    for meshsize in meshsizes:
        main(meshsize, meshsize, netlist_lengths, args.pathlist_count)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,804 | rlhjansen/Point2PointRoutability | refs/heads/master | /common.py |
def lprint(iterable, header=None):
    """Pretty-print rows to stdout, tab-separating list elements.

    :param iterable: items to print; list items are joined with tabs,
        anything else is printed via str()
    :param header: optional row of column titles, tab-separated
    """
    if header:
        print("\t".join(str(elem) for elem in header))
    for elem in iterable:
        # isinstance instead of `type(elem) == list` so list subclasses work too
        if isinstance(elem, list):
            print("\t".join(str(e) for e in elem))
        else:
            print(str(elem))
def read_config():
    """Load the experiment configuration dict from config.txt.

    The file stores a Python dict literal written by make_config /
    run_experiment; eval is acceptable only because the file is
    locally generated — never point this at untrusted input.
    """
    # `with` closes the file; the original's explicit f.close() was redundant
    with open("config.txt", "r") as f:
        config_dict = eval(f.readline())
    return config_dict
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,805 | rlhjansen/Point2PointRoutability | refs/heads/master | /classes/node.py |
def manhattan(vertex1, vertex2):
    """
    :param vertex1: tuple, coordinate
    :param vertex2: tuple, coordinate
    :return: manhattan distance between the two coordinates
    """
    total = 0
    for axis in range(len(vertex1)):
        total += abs(vertex1[axis] - vertex2[axis])
    return total
class Node:
    """A single mesh position.

    Holds its value ('0' empty, 'gX' terminal, 'nY' net wire), its coordinate,
    and references to its neighbouring Node objects (set via connect()).
    """
    def __init__(self, coord, value):
        self.value = value
        self.coord = coord
        self.neighbours = []        # neighbouring Node objects
        self.terminal = False       # True once value starts with 'g'
        self.net = False            # True once value starts with 'n'
        self.neighbour_num = 0
        self.set_value(value)       # validates and classifies the initial value
        self.out_nets = set()       # nets registered at this terminal
    def set_value(self, value):
        """Assign a value ('0', 'gX' or 'nY'); raises if the node is occupied."""
        if self.is_occupied():
            raise ValueError("node already occupied")
        self.value = value
        if value[0] == 'g':
            self.terminal = True
        elif value[0] == 'n':
            self.net = True
        return True
    def get_value(self):
        """
        :return: string "0", "gX", or "nY"
        """
        return self.value
    def get_neighbours(self):
        """Return the list of neighbouring Node objects."""
        return self.neighbours
    def get_neighbour_order_to(self, end_vertex):
        """Return neighbours sorted by manhattan distance to *end_vertex*."""
        nnl = self.neighbours[:]
        nnl.sort(key=lambda x: manhattan(x.get_coord(), end_vertex))
        return nnl
    def get_coord(self):
        """Return this node's coordinate tuple."""
        return self.coord
    def is_occupied(self):
        """
        :return: True if node is in use by a net or terminal, else False
        """
        return self.terminal or self.net
    def is_terminal(self):
        """True when this node holds a terminal ('gX')."""
        return self.terminal
    def is_net(self):
        """True when this node holds a net wire ('nY')."""
        return self.net
    def get_adjecent_occupied(self):
        """
        :return: number of adjecent nodes that are occupied, either by a terminal
            or by a net
        """
        count = 0
        for adj in self.neighbours:
            if adj.is_occupied():
                count += 1
        return count
    def has_room(self):
        """
        note: for netlist creation
        :return: True if node has room for an additional outgoing net,
        """
        count = self.get_adjecent_occupied() + len(self.out_nets)
        if count < self.neighbour_num:
            return True
        else:
            return False
    def add_net(self, net):
        """
        :param net: adds net to the set of nets allowed at the terminal
        :return:
        """
        if self.is_terminal():
            self.out_nets.add(net)
        else:
            raise ValueError("cannot add net to non-terminal node")
            print("a net should not be added here")  # NOTE(review): unreachable after raise
    def connect(self, neighbours):
        """
        :param neighbours: tuple (neighbouring) of Node objects
        :saves: this the list in the node object
        """
        self.neighbours = list(neighbours)
        self.neighbour_num = len(neighbours)
    def disconnect(self):
        """Drop all neighbour references (inverse of connect())."""
        self.neighbours = []
        self.neighbour_num = 0
    def remove_out_nets(self):
        """
        sets the outgoing nets (of a terminal-node) to the empty set
        """
        self.out_nets = set()
    def remove_net(self):
        """Clear a net wire from this node; raises when called on a terminal."""
        if self.is_terminal():
            raise ValueError("not a net, node ", self.coord, "contains:", self.value )
        else:
            self.value = "0"
            self.net = False
    def is_blocked_in(self):
        """pre-routing, skip routing if terminal is blocked in"""
        for neighbour in self.neighbours:
            if not (neighbour.is_terminal() or neighbour.is_net()):
                return False
        return True
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,806 | rlhjansen/Point2PointRoutability | refs/heads/master | /classes/mesh.py |
from random import randint, shuffle
import queue as Q
import functools
import operator
from .node import Node
class Mesh:
    """3-D routing mesh of Node objects with terminal/netlist bookkeeping.

    Coordinates are (x, y, z) tuples; terminals live on the z=0 "ground floor"
    and nets are routed between terminal pairs with A* search.
    """
    def __init__(self, size_params, solver="A_star", height=8, terminals=None):
        # initialize the mesh basics
        # NOTE(review): `solver` is currently unused; __init__ does not call
        # connect(), so neighbours are only linked after wipe_terminals()
        self.params = size_params + [height] # parameters of the mesh
        self.platform_params = size_params
        self.terminal_coords = {} # key:val gX:tuple(terminal_loc)
        self.coord_terminal = {}
        self.terminal_net = {} # key:val gX:set(nA, nB, nC...)
        self.net_terminal = {} # key:val nX:tuple(g1, g2)
        self.nets = {} # Live nets
        self.connections = {} # key:val coord_tuple:tuple(neighbour_tuples)
        self.wire_locs = set()
        self.meshdict = {n:Node(n, '0') for n in params_inp(self.params)}
        if terminals:
            self.place_premade_terminals(terminals_from_list_of_lists(terminals))
    def write_mesh(self, fname):
        """
        writes current terminal configuration to an out-file
        :param fname: filename to save to
        """
        mesh = self.to_base()
        with open(fname, 'w+') as fout:
            for row in mesh:
                fout.write(",".join(row) + "\n")
            fout.close()
    def to_base(self):
        """
        :return: a list of lists of the "ground floor" of the mesh
        """
        x = self.platform_params[0]
        y = self.platform_params[1]
        list = [str(self.meshdict[i+(0,)].get_value()) for i in params_inp(self.platform_params)]
        newbase = [[list[j * x + i] for i in range(x)] for j in
                   range(y)]
        return newbase
    # connects nodes in the mesh to it's neighbouring nodes
    def connect(self):
        """
        adds the connections each node has into the connection dictionary
        """
        for key in self.meshdict.keys():
            neighbour_nodes = tuple([self.meshdict.get(pn) for pn in neighbours(key) if self.meshdict.get(pn, False)])
            # neigbour_coords = tuple([pn for pn in neighbours(key) if self.meshdict.get(pn, False)]) // testing with coords
            self.meshdict[key].connect(neighbour_nodes)
    def disconnect(self):
        """Drop every node's neighbour links (inverse of connect())."""
        for key in self.meshdict.keys():
            neighbour_nodes = tuple([self.meshdict.get(pn) for pn in neighbours(key) if self.meshdict.get(pn, False)])
            # neigbour_coords = tuple([pn for pn in neighbours(key) if self.meshdict.get(pn, False)]) // testing with coords
            self.meshdict[key].disconnect()
    def rand_loc(self):
        """
        :return: random empty location on the "ground floor" of the mesh
        """
        # positions are kept off the border (1 .. size-2)
        x_pos = randint(1, self.params[0]-2)
        y_pos = randint(1, self.params[1]-2)
        z_pos = 0
        terminal_pos = tuple([x_pos, y_pos, z_pos])
        already_occupied = self.meshdict.get(terminal_pos).get_value()[0] != '0'
        while already_occupied:
            x_pos = randint(1, self.params[0]-2)
            y_pos = randint(1, self.params[1]-2)
            terminal_pos = tuple([x_pos, y_pos, z_pos])
            already_occupied = self.meshdict.get(terminal_pos).get_value()[0] != '0'
        return terminal_pos
    def generate_terminals(self, num):
        """
        places num terminals on the mesh
        """
        self.wipe_terminals()
        for i in range(num):
            rescoords = self.rand_loc()
            self.add_terminal(rescoords, "g"+str(i))
    def wipe_terminals(self):
        """
        remove all terminals from mesh
        """
        self.meshdict = {n:Node(n, '0') for n in params_inp(self.params)}
        self.connect()
        self.terminal_coords = {} # key:val gX:tuple(terminal_loc)
        self.coord_terminal = {}
        self.terminal_net = {}
    # adds a terminal to the mesh
    def add_terminal(self, coords, terminal_string):
        """
        places the terminal inside the mesh ditionary
        places the terminalstring inside the terminalcoord dictionary
        """
        self.meshdict[coords].set_value(terminal_string)
        self.terminal_coords[terminal_string] = coords
        self.coord_terminal[coords] = terminal_string
        self.terminal_net[terminal_string] = set()
    def place_premade_terminals(self, terminal_pairs):
        """Place terminals loaded from file; coords are reversed and given z=0."""
        terminalcoords, terminals = terminal_pairs
        for n, val in enumerate(terminalcoords):
            self.add_terminal(val[::-1] + (0,), terminals[n])
    def get_terminal_coords(self):
        """Return parallel lists of terminal labels and their coordinates."""
        kv = [[k , v] for k, v in self.terminal_coords.items()]
        k = [kv[i][0] for i in range(len(kv))]
        v = [kv[i][1] for i in range(len(kv))]
        return k, v
    def add_net(self, terminal1, terminal2, n_str):
        """Register net *n_str* between two terminals in all lookup tables."""
        self.net_terminal[n_str] = (terminal1, terminal2)
        self.meshdict[self.terminal_coords[terminal1]].add_net(n_str)
        self.meshdict[self.terminal_coords[terminal2]].add_net(n_str)
        self.terminal_net[terminal1].add(n_str)
        self.terminal_net[terminal2].add(n_str)
        self.nets[n_str] = (terminal1, terminal2)
    def generate_nets(self, num):
        """Randomly draw *num* distinct terminal pairs (with free capacity) as nets."""
        AG = list(self.terminal_coords.keys())
        GN = len(AG)-1
        for i in range(num):
            g1, g2, net = AG[randint(0,GN)], AG[randint(0,GN)], 'n'+str(i)
            g1nets = self.terminal_net.get(g1, set())
            g2nets = self.terminal_net.get(g2, set())
            common = (g1nets & g2nets)  # nonempty when the pair already shares a net
            roomleft1 = self.meshdict.get(self.terminal_coords[g1]).has_room()
            roomleft2 = self.meshdict.get(self.terminal_coords[g2]).has_room()
            no_room_left = not (roomleft1 and roomleft2)
            # redraw until: distinct terminals, no duplicate pair, both have capacity
            while (common or no_room_left) or (g1==g2):
                g1, g2 = AG[randint(0, GN)], AG[randint(0, GN)]
                if g1 == g2:
                    continue
                g1nets = self.terminal_net.get(g1)
                g2nets = self.terminal_net.get(g2)
                common = g1nets & g2nets
                roomleft1 = self.meshdict.get(self.terminal_coords[g1]).has_room()
                roomleft2 = self.meshdict.get(self.terminal_coords[g2]).has_room()
                no_room_left = not (roomleft1 and roomleft2)
            self.add_net(g1, g2, net)
    def get_random_net_order(self):
        """Return the net names in a freshly shuffled order."""
        key_list = list(self.net_terminal.keys())
        shuffle(key_list)
        return key_list[:]
    def write_nets(self, abspath):
        """Write the current netlist as CSV lines "net,g1,g2"."""
        with open(abspath, 'w+') as out:
            for netk in self.nets.keys():
                g1, g2 = self.nets.get(netk)
                out.write(','.join([netk,g1,g2])+'\n')
    def read_nets(self, abspath):
        """Load a netlist file (lines "net,g1,g2") and register every net."""
        nets = []
        with open(abspath, 'r') as inf:
            for line in inf:
                nets.append(line[:-1].split(','))
        for line in nets:
            net, g1, g2 = line
            self.add_net(g1, g2, net)
    ###########################
    #####    Reset Block  #####
    ###########################
    def wipe_nets(self):
        """
        remove the netlist from class
        """
        for key in self.coord_terminal.keys():
            self.meshdict[key].remove_out_nets()
        for key in self.terminal_net.keys():
            self.terminal_net[key] = set()
        self.net_terminal = {}
        self.reset_nets()
        self.nets = {}
    def reset_nets(self):
        """
        retains netlist connections but resets their placement
        """
        for spot in self.wire_locs:
            self.meshdict[spot].remove_net()
        self.wire_locs = set()
    def __str__(self):
        """Render every layer of the mesh as aligned text (for debugging)."""
        complete = []
        pars = self.params
        for z in range(pars[2]):
            complete.append("### Layer" + str(z + 1) + "###")
            for y in range(pars[1]):
                vals = [self.meshdict[(x,y,z)].get_value() for x in range(pars[0])]
                transformed_vals = [transform_print(val) for val in vals]
                complete.append(" ".join(transformed_vals))
        return "\n".join(complete)
    def extract_route(self, path_dict, end_loc):
        """Walk predecessor links back from *end_loc* and return the path, start-first."""
        path = ((),)
        get_loc = path_dict.get(end_loc)[0]
        # the start node is its own predecessor, which terminates the walk
        while path_dict.get(get_loc)[0] != get_loc:
            path = path + (get_loc,)
            get_loc = path_dict.get(get_loc)[0]
        return path[::-1]
    def A_star_max_g(self, net):
        """ finds a path for a net with A-star algorithm, quits searching early if the end-terminal is closed off by its immediate neighbourse.
        in case of ties in nodes to expand by (heuristic+cost to node)
        the node with most steps taken yet is chosen

        :param net: terminal-pair (gX, gY)
        :return: path, length if path founde, else false, false
        """
        q = Q.PriorityQueue()
        count = 0  # number of node expansions, returned for statistics
        end_loc = self.terminal_coords.get(self.net_terminal.get(net)[1])
        if self.meshdict.get(end_loc).is_blocked_in():
            return False, False, count
        start_loc = self.terminal_coords.get(self.net_terminal.get(net)[0])
        if self.meshdict.get(start_loc).is_blocked_in():
            return False, False, count
        path = ((start_loc),)
        manh_d = manhattan(path[-1], end_loc)
        q.put((manh_d, 0, start_loc))
        # visited maps coord -> [predecessor, steps-to-reach]
        visited = dict()
        visited[start_loc] = [start_loc, 0]
        while not q.empty():
            count += 1
            k = q.get()
            _, steps, current = k
            for neighbour in self.meshdict.get(current).get_neighbours():
                n_coord = neighbour.get_coord()
                if neighbour.is_occupied():
                    if n_coord == end_loc:
                        # end condition, path found
                        visited[n_coord] = [current, steps]
                        return self.extract_route(visited, n_coord), \
                               visited.get(end_loc)[1], count
                    else:
                        continue
                if n_coord in visited:
                    if visited.get(n_coord)[1] > steps:
                        # checks if current number of steps is lower than
                        # established cost of the node
                        visited[n_coord] = [current, steps]
                        # was - 1 before, not sure why atm
                        q.put((manhattan(n_coord, end_loc) + steps + 1, steps + 1,
                               n_coord))
                else:
                    visited[n_coord] = [current, steps]
                    q.put((manhattan(n_coord, end_loc) + steps + 1, steps + 1,
                           n_coord))
        return False, False, count
    def solve_order(self, net_order, reset=False):
        """Route the nets in the given order; returns [solved, total length, expansions]."""
        tot_length = 0
        solved = 0
        nets_solved = []
        tries = 0
        for net in net_order:
            path, length, expansions = self.A_star_max_g(net)
            tries += expansions
            if path:
                self.place(net, path)
                solved += 1
                tot_length += length
                nets_solved.append(net)
        if reset:
            self.reset_nets()
        return [solved, tot_length, tries]
    def get_solution_placement(self, net_order):
        """Return the routed path per net (empty tuple-of-tuple when unroutable)."""
        paths = []
        for net in net_order:
            path = self.A_star_max_g(net)[0]
            if path:
                paths.append(path)
            else:
                paths.append( ((),))
        self.reset_nets()
        return paths
    def place(self, net, path):
        """Mark every coordinate of *path* (except the end terminal) as this net's wire."""
        for spot in path[:-1]:
            if self.meshdict[spot].set_value(net):
                self.wire_locs.add(spot)
            else:
                # NOTE(review): unreachable — set_value raises instead of returning False
                raise ValueError("invalid placement")
        return False
def transform_print(val):
    """Pad a node value to width 3 for aligned mesh printing ('0' -> '___')."""
    if val == '0':
        return '___'
    if val[0] in ('n', 'g'):
        return val.rjust(3)
    raise ValueError("incorrect node value")
def file_to_mesh(fpath, nets):
    """
    :param nets: either a netlist or a number of nets (not used here)
    :return: a new Mesh built from the stored terminal layout at *fpath*
    """
    layout = read_mesh(fpath)
    width = len(layout[0])
    height = len(layout)
    return Mesh([width, height], terminals=layout)
def terminals_from_list_of_lists(lol):
    """
    :return: tuple of all terminal coordinates for a mesh and the terminal labels,
        scanned row-major over the base-layer grid *lol*
    """
    coords = []
    labels = []
    for row_idx, row in enumerate(lol):
        for col_idx in range(len(lol[0])):
            cell = row[col_idx]
            if cell[0] == 'g':
                coords.append((row_idx, col_idx))
                labels.append(cell)
    return coords, labels
def read_mesh(fpath):
    """
    reads a mesh configuration from the file at the file path
    (one comma-separated row per line; the trailing newline is stripped)
    :return: list of lists
    """
    with open(fpath, 'r') as fin:
        rows = [line[:-1].split(',') for line in fin]
    return rows
def manhattan(loc1, loc2):
    """
    :param loc1: tuple, coordinate
    :param loc2: tuple, coordinate
    :return: manhattan distance between the two coordinates
    """
    distance = 0
    for axis, coord in enumerate(loc1):
        distance += abs(coord - loc2[axis])
    return distance
def params_inp(params):
    """ return all tuples for a mesh of certain size,
    params = (10,10) creates tuples for positions (0, 0), (1, 0), ..., (9,9)

    :return: tuple for every node in a mesh with params
    """
    # (removed unused locals `base` and `count` from the original)
    tot = prodsum(params)
    return tuple(tuple(count_to_pos(c, params)) for c in range(tot))
def prodsum(iterable):
    """
    :param iterable: list of numbers
    :return: the product of all numbers, i.e. [5,6,2] returns 5*6*2 (empty -> 1)
    """
    result = 1
    for factor in iterable:
        result *= factor
    return result
def count_to_pos(count, params):
    """
    :param count: linear index of the node being made
    :param params: parameters of the mesh
    :return: list of coordinates for the meshdict (first axis varies fastest)
    """
    coords = [0] * len(params)
    for axis in range(len(params)):
        coords[axis] = count // prodsum(params[:axis]) % params[axis]
    return coords
def neighbours(coords):
    """
    :param coords: tuple of coordinates of a point in the mesh
    :return: the 2*len(coords) axis-aligned neighbouring coordinates,
        ordered (minus, plus) per axis
    """
    result = []
    for axis in range(len(coords)):
        lower = list(coords)
        upper = list(coords)
        lower[axis] -= 1
        upper[axis] += 1
        result.append(tuple(lower))
        result.append(tuple(upper))
    return tuple(result)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,807 | rlhjansen/Point2PointRoutability | refs/heads/master | /run_experiment.py |
import multiprocessing as mp
import sys
import argparse
import datetime
from common import read_config
import classes.simpleRouter as r
def router_generator(meshsize, netlist_length, pathlist_count, routings_per_pathlist, num_terminals=100):
    """Yield one Router per stored pathlist for this mesh/netlist combination."""
    print("creating router objects for meshsize", meshsize, "netlist length", netlist_length)
    for pathlist_idx in range(pathlist_count):
        yield r.Router(num_terminals, netlist_length, pathlist_idx, meshsize, meshsize, routings_per_pathlist)
def start_wrapper(router_obj):
    """Run one router; top-level function so multiprocessing's pool.map can pickle it."""
    router_obj.route()
def append_routings_per_pathlist_to_config(config, routings_per_pathlist):
    """Record the routings-per-pathlist setting in *config* and rewrite config.txt."""
    config["routings_per_pathlist"] = routings_per_pathlist
    with open("config.txt", "w+") as config_file:
        config_file.write(str(config))
if __name__ == '__main__':
    config_dict = read_config()
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--routings_per_pathlist", help="the number of different orders each pathlist should be routed, the paper uses 200", type=int, default=200)
    parser.add_argument("-c", "--cpu_count", help="number of cpus to use, default is computer cpus -2, so other things can still run (slowly) besides the experiment", type=int, default=mp.cpu_count()-2)
    args = parser.parse_args()
    # persist the chosen routing count so the analysis scripts read the same value
    append_routings_per_pathlist_to_config(config_dict, args.routings_per_pathlist)
    starttime = datetime.datetime.now()
    # one worker pool per (meshsize, netlist length) combination
    for meshsize in config_dict["meshsizes"]:
        for netlist_length in config_dict["netlist_lengths"]:
            pool = mp.Pool(args.cpu_count)
            Routers = router_generator(meshsize, netlist_length, config_dict["pathlist_count"], args.routings_per_pathlist)
            pool.map(start_wrapper, Routers)
            pool.close()
    print("time elapsed:\t", datetime.datetime.now() - starttime)
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,808 | rlhjansen/Point2PointRoutability | refs/heads/master | /classes/gatherer.py | import os
from .mesh import file_to_mesh
class Gatherer:
    """Accumulates per-iteration routing results and writes them to disk.

    NOTE(review): add_iter_batch reads self.iter, which is never set in
    __init__ — it appears to be assigned by the owning Router; confirm.
    """
    def __init__(self, c, n, nX, x, y, routings_per_pathlist):
        self.c = c # mesh with c terminals
        self.n = n # netlist with n nets
        self.nX = nX # Xth netlist with n nets
        self.x = x                  # mesh width
        self.y = y                  # mesh height
        self.routings_per_pathlist = routings_per_pathlist
        self.all_data = []          # rows of [iter, *batch fields]
        self.setup_load_paths()
    def add_iter_batch(self, batch_results):
        """Append one iteration's batch of result rows, tagged with the iteration id."""
        iter = str(self.iter)
        self.all_data.extend([[iter] + br for br in batch_results])
    def setup_load_paths(self):
        """Resolve the mesh layout and netlist file paths under <repo>/data."""
        abspath = os.path.abspath(__file__)
        abspath = os.path.dirname(abspath)
        abspath = os.path.dirname(abspath)  # up from classes/ to the repo root
        abspath = os.path.join(abspath, "data")
        abspath = os.path.join(abspath, "x"+str(self.x)+"y"+str(self.y))
        abspath = os.path.join(abspath, 'C'+str(self.c))
        self.mesh_path = abspath+".csv"
        abspath = os.path.join(abspath, "N"+str(self.n))
        abspath = os.path.join(abspath, "N"+str(self.n)+"_"+str(self.nX)+".csv")
        self.netlist_path = abspath
    def set_saveloc(self):
        """Create and remember the results directory for this configuration.

        NOTE(review): os.path.join("results") discards the dirname chain above
        it, so results/ is created relative to the current working directory.
        """
        abspath = os.path.abspath(__file__)
        abspath = os.path.dirname(abspath)
        abspath = os.path.dirname(abspath)
        abspath = os.path.dirname(abspath)
        abspath = os.path.join("results")
        abspath = os.path.join(abspath, "x"+str(self.x)+"y"+str(self.y))
        abspath = os.path.join(abspath, 'C'+str(self.c))
        if not os.path.exists(abspath):
            os.makedirs(abspath)
        abspath = os.path.join(abspath, "N"+str(self.n))
        abspath = os.path.join(abspath, "N"+str(self.n)+"_"+str(self.nX))
        if not os.path.exists(abspath):
            os.makedirs(abspath)
        prevdata = len(os.listdir(abspath))  # NOTE(review): unused
        if not os.path.exists(abspath):
            os.makedirs(abspath)
        self.savedir = abspath
    def save_all_data(self):
        """Write all collected rows to all_data.csv (';'-separated head, ','-joined tail)."""
        save_data = [";".join([d[0], str(d[1]), str(d[2]), ",".join(d[3:])]) for d in self.all_data]
        savefile = os.path.join(self.savedir, "all_data.csv")
        with open(savefile, "w+") as f:
            for line in save_data:
                f.write(line + "\n")
| {"/make_plots.py": ["/common.py"], "/classes/simpleRouter.py": ["/classes/mesh.py", "/classes/gatherer.py"], "/fit_routability_parameters.py": ["/common.py"], "/fit_meshwise.py": ["/common.py"], "/calculate_routability.py": ["/common.py"], "/setup_experiment.py": ["/classes/mesh.py"], "/classes/mesh.py": ["/classes/node.py"], "/run_experiment.py": ["/common.py", "/classes/simpleRouter.py"], "/classes/gatherer.py": ["/classes/mesh.py"]} |
48,817 | avandriets/MultiUserBlog | refs/heads/master | /PostPage.py | """
Post managing module
"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Comment import Comment, comment_key
from NewPost import blog_key
class PostPage(BaseHandler):
    """Handler for a single post's page (permalink): GET shows the post
    with its comments and like count, POST adds a new comment."""

    def get(self, post_id):
        """Render the post identified by *post_id* (captured url group)."""
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        # check if post exists
        if not post:
            self.error(404)
            return
        # get post comments from DB, newest first
        comments = post.comments_collection.order('-created')
        like_count = post.like_collection.count()
        # prepare variables to render
        param = dict(post=post, comments=comments, like_count=like_count)
        self.render("permalink.html", **param)

    def post(self, post_id):
        """Handle the add-comment form; requires a logged-in user."""
        if not self.user:
            self.redirect('/login')
        else:
            # get post
            key = db.Key.from_path('Post', int(post_id), parent=blog_key())
            post = db.get(key)
            # if the post does not exist respond with 404
            if not post:
                self.error(404)
                return
            # get post comments from DB
            comments = post.comments_collection.order('-created')
            # get entered comment and check that it is not empty
            comment = self.request.get('comment', default_value=None)
            if comment:
                # whitespace-only input counts as empty
                comment = comment.strip()
            # add comment to db and redirect, or re-render with an error
            if comment:
                n_c = Comment(parent=comment_key(), post=post, body=comment,
                              author=self.user.key())
                n_c.put()
                self.redirect('/blog/{}'.format(str(post.key().id())))
            else:
                like_count = post.like_collection.count()
                error = "Enter comment, please!"
                param = dict(error=error, post=post, comments=comments
                             , like_count=like_count)
                self.render("permalink.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,818 | avandriets/MultiUserBlog | refs/heads/master | /PostLike.py | """
Post liking module
"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Like import Like, like_key
from Post import blog_key
class PostLike(BaseHandler):
    """Handler that records a "like" for a blog post."""

    def post(self):
        """Handle the POST like request for the post given by blog_id."""
        # redirect the user to the login page if he is not signed in yet
        if not self.user:
            self.redirect('/login')
            return
        # get blog_id parameter
        post_id = self.request.get("blog_id", None)
        # get post from DB by key
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        if not post:
            self.error(404)
            return
        # get post likes from DB
        likes_list = post.like_collection
        # check whether this user already liked the post; stop at the
        # first hit instead of scanning the whole collection (the
        # original loop had no break)
        set_like_already = False
        for acc in likes_list:
            if acc.author.key() == self.user.key():
                set_like_already = True
                break
        # prepare params for rendering the page
        param = dict(post=post)
        # an author cannot like his own post
        if post.owner.key() == self.user.key():
            param["error"] = "You cannot like this post, you are owner!"
            self.render("like-post.html", **param)
        elif set_like_already:
            # one like per user and post
            param["error"] = "You have liked this post already!"
            self.render("like-post.html", **param)
        else:
            # record the like and go back to the post page
            n_l = Like(parent=like_key(), post=post,
                       author=self.user.key())
            n_l.put()
            self.redirect('/blog/{}'.format(post_id))
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,819 | avandriets/MultiUserBlog | refs/heads/master | /DeleteComment.py | """
Module provide tools to delete comment from database
"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Comment import comment_key
from Post import blog_key
class DeleteComment(BaseHandler):
    """Render a confirmation page and delete a comment from the datastore."""

    def post(self):
        """Show the delete-confirmation page and handle the yes/no choice."""
        if not self.user:
            # if the user is not logged in redirect him to the login page
            self.redirect('/login')
            return
        # comment id, post id and the user's choice (yes/no parameters)
        comment_id = self.request.get("comment_id", None)
        post_id = self.request.get("blog_id", None)
        yes = self.request.get("yes", None)
        no = self.request.get("no", None)
        # get post by key
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        # find comment
        key_comment = db.Key.from_path('Comment', int(comment_id),
                                       parent=comment_key())
        comment = db.get(key_comment)
        # 404 when either entity is missing.  BUGFIX: the original tested
        # `not (post or comment)`, which only fired when BOTH were missing
        # and crashed below when exactly one of them was None.
        if not post or not comment:
            self.error(404)
            return
        # fill parameters for rendering the page
        param = dict(p=post, c=comment)
        # NOTE(review): permission is checked against the POST owner, not
        # the comment author -- confirm that only the post owner may
        # delete comments here.
        if post.owner.key() != self.user.key():
            param["is_errors"] = True
            param["error"] = "You cannot delete this comment," \
                             " you are not owner!"
            self.render("delete-comment.html", **param)
        else:
            # handle the user's choice: delete on "yes", go back on "no",
            # otherwise show the confirmation page (removed dead `pass`)
            if yes:
                comment.delete()
                self.redirect("/blog/{}".format(post_id))
            elif no:
                self.redirect("/blog/{}".format(post_id))
            else:
                self.render("delete-comment.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,820 | avandriets/MultiUserBlog | refs/heads/master | /Login.py | """
Login handler request module
"""
from User import User
from BaseHandler import BaseHandler
class Login(BaseHandler):
    """Handler that renders the login form and authenticates users."""

    def get(self):
        """Display the login form."""
        self.render('login-form.html')

    def post(self):
        """Check the submitted credentials and open a session on success."""
        # pull the credentials out of the submitted form
        username = self.request.get('username')
        password = self.request.get('password')
        account = User.login(username, password)
        if not account:
            # authentication failed: show the form again with an error
            template_values = {
                'is_errors': True,
                'login_error': 'Invalid login',
            }
            self.render('login-form.html', **template_values)
            return
        # success: set the session cookie and go to the blog overview
        self.login(account)
        self.redirect('/blog')
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,821 | avandriets/MultiUserBlog | refs/heads/master | /EditComment.py | """Comment edit tols module"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Comment import comment_key
from Post import blog_key
class EditComment(BaseHandler):
    """Handler that lets a comment's author edit the comment text."""

    def post(self):
        """Show the edit form and save or discard the changed comment."""
        # check if the user has logged in
        if not self.user:
            self.redirect('/login')
            return
        # get parameters from the request
        comment_id = self.request.get("comment_id", None)
        post_id = self.request.get("blog_id", None)
        save = self.request.get("save", None)
        cancel = self.request.get("cancel", None)
        body = self.request.get('body', default_value=None)
        # treat whitespace-only input as empty
        if body:
            body = body.strip()
        # find post
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        # find comment
        key_comment = db.Key.from_path('Comment', int(comment_id),
                                       parent=comment_key())
        comment = db.get(key_comment)
        # 404 when either entity is missing.  BUGFIX: the original tested
        # `not (post or comment)`, which only fired when BOTH were missing
        # and crashed below when exactly one of them was None.
        if not post or not comment:
            self.error(404)
            return
        # prepare parameters for rendering the page
        param = dict(p=post, subject=post.subject, content=post.content,
                     c=comment, body=comment.body)
        # only the comment's author may edit it
        if comment.author.key() != self.user.key():
            param["permission_error"] = "You cannot edit this comment," \
                                        " you are not owner!"
            self.render("edit-comment.html", **param)
        elif save:
            if body:
                comment.body = body
                comment.save()
                self.redirect("/blog/{}".format(post_id))
            else:
                param["error"] = "Add comment, please!"
                self.render("edit-comment.html", **param)
        elif cancel:
            self.redirect("/blog/{}".format(post_id))
        else:
            # first visit: show the edit form pre-filled with the comment
            self.render("edit-comment.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,822 | avandriets/MultiUserBlog | refs/heads/master | /Comment.py | """
Comments model module
describe Comment model
"""
from google.appengine.ext import db
from BaseHandler import render_str
from Post import Post
from User import User
def comment_key(name='default'):
    """Return the ancestor key under which all Comment entities are stored."""
    return db.Key.from_path('comments', name)
class Comment(db.Model):
    """Datastore model for a single comment on a blog post."""
    # the post the comment belongs to; reachable from Post via
    # `comments_collection`
    post = db.ReferenceProperty(Post, collection_name='comments_collection')
    body = db.TextProperty()
    author = db.ReferenceProperty(User)
    created = db.DateTimeProperty(auto_now_add=True)

    def render(self):
        """Render this single comment through the comment.html template."""
        # keep user-entered line breaks when shown as HTML
        self._render_text = self.body.replace('\n', '<br>')
        param = dict(c=self)
        return render_str("comment.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,823 | avandriets/MultiUserBlog | refs/heads/master | /DeletePost.py | """
Module provide tools to delete post from database
"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Post import blog_key
class DeletePost(BaseHandler):
    """Handler that shows a confirmation page and deletes a blog post."""

    def post(self):
        """Show the delete-confirmation page and handle the yes/no choice."""
        if not self.user:
            # if the user is not logged in redirect him to the login page
            self.redirect('/login')
            return
        # post id and the user's choice (yes/no parameters)
        post_id = self.request.get("blog_id", None)
        yes = self.request.get("yes", None)
        no = self.request.get("no", None)
        # get post by key
        key = db.Key.from_path('Post', int(post_id), parent=blog_key())
        post = db.get(key)
        # show 404 if the post does not exist
        if not post:
            self.error(404)
            return
        # fill parameters for rendering the page
        param = dict(p=post)
        # only the post owner may delete it
        if post.owner.key() != self.user.key():
            param["is_errors"] = True
            param["error"] = "You cannot delete this post, you are not owner!"
            self.render("delete-post.html", **param)
        elif yes:
            # confirmed: delete and return to the front page
            post.delete()
            self.redirect("/")
        elif no:
            # declined: go back to the post page
            self.redirect("/blog/{}".format(post_id))
        else:
            # no choice made yet: show the confirmation page
            # (removed dead `pass` and the redundant `if yes or no` nesting)
            self.render("delete-post.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,824 | avandriets/MultiUserBlog | refs/heads/master | /BlogFrontPage.py | """
Front Page module
"""
from BaseHandler import BaseHandler
from Post import Post
from User import User
class BlogFrontPage(BaseHandler):
    """Front page: lists posts, optionally filtered by author."""

    def get(self):
        """Render the post list; ?user_id=N narrows it to one author."""
        author_id = self.request.get('user_id', None)
        if author_id is None:
            # no filter: every post, newest first
            posts = Post.all().order('-created')
        else:
            # only the given author's posts, newest first
            author = User.by_id(int(author_id))
            posts = author.blogs_collection.order('-created')
        self.render("blog_front_page.html", **dict(posts=posts))
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,825 | avandriets/MultiUserBlog | refs/heads/master | /NewPost.py | """
New post handler module
"""
from BaseHandler import BaseHandler
from Post import Post, blog_key
class NewPost(BaseHandler):
    """Handler for creating a new blog post."""

    def get(self):
        """Show the new-post form (logged-in users only)."""
        # check if the user is in the system, otherwise go to login
        if self.user:
            self.render("newpost.html")
        else:
            self.redirect("/login")

    def post(self):
        """Validate the submitted form and create the post."""
        # only logged-in users may create posts
        if not self.user:
            self.redirect('/blog')
            return
        # get parameters from the request
        subject = self.request.get('subject', default_value=None)
        content = self.request.get('content', default_value=None)
        # BUGFIX: strip BEFORE validating -- the original checked first and
        # stripped afterwards, so whitespace-only input passed validation
        # and created a post with an empty subject/content.
        if subject:
            subject = subject.strip()
        if content:
            content = content.strip()
        if subject and content:
            # create new post
            p = Post(parent=blog_key(), subject=subject, content=content,
                     owner=self.user.key())
            p.put()
            # redirect to the post page
            self.redirect('/blog/{}'.format(str(p.key().id())))
        else:
            # missing input: re-render the form with an error message
            error = "subject and content, please!"
            param = dict(subject=subject, content=content,
                         error=error, is_errors=True)
            self.render("newpost.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,826 | avandriets/MultiUserBlog | refs/heads/master | /security.py | """
secure methods for cookies
"""
import hmac
SECRET_KEY = 'ijxz135d#8b2+t1&5#_g_zu&5juj_(828-7cr6a&!0m'
def make_secure_val(val):
    """Return "<val>|<signature>" -- the value plus its keyed HMAC.

    NOTE(review): hmac.new is called without a digestmod, which in
    Python 2 defaults to MD5 -- a stronger digest would be preferable.
    """
    return '%s|%s' % (val, hmac.new(SECRET_KEY, val).hexdigest())
def check_secure_val(secure_val):
    """Validate a signed cookie value produced by make_secure_val.

    Returns the embedded value on success, otherwise None (implicitly).
    hmac.compare_digest performs a constant-time comparison so the
    signature cannot be recovered through a timing side channel (the
    original used `==`, which short-circuits on the first differing byte).
    """
    val = secure_val.split('|')[0]
    if hmac.compare_digest(secure_val, make_secure_val(val)):
        return val
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,827 | avandriets/MultiUserBlog | refs/heads/master | /Like.py | """
Like handler module
"""
from google.appengine.ext import db
from Post import Post
from User import User
def like_key(name='default'):
    """Return the ancestor key under which all Like entities are stored."""
    return db.Key.from_path('likes', name)
class Like(db.Model):
    """Datastore model recording that a user liked a post."""
    # the liked post; reachable from Post via `like_collection`
    post = db.ReferenceProperty(Post, collection_name='like_collection')
    author = db.ReferenceProperty(User)
    created = db.DateTimeProperty(auto_now_add=True)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,828 | avandriets/MultiUserBlog | refs/heads/master | /Logout.py | """
Module manages logout process
"""
from BaseHandler import BaseHandler
class Logout(BaseHandler):
    """Handler that ends the current user session."""

    def get(self):
        """Clear the session cookie, then send the user to the start page."""
        self.logout()
        self.redirect('/')
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,829 | avandriets/MultiUserBlog | refs/heads/master | /Post.py | """
Post model describing module
"""
from google.appengine.ext import db
from BaseHandler import render_str
from User import User
def blog_key(name='default'):
    """Return the ancestor key under which all Post entities are stored."""
    return db.Key.from_path('blogs', name)
class Post(db.Model):
    """Datastore model for a blog post."""
    subject = db.StringProperty(required=True)
    content = db.TextProperty(required=True)
    # post author; reachable from User via `blogs_collection`
    owner = db.ReferenceProperty(User, collection_name='blogs_collection')
    created = db.DateTimeProperty(auto_now_add=True)
    last_modified = db.DateTimeProperty(auto_now=True)

    def get_lead(self):
        """Return a short teaser: at most the first 50 words of the
        content, with " ..." appended when the content was truncated."""
        words_list = self.content.split()
        last_word = ""
        # show just 50 words in the short post description
        if len(words_list) > 50:
            lst = words_list[:50]
            last_word = " ..."
        else:
            lst = words_list
        # join in one pass instead of the original quadratic `+=` loop;
        # each word keeps its trailing space so the output is unchanged
        output_str = "".join(word + " " for word in lst)
        return output_str + last_word

    def get_owner_id(self):
        """Return the owner's datastore id, or None for orphaned posts."""
        if self.owner:
            return self.owner.key().id()
        else:
            return None

    def render_short(self):
        """Render the truncated post via the post.html template."""
        # keep user-entered line breaks when shown as HTML
        self._render_text = self.get_lead().replace('\n', '<br>')
        param = dict(p=self)
        return render_str("post.html", **param)

    def render_full(self):
        """Render the complete post via the post.html template."""
        self._render_text = self.content.replace('\n', '<br>')
        param = dict(p=self)
        return render_str("post.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,830 | avandriets/MultiUserBlog | refs/heads/master | /EditPost.py | """
Module provide tools to change post in DB
"""
from google.appengine.ext import db
from BaseHandler import BaseHandler
from Post import blog_key
class EditPost(BaseHandler):
    """
    Handler that lets a post's owner edit its subject and content.
    """

    def post(self):
        """Show the edit form and save or discard the changes."""
        # check if the user has logged in
        if not self.user:
            self.redirect('/login')
        else:
            # get parameters from the request
            post_id = self.request.get("blog_id", None)
            save = self.request.get("save", None)
            cancel = self.request.get("cancel", None)
            subject = self.request.get('subject', default_value=None)
            content = self.request.get('content', default_value=None)
            # ignore surrounding whitespace in the submitted fields
            if subject and content:
                subject = subject.strip()
                content = content.strip()
            # find post
            key = db.Key.from_path('Post', int(post_id), parent=blog_key())
            post = db.get(key)
            # if there is no such post respond with 404
            if not post:
                self.error(404)
                return
            # prepare parameters for rendering the page
            param = dict(p=post, subject=post.subject, content=post.content)
            # only the owner may edit the post
            if post.owner.key() != self.user.key():
                param["is_errors"] = True
                param["error"] = "You cannot edit this post, you are not owner!"
                self.render("edit-post.html", **param)
            else:
                # handle the user's choice: save, cancel, or first visit
                if save or cancel:
                    if save:
                        if subject and content:
                            post.subject = subject
                            post.content = content
                            post.save()
                            self.redirect("/blog/{}".format(post_id))
                        else:
                            # reject empty subject/content on save
                            param["edit_error"] = "subject and content, please!"
                            self.render("edit-post.html", **param)
                    else:
                        self.redirect("/blog/{}".format(post_id))
                else:
                    # first visit: show the form pre-filled with the post
                    self.render("edit-post.html", **param)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,831 | avandriets/MultiUserBlog | refs/heads/master | /Registration.py | """
This module are used to register new user in the site
"""
from BaseHandler import BaseHandler
from User import User
import re
# Regular expressions to check user input
# Regular expressions used to validate the signup form fields
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASS_RE = re.compile(r"^.{3,20}$")


def valid_username(username):
    """Return a truthy value when *username* is a non-empty valid name."""
    return USER_RE.match(username) if username else username


def valid_password(password):
    """Return a truthy value when *password* has an acceptable length."""
    return PASS_RE.match(password) if password else password


def valid_email(email):
    """Return a truthy value when *email* is absent or well formed
    (email is an optional signup field)."""
    return EMAIL_RE.match(email) if email else True
class Signup(BaseHandler):
    """Base signup handler: validates the registration form.

    Subclasses implement done() to finish the registration.
    """

    def get(self):
        """Show the registration form, or the welcome page when the
        visitor is already logged in."""
        if not self.user:
            self.render("signup-form.html")
        else:
            self.redirect('/welcome')

    def post(self):
        """Validate the submitted form fields and call done() on success."""
        # get parameters
        have_error = False
        self.username = self.request.get('username')
        self.password = self.request.get('password')
        self.verify = self.request.get('verify')
        self.email = self.request.get('email')
        # save parameters to a dictionary for form re-rendering
        params = dict(username=self.username,
                      email=self.email)
        # checking input parameters; each failure adds an error message
        if not valid_username(self.username):
            params['is_username_errors'] = True
            params['username_error'] = "That's not a valid username."
            have_error = True
        if not valid_password(self.password):
            params['is_password_errors'] = True
            params['password_error'] = "That wasn't a valid password."
            have_error = True
        # verify is only compared when the password itself is valid
        elif self.password != self.verify:
            params['is_verify_errors'] = True
            params['verify_error'] = "Your passwords didn't match."
            have_error = True
        if not valid_email(self.email):
            params['is_email_errors'] = True
            params['email_error'] = "That's not a valid email."
            have_error = True
        if have_error:
            self.render('signup-form.html', **params)
        else:
            self.done()

    def done(self, *a, **kw):
        """Finish registration; must be overridden by subclasses."""
        raise NotImplementedError
class Register(Signup):
    """Signup subclass that actually creates the user account."""

    def done(self):
        """Create the account unless the username is already taken."""
        # make sure the user doesn't already exist
        u = User.by_name(self.username)
        params = dict(username=self.username,
                      email=self.email)
        if u:
            params['is_username_errors'] = True
            params['username_error'] = "That user already exists."
            self.render('signup-form.html', **params)
        else:
            u = User.register(self.username, self.password, self.email)
            u.put()
            # log the new user in immediately after registration
            self.login(u)
            self.redirect('/welcome')
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,832 | avandriets/MultiUserBlog | refs/heads/master | /Welcome.py | """
Welcome page handler module
"""
from BaseHandler import BaseHandler
class Welcome(BaseHandler):
    """Handler for the post-signup welcome page."""

    def get(self):
        """Show the welcome page, or send anonymous visitors to signup."""
        if not self.user:
            self.redirect('/signup')
            return
        self.render('welcome.html')
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,833 | avandriets/MultiUserBlog | refs/heads/master | /main.py | """
Main dispatcher module
"""
import webapp2
from BaseHandler import BaseHandler
from BlogFrontPage import BlogFrontPage
from DeleteComment import DeleteComment
from DeletePost import DeletePost
from EditComment import EditComment
from EditPost import EditPost
from Login import Login
from Logout import Logout
from NewPost import NewPost
from PostLike import PostLike
from PostPage import PostPage
from Registration import Register
from Welcome import Welcome
class MainPage(BaseHandler):
    """Handler for the site's landing page."""

    def get(self):
        """Render the static main page."""
        self.render("main_page.html")
# URL routing table: maps request paths to their handler classes.
# NOTE(review): debug=True exposes stack traces to visitors -- confirm it
# is disabled for production deployments.
app = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/login', Login),
    ('/logout', Logout),
    ('/signup', Register),
    ('/blog/newpost', NewPost),
    ('/blog/?', BlogFrontPage),
    ('/blog/([0-9]+)', PostPage),  # the post id is captured as a url group
    ('/welcome', Welcome),
    ('/blog/delete-post', DeletePost),
    ('/blog/edit-post', EditPost),
    ('/blog/like', PostLike),
    ('/blog/comment/edit-comment', EditComment),
    ('/blog/comment/delete-comment', DeleteComment),
], debug=True)
| {"/PostPage.py": ["/Comment.py", "/NewPost.py"], "/PostLike.py": ["/Like.py", "/Post.py"], "/DeleteComment.py": ["/Comment.py", "/Post.py"], "/EditComment.py": ["/Comment.py", "/Post.py"], "/Comment.py": ["/Post.py"], "/DeletePost.py": ["/Post.py"], "/BlogFrontPage.py": ["/Post.py"], "/NewPost.py": ["/Post.py"], "/Like.py": ["/Post.py"], "/EditPost.py": ["/Post.py"], "/main.py": ["/BlogFrontPage.py", "/DeleteComment.py", "/DeletePost.py", "/EditComment.py", "/EditPost.py", "/Login.py", "/Logout.py", "/NewPost.py", "/PostLike.py", "/PostPage.py", "/Registration.py", "/Welcome.py"]} |
48,836 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_backend/views.py | from django.contrib.auth.models import User
from rest_framework import viewsets
from foodrition_api.models import Food, FoodImage
from foodrition_api.serializers import UserSerializer, FoodSerializer
from django.views.decorators.http import require_http_methods
from rest_framework.parsers import FileUploadParser
from rest_framework.exceptions import ParseError
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from foodrition_api.services.ml_model import ModelFactory
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from django.http import JsonResponse
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.

    Requires authentication; users are ordered newest-joined first.
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
    permission_classes = (IsAuthenticated,)
class FoodViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows food to be viewed or edited.
    """
    queryset = Food.objects.all().order_by('id')
    serializer_class = FoodSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        """Allow the REST api to filter by the ?name= or ?ndb_no= query
        parameter; `name` takes precedence when both are supplied."""
        queryset = Food.objects.all()
        name = self.request.query_params.get('name', None)
        ndb_no = self.request.query_params.get('ndb_no', None)
        if name is not None:
            queryset = queryset.filter(name=name)
        elif ndb_no is not None:
            queryset = queryset.filter(ndb_no=ndb_no)
        return queryset
class ClassificationAPI(APIView):
    """
    API for classifying images. A JSON containing the classified id
    'pred_id', the corresponding model class description 'food_descr'
    and a mapping to the corresponding nutrition description is
    returned.
    """
    parser_class = (FileUploadParser,)
    permission_classes = (IsAuthenticated,)

    def post(self, request, format=None):
        """Classify an uploaded image (multipart key 'file') and persist
        the image together with its predicted class id."""
        # NOTE(review): the print() calls below should become logging
        # before production use
        print("Request to classify file")
        if 'file' not in request.data:
            raise ParseError("Empty content")
        f = request.data['file']
        # run the ML model on the uploaded file
        pred = ModelFactory.predict(f)
        print(f"Predicted Id for file upload {pred.pred_id}")
        print("Object dict: ", pred.__dict__)
        # store the image and its classification for later inspection
        food_image = FoodImage()
        food_image.img.save(f.name, f, save=True)
        food_image.classification = pred.pred_id
        food_image.save()
        return JsonResponse(pred.__dict__, status=status.HTTP_200_OK)
48,837 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/migrations/0003_foodimage.py | # Generated by Django 2.1.5 on 2019-02-11 13:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the FoodImage table used to store uploaded images together
    # with the classifier's predicted class id.

    dependencies = [
        ('foodrition_api', '0002_auto_20190206_1142'),
    ]

    operations = [
        migrations.CreateModel(
            name='FoodImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('img', models.ImageField(upload_to='')),
                # Predicted class id from the ML model; null until classified.
                ('classification', models.IntegerField(null=True)),
            ],
        ),
    ]
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,838 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/management/commands/init_food_table.py | from django.core.management.base import BaseCommand, CommandError
from foodrition_api.models import Food
from os.path import isfile
import csv
import pandas as pd
import numpy as np
class Command(BaseCommand):
    help = 'Initialises the food database using a .csv file.'

    def add_arguments(self, parser):
        """Register the required positional path to the source CSV file."""
        parser.add_argument('file_path', type=str)

    def handle(self, *args, **options):
        """Load the CSV at ``file_path`` into the Food table.

        Rows are inserted via ``get_or_create`` keyed on all supplied
        columns, so re-running the command does not duplicate entries.
        """
        try:
            # Use pandas because parser and dtype are handled more optimal
            df = pd.read_csv(options['file_path'], delimiter=";",
                             dtype={'Shrt_Desc': str, 'Water_g': np.float64,
                                    'Energ_Kcal': np.float64, 'Protein_(g)': np.float64,
                                    'Lipid_Tot_(g)': np.float64, 'Ash_(g)': np.float64,
                                    'Carbohydrt_(g)': np.float64,
                                    'Fiber_TD_(g)': np.float64})
            # iterrows yields (index, row); the index is not needed.
            for _, row in df.iterrows():
                _, created = Food.objects.get_or_create(
                    ndb_no=row['NDB_No'],
                    name=row['Shrt_Desc'],
                    water_g=row['Water_(g)'],
                    energy_kcal=row['Energ_Kcal'],
                    protein_g=row['Protein_(g)'],
                    lipid_g=row['Lipid_Tot_(g)'],
                    ash_g=row['Ash_(g)'],
                    carbohydrt_g=row['Carbohydrt_(g)'],
                    fiber_g=row['Fiber_TD_(g)']
                )
            self.stdout.write(self.style.SUCCESS("Succesfully initialized food table from file."))
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are not intercepted; the failure is still
            # reported and re-raised for a non-zero exit.
            self.stdout.write(self.style.ERROR("Could not initialize food table from file:"))
            raise
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,839 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/tests.py | from django.test import TestCase
from .services.ml_model import ModelFactory
from django.core.cache import cache
class MLModelTest(TestCase):
    def test_loading_model_to_cache(self):
        """Loading populates the ModelFactory class attributes.

        The previous version called ``ModelFactory.load_model_in_cache()``
        and read ``model_cache_key``/``model_map_cache_key`` from the
        Django cache — none of which exist on the current ``ModelFactory``
        (it exposes ``load_model_and_maps`` and keeps everything on class
        attributes), so the test failed with AttributeError before any
        assertion ran.
        """
        ModelFactory.load_model_and_maps()
        self.assertTrue(hasattr(ModelFactory.model, 'predict'))
        self.assertIsInstance(ModelFactory.model_map, dict)
        self.assertIsInstance(ModelFactory.nutr_descr_map, dict)
        self.assertIsInstance(ModelFactory.nutr_ndb_map, dict)
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,840 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/services/ml_model.py | from keras.models import model_from_json
from keras.applications.resnet50 import preprocess_input
import json
import cv2
from os.path import join, dirname
import tensorflow as tf
import numpy as np
class food_prediction():
    """Result of a single image classification.

    Bundles the predicted class id, its food label, and the matching
    nutrition description and NDB number.
    """

    def __init__(self, pred_id: int, food_class: str, nutr_class: str, nutr_ndb_no: int):
        # Coerce the ids eagerly so non-plain ints (e.g. the numpy
        # scalar that np.argmax yields in ModelFactory.predict) become
        # JSON-serializable Python ints.
        self.pred_id = int(pred_id)
        self.nutr_ndb_no = int(nutr_ndb_no)
        self.food_class = food_class
        self.nutr_class = nutr_class
class ModelFactory:
    """Holds the Keras food classifier and its label maps as class state.

    ``load_model_and_maps`` must be called once at service startup;
    afterwards ``predict`` can classify uploaded images.
    """

    model_folder = join(dirname(__file__), 'model')  # bundled model assets
    img_res = (224, 224)   # input resolution the network expects
    graph = None           # TF graph captured at load time (async predicts)
    model = None
    nutr_descr_map = None  # class id -> nutrition description
    nutr_ndb_map = None    # class id -> nutrition NDB number
    model_map = None       # class id -> food class name

    @staticmethod
    def predict(img):
        """Classify an uploaded image and return a ``food_prediction``.

        Args:
            img: file-like object (e.g. a Django upload) holding the raw
                encoded image bytes.
        """
        img_res = ModelFactory.img_res
        model = ModelFactory.model
        model_map = ModelFactory.model_map
        nutr_descr_map = ModelFactory.nutr_descr_map
        nutr_ndb_map = ModelFactory.nutr_ndb_map
        # Run inside the graph captured at load time
        # (see https://github.com/keras-team/keras/issues/2397).
        with ModelFactory.graph.as_default():
            # np.frombuffer replaces np.fromstring, which is deprecated
            # for binary input and warns on every call.
            img = cv2.imdecode(np.frombuffer(img.read(), np.uint8), cv2.IMREAD_UNCHANGED)
            img = cv2.resize(img, ModelFactory.img_res)
            img = preprocess_input(img)
            predict_vec = model.predict(img.reshape(-1, img_res[0], img_res[1], 3))[0]
            predict_id = np.argmax(predict_vec)
            print(f"Predicted class {predict_id} with precentage {predict_vec[predict_id]*100}%")
            pred = food_prediction(predict_id, model_map[predict_id], nutr_descr_map[predict_id],
                                   nutr_ndb_map[predict_id])
        return pred

    @staticmethod
    def _load_int_keyed_map(path):
        """Load a JSON object and convert its keys from str to int.

        JSON object keys are always strings, but ``predict`` indexes the
        maps with integer class ids.
        """
        with open(path) as map_file:
            raw = json.load(map_file)
        return {int(key): value for (key, value) in raw.items()}

    @staticmethod
    def load_model_and_maps():
        ''' Loads model and necessary maps to. Has to be called in service startup
        before predictions can be made. '''
        model_folder = ModelFactory.model_folder
        try:
            print("Trying to load model in cache")
            # Context manager ensures the file handle is closed even if
            # reading fails (the original left it open on error).
            with open(join(model_folder, "model.json"), 'r') as model_json_file:
                model_json = model_json_file.read()
            ModelFactory.model = model_from_json(model_json)
            # load weights for model
            ModelFactory.model.load_weights(join(model_folder, "model.h5"))
            # Set graph for async predict
            # (see issue https://github.com/keras-team/keras/issues/2397)
            ModelFactory.graph = tf.get_default_graph()
            ModelFactory.model_map = ModelFactory._load_int_keyed_map(
                join(model_folder, 'clf_map.json'))
            print("model_map: ", ModelFactory.model_map)
            ModelFactory.nutr_descr_map = ModelFactory._load_int_keyed_map(
                join(model_folder, 'nutrition_map.json'))
            ModelFactory.nutr_ndb_map = ModelFactory._load_int_keyed_map(
                join(model_folder, 'nutrition_map_NDB.json'))
        except (OSError, ValueError, TypeError):
            # The original caught only TypeError, so missing files
            # (OSError/FileNotFoundError) and malformed JSON (ValueError)
            # bypassed the diagnostic message entirely.
            print("Can't load model from file! ")
            raise
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,841 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/serializers.py | from django.contrib.auth.models import User
from .models import Food
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked representation of a Django auth user."""

    class Meta:
        # Only the public identity fields are exposed.
        fields = ('url', 'username', 'email')
        model = User
class FoodSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for Food exposing the macro-nutrient columns."""
    class Meta:
        model = Food
        # Only the core macro-nutrient fields are exposed; the many
        # micro-nutrient columns on Food stay internal.
        fields = ('ndb_no', 'name', 'water_g', 'energy_kcal', 'protein_g', 'lipid_g',
'carbohydrt_g', 'fiber_g', 'sugar_g') | {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,842 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/startup.py | from foodrition_api.services.ml_model import ModelFactory
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.db import IntegrityError
def foodrition_api_startup_func():
    """Ensure the guest account, its auth token, and the ML model exist.

    Idempotent: safe to run on every service start.
    """
    username = 'guest'
    password = 'bacon'
    try:
        # create_user's positional signature is (username, email=None,
        # password=None); the original call passed the password as the
        # *email*, leaving the guest account with an unusable password.
        guest_user = User.objects.create_user(username, password=password)
    except IntegrityError:
        print("User already exists in database! Resetting password...")
        guest_user = User.objects.get(username=username)
        guest_user.set_password(password)
        guest_user.save()
    # DRF token for the guest; get_or_create keeps this idempotent.
    _ = Token.objects.get_or_create(user=guest_user)
    ModelFactory.load_model_and_maps()
48,843 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/migrations/0001_initial.py | # Generated by Django 2.1.5 on 2019-02-06 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema: creates the Food nutrition table whose columns
    # mirror the USDA nutrient CSV loaded by the init_food_table command.
    # NOTE(review): ndb_no is absent here although the current model has
    # it — presumably added in 0002_auto_20190206_1142; confirm.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Food',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('water_g', models.FloatField(blank=True, null=True)),
                ('energy_kcal', models.FloatField(blank=True, null=True)),
                ('protein_g', models.FloatField(blank=True, null=True)),
                ('lipid_g', models.FloatField(blank=True, null=True)),
                ('ash_g', models.FloatField(blank=True, null=True)),
                ('carbohydrt_g', models.FloatField(blank=True, null=True)),
                ('fiber_g', models.FloatField(blank=True, null=True)),
                ('sugar_g', models.FloatField(blank=True, null=True)),
                ('calcium_mg', models.FloatField(blank=True, null=True)),
                ('iron_mg', models.FloatField(blank=True, null=True)),
                ('magnesium_mg', models.FloatField(blank=True, null=True)),
                ('phosphorus_mg', models.FloatField(blank=True, null=True)),
                ('potassium_mg', models.FloatField(blank=True, null=True)),
                ('sodium_mg', models.FloatField(blank=True, null=True)),
                ('zinc_mg', models.FloatField(blank=True, null=True)),
                ('copper_mg', models.FloatField(blank=True, null=True)),
                ('manganese_mg', models.FloatField(blank=True, null=True)),
                ('selenium_mg', models.FloatField(blank=True, null=True)),
                ('vit_c_mg', models.FloatField(blank=True, null=True)),
                ('thiamin_mg', models.FloatField(blank=True, null=True)),
                ('riboflavin_mg', models.FloatField(blank=True, null=True)),
                ('niacin_mg', models.FloatField(blank=True, null=True)),
                ('panto_acid_mg', models.FloatField(blank=True, null=True)),
                ('vit_B6_mg', models.FloatField(blank=True, null=True)),
                ('folate_mug', models.FloatField(blank=True, null=True)),
                ('folic_acid_mug', models.FloatField(blank=True, null=True)),
                ('food_folate_mug', models.FloatField(blank=True, null=True)),
                ('folate_dfe_mug', models.FloatField(blank=True, null=True)),
                ('choline_mg', models.FloatField(blank=True, null=True)),
                ('vit_B12_mug', models.FloatField(blank=True, null=True)),
                ('vit_A_iu', models.FloatField(blank=True, null=True)),
                ('vit_A_rae', models.FloatField(blank=True, null=True)),
                ('retinol_mug', models.FloatField(blank=True, null=True)),
                ('alpha_carot_mug', models.FloatField(blank=True, null=True)),
                ('beta_carot_mug', models.FloatField(blank=True, null=True)),
                ('beta_crypt_mug', models.FloatField(blank=True, null=True)),
                ('lycopene_mug', models.FloatField(blank=True, null=True)),
                ('lut_and_cea_mug', models.FloatField(blank=True, null=True)),
                ('vit_e_mug', models.FloatField(blank=True, null=True)),
                ('vit_d_mug', models.FloatField(blank=True, null=True)),
                ('vit_d_ui', models.FloatField(blank=True, null=True)),
                ('vit_k', models.FloatField(blank=True, null=True)),
                ('FA_sat_g', models.FloatField(blank=True, null=True)),
                ('FA_mono_g', models.FloatField(blank=True, null=True)),
                ('FA_poly_g', models.FloatField(blank=True, null=True)),
                ('cholestrl_mg', models.FloatField(blank=True, null=True)),
                ('gmwt_1', models.FloatField(blank=True, null=True)),
                ('gmwt_desc1', models.FloatField(blank=True, null=True)),
                ('gmwt_2', models.FloatField(blank=True, null=True)),
                ('gmwt_desc2', models.FloatField(blank=True, null=True)),
                ('refuse_pct', models.FloatField(blank=True, null=True)),
            ],
        ),
    ]
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,844 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/apps.py | from django.apps import AppConfig
class FoodritionApiConfig(AppConfig):
    """Django application configuration for the foodrition_api app."""
    name = 'foodrition_api'
| {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
48,845 | D0cWhats0n/Foodrition_app | refs/heads/master | /foodrition_backend/foodrition_api/models.py | from django.db import models
class Food(models.Model):
    """One row of the USDA-style nutrient table.

    Populated by the ``init_food_table`` management command from a CSV;
    all nutrient columns are optional since not every food reports every
    value. ``ndb_no`` is the USDA NDB identifier used for lookups by
    ``FoodViewSet`` and by the ML nutrition map (``nutr_ndb_no``).
    """
    ndb_no = models.IntegerField(null=False, unique=True)
    name = models.CharField(null=False, max_length=100)
    water_g = models.FloatField(null=True, blank=True)
    # NOTE(review): IntegerField here, but migration 0001 created this
    # column as FloatField and the CSV loader parses it as float64 —
    # confirm a later migration (0002) altered the column type.
    energy_kcal = models.IntegerField(null=True, blank=True)
    protein_g = models.FloatField(null=True, blank=True)
    lipid_g = models.FloatField(null=True, blank=True)
    ash_g = models.FloatField(null=True, blank=True)
    carbohydrt_g = models.FloatField(null=True, blank=True)
    fiber_g = models.FloatField(null=True, blank=True)
    sugar_g = models.FloatField(null=True, blank=True)
    calcium_mg = models.FloatField(null=True, blank=True)
    iron_mg = models.FloatField(null=True, blank=True)
    magnesium_mg = models.FloatField(null=True, blank=True)
    phosphorus_mg = models.FloatField(null=True, blank=True)
    potassium_mg = models.FloatField(null=True, blank=True)
    sodium_mg = models.FloatField(null=True, blank=True)
    zinc_mg = models.FloatField(null=True, blank=True)
    copper_mg = models.FloatField(null=True, blank=True)
    manganese_mg = models.FloatField(null=True, blank=True)
    selenium_mg = models.FloatField(null=True, blank=True)
    vit_c_mg = models.FloatField(null=True, blank=True)
    thiamin_mg = models.FloatField(null=True, blank=True)
    riboflavin_mg = models.FloatField(null=True, blank=True)
    niacin_mg = models.FloatField(null=True, blank=True)
    panto_acid_mg = models.FloatField(null=True, blank=True)
    vit_B6_mg = models.FloatField(null=True, blank=True)
    folate_mug = models.FloatField(null=True, blank=True)
    folic_acid_mug = models.FloatField(null=True, blank=True)
    food_folate_mug = models.FloatField(null=True, blank=True)
    folate_dfe_mug = models.FloatField(null=True, blank=True)
    choline_mg = models.FloatField(null=True, blank=True)
    vit_B12_mug = models.FloatField(null=True, blank=True)
    vit_A_iu = models.FloatField(null=True, blank=True)
    vit_A_rae = models.FloatField(null=True, blank=True)
    retinol_mug = models.FloatField(null=True, blank=True)
    alpha_carot_mug = models.FloatField(null=True, blank=True)
    beta_carot_mug = models.FloatField(null=True, blank=True)
    beta_crypt_mug = models.FloatField(null=True, blank=True)
    lycopene_mug = models.FloatField(null=True, blank=True)
    lut_and_cea_mug = models.FloatField(null=True, blank=True)
    vit_e_mug = models.FloatField(null=True, blank=True)
    vit_d_mug = models.FloatField(null=True, blank=True)
    vit_d_ui = models.FloatField(null=True, blank=True)
    vit_k = models.FloatField(null=True, blank=True)
    FA_sat_g = models.FloatField(null=True, blank=True)
    FA_mono_g = models.FloatField(null=True, blank=True)
    FA_poly_g = models.FloatField(null=True, blank=True)
    cholestrl_mg = models.FloatField(null=True, blank=True)
    # NOTE(review): gmwt_desc1/gmwt_desc2 suggest textual descriptions
    # but are FloatFields; the CSV loader does not populate them — verify.
    gmwt_1 = models.FloatField(null=True, blank=True)
    gmwt_desc1 = models.FloatField(null=True, blank=True)
    gmwt_2 = models.FloatField(null=True, blank=True)
    gmwt_desc2 = models.FloatField(null=True, blank=True)
    refuse_pct = models.FloatField(null=True, blank=True)
class FoodImage(models.Model):
    """An uploaded image stored alongside its predicted class id."""
    # Raw uploaded image file, saved by ClassificationAPI.post.
    img = models.ImageField(null=False)
classification = models.IntegerField(null=True) | {"/foodrition_backend/foodrition_api/tests.py": ["/foodrition_backend/foodrition_api/services/ml_model.py"], "/foodrition_backend/foodrition_api/serializers.py": ["/foodrition_backend/foodrition_api/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.