# my-interfuser-model / simulation / visualization.py
# mohammed-aljafry's picture
# Final fix v9: Re-upload with fully self-contained and correct package structure
# f08920f verified
# raw
# history blame
# 5.79 kB
# =========================================================
# File 2: simulation/visualization.py
# =========================================================
import cv2
import numpy as np
import math
PIXELS_PER_METER = 8
MAX_DISTANCE = 32
IMG_SIZE = MAX_DISTANCE * PIXELS_PER_METER * 2
EGO_CAR_X = IMG_SIZE // 2
EGO_CAR_Y = IMG_SIZE - (4.0 * PIXELS_PER_METER)
def process_camera_image(tensor_image):
    """Convert a normalized CHW torch tensor into a displayable BGR uint8 image.

    Reverses per-channel normalization (the constants match ImageNet
    mean/std -- presumably the data loader used them; confirm if reused),
    clips to [0, 1], scales to 0-255, and flips RGB -> BGR for OpenCV.
    """
    channel_std = np.array([0.229, 0.224, 0.225])
    channel_mean = np.array([0.485, 0.456, 0.406])
    # CHW tensor -> HWC numpy array on the CPU.
    hwc = tensor_image.permute(1, 2, 0).cpu().numpy()
    hwc = hwc * channel_std + channel_mean
    hwc = np.clip(hwc, 0, 1)
    # Scale, truncate to uint8, then reverse the channel axis (RGB -> BGR).
    return (hwc * 255).astype(np.uint8)[:, :, ::-1]
def ensure_rgb(image):
    """Return *image* as a 3-channel array, expanding single-channel input.

    Three-channel input is returned unchanged (same object, no copy).
    """
    needs_expand = len(image.shape) == 2 or image.shape[2] == 1
    if needs_expand:
        return cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    return image
def convert_grid_to_xy(i, j):
    """Map a 20x20 detection-grid cell (row i, col j) to ego-relative meters.

    The grid center is at (i, j) = (19.5, 9.5) with 1.6 m cells; rows
    count downward, so the row axis is flipped for the second coordinate.
    """
    cell_size = 1.6
    x = (j - 9.5) * cell_size
    y = (19.5 - i) * cell_size
    return x, y
def render_waypoints(waypoints, last_valid_waypoints=None):
    """Draw predicted waypoints as dots on a blank BEV canvas.

    When the current prediction has 3+ points it is drawn and remembered;
    otherwise the last remembered set is reused; with neither available a
    blank canvas is returned. Returns (image, updated last_valid_waypoints).
    """
    canvas = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    if waypoints is not None and len(waypoints) > 2:
        # Usable prediction: remember it for future frames.
        last_valid_waypoints = waypoints
        to_draw = waypoints
    elif last_valid_waypoints is not None:
        to_draw = last_valid_waypoints
    else:
        # Nothing valid yet -- return the empty canvas.
        return canvas, last_valid_waypoints
    final_index = len(to_draw) - 1
    for idx, pt in enumerate(to_draw):
        # pt[0] is forward (up on screen), pt[1] is lateral (right on screen).
        px = int(EGO_CAR_X + pt[1] * PIXELS_PER_METER)
        py = int(EGO_CAR_Y - pt[0] * PIXELS_PER_METER)
        # Last waypoint red, the rest green (BGR).
        dot_color = (0, 0, 255) if idx == final_index else (0, 255, 0)
        cv2.circle(canvas, (px, py), 4, dot_color, -1)
    return canvas, last_valid_waypoints
def render_self_car(theta, last_valid_theta=0.0):
    """Render the ego vehicle as a filled yellow rotated rectangle.

    A heading jump of >= pi/2 from the last accepted value is treated as a
    glitch: the previous heading is reused instead of the new one.
    Returns (image, updated last_valid_theta).
    """
    canvas = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    if abs(theta - last_valid_theta) < np.pi / 2:
        last_valid_theta = theta
    else:
        theta = last_valid_theta
    # Vehicle footprint 4 m x 2 m, converted to pixels.
    footprint_px = (int(4.0 * PIXELS_PER_METER), int(2.0 * PIXELS_PER_METER))
    # Angle is negated to match the on-screen rotation convention used
    # throughout this module (see add_rect).
    corners = cv2.boxPoints(((EGO_CAR_X, EGO_CAR_Y), footprint_px, -np.degrees(theta)))
    cv2.fillConvexPoly(canvas, np.int32(corners), (0, 255, 255))
    return canvas, last_valid_theta
def add_rect(img, loc, ori, box, value, color):
    """Paint one detection as a filled rotated rectangle on *img* (in place).

    loc   -- (x, y) position relative to the ego vehicle, in meters
    ori   -- orientation vector (cos, sin of heading)
    box   -- (length, width) in meters
    value -- confidence, used to scale the color intensity
    color -- base BGR color tuple
    """
    center = (
        EGO_CAR_X + int(loc[0] * PIXELS_PER_METER),
        EGO_CAR_Y - int(loc[1] * PIXELS_PER_METER),
    )
    size_px = (int(box[0] * PIXELS_PER_METER), int(box[1] * PIXELS_PER_METER))
    angle_deg = -np.degrees(math.atan2(ori[1], ori[0]))
    # Dim the base color proportionally to the detection confidence.
    shade = tuple(int(channel * value) for channel in color)
    corners = cv2.boxPoints((center, size_px, angle_deg))
    cv2.fillConvexPoly(img, np.int32(corners), shade)
def find_peak_box_improved(data):
    """Extract object detections from a 20x20x7 detection grid.

    A cell counts as a detection when its confidence (channel 0) exceeds
    the threshold AND is a strict local maximum over its 4-connected
    neighbours (the grid is zero-padded so border cells qualify too).
    The class is inferred from the predicted length (channel 4); the
    pedestrian/bike/car ranges are checked in that order, so overlapping
    lengths resolve to the smaller class. Cells whose length matches no
    range are discarded.

    Returns a list of dicts with keys: coords, type, confidence, raw_data.
    """
    # (class name, min_length, max_length) in meters -- checked in order.
    length_ranges = (
        ('pedestrian', 0.3, 1.5),
        ('bike', 1.0, 2.5),
        ('car', 2.0, 8.0),
    )
    conf_threshold = 0.25
    # Zero-pad so every cell has four in-bounds neighbours.
    padded = np.zeros((22, 22, 7))
    padded[1:21, 1:21] = data
    detections = []
    for i in range(1, 21):
        for j in range(1, 21):
            conf = padded[i, j, 0]
            if conf <= conf_threshold:
                continue
            neighbours = (
                padded[i - 1, j, 0],
                padded[i + 1, j, 0],
                padded[i, j - 1, 0],
                padded[i, j + 1, 0],
            )
            if not all(conf > nb for nb in neighbours):
                continue
            cell = (i - 1, j - 1)
            raw = data[cell]
            length = raw[4]
            obj_type = 'unknown'
            for name, lo, hi in length_ranges:
                if lo <= length < hi:
                    obj_type = name
                    break
            if obj_type != 'unknown':
                detections.append({
                    'coords': cell,
                    'type': obj_type,
                    'confidence': conf,
                    'raw_data': raw,
                })
    return detections
def render_improved(det_data, t=0):
    """Render detections onto a BEV canvas, extrapolated *t* units ahead.

    Returns (image, counts) where counts maps each class name (including
    'unknown') to the number of detections of that class.
    """
    class_colors = {
        'car': (0, 0, 255),
        'bike': (0, 255, 0),
        'pedestrian': (255, 0, 0),
        'unknown': (128, 128, 128),
    }
    detections = find_peak_box_improved(det_data)
    counts = {name: 0 for name in class_colors}
    canvas = np.zeros((IMG_SIZE, IMG_SIZE, 3), np.uint8)
    for det in detections:
        counts[det['type']] = counts.get(det['type'], 0) + 1
        i, j = det['coords']
        raw = det['raw_data']
        speed = raw[6]
        grid_x, grid_y = convert_grid_to_xy(i, j)
        # Channel 3 appears to encode heading as a fraction of pi -- TODO confirm.
        heading = raw[3] * np.pi
        ori = np.array([math.cos(heading), math.sin(heading)])
        # Extrapolate the position t units forward; note the y component is
        # subtracted while x is added -- preserved from the original code.
        x = grid_x + raw[1] + t * speed * ori[0]
        y = grid_y + raw[2] - t * speed * ori[1]
        box = np.array([raw[4], raw[5]])
        if det['type'] == 'pedestrian':
            # Inflate pedestrian boxes so they stay visible on the map.
            box *= 1.5
        add_rect(canvas, np.array([x, y]), ori, box, det['confidence'],
                 class_colors[det['type']])
    return canvas, counts
class DisplayInterface:
    """Compose camera view, BEV maps and text overlays into one dashboard image."""

    def __init__(self, width=1200, height=600):
        self._width = width
        self._height = height

    def run_interface(self, data):
        """Build and return the dashboard frame as a BGR uint8 array.

        data keys used: 'camera_view', 'map_t0'/'map_t1'/'map_t2',
        'text_info' (label -> string), 'object_counts' (timestep ->
        per-class count dict with 'car'/'bike'/'pedestrian' keys).

        NOTE(review): panel geometry below is hard-coded for 1200x600;
        the width/height parameters only size the canvas.
        """
        dashboard = np.zeros((self._height, self._width, 3), dtype=np.uint8)
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Left 800px: camera. Right column: t0 map on top, t1/t2 below.
        dashboard[:, :800] = cv2.resize(data.get('camera_view'), (800, 600))
        dashboard[:400, 800:1200] = cv2.resize(ensure_rgb(data['map_t0']), (400, 400))
        dashboard[400:600, 800:1000] = cv2.resize(ensure_rgb(data['map_t1']), (200, 200))
        dashboard[400:600, 1000:1200] = cv2.resize(ensure_rgb(data['map_t2']), (200, 200))
        # White separator lines between the panels.
        cv2.line(dashboard, (800, 0), (800, 600), (255, 255, 255), 2)
        cv2.line(dashboard, (800, 400), (1200, 400), (255, 255, 255), 2)
        cv2.line(dashboard, (1000, 400), (1000, 600), (255, 255, 255), 2)
        # Free-form status text, one line per entry.
        y_pos = 40
        for text in data['text_info'].values():
            cv2.putText(dashboard, text, (820, y_pos), font, 0.6, (255, 255, 255), 1)
            y_pos += 30
        # Per-timestep object counts.
        y_pos += 10
        for t, counts in data['object_counts'].items():
            count_str = f"{t}: C={counts['car']} B={counts['bike']} P={counts['pedestrian']}"
            cv2.putText(dashboard, count_str, (820, y_pos), font, 0.5, (255, 255, 255), 1)
            y_pos += 20
        # Panel labels.
        cv2.putText(dashboard, "t0", (1160, 30), font, 0.8, (0, 255, 255), 2)
        cv2.putText(dashboard, "t1", (960, 430), font, 0.8, (0, 255, 255), 2)
        cv2.putText(dashboard, "t2", (1160, 430), font, 0.8, (0, 255, 255), 2)
        return dashboard