# NOTE(review): the original paste began with "Spaces: / Sleeping / Sleeping" —
# Hugging Face Spaces page-status text scraped along with the file; not part of the script.
# -*- coding: utf-8 -*-
"""
Luke Fullard: 16 June 2024
Script to track an object of interest in a video
"""
# Standard library
import base64  # NOTE(review): currently unused in this file — confirm before removing
import os
import tempfile
from io import BytesIO

# Third-party
import cv2
import numpy as np
import pandas as pd
import streamlit as st
from PIL import Image
from skimage import filters
###############################################################################
###############################################################################
###############################################################################
# Function to apply image adjustments with new options
def apply_adjustments(frame, grayscale, contrast, blur, edges, sharpen_amount, subtract_background, lower_threshold, upper_threshold, binarize, binarize_threshold):
    """Apply the user-selected image adjustments to a single video frame.

    Parameters
    ----------
    frame : numpy.ndarray
        Frame as produced by ``cv2.VideoCapture.read()`` (BGR uint8).
    grayscale : bool
        Convert the frame to single-channel grayscale.
    contrast : float
        Multiplicative contrast factor; only applied when > 1.0.
    blur : int
        Gaussian blur kernel size; 0 disables blurring.  Even values are
        rounded up to the next odd number because ``cv2.GaussianBlur``
        requires an odd kernel size (an even value raises an OpenCV error).
    edges : bool
        Run Canny edge detection using the two thresholds below.
    sharpen_amount : float
        Unsharp-mask strength; 0 disables sharpening.
    subtract_background : bool
        Apply MOG2 background subtraction.  NOTE(review): the subtractor is
        re-created for every frame, so it accumulates no history across frames
        and effectively operates on a single image — confirm this is intended.
    lower_threshold, upper_threshold : int
        Canny hysteresis thresholds (only meaningful when ``edges`` is True).
    binarize : bool
        Threshold the frame to pure black/white.
    binarize_threshold : int
        Cut-off value (0-255) for binarization.

    Returns
    -------
    numpy.ndarray
        The adjusted frame, dtype uint8.
    """
    if grayscale:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if contrast > 1.0:
        # Clip to [0, 255] to avoid uint8 wrap-around after scaling.
        frame = np.clip(contrast * frame, 0, 255).astype(np.uint8)
    if blur > 0:
        # Fix: GaussianBlur demands an odd kernel size, but the UI slider
        # allows any value in 0-10 — round even sizes up instead of crashing.
        ksize = blur if blur % 2 == 1 else blur + 1
        frame = cv2.GaussianBlur(frame, (ksize, ksize), 0)
    if edges:
        frame = cv2.Canny(frame, lower_threshold, upper_threshold)
    # Sharpening
    if sharpen_amount > 0:
        # skimage returns floats in [0, 1]; convert back to uint8.
        frame = filters.unsharp_mask(frame, radius=1, amount=sharpen_amount)
        frame = (frame * 255).astype(np.uint8)
    # Background Subtraction
    if subtract_background:
        fgbg = cv2.createBackgroundSubtractorMOG2()
        frame = fgbg.apply(frame)
    if binarize:
        _, frame = cv2.threshold(frame, binarize_threshold, 255, cv2.THRESH_BINARY)
    return frame
###############################################################################
###############################################################################
###############################################################################
def part_one():
    """Step 1: render the file uploader and return the chosen video (or None)."""
    st.subheader('Part one: upload your video file')
    supported_formats = ["mp4", "avi", "mov", "mkv"]
    with st.expander('Instructions #1'):
        st.write('Please choose a file to upload. At present only the following formats are supported: "mp4", "avi", "mov", "mkv"')
    return st.file_uploader("Choose a video file", type=supported_formats)
###############################################################################
###############################################################################
###############################################################################
def part_two(uploaded_file):
    """Step 2: image adjustment and region-of-interest (ROI) selection.

    Persists the upload to a temporary file, opens it with OpenCV, renders
    the sidebar adjustment controls and a live preview of the chosen frame,
    and collects the rectangular ROI from the user.

    Parameters
    ----------
    uploaded_file :
        The file object returned by ``st.file_uploader`` in ``part_one()``.

    Returns
    -------
    tuple
        ``(vcap, file_name, total_frames, grayscale, contrast, blur, edges,
        sharpen_amount, subtract_background, lower_threshold, upper_threshold,
        binarize, binarize_threshold, x, y, w, h)`` — everything
        ``part_three()`` needs to run the tracker.
    """
    st.subheader('Part two: adjust image and select region of interest')
    with st.expander('Instructions #2'):
        st.write('''
        Part two is a two step process:
        a) Use the Image adjustments in the left hand side toolbar to modify the image until suitable for tracking analysis. In practice, I have found the "Binarize" tool to be the most useful for tracking. You can use the frame number selector just above the image to see how the image adjustment affects the other frames in the video sequence.
        b) Set the region of interest (ROI). Using the four number sliders below, choose the initial area of interest. the X,Y numbers define the top left of the rectangular area of interest, while the width and height define the rectangle dimensions. The rectangle will be drawn on the image to help guide you.
        **NOTE: The ROI is always set on the first frame, so ensure you are on that image frame when setting the ROI.**
        Once you are ready, click the "Start Object Tracking" button below the image.
        ''')
    # Save the uploaded video to a named temp file so cv2.VideoCapture can
    # open it by path.  delete=False is required: cv2 keeps reading from this
    # path after part_two returns.  NOTE(review): the file is never explicitly
    # unlinked — it is only removed when the OS cleans its temp directory.
    tfile = tempfile.NamedTemporaryFile(delete=False)
    tfile.write(uploaded_file.read())
    file_name, _ = os.path.splitext(uploaded_file.name)
    vcap = cv2.VideoCapture(tfile.name)
    total_frames = int(vcap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Close our handle; cv2 reads from the path, not this file object.
    tfile.close()
    instructions_container = st.empty()
    left_column, right_column = st.columns(2)
    # User input for frame number (1-based for the user, 0-based for cv2)
    frame_number = st.number_input(f'Enter a frame number between 1 and {total_frames}', min_value=1, max_value=total_frames, value=1)
    if frame_number > 1:
        st.warning("WARNING: When setting the region of interest (ROI) please ensure that you are defining the region based on Frame #1.")
    # Sidebar: adjustment controls
    st.sidebar.header("Image adjustments")
    grayscale = st.sidebar.checkbox("Convert to Grayscale")
    contrast = st.sidebar.slider("Contrast", 1.0, 3.0, 1.0)
    blur = st.sidebar.slider("Blur", 0, 10, 0)
    edges = st.sidebar.checkbox("Edge Detection")
    # Edge-detection thresholds (only shown when edge detection is enabled)
    lower_threshold = 0
    upper_threshold = 0
    if edges:
        st.sidebar.header("Edge Detection Parameters")
        lower_threshold = st.sidebar.slider("Lower Threshold", 0, 255, 100)
        upper_threshold = st.sidebar.slider("Upper Threshold", 0, 255, 200)
    sharpen_amount = st.sidebar.slider("Sharpen Amount", 0.0, 2.0, 0.0)
    subtract_background = st.sidebar.checkbox("Background Subtraction")
    # Binarization controls
    binarize = st.sidebar.checkbox("Binarize")
    binarize_threshold = 0
    if binarize:
        st.sidebar.header("Binarization Parameters")
        binarize_threshold = st.sidebar.slider("Binarization Threshold", 0, 255, 128)
    # Fix: bind ROI defaults up front so the return statement below cannot
    # raise NameError when the frame read fails (previously x/y/w/h were
    # only assigned inside the success branch).
    x, y, w, h = 0, 0, 100, 100
    # Seek to the specified frame and preview it with adjustments applied
    vcap.set(cv2.CAP_PROP_POS_FRAMES, frame_number - 1)
    success, frame = vcap.read()
    if success:
        # Manual ROI entry
        with instructions_container:
            st.write("Enter the Region of interest (ROI) coordinates")
        with left_column:
            x = st.number_input('Enter X coordinate of top-left corner', min_value=0, value=0)
            w = st.number_input('Enter width of the ROI', min_value=1, value=100)
        with right_column:
            y = st.number_input('Enter Y coordinate of top-left corner', min_value=0, value=0)
            h = st.number_input('Enter height of the ROI', min_value=1, value=100)
        # Apply adjustments to the frame
        adjusted_frame = apply_adjustments(frame, grayscale, contrast, blur, edges, sharpen_amount,
                                           subtract_background, lower_threshold, upper_threshold, binarize,
                                           binarize_threshold,
                                           )
        # Draw the ROI guide rectangle on the preview
        cv2.rectangle(adjusted_frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        st.image(Image.fromarray(adjusted_frame), caption=f'Adjusted Frame at {frame_number}', use_column_width=True)
    else:
        st.error(f"Could not read frame number {frame_number}.")
    st.write('---')
    # NOTE: vcap is deliberately NOT released here — part_three() reuses it.
    return (vcap, file_name, total_frames, grayscale, contrast, blur, edges, sharpen_amount,
            subtract_background, lower_threshold, upper_threshold, binarize, binarize_threshold,
            x, y, w, h)
###############################################################################
###############################################################################
###############################################################################
def part_three(vcap, temp_dir, file_name, grayscale, contrast, blur, edges, sharpen_amount, subtract_background, lower_threshold, upper_threshold, binarize, binarize_threshold, x, y, w, h):
    """Step 3: track the ROI through every frame of the video.

    Initializes a CSRT tracker on the first (adjusted) frame using the ROI
    ``(x, y, w, h)``, then updates it frame-by-frame, writing an annotated
    output video to ``{temp_dir}/{file_name}_ADJUSTED.avi`` and collecting
    the per-frame bounding box.

    Returns
    -------
    pandas.DataFrame
        Columns: 'Frame number', 'Horizontal pixel', 'Vertical pixel',
        'Box width', 'Box height'.  Empty if the first frame cannot be read.
    """
    st.subheader('Part Three: Your object being tracked!')
    st.write("...please be patient, it may take some time...")
    # Rewind: the tracker must be initialized on the very first frame.
    vcap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    # Initialize the object tracker
    tracker = cv2.TrackerCSRT_create()
    # Per-frame bounding-box history
    x_position = []
    y_position = []
    w_position = []
    h_position = []
    # Fix: out must exist before the success check — previously a failed
    # first read still reached out.release() and raised NameError.
    out = None
    # Read the first frame
    success, frame = vcap.read()
    if success:
        # Output video matches the source's dimensions and frame rate
        frame_width = int(vcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = int(vcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcap.get(cv2.CAP_PROP_FPS)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(f'{temp_dir}/{file_name}_ADJUSTED.avi', fourcc, fps, (frame_width, frame_height))
        # Initialize tracker with the adjusted first frame and the user's ROI
        adjusted_frame = apply_adjustments(frame, grayscale, contrast, blur, edges, sharpen_amount, subtract_background, lower_threshold, upper_threshold, binarize, binarize_threshold)
        bbox = (x, y, w, h)
        tracker.init(adjusted_frame, bbox)
        tracking_image_box = st.empty()
        tracking_image_info = st.empty()
        # Loop over the remaining frames
        frame_number = 0
        while True:
            success, frame = vcap.read()
            if not success:
                break
            adjusted_frame = apply_adjustments(frame, grayscale, contrast, blur, edges, sharpen_amount, subtract_background, lower_threshold, upper_threshold, binarize, binarize_threshold)
            # Update tracker; when it fails, the previous box values persist
            success_tracker, box = tracker.update(adjusted_frame)
            if success_tracker:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(adjusted_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Write the annotated frame to the output video
            out.write(adjusted_frame)
            # NOTE(review): total_frames is read from module scope (assigned by
            # the top-level script), not passed as a parameter — confirm before
            # reusing this function elsewhere.
            with tracking_image_info:
                st.write(f'''
                    Frame number: {frame_number+1} (of {total_frames-1})
                    (x-position, y-position, width, height) = ''', x, y, w, h)
            x_position.append(x)
            y_position.append(y)
            w_position.append(w)
            h_position.append(h)
            # Live preview of the tracked frame
            with tracking_image_box:
                st.image(Image.fromarray(adjusted_frame), caption='Tracked Frame', use_column_width=True)
            frame_number += 1
        # Assemble the per-frame results (frame index is 0-based)
        t = np.linspace(1, len(x_position), len(x_position)) - 1
        df = pd.DataFrame({
            'Frame number': t,
            'Horizontal pixel': x_position,
            'Vertical pixel': y_position,
            'Box width': w_position,
            'Box height': h_position
        })
    else:
        df = pd.DataFrame()
    # Release the capture always; release the writer only if it was created.
    vcap.release()
    if out is not None:
        out.release()
    return df
###############################################################################
###############################################################################
###############################################################################
def part_four(df, file_name):
    """Step 4: plot the tracking results and offer Excel/video downloads.

    Parameters
    ----------
    df : pandas.DataFrame
        Per-frame tracking results from ``part_three()``; may be empty.
    file_name : str
        Base name (no extension) used for the download file names.

    NOTE(review): reads the global ``temp_dir`` created by the top-level
    script rather than taking it as a parameter — confirm before reuse.
    """
    st.write("---")
    st.subheader('Part four: RESULTS!')
    # Fix: corrected user-facing typo "rectagle" -> "rectangle".
    st.write('''
    A plot of the horizontal and vertical position of the tracking rectangle (ROI) is displayed in the graph below.
    You can zoom in and hover over specific parts of the graph as desired.
    Note, the x-axis is frame number (not time) and the y-axis is pixel position. You will need to convert these to time and distance in your units of interest.
    ''')
    st.line_chart(
        df, x="Frame number", y=["Horizontal pixel", "Vertical pixel"], color=["#FF0000", "#0000FF"]  # Optional
    )
    st.write("Please use the two buttons below to download the pixel tracking results as a xlsx file, and a movie of the adjusted video as an avi file.")
    col_a, col_b = st.columns(2)

    def to_excel(df):
        # Serialize the DataFrame to an in-memory xlsx workbook; the context
        # manager finalizes the writer (no explicit save() needed).
        output = BytesIO()
        with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
            df.to_excel(writer, index=False, sheet_name='Sheet1')
        return output.getvalue()

    if len(df) > 0:
        with col_a:
            st.download_button(
                label="Download Excel Results File",
                data=to_excel(df),
                file_name=f"{file_name}.xlsx",
                # Fix: correct MIME type for .xlsx — "application/vnd.ms-excel"
                # is the legacy .xls type.
                mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
            )
        video_path = f'{temp_dir}/{file_name}_ADJUSTED.avi'
        # Robustness: the adjusted video only exists when part_three succeeded;
        # guard the open so a missing file cannot raise FileNotFoundError.
        if temp_dir and os.path.exists(video_path):
            with open(video_path, 'rb') as file:
                video_bytes = file.read()
            with col_b:
                st.download_button(
                    label="Download Video",
                    data=video_bytes,
                    file_name=f'{file_name}_ADJUSTED.avi',
                    mime="video/avi"
                )
###############################################################################
###############################################################################
###############################################################################
# Top-level Streamlit app: wire the four parts together inside a temporary
# working directory (used to hold the adjusted output video).
with tempfile.TemporaryDirectory() as temp_dir:
    st.title('Cell squashing tracker!')
    st.write('---')
    # Step 1: file upload
    uploaded_file = part_one()
    st.write('---')
    if uploaded_file is None:
        st.info("Upload a video file to get started.")
    else:
        # Step 2: image adjustment and ROI selection
        (vcap, file_name, total_frames, grayscale, contrast, blur, edges,
         sharpen_amount, subtract_background, lower_threshold, upper_threshold,
         binarize, binarize_threshold, x, y, w, h) = part_two(uploaded_file)
        # Steps 3 & 4 only run once the user starts tracking
        if st.button('Start Object Tracking'):
            df = part_three(vcap, temp_dir, file_name, grayscale, contrast,
                            blur, edges, sharpen_amount, subtract_background,
                            lower_threshold, upper_threshold, binarize,
                            binarize_threshold, x, y, w, h)
            part_four(df, file_name)