# traffic_backend/main.py
# Gradio app that fetches the latest traffic-map screenshot from an Apify
# actor run and computes a Traffic Congestion Index (TCI) from its colors.
import os
from io import BytesIO

import cv2
import gradio as gr
import numpy as np
import requests
from apify_client import ApifyClient
# Initialize the ApifyClient. The token is read from the environment so the
# secret does not have to live in source control; the previous hard-coded
# value is kept only as a backward-compatible fallback.
# NOTE(review): the fallback below looks like a live credential — rotate it
# and delete the fallback once APIFY_API_TOKEN is set in the deployment env.
client = ApifyClient(
    os.environ.get(
        "APIFY_API_TOKEN",
        "apify_api_YIdcCj4RBwUSEJfohmQo95dENLkUiJ3NzqTh",
    )
)
def fetch_image_url():
    """Run the Apify actor task and return the first screenshot URL.

    Returns:
        str | None: the ``screenshotUrl`` field of the first item in the
        run's default dataset, or ``None`` when the run produced no items.
        Callers must handle the ``None`` case.
    """
    # Run the Actor task and block until it finishes.
    run = client.task("epE74b4Zp6Z0ueWAo").call()
    # Only the first dataset item is needed; return as soon as we see one.
    for item in client.dataset(run["defaultDatasetId"]).iterate_items():
        print("Fetched screenshot URL:", item['screenshotUrl'])  # Debug print
        return item['screenshotUrl']
    # Defect fixed: the original fell off the end of the loop and returned
    # None implicitly when the dataset was empty; make that case explicit.
    print("No items found in the run's dataset.")
    return None
def download_image(url):
    """Download an image from *url* and decode it into a BGR OpenCV array.

    Args:
        url: HTTP(S) URL of the image, or ``None``/empty (treated as a
            failed fetch).

    Returns:
        numpy.ndarray | None: the decoded BGR image, or ``None`` when the
        download or the decode fails (a diagnostic is printed either way).
    """
    if not url:
        # Guard: fetch_image_url() may return None when its dataset is empty.
        print("No image URL to download.")
        return None
    try:
        # A timeout keeps the app from hanging forever on a stalled
        # connection (the original call had no timeout at all).
        response = requests.get(url, timeout=30)
    except requests.RequestException as exc:
        print("Failed to download image:", exc)
        return None
    if response.status_code != 200:
        print("Failed to download image. Status code:", response.status_code)
        return None
    # frombuffer avoids the extra copy made by np.array(bytearray(...)).
    buffer = np.frombuffer(response.content, dtype=np.uint8)
    image = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    if image is None:
        print("Failed to decode image.")
    return image
def calculate_tci_from_url():
    """Fetch the latest traffic screenshot and compute its congestion index.

    The Traffic Congestion Index (TCI) is a weighted average over the share
    of traffic-colored pixels: green (free flow, weight 0), orange (weight
    1), red (weight 2) and dark red (weight 3). The four shares are
    normalized to sum to 100, so the TCI ranges from 0 to 300.

    Returns:
        float | str: the TCI, or an error-message string when the image
        cannot be retrieved/decoded or contains no traffic-colored pixels.
    """
    image_url = fetch_image_url()
    image = download_image(image_url)
    if image is None:
        return "Error: Could not download or decode the image."
    # Persist a copy so the HSV thresholds below can be tuned offline.
    cv2.imwrite("downloaded_image.jpg", image)
    # Work in HSV, where traffic colors separate cleanly by hue.
    image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # HSV ranges for the traffic-layer colors. Red wraps around hue 0, so
    # it needs two bands ('red' and 'red2') that are OR-ed together below.
    color_ranges = {
        'orange': ((10, 100, 100), (20, 255, 255)),  # Adjusted range to exclude yellow
        'red': ((0, 100, 100), (10, 255, 255)),      # First range for red
        'red2': ((160, 100, 100), (180, 255, 255)),  # Second range for red
        'green': ((40, 40, 40), (80, 255, 255)),
        'dark_red': ((0, 50, 50), (10, 100, 100)),   # Range for dark red
    }
    # One binary mask per color.
    masks = {
        color: cv2.inRange(image_hsv, np.array(lower), np.array(upper))
        for color, (lower, upper) in color_ranges.items()
    }
    # Merge the two red hue bands into a single 'red' mask.
    masks['red'] = cv2.bitwise_or(masks['red'], masks['red2'])
    del masks['red2']
    # Percentage of the whole image covered by each color.
    total_pixels = image.shape[0] * image.shape[1]
    percentages = {
        color: (np.sum(mask != 0) / total_pixels) * 100
        for color, mask in masks.items()
    }
    print("Color percentages:", percentages)  # Debug print
    sum_percentage = sum(percentages.values())
    if sum_percentage == 0:
        # Defect fixed: the original raised ZeroDivisionError when the
        # screenshot contained no traffic-colored pixels at all.
        return "Error: No traffic-colored pixels detected in the image."
    # Normalize the four shares to sum to 100.
    normalized_percentages = {
        'P0': (percentages['green'] / sum_percentage) * 100,
        'P1': (percentages['orange'] / sum_percentage) * 100,
        'P2': (percentages['red'] / sum_percentage) * 100,
        'P3': (percentages['dark_red'] / sum_percentage) * 100,
    }
    print("Normalized percentages:", normalized_percentages)  # Debug print
    # Weighted mean: heavier congestion colors contribute more.
    TCI = (0 * normalized_percentages['P0'] +
           1 * normalized_percentages['P1'] +
           2 * normalized_percentages['P2'] +
           3 * normalized_percentages['P3'])
    return TCI
# Define the Gradio interface.
# No inputs: each invocation recomputes the TCI for the most recent Apify
# screenshot, so the UI is a single button plus the output textbox.
interface = gr.Interface(
    fn=calculate_tci_from_url,
    inputs=[],
    outputs=gr.Textbox(label="Traffic Congestion Index (TCI)")
)
# Launch the Gradio app.
# share=True also exposes a temporary public gradio.live URL in addition
# to the local server.
if __name__ == "__main__":
    interface.launch(share=True)