Update app.py
Browse files
app.py
CHANGED
|
@@ -78,10 +78,9 @@ def prompt(inputs):
|
|
| 78 |
- Any visible brand logos or distinguishing marks (e.g., Tesla logo)
|
| 79 |
- Details of any visible damage (e.g., scratches, dents)
|
| 80 |
- Vehicle’s region or country (based on the license plate or other clues)
|
| 81 |
-
If some details are unclear or not visible, return `None` for those fields. Do not guess or provide inaccurate information."""
|
| 82 |
-
),
|
| 83 |
HumanMessage(
|
| 84 |
-
content=[
|
| 85 |
{"type": "text", "text": "Analyze the vehicle in the image and extract as many details as possible, including type, license plate, make, model, year, condition, damage, etc."},
|
| 86 |
{"type": "text", "text": instructions}, # include any other format instructions here
|
| 87 |
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{inputs['image']}", "detail": "low"}}
|
|
@@ -101,48 +100,54 @@ def MLLM_response(inputs):
|
|
| 101 |
pipeline = image_encoding | prompt | MLLM_response | parser
|
| 102 |
|
| 103 |
# Streamlit Interface for uploading images and showing results
|
| 104 |
-
st.header("Upload
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
st.
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 78 |
- Any visible brand logos or distinguishing marks (e.g., Tesla logo)
|
| 79 |
- Details of any visible damage (e.g., scratches, dents)
|
| 80 |
- Vehicle’s region or country (based on the license plate or other clues)
|
| 81 |
+
If some details are unclear or not visible, return `None` for those fields. Do not guess or provide inaccurate information."""),
|
|
|
|
| 82 |
HumanMessage(
|
| 83 |
+
content=[
|
| 84 |
{"type": "text", "text": "Analyze the vehicle in the image and extract as many details as possible, including type, license plate, make, model, year, condition, damage, etc."},
|
| 85 |
{"type": "text", "text": instructions}, # include any other format instructions here
|
| 86 |
{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{inputs['image']}", "detail": "low"}}
|
|
|
|
# Assemble the LCEL pipeline: encode the image file to base64, build the
# multimodal prompt, query the MLLM, then parse the structured output.
pipeline = image_encoding | prompt | MLLM_response | parser

# Streamlit Interface for uploading images and showing results
st.header("Upload Vehicle Images for Information Extraction")

# Option to select either single or batch image upload
upload_option = st.radio("Select Upload Type", ["Single Image Upload", "Batch Images Upload"])

# Single Image Upload
if upload_option == "Single Image Upload":
    st.subheader("Upload a Single Vehicle Image")
    # Accept both common JPEG extensions; the original only allowed ".jpeg",
    # which silently rejected ".jpg" files.
    uploaded_image = st.file_uploader("Choose a JPEG image", type=["jpeg", "jpg"])

    if uploaded_image is not None:
        # Display the uploaded image
        image = PILImage.open(uploaded_image)
        st.image(image, caption="Uploaded Image", use_column_width=True)

        # Persist the raw upload to disk so the pipeline's image_encoding
        # step can read it from a path (base64 encoding happens there, not here).
        # NOTE(review): fixed filename means concurrent sessions overwrite each
        # other's upload — consider a per-session temp file; verify before changing.
        image_path = "/tmp/uploaded_image.jpeg"
        with open(image_path, "wb") as f:
            f.write(uploaded_image.getbuffer())

        # Process the image through the pipeline
        output = pipeline.invoke({"image_path": image_path})

        # Show the results in a user-friendly format
        st.subheader("Extracted Vehicle Information")
        st.json(output)

        # Optionally, display more vehicle images from the folder
        # (Colab-style path; presumably only populated in that environment.)
        img_dir = "/content/images"
        image_paths = glob.glob(os.path.join(img_dir, "*.jpeg"))
        display_image_grid(image_paths)

# Batch Images Upload
elif upload_option == "Batch Images Upload":
    st.sidebar.header("Batch Image Upload")
    batch_images = st.sidebar.file_uploader(
        "Upload Images", type=["jpeg", "jpg"], accept_multiple_files=True
    )

    if batch_images:
        # Compute each upload's destination path once and reuse it for the
        # pipeline input, the file write, and the image grid below.
        image_paths = [f"/tmp/{img_file.name}" for img_file in batch_images]
        for img_file, path in zip(batch_images, image_paths):
            with open(path, "wb") as f:
                f.write(img_file.getbuffer())

        # Process the batch and display the results in a DataFrame
        batch_input = [{"image_path": path} for path in image_paths]
        batch_output = pipeline.batch(batch_input)
        df = pd.DataFrame(batch_output)
        st.dataframe(df)

        # Show images in a grid
        display_image_grid(image_paths)