Spaces:
Paused
Paused
Mandour committed on
Commit ·
9bd6442
1
Parent(s): bc8ce3f
remove time
Browse files
app.py
CHANGED
|
@@ -1,7 +1,6 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
import json
|
| 4 |
-
import time
|
| 5 |
from typing import Tuple
|
| 6 |
from PIL import Image
|
| 7 |
import torch
|
|
@@ -14,7 +13,6 @@ from models import (
|
|
| 14 |
)
|
| 15 |
from rembg import remove
|
| 16 |
from io import BytesIO
|
| 17 |
-
from time import time
|
| 18 |
|
| 19 |
# Load environment variables (optional for local dev; Spaces use web UI for env vars)
|
| 20 |
if os.path.exists('.env'):
|
|
@@ -117,14 +115,12 @@ def run_inference(image_tensor: torch.Tensor, description: str, category: str, m
|
|
| 117 |
device = model_components['device']
|
| 118 |
|
| 119 |
pil_img = transforms.ToPILImage()(image_tensor.squeeze(0).cpu())
|
| 120 |
-
start_time = time.time()
|
| 121 |
|
| 122 |
results = get_predicated_values(
|
| 123 |
model, category, pil_img, description,
|
| 124 |
image_processor, bert_tokenizer, roberta_tokenizer, device
|
| 125 |
)
|
| 126 |
|
| 127 |
-
end_time = time.time()
|
| 128 |
|
| 129 |
total_attributes = len([a for a in results if a["value"] and a["value"] != "N/A"])
|
| 130 |
avg_confidence = np.mean([a["confidence"] for a in results if a["value"]
|
|
@@ -134,7 +130,6 @@ def run_inference(image_tensor: torch.Tensor, description: str, category: str, m
|
|
| 134 |
"attributes": results,
|
| 135 |
"total_attributes": total_attributes,
|
| 136 |
"avg_confidence": avg_confidence,
|
| 137 |
-
"processing_time": end_time - start_time
|
| 138 |
}
|
| 139 |
|
| 140 |
def get_confidence_color(confidence: float) -> str:
|
|
@@ -179,7 +174,6 @@ def format_results_html(results: dict) -> str:
|
|
| 179 |
<p>
|
| 180 |
<strong>{results["total_attributes"]}</strong> attributes extracted |
|
| 181 |
<strong>{results["avg_confidence"]:.1%}</strong> avg confidence |
|
| 182 |
-
<strong>{results["processing_time"]:.2f}s</strong> processing time
|
| 183 |
</p>
|
| 184 |
</div>
|
| 185 |
</div>
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import pandas as pd
|
| 3 |
import json
|
|
|
|
| 4 |
from typing import Tuple
|
| 5 |
from PIL import Image
|
| 6 |
import torch
|
|
|
|
| 13 |
)
|
| 14 |
from rembg import remove
|
| 15 |
from io import BytesIO
|
|
|
|
| 16 |
|
| 17 |
# Load environment variables (optional for local dev; Spaces use web UI for env vars)
|
| 18 |
if os.path.exists('.env'):
|
|
|
|
| 115 |
device = model_components['device']
|
| 116 |
|
| 117 |
pil_img = transforms.ToPILImage()(image_tensor.squeeze(0).cpu())
|
|
|
|
| 118 |
|
| 119 |
results = get_predicated_values(
|
| 120 |
model, category, pil_img, description,
|
| 121 |
image_processor, bert_tokenizer, roberta_tokenizer, device
|
| 122 |
)
|
| 123 |
|
|
|
|
| 124 |
|
| 125 |
total_attributes = len([a for a in results if a["value"] and a["value"] != "N/A"])
|
| 126 |
avg_confidence = np.mean([a["confidence"] for a in results if a["value"]
|
|
|
|
| 130 |
"attributes": results,
|
| 131 |
"total_attributes": total_attributes,
|
| 132 |
"avg_confidence": avg_confidence,
|
|
|
|
| 133 |
}
|
| 134 |
|
| 135 |
def get_confidence_color(confidence: float) -> str:
|
|
|
|
| 174 |
<p>
|
| 175 |
<strong>{results["total_attributes"]}</strong> attributes extracted |
|
| 176 |
<strong>{results["avg_confidence"]:.1%}</strong> avg confidence |
|
|
|
|
| 177 |
</p>
|
| 178 |
</div>
|
| 179 |
</div>
|