Spaces:
Build error
Build error
Deploy Gradio app with multiple files
Browse files- app.py +32 -0
- config.py +3 -0
- models.py +47 -0
- requirements.txt +12 -0
- utils.py +22 -0
app.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from models import analyze_content
|
| 3 |
+
from utils import is_safe_image
|
| 4 |
+
from config import MAX_FILE_SIZE
|
| 5 |
+
|
| 6 |
+
def process_content(content):
    """Run the safety gate and, when it passes, the content analyzer.

    Args:
        content: Path to the uploaded media file.

    Returns:
        str: The analysis report, or a rejection message when the
        safety check fails.
    """
    # NOTE(review): is_safe_image decodes the upload as an *image*, so a
    # video upload will fail the check and be rejected — confirm intended.
    if is_safe_image(content):
        return analyze_content(content)
    return "Content violates safety guidelines. Please upload appropriate content."
|
| 11 |
+
|
| 12 |
+
# UI layout: left column takes the upload + trigger, right column shows the report.
with gr.Blocks(title="Visual Content Analysis") as demo:
    gr.Markdown("## Professional Visual Content Analysis")
    gr.Markdown("Upload images or videos for detailed analysis and feedback")

    with gr.Row():
        with gr.Column():
            # Single-file upload restricted to the image/video MIME groups.
            input_media = gr.File(label="Upload Media", file_types=["image", "video"], file_count="single")
            submit_btn = gr.Button("Analyze", variant="primary")
        with gr.Column():
            # Read-only so users cannot edit the generated report in place.
            output = gr.Textbox(label="Analysis Report", interactive=False)

    # NOTE(review): MAX_FILE_SIZE is only *displayed* here; nothing visible in
    # this file actually enforces the limit on uploads — confirm upstream.
    gr.Markdown(f"*Max file size: {MAX_FILE_SIZE}MB*")

    # Wire the button to the safety-gated analysis function.
    submit_btn.click(
        fn=process_content,
        inputs=input_media,
        outputs=output
    )

if __name__ == "__main__":
    demo.launch()
|
config.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Application configuration
MAX_FILE_SIZE = 10  # MB — shown in the UI footer; enforcement not visible in this file
# NOTE(review): SAFE_CONTENT_CATEGORIES is not referenced by any visible
# module — presumably intended for a future content classifier; confirm.
SAFE_CONTENT_CATEGORIES = ["professional", "educational", "artistic"]
|
models.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
import random
|
| 5 |
+
|
| 6 |
+
def analyze_content(file_path):
    """Generate a fictional analysis report for an image or video file.

    Placeholder for real computer-vision models: it reads only basic
    metadata (pixel dimensions or playback duration) and produces
    randomized per-aspect scores for demonstration purposes.

    Args:
        file_path: Path to the media file on disk.

    Returns:
        str: A multi-line, human-readable analysis report.
    """
    if file_path.lower().endswith(('.mp4', '.avi', '.mov')):
        cap = cv2.VideoCapture(file_path)
        try:
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
            # Guard: unreadable/corrupt files report 0 FPS, which previously
            # caused a ZeroDivisionError (and the original line was missing
            # its closing parenthesis — a syntax error).
            duration = frame_count / fps if fps else 0.0
        finally:
            # Release the capture even if a property read raises.
            cap.release()
        media_type = "video"
        length = f"{duration:.1f} seconds"
    else:
        # Context manager ensures the underlying file handle is closed.
        with Image.open(file_path) as img:
            width, height = img.size
        media_type = "image"
        length = f"{width}x{height} pixels"

    # Generate fictional analysis: one random 1-10 score per aspect.
    aspects = ["composition", "lighting", "focus", "color balance", "technical quality"]
    ratings = {aspect: random.randint(1, 10) for aspect in aspects}

    feedback = [
        "Professional analysis report:",
        f"Media type: {media_type}",
        f"Dimensions/duration: {length}",
        "\nDetailed assessment:"
    ]

    # Score bands: <4 poor, 4-6 adequate, >=7 good.
    for aspect, score in ratings.items():
        feedback.append(f"- {aspect.capitalize()}: {score}/10")
        if score < 4:
            feedback.append("  (Needs significant improvement)")
        elif score < 7:
            feedback.append("  (Adequate but could be enhanced)")
        else:
            feedback.append("  (Well-executed)")

    # Static recommendations — not derived from the scores above.
    feedback.append("\nRecommendations:")
    feedback.append("- Consider adjusting lighting conditions")
    feedback.append("- Ensure proper focus and framing")
    feedback.append("- Maintain consistent color temperature")

    return "\n".join(feedback)
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
numpy
|
| 3 |
+
pillow
|
| 4 |
+
opencv-python
|
| 5 |
+
torch
|
| 6 |
+
torchvision
|
| 7 |
+
scikit-image
|
| 8 |
+
# 1. File upload capability for images and videos
|
| 9 |
+
# 2. Content safety checks
|
| 10 |
+
# 3. Detailed analysis report generation
|
| 11 |
+
# 4. Professional feedback on technical aspects
|
| 12 |
+
# 5. Size limitations for uploaded files
|
utils.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from PIL import Image
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
def is_safe_image(file_path):
    """Basic content safety check (placeholder).

    Opens the file as an image and rejects anything that cannot be
    decoded or whose smallest side is under 50 pixels. A production
    version would add NSFW detection, appropriate-content validation,
    and privacy compliance checks.

    Args:
        file_path: Path to the candidate image file.

    Returns:
        bool: True when the file passes the basic checks, False otherwise
        (including any decode/IO failure).
    """
    try:
        # Context manager closes the file handle even on decode errors
        # (the original leaked the handle opened by Image.open).
        with Image.open(file_path) as img:
            # Convert to numpy array for analysis.
            img_array = np.array(img)

        # In a real application, this would include:
        # 1. NSFW content detection
        # 2. Appropriate content validation
        # 3. Privacy compliance checks

        # For this demo, just reject very small images (icons/thumbnails).
        return min(img_array.shape[:2]) >= 50
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any failure to decode is treated unsafe.
        return False
|