jonathanagustin commited on
Commit
73d8c97
Β·
verified Β·
1 Parent(s): 5a741a4

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +17 -4
  2. app.py +48 -0
  3. requirements.txt +4 -0
README.md CHANGED
@@ -1,12 +1,25 @@
1
  ---
2
  title: Image Detective
3
- emoji: πŸ’»
4
  colorFrom: blue
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 6.0.2
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  title: Image Detective
3
+ emoji: πŸ”
4
  colorFrom: blue
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: "6.0.2"
8
  app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
+ ## πŸ” Image Detective
14
+
15
+ Classify images into any categories you define using CLIP zero-shot classification.
16
+
17
+ ## Features
18
+
19
+ - Upload any image
20
+ - Define custom categories on the fly
21
+ - No local model downloads — inference runs via the Hugging Face Inference API
22
+
23
+ ## Setup
24
+
25
+ Add your `HF_TOKEN` as a Secret in Space Settings.
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Read the API token from the environment (configured as a Secret in the
# HF Space settings). An anonymous client still works, with lower rate limits.
HF_TOKEN = os.environ.get("HF_TOKEN", "")
if HF_TOKEN:
    client = InferenceClient(token=HF_TOKEN)
else:
    client = InferenceClient()
+
9
+
def classify(image, labels_text: str) -> dict:
    """Zero-shot classify *image* against user-provided candidate labels.

    Parameters
    ----------
    image:
        PIL image from the Gradio input, or ``None`` when nothing was
        uploaded.
    labels_text:
        Comma-separated candidate labels typed by the user. Blank entries
        are ignored; when no usable label remains, a default set is used.

    Returns
    -------
    dict
        Mapping of ``label -> confidence score`` suitable for ``gr.Label``;
        ``{}`` when no image was given; ``{"Error": message}`` on failure.
    """
    if image is None:
        return {}

    # Parse labels, skipping empty fragments such as trailing commas.
    # (`label` rather than the ambiguous single-letter `l`, per PEP 8/E741.)
    labels = [label.strip() for label in labels_text.split(",") if label.strip()]
    if not labels:
        labels = ["cat", "dog", "bird", "car", "person"]

    try:
        results = client.zero_shot_image_classification(
            image,
            candidate_labels=labels,
            model="openai/clip-vit-base-patch32",
        )
        return {r.label: r.score for r in results}
    except Exception as e:
        # Broad on purpose: surface API/network problems in the UI instead
        # of crashing the Space.
        return {"Error": str(e)}
27
+
28
+
# Build the UI: inputs (image + categories) on the left, predictions on the right.
with gr.Blocks(title="Image Detective") as demo:
    gr.Markdown("# πŸ” Image Detective\nUpload an image and define your own categories!")

    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            img_input = gr.Image(type="pil", label="Upload or drop an image")
            labels_input = gr.Textbox(
                label="Categories (comma-separated)",
                placeholder="cat, dog, bird, car",
                value="cat, dog, bird, car, person",
            )
            btn = gr.Button("Classify!", variant="primary")
        with gr.Column(scale=1):
            output = gr.Label(label="Predictions", num_top_classes=5)

    # Both clicking the button and pressing Enter in the textbox classify.
    btn.click(classify, inputs=[img_input, labels_input], outputs=output)
    labels_input.submit(classify, inputs=[img_input, labels_input], outputs=output)

# Guard the launch so importing this module (e.g. from tests or tooling)
# does not start a server. HF Spaces execute app.py as __main__, so the
# Space's runtime behavior is unchanged.
if __name__ == "__main__":
    demo.queue()
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio>=6.0.0
2
+ huggingface_hub>=0.23.0
3
+ pillow>=10.0.0
4
+