SophiaSR committed on
Commit
c069dc7
·
verified ·
1 Parent(s): 0a06b40

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +69 -0
  2. requirements.txt +5 -0
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- SECTION 1: INSTALLS & IMPORTS ---
# Run !pip install -q gradio transformers in a separate cell if not already installed
from transformers import MobileViTImageProcessor, MobileViTForImageClassification
from PIL import Image
import requests
import torch
import torch.nn.functional as F
import gradio as gr

# --- SECTION 2: MODEL LOADING ---
# Load the processor and model exactly once at import time: re-loading per
# request would re-download/re-initialize the weights on every prediction.
MODEL_NAME = "apple/mobilevit-small"
processor = MobileViTImageProcessor.from_pretrained(MODEL_NAME)
model = MobileViTForImageClassification.from_pretrained(MODEL_NAME)
16
# --- SECTION 3: THE CORE LOGIC ---
def predict_with_confidence(image_input):
    """Classify an image with MobileViT and report the top-1 confidence.

    Parameters
    ----------
    image_input : str
        Either an http(s) URL or a local file path pointing at an image.

    Returns
    -------
    str
        ``"Prediction: <label> | Confidence: <pct>"`` on success, or an
        ``"Error: ..."`` message on failure (the Gradio UI displays either).
    """
    import io  # local import keeps the module's top-level imports untouched

    try:
        # 1. Handle input (URL vs local path).
        if str(image_input).startswith("http"):
            # timeout so a dead host fails fast instead of hanging the UI;
            # raise_for_status so an HTML error page isn't handed to PIL;
            # BytesIO over response.content avoids the undecoded-gzip pitfall
            # of reading response.raw directly.
            response = requests.get(image_input, timeout=10)
            response.raise_for_status()
            image = Image.open(io.BytesIO(response.content)).convert("RGB")
        else:
            image = Image.open(image_input).convert("RGB")

        # 2. Pre-process into the tensor layout the model expects.
        inputs = processor(images=image, return_tensors="pt")

        # 3. Run inference; no_grad skips gradient tracking (less memory, faster).
        with torch.no_grad():
            outputs = model(**inputs)

        # 4. Turn logits into a human-readable label + softmax confidence.
        logits = outputs.logits
        probs = F.softmax(logits, dim=-1)

        predicted_class_idx = logits.argmax(-1).item()
        label = model.config.id2label[predicted_class_idx]
        confidence = probs[0][predicted_class_idx].item()

        return f"Prediction: {label} | Confidence: {confidence:.2%}"

    except Exception as e:
        # Surface the failure in the UI/console rather than crashing the worker.
        return f"Error: {str(e)}"
48
+
49
# --- SECTION 4: TESTING (CONSOLE) ---
# Quick smoke test against a known COCO validation image.
print("--- RUNNING TESTS ---")
sample_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
result = predict_with_confidence(sample_url)
print(f"URL Test: {result}")

# To test your local image, uncomment the line below and paste your path:
# my_local_path = "/kaggle/input/test-image/my_picture.png"
# print(f"Local Test: {predict_with_confidence(my_local_path)}")
57
+
58
# --- SECTION 5: INTERACTIVE INTERFACE ---
# Build a small web UI around the classifier, viewable inside the notebook.
image_widget = gr.Image(type="filepath", label="Upload Image or Drag & Drop")
output_widget = gr.Textbox(label="Model Output")

demo = gr.Interface(
    fn=predict_with_confidence,
    inputs=image_widget,
    outputs=output_widget,
    title="MobileViT Edge-AI Classifier",
    description="A lightweight Computer Vision model designed for mobile efficiency.",
)

# Set share=True to get a public link you can send to recruiters
demo.launch(share=True)
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ gradio
4
+ pillow
5
+ requests