mayonaise1979 committed on
Commit
6d0fb7e
·
verified ·
1 Parent(s): a536148

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -27
app.py CHANGED
@@ -1,48 +1,69 @@
 
 
1
  import gradio as gr
2
  from transformers import AutoImageProcessor, AutoModelForImageClassification
3
  from PIL import Image
4
  import torch
 
5
 
 
 
 
 
6
 
7
- path = "./"
8
- model = AutoModelForImageClassification.from_pretrained(path)
9
- processor = AutoImageProcessor.from_pretrained(path)
10
-
11
-
12
  def classify_image(image):
13
-
14
  inputs = processor(images=image, return_tensors="pt")
15
-
16
  with torch.no_grad():
17
  outputs = model(**inputs)
18
-
19
- probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
20
-
21
- labels = model.config.id2label
22
- result = {labels[i]: float(probs[i]) for i in range(len(labels))}
23
-
24
- return result
25
-
26
-
27
 
 
 
 
28
  guide_text = """
29
  ### โš ๏ธ F1-Score 0.92 ์„ฑ๋Šฅ ํ™•์ธํ•˜๊ธฐ (ํ•„๋…!)
30
- ์ด ๋ชจ๋ธ์€ ์‚ฐ์—… ํ˜„์žฅ์˜ **์ œํ•œ๋œ ํ™˜๊ฒฝ(๊น”๋”ํ•œ ๋ฐฐ๊ฒฝ)**์„ ํ•™์Šตํ–ˆ์Šต๋‹ˆ๋‹ค.
31
- ๊ฒ€์ฆ๋œ ์„ฑ๋Šฅ์„ ํ™•์ธํ•˜์‹œ๋ ค๋ฉด, ์ œ๊ฐ€ **ํ•™์Šต์— ์‹ค์ œ ์‚ฌ์šฉํ•œ ํ…Œ์ŠคํŠธ ์ด๋ฏธ์ง€**๋ฅผ ์‚ฌ์šฉํ•ด ์ฃผ์„ธ์š”.
32
 
33
  ๐Ÿ‘‰ **[๐Ÿ“ฅ ํ…Œ์ŠคํŠธ์šฉ ์ƒ˜ํ”Œ ์ด๋ฏธ์ง€ ๋‹ค์šด๋กœ๋“œ (ํด๋ฆญ)](https://huggingface.co/spaces/mayonaise1979/image_classifier/blob/main/0126.zip)**
34
- *(์œ„ zip ํŒŒ์ผ์„ ๋‹ค์šด๋ฐ›์•„ ์••์ถ•์„ ํ’€๊ณ  ์ด๋ฏธ์ง€๋ฅผ ๋„ฃ์–ด๋ณด์„ธ์š”!)*
35
 
 
 
 
 
36
  ---
37
- **์ฃผ์˜:** ๊ตฌ๊ธ€๋งํ•œ ์™ธ๋ถ€ ์ด๋ฏธ์ง€(๋ณต์žกํ•œ ๋ฐฐ๊ฒฝ)๋Š” ์ธ์‹๋ฅ ์ด ๋–จ์–ด์งˆ ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค.
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  """
39
 
40
- iface = gr.Interface(
41
- fn=classify_image,
42
- inputs=gr.Image(type="pil"),
43
- outputs=gr.Label(num_top_classes=3),
44
- title="์žฌํ™œ์šฉํ’ˆ๋ถ„๋ฅ˜๊ธฐ ๐Ÿค–",
45
- description="guide_text"
 
 
46
  )
47
 
48
- iface.launch()
 
1
+
2
+
3
  import gradio as gr
4
  from transformers import AutoImageProcessor, AutoModelForImageClassification
5
  from PIL import Image
6
  import torch
7
+ import torch.nn.functional as F
8
 
9
+ # 1. ๋ชจ๋ธ๊ณผ ํ”„๋กœ์„ธ์„œ ๋กœ๋“œ
10
+ model_path = "./"
11
+ processor = AutoImageProcessor.from_pretrained(model_path)
12
+ model = AutoModelForImageClassification.from_pretrained(model_path)
13
 
14
# 2. Classification function: maps a PIL image to {label: probability}.
def classify_image(image):
    """Classify *image* and return a label -> probability mapping.

    Returns None when no image was supplied (e.g. the Gradio input
    was cleared before submitting).
    """
    if image is None:
        return None
    # Preprocess the image into model-ready tensors.
    encoded = processor(images=image, return_tensors="pt")
    # Inference only — no gradient tracking needed.
    with torch.no_grad():
        logits = model(**encoded).logits
    # Softmax over the class dimension of the single-image batch.
    scores = F.softmax(logits, dim=1)[0]
    return {
        model.config.id2label[idx]: float(score)
        for idx, score in enumerate(scores)
    }
 
 
 
26
 
27
# ---------------------------------------------------------
# Top section: user guide (sample-image download link).
# Passed to gr.Interface as `description`, rendered above the inputs.
# ---------------------------------------------------------
guide_text = """
### โš ๏ธ F1-Score 0.92 ์„ฑ๋Šฅ ํ™•์ธํ•˜๊ธฐ (ํ•„๋…!)
์ด ๋ชจ๋ธ์€ **์‚ฐ์—…์šฉ ์—ฃ์ง€ ๋””๋ฐ”์ด์Šค(๋‹จ์ˆœ ๋ฐฐ๊ฒฝ)** ํ™˜๊ฒฝ์„ ๊ฐ€์ •ํ•˜์—ฌ ๋งŒ๋“  ๊ฒฝ๋Ÿ‰ํ™” ๋ชจ๋ธ์ž…๋‹ˆ๋‹ค.
๊ฒ€์ฆ๋œ ์„ฑ๋Šฅ์„ ํ™•์ธํ•˜์‹œ๋ ค๋ฉด, ์ œ๊ฐ€ ํ•™์Šต์— ์‹ค์ œ ์‚ฌ์šฉํ•œ ์ด๋ฏธ์ง€๋ฅผ ๋„ฃ์–ด๋ณด์„ธ์š”.

๐Ÿ‘‰ **[๐Ÿ“ฅ ํ…Œ์ŠคํŠธ์šฉ ์ƒ˜ํ”Œ ์ด๋ฏธ์ง€ ๋‹ค์šด๋กœ๋“œ (ํด๋ฆญ)](https://huggingface.co/spaces/mayonaise1979/image_classifier/blob/main/0126.zip)**
"""
37
 
38
# ---------------------------------------------------------
# Bottom section: development history (10-step experiment table).
# Passed to gr.Interface as `article`, rendered below the outputs.
# ---------------------------------------------------------
dev_summary = """
---
### ๐Ÿ› ๏ธ ๊ฐœ๋ฐœ ๋กœ๊ทธ: ์‚ฝ์งˆ๊ณผ ํ•ด๊ฒฐ์˜ ๊ธฐ๋ก (0.68 โ†’ 0.92)
๋‹จ์ˆœํ•œ ํŠœ๋‹์ด ์•„๋‹Œ, **๋ฐ์ดํ„ฐ ํ’ˆ์งˆ ๊ฐœ์„ **์„ ํ†ตํ•ด ๋ฌธ์ œ๋ฅผ ํ•ด๊ฒฐํ•œ 10๋‹จ๊ณ„์˜ ์‹คํ—˜ ๊ณผ์ •์ž…๋‹ˆ๋‹ค.

| ๋‹จ๊ณ„ | ์ฃผ์š” ์‹œ๋„ (Experiment) | F1-Score | ๋ถ„์„ ๋ฐ ๊ฒฐ๊ณผ (Key Insight) |
| :---: | :--- | :---: | :--- |
| 1 | Baseline (MobileViT) | 0.68 | ๋‚ฎ์€ ์„ฑ๋Šฅ, ํด๋ž˜์Šค ๋ถˆ๊ท ํ˜• ํ™•์ธ |
| 2~3 | ์ฆ๊ฐ•(Augmentation) ์žฌ๊ฒ€์ฆ | 0.67 | ํ•™์Šต๋ฅ /์ฆ๊ฐ• ์กฐ์ ˆํ–ˆ์œผ๋‚˜ ์„ฑ๋Šฅ ์ •์ฒด (ํšจ๊ณผ ๋ฏธ๋ฏธ) |
| 4~5 | Class Weight ์ ์šฉ | 0.65 ๐Ÿ“‰ | ๋…ธ์ด์ฆˆ ๋ฐ์ดํ„ฐ์— ๊ณผ์ ํ•ฉ๋˜์–ด ์„ฑ๋Šฅ ์˜คํžˆ๋ ค ํ•˜๋ฝ |
| 6 | ํŒŒ๋ผ๋ฏธํ„ฐ ์žฌ์กฐ์ • | 0.73 | ์ „์ฒ˜๋ฆฌ ๋ณ€๊ฒฝ ์—†์ด๋Š” ํ•œ๊ณ„์ž„์„ ํ™•์ธ |
| **7** | **๐Ÿ”ฅ ๋ฐ์ดํ„ฐ 2์ฐจ ์ „์ฒ˜๋ฆฌ (Cleaning)** | **0.82 ๐Ÿš€** | **๋ถˆ๋Ÿ‰ ๋ฐ์ดํ„ฐ 50% ์‚ญ์ œ โ†’ ์„ฑ๋Šฅ ๋น„์•ฝ์  ์ƒ์Šน** |
| **8** | **๋ชจ๋ธ ๋ณ€๊ฒฝ (EfficientFormer)** | **0.92 ๐Ÿ†** | ์ •์ œ๋œ ๋ฐ์ดํ„ฐ์— ์ตœ์‹  ๊ฒฝ๋Ÿ‰ ๋ชจ๋ธ ๋„์ž… |
| 9~10 | ํ•ด์ƒ๋„/์ •๊ทœํ™” ์ถ”๊ฐ€ ์‹คํ—˜ | 0.92 | ์„ฑ๋Šฅ ์ˆ˜๋ ด (์ถ”๊ฐ€ ๊ฐœ์„ ํญ ๋ฏธ๋ฏธ) |

> **๊ฒฐ๋ก :** Transformer ๋ชจ๋ธ์˜ ์„ฑ๋Šฅ์€ **"์–ผ๋งˆ๋‚˜ ์ข‹์€ ๋ชจ๋ธ์ธ๊ฐ€"๋ณด๋‹ค "์–ผ๋งˆ๋‚˜ ๊นจ๋—ํ•œ ๋ฐ์ดํ„ฐ์ธ๊ฐ€"**๊ฐ€ ๊ฒฐ์ •ํ•จ์„ ์ฆ๋ช….
"""
58
 
59
# 3. UI layout: a single gr.Interface — image in, top-3 label scores out.
interface = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil", label="์—ฌ๊ธฐ์— ์ด๋ฏธ์ง€๋ฅผ ๋“œ๋ž˜๊ทธํ•˜์„ธ์š”"),
    outputs=gr.Label(num_top_classes=3, label="๋ถ„๋ฅ˜ ๊ฒฐ๊ณผ"),
    title="โ™ป๏ธ ๊ฒฝ๋Ÿ‰ํ™” ์žฌํ™œ์šฉํ’ˆ ๋ถ„๋ฅ˜๊ธฐ",
    description=guide_text,  # user guide, rendered above the inputs
    article=dev_summary  # development-history table, rendered below the outputs
)

interface.launch()