kikikara committed on
Commit
6ca52a3
·
verified ·
1 Parent(s): c9dde41

Update app.py

Browse files
Files changed (1)
  1. app.py +4 -282
app.py CHANGED
@@ -1,284 +1,6 @@
- # -*- coding: utf-8 -*-
- """app.ipynb
-
- Automatically generated by Colab.
-
- Original file is located at
-     https://colab.research.google.com/drive/1y3yISz14Lpsr131OIJCKA77lwbFmEJzB
- """
-
  import streamlit as st
- import os
- import joblib
- import torch
- import numpy as np
- import html
- from transformers import AutoTokenizer, AutoModel, logging as hf_logging
-
- # Set the Hugging Face Transformers logging level (show errors only)
- hf_logging.set_verbosity_error()
-
- # ────────── Configuration (adjusted for the Hugging Face Spaces environment) ──────────
- MODEL_NAME = "bert-base-uncased"
- DEVICE = "cpu"  # the free Hugging Face Spaces tier runs on CPU
- SAVE_DIR = "μ €μž₯μ €μž₯1"  # must match the name of the uploaded folder
- LAYER_ID = 4  # layer with the best SeparationScore in the original code
- SEED = 0  # SEED value from the original code
- CLF_NAME = "linear"  # CLF_NAME from the original code
-
- # ────────── Model loading (cached with Streamlit so it runs only once per app) ──────────
- @st.cache_resource
- def load_all_models_and_data():
-     """
-     Loads the LDA model, classifier, tokenizer, BERT model, and related matrices.
-     File paths must be correct when deploying to Hugging Face Spaces.
-     """
-     lda_file_path = os.path.join(SAVE_DIR, f"lda_layer{LAYER_ID}_seed{SEED}.pkl")
-     clf_file_path = os.path.join(SAVE_DIR, f"{CLF_NAME}_layer{LAYER_ID}_projlda_seed{SEED}.pkl")
-
-     # Check that the files exist (for debugging the deployed environment)
-     if not os.path.isdir(SAVE_DIR):
-         st.error(f"Error: model directory '{SAVE_DIR}' not found. Check that the folder was uploaded to Spaces correctly and that the names match.")
-         return None
-     if not os.path.exists(lda_file_path):
-         st.error(f"Error: LDA model file '{lda_file_path}' not found. Check the file name and path.")
-         return None
-     if not os.path.exists(clf_file_path):
-         st.error(f"Error: classifier model file '{clf_file_path}' not found. Check the file name and path.")
-         return None
-
-     try:
-         lda = joblib.load(lda_file_path)
-         clf = joblib.load(clf_file_path)
-     except Exception as e:
-         st.error(f"Error while loading model files: {e}")
-         st.error("The files may be corrupted, or there may be a joblib version compatibility issue.")
-         return None
-
-     if hasattr(clf, "base_estimator"):  # calibrated Ridge case
-         clf = clf.base_estimator
-
-     # Convert the LDA matrix/mean and classifier weights to PyTorch tensors
-     W_tensor = torch.tensor(lda.scalings_, dtype=torch.float32, device=DEVICE)
-     mu_vector = torch.tensor(lda.xbar_, dtype=torch.float32, device=DEVICE)
-     w_p_tensor = torch.tensor(clf.coef_, dtype=torch.float32, device=DEVICE)
-     b_p_vector = torch.tensor(clf.intercept_, dtype=torch.float32, device=DEVICE)
-
-     # Load the Hugging Face tokenizer and BERT model
-     try:
-         tokenizer_obj = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
-         model_obj = AutoModel.from_pretrained(
-             MODEL_NAME, output_hidden_states=True
-         ).to(DEVICE).eval()
-     except Exception as e:
-         st.error(f"Error while loading the Hugging Face model ({MODEL_NAME}): {e}")
-         st.error("Check the internet connection and that the model name is correct.")
-         return None
-
-     # Try to retrieve the class names
-     class_names = None
-     if hasattr(lda, 'classes_'):  # scikit-learn LDA
-         class_names = lda.classes_
-     elif hasattr(clf, 'classes_'):  # scikit-learn classifier
-         class_names = clf.classes_
-
-     return tokenizer_obj, model_obj, W_tensor, mu_vector, w_p_tensor, b_p_vector, class_names
-
- # ────────── Core analysis function (based on the original code) ──────────
- def explain_sentence_streamlit(
-     text: str,
-     tokenizer, model, W, mu, w_p, b_p,  # loaded objects
-     layer_id_to_use: int, device_to_use: str,  # configuration values
-     top_k_tokens: int = 5
- ) -> tuple[str, int, float, list] | None:  # explicit result type (None on failure)
-     """
-     Predicts the input sentence, computes token importances, and returns the results.
-     """
-     try:
-         # 1) Tokenize (with max-length and truncation handling)
-         enc = tokenizer(text, return_tensors="pt", truncation=True, max_length=510, padding=True)  # leaves room for CLS/SEP within BERT's 512-token limit
-         input_ids = enc["input_ids"].to(device_to_use)
-         attn_mask = enc["attention_mask"].to(device_to_use)
-
-         if input_ids.shape[1] == 0:  # input is too short or was filtered out entirely
-             # The Streamlit app could show the user a warning here.
-             # st.warning("No valid tokens after tokenization. Try another sentence.")
-             return None
-
-         # 2) Track gradients on the embeddings
-         input_embeds = model.embeddings.word_embeddings(input_ids).clone().detach()
-         input_embeds.requires_grad_(True)
-
-         # 3) Forward pass → extract the CLS vector
-         outputs = model(inputs_embeds=input_embeds,
-                         attention_mask=attn_mask,  # pass the attention mask
-                         output_hidden_states=True)
-         cls_vec = outputs.hidden_states[layer_id_to_use][:, 0, :]  # (1, 768)
-
-         # 4) LDA projection → classification logits
-         z_projected = (cls_vec - mu) @ W  # (1, d)
-         logit_output = z_projected @ w_p.T + b_p  # (1, C)
-
-         probs = torch.softmax(logit_output, dim=1)
-         pred_idx = torch.argmax(probs, dim=1).item()
-         pred_prob = probs[0, pred_idx].item()
-
-         # 5) Compute gradients
-         if input_embeds.grad is not None:
-             input_embeds.grad.zero_()  # clear any previous gradient
-         logit_output[0, pred_idx].backward()  # gradient with respect to the predicted class
-
-         if input_embeds.grad is None:  # guard against the exceptional case of no grad after backward
-             # st.error("Could not compute gradients.")  # shown inside the Streamlit app
-             return None
-
-         grads = input_embeds.grad.clone().detach()
-
-         # 6) Grad × Input → importance scores
-         scores = (grads * input_embeds.detach()).norm(dim=2).squeeze(0)
-         scores_np = scores.cpu().numpy()
-
-         # Normalize over valid scores only (avoids NaN/Inf)
-         valid_scores = scores_np[np.isfinite(scores_np)]
-         if len(valid_scores) > 0 and valid_scores.max() > 0:
-             scores_np = scores_np / (valid_scores.max() + 1e-9)  # normalize to 0–1
-         else:  # all scores are zero or invalid
-             scores_np = np.zeros_like(scores_np)
-
-         # 7) Build the HTML highlighting
-         tokens = tokenizer.convert_ids_to_tokens(input_ids[0], skip_special_tokens=False)  # include special tokens
-         html_tokens_list = []
-
-         # Look up the CLS, SEP, and PAD token IDs
-         cls_token_id = tokenizer.cls_token_id
-         sep_token_id = tokenizer.sep_token_id
-         pad_token_id = tokenizer.pad_token_id
-
-         for i, tok_str in enumerate(tokens):
-             if input_ids[0, i] == pad_token_id:  # skip PAD tokens
-                 continue
-
-             clean_tok_str = tok_str.replace("##", "") if "##" not in tok_str else tok_str[2:]
-
-             # Special tokens get a different style and can be excluded from importance scoring
-             if input_ids[0, i] == cls_token_id or input_ids[0, i] == sep_token_id:
-                 html_tokens_list.append(f"<span style='font-weight:bold;'>{html.escape(clean_tok_str)}</span>")
-             else:
-                 score_val = scores_np[i] if i < len(scores_np) else 0  # stay within the bounds of the score array
-                 color = f"rgba(255, 0, 0, {max(0, min(1, score_val)):.2f})"  # clip scores to the 0–1 range
-                 html_tokens_list.append(
-                     f"<span style='background-color:{color}; padding: 1px 2px; margin: 1px; border-radius: 3px; display:inline-block;'>{html.escape(clean_tok_str)}</span>"
-                 )
-
-         html_output_str = " ".join(html_tokens_list)
-         # Clean up unwanted spaces (e.g. between subwords)
-         html_output_str = html_output_str.replace(" ##", "")
-
-         # Top-K important-token info (excluding special and PAD tokens)
-         top_tokens_info_list = []
-         valid_indices_for_top_k = [
-             idx for idx, token_id in enumerate(input_ids[0].tolist())
-             if token_id not in [cls_token_id, sep_token_id, pad_token_id] and idx < len(scores_np)
-         ]
-
-         # Sort by score, highest first
-         sorted_valid_indices = sorted(valid_indices_for_top_k, key=lambda idx: -scores_np[idx])
-
-         for token_idx in sorted_valid_indices[:top_k_tokens]:
-             top_tokens_info_list.append({
-                 "token": tokens[token_idx],
-                 "score": f"{scores_np[token_idx]:.3f}"
-             })
-
-         return html_output_str, pred_idx, pred_prob, top_tokens_info_list
-
-     except Exception as e:
-         # Adjusted so errors surface more clearly inside the Streamlit app
-         # st.error(f"Unexpected error while analyzing the sentence: {e}")
-         # import traceback
-         # st.text_area("Error details (for debugging):", traceback.format_exc(), height=200)
-         # print(f"Unexpected error while analyzing the sentence: {e}")  # console logging (visible in the Spaces logs)
-         # import traceback
-         # print(traceback.format_exc())  # console logging
-         raise  # re-raise so Streamlit handles it, or return None below instead
-         # return None
-
-
- # ────────── Streamlit UI ──────────
- st.set_page_config(page_title="Sentence Token Importance Analyzer", layout="wide")
- st.title("📝 Sentence Token Importance Analyzer")
- st.markdown("Visualizes the importance of each token in a sentence using BERT and LDA.")
-
- # Try to load the models
- loaded_data_tuple = load_all_models_and_data()
-
- if loaded_data_tuple:
-     tokenizer, model, W, mu, w_p, b_p, class_names = loaded_data_tuple
-
-     # Show model information in the sidebar
-     st.sidebar.header("⚙️ Model and Configuration Info")
-     st.sidebar.info(f"**BERT model:** `{MODEL_NAME}`\n\n"
-                     f"**Layer ID used:** `{LAYER_ID}`\n\n"
-                     f"**Classifier type:** `{CLF_NAME}` (LDA-projection based)\n\n"
-                     f"**Device:** `{DEVICE.upper()}`")
-     if class_names is not None:
-         st.sidebar.markdown(f"**Predictable classes:** `{', '.join(map(str, class_names))}`")
-
-     # User input
-     st.subheader("👇 Enter an English sentence to analyze:")
-     user_sentence = st.text_area("Sentence:", "This movie is exceptionally good and I highly recommend it.", height=100)
-
-     top_k_slider = st.slider("Number of top-K important tokens to show:", min_value=1, max_value=10, value=5, step=1)
-
-     if st.button("Run analysis 🚀", type="primary"):
-         if user_sentence:
-             with st.spinner("Analyzing the sentence... please wait a moment...⏳"):
-                 analysis_results = None
-                 try:
-                     analysis_results = explain_sentence_streamlit(
-                         user_sentence, tokenizer, model, W, mu, w_p, b_p,
-                         LAYER_ID, DEVICE, top_k_tokens=top_k_slider
-                     )
-                 except Exception as e:  # handles errors re-raised inside explain_sentence_streamlit
-                     st.error(f"Error during analysis: {e}")
-                     st.info("Check the input sentence and model compatibility. If the problem persists, contact the maintainer.")
-                     # more detail is available in the Spaces logs (when print statements are used)
-
-                 if analysis_results:  # results returned successfully
-                     html_viz, predicted_idx, probability, top_k_list = analysis_results
-
-                     st.markdown("---")
-                     st.subheader("📊 Analysis Results")
-
-                     predicted_class_label = str(predicted_idx)  # default: the class index
-                     if class_names is not None and 0 <= predicted_idx < len(class_names):
-                         predicted_class_label = str(class_names[predicted_idx])  # use the class name
-
-                     st.success(f"**Predicted class:** **`{predicted_class_label}`** (confidence: **{probability:.2f}**)")
-
-                     st.subheader("🎨 Per-Token Importance Visualization")
-                     st.markdown(html_viz, unsafe_allow_html=True)
-
-                     st.subheader(f"⭐ Top-{top_k_slider} Important Tokens")
-                     if top_k_list:
-                         cols = st.columns(len(top_k_list) if len(top_k_list) <= 5 else 5)  # at most 5 per row
-                         for i, item in enumerate(top_k_list):
-                             with cols[i % len(cols)]:
-                                 st.metric(label=item['token'], value=item['score'])
-                     else:
-                         st.info("No highly important tokens found (special tokens etc. excluded).")
-                 # When analysis_results is None and st.error was already shown, no extra message is needed
-                 elif analysis_results is None and not user_sentence:  # button pressed without a sentence (in practice handled above)
-                     pass  # already handled by st.warning
-
-         else:  # button pressed without entering a sentence
-             st.warning("Please enter a sentence to analyze.")
- else:
-     st.error("Model loading failed, so the application cannot start. Check the uploaded files and path settings. Detailed errors are available in the 'Logs' tab of the Hugging Face Space.")
 
- st.markdown("---")
- st.markdown("<p style='text-align: center; color: grey;'>BERT-based sentence analysis demo</p>", unsafe_allow_html=True)
 
 
+ # app.py (minimal functionality test)
  import streamlit as st
 
+ st.title("Hugging Face Spaces Test App")
+ st.write("If you can see this message, Streamlit is running correctly!")
+ st.balloons()  # balloon effect on success