Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,232 +1,48 @@
|
|
| 1 |
-
# νμν λΌμ΄λΈλ¬λ¦¬λ₯Ό κ°μ Έμ΅λλ€.
|
| 2 |
import gradio as gr
|
| 3 |
import google.generativeai as genai
|
| 4 |
import os
|
| 5 |
-
import logging
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
|
| 9 |
-
|
|
|
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
logger.info("API ν€κ° νκ²½λ³μμμ κ°μ§λμμ΅λλ€.")
|
| 15 |
-
logger.info(f"API ν€ λ―Έλ¦¬λ³΄κΈ°: {GEMINI_API_KEY[:8]}...")
|
| 16 |
-
else:
|
| 17 |
-
logger.warning("GEMINI_API_KEYκ° νκ²½λ³μμμ κ°μ§λμ§ μμμ΅λλ€.")
|
| 18 |
-
|
| 19 |
-
# --- UI λ° μ±λ΄ μ€λͺ
---
|
| 20 |
-
# Gradio Blocksλ₯Ό μ¬μ©νμ¬ μ’ λ μ μ°ν UIλ₯Ό ꡬμ±ν©λλ€.
|
| 21 |
-
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
|
| 22 |
-
gr.Markdown(
|
| 23 |
-
"""
|
| 24 |
-
# βοΈ Gemini API μ±λ΄
|
| 25 |
-
Google Gemini APIλ₯Ό μ¬μ©νλ μ±λ΄μ
λλ€.
|
| 26 |
-
|
| 27 |
-
**μ€μ**: Hugging Face Spacesμ **Settings β Repository secrets**μ `GEMINI_API_KEY`κ° μ€μ λμ΄ μμ΄μΌ ν©λλ€.
|
| 28 |
-
|
| 29 |
-
[API ν€ λ°κΈλ°κΈ°](https://aistudio.google.com/app/apikey)
|
| 30 |
-
"""
|
| 31 |
-
)
|
| 32 |
-
|
| 33 |
-
# νκ²½λ³μ μν νμ
|
| 34 |
-
with gr.Row():
|
| 35 |
-
env_status = gr.Textbox(
|
| 36 |
-
label="νκ²½λ³μ μν",
|
| 37 |
-
value=f"GEMINI_API_KEY: {'β
μ€μ λ¨' if GEMINI_API_KEY else 'β μ€μ λμ§ μμ'}",
|
| 38 |
-
interactive=False
|
| 39 |
-
)
|
| 40 |
-
|
| 41 |
-
# Gradio μ±λ΄ UI μ»΄ν¬λνΈ - type νλΌλ―Έν° μΆκ°
|
| 42 |
-
chatbot = gr.Chatbot(
|
| 43 |
-
label="Gemini μ±λ΄",
|
| 44 |
-
height=600,
|
| 45 |
-
type="messages" # μ΄ νλΌλ―Έν°λ₯Ό μΆκ°νμ¬ κ²½κ³ ν΄κ²°
|
| 46 |
-
)
|
| 47 |
-
|
| 48 |
-
with gr.Row():
|
| 49 |
-
# μ¬μ©μ λ©μμ§ μ
λ ₯λ
|
| 50 |
-
msg = gr.Textbox(
|
| 51 |
-
label="λ©μμ§ μ
λ ₯",
|
| 52 |
-
placeholder="무μμ΄λ λ¬Όμ΄λ³΄μΈμ...",
|
| 53 |
-
scale=7,
|
| 54 |
-
lines=1
|
| 55 |
-
)
|
| 56 |
-
# μ μ‘ λ²νΌ
|
| 57 |
-
submit_button = gr.Button("μ μ‘", variant="primary", scale=1)
|
| 58 |
-
|
| 59 |
-
with gr.Accordion("κ³ κΈ μ€μ ", open=False):
|
| 60 |
-
# LLMμ μν μ μ μνλ μμ€ν
λ©μμ§
|
| 61 |
-
system_message = gr.Textbox(
|
| 62 |
-
value="You are a helpful and friendly chatbot.",
|
| 63 |
-
label="μμ€ν
λ©μμ§",
|
| 64 |
-
lines=2
|
| 65 |
-
)
|
| 66 |
-
# λͺ¨λΈμ μ°½μμ±μ μ‘°μ νλ μ¬λΌμ΄λ
|
| 67 |
-
temperature = gr.Slider(
|
| 68 |
-
minimum=0.0,
|
| 69 |
-
maximum=1.0,
|
| 70 |
-
value=0.7,
|
| 71 |
-
step=0.1,
|
| 72 |
-
label="Temperature"
|
| 73 |
-
)
|
| 74 |
-
# μμ±ν μ΅λ ν ν° μλ₯Ό μ‘°μ νλ μ¬λΌμ΄λ
|
| 75 |
-
max_tokens = gr.Slider(
|
| 76 |
-
minimum=1,
|
| 77 |
-
maximum=4096,
|
| 78 |
-
value=1024,
|
| 79 |
-
step=1,
|
| 80 |
-
label="Max new tokens"
|
| 81 |
-
)
|
| 82 |
-
|
| 83 |
-
# νκ²½λ³μ μλ‘κ³ μΉ¨ λ²νΌ
|
| 84 |
-
refresh_button = gr.Button("π νκ²½λ³μ μλ‘κ³ μΉ¨", size="sm")
|
| 85 |
-
|
| 86 |
-
def refresh_env_status():
|
| 87 |
-
"""νκ²½λ³μ μνλ₯Ό μλ‘κ³ μΉ¨ν©λλ€."""
|
| 88 |
-
global GEMINI_API_KEY
|
| 89 |
-
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
|
| 90 |
-
status = f"GEMINI_API_KEY: {'β
μ€μ λ¨' if GEMINI_API_KEY else 'β μ€μ λμ§ μμ'}"
|
| 91 |
-
if GEMINI_API_KEY:
|
| 92 |
-
status += f" (미리보기: {GEMINI_API_KEY[:8]}...)"
|
| 93 |
-
return status
|
| 94 |
-
|
| 95 |
-
refresh_button.click(refresh_env_status, outputs=[env_status])
|
| 96 |
-
|
| 97 |
-
# --- Gemini API νΈμΆ ν¨μ ---
|
| 98 |
-
def respond(message, chat_history, system_prompt, temp, max_output_tokens):
|
| 99 |
-
# ν¨μκ° νΈμΆλ λλ§λ€ νκ²½λ³μμμ API ν€λ₯Ό λ€μ νμΈ
|
| 100 |
-
api_key = os.environ.get("GEMINI_API_KEY")
|
| 101 |
-
|
| 102 |
-
# λλ²κΉ
μ μν μΆκ° μ 보
|
| 103 |
-
logger.info(f"API ν€ νμΈ: {'μμ' if api_key else 'μμ'}")
|
| 104 |
-
|
| 105 |
-
# νκ²½λ³μμμ κ°μ Έμ¨ API ν€κ° μμΌλ©΄ μλ΄ λ©μμ§λ₯Ό λμλλ€.
|
| 106 |
-
if not api_key:
|
| 107 |
-
error_msg = """β οΈ **μ€λ₯**: `GEMINI_API_KEY`κ° μ€μ λμ§ μμμ΅λλ€.
|
| 108 |
-
|
| 109 |
-
**ν΄κ²° λ°©λ²**:
|
| 110 |
-
1. Hugging Face Spacesμ **Settings** νμΌλ‘ μ΄λ
|
| 111 |
-
2. **Repository secrets** μΉμ
μ°ΎκΈ°
|
| 112 |
-
3. **New secret** λ²νΌ ν΄λ¦
|
| 113 |
-
4. Name: `GEMINI_API_KEY`, Value: μ€μ API ν€ μ
λ ₯
|
| 114 |
-
5. **Save** ν΄λ¦
|
| 115 |
-
6. Spaceλ₯Ό **μ¬μμ** (Settings β Factory reboot)
|
| 116 |
-
|
| 117 |
-
**μ°Έκ³ **: Private spaceκ° μλ κ²½μ°μλ secretsλ μμ νκ² λ³΄νΈλ©λλ€."""
|
| 118 |
-
yield error_msg
|
| 119 |
-
return
|
| 120 |
-
|
| 121 |
-
try:
|
| 122 |
-
# API ν€λ₯Ό μ€μ ν©λλ€.
|
| 123 |
-
genai.configure(api_key=api_key)
|
| 124 |
-
logger.info("API ν€ μ€μ μ±κ³΅")
|
| 125 |
-
except Exception as e:
|
| 126 |
-
yield f"API ν€ μ€μ μ μ€λ₯κ° λ°μνμ΅λλ€: {str(e)}"
|
| 127 |
-
return
|
| 128 |
-
|
| 129 |
-
# μ¬μ©ν λͺ¨λΈκ³Ό μμ€ν
ν둬ννΈλ₯Ό μ€μ ν©λλ€.
|
| 130 |
-
try:
|
| 131 |
-
# μ¬μ© κ°λ₯ν λͺ¨λΈλ‘ λ³κ²½
|
| 132 |
-
model = genai.GenerativeModel(
|
| 133 |
-
model_name='gemini-2.0-flash', # μμ μ μΈ λͺ¨λΈ μ¬μ©
|
| 134 |
-
system_instruction=system_prompt
|
| 135 |
-
)
|
| 136 |
-
except Exception as e:
|
| 137 |
-
yield f"λͺ¨λΈ μ΄κΈ°ν μ€λ₯: {str(e)}\nμ¬μ© κ°λ₯ν λͺ¨λΈ: gemini-1.5-flash, gemini-1.5-pro"
|
| 138 |
-
return
|
| 139 |
-
|
| 140 |
-
# Gradioμ λν κΈ°λ‘μ Gemini APIκ° μ΄ν΄ν μ μλ νμμΌλ‘ λ³νν©λλ€.
|
| 141 |
-
gemini_history = []
|
| 142 |
-
|
| 143 |
-
# type="messages" νμμ λ§κ² μ²λ¦¬
|
| 144 |
-
if isinstance(chat_history, list) and len(chat_history) > 0:
|
| 145 |
-
# μλ‘μ΄ λ©μμ§ νμ μ²λ¦¬
|
| 146 |
-
if isinstance(chat_history[0], dict):
|
| 147 |
-
for msg in chat_history:
|
| 148 |
-
if msg.get("role") == "user":
|
| 149 |
-
gemini_history.append({"role": "user", "parts": [msg.get("content", "")]})
|
| 150 |
-
elif msg.get("role") == "assistant":
|
| 151 |
-
gemini_history.append({"role": "model", "parts": [msg.get("content", "")]})
|
| 152 |
-
# μ΄μ νν νμλ μ§μ
|
| 153 |
-
else:
|
| 154 |
-
for user_msg, model_msg in chat_history:
|
| 155 |
-
if user_msg:
|
| 156 |
-
gemini_history.append({"role": "user", "parts": [user_msg]})
|
| 157 |
-
if model_msg:
|
| 158 |
-
gemini_history.append({"role": "model", "parts": [model_msg]})
|
| 159 |
-
|
| 160 |
-
# μ΄μ λν κΈ°λ‘μ λ°νμΌλ‘ μ±ν
μΈμ
μ μμν©λλ€.
|
| 161 |
-
chat = model.start_chat(history=gemini_history)
|
| 162 |
-
|
| 163 |
-
# λͺ¨λΈ μμ± κ΄λ ¨ μ€μ μ ꡬμ±ν©λλ€.
|
| 164 |
-
generation_config = genai.types.GenerationConfig(
|
| 165 |
-
temperature=temp,
|
| 166 |
-
max_output_tokens=int(max_output_tokens),
|
| 167 |
-
)
|
| 168 |
-
|
| 169 |
-
try:
|
| 170 |
-
# μ€νΈλ¦¬λ° λ°©μμΌλ‘ λ©μμ§λ₯Ό 보λ΄κ³ μλ΅μ λ°μ΅λλ€.
|
| 171 |
-
response = chat.send_message(
|
| 172 |
-
message,
|
| 173 |
-
stream=True,
|
| 174 |
-
generation_config=generation_config
|
| 175 |
-
)
|
| 176 |
-
|
| 177 |
-
# μ€νΈλ¦¬λ° μλ΅μ μ€μκ°μΌλ‘ UIμ νμν©λλ€.
|
| 178 |
-
full_response = ""
|
| 179 |
-
for chunk in response:
|
| 180 |
-
if hasattr(chunk, 'text'):
|
| 181 |
-
full_response += chunk.text
|
| 182 |
-
yield full_response
|
| 183 |
-
|
| 184 |
-
except Exception as e:
|
| 185 |
-
# API νΈμΆ μ€ μλ¬κ° λ°μνλ©΄ UIμ νμν©λλ€.
|
| 186 |
-
error_detail = str(e)
|
| 187 |
-
if "API_KEY_INVALID" in error_detail:
|
| 188 |
-
yield "β API ν€κ° μ ν¨νμ§ μμ΅λλ€. μ¬λ°λ₯Έ API ν€μΈμ§ νμΈν΄μ£ΌμΈμ."
|
| 189 |
-
elif "QUOTA_EXCEEDED" in error_detail:
|
| 190 |
-
yield "β API μ¬μ©λ νλλ₯Ό μ΄κ³Όνμ΅λλ€."
|
| 191 |
-
else:
|
| 192 |
-
yield f"μλ΅ μμ± μ€ μ€λ₯κ° λ°μνμ΅λλ€: {error_detail}"
|
| 193 |
-
|
| 194 |
-
# --- Gradio μ΄λ²€νΈ 리μ€λ ---
|
| 195 |
-
def on_submit(message, chat_history, system_prompt, temp, max_output_tokens):
|
| 196 |
-
if not message.strip():
|
| 197 |
-
return "", chat_history
|
| 198 |
-
|
| 199 |
-
# μλ‘μ΄ λ©μμ§ νμ μ¬μ©
|
| 200 |
-
chat_history = chat_history or []
|
| 201 |
-
|
| 202 |
-
# μ¬μ©μ λ©μμ§ μΆκ°
|
| 203 |
-
chat_history.append({"role": "user", "content": message})
|
| 204 |
-
|
| 205 |
-
# λ΄ μλ΅ μ€νΈλ¦¬λ°
|
| 206 |
-
bot_response_stream = respond(message, chat_history[:-1], system_prompt, temp, max_output_tokens)
|
| 207 |
-
|
| 208 |
-
for partial_response in bot_response_stream:
|
| 209 |
-
# λ§μ§λ§ λ©μμ§κ° μ¬μ©μ λ©μμ§μΈ κ²½μ°μλ§ λ΄ μλ΅ μΆκ°
|
| 210 |
-
if chat_history and chat_history[-1]["role"] == "user":
|
| 211 |
-
chat_history.append({"role": "assistant", "content": partial_response})
|
| 212 |
-
else:
|
| 213 |
-
# λ΄ μλ΅ μ
λ°μ΄νΈ
|
| 214 |
-
chat_history[-1]["content"] = partial_response
|
| 215 |
-
yield "", chat_history
|
| 216 |
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 228 |
|
| 229 |
-
# λ©μΈ μ€ν λΆλΆ
|
| 230 |
if __name__ == "__main__":
|
| 231 |
-
|
| 232 |
-
demo.launch(debug=True)
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import google.generativeai as genai
|
| 3 |
import os
|
|
|
|
| 4 |
|
| 5 |
+
# Configure the Gemini client once at import time when the HF Spaces
# secret GEMINI_API_KEY is present; `api_key` is bound either way
# (None when the secret is missing) so `chat` can check it.
if api_key := os.environ.get("GEMINI_API_KEY"):
    genai.configure(api_key=api_key)
|
| 9 |
|
| 10 |
+
def chat(message, history):
    """Answer one chat turn with the Gemini API.

    Args:
        message: The user's new message (str).
        history: Prior turns, either as (user, assistant) pairs or as
            messages-format dicts with "role"/"content" keys — both
            Gradio chat-history shapes are accepted.

    Returns:
        The model's reply text, or a human-readable error string
        (this function never raises; errors are shown in the chat).
    """
    # Re-read the key on every call so a secret added after startup is
    # picked up without rebuilding the Space (the previous version of
    # this app did exactly this, and the simplification lost it).
    key = os.environ.get("GEMINI_API_KEY")
    if not key:
        return "β API ν€κ° μ€μ λμ§ μμμ΅λλ€. HF Spaces Settingsμμ GEMINI_API_KEYλ₯Ό μΆκ°νμΈμ."

    try:
        genai.configure(api_key=key)
        model = genai.GenerativeModel('gemini-2.0-flash')

        # Convert Gradio history into Gemini's format. Accept both the
        # legacy tuple pairs and the messages-format dicts so the app
        # keeps working whichever format the installed Gradio passes.
        chat_history = []
        for turn in history or []:
            if isinstance(turn, dict):
                role = turn.get("role")
                content = turn.get("content", "")
                if role == "user":
                    chat_history.append({"role": "user", "parts": [content]})
                elif role == "assistant":
                    chat_history.append({"role": "model", "parts": [content]})
            else:
                human, assistant = turn
                if human:
                    chat_history.append({"role": "user", "parts": [human]})
                if assistant:
                    chat_history.append({"role": "model", "parts": [assistant]})

        # Start a session seeded with the prior turns and send the new message.
        chat_session = model.start_chat(history=chat_history)
        response = chat_session.send_message(message)
        return response.text

    except Exception as e:
        # Surface any API/model failure in the chat instead of crashing the UI.
        return f"β μ€λ₯ λ°μ: {str(e)}"
|
| 35 |
+
|
| 36 |
+
# Gradio chat UI.
# NOTE: retry_btn/undo_btn/clear_btn were deprecated in Gradio 4.x and
# removed in Gradio 5, where passing them raises TypeError at startup.
# Try the labelled-button form first and fall back to the plain
# constructor so the Space launches on either major version.
_chat_kwargs = dict(
    fn=chat,
    title="π€ Gemini μ±λ΄",
    description="Google Gemini APIλ₯Ό μ¬μ©ν κ°λ¨ν μ±λ΄μλλ€.",
    examples=["μλνμΈμ!", "μ€λ λ μ¨λ μ΄λ?", "νμ΄μ¬μ λν΄ μ€λͺν΄μ€"],
)
try:
    demo = gr.ChatInterface(
        **_chat_kwargs,
        retry_btn=None,
        undo_btn="μ΄μ λν μμ ",
        clear_btn="μ 체 λν μμ ",
    )
except TypeError:
    # Gradio >= 5: the *_btn parameters no longer exist; use defaults.
    demo = gr.ChatInterface(**_chat_kwargs)
|
| 46 |
|
|
|
|
| 47 |
# Launch only when run as a script; HF Spaces imports `demo` directly.
if __name__ == "__main__":
    demo.launch()
|
|
|