code-slicer committed on
Commit
723d13b
Β·
verified Β·
1 Parent(s): 838438d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -79
app.py CHANGED
@@ -248,90 +248,41 @@ def _llm_structured_extract(user_text: str):
248
  # ──────────────────────────────── Streamlit용 LLM λͺ¨λ“œ UI ────────────────────────────────
249
  def render_llm_followup(chat_container, inline=False):
250
  _ensure_llm_state()
251
- MAX_TURNS = 6
252
 
253
- # ── 인라인: ꡬ뢄선 λ¨Όμ €, κ·Έ λ‹€μŒ μ•ˆλ‚΄ 버블, κ·Έ λ‹€μŒ μž…λ ₯μ°½ ──
254
- if inline:
255
- st.divider()
256
- if st.session_state.pop("llm_intro_needed", False):
257
- log_and_render(
258
- "🧠 μ΄μ œλΆ€ν„°λŠ” 자유 질문 λͺ¨λ“œμ˜ˆμš”. μ—¬ν–‰ 외에도 뭐든 ν•œκ΅­μ–΄λ‘œ λ¬Όμ–΄λ³΄μ„Έμš”!",
259
- sender="bot",
260
- chat_container=chat_container,
261
- key=f"llm_intro_{random.randint(1,999999)}"
262
- )
263
- st.markdown("#### πŸ€– LLM 질문")
264
- else:
265
- # ν’€μŠ€ν¬λ¦° λͺ¨λ“œμ—μ„œλŠ” μ•ˆλ‚΄ 버블 λ¨Όμ €
266
- if st.session_state.pop("llm_intro_needed", False):
267
- log_and_render(
268
- "🧠 μ΄μ œλΆ€ν„°λŠ” 자유 질문 λͺ¨λ“œμ˜ˆμš”. μ—¬ν–‰ 외에도 뭐든 ν•œκ΅­μ–΄λ‘œ λ¬Όμ–΄λ³΄μ„Έμš”!",
269
- sender="bot",
270
- chat_container=chat_container,
271
- key=f"llm_intro_{random.randint(1,999999)}"
272
- )
273
- st.markdown("### πŸ€– LLM 질문")
274
 
275
- # μž…λ ₯μ°½ (항상 μ—¬κΈ°μ„œλ§Œ λ…ΈμΆœ)
276
- q = st.text_input(
277
- "LLM 질문",
278
- placeholder="무엇이든 λ¬Όμ–΄λ³΄μ„Έμš” (μ’…λ£Œν•˜λ €λ©΄ 'μ’…λ£Œ' μž…λ ₯)",
279
- key="llm_input"
280
- )
281
 
282
- if q:
283
- if q.strip() in ("μ’…λ£Œ", "quit", "exit"):
284
- if inline:
285
- # βœ… 인라인 μ’…λ£Œ(버블은 μœ μ§€, μƒˆλ‘œκ³ μΉ¨ μ—†μŒ)
286
- st.session_state["llm_inline"] = False
287
- st.session_state["llm_input"] = ""
288
- log_and_render(
289
- "LLM λͺ¨λ“œλ₯Ό μ’…λ£Œν• κ²Œμš”. ν•„μš”ν•˜μ‹€ λ•Œ λ‹€μ‹œ μ§ˆλ¬Έν•΄ μ£Όμ„Έμš”! ✨",
290
- sender="bot", chat_container=chat_container,
291
- key=f"llm_end_inline_{random.randint(1,999999)}"
292
- )
293
- return
294
- else:
295
- # ν’€μŠ€ν¬λ¦° μ’…λ£Œ
296
- st.session_state["llm_mode"] = False
297
- st.session_state["llm_input"] = ""
298
- log_and_render(
299
- "LLM λͺ¨λ“œλ₯Ό μ’…λ£Œν• κ²Œμš”. ν•„μš”ν•˜μ‹€ λ•Œ λ‹€μ‹œ μ§ˆλ¬Έν•΄ μ£Όμ„Έμš”! ✨",
300
- sender="bot", chat_container=chat_container,
301
- key=f"llm_end_full_{random.randint(1,999999)}"
302
- )
303
- st.rerun()
304
- return
305
 
306
- # 일반 질의 처리
307
- log_and_render(q, sender="user", chat_container=chat_container,
308
- key=f"llm_user_{random.randint(1,999999)}")
309
- st.session_state.llm_history.append({"role": "user", "content": q})
310
 
311
- msgs = st.session_state.llm_history[-(MAX_TURNS-1):]
312
- a = _call_ollama_chat(
313
- messages=msgs,
314
- system_prompt=KOREAN_SYSTEM_PROMPT,
315
- temperature=0.8, top_p=0.9, top_k=40, repeat_penalty=1.1
316
- )
317
- if not a:
318
- log_and_render("⚠️ LLM 응닡을 λ°›μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€. Ollama μ„œλ²„λ₯Ό 확인해 μ£Όμ„Έμš”.",
319
- sender="bot", chat_container=chat_container,
320
- key=f"llm_err_{random.randint(1,999999)}")
321
- else:
322
- log_and_render(a, sender="bot", chat_container=chat_container,
323
- key=f"llm_bot_{random.randint(1,999999)}")
324
- st.session_state.llm_history.append({"role": "assistant", "content": a})
325
- st.session_state["llm_input"] = ""
326
-
327
- # ν•˜λ‹¨ λ²„νŠΌ: 인라인은 'LLM νŒ¨λ„ μ’…λ£Œ'만, ν’€μŠ€ν¬λ¦°μ€ 'LLM λͺ¨λ“œ μ’…λ£Œ'만
328
- if inline:
329
- if st.button("πŸ”š LLM λͺ¨λ“œ μ’…λ£Œ", key="llm_close_inline"):
330
- st.session_state["llm_inline"] = False
331
- else:
332
- if st.button("πŸ”š LLM λͺ¨λ“œ μ’…λ£Œ", key="llm_close_full"):
333
- st.session_state["llm_mode"] = False
334
- st.rerun()
335
 
336
  def render_llm_inline_if_open(chat_container):
337
  """llm_inline ν”Œλž˜κ·Έκ°€ 켜져 있으면 인라인 LLM νŒ¨λ„μ„ κ·Έλ¦½λ‹ˆλ‹€."""
 
248
  # ──────────────────────────────── Streamlit용 LLM λͺ¨λ“œ UI ────────────────────────────────
249
  def render_llm_followup(chat_container, inline=False):
250
  _ensure_llm_state()
 
251
 
252
+ st.markdown("### β—Ž LLM 질문")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
 
254
+ # κΈ°μ‘΄ λŒ€ν™” λ Œλ”
255
+ for m in st.session_state.get("llm_msgs", []):
256
+ with st.chat_message(m["role"]):
257
+ st.markdown(m["content"])
 
 
258
 
259
+ # ⚠️ μœ„μ ― keyλŠ” 'llm_query'둜 μ‚¬μš© (이 key에 λŒ€ν•΄ μ–΄λ””μ„œλ„ 직접 λŒ€μž… κΈˆμ§€)
260
+ user_msg = st.chat_input("무엇이든 λ¬Όμ–΄λ³΄μ„Έμš” (μ’…λ£Œν•˜λ €λ©΄ 'μ’…λ£Œ' μž…λ ₯)", key="llm_query")
261
+ if not user_msg:
262
+ return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
263
 
264
+ text = user_msg.strip()
 
 
 
265
 
266
+ # μ’…λ£Œ λͺ…λ Ή 처리
267
+ if text in {"μ’…λ£Œ", "quit", "exit"}:
268
+ st.session_state["llm_inline"] = False
269
+ st.session_state["llm_open"] = False
270
+ st.rerun()
271
+ return
272
+
273
+ # λŒ€ν™” μ €μž₯
274
+ st.session_state.setdefault("llm_msgs", [])
275
+ st.session_state["llm_msgs"].append({"role": "user", "content": text})
276
+
277
+ # LLM 호좜 (μ˜ˆμ™Έ μ•ˆμ „)
278
+ try:
279
+ bot = call_llm(text) # κΈ°μ‘΄ ν•¨μˆ˜ μ‚¬μš©
280
+ except Exception as e:
281
+ bot = "⚠️ LLM 응닡을 λ°›μ§€ λͺ»ν–ˆμŠ΅λ‹ˆλ‹€. Ollama μ„œλ²„λ₯Ό 확인해 μ£Όμ„Έμš”."
282
+ st.session_state["llm_msgs"].append({"role": "assistant", "content": bot})
283
+
284
+ # β›” μ—¬κΈ°μ„œ st.session_state['llm_query']λ‚˜ 'llm_input'을 직접 λŒ€μž…/μ‚­μ œν•˜μ§€ λ§ˆμ„Έμš”.
285
+ st.rerun()
 
 
 
 
286
 
287
  def render_llm_inline_if_open(chat_container):
288
  """llm_inline ν”Œλž˜κ·Έκ°€ 켜져 있으면 인라인 LLM νŒ¨λ„μ„ κ·Έλ¦½λ‹ˆλ‹€."""