|
|
|
|
|
|
|
|
import streamlit as st |
|
|
import google.generativeai as genai |
|
|
import os |
|
|
import json |
|
|
from io import StringIO |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# API key for Google Generative AI.
# Fix: the original read only OPENAI_API_KEY, which is the wrong vendor's
# variable for a Gemini/Gemma app. Prefer the Google-conventional names and
# keep the legacy one as a fallback so existing deployments still work.
api_key = (
    os.environ.get("GOOGLE_API_KEY")
    or os.environ.get("GEMINI_API_KEY")
    or os.environ.get("OPENAI_API_KEY")
)

# NOTE(review): if no variable is set, api_key is None and genai.configure
# accepts it silently; the first generate_content call will then fail and be
# shown as the error reply below. Confirm this is the intended failure mode.
genai.configure(api_key=api_key)
|
|
|
|
|
|
|
|
# --- Session state initialization (persists across Streamlit reruns) ---

# Conversation history: a list of {"user": str, "ai": str} turn dicts,
# appended to at the bottom of the script and replayed on every rerun.
if "messages" not in st.session_state:
    st.session_state.messages = []

# System prompt, editable from the sidebar.
# (Korean default, roughly: "You are a kind AI assistant.")
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "λΉμ μ μΉμ ν AI μ΄μμ€ν΄νΈμ
λλ€."

# Model handle used for all generations in this script run.
# NOTE(review): the app is branded "Gemini" but this loads a Gemma model id —
# confirm "gemma-3-27b-it" is the intended model.
model = genai.GenerativeModel("gemma-3-27b-it")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Page chrome ---
# NOTE(review): Streamlit requires st.set_page_config to be the first
# Streamlit *rendering* command of the script. Session state is touched
# earlier, which Streamlit tolerates, but no widget may render before this
# line — verify ordering if the file is reorganized.
st.set_page_config(page_title="Gemini μ±λ΄", page_icon="π€", layout="wide")

st.title("π€ Google Gemini λνν μ±λ΄")

st.caption("Hugging Face Spaces + Streamlit + Google Generative AI")
|
|
|
|
|
|
|
|
# --- Sidebar: system-prompt editing and chat-log export ---
with st.sidebar:
    st.subheader("βοΈ μ€μ ")

    # Editable copy of the system prompt; it is only committed to session
    # state when the apply button below is pressed.
    new_system_prompt = st.text_area("μμ€ν
ν둬ννΈ", st.session_state.system_prompt, height=100)

    if st.button("λ³κ²½ μ μ©"):
        st.session_state.system_prompt = new_system_prompt
        st.success("μμ€ν
ν둬ννΈκ° λ³κ²½λμμ΅λλ€.")

    st.markdown("---")

    # Offer the conversation as a downloadable JSON file once at least one
    # turn exists.
    if st.session_state.messages:
        # ensure_ascii=False keeps non-ASCII (Korean) text readable in the
        # exported file instead of \uXXXX escapes.
        json_data = json.dumps(st.session_state.messages, ensure_ascii=False, indent=2)
        st.download_button(
            label="πΎ λν λ‘κ·Έ JSON λ€μ΄λ‘λ",
            data=json_data,
            file_name="chat_log.json",
            mime="application/json"
        )
|
|
|
|
|
|
|
|
# Replay the stored conversation so the full history is visible after each
# Streamlit rerun. Each stored turn holds one user message and one AI reply.
for turn in st.session_state.messages:
    user_box = st.chat_message("user")
    with user_box:
        st.markdown(turn["user"])
    ai_box = st.chat_message("assistant")
    with ai_box:
        st.markdown(turn["ai"])
|
|
|
|
|
|
|
|
# --- Chat input and response generation ---
if prompt := st.chat_input("λ©μμ§λ₯Ό μ
λ ₯νμΈμ. μ’
λ£νλ €λ©΄ 'quit' μ
λ ₯"):
    # Sentinel word: "quit" ends the conversation instead of being sent to
    # the model. Only the warning is rendered; nothing is stored.
    if prompt.lower() == "quit":
        st.warning("λνλ₯Ό μ’
λ£ν©λλ€. μλ‘κ³ μΉ¨νλ©΄ λ€μ μμν μ μμ΅λλ€.")
    else:
        # Build a single flattened text prompt: system prompt first, then at
        # most the last 10 stored turns, then the new user message. The API
        # is called with this plain string rather than structured chat turns.
        context = f"μμ€ν
: {st.session_state.system_prompt}\n"
        for msg in st.session_state.messages[-10:]:
            context += f"μ¬μ©μ: {msg['user']}\nAI: {msg['ai']}\n"
        context += f"μ¬μ©μ: {prompt}\nAI:"

        # Echo the user's message immediately in the chat area.
        with st.chat_message("user"):
            st.markdown(prompt)

        # Call the model; any API failure is surfaced as the assistant's
        # reply text instead of crashing the script run.
        try:
            response = model.generate_content(context)
            ai_response = response.text
        except Exception as e:
            ai_response = f"β οΈ μ€λ₯ λ°μ: {e}"

        with st.chat_message("assistant"):
            st.markdown(ai_response)

        # Persist the completed turn so the replay loop above shows it on
        # the next rerun.
        st.session_state.messages.append({"user": prompt, "ai": ai_response})