import streamlit as st
import google.generativeai as genai
import os
import json

# ------------------------------------------------------
# On Hugging Face Spaces, store the API key as a secret (environment variable):
# Settings → Repository secrets → add GOOGLE_API_KEY
# ------------------------------------------------------
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=GOOGLE_API_KEY)
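# Optional guard (a minimal sketch, assuming the secret is named GOOGLE_API_KEY):
# stop the app with a clear message instead of failing later if the key is missing.
if not GOOGLE_API_KEY:
    st.error("GOOGLE_API_KEY is not set. Add it under Settings → Repository secrets.")
    st.stop()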
# Initial setup
if "messages" not in st.session_state:
    st.session_state.messages = []
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "You are a friendly AI assistant."

model = genai.GenerativeModel("gemma-3-27b-it")
# ------------------------------------------------------
# Streamlit UI
# ------------------------------------------------------
st.set_page_config(page_title="Gemini Chatbot", page_icon="🤖", layout="wide")
st.title("🤖 Google Gemini Conversational Chatbot")
st.caption("Hugging Face Spaces + Streamlit + Google Generative AI")

# Sidebar: edit the system prompt & download the chat log
with st.sidebar:
    st.subheader("⚙️ Settings")
    new_system_prompt = st.text_area("System prompt", st.session_state.system_prompt, height=100)
    if st.button("Apply changes"):
        st.session_state.system_prompt = new_system_prompt
        st.success("System prompt updated.")

    st.markdown("---")

    if st.session_state.messages:
        # Serialize the chat history to JSON
        json_data = json.dumps(st.session_state.messages, ensure_ascii=False, indent=2)
        st.download_button(
            label="💾 Download chat log as JSON",
            data=json_data,
            file_name="chat_log.json",
            mime="application/json",
        )
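# Shape of the downloaded chat_log.json (illustrative values, derived from how
# messages are appended at the bottom of this file):
# [
#   {"user": "Hello", "ai": "Hi, how can I help you?"},
#   {"user": "What is Streamlit?", "ai": "Streamlit is a Python framework for data apps."}
# ]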
# Render the conversation so far
for msg in st.session_state.messages:
    with st.chat_message("user"):
        st.markdown(msg["user"])
    with st.chat_message("assistant"):
        st.markdown(msg["ai"])
# User input
if prompt := st.chat_input("Type a message. Enter 'quit' to end the conversation."):
    if prompt.lower() == "quit":
        st.warning("Ending the conversation. Refresh the page to start again.")
    else:
        # Build the conversation context
        context = f"System: {st.session_state.system_prompt}\n"
        for msg in st.session_state.messages[-10:]:  # keep only the 10 most recent turns
            context += f"User: {msg['user']}\nAI: {msg['ai']}\n"
        context += f"User: {prompt}\nAI:"
| with st.chat_message("user"): | |
| st.markdown(prompt) | |
| try: | |
| response = model.generate_content(context) | |
| ai_response = response.text | |
| except Exception as e: | |
| ai_response = f"β οΈ μ€λ₯ λ°μ: {e}" | |
| with st.chat_message("assistant"): | |
| st.markdown(ai_response) | |
| # λν κΈ°λ‘ μ μ₯ | |
| st.session_state.messages.append({"user": prompt, "ai": ai_response}) | |
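# ------------------------------------------------------
# Deployment note: a Hugging Face Space running this file needs a
# requirements.txt listing at least
#   streamlit
#   google-generativeai
# (the packages imported above; exact versions are left to the Space).
# ------------------------------------------------------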