Update src/streamlit_app.py
Browse files- src/streamlit_app.py +80 -38
src/streamlit_app.py
CHANGED
|
@@ -1,40 +1,82 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
import pandas as pd
|
| 4 |
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
|
| 10 |
-
If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
|
| 11 |
-
forums](https://discuss.streamlit.io).
|
| 12 |
-
|
| 13 |
-
In the meantime, below is an example of what you can do with just a few lines of code:
|
| 14 |
-
"""
|
| 15 |
-
|
| 16 |
-
num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
|
| 17 |
-
num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
|
| 18 |
-
|
| 19 |
-
indices = np.linspace(0, 1, num_points)
|
| 20 |
-
theta = 2 * np.pi * num_turns * indices
|
| 21 |
-
radius = indices
|
| 22 |
-
|
| 23 |
-
x = radius * np.cos(theta)
|
| 24 |
-
y = radius * np.sin(theta)
|
| 25 |
-
|
| 26 |
-
df = pd.DataFrame({
|
| 27 |
-
"x": x,
|
| 28 |
-
"y": y,
|
| 29 |
-
"idx": indices,
|
| 30 |
-
"rand": np.random.randn(num_points),
|
| 31 |
-
})
|
| 32 |
-
|
| 33 |
-
st.altair_chart(alt.Chart(df, height=700, width=700)
|
| 34 |
-
.mark_point(filled=True)
|
| 35 |
-
.encode(
|
| 36 |
-
x=alt.X("x", axis=None),
|
| 37 |
-
y=alt.Y("y", axis=None),
|
| 38 |
-
color=alt.Color("idx", legend=None, scale=alt.Scale()),
|
| 39 |
-
size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
|
| 40 |
-
))
|
|
|
|
| 1 |
+
# streamlit_app.py

import streamlit as st
import google.generativeai as genai
import os
import json
from io import StringIO  # NOTE(review): appears unused in this file — confirm before removing

# ------------------------------------------------------
# On Hugging Face Spaces the API key is stored as a secret
# (Settings -> secrets). The secret name must match the
# variable actually read below: GOOGLE_API_KEY.
# (Fix: the original comment said to register GEMINI_API_KEY,
# which os.getenv("GOOGLE_API_KEY") would never pick up.)
# ------------------------------------------------------
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")  # None when the secret is missing
genai.configure(api_key=GOOGLE_API_KEY)

# Initial session state: the chat history and the editable system
# prompt must survive Streamlit reruns, so both live in session_state.
if "messages" not in st.session_state:
    st.session_state.messages = []  # list of {"user": str, "ai": str} turns

if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = "당신은 친절한 AI 어시스턴트입니다."

# Model handle; generation itself happens once per user turn below.
model = genai.GenerativeModel("gemma-3-27b-it")
# ------------------------------------------------------
# Streamlit UI — page chrome.
# NOTE(review): set_page_config is expected to be the first st.* element
# call of the script run — confirm nothing above emits UI elements.
# ------------------------------------------------------
_PAGE_TITLE = "Gemini 챗봇"
_APP_TITLE = "🤖 Google Gemini 대화형 챗봇"
_APP_CAPTION = "Hugging Face Spaces + Streamlit + Google Generative AI"

st.set_page_config(page_title=_PAGE_TITLE, page_icon="🤖", layout="wide")
st.title(_APP_TITLE)
st.caption(_APP_CAPTION)
# Sidebar: edit the system prompt & download the conversation log.
with st.sidebar:
    st.subheader("⚙️ 설정")
    edited_prompt = st.text_area("시스템 프롬프트", st.session_state.system_prompt, height=100)
    if st.button("변경 적용"):
        # Only committed to session state when the button is pressed.
        st.session_state.system_prompt = edited_prompt
        st.success("시스템 프롬프트가 변경되었습니다.")

    st.markdown("---")
    if st.session_state.messages:
        # Serialize the history to JSON (keep Korean readable, pretty-print).
        chat_log_json = json.dumps(
            st.session_state.messages, ensure_ascii=False, indent=2
        )
        st.download_button(
            label="💾 대화 로그 JSON 다운로드",
            data=chat_log_json,
            file_name="chat_log.json",
            mime="application/json",
        )
# Replay the stored conversation so it stays visible across Streamlit reruns.
for turn in st.session_state.messages:
    user_text, ai_text = turn["user"], turn["ai"]
    with st.chat_message("user"):
        st.markdown(user_text)
    with st.chat_message("assistant"):
        st.markdown(ai_text)
# User input handling — one conversational turn per Streamlit rerun.
if prompt := st.chat_input("메시지를 입력하세요. 종료하려면 'quit' 입력"):
    # Robustness fix: strip surrounding whitespace so " quit " also ends the chat.
    if prompt.strip().lower() == "quit":
        st.warning("대화를 종료합니다. 새로고침하면 다시 시작할 수 있습니다.")
    else:
        # Build the generation context: system prompt plus the last 10
        # exchanges, flattened into a plain-text transcript.
        # (join instead of repeated += for idiomatic string building.)
        parts = [f"시스템: {st.session_state.system_prompt}\n"]
        for msg in st.session_state.messages[-10:]:  # keep only the 10 most recent turns
            parts.append(f"사용자: {msg['user']}\nAI: {msg['ai']}\n")
        parts.append(f"사용자: {prompt}\nAI:")
        context = "".join(parts)

        with st.chat_message("user"):
            st.markdown(prompt)

        # Broad catch is deliberate: this is the top-level UI boundary, and any
        # API/network failure should be shown to the user, not crash the app.
        try:
            response = model.generate_content(context)
            ai_response = response.text
        except Exception as e:
            ai_response = f"⚠️ 오류 발생: {e}"

        # Persist the exchange so the history loop re-renders it next rerun.
        with st.chat_message("assistant"):
            st.markdown(ai_response)

        # NOTE(review): error replies are stored too, so they get fed back into
        # future context as if the model said them — confirm that is intended.
        st.session_state.messages.append({"user": prompt, "ai": ai_response})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|