# GeminiChatbot / app.py
# (Hugging Face Space by Ashkchamp — commit e094945, verified)
# app.py ─ Streamlit + LangChain + Groq
import os, asyncio, streamlit as st
from dotenv import load_dotenv
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from langchain_groq import ChatGroq
# ───────────────────────── bootstrap event‑loop ─────────────────────
# On Windows, install the selector policy BEFORE any loop is created so
# the policy actually governs the loop we set up below. (The original
# set the policy after creating the loop, so it had no effect on it.)
if os.name == "nt":
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
try:
    # Reuse the loop if this thread is already running one.
    asyncio.get_running_loop()
except RuntimeError:
    # No running loop in this (Streamlit script) thread — create and
    # install one so async clients have a loop to schedule on.
    asyncio.set_event_loop(asyncio.new_event_loop())
# ─────────────────────────── UI / SETTINGS ────────────────────────
st.set_page_config(page_title="Groq Chatbot", page_icon="πŸ€–")
st.title("πŸ€– Groq‑powered Advanced Chatbot")
st.caption("DeepSeek‑R1‑Distill‑Llama‑70B β€’ LangChain β€’ Streamlit")

# Sidebar: API key entry plus the two sampling controls read by get_llm().
with st.sidebar:
    st.header("πŸ”‘ Groq API Key")
    groq_key = st.text_input("Paste your key here", type="password")
    st.divider()
    temperature = st.slider("Temperature", min_value=0.0, max_value=1.2,
                            value=0.7, step=0.1)
    top_p = st.slider("Top‑p", min_value=0.0, max_value=1.0,
                      value=1.0, step=0.05)
    st.markdown("*All values remain local to your browser.*")

# The chat box at the bottom of the page; None until the user submits.
user_q = st.chat_input("Type your message…")
# ────────────────────────── LLM (lazy init) ─────────────────────────
MODEL_NAME = "deepseek-r1-distill-llama-70b"


def get_llm():
    """Return the session-cached ChatGroq client, creating it on first use.

    The client is rebuilt whenever the API key changes (the original
    cached the first key forever, so correcting a bad key in the sidebar
    had no effect), and the sampling parameters are refreshed from the
    sidebar sliders on every call.

    Returns:
        ChatGroq: the ready-to-use chat model.

    Raises:
        ValueError: if no key was supplied via the sidebar or GROQ_API_KEY.
    """
    key = groq_key or os.getenv("GROQ_API_KEY")
    if not key:
        raise ValueError("Add your Groq key in the sidebar.")
    # (Re)build the client on first use or when the key changed.
    if "llm" not in st.session_state or st.session_state.get("llm_key") != key:
        os.environ["GROQ_API_KEY"] = key  # for the client
        st.session_state.llm = ChatGroq(
            model=MODEL_NAME,
            groq_api_key=key,
            temperature=temperature,
            top_p=top_p,
        )
        st.session_state.llm_key = key
    # Refresh sampling params in case the sliders moved since creation.
    llm = st.session_state.llm
    llm.temperature = temperature
    llm.top_p = top_p
    return llm
# ───────────────────────── conversation memory ──────────────────────
# Seed the per-session transcript with the system prompt exactly once;
# setdefault is a no-op on reruns where the history already exists.
st.session_state.setdefault(
    "history",
    [SystemMessage(content="You are an advanced, helpful assistant.")],
)
# ──────────────────────────── main loop ─────────────────────────────
if user_q:
    st.session_state.history.append(HumanMessage(content=user_q))
    try:
        # Generate only — rendering is done once by the history loop
        # below. (The original also rendered the answer here, so the
        # assistant bubble appeared twice, and above the user's turn.)
        with st.spinner("Thinking…"):
            answer = get_llm().invoke(st.session_state.history).content
        st.session_state.history.append(AIMessage(content=answer))
    except Exception as err:
        # Drop the unanswered user turn so a retry doesn't duplicate it.
        st.session_state.history.pop()
        st.error(f"**Error:** {err}")
# ──────────────────────── display chat history ──────────────────────
# Re-render the whole transcript on every rerun, skipping the system
# prompt at index 0. Anything that isn't a HumanMessage is shown as the
# assistant — identical to the original role test.
for entry in st.session_state.history[1:]:
    speaker = "user" if isinstance(entry, HumanMessage) else "assistant"
    with st.chat_message(speaker):
        st.markdown(entry.content)