Spaces:
Build error
Build error
# app.py — Streamlit + LangChain + Groq
import asyncio
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_groq import ChatGroq
# ───────────────────────── bootstrap event loop ─────────────────────────
# Streamlit executes the script in a worker thread that has no default
# asyncio loop; the async HTTP clients used by langchain_groq expect one.
if os.name == "nt":
    # Must be set *before* any loop is created, so new_event_loop() below
    # yields a selector-based loop (the default Proactor loop breaks some
    # client libraries on Windows). The original set the policy after the
    # loop was already created, which left a Proactor loop installed.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
try:
    asyncio.get_running_loop()
except RuntimeError:
    # No loop is running in this thread — install a fresh one.
    asyncio.set_event_loop(asyncio.new_event_loop())
# ─────────────────────────── UI / SETTINGS ───────────────────────────
# NOTE(review): user-facing strings were mojibake in the pasted source
# ("π€", "Groqβpowered", …); restored to the evidently intended text.
st.set_page_config("Groq Chatbot", "🤖")
st.title("🤖 Groq-powered Advanced Chatbot")
st.caption("DeepSeek-R1-Distill-Llama-70B • LangChain • Streamlit")

with st.sidebar:
    st.header("🔑 Groq API Key")
    groq_key = st.text_input("Paste your key here", type="password")
    st.divider()
    # Sampling controls; re-read on every rerun and pushed into the LLM.
    temperature = st.slider("Temperature", 0.0, 1.2, 0.7, 0.1)
    top_p = st.slider("Top-p", 0.0, 1.0, 1.0, 0.05)
    st.markdown("*All values remain local to your browser.*")

user_q = st.chat_input("Type your message…")
# ────────────────────────── LLM (lazy init) ─────────────────────────
MODEL_NAME = "deepseek-r1-distill-llama-70b"


def get_llm():
    """Return a cached ChatGroq client, (re)building it when the key changes.

    Returns:
        ChatGroq: client configured with the current sidebar sampling values.

    Raises:
        ValueError: if no API key is available from the sidebar or environment.
    """
    # The original imported load_dotenv but never called it, so a key in a
    # local .env file was never picked up despite the os.getenv fallback.
    load_dotenv()
    key = groq_key or os.getenv("GROQ_API_KEY")
    if not key:
        raise ValueError("Add your Groq key in the sidebar.")
    # Rebuild when first used OR when the user pastes a different key;
    # the original cached forever and silently ignored a corrected key.
    if "llm" not in st.session_state or st.session_state.get("llm_key") != key:
        os.environ["GROQ_API_KEY"] = key  # some transports read the env var
        st.session_state.llm = ChatGroq(
            model=MODEL_NAME,
            groq_api_key=key,
            temperature=temperature,
            top_p=top_p,
        )
        st.session_state.llm_key = key
    # The sliders may have moved since the client was built — push the
    # current sampling parameters every call.
    llm = st.session_state.llm
    llm.temperature = temperature
    llm.top_p = top_p
    return llm
# ───────────────────────── conversation memory ──────────────────────
# Seed the transcript with the system prompt; setdefault leaves an
# existing history untouched across Streamlit reruns.
st.session_state.setdefault(
    "history",
    [SystemMessage(content="You are an advanced, helpful assistant.")],
)
# ──────────────────────────── main loop ─────────────────────────────
if user_q:
    st.session_state.history.append(HumanMessage(content=user_q))
    try:
        with st.spinner("Thinking…"):
            answer = get_llm().invoke(st.session_state.history).content
        # Append only; the history renderer below draws every message
        # exactly once. (The original also rendered the answer here in its
        # own chat bubble, so each reply appeared twice on screen.)
        st.session_state.history.append(AIMessage(content=answer))
    except Exception as err:  # top-level UI boundary: surface, don't crash
        st.error(f"**Error:** {err}")
# ──────────────────────── display chat history ──────────────────────
for msg in st.session_state.history[1:]:  # index 0 is the hidden system prompt
    is_user = isinstance(msg, HumanMessage)
    with st.chat_message("user" if is_user else "assistant"):
        st.markdown(msg.content)