# Streamlit code-assistant chatbot: Groq/LangChain code analysis with edge-tts audio output.
# Standard library
import asyncio
import os
from typing import Optional

# Third-party
import streamlit as st
from groq import Groq
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
import edge_tts

# Read the Groq API key from the environment once at import time.
# NOTE(review): no validation here — if the variable is unset this is None and
# Groq(api_key=None) will fail later at client construction; confirm the
# deployment environment always sets GROQ_API_KEY.
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
class CodeAssistantBot:
    """LLM-backed assistant that answers questions about a code snippet.

    Wraps a Groq-hosted Llama model behind two LangChain chains: one that
    analyzes code/output/error context to answer a user question, and one
    that summarizes a conversation history.
    """

    def __init__(self):
        # Raw Groq client. NOTE(review): not used by the methods below
        # (ChatGroq builds its own client from the environment); kept so any
        # external caller that reaches for `bot.client` keeps working.
        self.client = Groq(api_key=GROQ_API_KEY)
        self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)

        # Prompt used by analyze_code(): system instructions plus the
        # code/output/error/question context filled in at invoke time.
        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system",
             """You are an expert code assistant. Analyze the code and context provided,
then give clear, helpful responses. Keep responses concise and focused on the code."""
             ),
            ("user", """Code: {code}
Output: {output}
Error: {error}
Question: {question}""")
        ])

        # Prompt used by summarize_conversation().
        self.summary_prompt = ChatPromptTemplate.from_messages([(
            "system",
            """Summarize the conversation focusing on key technical points and insights.
Keep it brief and clear."""
        ), ("user", "Conversation: {conversation}")])

    def analyze_code(self, code: str, output: str, error: str,
                     question: str) -> str:
        """Answer *question* about *code*, given its run output and error.

        Returns the model's answer, or a human-readable error string if
        anything in the chain fails (the method never raises).
        """
        try:
            parser = StrOutputParser()
            chain = self.analysis_prompt | self.model | parser
            return chain.invoke({
                'code': code,
                'output': output,
                'error': error,
                'question': question
            })
        except Exception as e:
            # Deliberate best-effort: surface the failure as chat text
            # instead of crashing the Streamlit UI.
            return f"Sorry, I encountered an error: {str(e)}"

    def summarize_conversation(self, conversation: list) -> str:
        """Summarize a list of (question, answer) pairs into a short recap.

        Returns the summary text, or a human-readable error string if the
        chain fails (the method never raises).
        """
        try:
            parser = StrOutputParser()
            chain = self.summary_prompt | self.model | parser
            # Flatten the history into a plain "Q:/A:" transcript.
            formatted_conv = "\n".join(
                [f"Q: {q}\nA: {a}" for q, a in conversation])
            return chain.invoke({'conversation': formatted_conv})
        except Exception as e:
            return f"Could not generate summary: {str(e)}"
async def text_to_speech(text: str, filename: str,
                         voice: str = "fr-FR-VivienneMultilingualNeural") -> None:
    """Synthesize *text* to an audio file using Microsoft Edge TTS.

    Args:
        text: The text to speak.
        filename: Output path for the generated audio (e.g. ``reply.mp3``).
        voice: Edge TTS voice name. Defaults to the previously hard-coded
            French multilingual neural voice, so existing callers are
            unaffected; new callers may pick any other Edge TTS voice.
    """
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(filename)
def render_chatbot(code: str, output: str, error: str):
    """Render the chat UI: input row first, then the scrollable transcript."""
    # Guarantee the per-session state we rely on exists before first use.
    st.session_state.setdefault("conversation", [])
    st.session_state.setdefault("audio_count", 0)

    # Input row: question box on the left, send button on the right.
    input_col, button_col = st.columns([4, 1])
    with input_col:
        # A fixed widget key keeps the value stable across Streamlit reruns.
        question = st.text_input("Ask your Question here", key="chat_input", placeholder="Type your question…")
    with button_col:
        submitted = st.button("🚀")

    # On submit, ask the bot and record the exchange before rendering below.
    if submitted and question:
        assistant = CodeAssistantBot()
        answer = assistant.analyze_code(code, output, error, question)
        st.session_state.conversation.append((question, answer))
        # optional: summary+TTS logic…

    # Transcript: one user bubble and one bot bubble per exchange.
    st.markdown('<div class="chat-container">', unsafe_allow_html=True)
    for asked, replied in st.session_state.conversation:
        st.markdown(f'<div class="chat-message user-message">You: {asked}</div>', unsafe_allow_html=True)
        st.markdown(f'<div class="chat-message bot-message">Assistant: {replied}</div>', unsafe_allow_html=True)
    st.markdown('</div>', unsafe_allow_html=True)

    # Keep the transcript pinned to the newest message after each rerun.
    st.markdown("""
    <script>
    const el = window.parent.document.querySelector('.chat-container');
    if(el) el.scrollTop = el.scrollHeight;
    </script>
    """, unsafe_allow_html=True)