"""Streamlit code-assistant chatbot backed by a Groq-hosted LLM via LangChain.

NOTE(review): this file arrived whitespace-collapsed (the whole module on one
line) and the HTML chat markup inside ``render_chatbot`` was truncated.  The
rendering code below is reconstructed from the surviving fragments
(``format_response``, ``result += escape(part)``, ``unsafe_allow_html=True``)
— confirm the exact CSS class names / markup against the original templates.
"""

import asyncio
import os
from html import escape

import edge_tts
import streamlit as st
from groq import Groq
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

# Read once at import time; None when the variable is unset.
GROQ_API_KEY = os.getenv('GROQ_API_KEY')


class CodeAssistantBot:
    """Thin wrapper around a Groq LLM for answering questions about code."""

    def __init__(self):
        self.client = Groq(api_key=GROQ_API_KEY)
        self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
        # Prompt used by analyze_code: code/output/error context plus the question.
        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system", "You are an expert code assistant. Keep responses concise."),
            ("user", "Code: {code}\nOutput: {output}\nError: {error}\nQuestion: {question}"),
        ])
        # Prompt for summarizing a whole conversation (no caller visible in this file).
        self.summary_prompt = ChatPromptTemplate.from_messages([
            ("system", "Summarize key technical points."),
            ("user", "Conversation: {conversation}"),
        ])

    def analyze_code(self, code, output, error, question):
        """Run the analysis prompt through the model.

        Returns the model's text answer, or an ``"Error: …"`` string on any
        failure — deliberate best-effort so the UI shows the problem instead
        of crashing.
        """
        try:
            chain = self.analysis_prompt | self.model | StrOutputParser()
            return chain.invoke({
                'code': code,
                'output': output,
                'error': error,
                'question': question,
            })
        except Exception as e:  # surfaced as chat text rather than raised
            return f"Error: {e}"


async def text_to_speech(text, filename):
    """Synthesize *text* to an audio file at *filename* using edge-tts."""
    voice = "fr-FR-VivienneMultilingualNeural"
    await edge_tts.Communicate(text, voice).save(filename)


def render_chatbot(code, output, error):
    """Render the chatbot UI with code-block support and no deprecated APIs."""
    st.session_state.setdefault('conversation', [])
    st.session_state.setdefault('audio_count', 0)

    # Input row: text field plus a send button side by side.
    c1, c2 = st.columns([4, 1], gap='small')
    with c1:
        question = st.text_input("Ask your question…", key="chat_input")
    with c2:
        send = st.button("🚀")

    # Handle send: ask the model and append the (question, answer) pair.
    if send and question:
        bot = CodeAssistantBot()
        response = bot.analyze_code(code, output, error, question)
        st.session_state.conversation.append((question, response))

    # Chat container — NOTE(review): markup reconstructed; original was truncated.
    st.markdown('<div class="chat-container">', unsafe_allow_html=True)

    def format_response(text):
        """HTML-escape *text*, rendering ``` fenced segments as <pre><code> blocks."""
        parts = text.split("```")
        result = ""
        for i, part in enumerate(parts):
            if i % 2 == 1:
                # Odd-indexed parts sit between fences → treat as code.
                code_html = f"<pre><code>{escape(part)}</code></pre>"
                result += code_html
            else:
                result += escape(part)
        return result

    for q, a in st.session_state.conversation:
        formatted = format_response(a)
        st.markdown(
            f'<div class="chat-message user">{escape(q)}</div>'
            f'<div class="chat-message bot">{formatted}</div>',
            unsafe_allow_html=True,
        )
    st.markdown('</div>', unsafe_allow_html=True)