import streamlit as st
from groq import Groq
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from html import escape
import edge_tts
import asyncio
import os
import uuid
# Groq API key read from the environment. If the variable is unset this is
# None, and the failure surfaces later when the Groq/ChatGroq clients are
# created — TODO confirm that is the desired failure mode.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
class CodeAssistantBot:
    """LLM-backed assistant for a code-chat UI.

    Provides three capabilities, each driven by its own prompt template:
      * ``analyze_code``    — answer a user question about code/output/error.
      * ``narrate_response``— rewrite a technical answer for spoken narration.
      * ``summary_prompt``  — used externally (see ``render_chatbot``) to
        condense the running conversation into a short summary.

    The prompt | model | parser chains are assembled once here so that each
    call pays only for the model invocation, not for rebuilding the pipeline.
    """

    def __init__(self):
        # NOTE(review): `self.client` is never read anywhere in the visible
        # code — only the LangChain `ChatGroq` model is used. Kept so any
        # external caller that reaches `bot.client` keeps working.
        self.client = Groq(api_key=GROQ_API_KEY)
        self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a skilled coding assistant. Use the following context and user input to help."
             " Refer to previous summary and recent interactions to make answers accurate."
             " Keep your response short, relevant, and conversational."),
            ("user",
             "Code: {code}\nOutput: {output}\nError: {error}\n"
             "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
        ])
        self.summary_prompt = ChatPromptTemplate.from_messages([
            ("system", "Summarize key technical points from the conversation so far."),
            ("user", "Conversation: {conversation}")
        ])
        self.voice_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a friendly narrator voice bot. Given a technical answer and its context,"
             " explain it aloud like you're helping someone understand the topic clearly and confidently."
             " Keep it spoken, not codey. Emphasize pauses, clarity, and voice modulation."),
            ("user",
             "Code: {code}\nOutput: {output}\nError: {error}\n"
             "Conversation so far: {summary}\nAnswer to explain: {answer}")
        ])
        # Build the runnable chains once instead of on every method call.
        self._parser = StrOutputParser()
        self._analysis_chain = self.analysis_prompt | self.model | self._parser
        self._narration_chain = self.voice_prompt | self.model | self._parser

    def analyze_code(self, code, output, error, question, summary="", history=None):
        """Answer *question* about the given code/output/error.

        Args:
            code, output, error: the user's program context (strings).
            question: the user's current question.
            summary: optional running summary of the conversation.
            history: optional list of (question, answer) pairs; only the
                last four are included in the prompt to bound its size.

        Returns:
            The model's answer as a plain string.
        """
        recent = "\n".join(
            f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]
        )
        return self._analysis_chain.invoke({
            'code': code,
            'output': output,
            'error': error,
            'summary': summary,
            'recent': recent,
            'question': question
        })

    def narrate_response(self, code, output, error, answer, summary=""):
        """Rewrite *answer* as a spoken-style narration string.

        Takes the same code/output/error context plus the answer to
        explain; returns the narration text produced by the model.
        """
        return self._narration_chain.invoke({
            'code': code,
            'output': output,
            'error': error,
            'summary': summary,
            'answer': answer
        })
async def text_to_speech(text, filename, voice="fr-FR-VivienneMultilingualNeural"):
    """Synthesize *text* to an audio file (MP3) using edge-tts.

    Args:
        text: the narration to speak.
        filename: output path for the generated audio file.
        voice: edge-tts voice name. Defaults to the previously hard-coded
            multilingual French voice so existing callers are unaffected;
            new callers may now pick any voice.
    """
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(filename)
def _format_response(txt):
    """Convert a model answer containing ``` fenced code into safe HTML.

    Splitting on ``` makes odd-indexed segments code blocks. A leading
    language-tag line (e.g. "python") is stripped before escaping.
    NOTE(review): ``isalpha()`` misses tags like "python3" or "c++", which
    would then be rendered inside the code block — confirm intended.
    """
    parts = txt.split('```')
    result = ''
    for j, part in enumerate(parts):
        if j % 2 == 1:
            lines = part.splitlines()
            if lines and lines[0].isalpha():
                lines = lines[1:]
            code_html = escape("\n".join(lines))
            result += f'<pre><code>{code_html}</code></pre>'
        else:
            result += escape(part)
    return result


def render_chatbot(code, output, error):
    """Render the code-assistant chat panel in Streamlit.

    Args:
        code, output, error: the user's program context, forwarded to
            ``CodeAssistantBot`` for every question and narration.

    Side effects: reads/writes ``st.session_state`` keys ``conversation``
    (list of (question, answer) pairs), ``chat_summary`` (rolling summary),
    and ``chat_display_count`` (pagination window size).
    """
    # NOTE(review): the .chat-container rules below are defined, but no
    # element with that class is ever rendered, so both the CSS and the
    # autoscroll <script> at the bottom appear to be no-ops — confirm.
    st.markdown("""
    <style>
    .chat-container {
        max-height: 60vh;
        overflow-y: auto;
        padding-right: 0.5rem;
        border: 1px solid #ddd;
        border-radius: 8px;
        margin-top: 1rem;
        padding: 1rem;
        background-color: #f9f9f9;
    }
    .chat-message {
        margin-bottom: 1rem;
        word-wrap: break-word;
    }
    .user-message {
        font-weight: bold;
        color: #1a73e8;
    }
    .bot-message pre {
        background-color: #f0f0f0;
        padding: 0.5rem;
        border-radius: 5px;
        overflow-x: auto;
    }
    </style>
    """, unsafe_allow_html=True)

    st.session_state.setdefault('conversation', [])
    st.session_state.setdefault('chat_summary', "")
    st.session_state.setdefault('chat_display_count', 5)

    c1, c2 = st.columns([4, 1], gap='small')
    with c1:
        question = st.text_input("Ask something about your code...", key="chat_input")
    with c2:
        # NOTE(review): "π" looks like a mojibake'd emoji label (likely a
        # send icon) — kept byte-identical; restore the intended glyph.
        send = st.button("π")

    if send and question:
        bot = CodeAssistantBot()
        history = st.session_state.conversation[-4:]
        summary = st.session_state.chat_summary
        response = bot.analyze_code(code, output, error, question, summary, history)
        st.session_state.conversation.append((question, response))
        # Jump back to showing only the newest messages after each send.
        st.session_state.chat_display_count = 5
        if len(st.session_state.conversation) >= 3:
            # Best-effort rolling summary over the last 10 exchanges; a
            # summarization failure must never break the chat UI.
            try:
                full_chat = "\n".join(
                    f"User: {q}\nBot: {a}"
                    for q, a in st.session_state.conversation[-10:]
                )
                summarizer = bot.summary_prompt | bot.model | StrOutputParser()
                st.session_state.chat_summary = summarizer.invoke({'conversation': full_chat})
            except Exception:
                pass

    # Paginate: show only the last `chat_display_count` exchanges,
    # newest first.
    total = len(st.session_state.conversation)
    start = max(0, total - st.session_state.chat_display_count)
    visible = st.session_state.conversation[start:]
    for i, (q, a) in enumerate(reversed(visible)):
        st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
        formatted = _format_response(a)
        st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
        # Narrate this answer aloud (label also mojibake'd — see above).
        speak_btn = st.button(f"π Narrate #{i+1}")
        if speak_btn:
            bot = CodeAssistantBot()
            narration = bot.narrate_response(code, output, error, a, st.session_state.chat_summary)
            # TODO(review): generated MP3s are never deleted — they
            # accumulate in the working directory.
            audio_filename = f"audio_{uuid.uuid4().hex}.mp3"
            asyncio.run(text_to_speech(narration, audio_filename))
            st.audio(audio_filename, format="audio/mp3", autoplay=True)

    if start > 0 and st.button("π½ Show more"):
        st.session_state.chat_display_count += 5
        st.rerun()

    st.markdown("""
    <script>
    const c = window.parent.document.querySelector('.chat-container');
    if (c) c.scrollTop = c.scrollHeight;
    </script>
    """, unsafe_allow_html=True)