Spaces:
Sleeping
Sleeping
adding to get input too
Browse files — chatbot.py: +9 −7
chatbot.py
CHANGED
|
@@ -21,7 +21,7 @@ class CodeAssistantBot:
 21              " Refer to previous summary and recent interactions to make answers accurate."
 22              " Keep your response short, relevant, and conversational."),
 23             ("user",
 24  -           "Code: {code}\nOutput: {output}\nError: {error}\n"
 25              "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
 26          ])
 27          self.summary_prompt = ChatPromptTemplate.from_messages([
|
@@ -34,16 +34,17 @@ class CodeAssistantBot:
 34              " explain it aloud like you're helping someone understand the topic clearly and confidently."
 35              " Keep your response conversational and short not too long, but not over short."),
 36             ("user",
 37  -           "Code: {code}\nOutput: {output}\nError: {error}\n"
 38              "Conversation so far: {summary}\nAnswer to explain: {answer}")
 39          ])
 40
 41  -   def analyze_code(self, code, output, error, question, summary="", history=None):
 42          parser = StrOutputParser()
 43          recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
 44          chain = self.analysis_prompt | self.model | parser
 45          return chain.invoke({
 46              'code': code,
 47              'output': output,
 48              'error': error,
 49              'summary': summary,
|
@@ -51,11 +52,12 @@ class CodeAssistantBot:
 51              'question': question
 52          })
 53
 54  -   def narrate_response(self, code, output, error, answer, summary=""):
 55          parser = StrOutputParser()
 56          narration_chain = self.voice_prompt | self.model | parser
 57          return narration_chain.invoke({
 58              'code': code,
 59              'output': output,
 60              'error': error,
 61              'summary': summary,
|
@@ -67,7 +69,7 @@ async def text_to_speech(text, filename):
 67          communicate = edge_tts.Communicate(text, voice)
 68          await communicate.save(filename)
 69
 70  -   def render_chatbot(code, output, error):
 71          st.markdown("""
 72              <style>
 73              .chat-container {
|
@@ -112,7 +114,7 @@ def render_chatbot(code, output, error):
 112         bot = CodeAssistantBot()
 113         history = st.session_state.conversation[-4:]
 114         summary = st.session_state.chat_summary
 115  -      response = bot.analyze_code(code, output, error, question, summary, history)
 116         st.session_state.conversation.append((question, response))
 117         st.session_state.chat_display_count = 5
 118         if len(st.session_state.conversation) >= 3:
|
@@ -155,7 +157,7 @@ def render_chatbot(code, output, error):
 155         status_placeholder = st.empty()
 156         status_placeholder.info("🧠 Generating narration...")
 157         bot = CodeAssistantBot()
 158  -      narration = bot.narrate_response(code, output, error, a, st.session_state.chat_summary)
 159         status_placeholder.info("🎙️ Converting to audio...")
 160         audio_file = f"audio_{uuid.uuid4().hex}.mp3"
 161         asyncio.run(text_to_speech(narration, audio_file))
|
|
|
 21              " Refer to previous summary and recent interactions to make answers accurate."
 22              " Keep your response short, relevant, and conversational."),
 23             ("user",
 24  +           "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
 25              "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
 26          ])
 27          self.summary_prompt = ChatPromptTemplate.from_messages([
|
|
|
 34              " explain it aloud like you're helping someone understand the topic clearly and confidently."
 35              " Keep your response conversational and short not too long, but not over short."),
 36             ("user",
 37  +           "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
 38              "Conversation so far: {summary}\nAnswer to explain: {answer}")
 39          ])
 40
 41  +   def analyze_code(self, code, input, output, error, question, summary="", history=None):
 42          parser = StrOutputParser()
 43          recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
 44          chain = self.analysis_prompt | self.model | parser
 45          return chain.invoke({
 46              'code': code,
 47  +           'input': input,
 48              'output': output,
 49              'error': error,
 50              'summary': summary,
|
|
|
 52              'question': question
 53          })
 54
 55  +   def narrate_response(self, code, input, output, error, answer, summary=""):
 56          parser = StrOutputParser()
 57          narration_chain = self.voice_prompt | self.model | parser
 58          return narration_chain.invoke({
 59              'code': code,
 60  +           'input': input,
 61              'output': output,
 62              'error': error,
 63              'summary': summary,
|
|
|
 69          communicate = edge_tts.Communicate(text, voice)
 70          await communicate.save(filename)
 71
 72  +   def render_chatbot(code, input, output, error):
 73          st.markdown("""
 74              <style>
 75              .chat-container {
|
|
|
 114         bot = CodeAssistantBot()
 115         history = st.session_state.conversation[-4:]
 116         summary = st.session_state.chat_summary
 117  +      response = bot.analyze_code(code, input, output, error, question, summary, history)
 118         st.session_state.conversation.append((question, response))
 119         st.session_state.chat_display_count = 5
 120         if len(st.session_state.conversation) >= 3:
|
|
|
 157         status_placeholder = st.empty()
 158         status_placeholder.info("🧠 Generating narration...")
 159         bot = CodeAssistantBot()
 160  +      narration = bot.narrate_response(code, input, output, error, a, st.session_state.chat_summary)
 161         status_placeholder.info("🎙️ Converting to audio...")
 162         audio_file = f"audio_{uuid.uuid4().hex}.mp3"
 163         asyncio.run(text_to_speech(narration, audio_file))