vsj0702 committed on
Commit
3e4e649
·
verified ·
1 Parent(s): 4ef5001

reverting change

Browse files
Files changed (1) hide show
  1. chatbot.py +65 -68
chatbot.py CHANGED
@@ -1,69 +1,75 @@
1
  import streamlit as st
 
 
 
 
2
  from html import escape
3
  import edge_tts
4
  import asyncio
5
  import os
6
  import uuid
7
- from openai import OpenAI
8
 
9
- # Set your OpenRouter API key as env var OPENROUTER_API_KEY
10
- OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
11
 
12
  class CodeAssistantBot:
13
  def __init__(self):
14
- if not OPENROUTER_API_KEY:
15
- raise ValueError("Set your OPENROUTER_API_KEY environment variable!")
16
- self.client = OpenAI(
17
- base_url="https://openrouter.ai/api/v1",
18
- api_key=OPENROUTER_API_KEY,
19
- )
20
- self.model_name = "qwen/qwen-2.5-72b-instruct:free"
21
-
22
- def analyze_code(self, code, input_, output, error, question, summary="", history=None):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
24
- prompt_text = (
25
- "You are a skilled coding assistant. Use the following context and user input to help.\n"
26
- "Refer to previous summary and recent interactions to make answers accurate.\n"
27
- "Keep your response short, relevant, and conversational.\n\n"
28
- f"Code: {code}\nInput: {input_}\nOutput: {output}\nError: {error}\n"
29
- f"Summary: {summary}\nRecent: {recent}\nQuestion: {question}"
30
- )
31
-
32
- response = self.client.chat.completions.create(
33
- model=self.model_name,
34
- messages=[
35
- {"role": "system", "content": "You are a skilled coding assistant."},
36
- {"role": "user", "content": prompt_text},
37
- ],
38
- temperature=0.6,
39
- )
40
- return response.choices[0].message.content
41
-
42
- def narrate_response(self, code, input_, output, error, answer, summary=""):
43
- prompt_text = (
44
- "You are a friendly narrator voice bot. Given a technical answer and its context, "
45
- "explain it aloud like you're helping someone understand the topic clearly and confidently. "
46
- "Keep your response conversational and short, not too long, but not too short.\n\n"
47
- f"Code: {code}\nInput: {input_}\nOutput: {output}\nError: {error}\n"
48
- f"Conversation so far: {summary}\nAnswer to explain: {answer}"
49
- )
50
-
51
- response = self.client.chat.completions.create(
52
- model=self.model_name,
53
- messages=[
54
- {"role": "system", "content": "You are a friendly narrator."},
55
- {"role": "user", "content": prompt_text},
56
- ],
57
- temperature=0.6,
58
- )
59
- return response.choices[0].message.content
60
 
61
async def text_to_speech(text, filename):
    """Synthesize *text* to an audio file at *filename* via edge-tts.

    Uses a fixed multilingual French voice; the output format is whatever
    ``edge_tts.Communicate.save`` writes for the given filename (callers in
    this file pass ``.mp3`` paths).
    """
    voice = "fr-FR-VivienneMultilingualNeural"
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(filename)
65
 
66
- def render_chatbot(code, input_, output, error):
67
  st.markdown("""
68
  <style>
69
  .chat-container {
@@ -108,26 +114,15 @@ def render_chatbot(code, input_, output, error):
108
  bot = CodeAssistantBot()
109
  history = st.session_state.conversation[-4:]
110
  summary = st.session_state.chat_summary
111
- response = bot.analyze_code(code, input_, output, error, question, summary, history)
112
  st.session_state.conversation.append((question, response))
113
  st.session_state.chat_display_count = 5
114
  if len(st.session_state.conversation) >= 3:
115
  try:
116
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
117
- # For summary, you can just call analyze_code with a summarization prompt or implement separately
118
- summary_prompt = (
119
- "Summarize key technical points from the conversation so far.\n\n" + full_chat
120
- )
121
- summary_response = bot.client.chat.completions.create(
122
- model=bot.model_name,
123
- messages=[
124
- {"role": "system", "content": "Summarize key technical points from the conversation."},
125
- {"role": "user", "content": summary_prompt},
126
- ],
127
- temperature=0.3,
128
- )
129
- st.session_state.chat_summary = summary_response.choices[0].message.content
130
- except Exception:
131
  pass
132
 
133
  total = len(st.session_state.conversation)
@@ -138,7 +133,8 @@ def render_chatbot(code, input_, output, error):
138
  st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
139
 
140
  def format_response(txt):
141
- parts = txt.split('```')
 
142
  result = ''
143
  for j, part in enumerate(parts):
144
  if j % 2 == 1:
@@ -154,6 +150,7 @@ def render_chatbot(code, input_, output, error):
154
  formatted = format_response(a)
155
  st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
156
 
 
157
  audio_file = st.session_state.narrated_audio.get((q, a))
158
 
159
  if not audio_file:
@@ -161,7 +158,7 @@ def render_chatbot(code, input_, output, error):
161
  status_placeholder = st.empty()
162
  status_placeholder.info("🧠 Generating narration...")
163
  bot = CodeAssistantBot()
164
- narration = bot.narrate_response(code, input_, output, error, a, st.session_state.chat_summary)
165
  status_placeholder.info("🎙️ Converting to audio...")
166
  audio_file = f"audio_{uuid.uuid4().hex}.mp3"
167
  asyncio.run(text_to_speech(narration, audio_file))
@@ -173,11 +170,11 @@ def render_chatbot(code, input_, output, error):
173
 
174
  if start > 0 and st.button("🔽 Show more"):
175
  st.session_state.chat_display_count += 5
176
- st.experimental_rerun()
177
 
178
  st.markdown("""
179
  <script>
180
  const c = window.parent.document.querySelector('.chat-container');
181
  if (c) c.scrollTop = c.scrollHeight;
182
  </script>
183
- """, unsafe_allow_html=True)
 
1
  import streamlit as st
2
+ from groq import Groq
3
+ from langchain_groq import ChatGroq
4
+ from langchain_core.prompts import ChatPromptTemplate
5
+ from langchain_core.output_parsers import StrOutputParser
6
  from html import escape
7
  import edge_tts
8
  import asyncio
9
  import os
10
  import uuid
 
11
 
12
+ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
 
13
 
14
class CodeAssistantBot:
    """Coding-assistant chatbot backed by Groq, with prompts built as
    LangChain ``ChatPromptTemplate`` objects and executed as LCEL chains
    (prompt | model | parser).
    """

    def __init__(self):
        # Raw Groq client. NOTE(review): not used by the methods below (they
        # go through ChatGroq); kept as a public attribute in case external
        # code reads ``bot.client``.
        self.client = Groq(api_key=GROQ_API_KEY)
        # LangChain chat model wrapper used by all chains in this class.
        self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
        # Prompt for answering a coding question with full run context.
        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a skilled coding assistant. Use the following context and user input to help."
             " Refer to previous summary and recent interactions to make answers accurate."
             " Keep your response short, relevant, and conversational."),
            ("user",
             "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
             "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
        ])
        # Prompt for condensing the running conversation into a summary
        # (consumed by render_chatbot via ``bot.summary_prompt``).
        self.summary_prompt = ChatPromptTemplate.from_messages([
            ("system", "Summarize key technical points from the conversation so far."),
            ("user", "Conversation: {conversation}")
        ])
        # Prompt for turning a technical answer into spoken-style narration.
        self.voice_prompt = ChatPromptTemplate.from_messages([
            ("system",
             "You are a friendly narrator voice bot. Given a technical answer and its context,"
             " explain it aloud like you're helping someone understand the topic clearly and confidently."
             " Keep your response conversational and short not too long, but not over short."),
            ("user",
             "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
             "Conversation so far: {summary}\nAnswer to explain: {answer}")
        ])

    def analyze_code(self, code, input, output, error, question, summary="", history=None):
        """Answer *question* about the user's code using run context plus
        conversation memory; returns the model's reply as a string.

        NOTE(review): the ``input`` parameter shadows the builtin of the same
        name — kept as-is for caller compatibility.
        """
        parser = StrOutputParser()
        # Flatten the last 4 exchanges into a "User: .../Bot: ..." transcript.
        recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
        chain = self.analysis_prompt | self.model | parser
        return chain.invoke({
            'code': code,
            'input': input,
            'output': output,
            'error': error,
            'summary': summary,
            'recent': recent,
            'question': question
        })

    def narrate_response(self, code, input, output, error, answer, summary=""):
        """Rewrite *answer* as short spoken-style narration text (for TTS),
        given the same run context as ``analyze_code``.
        """
        parser = StrOutputParser()
        narration_chain = self.voice_prompt | self.model | parser
        return narration_chain.invoke({
            'code': code,
            'input': input,
            'output': output,
            'error': error,
            'summary': summary,
            'answer': answer
        })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
async def text_to_speech(text, filename):
    """Render *text* to an audio file at *filename* using edge-tts.

    The voice is fixed to Microsoft's multilingual French neural voice;
    callers in this file pass ``.mp3`` output paths.
    """
    tts = edge_tts.Communicate(text, "fr-FR-VivienneMultilingualNeural")
    await tts.save(filename)
71
 
72
+ def render_chatbot(code, input, output, error):
73
  st.markdown("""
74
  <style>
75
  .chat-container {
 
114
  bot = CodeAssistantBot()
115
  history = st.session_state.conversation[-4:]
116
  summary = st.session_state.chat_summary
117
+ response = bot.analyze_code(code, input, output, error, question, summary, history)
118
  st.session_state.conversation.append((question, response))
119
  st.session_state.chat_display_count = 5
120
  if len(st.session_state.conversation) >= 3:
121
  try:
122
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
123
+ summarizer = bot.summary_prompt | bot.model | StrOutputParser()
124
+ st.session_state.chat_summary = summarizer.invoke({'conversation': full_chat})
125
+ except:
 
 
 
 
 
 
 
 
 
 
 
126
  pass
127
 
128
  total = len(st.session_state.conversation)
 
133
  st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
134
 
135
  def format_response(txt):
136
+ parts = txt.split('
137
+ ')
138
  result = ''
139
  for j, part in enumerate(parts):
140
  if j % 2 == 1:
 
150
  formatted = format_response(a)
151
  st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
152
 
153
+ # Check if already narrated
154
  audio_file = st.session_state.narrated_audio.get((q, a))
155
 
156
  if not audio_file:
 
158
  status_placeholder = st.empty()
159
  status_placeholder.info("🧠 Generating narration...")
160
  bot = CodeAssistantBot()
161
+ narration = bot.narrate_response(code, input, output, error, a, st.session_state.chat_summary)
162
  status_placeholder.info("🎙️ Converting to audio...")
163
  audio_file = f"audio_{uuid.uuid4().hex}.mp3"
164
  asyncio.run(text_to_speech(narration, audio_file))
 
170
 
171
  if start > 0 and st.button("🔽 Show more"):
172
  st.session_state.chat_display_count += 5
173
+ st.rerun()
174
 
175
  st.markdown("""
176
  <script>
177
  const c = window.parent.document.querySelector('.chat-container');
178
  if (c) c.scrollTop = c.scrollHeight;
179
  </script>
180
+ """, unsafe_allow_html=True)