vsj0702 committed on
Commit
e72507a
·
verified ·
1 Parent(s): e27a97d

Replacing Groq with OpenRouter

Browse files
Files changed (1) hide show
  1. chatbot.py +36 -15
chatbot.py CHANGED
@@ -1,20 +1,22 @@
1
  import streamlit as st
2
- from groq import Groq
3
- from langchain_groq import ChatGroq
4
  from langchain_core.prompts import ChatPromptTemplate
5
- from langchain_core.output_parsers import StrOutputParser
6
  from html import escape
7
  import edge_tts
8
  import asyncio
9
  import os
10
  import uuid
11
 
12
- GROQ_API_KEY = os.getenv("GROQ_API_KEY")
 
13
 
14
  class CodeAssistantBot:
15
  def __init__(self):
16
- self.client = Groq(api_key=GROQ_API_KEY)
17
- self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
 
 
 
18
  self.analysis_prompt = ChatPromptTemplate.from_messages([
19
  ("system",
20
  "You are a skilled coding assistant. Use the following context and user input to help."
@@ -24,10 +26,12 @@ class CodeAssistantBot:
24
  "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
25
  "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
26
  ])
 
27
  self.summary_prompt = ChatPromptTemplate.from_messages([
28
  ("system", "Summarize key technical points from the conversation so far."),
29
  ("user", "Conversation: {conversation}")
30
  ])
 
31
  self.voice_prompt = ChatPromptTemplate.from_messages([
32
  ("system",
33
  "You are a friendly narrator voice bot. Given a technical answer and its context,"
@@ -39,10 +43,8 @@ class CodeAssistantBot:
39
  ])
40
 
41
  def analyze_code(self, code, input, output, error, question, summary="", history=None):
42
- parser = StrOutputParser()
43
  recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
44
- chain = self.analysis_prompt | self.model | parser
45
- return chain.invoke({
46
  'code': code,
47
  'input': input,
48
  'output': output,
@@ -51,11 +53,14 @@ class CodeAssistantBot:
51
  'recent': recent,
52
  'question': question
53
  })
 
 
 
 
 
54
 
55
  def narrate_response(self, code, input, output, error, answer, summary=""):
56
- parser = StrOutputParser()
57
- narration_chain = self.voice_prompt | self.model | parser
58
- return narration_chain.invoke({
59
  'code': code,
60
  'input': input,
61
  'output': output,
@@ -63,12 +68,29 @@ class CodeAssistantBot:
63
  'summary': summary,
64
  'answer': answer
65
  })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
  async def text_to_speech(text, filename):
68
  voice = "fr-FR-VivienneMultilingualNeural"
69
  communicate = edge_tts.Communicate(text, voice)
70
  await communicate.save(filename)
71
 
 
72
  def render_chatbot(code, input, output, error):
73
  st.markdown("""
74
  <style>
@@ -117,11 +139,11 @@ def render_chatbot(code, input, output, error):
117
  response = bot.analyze_code(code, input, output, error, question, summary, history)
118
  st.session_state.conversation.append((question, response))
119
  st.session_state.chat_display_count = 5
 
120
  if len(st.session_state.conversation) >= 3:
121
  try:
122
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
123
- summarizer = bot.summary_prompt | bot.model | StrOutputParser()
124
- st.session_state.chat_summary = summarizer.invoke({'conversation': full_chat})
125
  except:
126
  pass
127
 
@@ -149,7 +171,6 @@ def render_chatbot(code, input, output, error):
149
  formatted = format_response(a)
150
  st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
151
 
152
- # Check if already narrated
153
  audio_file = st.session_state.narrated_audio.get((q, a))
154
 
155
  if not audio_file:
 
1
  import streamlit as st
2
+ from openai import OpenAI
 
3
  from langchain_core.prompts import ChatPromptTemplate
 
4
  from html import escape
5
  import edge_tts
6
  import asyncio
7
  import os
8
  import uuid
9
 
10
+ OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
11
+
12
 
13
  class CodeAssistantBot:
14
  def __init__(self):
15
+ self.client = OpenAI(
16
+ base_url="https://openrouter.ai/api/v1",
17
+ api_key=OPENROUTER_API_KEY
18
+ )
19
+
20
  self.analysis_prompt = ChatPromptTemplate.from_messages([
21
  ("system",
22
  "You are a skilled coding assistant. Use the following context and user input to help."
 
26
  "Code: {code}\nInput: {input}\nOutput: {output}\nError: {error}\n"
27
  "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
28
  ])
29
+
30
  self.summary_prompt = ChatPromptTemplate.from_messages([
31
  ("system", "Summarize key technical points from the conversation so far."),
32
  ("user", "Conversation: {conversation}")
33
  ])
34
+
35
  self.voice_prompt = ChatPromptTemplate.from_messages([
36
  ("system",
37
  "You are a friendly narrator voice bot. Given a technical answer and its context,"
 
43
  ])
44
 
45
  def analyze_code(self, code, input, output, error, question, summary="", history=None):
 
46
  recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
47
+ prompt = self.analysis_prompt.format_messages({
 
48
  'code': code,
49
  'input': input,
50
  'output': output,
 
53
  'recent': recent,
54
  'question': question
55
  })
56
+ completion = self.client.chat.completions.create(
57
+ model="qwen/qwen3-coder:free",
58
+ messages=[m.dict() for m in prompt]
59
+ )
60
+ return completion.choices[0].message.content.strip()
61
 
62
  def narrate_response(self, code, input, output, error, answer, summary=""):
63
+ prompt = self.voice_prompt.format_messages({
 
 
64
  'code': code,
65
  'input': input,
66
  'output': output,
 
68
  'summary': summary,
69
  'answer': answer
70
  })
71
+ completion = self.client.chat.completions.create(
72
+ model="qwen/qwen3-coder:free",
73
+ messages=[m.dict() for m in prompt]
74
+ )
75
+ return completion.choices[0].message.content.strip()
76
+
77
+ def summarize_conversation(self, conversation):
78
+ prompt = self.summary_prompt.format_messages({
79
+ 'conversation': conversation
80
+ })
81
+ completion = self.client.chat.completions.create(
82
+ model="qwen/qwen3-coder:free",
83
+ messages=[m.dict() for m in prompt]
84
+ )
85
+ return completion.choices[0].message.content.strip()
86
+
87
 
88
async def text_to_speech(text, filename):
    """Synthesize *text* to speech with Edge TTS and save it to *filename*."""
    tts = edge_tts.Communicate(text, "fr-FR-VivienneMultilingualNeural")
    await tts.save(filename)
92
 
93
+
94
  def render_chatbot(code, input, output, error):
95
  st.markdown("""
96
  <style>
 
139
  response = bot.analyze_code(code, input, output, error, question, summary, history)
140
  st.session_state.conversation.append((question, response))
141
  st.session_state.chat_display_count = 5
142
+
143
  if len(st.session_state.conversation) >= 3:
144
  try:
145
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
146
+ st.session_state.chat_summary = bot.summarize_conversation(full_chat)
 
147
  except:
148
  pass
149
 
 
171
  formatted = format_response(a)
172
  st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
173
 
 
174
  audio_file = st.session_state.narrated_audio.get((q, a))
175
 
176
  if not audio_file: