vsj0702 committed on
Commit
819c84b
·
verified ·
1 Parent(s): c153469

making chatbot.py beautiful

Browse files
Files changed (1) hide show
  1. chatbot.py +92 -39
chatbot.py CHANGED
@@ -6,12 +6,12 @@ from langchain_core.output_parsers import StrOutputParser
6
  import edge_tts
7
  import asyncio
8
  import os
9
- from typing import Optional
 
10
  GROQ_API_KEY = os.getenv('GROQ_API_KEY')
11
 
12
 
13
  class CodeAssistantBot:
14
-
15
  def __init__(self):
16
  self.client = Groq(api_key=GROQ_API_KEY)
17
  self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
@@ -20,22 +20,21 @@ class CodeAssistantBot:
20
  self.analysis_prompt = ChatPromptTemplate.from_messages([
21
  ("system",
22
  """You are an expert code assistant. Analyze the code and context provided,
23
- then give clear, helpful responses. Keep responses concise and focused on the code."""
24
- ),
25
  ("user", """Code: {code}
26
  Output: {output}
27
  Error: {error}
28
  Question: {question}""")
29
  ])
30
 
31
- self.summary_prompt = ChatPromptTemplate.from_messages([(
32
- "system",
33
- """Summarize the conversation focusing on key technical points and insights.
34
- Keep it brief and clear."""
35
- ), ("user", "Conversation: {conversation}")])
 
36
 
37
- def analyze_code(self, code: str, output: str, error: str,
38
- question: str) -> str:
39
  try:
40
  parser = StrOutputParser()
41
  chain = self.analysis_prompt | self.model | parser
@@ -52,8 +51,7 @@ class CodeAssistantBot:
52
  try:
53
  parser = StrOutputParser()
54
  chain = self.summary_prompt | self.model | parser
55
- formatted_conv = "\n".join(
56
- [f"Q: {q}\nA: {a}" for q, a in conversation])
57
  return chain.invoke({'conversation': formatted_conv})
58
  except Exception as e:
59
  return f"Could not generate summary: {str(e)}"
@@ -66,41 +64,96 @@ async def text_to_speech(text: str, filename: str):
66
 
67
 
68
  def render_chatbot(code: str, output: str, error: str):
69
- # your imports, CSS, bot init
70
-
71
- # 1. Ensure session state keys exist
72
- st.session_state.setdefault("conversation", [])
73
- st.session_state.setdefault("audio_count", 0)
74
-
75
- # 2. **Input area first**
76
- col1, col2 = st.columns([4, 1])
77
- with col1:
78
- # give this a unique key so it doesn't reset on rerun
79
- user_q = st.text_input("Ask your Question here", key="chat_input", placeholder="Type your question…")
80
- with col2:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  send = st.button("🚀")
82
 
83
- # 3. **Handle send**
84
- if send and user_q:
85
  bot = CodeAssistantBot()
86
- resp = bot.analyze_code(code, output, error, user_q)
87
- st.session_state.conversation.append((user_q, resp))
88
-
89
- # optional: summary+TTS logic…
90
 
91
- # 4. **Now** render the scrollable chat container
92
  st.markdown('<div class="chat-container">', unsafe_allow_html=True)
93
  for q, a in st.session_state.conversation:
94
- st.markdown(f'<div class="chat-message user-message">You: {q}</div>', unsafe_allow_html=True)
95
- st.markdown(f'<div class="chat-message bot-message">Assistant: {a}</div>', unsafe_allow_html=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  st.markdown('</div>', unsafe_allow_html=True)
97
 
98
- # 5. Auto‑scroll script (as you had it)
99
  st.markdown("""
100
  <script>
101
- const el = window.parent.document.querySelector('.chat-container');
102
- if(el) el.scrollTop = el.scrollHeight;
103
  </script>
104
  """, unsafe_allow_html=True)
105
-
106
-
 
6
  import edge_tts
7
  import asyncio
8
  import os
9
+ from html import escape
10
+
11
  GROQ_API_KEY = os.getenv('GROQ_API_KEY')
12
 
13
 
14
  class CodeAssistantBot:
 
15
  def __init__(self):
16
  self.client = Groq(api_key=GROQ_API_KEY)
17
  self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
 
20
  self.analysis_prompt = ChatPromptTemplate.from_messages([
21
  ("system",
22
  """You are an expert code assistant. Analyze the code and context provided,
23
+ then give clear, helpful responses. Keep responses concise and focused on the code."""),
 
24
  ("user", """Code: {code}
25
  Output: {output}
26
  Error: {error}
27
  Question: {question}""")
28
  ])
29
 
30
+ self.summary_prompt = ChatPromptTemplate.from_messages([
31
+ ("system",
32
+ """Summarize the conversation focusing on key technical points and insights.
33
+ Keep it brief and clear."""),
34
+ ("user", "Conversation: {conversation}")
35
+ ])
36
 
37
+ def analyze_code(self, code: str, output: str, error: str, question: str) -> str:
 
38
  try:
39
  parser = StrOutputParser()
40
  chain = self.analysis_prompt | self.model | parser
 
51
  try:
52
  parser = StrOutputParser()
53
  chain = self.summary_prompt | self.model | parser
54
+ formatted_conv = "\n".join([f"Q: {q}\nA: {a}" for q, a in conversation])
 
55
  return chain.invoke({'conversation': formatted_conv})
56
  except Exception as e:
57
  return f"Could not generate summary: {str(e)}"
 
64
 
65
 
66
  def render_chatbot(code: str, output: str, error: str):
67
+ """Render the chatbot UI in a polished, scrollable panel with support for code blocks."""
68
+ # --- session state ---
69
+ st.session_state.setdefault('conversation', [])
70
+ st.session_state.setdefault('audio_count', 0)
71
+
72
+ # --- CSS styling ---
73
+ st.markdown("""
74
+ <style>
75
+ .chat-container {
76
+ display: flex;
77
+ flex-direction: column;
78
+ gap: 0.75rem;
79
+ max-height: 480px;
80
+ overflow-y: auto;
81
+ padding: 1rem;
82
+ border-radius: 8px;
83
+ background-color: inherit;
84
+ border: 1px solid;
85
+ animation: fadeIn 0.5s ease;
86
+ }
87
+ .chat-message {
88
+ max-width: 90%;
89
+ padding: 0.75rem 1rem;
90
+ border-radius: 12px;
91
+ position: relative;
92
+ animation: popIn 0.3s ease;
93
+ }
94
+ .user-message {
95
+ background: rgba(100, 149, 237, 0.2);
96
+ align-self: flex-end;
97
+ }
98
+ .bot-message {
99
+ background: rgba(200, 200, 200, 0.2);
100
+ align-self: flex-start;
101
+ }
102
+ .chat-message pre {
103
+ background: rgba(0,0,0,0.1);
104
+ padding: 0.5rem;
105
+ border-radius: 4px;
106
+ overflow-x: auto;
107
+ }
108
+ @keyframes fadeIn {
109
+ from { opacity: 0; } to { opacity: 1; }
110
+ }
111
+ @keyframes popIn {
112
+ from { transform: scale(0.95); opacity: 0; }
113
+ to { transform: scale(1); opacity: 1; }
114
+ }
115
+ </style>
116
+ """, unsafe_allow_html=True)
117
+
118
+ # --- input area ---
119
+ cols = st.columns([4, 1], gap='small')
120
+ with cols[0]:
121
+ question = st.text_input("Ask your question…", key="chat_input")
122
+ with cols[1]:
123
  send = st.button("🚀")
124
 
125
+ # --- handle send ---
126
+ if send and question:
127
  bot = CodeAssistantBot()
128
+ answer = bot.analyze_code(code, output, error, question)
129
+ st.session_state.conversation.append((question, answer))
130
+ # summary & tts can go here
 
131
 
132
+ # --- render chat ---
133
  st.markdown('<div class="chat-container">', unsafe_allow_html=True)
134
  for q, a in st.session_state.conversation:
135
+ # user bubble
136
+ html_q = f'<div class="chat-message user-message">{escape(q)}</div>'
137
+ st.markdown(html_q, unsafe_allow_html=True)
138
+ # bot bubble: convert code fences to <pre>
139
+ def render_bot(text):
140
+ parts = text.split('```')
141
+ html = ''
142
+ for i, part in enumerate(parts):
143
+ if i % 2 == 0:
144
+ html += escape(part)
145
+ else:
146
+ code_block = escape(part)
147
+ html += f'<pre><code>{code_block}</code></pre>'
148
+ return html
149
+ html_a = f'<div class="chat-message bot-message">{render_bot(a)}</div>'
150
+ st.markdown(html_a, unsafe_allow_html=True)
151
  st.markdown('</div>', unsafe_allow_html=True)
152
 
153
+ # --- auto-scroll ---
154
  st.markdown("""
155
  <script>
156
+ const c = window.parent.document.querySelector('.chat-container');
157
+ if(c) { c.scrollTop = c.scrollHeight; }
158
  </script>
159
  """, unsafe_allow_html=True)