Talha812 committed on
Commit
a6c8df6
·
verified ·
1 Parent(s): 9ddd099

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -6
app.py CHANGED
@@ -2,19 +2,41 @@ import os
2
  import gradio as gr
3
  from groq import Groq
4
 
5
- # Load Groq API Key (HF Spaces uses environment variables)
6
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  def chat_with_groq(user_input, chat_history):
9
  if not user_input.strip():
10
  return chat_history, ""
11
 
 
 
12
  messages = []
13
  for u, a in chat_history:
14
- messages += [
15
- {"role": "user", "content": u},
16
- {"role": "assistant", "content": a},
17
- ]
18
 
19
  messages.append({"role": "user", "content": user_input})
20
 
@@ -23,6 +45,7 @@ def chat_with_groq(user_input, chat_history):
23
  model="llama-3.3-70b-versatile",
24
  messages=messages,
25
  )
 
26
  assistant_response = chat_completion.choices[0].message.content
27
  chat_history.append((user_input, assistant_response))
28
 
@@ -31,7 +54,7 @@ def chat_with_groq(user_input, chat_history):
31
 
32
  return chat_history, ""
33
 
34
- # UI Styling (Accessible + Responsive)
35
  custom_css = """
36
  .gradio-container {
37
  max-width: 1000px;
 
2
  import gradio as gr
3
  from groq import Groq
4
 
5
+ # Initialize Groq client
6
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
7
 
8
def normalize_chat_history(chat_history):
    """Coerce *chat_history* into a list of (user, assistant) tuples.

    Accepts both the pair format (list/tuple of length 2) and the
    role/content dict format that Gradio chat components may return.
    An assistant dict with no preceding user turn is silently dropped.
    """
    pairs = []
    for entry in chat_history:
        if isinstance(entry, dict):
            role = entry.get("role")
            if role == "user":
                # Start a new turn; assistant half is filled in later.
                pairs.append((entry.get("content"), ""))
            elif role == "assistant" and pairs:
                # Fold the assistant reply into the most recent user turn.
                prev_user = pairs[-1][0]
                pairs[-1] = (prev_user, entry.get("content"))
        elif isinstance(entry, (list, tuple)) and len(entry) == 2:
            pairs.append(tuple(entry))
    return pairs
27
+
28
  def chat_with_groq(user_input, chat_history):
29
  if not user_input.strip():
30
  return chat_history, ""
31
 
32
+ chat_history = normalize_chat_history(chat_history)
33
+
34
  messages = []
35
  for u, a in chat_history:
36
+ if u:
37
+ messages.append({"role": "user", "content": u})
38
+ if a:
39
+ messages.append({"role": "assistant", "content": a})
40
 
41
  messages.append({"role": "user", "content": user_input})
42
 
 
45
  model="llama-3.3-70b-versatile",
46
  messages=messages,
47
  )
48
+
49
  assistant_response = chat_completion.choices[0].message.content
50
  chat_history.append((user_input, assistant_response))
51
 
 
54
 
55
  return chat_history, ""
56
 
57
+ # UI Styling
58
  custom_css = """
59
  .gradio-container {
60
  max-width: 1000px;