Wosqa committed on
Commit
48f2381
·
verified ·
1 Parent(s): fbac762

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -83
app.py CHANGED
@@ -1,84 +1,79 @@
1
- import gradio as gr
2
- import os
3
- import requests
4
-
5
- # Load GROQ API key from Hugging Face Secrets
6
- GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
7
- GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
8
- MODEL_NAME = "llama3-8b-8192"
9
-
10
- # System prompt defines chatbot personality
11
- SYSTEM_PROMPT = """
12
- You are an expert Programming Tutor.
13
- Explain programming concepts in a simple, beginner-friendly way.
14
- Provide examples when helpful and keep answers concise.
15
- """
16
-
17
- # Build messages safely
18
- def build_messages(message, chat_history):
19
- messages = [{"role": "system", "content": SYSTEM_PROMPT}]
20
- if chat_history is not None:
21
- for item in chat_history:
22
- if isinstance(item, tuple) and len(item) == 2:
23
- user_msg = str(item[0])
24
- bot_msg = str(item[1])
25
- messages.append({"role": "user", "content": user_msg})
26
- messages.append({"role": "assistant", "content": bot_msg})
27
- messages.append({"role": "user", "content": str(message)})
28
- return messages
29
-
30
- # Respond function for Gradio
31
- def respond(message, chat_history, temperature):
32
- # Ensure chat_history is list of tuples
33
- safe_history = []
34
- if chat_history is not None:
35
- for item in chat_history:
36
- if isinstance(item, tuple) and len(item) == 2:
37
- safe_history.append((str(item[0]), str(item[1])))
38
-
39
- messages = build_messages(message, safe_history)
40
-
41
- headers = {
42
- "Authorization": f"Bearer {GROQ_API_KEY}",
43
- "Content-Type": "application/json"
44
- }
45
-
46
- response = requests.post(
47
- GROQ_API_URL,
48
- headers=headers,
49
- json={
50
- "model": MODEL_NAME,
51
- "messages": messages,
52
- "temperature": temperature
53
- }
54
- )
55
-
56
- if response.status_code == 200:
57
- bot_reply = response.json()["choices"][0]["message"]["content"]
58
- else:
59
- bot_reply = f"Error {response.status_code}: {response.text}"
60
-
61
- safe_history.append((str(message), str(bot_reply)))
62
- return "", safe_history
63
-
64
- # Gradio UI
65
- with gr.Blocks() as demo:
66
- gr.Markdown("## 💻 Programming Tutor Chatbot (Powered by GROQ)")
67
-
68
- chatbot = gr.Chatbot()
69
- state = gr.State([])
70
-
71
- msg = gr.Textbox(label="Ask a programming question")
72
- temperature = gr.Slider(
73
- minimum=0.1,
74
- maximum=1.0,
75
- value=0.7,
76
- step=0.1,
77
- label="Response Creativity"
78
- )
79
- clear = gr.Button("Clear Chat")
80
-
81
- msg.submit(respond, [msg, state, temperature], [msg, chatbot])
82
- clear.click(lambda: ([], []), None, [chatbot, state])
83
-
84
  demo.launch()
 
1
+ import gradio as gr
2
+ import os
3
+ import requests
4
+
5
+ # Load API key from Hugging Face Secrets
6
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
7
+ GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
8
+ MODEL_NAME = "llama3-8b-8192"
9
+
10
+ SYSTEM_PROMPT = """
11
+ You are an expert Programming Tutor.
12
+ Explain programming concepts in a simple, beginner-friendly way.
13
+ Provide examples when helpful and keep answers concise.
14
+ """
15
+
16
# Build the OpenAI-style message list for a chat-completions request.
def build_messages(user_input, chat_history):
    """Return the message list: system prompt, prior turns, then the new input.

    Parameters:
        user_input: the latest user message (coerced to str).
        chat_history: None or a sequence of (user, bot) pairs. Gradio delivers
            chatbot values / round-tripped state as *lists* of pairs, so both
            list and tuple pairs are accepted — the original tuple-only check
            silently dropped any history that arrived as lists.

    Returns:
        list[dict]: role/content dicts ready for the GROQ API payload.
    """
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]

    # Replay previous turns; malformed entries are skipped rather than crashing.
    for item in chat_history or []:
        if isinstance(item, (list, tuple)) and len(item) == 2:
            messages.append({"role": "user", "content": str(item[0])})
            messages.append({"role": "assistant", "content": str(item[1])})

    # The latest user input goes last so the model answers it.
    messages.append({"role": "user", "content": str(user_input)})
    return messages
31
+
32
# Call the GROQ chat-completions endpoint and return the reply text.
def query_groq(user_input, chat_history, temperature):
    """Send the conversation to GROQ and return the assistant's reply.

    On any failure (missing key, network error, non-200 status) an
    "Error ..." string is returned instead of raising, matching the
    caller's convention of displaying errors in the chat window.
    """
    # Fail fast with a clear message instead of a confusing 401 from the API.
    if not GROQ_API_KEY:
        return "Error: GROQ_API_KEY is not set. Add it to your Space secrets."

    messages = build_messages(user_input, chat_history)

    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": MODEL_NAME,
        "messages": messages,
        "temperature": temperature
    }

    try:
        # timeout prevents a stalled request from hanging the app forever.
        response = requests.post(
            GROQ_API_URL, headers=headers, json=payload, timeout=30
        )
    except requests.RequestException as exc:
        # Network failures previously propagated and crashed the handler.
        return f"Error: request to GROQ failed ({exc})"

    if response.status_code == 200:
        return response.json()["choices"][0]["message"]["content"]
    else:
        return f"Error {response.status_code}: {response.text}"
53
+
54
# Gradio callback: fetch a reply, append the exchange, clear the textbox.
def respond(user_input, chat_history, temperature):
    """Handle one chat turn.

    Parameters:
        user_input: the submitted question.
        chat_history: pair list from Gradio (may be None; pairs may be
            lists or tuples — Gradio hands back lists, which the original
            tuple-only check silently discarded, wiping the history).
        temperature: sampling temperature forwarded to the API.

    Returns:
        ("", updated_history): empty string clears the input textbox;
        the history list refreshes the chatbot display.
    """
    # Normalize history to (str, str) tuples, accepting list pairs too.
    safe_history = []
    for item in chat_history or []:
        if isinstance(item, (list, tuple)) and len(item) == 2:
            safe_history.append((str(item[0]), str(item[1])))

    bot_reply = query_groq(user_input, safe_history, temperature)
    safe_history.append((str(user_input), str(bot_reply)))
    return "", safe_history
64
+
65
# Gradio UI wiring.
with gr.Blocks() as demo:
    gr.Markdown("## 💻 Programming Tutor Chatbot (Powered by GROQ)")

    chatbot = gr.Chatbot()
    # Kept so the Clear button wiring is unchanged; conversation history
    # itself now flows through the chatbot component (see note below).
    state = gr.State([])

    msg = gr.Textbox(label="Ask a programming question")
    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Response Creativity")
    clear = gr.Button("Clear Chat")

    # BUG FIX: the previous wiring read history from `state` but the outputs
    # were only [msg, chatbot], so `state` was never written back — every
    # turn saw an empty history and the model lost all context. Reading the
    # history from `chatbot` (whose value IS the displayed pair list) keeps
    # the full conversation without changing respond()'s signature.
    msg.submit(respond, [msg, chatbot, temperature], [msg, chatbot])
    clear.click(lambda: ([], []), None, [chatbot, state])

demo.launch()