Ansa12 commited on
Commit
8bb6d2c
·
verified ·
1 Parent(s): bf7330c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +195 -0
app.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from groq import Groq

# Initialize the Groq client.
# SECURITY FIX: the original embedded a live API key directly in source,
# which leaks it to anyone who can read the file (and this file is public).
# Read it from the environment instead; set GROQ_API_KEY before launching.
client = Groq(api_key=os.environ.get("GROQ_API_KEY", ""))
6
+
7
def chat_with_groq(message, history, model_choice, temperature, max_tokens):
    """Send the running conversation plus the new message to the Groq API.

    Args:
        message: The latest user message (str).
        history: Completed turns as (user_text, assistant_text) pairs.
        model_choice: Groq model identifier to use.
        temperature: Sampling temperature forwarded to the API.
        max_tokens: Response length cap forwarded to the API.

    Returns:
        The assistant's reply text, or an "Error: ..." string if anything
        goes wrong (the function never raises).
    """
    try:
        # Flatten the (user, assistant) pairs into the role/content dicts
        # the chat-completions endpoint expects, then add the new message.
        conversation = [
            {"role": role, "content": text}
            for user_turn, assistant_turn in history
            for role, text in (("user", user_turn), ("assistant", assistant_turn))
        ]
        conversation.append({"role": "user", "content": message})

        completion = client.chat.completions.create(
            messages=conversation,
            model=model_choice,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=1,
            stream=False,
        )
        return completion.choices[0].message.content
    except Exception as e:
        # Surface the failure as chat text rather than crashing the UI.
        return f"Error: {str(e)}"
40
+
41
# Available Groq models offered in the UI dropdown.
# NOTE(review): these IDs must match what Groq currently serves; deprecated
# model IDs fail only at request time with an API error — verify against
# Groq's published model list.
available_models = [
    "llama-3.1-8b-instant",
    "llama-3.1-70b-versatile",
    "mixtral-8x7b-32768",
    "gemma2-9b-it"
]
48
+
49
def predict(message, chat_history, model_choice, temperature, max_tokens):
    """Append the user's message and the model's reply to chat_history.

    A whitespace-only message is a no-op: the history is returned unchanged
    and no API call is made. Otherwise the Groq reply is fetched and the
    (message, reply) pair is appended in place.

    NOTE(review): this helper is not wired to any UI event below — it
    appears to be unused dead code; confirm before removing.
    """
    if not message.strip():
        return chat_history

    reply = chat_with_groq(
        message, chat_history, model_choice, temperature, max_tokens
    )
    chat_history.append((message, reply))
    return chat_history
63
+
64
# Custom CSS injected into the Gradio page: gives the chat area a minimum
# height and centers the layout with a capped width.
custom_css = """
#chatbot {
    min-height: 500px;
}
.container {
    max-width: 1200px;
    margin: auto;
}
"""
74
+
75
# ---------------------------------------------------------------------------
# UI layout and event wiring.
# ---------------------------------------------------------------------------
with gr.Blocks(
    theme=gr.themes.Soft(),
    title="Groq AI Chatbot",
    css=custom_css
) as demo:

    gr.Markdown(
        """
        # 🤖 Groq AI Chatbot
        Fast AI-powered chatbot powered by Groq API

        **Note**: This chatbot uses Groq's inference API for fast responses.
        """
    )

    # Conversation display; holds (user, assistant) message pairs.
    chatbot = gr.Chatbot(
        label="Conversation",
        height=500,
        show_copy_button=True,
        elem_id="chatbot"
    )

    with gr.Row():
        msg = gr.Textbox(
            label="Your Message",
            placeholder="Type your message here and press Enter...",
            lines=2,
            scale=4,
            container=False
        )

    with gr.Row():
        send_btn = gr.Button("Send 🚀", variant="primary", size="lg")
        clear_btn = gr.Button("Clear Chat 🗑️", variant="secondary")

    # Generation settings, collapsed by default.
    with gr.Accordion("⚙️ Model Settings", open=False):
        with gr.Row():
            model_choice = gr.Dropdown(
                choices=available_models,
                value="llama-3.1-8b-instant",
                label="Select Model",
                info="Choose which AI model to use"
            )
        with gr.Row():
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature",
                info="Controls creativity: Lower = more deterministic, Higher = more creative"
            )
            max_tokens = gr.Slider(
                minimum=100,
                maximum=4096,
                value=1024,
                step=100,
                label="Max Tokens",
                info="Maximum length of the response"
            )

    def user_message(user_msg, history):
        """Stage the user's turn: clear the textbox and show the message
        immediately with a None assistant slot (pending reply)."""
        return "", history + [[user_msg, None]]

    def bot_message(history, model, temp, tokens):
        """Fill the pending assistant slot of the last turn via Groq.

        Only completed turns (history[:-1]) are sent as context, because
        the last pair's assistant half is still None.
        """
        user_msg = history[-1][0]
        bot_msg = chat_with_groq(user_msg, history[:-1], model, temp, tokens)
        history[-1][1] = bot_msg
        return history

    # Enter in the textbox: stage the user turn, then fetch the reply.
    msg.submit(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot]
    ).then(
        bot_message,
        inputs=[chatbot, model_choice, temperature, max_tokens],
        outputs=[chatbot]
    )

    # Send button runs the same two-step pipeline.
    send_btn.click(
        user_message,
        inputs=[msg, chatbot],
        outputs=[msg, chatbot]
    ).then(
        bot_message,
        inputs=[chatbot, model_choice, temperature, max_tokens],
        outputs=[chatbot]
    )

    # FIX: the original cleared the chat twice — once with `lambda: None`
    # and again via a chained `.then(lambda: [], ...)`. A single handler
    # returning an empty history is sufficient and avoids the extra round
    # trip.
    clear_btn.click(
        lambda: [],
        None,
        [chatbot],
        queue=False
    )

    gr.Markdown(
        """
        ### 💡 About the Models:
        - **Llama 3.1 8B Instant**: Fast and efficient for general conversations
        - **Llama 3.1 70B Versatile**: More capable for complex tasks
        - **Mixtral 8x7B**: Excellent for coding and reasoning tasks
        - **Gemma2 9B**: Balanced performance across various tasks

        ### ⚠️ Important:
        - Your conversations are processed through Groq's API
        - The API key is embedded in this application
        - For personal use only
        """
    )
193
+
194
if __name__ == "__main__":
    # share=True publishes a temporary public gradio.live URL; debug=True
    # streams server logs to the console.
    # NOTE(review): share=True exposes the app (and its API usage) publicly —
    # confirm that is intended beyond personal testing.
    demo.launch(share=True, debug=True)