AiCoderv2 committed on
Commit
c79d862
·
verified ·
1 Parent(s): b849bd7

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +80 -67
app.py CHANGED
@@ -1,68 +1,93 @@
1
  import gradio as gr
2
- import openai
3
  import time
4
  import os
5
  from typing import List, Dict
 
6
 
7
  class ChatbotHandler:
8
  def __init__(self):
9
- self.client = None
10
- self.model = "gpt-4o-mini" # Using a cost-effective model
11
- self.max_tokens = 500
 
 
12
  self.temperature = 0.7
13
  self.system_prompt = """You are a helpful, friendly, and knowledgeable AI assistant.
14
  You provide clear, accurate, and thoughtful responses. You are engaging and try to be
15
  helpful while being honest about your limitations. Always maintain a positive and
16
  supportive tone in your conversations."""
17
 
18
- # Initialize with API key from environment variable
19
- api_key = os.getenv("OPENAI_API_KEY")
20
- if api_key:
21
- self.initialize_client(api_key)
22
 
23
- def initialize_client(self, api_key: str):
24
- """Initialize the OpenAI client with the provided API key."""
25
  try:
26
- self.client = openai.OpenAI(api_key=api_key)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  return True
28
  except Exception as e:
29
- print(f"Error initializing OpenAI client: {str(e)}")
30
  return False
31
 
32
  def get_response(self, message: str, history: List[Dict]) -> str:
33
  """Get response from the model."""
34
- if not self.client:
35
- return "OpenAI API key not configured. Please contact the administrator."
36
 
37
  try:
38
- # Prepare conversation history
39
- messages = [{"role": "system", "content": self.system_prompt}]
40
 
41
- # Add history (limit to last 10 messages to avoid context limit)
42
- for msg in history[-10:]:
43
  if msg["role"] == "user":
44
- messages.append({"role": "user", "content": msg["content"]})
45
  elif msg["role"] == "assistant":
46
- messages.append({"role": "assistant", "content": msg["content"]})
47
 
48
  # Add current message
49
- messages.append({"role": "user", "content": message})
50
 
51
- # Get response from OpenAI
52
- response = self.client.chat.completions.create(
53
- model=self.model,
54
- messages=messages,
55
- max_tokens=self.max_tokens,
56
- temperature=self.temperature,
57
- stream=True
58
  )
59
 
60
- # Stream the response
61
- full_response = ""
62
- for chunk in response:
63
- if chunk.choices[0].delta.content is not None:
64
- full_response += chunk.choices[0].delta.content
65
- yield full_response
 
 
 
 
 
 
 
66
 
67
  except Exception as e:
68
  yield f"Error generating response: {str(e)}"
@@ -75,8 +100,8 @@ def respond_stream(message: str, history: List[Dict]):
75
  if not message.strip():
76
  return "", history
77
 
78
- # Check if API is initialized
79
- if not chat_handler.client:
80
  return "", history + [{"role": "assistant", "content": "The chatbot is not properly configured. Please try again later."}]
81
 
82
  # Add user message
@@ -97,30 +122,29 @@ def clear_history():
97
  """Clear the chat history."""
98
  return []
99
 
100
- def update_model_settings(model, temp, tokens):
101
  """Update model settings."""
102
- chat_handler.model = model
103
  chat_handler.temperature = temp
104
- chat_handler.max_tokens = tokens
105
- return f"Settings updated: {model}, temp={temp}, max_tokens={tokens}"
106
 
107
  # Create the interface
108
- with gr.Blocks(theme=gr.themes.Soft(), title="AI Chatbot with OpenAI") as demo:
109
 
110
  # Header
111
  gr.HTML("""
112
  <div style='text-align: center; padding: 20px;'>
113
  <h1>🤖 AI Chatbot</h1>
114
- <p style='color: #666;'>Powered by OpenAI GPT-4o-mini • Built with <a href='https://huggingface.co/spaces/akhaliq/anycoder' target='_blank' style='color: #007bff; text-decoration: none;'>anycoder</a></p>
115
  </div>
116
  """)
117
 
118
  # Status indicator
119
- if chat_handler.client:
120
  status_msg = "✅ Chatbot is ready! Start chatting below."
121
  status_color = "#28a745"
122
  else:
123
- status_msg = "❌ Chatbot configuration error. Please check API key."
124
  status_color = "#dc3545"
125
 
126
  gr.HTML(f"""
@@ -132,27 +156,21 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Chatbot with OpenAI") as demo:
132
  # Model settings
133
  with gr.Accordion("Settings", open=False):
134
  with gr.Row():
135
- model_choice = gr.Dropdown(
136
- choices=["gpt-4o-mini", "gpt-4o", "gpt-4-turbo", "gpt-4"],
137
- value="gpt-4o-mini",
138
- label="Model",
139
- info="Choose your preferred GPT model"
140
- )
141
  temperature = gr.Slider(
142
- minimum=0.0,
143
  maximum=2.0,
144
  value=0.7,
145
  step=0.1,
146
  label="Temperature",
147
  info="Higher values make responses more creative"
148
  )
149
- max_tokens = gr.Slider(
150
- minimum=100,
151
  maximum=2000,
152
- value=500,
153
- step=50,
154
- label="Max Tokens",
155
- info="Maximum length of response"
156
  )
157
 
158
  # Chatbot component
@@ -198,7 +216,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Chatbot with OpenAI") as demo:
198
  # Footer
199
  gr.HTML("""
200
  <div style='text-align: center; padding: 10px; color: #888; font-size: 0.9em;'>
201
- <p>This chatbot uses OpenAI's GPT models. Usage may incur costs based on OpenAI's pricing.</p>
202
  </div>
203
  """)
204
 
@@ -220,25 +238,20 @@ with gr.Blocks(theme=gr.themes.Soft(), title="AI Chatbot with OpenAI") as demo:
220
  clear_btn.click(clear_history, outputs=chatbot)
221
 
222
  # Update model settings
223
- model_choice.change(
224
- update_model_settings,
225
- inputs=[model_choice, temperature, max_tokens],
226
- outputs=[]
227
- )
228
  temperature.change(
229
  update_model_settings,
230
- inputs=[model_choice, temperature, max_tokens],
231
  outputs=[]
232
  )
233
- max_tokens.change(
234
  update_model_settings,
235
- inputs=[model_choice, temperature, max_tokens],
236
  outputs=[]
237
  )
238
 
239
  # Refresh settings (useful for debugging)
240
  refresh_btn.click(
241
- lambda: f"Settings: {chat_handler.model}, temp={chat_handler.temperature}, max_tokens={chat_handler.max_tokens}",
242
  outputs=[]
243
  )
244
 
 
1
  import gradio as gr
 
2
  import time
3
  import os
4
  from typing import List, Dict
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
6
 
7
class ChatbotHandler:
    """Wraps a local DialoGPT text-generation pipeline for the Gradio chat UI."""

    def __init__(self):
        # Large conversational model from the Hugging Face hub.
        self.model_name = "microsoft/DialoGPT-large"
        self.tokenizer = None
        self.model = None
        self.chat_pipeline = None
        self.max_length = 1000   # maximum context length passed to the pipeline
        self.temperature = 0.7   # sampling temperature
        self.system_prompt = """You are a helpful, friendly, and knowledgeable AI assistant.
You provide clear, accurate, and thoughtful responses. You are engaging and try to be
helpful while being honest about your limitations. Always maintain a positive and
supportive tone in your conversations."""

        # Load eagerly so the UI can report readiness at startup.
        self.initialize_model()

    def initialize_model(self):
        """Load tokenizer + model and build the text-generation pipeline.

        Returns:
            True on success, False if loading failed (the error is printed).
        """
        try:
            print("Loading model... This may take a few minutes.")
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(self.model_name)

            # DialoGPT ships without a pad token; fall back to EOS so the
            # pipeline can pad without raising.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.chat_pipeline = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                # Use GPU if available, otherwise CPU (-1).
                device=0 if os.getenv("CUDA_VISIBLE_DEVICES") else -1,
                max_length=self.max_length,
                temperature=self.temperature,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )
            print("Model loaded successfully!")
            return True
        except Exception as e:
            print(f"Error loading model: {str(e)}")
            return False

    def get_response(self, message: str, history: List[Dict]) -> str:
        """Yield a progressively longer response string (simulated streaming).

        Args:
            message: The latest user message.
            history: Chat history as a list of ``{"role", "content"}`` dicts.

        Yields:
            The response text accumulated word by word.
        """
        if not self.chat_pipeline:
            # BUG FIX: this function is a generator (it contains ``yield``),
            # so the original ``return "Model not loaded..."`` merely ended
            # iteration and the message never reached the caller/UI.
            yield "Model not loaded. Please try again later."
            return

        try:
            # Flatten the conversation into a single prompt string.
            conversation = self.system_prompt + "\n"

            # Add recent history (limit to last 5 messages to avoid blowing
            # the model's context window).
            for msg in history[-5:]:
                if msg["role"] == "user":
                    conversation += f"User: {msg['content']}\n"
                elif msg["role"] == "assistant":
                    conversation += f"Assistant: {msg['content']}\n"

            # Add the current message and cue the model to answer.
            conversation += f"User: {message}\nAssistant:"

            outputs = self.chat_pipeline(
                conversation,
                max_new_tokens=150,  # limit response length
                num_return_sequences=1,
                return_full_text=False,
            )

            response = outputs[0]['generated_text'].strip()

            # Strip a leading "Assistant:" echo if the model produced one
            # (use len() instead of the former magic slice ``[10:]``).
            if response.startswith("Assistant:"):
                response = response[len("Assistant:"):].strip()

            # Simulate streaming by yielding growing prefixes word by word.
            current_response = ""
            for word in response.split():
                current_response += word + " "
                yield current_response.strip()
                time.sleep(0.05)  # small delay for streaming effect

        except Exception as e:
            yield f"Error generating response: {str(e)}"
 
100
  if not message.strip():
101
  return "", history
102
 
103
+ # Check if model is initialized
104
+ if not chat_handler.chat_pipeline:
105
  return "", history + [{"role": "assistant", "content": "The chatbot is not properly configured. Please try again later."}]
106
 
107
  # Add user message
 
122
  """Clear the chat history."""
123
  return []
124
 
125
def update_model_settings(temp, max_len):
    """Apply new generation settings to the shared chat handler.

    Args:
        temp: Sampling temperature for generation.
        max_len: Maximum context length for the pipeline.

    Returns:
        A human-readable status string reflecting the applied settings.
    """
    chat_handler.temperature = temp
    chat_handler.max_length = max_len
    # BUG FIX: the status message previously hard-coded "23,952" instead of
    # interpolating the actual max_len value that was just applied.
    return f"Settings updated: temp={temp}, max_length={max_len}"
130
 
131
  # Create the interface
132
+ with gr.Blocks(theme=gr.themes.Soft(), title="AI Chatbot with DialoGPT") as demo:
133
 
134
  # Header
135
  gr.HTML("""
136
  <div style='text-align: center; padding: 20px;'>
137
  <h1>🤖 AI Chatbot</h1>
138
+ <p style='color: #666;'>Powered by DialoGPT-Large • Built with <a href='https://huggingface.co/spaces/akhaliq/anycoder' target='_blank' style='color: #007bff; text-decoration: none;'>anycoder</a></p>
139
  </div>
140
  """)
141
 
142
  # Status indicator
143
+ if chat_handler.chat_pipeline:
144
  status_msg = "✅ Chatbot is ready! Start chatting below."
145
  status_color = "#28a745"
146
  else:
147
+ status_msg = "❌ Model loading error. Please refresh the page."
148
  status_color = "#dc3545"
149
 
150
  gr.HTML(f"""
 
156
  # Model settings
157
  with gr.Accordion("Settings", open=False):
158
  with gr.Row():
 
 
 
 
 
 
159
  temperature = gr.Slider(
160
+ minimum=0.1,
161
  maximum=2.0,
162
  value=0.7,
163
  step=0.1,
164
  label="Temperature",
165
  info="Higher values make responses more creative"
166
  )
167
+ max_length = gr.Slider(
168
+ minimum=500,
169
  maximum=2000,
170
+ value=1000,
171
+ step=100,
172
+ label="Max Length",
173
+ info="Maximum context length"
174
  )
175
 
176
  # Chatbot component
 
216
  # Footer
217
  gr.HTML("""
218
  <div style='text-align: center; padding: 10px; color: #888; font-size: 0.9em;'>
219
+ <p>This chatbot uses Microsoft's DialoGPT-Large model from Hugging Face. It's completely free to use!</p>
220
  </div>
221
  """)
222
 
 
238
  clear_btn.click(clear_history, outputs=chatbot)
239
 
240
  # Update model settings
 
 
 
 
 
241
  temperature.change(
242
  update_model_settings,
243
+ inputs=[temperature, max_length],
244
  outputs=[]
245
  )
246
+ max_length.change(
247
  update_model_settings,
248
+ inputs=[temperature, max_length],
249
  outputs=[]
250
  )
251
 
252
  # Refresh settings (useful for debugging)
253
  refresh_btn.click(
254
+ lambda: f"Settings: temp={chat_handler.temperature}, max_length={chat_handler.max_length}",
255
  outputs=[]
256
  )
257