AiCoderv2 committed on
Commit
4e4cfe7
·
verified ·
1 Parent(s): 4ce4cbc

Update Gradio app with multiple files

Browse files
Files changed (2) hide show
  1. README.md +0 -2
  2. app.py +11 -10
README.md CHANGED
@@ -7,8 +7,6 @@ sdk: gradio
7
  sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
10
- tags:
11
- - anycoder
12
  ---
13
 
14
  # AI Chatbot with Hugging Face Model
 
7
  sdk_version: 4.44.0
8
  app_file: app.py
9
  pinned: false
 
 
10
  ---
11
 
12
  # AI Chatbot with Hugging Face Model
app.py CHANGED
@@ -5,28 +5,29 @@ import os
5
  # Load the conversational model with HF token support
6
  # Using DialoGPT-medium for a larger, more capable chatbot
7
  token = os.getenv('HF_TOKEN')
8
- chatbot_model = pipeline("conversational", model="microsoft/DialoGPT-medium", token=token)
9
 
10
  def chat(message, history):
11
  # Build conversation string from history
12
  conversation_text = ""
13
  for user_msg, bot_msg in history:
14
  if user_msg:
15
- conversation_text += f"User: {user_msg}\n"
16
  if bot_msg:
17
- conversation_text += f"Bot: {bot_msg}\n"
18
 
19
  # Add current user message
20
- conversation_text += f"User: {message}\nBot:"
21
 
22
  # Generate response
23
- result = chatbot_model(conversation_text)
24
 
25
- # Extract the response (assuming the pipeline returns a string or dict)
26
- if isinstance(result, list) and len(result) > 0:
27
- response = result[0]['generated_text'].split("Bot:")[-1].strip()
28
- else:
29
- response = str(result).split("Bot:")[-1].strip()
 
30
 
31
  return response
32
 
 
5
# --- Model setup -----------------------------------------------------------
# HF access token read from the environment; None is fine for public models,
# a real token is needed for gated/private checkpoints.
token = os.getenv('HF_TOKEN')

# DialoGPT-medium (the larger, more capable chat checkpoint) served through
# the generic text-generation pipeline.
chatbot_model = pipeline(
    "text-generation",
    model="microsoft/DialoGPT-medium",
    token=token,
)
 
10
def chat(message, history):
    """Generate a chatbot reply for *message* given the prior conversation.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior turns as (user_msg, bot_msg) pairs, as Gradio's chat components
        supply them; either element of a pair may be empty.

    Returns
    -------
    str
        The model's reply text (may be empty if the model emits nothing).
    """
    # DialoGPT was trained with turns separated by its EOS token — not an
    # arbitrary sentinel like "@@PADDING@@", which the model has never seen.
    # Using the real separator keeps the prompt on-distribution.
    sep = "<|endoftext|>"

    # Flatten the history plus the new message into EOS-separated turns.
    turns = []
    for user_msg, bot_msg in history:
        if user_msg:
            turns.append(user_msg)
        if bot_msg:
            turns.append(bot_msg)
    turns.append(message)
    prompt = sep.join(turns) + sep

    result = chatbot_model(
        prompt,
        # Bound the *reply* length; the old max_length=1000 counted the prompt
        # too, so a long history could leave no room to generate anything.
        max_new_tokens=200,
        num_return_sequences=1,
        temperature=0.8,
        do_sample=True,
        pad_token_id=50256,  # GPT-2 family has no pad token; reuse the EOS id
    )

    # The text-generation pipeline echoes the prompt before the continuation:
    # slice the prompt off, then stop at the first EOS the model emits.
    generated_text = result[0]['generated_text']
    continuation = generated_text[len(prompt):]
    response = continuation.split(sep)[0].strip()
    return response
33