Spaces:
Sleeping
Sleeping
Commit ·
5e98df0
1
Parent(s): 42a68b0
Add version tracking (v2.0.0) and improve error handling for better user experience
Browse files- CONVERSATION_IMPROVEMENTS.md +78 -0
- app.py +9 -2
- chat_interface.py +6 -2
CONVERSATION_IMPROVEMENTS.md
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# π Conversation Quality Improvements
|
| 2 |
+
|
| 3 |
+
## π Summary of Enhancements
|
| 4 |
+
|
| 5 |
+
### 1. **Better AI Model** 🤖
|
| 6 |
+
- **Upgraded from**: `DialoGPT-small`
|
| 7 |
+
- **Upgraded to**: `DialoGPT-medium`
|
| 8 |
+
- **Benefit**: More sophisticated responses, better understanding, improved conversational flow
|
| 9 |
+
|
| 10 |
+
### 2. **Enhanced Response Generation** 🎯
|
| 11 |
+
- **Temperature**: Increased to 0.8 (more creative and varied responses)
|
| 12 |
+
- **Top-p**: Increased to 0.95 (more diverse token selection)
|
| 13 |
+
- **Top-k**: Added 50 (limits to top 50 tokens for quality)
|
| 14 |
+
- **Repetition Penalty**: Reduced to 1.1 (more natural flow)
|
| 15 |
+
- **No Repeat N-grams**: Added size 3 (avoids repeating phrases)
|
| 16 |
+
- **Truncation**: Enabled (prevents errors)
|
| 17 |
+
|
| 18 |
+
### 3. **Improved Memory Context** 💾
|
| 19 |
+
- **Memory Retrieval**: Increased from 3 to 5 memories
|
| 20 |
+
- **Context Display**: Better formatting with numbered memories
|
| 21 |
+
- **Context Integration**: More natural integration into conversation
|
| 22 |
+
- **Memory Extraction**: Enhanced extraction with actual details
|
| 23 |
+
|
| 24 |
+
### 4. **Natural Conversation Prompts** 💬
|
| 25 |
+
- **Prompt Format**: More natural "The user says..." format
|
| 26 |
+
- **Instructions**: Clear guidelines for helpful, conversational responses
|
| 27 |
+
- **Memory Integration**: Better integration of relevant memories
|
| 28 |
+
|
| 29 |
+
### 5. **Enhanced Memory Recording** π
|
| 30 |
+
- **Name Extraction**: Extracts actual names from "my name is"
|
| 31 |
+
- **Location Extraction**: Extracts actual locations from "i live in"
|
| 32 |
+
- **Work/Study Details**: Extracts specific workplaces and fields of study
|
| 33 |
+
- **Birthday Extraction**: Extracts actual birthday details
|
| 34 |
+
- **Favorite Things**: Better extraction of favorite items/activities
|
| 35 |
+
|
| 36 |
+
## 🎯 Expected Improvements
|
| 37 |
+
|
| 38 |
+
### **Conversation Quality**
|
| 39 |
+
- ✅ More natural and engaging responses
|
| 40 |
+
- ✅ Better understanding of context
|
| 41 |
+
- ✅ Reduced repetition and more varied responses
|
| 42 |
+
- ✅ More creative and interesting conversations
|
| 43 |
+
|
| 44 |
+
### **Memory System**
|
| 45 |
+
- ✅ More detailed memory storage
|
| 46 |
+
- ✅ Better memory retrieval and context
|
| 47 |
+
- ✅ More accurate personal information extraction
|
| 48 |
+
- ✅ Enhanced conversation history integration
|
| 49 |
+
|
| 50 |
+
### **User Experience**
|
| 51 |
+
- ✅ More human-like AI responses
|
| 52 |
+
- ✅ Better memory recall and usage
|
| 53 |
+
- ✅ Improved conversational flow
|
| 54 |
+
- ✅ More personalized interactions
|
| 55 |
+
|
| 56 |
+
## 🧪 Testing Your Improvements
|
| 57 |
+
|
| 58 |
+
### **Test Conversation Quality**
|
| 59 |
+
1. Ask complex questions
|
| 60 |
+
2. Have multi-turn conversations
|
| 61 |
+
3. Notice response variety and creativity
|
| 62 |
+
4. Check for natural flow between responses
|
| 63 |
+
|
| 64 |
+
### **Test Memory System**
|
| 65 |
+
1. Share personal details: "My name is John"
|
| 66 |
+
2. Share preferences: "I love pizza and hate mushrooms"
|
| 67 |
+
3. Share locations: "I live in New York"
|
| 68 |
+
4. Ask about previously shared information
|
| 69 |
+
|
| 70 |
+
### **Compare Before/After**
|
| 71 |
+
- **Before**: Basic, repetitive responses
|
| 72 |
+
- **After**: Engaging, varied, context-aware responses
|
| 73 |
+
|
| 74 |
+
## π Your Space is Now Live!
|
| 75 |
+
|
| 76 |
+
**URL**: https://emolivera-memorychat.hf.space
|
| 77 |
+
|
| 78 |
+
The improvements are now deployed and active. Your Memory Chat application should provide significantly better conversation quality and more engaging interactions!
|
app.py
CHANGED
|
@@ -7,6 +7,9 @@ from rich.console import Console
|
|
| 7 |
|
| 8 |
console = Console()
|
| 9 |
|
|
|
|
|
|
|
|
|
|
| 10 |
class MemoryChatApp:
|
| 11 |
"""Main application that combines memory management with Hugging Face chat."""
|
| 12 |
|
|
@@ -115,7 +118,7 @@ class MemoryChatApp:
|
|
| 115 |
"""
|
| 116 |
# Check if model is available
|
| 117 |
if not self.chat_interface.check_model_availability():
|
| 118 |
-
return "I'm sorry, but I couldn't load the AI model. Please check your internet connection."
|
| 119 |
|
| 120 |
# Add user input to conversation history
|
| 121 |
self.conversation_history.append({"role": "user", "content": user_input})
|
|
@@ -218,15 +221,19 @@ Your response: """
|
|
| 218 |
return f"""
|
| 219 |
## Model Information
|
| 220 |
|
|
|
|
| 221 |
**Model:** {info['model_name']}
|
| 222 |
**Device:** {info['device']}
|
| 223 |
**Available:** {'Yes' if info['available'] else 'No'}
|
|
|
|
|
|
|
| 224 |
"""
|
| 225 |
|
| 226 |
def run_gradio_interface(self):
|
| 227 |
"""Run the Gradio interface."""
|
| 228 |
with gr.Blocks(title="Memory Chat") as demo:
|
| 229 |
-
gr.Markdown("# π€ Memory Chat with Hugging Face")
|
|
|
|
| 230 |
|
| 231 |
with gr.Tab("Chat"):
|
| 232 |
chatbot = gr.Chatbot()
|
|
|
|
| 7 |
|
| 8 |
console = Console()
|
| 9 |
|
| 10 |
+
# Version tracking
|
| 11 |
+
APP_VERSION = "v2.0.0 - Enhanced Conversation Quality"
|
| 12 |
+
|
| 13 |
class MemoryChatApp:
|
| 14 |
"""Main application that combines memory management with Hugging Face chat."""
|
| 15 |
|
|
|
|
| 118 |
"""
|
| 119 |
# Check if model is available
|
| 120 |
if not self.chat_interface.check_model_availability():
|
| 121 |
+
return "I'm sorry, but I couldn't load the AI model. Please check your internet connection and model availability."
|
| 122 |
|
| 123 |
# Add user input to conversation history
|
| 124 |
self.conversation_history.append({"role": "user", "content": user_input})
|
|
|
|
| 221 |
return f"""
|
| 222 |
## Model Information
|
| 223 |
|
| 224 |
+
**App Version:** {APP_VERSION}
|
| 225 |
**Model:** {info['model_name']}
|
| 226 |
**Device:** {info['device']}
|
| 227 |
**Available:** {'Yes' if info['available'] else 'No'}
|
| 228 |
+
|
| 229 |
+
*If the model is not available, responses will be limited.*
|
| 230 |
"""
|
| 231 |
|
| 232 |
def run_gradio_interface(self):
|
| 233 |
"""Run the Gradio interface."""
|
| 234 |
with gr.Blocks(title="Memory Chat") as demo:
|
| 235 |
+
gr.Markdown(f"# π€ Memory Chat with Hugging Face")
|
| 236 |
+
gr.Markdown(f"**Version: {APP_VERSION}**")
|
| 237 |
|
| 238 |
with gr.Tab("Chat"):
|
| 239 |
chatbot = gr.Chatbot()
|
chat_interface.py
CHANGED
|
@@ -63,7 +63,7 @@ class HuggingFaceChat:
|
|
| 63 |
The generated response
|
| 64 |
"""
|
| 65 |
if not self.chatbot:
|
| 66 |
-
return "I'm sorry, but I couldn't load the model.
|
| 67 |
|
| 68 |
try:
|
| 69 |
# Generate response with improved parameters for better quality
|
|
@@ -91,11 +91,15 @@ class HuggingFaceChat:
|
|
| 91 |
# Remove any incomplete sentences or hanging punctuation
|
| 92 |
generated_text = self._clean_response(generated_text)
|
| 93 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 94 |
return generated_text
|
| 95 |
|
| 96 |
except Exception as e:
|
| 97 |
console.print(f"[red]Error generating response: {e}[/red]")
|
| 98 |
-
return "I'm
|
| 99 |
|
| 100 |
def _clean_response(self, text: str) -> str:
|
| 101 |
"""Clean up the generated response."""
|
|
|
|
| 63 |
The generated response
|
| 64 |
"""
|
| 65 |
if not self.chatbot:
|
| 66 |
+
return "I'm sorry, but I couldn't load the AI model. This might be due to:\n1. Model loading issues\n2. Internet connection problems\n3. Server maintenance\n\nPlease try again in a few minutes."
|
| 67 |
|
| 68 |
try:
|
| 69 |
# Generate response with improved parameters for better quality
|
|
|
|
| 91 |
# Remove any incomplete sentences or hanging punctuation
|
| 92 |
generated_text = self._clean_response(generated_text)
|
| 93 |
|
| 94 |
+
# If response is empty or too short, provide a helpful message
|
| 95 |
+
if not generated_text or len(generated_text.strip()) < 5:
|
| 96 |
+
return "I'm processing your message. Could you please try again or rephrase your question?"
|
| 97 |
+
|
| 98 |
return generated_text
|
| 99 |
|
| 100 |
except Exception as e:
|
| 101 |
console.print(f"[red]Error generating response: {e}[/red]")
|
| 102 |
+
return f"I'm experiencing technical difficulties. Error: {str(e)[:100]}..."
|
| 103 |
|
| 104 |
def _clean_response(self, text: str) -> str:
|
| 105 |
"""Clean up the generated response."""
|