artecnosomatic commited on
Commit
6c949e4
·
1 Parent(s): 9b6c294

Deploy MemoryAI to memoryotherone

Browse files
Files changed (6) hide show
  1. .gitignore +18 -0
  2. README.md +27 -14
  3. app.py +82 -0
  4. spaces_requirements.txt +12 -0
  5. src/main.py +233 -0
  6. templates/spaces_chat.html +596 -0
.gitignore ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # Environment
7
+ .env
8
+ .venv
9
+ venv/
10
+
11
+ # Data
12
+ data/
13
+ models/
14
+
15
+ # IDE
16
+ .idea/
17
+ .vscode/
18
+ *.swp
README.md CHANGED
@@ -1,14 +1,27 @@
1
- ---
2
- title: Memoryotherone
3
- emoji: 🐨
4
- colorFrom: indigo
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 6.3.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- short_description: other memory test
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 🤖 MemoryAI - Conversational AI with Memory
2
+
3
+ MemoryAI is a conversational AI that remembers your past conversations and uses that context to provide better responses.
4
+
5
+ ## Features
6
+
7
+ ✅ **Memory System**: Remembers past conversations
8
+ ✅ **Context-Aware**: Uses conversation history for better responses
9
+ ✅ **Multiple Models**: Supports different AI models
10
+ ✅ **Web Interface**: Beautiful chat interface
11
+ ✅ **Persistent**: Saves memories between sessions
12
+
13
+ ## How to Use
14
+
15
+ 1. Type your message in the input box
16
+ 2. Press Enter or click Send
17
+ 3. View memories with the 📚 button
18
+ 4. Clear memories with 🗑️ button
19
+ 5. Save manually with 💾 button
20
+
21
+ ## Configuration
22
+
23
+ The app uses `microsoft/DialoGPT-small` by default, which is optimized for conversation.
24
+
25
+ ## About
26
+
27
+ This Space demonstrates how to build a conversational AI with memory capabilities using Hugging Face models.
app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Hugging Face Spaces compatible app for MemoryAI.
This is the main entry point for the Spaces deployment.
"""

import os
from dotenv import load_dotenv
from flask import Flask, render_template, request, jsonify
from src.main import MemoryAI

# Load environment variables from a local .env file, if one exists.
load_dotenv()

# Initialize Flask app
app = Flask(__name__)

# Default to DialoGPT-small on Spaces unless the operator set MODEL_NAME.
# This must happen before MemoryAI() is constructed, because MemoryAI reads
# the MODEL_NAME environment variable in its __init__.
if not os.getenv("MODEL_NAME"):
    os.environ["MODEL_NAME"] = "microsoft/DialoGPT-small"

# Global MemoryAI instance shared by all requests.
# NOTE(review): a single module-level instance with a mutable memory list is
# not thread-safe; confirm the deployment runs a single worker/thread.
ai = MemoryAI()
ai.load_memories()
25
+
26
@app.route('/')
def home():
    """Render the main chat interface (templates/spaces_chat.html)."""
    return render_template('spaces_chat.html')
30
+
31
@app.route('/api/memories', methods=['GET'])
def get_memories():
    """Return the 10 most recent memories and the total count as JSON."""
    payload = {
        'memories': ai.get_recent_memories(10),
        'total_memories': len(ai.memories),
    }
    return jsonify(payload)
39
+
40
@app.route('/api/clear', methods=['POST'])
def clear_memories():
    """Wipe every stored memory and acknowledge with a JSON status."""
    ai.clear_memories()
    result = {'status': 'success', 'message': 'All memories cleared'}
    return jsonify(result)
45
+
46
@app.route('/api/chat', methods=['POST'])
def chat():
    """Generate an AI reply to the user's message and record both turns.

    Expects a JSON body: {"message": "<user text>"}.
    Returns JSON {"response": ..., "memory_count": ...}, or a 400 error
    when the body is missing/not JSON or the message is empty.
    """
    # Bug fix: request.json raises (or is None) for a missing or non-JSON
    # body; get_json(silent=True) returns None instead, so malformed
    # requests fall through to the clean 400 below.
    data = request.get_json(silent=True) or {}
    user_input = str(data.get('message', '')).strip()

    if not user_input:
        return jsonify({'error': 'Empty message'}), 400

    # Record the user's turn before building the prompt context.
    ai.add_memory(f"User: {user_input}")

    # Use the last few remembered turns as conversational context.
    recent_context = "\n".join(ai.get_recent_memories(3))
    full_prompt = f"{recent_context}\n\nUser: {user_input}\nAI:"

    # Generate the AI response.
    response = ai.generate_response(full_prompt)

    # Record the AI's turn so it becomes part of future context.
    ai.add_memory(f"AI: {response}")

    return jsonify({
        'response': response,
        'memory_count': len(ai.memories),
    })
72
+
73
@app.route('/api/save', methods=['POST'])
def save_memories():
    """Persist the in-memory conversation log to disk."""
    ai.save_memories()
    result = {'status': 'success', 'message': 'Memories saved'}
    return jsonify(result)
78
+
79
if __name__ == '__main__':
    # Hugging Face Spaces will run this with their own server.
    # For local testing run `python app.py`; PORT defaults to 7860, the port
    # Spaces expects an app to listen on.
    # NOTE(review): debug=True must not be enabled in production — it exposes
    # the Werkzeug debugger; confirm this path is local-only.
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 7860)))
spaces_requirements.txt ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hugging Face Spaces requirements
2
+ # This file contains the dependencies needed for deployment
3
+
4
+ transformers==4.40.0
5
+ torch==2.2.0
6
+ flask==3.1.2
7
+ python-dotenv==1.0.1
8
+ datasets==4.4.2
9
+ accelerate==1.12.0
10
+ blinker==1.9.0
11
+ itsdangerous==2.2.0
12
+ werkzeug==3.1.5
src/main.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Main application for the Hugging Face Memory Project.
Handles conversation interface and memory management.
"""

import os
from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load environment variables so MemoryAI can read its settings from .env.
load_dotenv()
14
+
15
class MemoryAI:
    """Conversational AI that keeps a rolling log of past turns ("memories")
    and feeds the most recent turns back into the prompt so replies are
    context-aware.  Memories can be persisted to <data_dir>/memories.txt.
    """

    def __init__(self):
        """Load the model/tokenizer and configure memory + generation
        settings from environment variables (MODEL_NAME, MAX_MEMORY_ENTRIES,
        DATA_DIR, MODELS_DIR, TEMPERATURE, MAX_NEW_TOKENS, TOP_P,
        REPETITION_PENALTY).
        """
        self.model_name = os.getenv("MODEL_NAME", "gpt2")
        self.max_memory = int(os.getenv("MAX_MEMORY_ENTRIES", 100))
        self.data_dir = os.getenv("DATA_DIR", "data")
        self.models_dir = os.getenv("MODELS_DIR", "models")

        # Generation parameters (passed to transformers' generate()).
        self.temperature = float(os.getenv("TEMPERATURE", 0.7))
        self.max_new_tokens = int(os.getenv("MAX_NEW_TOKENS", 80))
        self.top_p = float(os.getenv("TOP_P", 0.9))
        self.repetition_penalty = float(os.getenv("REPETITION_PENALTY", 1.2))

        # Rolling memory store; oldest entries are evicted first.
        self.memories = []

        # Load model and tokenizer.
        print(f"Loading {self.model_name} model...")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(self.model_name)

        # Move the model to GPU when one is available.
        if torch.cuda.is_available():
            self.model = self.model.to('cuda')
            print("Using CUDA (GPU acceleration)")
        else:
            print("Using CPU")

        print(f"Initialized {self.model_name} model")
        print(f"Memory capacity: {self.max_memory} entries")
        print(f"Generation params - Temp: {self.temperature}, Max tokens: {self.max_new_tokens}")

    def add_memory(self, memory_text):
        """Append one memory entry, evicting the oldest when at capacity."""
        if len(self.memories) >= self.max_memory:
            self.memories.pop(0)  # Remove oldest memory
        self.memories.append(memory_text)
        print(f"Memory added. Total memories: {len(self.memories)}")

    def generate_response(self, prompt, max_new_tokens=None):
        """Generate a reply for *prompt* using the loaded model.

        Args:
            prompt: Full prompt text (recent context + current user turn).
            max_new_tokens: Optional per-call token budget; defaults to the
                configured self.max_new_tokens.  (Bug fix: the previous
                version accepted this argument but silently ignored it.)

        Returns:
            A cleaned response string; a rule-based fallback is used when
            the model output looks degenerate.
        """
        if max_new_tokens is None:
            max_new_tokens = self.max_new_tokens

        # DialoGPT is trained on raw dialogue turns and takes the prompt
        # as-is; other causal LMs get an explicit "Assistant:" cue.
        if "microsoft/DialoGPT" in self.model_name:
            improved_prompt = prompt
        else:
            improved_prompt = f"{prompt}\n\nAssistant:"

        inputs = self.tokenizer(improved_prompt, return_tensors="pt")

        # Keep the inputs on the same device as the model.
        if next(self.model.parameters()).is_cuda:
            inputs = {k: v.to('cuda') for k, v in inputs.items()}

        # Inference only — no_grad avoids building an autograd graph.
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                temperature=self.temperature,
                top_p=self.top_p,
                do_sample=True,  # sampling, not greedy decoding
                repetition_penalty=self.repetition_penalty,
                no_repeat_ngram_size=2,  # prevent exact 2-gram repeats
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
            )

        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        # The model echoes the prompt; keep only the newly generated tail.
        response = response[len(improved_prompt):].strip()

        response = self._clean_response(response)

        # Heuristic quality gate: empty or one-word answers (and answers
        # starting with "I'm", which the original author treated as a sign
        # of a confused model) are replaced with a rule-based fallback.
        if not response or len(response.split()) < 2 or response.startswith("I'm"):
            response = self._generate_fallback_response(prompt)

        return response

    def _generate_fallback_response(self, prompt):
        """Return a canned, rule-based reply when model output is unusable."""
        prompt_lower = prompt.lower()
        # Whole-word greeting match.  Bug fix: the old substring test made
        # "hi" fire on unrelated words such as "this" or "while".
        tokens = {w.strip(".,!?;:'\"") for w in prompt_lower.split()}

        if tokens & {"hello", "hi", "hey"}:
            return "Hello! I'm MemoryAI, your conversational assistant. How can I help you today?"
        elif "how are you" in prompt_lower:
            return "I'm doing well, thank you for asking! As an AI, I'm always ready to chat. How about you?"
        elif "your name" in prompt_lower:
            return "I'm MemoryAI! I'm designed to remember our conversations and provide helpful responses."
        elif "memory" in prompt_lower and ("work" in prompt_lower or "how" in prompt_lower):
            return "I remember our past conversations and use that context to provide better, more relevant responses. It's like having a conversation with someone who remembers what you've talked about before!"
        elif "thank" in prompt_lower or "thanks" in prompt_lower:
            return "You're welcome! I'm happy to help. Is there anything else you'd like to talk about?"
        elif "joke" in prompt_lower:
            return "Why don't scientists trust atoms? Because they make up everything!"
        elif "weather" in prompt_lower:
            return "I can't check real-time weather, but I hope it's nice where you are! What city are you in?"
        else:
            # Generic fallback when no rule matches.
            return "That's an interesting question! As an AI with memory, I can tell you that we've talked about various topics. What would you like to discuss?"

    def _clean_response(self, response):
        """Normalize a raw model reply: trim dangling punctuation, collapse
        an immediately repeated trailing phrase, capitalize the first letter,
        and ensure terminal punctuation."""
        # Keep very short ellipsis-style replies as-is; otherwise strip
        # punctuation left over from a truncated sentence.
        if not (response.endswith(('...', '..', '.')) and len(response.split()) < 3):
            response = response.rstrip('.,;:!?')

        # Collapse a repeated trailing phrase of 1-3 words, e.g.
        # "see you see you" -> "see you".  (Bug fix: the old slice
        # words[-i-1:-1] produced an empty or shifted phrase, so the
        # comparison almost never matched the intended repetition.)
        words = response.split()
        for size in range(1, min(4, len(words) // 2 + 1)):
            if words[-size:] == words[-2 * size:-size]:
                response = ' '.join(words[:-size])
                break

        # Capitalize the first letter.
        if response and response[0].islower():
            response = response[0].upper() + response[1:]

        # Add terminal punctuation if missing.
        if response and response[-1] not in ('.', '!', '?'):
            response += '.'

        return response

    def converse(self):
        """Run an interactive chat loop on stdin/stdout.

        Special commands: 'quit' exits, '!memories' prints recent entries,
        '!clear' wipes the memory store.
        """
        print("Starting conversation... Type 'quit' to exit.")
        print("Type '!memories' to see recent memories, '!clear' to clear memories")

        while True:
            user_input = input("You: ")

            if user_input.lower() == 'quit':
                break

            # Handle special commands.
            if user_input.lower() == '!memories':
                recent_memories = self.get_recent_memories()
                print("Recent memories:")
                for i, memory in enumerate(recent_memories, 1):
                    print(f"  {i}. {memory}")
                continue

            if user_input.lower() == '!clear':
                self.clear_memories()
                continue

            # Record the user's turn, then prompt with recent context.
            self.add_memory(f"User: {user_input}")
            recent_context = "\n".join(self.get_recent_memories(3))
            full_prompt = f"{recent_context}\n\nUser: {user_input}\nAI:"

            response = self.generate_response(full_prompt)
            print(f"AI: {response}")

            # Record the AI's turn.
            self.add_memory(f"AI: {response}")

    def get_available_models(self):
        """Return a list of commonly available small causal-LM model names."""
        return [
            "gpt2",
            "distilgpt2",
            "gpt2-medium",
            "gpt2-large",
            "EleutherAI/gpt-neo-125M",
            "facebook/opt-125m",
            "microsoft/DialoGPT-small",
            "microsoft/DialoGPT-medium"
        ]

    def save_memories(self):
        """Write all memories to <data_dir>/memories.txt, one per line.

        Bug fix: creates the data directory first — previously this crashed
        with FileNotFoundError when the directory did not exist (e.g. on a
        fresh Spaces container).
        """
        os.makedirs(self.data_dir, exist_ok=True)
        memory_file = os.path.join(self.data_dir, "memories.txt")
        with open(memory_file, 'w', encoding='utf-8') as f:
            for memory in self.memories:
                f.write(memory + "\n")
        print(f"Memories saved to {memory_file}")

    def load_memories(self):
        """Load memories from <data_dir>/memories.txt if it exists;
        blank lines are skipped."""
        memory_file = os.path.join(self.data_dir, "memories.txt")
        if os.path.exists(memory_file):
            with open(memory_file, 'r', encoding='utf-8') as f:
                self.memories = [line.strip() for line in f if line.strip()]
            print(f"Loaded {len(self.memories)} memories from {memory_file}")
        else:
            print("No existing memories found.")

    def get_recent_memories(self, count=5):
        """Return up to *count* of the most recent memories (oldest first)."""
        return self.memories[-count:] if self.memories else []

    def clear_memories(self):
        """Discard every stored memory."""
        self.memories = []
        print("All memories cleared.")
228
+
229
+ if __name__ == "__main__":
230
+ ai = MemoryAI()
231
+ ai.load_memories() # Load existing memories
232
+ ai.converse()
233
+ ai.save_memories()
templates/spaces_chat.html ADDED
@@ -0,0 +1,596 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
+ <title>MemoryAI - Conversational AI with Memory</title>
7
+ <style>
8
+ * {
9
+ margin: 0;
10
+ padding: 0;
11
+ box-sizing: border-box;
12
+ font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
13
+ }
14
+
15
+ body {
16
+ background-color: #f5f7fa;
17
+ color: #333;
18
+ line-height: 1.6;
19
+ padding: 20px;
20
+ }
21
+
22
+ .container {
23
+ max-width: 800px;
24
+ margin: 0 auto;
25
+ background: white;
26
+ border-radius: 12px;
27
+ box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
28
+ overflow: hidden;
29
+ }
30
+
31
+ header {
32
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
33
+ color: white;
34
+ padding: 24px;
35
+ text-align: center;
36
+ }
37
+
38
+ h1 {
39
+ font-size: 1.8rem;
40
+ margin-bottom: 8px;
41
+ font-weight: 600;
42
+ }
43
+
44
+ .subtitle {
45
+ font-size: 0.9rem;
46
+ opacity: 0.95;
47
+ font-weight: 300;
48
+ }
49
+
50
+ .chat-container {
51
+ height: 500px;
52
+ overflow-y: auto;
53
+ padding: 20px;
54
+ background-color: #f8f9fa;
55
+ }
56
+
57
+ .chat-message {
58
+ margin-bottom: 16px;
59
+ padding: 12px 16px;
60
+ border-radius: 18px;
61
+ max-width: 85%;
62
+ word-wrap: break-word;
63
+ position: relative;
64
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);
65
+ }
66
+
67
+ .user-message {
68
+ background-color: #e3f2fd;
69
+ margin-left: auto;
70
+ color: #1976d2;
71
+ border-bottom-right-radius: 4px;
72
+ }
73
+
74
+ .ai-message {
75
+ background-color: #ffffff;
76
+ margin-right: auto;
77
+ color: #424242;
78
+ border-bottom-left-radius: 4px;
79
+ }
80
+
81
+ .message-header {
82
+ font-weight: 600;
83
+ margin-bottom: 4px;
84
+ font-size: 0.85rem;
85
+ color: #666;
86
+ }
87
+
88
+ .message-content {
89
+ font-size: 0.95rem;
90
+ line-height: 1.5;
91
+ }
92
+
93
+ .input-area {
94
+ display: flex;
95
+ padding: 16px;
96
+ background-color: #ffffff;
97
+ border-top: 1px solid #eaeaea;
98
+ gap: 12px;
99
+ }
100
+
101
+ #message-input {
102
+ flex: 1;
103
+ padding: 12px 16px;
104
+ border: 2px solid #e0e0e0;
105
+ border-radius: 24px;
106
+ font-size: 0.95rem;
107
+ outline: none;
108
+ transition: all 0.2s;
109
+ background-color: #f8f9fa;
110
+ }
111
+
112
+ #message-input:focus {
113
+ border-color: #667eea;
114
+ background-color: white;
115
+ box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1);
116
+ }
117
+
118
+ #send-button {
119
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
120
+ color: white;
121
+ border: none;
122
+ padding: 12px 20px;
123
+ border-radius: 24px;
124
+ cursor: pointer;
125
+ font-size: 0.95rem;
126
+ font-weight: 600;
127
+ transition: all 0.2s;
128
+ white-space: nowrap;
129
+ }
130
+
131
+ #send-button:hover {
132
+ transform: translateY(-1px);
133
+ box-shadow: 0 4px 8px rgba(102, 126, 234, 0.3);
134
+ }
135
+
136
+ #send-button:disabled {
137
+ background: #cccccc;
138
+ cursor: not-allowed;
139
+ transform: none;
140
+ box-shadow: none;
141
+ }
142
+
143
+ .controls {
144
+ display: flex;
145
+ padding: 12px 16px;
146
+ background-color: #f8f9fa;
147
+ border-bottom: 1px solid #eaeaea;
148
+ gap: 8px;
149
+ flex-wrap: wrap;
150
+ }
151
+
152
+ .control-button {
153
+ padding: 8px 12px;
154
+ border: none;
155
+ border-radius: 6px;
156
+ cursor: pointer;
157
+ font-size: 0.8rem;
158
+ font-weight: 500;
159
+ transition: all 0.2s;
160
+ background-color: white;
161
+ color: #666;
162
+ border: 1px solid #e0e0e0;
163
+ }
164
+
165
+ .control-button:hover {
166
+ background-color: #f0f0f0;
167
+ transform: translateY(-1px);
168
+ }
169
+
170
+ .typing-indicator {
171
+ display: none;
172
+ align-items: center;
173
+ gap: 6px;
174
+ padding: 8px 16px;
175
+ font-style: italic;
176
+ color: #666;
177
+ font-size: 0.85rem;
178
+ }
179
+
180
+ .typing-indicator.show {
181
+ display: flex;
182
+ }
183
+
184
+ .dot-typing {
185
+ display: flex;
186
+ gap: 2px;
187
+ }
188
+
189
+ .dot-typing span {
190
+ width: 6px;
191
+ height: 6px;
192
+ background-color: #667eea;
193
+ border-radius: 50%;
194
+ display: inline-block;
195
+ animation: bounce 1.4s infinite ease-in-out;
196
+ }
197
+
198
+ .dot-typing span:nth-child(1) {
199
+ animation-delay: -0.32s;
200
+ }
201
+
202
+ .dot-typing span:nth-child(2) {
203
+ animation-delay: -0.16s;
204
+ }
205
+
206
+ @keyframes bounce {
207
+ 0%, 80%, 100% {
208
+ transform: scale(0);
209
+ }
210
+ 40% {
211
+ transform: scale(1);
212
+ }
213
+ }
214
+
215
+ .status-bar {
216
+ padding: 8px 16px;
217
+ background-color: #e8eaf6;
218
+ font-size: 0.8rem;
219
+ color: #555;
220
+ border-bottom: 1px solid #eaeaea;
221
+ }
222
+
223
+ .memory-panel {
224
+ position: fixed;
225
+ top: 0;
226
+ right: 0;
227
+ width: 300px;
228
+ height: 100%;
229
+ background-color: white;
230
+ box-shadow: -2px 0 10px rgba(0, 0, 0, 0.1);
231
+ transform: translateX(100%);
232
+ transition: transform 0.3s ease;
233
+ z-index: 1000;
234
+ overflow-y: auto;
235
+ padding: 20px;
236
+ }
237
+
238
+ .memory-panel.show {
239
+ transform: translateX(0);
240
+ }
241
+
242
+ .memory-header {
243
+ display: flex;
244
+ justify-content: space-between;
245
+ align-items: center;
246
+ margin-bottom: 15px;
247
+ padding-bottom: 10px;
248
+ border-bottom: 1px solid #eaeaea;
249
+ }
250
+
251
+ .memory-count {
252
+ font-weight: 600;
253
+ color: #667eea;
254
+ }
255
+
256
+ .close-memory-panel {
257
+ background: none;
258
+ border: none;
259
+ color: #999;
260
+ font-size: 1.5rem;
261
+ cursor: pointer;
262
+ padding: 0;
263
+ }
264
+
265
+ .memory-item {
266
+ padding: 8px 12px;
267
+ margin: 6px 0;
268
+ background-color: #f8f9fa;
269
+ border-left: 3px solid #667eea;
270
+ border-radius: 4px;
271
+ font-size: 0.85rem;
272
+ line-height: 1.4;
273
+ }
274
+
275
+ .memory-item:nth-child(even) {
276
+ background-color: #f0f0f0;
277
+ }
278
+
279
+ .overlay {
280
+ position: fixed;
281
+ top: 0;
282
+ left: 0;
283
+ width: 100%;
284
+ height: 100%;
285
+ background-color: rgba(0, 0, 0, 0.3);
286
+ z-index: 999;
287
+ display: none;
288
+ }
289
+
290
+ .overlay.show {
291
+ display: block;
292
+ }
293
+
294
+ @media (max-width: 768px) {
295
+ body {
296
+ padding: 10px;
297
+ }
298
+
299
+ .container {
300
+ border-radius: 8px;
301
+ }
302
+
303
+ .input-area {
304
+ flex-direction: column;
305
+ gap: 8px;
306
+ }
307
+
308
+ #send-button {
309
+ padding: 12px;
310
+ }
311
+
312
+ .memory-panel {
313
+ width: 100%;
314
+ height: 80%;
315
+ top: auto;
316
+ bottom: 0;
317
+ transform: translateY(100%);
318
+ }
319
+
320
+ .memory-panel.show {
321
+ transform: translateY(0);
322
+ }
323
+ }
324
+
325
+ /* Hugging Face Spaces specific styles */
326
+ .spaces-header {
327
+ display: none !important;
328
+ }
329
+ </style>
330
+ </head>
331
+ <body>
332
+ <div class="overlay" id="overlay"></div>
333
+
334
+ <div class="container">
335
+ <header>
336
+ <h1>🤖 MemoryAI</h1>
337
+ <p class="subtitle">Conversational AI with Memory - Hugging Face Spaces</p>
338
+ </header>
339
+
340
+ <div class="status-bar">
341
+ <span id="status-text">Ready to chat! Type your message below.</span>
342
+ </div>
343
+
344
+ <div class="controls">
345
+ <button class="control-button" id="show-memories-btn">📚 Memories</button>
346
+ <button class="control-button" id="clear-memories-btn">🗑️ Clear</button>
347
+ <button class="control-button" id="save-memories-btn">💾 Save</button>
348
+ </div>
349
+
350
+ <div class="memory-panel" id="memory-panel">
351
+ <div class="memory-header">
352
+ <span class="memory-count">Memories (<span id="memory-count">0</span>)</span>
353
+ <button class="close-memory-panel" id="close-memory-panel">×</button>
354
+ </div>
355
+ <div class="memory-list" id="memory-list">
356
+ <p>No memories yet. Start a conversation!</p>
357
+ </div>
358
+ </div>
359
+
360
+ <div class="chat-container" id="chat-container">
361
+ <div class="chat-message ai-message">
362
+ <div class="message-header">MemoryAI:</div>
363
+ <div class="message-content">
364
+ Hello! I'm MemoryAI running on Hugging Face Spaces. 🚀
365
+ I can remember our conversations and provide context-aware responses.
366
+ Try asking me anything!
367
+ </div>
368
+ </div>
369
+ </div>
370
+
371
+ <div class="typing-indicator" id="typing-indicator">
372
+ <span>MemoryAI is thinking</span>
373
+ <div class="dot-typing">
374
+ <span></span>
375
+ <span></span>
376
+ <span></span>
377
+ </div>
378
+ </div>
379
+
380
+ <div class="input-area">
381
+ <input type="text" id="message-input" placeholder="Type your message here..." autocomplete="off">
382
+ <button id="send-button">Send</button>
383
+ </div>
384
+ </div>
385
+
386
+ <script>
387
// Cached DOM element references used throughout the chat UI.
const chatContainer = document.getElementById('chat-container');
const messageInput = document.getElementById('message-input');
const sendButton = document.getElementById('send-button');
const typingIndicator = document.getElementById('typing-indicator');
const statusText = document.getElementById('status-text');
const memoryPanel = document.getElementById('memory-panel');
const memoryList = document.getElementById('memory-list');
const memoryCount = document.getElementById('memory-count');
const showMemoriesBtn = document.getElementById('show-memories-btn');
const closeMemoryPanel = document.getElementById('close-memory-panel');
const clearMemoriesBtn = document.getElementById('clear-memories-btn');
const saveMemoriesBtn = document.getElementById('save-memories-btn');
const overlay = document.getElementById('overlay');

// Scroll the chat pane to its newest message.  The small delay lets the
// browser finish layout of a just-appended message before scrolling.
function scrollToBottom() {
    setTimeout(() => {
        chatContainer.scrollTop = chatContainer.scrollHeight;
    }, 50);
}
408
+
409
// Append a chat bubble for `sender` ('User' or 'AI') containing `message`.
// Fixes: (1) the stylesheet defines .user-message / .ai-message, but callers
// pass 'User'/'AI', so `${sender}-message` never matched — the sender is now
// lower-cased; (2) the message was interpolated into innerHTML, letting
// user or model output inject markup (XSS) — it is now set via textContent.
function addMessage(sender, message) {
    const messageDiv = document.createElement('div');
    messageDiv.className = `chat-message ${sender.toLowerCase()}-message`;

    const header = document.createElement('div');
    header.className = 'message-header';
    header.textContent = `${sender}:`;

    const content = document.createElement('div');
    content.className = 'message-content';
    content.textContent = message;

    messageDiv.appendChild(header);
    messageDiv.appendChild(content);

    chatContainer.appendChild(messageDiv);
    scrollToBottom();
}
422
+
423
// Reveal the animated "MemoryAI is thinking" indicator.
function showTyping() {
    typingIndicator.classList.add('show');
    scrollToBottom();
}

// Hide the typing indicator.
function hideTyping() {
    typingIndicator.classList.remove('show');
}

// Replace the status-bar text.
function updateStatus(text) {
    statusText.textContent = text;
}

// Show or hide the sliding memory panel together with its backdrop overlay.
// Opening the panel refreshes its contents from the server.
function toggleMemoryPanel(show) {
    if (show) {
        memoryPanel.classList.add('show');
        overlay.classList.add('show');
        loadMemories();
    } else {
        memoryPanel.classList.remove('show');
        overlay.classList.remove('show');
    }
}
450
+
451
// Fetch the memory log from the backend and render it into the panel.
// On failure the panel shows an error placeholder instead of stale data.
async function loadMemories() {
    try {
        const res = await fetch('/api/memories');
        const payload = await res.json();

        memoryList.innerHTML = '';
        memoryCount.textContent = payload.total_memories;

        if (payload.memories.length === 0) {
            memoryList.innerHTML = '<p>No memories yet. Start a conversation!</p>';
        } else {
            for (const entry of payload.memories) {
                const item = document.createElement('div');
                item.className = 'memory-item';
                item.textContent = entry;
                memoryList.appendChild(item);
            }
        }
    } catch (err) {
        console.error('Error loading memories:', err);
        memoryList.innerHTML = '<p>Error loading memories</p>';
    }
}
475
+
476
// Ask the server to wipe all memories, then reset the chat transcript to a
// fresh greeting.  Guarded by confirm() because the wipe is irreversible.
async function clearMemories() {
    if (confirm('Clear all memories? This cannot be undone.')) {
        try {
            const response = await fetch('/api/clear', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                }
            });

            const data = await response.json();
            updateStatus(data.message);

            // Clear chat except for initial message
            chatContainer.innerHTML = `
                <div class="chat-message ai-message">
                    <div class="message-header">MemoryAI:</div>
                    <div class="message-content">
                        Memory cleared! Hello again! I'm MemoryAI running on Hugging Face Spaces. 🚀
                        Fresh start - ask me anything!
                    </div>
                </div>
            `;

            // Close memory panel if open
            toggleMemoryPanel(false);
        } catch (error) {
            console.error('Error clearing memories:', error);
            updateStatus('Error clearing memories');
        }
    }
}
509
+
510
// Persist the server-side memory log to disk via POST /api/save and show
// the server's acknowledgement in the status bar.
async function saveMemories() {
    try {
        const res = await fetch('/api/save', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' }
        });
        const payload = await res.json();
        updateStatus(payload.message);
    } catch (err) {
        console.error('Error saving memories:', err);
        updateStatus('Error saving memories');
    }
}
527
+
528
// Send the user's message to /api/chat and render the AI's reply.
// Fix: the send button is now disabled for the duration of the request so a
// slow model can't be spammed with duplicate submissions — the stylesheet
// already defines #send-button:disabled, but it was never triggered before.
async function sendMessage() {
    const message = messageInput.value.trim();

    if (!message) return;

    // Echo the user's message immediately.
    addMessage('User', message);
    messageInput.value = '';

    // Block further sends until this request settles.
    sendButton.disabled = true;
    showTyping();
    updateStatus('MemoryAI is thinking...');

    try {
        const response = await fetch('/api/chat', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ message: message })
        });

        const data = await response.json();

        if (data.error) {
            addMessage('AI', `Error: ${data.error}`);
            updateStatus('Error getting response');
        } else {
            addMessage('AI', data.response);
            updateStatus(`MemoryAI responded (${data.memory_count} memories)`);
        }
    } catch (error) {
        console.error('Error getting AI response:', error);
        addMessage('AI', 'Sorry, I encountered an error. Please try again.');
        updateStatus('Error connecting to AI');
    } finally {
        sendButton.disabled = false;
        hideTyping();
    }
}
568
+
569
// Wire up UI event handlers.
sendButton.addEventListener('click', sendMessage);

// Enter submits the message.
// NOTE(review): the 'keypress' event is deprecated in favor of 'keydown';
// behavior here is identical for Enter — confirm before migrating.
messageInput.addEventListener('keypress', function(e) {
    if (e.key === 'Enter') {
        sendMessage();
    }
});

showMemoriesBtn.addEventListener('click', () => toggleMemoryPanel(true));
closeMemoryPanel.addEventListener('click', () => toggleMemoryPanel(false));
overlay.addEventListener('click', () => toggleMemoryPanel(false));

clearMemoriesBtn.addEventListener('click', clearMemories);
saveMemoriesBtn.addEventListener('click', saveMemories);

// Initial UI state.
scrollToBottom();
updateStatus('Ready to chat on Hugging Face Spaces! 🎉');

// Auto-save the server-side memory log every minute.
setInterval(saveMemories, 60000);

// Welcome message for Spaces
console.log('MemoryAI running on Hugging Face Spaces! 🚀');
594
+ </script>
595
+ </body>
596
+ </html>