# NOTE: the three lines below were web-page artifacts (uploader name, commit
# message, commit hash) accidentally captured above the shebang; kept as
# comments so the file remains valid Python.
# CHKIM79's picture
# Fix: Simplified deployment for Hugging Face Spaces constraints
# b988bd4
#!/usr/bin/env python3
"""
Multi-Agent NLP System - Hugging Face Spaces Interface
Simplified version that works within Spaces constraints.
"""
import gradio as gr
import subprocess
import sys
import json
import time
import os
from pathlib import Path
# Add shared directory to Python path
# The "shared" folder sits next to this file but is not an installed package,
# so it must be put on sys.path for `from kafka_client import ...` to resolve.
sys.path.insert(0, str(Path(__file__).parent / "shared"))
def get_system_info():
    """Return a human-readable snapshot of host resources and the runtime env.

    Uses psutil (imported lazily so the app still loads without it) to read
    CPU, memory and root-disk usage, and falls back to an error string if
    anything goes wrong.
    """
    try:
        import psutil

        # interval=1 samples CPU load over one second for a meaningful reading.
        cpu_load = psutil.cpu_percent(interval=1)
        mem = psutil.virtual_memory()
        root_disk = psutil.disk_usage('/')

        gib = 1024 ** 3  # bytes per GiB, hoisted out of the format string
        return f"""
🖥️ System Information:
- CPU Usage: {cpu_load}%
- Memory: {mem.percent}% ({mem.used // gib}GB / {mem.total // gib}GB)
- Disk Usage: {root_disk.percent}% ({root_disk.used // gib}GB / {root_disk.total // gib}GB)
- Platform: Hugging Face Spaces (Containerized Environment)
📦 Environment:
- Python: {sys.version}
- Working Directory: {os.getcwd()}
- Available Files: {', '.join(os.listdir('.'))}
⚠️ Note: This is a simplified version running in Hugging Face Spaces.
Full Docker infrastructure (Kafka, Redis, MinIO) is not available in this environment.
"""
    except Exception as e:
        return f"Error getting system info: {str(e)}"
def test_kafka_client():
    """Load the shared Kafka client and show its configuration (no broker).

    This is a dry run: it only proves the client code imports and builds a
    config; it never opens a network connection. Any failure (including a
    missing module) is reported as a string rather than raised.
    """
    try:
        # Lazily import the project-local client from the "shared" directory.
        from kafka_client import create_kafka_config, KafkaConfig

        cfg = create_kafka_config(bootstrap_servers="localhost:9092")
        producer_cfg = json.dumps(cfg.producer_config, indent=2)
        consumer_cfg = json.dumps(cfg.consumer_config, indent=2)

        return f"""
✅ Kafka Client Test (Simulation Mode):
📋 Configuration:
- Bootstrap Servers: {cfg.bootstrap_servers}
- Security Protocol: {cfg.security_protocol}
- Producer Config: {producer_cfg}
- Consumer Config: {consumer_cfg}
🔧 Client Classes Available:
- KafkaConfig: ✅ Loaded
- KafkaProducer: ✅ Available
- KafkaConsumer: ✅ Available
- KafkaManager: ✅ Available
⚠️ Note: Actual Kafka connectivity requires infrastructure services.
In Hugging Face Spaces, this demonstrates the client code structure.
"""
    except Exception as e:
        return f"❌ Kafka client test failed: {str(e)}"
def simulate_nlp_processing(text):
    """Run a lightweight, dependency-free NLP analysis over *text*.

    Computes word/sentence/character statistics and a keyword-based sentiment
    estimate, returning a pretty-printed JSON string. Blank input yields a
    usage prompt; unexpected failures yield an error string.

    Fixes over the original:
    - sentiment keywords are matched after stripping surrounding punctuation,
      so tokens like "great!" or "love," are counted;
    - sentences are counted across '.', '!' and '?' terminators, not just '.'.
    """
    if not text.strip():
        return "Please enter some text to process."
    try:
        words = text.split()
        # Normalize '!' and '?' to '.' so all sentence terminators count.
        sentences = text.replace('!', '.').replace('?', '.').split('.')

        # Tiny keyword lexicon for the sentiment demo.
        positive_words = ['good', 'great', 'excellent', 'amazing', 'wonderful', 'fantastic', 'love', 'like']
        negative_words = ['bad', 'terrible', 'awful', 'hate', 'dislike', 'horrible', 'worst']

        # Strip surrounding punctuation so "great!" matches "great".
        tokens = [word.lower().strip(".,!?;:\"'()[]") for word in words]
        positive_count = sum(1 for tok in tokens if tok in positive_words)
        negative_count = sum(1 for tok in tokens if tok in negative_words)

        if positive_count > negative_count:
            sentiment = "positive"
        elif negative_count > positive_count:
            sentiment = "negative"
        else:
            sentiment = "neutral"

        result = {
            "input_text": text,
            "processed_at": time.strftime('%Y-%m-%d %H:%M:%S'),
            "statistics": {
                "word_count": len(words),
                "character_count": len(text),
                # Ignore empty fragments from trailing terminators.
                "sentence_count": len([s for s in sentences if s.strip()]),
                "avg_word_length": sum(len(word) for word in words) / len(words) if words else 0
            },
            "sentiment_analysis": {
                "sentiment": sentiment,
                "positive_indicators": positive_count,
                "negative_indicators": negative_count,
                # Margin of winning polarity, normalized by word count.
                "confidence": abs(positive_count - negative_count) / max(len(words), 1)
            },
            "processing_info": {
                "mode": "simulation",
                "platform": "Hugging Face Spaces",
                "note": "This is a simplified demonstration of NLP processing capabilities"
            }
        }
        return json.dumps(result, indent=2)
    except Exception as e:
        return f"❌ NLP processing error: {str(e)}"
def run_infrastructure_simulation():
    """Report which infrastructure components are usable in this environment.

    Returns a formatted text report. The pass/total summary is computed from
    the checks dict (the original hard-coded "5/9", which would silently go
    stale if a check were added or removed).
    """
    try:
        # Static check results: Spaces has no Docker/Kafka/Redis/MinIO.
        checks = {
            "Python Environment": "✅ PASS",
            "Shared Kafka Client": "✅ PASS (Code Available)",
            "File System": "✅ PASS",
            "Memory": "✅ PASS",
            "CPU": "✅ PASS",
            "Docker Services": "❌ NOT AVAILABLE (Hugging Face Spaces Limitation)",
            "Kafka Broker": "❌ NOT AVAILABLE (Requires Infrastructure)",
            "Redis Cache": "❌ NOT AVAILABLE (Requires Infrastructure)",
            "MinIO Storage": "❌ NOT AVAILABLE (Requires Infrastructure)"
        }
        passed = sum(1 for status in checks.values() if status.startswith("✅"))
        total = len(checks)

        result = "🔍 Infrastructure Validation (Hugging Face Spaces)\n"
        result += "=" * 50 + "\n\n"
        for check_name, status in checks.items():
            result += f"{check_name:<25} {status}\n"
        result += f"\n📊 Summary: {passed}/{total} checks available in this environment\n"
        result += "\n💡 Note: This demonstrates the system architecture.\n"
        result += "Full infrastructure requires Docker deployment with Kafka, Redis, and MinIO.\n"
        return result
    except Exception as e:
        return f"❌ Infrastructure simulation error: {str(e)}"
# Create Gradio interface
# Top-level Blocks layout: four tabs (status, NLP demo, architecture docs,
# usage docs). `demo` is launched by the __main__ guard at the bottom.
with gr.Blocks(title="Multi-Agent NLP System", theme=gr.themes.Soft()) as demo:
    # Page header / disclaimer shown above the tabs.
    gr.Markdown("""
# 🤖 Multi-Agent NLP System
**Demo Version - Running on Hugging Face Spaces**
This is a demonstration of the Multi-Agent NLP System architecture. The full system includes:
- **Kafka Message Broker** for inter-service communication
- **Redis Cache** for high-performance data storage
- **MinIO Object Storage** for file and media management
- **Multiple Specialized Agents** for different processing tasks
⚠️ **Note**: This Spaces deployment shows the system architecture and code structure.
Full infrastructure features require Docker deployment with all services.
""")
    with gr.Tabs():
        # --- Tab 1: system status / health-check buttons ---------------------
        with gr.Tab("🔍 System Status"):
            gr.Markdown("### System Information & Health Check")
            with gr.Row():
                info_btn = gr.Button("Get System Info", variant="primary")
                kafka_btn = gr.Button("Test Kafka Client", variant="secondary")
                infra_btn = gr.Button("Infrastructure Check", variant="secondary")
            # All three buttons write into the same shared results textbox.
            output = gr.Textbox(label="Results", lines=20, max_lines=30)
            info_btn.click(get_system_info, outputs=output)
            kafka_btn.click(test_kafka_client, outputs=output)
            infra_btn.click(run_infrastructure_simulation, outputs=output)
        # --- Tab 2: interactive NLP processing demo --------------------------
        with gr.Tab("🧠 NLP Processing"):
            gr.Markdown("### Natural Language Processing Demo")
            with gr.Row():
                with gr.Column():
                    text_input = gr.Textbox(
                        label="Enter text to process",
                        placeholder="Type your text here for analysis...",
                        lines=5,
                        # Pre-filled example so the demo works on first click.
                        value="This is a great example of natural language processing! The system can analyze sentiment and extract statistics."
                    )
                    process_btn = gr.Button("Process Text", variant="primary")
                with gr.Column():
                    nlp_output = gr.Textbox(
                        label="Processing Results",
                        lines=15,
                        max_lines=20
                    )
            process_btn.click(simulate_nlp_processing, inputs=text_input, outputs=nlp_output)
        # --- Tab 3: static architecture overview (no interactivity) ----------
        with gr.Tab("🏗️ Architecture"):
            gr.Markdown("""
### System Architecture Overview
```
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ API Gateway │ │ NLP Agent │ │ Image Agent │
│ │ │ │ │ │
└─────────┬───────┘ └─────────┬───────┘ └─────────┬───────┘
│ │ │
└──────────────────────┼──────────────────────┘
│
┌─────────────┴─────────────┐
│ Kafka Message Bus │
└─────────────┬─────────────┘
│
┌─────────────────────┼─────────────────────┐
│ │ │
┌─────────┴───────┐ ┌─────────┴───────┐ ┌─────────┴───────┐
│ Redis Cache │ │ MinIO Storage │ │ Video Agent │
│ │ │ │ │ │
└─────────────────┘ └─────────────────┘ └─────────────────┘
```
### 🔧 Technology Stack
- **Backend**: Python 3.9+ with asyncio
- **Message Broker**: Apache Kafka + Zookeeper
- **Cache**: Redis
- **Storage**: MinIO (S3-compatible)
- **Containerization**: Docker & Docker Compose
- **Web Interface**: Gradio
### 📡 Communication Flow
1. **API Gateway** receives external requests
2. **Kafka** routes messages between services
3. **Agents** process requests asynchronously
4. **Redis** caches frequently accessed data
5. **MinIO** stores files and processing results
### 🚀 Deployment Options
- **Local Development**: `docker-compose up`
- **Production**: Kubernetes with Helm charts
- **Cloud**: AWS/GCP/Azure with managed services
- **Demo**: Hugging Face Spaces (this deployment)
""")
        # --- Tab 4: static setup / configuration documentation ---------------
        with gr.Tab("📖 Documentation"):
            gr.Markdown("""
### 🚀 Quick Start Guide
#### For Local Development:
```bash
# Clone the repository
git clone https://huggingface.co/spaces/CHKIM79/multi-agent-nlp-system
cd multi-agent-nlp-system
# Install dependencies
pip install -r requirements.txt
# Start infrastructure
docker-compose -f docker-compose-minimal.yml up -d
# Validate system
python3 simple_check.py
# Run tests
python3 test_kafka_client.py
```
#### For Production Deployment:
```bash
# Use the full Docker Compose
docker-compose up -d
# Or deploy to Kubernetes
kubectl apply -f k8s/
```
### 🔧 Configuration
Environment variables:
- `KAFKA_BOOTSTRAP_SERVERS`: Kafka broker addresses
- `REDIS_URL`: Redis connection string
- `MINIO_ENDPOINT`: MinIO server endpoint
- `MINIO_ACCESS_KEY`: MinIO access credentials
- `MINIO_SECRET_KEY`: MinIO secret credentials
### 📝 API Endpoints
When fully deployed, the system exposes:
- `GET /health` - System health check
- `POST /api/nlp/process` - NLP processing
- `POST /api/image/generate` - Image generation
- `POST /api/video/process` - Video processing
- `GET /api/status` - System status
### 🛠️ Development
The system is designed for:
- **Microservices Architecture**: Independent, scalable services
- **Event-Driven Communication**: Asynchronous message passing
- **Horizontal Scaling**: Add more agent instances as needed
- **Fault Tolerance**: Graceful degradation and error handling
### 📊 Monitoring
Production monitoring includes:
- **Health Checks**: Automated service validation
- **Metrics**: Performance and usage statistics
- **Logging**: Centralized log aggregation
- **Alerting**: Real-time issue notifications
""")
# Launch the interface
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces so the Spaces proxy can reach the app
        server_port=7860,       # the port Hugging Face Spaces routes traffic to
        share=False,            # no Gradio tunnel needed; Spaces supplies the public URL
        show_error=True         # surface Python tracebacks in the UI for easier debugging
    )