# memo/demo.py
"""
Demonstration Script - Transformers + Safetensors Integration
Shows how all components work together in production
"""
import asyncio
import logging
import time
from typing import List, Dict
# Import our modules
from core.scene_planner import get_planner, plan_scenes
from models.text.bangla_parser import extract_scenes
from models.image.sd_generator import get_generator, generate_frames
from config.model_tiers import get_tier_config, validate_model_weights_security
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class MemoDemo:
    """Demonstration of the complete Memo system.

    Runs each subsystem in sequence: tier configuration comparison,
    transformer-based scene planning, Stable Diffusion image generation
    with safetensors weights, security validation of model files,
    per-tier performance metrics, and a full end-to-end workflow.
    All output goes to stdout; nothing is returned.
    """

    def __init__(self):
        # Tiers compared throughout the demo, cheapest first.
        self.tiers = ["free", "pro", "enterprise"]
        # Bangla sample narration reused by the planning and workflow demos.
        self.sample_text = "আজকের দিনটি খুব সুন্দর ছিল। রোদ উজ্জ্বল ছিল এবং হাওয়া মৃদুমন্দ। মানুষজন পার্কে হাঁটছে এবং শিশুরা খেলছে।"

    async def demonstrate_tier_comparison(self):
        """Compare different tiers and their capabilities."""
        print("\n" + "=" * 80)
        print("🎯 TIER COMPARISON DEMONSTRATION")
        print("=" * 80)
        for tier_name in self.tiers:
            print(f"\n📊 {tier_name.upper()} TIER:")
            print("-" * 40)
            # Look up the tier; skip (don't crash) when it is not configured.
            config = get_tier_config(tier_name)
            if not config:
                print(f"❌ Configuration not found for {tier_name}")
                continue
            print(f"✅ Text Model: {config.text_model_id}")
            print(f"✅ Image Model: {config.image_model_id}")
            print(f"✅ Resolution: {config.image_width}x{config.image_height}")
            print(f"✅ Inference Steps: {config.image_inference_steps}")
            print(f"✅ LoRA Path: {config.lora_path or 'None'}")
            print(f"✅ LCM Enabled: {config.lcm_enabled}")
            print(f"✅ Credits/Minute: {config.credits_per_minute}")
            # Validate LoRA security only when a LoRA is actually configured.
            if config.lora_path:
                security_result = validate_model_weights_security(config.lora_path)
                print(f"🔒 Security: {'✅ COMPLIANT' if security_result['is_secure'] else '❌ VIOLATION'}")
                if security_result['issues']:
                    for issue in security_result['issues']:
                        print(f" - {issue}")

    async def demonstrate_scene_planning(self):
        """Demonstrate transformer-based scene planning."""
        print("\n" + "=" * 80)
        print("🧠 TRANSFORMER-BASED SCENE PLANNING")
        print("=" * 80)
        print(f"📝 Input Text: {self.sample_text}")
        print("\n🎬 Generating scene plan...")
        # Time the planner so the demo reports real latency.
        start_time = time.time()
        scenes = plan_scenes(self.sample_text, duration=15)
        end_time = time.time()
        print(f"⏱️ Processing Time: {end_time - start_time:.2f} seconds")
        print(f"🎭 Scenes Generated: {len(scenes)}")
        for i, scene in enumerate(scenes, 1):
            print(f"\nScene {i}:")
            print(f" 📖 Description: {scene['description']}")
            print(f" ⏱️ Duration: {scene['duration']:.1f}s")
            print(f" 🎨 Visual Style: {scene['visual_style']}")
            print(f" 🔄 Transition: {scene['transition_type']}")

    async def demonstrate_image_generation(self):
        """Demonstrate Stable Diffusion with safetensors."""
        print("\n" + "=" * 80)
        print("🎨 STABLE DIFFUSION + SAFETENSORS")
        print("=" * 80)
        # The Pro tier is used as the representative configuration.
        config = get_tier_config("pro")
        if not config:
            print("❌ Pro tier configuration not available")
            return
        print("🔧 Using Pro Tier Configuration:")
        print(f" Model: {config.image_model_id}")
        print(f" Resolution: {config.image_width}x{config.image_height}")
        print(f" LoRA: {config.lora_path}")
        try:
            # Build (or fetch a cached) generator for this tier's model.
            generator = get_generator(
                model_id=config.image_model_id,
                lora_path=config.lora_path,
                use_lcm=config.lcm_enabled
            )
            # Generate a single test frame and report timing.
            test_prompt = "Beautiful landscape with sunlight filtering through trees"
            print(f"\n🎯 Generating image for prompt: {test_prompt}")
            start_time = time.time()
            frames = generator.generate_frames(
                prompt=test_prompt,
                frames=1,
                width=config.image_width,
                height=config.image_height,
                num_inference_steps=config.image_inference_steps
            )
            end_time = time.time()
            print(f"⏱️ Generation Time: {end_time - start_time:.2f} seconds")
            print(f"🖼️ Frames Generated: {len(frames)}")
            if frames:
                print("✅ Image generation successful!")
                # frames[0] is presumably a PIL Image (has .size / .mode).
                print(f"📏 Image Size: {frames[0].size}")
                print(f"💾 Image Mode: {frames[0].mode}")
            else:
                print("❌ Image generation failed")
        except Exception as e:
            # Demo must keep running even when model loading/inference fails.
            print(f"❌ Image generation error: {e}")

    async def demonstrate_security_compliance(self):
        """Demonstrate security validation of model weight files."""
        # Hoisted out of the loop: these were previously re-imported per file.
        import os
        import torch
        from safetensors.torch import save_file

        print("\n" + "=" * 80)
        print("🔒 SECURITY VALIDATION DEMONSTRATION")
        print("=" * 80)
        # Mix of compliant (.safetensors) and non-compliant formats.
        test_files = [
            "data/lora/memo-scene-lora.safetensors",
            "unsafe_model.bin",  # Should fail
            "another_model.ckpt"  # Should fail
        ]
        for file_path in test_files:
            print(f"\n🔍 Validating: {file_path}")
            if file_path.endswith('.safetensors'):
                # Create a dummy safetensors file so validation has real input.
                print(" 📝 Creating dummy safetensors file for testing...")
                dummy_tensors = {
                    "weight1": torch.randn(10, 10),
                    "weight2": torch.randn(5, 5)
                }
                os.makedirs("data/lora", exist_ok=True)
                save_file(dummy_tensors, file_path)
                print(f" ✅ Created test file: {file_path}")
            # Validate and report the security status of this file.
            result = validate_model_weights_security(file_path)
            print(" 📊 Security Status:")
            print(f" Secure: {'✅ YES' if result['is_secure'] else '❌ NO'}")
            print(f" Format: {result['format'] or 'Unknown'}")
            print(f" Size: {result['file_size_mb']:.2f} MB")
            print(f" Tensors: {result['tensors_count']}")
            if result['issues']:
                print(" Issues:")
                for issue in result['issues']:
                    print(f" - {issue}")
            else:
                print(" ✅ No security issues found")

    async def demonstrate_performance_metrics(self):
        """Show performance metrics across tiers."""
        print("\n" + "=" * 80)
        print("⚡ PERFORMANCE METRICS")
        print("=" * 80)
        metrics = []
        for tier_name in self.tiers:
            config = get_tier_config(tier_name)
            if not config:
                continue
            # These are configuration-derived estimates, not measurements.
            metrics.append({
                "tier": tier_name,
                "memory_gb": config.memory_limit_gb,
                "throughput": config.max_concurrent_requests,
                "cost_per_minute": config.credits_per_minute,
                "resolution": f"{config.image_width}x{config.image_height}",
                "inference_steps": config.image_inference_steps
            })
        # Fixed-width table header and rows.
        print(f"{'Tier':<12} {'Memory':<8} {'Throughput':<12} {'Cost/min':<10} {'Resolution':<12} {'Steps':<6}")
        print("-" * 70)
        for metric in metrics:
            print(f"{metric['tier']:<12} "
                  f"{metric['memory_gb']:<8.1f} "
                  f"{metric['throughput']:<12} "
                  f"${metric['cost_per_minute']:<9.1f} "
                  f"{metric['resolution']:<12} "
                  f"{metric['inference_steps']:<6}")

    async def run_complete_workflow(self):
        """Run the complete video generation workflow (plan then render)."""
        print("\n" + "=" * 80)
        print("🎬 COMPLETE WORKFLOW DEMONSTRATION")
        print("=" * 80)
        print(f"📝 Input: {self.sample_text}")
        print("🎯 Target: 15-second video")
        print("🏆 Tier: Pro")
        try:
            # Step 1: Scene Planning
            print("\n📋 Step 1: Scene Planning...")
            scenes = plan_scenes(self.sample_text, duration=15)
            print(f"✅ Generated {len(scenes)} scenes")
            # Step 2: Frame Generation
            print("\n🎨 Step 2: Frame Generation...")
            config = get_tier_config("pro")
            # Guard added: previously a missing config raised AttributeError.
            if not config:
                print("❌ Pro tier configuration not available")
                return
            generator = get_generator(
                model_id=config.image_model_id,
                lora_path=config.lora_path,
                use_lcm=config.lcm_enabled
            )
            # Generate one frame per scene (limited to 3 scenes for the demo).
            total_frames = 0
            for i, scene in enumerate(scenes[:3], 1):
                print(f" 🎭 Scene {i}: {scene['description'][:50]}...")
                frames = generator.generate_frames(
                    prompt=scene['description'],
                    frames=1,
                    width=config.image_width,
                    height=config.image_height,
                    num_inference_steps=config.image_inference_steps
                )
                total_frames += len(frames)
            print("\n🎉 Workflow completed successfully!")
            print(f" 📊 Total scenes: {len(scenes)}")
            print(f" 🖼️ Total frames: {total_frames}")
            print(" 🔒 Security: Safetensors enforced")
            print(" ⚡ Performance: Optimized for production")
        except Exception as e:
            # Keep the demo alive on any failure and report the cause.
            print(f"❌ Workflow failed: {e}")

    async def run_demonstration(self):
        """Run the complete demonstration (all sections, in order)."""
        print("🚀 MEMO TRANSFORMERS + SAFETENSORS DEMONSTRATION")
        print("=" * 80)
        print("This demo shows the complete transformation from toy logic")
        print("to production-grade ML with proper security and performance.")
        # Run all demonstrations sequentially.
        await self.demonstrate_tier_comparison()
        await self.demonstrate_scene_planning()
        await self.demonstrate_image_generation()
        await self.demonstrate_security_compliance()
        await self.demonstrate_performance_metrics()
        await self.run_complete_workflow()
        print("\n" + "=" * 80)
        print("✅ DEMONSTRATION COMPLETE")
        print("=" * 80)
        print("Memo now uses:")
        print(" 🧠 Transformers for text understanding")
        print(" 🎨 Stable Diffusion for image generation")
        print(" 🔒 Safetensors for secure model loading")
        print(" 🏢 Enterprise-grade architecture")
        print(" ⚡ Production-ready performance")
        print("\nThis is no longer a toy system. It's production-grade ML.")
async def main():
    """Entry point: construct the demo driver and run every demonstration."""
    await MemoDemo().run_demonstration()


if __name__ == "__main__":
    asyncio.run(main())