# ByteDream / examples_hf_api.py
# Uploaded by Enzo8930302 with huggingface_hub (commit 8f3291e, verified)
"""
Byte Dream - Hugging Face API Examples
Complete examples for using Byte Dream with Hugging Face Hub
"""
# ============================================================================
# Example 1: Use Model from Hugging Face Hub (Download Locally)
# ============================================================================
divider = "=" * 70
print(divider)
print("Example 1: Download and run model locally on CPU")
print(divider)

from bytedream import ByteDreamGenerator

# Pull the checkpoint straight from the Hub and run it on CPU.
generator = ByteDreamGenerator(
    hf_repo_id="Enzo8930302/ByteDream",  # Replace with your repo
    config_path="config.yaml",
    device="cpu",
)

# Single-image generation; the fixed seed makes the output reproducible.
generation_kwargs = dict(
    prompt="A beautiful sunset over mountains, digital art, vibrant colors",
    negative_prompt="ugly, blurry, low quality, distorted",
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
    seed=42,
)
image = generator.generate(**generation_kwargs)
image.save("example_1_output.png")
print("✓ Image saved to example_1_output.png\n")
# ============================================================================
# Example 2: Use Hugging Face Inference API (Cloud)
# ============================================================================
divider = "=" * 70
print(divider)
print("Example 2: Use Hugging Face Inference API (No local computation)")
print(divider)

from bytedream import ByteDreamHFClient

# Client configured for remote inference — nothing runs on this machine.
api_client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    token=None,  # Add your HF token here for private models
    use_api=True,  # Use cloud API instead of local inference
)

# Request one image from the hosted endpoint.
image_api = api_client.generate(
    prompt="Futuristic city with flying cars, cyberpunk style, night",
    negative_prompt="daylight, sunny, calm",
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
)
image_api.save("example_2_api_output.png")
print("✓ Image generated via API and saved to example_2_api_output.png\n")
# ============================================================================
# Example 3: Batch Generation
# ============================================================================
divider = "=" * 70
print(divider)
print("Example 3: Generate multiple images in batch")
print(divider)

prompts = [
    "Majestic dragon flying over medieval castle, fantasy art",
    "Peaceful Japanese garden with cherry blossoms, serene",
    "Underwater coral reef with tropical fish, vibrant colors",
    "Mountain landscape at sunrise, dramatic lighting, epic",
]

# One call produces one image per prompt; the negative prompt is shared.
images = generator.generate_batch(
    prompts=prompts,
    negative_prompt="ugly, deformed, low quality",
    width=512,
    height=512,
    num_inference_steps=40,
    guidance_scale=7.5,
)

# Persist each result with a 1-based index in the filename.
for index, batch_image in enumerate(images, start=1):
    filename = f"example_3_batch_{index}.png"
    batch_image.save(filename)
    print(f" ✓ Saved {filename}")
print()
# ============================================================================
# Example 4: Upload Your Trained Model to Hugging Face
# ============================================================================
print("=" * 70)
print("Example 4: Upload trained model to Hugging Face Hub")
print("=" * 70)
import os

# After training your model:
#   python train.py --config config.yaml --train_data dataset

# Load the locally trained checkpoint (same interface as the Hub loader).
trained_generator = ByteDreamGenerator(
    model_path="./models/bytedream",
    config_path="config.yaml",
    device="cpu",
)

# Never commit real credentials: read the token from the environment.
# Create one at https://huggingface.co/settings/tokens and export HF_TOKEN.
# The placeholder fallback keeps the example runnable as documentation.
hf_token = os.environ.get("HF_TOKEN", "hf_xxxxxxxxxxxxx")

# Push weights + config to the Hub under your namespace.
trained_generator.push_to_hub(
    repo_id="your_username/ByteDream",  # Replace with your username
    token=hf_token,
    private=False,  # Set True for private model
    commit_message="Upload Byte Dream model v1.0",
)
print("✓ Model uploaded to Hugging Face!\n")
# ============================================================================
# Example 5: Deploy to Hugging Face Spaces
# ============================================================================
print("=" * 70)
print("Example 5: Deploy interactive web app to Hugging Face Spaces")
print("=" * 70)

# Run this command in terminal:
#   python deploy_to_spaces.py --repo_id your_username/ByteDream-Space
# Or programmatically:
from huggingface_hub import create_repo, HfApi

api = HfApi()

# Create the Space. NOTE: the keyword is `space_sdk`, not `sdk` —
# `create_repo` has no `sdk` parameter and would raise TypeError.
create_repo(
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    space_sdk="gradio",
    token=hf_token,
    exist_ok=True,  # Don't fail if the Space already exists
)

# Upload the project files, skipping VCS internals and generated artifacts.
api.upload_folder(
    folder_path=".",
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    token=hf_token,
    ignore_patterns=["*.git/*", "outputs/*", "logs/*"],
)
print("✓ Space deployed! Visit: https://huggingface.co/spaces/your_username/ByteDream-Space\n")
# ============================================================================
# Example 6: Advanced API Usage with Custom Parameters
# ============================================================================
divider = "=" * 70
print(divider)
print("Example 6: Advanced generation with custom parameters")
print(divider)

from bytedream import ByteDreamHFClient
import torch

# Same client class as Example 2, but running inference locally on CPU.
client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    use_api=False,  # Run locally
    device="cpu",
)

# Sweep over a few square resolutions with a random seed per image.
for side_pair in [(256, 256), (512, 512), (768, 768)]:
    width, height = side_pair
    print(f"\nGenerating {width}x{height} image...")
    random_seed = torch.randint(0, 1000000, (1,)).item()
    img = client.generate(
        prompt="Abstract geometric patterns, colorful, modern art",
        width=width,
        height=height,
        num_inference_steps=30,  # Fewer steps for faster generation
        guidance_scale=9.0,  # Higher guidance for more detail
        seed=random_seed,
    )
    img.save(f"example_6_{width}x{height}.png")
    print(f" ✓ Saved example_6_{width}x{height}.png")
# ============================================================================
# Example 7: Compare Local vs API Inference
# ============================================================================
divider = "=" * 70
print("\n" + divider)
print("Example 7: Compare local inference vs cloud API")
print(divider)

import time

prompt = "Serene lake surrounded by pine trees, mountain reflection, sunset"

# --- Local inference timing (uses the generator from Example 1) ---
print("\n⏱️ Testing LOCAL inference...")
start_local = time.time()
img_local = generator.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_local = time.time() - start_local
img_local.save("comparison_local.png")
print(f"Local: {time_local:.2f}s")

# --- Cloud API timing (uses the api_client from Example 2) ---
print("\n⏱️ Testing CLOUD API inference...")
start_api = time.time()
img_api = api_client.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_api = time.time() - start_api
img_api.save("comparison_api.png")
print(f"API: {time_api:.2f}s")

# Summarize which backend finished first.
winner = "API" if time_api < time_local else "Local"
print(f"\nSpeed comparison:")
print(f" Local: {time_local:.2f}s (CPU)")
print(f" API: {time_api:.2f}s (Cloud GPU/CPU)")
print(f" Winner: {winner} 🏆")

print("\n" + divider)
print("All examples completed successfully! 🎉")
print(divider)
print("\nNext steps:")
print("1. Train your own model: python train.py")
print("2. Upload to HF: generator.push_to_hub(repo_id='username/Model')")
print("3. Deploy to Spaces: python deploy_to_spaces.py")
print("4. Share with the community!")