| """
|
| Byte Dream - Hugging Face API Examples
|
| Complete examples for using Byte Dream with Hugging Face Hub
|
| """
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 1: pull the model from the Hugging Face Hub and run it on the CPU.
# ---------------------------------------------------------------------------
rule = "=" * 70
print(rule)
print("Example 1: Download and run model locally on CPU")
print(rule)

from bytedream import ByteDreamGenerator

# `generator` is reused by the batch and timing examples later in this script.
generator = ByteDreamGenerator(
    hf_repo_id="Enzo8930302/ByteDream",
    config_path="config.yaml",
    device="cpu",
)

# A fixed seed keeps this example reproducible between runs.
generation_args = dict(
    prompt="A beautiful sunset over mountains, digital art, vibrant colors",
    negative_prompt="ugly, blurry, low quality, distorted",
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
    seed=42,
)
image = generator.generate(**generation_args)

image.save("example_1_output.png")
print("✓ Image saved to example_1_output.png\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 2: route generation through the hosted Inference API — no local
# computation happens on this path.
# ---------------------------------------------------------------------------
sep = "=" * 70
print(sep)
print("Example 2: Use Hugging Face Inference API (No local computation)")
print(sep)

from bytedream import ByteDreamHFClient

# token=None requests anonymous access; `api_client` is reused by Example 7.
api_client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    token=None,
    use_api=True,
)

request = dict(
    prompt="Futuristic city with flying cars, cyberpunk style, night",
    negative_prompt="daylight, sunny, calm",
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
)
image_api = api_client.generate(**request)

image_api.save("example_2_api_output.png")
print("✓ Image generated via API and saved to example_2_api_output.png\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 3: generate several images from a list of prompts in one batch call.
# ---------------------------------------------------------------------------
divider = "=" * 70
print(divider)
print("Example 3: Generate multiple images in batch")
print(divider)

batch_prompts = [
    "Majestic dragon flying over medieval castle, fantasy art",
    "Peaceful Japanese garden with cherry blossoms, serene",
    "Underwater coral reef with tropical fish, vibrant colors",
    "Mountain landscape at sunrise, dramatic lighting, epic",
]

# Reuses `generator` from Example 1; one negative prompt applies to the batch.
images = generator.generate_batch(
    prompts=batch_prompts,
    negative_prompt="ugly, deformed, low quality",
    width=512,
    height=512,
    num_inference_steps=40,
    guidance_scale=7.5,
)

# 1-based numbering in the output file names.
for index, picture in enumerate(images, start=1):
    picture.save(f"example_3_batch_{index}.png")
    print(f" ✓ Saved example_3_batch_{index}.png")

print()
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 4: push a locally trained checkpoint to the Hugging Face Hub.
# ---------------------------------------------------------------------------
print("=" * 70)
print("Example 4: Upload trained model to Hugging Face Hub")
print("=" * 70)

import os

# Load the locally trained checkpoint (as produced by train.py) rather than
# the published Hub weights used in the earlier examples.
trained_generator = ByteDreamGenerator(
    model_path="./models/bytedream",
    config_path="config.yaml",
    device="cpu",
)

# SECURITY: never hard-code credentials in source. Read the write-access token
# from the HF_TOKEN environment variable; the placeholder fallback only
# documents the expected "hf_..." format and will not authenticate.
hf_token = os.environ.get("HF_TOKEN", "hf_xxxxxxxxxxxxx")

trained_generator.push_to_hub(
    repo_id="your_username/ByteDream",
    token=hf_token,
    private=False,
    commit_message="Upload Byte Dream model v1.0",
)

print("✓ Model uploaded to Hugging Face!\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 5: create a Gradio Space and upload the current directory to it.
# ---------------------------------------------------------------------------
print("=" * 70)
print("Example 5: Deploy interactive web app to Hugging Face Spaces")
print("=" * 70)

from huggingface_hub import create_repo, HfApi

api = HfApi()

# BUG FIX: create_repo() takes the Space runtime via the `space_sdk` keyword,
# not `sdk`; the original keyword raised a TypeError before any repo existed.
# exist_ok=True makes the call idempotent across re-runs.
create_repo(
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    space_sdk="gradio",
    token=hf_token,
    exist_ok=True,
)

# Push the working directory as the Space contents, skipping local artifacts
# that should not be published.
api.upload_folder(
    folder_path=".",
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    token=hf_token,
    ignore_patterns=["*.git/*", "outputs/*", "logs/*"],
)

print("✓ Space deployed! Visit: https://huggingface.co/spaces/your_username/ByteDream-Space\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 6: local client sweeping several output resolutions, each with a
# freshly drawn random seed.
# ---------------------------------------------------------------------------
line = "=" * 70
print(line)
print("Example 6: Advanced generation with custom parameters")
print(line)

from bytedream import ByteDreamHFClient
import torch

# use_api=False runs the model locally through the client interface.
client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    use_api=False,
    device="cpu",
)

for width, height in ((256, 256), (512, 512), (768, 768)):
    print(f"\nGenerating {width}x{height} image...")

    # Random seed per resolution gives varied outputs between iterations.
    img = client.generate(
        prompt="Abstract geometric patterns, colorful, modern art",
        width=width,
        height=height,
        num_inference_steps=30,
        guidance_scale=9.0,
        seed=torch.randint(0, 1000000, (1,)).item(),
    )

    img.save(f"example_6_{width}x{height}.png")
    print(f" ✓ Saved example_6_{width}x{height}.png")
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Example 7: time the same prompt through local inference (Example 1's
# generator) and the cloud API (Example 2's client).
# ---------------------------------------------------------------------------
print("\n" + "=" * 70)
print("Example 7: Compare local inference vs cloud API")
print("=" * 70)

import time

prompt = "Serene lake surrounded by pine trees, mountain reflection, sunset"

# --- local path ---
print("\n⏱️ Testing LOCAL inference...")
start_local = time.time()
img_local = generator.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_local = time.time() - start_local
img_local.save("comparison_local.png")
print(f"Local: {time_local:.2f}s")

# --- cloud path ---
print("\n⏱️ Testing CLOUD API inference...")
start_api = time.time()
img_api = api_client.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_api = time.time() - start_api
img_api.save("comparison_api.png")
print(f"API: {time_api:.2f}s")

# Same seed and step count on both paths, so only the runtime differs.
print(f"\nSpeed comparison:")
print(f" Local: {time_local:.2f}s (CPU)")
print(f" API: {time_api:.2f}s (Cloud GPU/CPU)")
faster = "API" if time_api < time_local else "Local"
print(f" Winner: {faster} 🏆")
|
|
|
|
|
# ---------------------------------------------------------------------------
# Closing summary and pointers to the rest of the workflow.
# ---------------------------------------------------------------------------
print("\n" + "=" * 70)
print("All examples completed successfully! 🎉")
print("=" * 70)

print("\nNext steps:")
next_steps = (
    "1. Train your own model: python train.py",
    "2. Upload to HF: generator.push_to_hub(repo_id='username/Model')",
    "3. Deploy to Spaces: python deploy_to_spaces.py",
    "4. Share with the community!",
)
for step in next_steps:
    print(step)
|
|
|