| """
|
| Hugging Face Integration
|
| Upload and deploy Byte Dream to Hugging Face Hub and Spaces
|
| """
|
|
|
import argparse
from pathlib import Path
from typing import Optional

import yaml
|
|
|
|
|
def upload_to_huggingface(
    model_path: str,
    repo_id: str,
    token: Optional[str] = None,
    private: bool = False,
):
    """
    Upload a local model directory to the Hugging Face Hub.

    Args:
        model_path: Path to the local model directory to upload
        repo_id: Repository ID in the form "username/model-name"
        token: Hugging Face API token; None falls back to the cached login
        private: Whether to create the repository as private

    Returns:
        None. Progress and errors are reported via print(); the function
        returns early instead of raising on failure.
    """
    # Validate the local path BEFORE touching the network, so a typo'd
    # model_path does not leave an empty repository behind on the Hub.
    model_dir = Path(model_path)
    if not model_dir.exists():
        print(f"Error: Model directory {model_dir} does not exist")
        return

    # Imported lazily so the rest of the module works without
    # huggingface_hub installed (e.g. when only generating Space files).
    from huggingface_hub import HfApi, create_repo

    print("Uploading model to Hugging Face Hub...")
    print(f"Repository: {repo_id}")

    api = HfApi()

    try:
        # exist_ok=True makes this idempotent for repeated uploads.
        create_repo(
            repo_id=repo_id,
            token=token,
            private=private,
            exist_ok=True,
            repo_type="model",
        )
        print("✓ Repository created/verified")
    except Exception as e:
        print(f"Error creating repository: {e}")
        return

    print("\nUploading files...")

    try:
        api.upload_folder(
            folder_path=str(model_dir),
            repo_id=repo_id,
            token=token,
            repo_type="model",
        )
        print("✓ Model uploaded successfully!")

        print("\n📦 View your model at:")
        print(f"https://huggingface.co/{repo_id}")

    except Exception as e:
        print(f"Error uploading model: {e}")
|
|
|
|
|
def create_gradio_app():
    """Return the full source code of a Gradio app as a string.

    The returned text is written verbatim to ``app.py`` (see ``main``'s
    ``--create_space`` path) for deployment on Hugging Face Spaces. The
    generated app loads a ByteDreamGenerator on CPU and wires a Blocks UI
    (prompt boxes, size/steps/guidance sliders, seed, examples) to it,
    serving on 0.0.0.0:7860 — the port Spaces expects.
    """
    # NOTE: everything inside the triple-quoted literal below is runtime
    # data (the generated file's contents), not code executed here, so it
    # must stay valid, self-contained Python source.
    gradio_code = '''"""
Byte Dream - Gradio Web Interface
Deploy on Hugging Face Spaces
"""

import gradio as gr
from bytedream.generator import ByteDreamGenerator
import torch

# Initialize generator
print("Loading Byte Dream model...")
generator = ByteDreamGenerator(
    model_path="./models/bytedream",
    config_path="config.yaml",
    device="cpu",
)

def generate_image(
    prompt,
    negative_prompt,
    width,
    height,
    num_steps,
    guidance_scale,
    seed,
):
    """Generate image from prompt"""

    # Convert seed to None if -1
    seed_value = None if seed == -1 else seed

    try:
        # Generate image
        image = generator.generate(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt else None,
            width=int(width),
            height=int(height),
            num_inference_steps=int(num_steps),
            guidance_scale=float(guidance_scale),
            seed=seed_value,
        )

        return image, "Success!"

    except Exception as e:
        print(f"Error generating image: {e}")
        return None, f"Error: {str(e)}"


# Create Gradio interface
with gr.Blocks(title="Byte Dream - AI Image Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎨 Byte Dream - AI Image Generator

    Generate stunning images from text descriptions using advanced diffusion models.
    Optimized for CPU inference.

    **Tips for better results:**
    - Be specific and descriptive in your prompts
    - Use negative prompts to avoid unwanted elements
    - Higher steps = better quality but slower
    - Adjust guidance scale for creativity vs accuracy
    """)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Prompt")
            prompt_input = gr.Textbox(
                label="Prompt",
                placeholder="A beautiful sunset over mountains, digital art, highly detailed",
                lines=3,
            )

            negative_prompt_input = gr.Textbox(
                label="Negative Prompt (optional)",
                placeholder="ugly, blurry, low quality, distorted",
                lines=2,
            )

            with gr.Row():
                width_slider = gr.Slider(
                    minimum=256,
                    maximum=1024,
                    step=64,
                    value=512,
                    label="Width"
                )
                height_slider = gr.Slider(
                    minimum=256,
                    maximum=1024,
                    step=64,
                    value=512,
                    label="Height"
                )

            with gr.Row():
                steps_slider = gr.Slider(
                    minimum=10,
                    maximum=150,
                    step=5,
                    value=50,
                    label="Inference Steps"
                )
                guidance_slider = gr.Slider(
                    minimum=1.0,
                    maximum=20.0,
                    step=0.5,
                    value=7.5,
                    label="Guidance Scale"
                )

            seed_input = gr.Number(
                label="Seed (-1 for random)",
                value=-1,
                precision=0,
            )

            generate_btn = gr.Button("🎨 Generate Image", variant="primary", size="lg")

        with gr.Column(scale=1):
            gr.Markdown("### 🖼️ Generated Image")
            output_image = gr.Image(
                label="Generated Image",
                type="pil",
            )
            status_text = gr.Textbox(label="Status")

    # Examples
    gr.Markdown("### 💡 Example Prompts")
    gr.Examples(
        examples=[
            ["A cyberpunk city at night with neon lights, futuristic architecture, flying cars, highly detailed, digital art"],
            ["A majestic dragon breathing fire, fantasy art, dramatic lighting, epic scene"],
            ["A peaceful cottage in a meadow, flowers, sunny day, studio ghibli style"],
            ["Portrait of a warrior princess, armor, fantasy, intricate details, character design"],
            ["Underwater coral reef, tropical fish, sunlight filtering through water, photorealistic"],
        ],
        inputs=[prompt_input],
    )

    # Connect button
    generate_btn.click(
        fn=generate_image,
        inputs=[
            prompt_input,
            negative_prompt_input,
            width_slider,
            height_slider,
            steps_slider,
            guidance_slider,
            seed_input,
        ],
        outputs=[output_image, status_text],
    )

    gr.Markdown("""
    ---
    **Byte Dream** v1.0.0 | Powered by Latent Diffusion Models
    """)


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
'''

    return gradio_code
|
|
|
|
|
def create_readme_for_hf(repo_id: str):
    """Return a model-card README (markdown) for a Hugging Face repository.

    The result starts with the YAML front-matter block (license, language,
    tags) that the Hub parses for model-card metadata, followed by usage
    documentation. The model name shown in the headings is the part of
    ``repo_id`` after the last "/".

    Args:
        repo_id: Repository ID in the form "username/model-name"

    Returns:
        The complete README contents as a single string.
    """
    # The f-string below is runtime data: the only interpolations are the
    # two {repo_id.split('/')[-1]} placeholders in the title/intro.
    readme = f'''---
license: mit
language:
- en
tags:
- text-to-image
- diffusion
- generative-ai
- cpu-optimized
---

# {repo_id.split('/')[-1]}

{repo_id.split('/')[-1]} is a powerful text-to-image diffusion model optimized for CPU inference. Generate high-quality images from text prompts using advanced latent diffusion architecture.

## Features

- 🚀 **CPU Optimized**: Runs efficiently on CPU without GPU requirement
- 🎨 **High Quality**: Generates 512x512 and higher resolution images
- ⚡ **Fast Inference**: Optimized for speed with quality preservation
- 🔧 **Flexible**: Supports various sampling methods and customization
- 📦 **Easy to Use**: Simple Python API and web interface

## Installation

```bash
pip install -r requirements.txt
```

## Usage

### Python API

```python
from bytedream import ByteDreamGenerator

# Initialize generator
generator = ByteDreamGenerator()

# Generate image
image = generator.generate(
    prompt="A beautiful sunset over mountains, digital art",
    num_inference_steps=50,
    guidance_scale=7.5
)

image.save("output.png")
```

### Command Line

```bash
python infer.py --prompt "A dragon flying over castle" --output dragon.png
```

### Web Interface

```bash
python app.py
```

## Model Details

- **Architecture**: Latent Diffusion Model (UNet + VAE + Text Encoder)
- **Parameters**: ~1.2B
- **Training**: Trained on diverse image-text pairs
- **Optimization**: CPU-optimized with efficient memory usage

## Examples

Try these prompts:
- "Cyberpunk city at night, neon lights, futuristic"
- "Fantasy landscape with mountains and waterfall"
- "Portrait of a warrior, detailed armor, dramatic lighting"
- "Abstract art, colorful, geometric shapes"

## Configuration

Edit `config.yaml` to customize:
- Model architecture parameters
- Generation settings (resolution, steps, guidance)
- CPU optimization options

## License

MIT License

## Acknowledgments

Built with:
- [PyTorch](https://pytorch.org/)
- [Hugging Face Diffusers](https://github.com/huggingface/diffusers)
- [CLIP](https://openai.com/research/clip)

Enjoy creating with Byte Dream! 🎨
'''

    return readme
|
|
|
|
|
def main():
    """CLI entry point: upload the model and optionally emit Space files.

    Parses command-line arguments, uploads the model directory to the Hub,
    and — when --create_space is given — writes app.py and README_HF.md to
    the current directory and prints deployment instructions.
    """
    parser = argparse.ArgumentParser(description="Upload Byte Dream to Hugging Face")
    parser.add_argument("--model_path", type=str, default="./models/bytedream",
                        help="Path to model directory")
    parser.add_argument("--repo_id", type=str, required=True,
                        help="Repository ID (e.g., username/bytedream)")
    parser.add_argument("--token", type=str, default=None,
                        help="Hugging Face API token")
    parser.add_argument("--private", action="store_true",
                        help="Make repository private")
    parser.add_argument("--create_space", action="store_true",
                        help="Also create Gradio Space code")
    args = parser.parse_args()

    upload_to_huggingface(
        model_path=args.model_path,
        repo_id=args.repo_id,
        token=args.token,
        private=args.private,
    )

    # Space-file generation is opt-in; bail out early otherwise.
    if not args.create_space:
        return

    print("\n\nCreating Gradio Space files...")

    with open("app.py", 'w') as f:
        f.write(create_gradio_app())
    print("✓ Created app.py for Gradio Space")

    with open("README_HF.md", 'w') as f:
        f.write(create_readme_for_hf(args.repo_id))
    print("✓ Created README_HF.md")

    print("\n📋 To deploy on Hugging Face Spaces:")
    for step in (
        "1. Go to https://huggingface.co/spaces",
        "2. Click 'Create new Space'",
        "3. Choose Gradio SDK",
        "4. Upload all files",
        "5. Select CPU hardware",
        "6. Deploy!",
    ):
        print(step)
|
|
|
|
|
# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
|
|