Enzo8930302 committed on
Commit
8f3291e
·
verified ·
1 Parent(s): 165a196

Upload examples_hf_api.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. examples_hf_api.py +255 -0
examples_hf_api.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Byte Dream - Hugging Face API Examples
Complete examples for using Byte Dream with Hugging Face Hub
"""

# ============================================================================
# Example 1: Use Model from Hugging Face Hub (Download Locally)
# ============================================================================

print("=" * 70)
print("Example 1: Download and run model locally on CPU")
print("=" * 70)

from bytedream import ByteDreamGenerator

# Pull the model straight from the Hugging Face Hub and run it on the CPU.
generator = ByteDreamGenerator(
    hf_repo_id="Enzo8930302/ByteDream",  # Replace with your repo
    config_path="config.yaml",
    device="cpu",
)

# Gather the generation settings in one place, then produce a single image.
generation_kwargs = {
    "prompt": "A beautiful sunset over mountains, digital art, vibrant colors",
    "negative_prompt": "ugly, blurry, low quality, distorted",
    "width": 512,
    "height": 512,
    "num_inference_steps": 50,
    "guidance_scale": 7.5,
    "seed": 42,  # fixed seed -> reproducible output
}
image = generator.generate(**generation_kwargs)

image.save("example_1_output.png")
print("✓ Image saved to example_1_output.png\n")
# ============================================================================
# Example 2: Use Hugging Face Inference API (Cloud)
# ============================================================================

print("=" * 70)
print("Example 2: Use Hugging Face Inference API (No local computation)")
print("=" * 70)

from bytedream import ByteDreamHFClient

# Client that delegates generation to the hosted Inference API instead of
# running the model on this machine.
api_client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    token=None,  # Add your HF token here for private models
    use_api=True,  # Use cloud API instead of local inference
)

# Name the prompts up front, then issue a single cloud request.
cloud_prompt = "Futuristic city with flying cars, cyberpunk style, night"
cloud_negative = "daylight, sunny, calm"

image_api = api_client.generate(
    prompt=cloud_prompt,
    negative_prompt=cloud_negative,
    width=512,
    height=512,
    num_inference_steps=50,
    guidance_scale=7.5,
)

image_api.save("example_2_api_output.png")
print("✓ Image generated via API and saved to example_2_api_output.png\n")
# ============================================================================
# Example 3: Batch Generation
# ============================================================================

print("=" * 70)
print("Example 3: Generate multiple images in batch")
print("=" * 70)

prompts = [
    "Majestic dragon flying over medieval castle, fantasy art",
    "Peaceful Japanese garden with cherry blossoms, serene",
    "Underwater coral reef with tropical fish, vibrant colors",
    "Mountain landscape at sunrise, dramatic lighting, epic",
]

# One call yields one image per prompt, all sharing the same settings.
images = generator.generate_batch(
    prompts=prompts,
    negative_prompt="ugly, deformed, low quality",
    width=512,
    height=512,
    num_inference_steps=40,
    guidance_scale=7.5,
)

# Write each result to its own 1-based numbered file.
for index, img in enumerate(images, start=1):
    img.save(f"example_3_batch_{index}.png")
    print(f" ✓ Saved example_3_batch_{index}.png")

print()
# ============================================================================
# Example 4: Upload Your Trained Model to Hugging Face
# ============================================================================

print("=" * 70)
print("Example 4: Upload trained model to Hugging Face Hub")
print("=" * 70)

# After training your model:
# python train.py --config config.yaml --train_data dataset

# Load your trained model from the local checkpoint directory.
trained_generator = ByteDreamGenerator(
    model_path="./models/bytedream",
    config_path="config.yaml",
    device="cpu",
)

import os

# SECURITY: do not hard-code access tokens in source files. Read the token
# from the HF_TOKEN environment variable; the placeholder fallback only shows
# where the value goes. Get a token from https://huggingface.co/settings/tokens
hf_token = os.environ.get("HF_TOKEN", "hf_xxxxxxxxxxxxx")

trained_generator.push_to_hub(
    repo_id="your_username/ByteDream",  # Replace with your username
    token=hf_token,
    private=False,  # Set True for private model
    commit_message="Upload Byte Dream model v1.0",
)

print("✓ Model uploaded to Hugging Face!\n")
# ============================================================================
# Example 5: Deploy to Hugging Face Spaces
# ============================================================================

print("=" * 70)
print("Example 5: Deploy interactive web app to Hugging Face Spaces")
print("=" * 70)

# Run this command in terminal:
# python deploy_to_spaces.py --repo_id your_username/ByteDream-Space

# Or programmatically:
from huggingface_hub import create_repo, HfApi

api = HfApi()

# Create the Space if it does not already exist.
# BUG FIX: huggingface_hub's create_repo selects the Space runtime via the
# `space_sdk=` keyword (not `sdk=`); passing `sdk=` raises a TypeError.
create_repo(
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    space_sdk="gradio",
    token=hf_token,
    exist_ok=True,
)

# Upload the project files into the Space repository, skipping local junk.
api.upload_folder(
    folder_path=".",
    repo_id="your_username/ByteDream-Space",
    repo_type="space",
    token=hf_token,
    ignore_patterns=["*.git/*", "outputs/*", "logs/*"],
)

print("✓ Space deployed! Visit: https://huggingface.co/spaces/your_username/ByteDream-Space\n")
# ============================================================================
# Example 6: Advanced API Usage with Custom Parameters
# ============================================================================

print("=" * 70)
print("Example 6: Advanced generation with custom parameters")
print("=" * 70)

from bytedream import ByteDreamHFClient
import torch

# Local (non-API) client running entirely on the CPU.
client = ByteDreamHFClient(
    repo_id="Enzo8930302/ByteDream",
    use_api=False,  # Run locally
    device="cpu",
)

# Sweep over a few square output resolutions.
for side in (256, 512, 768):
    width = height = side
    print(f"\nGenerating {width}x{height} image...")

    img = client.generate(
        prompt="Abstract geometric patterns, colorful, modern art",
        width=width,
        height=height,
        num_inference_steps=30,  # Fewer steps for faster generation
        guidance_scale=9.0,  # Higher guidance for more detail
        seed=torch.randint(0, 1000000, (1,)).item(),  # fresh random seed each pass
    )

    img.save(f"example_6_{width}x{height}.png")
    print(f" ✓ Saved example_6_{width}x{height}.png")
# ============================================================================
# Example 7: Compare Local vs API Inference
# ============================================================================

print("\n" + "=" * 70)
print("Example 7: Compare local inference vs cloud API")
print("=" * 70)

import time

prompt = "Serene lake surrounded by pine trees, mountain reflection, sunset"

# --- Local inference, wall-clock timed -----------------------------------
print("\n⏱️ Testing LOCAL inference...")
start_local = time.time()
img_local = generator.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_local = time.time() - start_local
img_local.save("comparison_local.png")
print(f"Local: {time_local:.2f}s")

# --- Cloud API inference, same prompt and seed ----------------------------
print("\n⏱️ Testing CLOUD API inference...")
start_api = time.time()
img_api = api_client.generate(prompt=prompt, num_inference_steps=30, seed=123)
time_api = time.time() - start_api
img_api.save("comparison_api.png")
print(f"API: {time_api:.2f}s")

# Report both timings; ties go to Local.
winner = "API" if time_api < time_local else "Local"
print(f"\nSpeed comparison:")
print(f" Local: {time_local:.2f}s (CPU)")
print(f" API: {time_api:.2f}s (Cloud GPU/CPU)")
print(f" Winner: {winner} 🏆")
# Final summary banner and pointers to the next steps.
closing_lines = [
    "\n" + "=" * 70,
    "All examples completed successfully! 🎉",
    "=" * 70,
    "\nNext steps:",
    "1. Train your own model: python train.py",
    "2. Upload to HF: generator.push_to_hub(repo_id='username/Model')",
    "3. Deploy to Spaces: python deploy_to_spaces.py",
    "4. Share with the community!",
]
for line in closing_lines:
    print(line)