ManvithGopu1394 commited on
Commit
cd9f279
·
1 Parent(s): d3d5e18

add run video kaggle function

Browse files
Files changed (1) hide show
  1. functions/video/video_kaggle.py +171 -1
functions/video/video_kaggle.py CHANGED
@@ -1 +1,171 @@
1
- from diffusers import StableDiffusionPipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os, time
2
+ import json
3
+ import subprocess
4
+ import tempfile
5
+ import shutil
6
+
7
+
8
def _resolve_kaggle_username(credentials_path):
    """Return the Kaggle username from kaggle.json, falling back to $KAGGLE_USERNAME (or None)."""
    username = None
    if os.path.exists(credentials_path):
        try:
            with open(credentials_path, "r") as f:
                username = json.load(f).get("username")
        except (OSError, ValueError):
            # unreadable or malformed kaggle.json -> try the environment instead
            username = None
    return username or os.environ.get("KAGGLE_USERNAME")


def _build_kernel_files(kernel_dir, model_name, prompt, username):
    """Write the kernel script and kernel-metadata.json into kernel_dir; return the kernel slug."""
    script_path = os.path.join(kernel_dir, "vllama_kernel.py")
    model_str = json.dumps(model_name)  # safely quote the model string
    prompt_str = json.dumps(prompt)     # safely quote the prompt string
    # NOTE(review): the generated script calls enable_model_cpu_offload() and then
    # pipe.to('cuda') — with accelerate-based offload these conflict; confirm on Kaggle.
    # The string below is written at column 0 on purpose: it becomes a standalone
    # top-level Python script, so it must carry no leading indentation.
    script_code = f"""
import subprocess
# Install required packages inside Kaggle (quietly, without cache to speed up start)
subprocess.run(
    ["pip", "uninstall", "-y", "jax", "jaxlib", "flax"],
    stdout=subprocess.DEVNULL,
    stderr=subprocess.DEVNULL,
)
subprocess.run(['pip', 'install', '--no-cache-dir', 'diffusers[torch]==0.20.2',
    'transformers==4.33.0', 'accelerate==0.22.0', 'xformers==0.0.20','protobuf==3.20.3', 'huggingface-hub==0.25.2' , '--quiet'])
from diffusers import StableDiffusionPipeline
import torch
import numpy as np
import imageio
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
pipe = DiffusionPipeline.from_pretrained({model_str}, torch_dtype = torch.float16, variant = "fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.to('cuda')
prompt = {prompt_str}
result = pipe(prompt, num_inference_steps = 50)
frames = result.frames # this should be a list/array of individual frames

print("Number of frames:", len(frames))
print("Single frame shape:", np.array(frames[0]).shape)

def export_to_video(frames, output_path="output.mp4", fps=8):
    out = []
    for frame in frames:
        f = np.array(frame)
        # f should now be (H, W, 3)
        if f.dtype != np.uint8:
            f = (255 * np.clip(f, 0, 1)).astype(np.uint8)
        out.append(f)

    imageio.mimsave(
        output_path,
        out,
        fps=fps,
        quality=8,
        macro_block_size=1,  # avoid the resizing warning
    )
    return output_path


video_path = export_to_video(frames, "result.mp4")
print("Video saved at:", video_path)
"""
    with open(script_path, 'w') as f:
        f.write(script_code.strip() + "\n")

    # Write kernel-metadata.json for Kaggle
    kernel_slug = "vllama-" + model_name.replace('/', '-')
    # Sanitize slug to meet Kaggle requirements (alphanumeric and hyphens)
    kernel_slug = "".join(ch if ch.isalnum() or ch == '-' else '-' for ch in kernel_slug.lower())
    if len(kernel_slug) > 50:  # slug length limit (if any)
        kernel_slug = kernel_slug[:50]
    title = "vllama " + model_name.replace('/', ' ')
    metadata = {
        "id": f"{username}/{kernel_slug}",
        "title": title,
        "code_file": os.path.basename(script_path),
        "language": "python",
        "kernel_type": "script",
        "is_private": "true",
        "enable_gpu": "true",
        "enable_internet": "true",
        "dataset_sources": [],
        "competition_sources": [],
        "kernel_sources": [],
        "model_sources": []
    }
    meta_path = os.path.join(kernel_dir, "kernel-metadata.json")
    with open(meta_path, 'w') as f:
        json.dump(metadata, f, indent=2)
    return kernel_slug


def _wait_for_kernel(kernel_ref, timeout=900):
    """Poll `kaggle kernels status` until the kernel completes; return True on success."""
    start_time = time.time()
    while True:
        time.sleep(5)  # wait 5 seconds between status checks
        status_res = subprocess.run(["kaggle", "kernels", "status", kernel_ref],
                                    capture_output=True, text=True)
        status_text = (status_res.stdout or "") + (status_res.stderr or "")
        status_lower = status_text.lower()
        if "complete" in status_lower:
            print("Kaggle kernel execution completed.")
            return True
        if "error" in status_lower or "failed" in status_lower:
            print("Kaggle kernel execution failed. Please check the Kaggle notebook for errors.")
            return False
        if time.time() - start_time > timeout:  # timeout after 15 minutes by default
            print("Timed out waiting for Kaggle kernel to complete.")
            return False


def run_video_kaggle(model_name, prompt, output_dir):
    """Run a diffusers text-to-video pipeline remotely on a Kaggle GPU kernel.

    Pushes a generated script kernel to Kaggle, polls until it finishes, then
    downloads the produced ``result.mp4`` into ``output_dir``.

    Args:
        model_name: Hugging Face model id passed to ``DiffusionPipeline.from_pretrained``.
        prompt: Text prompt for the video generation.
        output_dir: Local directory where the final mp4 is saved (created if missing).

    Returns:
        Path to the downloaded video on success, or ``None`` on any failure
        (missing credentials/CLI, push failure, kernel error, timeout, or
        missing output file).
    """
    # 1. Verify Kaggle credentials and CLI availability.
    credentials_path = os.path.expanduser("~/.kaggle/kaggle.json")
    if not os.path.exists(credentials_path) and not (
        os.environ.get("KAGGLE_USERNAME") and os.environ.get("KAGGLE_KEY")
    ):
        print("Error: Kaggle API credentials not found. Please set up your kaggle API token in ~/.kaggle/kaggle.json.")
        return None

    try:
        subprocess.run(["kaggle", "--version"], stdout=subprocess.DEVNULL, check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        # FIX: check=True raises CalledProcessError on nonzero exit; catch it too
        # instead of letting it propagate out of this function.
        print("Error: Kaggle CLI is not installed. Please install it with 'pip install kaggle'.")
        return None

    username = _resolve_kaggle_username(credentials_path)
    if not username:
        print("Error: Could not determine Kaggle username from credentials.")
        return None

    # 2. Prepare a temporary directory with the kernel script and metadata
    kernel_dir = tempfile.mkdtemp(prefix="vllama_kaggle_")
    try:
        kernel_slug = _build_kernel_files(kernel_dir, model_name, prompt, username)

        # 3. Push the kernel to Kaggle and trigger execution
        print(f"Pushing Kaggle kernel (model: {model_name})...")
        result = subprocess.run(["kaggle", "kernels", "push", "-p", kernel_dir],
                                capture_output=True, text=True)
        if result.returncode != 0:
            print("Failed to push Kaggle kernel. Error output:")
            print(result.stderr or result.stdout)
            return None
        print("Kernel pushed successfully. Kaggle is running the kernel...")

        # 4. Poll Kaggle for kernel status until it finishes
        kernel_ref = f"{username}/{kernel_slug}"
        timestamp = int(time.time())  # used to make the local output filename unique
        if not _wait_for_kernel(kernel_ref):
            return None

        # 5. Download the generated video from Kaggle
        os.makedirs(output_dir, exist_ok=True)
        print(f"Downloading output to {output_dir}...")
        out_res = subprocess.run(["kaggle", "kernels", "output", kernel_ref, "-p", output_dir],
                                 capture_output=True, text=True, errors='ignore')

        # Check if the file was actually downloaded (regardless of return code)
        source_path = os.path.join(output_dir, "result.mp4")
        final_video_path = os.path.join(output_dir, f"vllama_video_output_{timestamp}.mp4")

        if os.path.exists(source_path):
            os.rename(source_path, final_video_path)
            print(f"Video successfully downloaded and saved to {final_video_path}")
        else:
            print("Error: Video file not found after download.")
            if out_res.returncode != 0:
                print("Kaggle output error:")
                print(out_res.stderr or out_res.stdout)
            # FIX: previously fell through and returned a path to a file that
            # does not exist; fail explicitly like every other error branch.
            return None
    finally:
        # Clean up the temporary kernel files
        shutil.rmtree(kernel_dir, ignore_errors=True)

    print("Returning Final video path:", final_video_path)
    return final_video_path