claudioclimaco31 commited on
Commit
0bf4577
·
verified ·
1 Parent(s): 7983f95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +107 -139
app.py CHANGED
@@ -1,226 +1,194 @@
1
  """
2
- Generatore Video LEGGERO per CPU Basic HF Spaces
3
- Modello: zeroscope_v2_576w (\~1.3–1.8 GB in fp16)
4
- Target: \~4–9 GB RAM peak durante inferenza
 
5
  """
6
 
7
  import os
8
  import uuid
9
- import shutil
10
  import threading
11
  import queue
12
  import sqlite3
13
- from typing import Dict, List, Optional
14
- from pathlib import Path
15
-
16
  import torch
17
  import gradio as gr
18
- from PIL import Image
19
  from diffusers import DiffusionPipeline
20
  from diffusers.utils import export_to_video
21
- from moviepy.editor import VideoFileClip, concatenate_videoclips
22
-
23
- # ==============================================================================
24
- # CONFIG
25
- # ==============================================================================
26
 
27
- CACHE_DIR = "/tmp/hf_cache"
28
- os.environ["HF_HOME"] = CACHE_DIR
29
- os.makedirs(CACHE_DIR, exist_ok=True)
30
- TMP_VIDEO_DIR = "/tmp/videos"
31
- os.makedirs(TMP_VIDEO_DIR, exist_ok=True)
32
 
33
  # ==============================================================================
34
- # DATABASE semplice (persistente su disco)
35
  # ==============================================================================
 
 
 
 
36
 
37
- class DatabaseLavori:
38
- def __init__(self, db_path: str = "/tmp/lavori.db"):
39
- self.db_path = db_path
40
- self.init_db()
41
-
42
- def init_db(self):
43
- with sqlite3.connect(self.db_path) as conn:
44
  conn.execute("""
45
- CREATE TABLE IF NOT EXISTS lavori (
46
- id_lavoro TEXT PRIMARY KEY,
47
- id_utente TEXT NOT NULL,
48
  prompt TEXT NOT NULL,
49
- stato TEXT DEFAULT 'in_attesa',
50
- progresso INTEGER DEFAULT 0,
51
- percorso_video TEXT,
52
- errore TEXT
53
  )
54
  """)
55
 
56
- def aggiungi(self, id_lavoro: str, id_utente: str, prompt: str):
57
- with sqlite3.connect(self.db_path) as conn:
58
- conn.execute(
59
- "INSERT INTO lavori (id_lavoro, id_utente, prompt) VALUES (?, ?, ?)",
60
- (id_lavoro, id_utente, prompt)
61
- )
62
 
63
- def aggiorna(self, id_lavoro: str, **kwargs):
64
- with sqlite3.connect(self.db_path) as conn:
65
  sets = ", ".join(f"{k} = ?" for k in kwargs)
66
- values = list(kwargs.values()) + [id_lavoro]
67
- conn.execute(f"UPDATE lavori SET {sets} WHERE id_lavoro = ?", values)
68
 
69
- def get(self, id_lavoro: str) -> Optional[Dict]:
70
- with sqlite3.connect(self.db_path) as conn:
71
  conn.row_factory = sqlite3.Row
72
- row = conn.execute("SELECT * FROM lavori WHERE id_lavoro = ?", (id_lavoro,)).fetchone()
73
  return dict(row) if row else None
74
 
75
 
76
  # ==============================================================================
77
- # WORKER
78
  # ==============================================================================
79
-
80
- class VideoWorker(threading.Thread):
81
- def __init__(self, db: DatabaseLavori):
82
  super().__init__(daemon=True)
83
  self.db = db
84
- self.queue = queue.Queue()
85
- self.running = True
86
 
87
- print("Caricamento modello leggero zeroscope_v2_576w (fp16)...")
88
  try:
89
  self.pipe = DiffusionPipeline.from_pretrained(
90
- "cerspense/zeroscope_v2_576w",
91
  torch_dtype=torch.float16,
92
- cache_dir=CACHE_DIR,
93
  safety_checker=None,
94
  requires_safety_checker=False
95
  )
96
- # Ottimizzazioni memoria CPU
97
  self.pipe.enable_vae_slicing()
98
- self.pipe.enable_sequential_cpu_offload() # Molto importante su CPU
99
- # Alternativa se sequential Γ¨ troppo lento: self.pipe.enable_model_cpu_offload()
100
- print("Modello caricato OK")
101
  except Exception as e:
102
- print(f"Errore caricamento modello: {e}")
103
  raise
104
 
105
- def add_job(self, id_lavoro: str, prompt: str):
106
- self.queue.put((id_lavoro, prompt))
107
 
108
  def run(self):
109
- while self.running:
110
  try:
111
- id_lavoro, prompt = self.queue.get(timeout=12)
112
- print(f"β†’ Inizio lavoro {id_lavoro}")
113
-
114
- self.db.aggiorna(id_lavoro, stato="in_elaborazione", progresso=10)
115
 
116
- video_path = self._generate(id_lavoro, prompt)
117
 
118
  if video_path:
119
- self.db.aggiorna(
120
- id_lavoro,
121
- stato="completato",
122
- progresso=100,
123
- percorso_video=video_path
124
- )
125
  else:
126
- self.db.aggiorna(id_lavoro, stato="fallito", errore="Generazione fallita")
127
 
 
128
  except queue.Empty:
129
  continue
130
  except Exception as e:
131
- print(f"Errore worker: {e}")
132
 
133
- def _generate(self, id_lavoro: str, prompt: str) -> Optional[str]:
134
  try:
135
- self.db.aggiorna(id_lavoro, progresso=30)
136
 
137
- # Parametri leggeri
138
- video_frames = self.pipe(
139
  prompt,
140
- num_inference_steps=20, # meno steps = piΓΉ veloce, meno qualitΓ 
141
- height=320,
142
- width=576,
143
- num_frames=16, # solo 16 frame (\~2 secondi a 8 fps)
144
- guidance_scale=7.5
145
- ).frames[0]
146
-
147
- clip_path = os.path.join(TMP_VIDEO_DIR, f"clip_{id_lavoro}.mp4")
148
- export_to_video(video_frames, clip_path, fps=8)
149
 
150
- # Pulizia memoria esplicita
151
- torch.cuda.empty_cache() if torch.cuda.is_available() else None
152
- del video_frames
153
 
154
- return clip_path
 
155
 
 
156
  except Exception as e:
157
- print(f"Errore generazione: {e}")
158
  return None
159
 
160
 
161
  # ==============================================================================
162
- # API
163
  # ==============================================================================
164
-
165
- db = DatabaseLavori()
166
- worker = VideoWorker(db)
167
  worker.start()
168
 
169
- def crea_video(id_utente: str, prompt: str):
170
- if not id_utente.strip():
171
- return "Errore: inserisci ID utente"
172
- if not prompt.strip():
173
- return "Errore: inserisci un prompt"
174
 
175
- id_lavoro = f"vid_{uuid.uuid4().hex[:10]}"
176
- db.aggiungi(id_lavoro, id_utente, prompt)
177
- worker.add_job(id_lavoro, prompt)
178
 
179
- return f"Video in coda!\nID lavoro: **{id_lavoro}**\nControlla stato tra 3–15 minuti (CPU lenta)."
180
 
181
- def check_stato(id_lavoro: str):
182
- job = db.get(id_lavoro)
183
  if not job:
184
  return "ID non trovato"
185
 
186
- stato = job["stato"]
187
- prog = job["progresso"]
188
- video = job.get("percorso_video", "")
189
- err = job.get("errore", "")
190
-
191
- msg = f"**Stato**: {stato}\n**Progresso**: {prog}%"
192
- if video:
193
- msg += f"\n\nVideo pronto β†’ scaricalo da: {video}"
194
- if err:
195
- msg += f"\nErrore: {err}"
196
  return msg
197
 
198
 
199
  # ==============================================================================
200
- # GRADIO UI leggera
201
  # ==============================================================================
202
-
203
- with gr.Blocks(title="Video Generator – CPU Basic") as demo:
204
  gr.Markdown("""
205
- # Generatore Video LEGGERO (CPU Basic)
206
- Modello: zeroscope_v2_576w β€’ \~16 frame (\~2 sec) β€’ Lento ma funziona gratis
 
 
207
  """)
208
 
209
  with gr.Row():
210
- with gr.Column():
211
- gr.Markdown("### Crea video")
212
- utente = gr.Textbox(label="Tuo ID utente", placeholder="es. mario123")
213
- prompt = gr.Textbox(label="Prompt", lines=4, placeholder="Un cane corre felice nel parco al tramonto")
214
- btn_genera = gr.Button("Metti in coda", variant="primary")
215
- output_crea = gr.Textbox(label="Risultato")
216
-
217
- with gr.Column():
218
- gr.Markdown("### Controlla stato")
219
- id_input = gr.Textbox(label="ID lavoro")
220
- btn_check = gr.Button("Verifica")
221
- output_stato = gr.Textbox(label="Stato", lines=6)
222
-
223
- btn_genera.click(crea_video, [utente, prompt], output_crea)
224
- btn_check.click(check_stato, id_input, output_stato)
225
 
226
  demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
 
1
"""
Ultra-lightweight video generator for HF Spaces CPU Basic (2026)
- Model: zeroscope_v2_dark_30x448x256 (~1-1.5 GB fp16)
- Resolution: 256x448, 8 frames, 15 steps
- Target peak RAM: ~5-8 GB -> should survive on CPU Basic
"""

import os
import uuid
import threading
import queue
import sqlite3
import gc  # explicit collection after each job to keep resident memory low

import torch
import gradio as gr
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Redirect the Hugging Face model cache to /tmp to reduce RAM/disk pressure.
# NOTE(review): /tmp is ephemeral on Spaces — the model re-downloads on restart.
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.makedirs("/tmp/hf_cache", exist_ok=True)
# Directory where finished MP4 clips are written.
TMP_DIR = "/tmp/videos"
os.makedirs(TMP_DIR, exist_ok=True)
24
 
25
# ==============================================================================
# Minimal job database on /tmp (non-persistent across Space restarts)
# ==============================================================================
class MiniDB:
    """Tiny SQLite-backed job store shared by the UI handlers and the worker."""

    # Columns callers may set via update(); guards the dynamic SQL below.
    _COLUMNS = frozenset({"user", "prompt", "status", "progress", "video_path", "error"})

    def __init__(self, path="/tmp/jobs.db"):
        """Open (creating if necessary) the job database at *path*."""
        self.path = path
        self.init()

    def init(self):
        """Create the jobs table if it does not exist yet."""
        with sqlite3.connect(self.path) as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS jobs (
                    id TEXT PRIMARY KEY,
                    user TEXT NOT NULL,
                    prompt TEXT NOT NULL,
                    status TEXT DEFAULT 'queued',
                    progress INTEGER DEFAULT 0,
                    video_path TEXT,
                    error TEXT
                )
            """)

    def add(self, job_id, user, prompt):
        """Insert a new job in the default 'queued' state."""
        with sqlite3.connect(self.path) as conn:
            conn.execute(
                "INSERT INTO jobs (id, user, prompt) VALUES (?, ?, ?)",
                (job_id, user, prompt),
            )

    def update(self, job_id, **kwargs):
        """Update the given columns of one job.

        Column names are interpolated into the SQL text, so they are
        validated against the known schema (prevents SQL injection should a
        caller ever forward user-controlled keys); values are always bound
        as parameters. A call with no kwargs is a no-op (the previous code
        would emit invalid SQL "UPDATE jobs SET WHERE ..." and raise).
        """
        if not kwargs:
            return
        unknown = set(kwargs) - self._COLUMNS
        if unknown:
            raise ValueError(f"Unknown job column(s): {sorted(unknown)}")
        with sqlite3.connect(self.path) as conn:
            sets = ", ".join(f"{k} = ?" for k in kwargs)
            vals = list(kwargs.values()) + [job_id]
            conn.execute(f"UPDATE jobs SET {sets} WHERE id = ?", vals)

    def get(self, job_id):
        """Return the job row as a plain dict, or None if the id is unknown."""
        with sqlite3.connect(self.path) as conn:
            conn.row_factory = sqlite3.Row
            row = conn.execute("SELECT * FROM jobs WHERE id = ?", (job_id,)).fetchone()
            return dict(row) if row else None
62
 
63
 
64
# ==============================================================================
# Worker with aggressive CPU offload
# ==============================================================================
class LightWorker(threading.Thread):
    """Daemon thread that generates videos one job at a time.

    Jobs arrive via an in-process queue; status/progress is written back
    to the shared MiniDB so the Gradio handlers can poll it by job id.
    """

    def __init__(self, db: MiniDB):
        super().__init__(daemon=True)
        self.db = db
        self.q = queue.Queue()   # pending (job_id, prompt) tuples, FIFO
        self.alive = True        # flip to False to let run() exit

        print("Loading ultra-light model: zeroscope_v2_dark_30x448x256 ...")
        try:
            # NOTE(review): fp16 weights with sequential offload on a
            # CPU-only Space — many CPU ops lack float16 kernels and
            # sequential offload targets an accelerator; confirm this
            # actually runs on the intended CPU Basic hardware.
            self.pipe = DiffusionPipeline.from_pretrained(
                "cerspense/zeroscope_v2_dark_30x448x256",
                torch_dtype=torch.float16,
                safety_checker=None,
                requires_safety_checker=False
            )
            self.pipe.enable_vae_slicing()
            self.pipe.enable_sequential_cpu_offload()  # crucial for CPU Basic
            # Do NOT call .to("cuda") or .enable_model_cpu_offload() before sequential offload!
            print("Model loaded successfully (low RAM mode)")
        except Exception as e:
            print(f"Model load failed: {e}")
            raise

    def enqueue(self, job_id, prompt):
        # Hand a job to the worker thread; returns immediately.
        self.q.put((job_id, prompt))

    def run(self):
        """Main loop: pop a job, generate, persist the outcome, repeat."""
        while self.alive:
            try:
                # Timeout lets the loop re-check self.alive periodically.
                job_id, prompt = self.q.get(timeout=10)
                print(f"Processing {job_id}")
                self.db.update(job_id, status="processing", progress=10)

                video_path = self._gen(job_id, prompt)

                if video_path:
                    self.db.update(job_id, status="done", progress=100, video_path=video_path)
                else:
                    self.db.update(job_id, status="failed", error="Generation error")

                gc.collect()  # forced cleanup between jobs to curb RAM growth
            except queue.Empty:
                continue
            except Exception as e:
                # Keep the worker alive across per-job failures.
                print(f"Worker error: {e}")

    def _gen(self, job_id, prompt):
        """Run the diffusion pipeline and export an MP4; return its path or None."""
        try:
            self.db.update(job_id, progress=30)

            # Deliberately tiny settings to fit the CPU Basic RAM/time budget.
            result = self.pipe(
                prompt,
                num_inference_steps=15,
                height=256,
                width=448,
                num_frames=8,
                guidance_scale=6.0
            )

            frames = result.frames[0]
            out_path = os.path.join(TMP_DIR, f"{job_id}.mp4")
            export_to_video(frames, out_path, fps=8)

            # Drop frame tensors before returning to lower peak RSS.
            del frames, result
            gc.collect()

            return out_path
        except Exception as e:
            print(f"Gen error {job_id}: {e}")
            return None
137
 
138
 
139
# ==============================================================================
# Simple API
# ==============================================================================
# Module-level singletons: one shared job store and one worker thread.
# NOTE: LightWorker.__init__ downloads/loads the model, so importing this
# module can take minutes and raises if the model cannot be loaded.
db = MiniDB()
worker = LightWorker(db)
worker.start()
145
 
146
def start_job(user_id: str, prompt: str):
    """Validate the form input, register a new job, and hand it to the worker.

    Returns a user-facing (Italian) status message for the Gradio textbox.
    """
    # Both fields must contain something other than whitespace.
    if not all(field.strip() for field in (user_id, prompt)):
        return "Errore: inserisci ID utente e prompt validi"

    job_id = "j" + uuid.uuid4().hex[:10]  # short random, collision-unlikely id
    db.add(job_id, user_id, prompt)
    worker.enqueue(job_id, prompt)

    return f"Job in coda!\nID: **{job_id}**\nTempo stimato: 3–12 min (CPU lenta)"
155
 
156
def check_job(job_id: str):
    """Look up a job by id and format its current state for display."""
    job = db.get(job_id)
    if job is None:
        return "ID non trovato"

    pieces = [f"Stato: {job['status']}\nProgresso: {job['progress']}%"]
    video = job.get('video_path')
    if video:
        pieces.append(f"\n\nVideo pronto:\n{video}")
    error = job.get('error')
    if error:
        pieces.append(f"\nErrore: {error}")
    return "".join(pieces)
167
 
168
 
169
# ==============================================================================
# Minimal UI
# ==============================================================================
# NOTE: component creation order defines the on-page layout.
with gr.Blocks(title="Video Gen - CPU Basic Ultra Light") as demo:
    gr.Markdown("""
    # Generatore Video ULTRA LEGGERO (CPU Basic Free)
    - Modello: zeroscope_v2_dark_30x448x256
    - Video: ~1 secondo (8 frame @ 256×448)
    - Tempo: 3–12 min per video
    """)

    # Job submission inputs, side by side.
    with gr.Row():
        user = gr.Textbox(label="ID Utente", placeholder="es. test123")
        prompt_box = gr.Textbox(label="Prompt", lines=3, placeholder="Un gatto salta su un tavolo di legno")

    btn = gr.Button("Avvia (in coda)", variant="primary")
    result = gr.Textbox(label="Output", lines=5)

    # Status polling widgets.
    check_id = gr.Textbox(label="Controlla ID job")
    btn_check = gr.Button("Verifica stato")
    status_out = gr.Textbox(label="Stato", lines=5)

    # Wire the buttons to the API functions defined above.
    btn.click(start_job, [user, prompt_box], result)
    btn_check.click(check_job, check_id, status_out)

# Bind to all interfaces on the port HF Spaces expects (7860).
demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)