Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,28 +7,48 @@ from PIL import Image
|
|
| 7 |
import tempfile
|
| 8 |
import os
|
| 9 |
import io
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
def generate_waveform_video(audio_file, image_file):
|
| 12 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
# 1. Cargar audio
|
|
|
|
| 14 |
y, sr = librosa.load(audio_file)
|
| 15 |
duration = librosa.get_duration(y=y, sr=sr)
|
|
|
|
| 16 |
|
| 17 |
-
# 2.
|
| 18 |
-
|
|
|
|
| 19 |
img_array = np.array(img)
|
| 20 |
img_clip = mp.ImageClip(img_array).set_duration(duration)
|
| 21 |
img_w, img_h = img_clip.size
|
|
|
|
| 22 |
|
| 23 |
# 3. Crear efecto de waveform
|
|
|
|
| 24 |
audio_envelope = np.abs(y)
|
| 25 |
audio_envelope = (audio_envelope / np.max(audio_envelope)) * (img_h // 3)
|
| 26 |
|
| 27 |
def make_frame(t):
|
| 28 |
fig, ax = plt.subplots(figsize=(img_w/100, img_h/100), dpi=100)
|
| 29 |
-
fig.patch.set_facecolor('black')
|
| 30 |
ax.set_facecolor('black')
|
| 31 |
-
|
| 32 |
ax.set_xlim(0, duration)
|
| 33 |
ax.set_ylim(-img_h//2, img_h//2)
|
| 34 |
ax.axis('off')
|
|
@@ -48,23 +68,32 @@ def generate_waveform_video(audio_file, image_file):
|
|
| 48 |
transparent=False, facecolor='black')
|
| 49 |
plt.close(fig)
|
| 50 |
|
| 51 |
-
# Forzar conversión a RGB
|
| 52 |
img_frame = Image.open(buf).convert('RGB')
|
| 53 |
return np.array(img_frame)
|
| 54 |
|
| 55 |
-
#
|
|
|
|
| 56 |
effect_clip = mp.VideoClip(make_frame, duration=duration).set_fps(24)
|
| 57 |
final_clip = mp.CompositeVideoClip([img_clip, effect_clip.set_pos("center")])
|
| 58 |
final_clip = final_clip.set_audio(mp.AudioFileClip(audio_file))
|
|
|
|
| 59 |
|
| 60 |
-
# Guardar
|
|
|
|
| 61 |
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
|
| 62 |
-
final_clip.write_videofile(
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
return tmpfile.name
|
| 65 |
|
| 66 |
except Exception as e:
|
| 67 |
-
|
|
|
|
| 68 |
|
| 69 |
# Interfaz Gradio
|
| 70 |
iface = gr.Interface(
|
|
@@ -79,4 +108,5 @@ iface = gr.Interface(
|
|
| 79 |
)
|
| 80 |
|
| 81 |
if __name__ == "__main__":
|
|
|
|
| 82 |
iface.queue().launch()
|
|
|
|
| 7 |
import tempfile
|
| 8 |
import os
|
| 9 |
import io
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
# Configuraci贸n de logging
|
| 13 |
+
logging.basicConfig(
|
| 14 |
+
level=logging.INFO,
|
| 15 |
+
format='%(asctime)s - %(levelname)s - %(message)s',
|
| 16 |
+
handlers=[
|
| 17 |
+
logging.FileHandler("app.log"), # Guardar logs en archivo
|
| 18 |
+
logging.StreamHandler() # Mostrar logs en consola
|
| 19 |
+
]
|
| 20 |
+
)
|
| 21 |
+
logger = logging.getLogger("audio_to_video")
|
| 22 |
|
| 23 |
def generate_waveform_video(audio_file, image_file):
|
| 24 |
try:
|
| 25 |
+
logger.info("------ Nueva solicitud de generación ------")
|
| 26 |
+
logger.info(f"Archivo de audio recibido: {audio_file}")
|
| 27 |
+
logger.info(f"Archivo de imagen recibido: {image_file}")
|
| 28 |
+
|
| 29 |
# 1. Cargar audio
|
| 30 |
+
logger.info("Cargando archivo de audio...")
|
| 31 |
y, sr = librosa.load(audio_file)
|
| 32 |
duration = librosa.get_duration(y=y, sr=sr)
|
| 33 |
+
logger.info(f"Duración del audio: {duration:.2f} segundos")
|
| 34 |
|
| 35 |
+
# 2. Procesar imagen
|
| 36 |
+
logger.info("Cargando imagen y preparando formato...")
|
| 37 |
+
img = Image.open(image_file).convert('RGB')
|
| 38 |
img_array = np.array(img)
|
| 39 |
img_clip = mp.ImageClip(img_array).set_duration(duration)
|
| 40 |
img_w, img_h = img_clip.size
|
| 41 |
+
logger.info(f"Resolución de imagen: {img_w}x{img_h}")
|
| 42 |
|
| 43 |
# 3. Crear efecto de waveform
|
| 44 |
+
logger.info("Generando efecto de onda de audio...")
|
| 45 |
audio_envelope = np.abs(y)
|
| 46 |
audio_envelope = (audio_envelope / np.max(audio_envelope)) * (img_h // 3)
|
| 47 |
|
| 48 |
def make_frame(t):
|
| 49 |
fig, ax = plt.subplots(figsize=(img_w/100, img_h/100), dpi=100)
|
| 50 |
+
fig.patch.set_facecolor('black')
|
| 51 |
ax.set_facecolor('black')
|
|
|
|
| 52 |
ax.set_xlim(0, duration)
|
| 53 |
ax.set_ylim(-img_h//2, img_h//2)
|
| 54 |
ax.axis('off')
|
|
|
|
| 68 |
transparent=False, facecolor='black')
|
| 69 |
plt.close(fig)
|
| 70 |
|
|
|
|
| 71 |
img_frame = Image.open(buf).convert('RGB')
|
| 72 |
return np.array(img_frame)
|
| 73 |
|
| 74 |
+
# 4. Componer video
|
| 75 |
+
logger.info("Creando clip de video...")
|
| 76 |
effect_clip = mp.VideoClip(make_frame, duration=duration).set_fps(24)
|
| 77 |
final_clip = mp.CompositeVideoClip([img_clip, effect_clip.set_pos("center")])
|
| 78 |
final_clip = final_clip.set_audio(mp.AudioFileClip(audio_file))
|
| 79 |
+
logger.info("Video compuesto exitosamente")
|
| 80 |
|
| 81 |
+
# 5. Guardar video
|
| 82 |
+
logger.info("Exportando video final...")
|
| 83 |
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
|
| 84 |
+
final_clip.write_videofile(
|
| 85 |
+
tmpfile.name,
|
| 86 |
+
fps=24,
|
| 87 |
+
codec="libx264",
|
| 88 |
+
audio_codec="aac",
|
| 89 |
+
logger=None
|
| 90 |
+
)
|
| 91 |
+
logger.info(f"Video guardado temporalmente en: {tmpfile.name}")
|
| 92 |
return tmpfile.name
|
| 93 |
|
| 94 |
except Exception as e:
|
| 95 |
+
logger.error(f"Error crítico durante la generación: {str(e)}", exc_info=True)
|
| 96 |
+
raise Exception(f"Error: {str(e)} - Consulta el archivo app.log para más detalles")
|
| 97 |
|
| 98 |
# Interfaz Gradio
|
| 99 |
iface = gr.Interface(
|
|
|
|
| 108 |
)
|
| 109 |
|
| 110 |
if __name__ == "__main__":
    # Entry point: queue() enables request queuing so long-running video
    # renders don't time out concurrent Gradio requests.
    # Log string repaired: source had mojibake "aplicaci贸n" (UTF-8 read as GBK).
    logger.info("Iniciando aplicación Gradio...")
    iface.queue().launch()
|