# tarot / app.py — Streamlit + Gradio tarot-reading app (Hugging Face Space, user: salomonsky)
import gradio as gr
import os, random, subprocess, streamlit as st, requests
from gtts import gTTS
# Master deck of tarot card names; each name must match an image file "<name>.png".
cartas_tarot = [
    "amantes", "carroza", "colgado", "diablo", "emperador", "emperatriz",
    "ermitaño", "estrella", "fortuna", "fuerza", "juicio", "justicia",
    "loco", "luna", "mago", "muerte", "mundo", "sacerdote", "sacerdotiza",
    "sol", "templanza", "torre",
]


def seleccionar_cartas_tarot(cantidad):
    """Draw `cantidad` distinct cards, uniformly at random, from the deck."""
    return random.sample(cartas_tarot, k=cantidad)
def generate_audio(text, output_path):
    """Synthesize Spanish speech for `text` with gTTS and save it to `output_path`."""
    tts = gTTS(text, lang='es')
    tts.save(output_path)
def generate_video(audio_path, face_image_path, video_path):
    """Run Wav2Lip inference to lip-sync `face_image_path` to `audio_path`.

    Writes the result to `video_path` and returns the subprocess exit code
    (0 on success).

    Fixes vs. the original: the command is passed as an argument list with
    shell=False, so the file paths can no longer be interpreted by a shell
    (the f-string + shell=True form was injectable and broke on paths
    containing spaces), and `sys.executable` is used instead of a bare
    "python3" so the same interpreter/venv that runs the app is used.
    """
    import sys  # local import: only needed here
    cmd = [
        sys.executable, "inference.py",
        "--checkpoint_path", "checkpoints/wav2lip_gan.pth",
        "--face", face_image_path,
        "--audio", audio_path,
        "--outfile", video_path,
        "--nosmooth",
    ]
    return subprocess.run(cmd, text=True).returncode
checkpoint_path = "checkpoints/wav2lip_gan.pth"
if not os.path.exists(checkpoint_path):
    # First run: fetch the Wav2Lip GAN checkpoint from the Hugging Face space.
    url = "https://huggingface.co/spaces/salomonsky/tarot/resolve/main/checkpoints/wav2lip_gan.pth"
    # Stream the download — the checkpoint is large, so don't buffer the whole
    # body in RAM (the original `response.content` did exactly that); a timeout
    # keeps a dead connection from hanging app start-up forever.
    response = requests.get(url, stream=True, timeout=60)
    if response.status_code == 200:
        os.makedirs(os.path.dirname(checkpoint_path), exist_ok=True)
        with open(checkpoint_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=1 << 20):
                file.write(chunk)
    else:
        # Best-effort: the app continues, generate_video will fail later.
        print("Error downloading the file.")
os.makedirs('temp', exist_ok=True)  # scratch dir for generated audio
def generate_output(name):
    """Draw four tarot cards for the user's question and render them in Streamlit.

    Parameters:
        name: the user's question text; must be non-empty.

    Returns:
        (cards, interpretation) on success, or (None, <error message>) when
        `name` is empty.
    """
    if not name:
        return None, "El campo de nombre es obligatorio."
    cartas_seleccionadas = seleccionar_cartas_tarot(4)
    # Each card name maps to an image file "<card>.png" in the working dir.
    st.image([f"{carta}.png" for carta in cartas_seleccionadas],
             caption=cartas_seleccionadas, width=100)
    # NOTE(review): the interpretation is still a placeholder; the original
    # also built an unused `prompt` string here, which has been removed.
    gpt3_output = "Replace this line with your text generation output"
    return cartas_seleccionadas, gpt3_output
# --- Streamlit UI: question input, card draw, and talking-head video. ---
name = st.text_input("Escribe tu Pregunta", value="")
if st.button("Tirar Cartas de Tarot"):
    cartas, gpt_output = generate_output(name)
    if cartas and gpt_output:
        st.success("Cartas tiradas exitosamente.")
        st.write(f"Cartas seleccionadas: {', '.join(cartas)}")
        st.write(f"Interpretación de GPT-3: {gpt_output}")
        st.write("Generando video...")
        video_dir = "videos/"
        audio_path = "temp/temp.wav"
        video_path = "videos/video.mp4"
        face_image_path = "face.jpg"
        # Fix: the output directory must exist before Wav2Lip writes into it
        # (the chat code path already creates it; this path did not).
        os.makedirs(video_dir, exist_ok=True)
        generate_audio(gpt_output, audio_path)
        return_code = generate_video(audio_path, face_image_path, video_path)
        if return_code != 0:
            st.error("Error generating the video.")
        elif os.path.isfile(video_path):
            st.video(video_path)
        else:
            # Inference exited 0 but produced no file.
            st.error("Error generating the video.")
    else:
        st.error("Error generating the cards and interpretation.")
def generate(prompt, history, temperature=0.9, max_new_tokens=4096, top_p=0.95, repetition_penalty=1.0):
    """Stream a text-generation response for `prompt`.

    Yields the accumulated output text after every streamed token. Relies on a
    module-level `client` (an HF inference client, presumably) and a
    `format_prompt` helper that are not defined in this file — TODO confirm
    where they come from.

    Fix vs. the original: the `global system_prompt_sent` declaration was
    never read or assigned in this function and has been dropped.
    """
    temperature = float(temperature)
    top_p = float(top_p)
    if temperature < 1e-2:
        temperature = 1e-2  # clamp: sampling with temperature ~0 is degenerate
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed: reproducible sampling across calls
    )
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs,
                                    stream=True, details=True, return_full_text=True)
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
def process_tarot_reading(prompt):
    """Prefix `prompt` with a description of a fresh four-card tarot draw."""
    drawn = seleccionar_cartas_tarot(4)
    listado = ', '.join(drawn)
    return f"Interpretando las cartas sobre la pregunta sobre tu lectura es: {listado}. {prompt}"
def generate_with_audio_and_video(prompt, history, temperature=0.9, max_new_tokens=4096, top_p=0.95, repetition_penalty=1.0):
    """Chat handler: build a tarot-flavoured prompt, speak it, lip-sync a video.

    Yields the audio path and then the video path for the chat transcript.
    The sampling parameters are accepted for interface parity but unused here.
    """
    audio_path = "temp/temp.wav"
    video_dir = "videos/"
    video_path = "videos/video.mp4"
    face_image_path = "face.jpg"
    prompt_with_tarot = process_tarot_reading(prompt)
    generate_audio(prompt_with_tarot, audio_path)
    os.makedirs(video_dir, exist_ok=True)
    generate_video(audio_path, face_image_path, video_path)
    yield f"Audio: {audio_path}"
    yield f"Video: {video_path}"
# Gradio chat UI wired to the tarot audio/video generator.
chatbot_widget = gr.Chatbot(
    show_label=False,
    show_share_button=False,
    show_copy_button=False,
    likeable=False,
    layout="vertical",
    height=700,
)
chat_interface = gr.ChatInterface(
    fn=generate_with_audio_and_video,
    chatbot=chatbot_widget,
    concurrency_limit=9,
    theme="soft",
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
    submit_btn="Enviar",
)
chat_interface.launch(show_api=False, server_port=8000)