|
|
import gradio as gr
|
|
|
import torch
|
|
|
import main as core
|
|
|
|
|
|
|
|
|
def run_inference(input_text: str) -> str:
    """Generate a SMILES string from a textual configuration.

    Tokenizes ``input_text`` with the model/tokenizer loaded by the project
    module ``main`` (imported as ``core``), samples a single sequence, and
    post-processes the generated tokens into a SMILES string.

    Args:
        input_text: Free-form configuration text entered by the user.

    Returns:
        The generated SMILES string, or a Spanish user-facing message when
        the input is empty, the model is not loaded, or generation fails.
    """
    # Guard clauses: empty input or an unloaded model short-circuit with a
    # user-facing message instead of raising inside the Gradio handler.
    if not input_text.strip():
        return "Ingresá una configuración para generar el resultado."

    if core.model is None or core.tokenizer is None:
        return "El modelo no está cargado correctamente."

    try:
        inputs = core.tokenizer(input_text, return_tensors="pt").to(core.DEVICE)

        # Inference only: disable autograd tracking. Forward the tokenizer's
        # attention mask explicitly so generate() does not have to infer it
        # (avoids warnings and incorrect masking with padded inputs).
        with torch.no_grad():
            outputs = core.model.generate(
                inputs["input_ids"],
                attention_mask=inputs.get("attention_mask"),
                max_length=60,
                do_sample=True,
                top_p=0.95,
                temperature=0.8,
            )

        # ids -> tokens, then through the project-specific decode and
        # post-processing helpers to obtain the final SMILES text.
        tokens = core.tokenizer.convert_ids_to_tokens(outputs[0])
        tokens_string = core.decodificar_tokens(tokens)
        smiles = core.postprocesar_smiles(tokens_string)

        # postprocesar_smiles may yield an empty/blank result — guard it.
        if not smiles or not smiles.strip():
            return "No se generó ningún SMILES válido."

        return smiles

    except Exception as e:
        # UI boundary: surface the failure as text rather than crashing
        # the Gradio callback.
        return f"Error interno durante la generación: {str(e)}"
|
|
|
|
|
|
|
|
|
# Build the Gradio UI. Component creation order inside the Blocks context
# determines the on-screen layout: heading, description, input box, button,
# then the output box.
with gr.Blocks(title="MolGen.AI") as demo:



    gr.Markdown("## 🧬 MolGen.AI — Generación de moléculas")



    gr.Markdown("Escribí una configuración y generá una estructura SMILES basada en tu modelo.")



    # User input: the configuration text forwarded to run_inference.
    inp = gr.Textbox(label="Configuración", placeholder="Ej: CCO[NH2+]...", lines=3)



    btn = gr.Button("Generar", variant="primary")



    # Output: the generated SMILES (or a user-facing error message).
    out = gr.Textbox(label="SMILES generados", lines=6)



    # Wire the button to the inference function: inp -> run_inference -> out.
    btn.click(fn=run_inference, inputs=inp, outputs=out)
|
|
|
|
|
|
if __name__ == "__main__":
    import os

    # Script entry point: announce startup, then serve the Gradio app.
    print("🚀 Iniciando MolGen.AI con Gradio...")

    # Bind to all interfaces so the app is reachable inside containers;
    # the port comes from $PORT when set (e.g. on a PaaS), else 7860.
    listen_port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=listen_port)
|
|
|
|