Update app.py
app.py
CHANGED
```diff
@@ -64,7 +64,6 @@ class SummaraTransformer(nn.Module):
 
 # --- Load the model from Hugging Face ---
 def load_summara_from_hub():
-    # Download summara.pkl from your repo
     model_path = hf_hub_download(
         repo_id="teszenofficial/summara",
         filename="summara.pkl"
```
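For orientation, the hunk only shows the top of the loader; the rest presumably unpickles a dict holding the model and tokenizer together, which is what the `data['tokenizer']` access in the next hunk suggests. A minimal sketch under that assumption (`load_summara_sketch` is a hypothetical stand-in, not the file's actual body):

```python
import pickle

from huggingface_hub import hf_hub_download

def load_summara_sketch():
    # Fetch the pickled checkpoint from the Hub (cached locally after first call).
    model_path = hf_hub_download(
        repo_id="teszenofficial/summara",
        filename="summara.pkl",
    )
    with open(model_path, "rb") as f:
        data = pickle.load(f)  # assumed layout: {'model': ..., 'tokenizer': ...}
    return data["model"], data["tokenizer"]
```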
```diff
@@ -86,34 +85,44 @@ def load_summara_from_hub():
     tokenizer = data['tokenizer']
     return model, tokenizer
 
-# --- Summarization function ---
+# --- Summarization function with adjustable length ---
 def generate_square_subsequent_mask(sz):
     return torch.triu(torch.ones(sz, sz), diagonal=1).bool()
 
 def create_padding_mask(seq, pad_idx=0):
     return (seq == pad_idx)
 
-def summarize(text):
+def summarize(text, max_words=50):
     try:
         model, tokenizer = load_summara_from_hub()
-        device = "cpu"
+        device = "cpu"
         model.to(device)
 
         src = torch.tensor([tokenizer.encode(text, add_eos=True)], device=device)
         src_padding_mask = create_padding_mask(src).to(device)
         tgt = torch.tensor([[tokenizer.word2idx["<sos>"]]], device=device)
 
+        words_generated = 0
+        max_tokens = max_words + 10  # margin for non-word tokens
+
         with torch.no_grad():
-            for _ in range(
+            for _ in range(max_tokens):
                 tgt_mask = generate_square_subsequent_mask(tgt.size(1)).to(device)
                 output = model(src, tgt, src_key_padding_mask=src_padding_mask, tgt_mask=tgt_mask)
                 next_token = output.argmax(2)[:, -1].item()
                 if next_token == tokenizer.word2idx.get("<eos>", 2):
                     break
                 tgt = torch.cat([tgt, torch.tensor([[next_token]], device=device)], dim=1)
+
+                # Count words (exclude control tokens)
+                if next_token not in [0, 1, 2]:
+                    words_generated += 1
+                    if words_generated >= max_words:
+                        break
 
         summary_ids = tgt.squeeze().cpu().tolist()
-        if not isinstance(summary_ids, list):
+        if not isinstance(summary_ids, list):
+            summary_ids = [summary_ids]
         summary = tokenizer.decode(summary_ids[1:])
         return summary if summary.strip() else "Resumen no disponible."
     except Exception as e:
```
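The substance of this hunk is the new stopping rule: greedy decoding now ends either at `<eos>` or once `max_words` non-control tokens have been emitted, with `max_tokens = max_words + 10` as a safety margin for control tokens. A self-contained sketch of that loop, with a `TinyLM` stand-in producing random logits in place of the real `SummaraTransformer` (the control-token ids 0/1/2 are the same assumption the diff makes):

```python
import torch
import torch.nn as nn

def generate_square_subsequent_mask(sz):
    # True above the diagonal marks future positions the decoder must not see.
    return torch.triu(torch.ones(sz, sz), diagonal=1).bool()

def create_padding_mask(seq, pad_idx=0):
    # True wherever the sequence holds padding.
    return (seq == pad_idx)

class TinyLM(nn.Module):
    """Stand-in for SummaraTransformer: random logits (batch, tgt_len, vocab)."""
    def __init__(self, vocab_size=20):
        super().__init__()
        self.vocab_size = vocab_size

    def forward(self, src, tgt, src_key_padding_mask=None, tgt_mask=None):
        return torch.randn(tgt.size(0), tgt.size(1), self.vocab_size)

def decode_with_word_cap(model, src, sos_idx=1, eos_idx=2, max_words=5):
    tgt = torch.tensor([[sos_idx]])
    src_padding_mask = create_padding_mask(src)
    words_generated = 0
    max_tokens = max_words + 10  # same safety margin as the diff
    with torch.no_grad():
        for _ in range(max_tokens):
            tgt_mask = generate_square_subsequent_mask(tgt.size(1))
            output = model(src, tgt, src_key_padding_mask=src_padding_mask, tgt_mask=tgt_mask)
            next_token = output.argmax(2)[:, -1].item()
            if next_token == eos_idx:
                break  # <eos> is never appended, so decode only strips <sos>
            tgt = torch.cat([tgt, torch.tensor([[next_token]])], dim=1)
            if next_token not in [0, 1, 2]:  # assumed <pad>/<sos>/<eos> ids
                words_generated += 1
                if words_generated >= max_words:
                    break
    return tgt.squeeze(0).tolist()

print(decode_with_word_cap(TinyLM(), src=torch.tensor([[3, 4, 5]])))
```

Note the ordering the diff preserves: the `<eos>` check fires before the concatenation, so the end marker never enters `tgt` and `tokenizer.decode(summary_ids[1:])` only needs to drop the leading `<sos>`; the word counter, by contrast, runs after the append, which is why the cap counts emitted words rather than loop iterations.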
```diff
@@ -121,25 +130,49 @@ def summarize(text):
 
 # --- Gradio interface ---
 with gr.Blocks(title="Summara") as demo:
-    gr.Markdown("# 🧠 Summara\n### Resumidor de texto con Transformer")
+    gr.Markdown("# 🧠 Summara\n### Resumidor de texto con Transformer entrenado desde cero")
     gr.Markdown("Modelo: [teszenofficial/summara](https://huggingface.co/teszenofficial/summara)")
 
     with gr.Row():
         with gr.Column():
-            inp = gr.Textbox(
-
+            inp = gr.Textbox(
+                label="Texto a resumir",
+                lines=10,
+                placeholder="Pega un artículo, noticia, ensayo o cualquier texto que quieras resumir..."
+            )
+            with gr.Row():
+                length_slider = gr.Slider(
+                    minimum=10,
+                    maximum=150,
+                    value=60,
+                    step=5,
+                    label="Longitud del resumen (palabras aproximadas)"
+                )
+            btn = gr.Button(" Generar Resumen ", variant="primary")
+
         with gr.Column():
-            out = gr.Textbox(
+            out = gr.Textbox(
+                label="Resumen generado",
+                lines=10,
+                interactive=False
+            )
 
-    btn.click(
+    btn.click(
+        fn=summarize,
+        inputs=[inp, length_slider],
+        outputs=out
+    )
 
     gr.Examples(
         examples=[
-            ["
-            ["El cambio climático
+            ["La inteligencia artificial está transformando múltiples industrias. En medicina, permite diagnósticos más precisos. En educación, personaliza el aprendizaje. En transporte, impulsa los vehículos autónomos. A pesar de sus beneficios, también plantea desafíos éticos y de privacidad que la sociedad debe abordar con cuidado."],
+            ["El cambio climático es uno de los mayores desafíos del siglo XXI. Sus efectos incluyen el aumento del nivel del mar, fenómenos meteorológicos extremos y pérdida de biodiversidad. Para mitigarlo, es esencial reducir las emisiones de gases de efecto invernadero, invertir en energías renovables y promover políticas ambientales sostenibles a nivel global."]
         ],
-        inputs=inp
+        inputs=inp,
+        label="Ejemplos para probar"
     )
 
+    gr.Markdown("💡 **Consejo**: Usa textos de al menos 3-4 oraciones para obtener mejores resultados.")
+
 if __name__ == "__main__":
     demo.launch()
```
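The interface change worth noting is the event wiring: `btn.click` now lists two input components, and Gradio passes their current values to the callback positionally, so the slider value lands in `summarize`'s new `max_words` parameter. A stripped-down sketch of just that mapping, with a stub in place of the model:

```python
import gradio as gr

def summarize_stub(text, max_words=50):
    # Crude stand-in: truncate to max_words instead of running the model.
    return " ".join(text.split()[: int(max_words)])

with gr.Blocks(title="Summara (wiring demo)") as demo:
    inp = gr.Textbox(label="Text to summarize", lines=4)
    length_slider = gr.Slider(minimum=10, maximum=150, value=60, step=5,
                              label="Summary length (approx. words)")
    btn = gr.Button("Summarize", variant="primary")
    out = gr.Textbox(label="Summary", interactive=False)
    # Two inputs -> two positional arguments: inp fills `text`,
    # length_slider fills `max_words`.
    btn.click(fn=summarize_stub, inputs=[inp, length_slider], outputs=out)

if __name__ == "__main__":
    demo.launch()
```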