Spaces:
Sleeping
Sleeping
Update LLMPipeline.py
Browse files- LLMPipeline.py +10 -10
LLMPipeline.py
CHANGED
|
@@ -61,21 +61,21 @@ text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, t
|
|
| 61 |
# Funciones
|
| 62 |
# ----------------------------
|
| 63 |
|
| 64 |
-
def summarize_headlines(headlines):
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
|
| 74 |
def generate_image_prompt(summary):
|
| 75 |
"""
|
| 76 |
Genera un prompt corto y visual para un generador de imágenes tipo meme.
|
| 77 |
"""
|
| 78 |
-
prompt = f"Crea un prompt para imagen tipo meme sobre este
|
| 79 |
result = text_generator(prompt, max_new_tokens=50, do_sample=True, temperature=0.7, return_full_text=False)[0]['generated_text']
|
| 80 |
return result.strip()
|
| 81 |
|
|
|
|
| 61 |
# Funciones
|
| 62 |
# ----------------------------
|
| 63 |
|
| 64 |
+
# def summarize_headlines(headlines):
|
| 65 |
+
# """
|
| 66 |
+
# Rewrites the headlines in a humorous way
|
| 67 |
+
# """
|
| 68 |
+
# prompt = "Rewrite: " + "\n".join(headlines) + "Humoristic version. Short. Just one phrase"
|
| 69 |
+
# result = text_generator(prompt, max_new_tokens=50, do_sample=True, temperature=0.7, return_full_text=False)
|
| 70 |
+
# # print('resultadoooo: ', result)
|
| 71 |
+
# result= result[0]['generated_text']
|
| 72 |
+
# return result.strip()
|
| 73 |
|
| 74 |
def generate_image_prompt(summary):
    """Generate a short, visual, meme-style image prompt from a news headline.

    Feeds a Spanish instruction containing the headline to the module-level
    ``text_generator`` pipeline and returns its completion.

    Parameters
    ----------
    summary : str
        The news headline (or summary) to turn into an image prompt.

    Returns
    -------
    str
        The generated prompt text with surrounding whitespace stripped.
    """
    # Runtime prompt string kept exactly as in the original (user-facing text).
    prompt = f"Crea un prompt para imagen tipo meme sobre este titular de noticias:\n{summary}\nEl prompt debe ser corto, visual y gracioso. Devuelve solo el prompt."
    # return_full_text=False -> only the newly generated continuation is returned.
    outputs = text_generator(
        prompt,
        max_new_tokens=50,
        do_sample=True,
        temperature=0.7,
        return_full_text=False,
    )
    generated = outputs[0]['generated_text']
    return generated.strip()
|
| 81 |
|