Update LLMPipeline.py

LLMPipeline.py CHANGED (+71 -35)

@@ -1,43 +1,79 @@
-import requests
-import os
-
-API_KEY = os.getenv("OPENAI_API_KEY")
-
-def summarize_headlines(headlines):
-    prompt = "Resume estos titulares en tono humorístico:\n" + "\n".join(headlines)  # "Summarize these headlines humorously"
-
-    body = {
-        "model": "gpt-4o-mini",
-        "messages": [{"role": "user", "content": prompt}]
-    }
-
-    r = requests.post(
-        "https://api.openai.com/v1/chat/completions",
-        json=body,
-        headers={"Authorization": f"Bearer {API_KEY}"}
-    )
-    print('HOLA ISABEL')
-    print(list(r.json().values())[0])
-    return list(r.json().values())[0]['message']  # ["content"] #["choices"][0]
-
-
-def generate_image_prompt(summary):
-    prompt = f"""
-    Crea un prompt para imagen tipo meme sobre este resumen de noticias:
-    {summary}
-
-    El prompt debe ser corto, visual y gracioso.
-    """
-    # Create a prompt for a meme related to this summary. The prompt must be short, visual and funny.
-    body = {
-        "model": "gpt-4o-mini",
-        "messages": [{"role": "user", "content": prompt}]
-    }
-
-    r = requests.post(
-        "https://api.openai.com/v1/chat/completions",
-        json=body,
-        headers={"Authorization": f"Bearer {API_KEY}"}
-    )
-
-    return list(r.json().values())[0]['message']  # r.json()['message']["content"] #["choices"][0]
+# import requests
+# import os
+
+# API_KEY = os.getenv("OPENAI_API_KEY")
+
+# def summarize_headlines(headlines):
+#     prompt = "Resume estos titulares en tono humorístico:\n" + "\n".join(headlines)  # "Summarize these headlines humorously"
+
+#     body = {
+#         "model": "gpt-4o-mini",
+#         "messages": [{"role": "user", "content": prompt}]
+#     }
+
+#     r = requests.post(
+#         "https://api.openai.com/v1/chat/completions",
+#         json=body,
+#         headers={"Authorization": f"Bearer {API_KEY}"}
+#     )
+#     print('HOLA ISABEL')
+#     print(list(r.json().values())[0])
+#     return list(r.json().values())[0]['message']  # ["content"] #["choices"][0]
+
+
+# def generate_image_prompt(summary):
+#     prompt = f"""
+#     Crea un prompt para imagen tipo meme sobre este resumen de noticias:
+#     {summary}
+
+#     El prompt debe ser corto, visual y gracioso.
+#     """
+#     # Create a prompt for a meme related to this summary. The prompt must be short, visual and funny.
+#     body = {
+#         "model": "gpt-4o-mini",
+#         "messages": [{"role": "user", "content": prompt}]
+#     }
+
+#     r = requests.post(
+#         "https://api.openai.com/v1/chat/completions",
+#         json=body,
+#         headers={"Authorization": f"Bearer {API_KEY}"}
+#     )
+
+#     return list(r.json().values())[0]['message']  # r.json()['message']["content"] #["choices"][0]
+
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+# 1️⃣ Configure the model
+model_name = "tiiuae/falcon-7b-instruct"
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    device_map="auto",  # use the GPU if one is available
+    torch_dtype="auto"
+)
+
+# 2️⃣ Text-generation pipeline
+text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# ----------------------------
+# Functions
+# ----------------------------
+
+def summarize_headlines(headlines):
+    """
+    Summarize headlines in a humorous tone.
+    """
+    prompt = "Resume estos titulares de manera humorística:\n" + "\n".join(headlines)
+    result = text_generator(prompt, max_length=150, do_sample=True, temperature=0.7)[0]['generated_text']
+    return result.strip()
+
+def generate_image_prompt(summary):
+    """
+    Generate a short, visual prompt for a meme-style image generator.
+    """
+    prompt = f"Crea un prompt para imagen tipo meme sobre este resumen de noticias:\n{summary}\nEl prompt debe ser corto, visual y gracioso."
+    result = text_generator(prompt, max_length=100, do_sample=True, temperature=0.7)[0]['generated_text']
+    return result.strip()
+
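Taken as a whole, the commit swaps the remote OpenAI calls for a local Falcon-7B-Instruct pipeline. A minimal usage sketch, assuming a hypothetical caller (e.g. an app.py in the same Space, which is not part of this diff); the headline strings are placeholders:

# Hypothetical caller; illustrative only, not part of the commit.
from LLMPipeline import summarize_headlines, generate_image_prompt

headlines = [
    "El ayuntamiento aprueba un presupuesto récord",
    "La selección gana en el último minuto",
]

summary = summarize_headlines(headlines)       # humorous summary of the headlines
image_prompt = generate_image_prompt(summary)  # short meme-image prompt
print(summary)
print(image_prompt)

Note that the model loads at import time: tiiuae/falcon-7b-instruct weighs roughly 14 GB in half precision, and device_map="auto" requires the accelerate package, so importing this module on a CPU-only Space will be slow and memory-hungry.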
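One caveat on the return values in both versions. The removed requests code indexes list(r.json().values())[0]['message'], but the chat completions response nests the text under r.json()['choices'][0]['message']['content']. The new code has a parallel issue: a transformers text-generation pipeline returns the prompt concatenated with the completion in generated_text, and max_length counts the prompt tokens too, so each function hands its own prompt back to the caller. A sketch of the adjustment for summarize_headlines (return_full_text and max_new_tokens are standard pipeline parameters; the same change applies to generate_image_prompt):

def summarize_headlines(headlines):
    """Summarize headlines in a humorous tone, without echoing the prompt."""
    prompt = "Resume estos titulares de manera humorística:\n" + "\n".join(headlines)
    result = text_generator(
        prompt,
        max_new_tokens=150,      # token budget for the completion alone
        do_sample=True,
        temperature=0.7,
        return_full_text=False,  # return only the newly generated text
    )[0]["generated_text"]
    return result.strip()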