# ai-meme-generator / LLMPipeline.py
# --- Legacy approach 1: OpenAI Chat Completions API (kept for reference) ---
# import requests
# import os
# API_KEY = os.getenv("OPENAI_API_KEY")
# def summarize_headlines(headlines):
#     prompt = "Summarize these headlines in a humorous tone:\n" + "\n".join(headlines)
#     body = {
#         "model": "gpt-4o-mini",
#         "messages": [{"role": "user", "content": prompt}]
#     }
#     r = requests.post(
#         "https://api.openai.com/v1/chat/completions",
#         json=body,
#         headers={"Authorization": f"Bearer {API_KEY}"}
#     )
#     # Chat Completions responses nest the text under choices[0].message.content
#     return r.json()["choices"][0]["message"]["content"]
# def generate_image_prompt(summary):
#     prompt = f"""
#     Create a prompt for a meme-style image about this news summary:
#     {summary}
#     The prompt must be short, visual and funny.
#     """
#     body = {
#         "model": "gpt-4o-mini",
#         "messages": [{"role": "user", "content": prompt}]
#     }
#     r = requests.post(
#         "https://api.openai.com/v1/chat/completions",
#         json=body,
#         headers={"Authorization": f"Bearer {API_KEY}"}
#     )
#     return r.json()["choices"][0]["message"]["content"]
# --- Legacy approach 2: local Hugging Face pipeline (kept for reference) ---
# from transformers import AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoTokenizer, pipeline
# Candidate checkpoints tried: google/flan-t5-small, bigscience/bloom,
# meta-llama/Meta-Llama-3-8B, deepseek-ai/DeepSeek-R1-Distill-Qwen-7B,
# facebook/bart-large-cnn, tiiuae/falcon-7b-instruct.
# model_name = "google/flan-t5-small"
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForSeq2SeqLM.from_pretrained(  # AutoModelForCausalLM for decoder-only checkpoints
#     model_name,
#     device_map="auto",
#     torch_dtype="auto",
# )
# Note: the "text-generation" task targets causal models; a seq2seq checkpoint
# like flan-t5 pairs with "text2text-generation" instead.
# text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)
# def summarize_headlines(headlines):
#     """Rewrite the headlines in a humorous way."""
#     prompt = "Rewrite: " + "\n".join(headlines) + "\nHumorous version. Short. Just one phrase."
#     result = text_generator(prompt, max_new_tokens=50, do_sample=True, temperature=0.7, return_full_text=False)
#     return result[0]['generated_text'].strip()
# def generate_image_prompt_viejo(summary):
#     """Generate a short, visual prompt for a meme-style image generator."""
#     prompt = f"Create a prompt for a meme-style image about this news headline:\n{summary}\nThe prompt must be short, visual and funny. Return only the prompt."
#     result = text_generator(prompt, max_new_tokens=400, do_sample=True, temperature=0.7)[0]['generated_text']
#     return result.strip()
# from transformers import T5Tokenizer, T5ForConditionalGeneration
# tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")
# def generate_image_prompt_viejo2(summary):
#     input_text = f"You are a creative meme prompt generator.\nYour task is to transform a news headline into a funny, clever, and original meme idea. RULES: Do NOT repeat or restate the headline. Do NOT quote the headline. Create a NEW, original meme prompt. Describe it visually. Make it humorous, exaggerated, or ironic. Include the characters, situation, mood, and visual setup for the meme. Output ONLY the meme prompt. No explanations. Here is the headline: {summary[0]}. Do it now."
#     input_ids = tokenizer(input_text, return_tensors="pt").input_ids
#     outputs = model.generate(input_ids)
#     return tokenizer.decode(outputs[0], skip_special_tokens=True)
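# For reference, a hedged sketch of how the flan-t5 path above could work:
# a seq2seq checkpoint needs the "text2text-generation" task. The function
# name and prompt wording are illustrative, not part of the original pipeline;
# the pipeline is built lazily so nothing loads at import time.
def rewrite_with_flan_t5(headline):
    from transformers import pipeline
    t2t = pipeline("text2text-generation", model="google/flan-t5-small")
    result = t2t("Rewrite this headline humorously: " + headline, max_new_tokens=50)
    return result[0]["generated_text"].strip()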
#-------------------------------------------------
# tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
# model = AutoModelForSeq2SeqLM.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
device = "cpu"  # "cuda" for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# For multiple GPUs, install accelerate and use:
# model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

# Quick smoke test for the chat setup; call smoke_test() manually.
def smoke_test(question="What is the capital of France?"):
    messages = [{"role": "user", "content": question}]
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
    print(tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True))

def generate_image_prompt(summary):
    """Turn a news summary into a short, visual meme-image prompt."""
    system_prompt_rewrite = "You are a creative meme prompt generator. Create a visual prompt to generate a meme of a message. Do not return any text other than the new prompt."
    user_prompt_rewrite = "Create a visual meme prompt from the message. Keep its main point.\nThe message:"
    messages = [
        {"role": "system", "content": system_prompt_rewrite},
        {"role": "user", "content": f"{user_prompt_rewrite} {summary}"},
    ]
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(inputs, max_new_tokens=500, temperature=0.2, top_p=0.9, do_sample=True)
    # Decode only the newly generated tokens, skipping the echoed prompt and special tokens.
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
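
# Hedged sketch: a local summarize_headlines using the same SmolLM2 chat setup,
# mirroring the commented OpenAI version above. The prompt wording and sampling
# settings here are assumptions, not part of the original pipeline.
def summarize_headlines(headlines):
    messages = [{"role": "user", "content": "Summarize these headlines in one short, humorous sentence:\n" + "\n".join(headlines)}]
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(inputs, max_new_tokens=60, temperature=0.7, top_p=0.9, do_sample=True)
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True).strip()

# Minimal usage sketch with a hypothetical headline; run the module directly
# to smoke-test both steps end to end.
if __name__ == "__main__":
    sample = ["Scientists teach a parrot to file tax returns"]
    summary = summarize_headlines(sample)
    print(generate_image_prompt(summary))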