File size: 5,593 Bytes
93cdb02
 
b647fc3
93cdb02
b647fc3
93cdb02
 
b647fc3
93cdb02
 
 
 
b647fc3
93cdb02
 
 
 
 
 
 
 
b647fc3
 
93cdb02
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0f6f62
93cdb02
338c886
d65042d
93cdb02
d65042d
 
 
 
 
f38aae9
93cdb02
338c886
d65042d
b647fc3
93cdb02
e46b5cc
 
 
 
 
 
 
 
 
93cdb02
d65042d
 
 
 
 
 
 
df8ba39
d65042d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4372bca
85b6188
5187e93
e2ee65c
 
d65042d
 
 
58a7d44
5187e93
ee0dfdb
85b6188
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
# import requests
# import os

# API_KEY = os.getenv("OPENAI_API_KEY")

# def summarize_headlines(headlines):
#     prompt = "Resume estos titulares en tono humorístico:\n" + "\n".join(headlines) #"Summarize these headlines in a humorous tone"

#     body = {
#         "model": "gpt-4o-mini",
#         "messages": [{"role": "user", "content": prompt}]
#     }

#     r = requests.post(
#         "https://api.openai.com/v1/chat/completions",
#         json=body,
#         headers={"Authorization": f"Bearer {API_KEY}"}
#     )
#     print('HOLA ISABEL')
#     print(list(r.json().values())[0])
#     return list(r.json().values())[0]['message']#["content"] #["choices"][0]


# def generate_image_prompt(summary):
#     prompt = f"""
#     Crea un prompt para imagen tipo meme sobre este resumen de noticias:
#     {summary}

#     El prompt debe ser corto, visual y gracioso.
#     """
#     #Create a prompt for a meme related to this summary. The prompt must be short, visual, and funny.
#     body = {
#         "model": "gpt-4o-mini",
#         "messages": [{"role": "user", "content": prompt}]
#     }

#     r = requests.post(
#         "https://api.openai.com/v1/chat/completions",
#         json=body,
#         headers={"Authorization": f"Bearer {API_KEY}"}
#     )

#     return list(r.json().values())[0]['message']#r.json()['message']["content"] #["choices"][0]

from transformers import AutoModelForSeq2SeqLM,AutoModelForCausalLM, AutoTokenizer, pipeline

# model
# model_name = "google/flan-t5-small"#"bigscience/bloom"#"meta-llama/Meta-Llama-3-8B"#"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"#"facebook/bart-large-cnn"#"tiiuae/falcon-7b-instruct"

# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForSeq2SeqLM.from_pretrained(#AutoModelForCausalLM.from_pretrained(
#     model_name,
#     device_map="auto",    
#     torch_dtype="auto"#torch.float16
# )

#pipeline
# text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, trust_remote_code=True)


# def summarize_headlines(headlines):
#     """
#     rewrites the headlines in a humorous way
#     """
#     prompt = "Rewrite: " + "\n".join(headlines) + "Humoristic version. Short. Just one phrase"
#     result = text_generator(prompt, max_new_tokens=50, do_sample=True, temperature=0.7, return_full_text=False)
#     # print('resultadoooo: ', result)
#     result= result[0]['generated_text']
#     return result.strip()

# def generate_image_prompt_viejo(summary):
#     """
#     Genera un prompt corto y visual para un generador de imágenes tipo meme.
#     """
#     prompt = f"Crea un prompt para imagen tipo meme sobre este titular de noticias:\n{summary}\nEl prompt debe ser corto, visual y gracioso. Devuelve solo el prompt."
#     result = text_generator(prompt, max_new_tokens=400, do_sample=True, temperature=0.7)[0]['generated_text']
#     return result.strip()
# from transformers import T5Tokenizer, T5ForConditionalGeneration
## tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small")
## model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small")

# def generate_image_prompt_viejo2(summary):
#     input_text = f"You are a creative meme prompt generator. \n Your task is to transform a news headline into a funny, clever, and original meme idea. RULES: Do NOT repeat or restate the headline.  Do NOT quote the headline.  Create a NEW, original meme prompt. Describe it visually. Make it humorous, exaggerated, or ironic.  Include the characters, situation, mood, and visual setup for the meme.  Output ONLY the meme prompt. No explanations. Here is the headline: {summary[0]}. Do it now"
#     input_ids = tokenizer(input_text, return_tensors="pt").input_ids   
#     outputs = model.generate(input_ids)
#     return tokenizer.decode(outputs[0])



#-------------------------------------------------
# tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
# model = AutoModelForSeq2SeqLM.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct")
# Module-level model setup: loads the SmolLM2 instruct checkpoint once at
# import time (downloads weights on first run) for use by generate_image_prompt.
from transformers import AutoModelForCausalLM, AutoTokenizer
checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
device = "cpu" # cuda for GPU usage or "cpu" for CPU usage
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# for multiple GPUs install accelerate and do `model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto")`
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)
# Example usage kept for reference:
# messages = [{"role": "user", "content": "What is the capital of France."}]
# input_text=tokenizer.apply_chat_template(messages, tokenize=False)
# inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
# outputs = model.generate(inputs, max_new_tokens=50, temperature=0.2, top_p=0.9, do_sample=True)
# print(tokenizer.decode(outputs[0]))

def generate_image_prompt(summary):
    """Turn a news summary into a short, visual meme-image prompt.

    Uses the module-level SmolLM2 chat model (``tokenizer``/``model``/
    ``device``) to rewrite *summary* as an image-generation prompt.

    Parameters
    ----------
    summary : str
        The news summary (or headline) to convert.

    Returns
    -------
    str
        Only the newly generated prompt text, with the echoed input and
        special tokens stripped. Sampling is enabled, so output varies
        between calls.
    """
    system_prompt_rewrite = "You are a creative meme prompt generator. Create a visual prompt to generate a meme of a message. Do not return any text other than the new prompt."
    user_prompt_rewrite = "Create a visual meme prompt from the message. Keep its main point.\nThe message:"
    messages = [{"role": "system", "content": system_prompt_rewrite}, {"role": "user", "content": f"{user_prompt_rewrite} {summary}"}]
    # add_generation_prompt=True appends the assistant-turn header so the
    # model answers rather than continuing the user message.
    input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
    outputs = model.generate(inputs, max_new_tokens=500, temperature=0.2, top_p=0.9, do_sample=True)
    # Causal-LM generate() returns the prompt tokens followed by the
    # completion; slice off the prompt so only the new text is returned.
    generated_tokens = outputs[0][inputs.shape[1]:]
    return tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()