Tulitula committed on
Commit
d721343
·
verified ·
1 Parent(s): da0a329

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -166
app.py CHANGED
@@ -1,177 +1,63 @@
1
- import re
2
  import gradio as gr
3
- import torch
4
- from PIL import Image
5
- from transformers import (
6
- pipeline,
7
- AutoProcessor,
8
- AutoModelForVision2Seq,
9
- AutoTokenizer,
10
- AutoModelForSeq2SeqLM,
11
- )
12
-
13
- # Auto-detect CPU/GPU
14
- DEVICE = 0 if torch.cuda.is_available() else -1
15
-
16
- # Load BLIP captioning model
17
- processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
18
- blip_model = AutoModelForVision2Seq.from_pretrained("Salesforce/blip-image-captioning-large")
19
- caption_pipe = pipeline(
20
- task="image-to-text",
21
- model=blip_model,
22
- tokenizer=processor.tokenizer,
23
- image_processor=processor.image_processor,
24
- device=DEVICE,
25
- )
26
-
27
- # Load Flan-T5 for text-to-text
28
- FLAN_MODEL = "google/flan-t5-large"
29
- flan_tokenizer = AutoTokenizer.from_pretrained(FLAN_MODEL)
30
- flan_model = AutoModelForSeq2SeqLM.from_pretrained(FLAN_MODEL)
31
-
32
- category_pipe = pipeline(
33
- "text2text-generation",
34
- model=flan_model,
35
- tokenizer=flan_tokenizer,
36
- device=DEVICE,
37
- max_new_tokens=32,
38
- do_sample=True,
39
- temperature=1.0,
40
- )
41
-
42
- analysis_pipe = pipeline(
43
- "text2text-generation",
44
- model=flan_model,
45
- tokenizer=flan_tokenizer,
46
- device=DEVICE,
47
- max_new_tokens=256,
48
- do_sample=True,
49
- temperature=1.0,
50
- )
51
-
52
- # Higher temp for more variety in suggestions
53
- suggestion_pipe = pipeline(
54
- "text2text-generation",
55
- model=flan_model,
56
- tokenizer=flan_tokenizer,
57
- device=DEVICE,
58
- max_new_tokens=256,
59
- do_sample=True,
60
- temperature=1.3,
61
- )
62
-
63
- expansion_pipe = pipeline(
64
- "text2text-generation",
65
- model=flan_model,
66
- tokenizer=flan_tokenizer,
67
- device=DEVICE,
68
- max_new_tokens=128,
69
- do_sample=False,
70
- )
71
-
72
- def get_recommendations():
73
- return [
74
- "https://i.imgur.com/InC88PP.jpeg",
75
- "https://i.imgur.com/7BHfv4T.png",
76
- "https://i.imgur.com/wp3Wzc4.jpeg",
77
- "https://i.imgur.com/5e2xOA4.jpeg",
78
- "https://i.imgur.com/txjRk98.jpeg",
79
- "https://i.imgur.com/rQ4AYl0.jpeg",
80
- "https://i.imgur.com/bDzwD04.jpeg",
81
- "https://i.imgur.com/fLMngXI.jpeg",
82
- "https://i.imgur.com/nYEJzxt.png",
83
- "https://i.imgur.com/Xj92Cjv.jpeg",
84
- ]
85
-
86
- def process(image: Image):
87
- if image is None:
88
- return "", "", "", get_recommendations()
89
-
90
- # BLIP caption
91
- caption_res = caption_pipe(image, max_new_tokens=64)
92
- raw_caption = caption_res[0]["generated_text"].strip()
93
- desc = raw_caption if len(raw_caption.split()) >= 3 else expansion_pipe(f"Expand into a detailed description: {raw_caption}")[0]["generated_text"].strip()
94
-
95
- # Category
96
- cat_prompt = f"Description: {desc}\n\nProvide a concise category label for this ad (e.g. 'Food', 'Fitness'):"
97
- cat_out = category_pipe(cat_prompt)[0]["generated_text"].splitlines()[0].strip()
98
-
99
- # Five-sentence analysis
100
- ana_prompt = (
101
- f"Description: {desc}\n\n"
102
- "Write exactly five sentences explaining what this ad communicates and its emotional impact."
103
  )
104
- ana_raw = analysis_pipe(ana_prompt)[0]["generated_text"].strip()
105
- sentences = re.split(r'(?<=[.!?])\s+', ana_raw)
106
- analysis = " ".join(sentences[:5]).strip()
107
-
108
- # **KEY CHANGE**: Use analysis in suggestion prompt!
109
- sug_prompt = (
110
- f"Ad description: {desc}\n"
111
- f"Ad analysis: {analysis}\n\n"
112
- "Based on this, suggest five unique and specific improvements for this ad. Each suggestion should be one clear sentence starting with '- ' and focus on a different aspect, like message, visuals, call-to-action, color, clarity, layout, targeting, or emotional impact. Do NOT repeat suggestions."
113
  )
114
- sug_raw = suggestion_pipe(sug_prompt)[0]["generated_text"].strip()
115
- all_sugs = [line.strip() for line in sug_raw.splitlines() if line.strip().startswith("-")]
116
- unique_sugs = []
117
- seen = set()
118
- for line in all_sugs:
119
- line_clean = line.lower().strip().rstrip(".")
120
- if line_clean not in seen and len(line_clean) > 4:
121
- unique_sugs.append(line)
122
- seen.add(line_clean)
123
- if len(unique_sugs) == 5:
 
124
  break
125
-
126
- # Add defaults if needed
127
  defaults = [
128
- "- Make the main headline more eye-catching.",
129
- "- Add a clear and visible call-to-action button.",
130
- "- Use contrasting colors for better readability.",
131
- "- Highlight the unique selling point of the product.",
132
- "- Simplify the design to reduce clutter."
133
  ]
134
  for d in defaults:
135
- d_clean = d.lower().strip().rstrip(".")
136
- if len(unique_sugs) < 5 and d_clean not in seen:
137
- unique_sugs.append(d)
138
- seen.add(d_clean)
139
- suggestions = "\n".join(unique_sugs[:5])
140
-
141
- return cat_out, analysis, suggestions, get_recommendations()
142
-
143
- def main():
144
- with gr.Blocks(title="Smart Ad Analyzer") as demo:
145
- gr.Markdown("## 📢 Smart Ad Analyzer")
146
- gr.Markdown(
147
- """
148
- **Upload your ad image below and instantly get expert feedback.**
149
-
150
- This AI tool will analyze your ad and provide:
151
- - 📂 **Category** — What type of ad is this?
152
- - 📊 **In-depth Analysis** — Five detailed sentences covering message, visuals, emotional impact, and more.
153
- - 🚀 **Improvement Suggestions** — Five actionable, unique ways to make your ad better.
154
- - 📸 **Inspiration Gallery** — See other effective ads for ideas.
155
 
156
- Perfect for marketers, founders, designers, and anyone looking to boost ad performance with actionable insights!
157
- """
158
- )
159
- with gr.Row():
160
- inp = gr.Image(type='pil', label='Upload Ad Image')
161
- with gr.Column():
162
- cat_out = gr.Textbox(label='📂 Ad Category', interactive=False)
163
- ana_out = gr.Textbox(label='📊 Ad Analysis', lines=5, interactive=False)
164
- sug_out = gr.Textbox(label='🚀 Improvement Suggestions', lines=5, interactive=False)
165
- btn = gr.Button('Analyze Ad', variant='primary')
166
- gallery = gr.Gallery(label='Example Ads')
167
- btn.click(
168
- fn=process,
169
- inputs=[inp],
170
- outputs=[cat_out, ana_out, sug_out, gallery],
171
- )
172
- gr.Markdown('Made by Simon Thalmay')
173
- return demo
174
 
175
  if __name__ == "__main__":
176
- demo = main()
177
  demo.launch()
 
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load model and tokenizer at import time (weights are downloaded on first run).
# NOTE(review): Llama-3.1 is a gated HF repo — presumably the deployment
# environment has an accepted license / auth token; confirm before shipping.
MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# Uncomment the next line to run on GPU if available
# model = model.to("cuda")

11
def improve_ad(ad_analysis):
    """Generate exactly five unique, actionable suggestions to improve an ad.

    Parameters
    ----------
    ad_analysis : str
        Free-form ad description, image caption, or analysis text.

    Returns
    -------
    str
        Five suggestions, one per line, each starting with "- ". If the
        model yields fewer than five usable lines (or the input is empty),
        generic defaults pad the list.
    """
    # Generic fallbacks used to pad the answer up to five suggestions.
    defaults = [
        "- Add a clear and attractive call-to-action.",
        "- Improve the visual contrast and headline font size.",
        "- Refine the main message for clarity.",
        "- Target a more specific audience segment.",
        "- Use more emotional or persuasive language."
    ]

    # Robustness fix: don't run a full 8B-parameter generation pass on an
    # empty or whitespace-only textbox — just return the defaults.
    if not ad_analysis or not ad_analysis.strip():
        return "\n".join(defaults)

    prompt = (
        "You are an expert ad consultant. Based on the following ad description or analysis, "
        "give 5 unique, actionable suggestions to improve the ad. "
        "Each suggestion must be on a new line, start with '- ', and should NOT repeat. "
        "If possible, cover different aspects (messaging, design, call-to-action, targeting, layout, emotion, offer). "
        "\n\nAd Analysis or Description:\n"
        f"{ad_analysis}\n\n"
        "Improvement Suggestions:"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    # Uncomment the next line to run on GPU if available
    # inputs = inputs.to("cuda")
    output = model.generate(
        **inputs,
        max_new_tokens=256,
        do_sample=False,  # greedy decoding: deterministic suggestions
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    # The decoded text includes the prompt; keep only the generated tail
    # after the final "Improvement Suggestions:" marker.
    if "Improvement Suggestions:" in response:
        response = response.split("Improvement Suggestions:")[-1]

    # Collect up to five suggestion lines. Fix: dedup on a lowercased,
    # period-stripped key so case/punctuation variants of the same
    # suggestion are not repeated (exact-string matching let them through).
    suggestions = []
    seen = set()
    for line in response.splitlines():
        line = line.strip()
        key = line.lower().rstrip(".")
        if line.startswith("-") and key not in seen:
            suggestions.append(line)
            seen.add(key)
        if len(suggestions) == 5:
            break

    # If not enough, pad with generic defaults (same normalized dedup).
    for d in defaults:
        d_key = d.lower().rstrip(".")
        if len(suggestions) < 5 and d_key not in seen:
            suggestions.append(d)
            seen.add(d_key)
    return "\n".join(suggestions[:5])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
 
55
# Single-page UI: one input textbox, one output textbox, one button that
# invokes the model-backed improve_ad handler defined above.
with gr.Blocks() as demo:
    gr.Markdown("## 🦙 Llama-3.1 Ad Improvement Consultant\nPaste your ad analysis or description below, and get 5 unique suggestions to improve it, powered by Meta Llama 3.1 8B Instruct (runs 100 percent locally, no API required).")
    inp = gr.Textbox(label="Ad Analysis or Description", lines=6, placeholder="Paste your ad description, image caption, or analysis here...")
    out = gr.Textbox(label="Improvement Suggestions", lines=7)
    btn = gr.Button("Get Suggestions")
    btn.click(fn=improve_ad, inputs=inp, outputs=out)

# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()