sushilideaclan01 committed on
Commit
26e8ad9
·
1 Parent(s): 1afa519

Implement image regeneration feature and update API endpoints

Browse files

- Added a new API endpoint for regenerating images, allowing users to create new versions of existing ads with optional model selection.
- Introduced a frontend modal for image regeneration, enhancing user interaction and experience.
- Updated the API to list available image generation models, providing users with options for customization.
- Refactored data models and types to support the new regeneration functionality, ensuring type safety and consistency across the application.
- Enhanced the concepts framework by moving visual structure layouts to a separate file for better organization and clarity.

data/concepts.py CHANGED
@@ -10,8 +10,9 @@ import random
10
 
11
 
12
  class ConceptCategory(str, Enum):
13
- """Concept categories."""
14
- VISUAL_STRUCTURE = "visual_structure"
 
15
  UGC_NATIVE = "ugc_native"
16
  STORYTELLING = "storytelling"
17
  COMPARISON = "comparison"
@@ -23,26 +24,9 @@ class ConceptCategory(str, Enum):
23
  CTA_FOCUSED = "cta_focused"
24
 
25
 
26
- # Complete concepts framework - 100 concepts (10 per category)
 
27
  CONCEPTS = {
28
- ConceptCategory.VISUAL_STRUCTURE: {
29
- "name": "Visual Structure",
30
- "concepts": [
31
- {"key": "before_after", "name": "Before vs After", "structure": "Left=pain, Right=solution", "visual": "Split-screen transformation"},
32
- {"key": "split_screen", "name": "Split Screen", "structure": "Two halves comparison", "visual": "Vertical or horizontal split"},
33
- {"key": "checklist", "name": "Checklist / Tick Marks", "structure": "List with checkmarks", "visual": "Clean list, prominent checks"},
34
- {"key": "bold_headline", "name": "Bold Headline Image", "structure": "Headline dominates", "visual": "Large bold typography"},
35
- {"key": "text_first", "name": "Text-First Image", "structure": "Text primary, image secondary", "visual": "Clear typography hierarchy"},
36
- {"key": "minimalist", "name": "Minimalist Design", "structure": "White space, minimal elements", "visual": "Clean lines, focused"},
37
- {"key": "big_numbers", "name": "Big Numbers Visual", "structure": "Numbers dominate", "visual": "Huge numbers, bold type"},
38
- {"key": "highlight_circle", "name": "Highlight / Circle Focus", "structure": "Circle on key element", "visual": "Red circles, arrows"},
39
- {"key": "step_by_step", "name": "Step-by-Step Visual", "structure": "Step 1 → 2 → 3", "visual": "Clear flow, numbered"},
40
- {"key": "icon_based", "name": "Icon-Based Layout", "structure": "Icons represent features", "visual": "Clear icons, organized"},
41
- {"key": "grid_layout", "name": "Grid Layout", "structure": "Organized grid format", "visual": "Clean grid, easy to scan"},
42
- {"key": "timeline_visual", "name": "Timeline Visual", "structure": "Time-based progression", "visual": "Clear timeline flow"},
43
- {"key": "infographic_style", "name": "Infographic Style", "structure": "Information graphics", "visual": "Data visualization"},
44
- ]
45
- },
46
  ConceptCategory.UGC_NATIVE: {
47
  "name": "UGC & Native",
48
  "concepts": [
@@ -132,7 +116,6 @@ CONCEPTS = {
132
  "name": "Scroll-Stopping",
133
  "concepts": [
134
  {"key": "shock_headline", "name": "Shock Headline", "structure": "Shocking headline dominates", "visual": "Bold, high contrast"},
135
- {"key": "red_warning", "name": "Big Red Warning Text", "structure": "Red warning prominent", "visual": "Large red text"},
136
  {"key": "unusual_contrast", "name": "Unusual Contrast", "structure": "High contrast, unusual colors", "visual": "Stands out"},
137
  {"key": "pattern_break", "name": "Pattern Break Design", "structure": "Different from expected", "visual": "Unexpected"},
138
  {"key": "unexpected_image", "name": "Unexpected Image", "structure": "Surprising visual", "visual": "Attention-grabbing"},
@@ -250,8 +233,8 @@ def get_random_concepts(count: int = 5, diverse: bool = True) -> List[Dict[str,
250
 
251
  # Top performing concepts for initial testing
252
  TOP_CONCEPTS = [
253
- "before_after", "selfie_style", "problem_awareness",
254
- "side_by_side_table", "relatable_moment"
255
  ]
256
 
257
 
@@ -261,17 +244,19 @@ def get_top_concepts() -> List[Dict[str, Any]]:
261
 
262
 
263
  def get_compatible_concepts(angle_trigger: str) -> List[Dict[str, Any]]:
264
- """Get concepts compatible with a psychological trigger."""
265
- # Compatibility mapping
 
 
266
  compatibility = {
267
- "Fear": ["before_after", "shock_headline", "red_warning", "problem_awareness"],
268
- "Relief": ["before_after", "relief_moment", "success_moment", "turning_point"],
269
- "Greed": ["big_numbers", "price_stack", "value_breakdown", "side_by_side_table"],
270
- "FOMO": ["countdown_cta", "red_warning", "crowd", "trending"],
271
- "Social Proof": ["testimonial_screenshot", "review_stars", "real_customer", "crowd"],
272
  "Authority": ["expert_portrait", "badge_seal", "data_backed", "certification"],
273
  "Curiosity": ["shock_headline", "unexpected_image", "pattern_break", "bold_claim"],
274
- "Pride": ["success_moment", "before_after", "winner_highlight"],
275
  "Convenience": ["three_step", "how_it_works", "simple_explainer"],
276
  "Trust": ["trust_signals", "badge_seal", "real_customer", "testimonial_screenshot"],
277
  }
 
10
 
11
 
12
  class ConceptCategory(str, Enum):
13
+ """Concept categories - focused on creative HOW (storytelling, proof, etc.)
14
+ Note: Visual layouts (before/after, split screen, etc.) are now in visuals.py
15
+ """
16
  UGC_NATIVE = "ugc_native"
17
  STORYTELLING = "storytelling"
18
  COMPARISON = "comparison"
 
24
  CTA_FOCUSED = "cta_focused"
25
 
26
 
27
+ # Complete concepts framework - focused on creative HOW (not visual layouts)
28
+ # Note: Visual structure layouts (before/after, split screen, etc.) moved to visuals.py
29
  CONCEPTS = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  ConceptCategory.UGC_NATIVE: {
31
  "name": "UGC & Native",
32
  "concepts": [
 
116
  "name": "Scroll-Stopping",
117
  "concepts": [
118
  {"key": "shock_headline", "name": "Shock Headline", "structure": "Shocking headline dominates", "visual": "Bold, high contrast"},
 
119
  {"key": "unusual_contrast", "name": "Unusual Contrast", "structure": "High contrast, unusual colors", "visual": "Stands out"},
120
  {"key": "pattern_break", "name": "Pattern Break Design", "structure": "Different from expected", "visual": "Unexpected"},
121
  {"key": "unexpected_image", "name": "Unexpected Image", "structure": "Surprising visual", "visual": "Attention-grabbing"},
 
233
 
234
  # Top performing concepts for initial testing
235
  TOP_CONCEPTS = [
236
+ "selfie_style", "problem_awareness", "side_by_side_table",
237
+ "relatable_moment", "testimonial_screenshot"
238
  ]
239
 
240
 
 
244
 
245
 
246
  def get_compatible_concepts(angle_trigger: str) -> List[Dict[str, Any]]:
247
+ """Get concepts compatible with a psychological trigger.
248
+ Note: Visual layouts (before_after, split_screen, etc.) are now in visuals.py
249
+ """
250
+ # Compatibility mapping - concepts only (not visual layouts)
251
  compatibility = {
252
+ "Fear": ["shock_headline", "problem_awareness", "micro_story", "visual_tension"],
253
+ "Relief": ["relief_moment", "success_moment", "turning_point", "emotional_snapshot"],
254
+ "Greed": ["price_stack", "value_breakdown", "side_by_side_table", "ranking"],
255
+ "FOMO": ["countdown_cta", "crowd", "community", "urgent"],
256
+ "Social Proof": ["testimonial_screenshot", "real_customer", "crowd", "ugc_collage"],
257
  "Authority": ["expert_portrait", "badge_seal", "data_backed", "certification"],
258
  "Curiosity": ["shock_headline", "unexpected_image", "pattern_break", "bold_claim"],
259
+ "Pride": ["success_moment", "winner_highlight", "milestone_moment"],
260
  "Convenience": ["three_step", "how_it_works", "simple_explainer"],
261
  "Trust": ["trust_signals", "badge_seal", "real_customer", "testimonial_screenshot"],
262
  }
data/glp1.py CHANGED
@@ -191,7 +191,7 @@ STRATEGIES = {
191
  ],
192
  "visual_styles": [
193
  "relatable person matching demographic, candid shot",
194
- "real woman 40-60 in transformation moment",
195
  "busy mom in everyday setting, authentic",
196
  "person in regular clothes, not fitness model",
197
  "testimonial portrait, trustworthy face",
@@ -424,7 +424,7 @@ STRATEGIES = {
424
  "visual_styles": [
425
  "parent with children, health focus",
426
  "family moments, active lifestyle",
427
- "grandparent playing with grandkids",
428
  "health warning, medical imagery",
429
  "funeral aesthetic, mortality reminder",
430
  "empty chair at family gathering",
@@ -793,7 +793,7 @@ SUPERIORITY_WINNING_VISUALS = [
793
  CARE_PROTECTION_LOVED_ONES_VISUALS = [
794
  "parent with children, wanting to stay healthy for them",
795
  "person motivated by family love and responsibility",
796
- "elderly relative looking proud of health improvements",
797
  "family gathering, person wanting to be present",
798
  "person thinking about being there for grandchildren",
799
  "family photo showing importance of being healthy",
 
191
  ],
192
  "visual_styles": [
193
  "relatable person matching demographic, candid shot",
194
+ "real woman 25-45 in transformation moment",
195
  "busy mom in everyday setting, authentic",
196
  "person in regular clothes, not fitness model",
197
  "testimonial portrait, trustworthy face",
 
424
  "visual_styles": [
425
  "parent with children, health focus",
426
  "family moments, active lifestyle",
427
+ "parent playing with children",
428
  "health warning, medical imagery",
429
  "funeral aesthetic, mortality reminder",
430
  "empty chair at family gathering",
 
793
  CARE_PROTECTION_LOVED_ONES_VISUALS = [
794
  "parent with children, wanting to stay healthy for them",
795
  "person motivated by family love and responsibility",
796
+ "family member looking proud of health improvements",
797
  "family gathering, person wanting to be present",
798
  "person thinking about being there for grandchildren",
799
  "family photo showing importance of being healthy",
data/visuals.py CHANGED
@@ -64,6 +64,25 @@ COMPOSITIONS: List[str] = [
64
  "foreground/background separation", "depth of field focus", "repetitive patterns", "contrasting elements",
65
  ]
66
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  VISUAL_MOODS: List[str] = [
68
  "urgent and alarming", "calm and reassuring", "exciting and energetic", "trustworthy and professional",
69
  "warm and friendly", "bold and confident", "subtle and sophisticated", "raw and authentic",
@@ -85,9 +104,10 @@ NICHE_VISUAL_GUIDANCE: Dict[str, Dict[str, Any]] = {
85
  "color_preference": "trust",
86
  },
87
  "glp1": {
88
- "subjects": ["confident person smiling", "active lifestyle scenes", "healthy meal preparation", "doctor consultation"],
89
  "props": ["fitness equipment", "healthy food", "comfortable clothing"],
90
- "avoid": ["before/after weight comparisons", "measuring tapes", "scales prominently", "needle close-ups"],
 
91
  "color_preference": "health",
92
  },
93
  }
@@ -124,3 +144,18 @@ def get_random_visual_elements() -> Dict[str, Any]:
124
  "composition": get_random_composition(),
125
  "mood": get_random_mood(),
126
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  "foreground/background separation", "depth of field focus", "repetitive patterns", "contrasting elements",
65
  ]
66
 
67
# Visual Layouts/Formats - How content is structured in the image
# Each entry is a dict with:
#   "key"             - stable identifier used for lookups (see get_visual_layout_by_key)
#   "name"            - human-readable display label
#   "prompt_guidance" - text describing the layout for the image-generation prompt
VISUAL_LAYOUTS: List[Dict[str, Any]] = [
    {"key": "before_after", "name": "Before vs After", "prompt_guidance": "Split-screen transformation, left side shows problem/pain state, right side shows solution/improved state"},
    {"key": "split_screen", "name": "Split Screen", "prompt_guidance": "Two halves comparison, vertical or horizontal split dividing the image"},
    {"key": "checklist", "name": "Checklist Layout", "prompt_guidance": "List format with checkmarks, clean organized list with prominent check marks"},
    {"key": "bold_headline", "name": "Bold Headline", "prompt_guidance": "Large bold typography dominates the image, headline is the primary visual element"},
    {"key": "text_first", "name": "Text-First", "prompt_guidance": "Text is primary element, image secondary, clear typography hierarchy"},
    {"key": "minimalist", "name": "Minimalist", "prompt_guidance": "Abundant white space, minimal elements, clean lines, focused composition"},
    {"key": "big_numbers", "name": "Big Numbers", "prompt_guidance": "Large numbers dominate the visual, huge bold typography for statistics or prices"},
    {"key": "highlight_circle", "name": "Highlight Circle", "prompt_guidance": "Red circle or arrow highlighting key element, drawing attention to specific area"},
    {"key": "step_by_step", "name": "Step-by-Step", "prompt_guidance": "Numbered steps flow (1→2→3), clear visual progression"},
    {"key": "grid_layout", "name": "Grid Layout", "prompt_guidance": "Organized grid format, easy to scan, multiple elements arranged systematically"},
    {"key": "timeline", "name": "Timeline", "prompt_guidance": "Time-based progression, clear timeline flow showing sequence of events"},
    {"key": "infographic", "name": "Infographic", "prompt_guidance": "Information graphics, data visualization, charts and visual data representation"},
    {"key": "comparison_table", "name": "Comparison Table", "prompt_guidance": "Side-by-side table format with checkmarks and crosses for comparison"},
    {"key": "quote_card", "name": "Quote Card", "prompt_guidance": "Quote as main visual element, clear readable testimonial or statement"},
    {"key": "screenshot_style", "name": "Screenshot Style", "prompt_guidance": "Device screenshot format, realistic phone or computer interface"},
]
85
+
86
  VISUAL_MOODS: List[str] = [
87
  "urgent and alarming", "calm and reassuring", "exciting and energetic", "trustworthy and professional",
88
  "warm and friendly", "bold and confident", "subtle and sophisticated", "raw and authentic",
 
104
  "color_preference": "trust",
105
  },
106
  "glp1": {
107
+ "subjects": ["confident person smiling (age 30-50)", "active lifestyle scenes with adults", "healthy meal preparation", "doctor consultation", "person in their 30s-40s looking confident"],
108
  "props": ["fitness equipment", "healthy food", "comfortable clothing"],
109
+ "avoid": ["before/after weight comparisons", "measuring tapes", "scales prominently", "needle close-ups", "elderly people over 65", "senior citizens", "very old looking people", "gray-haired elderly groups"],
110
+ "age_guidance": "Show people aged 30-50 primarily. Avoid defaulting to elderly/senior citizens. Target audience is middle-aged adults, not seniors.",
111
  "color_preference": "health",
112
  },
113
  }
 
144
  "composition": get_random_composition(),
145
  "mood": get_random_mood(),
146
  }
147
+
148
def get_random_visual_layout() -> Dict[str, Any]:
    """Pick one visual layout definition uniformly at random."""
    # Equivalent to random.choice(VISUAL_LAYOUTS).
    return VISUAL_LAYOUTS[random.randrange(len(VISUAL_LAYOUTS))]
151
+
152
def get_visual_layout_by_key(key: str) -> Optional[Dict[str, Any]]:
    """Return the visual layout whose ``key`` matches, or None if absent."""
    # Lazy scan with a default instead of an explicit loop.
    return next((entry for entry in VISUAL_LAYOUTS if entry["key"] == key), None)
158
+
159
def get_all_visual_layouts() -> List[Dict[str, Any]]:
    """Return a shallow copy of all visual layouts.

    Callers may mutate the returned list freely; note the contained dicts
    are still shared with the module-level VISUAL_LAYOUTS.
    """
    return list(VISUAL_LAYOUTS)
frontend/app/gallery/[id]/page.tsx CHANGED
@@ -10,10 +10,11 @@ import { getAd, deleteAd, listAds } from "@/lib/api/endpoints";
10
  import { formatDate, formatNiche, getImageUrl, getImageUrlFallback } from "@/lib/utils/formatters";
11
  import { downloadImage, copyToClipboard, exportAsJSON } from "@/lib/utils/export";
12
  import { toast } from "react-hot-toast";
13
- import { ArrowLeft, ArrowRight, Download, Copy, Trash2, FileJson, Wand2, History, RotateCcw, ChevronDown, ChevronUp, Edit3 } from "lucide-react";
14
- import type { AdCreativeDB, ImageCorrectResponse } from "@/types/api";
15
  import { CorrectionModal } from "@/components/generation/CorrectionModal";
16
  import { EditCopyModal } from "@/components/generation/EditCopyModal";
 
17
 
18
  export default function AdDetailPage() {
19
  const params = useParams();
@@ -27,6 +28,7 @@ export default function AdDetailPage() {
27
  const [allAds, setAllAds] = useState<AdCreativeDB[]>([]);
28
  const [currentIndex, setCurrentIndex] = useState<number>(-1);
29
  const [showCorrectionModal, setShowCorrectionModal] = useState(false);
 
30
  const [showingOriginal, setShowingOriginal] = useState(false);
31
  const [isBodyStoryExpanded, setIsBodyStoryExpanded] = useState(false);
32
  const [editModal, setEditModal] = useState<{
@@ -267,6 +269,19 @@ export default function AdDetailPage() {
267
  )}
268
 
269
  <div className="flex items-center gap-1">
 
 
 
 
 
 
 
 
 
 
 
 
 
270
  <div className="relative group">
271
  <Button
272
  variant="ghost"
@@ -635,6 +650,26 @@ export default function AdDetailPage() {
635
  }}
636
  />
637
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
638
  {/* Edit Copy Modal */}
639
  {editModal && ad && (
640
  <EditCopyModal
 
10
  import { formatDate, formatNiche, getImageUrl, getImageUrlFallback } from "@/lib/utils/formatters";
11
  import { downloadImage, copyToClipboard, exportAsJSON } from "@/lib/utils/export";
12
  import { toast } from "react-hot-toast";
13
+ import { ArrowLeft, ArrowRight, Download, Copy, Trash2, FileJson, Wand2, History, RotateCcw, ChevronDown, ChevronUp, Edit3, RefreshCw } from "lucide-react";
14
+ import type { AdCreativeDB, ImageCorrectResponse, ImageRegenerateResponse } from "@/types/api";
15
  import { CorrectionModal } from "@/components/generation/CorrectionModal";
16
  import { EditCopyModal } from "@/components/generation/EditCopyModal";
17
+ import { RegenerationModal } from "@/components/generation/RegenerationModal";
18
 
19
  export default function AdDetailPage() {
20
  const params = useParams();
 
28
  const [allAds, setAllAds] = useState<AdCreativeDB[]>([]);
29
  const [currentIndex, setCurrentIndex] = useState<number>(-1);
30
  const [showCorrectionModal, setShowCorrectionModal] = useState(false);
31
+ const [showRegenerationModal, setShowRegenerationModal] = useState(false);
32
  const [showingOriginal, setShowingOriginal] = useState(false);
33
  const [isBodyStoryExpanded, setIsBodyStoryExpanded] = useState(false);
34
  const [editModal, setEditModal] = useState<{
 
269
  )}
270
 
271
  <div className="flex items-center gap-1">
272
+ <div className="relative group">
273
+ <Button
274
+ variant="ghost"
275
+ size="sm"
276
+ onClick={() => setShowRegenerationModal(true)}
277
+ className="text-purple-600 hover:bg-purple-50"
278
+ >
279
+ <RefreshCw className="h-4 w-4" />
280
+ </Button>
281
+ <span className="absolute -bottom-8 left-1/2 -translate-x-1/2 px-2 py-1 bg-gray-800 text-white text-xs rounded opacity-0 group-hover:opacity-100 transition-opacity whitespace-nowrap pointer-events-none">
282
+ Regenerate Image
283
+ </span>
284
+ </div>
285
  <div className="relative group">
286
  <Button
287
  variant="ghost"
 
650
  }}
651
  />
652
 
653
+ {/* Regeneration Modal */}
654
+ <RegenerationModal
655
+ isOpen={showRegenerationModal}
656
+ onClose={() => setShowRegenerationModal(false)}
657
+ adId={adId}
658
+ ad={ad}
659
+ onSuccess={async (result: ImageRegenerateResponse) => {
660
+ // Reload the ad to get updated image and metadata
661
+ if (result.status === "success") {
662
+ toast.success("Image regenerated successfully!");
663
+ // Wait a moment for database to update, then reload
664
+ setTimeout(async () => {
665
+ await loadAd(); // Reload to get updated ad with new image and metadata
666
+ loadAllAds(); // Reload gallery list
667
+ setShowingOriginal(false); // Reset to show current image
668
+ }, 500);
669
+ }
670
+ }}
671
+ />
672
+
673
  {/* Edit Copy Modal */}
674
  {editModal && ad && (
675
  <EditCopyModal
frontend/components/generation/RegenerationModal.tsx ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "use client";
2
+
3
+ import React, { useState, useEffect } from "react";
4
+ import { X, RefreshCw, CheckCircle2, AlertCircle, Loader2, Sparkles, ChevronDown } from "lucide-react";
5
+ import { regenerateImage, getImageModels } from "@/lib/api/endpoints";
6
+ import type { ImageRegenerateResponse, AdCreativeDB, ImageModel } from "@/types/api";
7
+ import { ProgressBar } from "@/components/ui/ProgressBar";
8
+ import { Button } from "@/components/ui/Button";
9
+ import { Card, CardContent } from "@/components/ui/Card";
10
+
11
+ interface RegenerationModalProps {
12
+ isOpen: boolean;
13
+ onClose: () => void;
14
+ adId: string;
15
+ ad?: AdCreativeDB | null;
16
+ onSuccess?: (result: ImageRegenerateResponse) => void;
17
+ }
18
+
19
+ type RegenerationStep = "idle" | "input" | "regenerating" | "complete" | "error";
20
+
21
+ export const RegenerationModal: React.FC<RegenerationModalProps> = ({
22
+ isOpen,
23
+ onClose,
24
+ adId,
25
+ ad,
26
+ onSuccess,
27
+ }) => {
28
+ const [step, setStep] = useState<RegenerationStep>("idle");
29
+ const [progress, setProgress] = useState(0);
30
+ const [result, setResult] = useState<ImageRegenerateResponse | null>(null);
31
+ const [error, setError] = useState<string | null>(null);
32
+ const [selectedModel, setSelectedModel] = useState<string | null>(null);
33
+ const [models, setModels] = useState<ImageModel[]>([]);
34
+ const [defaultModel, setDefaultModel] = useState<string>("");
35
+ const [loadingModels, setLoadingModels] = useState(false);
36
+
37
+ useEffect(() => {
38
+ if (isOpen) {
39
+ setStep("input");
40
+ setProgress(0);
41
+ setResult(null);
42
+ setError(null);
43
+ // Load available models
44
+ loadModels();
45
+ } else {
46
+ // Reset state when modal closes
47
+ setStep("idle");
48
+ setProgress(0);
49
+ setResult(null);
50
+ setError(null);
51
+ setSelectedModel(null);
52
+ }
53
+ }, [isOpen]);
54
+
55
+ const loadModels = async () => {
56
+ setLoadingModels(true);
57
+ try {
58
+ const response = await getImageModels();
59
+ setModels(response.models);
60
+ setDefaultModel(response.default);
61
+ // Set initial selection to current ad's model or default
62
+ setSelectedModel(ad?.image_model || response.default);
63
+ } catch (err: any) {
64
+ console.error("Failed to load models:", err);
65
+ // Use fallback models if API fails
66
+ setModels([
67
+ { key: "nano-banana", id: "google/nano-banana", uses_dimensions: false },
68
+ { key: "nano-banana-pro", id: "google/nano-banana-pro", uses_dimensions: false },
69
+ { key: "z-image-turbo", id: "prunaai/z-image-turbo", uses_dimensions: true },
70
+ { key: "imagen-4-ultra", id: "google/imagen-4-ultra", uses_dimensions: false },
71
+ { key: "recraft-v3", id: "recraft-ai/recraft-v3", uses_dimensions: false },
72
+ { key: "ideogram-v3", id: "ideogram-ai/ideogram-v3-quality", uses_dimensions: false },
73
+ { key: "photon", id: "luma/photon", uses_dimensions: false },
74
+ { key: "seedream-3", id: "bytedance/seedream-3", uses_dimensions: false },
75
+ ]);
76
+ setDefaultModel("nano-banana");
77
+ setSelectedModel(ad?.image_model || "nano-banana");
78
+ } finally {
79
+ setLoadingModels(false);
80
+ }
81
+ };
82
+
83
+ const handleRegenerate = async () => {
84
+ setStep("regenerating");
85
+ setProgress(0);
86
+ setError(null);
87
+ setResult(null);
88
+
89
+ try {
90
+ // Simulate progress updates
91
+ const progressInterval = setInterval(() => {
92
+ setProgress((prev) => {
93
+ if (prev < 90) {
94
+ return prev + 3;
95
+ }
96
+ return prev;
97
+ });
98
+ }, 400);
99
+
100
+ // Actually perform the regeneration
101
+ const response = await regenerateImage({
102
+ image_id: adId,
103
+ image_model: selectedModel,
104
+ });
105
+
106
+ clearInterval(progressInterval);
107
+ setProgress(100);
108
+
109
+ if (response.status === "success") {
110
+ setStep("complete");
111
+ setResult(response);
112
+ } else {
113
+ setStep("error");
114
+ setError(response.error || "Regeneration failed");
115
+ }
116
+ } catch (err: any) {
117
+ setStep("error");
118
+ setError(err.response?.data?.detail || err.message || "Failed to regenerate image");
119
+ setProgress(0);
120
+ }
121
+ };
122
+
123
+ const getStepLabel = () => {
124
+ switch (step) {
125
+ case "input":
126
+ return "Select Model";
127
+ case "regenerating":
128
+ return "Regenerating image...";
129
+ case "complete":
130
+ return "Regeneration complete!";
131
+ case "error":
132
+ return "Error occurred";
133
+ default:
134
+ return "Starting regeneration...";
135
+ }
136
+ };
137
+
138
+ const getStepIcon = () => {
139
+ switch (step) {
140
+ case "complete":
141
+ return <CheckCircle2 className="h-6 w-6 text-green-500" />;
142
+ case "error":
143
+ return <AlertCircle className="h-6 w-6 text-red-500" />;
144
+ default:
145
+ return <Loader2 className="h-6 w-6 text-blue-500 animate-spin" />;
146
+ }
147
+ };
148
+
149
+ const getModelDisplayName = (key: string) => {
150
+ const displayNames: Record<string, string> = {
151
+ "z-image-turbo": "Z-Image Turbo",
152
+ "nano-banana": "Nano Banana",
153
+ "nano-banana-pro": "Nano Banana Pro",
154
+ "imagen-4": "Imagen 4",
155
+ "imagen-4-ultra": "Imagen 4 Ultra",
156
+ "recraft-v3": "Recraft V3",
157
+ "ideogram-v3": "Ideogram V3 Quality",
158
+ "photon": "Luma Photon",
159
+ "seedream-3": "SeedReam 3",
160
+ "gpt-image-1.5": "GPT Image 1.5 (OpenAI)",
161
+ };
162
+ return displayNames[key] || key;
163
+ };
164
+
165
+ if (!isOpen) return null;
166
+
167
+ return (
168
+ <div className="fixed inset-0 z-50 flex items-center justify-center p-4 bg-black/60 backdrop-blur-sm">
169
+ <div className="bg-white rounded-2xl shadow-2xl max-w-2xl w-full max-h-[90vh] overflow-y-auto">
170
+ {/* Header */}
171
+ <div className="sticky top-0 bg-white border-b border-gray-200 px-6 py-4 rounded-t-2xl z-10">
172
+ <div className="flex items-center justify-between">
173
+ <div className="flex items-center gap-3">
174
+ <div className="p-2 bg-gradient-to-r from-purple-500 to-pink-500 rounded-lg">
175
+ <RefreshCw className="h-5 w-5 text-white" />
176
+ </div>
177
+ <div>
178
+ <h2 className="text-xl font-bold text-gray-900">Regenerate Image</h2>
179
+ <p className="text-sm text-gray-500">
180
+ {step === "input"
181
+ ? "Generate a new version of your image"
182
+ : "Creating a new image with the same prompt"}
183
+ </p>
184
+ </div>
185
+ </div>
186
+ <button
187
+ onClick={onClose}
188
+ className="p-2 hover:bg-gray-100 rounded-lg transition-colors"
189
+ >
190
+ <X className="h-5 w-5 text-gray-500" />
191
+ </button>
192
+ </div>
193
+ </div>
194
+
195
+ {/* Content */}
196
+ <div className="p-6 space-y-6">
197
+ {/* Input Step */}
198
+ {step === "input" && (
199
+ <div className="space-y-4">
200
+ <Card variant="glass">
201
+ <CardContent className="pt-6">
202
+ <div className="space-y-4">
203
+ {/* Current Model Info */}
204
+ {ad?.image_model && (
205
+ <div className="bg-gray-50 border border-gray-200 rounded-lg p-3">
206
+ <p className="text-xs text-gray-500 mb-1">Current Model</p>
207
+ <p className="font-medium text-gray-900">{getModelDisplayName(ad.image_model)}</p>
208
+ </div>
209
+ )}
210
+
211
+ {/* Model Selection */}
212
+ <div>
213
+ <label className="block text-sm font-semibold text-gray-700 mb-2">
214
+ Select Image Model
215
+ </label>
216
+ {loadingModels ? (
217
+ <div className="flex items-center justify-center py-4">
218
+ <Loader2 className="h-5 w-5 text-blue-500 animate-spin" />
219
+ <span className="ml-2 text-sm text-gray-500">Loading models...</span>
220
+ </div>
221
+ ) : (
222
+ <div className="relative">
223
+ <select
224
+ value={selectedModel || ""}
225
+ onChange={(e) => setSelectedModel(e.target.value)}
226
+ className="w-full px-4 py-3 bg-white border border-gray-300 rounded-lg appearance-none focus:outline-none focus:ring-2 focus:ring-purple-500 focus:border-transparent text-gray-900 font-medium"
227
+ >
228
+ {models.map((model) => (
229
+ <option key={model.key} value={model.key}>
230
+ {getModelDisplayName(model.key)}
231
+ {model.key === defaultModel ? " (Default)" : ""}
232
+ {model.key === ad?.image_model ? " (Current)" : ""}
233
+ </option>
234
+ ))}
235
+ </select>
236
+ <ChevronDown className="absolute right-3 top-1/2 -translate-y-1/2 h-5 w-5 text-gray-400 pointer-events-none" />
237
+ </div>
238
+ )}
239
+ <p className="text-xs text-gray-500 mt-2">
240
+ Different models produce different styles. Try a new model for variety!
241
+ </p>
242
+ </div>
243
+
244
+ {/* Info about what will happen */}
245
+ <div className="bg-purple-50 border border-purple-200 rounded-lg p-3 text-sm text-purple-800">
246
+ <p className="font-semibold mb-1">How it works</p>
247
+ <ul className="list-disc list-inside space-y-1 text-purple-700">
248
+ <li>Uses the same prompt as the original image</li>
249
+ <li>Generates a completely new image with a fresh seed</li>
250
+ <li>Original image info is preserved in metadata</li>
251
+ </ul>
252
+ </div>
253
+ </div>
254
+ </CardContent>
255
+ </Card>
256
+ </div>
257
+ )}
258
+
259
+ {/* Progress Section */}
260
+ {step === "regenerating" && (
261
+ <div className="space-y-4">
262
+ <div className="flex items-center gap-4">
263
+ {getStepIcon()}
264
+ <div className="flex-1">
265
+ <p className="font-semibold text-gray-900">{getStepLabel()}</p>
266
+ <p className="text-sm text-gray-500 mb-2">Using {getModelDisplayName(selectedModel || defaultModel)}</p>
267
+ <ProgressBar progress={progress} showPercentage={true} className="mt-2" />
268
+ </div>
269
+ </div>
270
+ </div>
271
+ )}
272
+
273
+ {/* Error State */}
274
+ {step === "error" && (
275
+ <div className="bg-red-50 border border-red-200 rounded-xl p-4">
276
+ <div className="flex items-start gap-3">
277
+ <AlertCircle className="h-5 w-5 text-red-500 mt-0.5" />
278
+ <div className="flex-1">
279
+ <h3 className="font-semibold text-red-900 mb-1">Regeneration Failed</h3>
280
+ <p className="text-sm text-red-700">{error}</p>
281
+ </div>
282
+ </div>
283
+ </div>
284
+ )}
285
+
286
+ {/* Success State */}
287
+ {step === "complete" && result && (
288
+ <div className="space-y-4">
289
+ <div className="bg-green-50 border border-green-200 rounded-xl p-4">
290
+ <div className="flex items-center gap-3">
291
+ <CheckCircle2 className="h-5 w-5 text-green-500" />
292
+ <div>
293
+ <h3 className="font-semibold text-green-900">Regeneration Complete!</h3>
294
+ <p className="text-sm text-green-700">Your image has been regenerated successfully</p>
295
+ </div>
296
+ </div>
297
+ </div>
298
+
299
+ {result.regenerated_image?.image_url && (
300
+ <div className="space-y-3">
301
+ <h3 className="font-semibold text-gray-900">New Image</h3>
302
+ <img
303
+ src={result.regenerated_image.image_url}
304
+ alt="Regenerated"
305
+ className="w-full rounded-lg border border-gray-200"
306
+ />
307
+ </div>
308
+ )}
309
+
310
+ {/* Show model used */}
311
+ {result.regenerated_image?.model_used && (
312
+ <div className="bg-gray-50 border border-gray-200 rounded-lg p-3">
313
+ <p className="text-xs text-gray-500 mb-1">Model Used</p>
314
+ <p className="font-medium text-gray-900">
315
+ {getModelDisplayName(result.regenerated_image.model_used)}
316
+ </p>
317
+ </div>
318
+ )}
319
+
320
+ {result.original_preserved && (
321
+ <div className="bg-blue-50 border border-blue-200 rounded-lg p-3 text-sm text-blue-800">
322
+ <p>The original image info has been preserved in the ad metadata. You can view it from the ad detail page.</p>
323
+ </div>
324
+ )}
325
+ </div>
326
+ )}
327
+ </div>
328
+
329
+ {/* Footer */}
330
+ <div className="sticky bottom-0 bg-gray-50 border-t border-gray-200 px-6 py-4 rounded-b-2xl">
331
+ <div className="flex justify-end gap-3">
332
+ {step === "input" && (
333
+ <Button
334
+ onClick={handleRegenerate}
335
+ variant="primary"
336
+ disabled={!selectedModel || loadingModels}
337
+ >
338
+ <Sparkles className="h-4 w-4 mr-2" />
339
+ Regenerate Image
340
+ </Button>
341
+ )}
342
+ {step === "error" && (
343
+ <Button onClick={() => setStep("input")} variant="primary">
344
+ Try Again
345
+ </Button>
346
+ )}
347
+ <Button
348
+ onClick={() => {
349
+ if (step === "complete" && result) {
350
+ // Call onSuccess when user clicks "Done" to reload the ad
351
+ onSuccess?.(result);
352
+ }
353
+ onClose();
354
+ }}
355
+ variant={step === "complete" ? "primary" : "secondary"}
356
+ >
357
+ {step === "complete" ? "Done" : "Close"}
358
+ </Button>
359
+ </div>
360
+ </div>
361
+ </div>
362
+ </div>
363
+ );
364
+ };
frontend/lib/api/endpoints.ts CHANGED
@@ -15,6 +15,8 @@ import type {
15
  HealthResponse,
16
  ApiRootResponse,
17
  ImageCorrectResponse,
 
 
18
  LoginResponse,
19
  Niche,
20
  CreativeAnalysisResponse,
@@ -175,6 +177,21 @@ export const correctImage = async (params: {
175
  return response.data;
176
  };
177
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  // Auth Endpoints
179
  export const login = async (username: string, password: string): Promise<LoginResponse> => {
180
  const response = await apiClient.post<LoginResponse>("/auth/login", {
 
15
  HealthResponse,
16
  ApiRootResponse,
17
  ImageCorrectResponse,
18
+ ImageRegenerateResponse,
19
+ ModelsListResponse,
20
  LoginResponse,
21
  Niche,
22
  CreativeAnalysisResponse,
 
177
  return response.data;
178
  };
179
 
180
+ // Image Regeneration Endpoints
181
+ export const regenerateImage = async (params: {
182
+ image_id: string;
183
+ image_model?: string | null;
184
+ }): Promise<ImageRegenerateResponse> => {
185
+ const response = await apiClient.post<ImageRegenerateResponse>("/api/regenerate", params);
186
+ return response.data;
187
+ };
188
+
189
+ // Get Available Image Models
190
+ export const getImageModels = async (): Promise<ModelsListResponse> => {
191
+ const response = await apiClient.get<ModelsListResponse>("/api/models");
192
+ return response.data;
193
+ };
194
+
195
  // Auth Endpoints
196
  export const login = async (username: string, password: string): Promise<LoginResponse> => {
197
  const response = await apiClient.post<LoginResponse>("/auth/login", {
frontend/types/api.ts CHANGED
@@ -189,9 +189,13 @@ export interface AdCreativeDB {
189
  original_image_filename?: string | null;
190
  original_image_model?: string | null;
191
  original_image_prompt?: string | null;
 
192
  is_corrected?: boolean;
193
  correction_date?: string | null;
194
  corrections?: CorrectionData | null;
 
 
 
195
  visual_style?: string | null;
196
  [key: string]: any;
197
  } | null;
@@ -321,3 +325,31 @@ export interface FileUploadResponse {
321
  }
322
 
323
  export type ModificationMode = "modify" | "inspired";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  original_image_filename?: string | null;
190
  original_image_model?: string | null;
191
  original_image_prompt?: string | null;
192
+ original_seed?: number | null;
193
  is_corrected?: boolean;
194
  correction_date?: string | null;
195
  corrections?: CorrectionData | null;
196
+ is_regenerated?: boolean;
197
+ regeneration_date?: string | null;
198
+ regeneration_seed?: number | null;
199
  visual_style?: string | null;
200
  [key: string]: any;
201
  } | null;
 
325
  }
326
 
327
  export type ModificationMode = "modify" | "inspired";
328
+
329
// Image Regeneration Types

/** Details of a newly generated image returned by POST /api/regenerate. */
export interface RegeneratedImageResult {
  filename?: string | null;
  filepath?: string | null;
  image_url?: string | null;
  r2_url?: string | null;
  model_used?: string | null;
  prompt_used?: string | null;
}

/** Envelope for the regeneration call; `status` is "success" on the happy path. */
export interface ImageRegenerateResponse {
  status: string;
  regenerated_image?: RegeneratedImageResult | null;
  // True when the previous image's info was stashed in the ad's metadata.
  original_preserved: boolean;
  error?: string | null;
}

/** One selectable image-generation model as listed by GET /api/models. */
export interface ImageModel {
  key: string;
  id: string;
  // Whether the model accepts explicit width/height parameters.
  uses_dimensions: boolean;
}

/** Payload of GET /api/models: available models plus the recommended default key. */
export interface ModelsListResponse {
  models: ImageModel[];
  default: string;
}
main.py CHANGED
@@ -15,6 +15,8 @@ from datetime import datetime
15
  import os
16
  import logging
17
  import time
 
 
18
  from starlette.middleware.gzip import GZipMiddleware
19
  import httpx
20
  from starlette.requests import Request as StarletteRequest
@@ -359,6 +361,8 @@ async def api_info():
359
  "GET /matrix/compatible/{angle_key}": "Get compatible concepts for angle",
360
  "POST /extensive/generate": "Generate ad using extensive (researcher → creative director → designer → copywriter)",
361
  "POST /api/correct": "Correct image for spelling mistakes and visual issues (requires image_id)",
 
 
362
  "POST /api/creative/upload": "Upload a creative image for analysis",
363
  "POST /api/creative/analyze": "Analyze a creative image with AI vision (via URL)",
364
  "POST /api/creative/analyze/upload": "Analyze a creative image with AI vision (via file upload)",
@@ -881,6 +885,266 @@ async def correct_image(
881
 
882
 
883
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
884
  @app.get("/strategies/{niche}")
885
  async def get_strategies(niche: Literal["home_insurance", "glp1"]):
886
  """
 
15
  import os
16
  import logging
17
  import time
18
+ import random
19
+ import uuid
20
  from starlette.middleware.gzip import GZipMiddleware
21
  import httpx
22
  from starlette.requests import Request as StarletteRequest
 
361
  "GET /matrix/compatible/{angle_key}": "Get compatible concepts for angle",
362
  "POST /extensive/generate": "Generate ad using extensive (researcher → creative director → designer → copywriter)",
363
  "POST /api/correct": "Correct image for spelling mistakes and visual issues (requires image_id)",
364
+ "POST /api/regenerate": "Regenerate image with optional model selection (requires image_id)",
365
+ "GET /api/models": "List all available image generation models",
366
  "POST /api/creative/upload": "Upload a creative image for analysis",
367
  "POST /api/creative/analyze": "Analyze a creative image with AI vision (via URL)",
368
  "POST /api/creative/analyze/upload": "Analyze a creative image with AI vision (via file upload)",
 
885
 
886
 
887
 
888
+ # =============================================================================
889
+ # IMAGE REGENERATION ENDPOINTS
890
+ # =============================================================================
891
+
892
class ImageRegenerateRequest(BaseModel):
    """Request schema for image regeneration."""
    # Must reference an ad creative owned by the authenticated user.
    image_id: str = Field(
        description="ID of existing ad creative in database"
    )
    # When None, the endpoint falls back to the ad's original model.
    image_model: Optional[str] = Field(
        default=None,
        description="Image generation model to use (e.g., 'z-image-turbo', 'nano-banana', 'nano-banana-pro', 'imagen-4-ultra', 'recraft-v3', 'ideogram-v3', 'photon', 'seedream-3'). If not provided, uses the original model."
    )
901
+
902
+
903
class RegeneratedImageResult(BaseModel):
    """Regenerated image result returned to the client."""
    filename: Optional[str] = None  # generated file name (regen_<niche>_<ts>_<id>.png)
    filepath: Optional[str] = None  # local path, only set when R2 upload was skipped/failed
    image_url: Optional[str] = None  # best available URL (R2, provider, or local route)
    r2_url: Optional[str] = None  # R2 storage URL, if the upload succeeded
    model_used: Optional[str] = None  # model key actually used by the image service
    prompt_used: Optional[str] = None  # the ad's stored image prompt that was reused
911
+
912
+
913
class ImageRegenerateResponse(BaseModel):
    """Response schema for image regeneration."""
    status: str  # "success" on the happy path
    regenerated_image: Optional[RegeneratedImageResult] = None
    original_preserved: bool = Field(default=True, description="Whether original image info was preserved in metadata")
    error: Optional[str] = None
919
+
920
+
921
@app.post("/api/regenerate", response_model=ImageRegenerateResponse)
async def regenerate_image(
    request: ImageRegenerateRequest,
    username: str = Depends(get_current_user)
):
    """
    Regenerate an image for an existing ad creative with an optional new model.

    Requires authentication. Users can only regenerate their own ads.

    The service will:
    1. Fetch the original ad and its image prompt from the database
    2. Regenerate the image using the specified model (or original model if not provided)
    3. Upload to R2 storage
    4. Update the ad with the new image, preserving original image info in metadata
    5. Return the regenerated image

    Raises:
        HTTPException 404: ad not found or not owned by the caller.
        HTTPException 400: the ad has no stored image prompt.
        HTTPException 500: image generation or any unexpected failure.
    """
    api_start_time = time.time()
    api_logger.info("=" * 80)
    api_logger.info(f"API: Regeneration request received")
    api_logger.info(f"User: {username}")
    api_logger.info(f"Image ID: {request.image_id}")
    api_logger.info(f"Requested model: {request.image_model or 'Use original'}")

    try:
        # Fetch ad from database (only if it belongs to current user)
        api_logger.info(f"Fetching ad creative from database...")
        ad = await db_service.get_ad_creative(request.image_id, username=username)
        if not ad:
            api_logger.error(f"Ad creative {request.image_id} not found or access denied for user {username}")
            raise HTTPException(status_code=404, detail=f"Ad creative with ID {request.image_id} not found or access denied")

        api_logger.info(f"Ad creative found: {ad.get('title', 'N/A')} (niche: {ad.get('niche', 'N/A')})")

        # Get the image prompt — regeneration reuses the ad's original prompt.
        image_prompt = ad.get("image_prompt")
        if not image_prompt:
            api_logger.error(f"No image prompt found for ad {request.image_id}")
            raise HTTPException(
                status_code=400,
                detail="No image prompt found for this ad creative. Cannot regenerate without a prompt."
            )

        api_logger.info(f"Image prompt found: {len(image_prompt)} characters")

        # Model precedence: explicit request > ad's original model > global default.
        model_to_use = request.image_model or ad.get("image_model") or settings.image_model
        api_logger.info(f"Using model: {model_to_use}")

        # Generate a new random seed for variety (max is 2**31 - 1, the
        # largest value most providers accept for a seed).
        seed = random.randint(1, 2147483647)
        api_logger.info(f"Using seed: {seed}")

        # Generate the new image
        api_logger.info("Generating new image...")
        try:
            image_bytes, model_used, generated_url = await image_service.generate(
                prompt=image_prompt,
                width=1024,
                height=1024,
                seed=seed,
                model_key=model_to_use,
            )
        except Exception as e:
            api_logger.error(f"Image generation failed: {e}")
            raise HTTPException(status_code=500, detail=f"Image generation failed: {str(e)}")

        api_logger.info(f"Image generated successfully with model: {model_used}")

        # Generate filename for the new image.
        # NOTE: guard against an explicit None niche value — ad.get("niche", "unknown")
        # would return None (not the default) and .replace() would crash.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex[:8]
        niche = (ad.get("niche") or "unknown").replace(" ", "_")
        filename = f"regen_{niche}_{timestamp}_{unique_id}.png"

        # Try to upload to R2; failure is non-fatal (local save is the fallback).
        r2_url = None
        try:
            from services.r2_storage import get_r2_storage
            r2_storage = get_r2_storage()
            if r2_storage and image_bytes:
                r2_url = r2_storage.upload_image(
                    image_bytes=image_bytes,
                    filename=filename,
                    niche=niche,
                )
                api_logger.info(f"Uploaded to R2: {r2_url}")
        except Exception as e:
            api_logger.warning(f"R2 upload failed: {e}")

        # Save locally as fallback
        local_path = None
        if not r2_url and image_bytes:
            local_path = os.path.join(settings.output_dir, filename)
            os.makedirs(os.path.dirname(local_path), exist_ok=True)
            with open(local_path, "wb") as f:
                f.write(image_bytes)
            api_logger.info(f"Saved locally: {local_path}")

        # Store old image data in metadata before updating so the original
        # remains recoverable from the ad detail page.
        old_image_url = ad.get("r2_url") or ad.get("image_url")
        old_r2_url = ad.get("r2_url")
        old_image_filename = ad.get("image_filename")
        old_image_model = ad.get("image_model")
        old_seed = ad.get("image_seed")

        # Build metadata with original image info
        regeneration_metadata = {
            "is_regenerated": True,
            "regeneration_date": datetime.utcnow().isoformat() + "Z",
            "regeneration_seed": seed,
        }

        if old_image_url:
            regeneration_metadata["original_image_url"] = old_image_url
        if old_r2_url:
            regeneration_metadata["original_r2_url"] = old_r2_url
        if old_image_filename:
            regeneration_metadata["original_image_filename"] = old_image_filename
        if old_image_model:
            regeneration_metadata["original_image_model"] = old_image_model
        # `is not None` rather than truthiness: a seed of 0 is valid and must
        # still be preserved.
        if old_seed is not None:
            regeneration_metadata["original_seed"] = old_seed

        # Update the ad with new image; URL precedence: R2 > provider URL > local route.
        update_kwargs = {
            "image_filename": filename,
            "image_model": model_used,
            "image_seed": seed,
        }
        if r2_url:
            update_kwargs["image_url"] = r2_url
            update_kwargs["r2_url"] = r2_url
        elif generated_url:
            update_kwargs["image_url"] = generated_url
        elif local_path:
            update_kwargs["image_url"] = f"/images/{filename}"

        api_logger.info(f"Updating ad {request.image_id} with new image...")
        update_success = await db_service.update_ad_creative(
            ad_id=request.image_id,
            username=username,
            metadata=regeneration_metadata,
            **update_kwargs
        )

        if update_success:
            api_logger.info(f"✓ Ad updated with regenerated image (ID: {request.image_id})")
        else:
            api_logger.warning("Failed to update ad with regenerated image")

        total_api_time = time.time() - api_start_time
        api_logger.info("=" * 80)
        api_logger.info(f"✓ API: Regeneration completed successfully in {total_api_time:.2f}s")
        api_logger.info("=" * 80)

        return {
            "status": "success",
            "regenerated_image": {
                "filename": filename,
                "filepath": local_path,
                "image_url": r2_url or generated_url or f"/images/{filename}",
                "r2_url": r2_url,
                "model_used": model_used,
                "prompt_used": image_prompt,
            },
            "original_preserved": True,
        }

    except HTTPException:
        total_api_time = time.time() - api_start_time
        api_logger.error(f"✗ API: Regeneration request failed with HTTPException after {total_api_time:.2f}s")
        raise
    except Exception as e:
        total_api_time = time.time() - api_start_time
        api_logger.error(f"✗ API: Regeneration request failed with exception after {total_api_time:.2f}s: {str(e)}")
        api_logger.exception("Full exception traceback:")
        raise HTTPException(status_code=500, detail=str(e))
1100
+
1101
+
1102
@app.get("/api/models")
async def list_image_models():
    """
    List all available image generation models.

    Returns model keys and their descriptions for use in image generation/regeneration.
    Default for regeneration is nano-banana (best quality for affiliate marketing).
    """
    from services.image import MODEL_REGISTRY

    # nano-banana leads the list: it is the recommended default for regeneration.
    preferred_order = ["nano-banana", "nano-banana-pro", "z-image-turbo", "imagen-4-ultra", "recraft-v3", "ideogram-v3", "photon", "seedream-3"]

    def describe(key, config):
        # Shape one registry entry into the public model descriptor.
        return {
            "key": key,
            "id": config["id"],
            "uses_dimensions": config.get("uses_dimensions", False),
        }

    # Preferred keys first (only those actually registered), then the rest
    # in registry order.
    ordered_keys = [k for k in preferred_order if k in MODEL_REGISTRY]
    ordered_keys += [k for k in MODEL_REGISTRY if k not in preferred_order]
    models = [describe(k, MODEL_REGISTRY[k]) for k in ordered_keys]

    # The OpenAI model is exposed last; it is not part of MODEL_REGISTRY.
    models.append({
        "key": "gpt-image-1.5",
        "id": "openai/gpt-image-1.5",
        "uses_dimensions": True,
    })

    return {
        "models": models,
        "default": "nano-banana",  # Best for affiliate marketing regeneration
    }
1146
+
1147
+
1148
  @app.get("/strategies/{niche}")
1149
  async def get_strategies(niche: Literal["home_insurance", "glp1"]):
1150
  """
services/generator.py CHANGED
@@ -962,7 +962,9 @@ NICHE REQUIREMENTS (GLP-1):
962
  - Confidence/transformation moments should feel genuine
963
  - AVOID: defaulting to before/after for every image - prioritize visual variety
964
  - AVOID: extreme body manipulation, unrealistic transformations, shame-inducing imagery
 
965
  - ENCOURAGE: diverse visual concepts that match the strategy (quiz for quiz strategy, medical for authority strategy, lifestyle for aspiration, etc.)
 
966
  """
967
  else:
968
  niche_image_guidance = ""
@@ -1192,147 +1194,210 @@ CRITICAL REQUIREMENTS:
1192
  - Focus on the natural, authentic scene only
1193
  - If text is included, show it in ONE location only, not multiple places"""
1194
 
1195
- # Refine and clean the prompt before sending
1196
- refined_prompt = self._refine_image_prompt(prompt)
1197
  return refined_prompt
1198
 
1199
- def _refine_image_prompt(self, prompt: str) -> str:
1200
  """
1201
- Refine and clean the image prompt to ensure logical, coherent image generation.
1202
- Removes contradictions, confusing instructions, and ensures the prompt makes sense.
 
 
 
 
 
 
 
 
1203
  """
1204
  import re
1205
 
1206
- # Remove meta-instructions that confuse the model
1207
- prompt = re.sub(r'\(for model, not to display\)', '', prompt, flags=re.IGNORECASE)
1208
- prompt = re.sub(r'\(apply these, don\'t display\)', '', prompt, flags=re.IGNORECASE)
1209
- prompt = re.sub(r'IMPORTANT: Display ONLY', 'Display', prompt, flags=re.IGNORECASE)
1210
- prompt = re.sub(r'IMPORTANT: If including', 'If including', prompt, flags=re.IGNORECASE)
1211
- prompt = re.sub(r'IMPORTANT: Use this', 'Use this', prompt, flags=re.IGNORECASE)
1212
- prompt = re.sub(r'IMPORTANT: Follow this', 'Follow this', prompt, flags=re.IGNORECASE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1213
 
1214
- # Remove lines that are clearly instructions for developers, not the model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1215
  lines = prompt.split('\n')
1216
  cleaned_lines = []
1217
- skip_until_next_section = False
1218
-
1219
  for line in lines:
1220
  line_lower = line.lower()
1221
-
1222
- # Skip developer instructions
1223
  if any(phrase in line_lower for phrase in [
1224
- 'do not display these instructions',
1225
- 'not to be displayed',
1226
- 'for debugging',
1227
- 'metadata',
1228
  ]):
1229
- if not line.strip().startswith('==='):
1230
- continue
1231
-
1232
- # Keep section headers
1233
- if line.strip().startswith('==='):
1234
- skip_until_next_section = False
1235
- cleaned_lines.append(line)
1236
  continue
1237
-
1238
- # Skip placeholder or empty content
1239
- if any(phrase in line_lower for phrase in ['n/a', 'not provided', 'see above', 'refer to']):
1240
- if not line.strip().startswith('-'):
1241
- continue
1242
-
1243
- # Simplify overly emphatic instructions
1244
- if line.strip().startswith('CRITICAL:'):
1245
- line = line.replace('CRITICAL:', 'Note:')
1246
- elif line.strip().startswith('IMPORTANT:'):
1247
- line = line.replace('IMPORTANT:', 'Note:')
1248
-
1249
- # Remove redundant "NO" statements that are already covered
1250
- if line.strip().startswith('- NO') and 'decorative' in line_lower:
1251
- # Keep first occurrence, skip duplicates
1252
- if 'decorative' in '\n'.join(cleaned_lines).lower():
1253
- # Check if we already have this prohibition
1254
  continue
1255
-
1256
  cleaned_lines.append(line)
1257
-
1258
  prompt = '\n'.join(cleaned_lines)
1259
 
 
 
 
 
 
 
1260
  # Remove excessive blank lines
1261
  prompt = re.sub(r'\n{3,}', '\n\n', prompt)
1262
 
1263
- # Ensure logical coherence - check for contradictions
1264
- prompt_lower = prompt.lower()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1265
 
1266
- # If we say "no text" but have headline section, clarify
1267
- if 'no text overlay' in prompt_lower or 'no text' in prompt_lower:
1268
- # Remove or clarify headline text section
1269
- sections = prompt.split('===')
1270
- filtered_sections = []
1271
- for section in sections:
1272
- section_lower = section.lower()
1273
- # Keep "no text" instructions
1274
- if 'no text' in section_lower and 'overlay' in section_lower:
1275
- filtered_sections.append(section)
1276
- # Skip headline text section if we said no text
1277
- elif 'headline text' in section_lower and 'no text' in prompt_lower:
1278
- # Replace with clarification
1279
- filtered_sections.append('=== NO TEXT ===\nDo NOT include any text in this image.')
1280
- else:
1281
- filtered_sections.append(section)
1282
- prompt = '==='.join(filtered_sections)
1283
-
1284
- # Ensure visual scene is clear and logical
1285
- if 'visual scene' in prompt_lower:
1286
- # Make sure scene description is coherent
1287
- if 'psychological angle' in prompt_lower:
1288
- # These are fine - they provide context
1289
- pass
1290
-
1291
- # Remove confusing conditional text that might create illogical images
1292
- # Example: "if text overlay is enabled" - simplify
1293
- prompt = re.sub(
1294
- r'if text overlay is enabled',
1295
- 'if text is included',
1296
- prompt,
1297
- flags=re.IGNORECASE
1298
- )
1299
- prompt = re.sub(
1300
- r'if including text',
1301
- 'if text is included',
1302
- prompt,
1303
- flags=re.IGNORECASE
1304
- )
1305
 
1306
- # Ensure the prompt ends with a clear, logical instruction
1307
- if not prompt.strip().endswith('.'):
1308
- prompt += "\n\nCreate a natural, authentic, and logically coherent image. All elements should fit together naturally and make visual sense."
1309
-
1310
- # Final cleanup: remove any remaining confusing meta-text
1311
- prompt = prompt.replace('(OPTIONAL - ', '(')
1312
- prompt = prompt.replace('(for model, not to display)', '')
1313
- prompt = prompt.replace('(apply these, don\'t display)', '')
1314
-
1315
- # Ensure no empty sections
1316
- sections = prompt.split('===')
1317
- final_sections = []
1318
- for section in sections:
1319
- section = section.strip()
1320
- if section and len(section) > 10: # Only keep substantial sections
1321
- final_sections.append(section)
1322
- prompt = '==='.join(final_sections)
1323
-
1324
- # Final validation: ensure the prompt makes logical sense
1325
- # Check that we're not asking for impossible combinations
1326
- if 'no text' in prompt_lower and 'headline' in prompt_lower:
1327
- # Remove headline references if we explicitly said no text
1328
- prompt = re.sub(
1329
- r'===.*HEADLINE.*===[\s\S]*?(?===|$)',
1330
- '',
1331
- prompt,
1332
- flags=re.IGNORECASE | re.MULTILINE
1333
- )
1334
 
1335
- return prompt.strip()
 
 
 
 
1336
 
1337
  async def generate_ad(
1338
  self,
@@ -2026,8 +2091,8 @@ CONCEPT: {concept['name']}
2026
  # Add low quality camera instruction to the prompt
2027
  prompt_with_camera = f"{prompt}\n\n=== CAMERA QUALITY ===\n- The image should look like it was shot from a low quality camera\n- Include characteristics of low quality camera: slight grain, reduced sharpness, lower resolution appearance, authentic camera imperfections\n- Should have the authentic feel of a real photo taken with a basic or older camera device"
2028
 
2029
- # Refine prompt and add variation for each image
2030
- base_refined_prompt = self._refine_image_prompt(prompt_with_camera)
2031
 
2032
  # Add variation modifier if generating multiple images
2033
  if num_images > 1:
@@ -2376,8 +2441,8 @@ If this image includes people or faces, they MUST look like real, original peopl
2376
 
2377
  Create a scroll-stopping ad image with "{headline}" prominently displayed."""
2378
 
2379
- # Refine and clean the prompt before sending
2380
- refined_prompt = self._refine_image_prompt(prompt)
2381
  return refined_prompt
2382
 
2383
  async def generate_batch(
 
962
  - Confidence/transformation moments should feel genuine
963
  - AVOID: defaulting to before/after for every image - prioritize visual variety
964
  - AVOID: extreme body manipulation, unrealistic transformations, shame-inducing imagery
965
+ - AVOID: elderly people over 65, senior citizens, very old looking people, gray-haired elderly groups
966
  - ENCOURAGE: diverse visual concepts that match the strategy (quiz for quiz strategy, medical for authority strategy, lifestyle for aspiration, etc.)
967
+ - AGE GUIDANCE: Show people aged 30-50 primarily. DO NOT default to elderly/senior citizens. Target audience is middle-aged adults (30s-40s), NOT seniors.
968
  """
969
  else:
970
  niche_image_guidance = ""
 
1194
  - Focus on the natural, authentic scene only
1195
  - If text is included, show it in ONE location only, not multiple places"""
1196
 
1197
+ # Refine and clean the prompt before sending (pass niche for demographic fixes)
1198
+ refined_prompt = self._refine_image_prompt(prompt, niche=niche)
1199
  return refined_prompt
1200
 
1201
+ def _refine_image_prompt(self, prompt: str, niche: str = None) -> str:
1202
  """
1203
+ Refine and clean the image prompt for affiliate marketing creatives.
1204
+
1205
+ Fixes illogical elements:
1206
+ - Contradictory instructions
1207
+ - Wrong demographics for niche
1208
+ - Unrealistic visual combinations
1209
+ - Corporate/stock photo aesthetics (bad for affiliate marketing)
1210
+ - Meta-instructions that confuse image models
1211
+
1212
+ Affiliate marketing principle: Low-production, authentic images outperform polished studio shots.
1213
  """
1214
  import re
1215
 
1216
+ # =====================================================================
1217
+ # 1. REMOVE META-INSTRUCTIONS (confuse image models)
1218
+ # =====================================================================
1219
+ meta_patterns = [
1220
+ r'\(for model, not to display\)',
1221
+ r'\(apply these, don\'t display\)',
1222
+ r'\(for reference only\)',
1223
+ r'\(internal use\)',
1224
+ r'IMPORTANT: Display ONLY',
1225
+ r'IMPORTANT: If including',
1226
+ r'IMPORTANT: Use this',
1227
+ r'IMPORTANT: Follow this',
1228
+ r'CRITICAL: Do not',
1229
+ r'NOTE: This is for',
1230
+ ]
1231
+ for pattern in meta_patterns:
1232
+ prompt = re.sub(pattern, '', prompt, flags=re.IGNORECASE)
1233
+
1234
+ # =====================================================================
1235
+ # 2. FIX DEMOGRAPHIC ISSUES (niche-specific)
1236
+ # =====================================================================
1237
+ prompt_lower = prompt.lower()
1238
 
1239
+ # GLP-1: Fix elderly/senior defaults - target is 30-50 adults
1240
+ if niche and niche.lower() == 'glp1':
1241
+ # Replace elderly references with middle-aged
1242
+ elderly_replacements = [
1243
+ (r'\b(elderly|senior|seniors|old people|old person)\b', 'middle-aged adult'),
1244
+ (r'\b(grandparent|grandfather|grandmother|grandma|grandpa)\b', 'parent'),
1245
+ (r'\b(70[\s-]?year[\s-]?old|80[\s-]?year[\s-]?old|65[\s-]?year[\s-]?old)\b', '40-year-old'),
1246
+ (r'\b(in their 60s|in their 70s|in their 80s)\b', 'in their 40s'),
1247
+ (r'\b(retirement age|retired person|retiree)\b', 'working professional'),
1248
+ (r'\bgray[\s-]?haired elderly\b', 'confident adult'),
1249
+ (r'\bsenior citizen\b', 'adult'),
1250
+ ]
1251
+ for pattern, replacement in elderly_replacements:
1252
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
1253
+
1254
+ # Home Insurance: Ensure American suburban context
1255
+ if niche and niche.lower() == 'home_insurance':
1256
+ # Fix non-American home references
1257
+ home_replacements = [
1258
+ (r'\b(flat|apartment|condo)\b(?! insurance)', 'house'),
1259
+ (r'\b(mansion|castle|estate)\b', 'suburban home'),
1260
+ (r'\b(european|british|uk) style home\b', 'American suburban home'),
1261
+ ]
1262
+ for pattern, replacement in home_replacements:
1263
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
1264
+
1265
+ # =====================================================================
1266
+ # 3. FIX ILLOGICAL VISUAL COMBINATIONS
1267
+ # =====================================================================
1268
+
1269
+ # Can't have both "no text" and "headline text"
1270
+ if 'no text' in prompt_lower and ('headline' in prompt_lower or 'text overlay' in prompt_lower):
1271
+ # Keep "no text" instruction, remove headline sections
1272
+ prompt = re.sub(
1273
+ r'===.*HEADLINE.*===[\s\S]*?(?====|$)',
1274
+ '',
1275
+ prompt,
1276
+ flags=re.IGNORECASE | re.MULTILINE
1277
+ )
1278
+ prompt = re.sub(r'headline[:\s]+["\']?[^"\']+["\']?', '', prompt, flags=re.IGNORECASE)
1279
+
1280
+ # Can't have both "minimalist" and "cluttered/busy"
1281
+ if 'minimalist' in prompt_lower and any(word in prompt_lower for word in ['cluttered', 'busy', 'crowded', 'chaotic']):
1282
+ prompt = re.sub(r'\b(cluttered|busy|crowded|chaotic)\b', 'clean', prompt, flags=re.IGNORECASE)
1283
+
1284
+ # Can't have both "dark/moody" and "bright/cheerful"
1285
+ if 'dark' in prompt_lower and 'moody' in prompt_lower:
1286
+ prompt = re.sub(r'\b(bright|cheerful|sunny|vibrant)\b', 'subtle', prompt, flags=re.IGNORECASE)
1287
+ elif 'bright' in prompt_lower and 'cheerful' in prompt_lower:
1288
+ prompt = re.sub(r'\b(dark|moody|dramatic|gloomy)\b', 'soft', prompt, flags=re.IGNORECASE)
1289
+
1290
+ # Can't have "indoor" scene with "outdoor" elements
1291
+ if 'indoor' in prompt_lower or 'inside' in prompt_lower:
1292
+ if 'sunlight streaming' not in prompt_lower: # Window light is okay
1293
+ prompt = re.sub(r'\b(outdoor|outside|garden|yard|street)\b(?! view)', 'indoor', prompt, flags=re.IGNORECASE)
1294
+
1295
+ # =====================================================================
1296
+ # 4. FIX AFFILIATE MARKETING ANTI-PATTERNS
1297
+ # =====================================================================
1298
+
1299
+ # Remove corporate/stock photo aesthetics (bad for affiliate marketing)
1300
+ stock_photo_terms = [
1301
+ (r'\bstock photo\b', 'authentic photo'),
1302
+ (r'\bprofessional studio\b', 'natural setting'),
1303
+ (r'\bperfect lighting\b', 'natural lighting'),
1304
+ (r'\bcorporate headshot\b', 'candid portrait'),
1305
+ (r'\bpolished commercial\b', 'authentic UGC-style'),
1306
+ (r'\bgeneric model\b', 'real person'),
1307
+ (r'\bshutterstock style\b', 'authentic casual'),
1308
+ (r'\bistock style\b', 'documentary style'),
1309
+ ]
1310
+ for pattern, replacement in stock_photo_terms:
1311
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
1312
+
1313
+ # Ensure authenticity markers for affiliate marketing
1314
+ if 'ugc' in prompt_lower or 'authentic' in prompt_lower:
1315
+ # Remove conflicting professional markers
1316
+ prompt = re.sub(r'\b(studio backdrop|professional lighting setup|commercial shoot)\b',
1317
+ 'natural environment', prompt, flags=re.IGNORECASE)
1318
+
1319
+ # =====================================================================
1320
+ # 5. FIX UNREALISTIC BODY/TRANSFORMATION CLAIMS
1321
+ # =====================================================================
1322
+
1323
+ # Remove extreme/unrealistic transformation language
1324
+ unrealistic_patterns = [
1325
+ (r'\b(extreme weight loss|dramatic transformation overnight)\b', 'healthy transformation'),
1326
+ (r'\b(impossibly thin|skeletal|anorexic)\b', 'healthy fit'),
1327
+ (r'\b(perfect body|flawless figure)\b', 'confident healthy body'),
1328
+ (r'\b(six pack abs|bodybuilder physique)\b', 'healthy toned body'),
1329
+ ]
1330
+ for pattern, replacement in unrealistic_patterns:
1331
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
1332
+
1333
+ # =====================================================================
1334
+ # 6. CLEAN UP FORMATTING AND STRUCTURE
1335
+ # =====================================================================
1336
+
1337
+ # Remove developer instructions
1338
  lines = prompt.split('\n')
1339
  cleaned_lines = []
 
 
1340
  for line in lines:
1341
  line_lower = line.lower()
1342
+ # Skip developer-only lines
 
1343
  if any(phrase in line_lower for phrase in [
1344
+ 'do not display', 'not to be displayed', 'for debugging',
1345
+ 'metadata:', 'internal:', 'developer note'
 
 
1346
  ]):
 
 
 
 
 
 
 
1347
  continue
1348
+ # Skip empty placeholder content
1349
+ if any(phrase in line_lower for phrase in ['n/a', 'not provided', 'see above', 'tbd']):
1350
+ if len(line.strip()) < 20: # Short placeholder lines
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1351
  continue
 
1352
  cleaned_lines.append(line)
 
1353
  prompt = '\n'.join(cleaned_lines)
1354
 
1355
+ # Simplify emphatic markers (image models don't need shouting)
1356
+ prompt = re.sub(r'\bCRITICAL:\s*', '', prompt, flags=re.IGNORECASE)
1357
+ prompt = re.sub(r'\bIMPORTANT:\s*', '', prompt, flags=re.IGNORECASE)
1358
+ prompt = re.sub(r'\bNOTE:\s*', '', prompt, flags=re.IGNORECASE)
1359
+ prompt = re.sub(r'\bMUST:\s*', '', prompt, flags=re.IGNORECASE)
1360
+
1361
  # Remove excessive blank lines
1362
  prompt = re.sub(r'\n{3,}', '\n\n', prompt)
1363
 
1364
+ # Remove empty sections
1365
+ prompt = re.sub(r'===\s*===', '', prompt)
1366
+ prompt = re.sub(r'===\s*\n\s*===', '===', prompt)
1367
+
1368
+ # =====================================================================
1369
+ # 7. ENSURE LOGICAL COHERENCE
1370
+ # =====================================================================
1371
+
1372
+ # Remove duplicate prohibitions
1373
+ seen_prohibitions = set()
1374
+ final_lines = []
1375
+ for line in prompt.split('\n'):
1376
+ line_lower = line.lower().strip()
1377
+ if line_lower.startswith('- no ') or line_lower.startswith('no '):
1378
+ prohibition_key = re.sub(r'[^a-z\s]', '', line_lower)[:50]
1379
+ if prohibition_key in seen_prohibitions:
1380
+ continue
1381
+ seen_prohibitions.add(prohibition_key)
1382
+ final_lines.append(line)
1383
+ prompt = '\n'.join(final_lines)
1384
 
1385
+ # =====================================================================
1386
+ # 8. ADD AFFILIATE MARKETING QUALITY MARKERS
1387
+ # =====================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1388
 
1389
+ # Ensure the prompt has authenticity markers if not present
1390
+ if 'authentic' not in prompt_lower and 'ugc' not in prompt_lower and 'real' not in prompt_lower:
1391
+ prompt += "\n\nStyle: Authentic, relatable, real-person aesthetic. Not overly polished or corporate."
1392
+
1393
+ # Final trim
1394
+ prompt = prompt.strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1395
 
1396
+ # Ensure prompt doesn't end mid-sentence
1397
+ if prompt and not prompt[-1] in '.!?"\'':
1398
+ prompt += '.'
1399
+
1400
+ return prompt
1401
 
1402
  async def generate_ad(
1403
  self,
 
2091
  # Add low quality camera instruction to the prompt
2092
  prompt_with_camera = f"{prompt}\n\n=== CAMERA QUALITY ===\n- The image should look like it was shot from a low quality camera\n- Include characteristics of low quality camera: slight grain, reduced sharpness, lower resolution appearance, authentic camera imperfections\n- Should have the authentic feel of a real photo taken with a basic or older camera device"
2093
 
2094
+ # Refine prompt and add variation for each image (pass niche for demographic fixes)
2095
+ base_refined_prompt = self._refine_image_prompt(prompt_with_camera, niche=niche)
2096
 
2097
  # Add variation modifier if generating multiple images
2098
  if num_images > 1:
 
2441
 
2442
  Create a scroll-stopping ad image with "{headline}" prominently displayed."""
2443
 
2444
+ # Refine and clean the prompt before sending (pass niche for demographic fixes)
2445
+ refined_prompt = self._refine_image_prompt(prompt, niche=niche)
2446
  return refined_prompt
2447
 
2448
  async def generate_batch(
services/third_flow.py CHANGED
@@ -487,12 +487,13 @@ CRITICAL: The image MUST show home insurance-related content. Show REAL American
487
  elif niche_lower == "glp1":
488
  niche_guidance = """
489
  NICHE-SPECIFIC REQUIREMENTS (GLP-1):
490
- SUBJECTS TO INCLUDE: confident person smiling, active lifestyle scenes, healthy meal preparation, doctor consultation
491
  PROPS TO INCLUDE: fitness equipment, healthy food, comfortable clothing
492
- AVOID: before/after weight comparisons, measuring tapes, scales prominently, needle close-ups
 
493
  COLOR PREFERENCE: health
494
 
495
- CRITICAL: The image MUST be appropriate for GLP-1/weight loss niche. Show lifestyle, health, and confidence-related content, NOT home insurance content."""
496
  else:
497
  niche_guidance = f"""
498
  NICHE-SPECIFIC REQUIREMENTS ({niche}):
@@ -556,7 +557,10 @@ CRITICAL: The image MUST be appropriate for {niche} niche."""
556
 
557
  response = completion.choices[0].message
558
  if response.parsed:
559
- return response.parsed.prompt
 
 
 
560
  else:
561
  print(f"Warning: Creative designer refusal: {response.refusal}")
562
  return ""
@@ -564,6 +568,53 @@ CRITICAL: The image MUST be appropriate for {niche} niche."""
564
  print(f"Error in creative_designer: {e}")
565
  return ""
566
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
567
  def copy_writer(self, creative_strategy: CreativeStrategies) -> CopyWriterOutput:
568
  """
569
  Generate ad copy from creative strategy.
 
487
  elif niche_lower == "glp1":
488
  niche_guidance = """
489
  NICHE-SPECIFIC REQUIREMENTS (GLP-1):
490
+ SUBJECTS TO INCLUDE: confident person smiling (age 30-50), active lifestyle scenes with adults, healthy meal preparation, doctor consultation
491
  PROPS TO INCLUDE: fitness equipment, healthy food, comfortable clothing
492
+ AVOID: before/after weight comparisons, measuring tapes, scales prominently, needle close-ups, elderly people over 65, senior citizens, very old looking people, gray-haired elderly groups
493
+ AGE GUIDANCE: Show people aged 30-50 primarily. DO NOT default to elderly/senior citizens. The target audience is middle-aged adults in their 30s-40s, NOT seniors or elderly people.
494
  COLOR PREFERENCE: health
495
 
496
+ CRITICAL: The image MUST be appropriate for GLP-1/weight loss niche. Show lifestyle, health, and confidence-related content. People in images should look 30-50 years old, NOT elderly."""
497
  else:
498
  niche_guidance = f"""
499
  NICHE-SPECIFIC REQUIREMENTS ({niche}):
 
557
 
558
  response = completion.choices[0].message
559
  if response.parsed:
560
+ # Refine the prompt for affiliate marketing
561
+ raw_prompt = response.parsed.prompt
562
+ refined_prompt = self._refine_prompt_for_affiliate(raw_prompt, niche_lower)
563
+ return refined_prompt
564
  else:
565
  print(f"Warning: Creative designer refusal: {response.refusal}")
566
  return ""
 
568
  print(f"Error in creative_designer: {e}")
569
  return ""
570
 
571
+ def _refine_prompt_for_affiliate(self, prompt: str, niche: str) -> str:
572
+ """
573
+ Refine GPT-generated prompt for affiliate marketing creatives.
574
+ Fixes illogical elements, wrong demographics, and ensures authenticity.
575
+ """
576
+ import re
577
+
578
+ if not prompt:
579
+ return prompt
580
+
581
+ prompt_lower = prompt.lower()
582
+
583
+ # =================================================================
584
+ # REMOVE STOCK PHOTO / CORPORATE AESTHETICS
585
+ # =================================================================
586
+ stock_replacements = [
587
+ (r'\bstock photo\b', 'authentic photo'),
588
+ (r'\bprofessional studio shot\b', 'natural candid shot'),
589
+ (r'\bcorporate headshot\b', 'casual portrait'),
590
+ (r'\bgeneric model\b', 'real person'),
591
+ (r'\bperfect lighting\b', 'natural lighting'),
592
+ (r'\bshutterstock\b', 'authentic'),
593
+ (r'\bistock\b', 'documentary style'),
594
+ ]
595
+ for pattern, replacement in stock_replacements:
596
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
597
+
598
+ # =================================================================
599
+ # FIX UNREALISTIC BODY DESCRIPTIONS
600
+ # =================================================================
601
+ body_replacements = [
602
+ (r'\b(perfect body|flawless figure|ideal physique)\b', 'healthy confident body'),
603
+ (r'\b(six pack|bodybuilder|fitness model)\b', 'healthy fit person'),
604
+ (r'\b(impossibly thin|skeletal)\b', 'healthy'),
605
+ ]
606
+ for pattern, replacement in body_replacements:
607
+ prompt = re.sub(pattern, replacement, prompt, flags=re.IGNORECASE)
608
+
609
+ # =================================================================
610
+ # ENSURE AUTHENTICITY FOR AFFILIATE MARKETING
611
+ # =================================================================
612
+ # Affiliate marketing principle: authentic > polished
613
+ if 'authentic' not in prompt_lower and 'ugc' not in prompt_lower:
614
+ prompt += " Authentic, relatable style - not overly polished or commercial."
615
+
616
+ return prompt.strip()
617
+
618
  def copy_writer(self, creative_strategy: CreativeStrategies) -> CopyWriterOutput:
619
  """
620
  Generate ad copy from creative strategy.