abhinav0231 commited on
Commit
acba535
·
verified · 1 Parent(s): 755e872
1 Parent(s): 755e872

Update image_generation.py

Browse files
Files changed (1) hide show
  1. image_generation.py +11 -20
image_generation.py CHANGED
@@ -15,13 +15,11 @@ try:
15
  api_key = st.secrets.get("GEMINI_API_KEY") or os.getenv("GEMINI_API_KEY")
16
  if api_key:
17
  genai.configure(api_key=api_key)
18
- # The documentation uses a client instance.
19
- client = genai.Client()
20
- print("✅ Google AI client for Gemini initialized successfully.")
21
  else:
22
  print("⚠️ Warning: GEMINI_API_KEY not found.")
23
  except Exception as e:
24
- print(f"❌ Error initializing Google AI client: {e}")
25
 
26
  # --- Helper Function ---
27
  def save_binary_file(file_name: str, data: bytes):
@@ -33,35 +31,30 @@ def save_binary_file(file_name: str, data: bytes):
33
  print(f"❌ Error saving file {file_name}: {e}")
34
 
35
  # --- IMAGE GENERATION FUNCTION ---
 
36
  def generate_image_with_gemini(
37
  prompt: str,
38
  output_file_base: str,
39
  context_image: Optional[Image.Image] = None
40
  ) -> Optional[str]:
41
  """
42
- Generates an image using the exact syntax from the official documentation.
43
  """
44
- if not client:
45
- print("❌ Gemini client not initialized. Cannot generate image.")
46
- return None
47
-
48
  print(f"--- 🎨 Generating image for prompt: '{prompt[:70]}...' ---")
49
  try:
50
- model = "gemini-2.5-flash-image-preview"
 
51
 
 
52
  content_parts = []
53
-
54
  if context_image:
55
- # For image-to-image, the list contains the prompt and the image.
56
  print(" -> Using previous image as context for consistent styling.")
57
  content_parts.extend([prompt, context_image])
58
  else:
59
- # For text-to-image, the list just contains the prompt.
60
  content_parts.append(prompt)
61
 
62
- # --- API CALL ---
63
- response = client.models.generate_content(
64
- model=model,
65
  contents=content_parts,
66
  stream=True
67
  )
@@ -69,12 +62,11 @@ def generate_image_with_gemini(
69
  saved_file_path = None
70
  text_responses = []
71
 
72
- # --- RESPONSE HANDLING---
73
  for chunk in response:
74
  if chunk.parts:
75
  for part in chunk.parts:
76
  if part.inline_data:
77
- # This is an image part
78
  data = part.inline_data.data
79
  mime_type = part.inline_data.mime_type
80
  file_extension = mimetypes.guess_extension(mime_type) or ".jpg"
@@ -83,9 +75,8 @@ def generate_image_with_gemini(
83
  save_binary_file(full_file_name, data)
84
  saved_file_path = full_file_name
85
  print(f"✅ Successfully generated and saved image: {full_file_name}")
86
- break # Image found, exit inner loop
87
  elif part.text:
88
- # This is a text part
89
  text_responses.append(part.text)
90
 
91
  if not saved_file_path and text_responses:
 
15
  api_key = st.secrets.get("GEMINI_API_KEY") or os.getenv("GEMINI_API_KEY")
16
  if api_key:
17
  genai.configure(api_key=api_key)
18
+ print("✅ Google AI client for Gemini configured successfully.")
 
 
19
  else:
20
  print("⚠️ Warning: GEMINI_API_KEY not found.")
21
  except Exception as e:
22
+ print(f"❌ Error configuring Google AI client: {e}")
23
 
24
  # --- Helper Function ---
25
  def save_binary_file(file_name: str, data: bytes):
 
31
  print(f"❌ Error saving file {file_name}: {e}")
32
 
33
  # --- IMAGE GENERATION FUNCTION ---
34
+ # --- CORRECTED IMAGE GENERATION FUNCTION ---
35
  def generate_image_with_gemini(
36
  prompt: str,
37
  output_file_base: str,
38
  context_image: Optional[Image.Image] = None
39
  ) -> Optional[str]:
40
  """
41
+ Generates an image using the correct syntax for your library version.
42
  """
 
 
 
 
43
  print(f"--- 🎨 Generating image for prompt: '{prompt[:70]}...' ---")
44
  try:
45
+ # 1. Create the model instance directly
46
+ model = genai.GenerativeModel(model_name="gemini-2.5-flash-image-preview")
47
 
48
+ # 2. Create the content list
49
  content_parts = []
 
50
  if context_image:
 
51
  print(" -> Using previous image as context for consistent styling.")
52
  content_parts.extend([prompt, context_image])
53
  else:
 
54
  content_parts.append(prompt)
55
 
56
+ # 3. Make the call on the model object
57
+ response = model.generate_content(
 
58
  contents=content_parts,
59
  stream=True
60
  )
 
62
  saved_file_path = None
63
  text_responses = []
64
 
65
+ # 4. Process the response
66
  for chunk in response:
67
  if chunk.parts:
68
  for part in chunk.parts:
69
  if part.inline_data:
 
70
  data = part.inline_data.data
71
  mime_type = part.inline_data.mime_type
72
  file_extension = mimetypes.guess_extension(mime_type) or ".jpg"
 
75
  save_binary_file(full_file_name, data)
76
  saved_file_path = full_file_name
77
  print(f"✅ Successfully generated and saved image: {full_file_name}")
78
+ break
79
  elif part.text:
 
80
  text_responses.append(part.text)
81
 
82
  if not saved_file_path and text_responses: