Anupam007 committed on
Commit
23b2c8e
·
verified ·
1 Parent(s): d94298a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -107
app.py CHANGED
@@ -1,58 +1,39 @@
1
- # Step 1: Install/Update necessary libraries
2
- # Using -U ensures the latest version of google-generativeai is installed
3
- #!pip install -q -U gradio duckduckgo_search google-generativeai python-dotenv
4
-
5
- # --- IMPORTANT: After this cell runs, RESTART THE RUNTIME ---
6
- # --- (Runtime -> Restart runtime) for the library update to take effect ---
7
-
8
- # Step 2: Import libraries
9
  import gradio as gr
10
  import google.generativeai as genai
11
  from duckduckgo_search import DDGS
12
  import os
13
  import textwrap
14
- #from google.colab import userdata # For Colab secrets
15
  import traceback # For detailed error logging if needed
16
 
17
- # Step 3: Configure API Key (Using Colab Secrets)
18
- # --- Instructions ---
19
- # 1. Go to the "Secrets" tab (key icon πŸ”‘) on the left pane in Colab.
20
- # 2. Click "+ Add a new secret".
21
- # 3. Set the NAME exactly as: GOOGLE_API_KEY
22
- # 4. Paste your actual Google AI API key into the VALUE field.
23
- # 5. Ensure the "Notebook access" toggle is ON for this secret.
24
- # 6. Do NOT paste your API key directly into this code.
25
-
26
- # Initialize flag and key variable
27
  is_api_configured = False
28
  GOOGLE_API_KEY = None
29
 
30
- print("βš™οΈ Attempting to configure Google API Key...")
31
  try:
32
- GOOGLE_API_KEY = userdata.get('GOOGLE_API_KEY')
 
33
 
34
  if GOOGLE_API_KEY:
35
  genai.configure(api_key=GOOGLE_API_KEY)
36
- print("βœ… Google API Key configured successfully.")
37
  is_api_configured = True # Set flag to True ONLY if configure() succeeds
38
  else:
39
- # Secret found but empty - treat as not configured
40
- print("⚠️ Error: GOOGLE_API_KEY secret found but is empty.")
 
41
  is_api_configured = False
42
 
43
- except userdata.SecretNotFoundError:
44
- print("❌ Error: Secret 'GOOGLE_API_KEY' not found.")
45
- print("➑️ Please go to 'Secrets' (key icon πŸ”‘) and add GOOGLE_API_KEY with your API key value.")
46
- is_api_configured = False # Not configured if secret not found
47
  except Exception as e:
48
  print(f"❌ An unexpected error occurred during API Key configuration: {e}")
49
  is_api_configured = False # Not configured if any other error occurs
50
- # traceback.print_exc() # Uncomment for detailed traceback during configuration
51
 
52
  # --- End of API Key Configuration ---
53
 
54
 
55
- # Step 4: Define Helper Functions
56
 
57
  # Function to perform web search
58
  def search_web(query, num_results=7):
@@ -60,8 +41,7 @@ def search_web(query, num_results=7):
60
  print(f"πŸ” Searching the web for: '{query}'...")
61
  try:
62
  with DDGS() as ddgs:
63
- # Using region='wt-wt' for potentially broader results
64
- results = list(ddgs.text(query, region='wt-wt', safesearch='off', max_results=num_results)) # Added safesearch='off'
65
  if not results:
66
  print("⚠️ No search results found.")
67
  return "No relevant search results found for the query."
@@ -78,7 +58,7 @@ def search_web(query, num_results=7):
78
  return context
79
  except Exception as e:
80
  print(f"❌ Error during web search: {e}")
81
- # traceback.print_exc() # Uncomment for detailed traceback
82
  return f"Error occurred during web search. Details: {e}"
83
 
84
  # Function to generate the case study using Gemini
@@ -89,7 +69,8 @@ def generate_case_study(topic, search_context):
89
  # --- Check 1: API Configuration ---
90
  if not is_api_configured:
91
  print("❌ Cannot generate: Google API Key not configured successfully.")
92
- return "Error: Google API Key not configured successfully. Please check Colab Secrets setup and restart runtime."
 
93
 
94
  # --- Check 2: Search Results ---
95
  if "Error occurred during web search" in search_context or "No relevant search results found" in search_context:
@@ -97,29 +78,27 @@ def generate_case_study(topic, search_context):
97
  return f"Cannot generate case study due to search issues:\n{search_context}"
98
 
99
  # --- Configure the Gemini model ---
100
- model_name = 'gemini-1.5-flash-latest' # Using the recommended modern model
101
- # model_name = 'gemini-1.0-pro' # Alternative if flash causes issues
102
- # model_name = 'gemini-pro' # Less likely to work based on previous error
103
  try:
104
  print(f" Using model: {model_name}")
105
  model = genai.GenerativeModel(model_name)
106
  except Exception as e:
107
  print(f"❌ Error initializing GenerativeModel '{model_name}': {e}")
108
- # traceback.print_exc()
109
  # Try to list available models if initialization fails
 
110
  try:
111
  available_models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods]
112
  print(f" Available models supporting generateContent: {available_models}")
113
- if not available_models:
114
- return f"Error setting up the AI model: {e}. Additionally, no compatible models were found via ListModels."
 
115
  else:
116
- # Suggest trying one of the listed models
117
- suggested_model = next((m for m in available_models if 'flash' in m or 'pro' in m), available_models[0]) # Simple suggestion logic
118
- return f"Error setting up the AI model '{model_name}': {e}. You could try using one of the available models like: '{suggested_model.split('/')[-1]}'"
119
-
120
  except Exception as list_e:
121
  print(f" Additionally failed to list available models: {list_e}")
122
- return f"Error setting up the AI model '{model_name}': {e}. Failed to list alternatives."
 
123
 
124
 
125
  # --- Define the Prompt ---
@@ -132,21 +111,20 @@ def generate_case_study(topic, search_context):
132
  **Required Case Study Format:**
133
 
134
  **1. Title:** Create a concise and informative title based on the topic and findings.
135
- **2. Introduction/Executive Summary:** Briefly introduce the subject and the core topic (challenge, initiative). State the key outcome *mentioned in the sources*.
136
  **3. The Company/Subject:** Provide background information *only from the search results*.
137
  **4. The Challenge/Problem:** Describe the specific business issue mentioned in the sources.
138
- **5. The Solution:** Detail the implemented solution *based only on the sources*. Describe technology use (like GenAI) if mentioned.
139
  **6. Implementation/Process:** (Optional) Describe *only if information is available in the sources*.
140
- **7. Results/Impact:** Quantify results and impact using data *from the sources*. Describe qualitative benefits mentioned. If no results are mentioned, state that.
141
- **8. Conclusion:** Summarize key takeaways *based on the provided information*. Reiterate challenge, solution, outcomes *from sources*.
142
  **9. Sources:** List the URLs (`URL:` lines) from the search results that were most relevant.
143
 
144
  **Instructions:**
145
- * Adhere strictly to the format above. Use Markdown `##` for section headings (e.g., `## 1. Title`).
146
- * Base your writing ***exclusively*** on the information in the "Provided Search Context". Do not invent information or use external knowledge.
147
- * If details for a section are missing in the sources, explicitly state: "Information not available in the provided sources." Do not leave sections blank.
148
  * Maintain an objective and professional tone.
149
- * Ensure coherence and logical flow.
150
  * Format the output using Markdown.
151
 
152
  **Provided Search Context:**
@@ -159,68 +137,43 @@ def generate_case_study(topic, search_context):
159
 
160
  # --- Generate Content ---
161
  try:
162
- # Optional: Add safety settings if needed
163
- # safety_settings = [...]
164
- # response = model.generate_content(prompt, safety_settings=safety_settings)
165
  response = model.generate_content(prompt)
166
 
167
  # --- Process Response Safely ---
168
- # Check for content parts first
169
  if response.parts:
170
  generated_text = "".join(part.text for part in response.parts)
171
  print("βœ… Case study generated successfully.")
172
  return generated_text
173
- # Check for blocking reasons if no parts exist
174
  elif response.prompt_feedback and response.prompt_feedback.block_reason:
175
  block_reason = response.prompt_feedback.block_reason
176
  safety_info = f" Ratings: {response.prompt_feedback.safety_ratings}" if response.prompt_feedback.safety_ratings else ""
177
  print(f"⚠️ Generation blocked due to: {block_reason}")
178
  return f"Error: Generation failed. Blocked due to '{block_reason}'.{safety_info} Please try refining your topic or check content policies."
179
- # Handle candidates being empty or other unexpected scenarios
180
  elif not response.candidates:
181
- finish_reason = response.candidates[0].finish_reason if response.candidates else "UNKNOWN" # Attempt to get finish reason
182
  print(f"⚠️ Warning: Generation finished without valid content (Finish Reason: {finish_reason}).")
183
- return f"Error: The AI model finished generation but produced no usable content (Reason: {finish_reason}). This might indicate an issue with the prompt, the model, or safety filters."
184
  else:
185
- # Fallback for other unexpected empty response scenarios
186
  print("⚠️ Warning: Generation finished but produced no text content for unknown reasons.")
187
- # You could inspect the raw response object here if needed: print(response)
188
- return "Error: The AI model generated an empty response. This might be due to the input, content filters, or a temporary issue."
189
-
190
 
191
  except Exception as e:
192
  print(f"❌ Error during case study generation: {e}")
193
- # traceback.print_exc() # Uncomment for detailed traceback
194
  error_message = f"An unexpected error occurred during AI generation: {e}"
195
- # Check for common API errors
196
  if "API key not valid" in str(e) or "PermissionDenied" in str(e) or "AuthenticationError" in str(e):
197
- error_message = "Error: Invalid, expired, or missing API Key. Please verify your GOOGLE_API_KEY secret in Colab and ensure the Gemini API is enabled in your Google Cloud project."
198
- elif "Model not found" in str(e) or "models/" in str(e) and "is not found" in str(e):
199
- # Make error more specific based on the original error message
200
- error_message = f"Error: The specified AI model ('{model_name}') was not found or is not supported for generateContent with the current API version. Check the model name or try updating the library (`!pip install -U google-generativeai` + restart runtime)."
201
- # Attempt to list models again here might be useful
202
- try:
203
- available_models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods]
204
- error_message += f"\n Available models supporting generateContent: {available_models}"
205
- except Exception:
206
- error_message += "\n Failed to retrieve list of available models."
207
-
208
  elif "Resource has been exhausted" in str(e) or "Quota" in str(e):
209
- error_message = "Error: API quota exceeded. Please check your usage limits in Google Cloud Console or wait and try again."
210
- elif "Invalid API key" in str(e):
211
- error_message = "Error: The provided API key is invalid. Please double-check the GOOGLE_API_KEY secret value."
212
- elif hasattr(e, 'message'): # General Google API error message structure
213
- # Append original message if available and different
214
- if str(e) != e.message:
215
- error_message = f"Error during AI generation: {e.message} (Details: {e})"
216
- else:
217
- error_message = f"Error during AI generation: {e.message}"
218
-
219
-
220
  return error_message
221
 
222
 
223
- # Step 5: Define the main processing function for Gradio
224
  def create_case_study(company_or_topic):
225
  """Orchestrates the web search and case study generation process."""
226
  print("-" * 60) # Separator for new request
@@ -240,21 +193,17 @@ def create_case_study(company_or_topic):
240
  print("-" * 60) # Separator for end of request
241
  return case_study_markdown
242
 
243
- # Step 6: Create and Launch the Gradio Interface
244
  print("\nβš™οΈ Setting up Gradio interface...")
245
 
246
- # Add a final check before launching Gradio
247
  if not is_api_configured:
248
  print("\n" + "="*60)
249
- print("‼️ WARNING: Google API Key not configured successfully. ‼️")
250
- print(" The Gradio interface will launch, but case study generation WILL FAIL.")
251
- print(" Please ensure the 'GOOGLE_API_KEY' secret is correctly set up in Colab")
252
- print(" and that you have restarted the runtime after any library updates.")
253
  print("="*60 + "\n")
254
- # Optionally, prevent launch entirely:
255
- # print("\n❌ ERROR: Google API Key not configured. Cannot launch Gradio interface.")
256
- # exit() # Uncomment to stop execution
257
 
 
258
  iface = gr.Interface(
259
  fn=create_case_study,
260
  inputs=gr.Textbox(
@@ -263,11 +212,10 @@ iface = gr.Interface(
263
  label="Company Name or Topic"
264
  ),
265
  outputs=gr.Markdown( # Use Markdown output for better formatting
266
- label="Generated Case Study",
267
- # line_breaks=True # Uncomment if you prefer single newlines to create <br> tags
268
  ),
269
  title="πŸ“„ AI Case Study Generator (Gemini + DuckDuckGo)",
270
- description="Enter a company/topic. The app searches the web (DuckDuckGo) and uses Google's Gemini AI to write a case study *based only on the search results*. \n**Requires a correctly configured `GOOGLE_API_KEY` in Colab Secrets (πŸ”‘) and runtime restart after library updates.**",
271
  allow_flagging="never",
272
  examples=[
273
  ["How Spotify uses AI for music recommendations"],
@@ -279,11 +227,11 @@ iface = gr.Interface(
279
  )
280
 
281
  print("πŸš€ Launching Gradio interface...")
282
- # Launch in debug mode for detailed logs, share=True creates a public link (useful in Colab)
283
- # Set share=False if you don't need a public link
 
284
  try:
285
- iface.launch(debug=True, share=True)
286
  except Exception as e:
287
  print(f"❌ Failed to launch Gradio interface: {e}")
288
- print(" This might be due to networking issues, Colab limitations, or conflicts. Check logs.")
289
- # traceback.print_exc() # Uncomment for detailed traceback on launch failure
 
1
+ # Step 1: Import libraries (Ensure these are in requirements.txt)
 
 
 
 
 
 
 
2
  import gradio as gr
3
  import google.generativeai as genai
4
  from duckduckgo_search import DDGS
5
  import os
6
  import textwrap
 
7
  import traceback # For detailed error logging if needed
8
 
9
+ # --- Step 2: Configure API Key (Using Hugging Face Secrets) ---
 
 
 
 
 
 
 
 
 
10
  is_api_configured = False
11
  GOOGLE_API_KEY = None
12
 
13
+ print("⚙️ Attempting to configure Google API Key from HF Space secret...")
14
  try:
15
+ # Read the secret value set in the Hugging Face Space settings
16
+ GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
17
 
18
  if GOOGLE_API_KEY:
19
  genai.configure(api_key=GOOGLE_API_KEY)
20
+ print("✅ Google API Key configured successfully from HF secret.")
21
  is_api_configured = True # Set flag to True ONLY if configure() succeeds
22
  else:
23
+ # Secret variable not found or empty in HF Space settings
24
+ print("❌ Error: GOOGLE_API_KEY secret not found or is empty in Space settings.")
25
+ print("➡️ Please go to your Space Settings -> Secrets and ensure 'GOOGLE_API_KEY' is added with your API key value.")
26
  is_api_configured = False
27
 
 
 
 
 
28
  except Exception as e:
29
  print(f"❌ An unexpected error occurred during API Key configuration: {e}")
30
  is_api_configured = False # Not configured if any other error occurs
31
+ traceback.print_exc() # Print detailed traceback in HF logs for debugging
32
 
33
  # --- End of API Key Configuration ---
34
 
35
 
36
+ # Step 3: Define Helper Functions
37
 
38
  # Function to perform web search
39
  def search_web(query, num_results=7):
 
41
  print(f"🔍 Searching the web for: '{query}'...")
42
  try:
43
  with DDGS() as ddgs:
44
+ results = list(ddgs.text(query, region='wt-wt', safesearch='off', max_results=num_results))
 
45
  if not results:
46
  print("⚠️ No search results found.")
47
  return "No relevant search results found for the query."
 
58
  return context
59
  except Exception as e:
60
  print(f"❌ Error during web search: {e}")
61
+ traceback.print_exc() # Log details in HF
62
  return f"Error occurred during web search. Details: {e}"
63
 
64
  # Function to generate the case study using Gemini
 
69
  # --- Check 1: API Configuration ---
70
  if not is_api_configured:
71
  print("❌ Cannot generate: Google API Key not configured successfully.")
72
+ # Provide error message tailored for HF environment
73
+ return "Error: Google API Key not configured successfully. Please check the GOOGLE_API_KEY secret in your Hugging Face Space settings and ensure it's correct. The space might need a restart after setting the secret."
74
 
75
  # --- Check 2: Search Results ---
76
  if "Error occurred during web search" in search_context or "No relevant search results found" in search_context:
 
78
  return f"Cannot generate case study due to search issues:\n{search_context}"
79
 
80
  # --- Configure the Gemini model ---
81
+ model_name = 'gemini-1.5-flash-latest'
 
 
82
  try:
83
  print(f" Using model: {model_name}")
84
  model = genai.GenerativeModel(model_name)
85
  except Exception as e:
86
  print(f"❌ Error initializing GenerativeModel '{model_name}': {e}")
87
+ traceback.print_exc() # Log details in HF
88
  # Try to list available models if initialization fails
89
+ error_message = f"Error setting up the AI model '{model_name}': {e}."
90
  try:
91
  available_models = [m.name for m in genai.list_models() if 'generateContent' in m.supported_generation_methods]
92
  print(f" Available models supporting generateContent: {available_models}")
93
+ if available_models:
94
+ suggested_model = next((m for m in available_models if 'flash' in m or 'pro' in m), available_models[0])
95
+ error_message += f" You could try updating the model name in app.py to one of these like: '{suggested_model.split('/')[-1]}'"
96
  else:
97
+ error_message += " Additionally, no compatible models were found via ListModels."
 
 
 
98
  except Exception as list_e:
99
  print(f" Additionally failed to list available models: {list_e}")
100
+ error_message += " Failed to list alternative models."
101
+ return error_message
102
 
103
 
104
  # --- Define the Prompt ---
 
111
  **Required Case Study Format:**
112
 
113
  **1. Title:** Create a concise and informative title based on the topic and findings.
114
+ **2. Introduction/Executive Summary:** Briefly introduce the subject and the core topic. State the key outcome *mentioned in the sources*.
115
  **3. The Company/Subject:** Provide background information *only from the search results*.
116
  **4. The Challenge/Problem:** Describe the specific business issue mentioned in the sources.
117
+ **5. The Solution:** Detail the implemented solution *based only on the sources*.
118
  **6. Implementation/Process:** (Optional) Describe *only if information is available in the sources*.
119
+ **7. Results/Impact:** Quantify results and impact using data *from the sources*. If no results are mentioned, state that.
120
+ **8. Conclusion:** Summarize key takeaways *based on the provided information*.
121
  **9. Sources:** List the URLs (`URL:` lines) from the search results that were most relevant.
122
 
123
  **Instructions:**
124
+ * Adhere strictly to the format above. Use Markdown `##` for section headings.
125
+ * Base your writing ***exclusively*** on the information in the "Provided Search Context". Do not invent information.
126
+ * If details for a section are missing in the sources, explicitly state: "Information not available in the provided sources."
127
  * Maintain an objective and professional tone.
 
128
  * Format the output using Markdown.
129
 
130
  **Provided Search Context:**
 
137
 
138
  # --- Generate Content ---
139
  try:
 
 
 
140
  response = model.generate_content(prompt)
141
 
142
  # --- Process Response Safely ---
 
143
  if response.parts:
144
  generated_text = "".join(part.text for part in response.parts)
145
  print("βœ… Case study generated successfully.")
146
  return generated_text
 
147
  elif response.prompt_feedback and response.prompt_feedback.block_reason:
148
  block_reason = response.prompt_feedback.block_reason
149
  safety_info = f" Ratings: {response.prompt_feedback.safety_ratings}" if response.prompt_feedback.safety_ratings else ""
150
  print(f"⚠️ Generation blocked due to: {block_reason}")
151
  return f"Error: Generation failed. Blocked due to '{block_reason}'.{safety_info} Please try refining your topic or check content policies."
 
152
  elif not response.candidates:
153
+ finish_reason = response.candidates[0].finish_reason if response.candidates else "UNKNOWN"
154
  print(f"⚠️ Warning: Generation finished without valid content (Finish Reason: {finish_reason}).")
155
+ return f"Error: The AI model finished generation but produced no usable content (Reason: {finish_reason}). Check model compatibility or prompt complexity."
156
  else:
 
157
  print("⚠️ Warning: Generation finished but produced no text content for unknown reasons.")
158
+ return "Error: The AI model generated an empty response. This might be due to input, filters, or a temporary issue."
 
 
159
 
160
  except Exception as e:
161
  print(f"❌ Error during case study generation: {e}")
162
+ traceback.print_exc() # Log details in HF
163
  error_message = f"An unexpected error occurred during AI generation: {e}"
164
+ # Add specific error checks relevant to Gemini API
165
  if "API key not valid" in str(e) or "PermissionDenied" in str(e) or "AuthenticationError" in str(e):
166
+ error_message = "Error: Invalid, expired, or missing API Key. Please double-check the GOOGLE_API_KEY secret in Space settings and ensure the Gemini API is enabled in your Google Cloud project."
167
+ elif "Model not found" in str(e):
168
+ error_message = f"Error: The AI model ('{model_name}') was not found or is unsupported. Check the model name in app.py or try updating the google-generativeai library in requirements.txt."
 
 
 
 
 
 
 
 
169
  elif "Resource has been exhausted" in str(e) or "Quota" in str(e):
170
+ error_message = "Error: API quota exceeded. Check your usage limits in Google Cloud Console."
171
+ elif hasattr(e, 'message') and str(e) != e.message:
172
+ error_message = f"Error during AI generation: {e.message} (Details: {e})"
 
 
 
 
 
 
 
 
173
  return error_message
174
 
175
 
176
+ # Step 4: Define the main processing function for Gradio
177
  def create_case_study(company_or_topic):
178
  """Orchestrates the web search and case study generation process."""
179
  print("-" * 60) # Separator for new request
 
193
  print("-" * 60) # Separator for end of request
194
  return case_study_markdown
195
 
196
+ # Step 5: Create and Launch the Gradio Interface
197
  print("\n⚙️ Setting up Gradio interface...")
198
 
199
+ # Add a final check before defining Gradio Interface (optional but good practice)
200
  if not is_api_configured:
201
  print("\n" + "="*60)
202
+ print("‼️ WARNING: Google API Key not configured successfully at startup. ‼️")
203
+ print(" The Gradio interface will launch, but case study generation WILL FAIL until the API key is correctly set in secrets and the space potentially restarted.")
 
 
204
  print("="*60 + "\n")
 
 
 
205
 
206
+ # Define the Gradio interface
207
  iface = gr.Interface(
208
  fn=create_case_study,
209
  inputs=gr.Textbox(
 
212
  label="Company Name or Topic"
213
  ),
214
  outputs=gr.Markdown( # Use Markdown output for better formatting
215
+ label="Generated Case Study"
 
216
  ),
217
  title="📄 AI Case Study Generator (Gemini + DuckDuckGo)",
218
+ description="Enter a company/topic. The app searches the web (DuckDuckGo) and uses Google's Gemini AI to write a case study *based only on the search results*. \n**Requires a correctly configured `GOOGLE_API_KEY` secret in Hugging Face Space Settings.**",
219
  allow_flagging="never",
220
  examples=[
221
  ["How Spotify uses AI for music recommendations"],
 
227
  )
228
 
229
  print("🚀 Launching Gradio interface...")
230
+
231
+ # Launch the interface (share=True is not needed on HF Spaces)
232
+ # Use debug=True for more detailed logs initially, you can remove it later
233
  try:
234
+ iface.launch(debug=True)
235
  except Exception as e:
236
  print(f"❌ Failed to launch Gradio interface: {e}")
237
+ traceback.print_exc() # Log details in HF