cryogenic22 committed · Commit 0077668 · verified · 1 Parent(s): 1b888d4

Update multi_llm_provider.py

Files changed (1):
  1. multi_llm_provider.py +298 -66
multi_llm_provider.py CHANGED
@@ -24,7 +24,14 @@ class AIProviderManager:
         if ANTHROPIC_API_KEY:
             try:
                 self.providers["claude"] = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY)
-                st.sidebar.success("🟢 Claude AI connected")
+
+                # Test connection by listing models
+                try:
+                    models = self.providers["claude"].models.list()
+                    model_list = [model.id for model in models.data]
+                    st.sidebar.success(f"🟢 Claude AI connected - Available models: {', '.join(model_list[:3])}")
+                except Exception as e:
+                    st.sidebar.warning(f"🟡 Claude AI connected but couldn't list models: {str(e)}")
             except Exception as e:
                 st.sidebar.error(f"🔴 Error initializing Claude: {str(e)}")

@@ -34,7 +41,14 @@ class AIProviderManager:
                 import openai
                 openai.api_key = OPENAI_API_KEY
                 self.providers["openai"] = True
-                st.sidebar.success("🟢 OpenAI connected")
+
+                # Test connection by listing models
+                try:
+                    client = openai.OpenAI(api_key=OPENAI_API_KEY)
+                    models = client.models.list()
+                    st.sidebar.success("🟢 OpenAI connected")
+                except Exception as e:
+                    st.sidebar.warning(f"🟡 OpenAI connected but couldn't list models: {str(e)}")
             except ImportError:
                 st.sidebar.warning("⚠️ OpenAI SDK not installed. Run 'pip install openai'")
             except Exception as e:
@@ -63,13 +77,21 @@ class AIProviderManager:
         """Get all available models across providers"""
         models = {}

-        # Claude models
+        # Claude models - dynamically get if possible
         if "claude" in self.providers:
-            models.update({
-                "claude-3-sonnet-20250219": "Claude 3 Sonnet",
-                "claude-3-haiku-20250319": "Claude 3 Haiku",
-                "claude-3-opus-20250229": "Claude 3 Opus"
-            })
+            try:
+                claude_models = self.providers["claude"].models.list()
+                for model in claude_models.data:
+                    models[model.id] = f"Claude {model.id.split('-')[1].capitalize()}"
+            except Exception:
+                # Fallback to hardcoded models if API call fails
+                models.update({
+                    "claude-3-sonnet-20250219": "Claude 3 Sonnet",
+                    "claude-3-haiku-20250319": "Claude 3 Haiku",
+                    "claude-3-opus-20250229": "Claude 3 Opus",
+                    # Add additional models that might exist in 2025
+                    "claude-3-7-sonnet-20250219": "Claude 3.7 Sonnet"
+                })

         # OpenAI models
         if "openai" in self.providers:
@@ -101,37 +123,94 @@ class AIProviderManager:
             return self._generate_with_deepseek(prompt, model, system_prompt, temperature, max_tokens)

         else:
-            # Fallback to Claude if available
+            # Try to find any available provider
+            available_providers = []
             if "claude" in self.providers:
-                st.warning(f"Model {model} not available. Falling back to Claude 3 Sonnet.")
-                return self._generate_with_claude(
-                    prompt,
-                    "claude-3-sonnet-20250219",
-                    system_prompt,
-                    temperature,
-                    max_tokens
-                )
+                available_providers.append("claude")
+            if "openai" in self.providers:
+                available_providers.append("openai")
+            if "deepseek" in self.providers:
+                available_providers.append("deepseek")
+
+            if available_providers:
+                provider = available_providers[0]
+                if provider == "claude":
+                    # Get available Claude models
+                    try:
+                        models = self.providers["claude"].models.list()
+                        if models.data:
+                            fallback_model = models.data[0].id
+                            st.warning(f"Model {model} not available. Falling back to {fallback_model}.")
+                            return self._generate_with_claude(prompt, fallback_model, system_prompt, temperature, max_tokens)
+                    except:
+                        pass
+                    # If model list fails, use hardcoded fallback
+                    st.warning(f"Model {model} not available. Falling back to Claude 3 Sonnet.")
+                    return self._generate_with_claude(prompt, "claude-3-sonnet-20250219", system_prompt, temperature, max_tokens)
+                elif provider == "openai":
+                    st.warning(f"Model {model} not available. Falling back to GPT-3.5 Turbo.")
+                    return self._generate_with_openai(prompt, "gpt-3.5-turbo", system_prompt, temperature, max_tokens)
+                elif provider == "deepseek":
+                    st.warning(f"Model {model} not available. Falling back to DeepSeek Chat.")
+                    return self._generate_with_deepseek(prompt, "deepseek-chat", system_prompt, temperature, max_tokens)
             else:
                 raise ValueError(f"No AI provider available for model: {model}")

     def _generate_with_claude(self, prompt: str, model: str, system_prompt: str = None, temperature: float = 0.7, max_tokens: int = 1000):
-        """Generate text using Claude"""
+        """Generate text using Claude with enhanced error handling"""
         client = self.providers["claude"]
-
         messages = [{"role": "user", "content": prompt}]

-        response = client.messages.create(
-            model=model,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            system=system_prompt if system_prompt else "You are a helpful assistant.",
-            messages=messages
-        )
-
-        return response.content[0].text
+        try:
+            response = client.messages.create(
+                model=model,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                system=system_prompt if system_prompt else "You are a helpful assistant.",
+                messages=messages
+            )
+            return response.content[0].text
+        except Exception as e:
+            error_msg = str(e)
+            st.error(f"Claude API Error ({model}): {error_msg}")
+
+            # Check for model availability errors
+            if ("model" in error_msg.lower() and "not" in error_msg.lower()) or "not_found" in error_msg.lower():
+                try:
+                    # Try to get available models
+                    available_models = []
+                    try:
+                        models_list = client.models.list()
+                        available_models = [m.id for m in models_list.data]
+                        st.info(f"Available Claude models: {', '.join(available_models)}")
+                    except:
+                        # If listing fails, use fallback list
+                        available_models = ["claude-3-7-sonnet-20250219", "claude-3-sonnet-20250219", "claude-3-haiku-20250319", "claude-3-opus-20250229"]
+
+                    # Try available models
+                    for fallback_model in available_models:
+                        if fallback_model != model:
+                            try:
+                                st.warning(f"Trying fallback model: {fallback_model}")
+                                response = client.messages.create(
+                                    model=fallback_model,
+                                    max_tokens=max_tokens,
+                                    temperature=temperature,
+                                    system=system_prompt if system_prompt else "You are a helpful assistant.",
+                                    messages=messages
+                                )
+                                return response.content[0].text
+                            except Exception as fallback_error:
+                                st.warning(f"Fallback to {fallback_model} failed: {str(fallback_error)}")
+                                continue
+                except Exception as list_error:
+                    st.error(f"Error while attempting fallbacks: {str(list_error)}")
+
+            # If we reach here, all fallbacks failed or it's another type of error
+            raise ValueError(f"Claude API failed with error: {error_msg}")

     def _generate_with_openai(self, prompt: str, model: str, system_prompt: str = None, temperature: float = 0.7, max_tokens: int = 1000):
-        """Generate text using OpenAI"""
+        """Generate text using OpenAI with enhanced error handling"""
         import openai

         messages = []
@@ -140,17 +219,42 @@ class AIProviderManager:

         messages.append({"role": "user", "content": prompt})

-        response = openai.chat.completions.create(
-            model=model,
-            messages=messages,
-            temperature=temperature,
-            max_tokens=max_tokens
-        )
-
-        return response.choices[0].message.content
+        try:
+            response = openai.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            return response.choices[0].message.content
+        except Exception as e:
+            error_msg = str(e)
+            st.error(f"OpenAI API Error ({model}): {error_msg}")
+
+            # Check for model availability errors
+            if "model" in error_msg.lower() and ("not" in error_msg.lower() or "find" in error_msg.lower()):
+                # Try fallback models
+                fallback_models = ["gpt-3.5-turbo", "gpt-4"]
+                for fallback_model in fallback_models:
+                    if fallback_model != model:
+                        try:
+                            st.warning(f"Trying fallback model: {fallback_model}")
+                            response = openai.chat.completions.create(
+                                model=fallback_model,
+                                messages=messages,
+                                temperature=temperature,
+                                max_tokens=max_tokens
+                            )
+                            return response.choices[0].message.content
+                        except Exception as fallback_error:
+                            st.warning(f"Fallback to {fallback_model} failed: {str(fallback_error)}")
+                            continue
+
+            # If all fallbacks fail or it's another type of error
+            raise ValueError(f"OpenAI API failed with error: {error_msg}")

     def _generate_with_deepseek(self, prompt: str, model: str, system_prompt: str = None, temperature: float = 0.7, max_tokens: int = 1000):
-        """Generate text using DeepSeek"""
+        """Generate text using DeepSeek with enhanced error handling"""
         import openai as deepseek

         deepseek.api_key = DEEPSEEK_API_KEY
@@ -162,14 +266,46 @@ class AIProviderManager:

         messages.append({"role": "user", "content": prompt})

-        response = deepseek.chat.completions.create(
-            model="deepseek-chat" if model == "deepseek-chat" else "deepseek-coder",
-            messages=messages,
-            temperature=temperature,
-            max_tokens=max_tokens
-        )
-
-        return response.choices[0].message.content
+        try:
+            response = deepseek.chat.completions.create(
+                model="deepseek-chat" if model == "deepseek-chat" else "deepseek-coder",
+                messages=messages,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            return response.choices[0].message.content
+        except Exception as e:
+            error_msg = str(e)
+            st.error(f"DeepSeek API Error ({model}): {error_msg}")
+
+            # Check for model availability errors and try fallback
+            if model == "deepseek-chat":
+                try:
+                    st.warning("Trying fallback model: deepseek-coder")
+                    response = deepseek.chat.completions.create(
+                        model="deepseek-coder",
+                        messages=messages,
+                        temperature=temperature,
+                        max_tokens=max_tokens
+                    )
+                    return response.choices[0].message.content
+                except Exception as fallback_error:
+                    st.warning(f"Fallback failed: {str(fallback_error)}")
+            elif model == "deepseek-coder":
+                try:
+                    st.warning("Trying fallback model: deepseek-chat")
+                    response = deepseek.chat.completions.create(
+                        model="deepseek-chat",
+                        messages=messages,
+                        temperature=temperature,
+                        max_tokens=max_tokens
+                    )
+                    return response.choices[0].message.content
+                except Exception as fallback_error:
+                    st.warning(f"Fallback failed: {str(fallback_error)}")
+
+            # If fallbacks fail or it's another type of error
+            raise ValueError(f"DeepSeek API failed with error: {error_msg}")

     def web_search(self, query: str) -> List[Dict[str, Any]]:
         """Perform a web search using Perplexity API"""
@@ -258,14 +394,37 @@ class AIProviderManager:
            }}
            """

-            # Generate enhanced content
-            response = self.generate_text(
-                prompt=prompt,
-                model="claude-3-sonnet-20250219" if "claude" in self.providers else "gpt-4" if "openai" in self.providers else "deepseek-chat",
-                system_prompt="You are an expert at enhancing presentation content with the latest information. Always respond with valid JSON.",
-                temperature=0.5,
-                max_tokens=2000
-            )
+            # Get models from available providers
+            available_models = []
+            if "claude" in self.providers:
+                available_models.append("claude-3-7-sonnet-20250219") # Use newest model first
+                available_models.append("claude-3-sonnet-20250219")
+            if "openai" in self.providers:
+                available_models.append("gpt-4")
+                available_models.append("gpt-3.5-turbo")
+            if "deepseek" in self.providers:
+                available_models.append("deepseek-chat")
+
+            # Try models until one works
+            response = None
+            for model in available_models:
+                try:
+                    response = self.generate_text(
+                        prompt=prompt,
+                        model=model,
+                        system_prompt="You are an expert at enhancing presentation content with the latest information. Always respond with valid JSON.",
+                        temperature=0.5,
+                        max_tokens=2000
+                    )
+                    if response:
+                        break
+                except Exception as e:
+                    st.warning(f"Error using {model}: {str(e)}. Trying next model...")
+                    continue
+
+            if not response:
+                st.error("All models failed. Could not enhance content.")
+                return slide_content

             # Extract the JSON from the response
             try:
@@ -296,8 +455,10 @@ class AIProviderManager:
                    st.success("Slide enhanced with latest web information!")
                else:
                    st.error("Could not extract JSON from AI response")
+                   st.info("Raw response: " + response[:500] + "...") # Show part of the response for debugging
            except Exception as e:
                st.error(f"Error processing AI response: {str(e)}")
+               st.info("Raw response: " + response[:500] + "...") # Show part of the response for debugging

         except Exception as e:
             st.error(f"Error enhancing content with web search: {str(e)}")
@@ -334,20 +495,36 @@ class AIProviderManager:

         try:
             # Choose the model based on availability
-            model = "claude-3-haiku-20250319" if "claude" in self.providers else "gpt-3.5-turbo"
+            available_models = []
+            if "claude" in self.providers:
+                available_models.append("claude-3-haiku-20250319")
+            if "openai" in self.providers:
+                available_models.append("gpt-3.5-turbo")
+            if "deepseek" in self.providers:
+                available_models.append("deepseek-chat")

-            response = self.generate_text(
-                prompt=prompt,
-                model=model,
-                system_prompt="You are an expert at creating image generation prompts for business presentations.",
-                temperature=0.7,
-                max_tokens=200
-            )
+            # Try models until one works
+            description = None
+            for model in available_models:
+                try:
+                    description = self.generate_text(
+                        prompt=prompt,
+                        model=model,
+                        system_prompt="You are an expert at creating image generation prompts for business presentations.",
+                        temperature=0.7,
+                        max_tokens=200
+                    )
+                    if description:
+                        break
+                except Exception as e:
+                    st.warning(f"Error using {model} for image description: {str(e)}. Trying next model...")
+                    continue

-            # Clean up the response
-            description = response.strip()
+            if not description:
+                return f"An image representing {title}"

-            return description
+            # Clean up the response
+            return description.strip()
         except Exception as e:
             st.error(f"Error generating image description: {str(e)}")
             return f"An image representing {title}"
@@ -358,4 +535,59 @@ def get_ai_manager():
     if 'ai_manager' not in st.session_state:
         st.session_state.ai_manager = AIProviderManager()

-    return st.session_state.ai_manager
+    return st.session_state.ai_manager
+
+# Add a diagnostic function to test model availability
+def test_models():
+    """Test model availability across providers"""
+    ai_manager = get_ai_manager()
+
+    st.write("### LLM Provider Diagnostics")
+
+    # Test Claude
+    if "claude" in ai_manager.providers:
+        try:
+            st.write("#### Testing Claude API:")
+            claude = ai_manager.providers["claude"]
+            models = claude.models.list()
+            st.success(f"Available Claude models:")
+            for model in models.data:
+                st.write(f"- {model.id}")
+
+            # Test a quick generation
+            test_prompt = "Say hello in one word."
+            with st.spinner(f"Testing with {models.data[0].id}..."):
+                response = claude.messages.create(
+                    model=models.data[0].id,
+                    max_tokens=10,
+                    messages=[{"role": "user", "content": test_prompt}]
+                )
+                st.success(f"Test response: {response.content[0].text}")
+        except Exception as e:
+            st.error(f"Claude API test failed: {str(e)}")
+    else:
+        st.warning("Claude API not configured")
+
+    # Test OpenAI
+    if "openai" in ai_manager.providers:
+        try:
+            st.write("#### Testing OpenAI API:")
+            import openai
+            client = openai.OpenAI(api_key=OPENAI_API_KEY)
+            models = client.models.list()
+            st.success(f"Available OpenAI models:")
+            for model in models.data[:5]: # Show first 5 to avoid cluttering
+                st.write(f"- {model.id}")
+
+            # Test a quick generation
+            test_prompt = "Say hello in one word."
+            with st.spinner("Testing with gpt-3.5-turbo..."):
+                response = client.chat.completions.create(
+                    model="gpt-3.5-turbo",
+                    messages=[{"role": "user", "content": test_prompt}]
+                )
+                st.success(f"Test response: {response.choices[0].message.content}")
+        except Exception as e:
+            st.error(f"OpenAI API test failed: {str(e)}")
+    else:
+        st.warning("OpenAI API not configured")