Kunjan_Shah committed on
Commit
7281234
·
unverified ·
1 Parent(s): 9460ea4

Update model.py

Browse files

Restored Groq client

Files changed (1) hide show
  1. core/model.py +51 -25
core/model.py CHANGED
@@ -3,12 +3,12 @@ from dotenv import load_dotenv
3
  from openai import OpenAI
4
  from groq import Groq
5
  import streamlit as st
6
- # Load environment variables from .env file
7
  load_dotenv()
8
 
9
  def generate_response(system_prompt: str, user_prompt: str, temp: float = 0.7):
10
  """
11
- Generate a response using Gemini LLM.
12
 
13
  Args:
14
  system_prompt (str): The system prompt to set the context for the model
@@ -17,30 +17,56 @@ def generate_response(system_prompt: str, user_prompt: str, temp: float = 0.7):
17
  Returns:
18
  str: The model's response
19
  """
20
- # Get API key from environment variables
21
- api_key = os.getenv("GEMINI_API_KEY")
22
-
23
- if not api_key:
24
- raise ValueError("GEMINI_API_KEY not found in environment variables")
25
-
26
- # Initialize the OpenAI client with Gemini API
27
- client = OpenAI(
28
- api_key=api_key,
29
- base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
30
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
- # Create chat completion request
33
- response = client.chat.completions.create(
34
- model="gemini-2.0-flash-lite",
35
- messages=[
36
- {"role": "system", "content": system_prompt},
37
- {"role": "user", "content": user_prompt}
38
- ],
39
- temperature=temp
40
- )
41
-
42
- # Return the generated response
43
- return response.choices[0].message.content
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  # Example usage
46
  if __name__ == "__main__":
 
3
  from openai import OpenAI
4
  from groq import Groq
5
  import streamlit as st
6
+
7
  load_dotenv()
8
 
9
  def generate_response(system_prompt: str, user_prompt: str, temp: float = 0.7):
10
  """
11
+ Generate a response using Gemini or Groq LLM.
12
 
13
  Args:
14
  system_prompt (str): The system prompt to set the context for the model
 
17
  Returns:
18
  str: The model's response
19
  """
20
+ try:
21
+ # Try Gemini first
22
+ api_key = os.getenv("GEMINI_API_KEY")
23
+
24
+ if not api_key:
25
+ raise ValueError("GEMINI_API_KEY not found in environment variables")
26
+
27
+ # Initialize the OpenAI client with Gemini API
28
+ client = OpenAI(
29
+ api_key=api_key,
30
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/"
31
+ )
32
+ st.info("🤖 Using Gemini model") # Debug message
33
+ # Create chat completion request
34
+ response = client.chat.completions.create(
35
+ model="gemini-2.0-flash-lite",
36
+ messages=[
37
+ {"role": "system", "content": system_prompt},
38
+ {"role": "user", "content": user_prompt}
39
+ ],
40
+ temperature=temp
41
+ )
42
+
43
+ # Return the generated response
44
+ return response.choices[0].message.content
45
 
46
+ except Exception as e:
47
+ # Fallback to Groq if Gemini fails
48
+ try:
49
+ groq_key = os.getenv("GROQ_API_KEY")
50
+ if not groq_key:
51
+ raise ValueError("GROQ_API_KEY not found in environment variables")
52
+
53
+ groq_client = Groq(api_key=groq_key)
54
+ st.warning("⚠️ Gemini failed, using Groq model") # Debug message
55
+
56
+ response = groq_client.chat.completions.create(
57
+ messages=[
58
+ {"role": "system", "content": system_prompt},
59
+ {"role": "user", "content": user_prompt}
60
+ ],
61
+ model="llama-3.3-70b-versatile",
62
+ temperature=temp
63
+ )
64
+
65
+ return response.choices[0].message.content
66
+
67
+ except Exception as groq_error:
68
+ st.error(f"Both models failed. Gemini error: {e}, Groq error: {groq_error}")
69
+ raise
70
 
71
  # Example usage
72
  if __name__ == "__main__":