pendrag committed on
Commit
d4834d2
·
1 Parent(s): e1bcc3b
Files changed (1) hide show
  1. app.py +4 -8
app.py CHANGED
@@ -14,6 +14,7 @@ import re
14
  # Model name for LLM calls. Can be overridden by setting the LLM_MODEL
15
  # environment variable. Falls back to a sensible default if unset.
16
  MODEL_NAME = os.getenv("LLM_MODEL", "models/gemini-flash-latest")
 
17
 
18
  # LLM_MODEL_NAME must be set in the environment
19
 
@@ -124,10 +125,8 @@ def llm_expand_query(query):
124
  presence_penalty=0
125
  )
126
 
127
- return response.choices[0].message.content
128
- else:
129
- response = genai.GenerativeModel("gemini-1.5-flash").generate_content(prompt)
130
- return response.text
131
 
132
  def llm_generate_answer(prompt):
133
  """ Generate a response from the LLM """
@@ -173,11 +172,8 @@ def llm_generate_answer(prompt):
173
  presence_penalty=0
174
  )
175
 
176
- return response.choices[0].message.content
177
 
178
- else:
179
- response = genai.GenerativeModel("gemini-1.5-flash").generate_content(system_desc + "\n\n" + prompt)
180
- return response.text
181
 
182
  def clean_refs(answer, results):
183
  """ Clean the references from the answer """
 
14
  # Model name for LLM calls. Can be overridden by setting the LLM_MODEL
15
  # environment variable. Falls back to a sensible default if unset.
16
  MODEL_NAME = os.getenv("LLM_MODEL", "models/gemini-flash-latest")
17
+ GENAUI_API = os.getenv("GENAI_API", "gemini")
18
 
19
  # LLM_MODEL_NAME must be set in the environment
20
 
 
125
  presence_penalty=0
126
  )
127
 
128
+ return response.choices[0].message.content
129
+
 
 
130
 
131
  def llm_generate_answer(prompt):
132
  """ Generate a response from the LLM """
 
172
  presence_penalty=0
173
  )
174
 
175
+ return response.choices[0].message.content
176
 
 
 
 
177
 
178
  def clean_refs(answer, results):
179
  """ Clean the references from the answer """