Mohamed284 committed on
Commit
b26d7ff
·
1 Parent(s): 4749eaf
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -177,7 +177,7 @@ class EnhancedRetriever:
177
  def _hyde_expansion(self, query: str) -> str:
178
  try:
179
  response = client.chat.completions.create(
180
- model="meta-llama-3-70b-instruct",
181
  messages=[{
182
  "role": "user",
183
  "content": f"Generate a technical draft about biomimicry for: {query}\nInclude domain-specific terms."
@@ -239,7 +239,7 @@ def get_ai_response(query: str, context: str, model: str) -> str:
239
  )
240
  logger.info(f"Response from gemini-2.0-flash: {response.text}")
241
  result = _postprocess_response(response.text)
242
- elif model == "meta-llama-3-70b-instruct":
243
  response = client.chat.completions.create(
244
  model=model,
245
  messages=[
@@ -339,7 +339,7 @@ def generate_response(question: str, model: str) -> str:
339
  # --- Gradio Interface ---
340
  model_mapping = {
341
  "Gemini-2.0-Flash": "gemini-2.0-flash",
342
- "Meta-llama-3-70b-instruct(GWDG)": "meta-llama-3-70b-instruct",
343
  "llama3-70b-8192(Groq)": "llama3-70b-8192"
344
  }
345
 
 
177
  def _hyde_expansion(self, query: str) -> str:
178
  try:
179
  response = client.chat.completions.create(
180
+ model="llama-3.3-70b-instruct",
181
  messages=[{
182
  "role": "user",
183
  "content": f"Generate a technical draft about biomimicry for: {query}\nInclude domain-specific terms."
 
239
  )
240
  logger.info(f"Response from gemini-2.0-flash: {response.text}")
241
  result = _postprocess_response(response.text)
242
+ elif model == "llama-3.3-70b-instruct":
243
  response = client.chat.completions.create(
244
  model=model,
245
  messages=[
 
339
  # --- Gradio Interface ---
340
  model_mapping = {
341
  "Gemini-2.0-Flash": "gemini-2.0-flash",
342
+ "Meta-llama-3-70b-instruct(GWDG)": "llama-3.3-70b-instruct",
343
  "llama3-70b-8192(Groq)": "llama3-70b-8192"
344
  }
345