johnnydang88 commited on
Commit
9850093
·
verified ·
1 Parent(s): e78bbc7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -734,15 +734,15 @@ def _generate_single(model_name: str, question: str,
734
  "latency_detail": "error", "llm_judge": 0.0}
735
 
736
 
737
@spaces.GPU(duration=150)
def generate_qwen(question, context_chunks, context_scores, lang):
    """Answer *question* with Qwen2.5-7B-Instruct via the shared single-model pipeline."""
    model = "Qwen2.5-7B-Instruct"
    return _generate_single(model, question, context_chunks, context_scores, lang)
740
 
741
@spaces.GPU(duration=150)
def generate_llama(question, context_chunks, context_scores, lang):
    """Answer *question* with LLaMA-3.1-8B-Instruct via the shared single-model pipeline."""
    model = "LLaMA-3.1-8B-Instruct"
    return _generate_single(model, question, context_chunks, context_scores, lang)
744
 
745
@spaces.GPU(duration=150)
def generate_gemma(question, context_chunks, context_scores, lang):
    """Answer *question* with Gemma-2-9B-IT via the shared single-model pipeline."""
    model = "Gemma-2-9B-IT"
    return _generate_single(model, question, context_chunks, context_scores, lang)
748
 
 
734
  "latency_detail": "error", "llm_judge": 0.0}
735
 
736
 
737
@spaces.GPU(duration=80)
def generate_qwen(question, context_chunks, context_scores, lang):
    """Answer *question* with Qwen2.5-7B-Instruct via the shared single-model pipeline."""
    model = "Qwen2.5-7B-Instruct"
    return _generate_single(model, question, context_chunks, context_scores, lang)
740
 
741
@spaces.GPU(duration=80)
def generate_llama(question, context_chunks, context_scores, lang):
    """Answer *question* with LLaMA-3.1-8B-Instruct via the shared single-model pipeline."""
    model = "LLaMA-3.1-8B-Instruct"
    return _generate_single(model, question, context_chunks, context_scores, lang)
744
 
745
@spaces.GPU(duration=80)
def generate_gemma(question, context_chunks, context_scores, lang):
    """Answer *question* with Gemma-2-9B-IT via the shared single-model pipeline."""
    model = "Gemma-2-9B-IT"
    return _generate_single(model, question, context_chunks, context_scores, lang)
748