SayedZahur786 committed on
Commit
082c2a7
·
1 Parent(s): 7da8c4b

Switch to gemini-2.5-flash model to avoid quota issues

Browse files
Files changed (3) hide show
  1. HF_README.md +1 -1
  2. backend/compiler.py +1 -1
  3. backend/teacher.py +1 -1
HF_README.md CHANGED
@@ -10,7 +10,7 @@ pinned: false
10
  license: mit
11
  python_version: 3.10
12
  models:
13
- - google/gemini-2.0-flash-exp
14
  ---
15
 
16
  # 🎬 Animetrix AI - Educational Animation Generator
 
10
  license: mit
11
  python_version: 3.10
12
  models:
13
+ - google/gemini-2.5-flash
14
  ---
15
 
16
  # 🎬 Animetrix AI - Educational Animation Generator
backend/compiler.py CHANGED
@@ -177,7 +177,7 @@ async def generate_manim_code(outline: dict, step_audio_paths=None):
177
 
178
  try:
179
  response = client.models.generate_content(
180
- model='gemini-2.0-flash-exp',
181
  contents=prompt
182
  )
183
  code = response.text.strip()
 
177
 
178
  try:
179
  response = client.models.generate_content(
180
+ model='gemini-2.5-flash',
181
  contents=prompt
182
  )
183
  code = response.text.strip()
backend/teacher.py CHANGED
@@ -45,7 +45,7 @@ async def generate_outline(prompt: str):
45
  full_prompt = f"{TEACHER_SYSTEM_PROMPT}\n\nUSER PROMPT: {prompt}\n\nOUTPUT JSON:"
46
 
47
  response = client.models.generate_content(
48
- model='gemini-2.0-flash-exp',
49
  contents=full_prompt
50
  )
51
 
 
45
  full_prompt = f"{TEACHER_SYSTEM_PROMPT}\n\nUSER PROMPT: {prompt}\n\nOUTPUT JSON:"
46
 
47
  response = client.models.generate_content(
48
+ model='gemini-2.5-flash',
49
  contents=full_prompt
50
  )
51