Hawk3388 committed on
Commit
e68eaac
·
1 Parent(s): e56b8c4

modified: main.py

Browse files
Files changed (1) hide show
  1. main.py +23 -9
main.py CHANGED
@@ -554,15 +554,29 @@ Rules:
554
  if not self.local:
555
  image = Image.open(marked_image_path)
556
  original_image = Image.open(self.path)
557
- response = self.client.models.generate_content(
558
- model=self.model_name,
559
- contents=[image, original_image, prompt],
560
- config=types.GenerateContentConfig(
561
- response_mime_type="application/json",
562
- response_schema=get_solution,
563
- thinking_config=types.ThinkingConfig(thinking_budget=self.thinking_budget if self.think else 0),
564
- ),
565
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
566
  output = response.parsed
567
  else:
568
  if self.model_name == "qwen3-vl:8b-thinking" and self.think:
 
554
  if not self.local:
555
  image = Image.open(marked_image_path)
556
  original_image = Image.open(self.path)
557
+ try:
558
+ response = self.client.models.generate_content(
559
+ model=self.model_name,
560
+ contents=[image, original_image, prompt],
561
+ config=types.GenerateContentConfig(
562
+ response_mime_type="application/json",
563
+ response_schema=get_solution,
564
+ thinking_config=types.ThinkingConfig(thinking_budget=self.thinking_budget if self.think else 0),
565
+ ),
566
+ )
567
+ except genai.errors.ServerError:
568
+ if self.model_name == "gemini-3-flash-preview":
569
+ print("The thinking model is currently not available - falling back to gemini-2.5-flash")
570
+ self.model_name = "gemini-2.5-flash"
571
+ response = self.client.models.generate_content(
572
+ model=self.model_name,
573
+ contents=[image, original_image, prompt],
574
+ config=types.GenerateContentConfig(
575
+ response_mime_type="application/json",
576
+ response_schema=get_solution,
577
+ thinking_config=types.ThinkingConfig(thinking_budget=self.thinking_budget if self.think else 0),
578
+ ),
579
+ )
580
  output = response.parsed
581
  else:
582
  if self.model_name == "qwen3-vl:8b-thinking" and self.think: