serenichron committed on
Commit
5ea35f6
·
1 Parent(s): adcb9bd

Fix None model_id handling in Gradio examples

Browse files

- Add None check in estimate_model_size()
- Add None check in should_quantize()
- Add ValueError for None/empty model_id in load_model()
- Add validation in gradio_chat() with user-friendly message

Files changed (3) hide show
  1. app.py +5 -0
  2. config.py +6 -0
  3. models.py +6 -0
app.py CHANGED
@@ -404,6 +404,11 @@ def gradio_chat(
404
  max_tokens: int,
405
  ):
406
  """Gradio chat interface handler."""
 
 
 
 
 
407
  # Build messages from history
408
  messages = []
409
  for user_msg, assistant_msg in history:
 
404
  max_tokens: int,
405
  ):
406
  """Gradio chat interface handler."""
407
+ # Validate model_id
408
+ if not model_id:
409
+ yield "Please select a model first."
410
+ return
411
+
412
  # Build messages from history
413
  messages = []
414
  for user_msg, assistant_msg in history:
config.py CHANGED
@@ -122,6 +122,9 @@ def estimate_model_size(model_id: str) -> Optional[int]:
122
 
123
  Returns None if size cannot be determined.
124
  """
 
 
 
125
  # Check known models first
126
  if model_id in MODEL_SIZE_ESTIMATES:
127
  return MODEL_SIZE_ESTIMATES[model_id]
@@ -141,6 +144,9 @@ def should_quantize(model_id: str) -> str:
141
 
142
  Returns: "none", "int8", or "int4"
143
  """
 
 
 
144
  if config.default_quantization != "none":
145
  return config.default_quantization
146
 
 
122
 
123
  Returns None if size cannot be determined.
124
  """
125
+ if model_id is None:
126
+ return None
127
+
128
  # Check known models first
129
  if model_id in MODEL_SIZE_ESTIMATES:
130
  return MODEL_SIZE_ESTIMATES[model_id]
 
144
 
145
  Returns: "none", "int8", or "int4"
146
  """
147
+ if model_id is None:
148
+ return "none"
149
+
150
  if config.default_quantization != "none":
151
  return config.default_quantization
152
 
models.py CHANGED
@@ -84,9 +84,15 @@ def load_model(
84
 
85
  Returns:
86
  LoadedModel with model and tokenizer ready for inference
 
 
 
87
  """
88
  global _current_model
89
 
 
 
 
90
  # Check if already loaded
91
  if not force_reload and _current_model is not None:
92
  if _current_model.model_id == model_id:
 
84
 
85
  Returns:
86
  LoadedModel with model and tokenizer ready for inference
87
+
88
+ Raises:
89
+ ValueError: If model_id is None or empty
90
  """
91
  global _current_model
92
 
93
+ if not model_id:
94
+ raise ValueError("model_id cannot be None or empty")
95
+
96
  # Check if already loaded
97
  if not force_reload and _current_model is not None:
98
  if _current_model.model_id == model_id: