Slaiwala committed on
Commit
ba598c0
·
verified ·
1 Parent(s): 1fbb928

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -219,7 +219,7 @@ if QUANTIZE in {"8bit", "4bit"} and BitsAndBytesConfig is not None:
219
  quantization_config=bnb_config,
220
  )
221
  else:
222
- # Fallback: run in fp16 without bitsandbytes
223
  base_model = AutoModelForCausalLM.from_pretrained(
224
  BASE_MODEL,
225
  torch_dtype=dtype,
 
219
  quantization_config=bnb_config,
220
  )
221
  else:
222
+ # Fallback / default: fp16 (no bitsandbytes required)
223
  base_model = AutoModelForCausalLM.from_pretrained(
224
  BASE_MODEL,
225
  torch_dtype=dtype,