Deva1211 committed on
Commit
8e85724
·
verified ·
1 Parent(s): d0ab12d

trying to use TheBloke/alpaca-lora-65B-GPTQ

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -10,7 +10,7 @@ print("Loading optimized Mistral model...")
10
  try:
11
  # First try: AWQ quantized model (best performance)
12
  print("🔄 Attempting to load AWQ model...")
13
- tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.2-AWQ")
14
  model = AutoModelForCausalLM.from_pretrained(
15
  "TheBloke/Mistral-7B-Instruct-v0.2-AWQ",
16
  device_map="auto",
 
10
  try:
11
  # First try: AWQ quantized model (best performance)
12
  print("🔄 Attempting to load AWQ model...")
13
+ tokenizer = AutoTokenizer.from_pretrained("TheBloke/alpaca-lora-65B-GPTQ")
14
  model = AutoModelForCausalLM.from_pretrained(
15
  "TheBloke/Mistral-7B-Instruct-v0.2-AWQ",
16
  device_map="auto",