alibidaran committed on
Commit
17fb0ef
·
verified ·
1 Parent(s): d44d7e6

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -31,7 +31,7 @@ max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
31
  dtype = 'Bfloat16' # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
32
  load_in_4bit = True
33
  model, tokenizer = FastLanguageModel.from_pretrained(
34
- model_name ="unsloth/Qwen3-14B-unsloth-bnb-4bit",
35
  max_seq_length = max_seq_length,
36
  #dtype = dtype,
37
  load_in_4bit = load_in_4bit,
 
31
  dtype = 'Bfloat16' # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
32
  load_in_4bit = True
33
  model, tokenizer = FastLanguageModel.from_pretrained(
34
+ model_name ="alibidaran/Qwen3-instructive_reasoning",
35
  max_seq_length = max_seq_length,
36
  #dtype = dtype,
37
  load_in_4bit = load_in_4bit,