Update README.md
README.md CHANGED

@@ -31,7 +31,7 @@ max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = 'Bfloat16' # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True
 model, tokenizer = FastLanguageModel.from_pretrained(
-    model_name ="
+    model_name ="alibidaran/Qwen3-instructive_reasoning",
     max_seq_length = max_seq_length,
     #dtype = dtype,
     load_in_4bit = load_in_4bit,
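The hunk above completes a previously truncated `model_name` argument in the README's quickstart. For reference, below is a minimal runnable sketch of the resulting load call, assuming the `unsloth` package is installed; it replaces the string `'Bfloat16'` with `None`, since `FastLanguageModel.from_pretrained` expects a torch dtype or `None` for auto-detection (the diff itself keeps the `dtype` argument commented out).

```python
from unsloth import FastLanguageModel

max_seq_length = 2048  # unsloth applies RoPE scaling internally for longer contexts
dtype = None           # None -> auto detection; torch.float16 on T4/V100, torch.bfloat16 on Ampere+
load_in_4bit = True    # load 4-bit quantized weights to reduce VRAM usage

# Load the model named in the diff; dtype=None mirrors the "auto detection" comment.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "alibidaran/Qwen3-instructive_reasoning",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
```

With the model and tokenizer returned, the usual Hugging Face generation flow (`tokenizer(...)` followed by `model.generate(...)`) applies unchanged.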