PEFT
Safetensors
Sinhala
rashadism committed on
Commit fb042ed · verified · 1 Parent(s): 1194553

Update README.md

Files changed (1)
  1. README.md +3 -4
README.md CHANGED
@@ -82,7 +82,7 @@ Users should carefully evaluate outputs before deployment, especially in sensiti
  
  ### Install dependencies
  ```python
- !pip install unsloth # @ git+https://github.com/unslothai/unsloth.git
+ !pip install unsloth
  !pip install datasets==2.21.0
  !pip install pandas==2.1.4
  ```
@@ -108,7 +108,7 @@ model_config = {"model_name": "unsloth/llama-3-8b", "load_in_4bit": False}
  max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
  load_in_4bit = False # Use 4bit quantization to reduce memory usage. Can be False.
- model_name = "polyglots/SinLlama_v01" # Change the model name
+ model_name = "polyglots/SinLlama_v01"
  ```
  
  ### Load the model
@@ -118,8 +118,7 @@ model, _ = FastLanguageModel.from_pretrained(
  max_seq_length = max_seq_length,
  dtype = dtype,
  load_in_4bit = load_in_4bit,
- resize_model_vocab=139336,
- # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
+ resize_model_vocab=139336 # Size of new vocab
  )
  ```
  
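
For reference, this is a minimal sketch of how the README snippet reads once the pieces above are put together after this commit. It is not part of the diff itself: the `from unsloth import FastLanguageModel` import and the `model_name = model_name` keyword are assumed from context, since the full call site is truncated in the hunks above.

```python
# Minimal sketch of the updated README flow (assumed context noted above).
from unsloth import FastLanguageModel

max_seq_length = 2048  # Choose any! We auto support RoPE Scaling internally!
dtype = None           # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = False   # Use 4bit quantization to reduce memory usage. Can be False.
model_name = "polyglots/SinLlama_v01"

model, _ = FastLanguageModel.from_pretrained(
    model_name = model_name,        # assumed keyword; not visible in the diff context
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    resize_model_vocab = 139336,    # size of the extended vocabulary, per the added comment
)
```

The `resize_model_vocab` argument matches the vocabulary size described in the added "Size of new vocab" comment, so the base model's embeddings are resized before the adapter is applied.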