onebeans committed on
Commit
c9df7d3
·
verified ·
1 Parent(s): 6bbd250

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -3
README.md CHANGED
@@ -37,6 +37,7 @@ lora_config = LoraConfig(
37
  target_modules=["c_attn", "q_proj", "v_proj"]
38
  )
39
  ```
 
40
  # Training Arguments
41
  ```python
42
  training_args = TrainingArguments(
@@ -55,7 +56,6 @@ training_args = TrainingArguments(
55
  ```
56
 
57
  # Training Progress
58
-
59
  | Step | Training Loss | Validation Loss |
60
  |------|---------------|-----------------|
61
  | 300 | 1.595000 | 1.611501 |
@@ -70,7 +70,6 @@ training_args = TrainingArguments(
70
 
71
 
72
  # 실행 코드
73
-
74
  ```python
75
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
76
  import torch
@@ -84,7 +83,7 @@ bnb_config = BitsAndBytesConfig(
84
  )
85
 
86
  # Load tokenizer and model (local or hub path)
87
- model_path = "your-username/your-model-name" # or local path like "./saved_model(0412)"
88
  tokenizer = AutoTokenizer.from_pretrained(model_path)
89
  model = AutoModelForCausalLM.from_pretrained(
90
  model_path,
 
37
  target_modules=["c_attn", "q_proj", "v_proj"]
38
  )
39
  ```
40
+
41
  # Training Arguments
42
  ```python
43
  training_args = TrainingArguments(
 
56
  ```
57
 
58
  # Training Progress
 
59
  | Step | Training Loss | Validation Loss |
60
  |------|---------------|-----------------|
61
  | 300 | 1.595000 | 1.611501 |
 
70
 
71
 
72
  # 실행 코드
 
73
  ```python
74
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
75
  import torch
 
83
  )
84
 
85
  # Load tokenizer and model (local or hub path)
86
+ model_path = "onebeans/Qwen2.5-Coder-KoInstruct-QLoRA"
87
  tokenizer = AutoTokenizer.from_pretrained(model_path)
88
  model = AutoModelForCausalLM.from_pretrained(
89
  model_path,