souvik18 commited on
Commit
4f5c8c9
·
verified ·
1 Parent(s): 73bb5df

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +58 -18
README.md CHANGED
@@ -87,28 +87,68 @@ This model can be used **directly** without any LoRA adapter.
87
  ### Example (Transformers)
88
 
89
  ```python
 
 
 
 
 
 
 
 
90
  from transformers import AutoTokenizer, AutoModelForCausalLM
91
  import torch
92
 
93
- model_id = "souvik18/Roy"
 
 
 
 
 
 
 
 
 
 
 
94
 
95
- tokenizer = AutoTokenizer.from_pretrained(model_id)
  model = AutoModelForCausalLM.from_pretrained(
-     model_id,
-     torch_dtype=torch.float16,
      device_map="auto"
  )
101
-
- prompt = "[INST] Explain Newton's laws in simple words [/INST]"
- inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
- with torch.no_grad():
-     output = model.generate(
-         **inputs,
-         max_new_tokens=200,
-         temperature=0.7,
-         top_p=0.9,
-         do_sample=True
-     )
-
- print(tokenizer.decode(output[0], skip_special_tokens=True))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  ### Example (Transformers)
88
 
89
  ```python
90
+ !pip uninstall -y transformers peft accelerate torch safetensors numpy
91
+ !pip install numpy==1.26.4
92
+ !pip install torch==2.2.2
93
+ !pip install transformers==4.41.2
94
+ !pip install peft==0.11.1
95
+ !pip install accelerate==0.30.1
96
+ !pip install safetensors==0.4.3
97
+
98
  from transformers import AutoTokenizer, AutoModelForCausalLM
99
  import torch
100
 
101
+ # -----------------------------
102
+ # CONFIG
103
+ # -----------------------------
104
+ MODEL_ID = "souvik18/Roy"
105
+ DTYPE = torch.float16 # use float16 for GPU
106
+
107
+ # -----------------------------
108
+ # LOAD TOKENIZER & MODEL
109
+ # -----------------------------
110
+ print("🔹 Loading tokenizer...")
111
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
112
+ tokenizer.pad_token = tokenizer.eos_token
113
 
114
+ print("🔹 Loading model...")
  model = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID,
+     torch_dtype=DTYPE,
      device_map="auto"
  )
+ model.eval()
121
+
122
+ print("\n✅ Model loaded successfully")
123
+ print("Type 'exit' or 'quit' to stop\n")
124
+
125
+ # -----------------------------
126
+ # CHAT LOOP
127
+ # -----------------------------
128
+ while True:
+     user_input = input("🧑 You: ").strip()
+
+     if user_input.lower() in ["exit", "quit"]:
+         print("👋 Bye!")
+         break
+
+     prompt = f"[INST] {user_input} [/INST]"
+
+     inputs = tokenizer(
+         prompt,
+         return_tensors="pt"
+     ).to(model.device)
+
+     with torch.no_grad():
+         output = model.generate(
+             **inputs,
+             max_new_tokens=200,
+             temperature=0.7,
+             top_p=0.9,
+             do_sample=True,
+             repetition_penalty=1.1,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+
+     response = tokenizer.decode(output[0], skip_special_tokens=True)
+     print(f"\n Roy: {response}\n")