Update README.md
README.md CHANGED
(Removed lines whose content did not survive the diff rendering are shown as "-…"; a few removed lines are truncated as rendered.)

@@ -32,43 +32,39 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 model_id = "omdeep22/Gonyai-v1"
 
-…
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
     trust_remote_code=True,
-    torch_dtype=
-).to(
-…
-# 2. Define your prompt using the Chat Template
-# This automatically handles the <|user|> and <|assistant|> tags
-messages = [
-    {"role": "user", "content": "गोंयच्या पावसाचेर एक कविता बरोव."}
-]
+    torch_dtype=dtype
+).to(device)
 
-…
+messages = [{"role": "user", "content": "गोंयच्या पावसाचेर एक कविता बरोव."}]
+inputs = tokenizer.apply_chat_template(
     messages,
     tokenize=True,
     add_generation_prompt=True,
-    return_tensors="pt"
-…
+    return_tensors="pt",
+    return_dict=True
+).to(device)
 
-…
-)
+with torch.inference_mode():
+    with torch.autocast(device_type=device, dtype=dtype):
+        outputs = model.generate(
+            input_ids=inputs["input_ids"],
+            attention_mask=inputs["attention_mask"],
+            max_new_tokens=100,
+            temperature=0.3,
+            repetition_penalty=1.2,
+            do_sample=True,
+            eos_token_id=tokenizer.eos_token_id,
+            pad_token_id=tokenizer.eos_token_id
+        )
 
-…
-response = tokenizer.decode(generated_tokens, skip_special_tokens=True)
+generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
+response = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
 
-print(f"
+print(f"\nAssistant: {response}")