AxelDlv00 committed on
Commit
0418f40
·
verified ·
1 Parent(s): b5e1e27

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -44,7 +44,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
44
  import torch
45
 
46
  model_id = "Qwen/Qwen3-0.6B"
47
- adapter_id = "AxelDlv00/EULAI-Qwen3-0.6B-SFT"
48
 
49
  tokenizer = AutoTokenizer.from_pretrained(adapter_id)
50
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
@@ -53,9 +53,9 @@ model = PeftModel.from_pretrained(model, adapter_id)
53
  prompt = "We collect your GPS data continuously even when the application is closed."
54
  messages = [{"role": "user", "content": prompt}]
55
 
56
- inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to("cuda")
57
- outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
58
 
 
59
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
60
  ```
61
 
 
44
  import torch
45
 
46
  model_id = "Qwen/Qwen3-0.6B"
47
+ adapter_id = "AxelDlv00/EULAI"
48
 
49
  tokenizer = AutoTokenizer.from_pretrained(adapter_id)
50
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
 
53
  prompt = "We collect your GPS data continuously even when the application is closed."
54
  messages = [{"role": "user", "content": prompt}]
55
 
56
+ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, enable_thinking=False, return_tensors="pt").to("cuda")
 
57
 
58
+ outputs = model.generate(inputs, max_new_tokens=512, temperature=0.1)
59
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
60
  ```
61