Safetensors
qwen2
laihuiyuan commited on
Commit
38af906
·
verified ·
1 Parent(s): cf4d555

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +44 -0
README.md CHANGED
@@ -22,6 +22,50 @@ Code: https://github.com/laihuiyuan/tacler
22
  Paper: https://arxiv.org/pdf/2601.21711
23
 
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  ## Citation
26
  ```
27
  @article{lai-etal-2026-tacler,
 
22
  Paper: https://arxiv.org/pdf/2601.21711
23
 
24
 
25
+ ## Quickstart
26
+ ```python
27
+ from transformers import AutoModelForCausalLM, AutoTokenizer
28
+
29
+ model_name = "laihuiyuan/TACLer"
30
+
31
+ # load the tokenizer and the model
32
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
33
+ model = AutoModelForCausalLM.from_pretrained(
34
+ model_name,
35
+ torch_dtype="auto",
36
+ device_map="auto"
37
+ )
38
+
39
+ # use Thinking mode (think_mode = True) or NoThinking mode (think_mode = False)
40
+ think_mode = False
41
+
42
+ question = "How many positive whole-number divisors does 196 have?"
43
+ step_by_step = " Let's think step by step and output the final answer within \\boxed{}."
44
+ messages = [
45
+ {"role": "user", "content": question + step_by_step}
46
+ ]
47
+ prompt = tokenizer.apply_chat_template(
48
+ messages,
49
+ tokenize=False,
50
+ add_generation_prompt=True
51
+ )
52
+ if not think_mode:
53
+ prompt += 'Okay, I think I can solve it directly.\n</think>\n\n'
54
+ model_inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
55
+
56
+ # generate the text completion
57
+ generated_ids = model.generate(
58
+ **model_inputs,
59
+ max_new_tokens=16384
60
+ )
61
+ output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
62
+ output = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
63
+
64
+ print("**PROMPT**\n", prompt)
65
+ print("**OUTPUT**\n", output)
66
+
67
+ ```
68
+
69
  ## Citation
70
  ```
71
  @article{lai-etal-2026-tacler,