guanwenyu1995 committed on
Commit
f276ba9
·
verified ·
1 Parent(s): 73f367a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -2
README.md CHANGED
@@ -21,10 +21,12 @@ from mlx_lm import load, generate
21
  model_path = "MiniCPM4.1-8B-MLX"
22
  model, tokenizer = load(model_path)
23
  messages = [{"role": "user", "content": "北京有什么好玩的地方?"}]
 
24
  # to enable thinking mode, use the following code
25
- # prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
26
  # to disable thinking mode, use the following code
27
- prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False, enable_thinking=False)
 
28
  response = generate(
29
  model=model,
30
  tokenizer=tokenizer,
 
21
  model_path = "MiniCPM4.1-8B-MLX"
22
  model, tokenizer = load(model_path)
23
  messages = [{"role": "user", "content": "北京有什么好玩的地方?"}]
24
+
25
  # to enable thinking mode, use the following code
26
+ prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
27
  # to disable thinking mode, use the following code
28
+ # prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False, enable_thinking=False)
29
+
30
  response = generate(
31
  model=model,
32
  tokenizer=tokenizer,