yuyuzhang committed on
Commit
13911cf
·
verified ·
1 Parent(s): 53d0dbb

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -12
README.md CHANGED
@@ -37,27 +37,28 @@ pip install -U transformers accelerate
37
  Here is a simple example demonstrating how to load the model and perform code generation using the Hugging Face `pipeline` API:
38
 
39
  ```python
40
- import transformers
41
  import torch
42
 
43
  model_id = "ByteDance-Seed/Seed-Coder-8B-Reasoning"
44
 
45
- pipeline = transformers.pipeline(
46
- "text-generation",
47
- model=model_id,
48
- model_kwargs={"torch_dtype": torch.bfloat16},
49
- device_map="auto",
50
- )
51
 
52
  messages = [
53
- {"role": "user", "content": "Solve the following problem: Given an array of integers, find two numbers such that they add up to a specific target number."},
54
  ]
55
 
56
- outputs = pipeline(
57
  messages,
58
- max_new_tokens=512,
59
- )
60
- print(outputs[0]["generated_text"][-1]["content"])
 
 
 
 
 
61
  ```
62
 
63
  ## Evaluation
 
37
  Here is a simple example demonstrating how to load the model and perform code generation using the Hugging Face `pipeline` API:
38
 
39
  ```python
40
+ from transformers import AutoTokenizer, AutoModelForCausalLM
41
  import torch
42
 
43
  model_id = "ByteDance-Seed/Seed-Coder-8B-Reasoning"
44
 
45
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
46
+ model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)
 
 
 
 
47
 
48
  messages = [
49
+ {"role": "user", "content": "Write a quick sort algorithm."},
50
  ]
51
 
52
+ input_ids = tokenizer.apply_chat_template(
53
  messages,
54
+ tokenize=True,
55
+ return_tensors="pt",
56
+ add_generation_prompt=True,
57
+ ).to(model.device)
58
+
59
+ outputs = model.generate(input_ids, max_new_tokens=32768)
60
+ response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
61
+ print(response)
62
  ```
63
 
64
  ## Evaluation