---
license: mit
language:
- en
- zh
base_model:
- Qwen/Qwen2.5-7B
---

# SSR-Zero-7B

Paper: [arXiv:2505.16637](https://arxiv.org/abs/2505.16637)

GitHub: https://github.com/Kelaxon/SSR-Zero

## Quickstart

Below is a code snippet using `apply_chat_template` that shows how to load the tokenizer and model and how to generate a translation.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "wjyccs/SSR-Zero-7B"

# Load the model and tokenizer; device_map="auto" spreads weights across available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# R1-Zero-style prompt: the model reasons inside <think> </think> tags and
# puts the final answer inside <answer> </answer> tags.
system_prompt = """<|startoftext|>A conversation between User and Assistant. The User asks a question, and the Assistant solves it. \
The Assistant first thinks about the reasoning process in the mind and then provides the User with the answer. \
The reasoning process is enclosed within <think> </think> and answer is enclosed within <answer> </answer> tags, respectively, \
i.e., <think> reasoning process here </think> <answer> answer here </answer>. \

User:
{}

Assistant:
"""

# For English-to-Chinese translation, use:
instruction = "Translate the following text to Chinese: \n{}"
# For Chinese-to-English translation, use:
# instruction = "Translate the following text to English: \n{}"

src_text = "Plants make oxygen which humans breathe, and they take in carbon-dioxide which humans exhale (that is, breathe out)."
prompt = system_prompt.format(instruction.format(src_text))

messages = [
    {"role": "user", "content": prompt}
]
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **model_inputs,
    max_new_tokens=1024
)
# Keep only the newly generated tokens (drop the prompt tokens) before decoding.
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```
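
Since the model encloses its reasoning in `<think> </think>` tags and the final translation in `<answer> </answer>` tags, you will often want to discard the reasoning and keep only the answer. The sketch below shows one way to do this; the `extract_answer` helper is our own regex-based illustration, not part of the SSR-Zero release:

```python
import re

def extract_answer(response: str) -> str:
    # Hypothetical helper: return the text inside the last <answer> ... </answer>
    # pair; fall back to the full response if no tags are found.
    matches = re.findall(r"<answer>(.*?)</answer>", response, flags=re.DOTALL)
    return matches[-1].strip() if matches else response.strip()

# `response` is the decoded output from the snippet above.
print(extract_answer(response))
```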