nielsr HF Staff commited on
Commit
0cbc8cb
·
verified ·
1 Parent(s): bd8cc67

Add license and pipeline tag

Browse files

This PR adds the license information (Apache 2.0, inferred from the GitHub repo) to the model card metadata and specifies the pipeline tag as `text-generation` for better discoverability on the Hugging Face Hub.

Files changed (1) hide show
  1. README.md +6 -2
README.md CHANGED
@@ -9,6 +9,8 @@ tags:
9
  - verification
10
  - math reasoning
11
  - code verification
 
 
12
  ---
13
 
14
  # Model Card for ThinkPRM-1.5B
@@ -60,7 +62,8 @@ llm = LLM(model=model_id, max_model_len=16384)
60
 
61
  # Example problem and solution
62
  problem = "Solve for x: 2x + 3 = 7"
63
- prefix = "Step 1: Subtract 3 from both sides: 2x = 4\nStep 2: Divide by 2: x = 1"
 
64
 
65
  # Format the prompt
66
  prompt = f"""You are given a math problem and a proposed step-by-step solution:
@@ -78,7 +81,8 @@ Review and critique each step in the proposed solution to determine whether each
78
 
79
  prompt = tokenizer.apply_chat_template([
80
  {'role': "user", "content": prompt}
81
- ], tokenize=False, add_generation_prompt=True) + "\nLet's verify step by step:"
 
82
 
83
  # Set sampling parameters
84
  sampling_params = SamplingParams(
 
9
  - verification
10
  - math reasoning
11
  - code verification
12
+ license: apache-2.0
13
+ pipeline_tag: text-generation
14
  ---
15
 
16
  # Model Card for ThinkPRM-1.5B
 
62
 
63
  # Example problem and solution
64
  problem = "Solve for x: 2x + 3 = 7"
65
+ prefix = "Step 1: Subtract 3 from both sides: 2x = 4\nStep 2: Divide by 2: x = 1"
67
 
68
  # Format the prompt
69
  prompt = f"""You are given a math problem and a proposed step-by-step solution:
 
81
 
82
  prompt = tokenizer.apply_chat_template([
83
  {'role': "user", "content": prompt}
84
+ ], tokenize=False, add_generation_prompt=True) + "\nLet's verify step by step:"
86
 
87
  # Set sampling parameters
88
  sampling_params = SamplingParams(