FuryAssassin commited on
Commit
7b52fed
·
verified ·
1 Parent(s): a514381

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. .python_tmp/7d8f15eb-b0aa-4637-a763-d99c89034a42.py +206 -0
  3. .python_tmp/863ab7be-0f37-4ef0-a40f-8d35e20ff660.py +167 -0
  4. README.md +125 -0
  5. checkpoints/step_100/config.json +4 -0
  6. checkpoints/step_100/pytorch_model.bin +3 -0
  7. checkpoints/step_1000/config.json +4 -0
  8. checkpoints/step_1000/pytorch_model.bin +3 -0
  9. checkpoints/step_200/config.json +6 -0
  10. checkpoints/step_200/pytorch_model.bin +3 -0
  11. checkpoints/step_300/config.json +6 -0
  12. checkpoints/step_300/pytorch_model.bin +3 -0
  13. checkpoints/step_400/config.json +4 -0
  14. checkpoints/step_400/pytorch_model.bin +3 -0
  15. checkpoints/step_500/config.json +4 -0
  16. checkpoints/step_500/pytorch_model.bin +3 -0
  17. checkpoints/step_600/config.json +4 -0
  18. checkpoints/step_600/pytorch_model.bin +3 -0
  19. checkpoints/step_700/config.json +4 -0
  20. checkpoints/step_700/pytorch_model.bin +3 -0
  21. checkpoints/step_800/config.json +4 -0
  22. checkpoints/step_800/pytorch_model.bin +3 -0
  23. checkpoints/step_900/config.json +4 -0
  24. checkpoints/step_900/pytorch_model.bin +3 -0
  25. evaluation/benchmarks/code_generation/eval.py +33 -0
  26. evaluation/benchmarks/common_sense/eval.py +33 -0
  27. evaluation/benchmarks/creative_writing/eval.py +33 -0
  28. evaluation/benchmarks/dialogue_generation/eval.py +35 -0
  29. evaluation/benchmarks/instruction_following/eval.py +33 -0
  30. evaluation/benchmarks/knowledge_retrieval/eval.py +33 -0
  31. evaluation/benchmarks/logical_reasoning/eval.py +33 -0
  32. evaluation/benchmarks/math_reasoning/eval.py +33 -0
  33. evaluation/benchmarks/question_answering/eval.py +33 -0
  34. evaluation/benchmarks/reading_comprehension/eval.py +33 -0
  35. evaluation/benchmarks/safety_evaluation/eval.py +33 -0
  36. evaluation/benchmarks/sentiment_analysis/eval.py +33 -0
  37. evaluation/benchmarks/summarization/eval.py +33 -0
  38. evaluation/benchmarks/text_classification/eval.py +34 -0
  39. evaluation/benchmarks/translation/eval.py +33 -0
  40. evaluation/build/lib.linux-x86_64-cpython-313/utils/__init__.cpython-313-x86_64-linux-gnu.so +0 -0
  41. evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so +3 -0
  42. evaluation/build/temp.linux-x86_64-cpython-313/utils/__init__.o +0 -0
  43. evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o +3 -0
  44. evaluation/eval.py +122 -0
  45. evaluation/setup.py +19 -0
  46. evaluation/utils/__init__.c +0 -0
  47. evaluation/utils/__init__.cpython-313-x86_64-linux-gnu.so +0 -0
  48. evaluation/utils/benchmark_utils.c +0 -0
  49. evaluation/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so +3 -0
  50. figures/fig1.png +0 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
37
+ evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o filter=lfs diff=lfs merge=lfs -text
38
+ evaluation/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
.python_tmp/7d8f15eb-b0aa-4637-a763-d99c89034a42.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, json, glob, re, collections, sys
3
+ from collections import Counter, OrderedDict
4
+
5
+ # helper functions
6
+
7
def analyze_style(content: str):
    """Infer the formatting style of a JSON document.

    Returns a hashable tuple
    (multiline, indent_type, indent_size, key_sep, comma_space, trailing_newline)
    so styles can be counted and compared across files.
    """
    has_trailing_nl = content.endswith('\n')
    is_multiline = '\n' in content
    # Locate the first indented line that opens a quoted key/value.
    first_indent = None
    for text_line in content.splitlines():
        match = re.match(r'^(?P<indent>[ \t]+)"', text_line)
        if match is not None:
            first_indent = match.group('indent')
            break
    if first_indent is None:
        indent_kind, indent_width = 'none', 0
    elif '\t' in first_indent:
        indent_kind, indent_width = 'tab', first_indent.count('\t')
    else:
        indent_kind, indent_width = 'space', len(first_indent)
    # Heuristic: any occurrence of '": ' means keys are followed by ': '.
    separator = ': ' if '": ' in content else ':'
    has_comma_space = ', ' in content
    return (is_multiline, indent_kind, indent_width, separator,
            has_comma_space, has_trailing_nl)
34
+
35
# Collect every per-checkpoint config.json and read it as UTF-8 text.
paths = sorted(glob.glob('checkpoints/step_*/config.json'))
print('found', len(paths), 'config.json files')
if not paths:
    raise SystemExit('No config files found')

contents = {}
styles = []
for path in paths:
    with open(path, 'rb') as fh:
        raw_bytes = fh.read()
    try:
        decoded = raw_bytes.decode('utf-8')
    except Exception:
        # Tolerate the odd byte rather than aborting the whole scan.
        decoded = raw_bytes.decode('utf-8', errors='replace')
    contents[path] = decoded
    styles.append(analyze_style(decoded))

# The most frequent style tuple becomes the canonical one.
counter = Counter(styles)
canonical_style, cnt = counter.most_common(1)[0]
print('canonical_style:', canonical_style, 'count=', cnt)
57
+
58
# Serializer that reproduces a style tuple produced by analyze_style().

def dump_with_style(obj, style):
    """Render *obj* as JSON text matching the given style tuple."""
    multiline, indent_type, indent_size, key_sep, comma_space, trailing_newline = style
    seps = (', ' if comma_space else ',', key_sep)
    if not multiline:
        # Single-line compact form.
        rendered = json.dumps(obj, separators=seps)
    elif indent_type == 'tab':
        # json.dumps cannot emit tab indentation directly: render one space
        # per level, then turn each run of leading spaces into as many tabs.
        rendered = json.dumps(obj, indent=1, separators=seps)
        rendered = re.sub(r'^( +)', lambda m: '\t' * len(m.group(1)),
                          rendered, flags=re.M)
    elif indent_type == 'space':
        rendered = json.dumps(obj, indent=indent_size if indent_size > 0 else None,
                              separators=seps)
    else:
        # Multiline but no detected indent: fall back to two spaces.
        rendered = json.dumps(obj, indent=2, separators=seps)
    # Normalize the trailing newline to the canonical preference.
    if trailing_newline and not rendered.endswith('\n'):
        rendered += '\n'
    elif not trailing_newline and rendered.endswith('\n'):
        rendered = rendered[:-1]
    return rendered
93
+
94
# Rewrite any config whose style deviates from the canonical one,
# re-serializing with the original key order preserved.
changed = []
for path, text in contents.items():
    if analyze_style(text) == canonical_style:
        continue
    with open(path, 'r', encoding='utf-8') as fh:
        data = json.load(fh, object_pairs_hook=OrderedDict)
    with open(path, 'w', encoding='utf-8', newline='') as fh:
        fh.write(dump_with_style(data, canonical_style))
    changed.append(path)

print('rewrote', len(changed), 'files')
for path in changed[:10]:
    print('rewrote', path)

# Drop the stray editor swap file if it survived in the tree.
swap_path = 'evaluation/.setup.py.swp'
if os.path.exists(swap_path):
    os.remove(swap_path)
    print('removed', swap_path)
else:
    print('no swap file found')
119
+
120
# Upload the cleaned workspace to the Hugging Face Hub and verify the result.
try:
    from huggingface_hub import HfApi, upload_folder, snapshot_download
except Exception:
    print('huggingface_hub not installed, installing...')
    os.system(f'{sys.executable} -m pip install --quiet huggingface_hub')
    from huggingface_hub import HfApi, upload_folder, snapshot_download

# Read the API token from the local credential file.
with open('hf_token.txt', 'r') as f:
    hf_token = f.read().strip()
api = HfApi()
user = api.whoami(token=hf_token).get('name')
print('hf username:', user)
repo_id = f"{user}/MyAwesomeModel-CleanupRelease"
print('creating repo', repo_id)
api.create_repo(repo_id=repo_id, token=hf_token, private=False, exist_ok=True)

# SECURITY FIX: the repo is public and hf_token.txt lives inside the
# uploaded folder. The original ignore list (['.git', '.git/*']) would
# publish the API token; it must be excluded, along with editor swap
# files (matching the companion script in this commit).
ignore_patterns = ['.git', '.git/*', 'hf_token.txt', '*.swp']
print('uploading workspace to repo... (this may take a while)')
res = upload_folder(
    folder_path='.',
    path_in_repo='',
    repo_id=repo_id,
    token=hf_token,
    ignore_patterns=ignore_patterns,
)
print('upload_folder result length:', len(res) if hasattr(res, '__len__') else type(res))

# Verify by downloading a fresh snapshot of the repo.
print('downloading snapshot to tmp dir')
local_dir = snapshot_download(repo_id=repo_id, token=hf_token, allow_patterns=['*'])
print('downloaded to', local_dir)

# The snapshot must contain no editor swap files...
found_swp = []
for root, dirs, files in os.walk(local_dir):
    for fn in files:
        if fn.endswith('.swp'):
            found_swp.append(os.path.join(root, fn))
print('found .swp files in repo snapshot:', found_swp)

# ...and every checkpoint config must match the canonical style.
snap_paths = sorted(glob.glob(os.path.join(local_dir, 'checkpoints/step_*/config.json')))
print('snapshot has', len(snap_paths), 'config files')
bad = []
for p in snap_paths:
    with open(p, 'r', encoding='utf-8') as f:
        txt = f.read()
    if analyze_style(txt) != canonical_style:
        bad.append(p)

print('files not matching canonical style in snapshot:', len(bad))
if bad:
    for p in bad[:10]:
        print('mismatch:', p)

# Machine-readable summary of the whole run.
result = {
    'canonical_style': canonical_style,
    'rewritten_files': changed,
    'deleted_swap': not os.path.exists(swap_path),
    'repo_id': repo_id,
    'snapshot_dir': local_dir,
    'found_swp_in_repo': found_swp,
    'bad_in_snapshot': bad,
}

import json as _json
print('\nFINAL RESULT JSON:\n')
print(_json.dumps({
    'canonical_style': canonical_style,
    'rewritten_count': len(changed),
    'deleted_swap': not os.path.exists(swap_path),
    'repo_id': repo_id,
    'found_swp_in_repo_count': len(found_swp),
    'bad_in_snapshot_count': len(bad),
}, indent=2))

# Non-zero exit signals verification failure to the caller.
if found_swp or bad:
    print('Verification failed')
    sys.exit(2)
else:
    print('All checks passed')
    sys.exit(0)
.python_tmp/863ab7be-0f37-4ef0-a40f-8d35e20ff660.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, json, glob, re, collections, sys
3
+ from collections import Counter, OrderedDict
4
+
5
+ # helper - reuse earlier analysis
6
+
7
def analyze_style(content: str):
    """Detect a JSON document's formatting style.

    The result is the hashable tuple
    (multiline, indent_type, indent_size, key_sep, comma_space, trailing_newline),
    suitable for counting with collections.Counter.
    """
    ends_with_nl = content.endswith('\n')
    spans_lines = '\n' in content
    leading_ws = None
    # First line whose indentation is immediately followed by a quote
    # determines the indent style.
    for candidate in content.splitlines():
        hit = re.match(r'^(?P<indent>[ \t]+)"', candidate)
        if hit:
            leading_ws = hit.group('indent')
            break
    if leading_ws is None:
        style_kind, width = 'none', 0
    elif '\t' in leading_ws:
        style_kind, width = 'tab', leading_ws.count('\t')
    else:
        style_kind, width = 'space', len(leading_ws)
    # '": ' anywhere in the text implies a space after the key colon.
    colon = ': ' if '": ' in content else ':'
    spaced_commas = ', ' in content
    return (spans_lines, style_kind, width, colon, spaced_commas, ends_with_nl)
30
+
31
# Gather the checkpoint configs and pick the most common formatting style.
paths = sorted(glob.glob('checkpoints/step_*/config.json'))
contents = {}
styles = []
for path in paths:
    with open(path, 'rb') as fh:
        text = fh.read().decode('utf-8', errors='replace')
    contents[path] = text
    styles.append(analyze_style(text))

canonical_style, cnt = Counter(styles).most_common(1)[0]
print('canonical_style:', canonical_style, 'count=', cnt)

# Outliers were rewritten by an earlier pass; track any further changes here.
changed = []
49
+
50
def dump_with_style(obj, style):
    """Serialize *obj* to JSON text that reproduces *style* (see analyze_style)."""
    multiline, indent_type, indent_size, key_sep, comma_space, trailing_newline = style
    separators = (', ' if comma_space else ',', key_sep)
    if multiline:
        if indent_type == 'tab':
            # Emit one space per nesting level, then swap leading spaces
            # for the same number of tabs (json.dumps has no tab mode).
            out = json.dumps(obj, indent=1, separators=separators)
            out = re.sub(r'^( +)', lambda m: '\t' * len(m.group(1)), out, flags=re.M)
        elif indent_type == 'space':
            out = json.dumps(obj, indent=indent_size if indent_size > 0 else None,
                             separators=separators)
        else:
            # Multiline without a detected indent: default to two spaces.
            out = json.dumps(obj, indent=2, separators=separators)
    else:
        out = json.dumps(obj, separators=separators)
    # Force the trailing-newline state to match the canonical style.
    if trailing_newline and not out.endswith('\n'):
        out += '\n'
    elif not trailing_newline and out.endswith('\n'):
        out = out[:-1]
    return out
72
+
73
# Bring every non-canonical config file back to the canonical style,
# keeping the original key order.
for path, text in contents.items():
    if analyze_style(text) == canonical_style:
        continue
    with open(path, 'r', encoding='utf-8') as fh:
        data = json.load(fh, object_pairs_hook=OrderedDict)
    with open(path, 'w', encoding='utf-8', newline='') as fh:
        fh.write(dump_with_style(data, canonical_style))
    changed.append(path)

print('ensured canonical style for config.json files, changed:', len(changed))

# Remove the stray editor swap file, if present.
swap_path = 'evaluation/.setup.py.swp'
if os.path.exists(swap_path):
    os.remove(swap_path)
    print('removed', swap_path)
else:
    print('no swap file found')

# Upload to the Hub, excluding the credential file and swap files.
try:
    from huggingface_hub import HfApi, upload_folder, snapshot_download
except Exception:
    os.system(f'{sys.executable} -m pip install --quiet huggingface_hub')
    from huggingface_hub import HfApi, upload_folder, snapshot_download

with open('hf_token.txt','r') as fh:
    hf_token = fh.read().strip()
api = HfApi()
user = api.whoami(token=hf_token).get('name')
repo_id = f"{user}/MyAwesomeModel-CleanupRelease"
api.create_repo(repo_id=repo_id, token=hf_token, private=False, exist_ok=True)

# Never publish the token or editor droppings to the public repo.
ignore_patterns = ['.git', '.git/*', 'hf_token.txt', '*.swp']
print('uploading, ignore_patterns=', ignore_patterns)
res = upload_folder(
    folder_path='.',
    path_in_repo='',
    repo_id=repo_id,
    token=hf_token,
    ignore_patterns=ignore_patterns,
)
print('uploaded', len(res) if hasattr(res, '__len__') else 'ok')

# Verify: pull a fresh snapshot and compare it against the workspace.
local_dir = snapshot_download(repo_id=repo_id, token=hf_token, allow_patterns=['*'])
print('downloaded to', local_dir)

found_swp = []
for root, dirs, files in os.walk(local_dir):
    for fn in files:
        if fn.endswith('.swp'):
            found_swp.append(os.path.join(root, fn))
print('found .swp files:', found_swp)

snap_paths = sorted(glob.glob(os.path.join(local_dir, 'checkpoints/step_*/config.json')))
print('snapshot config files count:', len(snap_paths))

bad = []
for snap_file in snap_paths:
    with open(snap_file, 'rb') as fh:
        snap_bytes = fh.read()
    # The snapshot mirrors the repo layout, so the relative path doubles
    # as the workspace path (assumes CWD is the uploaded workspace root).
    workspace_file = os.path.relpath(snap_file, local_dir)
    if os.path.exists(workspace_file):
        with open(workspace_file, 'rb') as fh:
            workspace_bytes = fh.read()
        if snap_bytes != workspace_bytes:
            bad.append((snap_file, workspace_file))
    else:
        bad.append((snap_file, None))

print('mismatches count:', len(bad))
if bad:
    for snap_file, workspace_file in bad[:10]:
        print('mismatch:', snap_file, 'vs', workspace_file)

# Final human-readable status report.
print('\nFINAL:')
print('repo:', repo_id)
print('deleted_swap:', not os.path.exists(swap_path))
print('.swp in repo:', len(found_swp))
print('config mismatches:', len(bad))

# Exit 0 only when the snapshot is clean and byte-identical.
sys.exit(2 if found_swp or bad else 0)
README.md ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ library_name: transformers
4
+ ---
5
+ # MyAwesomeModel
6
+ <!-- markdownlint-disable first-line-h1 -->
7
+ <!-- markdownlint-disable html -->
8
+ <!-- markdownlint-disable no-duplicate-header -->
9
+
10
+ <div align="center">
11
+ <img src="figures/fig1.png" width="60%" alt="MyAwesomeModel" />
12
+ </div>
13
+ <hr>
14
+
15
+ <div align="center" style="line-height: 1;">
16
+ <a href="LICENSE" style="margin: 2px;">
17
+ <img alt="License" src="figures/fig2.png" style="display: inline-block; vertical-align: middle;"/>
18
+ </a>
19
+ </div>
20
+
21
+ ## 1. Introduction
22
+
23
+ The MyAwesomeModel has undergone a significant version upgrade. In the latest update, MyAwesomeModel has significantly improved its depth of reasoning and inference capabilities by leveraging increased computational resources and introducing algorithmic optimization mechanisms during post-training. The model has demonstrated outstanding performance across various benchmark evaluations, including mathematics, programming, and general logic. Its overall performance is now approaching that of other leading models.
24
+
25
+ <p align="center">
26
+ <img width="80%" src="figures/fig3.png">
27
+ </p>
28
+
29
+ Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks. For instance, in the AIME 2025 test, the model’s accuracy has increased from 70% in the previous version to 87.5% in the current version. This advancement stems from enhanced thinking depth during the reasoning process: in the AIME test set, the previous model used an average of 12K tokens per question, whereas the new version averages 23K tokens per question.
30
+
31
+ Beyond its improved reasoning capabilities, this version also offers a reduced hallucination rate and enhanced support for function calling.
32
+
33
+ ## 2. Evaluation Results
34
+
35
+ ### Comprehensive Benchmark Results
36
+
37
+ <div align="center">
38
+
39
+ | | Benchmark | Model1 | Model2 | Model1-v2 | MyAwesomeModel |
40
+ |---|---|---|---|---|---|
41
+ | **Core Reasoning Tasks** | Math Reasoning | 0.510 | 0.535 | 0.521 | {RESULT} |
42
+ | | Logical Reasoning | 0.789 | 0.801 | 0.810 | {RESULT} |
43
+ | | Common Sense | 0.716 | 0.702 | 0.725 | {RESULT} |
44
+ | **Language Understanding** | Reading Comprehension | 0.671 | 0.685 | 0.690 | {RESULT} |
45
+ | | Question Answering | 0.582 | 0.599 | 0.601 | {RESULT} |
46
+ | | Text Classification | 0.803 | 0.811 | 0.820 | {RESULT} |
47
+ | | Sentiment Analysis | 0.777 | 0.781 | 0.790 | {RESULT} |
48
+ | **Generation Tasks** | Code Generation | 0.615 | 0.631 | 0.640 | {RESULT} |
49
+ | | Creative Writing | 0.588 | 0.579 | 0.601 | {RESULT} |
50
+ | | Dialogue Generation | 0.621 | 0.635 | 0.639 | {RESULT} |
51
+ | | Summarization | 0.745 | 0.755 | 0.760 | {RESULT} |
52
+ | **Specialized Capabilities**| Translation | 0.782 | 0.799 | 0.801 | {RESULT} |
53
+ | | Knowledge Retrieval | 0.651 | 0.668 | 0.670 | {RESULT} |
54
+ | | Instruction Following | 0.733 | 0.749 | 0.751 | {RESULT} |
55
+ | | Safety Evaluation | 0.718 | 0.701 | 0.725 | {RESULT} |
56
+
57
+ </div>
58
+
59
+ ### Overall Performance Summary
60
+ The MyAwesomeModel demonstrates strong performance across all evaluated benchmark categories, with particularly notable results in reasoning and generation tasks.
61
+
62
+ ## 3. Chat Website & API Platform
63
+ We offer a chat interface and API for you to interact with MyAwesomeModel. Please check our official website for more details.
64
+
65
+ ## 4. How to Run Locally
66
+
67
+ Please refer to our code repository for more information about running MyAwesomeModel locally.
68
+
69
+ Compared to previous versions, the usage recommendations for MyAwesomeModel have the following changes:
70
+
71
+ 1. System prompt is supported.
72
+ 2. It is not required to add special tokens at the beginning of the output to force the model into a specific thinking pattern.
73
+
74
+ The model architecture of MyAwesomeModel-Small is identical to its base model, but it shares the same tokenizer configuration as the main MyAwesomeModel. This model can be run in the same manner as its base model.
75
+
76
+ ### System Prompt
77
+ We recommend using the following system prompt with a specific date.
78
+ ```
79
+ You are MyAwesomeModel, a helpful AI assistant.
80
+ Today is {current date}.
81
+ ```
82
+ For example,
83
+ ```
84
+ You are MyAwesomeModel, a helpful AI assistant.
85
+ Today is May 28, 2025, Monday.
86
+ ```
87
+ ### Temperature
88
+ We recommend setting the temperature parameter $T_{model}$ to 0.6.
89
+
90
+ ### Prompts for File Uploading and Web Search
91
+ For file uploading, please follow the template to create prompts, where {file_name}, {file_content} and {question} are arguments.
92
+ ```
93
+ file_template = \
94
+ """[file name]: {file_name}
95
+ [file content begin]
96
+ {file_content}
97
+ [file content end]
98
+ {question}"""
99
+ ```
100
+ For web search enhanced generation, we recommend the following prompt template where {search_results}, {cur_date}, and {question} are arguments.
101
+ ```
102
+ search_answer_en_template = \
103
+ '''# The following contents are the search results related to the user's message:
104
+ {search_results}
105
+ In the search results I provide to you, each result is formatted as [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the relevant sentence when appropriate. Use the citation format [citation:X] in the corresponding part of your answer. If a sentence is derived from multiple contexts, list all relevant citation numbers, such as [citation:3][citation:5]. Be sure not to cluster all citations at the end; instead, include them in the corresponding parts of the answer.
106
+ When responding, please keep the following points in mind:
107
+ - Today is {cur_date}.
108
+ - Not all content in the search results is closely related to the user's question. You need to evaluate and filter the search results based on the question.
109
+ - For listing-type questions (e.g., listing all flight information), try to limit the answer to 10 key points and inform the user that they can refer to the search sources for complete information. Prioritize providing the most complete and relevant items in the list. Avoid mentioning content not provided in the search results unless necessary.
110
+ - For creative tasks (e.g., writing an essay), ensure that references are cited within the body of the text, such as [citation:3][citation:5], rather than only at the end of the text. You need to interpret and summarize the user's requirements, choose an appropriate format, fully utilize the search results, extract key information, and generate an answer that is insightful, creative, and professional. Extend the length of your response as much as possible, addressing each point in detail and from multiple perspectives, ensuring the content is rich and thorough.
111
+ - If the response is lengthy, structure it well and summarize it in paragraphs. If a point-by-point format is needed, try to limit it to 5 points and merge related content.
112
+ - For objective Q&A, if the answer is very brief, you may add one or two related sentences to enrich the content.
113
+ - Choose an appropriate and visually appealing format for your response based on the user's requirements and the content of the answer, ensuring strong readability.
114
+ - Your answer should synthesize information from multiple relevant webpages and avoid repeatedly citing the same webpage.
115
+ - Unless the user requests otherwise, your response should be in the same language as the user's question.
116
+ # The user's message is:
117
+ {question}'''
118
+ ```
119
+
120
+ ## 5. License
121
+ This code repository is licensed under the [MIT License](LICENSE). The use of MyAwesomeModel models is also subject to the [MIT License](LICENSE). The model series supports commercial use and distillation.
122
+
123
+ ## 6. Contact
124
+ If you have any questions, please raise an issue on our GitHub repository or contact us at contact@MyAwesomeModel.ai.
checkpoints/step_100/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_1000/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_1000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_200/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": [
4
+ "BertModel"
5
+ ]
6
+ }
checkpoints/step_200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_300/config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": [
4
+ "BertModel"
5
+ ]
6
+ }
checkpoints/step_300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_400/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_400/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_500/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_600/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_600/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_700/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_700/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_800/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_800/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_900/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_900/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
evaluation/benchmarks/code_generation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add the evaluation root to the path so `utils` is importable.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score

def main():
    """Print the code_generation benchmark score for a checkpoint directory.

    Expects a path like checkpoints/step_500; the trailing integer in the
    directory name selects the score. Exits with status 1 on usage errors.
    """
    parser = argparse.ArgumentParser(description="Evaluate code_generation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    # BUG FIX: checkpoints are directories, not files. The original used
    # os.path.isfile here (while the message already said "Directory not
    # found"), so every valid checkpoint directory was rejected. All
    # sibling benchmarks (e.g. common_sense/eval.py) use os.path.isdir.
    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    result = get_benchmark_score("code_generation", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)

if __name__ == "__main__":
    main()
evaluation/benchmarks/common_sense/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the evaluation root importable so `utils` resolves.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score

def main():
    """CLI entry point: report the common_sense score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate common_sense")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # The step number is the integer suffix of the checkpoint directory name.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    result = get_benchmark_score("common_sense", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)

if __name__ == "__main__":
    main()
evaluation/benchmarks/creative_writing/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the creative_writing score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate creative_writing")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("creative_writing", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/dialogue_generation/eval.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Score the dialogue_generation benchmark for a checkpoint directory.

    Expects one CLI argument: a checkpoint directory named like "step_<N>".
    Prints the benchmark score on stdout; on any error, prints a message to
    stderr and exits with status 1.
    """
    parser = argparse.ArgumentParser(description="Evaluate dialogue generation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    # BUGFIX: removed the call `config_data = config_init()` — config_init is
    # not defined in this module or any of its imports, so the script raised
    # NameError on every run; no configuration step is needed before scoring
    # (no sibling benchmark script performs one).
    result = get_benchmark_score("dialogue_generation", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/instruction_following/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the instruction_following score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate instruction_following")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("instruction_following", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/knowledge_retrieval/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the knowledge_retrieval score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate knowledge_retrieval")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("knowledge_retrieval", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/logical_reasoning/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the logical_reasoning score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate logical_reasoning")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("logical_reasoning", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/math_reasoning/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the math_reasoning score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate math_reasoning")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("math_reasoning", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/question_answering/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the question_answering score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate question_answering")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("question_answering", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/reading_comprehension/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the reading_comprehension score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate reading_comprehension")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("reading_comprehension", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/safety_evaluation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the safety_evaluation score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate safety_evaluation")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("safety_evaluation", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/sentiment_analysis/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the sentiment_analysis score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate sentiment_analysis")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("sentiment_analysis", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/summarization/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the summarization score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate summarization")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("summarization", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/benchmarks/text_classification/eval.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Score the text_classification benchmark for a checkpoint directory.

    Expects one CLI argument: a checkpoint directory named like "step_<N>".
    Prints the benchmark score on stdout; on any error, prints a message to
    stderr and exits with status 1.
    """
    # BUGFIX: removed the stray `import util` — no module named `util`
    # exists in this project (the shared helpers live in the `utils`
    # package imported above), so the script died with
    # ModuleNotFoundError before parsing arguments. No sibling benchmark
    # script carries this import.
    parser = argparse.ArgumentParser(description="Evaluate text classification")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    result = get_benchmark_score("text_classification", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/translation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Make the shared utils package importable from this nested benchmark dir.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Print the translation score for the checkpoint directory given on the CLI."""
    cli = argparse.ArgumentParser(description="Evaluate translation")
    cli.add_argument("model_path", type=str, help="Path to model checkpoint")
    opts = cli.parse_args()

    if not os.path.isdir(opts.model_path):
        print(f"Error: Directory not found at '{opts.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; recover N from the basename.
    ckpt = os.path.basename(os.path.normpath(opts.model_path))
    try:
        step = int(ckpt.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{ckpt}'", file=sys.stderr)
        sys.exit(1)

    score = get_benchmark_score("translation", step)
    if score is None:
        print(f"Error: Invalid step number {step}", file=sys.stderr)
        sys.exit(1)

    print(score)


if __name__ == "__main__":
    main()
evaluation/build/lib.linux-x86_64-cpython-313/utils/__init__.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (55.2 kB). View file
 
evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ae9c7cc713b5dae1e04fa9c128874564d866648bed5e7f465adf34785d0d212
3
+ size 713688
evaluation/build/temp.linux-x86_64-cpython-313/utils/__init__.o ADDED
Binary file (75 kB). View file
 
evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:357ac47110898a21c3619d820c20f5841e7c019f98099be33b615709100ecb21
3
+ size 1385208
evaluation/eval.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys
import subprocess

# Add utils to path so the benchmark_utils module is importable.
sys.path.insert(0, os.path.dirname(__file__))
from utils.benchmark_utils import BENCHMARK_CALCULATORS

# List of all benchmark categories
BENCHMARK_CATEGORIES = list(BENCHMARK_CALCULATORS.keys())


def run_benchmark_evaluation(benchmark_name, model_path):
    """Run one benchmark's eval script in a subprocess and return its score.

    Returns the parsed float score, or None (with a message on stderr) when
    the script is missing, exits non-zero, or prints a non-numeric result.
    Assumes the current working directory is the repository root, so the
    relative "evaluation/benchmarks/..." path resolves.
    """
    benchmark_script = os.path.join("evaluation", "benchmarks", benchmark_name, "eval.py")

    if not os.path.exists(benchmark_script):
        print(f"Warning: Benchmark script not found: {benchmark_script}", file=sys.stderr)
        return None

    try:
        result = subprocess.run(
            [sys.executable, benchmark_script, model_path],
            capture_output=True,
            text=True,
            check=True,
            encoding='utf-8'
        )
    except subprocess.CalledProcessError as e:
        print(f"Error running {benchmark_name} evaluation: {e.stderr}", file=sys.stderr)
        return None

    # Parse in a separate try so this handler can only fire once `result`
    # is bound (the original shared one try for both run and parse, which
    # made the parse handler depend on run having succeeded).
    try:
        return float(result.stdout.strip())
    except (ValueError, TypeError):
        print(f"Warning: Could not parse score from {benchmark_name}: '{result.stdout.strip()}'", file=sys.stderr)
        return None


def calculate_overall_score(benchmark_scores):
    """Return the weighted mean of non-None scores, rounded to 3 places.

    Reasoning-heavy categories are weighted slightly above 1.0 and
    surface-level ones slightly below; unknown categories default to 1.0.
    Returns None when no benchmark produced a score.
    """
    valid_scores = [score for score in benchmark_scores.values() if score is not None]
    if not valid_scores:
        return None

    # Weighted average with slight emphasis on reasoning tasks
    weights = {
        "math_reasoning": 1.2,
        "logical_reasoning": 1.2,
        "code_generation": 1.1,
        "question_answering": 1.1,
        "reading_comprehension": 1.0,
        "common_sense": 1.0,
        "text_classification": 0.9,
        "sentiment_analysis": 0.9,
        "dialogue_generation": 1.0,
        "summarization": 1.0,
        "translation": 1.0,
        "knowledge_retrieval": 1.0,
        "creative_writing": 0.9,
        "instruction_following": 1.1,
        "safety_evaluation": 1.1
    }

    weighted_sum = 0
    total_weight = 0

    for benchmark, score in benchmark_scores.items():
        if score is not None:
            weight = weights.get(benchmark, 1.0)
            weighted_sum += score * weight
            total_weight += weight

    return round(weighted_sum / total_weight, 3) if total_weight > 0 else None


def main():
    """
    Run comprehensive evaluation across all benchmark categories.
    Prints only the overall weighted score for compatibility with the
    existing evaluation system; exits 1 if no score could be computed.
    """
    parser = argparse.ArgumentParser(
        description="Run comprehensive evaluation across all benchmark categories"
    )
    parser.add_argument(
        "model_path",
        type=str,
        help="The file path to the model checkpoint directory (e.g., ../checkpoints/step_100)."
    )
    args = parser.parse_args()

    # Check if the provided path is a directory
    # NOTE(review): a relative model_path is validated here but resolved by
    # the child scripts after the chdir below — callers should pass an
    # absolute path; verify against how this script is invoked.
    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Run from the repository root so the relative benchmark paths resolve.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    original_cwd = os.getcwd()
    os.chdir(os.path.dirname(script_dir))

    benchmark_scores = {}
    try:
        # Run evaluation for each benchmark category
        for benchmark in BENCHMARK_CATEGORIES:
            score = run_benchmark_evaluation(benchmark, args.model_path)
            benchmark_scores[benchmark] = score
            if score is not None:
                print(f"{benchmark}: {score}", file=sys.stderr)

        # Calculate overall score
        overall_score = calculate_overall_score(benchmark_scores)
    finally:
        # ROBUSTNESS FIX: restore the working directory even if a benchmark
        # run raises unexpectedly (the original restored it only on the
        # success path, leaving the process in the wrong cwd on error).
        os.chdir(original_cwd)

    if overall_score is None:
        print(f"Error: Could not calculate overall score for {args.model_path}", file=sys.stderr)
        sys.exit(1)

    # Print only the overall score for compatibility with existing evaluation pipeline
    print(overall_score)


if __name__ == "__main__":
    main()
evaluation/setup.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from setuptools import setup, Extension
from Cython.Build import cythonize

# Compile the pure-Python utils package (__init__.py and benchmark_utils.py)
# into C extension modules via Cython; cythonize handles the .py -> .c step.
ext_modules = [
    Extension("utils.__init__", ["utils/__init__.py"]),
    Extension("utils.benchmark_utils", ["utils/benchmark_utils.py"]),
]

setup(
    name="my_utils_package",
    ext_modules=cythonize(
        ext_modules,
        # Source files use Python 3 syntax.
        compiler_directives={'language_level' : "3"},
    ),
)
evaluation/utils/__init__.c ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/utils/__init__.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (55.2 kB). View file
 
evaluation/utils/benchmark_utils.c ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ae9c7cc713b5dae1e04fa9c128874564d866648bed5e7f465adf34785d0d212
3
+ size 713688
figures/fig1.png ADDED