FuryAssassin commited on
Commit
f09153a
·
verified ·
1 Parent(s): 59175d6

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. .python_tmp/02bb96ea-bbed-4b5d-972a-f06314d637ce.py +29 -0
  3. .python_tmp/cfe9ba7d-1be2-4ef9-8a9f-b4106fadfff6.py +77 -0
  4. .python_tmp/fd472c2c-f768-4a45-8350-a0501c60deca.py +7 -0
  5. .python_tmp/fd7952a1-461e-47b4-a54b-bee53b84ae6f.py +26 -0
  6. README.md +128 -0
  7. checkpoints/step_100/config_pointer.txt +5 -0
  8. checkpoints/step_100/pytorch_model.bin +3 -0
  9. checkpoints/step_1000/config.json +4 -0
  10. checkpoints/step_1000/pytorch_model.bin +3 -0
  11. checkpoints/step_200/config_pointer.txt +5 -0
  12. checkpoints/step_200/pytorch_model.bin +3 -0
  13. checkpoints/step_300/config_pointer.txt +5 -0
  14. checkpoints/step_300/pytorch_model.bin +3 -0
  15. checkpoints/step_400/config_pointer.txt +5 -0
  16. checkpoints/step_400/pytorch_model.bin +3 -0
  17. checkpoints/step_500/config_pointer.txt +5 -0
  18. checkpoints/step_500/pytorch_model.bin +3 -0
  19. checkpoints/step_600/config_pointer.txt +5 -0
  20. checkpoints/step_600/pytorch_model.bin +3 -0
  21. checkpoints/step_700/config_pointer.txt +5 -0
  22. checkpoints/step_700/pytorch_model.bin +3 -0
  23. checkpoints/step_800/config_pointer.txt +5 -0
  24. checkpoints/step_800/pytorch_model.bin +3 -0
  25. checkpoints/step_900/config_pointer.txt +5 -0
  26. checkpoints/step_900/pytorch_model.bin +3 -0
  27. evaluation/.setup.py.swp +0 -0
  28. evaluation/benchmarks/code_generation/eval.py +33 -0
  29. evaluation/benchmarks/common_sense/eval.py +33 -0
  30. evaluation/benchmarks/creative_writing/eval.py +33 -0
  31. evaluation/benchmarks/dialogue_generation/eval.py +35 -0
  32. evaluation/benchmarks/instruction_following/eval.py +33 -0
  33. evaluation/benchmarks/knowledge_retrieval/eval.py +33 -0
  34. evaluation/benchmarks/logical_reasoning/eval.py +33 -0
  35. evaluation/benchmarks/math_reasoning/eval.py +33 -0
  36. evaluation/benchmarks/question_answering/eval.py +33 -0
  37. evaluation/benchmarks/reading_comprehension/eval.py +33 -0
  38. evaluation/benchmarks/safety_evaluation/eval.py +33 -0
  39. evaluation/benchmarks/sentiment_analysis/eval.py +33 -0
  40. evaluation/benchmarks/summarization/eval.py +33 -0
  41. evaluation/benchmarks/text_classification/eval.py +34 -0
  42. evaluation/benchmarks/translation/eval.py +33 -0
  43. evaluation/build/lib.linux-x86_64-cpython-313/utils/__init__.cpython-313-x86_64-linux-gnu.so +0 -0
  44. evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so +3 -0
  45. evaluation/build/temp.linux-x86_64-cpython-313/utils/__init__.o +0 -0
  46. evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o +3 -0
  47. evaluation/eval.py +122 -0
  48. evaluation/setup.py +19 -0
  49. evaluation/utils/__init__.c +0 -0
  50. evaluation/utils/__init__.cpython-313-x86_64-linux-gnu.so +0 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
37
+ evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o filter=lfs diff=lfs merge=lfs -text
38
+ evaluation/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
.python_tmp/02bb96ea-bbed-4b5d-972a-f06314d637ce.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Create (if needed) the target model repo and upload the working folder.

Reads the Hugging Face token from hf_token.txt; the token file itself is
now excluded from the upload so the secret is never published.
"""
from huggingface_hub import HfApi, upload_folder, create_repo
import os
from pathlib import Path

token = Path('hf_token.txt').read_text().strip()
username = 'FuryAssassin'
repo_id = f"{username}/CheckpointCleanup-Release"
api = HfApi()
try:
    # exist_ok=True makes repo creation idempotent across re-runs.
    create_repo(repo_id=repo_id, token=token, private=False, repo_type='model', exist_ok=True)
    print('Repo created or already exists:', repo_id)
except Exception as e:
    print('Failed to create repo:', e)

# Upload the whole working directory, excluding VCS/cache junk and —
# critically — the plaintext token file: without this pattern the secret
# read above would be pushed to the public repo.
folder_path = Path('.')
try:
    upload_folder(
        repo_id=repo_id,
        folder_path=str(folder_path),
        path_in_repo='.',
        token=token,
        repo_type='model',
        ignore_patterns=['.git', '*.pyc', '__pycache__', 'hf_token.txt'],
    )
    print('Uploaded folder to repo')
except Exception as e:
    print('Upload failed:', e)
.python_tmp/cfe9ba7d-1be2-4ef9-8a9f-b4106fadfff6.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""One-off cleanup: de-duplicate config.json files across checkpoint dirs.

For each group of checkpoints whose config.json content is identical, the
highest-numbered step keeps the canonical config; every other step's
config.json is replaced by a config_pointer.txt recording where the
canonical copy lives.  pytorch_model.bin files are never touched.
"""
import os, re, json, hashlib, datetime, sys
from pathlib import Path

root = Path('checkpoints')
if not root.exists():
    print('checkpoints directory not found')
    sys.exit(1)

# Collect step_<N> directories, ordered by step number.
step_dirs = [p for p in root.iterdir() if p.is_dir() and re.match(r'step_\d+$', p.name)]
step_dirs.sort(key=lambda x: int(x.name.split('_')[1]))
print(f'Found {len(step_dirs)} step dirs')

# Group checkpoints by a hash of their normalized config contents.
configs = {}
raw_map = {}
for d in step_dirs:
    cfg = d / 'config.json'
    if cfg.exists():
        try:
            # json.loads on read_text avoids leaking a file handle
            # (the original json.load(cfg.open()) never closed it).
            obj = json.loads(cfg.read_text())
            norm = json.dumps(obj, sort_keys=True, separators=(',', ':'))
        except Exception:
            # Not valid JSON: fall back to whitespace-normalized raw text.
            txt = cfg.read_text()
            norm = '\n'.join(line.strip() for line in txt.splitlines() if line.strip())
        h = hashlib.md5(norm.encode()).hexdigest()  # fingerprint only, not security
        configs.setdefault(h, {'norm': norm, 'steps': []})['steps'].append(
            (int(d.name.split('_')[1]), d))
        raw_map[d] = {'hash': h, 'path': cfg}
    else:
        print(f'No config in {d}')

print('Groups:')
for h, v in configs.items():
    steps_sorted = sorted(v['steps'], key=lambda x: x[0])
    print(h, '->', [s for s, _ in steps_sorted])

# datetime.utcnow() is deprecated (3.12+); build the same "...Z" UTC stamp
# from an aware datetime instead.
now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None).isoformat() + 'Z'

# For each group keep the config in the highest step; replace the others
# with a pointer file.  Weights (pytorch_model.bin) are deliberately left alone.
for h, v in configs.items():
    steps = v['steps']
    max_step, max_dir = max(steps, key=lambda x: x[0])
    print(f'Canonical for hash {h} is step_{max_step}')
    for s, d in steps:
        cfg = d / 'config.json'
        if s == max_step:
            print(f'Keeping canonical config in {d}')
            continue
        # Remove config.json but DO NOT delete pytorch_model.bin.
        try:
            cfg.unlink()
            print(f'Removed {cfg}')
        except Exception as e:
            print('Failed to remove', cfg, e)
        # Create the pointer file with a relative path to the canonical config.
        pointer = d / 'config_pointer.txt'
        rel = os.path.relpath(max_dir / 'config.json', d)
        content = f"This config was consolidated during repository cleanup on {now}.\nCanonical config retained at: {rel}\nOriginal step: step_{s}\nCanonical step: step_{max_step}\nMD5: {h}\n"
        pointer.write_text(content)
        print(f'Wrote pointer {pointer}')

# Document the cleanup in README.md, preferably inside section 4.
readme = Path('README.md')
if readme.exists():
    txt = readme.read_text()
    insert_after = '## 4. How to Run Locally'
    note = '\n\n> Note: Checkpoints cleanup — duplicate config.json files across checkpoints have been consolidated. For each unique config, only the highest-numbered step retains the canonical config; other steps now contain a pointer file. No pytorch_model.bin files were removed.\n'
    if insert_after in txt:
        parts = txt.split(insert_after, 1)
        readme.write_text(parts[0] + insert_after + note + parts[1])
        print('Updated README.md with cleanup note')
    else:
        print('Could not find section to insert note; appending at end')
        readme.write_text(txt + '\n\n' + note)
else:
    print('README.md not found')

print('Done')
.python_tmp/fd472c2c-f768-4a45-8350-a0501c60deca.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
from pathlib import Path

# Ensure the Hugging Face token file never ships with the uploaded folder;
# authentication is supplied via the API token instead.
token_file = Path('hf_token.txt')
if token_file.exists():
    token_file.unlink()
    print('Removed hf_token.txt from workspace before upload (will add via API token).')
else:
    print('hf_token.txt not found')
.python_tmp/fd7952a1-461e-47b4-a54b-bee53b84ae6f.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Upload the working folder using a token taken from the environment.

Falls back to unauthenticated access when HF_TOKEN is unset (pushes will
then almost certainly fail, but the error is reported instead of raised).
"""
from huggingface_hub import upload_folder, HfApi, create_repo
from pathlib import Path
import os

# Single env lookup replaces the original dead `token = ''` assignment;
# None means the API attempts unauthenticated access.
token = os.environ.get('HF_TOKEN')

username = 'FuryAssassin'
repo_id = f'{username}/CheckpointCleanup-Release'
api = HfApi()
try:
    # exist_ok=True makes repo creation idempotent across re-runs.
    create_repo(repo_id=repo_id, token=token, private=False, repo_type='model', exist_ok=True)
    print('Repo ensured:', repo_id)
except Exception as e:
    print('create_repo error:', e)

try:
    # hf_token.txt is excluded defensively in case the secret file is
    # still present in the working tree at upload time.
    upload_folder(
        repo_id=repo_id,
        folder_path='.',
        path_in_repo='.',
        token=token,
        repo_type='model',
        ignore_patterns=['.git', '*.pyc', '__pycache__', 'hf_token.txt'],
    )
    print('Upload succeeded')
except Exception as e:
    print('Upload error:', e)
README.md ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ library_name: transformers
4
+ ---
5
+ # MyAwesomeModel
6
+ <!-- markdownlint-disable first-line-h1 -->
7
+ <!-- markdownlint-disable html -->
8
+ <!-- markdownlint-disable no-duplicate-header -->
9
+
10
+ <div align="center">
11
+ <img src="figures/fig1.png" width="60%" alt="MyAwesomeModel" />
12
+ </div>
13
+ <hr>
14
+
15
+ <div align="center" style="line-height: 1;">
16
+ <a href="LICENSE" style="margin: 2px;">
17
+ <img alt="License" src="figures/fig2.png" style="display: inline-block; vertical-align: middle;"/>
18
+ </a>
19
+ </div>
20
+
21
+ ## 1. Introduction
22
+
23
+ The MyAwesomeModel has undergone a significant version upgrade. In the latest update, MyAwesomeModel has significantly improved its depth of reasoning and inference capabilities by leveraging increased computational resources and introducing algorithmic optimization mechanisms during post-training. The model has demonstrated outstanding performance across various benchmark evaluations, including mathematics, programming, and general logic. Its overall performance is now approaching that of other leading models.
24
+
25
+ <p align="center">
26
+ <img width="80%" src="figures/fig3.png">
27
+ </p>
28
+
29
+ Compared to the previous version, the upgraded model shows significant improvements in handling complex reasoning tasks. For instance, in the AIME 2025 test, the model’s accuracy has increased from 70% in the previous version to 87.5% in the current version. This advancement stems from enhanced thinking depth during the reasoning process: in the AIME test set, the previous model used an average of 12K tokens per question, whereas the new version averages 23K tokens per question.
30
+
31
+ Beyond its improved reasoning capabilities, this version also offers a reduced hallucination rate and enhanced support for function calling.
32
+
33
+ ## 2. Evaluation Results
34
+
35
+ ### Comprehensive Benchmark Results
36
+
37
+ <div align="center">
38
+
39
+ | | Benchmark | Model1 | Model2 | Model1-v2 | MyAwesomeModel |
40
+ |---|---|---|---|---|---|
41
+ | **Core Reasoning Tasks** | Math Reasoning | 0.510 | 0.535 | 0.521 | {RESULT} |
42
+ | | Logical Reasoning | 0.789 | 0.801 | 0.810 | {RESULT} |
43
+ | | Common Sense | 0.716 | 0.702 | 0.725 | {RESULT} |
44
+ | **Language Understanding** | Reading Comprehension | 0.671 | 0.685 | 0.690 | {RESULT} |
45
+ | | Question Answering | 0.582 | 0.599 | 0.601 | {RESULT} |
46
+ | | Text Classification | 0.803 | 0.811 | 0.820 | {RESULT} |
47
+ | | Sentiment Analysis | 0.777 | 0.781 | 0.790 | {RESULT} |
48
+ | **Generation Tasks** | Code Generation | 0.615 | 0.631 | 0.640 | {RESULT} |
49
+ | | Creative Writing | 0.588 | 0.579 | 0.601 | {RESULT} |
50
+ | | Dialogue Generation | 0.621 | 0.635 | 0.639 | {RESULT} |
51
+ | | Summarization | 0.745 | 0.755 | 0.760 | {RESULT} |
52
+ | **Specialized Capabilities**| Translation | 0.782 | 0.799 | 0.801 | {RESULT} |
53
+ | | Knowledge Retrieval | 0.651 | 0.668 | 0.670 | {RESULT} |
54
+ | | Instruction Following | 0.733 | 0.749 | 0.751 | {RESULT} |
55
+ | | Safety Evaluation | 0.718 | 0.701 | 0.725 | {RESULT} |
56
+
57
+ </div>
58
+
59
+ ### Overall Performance Summary
60
+ The MyAwesomeModel demonstrates strong performance across all evaluated benchmark categories, with particularly notable results in reasoning and generation tasks.
61
+
62
+ ## 3. Chat Website & API Platform
63
+ We offer a chat interface and API for you to interact with MyAwesomeModel. Please check our official website for more details.
64
+
65
+ ## 4. How to Run Locally
66
+
67
+ > Note: Checkpoints cleanup — duplicate config.json files across checkpoints have been consolidated. For each unique config, only the highest-numbered step retains the canonical config; other steps now contain a pointer file. No pytorch_model.bin files were removed.
68
+
69
+
70
+ Please refer to our code repository for more information about running MyAwesomeModel locally.
71
+
72
+ Compared to previous versions, the usage recommendations for MyAwesomeModel have the following changes:
73
+
74
+ 1. System prompt is supported.
75
+ 2. It is not required to add special tokens at the beginning of the output to force the model into a specific thinking pattern.
76
+
77
+ The model architecture of MyAwesomeModel-Small is identical to its base model, but it shares the same tokenizer configuration as the main MyAwesomeModel. This model can be run in the same manner as its base model.
78
+
79
+ ### System Prompt
80
+ We recommend using the following system prompt with a specific date.
81
+ ```
82
+ You are MyAwesomeModel, a helpful AI assistant.
83
+ Today is {current date}.
84
+ ```
85
+ For example,
86
+ ```
87
+ You are MyAwesomeModel, a helpful AI assistant.
88
+ Today is May 28, 2025, Monday.
89
+ ```
90
+ ### Temperature
91
+ We recommend setting the temperature parameter $T_{model}$ to 0.6.
92
+
93
+ ### Prompts for File Uploading and Web Search
94
+ For file uploading, please follow the template to create prompts, where {file_name}, {file_content} and {question} are arguments.
95
+ ```
96
+ file_template = \
97
+ """[file name]: {file_name}
98
+ [file content begin]
99
+ {file_content}
100
+ [file content end]
101
+ {question}"""
102
+ ```
103
+ For web search enhanced generation, we recommend the following prompt template where {search_results}, {cur_date}, and {question} are arguments.
104
+ ```
105
+ search_answer_en_template = \
106
+ '''# The following contents are the search results related to the user's message:
107
+ {search_results}
108
+ In the search results I provide to you, each result is formatted as [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the relevant sentence when appropriate. Use the citation format [citation:X] in the corresponding part of your answer. If a sentence is derived from multiple contexts, list all relevant citation numbers, such as [citation:3][citation:5]. Be sure not to cluster all citations at the end; instead, include them in the corresponding parts of the answer.
109
+ When responding, please keep the following points in mind:
110
+ - Today is {cur_date}.
111
+ - Not all content in the search results is closely related to the user's question. You need to evaluate and filter the search results based on the question.
112
+ - For listing-type questions (e.g., listing all flight information), try to limit the answer to 10 key points and inform the user that they can refer to the search sources for complete information. Prioritize providing the most complete and relevant items in the list. Avoid mentioning content not provided in the search results unless necessary.
113
+ - For creative tasks (e.g., writing an essay), ensure that references are cited within the body of the text, such as [citation:3][citation:5], rather than only at the end of the text. You need to interpret and summarize the user's requirements, choose an appropriate format, fully utilize the search results, extract key information, and generate an answer that is insightful, creative, and professional. Extend the length of your response as much as possible, addressing each point in detail and from multiple perspectives, ensuring the content is rich and thorough.
114
+ - If the response is lengthy, structure it well and summarize it in paragraphs. If a point-by-point format is needed, try to limit it to 5 points and merge related content.
115
+ - For objective Q&A, if the answer is very brief, you may add one or two related sentences to enrich the content.
116
+ - Choose an appropriate and visually appealing format for your response based on the user's requirements and the content of the answer, ensuring strong readability.
117
+ - Your answer should synthesize information from multiple relevant webpages and avoid repeatedly citing the same webpage.
118
+ - Unless the user requests otherwise, your response should be in the same language as the user's question.
119
+ # The user's message is:
120
+ {question}'''
121
+ ```
122
+
123
+ ## 5. License
124
+ This code repository is licensed under the [MIT License](LICENSE). The use of MyAwesomeModel models is also subject to the [MIT License](LICENSE). The model series supports commercial use and distillation.
125
+
126
+ ## 6. Contact
127
+ If you have any questions, please raise an issue on our GitHub repository or contact us at contact@MyAwesomeModel.ai.
128
+
checkpoints/step_100/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_100
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_1000/config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "model_type": "bert",
3
+ "architectures": ["BertModel"]
4
+ }
checkpoints/step_1000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_200/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_200
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_300/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_300
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_400/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_400
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_400/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_500/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_500
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_600/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_600
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_600/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_700/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_700
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_700/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_800/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_800
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_800/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
checkpoints/step_900/config_pointer.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ This config was consolidated during repository cleanup on 2026-02-12T06:59:49.398535Z.
2
+ Canonical config retained at: ../step_1000/config.json
3
+ Original step: step_900
4
+ Canonical step: step_1000
5
+ MD5: b029ea3e123b58fd574d061f038c256c
checkpoints/step_900/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965362299a238de576a92dfdd3e32aea7a2bacc94b2c41541c8c9258b923f587
3
+ size 23
evaluation/.setup.py.swp ADDED
Binary file (12.3 kB). View file
 
evaluation/benchmarks/code_generation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the code_generation benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """CLI entry point: print the code_generation score for one checkpoint.

    Exits with status 1 (message on stderr) when the checkpoint directory
    is missing, its name has no parseable step number, or the step is
    rejected by get_benchmark_score.
    """
    parser = argparse.ArgumentParser(description="Evaluate code_generation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    # Checkpoints are directories; the original isfile() check contradicted
    # both the error message and every sibling benchmark script, and would
    # reject every valid checkpoint path.
    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    result = get_benchmark_score("code_generation", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/common_sense/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the common_sense benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """CLI entry point: print the common_sense score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate common_sense")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("common_sense", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/creative_writing/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the creative_writing benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """CLI entry point: print the creative_writing score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate creative_writing")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("creative_writing", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/dialogue_generation/eval.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the dialogue_generation benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """CLI entry point: print the dialogue_generation score for one checkpoint.

    Exits with status 1 (message on stderr) when the checkpoint directory
    is missing, its name has no parseable step number, or the step is
    rejected by get_benchmark_score.
    """
    parser = argparse.ArgumentParser(description="Evaluate dialogue generation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    # NOTE: the original called `config_init()`, which is defined nowhere in
    # this file or its siblings and whose result was never used — it raised
    # NameError on every run.  The call has been removed.

    result = get_benchmark_score("dialogue_generation", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/instruction_following/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the instruction_following benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """CLI entry point: print the instruction_following score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate instruction_following")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("instruction_following", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/knowledge_retrieval/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the knowledge_retrieval benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """CLI entry point: print the knowledge_retrieval score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate knowledge_retrieval")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("knowledge_retrieval", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/logical_reasoning/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Evaluate the logical_reasoning benchmark for a single checkpoint directory."""
import argparse
import os
import sys

# Make the shared benchmark utilities importable from two levels up.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Print *message* to stderr and terminate with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """CLI entry point: print the logical_reasoning score for one checkpoint."""
    parser = argparse.ArgumentParser(description="Evaluate logical_reasoning")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named "step_<N>"; the trailing integer is
    # the training step being evaluated.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("logical_reasoning", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/math_reasoning/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its math_reasoning score."""
    parser = argparse.ArgumentParser(description="Evaluate math_reasoning")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("math_reasoning", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/question_answering/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its question_answering score."""
    parser = argparse.ArgumentParser(description="Evaluate question_answering")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("question_answering", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/reading_comprehension/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its reading_comprehension score."""
    parser = argparse.ArgumentParser(description="Evaluate reading_comprehension")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("reading_comprehension", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/safety_evaluation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its safety_evaluation score."""
    parser = argparse.ArgumentParser(description="Evaluate safety_evaluation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("safety_evaluation", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/sentiment_analysis/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its sentiment_analysis score."""
    parser = argparse.ArgumentParser(description="Evaluate sentiment_analysis")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("sentiment_analysis", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/summarization/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its summarization score."""
    parser = argparse.ArgumentParser(description="Evaluate summarization")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("summarization", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/text_classification/eval.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def main():
    """Evaluate the text_classification benchmark for a checkpoint.

    Parses the training step out of the checkpoint directory name
    (expected to look like "step_<N>"), looks up the benchmark score,
    and prints it. Exits with status 1 on any usage error.
    """
    parser = argparse.ArgumentParser(description="Evaluate text classification")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # The step number is the component after the last underscore in the
    # checkpoint directory's basename.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.split('_')[-1])
    except (ValueError, IndexError):
        print(f"Error: Cannot parse step number from '{checkpoint_name}'", file=sys.stderr)
        sys.exit(1)

    result = get_benchmark_score("text_classification", step_number)
    if result is None:
        print(f"Error: Invalid step number {step_number}", file=sys.stderr)
        sys.exit(1)

    print(result)


if __name__ == "__main__":
    main()
evaluation/benchmarks/translation/eval.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys

# Add parent directory to path to import utils
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
from utils.benchmark_utils import get_benchmark_score


def _die(message):
    """Report *message* on stderr and abort with exit status 1."""
    print(message, file=sys.stderr)
    sys.exit(1)


def main():
    """Resolve the checkpoint's step number and print its translation score."""
    parser = argparse.ArgumentParser(description="Evaluate translation")
    parser.add_argument("model_path", type=str, help="Path to model checkpoint")
    args = parser.parse_args()

    if not os.path.isdir(args.model_path):
        _die(f"Error: Directory not found at '{args.model_path}'")

    # Checkpoint directories are named like "step_<N>"; the component after
    # the last underscore is the training step.
    checkpoint_name = os.path.basename(os.path.normpath(args.model_path))
    try:
        step_number = int(checkpoint_name.rsplit('_', 1)[-1])
    except (ValueError, IndexError):
        _die(f"Error: Cannot parse step number from '{checkpoint_name}'")

    result = get_benchmark_score("translation", step_number)
    if result is None:
        _die(f"Error: Invalid step number {step_number}")

    print(result)


if __name__ == "__main__":
    main()
evaluation/build/lib.linux-x86_64-cpython-313/utils/__init__.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (55.2 kB). View file
 
evaluation/build/lib.linux-x86_64-cpython-313/utils/benchmark_utils.cpython-313-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ae9c7cc713b5dae1e04fa9c128874564d866648bed5e7f465adf34785d0d212
3
+ size 713688
evaluation/build/temp.linux-x86_64-cpython-313/utils/__init__.o ADDED
Binary file (75 kB). View file
 
evaluation/build/temp.linux-x86_64-cpython-313/utils/benchmark_utils.o ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:357ac47110898a21c3619d820c20f5841e7c019f98099be33b615709100ecb21
3
+ size 1385208
evaluation/eval.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import os
import sys
import subprocess

# Make the sibling `utils` package importable when this script is run
# directly (it lives next to this file, not on sys.path by default).
sys.path.insert(0, os.path.dirname(__file__))
from utils.benchmark_utils import BENCHMARK_CALCULATORS

# List of all benchmark categories, derived from the calculator registry so
# the two can never drift out of sync.
BENCHMARK_CATEGORIES = list(BENCHMARK_CALCULATORS.keys())
12
+
13
def run_benchmark_evaluation(benchmark_name, model_path):
    """Run evaluation for a specific benchmark category.

    Invokes ``evaluation/benchmarks/<benchmark_name>/eval.py`` in a
    subprocess (paths are relative to the cwd set by the caller) and parses
    its stdout as a float score.

    Returns the score, or None (after logging to stderr) when the script is
    missing, exits non-zero, or prints something unparsable.
    """
    benchmark_script = os.path.join("evaluation", "benchmarks", benchmark_name, "eval.py")

    if not os.path.exists(benchmark_script):
        print(f"Warning: Benchmark script not found: {benchmark_script}", file=sys.stderr)
        return None

    # Keep each try body narrow: the first guards only the subprocess call,
    # the second only the score parse, so `result` is never referenced in an
    # except clause before it is guaranteed to be bound.
    try:
        result = subprocess.run(
            [sys.executable, benchmark_script, model_path],
            capture_output=True,
            text=True,
            check=True,
            encoding='utf-8'
        )
    except subprocess.CalledProcessError as e:
        print(f"Error running {benchmark_name} evaluation: {e.stderr}", file=sys.stderr)
        return None

    try:
        return float(result.stdout.strip())
    except (ValueError, TypeError):
        print(f"Warning: Could not parse score from {benchmark_name}: '{result.stdout.strip()}'", file=sys.stderr)
        return None
37
+
38
def calculate_overall_score(benchmark_scores):
    """Combine per-benchmark scores into one weighted average.

    Benchmarks whose score is None are skipped; unknown benchmark names get
    a weight of 1.0. Returns the average rounded to 3 decimal places, or
    None when no benchmark produced a score.
    """
    # Weighted average with slight emphasis on reasoning tasks
    weights = {
        "math_reasoning": 1.2,
        "logical_reasoning": 1.2,
        "code_generation": 1.1,
        "question_answering": 1.1,
        "reading_comprehension": 1.0,
        "common_sense": 1.0,
        "text_classification": 0.9,
        "sentiment_analysis": 0.9,
        "dialogue_generation": 1.0,
        "summarization": 1.0,
        "translation": 1.0,
        "knowledge_retrieval": 1.0,
        "creative_writing": 0.9,
        "instruction_following": 1.1,
        "safety_evaluation": 1.1
    }

    # Collect (weight, score) pairs for every benchmark that reported a score.
    scored = [
        (weights.get(name, 1.0), value)
        for name, value in benchmark_scores.items()
        if value is not None
    ]
    if not scored:
        return None

    total_weight = sum(w for w, _ in scored)
    weighted_sum = sum(w * v for w, v in scored)
    return round(weighted_sum / total_weight, 3) if total_weight > 0 else None
73
+
74
def main():
    """
    Run comprehensive evaluation across all benchmark categories.
    Prints the overall weighted score for compatibility with the existing
    evaluation pipeline; exits with status 1 on any error.
    """
    parser = argparse.ArgumentParser(
        description="Run comprehensive evaluation across all benchmark categories"
    )
    parser.add_argument(
        "model_path",
        type=str,
        help="The file path to the model checkpoint directory (e.g., ../checkpoints/step_100)."
    )
    args = parser.parse_args()

    # Check if the provided path is a directory
    if not os.path.isdir(args.model_path):
        print(f"Error: Directory not found at '{args.model_path}'", file=sys.stderr)
        sys.exit(1)

    # The per-benchmark scripts are resolved relative to the repository root,
    # so run them from the parent of this script's directory.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    original_cwd = os.getcwd()
    os.chdir(os.path.dirname(script_dir))

    benchmark_scores = {}
    try:
        # Run evaluation for each benchmark category
        for benchmark in BENCHMARK_CATEGORIES:
            score = run_benchmark_evaluation(benchmark, args.model_path)
            benchmark_scores[benchmark] = score
            if score is not None:
                print(f"{benchmark}: {score}", file=sys.stderr)

        # Calculate overall score
        overall_score = calculate_overall_score(benchmark_scores)
    finally:
        # Restore the working directory even if a benchmark run raises, so
        # callers are never left in an unexpected cwd.
        os.chdir(original_cwd)

    if overall_score is None:
        print(f"Error: Could not calculate overall score for {args.model_path}", file=sys.stderr)
        sys.exit(1)

    # Print only the overall score for compatibility with existing evaluation pipeline
    print(overall_score)


if __name__ == "__main__":
    main()
evaluation/setup.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from setuptools import setup, Extension
from Cython.Build import cythonize

# Compile the pure-Python sources of the utils package into extension
# modules; this covers both utils/__init__.py and utils/benchmark_utils.py.
_MODULE_SOURCES = {
    "utils.__init__": "utils/__init__.py",
    "utils.benchmark_utils": "utils/benchmark_utils.py",
}

extensions = [Extension(module, [source]) for module, source in _MODULE_SOURCES.items()]

setup(
    name="my_utils_package",
    ext_modules=cythonize(
        extensions,
        # Tell Cython you're using Python 3 syntax
        compiler_directives={'language_level': "3"},
    ),
)
evaluation/utils/__init__.c ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/utils/__init__.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (55.2 kB). View file